Columns:
  query             string   lengths 9 to 9.05k
  document          string   lengths 10 to 222k
  metadata          dict
  negatives         list     30 items
  negative_scores   list     30 items
  document_score    string   lengths 4 to 10
  document_rank     string   2 distinct values
Returns the parity of the permutation
>>> parity((3, 4))
1
>>> parity((2,1))
1
>>> parity((1, 2, 6, 5))
1
def parity(p):
    f = dict(zip(sorted(p), p))
    seen, neven = set(), 0
    for x in p:
        if x in seen:
            continue
        c, l = x, 0
        while c not in seen:
            seen.add(c)
            l += 1
            c = f[c]
        neven += (l - 1) % 2
    return 1 if neven % 2 == 0 else -1
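A minimal usage sketch of the parity() document above, assuming the definition is in scope; the sample permutations are illustrative. The function reads the tuple as the mapping sorted(p) -> p and flips the sign once for every even-length cycle.

assert parity((3, 4)) == 1      # identity on {3, 4}: no even-length cycles
assert parity((2, 3, 1)) == 1   # a single 3-cycle is an even permutation
assert parity((1, 3, 2)) == -1  # a single transposition is an odd permutation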
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __parity_of_permutation(cls, lst: Iterable) -> int:\n\t\tparity = 1\n\t\tlst = list(lst)\n\t\tfor i in range(0, len(lst) - 1):\n\t\t\tif lst[i] != i:\n\t\t\t\tparity *= -1\n\t\t\t\tmn = SquareMatrix.__idx_of_minimum(lst[i:]) + i\n\t\t\t\t\n\t\t\t\tlst[i], lst[mn] = lst[mn], lst[i]\n\t\treturn parity", "def parity(n):\n if n%2==0:\n p=1\n else:\n p=-1\n return p", "def parity(self):\n if self._cyclic_form is not None:\n return (self.size - len(self._cyclic_form)) % 2\n\n return perm_af_parity(self.array_form)", "def parity(it):\n \n return sum(it)%2", "def parity(n):\n # bin(n) returns 0b.... \n # bin(n)[2:] trims \"ob\"\n return sum(int(x) for x in bin(n)[2:]) % 2", "def perm_af_parity(pi):\n n = len(pi)\n a = [0] * n\n c = 0\n for j in xrange(n):\n if a[j] == 0:\n c += 1\n a[j] = 1\n i = j\n while pi[i] != j:\n i = pi[i]\n a[i] = 1\n return (n - c) % 2", "def parity_odd(x):\r\n\t\t\tx = x ^ (x >> 4)\r\n\t\t\tx = x ^ (x >> 2)\r\n\t\t\tx = x ^ (x >> 1)\r\n\t\t\treturn x & 1", "def parity(x):\n\n res = 0\n while x:\n # XOR flips last bit\n res ^= 1\n # x & (x - 1) removes lowest set bit\n x &= x - 1\n return bool(res)", "def calc_parity_vector(parity_vector):\r\n return reduce(lambda x, y: x ^ y, parity_vector[1:])", "def is_odd_parity(n=None):\n\tn=int(n,16)\n\tc = 0\n\twhile n:\n\t\tc += 1\n\t\tn &= n - 1\n\tif c%2: \n\t\treturn True\n\telse:\n\t\treturn False", "def parityLabel(parity):\n try:\n parity = int(parity)\n except Exception:\n return parity\n\n if parity % 2:\n return 'odd'\n else:\n return 'even'", "def parity_player(self, board):\n valid_moves = self.game.find_valid_moves(self.computer_color, board, self.board_size)\n rows, columns = np.where(valid_moves == 1)\n max_parity = -200\n location = (-2, -2)\n for i in range(len(rows)):\n temp_board = np.copy(board)\n temp_board = self.game.flip_opponent_stones((rows[i], columns[i]), temp_board, self.board_size,\n self.computer_num, self.opponent_num)\n parity_value = self.stone_parity(temp_board)\n if parity_value > max_parity:\n max_parity = parity_value\n location = (rows[i], columns[i])\n\n return location", "def parity64(x):\n\n x ^= x >> 32\n x ^= x >> 16\n x ^= x >> 8\n x ^= x >> 4\n x ^= x >> 2\n x ^= x >> 1\n return bool(x & 1)", "def xor_columns(col, parity):\n result = []\n for i in range(len(col)):\n result.append(col[i] ^ parity[i])\n return result", "def gen_parity_oracle():\n secret = base64.b64decode(\"VGhhdCdzIHdoeSBJIGZvdW5kIHlvdSBkb24ndCBwbGF5IGF\"\n \"yb3VuZCB3aXRoIHRoZSBGdW5reSBDb2xkIE1lZGluYQ==\")\n r = rsa.Rsa(e=3, bits=1024)\n ciphertext = r.encrypt_bytes(secret)\n\n def parity_oracle(c):\n p = r.decrypt_bytes(c)\n return p[-1] % 2 == 0\n\n return parity_oracle, ciphertext, r.public_key()", "def parity_check_matrix(self):\n F = self.base_ring()\n zero = F.zero()\n one = F.one()\n H = self.original_code().parity_check_matrix()\n nr, nc = H.nrows(), H.ncols()\n Hlist = H.list()\n v = matrix(F, nr + 1, 1, [one] + [zero] * nr)\n return matrix(F, nr + 1, nc, [one] * nc + Hlist).augment(v)", "def parity_of_very_long(x, word_size=8):\n res = 0\n hash_map = {}\n while x!=0:\n word = x & ( (1<<word_size)-1)\n if not(word in hash_map):\n hash_map[word] = parityOf(word)\n res ^= hash_map[word]\n x >>= word_size\n print(hash_map)\n return res", "def nextPermutation(self, nums: List[int]) -> None:\n pass", "def _pfunc(i,j,perm):\n if perm[i-1] == j:\n return 1\n else:\n return 0", "def fit_dparity(network):\n\n # Create random vectors of zeros and ones\n X = 1*(np.random.normal(size=(samples,num_inputs)) > 0)\n # 
Compute parity of each row of X\n Y = X.sum(axis=1)%2\n\n outputs = np.zeros(samples)\n for i in range(samples):\n outputs[i] = network.activate(X[i])\n\n fitness = samples - np.sum((Y - outputs)**2)\n return fitness", "def parity_check_matrix(self):\n\n generator_mat = self.get_generator_matrix()\n\n # Initialize empty parity check matrix\n self.parity_check = np.zeros((self.n, self.n-self.k), dtype=int)\n # Use A_matrix of the generator matrix to construct first part\n self.parity_check[:self.k, :] = generator_mat[:, self.k:]\n # Add the identity matrix to the second part\n self.parity_check[self.k:, :] = np.identity(self.n-self.k, dtype=int)\n\n\n# for i in range(self.n):\n# print(self.parity_check[i,:])\n\n return self.parity_check", "def single_number(nums):\n i = 0\n for num in nums:\n i ^= num\n return i", "def _parity_set(index):\n indices = set()\n\n # For bit manipulation we need to count from 1 rather than 0\n index += 1\n\n while index > 0:\n indices.add(index - 1)\n # Remove least significant one from index\n # E.g. 00010100 -> 00010000\n index &= index - 1\n return indices", "def stone_parity(self, board):\n computer_score = sum(sum(board == self.computer_num))\n opponent_score = sum(sum(board == self.opponent_num))\n return 100 * (computer_score - opponent_score) / (computer_score + opponent_score)", "def __call__(self, *args, **kwargs):\n if not (len(args)==1 and not bool(kwargs)):\n raise self.ParityNException('only one argument is required')\n num = args[0]\n if not (isinstance(num, int) and 0 <= num < self.bound):\n raise self.ParityNException('num should be an integer within [0,{})'.format(self.bound))\n vec_num = np.zeros(self.N, dtype=np.bool)\n i = self.N - 1\n while num > 0:\n vec_num[i] = num%2\n num, i = num//2, i-1\n return vec_num, vec_num.sum()%2==0", "def nextPermutation(self, nums: List[int]) -> None:\n #m1 1/25/2021\n def reverse(low: int, high: int):\n i, j = low, high\n while i < j:\n nums[i], nums[j] = nums[j], nums[i]\n i += 1\n j -= 1\n\n size = len(nums)\n if size < 2:\n return\n found = False\n i = size - 2\n while i >= 0:\n j = size - 1\n while j > i:\n if nums[j] > nums[i]:\n nums[j],nums[i] = nums[i], nums[j]\n reverse(i + 1, size - 1)\n return\n j -= 1\n i -= 1\n i, j = 0, size - 1\n while i < j:\n nums[i], nums[j] = nums[j], nums[i]\n i += 1\n j -= 1", "def xor(it):\n return 0 if it[0]==it[1] else 1", "def nextPermutation(self, nums: List[int]) -> None:\n\n def reverse(i, j, nums):\n while i < j:\n nums[i], nums[j] = nums[j], nums[i]\n i = i + 1\n j = j - 1\n\n r, curr = len(nums) - 1, len(nums) - 2\n while nums[curr] >= nums[curr + 1] and curr >= 0:\n if curr == 0:\n reverse(0, len(nums) - 1, nums) \n return\n curr = curr -1\n reverse(curr + 1, len(nums) - 1, nums)\n\n l = curr + 1\n while l < r:\n mid = (l + r)//2\n if nums[mid] > nums[curr]:\n r = mid\n elif nums[mid] <= nums[curr]:\n l = mid + 1\n nums[curr], nums[r] = nums[r], nums[curr]\n\n return", "def nextPermutation(self, nums) -> None:\n l = len(nums)\n #print(nums)\n if l == 1:\n return\n li = l-1\n found = True\n while li and nums[li] <= nums[li-1]:\n li -= 1\n\n \n if l > 1 and nums[0] >= nums[1] and li == 0:\n found = False\n li -= 1\n print(\"li\",li, found)\n if found:\n ti = self.binary_search(nums, li+1, l-1, nums[li])\n print(\"ti\",ti)\n nums[li], nums[ti] = nums[ti], nums[li]\n \n print(nums, li+1, l-1)\n i = li+1\n j = l-1\n while i < j:\n nums[i], nums[j] = nums[j], nums[i]\n i += 1\n j -= 1", "def fit_dparity(network, num_inputs, samples=100):\n\n # Create random vectors 
of zeros and ones\n X = 1*(np.random.normal(size=(samples,num_inputs)) > 0)\n # Compute parity of each row of X\n Y = X.sum(axis=1)%2\n\n outputs = np.zeros(samples)\n for i in range(samples):\n outputs[i] = network.activate(X[i])\n\n fitness = np.sum(Y*outputs)/float(samples)\n return fitness" ]
[ "0.7810365", "0.751558", "0.74588585", "0.74564564", "0.7020123", "0.6833049", "0.68051773", "0.6797909", "0.60460204", "0.59874743", "0.58930457", "0.585515", "0.5851539", "0.5723222", "0.5665174", "0.5619301", "0.5564634", "0.55510086", "0.550783", "0.54255444", "0.540439", "0.5278438", "0.5274186", "0.52595454", "0.5257819", "0.52564836", "0.5226751", "0.5116865", "0.5087889", "0.50847435" ]
0.7581894
1
>>> jig('1', '1')
(1, [])
>>> jig('12', '1')
(1, ['2'])
>>> jig('12', '2')
(1, ['1'])
>>> jig('1', '12')
(1, ['2'])
>>> jig('2', '12')
(1, ['1'])
>>> jig('12', '21')
(1, [])
def jig(a, b):
    # Assumptions:
    #     elements are unique per list
    out = list(a)
    sign = 1
    for x in b:
        if x not in out:
            out.append(x)
        else:
            swaps = len(out) - out.index(x) - 1
            sign *= (-1) ** swaps
            out.remove(x)
    return sign, out
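A few illustrative calls against the jig() document above (assuming, as its comment requires, that the elements of each list are unique); the returned sign tracks the parity of the swaps needed to cancel shared elements, and the list holds whatever is left over.

print(jig('12', '21'))  # (1, [])           both elements cancel with no net swaps
print(jig('12', '2'))   # (1, ['1'])        '2' cancels, '1' remains
print(jig('abc', 'b'))  # (-1, ['a', 'c'])  cancelling 'b' costs one swap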
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _pij_dagger(i: int, j: int):\n return hermitian_conjugated(_pij(i, j))", "def _qij_vec_dagger(i: int, j: int):\n return [hermitian_conjugated(i) for i in _qij_vec(i, j)]", "def test_get_ijk_list():\n\n l0 = get_ijk_list(0)\n assert l0 == [\n [0, 0, 0]\n ]\n\n l1 = get_ijk_list(1)\n assert l1 == [\n [1, 0, 0],\n [0, 1, 0],\n [0, 0, 1]\n ]\n\n l2 = get_ijk_list(2)\n assert l2 == [\n [2, 0, 0],\n [1, 1, 0],\n [1, 0, 1],\n [0, 2, 0],\n [0, 1, 1],\n [0, 0, 2]\n ]", "def putaijlist(self,subi,subj,valij): # 3\n num_ = None\n if num_ is None:\n num_ = len(subi)\n elif num_ != len(subi):\n raise IndexError(\"Inconsistent length of array subi\")\n if num_ is None:\n num_ = len(subj)\n elif num_ != len(subj):\n raise IndexError(\"Inconsistent length of array subj\")\n if num_ is None:\n num_ = len(valij)\n elif num_ != len(valij):\n raise IndexError(\"Inconsistent length of array valij\")\n if num_ is None: num_ = 0\n if subi is None: raise TypeError(\"Invalid type for argument subi\")\n if subi is None:\n subi_ = None\n else:\n try:\n subi_ = memoryview(subi)\n except TypeError:\n try:\n _tmparr_subi = array.array(\"i\",subi)\n except TypeError:\n raise TypeError(\"Argument subi has wrong type\")\n else:\n subi_ = memoryview(_tmparr_subi)\n \n else:\n if subi_.format != \"i\":\n subi_ = memoryview(array.array(\"i\",subi))\n \n if subj is None: raise TypeError(\"Invalid type for argument subj\")\n if subj is None:\n subj_ = None\n else:\n try:\n subj_ = memoryview(subj)\n except TypeError:\n try:\n _tmparr_subj = array.array(\"i\",subj)\n except TypeError:\n raise TypeError(\"Argument subj has wrong type\")\n else:\n subj_ = memoryview(_tmparr_subj)\n \n else:\n if subj_.format != \"i\":\n subj_ = memoryview(array.array(\"i\",subj))\n \n if valij is None: raise TypeError(\"Invalid type for argument valij\")\n if valij is None:\n valij_ = None\n else:\n try:\n valij_ = memoryview(valij)\n except TypeError:\n try:\n _tmparr_valij = array.array(\"d\",valij)\n except TypeError:\n raise TypeError(\"Argument valij has wrong type\")\n else:\n valij_ = memoryview(_tmparr_valij)\n \n else:\n if valij_.format != \"d\":\n valij_ = memoryview(array.array(\"d\",valij))\n \n res = self.__obj.putaijlist64(num_,subi_,subj_,valij_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def i(surj, iterate=1):\n\n if iterate == 1:\n answer = surj.zero()\n for k, v in surj.items():\n answer += answer.create(\n {(1,) + tuple(j + 1 for j in k): v})\n return answer\n if iterate > 1:\n return i(i(surj, iterate=iterate - 1))", "def zigzag(a):", "def _qij_vec(i: int, j: int):\n return [_qij_plus(i, j), _qij_minus(i, j), _qij_0(i, j)]", "def J (self, n):", "def putaijlist(self,subi_,subj_,valij_):\n num_ = None\n if num_ is None:\n num_ = len(subi_)\n elif num_ != len(subi_):\n raise IndexError(\"Inconsistent length of array subi\")\n if num_ is None:\n num_ = len(subj_)\n elif num_ != len(subj_):\n raise IndexError(\"Inconsistent length of array subj\")\n if num_ is None:\n num_ = len(valij_)\n elif num_ != len(valij_):\n raise IndexError(\"Inconsistent length of array valij\")\n if subi_ is None:\n raise ValueError(\"Argument subi cannot be None\")\n if subi_ is None:\n raise ValueError(\"Argument subi may not be None\")\n if isinstance(subi_, numpy.ndarray) and subi_.dtype is numpy.dtype(numpy.int32) and subi_.flags.contiguous:\n _subi_copyarray = False\n _subi_tmp = ctypes.cast(subi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subi_ is not None:\n _subi_copyarray 
= True\n _subi_np_tmp = numpy.zeros(len(subi_),numpy.dtype(numpy.int32))\n _subi_np_tmp[:] = subi_\n assert _subi_np_tmp.flags.contiguous\n _subi_tmp = ctypes.cast(_subi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subi_copyarray = False\n _subi_tmp = None\n \n if subj_ is None:\n raise ValueError(\"Argument subj cannot be None\")\n if subj_ is None:\n raise ValueError(\"Argument subj may not be None\")\n if isinstance(subj_, numpy.ndarray) and subj_.dtype is numpy.dtype(numpy.int32) and subj_.flags.contiguous:\n _subj_copyarray = False\n _subj_tmp = ctypes.cast(subj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subj_ is not None:\n _subj_copyarray = True\n _subj_np_tmp = numpy.zeros(len(subj_),numpy.dtype(numpy.int32))\n _subj_np_tmp[:] = subj_\n assert _subj_np_tmp.flags.contiguous\n _subj_tmp = ctypes.cast(_subj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subj_copyarray = False\n _subj_tmp = None\n \n if valij_ is None:\n raise ValueError(\"Argument valij cannot be None\")\n if valij_ is None:\n raise ValueError(\"Argument valij may not be None\")\n if isinstance(valij_, numpy.ndarray) and valij_.dtype is numpy.dtype(numpy.float64) and valij_.flags.contiguous:\n _valij_copyarray = False\n _valij_tmp = ctypes.cast(valij_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif valij_ is not None:\n _valij_copyarray = True\n _valij_np_tmp = numpy.zeros(len(valij_),numpy.dtype(numpy.float64))\n _valij_np_tmp[:] = valij_\n assert _valij_np_tmp.flags.contiguous\n _valij_tmp = ctypes.cast(_valij_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _valij_copyarray = False\n _valij_tmp = None\n \n res = __library__.MSK_XX_putaijlist64(self.__nativep,num_,_subi_tmp,_subj_tmp,_valij_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def jsd_self(*sigs):\n return [jsd(sigs[i], sigs[i+1]) for i in range(len(sigs)-1)]", "def getaij(self,i_,j_): # 3\n res,resargs = self.__obj.getaij(i_,j_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _aij_return_value = resargs\n return _aij_return_value", "def select_ijentry(self, matrices = '', i = '' , j = ''):\n lista = [ m[i, j] for m in matrices ]\n return lista", "def h(surj):\n answer = s(surj)\n for r in range(1, arity - 1):\n answer += i(s(p(surj, r)), r)\n return answer", "def pathij(self, i, j, pathlist):\n import math\n path = []\n \n visit = np.zeros(self.nodenum)\n \n self.DFS(i, j, visit, path, pathlist)\n \n return pathlist", "def jmat(ind: int):\n return _jm[ind - 1]", "def do_jls(self, arg):\n\n arg = str(arg).split(' ') \n arg.insert(0,'jls')\n arg = [i for i in arg if i != '']\n \n jail_table(arg)", "def GetJflag(cmdline):\n\n for i in range(len(cmdline)):\n if (cmdline[i] == '-j' and i + 1 < len(cmdline)\n and cmdline[i + 1].isdigit()):\n return int(cmdline[i + 1])\n\n if (cmdline[i].startswith('-j') and cmdline[i][len('-j'):].isdigit()):\n return int(cmdline[i][len('-j'):])", "def qj(self, *a, **kw):\n depth = kw.pop('_depth', 0) + 2\n return qj(self, _depth=depth, *a, **kw)", "def qj(self, *a, **kw):\n depth = kw.pop('_depth', 0) + 2\n return qj(self, _depth=depth, *a, **kw)", "def qj(self, *a, **kw):\n depth = kw.pop('_depth', 0) + 2\n return qj(self, _depth=depth, *a, **kw)", "def qj(self, *a, **kw):\n depth = kw.pop('_depth', 0) + 2\n return qj(self, _depth=depth, *a, **kw)", "def qj(self, *a, **kw):\n depth = kw.pop('_depth', 0) + 2\n return qj(self, _depth=depth, 
*a, **kw)", "def zernIndex(j):\r\n n = int((-1.+np.sqrt(8*(j-1)+1))/2.)\r\n p = (j-(n*(n+1))/2.)\r\n k = n%2\r\n m = int((p+k)/2.)*2 - k\r\n\r\n if m!=0:\r\n if j%2==0:\r\n s=1\r\n else:\r\n s=-1\r\n m *= s\r\n\r\n return [n, m]", "def jot(self):\r\n\t\t\r\n\t\t# empty list?\r\n\t\tif len(self) < 1:\r\n\t\t\r\n\t\t\treturn '(0)'\r\n\t\t\r\n\t\t# go through terms\r\n\t\ts = ''\r\n\t\tfor i in self:\r\n\t\t\t\r\n\t\t\t# positive or negative\r\n\t\t\tif i.look('i') % 4 in (0,1):\r\n\t\t\t\ts += ' +('\r\n\t\t\tif i.look('i') % 4 in (2,3):\r\n\t\t\t\ts += ' -('\r\n\t\t\t\t\r\n\t\t\t# list of variables\r\n\t\t\ty = i.keys()\r\n\t\t\ty = [str(j) for j in y if j != 'i']\r\n\t\t\ty = [j for j in y if j.isalpha()]\r\n\t\t\ty = Pa._tidy(y)\r\n\t\t\t\t\r\n\t\t\t# coefficient\r\n\t\t\tn,d = i.fuse()\r\n\t\t\tif n > 1 or d > 1 or len(y) < 1:\r\n\t\t\t\ts += str(n)\r\n\t\t\tif d > 1:\r\n\t\t\t\ts += '/' + str(d)\r\n\t\t\tif i.look('i') % 4 in (1,3):\r\n\t\t\t\ts += 'i'\r\n\t\t\t\r\n\t\t\t# add variables to string\r\n\t\t\tfor k in y:\r\n\t\t\t\tif s[-1] == '(':\r\n\t\t\t\t\ts += k\r\n\t\t\t\telse:\r\n\t\t\t\t\ts += ' ' + k\r\n\t\t\t\tif i[k] != 1:\r\n\t\t\t\t\ts += str(i[k])\r\n\t\t\t\t\t\r\n\t\t\t# close\r\n\t\t\ts += ')'\r\n\t\t\t\t\r\n\t\t# remove leading ' '\r\n\t\tif s[0] == ' ':\r\n\t\t\ts = s[1:]\r\n\t\t\t\t\r\n\t\t# remove leading +\r\n\t\tif s[0] == '+':\r\n\t\t\ts = s[1:]\r\n\t\t\t\t\r\n\t\treturn s", "def ij(ij, pol, ant) :\n s.ij(pol, ij, ant)", "def putaij(self,i_,j_,aij_): # 3\n res = self.__obj.putaij(i_,j_,aij_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def superkron(*args, val=0, string=''):\n\n out = 1\n if val == 0:\n for i in range(len(args)):\n out = kron(out, args[i])\n else:\n for digit in string:\n out = kron(out, args[0][digit])\n return out", "def s(surj):\n answer = surj.zero()\n for k, v in surj.items():\n answer += answer.create({(1,) + tuple(j for j in k): v})\n return answer", "def jqk2(visible):\n\tjqk = [0, 0, 0]\n\tif 11 in visible and 12 in visible and 13 in visible:\t#brief says function should check. I think it's easier to check outside the function...\n\t\tfor i in range(len(visible)):\n\t\t\tif visible[i] == 11:\n\t\t\t\tjqk[0] = i\n\t\t\telif visible[i] == 12:\n\t\t\t\tjqk[1] = i\n\t\t\telif visible[i] == 13:\n\t\t\t\tjqk[2] = i\n\t\tjqk = tuple(jqk)\n\t\treturn jqk", "def entiers(i: int, j: int) -> None:\n\n if i > j:\n raise ValueError(\"i must be less than or equal to j\")\n print('-'.join([str(i) for i in range(i, j+1)]))" ]
[ "0.5384816", "0.53626597", "0.5330242", "0.5321761", "0.50911194", "0.50391024", "0.50116795", "0.49772155", "0.49516168", "0.4926835", "0.49146804", "0.4865859", "0.48324883", "0.47729522", "0.47677034", "0.47592914", "0.47451174", "0.47234073", "0.47234073", "0.47234073", "0.47234073", "0.47234073", "0.4706103", "0.46973947", "0.46871844", "0.46610364", "0.46544573", "0.4650011", "0.46143967", "0.4608483" ]
0.5998455
0
Saves the corresponding tag from the input field to the tag list. Ideally, we would like to use the MATCH function to determine which button was clicked. However, since we only have one save tag toast for all the tags, we can't use MATCH in the Output field. To use MATCH, Dash requires the Output field to match the same properties as the input field.
def save_tag(n_clicks_timestamp, input_values):
    ctx = dash.callback_context
    triggered_id, triggered_prop, triggered_value = utils.ctx_triggered_info(ctx)
    if triggered_value is None:
        raise PreventUpdate

    # Unfortunately, we have to convert the stringified dict back to a dict.
    # Dash doesn't provide us any other method to see which element triggered the callback.
    # This isn't very elegant, but I don't see any other way to proceed.
    id_dict = utils.string_to_dict(triggered_id)
    tag_idx = id_dict["index"]
    tag_value = input_values[tag_idx]

    if " " in tag_value:
        raise PreventUpdate  # TODO: display an error UI element or something

    state.update_tag(tag_idx, tag_value)

    # since we pattern matched the SAVE_TAG_TOAST, we need to provide output as a list
    return [True], constants.OK_SIGNAL
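A hypothetical registration for the callback above, illustrating the constraint described in the query; the app object and the component-id constants are assumptions, not taken from the row. Because one toast serves every tag, the Output cannot use MATCH, so the Input and State use ALL and the handler inspects dash.callback_context to work out which button fired.

from dash.dependencies import Input, Output, State, ALL

app.callback(
    [
        # pattern-matched toast: the callback must return a list for this output
        Output({"type": constants.SAVE_TAG_TOAST, "index": ALL}, "is_open"),
        # hidden signal component consumed by the dropdown-option callbacks below
        Output(constants.TAG_UPDATE_SIGNAL, "children"),
    ],
    [Input({"type": constants.SAVE_TAG_BUTTON, "index": ALL}, "n_clicks_timestamp")],
    [State({"type": constants.TAG_INPUT, "index": ALL}, "value")],
)(save_tag)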
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self) -> None:\n if not self.found:\n return\n self.tag_helper.save()", "def _save(self, **kwargs): #signal, sender, instance):\r\n tags = self._get_instance_tag_cache(kwargs['instance'])\r\n if tags is not None:\r\n Tag.objects.update_tags(kwargs['instance'], tags)", "def add_tag(e, driver):\n tag = random_tag(8)\n e.find_element_by_class_name('add-tag').click()\n \n driver.find_element_by_class_name('tag_input')\\\n .send_keys(tag)\n driver.find_element_by_class_name('tag_input')\\\n .send_keys(Keys.ENTER)\n # driver.find_elements_by_class_name('save-tag').click()\n return tag", "def processTags(request, media, form, update):\n if update:\n if 'tags' in request.POST:\n tag_names = form.cleaned_data['tags'].split(',')\n media.tag_set.clear()\n for tag_name in tag_names:\n tag, dummy = Tag.objects.get_or_create(name=tag_name.strip())\n media.tag_set.add(tag)\n media.save()\n else:\n if 'tags' in request.POST:\n tag_names = form.cleaned_data['tags'].split(',')\n for tag_name in tag_names:\n tag, dummy = Tag.objects.get_or_create(name=tag_name.strip())\n media.tag_set.add(tag)\n media.save()", "def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n self.update_generated_tags()", "def edit_tags(self):\n os.system(\"clear\")\n while True:\n tag_categories = [\"meal\", \"genre\", \"complexity\", \"course\", \"no change\"]\n _, key = _num_select(\"Which tag would you like to edit\", tag_categories)\n if key == \"meal\":\n _, value = _num_select(\"Which tag would you like to apply\",\n [\"breakfast\", \"lunch\", \"dinner\"])\n self.tags[key]=value\n elif key == \"genre\":\n genres = [\"american\", \"italian\", \"mexican\", \"asian\", \"indian\", \"misc\"]\n _, value = _num_select(\"Which tag would you like to apply\",\n genres)\n elif key == \"complexity\":\n _, value = _num_select(\"Which tag would you like to apply\",\n [\"simple\", \"intermediate\", \"complicated\"])\n elif key == \"course\":\n _, value = _num_select(\"Which tag would you like to apply\",\n [\"appetizer\", \"salad\", \"side\", \"main\", \"dessert\"])\n else:\n return", "def save_tags(self, post_getlist_tags):\n cleaned_tags = []\n for name in post_getlist_tags:\n if Tag.objects.filter(name=name).exists():\n tag = Tag.objects.filter(name=name).first()\n cleaned_tags.append(tag)\n else:\n if bool(name.strip()):\n tag = Tag.objects.create(name=name)\n tag.save()\n cleaned_tags.append(tag)\n return cleaned_tags", "def save_result(self, value: Any) -> None:\n self.run_logger.set_tags({self.name: value})", "def post(self):\n\n form = TagForm()\n\n if form.validate_on_submit():\n name = form.name.data\n tag_obj = Tag(name=name)\n database.session.add(tag_obj)\n database.session.commit()\n flask.flash(f\"Tag {tag_obj.name} stored.\")\n tags = Tag.query.filter().all()\n\n if not tags:\n tags = None\n\n template_return = flask.render_template(\"tags.html\", table_data=tags, form=form)\n\n return flask.Response(template_return, mimetype=\"text/html\")", "def process_tag_edit(user_id, tag_id):\n\n title = request.form.get('title')\n content = request.form.get('content')\n\n tag = Tag.query.get_or_404(tag_id)\n\n tag.title = title\n tag.content = content\n\n db.session.add(tag)\n db.session.commit()\n\n return redirect(f'/users/{user_id}/tags/{tag_id}')", "def _handle_new_tag(self, tag_name):\n log.debug(\"Handling new tag: %s\", tag_name)\n tag, created = Tag.get_or_create(name=tag_name)\n if tag not in self.doc.tags:\n self.listbox.body.insert(-1, MenuButton(tag_name, None))\n tag.documents.add(self.doc)\n 
self.new_tag.set_edit_text(\"\")", "def tag_updater(self, tags):\n for tag in tags:\n #check if the tag exists\n exists = False\n tag = self.tags.find_one({'TagName': tag})\n if tag is not None:\n self.tags.update_one({'TagName': tag}, {'$set': {'Count': tag['Count']+1}}) \n else:\n #insert new tag\n Id = self.id_generator(self.tags)\n self.tags.insert_one({\"Id\":Id, \"TagName\":tag, \"Count\":0})", "def tag(self, tag):\n \n if isinstance(tag, six.integer_types):\n try:\n tag = Tag.objects.get(pk=tag, owner=self.owner)\n except Tag.DoesNotExist:\n #Handle this better?\n return\n \n if isinstance(tag, six.string_types):\n tname = tag\n try:\n tag = Tag(owner=self.owner, name=tag)\n tag.save()\n except IntegrityError:\n tag = Tag.objects.get(slug=makeslug(tname), owner=self.owner)\n \n tag.save() # If this isn't here there are crashes for some reason\n self.tags.add(tag)", "def add_tags(event):\n\n add_tags_from_presets()", "def save_tags(context):\n items = context.response.json()['items']\n tags = set()\n for item in items:\n for tag in item['tags']:\n tags.add(tag)\n context.tags = list(tags)\n logging.debug('Saved all tags in context.tags:\\n%s', pformat(sorted(context.tags)))", "def handleActionSave(self):\n for w in self.filesList.selectedItems():\n self.filesList.saveFile(w.text(2))", "def update_tag(request_form, tag_id):\n values = {'tag': request_form.get('tag').lower()}\n db_session.query(Tags).filter_by(id=tag_id).update(values)\n db_session.commit()\n return 'Updated tag #%s: %s.' % (tag_id, values['tag']), 'success'", "def search_tag(input) :\n j = _jpdb()\n _input = _process_search_input(input)\n if not _input : return None\n f = j.base_format\n q = Query().select(f.tags, f.tags.id, f.tags.name)\n q.where().equal(f.tags.name, _input)\n tag_data = j.executeQuery(q)\n\n if tag_data:\n tag_id, tag_name = tag_data[0]\n examples = _create_examples(j.list_word_by_tag, tag_name)\n return SelectorResult('tag', tag_id, tag_name, *examples)", "def update_tag(tag):\n remove_tag(tag)\n add_tag(tag)", "def handle_add_new_tag():\n tag = Tag(name=request.form['name'])\n\n db.session.add(tag)\n db.session.commit()\n\n return redirect('/tags')", "def save(self, output, data):", "def update_tag_on_odoo(self, json_data, instance_id):\n tag_ids = []\n if json_data:\n for each_tag in json_data:\n tag_exist_in_odoo = self.search([('ks_woo_id', '=', each_tag.get('id')),\n ('ks_woo_instance_id', '=', instance_id.id)], limit=1)\n woo_formated_data = self._ks_prepare_woo_product_tag_data(each_tag, instance_id)\n if tag_exist_in_odoo:\n tag_ids.append(tag_exist_in_odoo.id)\n ks_operation_type = 'update'\n else:\n new_tag_record = self.create(woo_formated_data)\n tag_ids.append(new_tag_record.id)\n ks_operation_type = 'create'\n\n self.env['ks.woo.sync.log'].create_log_param(\n ks_woo_id=new_tag_record.ks_woo_id if ks_operation_type == 'create' else tag_exist_in_odoo.ks_woo_id ,\n ks_status='success',\n ks_type='tags',\n ks_woo_instance_id=instance_id,\n ks_operation='woo_to_odoo',\n ks_operation_type=ks_operation_type,\n response= 'Tag [' + new_tag_record.ks_name + '] has been succesfully created' if ks_operation_type == 'create' else 'Tag [' + tag_exist_in_odoo.ks_name + '] has been succesfully updated'\n )\n return tag_ids", "def on_post(self):\n return \"Ok, the stuff is being saved\"", "def slotSave(self):\n if self.categoryDialog.exec_loop() == QDialog.Accepted:\n category = self.categoryDialog.category\n id = str(time.time())\n grooveFile = Globals.GrooveFile(id, category)\n 
grooveFile.setPitch(self.sampleGroup.groupPitch)\n for frame in self.trackFrames:\n path = frame.track.sampleControl.path\n volume = frame.track.controllerWidget.volumeSlider.value()\n grooveFile.addSample(path, volume)\n grooveFile.save()\n self.emit(PYSIGNAL('saved'), (self,))\n else:\n pass", "def save(self, output, data):\n return", "def ks_manage_product_tags(self, each_record, instance_id):\n tag_exist_in_odoo = self.search([('ks_woo_id', '=', each_record.get('id')),\n ('ks_woo_instance_id', '=', instance_id.id)], limit=1)\n woo_formated_data = self._ks_prepare_woo_product_tag_data(each_record, instance_id)\n if tag_exist_in_odoo:\n tag_exist_in_odoo.write(woo_formated_data)\n ks_operation_type = 'update'\n else:\n tag_exist_in_odoo = self.create(woo_formated_data)\n ks_operation_type = 'create'\n self.env['ks.woo.sync.log'].create_log_param(\n ks_woo_id=each_record.get('id'),\n ks_status='success',\n ks_type='tags',\n ks_woo_instance_id=instance_id,\n ks_operation='woo_to_odoo',\n ks_operation_type=ks_operation_type,\n response='Tag [' + tag_exist_in_odoo.ks_name + '] has been succesfully created' if ks_operation_type == 'create' else 'Tag [' + tag_exist_in_odoo.ks_name + '] has been succesfully updated',\n )\n tag_exist_in_odoo.ks_sync_date = datetime.datetime.now()\n tag_exist_in_odoo.ks_last_exported_date = tag_exist_in_odoo.ks_sync_date\n tag_exist_in_odoo.sync_update()", "def save(self, output, data):\n pass", "def getTag(self, inputs, tag):\n result = {}\n for into in inputs:\n for i in into:\n if i in self.sim.agents:\n agentTags = self.sim.agents[i].access[\"tags\"]\n if tag in agentTags:\n result[i] = agentTags[tag]\n return result", "def add_tagging(self, task_instance):", "def add_tag_to_db():\n new_tag = Tag(name=request.form['name'])\n\n db.session.add(new_tag)\n db.session.commit()\n\n flash(f\"Tag '{new_tag.name}' was successfully added\")\n\n return redirect('/tags')" ]
[ "0.6423813", "0.5827545", "0.5687878", "0.5630163", "0.5520939", "0.5326943", "0.51490104", "0.50914985", "0.5063527", "0.50503564", "0.5007661", "0.49620467", "0.4947558", "0.4932287", "0.4920134", "0.4887643", "0.48833373", "0.48679376", "0.48314855", "0.4811858", "0.4810644", "0.480719", "0.47954285", "0.475295", "0.4738558", "0.47230887", "0.46997094", "0.46952388", "0.4692309", "0.46821308" ]
0.6465949
0
Updates the options in the batch-applied tag dropdown on tag changes. Notice that we don't need a similar callback to update the options of the non-batch-applied tag dropdowns, because the whole selected-info panel is dynamically generated.
def update_batch_applied_tag_dropdown_options(tag_update_signal):
    tag_list = state.get_tag_list()
    return converters.tag_dropdown_options_from_tags(tag_list)
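The converter itself is not part of the row; a plausible sketch of what it returns is below, since dcc.Dropdown expects its options as label/value dictionaries (the helper's body here is an assumption).

def tag_dropdown_options_from_tags(tag_list):
    # one dropdown entry per tag name
    return [{"label": tag, "value": tag} for tag in tag_list]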
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_change_over_time_tag_dropdown_options(tag_update_signal):\n tag_list = state.get_tag_list()\n tag_options = converters.timestamped_tag_dropdown_options_from_tags(tag_list)\n custom_time_range_option = {\n \"label\": constants.CUSTOM_TIME_RANGE_DROPDOWN_VALUE,\n \"value\": constants.CUSTOM_TIME_RANGE_DROPDOWN_VALUE,\n }\n return tag_options + [custom_time_range_option]", "def edit_tags(self):\n os.system(\"clear\")\n while True:\n tag_categories = [\"meal\", \"genre\", \"complexity\", \"course\", \"no change\"]\n _, key = _num_select(\"Which tag would you like to edit\", tag_categories)\n if key == \"meal\":\n _, value = _num_select(\"Which tag would you like to apply\",\n [\"breakfast\", \"lunch\", \"dinner\"])\n self.tags[key]=value\n elif key == \"genre\":\n genres = [\"american\", \"italian\", \"mexican\", \"asian\", \"indian\", \"misc\"]\n _, value = _num_select(\"Which tag would you like to apply\",\n genres)\n elif key == \"complexity\":\n _, value = _num_select(\"Which tag would you like to apply\",\n [\"simple\", \"intermediate\", \"complicated\"])\n elif key == \"course\":\n _, value = _num_select(\"Which tag would you like to apply\",\n [\"appetizer\", \"salad\", \"side\", \"main\", \"dessert\"])\n else:\n return", "def tags_changed(self, tags):\n pass", "def on_update(self, evt):\n print(evt)\n for name in self.widgetNames:\n try:\n widget = wx.FindWindowByName(name)\n if isinstance(widget, wx.ComboBox):\n selection = widget.GetValue()\n choices = widget.GetItems()\n choices.insert(0, selection)\n value = choices\n else:\n value = widget.GetValue()\n\n data = self.tree.GetPyData(self.current_selection)\n data['Config'][name] = value\n self.tree.SetPyData(self.current_selection, data)\n except Exception as E:\n logging.error(\"{0!s}: {1!s}\".format(E, name))\n raise E", "def _selected_labels_changed(self, name, old, new):\n if self.value_lock.acquire(False):\n try:\n self.value = [self._options_dict[name] for name in new]\n finally:\n self.value_lock.release()", "def on_tagCombo_editTextChanged(self, txt):\n self.__generateDefaultCommitMessage()\n self.__updateOK()", "def update_from_tags():\n tags.update_diagrams()\n tags.update_tiles()", "def __init__(self, *args, **kwargs):\n super(TaggedContentItemForm, self).__init__(*args, **kwargs)\n wtf = Tag.objects.filter(group__system=False)\n wlist = [w for t, w in self.fields.items() if t.endswith(\"tags\")]\n choices = []\n for choice in wtf:\n choices.append((choice.id, str(choice)))\n [setattr(w, 'choices', choices) for w in wlist]", "def update_dropdowns_years_options(df_in, aux):\n\n df = u.uos.b64_to_df(df_in)\n return lay.get_options(df[c.cols.YEAR].unique().tolist())", "def _on_selection_changed(self, selection):\n if self._updating_selection:\n return\n\n self._updating_selection = True\n\n self._tree_selection.unselect_all()\n for widget in selection:\n gadget_iter = self._find_iter_by_widget(widget)\n if gadget_iter:\n select_iter(self._tree_view, gadget_iter)\n\n self._updating_selection = False", "def add_tags(event):\n\n add_tags_from_presets()", "def cb_receive_tag_set_values(cls, session, node_id, tg_id, tag_id, value):\n tag = super(AvatarLens, cls).cb_receive_tag_set_values(session, node_id, tg_id, tag_id, value)\n update_3dview(tag.tg.node)\n return tag", "def update_tags(self, tags, **kwargs):\n request = RequestMiddleware.get_request()\n is_admin = request.user and request.user.is_admin\n # Keep all tags that start with pf: because they are reserved.\n preserved = [tag for tag in self.tags if 
tag.startswith('pf:')]\n if is_admin:\n remove = [tag[1:] for tag in tags if tag.startswith('-pf:')]\n preserved = [tag for tag in preserved if tag not in remove]\n\n # Filter out new tags that are invalid or reserved.\n accepted = [tag for tag in tags\n if TAG_REGEX_COMPILED.match(tag)\n and (is_admin or not tag.startswith('pf:'))]\n # Limit the number of tags per entity.\n if len(accepted + preserved) > settings.MAX_TAGS_PER_ENTITY:\n accepted = accepted[:settings.MAX_TAGS_PER_ENTITY - len(preserved)]\n self.tags = list(set(accepted + preserved))", "def update_eligs(self, *args):\n self.splitGD.update_eligs()", "def update_eligs(self, *args):\n self.splitGD.update_eligs()", "def setTags(self,newtags):\n\t\tself.tags = newtags;", "def update_categories(self):\n\n for btn in self._tags_btn_grp.buttons():\n self._tags_btn_grp.removeButton(btn)\n\n qtutils.clear_layout(self._tags_menu_layout)\n\n if not self._outliners:\n return\n\n total_buttons = 0\n\n categories_list = self._outliners.keys()\n for category in categories_list:\n new_btn = QPushButton(category.title())\n new_btn.category = category\n category_icon = tpDcc.ResourcesMgr().icon(category.strip().lower())\n new_btn.setIcon(category_icon)\n new_btn.setCheckable(True)\n new_btn.clicked.connect(partial(self._on_change_outliner, new_btn))\n self._tags_menu_layout.addWidget(new_btn)\n self._tags_btn_grp.addButton(new_btn)\n if total_buttons == 0:\n new_btn.blockSignals(True)\n new_btn.setChecked(True)\n new_btn.blockSignals(False)\n total_buttons += 1", "def update_tag(tag):\n remove_tag(tag)\n add_tag(tag)", "def _options_changed(self, name, old, new):\n if self.options_lock.acquire(False):\n try:\n self.options = new\n\n options = self._make_options(new)\n self._options_dict = {i[0]: i[1] for i in options}\n self._options_labels = [i[0] for i in options]\n self._options_values = [i[1] for i in options]\n self._value_in_options()\n finally:\n self.options_lock.release()", "def update_tags(self, obj, tags):\n content_type = ContentType.objects.get_for_model(obj)\n current_tags = list(self.filter(items__content_type__pk=content_type.pk,\n items__object_id=obj.pk))\n updated_tags = self.model.get_tag_list(tags)\n \n # Remove tags which no longer apply\n tags_for_removal = [tag for tag in current_tags \\\n if tag not in updated_tags]\n if len(tags_for_removal):\n self.intermediary_table_model._default_manager.filter(content_type__pk=content_type.pk,\n object_id=obj.pk,\n tag__in=tags_for_removal).delete()\n # Add new tags\n for tag in updated_tags:\n if tag not in current_tags:\n self.intermediary_table_model._default_manager.create(tag=tag, content_object=obj)", "def callback_selectstate(self, attrname, old, new):\n self._update_chart(self.selectstate.value)", "def apply_changes(self, updated_talk=None):\r\n self.presentationModel.select()\r\n self.select_talk(updated_talk)\r\n self.update_autocomplete_fields()", "def cb_receive_tag_set_values(cls, session, node_id, tg_id, tag_id, value):\n tag = super(AvatarHeight, cls).cb_receive_tag_set_values(session, node_id, tg_id, tag_id, value)\n update_3dview(tag.tg.node)\n return tag", "def cb_receive_tag_set_values(cls, session, node_id, tg_id, tag_id, value):\n tag = super(AvatarScene, cls).cb_receive_tag_set_values(session, node_id, tg_id, tag_id, value)\n update_3dview(tag.tg.node)\n return tag", "def enable_selected(self, window, values, branch_log_dict, key_event):\n utils.convert_to_numeric(values)\n if(values[key_event] in branch_log_dict[key_event]):#if there is branching for the 
chosen option\n for element_key in branch_log_dict[key_event][values[key_event]]:\n #values the element can take\n if not isinstance(window[element_key], sg.Text):\n window[element_key].update(disabled = False)\n window[element_key].metadata = True\n window[element_key+\"_label\"].update(text_color = \"#FFFFFF\")#every non-text field has a label\n window[element_key].update(visible = True)", "def cb_receive_tag_set_values(cls, session, node_id, tg_id, tag_id, value):\n tag = super(AvatarWidth, cls).cb_receive_tag_set_values(session, node_id, tg_id, tag_id, value)\n update_3dview(tag.tg.node)\n return tag", "def tags_updated(sender, instance, action, **kwargs):\n task_type = instance\n pk_set = kwargs.pop('pk_set')\n if action == 'post_add':\n for task in task_type.tasks.all():\n task.tags.add(*pk_set)\n elif action == 'post_remove':\n for task in task_type.tasks.all():\n task.tags.remove(*pk_set)", "def on_category(self):\n super(ToolSettings, self).on_category()\n selItems = self.tw_category.selectedItems() or []\n #--- Build Tree ---#\n if selItems:\n if hasattr(selItems[0], 'itemWidget'):\n if selItems[0].itemWidget is not None:\n if not selItems[0].itemWidget.__edited__:\n selItems[0].itemWidget._initWidget()\n selItems[0].itemWidget.buildTree()", "async def slashtag_edit_arguments(self, ctx: commands.Context, tag: GuildTagConverter):\n await tag.edit_options(ctx)", "def tag(self, prop, tagger_dict):\n for tag_label, tagger in tagger_dict.items():\n self._category_tagger(prop, tag_label, tagger)\n if tag_label not in self.tags:\n self.tags.append(tag_label)" ]
[ "0.6000653", "0.59050953", "0.5842685", "0.55560786", "0.5249337", "0.5249142", "0.52388763", "0.5198426", "0.5176975", "0.50794584", "0.50590396", "0.50292253", "0.49184924", "0.49001974", "0.49001974", "0.48938623", "0.48720917", "0.48586974", "0.4852081", "0.48486608", "0.4847992", "0.4839905", "0.48319983", "0.48130354", "0.48110214", "0.47854266", "0.47815722", "0.47652784", "0.47583956", "0.47552252" ]
0.7676964
0
Updates the options in the change-over-time tag dropdown on tag changes.
def update_change_over_time_tag_dropdown_options(tag_update_signal):
    tag_list = state.get_tag_list()
    tag_options = converters.timestamped_tag_dropdown_options_from_tags(tag_list)
    custom_time_range_option = {
        "label": constants.CUSTOM_TIME_RANGE_DROPDOWN_VALUE,
        "value": constants.CUSTOM_TIME_RANGE_DROPDOWN_VALUE,
    }
    return tag_options + [custom_time_range_option]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_batch_applied_tag_dropdown_options(tag_update_signal):\n tag_list = state.get_tag_list()\n return converters.tag_dropdown_options_from_tags(tag_list)", "def tags_changed(self, tags):\n pass", "def edit_tags(self):\n os.system(\"clear\")\n while True:\n tag_categories = [\"meal\", \"genre\", \"complexity\", \"course\", \"no change\"]\n _, key = _num_select(\"Which tag would you like to edit\", tag_categories)\n if key == \"meal\":\n _, value = _num_select(\"Which tag would you like to apply\",\n [\"breakfast\", \"lunch\", \"dinner\"])\n self.tags[key]=value\n elif key == \"genre\":\n genres = [\"american\", \"italian\", \"mexican\", \"asian\", \"indian\", \"misc\"]\n _, value = _num_select(\"Which tag would you like to apply\",\n genres)\n elif key == \"complexity\":\n _, value = _num_select(\"Which tag would you like to apply\",\n [\"simple\", \"intermediate\", \"complicated\"])\n elif key == \"course\":\n _, value = _num_select(\"Which tag would you like to apply\",\n [\"appetizer\", \"salad\", \"side\", \"main\", \"dessert\"])\n else:\n return", "def _options_changed(self, name, old, new):\n if self.options_lock.acquire(False):\n try:\n self.options = new\n\n options = self._make_options(new)\n self._options_dict = {i[0]: i[1] for i in options}\n self._options_labels = [i[0] for i in options]\n self._options_values = [i[1] for i in options]\n self._value_in_options()\n finally:\n self.options_lock.release()", "def optionsChanged(self, options: ghidra.framework.options.Options, optionName: unicode, oldValue: object, newValue: object) -> None:\n ...", "def _selected_labels_changed(self, name, old, new):\n if self.value_lock.acquire(False):\n try:\n self.value = [self._options_dict[name] for name in new]\n finally:\n self.value_lock.release()", "def update_dropdowns_years_options(df_in, aux):\n\n df = u.uos.b64_to_df(df_in)\n return lay.get_options(df[c.cols.YEAR].unique().tolist())", "def setTags(self,newtags):\n\t\tself.tags = newtags;", "def update_tag(tag):\n remove_tag(tag)\n add_tag(tag)", "def update_from_tags():\n tags.update_diagrams()\n tags.update_tiles()", "def set_tags(self, tags):\r\n current_tags = set(self.tag_names())\r\n updated_tags = set(tags)\r\n removed_tags = current_tags.difference(updated_tags)\r\n new_tags = updated_tags.difference(current_tags)\r\n \r\n for tag in new_tags:\r\n self.add_tag(tag)\r\n \r\n for tag in removed_tags:\r\n self.remove_tag(tag)", "def on_update(self, evt):\n print(evt)\n for name in self.widgetNames:\n try:\n widget = wx.FindWindowByName(name)\n if isinstance(widget, wx.ComboBox):\n selection = widget.GetValue()\n choices = widget.GetItems()\n choices.insert(0, selection)\n value = choices\n else:\n value = widget.GetValue()\n\n data = self.tree.GetPyData(self.current_selection)\n data['Config'][name] = value\n self.tree.SetPyData(self.current_selection, data)\n except Exception as E:\n logging.error(\"{0!s}: {1!s}\".format(E, name))\n raise E", "def __init__(self, *args, **kwargs):\n super(TaggedContentItemForm, self).__init__(*args, **kwargs)\n wtf = Tag.objects.filter(group__system=False)\n wlist = [w for t, w in self.fields.items() if t.endswith(\"tags\")]\n choices = []\n for choice in wtf:\n choices.append((choice.id, str(choice)))\n [setattr(w, 'choices', choices) for w in wlist]", "def on_tagCombo_editTextChanged(self, txt):\n self.__generateDefaultCommitMessage()\n self.__updateOK()", "def callback_selectstate(self, attrname, old, new):\n self._update_chart(self.selectstate.value)", "def add_tags(event):\n\n 
add_tags_from_presets()", "def update_volume_tag(self, info, key, value):\n keys = []\n for tag in info[0]['tags']:\n if key == list(tag.keys())[0]:\n if len(value) == 0:\n info[0]['tags'].remove(tag)\n keys.append(list(tag.keys())[0])\n else:\n tag.update({key: value})\n keys.append(list(tag.keys())[0])\n if key not in keys:\n tag = {key: value}\n info[0]['tags'].append(tag)\n info[0]['time'] = datetime.datetime.now()\n return info", "def set_tags(self, tags):\n uniques = set()\n distinct = []\n for tag in tags:\n if tag not in uniques:\n distinct.append(tag)\n uniques.add(tag)\n self.__post_changes(distinct)", "def change_ranges(self, data):\n for tag, loc in data.items():\n self.tag_remove(tag, \"1.0\", END)\n for start, stop in loc:\n self.tag_add(tag, start, stop)\n return", "def update_dropdown_options(surface_picks):\n surface_picks = pd.read_json(surface_picks)\n tops_dropdown_options = [{'label': k, 'value': k} for k in list(surface_picks['PICK'].unique())]\n return tops_dropdown_options", "def set_tag(self, tag):\n self.update(tag=tag)", "def __communicate_changes_to_stdout(self):\n keys_to_update = list_of_keys_of(self.taglist.updates)\n additions = [\n addition for addition in self.taglist.additions\n if addition['Key'] not in keys_to_update\n ]\n deletions = self.taglist.deletions\n updates = self.taglist.updates\n\n if additions:\n io.echo('Added Tags:')\n io.echo(\n linesep.join(\n [\n \" Key: '{0}' Value: '{1}'\".format(\n addition['Key'],\n addition['Value']\n ) for addition in additions\n ]\n )\n )\n io.echo('')\n\n if deletions:\n io.echo('Deleted Tags:')\n io.echo(linesep.join([\" Key: '{0}'\".format(deletion) for deletion in deletions]))\n io.echo('')\n\n if updates:\n io.echo('Updated Tags:')\n io.echo(\n linesep.join(\n [\n \" Key: '{0}' Value: '{1}'\".format(\n update['Key'],\n update['Value']\n ) for update in updates\n ]\n )\n )\n io.echo('')", "def _value_changed(self, name, old, new):\n if self.value_lock.acquire(False):\n try:\n self.selected_labels = [\n self._options_labels[self._options_values.index(v)]\n for v in new\n ]\n except:\n self.value = old\n raise KeyError(new)\n finally:\n self.value_lock.release()", "def tag_updater(self, tags):\n for tag in tags:\n #check if the tag exists\n exists = False\n tag = self.tags.find_one({'TagName': tag})\n if tag is not None:\n self.tags.update_one({'TagName': tag}, {'$set': {'Count': tag['Count']+1}}) \n else:\n #insert new tag\n Id = self.id_generator(self.tags)\n self.tags.insert_one({\"Id\":Id, \"TagName\":tag, \"Count\":0})", "def update_tags(self, obj, tags):\n content_type = ContentType.objects.get_for_model(obj)\n current_tags = list(self.filter(items__content_type__pk=content_type.pk,\n items__object_id=obj.pk))\n updated_tags = self.model.get_tag_list(tags)\n \n # Remove tags which no longer apply\n tags_for_removal = [tag for tag in current_tags \\\n if tag not in updated_tags]\n if len(tags_for_removal):\n self.intermediary_table_model._default_manager.filter(content_type__pk=content_type.pk,\n object_id=obj.pk,\n tag__in=tags_for_removal).delete()\n # Add new tags\n for tag in updated_tags:\n if tag not in current_tags:\n self.intermediary_table_model._default_manager.create(tag=tag, content_object=obj)", "async def slashtag_edit_arguments(self, ctx: commands.Context, tag: GuildTagConverter):\n await tag.edit_options(ctx)", "def tags(self, tags):\n self._tags = tags", "def tags(self, tags):\n self._tags = tags", "def tags(self, tags):\n self._tags = tags", "def _selected_label_changed(self, name, old, 
new):\n if self.value_lock.acquire(False):\n try:\n self.value = self._options_dict[new]\n finally:\n self.value_lock.release()" ]
[ "0.7586686", "0.6757567", "0.6380285", "0.595404", "0.58845717", "0.57431585", "0.5715949", "0.57073283", "0.5632384", "0.56054455", "0.55602384", "0.554889", "0.5507737", "0.5482022", "0.5447586", "0.5431845", "0.5413043", "0.5364032", "0.53272116", "0.5323045", "0.52857333", "0.5236446", "0.52265525", "0.5221534", "0.5214484", "0.52094156", "0.52013206", "0.52013206", "0.52013206", "0.5125871" ]
0.7404219
1
Initializes an instance of the ClosurizedNamespacesInfo class.
def __init__(self, closurized_namespaces, ignored_extra_namespaces):
    self._closurized_namespaces = closurized_namespaces
    self._ignored_extra_namespaces = (ignored_extra_namespaces +
                                      DEFAULT_EXTRA_NAMESPACES)
    self.Reset()
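A short instantiation sketch; the argument values mirror the test call that appears among the negatives below, and the module name is assumed.

info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
    closurized_namespaces=['package'],
    ignored_extra_namespaces=[])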
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, *args):\n this = _libsbml.new_SBMLNamespaces(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _libsbml.new_XMLNamespaces(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, default_ns, namespaces=[]):\n self.document = prov.ProvDocument ()\n self.default_ns = default_ns\n self.document.set_default_namespace (self.default_ns)\n self.namespaces = namespaces\n self.subspaces = {}\n for namespace in self.namespaces:\n self.subspaces[namespace] = self.add_namespace (self.default_ns, namespace)", "def namespaces(self):\n return ()", "def __init__(self, namespace_id='', namespace_url=''):\n self.namespace_id = namespace_id\n self.namespace_url = namespace_url", "def _load_namespaces(self):\n nsdocs = self._docset.get_namespaces()\n for nsdoc in nsdocs:\n nsobj = Namespace(nsdoc)\n self._docmap[nsdoc] = nsobj\n self._namespaces.add(nsobj)", "def __init__(self):\n this = _libsbml.new_ListWrapperSBMLNamespaces()\n try: self.this.append(this)\n except: self.this = this", "def __init__(\n self, name: str, headers: List[str], uses: List[str],\n namespaces: List[Namespace]) -> None:\n self.name = name\n self.headers = headers\n self.uses = uses\n self.namespaces = namespaces\n\n # set ns_prefix throughout the description\n ns_for_name: Dict[str, Tuple[str, str]] = dict()\n for namespace in self.namespaces:\n for member in namespace.members:\n ns_for_name[member.name] = (namespace.c_prefix, namespace.f_prefix)\n\n for namespace in namespaces:\n namespace.set_ns_prefix(ns_for_name)", "def __init__(__self__, *,\n namespaces: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):\n if namespaces is not None:\n pulumi.set(__self__, \"namespaces\", namespaces)", "def __init__(__self__, *,\n namespaced_names: Optional[pulumi.Input[Sequence[pulumi.Input['NamespacedNameArgs']]]] = None):\n if namespaced_names is not None:\n pulumi.set(__self__, \"namespaced_names\", namespaced_names)", "def __init__(self, default_namespace=None, names=None):\n if names is None:\n names = {}\n self._names = names\n self._default_namespace = default_namespace", "def init_ns(self, ns):\n\n def slist(body):\n \"\"\" Return body as SList (string list) \"\"\"\n return SList(body.split(\"\\n\"))\n\n ns['slist'] = slist\n\n # xxx todo perhaps add more?", "def initialize_namespace(name, objects=None, root_class=None, suffix=None):\n\n if root_class:\n base = collect_subclasses(root_class, suffix)\n else:\n base = {}\n\n if objects:\n base.update(objects)\n _namespaces[name] = base\n return base", "def _init_namespace(self):\n if GrondwaterMonsterSearch.__wfs_namespace is None:\n GrondwaterMonsterSearch.__wfs_namespace = self._get_namespace()", "def __init__(self, namespace_prefix):\n self._namespace_prefix = namespace_prefix", "def update_namespaces_info(self):\n namespaces = BlockDev.nvdimm_list_namespaces(idle=True)\n\n self._namespaces = dict((namespace.dev, namespace) for namespace in namespaces)", "def _some_namespaces(self):\n n = Namespace(doc='top')\n n.add_option('aaa', '2011-05-04T15:10:00', 'the a',\n short_form='a',\n from_string_converter=dtu.datetime_from_ISO_string\n )\n n.c = Namespace(doc='c space')\n n.c.add_option('fred', 'stupid', 'husband from Flintstones')\n n.c.add_option('wilma', 'waspish', 'wife from Flintstones')\n n.c.e = Namespace(doc='e space')\n n.c.e.add_option('dwight',\n default=97,\n doc='my uncle')\n n.c.add_option('dwight',\n default=98,\n doc='your uncle')\n n.d = Namespace(doc='d 
space')\n n.d.add_option('fred', 'crabby', 'male neighbor from I Love Lucy')\n n.d.add_option('ethel', 'silly',\n 'female neighbor from I Love Lucy')\n n.x = Namespace(doc='x space')\n n.x.add_option('size', 100, 'how big in tons', short_form='s')\n n.x.add_option('password', 'secret', 'the password')\n return n", "def setNamespaces(self, *args):\n return _libsbml.SBase_setNamespaces(self, *args)", "def get_namespaces(self):\n if self.namespaces is None:\n namespaces = unpack(self.api.get_namespaces())\n self.namespaces = {\n namespace['name']: DevopsSecurityNamespace(namespace)\n for namespace in namespaces\n }\n return self.namespaces", "def create_or_fetch_namespace(self):\n\n def _create_new_namespace():\n logger.info(\n f\"Creating a new namespace: {self.namespace_name} in {self.namespace_region}\"\n )\n\n data = {\n \"name\": self.namespace_name,\n \"resource_group_id\": self.resource_group_id,\n \"resource_plan_id\": \"functions-base-plan\",\n }\n\n res = requests.post(\n self.cf_namespaces_url, headers=self.get_headers(), json=data\n ).json()\n if res.status_code != 200:\n logger.error(res.text)\n namespace_id = res[\"id\"]\n logger.info(f\"Created new namespace with id: {namespace_id}\")\n return namespace_id\n\n def _get_cloud_function_namespaces_metadata(offset=0):\n \"\"\"returns meta data on namespaces of ibm cloud functions within a specified region\n :param offset - offset from the beginning of the list of results attained from the GET request,\n which may contain up to 200 namespaces per http response\"\"\"\n\n res = requests.get(\n f\"{self.cf_namespaces_url}?limit=200&offset={offset}\",\n headers=self.get_headers(),\n )\n return json.loads(res.text)\n\n def _get_cloud_function_namespaces():\n \"\"\"returns relevant metadata on existing namespaces within a given region.\"\"\"\n logger.info(\n f\"Obtaining Cloud Function namespaces in {self.namespace_region}\"\n )\n\n namespaces = []\n\n collecting_namespaces = True\n max_limit = 200\n offset = 0\n\n # request for namespaces is limited to 200 at a time, thus the request is fulfilled in increments of 200s.\n while collecting_namespaces:\n namespace_metadata = _get_cloud_function_namespaces_metadata(offset)\n if namespace_metadata[\"total_count\"] == max_limit:\n offset += max_limit\n else:\n collecting_namespaces = False\n\n for name_space in namespace_metadata[\"namespaces\"]:\n if \"name\" in name_space: # API based namespace\n namespaces.append(\n {\n \"name\": name_space[\"name\"],\n \"type\": \"API_based\",\n \"id\": name_space[\"id\"],\n \"region\": name_space[\"location\"],\n }\n )\n\n else: # cloud foundry based namespace\n namespaces.append(\n {\n \"name\": name_space[\"id\"],\n \"type\": \"CF_based\",\n \"region\": name_space[\"location\"],\n }\n )\n\n return namespaces\n\n namespaces_in_region = _get_cloud_function_namespaces()\n target_namespace_id = None\n if namespaces_in_region:\n target_namespace_id = next(\n (\n namespace[\"id\"]\n for namespace in namespaces_in_region\n if namespace[\"name\"] == self.namespace_name\n ),\n None,\n )\n if not target_namespace_id:\n target_namespace_id = _create_new_namespace()\n else:\n logger.info(f\"Reusing namespace: {target_namespace_id}\")\n return target_namespace_id", "def namespaces(self, psuedo=True):\n if self._namespaces == None:\n result = self.call({'action': 'query',\n 'meta': 'siteinfo',\n 'siprop': 'namespaces'})\n self._namespaces = {}\n self._psuedo_namespaces = {}\n for nsid in result['query']['namespaces']:\n if int(nsid) >= 0:\n 
self._namespaces[int(nsid)] = \\\n result['query']['namespaces'][nsid]['*']\n else:\n self._psuedo_namespaces[int(nsid)] = \\\n result['query']['namespaces'][nsid]['*']\n if psuedo:\n retval = {}\n retval.update(self._namespaces)\n retval.update(self._psuedo_namespaces)\n return retval\n else:\n return self._namespaces", "def _init_temporary_namespace(cls, nsObj):\n pass", "def _getnamespaces(cls):\n return \" \".join(Kmlable._namespaces)", "def getNamespaces(self):\n return _libsbml.SBase_getNamespaces(self)", "def __init__(self, openid_namespace=None):\n self.args = {}\n self.namespaces = NamespaceMap()\n if openid_namespace is None:\n self._openid_ns_uri = None\n else:\n implicit = openid_namespace in OPENID1_NAMESPACES\n self.setOpenIDNamespace(openid_namespace, implicit)", "def namespaces(self):\n if not self._namespaces:\n self.update_namespaces_info()\n\n return self._namespaces", "def testGetClosurizedNamespace(self):\n namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(\n closurized_namespaces=['package'], ignored_extra_namespaces=[])\n for identifier, expected_namespace in self.__test_cases.items():\n actual_namespace = namespaces_info.GetClosurizedNamespace(identifier)\n self.assertEqual(\n expected_namespace,\n actual_namespace,\n 'expected namespace \"' + str(expected_namespace) +\n '\" for identifier \"' + str(identifier) + '\" but was \"' +\n str(actual_namespace) + '\"')", "def prefixes(self):\n # a new OntCuries-like object that wraps NamespaceManager\n # and can leverage its trie\n self.namespace_manager\n raise NotImplementedError('yet')", "def __init__(self, namespace_topology):\n self.s_namespaces = namespace_topology\n self.validated_indexes = self._get_index_validity()", "def test_get_namespaces_names(self):\n pass" ]
[ "0.64506286", "0.6413823", "0.62931633", "0.61669827", "0.60347897", "0.5980294", "0.58920556", "0.58700234", "0.5846751", "0.5769954", "0.57318336", "0.5715054", "0.5680259", "0.566", "0.56599915", "0.5620923", "0.5602918", "0.554831", "0.55419225", "0.5532889", "0.5530244", "0.5518675", "0.5498795", "0.5485668", "0.54694927", "0.5454805", "0.54466915", "0.5445592", "0.5377831", "0.53666925" ]
0.66668606
0
Returns the namespaces which are already required by this file.
def GetRequiredNamespaces(self): return set(self._required_namespaces)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_namespaces():\n return list(StaticAsset._load_namespaces().keys())", "def GetProvidedNamespaces(self):\n return set(self._provided_namespaces)", "def namespaces(self):\n namespaces = set()\n for namespace_package in self.namespace_packages:\n dotted_name = []\n for component in namespace_package.split('.'):\n dotted_name.append(component)\n namespaces.add(tuple(dotted_name))\n return sorted(namespaces, key=lambda n: len(n))", "def namespaces(self):\n return [self._namespace_prefix]", "def namespaces(self):\n return ()", "def namespaces(self):\n if not self._namespaces:\n self.update_namespaces_info()\n\n return self._namespaces", "def getNamespaces(self):\n return _libsbml.SBase_getNamespaces(self)", "def namespaces(self):\n return list(self._namespace_schemas.keys())", "def getNamespaces(self):\n return _libsbml.SBMLDocument_getNamespaces(self)", "def included_namespaces(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"included_namespaces\")", "def namespace_packages(self):\n dotted_names = []\n namespace_packages_file = self.find_egg_info_file('namespace_packages.txt')\n if namespace_packages_file:\n with open(namespace_packages_file) as handle:\n for line in handle:\n line = line.strip()\n if line:\n dotted_names.append(line)\n return dotted_names", "def _getnamespaces(cls):\n return \" \".join(Kmlable._namespaces)", "def namespaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"namespaces\")", "def importedNamespaces (self):\n return frozenset(self.__importedNamespaces)", "def get_namespaces(self):\n if self.namespaces is None:\n namespaces = unpack(self.api.get_namespaces())\n self.namespaces = {\n namespace['name']: DevopsSecurityNamespace(namespace)\n for namespace in namespaces\n }\n return self.namespaces", "def get_pyxb_namespaces():\n return pyxb.namespace.utility.AvailableNamespaces()", "def getNamespaces(self, *args):\n return _libsbml.SBMLNamespaces_getNamespaces(self, *args)", "def getNamespaces(self):\n return _libsbml.XMLToken_getNamespaces(self)", "def referencedNamespaces (self):\n return frozenset(self.__referencedNamespaces)", "def get_namespaces():\r\n\r\n print 'Getting namespaces'\r\n tree = etree.parse('http://lesswrong.wikia.com/wiki/Special:AllPages', parser)\r\n options = tree.xpath('//select[@id=\"namespace\"]/option')\r\n namespaces = [option.get('value') for option in options]\r\n pprint(namespaces)\r\n return namespaces", "def getSupportedNamespaces():\n return _libsbml.SBMLNamespaces_getSupportedNamespaces()", "def namespaces(self) -> NamespacesType:\n return self.schema.namespaces", "def inScopeNamespaces (self):\n return self.__inScopeNamespaces", "def get_all_namespaces():\n cmds.namespace(setNamespace=':')\n return cmds.namespaceInfo(listOnlyNamespaces=True, recurse=True)", "def GetNamespaces(self):\n return list(self.type_namespaces_map.values())", "def _get_required_schemas(self, root):\n def _get_schemalocs(node):\n schemalocs = {}\n\n for ns in itervalues(node.nsmap):\n if ns not in self._schemalocs:\n continue\n\n schemalocs[ns] = self._schemalocs[ns]\n return schemalocs\n\n imports = {}\n for elem in root.iter():\n schemalocs = _get_schemalocs(elem)\n imports.update(schemalocs)\n\n return imports", "def GetMissingRequires(self):\n external_dependencies = set(self._required_namespaces)\n\n # Assume goog namespace is always available.\n external_dependencies.add('goog')\n # goog.module is treated as a builtin, too (for goog.module.get).\n external_dependencies.add('goog.module')\n\n 
created_identifiers = set()\n for unused_namespace, identifier, unused_line_number in (\n self._created_namespaces):\n created_identifiers.add(identifier)\n\n missing_requires = dict()\n illegal_alias_statements = dict()\n\n def ShouldRequireNamespace(namespace, identifier):\n \"\"\"Checks if a namespace would normally be required.\"\"\"\n return (\n not self._IsPrivateIdentifier(identifier) and\n namespace not in external_dependencies and\n namespace not in self._provided_namespaces and\n identifier not in external_dependencies and\n identifier not in created_identifiers and\n namespace not in missing_requires)\n\n # First check all the used identifiers where we know that their namespace\n # needs to be provided (unless they are optional).\n for ns in self._used_namespaces:\n namespace = ns.namespace\n identifier = ns.identifier\n if (not ns.alias_definition and\n ShouldRequireNamespace(namespace, identifier)):\n missing_requires[namespace] = ns.GetLine()\n\n # Now that all required namespaces are known, we can check if the alias\n # definitions (that are likely being used for typeannotations that don't\n # need explicit goog.require statements) are already covered. If not\n # the user shouldn't use the alias.\n for ns in self._used_namespaces:\n if (not ns.alias_definition or\n not ShouldRequireNamespace(ns.namespace, ns.identifier)):\n continue\n if self._FindNamespace(ns.identifier, self._provided_namespaces,\n created_identifiers, external_dependencies,\n missing_requires):\n continue\n namespace = ns.identifier.rsplit('.', 1)[0]\n illegal_alias_statements[namespace] = ns.token\n\n return missing_requires, illegal_alias_statements", "def included_namespaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"included_namespaces\")", "def get_packages_with_prefixes():\n return get_resources('packages')", "def namespaces(self):\n return self.namespaced_fields().namespaces()" ]
[ "0.7514273", "0.7426737", "0.7389762", "0.7381562", "0.73246866", "0.7281683", "0.72748566", "0.71076447", "0.70576507", "0.70466834", "0.700075", "0.6840143", "0.6832781", "0.6823503", "0.6747387", "0.6740858", "0.6717921", "0.6711543", "0.66982067", "0.66495764", "0.6623423", "0.65996754", "0.656574", "0.6547791", "0.6530248", "0.65251565", "0.65116507", "0.65088993", "0.6494279", "0.6468498" ]
0.8207242
0
Returns whether the given goog.provide token is unnecessary.
def IsExtraProvide(self, token): namespace = tokenutil.GetStringAfterToken(token) if self.GetClosurizedNamespace(namespace) is None: return False if token in self._duplicate_provide_tokens: return True # TODO(user): There's probably a faster way to compute this. for created_namespace, created_identifier, _ in self._created_namespaces: if namespace == created_namespace or namespace == created_identifier: return False return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def IsFirstProvide(self, token):\n return self._provide_tokens and token == self._provide_tokens[0]", "def IsLastProvide(self, token):\n return self._provide_tokens and token == self._provide_tokens[-1]", "def isImportantToken(self, token, ignoreSemanticTagList=[]):\n if len(ignoreSemanticTagList) > 0: \n tags = token.getSemanticTagMatches(ignoreSemanticTagList)\n else:\n tags = []\n return token.isSymbol() == False \\\n and token.text not in self.ignoreWords and len(tags) == 0", "def _suppress(self, key):\n return key in self.SUPPRESS", "def has_unk(self) -> bool:\n return hasattr(self, 'unk_token')", "def _is_param_allowed(self, param, value):\n if param.startswith(\"utm_\"):\n return False\n if param == \"spref\" and value == \"tw\":\n return False\n return True", "def IsExtraRequire(self, token):\n namespace = tokenutil.GetStringAfterToken(token)\n\n if self.GetClosurizedNamespace(namespace) is None:\n return False\n\n if namespace in self._ignored_extra_namespaces:\n return False\n\n if token in self._duplicate_require_tokens:\n return True\n\n if namespace in self._suppressed_requires:\n return False\n\n # If the namespace contains a component that is initial caps, then that\n # must be the last component of the namespace.\n parts = namespace.split('.')\n if len(parts) > 1 and parts[-2][0].isupper():\n return True\n\n # TODO(user): There's probably a faster way to compute this.\n for ns in self._used_namespaces:\n if (not ns.alias_definition and (\n namespace == ns.namespace or namespace == ns.identifier)):\n return False\n\n return True", "def requires_token(self) -> bool:\n # both attribute and placeholder in url are required to make it work\n for key, val in self.items():\n if isinstance(val, str) and \"<insert your\" in val and key in self.url:\n return True\n return False", "def is_blacklisted(token):\n if Revoked.query.filter_by(token=token).first():\n return True\n return False", "def _check_required_if_provider(self):\n return", "def not_required(self, gp: GriddedPerm) -> bool:\n return all(\n any(gp not in req for req in req_list)\n for req_list in self._tiling.requirements\n )", "def is_exempt(self) -> bool:\n\n if self.exempt_when:\n return self.exempt_when()\n return False", "def is_uncased(request) -> bool:\n return request.param", "def IsLastRequire(self, token):\n return self._require_tokens and token == self._require_tokens[-1]", "def allow_unresolved_secret_tokens(self):\n return self._allow_unresolved_secret_tokens", "def is_missing_token_service(request):\n if request.json == {}:\n return True\n schema = schema_utils.get_auth_schema()\n validator = Validator(schema, require_all=True)\n result = validator.validate(request.json)\n if validator.errors:\n logging.error(str(validator.errors))\n return not result", "def provider(provider):\n if provider in (\"alditalk\", \"netzclub\", \"congstar\"):\n return True\n else:\n return False", "def is_token_required(self):\n return any([self.app_id, self._login, self._password])", "def IsFirstRequire(self, token):\n return self._require_tokens and token == self._require_tokens[0]", "def is_public_token(token):\n return (\n token.payload\n and token.payload.get(\"roles\") == [NONE]\n and token.payload.get(\"context_id\") is None\n and token.payload.get(\"consumer_site\") is None\n )", "def _is_reserved_name(content_name: str) -> bool:\n return content_name in RESERVED_NAMES", "def _should_ignore(self, name):\n _name = name.lower()\n return (_name.startswith(\"deprecated\") or\n _name.startswith(\"_\") or\n _name in 
(\"remote\", \"reserved\",\n \"dialogs_py\", \"dialogs_ipy\", \"dialogs_jy\"))", "def always_include_in_token(self) -> bool:\n return pulumi.get(self, \"always_include_in_token\")", "def is_emptiable(self) -> bool:\n raise NotImplementedError()", "def available(self):\n return self.access_token is not None", "def discard_name(self) -> bool:\n\n if not self.is_name_length_valid():\n return True\n\n if self.app_name_no_punc().lower() in self.discard:\n return True\n\n if self.is_substring_unwanted():\n return True\n\n if self.unwanted_regex_match():\n return True\n\n return self.is_name_mostly_numeric()", "def ignore_builtin_verification():\n return not current_space().skip_builtin_verification", "def valid_spotify_token(token: str) -> bool:\n test_url = \"https://api.spotify.com/v1/tracks/11dFghVXANMlKmJXsNCbNl\"\n headers = {\"Authorization\": \"Bearer {}\".format(token)}\n response = requests.get(test_url, headers=headers)\n return response.status_code == 200", "def is_not_used(self):\n pass", "def is_unk(self, token: str) -> bool:\n return self.stoi[token] == DEFAULT_UNK_ID" ]
[ "0.68526685", "0.6557919", "0.5716231", "0.54505974", "0.54359466", "0.54258895", "0.5409967", "0.5333457", "0.52942836", "0.5160305", "0.51457137", "0.51277775", "0.5105873", "0.5047557", "0.50473565", "0.50441617", "0.50110626", "0.49972895", "0.49934402", "0.4982881", "0.497799", "0.49607432", "0.4949489", "0.49422997", "0.4941259", "0.49356815", "0.49202728", "0.4903269", "0.49018687", "0.4897257" ]
0.7508166
0
Returns whether the given goog.require token is unnecessary.
def IsExtraRequire(self, token): namespace = tokenutil.GetStringAfterToken(token) if self.GetClosurizedNamespace(namespace) is None: return False if namespace in self._ignored_extra_namespaces: return False if token in self._duplicate_require_tokens: return True if namespace in self._suppressed_requires: return False # If the namespace contains a component that is initial caps, then that # must be the last component of the namespace. parts = namespace.split('.') if len(parts) > 1 and parts[-2][0].isupper(): return True # TODO(user): There's probably a faster way to compute this. for ns in self._used_namespaces: if (not ns.alias_definition and ( namespace == ns.namespace or namespace == ns.identifier)): return False return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def IsFirstRequire(self, token):\n return self._require_tokens and token == self._require_tokens[0]", "def IsLastRequire(self, token):\n return self._require_tokens and token == self._require_tokens[-1]", "def isImportantToken(self, token, ignoreSemanticTagList=[]):\n if len(ignoreSemanticTagList) > 0: \n tags = token.getSemanticTagMatches(ignoreSemanticTagList)\n else:\n tags = []\n return token.isSymbol() == False \\\n and token.text not in self.ignoreWords and len(tags) == 0", "def IsExtraProvide(self, token):\n namespace = tokenutil.GetStringAfterToken(token)\n\n if self.GetClosurizedNamespace(namespace) is None:\n return False\n\n if token in self._duplicate_provide_tokens:\n return True\n\n # TODO(user): There's probably a faster way to compute this.\n for created_namespace, created_identifier, _ in self._created_namespaces:\n if namespace == created_namespace or namespace == created_identifier:\n return False\n\n return True", "def token_filter(tok):\n return tok is token or \\\n tok.dep_.endswith(\"mod\") or \\\n tok.dep_ == \"compound\"", "def not_required(self, gp: GriddedPerm) -> bool:\n return all(\n any(gp not in req for req in req_list)\n for req_list in self._tiling.requirements\n )", "def is_token_required(self):\n return any([self.app_id, self._login, self._password])", "def is_missing_token_service(request):\n if request.json == {}:\n return True\n schema = schema_utils.get_auth_schema()\n validator = Validator(schema, require_all=True)\n result = validator.validate(request.json)\n if validator.errors:\n logging.error(str(validator.errors))\n return not result", "def requires_token(self) -> bool:\n # both attribute and placeholder in url are required to make it work\n for key, val in self.items():\n if isinstance(val, str) and \"<insert your\" in val and key in self.url:\n return True\n return False", "def requires_scope(required_scope):\n token = get_token_auth_header()\n unverified_claims = jwt.get_unverified_claims(token)\n if unverified_claims.get(\"scope\"):\n token_scopes = unverified_claims[\"scope\"].split()\n for token_scope in token_scopes:\n if token_scope == required_scope:\n return True\n return False", "def requires_scope(required_scope):\n token = get_token_auth_header()\n unverified_claims = jwt.get_unverified_claims(token)\n if unverified_claims.get(\"scope\"):\n token_scopes = unverified_claims[\"scope\"].split()\n for token_scope in token_scopes:\n if token_scope == required_scope:\n return True\n return False", "def validate_scope(scope_required, scope_token):\n if scope_required:\n service, function, actions = scope_required.split(':')\n\n if (service != scope_token['type'] and scope_token['type'] != '*') or \\\n (function != scope_token['name'] and scope_token['name'] != '*') or \\\n (actions not in scope_token['actions'] and '*' not in scope_token['actions']):\n raise Unauthorized('Scope not allowed!')", "def IsFirstProvide(self, token):\n return self._provide_tokens and token == self._provide_tokens[0]", "def shall_skip(module):\n # skip it, if there is nothing (or just \\n or \\r\\n) in the file\n return path.getsize(module) < 3", "def has_unk(self) -> bool:\n return hasattr(self, 'unk_token')", "def _is_opinion_mod(token: tokens.Token) -> bool:\n is_mod = token.dep_ in {\"amod\", \"advmod\"}\n is_op = token.text.lower() in _OPINION_WORDS\n return is_mod and is_op", "def always_include_in_token(self) -> bool:\n return pulumi.get(self, \"always_include_in_token\")", "def verify_token(self, token):\n return False", "def _is_user_included_op(self, 
op):\n for opname_re in self._parameters.included_opname_re_list:\n if opname_re.match(op.name):\n return True\n\n for optype_re in self._parameters.included_optype_re_list:\n if optype_re.match(op.type):\n return True\n return False", "def always_include_in_token(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"always_include_in_token\")", "def always_include_in_token(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"always_include_in_token\")", "def google_validate(self, token):\n try:\n payload = client.verify_id_token(token, GOOGLE_USER_ID)\n if payload['iss'] not in ['accounts.google.com', 'https://accounts.google.com'] or payload['aud'] != GOOGLE_USER_ID:\n return False\n else:\n return payload\n except crypt.AppIdentityError:\n return False", "def test_unused_token_is_valid(self):\n assert self.token.is_valid()", "def _entry_has_required_features(entry: _LexiconEntry) -> None:\n features = _features_of(entry)\n tag = _tag_of(entry)\n required = tags.REQUIRED_FEATURES[tag]\n\n if features == \"~\" and required:\n raise InvalidLexiconEntryError(\"Entry is missing required features.\")", "def check_for_token(token):\n try:\n decode_token(token)\n return True\n except:\n return False", "def _skip(app, what, name, obj, skip, options):\n if name.startswith(\"_\") and name not in \\\n [\"__qualname__\",\n \"__module__\",\n \"__dict__\",\n \"__doc__\",\n \"__weakref__\",\n ]:\n return False\n return skip", "def IsLastProvide(self, token):\n return self._provide_tokens and token == self._provide_tokens[-1]", "def verify_local_token(self, token):\n return token == self.master_local_token.get_token()", "def verify_token(token):\n try:\n idinfo = client.verify_id_token(token, app.config['GOOGLE_CLIENT_ID'])\n if idinfo['iss'] not in [\n 'accounts.google.com',\n 'https://accounts.google.com'\n ]:\n raise crypt.AppIdentityError(\"Wrong issuer.\")\n except crypt.AppIdentityError:\n return False\n return True", "def _is_real_word(self, token):\n return not (token in self._non_words)" ]
[ "0.7097087", "0.6847048", "0.6291652", "0.6257764", "0.6020306", "0.6010145", "0.59261143", "0.57647413", "0.5722998", "0.5658726", "0.5658726", "0.5645742", "0.5449439", "0.5385633", "0.53722316", "0.5368333", "0.5328896", "0.5321386", "0.5280161", "0.5277081", "0.5277081", "0.5222387", "0.5217622", "0.5152063", "0.51438373", "0.5139744", "0.51387846", "0.51321214", "0.51218575", "0.5116538" ]
0.7031952
1
Returns the dict of missing required namespaces for the current file. For each nonprivate identifier used in the file, find either a goog.require, goog.provide or a created identifier that satisfies it. goog.require statements can satisfy the identifier by requiring either the namespace of the identifier or the identifier itself. goog.provide statements can satisfy the identifier by providing the namespace of the identifier. A created identifier can only satisfy the used identifier if it matches it exactly (necessary since things can be defined on a namespace in more than one file). Note that provided namespaces should be a subset of created namespaces, but we check both because in some cases we can't always detect the creation of the namespace.
def GetMissingRequires(self): external_dependencies = set(self._required_namespaces) # Assume goog namespace is always available. external_dependencies.add('goog') # goog.module is treated as a builtin, too (for goog.module.get). external_dependencies.add('goog.module') created_identifiers = set() for unused_namespace, identifier, unused_line_number in ( self._created_namespaces): created_identifiers.add(identifier) missing_requires = dict() illegal_alias_statements = dict() def ShouldRequireNamespace(namespace, identifier): """Checks if a namespace would normally be required.""" return ( not self._IsPrivateIdentifier(identifier) and namespace not in external_dependencies and namespace not in self._provided_namespaces and identifier not in external_dependencies and identifier not in created_identifiers and namespace not in missing_requires) # First check all the used identifiers where we know that their namespace # needs to be provided (unless they are optional). for ns in self._used_namespaces: namespace = ns.namespace identifier = ns.identifier if (not ns.alias_definition and ShouldRequireNamespace(namespace, identifier)): missing_requires[namespace] = ns.GetLine() # Now that all required namespaces are known, we can check if the alias # definitions (that are likely being used for typeannotations that don't # need explicit goog.require statements) are already covered. If not # the user shouldn't use the alias. for ns in self._used_namespaces: if (not ns.alias_definition or not ShouldRequireNamespace(ns.namespace, ns.identifier)): continue if self._FindNamespace(ns.identifier, self._provided_namespaces, created_identifiers, external_dependencies, missing_requires): continue namespace = ns.identifier.rsplit('.', 1)[0] illegal_alias_statements[namespace] = ns.token return missing_requires, illegal_alias_statements
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetMissingProvides(self):\n missing_provides = dict()\n for namespace, identifier, line_number in self._created_namespaces:\n if (not self._IsPrivateIdentifier(identifier) and\n namespace not in self._provided_namespaces and\n identifier not in self._provided_namespaces and\n namespace not in self._required_namespaces and\n namespace not in missing_provides):\n missing_provides[namespace] = line_number\n\n return missing_provides", "def GetRequiredNamespaces(self):\n return set(self._required_namespaces)", "def ShouldRequireNamespace(namespace, identifier):\n return (\n not self._IsPrivateIdentifier(identifier) and\n namespace not in external_dependencies and\n namespace not in self._provided_namespaces and\n identifier not in external_dependencies and\n identifier not in created_identifiers and\n namespace not in missing_requires)", "def Reset(self):\n\n # A list of goog.provide tokens in the order they appeared in the file.\n self._provide_tokens = []\n\n # A list of goog.require tokens in the order they appeared in the file.\n self._require_tokens = []\n\n # Namespaces that are already goog.provided.\n self._provided_namespaces = []\n\n # Namespaces that are already goog.required.\n self._required_namespaces = []\n\n # Note that created_namespaces and used_namespaces contain both namespaces\n # and identifiers because there are many existing cases where a method or\n # constant is provided directly instead of its namespace. Ideally, these\n # two lists would only have to contain namespaces.\n\n # A list of tuples where the first element is the namespace of an identifier\n # created in the file, the second is the identifier itself and the third is\n # the line number where it's created.\n self._created_namespaces = []\n\n # A list of UsedNamespace instances.\n self._used_namespaces = []\n\n # A list of seemingly-unnecessary namespaces that are goog.required() and\n # annotated with @suppress {extraRequire}.\n self._suppressed_requires = []\n\n # A list of goog.provide tokens which are duplicates.\n self._duplicate_provide_tokens = []\n\n # A list of goog.require tokens which are duplicates.\n self._duplicate_require_tokens = []\n\n # Whether this file is in a goog.scope. 
Someday, we may add support\n # for checking scopified namespaces, but for now let's just fail\n # in a more reasonable way.\n self._scopified_file = False\n\n # TODO(user): Handle the case where there are 2 different requires\n # that can satisfy the same dependency, but only one is necessary.", "def calculate_incorrect_name_dict(graph: BELGraph) -> Mapping[str, List[str]]:\n missing = defaultdict(list)\n\n for namespace, name in _iterate_namespace_name(graph):\n missing[namespace].append(name)\n\n return dict(missing)", "def GetProvidedNamespaces(self):\n return set(self._provided_namespaces)", "def _filter_non_existing_namespaces(namespaces, k8s_cli):\n return_code, out = run_shell_command(\n \"{} get ns -o=custom-columns=\\\"DATA:metadata.name\\\" --no-headers=true\".format(k8s_cli))\n if return_code:\n return []\n res = []\n existing_namespaces = set(out.split())\n for namespace in namespaces:\n if namespace in existing_namespaces:\n res.append(namespace)\n else:\n logger.warning(\"Namespace %s doesn't exist - Skipping\", namespace)\n return res", "def _get_required_schemas(self, root):\n def _get_schemalocs(node):\n schemalocs = {}\n\n for ns in itervalues(node.nsmap):\n if ns not in self._schemalocs:\n continue\n\n schemalocs[ns] = self._schemalocs[ns]\n return schemalocs\n\n imports = {}\n for elem in root.iter():\n schemalocs = _get_schemalocs(elem)\n imports.update(schemalocs)\n\n return imports", "def testIgnoredExtraNamespaces(self):\n token = self._GetRequireTokens('package.Something')\n namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(\n closurized_namespaces=['package'],\n ignored_extra_namespaces=['package.Something'])\n\n self.assertFalse(namespaces_info.IsExtraRequire(token),\n 'Should be valid since it is in ignored namespaces.')\n\n namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(\n ['package'], [])\n\n self.assertTrue(namespaces_info.IsExtraRequire(token),\n 'Should be invalid since it is not in ignored namespaces.')", "def _determine_uses(self, included_files, forward_declarations):\n file_uses = dict.fromkeys(included_files, UNUSED)\n decl_uses = dict.fromkeys(forward_declarations, UNUSED)\n symbol_table = self.symbol_table\n\n def _add_reference(name, namespace):\n if name in decl_uses:\n decl_uses[name] |= USES_REFERENCE\n else:\n nss = ''\n for ns in namespace:\n if ns is None:\n continue\n nss += ns + '::'\n if nss + name in decl_uses:\n decl_uses[nss + name] |= USES_REFERENCE\n return\n\n try:\n file_use_node = symbol_table.lookup_symbol(name, namespace)\n except symbols.Error:\n return\n name = file_use_node[1].normalized_filename\n if name in file_uses:\n if isinstance(file_use_node[0], ast.Typedef):\n file_uses[name] |= USES_DECLARATION\n else:\n file_uses[name] |= USES_REFERENCE\n\n def _add_use(name, namespace):\n if isinstance(name, list):\n # name contains a list of tokens.\n name = '::'.join([n.name for n in name])\n elif not isinstance(name, basestring):\n # Happens when variables are defined with inlined types, e.g.:\n # enum {...} variable;\n return\n try:\n file_use_node = symbol_table.lookup_symbol(name, namespace)\n except symbols.Error:\n # TODO(nnorwitz): symbols from the current module\n # should be added to the symbol table and then this\n # exception should not happen...unless the code relies\n # on another header for proper compilation.\n # Store the use since we might really need to #include it.\n if namespace and None not in namespace and '::' not in name:\n name = '::'.join(namespace) + '::' + name\n 
file_uses[name] = file_uses.get(name, 0) | USES_DECLARATION\n return\n\n # TODO(nnorwitz): do proper check for ref/pointer/symbol.\n name = file_use_node[1].normalized_filename\n if name in file_uses:\n file_uses[name] |= USES_DECLARATION\n\n def _add_variable(node, namespace):\n if node.reference or node.pointer:\n _add_reference(node.name, namespace)\n else:\n _add_use(node.name, namespace)\n # This needs to recurse when the node is a templated type.\n _add_template_use(node.name,\n node.templated_types,\n namespace)\n\n def _process_function(function):\n if function.return_type:\n return_type = function.return_type\n _add_variable(return_type,\n function.namespace)\n\n templated_types = function.templated_types or ()\n for p in function.parameters:\n if p.type.name not in templated_types:\n if function.body and p.name and p.type.name:\n # Assume that if the the function has a body and a name\n # the parameter type is really used.\n # NOTE(nnorwitz): this is over-aggressive. It would be\n # better to iterate through the body and determine\n # actual uses based on local vars and data members\n # used.\n _add_use(p.type.name, function.namespace)\n else:\n _add_variable(p.type, function.namespace)\n\n def _process_function_body(function, namespace):\n iterator = iter(function.body)\n for t in iterator:\n if t.token_type == tokenize.NAME:\n if not keywords.is_keyword(t.name):\n # TODO(nnorwitz): handle :: names.\n # TODO(nnorwitz): handle static function calls.\n # TODO(nnorwitz): handle using statements in file.\n # TODO(nnorwitz): handle using statements in function.\n # TODO(nnorwitz): handle namespace assignment in file.\n _add_use(t.name, namespace)\n elif t.name in ('.', '->'):\n # Skip tokens after a dereference.\n next(iterator)\n\n def _add_template_use(name, types, namespace):\n if types:\n for cls in types:\n if name.endswith('_ptr') or cls.pointer:\n # Special case templated classes that end w/_ptr.\n # These are things like auto_ptr which do\n # not require the class definition, only decl.\n _add_reference(cls.name, namespace)\n else:\n _add_use(cls.name, namespace)\n _add_template_use(cls.name, cls.templated_types, namespace)\n\n # Iterate through the source AST/tokens, marking each symbols use.\n ast_seq = [self.ast_list]\n while ast_seq:\n for node in ast_seq.pop():\n if isinstance(node, ast.VariableDeclaration):\n _add_variable(node.type, node.namespace)\n elif isinstance(node, ast.Function):\n _process_function(node)\n if node.body:\n _process_function_body(node, node.namespace)\n elif isinstance(node, ast.Typedef):\n alias = node.alias\n if isinstance(alias, ast.Type):\n if alias.reference or alias.pointer:\n _add_reference(alias.name, node.namespace)\n else:\n _add_use(alias.name, node.namespace)\n _add_template_use('<typedef>', alias.templated_types,\n node.namespace)\n elif isinstance(node, ast.Friend):\n if node.expr and node.expr[0].name == 'class':\n name = ''.join([n.name for n in node.expr[1:]])\n _add_reference(name, node.namespace)\n elif isinstance(node, ast.Class) and node.body is not None:\n if node.body:\n ast_seq.append(node.body)\n _add_template_use('', node.bases, node.namespace)\n elif isinstance(node, ast.Union) and node.fields:\n pass # TODO(nnorwitz): impl\n\n return file_uses, decl_uses", "def _verify_include_files_used(self, file_uses, included_files):\n for include_file, use in file_uses.items():\n if not use & USES_DECLARATION:\n node, module = included_files[include_file]\n if module.ast_list is not None:\n msg = \"'{}' does not need to be 
#included\".format(\n node.filename)\n if use & USES_REFERENCE:\n msg += '; use a forward declaration instead'\n self._add_warning(msg, node)", "def _GetRequireTokens(self, namespace):\n line_text = 'goog.require(\\'' + namespace + '\\');\\n'\n return javascripttokenizer.JavaScriptTokenizer().TokenizeFile([line_text])", "def test_get_free_ns(self):\n xmlns = {\"a\": \"b\"}\n self.assertEqual(utils._get_free_ns(xmlns, \"abrac:adabra\"),\n \"_abrac_adabra\")\n self.assertEqual(xmlns, {\"a\": \"b\",\n \"_abrac_adabra\": \"abrac:adabra\"})\n # duplicate\n self.assertEqual(utils._get_free_ns(xmlns, \"abrac/adabra\"),\n \"__abrac_adabra_\")\n self.assertEqual(xmlns, {\"a\": \"b\",\n \"_abrac_adabra\": \"abrac:adabra\",\n \"__abrac_adabra_\": \"abrac/adabra\"})", "def find_define_file_uses(self):\n # Executing git grep is substantially faster than using the define_re\n # directly on the contents of the file in Python.\n for define_file in self.get_checked_define_files():\n excluded_files = set([define_file])\n excluded_files.update(define_file.get_included_files(recursive=True))\n all_defines = define_file.get_declared_defines()\n args = ['git', 'grep', '-zwIF']\n for define in all_defines:\n args.extend(['-e', define])\n args.extend(['--', '*.cpp', '*.c', '*.cu', '*.h', '*.cuh'])\n define_re = r'\\b(?:' + '|'.join(all_defines)+ r')\\b'\n output = subprocess.check_output(args, cwd=self._source_root).decode()\n for line in output.splitlines():\n (filename, text) = line.split('\\0')\n fileobj = self._files.get(filename)\n if fileobj is not None and fileobj not in excluded_files:\n defines = re.findall(define_re, text)\n fileobj.add_used_defines(define_file, defines)", "def mk_id_lookup(self, ns):\n if 'structures' not in self.ddef[ns].keys():\n print \"** Error. Namespace '%s' does not contain key 'structures'\" % ns\n sys.exit(1)\n if 'locations' not in self.ddef[ns].keys():\n print \"** Error. 
Namespace '%s' does not contain key 'locations'\" % ns\n sys.exit(1)\n # print \"found structures and locations in \" + ns\n id_lookup = {}\n referenced_structures = []\n for location in self.ddef[ns]['locations'].keys():\n ids = self.ddef[ns]['locations'][location]\n for id in ids:\n id_str, qty_str = self.parse_qty(id, \"?\")\n if id_str not in self.ddef[ns]['structures'] and id_str != '__custom':\n print \"** Error, in namespace '%s':\" % ns\n print \"structure '%s' referenced in nwb['%s']['locations']['%s'],\" % (id_str, ns, location)\n print \"but is not defined in nwb['%s']['structures']\" % ns\n sys.exit(1)\n referenced_structures.append(id_str)\n type = 'group' if id_str.endswith('/') else 'dataset'\n if id_str not in id_lookup.keys():\n id_lookup[id_str] = {} # initialize dictionary of locations\n id_lookup[id_str][location] = {'type': type, 'qty': qty_str, 'created':[] }\n # print \"Location=%s, id=%s, id_str=%s, qty_str=%s\" % (location, id, id_str, qty_str)\n # make sure every structure has at least one location\n no_location = []\n for id in self.ddef[ns]['structures']:\n if id not in referenced_structures:\n no_location.append(id)\n if len(no_location) > 0:\n pass\n # print \"** Warning, no location was specified for the following structure(s)\"\n # print \", \".join(no_location)\n # print \"This is not an error if they are referenced by a merge or include\" \n return id_lookup", "def check(self):\n illegalNamespaces = list()\n\n progStandard = re.compile(\"^[A-Z]{4}[0-9]{2}_[0-9]{3}$\")\n progShot = re.compile(\"^SH[0-9]{4}_[0-9]{3}$\")\n\n for namespaces in pm.namespaceInfo(listOnlyNamespaces=True, internal=False, recurse=True):\n for namespace in namespaces.split(\":\"):\n if not progStandard.match(namespace) and not progShot.match(namespace) not in [\"UI\", \"shared\"]:\n illegalNamespaces.append(namespace)\n\n if not illegalNamespaces:\n self.status = \"OK\"\n else:\n self.status = self.errorMode\n self.errorNodes = illegalNamespaces\n for illegalNamespace in illegalNamespaces:\n self.addError(\"%s is a illegal namespace\" % illegalNamespace)\n self.errorMessage = \"%s illegal namespace\" % (\n len(illegalNamespaces))", "def _verify_includes(self):\n files_seen = {}\n for node in self.ast_list:\n # Ignore #include <> files. 
Only handle #include \"\".\n # Assume that <> are used for only basic C/C++ headers.\n if isinstance(node, ast.Include) and not node.system:\n module = self._get_module(node)\n filename = module.normalized_filename\n\n normalized_filename = module.normalized_filename\n\n if is_cpp_file(filename):\n self._add_warning(\n \"should not #include C++ source file '{}'\".format(\n node.filename),\n node)\n\n if normalized_filename == self.normalized_filename:\n self._add_warning(\n \"'{}' #includes itself\".format(node.filename),\n node)\n\n if normalized_filename in files_seen:\n include_node = files_seen[normalized_filename]\n line_num = get_line_number(self.metrics, include_node)\n self._add_warning(\n \"'{}' already #included on line {}\".format(\n node.filename,\n line_num),\n node)\n\n files_seen[normalized_filename] = node", "def declares_namespace_package(cls, filename):\n with open(filename) as fp:\n init_py = ast.parse(fp.read(), filename)\n calls = [node for node in ast.walk(init_py) if isinstance(node, ast.Call)]\n for call in calls:\n if len(call.args) != 1:\n continue\n if isinstance(call.func, ast.Attribute) and call.func.attr != 'declare_namespace':\n continue\n if isinstance(call.func, ast.Name) and call.func.id != 'declare_namespace':\n continue\n if isinstance(call.args[0], ast.Name) and call.args[0].id == '__name__':\n return True\n return False", "def missing_in_gn_by_file(self):\n return self._missing_gn_files", "def _load_namespaces(self):\n nsdocs = self._docset.get_namespaces()\n for nsdoc in nsdocs:\n nsobj = Namespace(nsdoc)\n self._docmap[nsdoc] = nsobj\n self._namespaces.add(nsobj)", "def _validate_namespaces(self, input_namespaces):\r\n output_namespaces = []\r\n if input_namespaces == []:\r\n return output_namespaces\r\n elif '*' in input_namespaces:\r\n if len(input_namespaces) > 1:\r\n warning = 'Warning: Multiple namespaces are '\r\n warning += 'ignored when one namespace is \"*\"\\n'\r\n sys.stderr.write(warning)\r\n return output_namespaces\r\n else:\r\n for namespace in input_namespaces:\r\n if not isinstance(namespace, unicode):\r\n namespace = unicode(namespace)\r\n namespace_tuple = self._tuplefy_namespace(namespace)\r\n if namespace_tuple is None:\r\n warning = 'Warning: Invalid namespace ' + namespace\r\n warning += ' will be ignored\\n'\r\n sys.stderr.write(warning)\r\n else:\r\n if namespace_tuple not in output_namespaces:\r\n output_namespaces.append(namespace_tuple)\r\n else:\r\n warning = 'Warning: Duplicate namespace ' + namespace\r\n warning += ' will be ignored\\n'\r\n sys.stderr.write(warning)\r\n return output_namespaces", "def pkg_ifcs_requires(me, pkg, ifcs):\n un = set()\n for i in ifcs:\n if (pkg,i) in me._pkg_ifc_reqs:\n un.update(me._pkg_ifc_reqs[pkg,i])\n return un", "def get_contrib_requirements(filepath: str) -> Dict:\n with open(filepath) as file:\n tree = ast.parse(file.read())\n\n requirements_info = {\"requirements\": []}\n for child in ast.iter_child_nodes(tree):\n if not isinstance(child, ast.ClassDef):\n continue\n current_class = child.name\n for node in ast.walk(child):\n if isinstance(node, ast.Assign):\n try:\n target_ids = [target.id for target in node.targets]\n except (ValueError, AttributeError):\n # some assignment types assign to non-node objects (e.g. 
Tuple)\n target_ids = []\n if \"library_metadata\" in target_ids:\n library_metadata = ast.literal_eval(node.value)\n requirements = library_metadata.get(\"requirements\", [])\n if type(requirements) == str:\n requirements = [requirements]\n requirements_info[current_class] = requirements\n requirements_info[\"requirements\"] += requirements\n\n return requirements_info", "def calculate_missing(base_pkg, missing, file_deps, use_test_depends=False):\n rospack = rospkg.RosPack()\n for launch_file in file_deps.keys():\n pkg = rospkg.get_package_name(os.path.dirname(os.path.abspath(launch_file)))\n\n if pkg is None: #cannot determine package\n print(\"ERROR: cannot determine package for [%s]\"%pkg, file=sys.stderr)\n continue\n m = rospack.get_manifest(pkg)\n d_pkgs = set([d.name for d in m.depends])\n if m.is_catkin:\n # for catkin packages consider the run dependencies instead\n # else not released packages will not appear in the dependency list\n # since rospkg does uses rosdep to decide which dependencies to return\n from catkin_pkg.package import parse_package\n p = parse_package(os.path.dirname(m.filename))\n d_pkgs = set([d.name for d in p.run_depends])\n if use_test_depends:\n for d in p.test_depends:\n d_pkgs.add(d.name)\n # make sure we don't count ourselves as a dep\n d_pkgs.add(pkg)\n\n diff = list(set(file_deps[launch_file].pkgs) - d_pkgs)\n if not pkg in missing:\n missing[pkg] = set()\n missing[pkg].update(diff)\n return missing", "def check(self):\n illegalNamespaces = list()\n\n prog = re.compile(\"^[A-Z]{4}[0-9]{2}_[0-9]{3}:$\")\n\n for assetNode in pm.ls(type=\"gAsset\"):\n if assetNode.isReferenced() and not prog.match(assetNode.namespace()):\n illegalNamespaces.append(assetNode)\n\n if not illegalNamespaces:\n self.status = \"OK\"\n else:\n self.status = self.errorMode\n self.errorNodes = illegalNamespaces\n for illegalNamespace in illegalNamespaces:\n self.addError(\"%s has a illegal namespace\" % illegalNamespace)\n self.errorMessage = \"%s asset(s) have a illegal namespace\" % (\n len(illegalNamespaces))", "def get_required_packages(file_contents):\n # Make sure the only ``install_requires`` happens in the\n # call to setup()\n if file_contents.count(INST_REQS_KWARG) != 1:\n raise ValueError('Expected only one use of keyword',\n INST_REQS_KWARG, file_contents)\n # Make sure the only usage of ``install_requires`` is to set\n # install_requires=REQUIREMENTS.\n keyword_stmt = INST_REQS_KWARG + '=' + REQ_VAR\n if file_contents.count(keyword_stmt) != 1:\n raise ValueError('Expected keyword to be set with variable',\n INST_REQS_KWARG, REQ_VAR, file_contents)\n # Split file on ``REQUIREMENTS`` variable while asserting that\n # it only appear twice.\n _, reqs_section, _ = file_contents.split(REQ_VAR)\n # Find ``REQUIREMENTS`` list variable defined in ``reqs_section``.\n reqs_begin = reqs_section.index('[')\n reqs_end = reqs_section.index(']') + 1\n\n # Convert the text to an actual list, but make sure no\n # locals or globals can be used.\n reqs_list_text = reqs_section[reqs_begin:reqs_end]\n # We use literal_eval() because it limits to evaluating\n # strings that only consist of a few Python literals: strings,\n # numbers, tuples, lists, dicts, booleans, and None.\n requirements = ast.literal_eval(reqs_list_text)\n\n # Take the list of requirements and strip off the package name\n # from each requirement.\n result = []\n for required in requirements:\n parts = required.split()\n result.append(parts[0])\n return result", "def get_used_define_files(self):\n return 
set(self._used_defines.keys())", "def _compute_missing_deps(self, srcs, actual_deps):\r\n def must_be_explicit_dep(dep):\r\n # We don't require explicit deps on the java runtime, so we shouldn't consider that\r\n # a missing dep.\r\n return not dep.startswith(self._context.java_home)\r\n\r\n # TODO: If recomputing these every time becomes a performance issue, memoize for\r\n # already-seen targets and incrementally compute for new targets not seen in a previous\r\n # partition, in this or a previous chunk.\r\n targets_by_file = self._compute_targets_by_file()\r\n transitive_deps_by_target = self._compute_transitive_deps_by_target()\r\n\r\n # Find deps that are actual but not specified.\r\n with self._context.new_workunit(name='scan_deps'):\r\n missing_file_deps = OrderedSet() # (src, src).\r\n missing_tgt_deps_map = defaultdict(list) # (tgt, tgt) -> a list of (src, src) as evidence.\r\n missing_direct_tgt_deps_map = defaultdict(list) # The same, but for direct deps.\r\n\r\n buildroot = get_buildroot()\r\n abs_srcs = [os.path.join(buildroot, src) for src in srcs]\r\n for src in abs_srcs:\r\n src_tgt = next(iter(targets_by_file.get(src)))\r\n if src_tgt is not None:\r\n for actual_dep in filter(must_be_explicit_dep, actual_deps.get(src, [])):\r\n actual_dep_tgts = targets_by_file.get(actual_dep)\r\n # actual_dep_tgts is usually a singleton. If it's not, we only need one of these\r\n # to be in our declared deps to be OK.\r\n if actual_dep_tgts is None:\r\n missing_file_deps.add((src_tgt, actual_dep))\r\n elif src_tgt not in actual_dep_tgts: # Obviously intra-target deps are fine.\r\n canonical_actual_dep_tgt = next(iter(actual_dep_tgts))\r\n if actual_dep_tgts.isdisjoint(transitive_deps_by_target.get(src_tgt, [])):\r\n missing_tgt_deps_map[(src_tgt, canonical_actual_dep_tgt)].append((src, actual_dep))\r\n elif canonical_actual_dep_tgt not in src_tgt.dependencies:\r\n # The canonical dep is the only one a direct dependency makes sense on.\r\n missing_direct_tgt_deps_map[(src_tgt, canonical_actual_dep_tgt)].append(\r\n (src, actual_dep))\r\n else:\r\n raise TaskError('Requested dep info for unknown source file: %s' % src)\r\n\r\n return (list(missing_file_deps),\r\n missing_tgt_deps_map.items(),\r\n missing_direct_tgt_deps_map.items())", "def get_requirements(package):\n requirements: list = requires(package)\n requires_dict = defaultdict(dict)\n for requirement in requirements:\n req = Requirement(requirement)\n package_name, package_marker = req.name, req.marker\n if package_marker and \"extra ==\" in str(package_marker):\n group = str(package_marker).split(\"extra == \")[1].strip('\"').strip(\"'\").strip()\n else:\n group = \"required\"\n # De-duplicate (the same package could appear more than once in the extra == 'all' group)\n if package_name in requires_dict[group]:\n continue\n requires_dict[group][package_name] = req\n return requires_dict", "def ResolveSiblingNamespaces (sibling_namespaces):\n\n for ns in sibling_namespaces:\n ns.configureCategories([archive.NamespaceArchive._AnonymousCategory()])\n ns.validateComponentModel()\n\n def __keyForCompare (dependency_map):\n \"\"\"Sort namespaces so dependencies get resolved first.\n\n Uses the trick underlying functools.cmp_to_key(), but optimized for\n this special case. 
The dependency map is incorporated into the class\n definition by scope.\n \"\"\"\n class K (object):\n def __init__ (self, ns, *args):\n self.__ns = ns\n\n # self compares less than other if self.ns is in the dependency set\n # of other.ns but not vice-versa.\n def __lt__ (self, other):\n return ((self.__ns in dependency_map.get(other.__ns, set())) \\\n and not (other.__ns in dependency_map.get(self.__ns, set())))\n\n # self compares equal to other if their namespaces are either\n # mutually dependent or independent.\n def __eq__ (self, other):\n return (self.__ns in dependency_map.get(other.__ns, set())) == (other.__ns in dependency_map.get(self.__ns, set()))\n\n # All other order metrics are derived.\n def __ne__ (self, other):\n return not self.__eq__(other)\n def __le__ (self, other):\n return self.__lt__(other) or self.__eq__(other)\n def __gt__ (self, other):\n return other.__lt__(self.__ns)\n def __ge__ (self, other):\n return other.__lt__(self.__ns) or self.__eq__(other)\n return K\n\n need_resolved_set = set(sibling_namespaces)\n dependency_map = {}\n last_state = None\n while need_resolved_set:\n need_resolved_list = list(need_resolved_set)\n if dependency_map:\n need_resolved_list.sort(key=__keyForCompare(dependency_map))\n need_resolved_set = set()\n dependency_map = {}\n for ns in need_resolved_list:\n if not ns.needsResolution():\n continue\n if not ns.resolveDefinitions(allow_unresolved=True):\n deps = dependency_map.setdefault(ns, set())\n for (c, dcs) in six.iteritems(ns._unresolvedDependents()):\n for dc in dcs:\n dns = dc.expandedName().namespace()\n if dns != ns:\n deps.add(dns)\n _log.info('Holding incomplete resolution %s depending on: ', ns.uri(), six.u(' ; ').join([ six.text_type(_dns) for _dns in deps ]))\n need_resolved_set.add(ns)\n # Exception termination check: if we have the same set of incompletely\n # resolved namespaces, and each has the same number of unresolved\n # components, assume there's an truly unresolvable dependency: either\n # due to circularity, or because there was an external namespace that\n # was missed from the sibling list.\n state = []\n for ns in need_resolved_set:\n state.append( (ns, len(ns._unresolvedComponents())) )\n state = tuple(state)\n if last_state == state:\n raise pyxb.LogicError('Unexpected external dependency in sibling namespaces: %s' % (six.u('\\n ').join( [six.text_type(_ns) for _ns in need_resolved_set ]),))\n last_state = state" ]
[ "0.63311404", "0.5920848", "0.5731538", "0.5633661", "0.56284595", "0.5426965", "0.54253066", "0.52527624", "0.5209391", "0.512581", "0.5070434", "0.5059201", "0.49837956", "0.49681267", "0.49222076", "0.4893362", "0.48832375", "0.48443305", "0.4834767", "0.48293024", "0.48059365", "0.4801445", "0.47941127", "0.47852084", "0.47808653", "0.47801557", "0.47376668", "0.47363704", "0.4727177", "0.47076562" ]
0.7333173
0
Checks if a namespace would normally be required.
def ShouldRequireNamespace(namespace, identifier): return ( not self._IsPrivateIdentifier(identifier) and namespace not in external_dependencies and namespace not in self._provided_namespaces and identifier not in external_dependencies and identifier not in created_identifiers and namespace not in missing_requires)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _namespace_requested(self, namespace):\r\n if namespace is None:\r\n return False\r\n namespace_tuple = self._tuplefy_namespace(namespace)\r\n if namespace_tuple[0] in IGNORE_DBS:\r\n return False\r\n elif namespace_tuple[1] in IGNORE_COLLECTIONS:\r\n return False\r\n else:\r\n return self._tuple_requested(namespace_tuple)", "def IsExtraRequire(self, token):\n namespace = tokenutil.GetStringAfterToken(token)\n\n if self.GetClosurizedNamespace(namespace) is None:\n return False\n\n if namespace in self._ignored_extra_namespaces:\n return False\n\n if token in self._duplicate_require_tokens:\n return True\n\n if namespace in self._suppressed_requires:\n return False\n\n # If the namespace contains a component that is initial caps, then that\n # must be the last component of the namespace.\n parts = namespace.split('.')\n if len(parts) > 1 and parts[-2][0].isupper():\n return True\n\n # TODO(user): There's probably a faster way to compute this.\n for ns in self._used_namespaces:\n if (not ns.alias_definition and (\n namespace == ns.namespace or namespace == ns.identifier)):\n return False\n\n return True", "async def namespace_exists(self, namespace: str) -> bool:\n return await self.AD.state.namespace_exists(namespace)", "def check(self):\n illegalNamespaces = list()\n\n progStandard = re.compile(\"^[A-Z]{4}[0-9]{2}_[0-9]{3}$\")\n progShot = re.compile(\"^SH[0-9]{4}_[0-9]{3}$\")\n\n for namespaces in pm.namespaceInfo(listOnlyNamespaces=True, internal=False, recurse=True):\n for namespace in namespaces.split(\":\"):\n if not progStandard.match(namespace) and not progShot.match(namespace) not in [\"UI\", \"shared\"]:\n illegalNamespaces.append(namespace)\n\n if not illegalNamespaces:\n self.status = \"OK\"\n else:\n self.status = self.errorMode\n self.errorNodes = illegalNamespaces\n for illegalNamespace in illegalNamespaces:\n self.addError(\"%s is a illegal namespace\" % illegalNamespace)\n self.errorMessage = \"%s illegal namespace\" % (\n len(illegalNamespaces))", "def validate(self, namespace):\n pass", "def check_no_namespace(progress_controller=None):\n if progress_controller is None:\n progress_controller = ProgressControllerBase()\n if len(pm.listNamespaces()):\n progress_controller.complete()\n raise PublishError(\n \"There should be no <b>Namespaces</b> in a <b>Model</b> scene.\"\n )\n progress_controller.complete()", "def matchesRequiredSBMLNamespacesForAddition(self, *args):\n return _libsbml.SBase_matchesRequiredSBMLNamespacesForAddition(self, *args)", "def hasNS(self, *args):\n return _libsbml.XMLNamespaces_hasNS(self, *args)", "def testIgnoredExtraNamespaces(self):\n token = self._GetRequireTokens('package.Something')\n namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(\n closurized_namespaces=['package'],\n ignored_extra_namespaces=['package.Something'])\n\n self.assertFalse(namespaces_info.IsExtraRequire(token),\n 'Should be valid since it is in ignored namespaces.')\n\n namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(\n ['package'], [])\n\n self.assertTrue(namespaces_info.IsExtraRequire(token),\n 'Should be invalid since it is not in ignored namespaces.')", "def check(self):\n illegalNamespaces = list()\n\n prog = re.compile(\"^[A-Z]{4}[0-9]{2}_[0-9]{3}:$\")\n\n for assetNode in pm.ls(type=\"gAsset\"):\n if assetNode.isReferenced() and not prog.match(assetNode.namespace()):\n illegalNamespaces.append(assetNode)\n\n if not illegalNamespaces:\n self.status = \"OK\"\n else:\n self.status = self.errorMode\n self.errorNodes = 
illegalNamespaces\n for illegalNamespace in illegalNamespaces:\n self.addError(\"%s has a illegal namespace\" % illegalNamespace)\n self.errorMessage = \"%s asset(s) have a illegal namespace\" % (\n len(illegalNamespaces))", "def check(self):\n BadNamespaces = list()\n\n for namespace in pm.listNamespaces():\n BadNamespaces.append(namespace)\n\n if not BadNamespaces:\n self.status = \"OK\"\n else:\n self.status = self.errorMode\n self.errorNodes = namespace\n for namespace in BadNamespaces:\n self.addError(\"namespace %s exist\" % namespace)\n self.errorMessage = \"%s namespace\" % (len(BadNamespaces))", "def requirement_missing(script):\n if \"requires\" in script:\n if script[\"requires\"] is None:\n return False\n for package in script[\"requires\"].split():\n try:\n pkg_resources.working_set.require(package)\n except Exception:\n return True\n return False", "def hasNamespaceNS(self, *args):\n return _libsbml.XMLToken_hasNamespaceNS(self, *args)", "def checkMathMLNamespace(self, *args):\n return _libsbml.SBase_checkMathMLNamespace(self, *args)", "def isSetPackageRequired(self, *args):\n return _libsbml.SBMLDocument_isSetPackageRequired(self, *args)", "def _check_required_section_found(self, docstring: PetscDocStringImpl) -> None:\n if not self and self.required:\n diag = self.diags.section_header_missing\n mess = f'Required section \\'{self.titles[0]}\\' not found'\n docstring.add_diagnostic_from_source_range(\n Diagnostic.Kind.ERROR, diag, mess, docstring.extent, highlight=False\n )\n return", "def isSBMLNamespace(*args):\n return _libsbml.SBMLNamespaces_isSBMLNamespace(*args)", "def verify_namespace_attrs(self, node):\n for cls in node.classes:\n for var in cls.variables:\n self.check_var_attrs(cls, var)\n for func in cls.functions:\n self.check_fcn_attrs(func)\n\n for func in node.functions:\n self.check_fcn_attrs(func)\n\n for ns in node.namespaces:\n self.verify_namespace_attrs(ns)", "def GetRequiredNamespaces(self):\n return set(self._required_namespaces)", "def satisfyRequirements(path):\n if not hasVerb(path):\n return False\n if not hasConceptsAtTheEnds(path):\n return False\n if not isConceptDefinition(path):\n return False\n\n return True", "def isSetPkgRequired(self, *args):\n return _libsbml.SBMLDocument_isSetPkgRequired(self, *args)", "def _check_namespace_access(self, namespace, user):\n if not namespace.owners.filter(id=user.id).count():\n raise exceptions.PermissionDenied(\n 'The namespace listed on your filename must match one of '\n 'the namespaces you have access to.'\n )", "def _check_required_opts(self, namespace=None):\n for info, group in self._all_opt_infos():\n opt = info['opt']\n\n if opt.required:\n if 'default' in info or 'override' in info:\n continue\n\n if self._get(opt.dest, group, namespace) is None:\n raise RequiredOptError(opt.name, group)", "def has_package(self, doc):\n return doc.package is not None", "def test_incorrect_namespace(self):\n self._test( # pylint: disable=no-value-for-parameter\n [u\":meth:`path.that.does.not.exist`\"], error_classes.MissingNamespace\n )", "def test_require():\n assert is_required('test') is None\n assert is_required(None)", "def SBMLNamespaces_isSBMLNamespace(*args):\n return _libsbml.SBMLNamespaces_isSBMLNamespace(*args)", "def is_required(self) -> bool:\n return self.required", "def has_required(self) -> bool:\n return any(child.is_required() for child in self.children)", "def hasPrefix(self, *args):\n return _libsbml.XMLNamespaces_hasPrefix(self, *args)" ]
[ "0.73953635", "0.7301753", "0.65251887", "0.6509852", "0.64805037", "0.64667636", "0.6344185", "0.6334187", "0.61623454", "0.6138466", "0.61357164", "0.60664326", "0.6056696", "0.60520434", "0.6033502", "0.5975159", "0.58885986", "0.5803981", "0.5801867", "0.5779978", "0.57779074", "0.5768324", "0.57560503", "0.5746598", "0.57049614", "0.5703521", "0.569461", "0.56671876", "0.56660426", "0.56415105" ]
0.7544301
0
Finds the namespace of an identifier given a list of other namespaces.
def _FindNamespace(self, identifier, *namespaces_list): identifier = identifier.rsplit('.', 1)[0] identifier_prefix = identifier + '.' for namespaces in namespaces_list: for namespace in namespaces: if namespace == identifier or namespace.startswith(identifier_prefix): return namespace return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_ns_list(logger,body,v1=None):\n if v1 is None:\n v1 = client.CoreV1Api()\n logger.debug('new client - fn get_ns_list')\n \n try:\n matchNamespace = body.get('matchNamespace')\n except KeyError:\n matchNamespace = '*'\n logger.debug(\"matching all namespaces.\")\n logger.debug(f'Matching namespaces: {matchNamespace}')\n \n try:\n avoidNamespaces = body.get('avoidNamespaces')\n except KeyError:\n avoidNamespaces = ''\n logger.debug(\"not avoiding namespaces\")\n\n nss = v1.list_namespace().items\n matchedns = []\n avoidedns = []\n\n for matchns in matchNamespace:\n for ns in nss:\n if re.match(matchns, ns.metadata.name):\n matchedns.append(ns.metadata.name)\n logger.debug(f'Matched namespaces: {ns.metadata.name} matchpathern: {matchns}')\n if avoidNamespaces:\n for avoidns in avoidNamespaces:\n for ns in nss:\n if re.match(avoidns, ns.metadata.name):\n avoidedns.append(ns.metadata.name)\n logger.debug(f'Skipping namespaces: {ns.metadata.name} avoidpatrn: {avoidns}') \n # purge\n for ns in matchedns.copy():\n if ns in avoidedns:\n matchedns.remove(ns)\n\n return matchedns", "def namespace_for(uri: Union[URIRef, Namespace, str]) -> str:\n uri = str(uri)\n if uri not in namespaces.values():\n namespaces[AnonNS().ns] = uri\n return [k for k, v in namespaces.items() if uri == v][0]", "def matchNamespace(self, namespace, *, ignore_predicates=tuple()):\n # FIXME can't we hit the cache for these?\n sns = str(namespace)\n for s, p, o in self:\n if p not in ignore_predicates:\n for e in (s, p, o):\n if isinstance(e, rdflib.URIRef):\n try:\n pre, ns, suff = self.compute_qname(e, generate=False)\n if str(ns) == sns:\n yield e\n except KeyError:\n pass", "def SBMLNamespaces_getSBMLNamespaceURI(*args):\n return _libsbml.SBMLNamespaces_getSBMLNamespaceURI(*args)", "def namespaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"namespaces\")", "def prefixForNamespace (self, namespace):\n pfxs = self.__inScopePrefixes.get(namespace)\n if pfxs:\n return next(iter(pfxs))\n return None", "def target_namespaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"target_namespaces\")", "def from_ns(match):\n return ns.get(match.group(1), match.group())", "def get_identifiers_org_curie(prefix: str, identifier: str) -> Optional[str]:\n miriam_prefix = get_identifiers_org_prefix(prefix)\n if miriam_prefix is None or miriam_prefix in MIRIAM_BLACKLIST:\n return None\n banana = get_banana(prefix)\n if banana:\n if identifier.startswith(f\"{banana}:\"):\n return identifier\n else:\n return f\"{banana}:{identifier}\"\n elif namespace_in_lui(prefix):\n if identifier.startswith(prefix.upper()):\n return identifier\n else:\n return f\"{prefix.upper()}:{identifier}\"\n else:\n return f\"{miriam_prefix}:{identifier}\"", "def namespace_name_to_id(self, name):\n lname = name.lower()\n for ns_id, names in self._namespaces.items():\n lnames = [n.lower() for n in names] # Be case-insensitive\n if lname in lnames:\n return ns_id\n\n e = \"There is no namespace with name '{0}'.\".format(name)\n raise exceptions.NamespaceNotFoundError(e)", "def getIndex(self, *args):\n return _libsbml.XMLNamespaces_getIndex(self, *args)", "def prefix_to_ns(self, prefix):\n defin = self.module.i_ctx.get_module(\n self.module.i_prefixes[prefix][0])\n return defin.search_one(\"namespace\").arg", "def get_namespaces(self, label_selector=None):\n return self.core_client.list_namespace(label_selector=label_selector)", "def is_in_namespace(variable_names, namespace, 
func_logic=all):\n assert hasattr(variable_names, \"__iter__\"), \"`variable_names` should be either a single string on an object or an iterable of strings of variable names\"\n if isinstance(variable_names, str):\n variable_names = [variable_names]\n namespace = set(namespace)\n return func_logic(map(lambda x: x in namespace, variable_names))", "def get_ns(name):\n ensembles = conventions.ensembles\n mask = (ensembles['name'] == name)\n return utils.extract_unique(ensembles[mask], 'ns')", "def namespaces(\n self, index: Union[int, str] = \"len\"\n ) -> Union[List[str], int]:\n if index == \"len\":\n return len(self._namespaces)\n try:\n return self._namespaces[index] # type: ignore\n except IndexError:\n return []", "def selected_namespaces(self) -> Optional[pulumi.Input['NamespacesArgs']]:\n return pulumi.get(self, \"selected_namespaces\")", "def selected_namespaces(self) -> Optional[pulumi.Input['NamespacesArgs']]:\n return pulumi.get(self, \"selected_namespaces\")", "def namespace_id_to_name(self, ns_id, all=False):\n try:\n if all:\n return self._namespaces[ns_id]\n else:\n return self._namespaces[ns_id][0]\n except KeyError:\n e = \"There is no namespace with id {0}.\".format(ns_id)\n raise exceptions.NamespaceNotFoundError(e)", "def pyreq_ns_search(httpreq_handler: HTTPRequestHandler, namespace: str,\n json_resp: bool = False, need_global_lookup_result: bool = False,\n glblsrch_totmp: bool = False, ) -> Optional[Union[Dict, str, Tuple[Dict, Dict]]]:\n result: List[Dict] = pyreq_list_affiliated_ns(httpreq_handler)\n for ns_meta in result:\n if ns_meta['name'] == namespace:\n httpreq_handler.debug(f'Found member namespace that matches {namespace}: ' + json.dumps(ns_meta))\n if json_resp:\n return ns_meta\n else:\n return ns_meta['id']\n httpreq_handler.debug(f'Target namespace {namespace} not found in affiliated results, need global search.')\n httpreq_handler.debug(f'Look up on overall nuance.com namespace. We redirect network payloads to a file')\n # this request would return a huge payload containing exhaustive info for all application configurations\n # for all users and namespaces. 
Therefore we rather ask curl to save the output to a file instead of\n # trying to receive that from stdout piping\n endpoint = '/bolt/applications'\n tmp_outfile = None\n if glblsrch_totmp:\n timestamp = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')\n tmp_outfile = os.path.join(os.getcwd(),\n f'tmp_app_config_lookup_{timestamp}.json')\n httpreq_handler.debug(f'Temp file for redirected CURL output: {tmp_outfile}')\n resp: Dict = httpreq_handler.request(url=endpoint, method=GET_METHOD, default_headers=True,\n stream=True, outfile=tmp_outfile, json_resp=True)\n for app_conf_grp in resp['data']:\n if app_conf_grp['namespace_name'] != namespace:\n continue\n ns_search_result = dict()\n ns_search_result['name'] = namespace\n ns_search_result['id'] = app_conf_grp['namespace_id']\n ns_search_result['is_member'] = False\n if need_global_lookup_result:\n httpreq_handler.debug(f'Retruning global lookup resp and result: {json.dumps(ns_search_result)}')\n return ns_search_result, resp\n else:\n httpreq_handler.debug(f'Retruning result: {json.dumps(ns_search_result)}')\n return ns_search_result\n raise ValueError(__ERR_MSG_NS_NOTFOUND.format(ns_name=namespace))", "def get_edge_namespaces():\n hint = request.form['namespaces']\n\n result = {'status': FAIL, 'message': '', 'data': {}}\n try:\n result['status'] = SUCCESS\n result['data']['autocomplete_field'] = []\n result['data']['select_field'] = []\n if hint != '':\n edge_session = edge(edge_create_internal_ns_configuration.edge_url,\n edge_create_internal_ns_configuration.client_id,\n edge_create_internal_ns_configuration.clientSecret)\n\n namespaces = edge_session.get_namespaces()\n count = 0\n for namespace in namespaces:\n if namespace['name'].startswith(hint):\n\n result['data']['autocomplete_field'].append({\n 'input': namespace['id'],\n 'value': '%s (%s)' % (namespace['name'], namespace['id'])\n })\n result['data']['select_field'].append({\n 'id': namespace['id'],\n 'txt': namespace['name']\n })\n if count == 10:\n break\n count += 1\n except Exception as e:\n result['status'] = FAIL\n result['message'] = 'Error while searching for Namespaces: %s and hint: %s!' 
% (util.safe_str(e), hint)\n return result", "def find_conflict(paths: List[Path]) -> Optional[Tuple[int, int]]:\n for i in range(len(paths)):\n for j in range(i + 1, len(paths)):\n if paths[i].conflicts(paths[j]):\n return paths[i].identifier, paths[j].identifier\n return None", "def _namespace_package_path(fqname, pathnames, path=None):\n working_set = pkg_resources.WorkingSet(path)\n\n path = list(pathnames)\n\n for dist in working_set:\n if dist.has_metadata('namespace_packages.txt'):\n namespaces = dist.get_metadata(\n 'namespace_packages.txt').splitlines()\n if fqname in namespaces:\n nspath = os.path.join(dist.location, *fqname.split('.'))\n if nspath not in path:\n path.append(nspath)\n\n return path", "def _validate_namespaces(self, input_namespaces):\r\n output_namespaces = []\r\n if input_namespaces == []:\r\n return output_namespaces\r\n elif '*' in input_namespaces:\r\n if len(input_namespaces) > 1:\r\n warning = 'Warning: Multiple namespaces are '\r\n warning += 'ignored when one namespace is \"*\"\\n'\r\n sys.stderr.write(warning)\r\n return output_namespaces\r\n else:\r\n for namespace in input_namespaces:\r\n if not isinstance(namespace, unicode):\r\n namespace = unicode(namespace)\r\n namespace_tuple = self._tuplefy_namespace(namespace)\r\n if namespace_tuple is None:\r\n warning = 'Warning: Invalid namespace ' + namespace\r\n warning += ' will be ignored\\n'\r\n sys.stderr.write(warning)\r\n else:\r\n if namespace_tuple not in output_namespaces:\r\n output_namespaces.append(namespace_tuple)\r\n else:\r\n warning = 'Warning: Duplicate namespace ' + namespace\r\n warning += ' will be ignored\\n'\r\n sys.stderr.write(warning)\r\n return output_namespaces", "def getNamespaces(self, *args):\n return _libsbml.SBMLNamespaces_getNamespaces(self, *args)", "def namespace_selection(base_url, namespaces):\n top_menu = ConsoleMenu(\"Select Namespace\", \"\")\n for ns in namespaces:\n namespace_url = ns['Self']\n namespace_menu = FunctionItem(ns['Id'], stream_selection, [namespace_url, get_streams(namespace_url)])\n top_menu.append_item(namespace_menu)\n top_menu.show()", "def getSBMLNamespaceURI(*args):\n return _libsbml.SBMLNamespaces_getSBMLNamespaceURI(*args)", "def _extract_namespaces(self) -> Tuple[List[List[str]], List[str]]:\n nparts: Set[int] = set()\n namespaces: List[Set[str]] = []\n terminals: Set[str] = set()\n for name in self.names:\n parts: List[str] = name.split(\".\")\n nparts.add(len(parts))\n if len(nparts) > 1:\n raise PyParamNameError(\n \"Parameter names must have the same number of namespaces.\"\n )\n namespaces = namespaces or [{part} for part in parts[:-1]]\n for i, part in enumerate(parts[:-1]):\n namespaces[i].add(part)\n terminals.add(parts[-1])\n return [list(ns) for ns in namespaces], list(terminals)", "def test_get_namespaces_names(self):\n pass", "def FindQualifiedTargets(target, qualified_list):\n return [t for t in qualified_list if ParseQualifiedTarget(t)[1] == target]" ]
[ "0.5511319", "0.5437833", "0.54365224", "0.533564", "0.5334929", "0.5333034", "0.520893", "0.51984334", "0.5171764", "0.51552564", "0.51050156", "0.5095089", "0.50680137", "0.50477624", "0.503882", "0.50184745", "0.49847686", "0.49847686", "0.49530628", "0.49236485", "0.49200925", "0.49159005", "0.48964658", "0.4896063", "0.48957813", "0.48956585", "0.4886877", "0.4885317", "0.48769897", "0.48533723" ]
0.81949735
0
Returns whether the given identifier is private.
def _IsPrivateIdentifier(self, identifier): pieces = identifier.split('.') for piece in pieces: if piece.endswith('_'): return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_private_id(private_id):\n\n if private_id == identifier.private_id:\n return True\n return False", "def is_private(self):\n return self.has_label(PRIVATE_LABEL)", "def private(self) -> bool:\n return pulumi.get(self, \"private\")", "def isPrivate(id):\n db = core.connect()\n theShift = db[id]\n publishData = theShift[\"publishData\"]\n return publishData[\"private\"]", "def is_private():", "def is_private(self):\n if self[:13] == '1.2.840.10008':\n return False\n\n return True", "def is_private(function):\n name = function.__name__\n if name.startswith(\"__\"):\n return True\n else:\n return getattr(function, \"private\", False)", "def isPublic(id):\n db = core.connect()\n return not db[id][\"private\"]", "def private(self) -> bool:\n return self._private", "def is_private(self, path: str) -> bool:\n return self.private_files.test(path)", "def get_private_id_by(data):\n\n if data[\"id\"] == identifier.public_id:\n return identifier.private_id\n return False", "def is_private(code):\n return 4000 <= code <= 4999", "def is_private(path):\n for p in path.split(\".\"):\n if p.startswith(\"_\") and not p.startswith(\"__\"):\n return True\n return False", "def private_session(self):\n return bool(\n lib.sp_session_is_private_session(self._session._sp_session))", "def _is_private(self, path, name, obj):\n # Skip objects blocked by doc_controls.\n if doc_controls.should_skip(obj):\n return True\n\n # Skip modules outside of the package root.\n if inspect.ismodule(obj):\n if hasattr(obj, \"__file__\"):\n if not obj.__file__.startswith(self._base_dir):\n return True\n\n # Skip objects blocked by the private_map\n if name in self._private_map.get(\".\".join(path), []):\n return True\n\n # Skip \"_\" hidden attributes\n is_dunder = name.startswith(\"__\") and name.endswith(\"__\")\n if name.startswith(\"_\") and not is_dunder:\n return True\n\n if name in [\"__base__\", \"__class__\"]:\n return True\n\n return False", "def hasPrivateKey(self):\r\n raise NotImplementedError()", "def private_instance(self) -> bool:\n return pulumi.get(self, \"private_instance\")", "def isPublic(id):\n db = core.connect()\n theShift = db[id]\n publishData = theShift[\"publishData\"]\n return (not publishData[\"draft\"]) and (not publishData[\"private\"])", "def is_private(event):\n channel = event.get('channel')\n return channel.startswith('D')", "def on_private_cluster(self) -> bool:\n return self.cluster.spec.private if self.cluster.spec else False", "def is_public(self):\n return not self.name.startswith('_')", "def is_private(self, is_private):\n\n self._is_private = is_private", "def pcap_contains_priv_ips(self):\n return self.contains_priv_ips", "def is_public(self) -> bool:\n return True", "def isPublic(self, code) -> bool:\n return True", "def publicly_accessible(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"publicly_accessible\")", "def private(self):\n return self._private", "def _disallow_public_access(self) -> typing.Optional[bool]:\n return jsii.get(self, \"disallowPublicAccess\")", "def isPublicUserStream(id):\n db = core.connect()\n return (not db[id][\"meta\"] == \"public\")", "def privatelink_access(self) -> Optional[pulumi.Input['PgPgUserConfigPrivatelinkAccessArgs']]:\n return pulumi.get(self, \"privatelink_access\")" ]
[ "0.81769556", "0.775468", "0.7722495", "0.77160704", "0.7646948", "0.7562728", "0.739211", "0.73734015", "0.7295112", "0.69502974", "0.69394654", "0.67882586", "0.6739908", "0.66595906", "0.6556429", "0.65464985", "0.65414155", "0.6533351", "0.6517475", "0.6428402", "0.6386725", "0.6203438", "0.6163643", "0.60263157", "0.59505326", "0.58760923", "0.58506286", "0.58417654", "0.5827232", "0.58132607" ]
0.818094
0
Returns whether token is the first provide token.
def IsFirstProvide(self, token): return self._provide_tokens and token == self._provide_tokens[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def IsLastProvide(self, token):\n return self._provide_tokens and token == self._provide_tokens[-1]", "def IsFirstRequire(self, token):\n return self._require_tokens and token == self._require_tokens[0]", "def IsLastRequire(self, token):\n return self._require_tokens and token == self._require_tokens[-1]", "def IsExtraProvide(self, token):\n namespace = tokenutil.GetStringAfterToken(token)\n\n if self.GetClosurizedNamespace(namespace) is None:\n return False\n\n if token in self._duplicate_provide_tokens:\n return True\n\n # TODO(user): There's probably a faster way to compute this.\n for created_namespace, created_identifier, _ in self._created_namespaces:\n if namespace == created_namespace or namespace == created_identifier:\n return False\n\n return True", "def has_token(self):\n user_id = getattr(self, '_id', None)\n user_token = getattr(self, 'token', None)\n if user_id is not None and user_token is not None:\n return True\n return False", "def check_for_token(token):\n try:\n decode_token(token)\n return True\n except:\n return False", "def always_include_in_token(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"always_include_in_token\")", "def always_include_in_token(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"always_include_in_token\")", "def has_next(self):\n # type: () -> bool\n return len(self.buffer) > 0", "def always_include_in_token(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"always_include_in_token\")", "def always_include_in_token(self) -> bool:\n return pulumi.get(self, \"always_include_in_token\")", "def containsToken(self, token):\n if token.sentence != self.tokens[0].sentence:\n return False # not in same sentence\n \n return self.tokens[0].index <= token.index and token.index <= self.tokens[-1].index", "def is_token_required(self):\n return any([self.app_id, self._login, self._password])", "def has_more_tokens(self) -> bool:\n return len(self.jack_file_tokens) > self._token_idx", "def has_next(self):\n return self._mu is not None or self._source.has_next()", "def is_token_marker(self):\n return self.id in TOKEN", "def has_next(self):\n try:\n self.next()\n return True\n except (ParseException, struct.error):\n return False", "def has_next(self) -> bool:\n return self.peek() != self.sentinel", "def _check(self, token_type):\n if self._is_at_end():\n return False\n\n return self._peek().token_type == token_type", "def hasNext(self) -> bool:\n return self.stack or self.node", "def has_next():\n\n return True", "def has_next(self):\n if self._count is not None:\n # If count is available, use it\n return bool(self._count)\n else:\n # otherwise we have no idea\n return True", "def match(self, token):\n try:\n if token == 'S' and is_symbol(self.the_input[self.index]) \\\n or self.the_input[self.index] == token:\n self.index += 1\n return True\n except IndexError:\n print 'Error on checking \\'' + token + \\\n '\\': the next token is empty'\n exit(1)\n print 'No' # there is improper grammar\n exit(1)", "def has_next(self):\n return not self.finished_function(self.peek)", "def has_next():", "def __contains__(self, token: Hashable) -> bool:\n return token in self._token_to_idx", "def is_next_token_implemented_in_plugin(self):\n return self.__is_next_token_implemented_in_plugin", "def check_token(self, *args) -> bool:\n if len(args) == 1:\n if isinstance(args[0], str):\n return self.token_name == args[0]\n elif isinstance(args[0], _Enum):\n return self.token_name == args[0].name\n elif isinstance(args[0], _Sequence):\n 
return self.token_name in args[0]\n raise TypeError(\"_check_token() taking 1 argument, type: str, Enum or Sequence\")", "def hasNext(self) -> bool:\n return self.stack != []", "def requires_token(self) -> bool:\n # both attribute and placeholder in url are required to make it work\n for key, val in self.items():\n if isinstance(val, str) and \"<insert your\" in val and key in self.url:\n return True\n return False" ]
[ "0.7887217", "0.7391644", "0.69115305", "0.6791802", "0.6652381", "0.6512905", "0.6475678", "0.6475678", "0.63664705", "0.629974", "0.6282053", "0.6246363", "0.6235441", "0.61809856", "0.6155028", "0.61259884", "0.61105454", "0.60996723", "0.60899884", "0.5997687", "0.5997313", "0.5922713", "0.5913251", "0.5912281", "0.5900339", "0.5848063", "0.5828599", "0.5821373", "0.57962483", "0.57942635" ]
0.8674835
0
Returns whether token is the first require token.
def IsFirstRequire(self, token): return self._require_tokens and token == self._require_tokens[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def IsLastRequire(self, token):\n return self._require_tokens and token == self._require_tokens[-1]", "def IsFirstProvide(self, token):\n return self._provide_tokens and token == self._provide_tokens[0]", "def IsExtraRequire(self, token):\n namespace = tokenutil.GetStringAfterToken(token)\n\n if self.GetClosurizedNamespace(namespace) is None:\n return False\n\n if namespace in self._ignored_extra_namespaces:\n return False\n\n if token in self._duplicate_require_tokens:\n return True\n\n if namespace in self._suppressed_requires:\n return False\n\n # If the namespace contains a component that is initial caps, then that\n # must be the last component of the namespace.\n parts = namespace.split('.')\n if len(parts) > 1 and parts[-2][0].isupper():\n return True\n\n # TODO(user): There's probably a faster way to compute this.\n for ns in self._used_namespaces:\n if (not ns.alias_definition and (\n namespace == ns.namespace or namespace == ns.identifier)):\n return False\n\n return True", "def is_token_required(self):\n return any([self.app_id, self._login, self._password])", "def always_include_in_token(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"always_include_in_token\")", "def always_include_in_token(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"always_include_in_token\")", "def check_for_token(token):\n try:\n decode_token(token)\n return True\n except:\n return False", "def IsLastProvide(self, token):\n return self._provide_tokens and token == self._provide_tokens[-1]", "def has_token(self):\n user_id = getattr(self, '_id', None)\n user_token = getattr(self, 'token', None)\n if user_id is not None and user_token is not None:\n return True\n return False", "def token_filter(tok):\n return tok is token or \\\n tok.dep_.endswith(\"mod\") or \\\n tok.dep_ == \"compound\"", "def always_include_in_token(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"always_include_in_token\")", "def always_include_in_token(self) -> bool:\n return pulumi.get(self, \"always_include_in_token\")", "def _check(self, token_type):\n if self._is_at_end():\n return False\n\n return self._peek().token_type == token_type", "def has_next(self):\n # type: () -> bool\n return len(self.buffer) > 0", "def match(self, token):\n try:\n if token == 'S' and is_symbol(self.the_input[self.index]) \\\n or self.the_input[self.index] == token:\n self.index += 1\n return True\n except IndexError:\n print 'Error on checking \\'' + token + \\\n '\\': the next token is empty'\n exit(1)\n print 'No' # there is improper grammar\n exit(1)", "def isStart(self):\n return _libsbml.XMLToken_isStart(self)", "def requires_token(self) -> bool:\n # both attribute and placeholder in url are required to make it work\n for key, val in self.items():\n if isinstance(val, str) and \"<insert your\" in val and key in self.url:\n return True\n return False", "def containsToken(self, token):\n if token.sentence != self.tokens[0].sentence:\n return False # not in same sentence\n \n return self.tokens[0].index <= token.index and token.index <= self.tokens[-1].index", "def has_more_tokens(self) -> bool:\n return len(self.jack_file_tokens) > self._token_idx", "def has_next(self):\n return self._mu is not None or self._source.has_next()", "def has_next(self):\n try:\n self.next()\n return True\n except (ParseException, struct.error):\n return False", "def requires_scope(required_scope):\n token = get_token_auth_header()\n unverified_claims = jwt.get_unverified_claims(token)\n if 
unverified_claims.get(\"scope\"):\n token_scopes = unverified_claims[\"scope\"].split()\n for token_scope in token_scopes:\n if token_scope == required_scope:\n return True\n return False", "def requires_scope(required_scope):\n token = get_token_auth_header()\n unverified_claims = jwt.get_unverified_claims(token)\n if unverified_claims.get(\"scope\"):\n token_scopes = unverified_claims[\"scope\"].split()\n for token_scope in token_scopes:\n if token_scope == required_scope:\n return True\n return False", "def verify_token(self, token):\n return False", "def IsExtraProvide(self, token):\n namespace = tokenutil.GetStringAfterToken(token)\n\n if self.GetClosurizedNamespace(namespace) is None:\n return False\n\n if token in self._duplicate_provide_tokens:\n return True\n\n # TODO(user): There's probably a faster way to compute this.\n for created_namespace, created_identifier, _ in self._created_namespaces:\n if namespace == created_namespace or namespace == created_identifier:\n return False\n\n return True", "def is_required(self) -> bool:\n return self.required", "def is_token_marker(self):\n return self.id in TOKEN", "def is_required(self):\r\n return self._required", "def verify_local_token(self, token):\n return token == self.master_local_token.get_token()", "def has_next():\n\n return True" ]
[ "0.81671214", "0.6949763", "0.68476576", "0.66363865", "0.6201824", "0.6201824", "0.6094359", "0.60599226", "0.5999892", "0.5982507", "0.5949889", "0.590627", "0.58565307", "0.5829618", "0.57798374", "0.57632315", "0.57223994", "0.57069623", "0.56557584", "0.5650901", "0.5618917", "0.56107014", "0.56107014", "0.5606177", "0.5600611", "0.5583801", "0.55663025", "0.5561239", "0.55358773", "0.55067617" ]
0.89671576
0
Returns whether token is the last provide token.
def IsLastProvide(self, token): return self._provide_tokens and token == self._provide_tokens[-1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def IsLastRequire(self, token):\n return self._require_tokens and token == self._require_tokens[-1]", "def has_more_tokens(self) -> bool:\n return len(self.jack_file_tokens) > self._token_idx", "def IsFirstProvide(self, token):\n return self._provide_tokens and token == self._provide_tokens[0]", "def is_last(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_last\")", "def is_last(self) -> Optional[bool]:\n return pulumi.get(self, \"is_last\")", "def has_finished(self) -> bool:\n return self.pos >= len(self.tokens)", "def _check(self, token_type):\n if self._is_at_end():\n return False\n\n return self._peek().token_type == token_type", "def has_next(self):\n # type: () -> bool\n return len(self.buffer) > 0", "def _is_at_end(self):\n return self._peek().token_type == scanner.TokenType.EOF", "def containsToken(self, token):\n if token.sentence != self.tokens[0].sentence:\n return False # not in same sentence\n \n return self.tokens[0].index <= token.index and token.index <= self.tokens[-1].index", "def isLast(self):\n index = self.parentNode.idevices.index(self)\n return index == len(self.parentNode.idevices) - 1", "def has_next(self) -> bool:\n return self.peek() != self.sentinel", "def at_last_stich(self):\n return len(self.cards) == 1", "def is_last_position(self):\r\n return self.position >= len(self.rule.rightside)", "def IsExtraProvide(self, token):\n namespace = tokenutil.GetStringAfterToken(token)\n\n if self.GetClosurizedNamespace(namespace) is None:\n return False\n\n if token in self._duplicate_provide_tokens:\n return True\n\n # TODO(user): There's probably a faster way to compute this.\n for created_namespace, created_identifier, _ in self._created_namespaces:\n if namespace == created_namespace or namespace == created_identifier:\n return False\n\n return True", "def has_next(self):\n return not self.finished_function(self.peek)", "def token_is_expired(self):\n # type: () -> bool\n token = self.token\n if not token:\n return False\n\n return token[\"expires_at\"] < time()", "def isEnd(self):\n return _libsbml.XMLToken_isEnd(self)", "def check_for_token(token):\n try:\n decode_token(token)\n return True\n except:\n return False", "def is_last_page(self):\n return self.page == self.last_page", "def has_token(self):\n user_id = getattr(self, '_id', None)\n user_token = getattr(self, 'token', None)\n if user_id is not None and user_token is not None:\n return True\n return False", "def has_next(self):\n if self._count is not None:\n # If count is available, use it\n return bool(self._count)\n else:\n # otherwise we have no idea\n return True", "def has_next(self):\n return self._mu is not None or self._source.has_next()", "def has_next(self):\n try:\n self.next()\n return True\n except (ParseException, struct.error):\n return False", "def is_token_marker(self):\n return self.id in TOKEN", "def has_end(self):\n return bool(self._end)", "def has_next(self):\n return self.count < len(self)", "def is_last(self, level):\n\n return level == self.levels[-1]", "def hasNext(self) -> bool:\n return self.stack != []", "def has_next():\n\n return True" ]
[ "0.76438874", "0.72274315", "0.68776894", "0.6821207", "0.67988884", "0.66592544", "0.6655954", "0.6569088", "0.65481645", "0.65435654", "0.64170283", "0.6405064", "0.6377603", "0.6337176", "0.62775195", "0.62491417", "0.62417126", "0.62393886", "0.62069696", "0.618613", "0.61777115", "0.6127372", "0.612177", "0.61179763", "0.61128104", "0.60927814", "0.6030181", "0.60261273", "0.60060567", "0.5987528" ]
0.8860514
0
Returns whether token is the last require token.
def IsLastRequire(self, token): return self._require_tokens and token == self._require_tokens[-1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def IsFirstRequire(self, token):\n return self._require_tokens and token == self._require_tokens[0]", "def IsLastProvide(self, token):\n return self._provide_tokens and token == self._provide_tokens[-1]", "def has_more_tokens(self) -> bool:\n return len(self.jack_file_tokens) > self._token_idx", "def IsExtraRequire(self, token):\n namespace = tokenutil.GetStringAfterToken(token)\n\n if self.GetClosurizedNamespace(namespace) is None:\n return False\n\n if namespace in self._ignored_extra_namespaces:\n return False\n\n if token in self._duplicate_require_tokens:\n return True\n\n if namespace in self._suppressed_requires:\n return False\n\n # If the namespace contains a component that is initial caps, then that\n # must be the last component of the namespace.\n parts = namespace.split('.')\n if len(parts) > 1 and parts[-2][0].isupper():\n return True\n\n # TODO(user): There's probably a faster way to compute this.\n for ns in self._used_namespaces:\n if (not ns.alias_definition and (\n namespace == ns.namespace or namespace == ns.identifier)):\n return False\n\n return True", "def _check(self, token_type):\n if self._is_at_end():\n return False\n\n return self._peek().token_type == token_type", "def has_next(self):\n # type: () -> bool\n return len(self.buffer) > 0", "def containsToken(self, token):\n if token.sentence != self.tokens[0].sentence:\n return False # not in same sentence\n \n return self.tokens[0].index <= token.index and token.index <= self.tokens[-1].index", "def is_token_required(self):\n return any([self.app_id, self._login, self._password])", "def has_finished(self) -> bool:\n return self.pos >= len(self.tokens)", "def is_last(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_last\")", "def check_for_token(token):\n try:\n decode_token(token)\n return True\n except:\n return False", "def is_last_position(self):\r\n return self.position >= len(self.rule.rightside)", "def eol(self):\n return self.pos == len(self.tokens)", "def is_last(self) -> Optional[bool]:\n return pulumi.get(self, \"is_last\")", "def lineTerminatorAhead(self):\n # Get the token ahead of the current index.\n possibleIndexEosToken = self.getCurrentToken().tokenIndex - 1\n ahead = self._input.get(possibleIndexEosToken)\n\n if ahead.channel != Lexer.HIDDEN:\n # We're only interested in tokens on the HIDDEN channel.\n return False\n\n if ahead.type == ECMAScriptParser.LineTerminator:\n # There is definitely a line terminator ahead.\n return True\n\n if ahead.type == ECMAScriptParser.WhiteSpaces:\n # Get the token ahead of the current whitespaces.\n possibleIndexEosToken = self.getCurrentToken().tokenIndex - 2\n ahead = self._input.get(possibleIndexEosToken)\n\n # Get the token's text and type.\n text = ahead.text\n type = ahead.type\n\n # Check if the token is, or contains a line terminator.\n return (type == ECMAScriptParser.MultiLineComment and \\\n ('\\r' in text or '\\n' in text)) or \\\n (type == ECMAScriptParser.LineTerminator)", "def has_next(self):\n return self._mu is not None or self._source.has_next()", "def IsFirstProvide(self, token):\n return self._provide_tokens and token == self._provide_tokens[0]", "def has_next(self):\n try:\n self.next()\n return True\n except (ParseException, struct.error):\n return False", "def has_next(self) -> bool:\n return self.peek() != self.sentinel", "def _is_at_end(self):\n return self._peek().token_type == scanner.TokenType.EOF", "def has_token(self):\n user_id = getattr(self, '_id', None)\n user_token = getattr(self, 'token', None)\n if 
user_id is not None and user_token is not None:\n return True\n return False", "def has_next_page(self):\n if self.page_number == 0:\n return True\n\n return self.next_page_token is not None", "def is_end_of_sentence(prev_token, current_token):\n is_capital = current_token[0].isupper()\n is_punctuation = prev_token in ('!', '?', '.')\n return is_capital and is_punctuation", "def has_next():\n\n return True", "def is_token_marker(self):\n return self.id in TOKEN", "def verify_token(self, token):\n return False", "def isEnd(self):\n return _libsbml.XMLToken_isEnd(self)", "def has_next(self):\n return not self.finished_function(self.peek)", "def _is_eof(self, symbol):\n if symbol.type == self.scanner.EOF:\n return True\n else:\n return False", "def verify_local_token(self, token):\n return token == self.master_local_token.get_token()" ]
[ "0.7509558", "0.7488049", "0.686767", "0.6495814", "0.6475725", "0.6301454", "0.62661564", "0.6179465", "0.614039", "0.6088959", "0.6046331", "0.6027082", "0.60161424", "0.5990586", "0.59724194", "0.5945365", "0.59441656", "0.59059834", "0.58650494", "0.5856617", "0.58212847", "0.5820398", "0.5819024", "0.58033615", "0.5781937", "0.57578987", "0.57485753", "0.5737106", "0.5729388", "0.5692701" ]
0.903281
0
Processes the given token for dependency information.
def ProcessToken(self, token, state_tracker): # Note that this method is in the critical path for the linter and has been # optimized for performance in the following ways: # - Tokens are checked by type first to minimize the number of function # calls necessary to determine if action needs to be taken for the token. # - The most common tokens types are checked for first. # - The number of function calls has been minimized (thus the length of this # function. if token.type == TokenType.IDENTIFIER: # TODO(user): Consider saving the whole identifier in metadata. whole_identifier_string = tokenutil.GetIdentifierForToken(token) if whole_identifier_string is None: # We only want to process the identifier one time. If the whole string # identifier is None, that means this token was part of a multi-token # identifier, but it was not the first token of the identifier. return # In the odd case that a goog.require is encountered inside a function, # just ignore it (e.g. dynamic loading in test runners). if token.string == 'goog.require' and not state_tracker.InFunction(): self._require_tokens.append(token) namespace = tokenutil.GetStringAfterToken(token) if namespace in self._required_namespaces: self._duplicate_require_tokens.append(token) else: self._required_namespaces.append(namespace) # If there is a suppression for the require, add a usage for it so it # gets treated as a regular goog.require (i.e. still gets sorted). if self._HasSuppression(state_tracker, 'extraRequire'): self._suppressed_requires.append(namespace) self._AddUsedNamespace(state_tracker, namespace, token) elif token.string == 'goog.provide': self._provide_tokens.append(token) namespace = tokenutil.GetStringAfterToken(token) if namespace in self._provided_namespaces: self._duplicate_provide_tokens.append(token) else: self._provided_namespaces.append(namespace) # If there is a suppression for the provide, add a creation for it so it # gets treated as a regular goog.provide (i.e. still gets sorted). if self._HasSuppression(state_tracker, 'extraProvide'): self._AddCreatedNamespace(state_tracker, namespace, token.line_number) elif token.string == 'goog.scope': self._scopified_file = True elif token.string == 'goog.setTestOnly': # Since the message is optional, we don't want to scan to later lines. for t in tokenutil.GetAllTokensInSameLine(token): if t.type == TokenType.STRING_TEXT: message = t.string if re.match(r'^\w+(\.\w+)+$', message): # This looks like a namespace. If it's a Closurized namespace, # consider it created. base_namespace = message.split('.', 1)[0] if base_namespace in self._closurized_namespaces: self._AddCreatedNamespace(state_tracker, message, token.line_number) break else: jsdoc = state_tracker.GetDocComment() if token.metadata and token.metadata.aliased_symbol: whole_identifier_string = token.metadata.aliased_symbol elif (token.string == 'goog.module.get' and not self._HasSuppression(state_tracker, 'extraRequire')): # Cannot use _AddUsedNamespace as this is not an identifier, but # already the entire namespace that's required. 
namespace = tokenutil.GetStringAfterToken(token) namespace = UsedNamespace(namespace, namespace, token, alias_definition=False) self._used_namespaces.append(namespace) if jsdoc and jsdoc.HasFlag('typedef'): self._AddCreatedNamespace(state_tracker, whole_identifier_string, token.line_number, namespace=self.GetClosurizedNamespace( whole_identifier_string)) else: is_alias_definition = (token.metadata and token.metadata.is_alias_definition) self._AddUsedNamespace(state_tracker, whole_identifier_string, token, is_alias_definition) elif token.type == TokenType.SIMPLE_LVALUE: identifier = token.values['identifier'] start_token = tokenutil.GetIdentifierStart(token) if start_token and start_token != token: # Multi-line identifier being assigned. Get the whole identifier. identifier = tokenutil.GetIdentifierForToken(start_token) else: start_token = token # If an alias is defined on the start_token, use it instead. if (start_token and start_token.metadata and start_token.metadata.aliased_symbol and not start_token.metadata.is_alias_definition): identifier = start_token.metadata.aliased_symbol if identifier: namespace = self.GetClosurizedNamespace(identifier) if state_tracker.InFunction(): self._AddUsedNamespace(state_tracker, identifier, token) elif namespace and namespace != 'goog': self._AddCreatedNamespace(state_tracker, identifier, token.line_number, namespace=namespace) elif token.type == TokenType.DOC_FLAG: flag = token.attached_object flag_type = flag.flag_type if flag and flag.HasType() and flag.jstype: is_interface = state_tracker.GetDocComment().HasFlag('interface') if flag_type == 'implements' or (flag_type == 'extends' and is_interface): identifier = flag.jstype.alias or flag.jstype.identifier self._AddUsedNamespace(state_tracker, identifier, token) # Since we process doctypes only for implements and extends, the # type is a simple one and we don't need any iteration for subtypes.
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _handle_token(self, token: str) -> Optional[str]:\n raise RuntimeError('Cannot use _handle_token of this abstract class.')", "def parse(token):\n\n pass", "def _handle_token(self, token: str) -> Optional[str]:\n return token", "def next_token(self, context, token):", "def parse_dependency(dep, forge):\n if '|' in dep:\n return [parse_dependency(alt, forge) for alt in dep.split('|')]\n dep = dep.strip()\n name = ''\n version = ''\n arch = ''\n version, dep = extract_text(dep)\n arch, dep = extract_text(dep, ('[', ']'))\n name = dep.strip()\n return {'forge': forge, 'product': name,\n 'constraints': use_mvn_spec(version), 'architectures': arch}", "def handle_input(self, token):\n self.pipeline.handle_input(token)", "def integrate(token_dict, dep_list):\n for dep in dep_list:\n deptype = dep.deptype\n gidx, gtoken = dep.gidx, dep.gtoken\n didx, dtoken = dep.didx, dep.dtoken\n tokenelem = token_dict[didx]\n tokenelem.deptype = deptype\n tokenelem.headidx = gidx\n token_dict[didx] = tokenelem\n token_list = []\n for idx in range(len(token_dict)):\n token_list.append(token_dict[idx + 1])\n return token_list", "def __call__(self, token_received: str, **kwargs) -> str:\n pass", "def _handle_token(self, token: str) -> Optional[str]:\n return token or self._token_handler.token", "def render_directive(self, token: SyntaxTreeNode) -> None:\n first_line = token.info.split(maxsplit=1)\n name = first_line[0][1:-1]\n arguments = \"\" if len(first_line) == 1 else first_line[1]\n content = token.content\n position = token_line(token)\n nodes_list = self.run_directive(name, arguments, content, position)\n self.current_node += nodes_list", "def _collect_token(config, cert):\n try:\n json_cert = json.loads(cert)\n except:\n click.secho(\"There was an error accessing/parsing those files!...\\n\", fg='red', reverse=True)\n if config.verbose:\n click.secho(\"The file you uploaded must be compatible with a JSON parser. 
Please revise and try again.\", fg='cyan')\n else:\n if config.verbose:\n click.secho(\"Searching for token...\", fg='white')\n try:\n token = json_cert[\"stream_token\"]\n if token is None:\n raise ValueError\n except:\n click.secho(\"Token not found in provided template!...\\n\", fg='red', reverse=True)\n if config.verbose:\n click.secho(\"Make sure your using the template file generated from 'dstream define'!\", fg='cyan')\n else:\n if config.verbose:\n click.secho(\"Found stream_token: \" + token + '\\n', fg='white')\n return token", "def __init__(self, token):\n self.token = token", "def __init__(self, token):\n self.token = token", "def __init__(self, token):\n self.token = token", "def next_token(self, context: PluginScanContext, token: MarkdownToken) -> None:\n if token.is_atx_heading:\n atx_token = cast(AtxHeadingMarkdownToken, token)\n self.__handle_atx_heading(context, atx_token)\n elif token.is_setext_heading:\n setext_token = cast(SetextHeadingMarkdownToken, token)\n self.__handle_setext_heading(setext_token)\n elif token.is_text:\n text_token = cast(TextMarkdownToken, token)\n self.__handle_text(text_token)\n elif token.is_setext_heading_end:\n end_token = cast(EndMarkdownToken, token)\n self.__handle_setext_heading_end(context, end_token)", "def __init__(self, token):\n\n self.token = token", "def parse_token(bn,token):\n return bn.split(token)[1].split('_')[0]", "def preparse(self, token):\n result = self.check_preparsers(token)\n if result is None:\n return token\n else:\n return result", "def _candidates(self, token):", "def parse_token(self, data: list) -> object:\n\n brand, model = self.retrieve_brand_info(data[0])\n parsed_data = [brand, model]\n price = self.price(data[-1])\n tokens = data[1:-1]\n\n for x, token in enumerate(tokens):\n func = part_funcs[self.part][x]\n if func.__name__ == \"hdd_data\": # Handle special case of hdd_data input being None\n data = func(token)\n if data is None:\n print(data)\n parsed_data.extend(data)\n continue\n elif func.__name__ == \"price\": # Handle special case of price data being None\n parsed_data.append(self.price(token))\n continue\n else:\n if not token or token in none_symbols:\n parsed_data.append(None)\n continue\n\n try:\n result = func(token)\n except ValueError:\n result = func(token)\n result = None\n if isinstance(result, tuple):\n parsed_data.extend(result)\n else:\n parsed_data.append(result)\n\n parsed_data.append(price)\n\n _class = part_classes[self.part]\n try:\n return _class(*parsed_data)\n except (TypeError, ValueError) as _:\n logger.error(f\"{parsed_data} is not valid input data for {_class}!\")", "def _parse_token(token: str):\r\n if token in OPERATOR_TOKENS:\r\n return Operator(token)\r\n if token.isdigit():\r\n return Number(int(token))\r\n if \".\" in token:\r\n if token.count(\".\") > 1 or token[-1] == '.':\r\n raise BadNumber(token)\r\n return Number(float(token))\r\n if token == \"i\":\r\n return ComplexNumber(0, 1)\r\n if token.isalpha():\r\n return Variable(token)\r\n raise UnknownToken(token)", "def get_token_info_remote(self, token_info_url):", "def visit(self, token: tokenize.TokenInfo) -> None:\n self._lines[token.start[0]].append(token)", "def visit(self, token: tokenize.TokenInfo) -> None:\n self._lines[token.start[0]].append(token)", "def request(self, token):\n pass", "def token_filter(tok):\n return tok is token or \\\n tok.dep_.endswith(\"mod\") or \\\n tok.dep_ == \"compound\"", "def extractToken(filename, token, numOfLines=2):\n\tfile = open(filename, \"r\")\n\tlines = 
file.readlines()\n\tfile.close()\n\tfor aLine in lines:\n\t\tif aLine.find(token) != -1:\n\t\t\treturn lines[lines.index(aLine)+1:lines.index(aLine)+1+numOfLines]\n\treturn \"\"", "def interpret(self, keyword_token, *tokens, preprocessor=None):\n keyword = self._get_local_value(keyword_token)\n if not callable(keyword):\n raise DoxhooksTypeError(keyword, keyword_token, \"callable\")\n try:\n keyword(*tokens, preprocessor=preprocessor)\n except TypeError as error:\n error_message = str(error)\n if error_message.startswith(keyword.__name__ + \"()\"):\n raise DoxhooksDataError(\"Bad syntax:\", keyword_token, tokens) \\\n from error\n raise", "def dependents(self, tokens, head_index):\n # Create head->dependency index.\n head_to_deps = {}\n for i, token in enumerate(tokens):\n head = token.edge_index\n if i != head:\n head_to_deps.setdefault(head, []).append(i)\n return head_to_deps.get(head_index, ())", "def getToken(self):\n \n raise NotImplementedError" ]
[ "0.5602669", "0.5558042", "0.55427074", "0.540906", "0.52787644", "0.5224743", "0.5199154", "0.5164769", "0.51535594", "0.50753266", "0.50697255", "0.5051195", "0.5051195", "0.5051195", "0.50148237", "0.5011514", "0.5000214", "0.49974158", "0.4989386", "0.49775434", "0.49534744", "0.49411076", "0.49222627", "0.49222627", "0.48722073", "0.48548508", "0.481129", "0.48091093", "0.48046198", "0.47618857" ]
0.6788418
0
Adds the namespace of an identifier to the list of created namespaces. If the identifier is annotated with a 'missingProvide' suppression, it is not added.
def _AddCreatedNamespace(self, state_tracker, identifier, line_number, namespace=None): if not namespace: namespace = identifier if self._HasSuppression(state_tracker, 'missingProvide'): return self._created_namespaces.append([namespace, identifier, line_number])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _AddUsedNamespace(self, state_tracker, identifier, token,\n is_alias_definition=False):\n if self._HasSuppression(state_tracker, 'missingRequire'):\n return\n\n identifier = self._GetUsedIdentifier(identifier)\n namespace = self.GetClosurizedNamespace(identifier)\n # b/5362203 If its a variable in scope then its not a required namespace.\n if namespace and not state_tracker.IsVariableInScope(namespace):\n namespace = UsedNamespace(namespace, identifier, token,\n is_alias_definition)\n self._used_namespaces.append(namespace)", "def addNamespace(self, *args):\n return _libsbml.SBMLNamespaces_addNamespace(self, *args)", "def add(self, *args):\n return _libsbml.XMLNamespaces_add(self, *args)", "def add_namespace(self, q, ns):\n if ns in self.namespaces: return self.namespaces[ns]\n self.namespaces[ns] = q\n return q", "def addNamespaces(self, *args):\n return _libsbml.SBMLNamespaces_addNamespaces(self, *args)", "def add(self, *args):\n return _libsbml.ListWrapperSBMLNamespaces_add(self, *args)", "def addPackageNamespace(self, *args):\n return _libsbml.SBMLNamespaces_addPackageNamespace(self, *args)", "def addPackageNamespaces(self, *args):\n return _libsbml.SBMLNamespaces_addPackageNamespaces(self, *args)", "def addNamespace(self, *args):\n return _libsbml.XMLToken_addNamespace(self, *args)", "def declareNamespace (self, namespace, prefix=None, add_to_map=False):\n if not isinstance(namespace, pyxb.namespace.Namespace):\n raise pyxb.UsageError('declareNamespace: must be given a namespace instance')\n if namespace.isAbsentNamespace():\n raise pyxb.UsageError('declareNamespace: namespace must not be an absent namespace')\n if prefix is None:\n prefix = namespace.prefix()\n if prefix is None:\n pfxs = self.__inScopePrefixes.get(namespace)\n if pfxs:\n prefix = next(iter(pfxs))\n while prefix is None:\n self.__namespacePrefixCounter += 1\n candidate_prefix = 'ns%d' % (self.__namespacePrefixCounter,)\n if not (candidate_prefix in self.__inScopeNamespaces):\n prefix = candidate_prefix\n ns = self.__inScopePrefixes.get(prefix)\n if ns:\n if ns != namespace:\n raise pyxb.LogicError('Prefix %s is already in use for %s' % (prefix, ns))\n return prefix\n if not self.__mutableInScopeNamespaces:\n self.__clonePrefixMap()\n self.__mutableInScopeNamespaces = True\n self.__addPrefixMap(prefix, namespace)\n return prefix", "def add(self, namespace_uri):\n # See if this namespace is already mapped to an alias\n alias = self.namespace_to_alias.get(namespace_uri)\n if alias is not None:\n return alias\n\n # Fall back to generating a numerical alias\n i = 0\n while True:\n alias = 'ext' + str(i)\n try:\n self.addAlias(namespace_uri, alias)\n except KeyError:\n i += 1\n else:\n return alias\n\n assert False, \"Not reached\"", "def addPkgNamespaces(self, *args):\n return _libsbml.SBMLNamespaces_addPkgNamespaces(self, *args)", "def new_namespace(key):\n\tif key in REGISTRY:\n\t\traise KeyError(\"key:{0} already exists\".format(key))\n\n\tREGISTRY[key] = Namespace()", "def new_namespace(*sources):\n ns = {}\n flood_namespace(ns, *sources)\n return SimpleNamespace(**ns)", "def addPkgNamespace(self, *args):\n return _libsbml.SBMLNamespaces_addPkgNamespace(self, *args)", "def add_interest(self, namespace, jid=None):\n if not isinstance(namespace, set) and not isinstance(namespace, list):\n namespace = [namespace]\n\n for ns in namespace:\n self.xmpp['xep_0030'].add_feature('%s+notify' % ns,\n jid=jid)\n self.xmpp['xep_0115'].update_caps(jid)", "async def add_namespace(self, namespace: str, **kwargs) -> Union[str, 
None]:\n if namespace == self.get_namespace(): # if it belongs to this app's namespace\n raise ValueError(\"Cannot add namespace with the same name as operating namespace\")\n\n writeback = kwargs.get(\"writeback\", \"safe\")\n persist = kwargs.get(\"persist\", True)\n\n return await self.AD.state.add_namespace(namespace, writeback, persist, self.name)", "def _add_identifier(self, mono_root, identifier, out=None):\n out = out if out is not None else identifier\n\n if hasattr(self, identifier) and getattr(self, identifier) is not None:\n mono_root[\"r:\" + out] = getattr(self, identifier)", "def create_namespace(node, namespace, delete_before_create=True):\n if delete_before_create:\n Namespaces.delete_namespace(node, namespace)\n\n cmd = f\"ip netns add {namespace}\"\n exec_cmd_no_error(node, cmd, sudo=True)\n Namespaces.__namespaces.append(namespace)", "def add_namespaces(specification):\n\n for ns in specification[\"namespaces\"]:\n specification[\"namespaces\"][ns][\"list\"] = []\n specification[\"namespaces\"][ns][\"list_long\"] = []\n specification[\"namespaces\"][ns][\"list_short\"] = []\n\n specification[\"namespaces\"][ns][\"to_short\"] = {}\n specification[\"namespaces\"][ns][\"to_long\"] = {}\n\n for obj in specification[\"namespaces\"][ns][\"info\"]:\n specification[\"namespaces\"][ns][\"list\"].extend([obj[\"name\"], obj[\"abbreviation\"]])\n specification[\"namespaces\"][ns][\"list_short\"].append(obj[\"abbreviation\"])\n specification[\"namespaces\"][ns][\"list_long\"].append(obj[\"name\"])\n\n specification[\"namespaces\"][ns][\"to_short\"][obj[\"abbreviation\"]] = obj[\"abbreviation\"]\n specification[\"namespaces\"][ns][\"to_short\"][obj[\"name\"]] = obj[\"abbreviation\"]\n\n specification[\"namespaces\"][ns][\"to_long\"][obj[\"abbreviation\"]] = obj[\"name\"]\n specification[\"namespaces\"][ns][\"to_long\"][obj[\"name\"]] = obj[\"name\"]\n\n # For AminoAcid namespace\n if \"abbrev1\" in obj:\n specification[\"namespaces\"][ns][\"to_short\"][obj[\"abbrev1\"]] = obj[\"abbreviation\"]\n specification[\"namespaces\"][ns][\"to_long\"][obj[\"abbrev1\"]] = obj[\"name\"]", "def _declare_xmlns(self, root, xmlns, declared_ns=None):\n\n if declared_ns is None:\n declared = set()\n else:\n declared = set(declared_ns)\n\n if xmlns.name not in declared:\n root.add_attribute(f'xmlns:{xmlns.name}', xmlns.uri)\n declared.add(xmlns.name)\n\n for parent_ns in xmlns.parents:\n if parent_ns.name in declared:\n continue\n\n root.add_attribute(f'xmlns:{parent_ns.name}', parent_ns.uri)\n declared.add(parent_ns.name)\n\n return declared", "def gen_namespace(self, node):\n node.functions = self.define_function_suffix(node.functions)\n for ns in node.namespaces:\n self.gen_namespace(ns)", "def _add_ns(self, ns_name):\n\n if self.allowed_ns and ns_name not in self.allowed_ns:\n raise PermissionError(\"Namespace %s not in allowed list\" % ns_name)\n\n namespace_dir = \"%s/%s\" % (self.charts_dir, ns_name)\n Path(namespace_dir).mkdir(exist_ok=True)\n return namespace_dir", "def prepend(self, *args):\n return _libsbml.ListWrapperSBMLNamespaces_prepend(self, *args)", "def add(self, camera, namespace=None):\n if namespace == '*':\n raise GenericRolloutException(\"* is not allowed to use as namespace name.\")\n with self.lock:\n namespace = namespace or 'default'\n if namespace not in self.camera_namespaces:\n self.camera_namespaces[namespace] = set()\n self.camera_namespaces[namespace].add(camera)", "def create_or_fetch_namespace(self):\n\n def _create_new_namespace():\n logger.info(\n f\"Creating a new 
namespace: {self.namespace_name} in {self.namespace_region}\"\n )\n\n data = {\n \"name\": self.namespace_name,\n \"resource_group_id\": self.resource_group_id,\n \"resource_plan_id\": \"functions-base-plan\",\n }\n\n res = requests.post(\n self.cf_namespaces_url, headers=self.get_headers(), json=data\n ).json()\n if res.status_code != 200:\n logger.error(res.text)\n namespace_id = res[\"id\"]\n logger.info(f\"Created new namespace with id: {namespace_id}\")\n return namespace_id\n\n def _get_cloud_function_namespaces_metadata(offset=0):\n \"\"\"returns meta data on namespaces of ibm cloud functions within a specified region\n :param offset - offset from the beginning of the list of results attained from the GET request,\n which may contain up to 200 namespaces per http response\"\"\"\n\n res = requests.get(\n f\"{self.cf_namespaces_url}?limit=200&offset={offset}\",\n headers=self.get_headers(),\n )\n return json.loads(res.text)\n\n def _get_cloud_function_namespaces():\n \"\"\"returns relevant metadata on existing namespaces within a given region.\"\"\"\n logger.info(\n f\"Obtaining Cloud Function namespaces in {self.namespace_region}\"\n )\n\n namespaces = []\n\n collecting_namespaces = True\n max_limit = 200\n offset = 0\n\n # request for namespaces is limited to 200 at a time, thus the request is fulfilled in increments of 200s.\n while collecting_namespaces:\n namespace_metadata = _get_cloud_function_namespaces_metadata(offset)\n if namespace_metadata[\"total_count\"] == max_limit:\n offset += max_limit\n else:\n collecting_namespaces = False\n\n for name_space in namespace_metadata[\"namespaces\"]:\n if \"name\" in name_space: # API based namespace\n namespaces.append(\n {\n \"name\": name_space[\"name\"],\n \"type\": \"API_based\",\n \"id\": name_space[\"id\"],\n \"region\": name_space[\"location\"],\n }\n )\n\n else: # cloud foundry based namespace\n namespaces.append(\n {\n \"name\": name_space[\"id\"],\n \"type\": \"CF_based\",\n \"region\": name_space[\"location\"],\n }\n )\n\n return namespaces\n\n namespaces_in_region = _get_cloud_function_namespaces()\n target_namespace_id = None\n if namespaces_in_region:\n target_namespace_id = next(\n (\n namespace[\"id\"]\n for namespace in namespaces_in_region\n if namespace[\"name\"] == self.namespace_name\n ),\n None,\n )\n if not target_namespace_id:\n target_namespace_id = _create_new_namespace()\n else:\n logger.info(f\"Reusing namespace: {target_namespace_id}\")\n return target_namespace_id", "def ShouldRequireNamespace(namespace, identifier):\n return (\n not self._IsPrivateIdentifier(identifier) and\n namespace not in external_dependencies and\n namespace not in self._provided_namespaces and\n identifier not in external_dependencies and\n identifier not in created_identifiers and\n namespace not in missing_requires)", "def addL2Namespaces(self, *args):\n return _libsbml.SBMLExtension_addL2Namespaces(self, *args)", "def addL2Namespaces(self, *args):\n return _libsbml.LayoutExtension_addL2Namespaces(self, *args)", "def addL2Namespaces(self, *args):\n return _libsbml.SBMLExtensionRegistry_addL2Namespaces(self, *args)" ]
[ "0.6178203", "0.5934162", "0.57995856", "0.57396895", "0.56798714", "0.5676811", "0.5642477", "0.55801946", "0.55589324", "0.5475503", "0.54490924", "0.52659315", "0.5214463", "0.5213079", "0.52038467", "0.51981443", "0.49920872", "0.49379152", "0.49338087", "0.49110115", "0.48945013", "0.4893718", "0.48695162", "0.4773202", "0.4715798", "0.46993384", "0.46271014", "0.46027344", "0.45861697", "0.4578373" ]
0.7534255
0
Adds the namespace of an identifier to the list of used namespaces. If the identifier is annotated with a 'missingRequire' suppression, it is not added.
def _AddUsedNamespace(self, state_tracker, identifier, token, is_alias_definition=False): if self._HasSuppression(state_tracker, 'missingRequire'): return identifier = self._GetUsedIdentifier(identifier) namespace = self.GetClosurizedNamespace(identifier) # b/5362203 If its a variable in scope then its not a required namespace. if namespace and not state_tracker.IsVariableInScope(namespace): namespace = UsedNamespace(namespace, identifier, token, is_alias_definition) self._used_namespaces.append(namespace)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _AddCreatedNamespace(self, state_tracker, identifier, line_number,\n namespace=None):\n if not namespace:\n namespace = identifier\n\n if self._HasSuppression(state_tracker, 'missingProvide'):\n return\n\n self._created_namespaces.append([namespace, identifier, line_number])", "def add_namespace(self, q, ns):\n if ns in self.namespaces: return self.namespaces[ns]\n self.namespaces[ns] = q\n return q", "def addNamespace(self, *args):\n return _libsbml.SBMLNamespaces_addNamespace(self, *args)", "def add(self, *args):\n return _libsbml.XMLNamespaces_add(self, *args)", "def addNamespace(self, *args):\n return _libsbml.XMLToken_addNamespace(self, *args)", "def addNamespaces(self, *args):\n return _libsbml.SBMLNamespaces_addNamespaces(self, *args)", "def addPackageNamespace(self, *args):\n return _libsbml.SBMLNamespaces_addPackageNamespace(self, *args)", "def declareNamespace (self, namespace, prefix=None, add_to_map=False):\n if not isinstance(namespace, pyxb.namespace.Namespace):\n raise pyxb.UsageError('declareNamespace: must be given a namespace instance')\n if namespace.isAbsentNamespace():\n raise pyxb.UsageError('declareNamespace: namespace must not be an absent namespace')\n if prefix is None:\n prefix = namespace.prefix()\n if prefix is None:\n pfxs = self.__inScopePrefixes.get(namespace)\n if pfxs:\n prefix = next(iter(pfxs))\n while prefix is None:\n self.__namespacePrefixCounter += 1\n candidate_prefix = 'ns%d' % (self.__namespacePrefixCounter,)\n if not (candidate_prefix in self.__inScopeNamespaces):\n prefix = candidate_prefix\n ns = self.__inScopePrefixes.get(prefix)\n if ns:\n if ns != namespace:\n raise pyxb.LogicError('Prefix %s is already in use for %s' % (prefix, ns))\n return prefix\n if not self.__mutableInScopeNamespaces:\n self.__clonePrefixMap()\n self.__mutableInScopeNamespaces = True\n self.__addPrefixMap(prefix, namespace)\n return prefix", "def addPackageNamespaces(self, *args):\n return _libsbml.SBMLNamespaces_addPackageNamespaces(self, *args)", "def add(self, namespace_uri):\n # See if this namespace is already mapped to an alias\n alias = self.namespace_to_alias.get(namespace_uri)\n if alias is not None:\n return alias\n\n # Fall back to generating a numerical alias\n i = 0\n while True:\n alias = 'ext' + str(i)\n try:\n self.addAlias(namespace_uri, alias)\n except KeyError:\n i += 1\n else:\n return alias\n\n assert False, \"Not reached\"", "async def add_namespace(self, namespace: str, **kwargs) -> Union[str, None]:\n if namespace == self.get_namespace(): # if it belongs to this app's namespace\n raise ValueError(\"Cannot add namespace with the same name as operating namespace\")\n\n writeback = kwargs.get(\"writeback\", \"safe\")\n persist = kwargs.get(\"persist\", True)\n\n return await self.AD.state.add_namespace(namespace, writeback, persist, self.name)", "def add(self, *args):\n return _libsbml.ListWrapperSBMLNamespaces_add(self, *args)", "def ShouldRequireNamespace(namespace, identifier):\n return (\n not self._IsPrivateIdentifier(identifier) and\n namespace not in external_dependencies and\n namespace not in self._provided_namespaces and\n identifier not in external_dependencies and\n identifier not in created_identifiers and\n namespace not in missing_requires)", "def addPkgNamespaces(self, *args):\n return _libsbml.SBMLNamespaces_addPkgNamespaces(self, *args)", "def addPkgNamespace(self, *args):\n return _libsbml.SBMLNamespaces_addPkgNamespace(self, *args)", "def add_interest(self, namespace, jid=None):\n if not isinstance(namespace, set) and not 
isinstance(namespace, list):\n namespace = [namespace]\n\n for ns in namespace:\n self.xmpp['xep_0030'].add_feature('%s+notify' % ns,\n jid=jid)\n self.xmpp['xep_0115'].update_caps(jid)", "def add_namespaces(specification):\n\n for ns in specification[\"namespaces\"]:\n specification[\"namespaces\"][ns][\"list\"] = []\n specification[\"namespaces\"][ns][\"list_long\"] = []\n specification[\"namespaces\"][ns][\"list_short\"] = []\n\n specification[\"namespaces\"][ns][\"to_short\"] = {}\n specification[\"namespaces\"][ns][\"to_long\"] = {}\n\n for obj in specification[\"namespaces\"][ns][\"info\"]:\n specification[\"namespaces\"][ns][\"list\"].extend([obj[\"name\"], obj[\"abbreviation\"]])\n specification[\"namespaces\"][ns][\"list_short\"].append(obj[\"abbreviation\"])\n specification[\"namespaces\"][ns][\"list_long\"].append(obj[\"name\"])\n\n specification[\"namespaces\"][ns][\"to_short\"][obj[\"abbreviation\"]] = obj[\"abbreviation\"]\n specification[\"namespaces\"][ns][\"to_short\"][obj[\"name\"]] = obj[\"abbreviation\"]\n\n specification[\"namespaces\"][ns][\"to_long\"][obj[\"abbreviation\"]] = obj[\"name\"]\n specification[\"namespaces\"][ns][\"to_long\"][obj[\"name\"]] = obj[\"name\"]\n\n # For AminoAcid namespace\n if \"abbrev1\" in obj:\n specification[\"namespaces\"][ns][\"to_short\"][obj[\"abbrev1\"]] = obj[\"abbreviation\"]\n specification[\"namespaces\"][ns][\"to_long\"][obj[\"abbrev1\"]] = obj[\"name\"]", "def matchesRequiredSBMLNamespacesForAddition(self, *args):\n return _libsbml.SBase_matchesRequiredSBMLNamespacesForAddition(self, *args)", "def setOpenIDNamespace(self, openid_ns_uri, implicit):\n if isinstance(openid_ns_uri, bytes):\n openid_ns_uri = str(openid_ns_uri, encoding=\"utf-8\")\n if openid_ns_uri not in self.allowed_openid_namespaces:\n raise InvalidOpenIDNamespace(openid_ns_uri)\n\n self.namespaces.addAlias(openid_ns_uri, NULL_NAMESPACE, implicit)\n self._openid_ns_uri = openid_ns_uri", "def _load_namespaces(self):\n nsdocs = self._docset.get_namespaces()\n for nsdoc in nsdocs:\n nsobj = Namespace(nsdoc)\n self._docmap[nsdoc] = nsobj\n self._namespaces.add(nsobj)", "def testIgnoredExtraNamespaces(self):\n token = self._GetRequireTokens('package.Something')\n namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(\n closurized_namespaces=['package'],\n ignored_extra_namespaces=['package.Something'])\n\n self.assertFalse(namespaces_info.IsExtraRequire(token),\n 'Should be valid since it is in ignored namespaces.')\n\n namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(\n ['package'], [])\n\n self.assertTrue(namespaces_info.IsExtraRequire(token),\n 'Should be invalid since it is not in ignored namespaces.')", "def addAlias(self, namespace_uri, desired_alias, implicit=False):\n if isinstance(namespace_uri, bytes):\n namespace_uri = str(namespace_uri, encoding=\"utf-8\")\n # Check that desired_alias is not an openid protocol field as\n # per the spec.\n assert desired_alias not in OPENID_PROTOCOL_FIELDS, \\\n \"%r is not an allowed namespace alias\" % (desired_alias,)\n\n # Check that desired_alias does not contain a period as per\n # the spec.\n if isinstance(desired_alias, str):\n assert '.' 
not in desired_alias, \\\n \"%r must not contain a dot\" % (desired_alias,)\n\n # Check that there is not a namespace already defined for\n # the desired alias\n current_namespace_uri = self.alias_to_namespace.get(desired_alias)\n if (current_namespace_uri is not None and\n current_namespace_uri != namespace_uri):\n\n fmt = ('Cannot map %r to alias %r. '\n '%r is already mapped to alias %r')\n\n msg = fmt % (namespace_uri, desired_alias, current_namespace_uri,\n desired_alias)\n raise KeyError(msg)\n\n # Check that there is not already a (different) alias for\n # this namespace URI\n alias = self.namespace_to_alias.get(namespace_uri)\n if alias is not None and alias != desired_alias:\n fmt = ('Cannot map %r to alias %r. '\n 'It is already mapped to alias %r')\n raise KeyError(fmt % (namespace_uri, desired_alias, alias))\n\n assert (desired_alias == NULL_NAMESPACE or\n type(desired_alias) in [str, str]), repr(desired_alias)\n assert namespace_uri not in self.implicit_namespaces\n self.alias_to_namespace[desired_alias] = namespace_uri\n self.namespace_to_alias[namespace_uri] = desired_alias\n if implicit:\n self.implicit_namespaces.append(namespace_uri)\n return desired_alias", "def new_namespace(key):\n\tif key in REGISTRY:\n\t\traise KeyError(\"key:{0} already exists\".format(key))\n\n\tREGISTRY[key] = Namespace()", "def __setattr__(self, name, value):\n if not isinstance(name, str):\n raise ValueError('Namespace label must be a string')\n if name.startswith('_'):\n raise ValueError('Namespace cannot start with an underscore')\n\n if name in self._namespaces:\n raise ValueError('Namespaces cannot be redefined')\n\n self._namespaces[name] = Namespace(name, label=value)", "def _declare_xmlns(self, root, xmlns, declared_ns=None):\n\n if declared_ns is None:\n declared = set()\n else:\n declared = set(declared_ns)\n\n if xmlns.name not in declared:\n root.add_attribute(f'xmlns:{xmlns.name}', xmlns.uri)\n declared.add(xmlns.name)\n\n for parent_ns in xmlns.parents:\n if parent_ns.name in declared:\n continue\n\n root.add_attribute(f'xmlns:{parent_ns.name}', parent_ns.uri)\n declared.add(parent_ns.name)\n\n return declared", "def _add_ns(self, ns_name):\n\n if self.allowed_ns and ns_name not in self.allowed_ns:\n raise PermissionError(\"Namespace %s not in allowed list\" % ns_name)\n\n namespace_dir = \"%s/%s\" % (self.charts_dir, ns_name)\n Path(namespace_dir).mkdir(exist_ok=True)\n return namespace_dir", "def _add_identifier(self, mono_root, identifier, out=None):\n out = out if out is not None else identifier\n\n if hasattr(self, identifier) and getattr(self, identifier) is not None:\n mono_root[\"r:\" + out] = getattr(self, identifier)", "def prepend(self, *args):\n return _libsbml.ListWrapperSBMLNamespaces_prepend(self, *args)", "def set_namespace(key, dic):\n\tnew_namespace(key)\n\tREGISTRY[key] = Namespace(dic)", "def GetRequiredNamespaces(self):\n return set(self._required_namespaces)" ]
[ "0.66084504", "0.6232781", "0.61949253", "0.5883757", "0.58255833", "0.5819052", "0.5810038", "0.5806278", "0.5666905", "0.5602587", "0.55705494", "0.5505676", "0.547098", "0.5374336", "0.5356941", "0.5347893", "0.5189263", "0.51137614", "0.5087794", "0.50860125", "0.501923", "0.49926707", "0.49510488", "0.49295765", "0.49146864", "0.49128398", "0.4891329", "0.48432338", "0.47616836", "0.4758912" ]
0.70048755
0
Strips apply/call/inherit calls from the identifier.
def _GetUsedIdentifier(self, identifier):
    for suffix in ('.apply', '.call', '.inherit'):
        if identifier.endswith(suffix):
            return identifier[:-len(suffix)]
    return identifier
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def disable_named_call():\n global _use_named_call\n _use_named_call = False", "def restore_user_defined_calls(*args):\n return _ida_hexrays.restore_user_defined_calls(*args)", "def __call__(fun_name):", "def _unwrap_simple_call(self, node: ast.expr) -> ast.expr:\n if isinstance(node, ast.Call) and len(node.args) == 1 and not node.keywords:\n return self._unwrap_simple_call(node.args[0])\n return node", "def replaceSIDWithFunction(self, *args):\n return _libsbml.Delay_replaceSIDWithFunction(self, *args)", "def enable_named_call():\n global _use_named_call\n _use_named_call = True", "def replaceSIDWithFunction(self, *args):\n return _libsbml.Priority_replaceSIDWithFunction(self, *args)", "def toggle_call(self) -> None:", "def replaceSIDWithFunction(self, *args):\n return _libsbml.SBase_replaceSIDWithFunction(self, *args)", "def unpolish(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n pass", "def replaceIDWithFunction(self, *args):\n return _libsbml.ASTBasePlugin_replaceIDWithFunction(self, *args)", "def transform(self, *args):\n return _libsbml.IdentifierTransformer_transform(self, *args)", "def replaceSIDWithFunction(self, *args):\n return _libsbml.Rule_replaceSIDWithFunction(self, *args)", "def __call__(obj):", "def __call__(self, x, **kwargs):\n del kwargs\n for f in self._functions:\n x = f(x)\n return x", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def stripall(self, lst):\n return map(operator.methodcaller(\"strip\"), lst)", "def transformIdentifiers(self, *args):\n return _libsbml.SBase_transformIdentifiers(self, *args)", "def replace_operators_by_calls(topconstruct, opname, call, call_id_construct):\n # find all computations\n for computation in query([is_computation], TreeItem(topconstruct)):\n replace_op_by_call(computation.construct, opname, call, call_id_construct)", "def replaceSIDWithFunction(self, *args):\n return _libsbml.KineticLaw_replaceSIDWithFunction(self, *args)", "def __call__(object):", "def __call__(value):", "def replaceSIDWithFunction(self, *args):\n return _libsbml.SBasePlugin_replaceSIDWithFunction(self, *args)", "def simplify_IDs(self, IDs):\n raise NotImplementedError", "def identify(func):\n def identified(arg):\n func(arg)\n return arg\n return identified", "def _call(self, args):\n a = args.split(' ', 1)\n if a:\n getattr(self, a[0])(*a[1:])" ]
[ "0.5675074", "0.5346134", "0.5218611", "0.51925397", "0.51191026", "0.509856", "0.5074015", "0.5058325", "0.50206256", "0.49795005", "0.49678898", "0.49467093", "0.49240702", "0.4921765", "0.49191517", "0.49014243", "0.49014243", "0.49014243", "0.49014243", "0.49014243", "0.4899252", "0.48949045", "0.48879677", "0.4868723", "0.48547947", "0.4823897", "0.48198634", "0.4816827", "0.481517", "0.48079315" ]
0.5554346
1
Display each taxi in taxi list.
def display_taxis(taxis):
    for i, taxi in enumerate(taxis):
        print(f"{i} - {taxi}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def taxis_available(taxi_types):\n for number, taxi in enumerate(taxi_types):\n print(\"{} - {}\".format(number, taxi))", "def display_taxis(taxis):\n for i, taxi in enumerate(taxis):\n print(\"{} - {}\".format(i, taxi))", "def display(self):\r\n\t\tfor each_item in self.items:\r\n\t\t\teach_item.display()", "def print_taxids(tree):\n\n for n in tree.traverse(\"postorder\"):\n m = re.search('\\[(\\d+)\\]', n.name)\n if not m:\n sys.stderr.write(\"No taxid in {}\\n\".format(n.name))\n else:\n print(\"{}\\t{}\".format(m.groups()[0], n.name))", "def display_list(d):\n print(\"\\nOur generous donors: \\n\")\n for donor_name in iter(d.donors):\n print(donor_name)\n print(\"\\n\")", "def write_taxon_item(tax: TMB_Classes.RankedTaxonClass, ind: str) -> None:\n # starttag, endtag = rank_tags(tax.taxon_rank)\n # outfile.write(ind + \"<li><a href=\\\"#{}\\\">{} {}{}{}</a>\".format(taxon_link(tax), tax.taxon_rank.capitalize(),\n # starttag, tax.name, endtag))\n outfile.write(ind + \"<li>\" + create_taxon_link(tax.taxon_rank, tax.name, do_print, same_page=True) + \"\\n\")\n outfile.write(ind + \" <ul>\\n\")\n if tax.n_children() > 0:\n for cc in sorted(tax.children):\n write_taxon_item(cc, ind + 4 * \" \")\n else:\n ssplist = []\n for ss in specieslist:\n if tax.taxon_rank == \"genus\":\n if ss.genus == tax.name:\n ssplist.append(create_species_link(ss.genus, ss.species, do_print, status=ss.status))\n elif tax.taxon_rank == \"subgenus\":\n if ss.subgenus == tax.name:\n ssplist.append(create_species_link(ss.genus, ss.species, do_print, status=ss.status))\n outfile.write(ind + \" <li>\" + \", \".join(ssplist) + \"</li>\\n\")\n outfile.write(ind + \" </ul>\\n\")\n outfile.write(ind + \"</li>\\n\")", "def display_all(self):\n print(\"Price: \" + str(self.price))\n print(\"Speed: \" + str(self.speed) + \"mph\")\n print(\"Fuel: \" + self.fuel)\n print(\"Mileage: \" + str(self.mileage) + \"mpg\")\n print(\"Tax: \" + str(self.tax))\n return self", "def get_all_teas(self):\n self.tView.all_teas_display(self.manyTea)\n self.tView.prompt_display(0)", "def _compute_tax(self):\n for line in self:\n line.tax = (line.amount_untaxed * 14) / 100", "def _compute_tax_id(self):\n for order in self:\n order.order_line._compute_tax_id()", "def printEntry(self,i):\n for name in self.namelist:\n value = self.getValue(name,i)\n if value is None:\n return\n print(name,self.getValue(name,i))", "def display_cure(self):\n for medicine in self.cure:\n medicine.display()", "def consultI(listaI): # Esta sección fue hecha por Ángel\n for fila in listaI:\n print(\"\\n\")\n for elemento in fila:\n print(elemento + \"\\t\",end = \"\")", "def print_food(self):\n for dish in self.food:\n print(dish.get_name())", "def list_viewer(listt):\n\tif len(listt) == 0:\n\t\tprint(\"There are no elements\")\n\t\tprint()\n\telse:\n\t\ti = 0\n\t\tfor dictionary in listt:\n\t\t\ti += 1\n\t\t\tprint(f\"Account #{i} »»\")\n\t\t\tprint(\n\t\t\t\t\"\\tService Name: \", dictionary[\"service\"], \"\\n\",\n\t\t\t\t\"\\tUser Name: \", dictionary[\"user\"], \"\\n\",\n\t\t\t\t\"\\tPassword: \", dictionary[\"password\"], \"\\n\",\n\t\t\t\t)", "def display(self):\r\n os.system('cls')\r\n index = 0\r\n for i in self.list:\r\n print(str(index) + \" \" + i.showRule())\r\n index += 1", "def print_occupants(self):\n for num, member in enumerate(self.occupants, start=1):\n print(num, member.name)", "def tax_lines(self):\n raise NotImplemented", "def display_tournament_list():\r\n for tournament in tournaments_table:\r\n print(tournament['Nom'])", "def 
showtrafficitemnames():\n trafficItems = middleware.trafficObj.getAllTrafficItemNames()\n print('\\nAll Traffic Items:\\n')\n for index, eachTrafficItem in enumerate(trafficItems):\n print('\\t{0}: {1}'.format(int(index)+1, eachTrafficItem))\n print()", "def test_client_tax_information_list(self):\n pass", "def get_taxa(self, **kwargs):\n if \"oids\" not in kwargs and \"labels\" not in kwargs:\n raise TypeError(\"Need to specify taxa oid's or labels\")\n oids = kwargs.get(\"oids\", [])\n labels = kwargs.get(\"labels\", [])\n taxa = []\n for oid in oids:\n t = self.get_taxon(oid=oid)\n if t:\n taxa.append(t)\n for label in labels:\n t = self.get_taxon(label=label)\n if t:\n taxa.append(t)\n return taxa", "def displayTriples(triples, qname=qname):\n [print(*(e[:5]\n if isinstance(e, rdflib.BNode) else\n qname(e)\n for e in t), '.')\n for t in sorted(triples)]", "def show(self):\n for x in range(0,3):\n for y in range(0,3):\n item = self[x,y]\n print(f\"({x},{y}): {item.id}, {item.cw}\")", "def display_list(the_list):\n print(\"\\n===================================\")\n for person in the_list:\n print(\"{name:12s}\\t\\t{phone}\".format(name=person.name, phone=person.phone))\n if the_list == []:\n print(\"\\nNo entries found!\\n\")\n print(\"===================================\\n\")", "def print_kraken_otu_table(linked_dict, tax_ids, out=\"contig_taxonomy_table.txt\"):\n unique_tax = get_unique_dict(tax_ids)\n\n\n sorted_tax = sorted(unique_tax.keys())\n with open(out, 'w') as OUT:\n OUT.write(\"\\t\".join([\"contig\"] + sorted_tax) + \"\\n\")\n\n \n for contig, tid_dict in linked_dict.items():\n to_print = []\n to_print.append(contig)\n\n \n for tax in sorted_tax:\n count = 0\n for tid in unique_tax[tax]:\n count += tid_dict.get(tid, 0)\n\n to_print.append(str(count))\n\n OUT.write(\"\\t\".join(to_print) + \"\\n\")", "def print_table(listx):\r\n\tfor lists in listx:\r\n\t\tfor i in lists:\r\n\t\t\tprint str(i) , '\\t',\r\n\t\tprint()", "def print_users(self):\n for i, item in enumerate(self.users):\n print(\"{}. {}\".format(i, item.name))", "def table_info(self):\n for customer in self.customers:\n print(customer.get_name())", "def display(self):\n for x, p in zip(self.xs, self.ps):\n print(x, p)" ]
[ "0.65396684", "0.6429241", "0.61385715", "0.58961475", "0.57848", "0.57381195", "0.56497097", "0.5648627", "0.55053335", "0.5472496", "0.5460303", "0.53348786", "0.53228974", "0.5319868", "0.52896756", "0.52867186", "0.52786404", "0.5254497", "0.5254258", "0.52528995", "0.52475476", "0.5221158", "0.5215936", "0.5211438", "0.5202912", "0.5201453", "0.5193274", "0.5178052", "0.51438385", "0.5092915" ]
0.66690373
0
Returns the sorted topN docs for a single queryID.
def get_topN_docs(click_model, queryID):
    # for every queryID, find the first top 10 relevant docs
    unordered_docs = []  # list of [ind, rank] for all relevant docs
    for ind in click_model[queryID]:
        document = click_model[queryID][ind]
        rank = document['rank']
        # moving the docID into the doc-dictionary, so I can shuffle documents and easily keep this info
        # a little hackey, but can be changed if needed later down the pipeline
        document['docID'] = ind
        if rank is not None:
            unordered_docs.append([ind, rank])
    # sorts unordered_docs by doc rankings
    ordered_docs = (sorted(unordered_docs, key=lambda docs: docs[1]))
    ordered_docIDs = list(zip(*ordered_docs))[0]
    ranked_docs = []
    for docID in ordered_docIDs:
        ranked_docs.append(click_model[queryID][docID])
    return ranked_docs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_top_docs(self, query_vectors, n_docs):\n raise NotImplementedError", "def top_files(query, files, idfs, n):\n tf_idfs = []\n for filename, filewords in files.items():\n tf_idf = 0\n\n for word in query:\n if word not in idfs:\n continue\n idf = idfs[word]\n tf = filewords.count(word)\n tf_idf += idf * tf\n t = (filename, tf_idf)\n tf_idfs.append(t)\n\n sorted_list = sorted(tf_idfs, key=lambda k: k[1])\n sorted_list.reverse()\n file_list = [item[0] for item in sorted_list]\n\n return file_list[:n]", "def top_files(query, files, idfs, n):\n file_scores = dict()\n\n for file, words in files.items():\n total_tf_idf = 0\n for word in query:\n total_tf_idf += words.count(word) * idfs[word]\n file_scores[file] = total_tf_idf\n\n ranked_files = sorted(file_scores.items(), key=lambda x: x[1], reverse=True)\n ranked_files = [x[0] for x in ranked_files]\n\n return ranked_files[:n]", "def top_files(query, files, idfs, n):\n # calculate term-frequency of each words in query\n tf = dict()\n for query_word in query:\n tf[query_word] = dict()\n for file_name in files:\n tf[query_word][file_name] = files[file_name].count(query_word)\n\n # claculate tf-idfs of each document\n tf_idfs = dict()\n for file_name in files:\n tf_idfs[file_name] = 0\n for query_word in query:\n tf_idfs[file_name] += tf[query_word][file_name] * idfs[query_word]\n \n # create sorted list by tf_idfs\n sorted_tf_idfs = sorted(tf_idfs, key= lambda item: tf_idfs[item], reverse= True)\n\n # return list contains top n file names\n top_files_names = []\n for index in range(n):\n top_files_names.append(sorted_tf_idfs[index]) \n\n return top_files_names", "def top_files(query, files, idfs, n):\n tfidfs = dict()\n for filename in files:\n tfidfs[filename] = 0\n for word in query:\n tfidfs[filename] += files[filename].count(word) * idfs[word]\n\n files_idfs = sorted(tfidfs.items(), key=lambda item: item[1], reverse=True)[:n]\n\n return [key for key, value in files_idfs]", "def top_files(query, files, idfs, n):\n tf_idfs = dict()\n for key, value in files.items():\n tf_idfs[key] = 0\n for word in query:\n if word in list(idfs):\n # tf-idf for a term is computed by multiplying the number of times the term appears in the document by the IDF value for that term\n # len vs .count\n tf_idfs[key] += value.count(word) * idfs[word]\n else:\n tf_idfs[key] += 0\n # .items() -- Returns: A view object that displays a list of a given dictionary's (key, value) tuple pair\n # use sorted(iterable, *, key, reverse) -- key=lambda: numerical order, x[1]: second column i.e. 
sum of values from .count, reverse=TRUE: descending order\n # returned list of filenames should be of length n -- [:n] (begining of list to length n)\n return [key for key, value in sorted(tf_idfs.items(), key=lambda x: x[1], reverse=True)][:n]", "def top_files(query, files, idfs, n):\n ranking = {}\n for f in files:\n currentSum = 0\n for word in query:\n currentSum += files[f].count(word) * idfs[word]\n ranking[f] = currentSum\n sortedRank = sorted(ranking.keys(), key=lambda x: ranking[x], reverse=True)\n return sortedRank[:n]", "def top_sentences(query, sentences, idfs, n):\n tf_idfs = []\n for sentence, words in sentences.items():\n tf_idf = 0\n\n for word in query:\n if word not in idfs:\n continue\n idf = idfs[word]\n tf = (1 if word in words else 0)\n tf_idf += idf * tf\n t = (sentence, tf_idf)\n tf_idfs.append(t)\n\n sorted_list = sorted(tf_idfs, key=sorter)\n sorted_list.reverse()\n file_list = [item[0] for item in sorted_list]\n\n return file_list[:n]", "def top_files(query, files, idf, n):\n # file name vs total idf value\n tfidf={}\n for file in files:\n total_idf=0\n for word in query:\n if word in files[file]:\n total_idf+=idf[word]\n tfidf[file]=total_idf\n ll=[(tfidf[i],i) for i in tfidf]\n ll.sort(reverse=True)\n ans=[]\n for i in range(n):\n ans.append(ll[i][1])\n return ans", "def top_files(query, files, idfs, n):\n\n _dict = {}\n _idfs = idfs.copy()\n \n # Restructure our data to make it easier to work with\n for word in query:\n for f in files:\n try:\n check = _dict[f]\n except KeyError:\n _dict[f] = {}\n\n if word in files[f]:\n # +1 for laplace smoothing\n _dict[f][word] = (_idfs[word] * ( files[f].count(word) + 1 ) )\n\n # Sum of all file's tf-idf's\n sum_files = {}\n for f in _dict:\n\n try:\n check = sum_files[f]\n except KeyError:\n sum_files[f] = 0\n\n sum_files[f] = sum(_dict[f].values())\n \n top_files = []\n\n stop = 0\n\n while len(top_files) < n:\n\n top = \"\"\n g = 0.0\n\n # If all of theh file idfs are 0 - the selection is arbitrary\n if sum(sum_files.values()) == 0:\n _next = list(sum_files.keys())[0]\n top_files.append(_next)\n del sum_files[_next]\n continue\n \n for k, v in sum_files.items():\n \n if float(v) > float(g):\n g = v\n top = k\n \n top_files.append(top)\n del sum_files[top]\n\n # print(f\"Top Files:\\n\\t{top_files}\")\n return top_files", "def get_ranked_docs(self, query, k=-1):\n sim_vector, sim_id2doc_id = self.get_sim_vector(query, k)\n return self.get_sorted_docs(sim_vector, sim_id2doc_id, k)", "def top_n(userid):\n agg = [s[\"search_id\"] for s in db_searches.find()]\n table = pd.DataFrame()\n table[\"searches\"] = Counter(agg).keys()\n table[\"count\"] = Counter(agg).values()\n table = table.sort_values(\"count\", ascending=False)\n table = table[:10]\n search_ids = table[\"searches\"].values\n counts = table[\"count\"].values\n n = 0\n top_n = []\n while n < len(search_ids):\n top_n.append([str(db_search_terms.find_one({\"_id\": search_ids[n]}).get(\"value\")), str(counts[n])])\n n += 1\n jsonob = jsonify(top_n=top_n)\n return jsonob", "def top_sentences(query, sentences, idfs, n):\n\n # claculate idfs of each sentence\n sent_score = dict()\n for sentence in sentences:\n sent_score[sentence] = 0\n for query_word in query:\n if query_word in sentences[sentence]:\n sent_score[sentence] += idfs[query_word]\n\n # create sorted list of sentences\n sorted_sentences = sorted(sent_score, key= lambda item: sent_score[item], reverse= True)\n\n # re-order sentences with the same rank of idfs according to query term density\n loop_sentences 
= sorted_sentences.copy()\n for sentence1 in loop_sentences:\n for sentence2 in loop_sentences:\n if sentence1 != sentence2:\n if sent_score[sentence1] == sent_score[sentence2]:\n qtd1 = query_term_density(sentence1, query, sentences)\n qtd2 = query_term_density(sentence2, query, sentences)\n index1 = sorted_sentences.index(sentence1)\n index2 = sorted_sentences.index(sentence2)\n if qtd1 > qtd2:\n if index1 > index2:\n sorted_sentences[index2], sorted_sentences[index1] = sorted_sentences[index1], sorted_sentences[index2]\n elif qtd1 < qtd2:\n if index1 < index2:\n sorted_sentences[index2], sorted_sentences[index1] = sorted_sentences[index1], sorted_sentences[index2]\n\n # get list contains top n sentences\n top_sentences = []\n for index in range(n):\n top_sentences.append(sorted_sentences[index]) \n\n return top_sentences", "def topTags(db, topN=1000):\n c=db.cursor()\n c.execute(\"\"\"\n SELECT\n tag\n FROM tags\n GROUP BY tag\n ORDER BY COUNT(*) DESC\n LIMIT %d\n \"\"\" % topN)\n tops = [tag0[0] for tag0 in c.fetchall()]\n c.close()\n return tops", "def topArticles():\n c = db.cursor()\n c.execute(\"select titles.title, tophits.hits\\\n from tophits, titles\\\n where tophits.path = titles.slug\\\n order by hits desc limit 3;\")\n results = c.fetchall()\n c.close()\n return results", "def top_sentences(query, sentences, idfs, n):\n rank = []\n\n for sentence in sentences:\n sentence_values = [sentence, 0, 0]\n\n for word in query:\n if word in sentences[sentence]:\n # Compute matching word measure. Sum of IDF values.\n sentence_values[1] += idfs[word]\n # Compute query term density. Proportion of words in a sentence that are in the query.\n sentence_values[2] += sentences[sentence].count(\n word) / len(sentences[sentence])\n\n rank.append(sentence_values)\n\n rank = sorted(rank, key=lambda x: (x[1], x[2]), reverse=True)[:n]\n \n return [sentence for sentence, mwm, qtd in rank]", "def getDocs(self, qryid, counter = None):\n if (counter is None) or (counter>=len(self.qryDocs[qryid])):\n return self.qryDocs[qryid]\n else:\n return self.qryDocs[qryid][:counter]", "def search(self, query: str, n=10):\n query_terms = self.tokenize(query)\n scores = self.calc_cosine_scores(query_terms)\n\n # return the top N document\n results = nlargest(n, scores.items(), key=lambda x: x[1])\n for i, (doc_id, score) in enumerate(results):\n print(i, self.documents[doc_id], score)", "def print_top_docs(self, n=10):\n ###TODO\n for c_id, clust in self.fin_clust.items():\n dict_1 = defaultdict(float)\n m = self.means[c_id]\n nor = self.sqnorm(m)\n for dc in clust:\n if len(set(self.docs[dc].keys())) >= 4:\n dict_1[dc] = self.distance(self.docs[dc],m,nor)\n sorted_items = [x[0] for x in sorted(dict_1.items(), key=lambda x:x[1])]\n sorted_items = sorted_items[0:n]\n print ('CLUSTER ', c_id)\n for d_id in sorted_items: \n string = ''\n for word in self.docs[d_id].keys():\n string += word + ' '\n print (string)", "def top_sentences(query, sentences, idfs, n):\n sentence_scores = dict()\n\n for sentence, words in sentences.items():\n words_in_query = query.intersection(words)\n \n # idf value of sentence\n idf = 0\n for word in words_in_query:\n idf += idfs[word]\n \n # query term density of sentence\n num_words_in_query = sum(map(lambda x: x in words_in_query, words))\n query_term_density = num_words_in_query / len(words)\n\n # update sentence scores with idf and query term density values\n sentence_scores[sentence] = {'idf': idf, 'qtd': query_term_density}\n \n # rank sentences by idf then query term density\n 
ranked_sentences = sorted(sentence_scores.items(), key=lambda x: (x[1]['idf'], x[1]['qtd']), reverse=True)\n ranked_sentences = [x[0] for x in ranked_sentences]\n\n return ranked_sentences[:n]", "def get_top_featured_entries(number=5):\n return list(Entry.published.filter(featured=True)[:number])", "def rank_and_store_documents(self, query,querynumber):\n return", "def retrieve(self, query: str, filters: dict = None, top_k: Optional[int] = None, index: str = None) -> List[Document]:\n if top_k is None:\n top_k = self.top_k\n if index is None:\n index = self.document_store.index\n documents = self.document_store.query(query=None, filters=filters, top_k=top_k,\n custom_query=self.custom_query, index=index)\n return documents", "def get_similarities(self, query, n=5):\n scores = self.get_scores(query)\n rank_n = np.argsort(scores)[::-1]\n if n > 0:\n rank_n = rank_n[:n]\n return [self.corpus[i] for i in rank_n]", "def top_sentences(query, sentences, idf, n):\n ll=[]\n for s in sentences:\n st=sentences[s]\n st=[word.lower() for word in st]\n found_word=0\n total_idf=0\n\n for word in query:\n if word in st:\n total_idf+=idf[word]\n found_word+=1 \n ll.append((total_idf,found_word/len(st),s))\n ll.sort(reverse=True)\n #print(ll)\n ans=[]\n for i in range(n):\n ans.append(ll[i][2])\n #print(\"answer is : \",*ans)\n return ans", "def retrieve(self, query: str, filters: dict = None, top_k: Optional[int] = None, index: str = None) -> List[Document]:\n if top_k is None:\n top_k = self.top_k\n if index is None:\n index = self.document_store.index\n\n documents = self.document_store.query(query, filters, top_k, self.custom_query, index)\n return documents", "def get_relevant_documents(self, n_top_hits: Optional[int] = 10) -> List[str]:\n search_string_clean = [self.clean_document(self.search_string)]\n q_vec = self.vectorizer.transform(search_string_clean).toarray().reshape(self.df_tdm.shape[0], )\n sim = {} # Calculate the similarity\n for i in range(n_top_hits):\n print(i)\n sim[i] = np.dot(self.df_tdm.loc[:, i].values, q_vec) / np.linalg.norm(\n self.df_tdm.loc[:, i]) * np.linalg.norm(q_vec)\n # Sort the values\n sim_sorted = sorted(sim.items(), key=lambda item: item[1],\n reverse=True) # Print the articles and their similarity values\n for k, v in sim_sorted:\n if v != 0.0:\n self.search_results.append(self.documents_names[k])\n # print(docs[k])\n return self.search_results", "def get_n_most_read_books(self, n):\n if type(n) != int:\n print(\"The argument n = {n} is not an integer. Try again with an integer\".format(n=n))\n else:\n sorted_books = [ book for book in sorted(self.books, key=self.books.get, reverse=True)]\n return sorted_books", "def get_top_N(root,n):\n\n\tglobal files\n\tfind_files(root)\n\theapq.heapify(files)\n\tret = heapq.nlargest(n,files)\n\tfiles = []\n\treturn ret", "def getQueries(self):\n return sorted( self.qryDocs.keys() )" ]
[ "0.7397129", "0.7190849", "0.7050551", "0.7031025", "0.6947377", "0.6884981", "0.68634695", "0.67815554", "0.6779356", "0.66407865", "0.6520463", "0.63906056", "0.617372", "0.6103085", "0.6072736", "0.60727215", "0.60646886", "0.59424716", "0.58749926", "0.57950824", "0.57085073", "0.56906945", "0.5678638", "0.56538653", "0.5650434", "0.56487787", "0.56476086", "0.5611045", "0.5606462", "0.55881083" ]
0.8405925
0
Method to create the Dataset class gtk.Notebook() page for displaying assessment inputs for the selected data set.
def _create_analyses_input_page(self, notebook):    # pylint: disable=R0914
    # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
    # Build-up the containers for the tab.                          #
    # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
    _hbox = gtk.HPaned()

    _fixed = gtk.Fixed()

    _frame = Widgets.make_frame(label=_(u"Analysis Inputs"))
    _frame.set_shadow_type(gtk.SHADOW_ETCHED_IN)
    _frame.add(_fixed)

    _hbox.pack1(_frame, True, True)

    # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
    # Place the widgets used to display analysis input information. #
    # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
    # Load the gtk.ComboBox() widgets.
    _results = [[u"MCF"], [u"Kaplan-Meier"], [_(u"NHPP - Power Law")],
                [u"NHPP - Loglinear"], [_(u"Exponential")], [_(u"Lognormal")],
                [_(u"Normal")], [u"Weibull"], ["WeiBayes"]]
    Widgets.load_combo(self.cmbDistribution, _results)

    _results = [[_(u"Lower One-Sided")], [_(u"Upper One-Sided")],
                [_(u"Two-Sided")]]
    Widgets.load_combo(self.cmbConfType, _results)

    _results = [[_(u"Crow (NHPP Only)")], [_(u"Duane (NHPP Only)")],
                [_(u"Fisher Matrix")], [_(u"Likelihood")], [_(u"Bootstrap")]]
    Widgets.load_combo(self.cmbConfMethod, _results)

    _results = [["MLE"], [_(u"Regression")]]
    Widgets.load_combo(self.cmbFitMethod, _results)

    # Create the labels for the left half of the right side.
    _labels = [_(u"Assembly:"), _(u"Description:"), _(u"Distribution:"),
               _("Fit Method:"), _(u"Confidence:"), _(u"Confidence Type:"),
               _("Confidence Method:")]
    (_x_pos1, _y_pos1) = Widgets.make_labels(_labels, _fixed, 5, 5)
    _x_pos1 += 55

    # Create the labels for the right half of the right side.
    _labels = [_(u"Start Time:"), _(u"End Time:"), _(u"Step Interval:"),
               _(u"Start Date:"), _(u"End Date:")]
    (_x_pos2, _y_pos2) = Widgets.make_labels(_labels, _fixed, _x_pos1 + 215, 5)
    _x_pos2 += _x_pos1
    _x_pos2 += 275

    # Place widgets on the left side.
    _fixed.put(self.cmbAssembly, _x_pos1, _y_pos1[0])
    _fixed.put(self.txtDescription, _x_pos1, _y_pos1[1])
    _fixed.put(self.cmbDistribution, _x_pos1, _y_pos1[2])
    _fixed.put(self.cmbFitMethod, _x_pos1, _y_pos1[3])
    _fixed.put(self.txtConfidence, _x_pos1, _y_pos1[4])
    _fixed.put(self.cmbConfType, _x_pos1, _y_pos1[5])
    _fixed.put(self.cmbConfMethod, _x_pos1, _y_pos1[6])

    # Place widgets on the right side.
    _fixed.put(self.txtStartTime, _x_pos2, _y_pos2[0])
    _fixed.put(self.txtEndTime, _x_pos2, _y_pos2[1])
    _fixed.put(self.txtRelPoints, _x_pos2, _y_pos2[2])
    _fixed.put(self.txtStartDate, _x_pos2, _y_pos2[3])
    _fixed.put(self.btnStartDate, _x_pos2 + 105, _y_pos2[3])
    _fixed.put(self.txtEndDate, _x_pos2, _y_pos2[4])
    _fixed.put(self.btnEndDate, _x_pos2 + 105, _y_pos2[4])
    _fixed.put(self.chkGroup, _x_pos2, _y_pos2[4] + 30)
    _fixed.put(self.chkParts, _x_pos2, _y_pos2[4] + 60)

    _fixed.show_all()

    # Insert the tab.
    _label = gtk.Label()
    _label.set_markup("<span weight='bold'>" + _(u"Analysis\nInputs") +
                      "</span>")
    _label.set_alignment(xalign=0.5, yalign=0.5)
    _label.set_justify(gtk.JUSTIFY_CENTER)
    _label.show_all()
    _label.set_tooltip_text(_(u"Displays analysis inputs for the selected "
                              u"dataset."))
    notebook.insert_page(_hbox, tab_label=_label, position=-1)

    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_notebook(self):\r\n\r\n _notebook = gtk.Notebook()\r\n\r\n # Set the user's preferred gtk.Notebook tab position.\r\n if Configuration.TABPOS[2] == 'left':\r\n _notebook.set_tab_pos(gtk.POS_LEFT)\r\n elif Configuration.TABPOS[2] == 'right':\r\n _notebook.set_tab_pos(gtk.POS_RIGHT)\r\n elif Configuration.TABPOS[2] == 'top':\r\n _notebook.set_tab_pos(gtk.POS_TOP)\r\n else:\r\n _notebook.set_tab_pos(gtk.POS_BOTTOM)\r\n\r\n self._create_analyses_input_page(_notebook)\r\n\r\n for __, _dist in enumerate(self._lst_results):\r\n _dist.create_results_page()\r\n for __, _dist in enumerate(self._lst_plots):\r\n _dist.create_plot_page()\r\n\r\n return _notebook", "def _create_general_data_page(self, notebook):\r\n\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n # Build-up the containers for the tab. #\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n _fixed = gtk.Fixed()\r\n\r\n _scrollwindow = gtk.ScrolledWindow()\r\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC,\r\n gtk.POLICY_AUTOMATIC)\r\n _scrollwindow.add_with_viewport(_fixed)\r\n\r\n _frame = Widgets.make_frame(label=_(u\"General Information\"))\r\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\r\n _frame.add(_scrollwindow)\r\n\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n # Place the widgets used to display general information about #\r\n # the function. #\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n _labels = [_(u\"Function Code:\"), _(u\"Function Name:\")]\r\n (_max1, _y_pos1) = Widgets.make_labels(_labels, _fixed, 5, 5)\r\n\r\n _labels = [_(u\"Total Cost:\"), _(u\"Total Mode Count:\"),\r\n _(u\"Total Part Count:\"), _(u\"Remarks:\")]\r\n _y_start = self.txtName.size_request()[1] + _y_pos1[1] + 5\r\n (_max2, _y_pos2) = Widgets.make_labels(_labels, _fixed, 5, _y_start)\r\n _x_pos = max(_max1, _max2) + 50\r\n\r\n # Set the tooltips.\r\n self.txtCode.set_tooltip_text(_(u\"Enter a unique code for the \"\r\n u\"selected function.\"))\r\n self.txtName.set_tooltip_text(_(u\"Enter the name of the selected \"\r\n u\"function.\"))\r\n self.txtTotalCost.set_tooltip_text(_(u\"Displays the total cost of \"\r\n u\"the selected function.\"))\r\n self.txtModeCount.set_tooltip_text(_(u\"Displays the total number \"\r\n u\"of failure modes \"\r\n u\"associated with the \"\r\n u\"selected function.\"))\r\n self.txtPartCount.set_tooltip_text(_(u\"Displays the total number \"\r\n u\"of components associated \"\r\n u\"with the selected \"\r\n u\"function.\"))\r\n self.txtRemarks.set_tooltip_text(_(u\"Enter any remarks related to \"\r\n u\"the selected function.\"))\r\n self.chkSafetyCritical.set_tooltip_text(_(u\"Indicates whether or \"\r\n u\"not the selected \"\r\n u\"function is safety \"\r\n u\"critical.\"))\r\n\r\n # Place the widgets.\r\n _fixed.put(self.txtCode, _x_pos, _y_pos1[0])\r\n _fixed.put(self.txtName, _x_pos, _y_pos1[1])\r\n _fixed.put(self.txtTotalCost, _x_pos, _y_pos2[0])\r\n _fixed.put(self.txtModeCount, _x_pos, _y_pos2[1])\r\n _fixed.put(self.txtPartCount, _x_pos, _y_pos2[2])\r\n _fixed.put(self.txtRemarks, _x_pos, _y_pos2[3])\r\n _fixed.put(self.chkSafetyCritical, 5, _y_pos2[3] + 110)\r\n\r\n # Connect to callback functions for editable gtk.Widgets().\r\n self._lst_handler_id.append(\r\n self.txtCode.connect('focus-out-event', self._on_focus_out, 4))\r\n _textview = self.txtName.get_child().get_child()\r\n self._lst_handler_id.append(\r\n _textview.connect('focus-out-event', self._on_focus_out, 14))\r\n _textview = 
self.txtRemarks.get_child().get_child()\r\n self._lst_handler_id.append(\r\n _textview.connect('focus-out-event', self._on_focus_out, 15))\r\n\r\n # Connect to callback functions for uneditable gtk.Widgets().\r\n self.txtTotalCost.connect('changed', self._on_changed, 5)\r\n self.txtModeCount.connect('changed', self._on_changed, 16)\r\n self.txtPartCount.connect('changed', self._on_changed, 17)\r\n\r\n _fixed.show_all()\r\n\r\n # Insert the tab.\r\n _label = gtk.Label()\r\n _label.set_markup(\"<span weight='bold'>\" + _(u\"General\\nData\") +\r\n \"</span>\")\r\n _label.set_alignment(xalign=0.5, yalign=0.5)\r\n _label.set_justify(gtk.JUSTIFY_CENTER)\r\n _label.set_tooltip_text(_(u\"Displays general information for the \"\r\n u\"selected function.\"))\r\n _label.show_all()\r\n notebook.insert_page(_frame, tab_label=_label, position=-1)\r\n\r\n return False", "def _create_notebook(self):\r\n\r\n _notebook = gtk.Notebook()\r\n\r\n # Set the user's preferred gtk.Notebook tab position.\r\n if Configuration.TABPOS[2] == 'left':\r\n _notebook.set_tab_pos(gtk.POS_LEFT)\r\n elif Configuration.TABPOS[2] == 'right':\r\n _notebook.set_tab_pos(gtk.POS_RIGHT)\r\n elif Configuration.TABPOS[2] == 'top':\r\n _notebook.set_tab_pos(gtk.POS_TOP)\r\n else:\r\n _notebook.set_tab_pos(gtk.POS_BOTTOM)\r\n\r\n self._create_general_data_page(_notebook)\r\n self._create_fmea_page(_notebook)\r\n self._create_assessment_results_page(_notebook)\r\n\r\n return _notebook", "def create_risk_analysis_page(self, notebook):\r\n\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n # Build-up the containers for the tab. #\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n _hpaned = gtk.HPaned()\r\n self.pack1(_hpaned, resize=True, shrink=True)\r\n\r\n # Create the organizational risk pane.\r\n _fixed = gtk.Fixed()\r\n\r\n _scrollwindow = gtk.ScrolledWindow()\r\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\r\n _scrollwindow.add_with_viewport(_fixed)\r\n\r\n _frame = Widgets.make_frame(label=_(u\"Organization\"))\r\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\r\n _frame.add(_scrollwindow)\r\n\r\n _hpaned.pack1(_frame, True, True)\r\n\r\n _labels = [_(u\"1. There are separate design and coding \"\r\n u\"organizations.\"),\r\n _(u\"2. There is an independent software test \"\r\n u\"organization.\"),\r\n _(u\"3. There is an independent software quality \"\r\n u\"assurance organization.\"),\r\n _(u\"4. There is an independent software configuration \"\r\n u\"management organization.\"),\r\n _(u\"5. There is an independent software verification \"\r\n u\"and validation organization.\"),\r\n _(u\"6. A structured programming team will develop the \"\r\n u\"software.\"),\r\n _(u\"7. The educational level of the software team members \"\r\n u\"is above average.\"),\r\n _(u\"8. 
The experience level of the software team members \"\r\n u\"is above average.\")]\r\n (_x_pos,\r\n _y_pos) = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\r\n _x_pos += 125\r\n\r\n _fixed.put(self.chkDevEnvQ1, _x_pos, _y_pos[0])\r\n _fixed.put(self.chkDevEnvQ2, _x_pos, _y_pos[1])\r\n _fixed.put(self.chkDevEnvQ3, _x_pos, _y_pos[2])\r\n _fixed.put(self.chkDevEnvQ4, _x_pos, _y_pos[3])\r\n _fixed.put(self.chkDevEnvQ5, _x_pos, _y_pos[4])\r\n _fixed.put(self.chkDevEnvQ6, _x_pos, _y_pos[5])\r\n _fixed.put(self.chkDevEnvQ7, _x_pos, _y_pos[6])\r\n _fixed.put(self.chkDevEnvQ8, _x_pos, _y_pos[7])\r\n\r\n # Create the methods risk pane.\r\n _fixed = gtk.Fixed()\r\n\r\n _scrollwindow = gtk.ScrolledWindow()\r\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\r\n _scrollwindow.add_with_viewport(_fixed)\r\n\r\n _frame = Widgets.make_frame(label=_(u\"Methods\"))\r\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\r\n _frame.add(_scrollwindow)\r\n\r\n _hpaned.pack2(_frame, True, True)\r\n\r\n _labels = [_(u\"1. Standards are defined and will be enforced.\"),\r\n _(u\"2. Software will be developed using a higher order \"\r\n u\"language.\"),\r\n _(u\"3. The development process will include formal \"\r\n u\"reviews (PDR, CDR, etc.).\"),\r\n _(u\"4. The development process will include frequent \"\r\n u\"walkthroughs.\"),\r\n _(u\"5. Development will take a top-down and \"\r\n u\"structured approach.\"),\r\n _(u\"6. Unit development folders will be used.\"),\r\n _(u\"7. A software development library will be used.\"),\r\n _(u\"8. A formal change and error reporting process \"\r\n u\"will be used.\"),\r\n _(u\"9. Progress and status will routinely be \"\r\n u\"reported.\")]\r\n (__, _y_pos) = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\r\n\r\n _fixed.put(self.chkDevEnvQ9, _x_pos, _y_pos[0])\r\n _fixed.put(self.chkDevEnvQ10, _x_pos, _y_pos[1])\r\n _fixed.put(self.chkDevEnvQ11, _x_pos, _y_pos[2])\r\n _fixed.put(self.chkDevEnvQ12, _x_pos, _y_pos[3])\r\n _fixed.put(self.chkDevEnvQ13, _x_pos, _y_pos[4])\r\n _fixed.put(self.chkDevEnvQ14, _x_pos, _y_pos[5])\r\n _fixed.put(self.chkDevEnvQ15, _x_pos, _y_pos[6])\r\n _fixed.put(self.chkDevEnvQ16, _x_pos, _y_pos[7])\r\n _fixed.put(self.chkDevEnvQ17, _x_pos, _y_pos[8])\r\n\r\n # Create the documentation risk pane.\r\n _hpaned = gtk.HPaned()\r\n self.pack2(_hpaned, resize=True, shrink=True)\r\n\r\n _fixed = gtk.Fixed()\r\n\r\n _scrollwindow = gtk.ScrolledWindow()\r\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\r\n _scrollwindow.add_with_viewport(_fixed)\r\n\r\n _frame = Widgets.make_frame(label=_(u\"Documentation\"))\r\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\r\n _frame.add(_scrollwindow)\r\n\r\n _hpaned.pack1(_frame, True, True)\r\n\r\n _labels = [_(u\" 1. System requirements specifications will be \"\r\n u\"documented.\"),\r\n _(u\" 2. Software requirements specifications will be \"\r\n u\"documented.\"),\r\n _(u\" 3. Interface design specifications will be \"\r\n u\"documented.\"),\r\n _(u\" 4. Software design specification will be \"\r\n u\"documented.\"),\r\n _(u\" 5. Test plans, procedures, and reports will be \"\r\n u\"documented.\"),\r\n _(u\" 6. The software development plan will be \"\r\n u\"documented.\"),\r\n _(u\" 7. The software quality assurance plan will be \"\r\n u\"documented.\"),\r\n _(u\" 8. The software configuration management plan will \"\r\n u\"be documented.\"),\r\n _(u\" 9. A requirements traceability matrix will be \"\r\n u\"used.\"),\r\n _(u\"10. 
The software version description will be \"\r\n u\"documented.\"),\r\n _(u\"11. All software discrepancies will be \"\r\n u\"documented.\")]\r\n (__, _y_pos) = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\r\n\r\n _fixed.put(self.chkDevEnvQ18, _x_pos, _y_pos[0])\r\n _fixed.put(self.chkDevEnvQ19, _x_pos, _y_pos[1])\r\n _fixed.put(self.chkDevEnvQ20, _x_pos, _y_pos[2])\r\n _fixed.put(self.chkDevEnvQ21, _x_pos, _y_pos[3])\r\n _fixed.put(self.chkDevEnvQ22, _x_pos, _y_pos[4])\r\n _fixed.put(self.chkDevEnvQ23, _x_pos, _y_pos[5])\r\n _fixed.put(self.chkDevEnvQ24, _x_pos, _y_pos[6])\r\n _fixed.put(self.chkDevEnvQ25, _x_pos, _y_pos[7])\r\n _fixed.put(self.chkDevEnvQ26, _x_pos, _y_pos[8])\r\n _fixed.put(self.chkDevEnvQ27, _x_pos, _y_pos[9])\r\n _fixed.put(self.chkDevEnvQ28, _x_pos, _y_pos[10])\r\n\r\n # Create the tools and test techniques risk pane.\r\n _fixed = gtk.Fixed()\r\n\r\n _scrollwindow = gtk.ScrolledWindow()\r\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\r\n _scrollwindow.add_with_viewport(_fixed)\r\n\r\n _frame = Widgets.make_frame(label=_(u\"Tools &amp; Test Techniques\"))\r\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\r\n _frame.add(_scrollwindow)\r\n\r\n _hpaned.pack2(_frame, True, True)\r\n\r\n _labels = [_(u\" 1. The software language requirements will be \"\r\n u\"specified.\"),\r\n _(u\" 2. Formal program design language will be used.\"),\r\n _(u\" 3. Program design graphical techniques \"\r\n u\"(flowcharts, HIPO, etc.) will be used.\"),\r\n _(u\" 4. Simulation/emulation tools will be used.\"),\r\n _(u\" 5. Configuration management tools will be used.\"),\r\n _(u\" 6. A code auditing tool will be used.\"),\r\n _(u\" 7. A data flow analyzer will be used.\"),\r\n _(u\" 8. A programmer's workbench will be used.\"),\r\n _(u\" 9. Measurement tools will be used.\"),\r\n _(u\"10. Software code reviews will be used.\"),\r\n _(u\"11. Software branch testing will be used.\"),\r\n _(u\"12. Random testing will be used.\"),\r\n _(u\"13. Functional testing will be used.\"),\r\n _(u\"14. Error and anomaly detection testing will be \"\r\n u\"used.\"),\r\n _(u\"15. 
Structure analysis will be used.\")]\r\n (__, _y_pos) = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\r\n\r\n _fixed.put(self.chkDevEnvQ29, _x_pos, _y_pos[0])\r\n _fixed.put(self.chkDevEnvQ30, _x_pos, _y_pos[1])\r\n _fixed.put(self.chkDevEnvQ31, _x_pos, _y_pos[2])\r\n _fixed.put(self.chkDevEnvQ32, _x_pos, _y_pos[3])\r\n _fixed.put(self.chkDevEnvQ33, _x_pos, _y_pos[4])\r\n _fixed.put(self.chkDevEnvQ34, _x_pos, _y_pos[5])\r\n _fixed.put(self.chkDevEnvQ35, _x_pos, _y_pos[6])\r\n _fixed.put(self.chkDevEnvQ36, _x_pos, _y_pos[7])\r\n _fixed.put(self.chkDevEnvQ37, _x_pos, _y_pos[8])\r\n _fixed.put(self.chkDevEnvQ38, _x_pos, _y_pos[9])\r\n _fixed.put(self.chkDevEnvQ39, _x_pos, _y_pos[10])\r\n _fixed.put(self.chkDevEnvQ40, _x_pos, _y_pos[11])\r\n _fixed.put(self.chkDevEnvQ41, _x_pos, _y_pos[12])\r\n _fixed.put(self.chkDevEnvQ42, _x_pos, _y_pos[13])\r\n _fixed.put(self.chkDevEnvQ43, _x_pos, _y_pos[14])\r\n\r\n _label = gtk.Label()\r\n _label.set_markup(\"<span weight='bold'>\" +\r\n _(u\"Development\\nEnvironment\") +\r\n \"</span>\")\r\n _label.set_alignment(xalign=0.5, yalign=0.5)\r\n _label.set_justify(gtk.JUSTIFY_CENTER)\r\n _label.set_angle(0)\r\n _label.show_all()\r\n _label.set_tooltip_text(_(u\"Assesses risk due to the development \"\r\n u\"environment.\"))\r\n notebook.insert_page(self, tab_label=_label, position=-1)\r\n\r\n return False", "def _create_assessment_results_page(self, notebook):\r\n\r\n _hbox = gtk.HBox()\r\n\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n # Build the left half of the page. #\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n _fixed = gtk.Fixed()\r\n\r\n _scrollwindow = gtk.ScrolledWindow()\r\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\r\n _scrollwindow.add_with_viewport(_fixed)\r\n\r\n _frame = Widgets.make_frame(label=_(u\"Reliability Results\"))\r\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\r\n _frame.add(_scrollwindow)\r\n\r\n _hbox.pack_start(_frame)\r\n\r\n _labels = [_(u\"Predicted h(t):\"), _(u\"Mission h(t):\"), _(u\"MTBF:\"),\r\n _(u\"Mission MTBF:\")]\r\n (_x_pos, _y_pos) = Widgets.make_labels(_labels, _fixed, 5, 5)\r\n _x_pos += 50\r\n\r\n self.txtPredictedHt.set_tooltip_text(_(u\"Displays the predicted \"\r\n u\"failure intensity for \"\r\n u\"the selected function.\"))\r\n self.txtMissionHt.set_tooltip_text(_(u\"Displays the mission \"\r\n u\"failure intensity for the \"\r\n u\"selected function.\"))\r\n self.txtMTBF.set_tooltip_text(_(u\"Displays the limiting mean time \"\r\n u\"between failure (MTBF) for the \"\r\n u\"selected function.\"))\r\n self.txtMissionMTBF.set_tooltip_text(_(u\"Displays the mission \"\r\n u\"mean time between \"\r\n u\"failure (MTBF) for the \"\r\n u\"selected function.\"))\r\n\r\n _fixed.put(self.txtPredictedHt, _x_pos, _y_pos[0])\r\n _fixed.put(self.txtMissionHt, _x_pos, _y_pos[1])\r\n _fixed.put(self.txtMTBF, _x_pos, _y_pos[2])\r\n _fixed.put(self.txtMissionMTBF, _x_pos, _y_pos[3])\r\n\r\n # Connect to callback functions for uneditable gtk.Widgets().\r\n self.txtMissionHt.connect('changed', self._on_changed, 6)\r\n self.txtPredictedHt.connect('changed', self._on_changed, 7)\r\n self.txtMissionMTBF.connect('changed', self._on_changed, 11)\r\n self.txtMTBF.connect('changed', self._on_changed, 12)\r\n\r\n _fixed.show_all()\r\n\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n # Build the right half of the page. 
#\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n _fixed = gtk.Fixed()\r\n\r\n _scrollwindow = gtk.ScrolledWindow()\r\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\r\n _scrollwindow.add_with_viewport(_fixed)\r\n\r\n _frame = Widgets.make_frame(label=_(u\"Maintainability Results\"))\r\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\r\n _frame.add(_scrollwindow)\r\n\r\n _hbox.pack_end(_frame)\r\n\r\n _labels = [_(u\"MPMT:\"), _(u\"MCMT:\"), _(u\"MTTR:\"), _(u\"MMT:\"),\r\n _(u\"Availability:\"), _(u\"Mission Availability:\")]\r\n\r\n (_x_pos, _y_pos) = Widgets.make_labels(_labels, _fixed, 5, 5)\r\n _x_pos += 50\r\n\r\n self.txtMPMT.set_tooltip_text(_(u\"Displays the mean preventive \"\r\n u\"maintenance time (MPMT) for the \"\r\n u\"selected function.\"))\r\n self.txtMCMT.set_tooltip_text(_(u\"Displays the mean corrective \"\r\n u\"maintenance time (MCMT) for the \"\r\n u\"selected function.\"))\r\n self.txtMTTR.set_tooltip_text(_(u\"Displays the mean time to \"\r\n u\"repair (MTTR) for the selected \"\r\n u\"function.\"))\r\n self.txtMMT.set_tooltip_text(_(u\"Displays the mean maintenance \"\r\n u\"time (MMT) for the selected \"\r\n u\"function.\"))\r\n self.txtAvailability.set_tooltip_text(_(u\"Displays the limiting \"\r\n u\"availability for the \"\r\n u\"selected function.\"))\r\n self.txtMissionAt.set_tooltip_text(_(u\"Displays the mission \"\r\n u\"availability for the \"\r\n u\"selected function.\"))\r\n\r\n _fixed.put(self.txtMPMT, _x_pos, _y_pos[0])\r\n _fixed.put(self.txtMCMT, _x_pos, _y_pos[1])\r\n _fixed.put(self.txtMTTR, _x_pos, _y_pos[2])\r\n _fixed.put(self.txtMMT, _x_pos, _y_pos[3])\r\n _fixed.put(self.txtAvailability, _x_pos, _y_pos[4])\r\n _fixed.put(self.txtMissionAt, _x_pos, _y_pos[5])\r\n\r\n # Connect to callback functions for uneditable gtk.Widgets().\r\n self.txtAvailability.connect('changed', self._on_changed, 2)\r\n self.txtMissionAt.connect('changed', self._on_changed, 3)\r\n self.txtMMT.connect('changed', self._on_changed, 8)\r\n self.txtMCMT.connect('changed', self._on_changed, 9)\r\n self.txtMPMT.connect('changed', self._on_changed, 10)\r\n self.txtMTTR.connect('changed', self._on_changed, 13)\r\n\r\n _fixed.show_all()\r\n\r\n # Insert the tab.\r\n _label = gtk.Label()\r\n _label.set_markup(\"<span weight='bold'>\" + _(u\"Assessment\\nResults\") +\r\n \"</span>\")\r\n _label.set_alignment(xalign=0.5, yalign=0.5)\r\n _label.set_justify(gtk.JUSTIFY_CENTER)\r\n _label.set_tooltip_text(_(u\"Displays reliability, maintainability, \"\r\n u\"and availability assessment results for \"\r\n u\"the selected function.\"))\r\n _label.show_all()\r\n notebook.insert_page(_hbox, tab_label=_label, position=-1)\r\n\r\n return False", "def _create_notebook(self):\r\n\r\n _notebook = gtk.Notebook()\r\n\r\n # Set the user's preferred gtk.Notebook tab position.\r\n if Configuration.TABPOS[1] == 'left':\r\n _notebook.set_tab_pos(gtk.POS_LEFT)\r\n elif Configuration.TABPOS[1] == 'right':\r\n _notebook.set_tab_pos(gtk.POS_RIGHT)\r\n elif Configuration.TABPOS[1] == 'top':\r\n _notebook.set_tab_pos(gtk.POS_TOP)\r\n else:\r\n _notebook.set_tab_pos(gtk.POS_BOTTOM)\r\n\r\n self._create_risk_matrix_page(_notebook)\r\n self._create_testing_matrix_page(_notebook)\r\n\r\n return _notebook", "def _make_page(self):\n _fixed = gtk.Fixed()\n\n _scrollwindow = ramstk.RAMSTKScrolledWindow(_fixed)\n _frame = ramstk.RAMSTKFrame(label=_(u\"General Information\"))\n _frame.add(_scrollwindow)\n\n _x_pos, _y_pos = 
ramstk.make_label_group(self._lst_gendata_labels,\n _fixed, 5, 5)\n _x_pos += 50\n\n _fixed.put(self.txtCode, _x_pos, _y_pos[0])\n _fixed.put(self.txtName, _x_pos, _y_pos[1])\n _fixed.put(self.txtRemarks.scrollwindow, _x_pos, _y_pos[2])\n _fixed.put(self.chkSafetyCritical, 5, _y_pos[2] + 110)\n\n _fixed.show_all()\n\n _label = ramstk.RAMSTKLabel(\n _(u\"General\\nData\"),\n height=30,\n width=-1,\n justify=gtk.JUSTIFY_CENTER,\n tooltip=_(u\"Displays general information for the selected \"\n u\"function.\"))\n self.hbx_tab_label.pack_start(_label)\n\n return _frame", "def buildPage(self):\n args = {}\n args['valueCol'] = 'value'\n args['textCol'] = 'size'\n args['y'] = 'index'\n args['x'] = 'number'\n args['orientation'] = 'h'\n args['title'] = ''\n args['x_title'] = ''\n args['y_title'] = ''\n args['height'] = 900\n args['width'] = 900\n\n self.add_basic_layout()\n layout = hpstats.quick_numbers_panel()\n dfs = hpstats.get_db_stats_data()\n plots = []\n plots.append(hpstats.plot_store_size_components(dfs, title='DB Store Size', args=args))\n plots.append(hpstats.plot_node_rel_per_label(dfs, focus='nodes', title='Nodes per Label', args=args))\n plots.append(hpstats.plot_node_rel_per_label(dfs, focus='relationships', title='Relationships per Type', args=args))\n self.extend_layout(layout)\n self.extend_layout(plots)", "def _make_assessment_results_page(self):\n _hbox = gtk.HBox()\n\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\n # Build the left half of the page. #\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\n _fxd_left = gtk.Fixed()\n\n _scrollwindow = ramstk.RAMSTKScrolledWindow(_fxd_left)\n _frame = ramstk.RAMSTKFrame(label=_(u\"Reliability Results\"))\n _frame.add(_scrollwindow)\n\n _hbox.pack_start(_frame)\n\n _x_pos_l, _y_pos_l = ramstk.make_label_group(\n self._lst_assess_labels[0], _fxd_left, 5, 5)\n _x_pos_l += 50\n\n _fxd_left.put(self.txtActiveHt, _x_pos_l, _y_pos_l[0])\n _fxd_left.put(self.txtDormantHt, _x_pos_l, _y_pos_l[1])\n _fxd_left.put(self.txtSoftwareHt, _x_pos_l, _y_pos_l[2])\n _fxd_left.put(self.txtPredictedHt, _x_pos_l, _y_pos_l[3])\n _fxd_left.put(self.txtMissionHt, _x_pos_l, _y_pos_l[4])\n _fxd_left.put(self.txtMTBF, _x_pos_l, _y_pos_l[5])\n _fxd_left.put(self.txtMissionMTBF, _x_pos_l, _y_pos_l[6])\n _fxd_left.put(self.txtReliability, _x_pos_l, _y_pos_l[7])\n _fxd_left.put(self.txtMissionRt, _x_pos_l, _y_pos_l[8])\n _fxd_left.put(self.txtPartCount, _x_pos_l, _y_pos_l[9])\n\n _fxd_left.show_all()\n\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\n # Build the right half of the page. 
#\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\n _fxd_right = gtk.Fixed()\n\n _scrollwindow = ramstk.RAMSTKScrolledWindow(_fxd_right)\n _frame = ramstk.RAMSTKFrame(label=_(u\"Maintainability Results\"))\n _frame.add(_scrollwindow)\n\n _hbox.pack_end(_frame)\n\n _x_pos_r, _y_pos_r = ramstk.make_label_group(\n self._lst_assess_labels[1], _fxd_right, 5, 5)\n _x_pos_r += 55\n\n _fxd_right.put(self.txtMPMT, _x_pos_r, _y_pos_r[0])\n _fxd_right.put(self.txtMCMT, _x_pos_r, _y_pos_r[1])\n _fxd_right.put(self.txtMTTR, _x_pos_r, _y_pos_r[2])\n _fxd_right.put(self.txtMMT, _x_pos_r, _y_pos_r[3])\n _fxd_right.put(self.txtAvailability, _x_pos_r, _y_pos_r[4])\n _fxd_right.put(self.txtMissionAt, _x_pos_r, _y_pos_r[5])\n _fxd_right.put(self.txtTotalCost, _x_pos_r, _y_pos_r[6])\n _fxd_right.put(self.txtCostFailure, _x_pos_r, _y_pos_r[7])\n _fxd_right.put(self.txtCostHour, _x_pos_r, _y_pos_r[8])\n\n _fxd_right.show_all()\n\n _label = ramstk.RAMSTKLabel(\n _(u\"Assessment\\nResults\"),\n height=30,\n width=-1,\n justify=gtk.JUSTIFY_CENTER,\n tooltip=_(u\"Displays reliability, \"\n u\"maintainability, and availability \"\n u\"assessment results for the selected \"\n u\"{0:s}.\").format(self._module))\n self.hbx_tab_label.pack_start(_label)\n\n return (_hbox, _fxd_left, _fxd_right, _x_pos_l, _x_pos_r, _y_pos_l,\n _y_pos_r)", "def app():\n # Add title to the page\n st.title(\"Welcome to the Data Info page\")\n\n # Add subheader for the section\n st.subheader(\"View Data\")\n\n # Load the dataset\n X, y = load_data()\n df = pd.concat([X, y], axis=1)\n\n # Create an expansion option to check the data\n with st.expander(\"View data\"):\n st.dataframe(df)\n\n # Create a section to columns values\n # Give subheader\n st.subheader(\"Columns Summary:\")\n\n # Create a checkbox to get the summary.\n if st.checkbox(\"View Summary\"):\n st.dataframe(df.describe())\n\n # Create multiple check box in row\n col_name, col_dtype, col_data = st.columns(3)\n\n # Show name of all dataframe\n with col_name:\n if st.checkbox(\"Column Names\"):\n st.dataframe(df.columns)\n\n # Show datatype of all columns \n with col_dtype:\n if st.checkbox(\"Columns data types\"):\n dtypes = df.dtypes.apply(lambda x: x.name)\n st.dataframe(dtypes)\n \n # Show data for each columns\n with col_data: \n if st.checkbox(\"Columns Data\"):\n col = st.selectbox(\"Column Name\", list(df.columns))\n st.dataframe(df[col])\n \n # Add image for your data describtion.\n #st.image(\"./images/iris_classification_model.jpg\")\n\n # Add info about your dataset\\\n # st.write(\"Data Info\")\n\n # Add the link to you dataset\n # st.markdown(\"\"\"\n # <p style=\"font-size:24px\">\n # <a \n # href=\"https://github.com/ShishirShekhar/car-price-prediction/blob/main/about.py\"\n # target=_blank\n # style=\"text-decoration:none; color:red\"\n # >Dataset\n # </a> \n # </p>\n # \"\"\", unsafe_allow_html=True\n # )", "def layout_data_list(self):\n # Add splitter\n w, h = self.parent.GetSize()\n splitter = wx.SplitterWindow(self)\n splitter.SetMinimumPaneSize(50)\n splitter.SetSashGravity(1.0)\n\n file_sizer = wx.BoxSizer(wx.VERTICAL)\n file_sizer.SetMinSize(wx.Size(w/13, h*2/5))\n theory_sizer = wx.BoxSizer(wx.VERTICAL)\n theory_sizer.SetMinSize(wx.Size(w/13, h*2/5))\n\n self.tree_ctrl = DataTreeCtrl(parent=splitter,\n style=wx.SUNKEN_BORDER,\n root=\"Available Data\")\n\n self.tree_ctrl.Bind(CT.EVT_TREE_ITEM_CHECKING, self.on_check_item)\n self.tree_ctrl.Bind(CT.EVT_TREE_ITEM_MENU, self.on_right_click_data)\n # Create context menu for page\n 
self.data_menu = wx.Menu()\n id = wx.NewId()\n name = \"Data Info\"\n msg = \"Show Data Info\"\n self.data_menu.Append(id, name, msg)\n wx.EVT_MENU(self, id, self.on_data_info)\n\n id = wx.NewId()\n name = \"Save As\"\n msg = \"Save Theory/Data as a file\"\n self.data_menu.Append(id, name, msg)\n wx.EVT_MENU(self, id, self.on_save_as)\n\n quickplot_id = wx.NewId()\n name = \"Quick Plot\"\n msg = \"Plot the current Data\"\n self.data_menu.Append(quickplot_id, name, msg)\n wx.EVT_MENU(self, quickplot_id, self.on_quick_plot)\n\n self.plot3d_id = wx.NewId()\n name = \"Quick 3DPlot (Slow)\"\n msg = \"Plot3D the current 2D Data\"\n self.data_menu.Append(self.plot3d_id, name, msg)\n wx.EVT_MENU(self, self.plot3d_id, self.on_plot_3d)\n\n self.editmask_id = wx.NewId()\n name = \"Edit Mask\"\n msg = \"Edit Mask for the current 2D Data\"\n self.data_menu.Append(self.editmask_id, name, msg)\n wx.EVT_MENU(self, self.editmask_id, self.on_edit_data)\n\n self.tree_ctrl_theory = DataTreeCtrl(parent=splitter,\n style=wx.SUNKEN_BORDER,\n root=\"Available Theory\")\n self.tree_ctrl_theory.Bind(CT.EVT_TREE_ITEM_CHECKING,\n self.on_check_item)\n self.tree_ctrl_theory.Bind(CT.EVT_TREE_ITEM_MENU,\n self.on_right_click_theory)\n splitter.SplitHorizontally(self.tree_ctrl, self.tree_ctrl_theory)\n self.sizer1.Add(splitter, 1, wx.EXPAND | wx.ALL, 10)", "def __init__(self, parent):\n wx.Panel.__init__(self, parent=parent, id=wx.ID_ANY)\n \n # create the AuiNotebook instance\n self.nb = wx.aui.AuiNotebook(self)\n \n \n #self.param = Data.param\n #self.currentdata = Data.currentdata\n \n \"\"\"\n Each tab on the main gui gets an entry here. Then is added to the \n \"\"\"\n self.datapanel = self.DataPanel.TabPanel(self.nb, wx.ID_ANY)\n self.plotpanel = self.PlotPanel.TabPanel(self.nb, wx.ID_ANY)\n self.parampanel = self.ParamPanel.TabPanel(self.nb, wx.ID_ANY)\n #self.fitcodepanel = wx.py.editor.EditorFrame(self.nb , filename='imports/models/default.py')\n \n \"\"\"\n Set the visable names for the tabs.\n \"\"\" \n self.tabs = [\n (self.datapanel, \"Data\"),\n (self.plotpanel, \"Plot\"),\n (self.parampanel, \"Parameters\")\n #(self.fitcodepanel, \"Fitting Code\")\n ]\n \n \"\"\"\n Add the tabs to the manager and setup the automatic sizer.\n \"\"\" \n for page, label in self.tabs:\n self.nb.AddPage(page, label)\n \n self.sizer = wx.GridSizer()\n self.sizer.Add(self.nb, 1, wx.EXPAND)\n self.SetSizerAndFit(self.sizer)", "def _create_testing_matrix_page(self, notebook):\r\n\r\n # Build up the containers for the Software/Testing matrix page.\r\n _hbox = gtk.HBox()\r\n\r\n _bbox = gtk.VButtonBox()\r\n _bbox.set_layout(gtk.BUTTONBOX_START)\r\n _bbox.pack_start(self.btnSaveTest, False, False)\r\n\r\n _hbox.pack_start(_bbox, False, True)\r\n _hbox.pack_end(self.fraTestSelection, True, True)\r\n\r\n _label = gtk.Label()\r\n _label.set_markup(_(u\"<span weight='bold'>Testing\\nMatrix</span>\"))\r\n _label.set_alignment(xalign=0.5, yalign=0.5)\r\n _label.set_justify(gtk.JUSTIFY_CENTER)\r\n _label.show_all()\r\n _label.set_tooltip_text(_(u\"Displays the matrix showing relationships \"\r\n u\"between system software and system \"\r\n u\"tests.\"))\r\n\r\n notebook.insert_page(_hbox, tab_label=_label, position=-1)\r\n\r\n return False", "def getWidget(self):\n \n firstDataset = DashboardDataset.objects.filter(visualisation=self)[0]\n \n widget = {'name': self.name,\n 'id': \"vis\" + str(self.pk),\n 'pk': self.pk,\n 'category': self.category.name,\n 'type': self.type,\n 'dataset': [json.loads(d.dataJSON, cls=util.DateTimeDecoder) for d 
in DashboardDataset.objects.filter(visualisation=self)],\n 'datasetLabels': [d.name for d in DashboardDataset.objects.filter(visualisation=self)],\n 'sourceName': self.dataSource.name,\n 'sourceLink': self.dataSource.link,\n 'datasetName': firstDataset.name,\n 'datasetLink': firstDataset.link,\n 'description': self.description,\n 'xLabel': self.xLabel,\n 'yLabel': self.yLabel,\n 'sizeX': self.sizeX,\n 'sizeY': self.sizeY}\n return widget", "def start_GUI(self):\n experiment_names = list(GUI.api.get_experiment_names())\n #selected_exp = None #value picked in the list\n\n branch_log_dict = GUI.api.get_branching_indep_to_dep()\n #Separate columns for a new trial and a new experiment\n\n col_new_trial = [[sg.Radio('New Trial', \"RADIO1\", default=True, enable_events = True, key=\"new_trial_radio\", metadata='not_disable')],\n [sg.Text(text = \"Please pick your experiment from the list below:\")], \n [sg.Listbox(values=experiment_names, size=(30, 6), key=\"list\", select_mode = sg.LISTBOX_SELECT_MODE_SINGLE, enable_events= True)]]\n \n\n #metadata ahs true if we need to input filed\n col_new_experiment = [[sg.Radio('New experiment', \"RADIO1\", enable_events=True, key=\"new_exp_radio\", metadata='not_disable')]]\n col_new_experiment.extend(self.make_fields())#add fields to the form\n layout = [[sg.Column(col_new_trial), sg.Column(col_new_experiment)], \n [sg.Button(button_text= \"OK\", enable_events= True, key =\"OK\")]]\n \n window = sg.Window('New Data', layout, keep_on_top=True)#Creation of the window\n while True:\n event, values = window.read()\n # End program if user closes window or\n # presses the OK button\n # you can use switch-case here instead of if statements\n if event == sg.WIN_CLOSED:\n #Indicate abort\n return None, None, None, None\n elif event == \"new_exp_radio\":#if new experiment is picked, then disable the elements for the new trial\n #for evey field on which branching logic depends on, disable everything not selected\n window['list'].update(disabled = True)\n for row in col_new_experiment:\n for elem in row:\n if(elem.metadata != 'not_disable' and not isinstance(elem, sg.Text)):#do not block the radio button):\n window[elem.Key].update(disabled = False)\n \n self.clear_disable_all(window, branch_log_dict, col_new_experiment)#we could just enable a few, instead\n elif event == \"new_trial_radio\":#if new trial is picked, disable the elements for the new experiment, enable for the new trua\n #disable everything in the form\n for row in col_new_experiment:\n for elem in row:\n if(elem.metadata != 'not_disable' and not isinstance(elem, sg.Text)):#do not block the radio button and do not update textboxes\n window[elem.Key].update(disabled = True)\n #enable the listbox\n \n window['list'].update(disabled = False)\n elif event == \"OK\":\n field_missing = False\n #Check if the listbox has a value or the form has a value\n if values['new_exp_radio']:#we are doing new expriment\n # printing_params = {\"paxton\":\"\"}\n printing_params = {}\n #Check the all the stuff in the form of the new experiment\n for row in col_new_experiment:\n if(field_missing):\n break#do not check anymore\n for elem in row:\n if(elem.metadata != 'not_disable' and not isinstance(elem, sg.Text)):#do not check labels and the radio button\n if (elem.metadata and values[elem.Key]== \"\"): #value ahs to be filled and not empty\n field_missing = True\n sg.popup_ok('Required fields are missing!')#if at least one field is empty, throw a popup and stop checking\n break # Shows OK button\n #if at least one field 
does not have a value, then we generate a popup\n elif(values[elem.Key] != \"\"):#add to the dictonary of params\n printing_params[elem.Key] = values[elem.Key]\n \n if not field_missing:\n #if everything is filled, then validate\n \n #if user closes the popup, then the print is considered bad by default\n is_valid, field_name = self.validate_fields(window, values)\n if(is_valid):\n print_result, folderPath = self.getPicturesPrintEval()\n window.close()\n #now, we also return print_result\n return \"add_record\", printing_params, print_result, folderPath\n else:\n sg.popup_ok(\"The field could not be validated: \" + field_name)\n \n elif values['new_trial_radio']:#could use else\n if values['list'] == []:\n sg.popup_ok('Required fields are missing!')\n continue#go to while loop\n #we got here, so we now know the record_id of the experiment we want to do the new trial for\n record_lst = GUI.api.get_elements(values['list'][0])\n #create a new window with print quality + pictures\n print_result, folderPath = self.getPicturesPrintEval()\n window.close()\n return \"add_trial\", record_lst, print_result, folderPath\n elif event in branch_log_dict:#if branching logic is dependent on this element\n #we could only enable/disable stuff affected by the element\n self.enable_selected(window, copy.deepcopy(values), branch_log_dict, event)\n self.disable_not_selected(window, copy.deepcopy(values), branch_log_dict, event)", "def _set_notebook(self):\n\n self.notebook = ttk.Notebook(self)\n\n self.main_notebook = tk.Frame(self.notebook, bg='white')\n self.extra_notebook = tk.Frame(self.notebook, bg='white')\n self.settings_notebook = tk.Frame(self.notebook, bg='white')\n\n self.notebook.add(self.main_notebook, text='CCL Tools')\n self.notebook.add(self.extra_notebook, text='Extra Tools')\n self.notebook.add(self.settings_notebook, text='Settings')\n self.notebook.pack(expand=True, fill='both')", "def new_dataset(request):\n context = {'dataset_id': 'false', 'newdataset': True, 'is_owner': 'true'}\n return render(request, 'datasets/dataset.html', context)", "def setup_ui(self, datamodels):\n self.datamodels = []#Use self.add_data_display() to append\n self.dataviews = []\n\n self.centralwidget = QtGui.QWidget(self)\n #self.centralwidget.setMinimumSize(600, 400)\n\n #self.scrollarea = QScrollArea(self)\n self.setWidgetResizable(True)\n\n #Create layout\n self.verticalLayout = QtGui.QVBoxLayout(self.centralwidget)\n\n #Create couple of elements\n self.addDataDisplays = QtGui.QPushButton(self) #Draw (+) button to add data displays\n self.addDataDisplays.setText(\"+\")\n\n #self.verticalLayout.addWidget(self.addDataDisplays)\n\n for model in datamodels:\n self.add_data_display(model)\n #self.datamodels.append(model)\n\n self.addDataDisplays.clicked.connect(self.add_data_display)\n\n #add elements to layout\n self.setGeometry(self.geometry())\n self.setWidget(self.centralwidget)", "def _create_risk_matrix_page(self, notebook):\r\n\r\n # Build up the containers for the Software Risk matrix page.\r\n _hbox = gtk.HBox()\r\n\r\n _bbox = gtk.VButtonBox()\r\n _bbox.set_layout(gtk.BUTTONBOX_START)\r\n _bbox.pack_start(self.btnCalcRisk, False, False)\r\n\r\n _hbox.pack_start(_bbox, False, False)\r\n\r\n _scrollwindow = gtk.ScrolledWindow()\r\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\r\n _scrollwindow.add(self.tvwRiskMap)\r\n\r\n _frame = Widgets.make_frame()\r\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\r\n _frame.add(_scrollwindow)\r\n\r\n _hbox.pack_end(_frame, True, True)\r\n\r\n # 
Add the risk map.\r\n _headings = [_(u\"Software\\nModule\"), _(u\"Application\\nRisk\"),\r\n _(u\"Organization\\nRisk\"), _(u\"Anomaly\\nManagement\\nRisk\"),\r\n _(u\"Traceability\\nRisk\"), _(u\"Quality\\nAssurance\\nRisk\"),\r\n _(u\"Language\\nRisk\"), _(u\"Code\\nComplexity\\nRisk\"),\r\n _(u\"Modularity\\nRisk\"), _(u\"Overall\\nRisk\")]\r\n\r\n _model = gtk.TreeStore(gobject.TYPE_INT, gobject.TYPE_STRING,\r\n gtk.gdk.Pixbuf, gtk.gdk.Pixbuf, gtk.gdk.Pixbuf,\r\n gtk.gdk.Pixbuf, gtk.gdk.Pixbuf, gtk.gdk.Pixbuf,\r\n gtk.gdk.Pixbuf, gtk.gdk.Pixbuf, gtk.gdk.Pixbuf)\r\n self.tvwRiskMap.set_model(_model)\r\n self.tvwRiskMap.set_grid_lines(gtk.TREE_VIEW_GRID_LINES_BOTH)\r\n\r\n _cell = gtk.CellRendererText()\r\n _cell.set_property('visible', False)\r\n _column = gtk.TreeViewColumn()\r\n _column.set_visible(False)\r\n _column.set_sizing(gtk.TREE_VIEW_COLUMN_AUTOSIZE)\r\n _column.pack_start(_cell, True)\r\n _column.set_attributes(_cell, text=0)\r\n\r\n self.tvwRiskMap.append_column(_column)\r\n\r\n _label = gtk.Label()\r\n _label.set_alignment(xalign=0.5, yalign=0.5)\r\n _label.set_justify(gtk.JUSTIFY_CENTER)\r\n _label.set_property('angle', 90)\r\n _label.set_markup(\"<span weight='bold'>\" + _headings[0] + \"</span>\")\r\n _label.set_use_markup(True)\r\n _label.show_all()\r\n _column = gtk.TreeViewColumn()\r\n _column.set_widget(_label)\r\n _column.set_visible(True)\r\n _column.set_sizing(gtk.TREE_VIEW_COLUMN_AUTOSIZE)\r\n _cell = gtk.CellRendererText()\r\n _cell.set_property('visible', True)\r\n _column.pack_start(_cell, True)\r\n _column.set_attributes(_cell, text=1)\r\n\r\n self.tvwRiskMap.append_column(_column)\r\n\r\n for i in range(2, 11):\r\n _label = gtk.Label()\r\n _label.set_alignment(xalign=0.5, yalign=0.5)\r\n _label.set_justify(gtk.JUSTIFY_CENTER)\r\n _label.set_property('angle', 90)\r\n _label.set_markup(\"<span weight='bold'>\" +\r\n _headings[i - 1] +\r\n \"</span>\")\r\n _label.set_use_markup(True)\r\n _label.show_all()\r\n _column = gtk.TreeViewColumn()\r\n _column.set_widget(_label)\r\n _column.set_visible(True)\r\n _column.set_sizing(gtk.TREE_VIEW_COLUMN_AUTOSIZE)\r\n _cell = gtk.CellRendererPixbuf()\r\n _cell.set_property('xalign', 0.5)\r\n _cell.set_property('yalign', 0.5)\r\n _column.pack_start(_cell, False)\r\n _column.set_attributes(_cell, pixbuf=i)\r\n\r\n self.tvwRiskMap.append_column(_column)\r\n\r\n # Add the Software Risk Matrix page to the gtk.Notebook().\r\n _label = gtk.Label()\r\n _label.set_markup(_(u\"<span weight='bold'>Risk\\nMatrix</span>\"))\r\n _label.set_alignment(xalign=0.5, yalign=0.5)\r\n _label.set_justify(gtk.JUSTIFY_CENTER)\r\n _label.show_all()\r\n _label.set_tooltip_text(_(u\"Displays the matrix showing risk \"\r\n u\"between system functions and system \"\r\n u\"software items.\"))\r\n\r\n notebook.insert_page(_hbox, tab_label=_label, position=-1)\r\n\r\n return False", "def createUserInterface(self):\n\n\t\tself.__layout = self.__parent.createUserInterface()\n\n\t\tstep_label = qt.QLabel( 'Choose the volume you would like to threshold. If you are calculating a subtraction map, check the \\\"Calculate Subtraction Map\\\" box and select a post-contrast image.' 
)\n\t\tstep_label.setWordWrap(True)\n\t\tself.__primaryGroupBox = qt.QGroupBox()\n\t\tself.__primaryGroupBox.setTitle('Information')\n\t\tself.__primaryGroupBoxLayout = qt.QFormLayout(self.__primaryGroupBox)\n\n\t\tself.__subtractionMappingGroupBox = qt.QGroupBox()\n\t\tself.__subtractionMappingGroupBox.setTitle('Volume Selection')\n\t\tself.__subtractionMappingGroupBoxLayout = qt.QFormLayout(self.__subtractionMappingGroupBox)\n\n\t\tbaselineScanLabel = qt.QLabel( 'Primary / Pre-Contrast Image:' )\n\t\tself.__baselineVolumeSelector = slicer.qMRMLNodeComboBox()\n\t\tself.__baselineVolumeSelector.toolTip = \"Select the volume you wish to threshold. If you are calculating a subtraction map, this will be the pre-contrast scan.\"\n\t\tself.__baselineVolumeSelector.nodeTypes = ['vtkMRMLScalarVolumeNode']\n\t\tself.__baselineVolumeSelector.setMRMLScene(slicer.mrmlScene)\n\t\tself.__baselineVolumeSelector.addEnabled = 0\n\n\t\tsubtractionMappingLabel = qt.QLabel( 'Calculate Subtraction Map:' )\n\t\tself.__enableSubtractionMapping = qt.QCheckBox()\n\t\tself.__enableSubtractionMapping.checked = False\n\t\tself.__enableSubtractionMapping.setToolTip(\"Check if you would like to calculate a subtraction map\")\n\t\tself.__enableSubtractionMapping.connect('clicked()', self.setSubtractionMapping)\n\n\t\tfollowupScanLabel = qt.QLabel( 'Post-Contrast Image:' )\n\t\tself.__followupVolumeSelector = slicer.qMRMLNodeComboBox()\n\t\tself.__followupVolumeSelector.toolTip = \"Choose the post-contrast scan\"\n\t\tself.__followupVolumeSelector.nodeTypes = ['vtkMRMLScalarVolumeNode']\n\t\tself.__followupVolumeSelector.setMRMLScene(slicer.mrmlScene)\n\t\tself.__followupVolumeSelector.addEnabled = 0\n\t\tself.__followupVolumeSelector.enabled = 0\n\n\t\tself.__layout.addRow(self.__primaryGroupBox)\n\t\tself.__primaryGroupBoxLayout.addRow( step_label )\n\t\tself.__subtractionMappingGroupBoxLayout.addRow( baselineScanLabel, self.__baselineVolumeSelector )\n\n\t\tself.__layout.addRow(self.__subtractionMappingGroupBox)\n\t\tself.__subtractionMappingGroupBoxLayout.addRow( subtractionMappingLabel, self.__enableSubtractionMapping )\n\t\tself.__subtractionMappingGroupBoxLayout.addRow( followupScanLabel, self.__followupVolumeSelector )\n\n\t\tself.updateWidgetFromParameters(self.parameterNode())\n\n\t\t# This timer is a trick to wait for buttons to load BEFORE deleting them.\n\t\tqt.QTimer.singleShot(0, self.killButton)", "def __init__(self, modulebook):\r\n\r\n gtk.VBox.__init__(self)\r\n\r\n # Define private dictionary attributes.\r\n\r\n # Define private list attributes.\r\n self._lst_handler_id = []\r\n\r\n # Define private scalar attributes.\r\n self._mdcRTK = modulebook.mdcRTK\r\n self._dtcBoM = modulebook.mdcRTK.dtcSoftwareBoM\r\n self._model = None\r\n\r\n # Define public dictionary attributes.\r\n\r\n # Define public list attributes.\r\n\r\n # Define public scalar attributes.\r\n self.btnCalcRisk = Widgets.make_button(width=35, image='calculate')\r\n self.btnSaveTest = Widgets.make_button(width=35, image='save')\r\n\r\n self.fraTestSelection = Widgets.make_frame(\r\n label=_(u\"Test Technique Selection\"))\r\n self.fraTestSelection.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\r\n\r\n self.scwCSCITestSelection = TestSelection.CSCITestSelection()\r\n self.scwCSCITestSelection.create_test_planning_matrix()\r\n\r\n self.scwUnitTestSelection = TestSelection.UnitTestSelection()\r\n self.scwUnitTestSelection.create_test_planning_matrix()\r\n\r\n self.tvwRiskMap = gtk.TreeView()\r\n\r\n # Set tooltips for the gtk.Widgets().\r\n 
self.btnCalcRisk.set_tooltip_text(_(u\"Calculate the reliability \"\r\n u\"risk assessment.\"))\r\n self.tvwRiskMap.set_tooltip_markup(_(u\"Displays the risk associated \"\r\n u\"with the software system.\"))\r\n\r\n # Connect widget signals to callback methods.\r\n self._lst_handler_id.append(\r\n self.btnCalcRisk.connect('clicked',\r\n self._on_button_clicked, 0))\r\n self._lst_handler_id.append(\r\n self.btnSaveTest.connect('clicked',\r\n self._on_button_clicked, 1))\r\n\r\n # Put it all together.\r\n _notebook = self._create_notebook()\r\n self.pack_start(_notebook)\r\n\r\n self.show_all()", "def show_data(self):\r\n try:\r\n app.entry1.delete(0,END)\r\n app.entry2.delete(0,END)\r\n app.entry3.delete(0,END)\r\n app.entry4.delete(0,END)\r\n app.entry5.delete(0,END)\r\n self.name=app.tree.item(self.item)['values'][0]+' '+app.tree.item(self.item)['values'][1]\r\n app.space1.configure(text=app.tree.item(self.item)['values'][0])\r\n app.space2.configure(text=app.tree.item(self.item)['values'][1])\r\n app.space3.configure(text=app.tree.item(self.item)['text'])\r\n app.space4.configure(text=self.student[self.name][3])\r\n app.space5.configure(text=self.student[self.name][4])\r\n if self.student[self.name][5] != '':\r\n app.space6.configure(text=int(self.student[self.name][5]))\r\n app.entry1.insert(0,int(self.student[self.name][5]))\r\n else:\r\n app.space6.configure(text=self.student[self.name][5])\r\n app.entry1.insert(0,self.student[self.name][5])\r\n if self.student[self.name][6] != '':\r\n app.space7.configure(text=int(self.student[self.name][6]))\r\n app.entry2.insert(0,int(self.student[self.name][6]))\r\n else:\r\n app.space7.configure(text=self.student[self.name][6])\r\n app.entry2.insert(0,self.student[self.name][6])\r\n if self.student[self.name][7] != '':\r\n app.space8.configure(text=int(self.student[self.name][7]))\r\n app.entry3.insert(0,int(self.student[self.name][7]))\r\n else:\r\n app.space8.configure(text=self.student[self.name][7])\r\n app.entry3.insert(0,self.student[self.name][7])\r\n if self.student[self.name][8] != '':\r\n app.space9.configure(text=int(self.student[self.name][8]))\r\n app.entry4.insert(0,int(self.student[self.name][8]))\r\n else:\r\n app.space9.configure(text=self.student[self.name][8])\r\n app.entry4.insert(0,self.student[self.name][5])\r\n if self.student[self.name][9] != '':\r\n app.space10.configure(text=int(self.student[self.name][9]))\r\n app.entry5.insert(0,int(self.student[self.name][9]))\r\n else:\r\n app.space10.configure(text=self.student[self.name][9])\r\n app.entry5.insert(0,self.student[self.name][9])\r\n except AttributeError:\r\n if len(app.tree.get_children()) == 0:\r\n app.info.configure(text=\"INFO: Please Load the Files First.\", font=('', '7'))\r\n # If the user pressed on either 'Show Data' button or 'Save Grades' button before loading students list\r\n # file, 'Info' Label shows the message: 'INFO: Please Load the Files First.'\r\n else:\r\n app.info.configure(text=\"INFO: Please Select A Student First.\", font=('', '7'))\r\n # If the user pressed on 'Show Data' button without selecting a student from the students treeview,\r\n # 'Info' Label shows the following message: 'INFO: Please Select A Student First.'\r", "def create_panel(self):\n # Main Frame creation\n frame1 = Frame(self.window)\n frame1.pack(fill=\"both\")\n tablayout = Notebook(frame1)\n \n ##### TRACKER #####\n tab = Frame(tablayout) # creating 1st nested frame\n tab.pack(fill=\"both\")\n table = Frame(tab)\n table.pack(fill=\"both\")\n 
self.show_table(self.t.timeline[\"week\" + str(self.week)], table) # Grids the week with data\n self.add_buttons(tab, table)\n tablayout.add(tab, text=\"Current Week\") \n \n \n ##### STATS #####\n tab = Frame(tablayout) # creating 2nd nested frame\n tab.pack(fill=\"both\")\n self.stats.create_canvas(tab)\n\n\n # once its packed you can add it to the window object under a title\n tablayout.add(tab, text=\"Statistics\") \n tablayout.pack(fill=\"both\") # once everything is done now you pack the tablayout", "def create_widgets( self ):", "def create_layout() -> None:\n\n st.sidebar.title(\"Menu\")\n app_mode = st.sidebar.selectbox(\"Please select a page\", [' I. Homepage',\n \"II. Download data\" ,\n \"III. Statistic Data\",\n ' IV. AGF Indices',\n ' V. Notes',\n \" VI. Rank of patient\" ])\n \n if app_mode == ' I. Homepage':\n load_homepage() \n elif app_mode == \"III. Statistic Data\":\n leyer.leyer() \n elif app_mode == ' IV. AGF Indices':\n single.AGF_indices() \n elif app_mode == \"II. Download data\":\n download_data.download_data() \n elif app_mode == ' V. Notes':\n text_input.text_input()\n elif app_mode == \" VI. Rank of patient\":\n rank_of_patient.rank_of_patient()", "def create_widgets(self):", "def create_widgets( self ):\n\n self.selectionView = SelectionView()\n self.selectionView.setModel( self.proxyPhotosModel )\n self.selectionView.activated.connect( self.selectionActivation )\n self.selectionView.selectionModel().selectionChanged.connect( self.selectionChange )\n self.selectionView.setColumnHidden( self.ID_COLUMN, True ) # hide the ID\n\n self.selectionBox = QComboBox()\n\n self.selectionBox.addItem( \"all\", \"all\" )\n for state in self.db.get_processing_states():\n self.selectionBox.addItem( state, state )\n\n self.selectionBox.activated.connect( self.selectionTypeActivation )\n\n self.selectionBoxLabel = QLabel( \"&Processing Type:\" )\n self.selectionBoxLabel.setBuddy( self.selectionBox )\n\n self.previewArea = grafwidgets.PhotoPreviewArea()\n\n # informational labels for the photo record.\n self.infoStateLabel = QLabel()\n self.infoSummaryLabel = QLabel()\n self.infoLocationLabel = QLabel()\n self.infoTakenLabel = QLabel()\n self.infoTagsLabel = QLabel()\n\n # dock widget which will hold the selection layout once created\n # in create_layout, for now it gets an empty widget.\n self.selection_dock = QDockWidget()\n self.selection_dock.setFeatures( QDockWidget.DockWidgetMovable )\n self.selection_dock.setWidget( QWidget() )", "def dataset(value=None):\n data = getDBData()\n return render_template(\"dataset.html\",\n value=data\n )", "def show_data(self):\n\n self.area_canvas.axes.cla()\n self.draw_scatterplot(self.scatter_canvas, 'x [µm]', 'y [µm]', self.p_inputs['flip y-axis'].isChecked())\n self.draw_hist(self.area_canvas, 'area', 'cluster area [µm²]', 'number of clusters')\n self.draw_hist(self.number_canvas, 'nclusters', 'number of cluster', 'number of regions')\n self.draw_hist(self.density_canvas, 'density', 'cluster density [µm⁻²]', 'number of clusters')\n self.draw_hist(self.percentage_canvas, 'pclustered', 'percentage clustered',\n 'number of regions')\n self.draw_hist(self.ratio_canvas, 'reldensity', 'relative density clusters/background',\n 'number of regions')", "def __init__(self, controller, **kwargs):\n _module = kwargs['module']\n gtk.HBox.__init__(self)\n ramstk.RAMSTKBaseView.__init__(self, controller, module=_module)\n\n self._module = None\n for __, char in enumerate(_module):\n if char.isalpha():\n self._module = _module.capitalize()\n\n # 
Initialize private dictionary attributes.\n\n # Initialize private list attributes.\n self._lst_gendata_labels = [\n _(u\"{0:s} Code:\").format(self._module),\n _(u\"{0:s} Name:\").format(self._module),\n _(u\"Remarks:\")\n ]\n \"\"\"\n There are three labels that will appear on all General Data pages.\n Insert additional, WorkView specific labels into this list starting at\n position 2. In the __init__() method for the WorkView requiring\n specific labels, do something like the following:\n\n self._lst_gendata_labels.insert(1, _(u\"Specific Label:\"))\n\n This will ensure the Remarks widget is always at the bottom of the row\n of General Data page widgets. This, then, ensures WorkView specific\n widgets don't overlap the Remarks widget.\n \"\"\"\n\n self._lst_assess_labels = [[\n _(u\"Active Failure Intensity [\\u039B(t)]:\"),\n _(u\"Dormant \\u039B(t):\"),\n _(u\"Software \\u039B(t):\"),\n _(u\"Predicted h(t):\"),\n _(u\"Mission h(t):\"),\n _(u\"MTBF:\"),\n _(u\"Mission MTBF:\"),\n _(u\"Reliability [R(t)]:\"),\n _(u\"Mission R(t):\"),\n _(u\"Total Parts:\")\n ], [\n _(u\"Mean Preventive Maintenance Time [MPMT]:\"),\n _(u\"Mean Corrective Maintenance Time [MCMT]:\"),\n _(u\"Mean Time to Repair [MTTR]:\"),\n _(u\"Mean Maintenance Time [MMT]:\"),\n _(u\"Availability [A(t)]:\"),\n _(u\"Mission A(t):\"),\n _(u\"Total Cost:\"),\n _(u\"Cost/Failure:\"),\n _(u\"Cost/Hour:\")\n ]]\n \"\"\"\n There are 10 labels that will appear in the left half and nine labels\n that will appear in the right half of all Assessment Results pages.\n Append additional, WorkView specific labels onto this list. In the\n __init__() method for the WorkView requiring specific labels, do\n something like the following:\n\n self._lst_assess_labels[0].append(_(u\"Specific Label:\"))\n \"\"\"\n\n # Initialize private scalar attributes.\n self._revision_id = None\n\n # Initialize public dictionary attributes.\n\n # Initialize public list attributes.\n\n # Initialize public scalar attributes.\n self.txtCode = ramstk.RAMSTKEntry(\n width=125,\n tooltip=_(u\"A unique code for the \"\n u\"selected {0:s}.\").format(self._module))\n self.txtName = ramstk.RAMSTKEntry(\n width=125,\n tooltip=_(u\"The name of the selected \"\n u\"{0:s}.\").format(self._module))\n self.txtRemarks = ramstk.RAMSTKTextView(\n gtk.TextBuffer(),\n width=400,\n tooltip=_(u\"Enter any remarks \"\n u\"associated with the \"\n u\"selected {0:s}.\").format(self._module))\n\n self.txtActiveHt = ramstk.RAMSTKEntry(\n width=125,\n editable=False,\n bold=True,\n tooltip=_(u\"Displays the active \"\n u\"failure intensity for the \"\n u\"selected {0:s}.\").format(self._module))\n self.txtDormantHt = ramstk.RAMSTKEntry(\n width=125,\n editable=False,\n bold=True,\n tooltip=_(u\"Displays the dormant \"\n u\"failure intensity for \"\n u\"the selected {0:s}.\").format(self._module))\n self.txtSoftwareHt = ramstk.RAMSTKEntry(\n width=125,\n editable=False,\n bold=True,\n tooltip=_(u\"Displays the software \"\n u\"failure intensity for \"\n u\"the selected {0:s}.\").format(self._module))\n self.txtPredictedHt = ramstk.RAMSTKEntry(\n width=125,\n editable=False,\n bold=True,\n tooltip=_(u\"Displays the logistics \"\n u\"failure intensity for \"\n u\"the selected {0:s}. 
\"\n u\"This is the sum of the \"\n u\"active, dormant, and \"\n u\"software hazard \"\n u\"rates.\").format(self._module))\n self.txtMissionHt = ramstk.RAMSTKEntry(\n width=125,\n editable=False,\n bold=True,\n tooltip=_(u\"Displays the mission \"\n u\"failure intensity for \"\n u\"the selected {0:s}.\").format(self._module))\n self.txtMTBF = ramstk.RAMSTKEntry(\n width=125,\n editable=False,\n bold=True,\n tooltip=_(u\"Displays the logistics mean \"\n u\"time between failure (MTBF) \"\n u\"for the selected {0:s}.\").format(self._module))\n self.txtMissionMTBF = ramstk.RAMSTKEntry(\n width=125,\n editable=False,\n bold=True,\n tooltip=_(u\"Displays the mission \"\n u\"mean time between \"\n u\"failure (MTBF) for the \"\n u\"selected {0:s}.\").format(self._module))\n self.txtReliability = ramstk.RAMSTKEntry(\n width=125,\n editable=False,\n bold=True,\n tooltip=_(u\"Displays the logistics \"\n u\"reliability for the \"\n u\"selected {0:s}.\").format(self._module))\n self.txtMissionRt = ramstk.RAMSTKEntry(\n width=125,\n editable=False,\n bold=True,\n tooltip=_(u\"Displays the mission \"\n u\"reliability for the \"\n u\"selected {0:s}.\").format(self._module))\n\n self.txtMPMT = ramstk.RAMSTKEntry(\n width=125,\n editable=False,\n bold=True,\n tooltip=_(u\"Displays the mean preventive \"\n u\"maintenance time (MPMT) for \"\n u\"the selected {0:s}.\").format(self._module))\n self.txtMCMT = ramstk.RAMSTKEntry(\n width=125,\n editable=False,\n bold=True,\n tooltip=_(u\"Displays the mean corrective \"\n u\"maintenance time (MCMT) for \"\n u\"the selected {0:s}.\").format(self._module))\n self.txtMTTR = ramstk.RAMSTKEntry(\n width=125,\n editable=False,\n bold=True,\n tooltip=_(u\"Displays the mean time to \"\n u\"repair (MTTR) for the \"\n u\"selected {0:s}.\").format(self._module))\n self.txtMMT = ramstk.RAMSTKEntry(\n width=125,\n editable=False,\n bold=True,\n tooltip=_(u\"Displays the mean maintenance \"\n u\"time (MMT) for the selected \"\n u\"{0:s}. This includes \"\n u\"preventive and corrective \"\n u\"maintenance.\").format(self._module))\n self.txtAvailability = ramstk.RAMSTKEntry(\n width=125,\n editable=False,\n bold=True,\n tooltip=_(u\"Displays the \"\n u\"logistics \"\n u\"availability for the \"\n u\"selected {0:s}.\").format(self._module))\n self.txtMissionAt = ramstk.RAMSTKEntry(\n width=125,\n editable=False,\n bold=True,\n tooltip=_(u\"Displays the mission \"\n u\"availability for the \"\n u\"selected {0:s}.\").format(self._module))\n self.txtPartCount = ramstk.RAMSTKEntry(\n width=125,\n editable=False,\n tooltip=_(u\"Displays the total part \"\n u\"count for the selected \"\n u\"{0:s}.\").format(self._module))\n self.txtTotalCost = ramstk.RAMSTKEntry(\n width=125,\n editable=False,\n tooltip=_(u\"Displays the total cost \"\n u\"of the selected \"\n u\"{0:s}.\").format(self._module))\n self.txtCostFailure = ramstk.RAMSTKEntry(\n width=125,\n editable=False,\n tooltip=_(u\"Displays the cost per \"\n u\"failure of the \"\n u\"selected {0:s}.\").format(self._module))\n self.txtCostHour = ramstk.RAMSTKEntry(\n width=125,\n editable=False,\n tooltip=_(u\"Displays the failure cost \"\n u\"per operating hour for \"\n u\"the selected {0:s}.\").format(self._module))\n\n pub.subscribe(self._on_select_revision, 'selectedRevision')" ]
[ "0.67163163", "0.669597", "0.65538305", "0.654544", "0.6439258", "0.62485445", "0.5970818", "0.5954215", "0.5790756", "0.57078046", "0.5697864", "0.5688682", "0.56205344", "0.557784", "0.5577001", "0.5522175", "0.5494343", "0.5491092", "0.5468219", "0.5451919", "0.5449248", "0.54434925", "0.5429991", "0.5429372", "0.5403703", "0.53837365", "0.5367112", "0.5355089", "0.5331676", "0.53129" ]
0.6900534
0
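The three short fields that close the row above are the 30 negative scores, the positive document's score, and its rank. With the values shown (a document score of 0.6900534 against a best negative of roughly 0.672), a rank of 0 is what falls out if the rank counts negatives that score above the positive document — that is one reading of the fields, not something the dump itself states. A minimal plain-Python sketch of that reading; the helper name is illustrative and the score values are copied from the row above:

# Illustrative sketch (not part of the dataset): one reading of the
# negative_scores / document_score / document_rank fields shown above.
def document_rank(document_score, negative_scores):
    # Rank 0 means the positive document outscores every negative.
    return sum(1 for score in negative_scores if score > document_score)

# First few negative scores copied from the row above (stored as strings in the dump).
negatives = [0.67163163, 0.669597, 0.65538305, 0.654544, 0.6439258]
assert document_rank(0.6900534, negatives) == 0  # matches the stored rank of 0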
Method to load the gtk.Widgets() on the analysis inputs page.
def _load_analysis_inputs_page(self): # Load the gtk.ComboBox() with system hardware names. self.cmbAssembly.handler_block(self._lst_handler_id[0]) Widgets.load_combo(self.cmbAssembly, Configuration.RTK_HARDWARE_LIST, simple=False) self.cmbAssembly.handler_unblock(self._lst_handler_id[0]) self.cmbAssembly.set_active(self._model.assembly_id) self.cmbDistribution.set_active(self._model.distribution_id) self.cmbConfType.set_active(self._model.confidence_type) self.cmbConfMethod.set_active(self._model.confidence_method) self.cmbFitMethod.set_active(self._model.fit_method) self.txtDescription.set_text(self._model.description) if self._model.confidence < 1.0: _confidence = self._model.confidence * 100.0 else: _confidence = self._model.confidence self.txtConfidence.set_text(str(_confidence)) self.txtStartTime.set_text(str(self._model.start_time)) self.txtEndTime.set_text(str(self._model.rel_time)) self.txtRelPoints.set_text(str(self._model.n_rel_points)) _start_date = Utilities.ordinal_to_date(self._model.start_date) _end_date = Utilities.ordinal_to_date(self._model.end_date) self.txtStartDate.set_text(str(_start_date)) self.txtEndDate.set_text(str(_end_date)) return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_analyses_input_page(self, notebook): # pylint: disable=R0914\r\n\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n # Build-up the containers for the tab. #\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n _hbox = gtk.HPaned()\r\n\r\n _fixed = gtk.Fixed()\r\n\r\n _frame = Widgets.make_frame(label=_(u\"Analysis Inputs\"))\r\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_IN)\r\n _frame.add(_fixed)\r\n\r\n _hbox.pack1(_frame, True, True)\r\n\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n # Place the widgets used to display analysis input information. #\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n # Load the gtk.ComboBox() widgets.\r\n _results = [[u\"MCF\"], [u\"Kaplan-Meier\"], [_(u\"NHPP - Power Law\")],\r\n [u\"NHPP - Loglinear\"], [_(u\"Exponential\")],\r\n [_(u\"Lognormal\")], [_(u\"Normal\")], [u\"Weibull\"],\r\n [\"WeiBayes\"]]\r\n Widgets.load_combo(self.cmbDistribution, _results)\r\n _results = [[_(u\"Lower One-Sided\")], [_(u\"Upper One-Sided\")],\r\n [_(u\"Two-Sided\")]]\r\n Widgets.load_combo(self.cmbConfType, _results)\r\n _results = [[_(u\"Crow (NHPP Only)\")], [_(u\"Duane (NHPP Only)\")],\r\n [_(u\"Fisher Matrix\")], [_(u\"Likelihood\")],\r\n [_(u\"Bootstrap\")]]\r\n Widgets.load_combo(self.cmbConfMethod, _results)\r\n _results = [[\"MLE\"], [_(u\"Regression\")]]\r\n Widgets.load_combo(self.cmbFitMethod, _results)\r\n\r\n # Create the labels for the left half of the right side.\r\n _labels = [_(u\"Assembly:\"), _(u\"Description:\"), _(u\"Distribution:\"),\r\n _(\"Fit Method:\"), _(u\"Confidence:\"), _(u\"Confidence Type:\"),\r\n _(\"Confidence Method:\")]\r\n (_x_pos1, _y_pos1) = Widgets.make_labels(_labels, _fixed, 5, 5)\r\n _x_pos1 += 55\r\n\r\n # Create the labels for the right half of the right side.\r\n _labels = [_(u\"Start Time:\"), _(u\"End Time:\"), _(u\"Step Interval:\"),\r\n _(u\"Start Date:\"), _(u\"End Date:\")]\r\n (_x_pos2,\r\n _y_pos2) = Widgets.make_labels(_labels, _fixed, _x_pos1 + 215, 5)\r\n _x_pos2 += _x_pos1\r\n _x_pos2 += 275\r\n\r\n # Place widgets on the left side.\r\n _fixed.put(self.cmbAssembly, _x_pos1, _y_pos1[0])\r\n _fixed.put(self.txtDescription, _x_pos1, _y_pos1[1])\r\n _fixed.put(self.cmbDistribution, _x_pos1, _y_pos1[2])\r\n _fixed.put(self.cmbFitMethod, _x_pos1, _y_pos1[3])\r\n _fixed.put(self.txtConfidence, _x_pos1, _y_pos1[4])\r\n _fixed.put(self.cmbConfType, _x_pos1, _y_pos1[5])\r\n _fixed.put(self.cmbConfMethod, _x_pos1, _y_pos1[6])\r\n\r\n # Place widgets on the right side.\r\n _fixed.put(self.txtStartTime, _x_pos2, _y_pos2[0])\r\n _fixed.put(self.txtEndTime, _x_pos2, _y_pos2[1])\r\n _fixed.put(self.txtRelPoints, _x_pos2, _y_pos2[2])\r\n _fixed.put(self.txtStartDate, _x_pos2, _y_pos2[3])\r\n _fixed.put(self.btnStartDate, _x_pos2 + 105, _y_pos2[3])\r\n _fixed.put(self.txtEndDate, _x_pos2, _y_pos2[4])\r\n _fixed.put(self.btnEndDate, _x_pos2 + 105, _y_pos2[4])\r\n _fixed.put(self.chkGroup, _x_pos2, _y_pos2[4] + 30)\r\n _fixed.put(self.chkParts, _x_pos2, _y_pos2[4] + 60)\r\n\r\n _fixed.show_all()\r\n\r\n # Insert the tab.\r\n _label = gtk.Label()\r\n _label.set_markup(\"<span weight='bold'>\" +\r\n _(u\"Analysis\\nInputs\") + \"</span>\")\r\n _label.set_alignment(xalign=0.5, yalign=0.5)\r\n _label.set_justify(gtk.JUSTIFY_CENTER)\r\n _label.show_all()\r\n _label.set_tooltip_text(_(u\"Displays analysis inputs for the selected \"\r\n u\"dataset.\"))\r\n notebook.insert_page(_hbox, tab_label=_label, position=-1)\r\n\r\n return 
False", "def process_widgets(self):\r\n\r\n self.runmode_menu.add_radiobutton(label=\"Graphical User Interface\", value=0, variable=self.gui_menu_var,\r\n command=self.disable_debugging_mode)\r\n self.runmode_menu.add_radiobutton(label=\"Command Line Interface\", value=1, variable=self.gui_menu_var,\r\n command=lambda gui=self: load_cli(self))\r\n self.runmode_menu.add_radiobutton(label=\"Debugging Mode (GUI + CLI)\", value=2, variable=self.gui_menu_var,\r\n command=self.enable_debugging_mode)\r\n\r\n # Placing all the submenus\r\n self.filemenu.add_cascade(label=\"Run Mode\", menu=self.runmode_menu)\r\n self.menubar.add_cascade(label=\"File\", menu=self.filemenu)\r\n\r\n self.config(menu=self.menubar) # Indicating that the \"menubar\" variable is the filemenu of the application\r\n\r\n self.folder_frame.pack()\r\n\r\n # self.folder_locator.pack(side=LEFT, padx=10, pady=10)\r\n\r\n self.media_folder_label.pack(side=LEFT, padx=10, pady=10)\r\n\r\n self.folder_button.pack(side=LEFT)\r\n\r\n self.path_frame_parent.pack(side=LEFT)\r\n\r\n self.search_frame.pack()\r\n\r\n self.search_frame.pack()\r\n self.search_entry.grid(row=0, column=0, padx=10, pady=20)\r\n self.search_button.grid(row=0, column=1, padx=5)\r\n # self.advanced_search_button.grid(row=0, column=2, padx=5)\r\n\r\n self.media_frame.pack()\r\n\r\n self.button_frame.pack()", "def load_search_gui(self):\n pass", "def init_ui(self):\n # Create GUI elements, set them in dict structure\n labelwidth = 150\n\n # Add parameter line edit for Factor Tm to Tp\n self.input_elements['factor Tm Tp'] = widgets.ParameterInputLine(\n label='Factor Tm naar Tp:',\n labelwidth=labelwidth,\n unitlabel='(NVT: Tp aanwezig)' if 'Tp' in self.hydraulic_loads.columns else '',\n validator=QtGui.QDoubleValidator(0.01, 99.99, 20),\n )\n\n if 'Tp' in self.hydraulic_loads.columns or self.parent_tab.step != 'I1':\n self.input_elements['factor Tm Tp'].set_enabled(False)\n\n # Add line edit with browsebutton for Master template\n self.input_elements['mastertemplate'] = widgets.ExtendedLineEdit(\n label='Master template bestand:',\n labelwidth=labelwidth,\n browsebutton=QtWidgets.QPushButton('...', clicked=self.select_master_template)\n )\n\n # Add line edit with browsebutton for depth file\n self.input_elements['depthfile'] = widgets.ExtendedLineEdit(\n label='Bathymetry bestand:',\n labelwidth=labelwidth,\n browsebutton=QtWidgets.QPushButton('...', clicked=self.select_bathymetry_file)\n )\n\n # Add line edit with browsebutton for swan result folder\n self.input_elements['swanfolder'] = widgets.ExtendedLineEdit(\n label='SWAN uitvoer folder:',\n labelwidth=labelwidth,\n browsebutton=QtWidgets.QPushButton('...', clicked=self.select_swan_folder)\n )\n\n\n self.setLayout(QtWidgets.QVBoxLayout())\n self.layout().setSpacing(10)\n\n for _, item in self.input_elements.items():\n self.layout().addWidget(item)\n\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.layout().addWidget(line)\n\n # OK and Cancel buttons\n self.generateButton = QtWidgets.QPushButton('Genereer invoer')\n self.generateButton.setDefault(True)\n self.generateButton.clicked.connect(self.generate)\n\n self.cancelButton = QtWidgets.QPushButton('Annuleren')\n self.cancelButton.setAutoDefault(False)\n self.cancelButton.clicked.connect(self.cancel)\n\n button_box = QtWidgets.QDialogButtonBox(QtCore.Qt.Horizontal, self)\n button_box.addButton(self.generateButton, QtWidgets.QDialogButtonBox.ActionRole)\n 
button_box.addButton(self.cancelButton, QtWidgets.QDialogButtonBox.RejectRole)\n button_box.accepted.connect(QtWidgets.QDialog.accept)\n\n self.layout().addWidget(button_box)", "def create_widgets(self):", "def initWidgets(self):\r\n if self.autoExampleWidgets:\r\n self.initExampleWidgets()", "def create_widgets( self ):", "def initialize_gui(self) -> None:\n # pymol.Qt provides the PyQt5 interface\n from PyQt5 import QtWidgets\n from PyQt5.uic import loadUi\n # from pymol.Qt.utils import loadUi\n\n # populate the QMainWindow from our *.ui file\n uifile = os.path.join(os.path.dirname(__file__), 'PyMOL-KVFinder-web-tools.ui')\n loadUi(uifile, self)\n\n # ScrollBars binded to QListWidgets in Descriptors\n scroll_bar_volume = QtWidgets.QScrollBar(self)\n self.volume_list.setVerticalScrollBar(scroll_bar_volume)\n scroll_bar_area = QtWidgets.QScrollBar(self)\n self.area_list.setVerticalScrollBar(scroll_bar_area)\n scroll_bar_residues = QtWidgets.QScrollBar(self)\n self.residues_list.setVerticalScrollBar(scroll_bar_residues)\n\n # about text\n self.about_text.setHtml(about_text)\n\n ########################\n ### Buttons Callback ###\n ########################\n\n # hook up QMainWindow buttons callbacks\n self.button_run.clicked.connect(self.run)\n self.button_exit.clicked.connect(self.close)\n self.button_restore.clicked.connect(self.restore)\n self.button_grid.clicked.connect(self.show_grid)\n \n # hook up Parameters button callbacks\n self.button_browse.clicked.connect(self.select_directory)\n self.refresh_input.clicked.connect(lambda: self.refresh(self.input))\n \n # hook up Search Space button callbacks\n # Box Adjustment\n self.button_draw_box.clicked.connect(self.set_box)\n self.button_delete_box.clicked.connect(self.delete_box)\n self.button_redraw_box.clicked.connect(self.redraw_box)\n self.button_box_adjustment_help.clicked.connect(self.box_adjustment_help)\n # Ligand Adjustment\n self.refresh_ligand.clicked.connect(lambda: self.refresh(self.ligand))\n\n # hook up methods to results tab\n # Jobs\n self.available_jobs.currentIndexChanged.connect(self.fill_job_information)\n self.button_show_job.clicked.connect(self.show_id)\n self.button_add_job_id.clicked.connect(self.add_id)\n # Visualization\n self.button_browse_results.clicked.connect(self.select_results_file)\n self.button_load_results.clicked.connect(self.load_results)\n self.volume_list.itemSelectionChanged.connect(lambda list1=self.volume_list, list2=self.area_list: self.show_cavities(list1, list2))\n self.area_list.itemSelectionChanged.connect(lambda list1=self.area_list, list2=self.volume_list: self.show_cavities(list1, list2))\n self.residues_list.itemSelectionChanged.connect(self.show_residues)", "def _read_from_loader(self, loader):\n self._domain = loader.get_domain() or ''\n self._version = loader.get_version()\n\n # Load UIM manager\n # Do this first since the custom menubar and toolbars adapters\n # depends on all ui definitions being loaded\n self.uim.load(loader)\n\n # Load models before widgets\n models = [w for w in loader.toplevels if isinstance(w, gtk.ListStore)]\n for model in models:\n self.model_manager.load_model(model)\n\n # Load the widgets\n for widget in loader.toplevels:\n if isinstance(widget, gtk.Widget):\n self._load_widget(widget)\n\n # Load sizegroups, must be done after loading all the widgets,\n # since the sizegroups has references to the widgets\n for sizegroup in loader.sizegroups:\n name = sizegroup.get_data('gazpacho::object-id')\n widgets = sizegroup.get_data('gazpacho::sizegroup-widgets') or 
[]\n gadgets = [Gadget.from_widget(widget)\n for widget in widgets]\n self.add_sizegroup(GSizeGroup(name, sizegroup, gadgets))\n\n # Signals\n for signal in loader.get_signals():\n gobj, signal_name, signal_handler, signal_after = signal[:4]\n gadget = Gadget.from_widget(gobj)\n if gadget is None:\n continue\n gadget.add_signal_handler(SignalInfo(name=signal_name,\n handler=signal_handler,\n after=signal_after))\n\n self._unsupported_widgets = loader.get_unsupported_widgets()\n\n self.changed = False", "def add_widgets(self):\n tkinter.Label(self.top_frame, text=\"File Path:\").grid(row=1, column=0)\n self.data_path_entry = tkinter.Entry(self.top_frame)\n self.data_path_entry.grid(row=1, column=1)\n self.data_path_entry.insert(10, self.data_path)\n # Create the Browse button\n tkinter.Button(self.top_frame,\n text=\"Browse...\",\n command=self.get_file).grid(row=1, column=2)\n # Create the Ok button\n tkinter.Button(self.top_frame,\n text=\"OK\",\n command=self.save_configurations).grid(row=2, column=0, sticky=tkinter.W, pady=3)\n # Create the Cancel button\n tkinter.Button(self.top_frame,\n text=\"Cancel\",\n command=self.exit).grid(row=2, column=1, sticky=tkinter.E, pady=3)", "def do_startup(self):\n \n import json\n\n GLib.set_application_name(\"Deity\")\n Gtk.Application.do_startup(self)\n \n settings = self.get_settings()\n\n menub = Gtk.MenuButton(name=\"input-menu_button\",\n use_popover=True)\n\n headerbar = Gtk.HeaderBar(name=\"input-headerbar\",\n show_close_button=True,\n title=\"Deity\")\n\n main_grid = Gtk.Grid(name=\"input-main_grid\")\n\n statusbar = Gtk.Box(name=\"input-statusbar\",\n orientation=0,\n spacing=2)\n statusbar.pack_start(self.statuslabel, 1, 1, 1)\n\n self.connector.connect(\"query-status\", self.show_output)\n self.connector.connect(\"query-waiting\",\n lambda wid, count: self.statuslabel.set_text(\n f\"Queries on hold : {count}\"))\n self.connector.connect(\"request\", print)\n\n headerbar.pack_end(menub)\n\n main_grid.attach(self.iogrid.get_widget(), 0, 0, 1, 1)\n main_grid.attach(statusbar, 0, 1, 1, 1)\n\n self.output_window.add(self.get_placeholder_image())\n\n self.window.set_titlebar(headerbar)\n self.window.set_default_icon_from_file(\"artwork/Logo.png\")\n self.window.add(main_grid)\n\n self.window.connect(\"key-press-event\", self.parse_keypress)\n self.window.connect(\"delete-event\", self.request_quit)\n \n self.other[\"connector\"] = self.connector\n self.other[\"headerbar\"] = headerbar\n self.other[\"history\"] = self.history\n self.other[\"input-window\"] = self.window\n self.other[\"iogrid\"] = self.iogrid\n self.other[\"plugins\"] = self.get_plugins(settings[\"enabled-plugins\"])\n self.other[\"statusbar\"] = statusbar\n self.other[\"statuslabel\"] = self.statuslabel\n self.other[\"output-notebook\"] = self.notebook\n self.other[\"output-window\"] = self.output_window\n self.other[\"main-grid\"] = main_grid\n self.other[\"menu_button\"] = menub\n \n self.apply_settings(settings)\n self.current_prompt = self.iogrid.add_prompt()\n\n self.window.set_application(self)\n self.output_window.set_application(self)\n\n self.output_window.move(800, 150)\n self.window.move(75, 160)", "def _populate_widgets(self):\n\n if self.parent.session is None:\n # No point populating the widgets with the default values from the\n # SMH file because these will be updated when a session is loaded.\n return\n\n keys = (\"function\", \"order\", \"low_sigma_clip\", \"high_sigma_clip\",\n \"knot_spacing\", \"max_iterations\")\n self._cache = {\n \"input\": {}\n }\n for 
key in keys:\n self._cache[\"input\"][key] \\\n = self.parent.session.setting((\"normalization\", key))\n\n # Continuum masks.\n self._cache[\"masks\"] \\\n = self.parent.session.setting((\"normalization\", \"masks\"))\n self._cache[\"default_mask\"] \\\n = self.parent.session.setting((\"normalization\", \"default_mask\")) \\\n or self._cache[\"masks\"].keys()[0]\n\n\n # Put these values into the widgets.\n self.low_sigma_clip.setText(\n str(self._cache[\"input\"][\"low_sigma_clip\"]))\n self.high_sigma_clip.setText(\n str(self._cache[\"input\"][\"high_sigma_clip\"]))\n self.knot_spacing.setText(str(\n self._cache[\"input\"][\"knot_spacing\"]))\n\n functions = [self.function.itemText(i).lower() \\\n for i in range(self.function.count())]\n self.function.setCurrentIndex(functions.index(\n self._cache[\"input\"][\"function\"]))\n\n # Normalization order.\n orders = [int(self.order.itemText(i)) \\\n for i in range(self.order.count())]\n self.order.setCurrentIndex(orders.index(\n self._cache[\"input\"][\"order\"]))\n\n # Normalization maximum iterations.\n norm_max_iters = [int(self.norm_max_iter.itemText(i)) \\\n for i in range(self.norm_max_iter.count())]\n self.norm_max_iter.setCurrentIndex(norm_max_iters.index(\n self._cache[\"input\"][\"max_iterations\"]))\n\n # Mask names.\n for name in self._cache[\"masks\"].keys():\n self.continuum_mask.addItem(name)\n\n self.continuum_mask.setCurrentIndex(\n self._cache[\"masks\"].keys().index(\n self._cache[\"default_mask\"]))\n\n self.order_slide.setMaximum(len(self.parent.session.input_spectra) - 1)\n self.current_order_label.setText(\"Order 1 of {}\".format(\n len(self.parent.session.input_spectra)))\n\n # Draw the widgets.\n try:\n self.order_slide.setValue(0)\n self.update_order_index(0)\n self.update_continuum_mask(refresh=False)\n self.fit_continuum(clobber=False)\n self.draw_order(refresh=False)\n self.draw_continuum(refresh=True)\n\n except (AttributeError, KeyError):\n # HACK\n # when loading a fresh session, it will skip all those blocks\n # I think this is okay?\n pass\n return None", "def init_ui(self):\n # Create GUI elements, set them in dict structure\n labelwidth = 150\n\n # Add parameter line edit for Factor Tm to Tp\n\n # Add line edit with browsebutton for swan result folder\n self.input_elements['hares folder'] = widgets.ExtendedLineEdit(\n label='HARES uitvoerbestanden folder:',\n labelwidth=labelwidth,\n browsebutton=QtWidgets.QPushButton('...', clicked=self.select_hares_folder)\n )\n\n\n self.setLayout(QtWidgets.QVBoxLayout())\n self.layout().setSpacing(10)\n\n for _, item in self.input_elements.items():\n self.layout().addWidget(item)\n\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.layout().addWidget(line)\n\n # OK and Cancel buttons\n self.generateButton = QtWidgets.QPushButton('Start lezen uitvoerbestanden')\n self.generateButton.setDefault(True)\n self.generateButton.clicked.connect(self.generate)\n\n self.cancelButton = QtWidgets.QPushButton('Annuleren')\n self.cancelButton.setAutoDefault(False)\n self.cancelButton.clicked.connect(self.cancel)\n\n button_box = QtWidgets.QDialogButtonBox(QtCore.Qt.Horizontal, self)\n button_box.addButton(self.generateButton, QtWidgets.QDialogButtonBox.ActionRole)\n button_box.addButton(self.cancelButton, QtWidgets.QDialogButtonBox.RejectRole)\n button_box.accepted.connect(QtWidgets.QDialog.accept)\n\n self.layout().addWidget(button_box)", "def widgets(self):\n raise NotImplementedError(\"This method is not 
ready to be used yet\")", "def _init_ui(self):\n # Create GUI elements, set them in dict structure\n inf_symbol = u'\\u221E'\n gamma_symbol = u'\\u03B3'\n unicode_squared = u'\\u00B9'\n labelwidth=175\n\n if len(self.waterlevels) > 100:\n raise NotImplementedError('More than 100 water levels where discovered in the hydraulic loads. The method with PHAROS is not implemented for this number of loads. Recalculate the wave conditions at given water levels, or pick a method without PHAROS.')\n\n self.input_elements['hydraulic loads'] = {\n 'Hs_max': widgets.ParameterLabel(\n label='Max. significante golfhoogte:',\n labelwidth=labelwidth,\n value='{:.3f}'.format(self.Hs_max),\n unit='m'\n ),\n # 'Tp_max': widgets.ParameterLabel(\n # label='Maximale piekperiode',\n # labelwidth=150\n # ),\n 'factor Tm Tp': widgets.ParameterInputLine(\n label='Factor Tm naar Tp:',\n labelwidth=labelwidth,\n validator=QtGui.QDoubleValidator(0.01, 99.99, 20),\n ),\n 'water depth for wave length': widgets.ParameterInputLine(\n label='Waterdiepte voor golflengte:',\n labelwidth=labelwidth,\n unitlabel='m',\n validator=QtGui.QDoubleValidator(0.00, np.inf, 20),\n ),\n }\n\n self.input_elements['wave directions'] = {\n 'lowest': widgets.ParameterInputLine(\n label='Laagste waarde [0-360]:',\n labelwidth=labelwidth,\n unitlabel='graden (nautisch)',\n validator=QtGui.QDoubleValidator(0.00, 360.00, 20),\n ),\n 'highest': widgets.ParameterInputLine(\n label='Hoogste waarde [0-360]:',\n labelwidth=labelwidth,\n unitlabel='graden (nautisch)',\n validator=QtGui.QDoubleValidator(0.00, 360.00, 20),\n ),\n 'bin size': widgets.ParameterInputLine(\n label='Klassegrootte [1-360]:',\n labelwidth=labelwidth,\n unitlabel='graden',\n validator=QtGui.QDoubleValidator(1.00, 360.00, 20),\n )\n }\n\n self.input_elements['frequencies'] = {\n 'lowest': widgets.ParameterInputLine(\n label='Ondergrens [{:.3f} - {:.3f}]:'.format(*self.f_range),\n labelwidth=labelwidth,\n unitlabel='Hz',\n validator=QtGui.QDoubleValidator(self.f_range[0] - 0.01, self.f_range[1] + 0.01, 20),\n ),\n 'highest': widgets.ParameterInputLine(\n label='Bovengrens [{:.3f} - {:.3f}]:'.format(*self.f_range),\n labelwidth=labelwidth,\n unitlabel='Hz',\n validator=QtGui.QDoubleValidator(self.f_range[0] - 0.01, self.f_range[1] + 0.01, 20),\n ),\n 'number of bins': widgets.ParameterInputLine(\n label='Aantal klassen [1-50]:',\n labelwidth=labelwidth,\n validator=QtGui.QIntValidator(1, 50),\n ),\n 'scale': widgets.ComboboxInputLine(\n label='Frequentie schaal:',\n labelwidth=labelwidth,\n items=['lineair', 'logaritmisch'],\n )\n }\n\n self.input_elements['2d wave spectrum'] = {\n 'spread': widgets.ParameterInputLine(\n label='Spreiding [10-70]:',\n labelwidth=labelwidth,\n unitlabel='graden',\n validator=QtGui.QDoubleValidator(10.0, 70.0, 20),\n ),\n 'gamma': widgets.ParameterInputLine(\n label='JONSWAP peak\\nenhancement factor {} [1-7]:'.format(gamma_symbol),\n labelwidth=labelwidth,\n unitlabel='',\n validator=QtGui.QDoubleValidator(1.00, 7.00, 20),\n ),\n 'min energy': widgets.ParameterInputLine(\n label='Signaleringswaarde energie [0-{}]:'.format(inf_symbol),\n labelwidth=labelwidth,\n unitlabel='m{}s/degree'.format(unicode_squared),\n validator=QtGui.QDoubleValidator(0.00, 2.00, 20),\n )\n }\n\n self.input_elements['paths'] = {\n 'pharos folder': widgets.ExtendedLineEdit(\n label='Uitvoermap:',\n labelwidth=labelwidth,\n browsebutton=QtWidgets.QPushButton(\n '...',\n clicked=self._load_pharos_folder\n )\n ),\n 'schematisation folder': widgets.ExtendedLineEdit(\n 
label='Schematisatiemap:',\n labelwidth=labelwidth,\n browsebutton=QtWidgets.QPushButton(\n '...',\n clicked=self._load_schematisations_folder\n )\n )\n }\n\n self.input_elements['water levels'] = {\n 'checked': widgets.CheckBoxInput(\n labels=self.waterlevels,\n nrows=max(2, len(self.waterlevels) // 20),\n unitlabel='m + NAP'\n )\n }\n\n delta = u'\\u0394'\n self.input_elements['transformation'] = {\n 'dx': widgets.ParameterInputLine(\n label='{}x [RD + {}x = lokaal]:'.format(delta, delta),\n labelwidth=labelwidth,\n ),\n 'dy': widgets.ParameterInputLine(\n label='{}y [RD + {}y = lokaal]:'.format(delta, delta),\n labelwidth=labelwidth,\n )\n }\n\n\n # Define titles for groups\n titles = {\n 'hydraulic loads': 'Hydraulische belastingen',\n 'wave directions': 'Golfrichtingen',\n 'frequencies': 'Frequenties',\n '2d wave spectrum': '2D golfspectrum',\n 'paths': 'Paden',\n 'water levels': 'Te simuleren waterstanden',\n 'transformation': 'Transformatie voor coordinatenstelsel'\n }\n\n\n # Create base layout\n self.setLayout(QtWidgets.QVBoxLayout())\n # self.layout().setSpacing(10)\n\n for tag, title in titles.items():\n if tag in self.input_elements:\n group_layout = QtWidgets.QVBoxLayout()\n for _, item in self.input_elements[tag].items():\n group_layout.addWidget(item)\n\n # Add groupbox with title\n groupbox = QtWidgets.QGroupBox(title)\n groupbox.setLayout(group_layout)\n self.layout().addWidget(groupbox)\n\n\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n line.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.layout().addWidget(line)\n\n # OK and Cancel buttons\n\n self.generate_button = QtWidgets.QPushButton('Genereer tabel')\n self.generate_button.setDefault(True)\n # self.generate_button.setEnabled(False)\n self.generate_button.clicked.connect(self.generate)\n\n self.cancel_button = QtWidgets.QPushButton('Annuleren')\n self.cancel_button.setAutoDefault(False)\n self.cancel_button.clicked.connect(self.cancel)\n\n button_box = QtWidgets.QDialogButtonBox(QtCore.Qt.Horizontal, self)\n button_box.addButton(self.generate_button, QtWidgets.QDialogButtonBox.ActionRole)\n button_box.addButton(self.cancel_button, QtWidgets.QDialogButtonBox.RejectRole)\n\n button_box.accepted.connect(QtWidgets.QDialog.accept)\n # button_box.rejected.connect(QtWidgets.QDialog.reject)\n\n self.layout().addWidget(button_box)", "def createWidgets(self):\n raise NotImplementedError", "def _get_from_builder(self):\n # Load the ui from a glade file.\n self.builder = Gtk.Builder()\n try:\n self.builder.add_from_file(os.path.join(self.app.BASE_DIR,\n 'ui',\n 'selectsynaptics.glade')\n )\n except Exception as ex:\n print(str(ex))\n print('\\n{}:\\n{}\\n{}'.format(_('Error loading from Glade file'),\n os.path.join(self.app.BASE_DIR,\n 'ui',\n 'selectsynaptics.glade'), repr(ex))\n )\n sys.exit(ERROR_INVALID_GLADE_FILE)\n\n # Get gui objects.\n self.boxForFooter = self.builder.get_object('boxForFooter')\n self.boxMain = self.builder.get_object('boxMain')\n self.buttonCancel = self.builder.get_object('buttonCancel')\n self.buttonOK = self.builder.get_object('buttonOK')\n self.comboboxtextDevices = self.builder.get_object('comboboxtextDevices')\n self.label1 = self.builder.get_object('label1')\n self.labelSelected = self.builder.get_object('labelSelected')\n self.labelWarning = self.builder.get_object('labelWarning')\n\n # Connect signals existing in the Glade file.\n self.builder.connect_signals(self)\n\n # Reparent our main container from glader file,\n # this way we have all Gtk.Window functionality 
using \"self\".\n thechild = self.builder.get_object('windowMain').get_child()\n thechild.get_parent().remove(thechild)\n self.add(thechild)\n\n # Connect generated signals:\n # top window signals and/or other generated signals.\n # top window signals were connected, by builder's \"connect_signals\" function,\n # to builder's main window\n self.connect('delete-event', self.on_windowMain_delete_event)\n self.connect('destroy', self.on_windowMain_destroy)\n self.connect('size-allocate', self.on_windowMain_size_allocate)\n self.connect('window-state-event', self.on_windowMain_window_state_event)\n\n\n # :builder top window properties.\n self.can_focus = 'False'\n\n # Load window icon from app, if any.\n self.set_icon(self.app.icon)", "def init_UI(self):\n\n self.master.title(\"Search for different companies\")\n self.master.geometry(\"400x400\")\n\n self.label_combobox = Label(self, text=\"Search by\")\n self.label_combobox.pack()\n\n self.combo_searching_options = Combobox(self, state=\"readonly\")\n self.combo_searching_options['values'] = self.combobox_values\n self.combo_searching_options.pack()\n\n self.label_input = Label(self, text=\"Entry the value\")\n self.label_input.pack()\n\n self.user_input = Entry(self, width=40)\n self.user_input.pack()\n\n self.btn_submit = Button(self, text=\"Submit\", command=self.submit)\n self.btn_submit.pack()\n\n self.text_area = scrolledtext.ScrolledText(self)\n self.text_area.pack()\n\n sys.stdout = RedirectOutputText(self.text_area)\n\n self.btn_back = Button(self, text=\"Back\", command=self.go_back)\n self.btn_back.pack()", "def _init_widgets(self):\n # Container frame\n self.container = Frame(self)\n # Workspace block\n self.main_container = Frame(self.container)\n\n self.text = Label(self.main_container)\n self.text.config(text=\"PyEventLogViewer is a timeline-based tool used to simplify the way\\n\"\n \"a user can view and explore Windows EVTX files. 
To begin using this\\n\"\n \"software you must do the following:\\n\\n\"\n \"\\t1) File → New → 'Create a new project'\\n\"\n \"\\t2) Tools → Import Log File → 'Open a specified EVTX file'\\n\"\n \"\\t3) Explore the presented timeline.\\n\"\n \"\\t4) Double-click a specific record to view the XML data for that record.\\n\"\n \"\\t5) File → Export → 'Generate a CSV or HTML file for timeline presentation.'\\n\\n\"\n \"At this point, only System and Security EVTX files are parsable with this software.\")\n\n self.show_var = BooleanVar()\n self.show_check = Checkbutton(self.main_container, text=\"Don't Show on Startup\", variable=self.show_var)\n\n # Action block\n self.button_ok = Button(self.main_container, text='Ok', underline=0, command=self.callback_close)\n self.bind('<Return>', self.callback_close)\n self.bind('<Escape>', self.callback_close)\n\n # Focus on window - required for binds to work.\n self.focus_set()", "def _init_widgets(self):\n comps = self.ui.component_list\n comps.addItems(sorted(self._labels.keys()))\n data = self.ui.data_list\n data.addItems(sorted(self._data.keys()))", "def load_gui():\r\n\r\n print(\"\\nLoading graphical user interface...\\n\")\r\n SongStorageGUI().mainloop()", "def initUI(self):\n self.logger.debug('Setting up the Measurement GUI')\n self.setWindowTitle(self.title)\n\n self.show()\n\n self.make_combobox_scanner()\n self.make_combobox_movements()\n self.make_combobox_configurate()\n self.make_combobox_basic()", "def initGui(self):\r\n\r\n # Create help action \r\n self.helpAction = QAction( QIcon(\":/plugins/layercombinations/about.png\"), u\"Help\", self.iface.mainWindow())\r\n # connect the action \r\n self.helpAction.triggered.connect( self.showHelp )\r\n # Add menu item\r\n self.iface.addPluginToMenu(u\"&Layer Combinations\", self.helpAction)\r\n\r\n # Create the action that allows to change the widget type\r\n self.changeWidgetAction = QAction(\"Change widget type\", self.iface.mainWindow())\r\n self.changeWidgetAction.triggered.connect( self.changeWidget )\r\n self.iface.addPluginToMenu(u\"&Layer Combinations\", self.changeWidgetAction)\r\n\r\n # Create the action that will toggle the plugin panel\r\n self.action = QAction(QIcon(\":/plugins/layercombinations/icon.png\"), \"Show/hide the Layer Combinations widgets\", self.iface.mainWindow())\r\n self.action.triggered.connect( self.widget.toggle )\r\n # Add toolbar button and menu item\r\n self.iface.addToolBarIcon(self.action)\r\n self.iface.addPluginToMenu(u\"&Layer Combinations\", self.action)\r\n\r\n\r\n # Add the widget to the mainWindow\r\n self.widget.addToiFace(self.iface)", "def create_widgets(self):\n # self.var_spherical = IntVar()\n # self.var_3d = IntVar()\n # self.var_spatial_audio = IntVar()\n # self.button_open[\"command\"] = self.action_open\n # self.button_inject[\"command\"] = self.action_inject\n pass", "def initUI(self):\n\n lbl_names = ['Название проекта', 'Версия', 'Директория', 'Описание', 'Автор', 'Почта', 'Дополнительные зависимости', 'Название ноды']\n param_list = ['motor_driver', '0.0.0', '/home/mitya/catkin_ws/src/', 'The motor_driver package', 'D. 
Potapov',\n '[email protected]', 'nav_msgs, geometry_msgs, tf, ', 'motor_driver_node']\n labels = []\n for name in lbl_names:\n labels.append(QLabel(name))\n for i, ph in zip(range(len(labels)), param_list):\n ed_line = QLineEdit()\n if i == 1:\n ed_line.setValidator(QRegExpValidator(QRegExp(\"^([0-9\\.])*[0-9]$\")))\n elif i == 5:\n ed_line.setValidator(QRegExpValidator(QRegExp(\"^([a-z0-9_-]+\\.)*[a-z0-9_-]+@[a-z0-9_-]+(\\.[a-z0-9_-]+)*\\.[a-z]{2,6}$\")))\n ed_line.setPlaceholderText(ph)\n if i != 0:\n ed_line.textEdited.connect(self.change_data)\n else:\n ed_line.textEdited.connect(self.change_pkg_name)\n self.full_ed_lines.append(ed_line)\n grid = QGridLayout()\n grid.setSpacing(5)\n for i in range(1, len(labels) + 1):\n for j in range(0, 2):\n if j == 0:\n grid.addWidget(labels[i - 1], i, j)\n else:\n grid.addWidget(self.full_ed_lines[i - 1], i, j)\n ch_dirButton = QPushButton(self)\n ch_dirButton.setIcon(QIcon('./icons/open_folder.png'))\n ch_dirButton.clicked.connect(self.ch_dirDialog)\n grid.addWidget(ch_dirButton, 3, 3)\n genButton = QPushButton(\"Сгенерировать\")\n genButton.clicked.connect(self.generate)\n grid.addWidget(genButton, len(labels) + 2, 1)\n self.setLayout(grid)\n self.setMinimumSize(700, 400)\n self.show()", "def __create_ui(self):\n vbox = gtk.VBox()\n\n # Create the viewable area of the file browser\n self.__view_port = gtk.ScrolledWindow()\n self.__view_port.set_policy(gtk.POLICY_AUTOMATIC,\n gtk.POLICY_AUTOMATIC)\n # Create the tree view and add it to the viewable area\n self.__tree_view = ProjectTreeView()\n self.__project_explorer = ProjectExplorer(self.window, self.__tree_view)\n self.__tree_view.connect('button_press_event',\n self.__on_treeview_button_press_event)\n self.__project_explorer.set_repository()\n self.__view_port.add(self.__tree_view)\n # Create the toolbar\n hbox = gtk.HBox()\n toolbar = gtk.Toolbar()\n toolbar.set_style(gtk.TOOLBAR_ICONS)\n toolbar.set_icon_size(gtk.ICON_SIZE_MENU)\n back = gtk.ToolButton(gtk.STOCK_GO_UP)\n back.connect('clicked', self.__on_back_clicked)\n toolbar.insert(back, 0)\n toolbar.insert(gtk.SeparatorToolItem(), 1)\n refresh = gtk.ToolButton(gtk.STOCK_REFRESH)\n refresh.connect('clicked', self.__on_refresh_clicked)\n toolbar.insert(refresh, 2)\n hbox.pack_start(toolbar, True, True, 0)\n vbox.pack_start(hbox, False, False, 0)\n vbox.pack_start(self.__view_port, True, True, 0)\n\n # Setup the create the buttons for:\n # New File, New Folder\n # ----------------------------------------------------------------------\n hbox1 = gtk.VBox()\n toolbar_actions = gtk.Toolbar()\n toolbar_actions.set_style(gtk.TOOLBAR_ICONS)\n toolbar_actions.set_icon_size(gtk.ICON_SIZE_MENU)\n new_file = gtk.ToolButton(gtk.STOCK_NEW)\n new_file.connect('clicked', self.__on_new_file_clicked_cb)\n toolbar_actions.insert(new_file, 0)\n new_dir = gtk.ToolButton(gtk.STOCK_OPEN) # TODO: use a custom icon\n new_dir.connect('clicked', self.__on_new_dir_clicked_cb)\n toolbar_actions.insert(new_dir, 1)\n hbox1.pack_start(gtk.HSeparator(), True, True, 0)\n hbox1.pack_start(toolbar_actions, True, True, 0)\n vbox.pack_end(hbox1, False, False, 0)\n # ----------------------------------------------------------------------\n vbox.show_all()\n # Attach the project explorer to GMate's side panel\n self.__side_panel = self.window.get_side_panel()\n self.__side_panel.add_tab(vbox, msg0005, gtk.STOCK_HARDDISK)", "def init_ui(self):\n raise NotImplementedError", "def init_ui(self):\n raise NotImplementedError", "def init_widget(self):", "def widgets(self):\r\n 
self.setWindowTitle(\"PyCrypt\")\r\n self.setMinimumSize(QSize(500, 500))\r\n self.setMaximumSize(QSize(500, 500))\r\n# Adding the sub def for widgets etc\r\n self.add_menus_and_status()\r\n self.add_buttons()" ]
[ "0.68877333", "0.6753903", "0.65538627", "0.6503645", "0.6488988", "0.63903576", "0.63797456", "0.62879664", "0.6276804", "0.62472034", "0.622531", "0.6221652", "0.6181676", "0.6164852", "0.6162042", "0.61567295", "0.61434543", "0.6064883", "0.60598457", "0.6035046", "0.6024029", "0.60191476", "0.6013232", "0.5988958", "0.5986579", "0.59845334", "0.59649885", "0.59649885", "0.5956461", "0.59468687" ]
0.6992013
0
Check ongoing hacks and their expiration time.
async def check_hacks(self) -> None:

    hacks = await self.get_expired_hacks()
    for h in hacks:
        await self.delete_skill_action_by_target_id_and_skill_type(h[3], 'hack')

        channel = self.bots_txt

        await channel.send(
            content=f"<@{h[0]}>",
            embed=discord.Embed(
                description=f"**<@{h[3]}> updated his firewall so <@{h[0]}>'s hacking has no effect anymore! 💻**",
                color=discord.Color.red()))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def check_hacks(self) -> None:\n\n hacks = await self.get_expired_hacks()\n for h in hacks:\n await self.delete_skill_action_by_target_id_and_skill_type(h[3], 'hack')\n await self.update_user_is_hacked(h[3], 0)\n\n channel = self.bots_txt\n\n await channel.send(\n content=f\"<@{h[0]}>\",\n embed=discord.Embed(\n description=f\"**<@{h[3]}> updated his firewall so <@{h[0]}>'s hacking has no effect anymore! 💻**\",\n color=discord.Color.red()))", "def run(self):\n print('checking for expired cache items...')\n\n while True:\n # Do something\n self.check()\n time.sleep(self.interval)", "def _check_expire(self):\n self._log.debug(\"Checking entry expiration...\")\n current_time = time.time()\n for key in self._obj_cache.keys():\n self._log.debug(' -> %s (type = %s)',\n key, type(self._obj_cache[key]))\n # Remove if the key has a timeout, and the timeout period has been\n # exceeded (last access + timeout period <= current_time).\n if self._obj_timeouts[key] > 0 \\\n and current_time >= (self._obj_last_access[key]\n + self._obj_timeouts[key]):\n self._log.debug(' EXPIRED -- removing')\n # delete\n del self._obj_cache[key]\n del self._obj_last_access[key]\n del self._obj_timeouts[key]", "def check_attack(self):\n now = time.time() * 1000\n if self.prev_time is None:\n return True\n else:\n next_time = self.prev_time + self.get_recharge\n if now >= next_time:\n return True\n else:\n return False", "def _check_timeouts(self):\n\n expired_tokens = []\n for token in self._capability_timeouts:\n interval = datetime.utcnow() - self._capability_timeouts[token]\n if interval.total_seconds() >= 10:\n expired_tokens.append(token)\n\n for token in expired_tokens:\n cap_withdraw = mplane.model.Withdrawal(capability=self._capabilities[token])\n self.handle_message(cap_withdraw, self.identity_for(token))", "def hacked(self):\n logging.critical('account was hacked')", "def cooldown_checker(self):\n self.cooldown_tick += 1\n if self.cooldown_tick == self.pattern_cooldown:\n self.wait = False\n self.cooldown_tick = 0", "def token_valid_check(start_time):\n #calculate the time elapsed since token was last refreshed\n elapsed_time = time.time() - start_time\n #take action if token is expired\n if elapsed_time > 3540:\n return False\n return True", "def check_expiration(self, cur_time):\n\n\t\ttime_limit = 1000\n\t\ttime_elapsed = cur_time - self.time_created\n\n\t\t# Erase cache after an arbitrary amount of time\n\t\tif time_elapsed > time_limit:\n\t\t\tself.cache_expiration()", "def counter_checker(sc):\n t = time.localtime()\n current_time = time.strftime(\"%H:%M:%S\", t)\n print(\"Checking JABS\", current_time)\n jab_checker()\n s.enter(20, 1, counter_checker, (sc,))", "def check(self):\n self.lastcheck = time.time()\n delta = time.time() - self.last\n if delta > 270:\n self.server.restart = True\n self.server.connected = False\n elif delta > 180:\n self.server.printer.raw_message(\"PING :♥\")", "def check_time_server(self):\n ack = self.check_server_activity()\n if self.am_leader:\n t = Timer(5, self.set_offset_for_processes)\n t.daemon = True\n t.start()\n else:\n t = Timer(10, self.check_time_server)\n t.daemon = True\n t.start()\n return ack", "def main():\n populate_satellites_array()\n latitude = float(os.environ['LATITUDE'])\n longitude = float(os.environ['LONGITUDE'])\n radius = int(os.environ['RADIUS'])\n timeout = 1\n previous_satellites = []\n while True:\n if (last_updated[0] + 86400) < int(time.time()):\n print('Expired data, updating from spacetrack')\n cron_refresh_spacetrack_cache()\n 
populate_satellites_array()\n print('Checking {}, {}'.format(latitude, longitude))\n currently_overhead = get_overhead_satellites_dicts(latitude, longitude, radius)\n for sat in currently_overhead:\n if not sat['name'] in previous_satellites:\n announce_satellite(sat)\n previous_satellites = [x['name'] for x in currently_overhead]\n time.sleep(timeout)", "def BugCheck(self, irc, last_checked):\n page = urlopen(\"https://bugs.alliedmods.net/buglist.cgi?chfieldfrom=%s\"\n \"&product=SourceMod&order=bugs.bug_id%%20desc&ctype=ics\"\n % time.strftime(\"%Y-%m-%d\", last_checked))\n contents = page.read()\n cal = icalendar.Calendar.from_string(contents)\n \n for comp in cal.walk(\"VTODO\"):\n comptime = comp[\"DTSTART\"].dt.utctimetuple()\n if comptime < last_checked:\n break\n \n bug_id = comp[\"UID\"].split(\"%40\", 2)[0]\n irc.queueMsg(ircmsgs.privmsg(\"#sourcemod\", \"New bug #%s: %s (%s)\" %\n (bug_id, comp[\"SUMMARY\"].replace('\\\"', '\"'), unquote(comp[\"URL\"]))))\n \n self.timer = Timer(self.registryValue(\"refreshTime\"), \n self.BugCheck, [irc, time.localtime()])\n self.timer.start()", "def __check_token(self) -> bool:\r\n\r\n now = datetime.now(self.__tz)\r\n\r\n if (self.__token_expiration_date - now).total_seconds() < 0:\r\n log.debug('Token needs update!')\r\n return self.__update_token()\r\n return False", "def run_checks():\n while True:\n if datetime.now() > core.misc_data.check_date+timedelta(minutes=45):\n for stuff in stuff_to_do:\n threading.Thread(target=stuff).start()\n core.misc_data.check_date = datetime.now() + config.utils.tasks.repeat_every\n time.sleep(5*60*60)", "def pass_good_until(hours_good=config.HOURS_TO_GRANT_ACCESS):\n return datetime.now() + timedelta(hours=hours_good)", "def delete_by_time():\n while True:\n try:\n now = time.time()\n for user in get_time_start():\n ip, start, protocol = str(user[0][0]), user[1][0], str(user[2][0])\n \n if now - start >= 60 and ip not in BLACK_LIST:\n delete_ip(ip)\n\n except Exception as e:\n logging.info(e)", "def delay_checks(self):\n return False", "def Daysleftverification():\n pass", "def check_vulnerability(self):\n\t\tpass", "def checkTokenTime(func):\n def wrapper(*args, **kwargs):\n config = s.query(Config).first()\n time_left = config.LastAuthDateUTC + (config.ExpiredToken * 1000) - int(datetime.datetime.now().timestamp() * 1000)\n if time_left < 10: # give 10 seconds grace\n Issuer.updateToken(Issuer)\n return func(*args, **kwargs)\n return wrapper", "def checkIERS(warn_update=14*u.day):\n \n try:\n \n currentTime = Time.now()\n table = iers.IERS_Auto.open()\n index_of_last_observation = ''.join(table['PolPMFlag_A']).index('IP')\n time_of_last_observation = Time(table['MJD'][index_of_last_observation],format='mjd')\n time_since_last_update = Time.now() - time_of_last_observation\n \n if int(currentTime.mjd)*u.day not in iers.IERS_Auto.open()['MJD']:\n print(\"IERS tables are outdated! Downloading latest table...\")\n download_IERS_A()\n \n if warn_update < time_since_last_update:\n print(\"IERS tables are outdated! Downloading latest table...\")\n download_IERS_A()\n \n if int(currentTime.mjd)*u.day in iers.IERS_Auto.open()['MJD']:\n print(\"Latest IERS tables are present. 
Proceeding...\")\n except:\n print(\"Could not download latest IERS tables.\\n Rise and Set time will be error prone.\")", "def check_conditions(self,wanted_starting_time,wanted_diff=0):\n print(time.ctime())\n if self.count_bad_attempts >= utils.MAX_ATTEMPTS_ALLOWED:\n raise MyException(FAILURE)\n soup = self.get_main_soup()\n game = soup.find(attrs={'data-id':self.game_id})\n if ('live' not in game['class'] and game.find(class_=[re.compile(\"live\")]) is None)\\\n and self.check_once: #game is over and we tried at least once to test\n raise MyException(FAILURE)\n if game is None:\n self.count_bad_attempts += 1\n self.check_conditions(wanted_starting_time,wanted_diff)\n elif self.check_time(game,wanted_starting_time) and\\\n self.check_diff(game,wanted_diff,wanted_starting_time):\n raise MyException(SUCCESS)\n else:\n self.count_bad_attempts = 0\n self.scheduler.enter(DELAY,1,action=self.check_conditions,\n argument=(wanted_starting_time,wanted_diff))\n self.scheduler.run()", "def pdesk_check_fails(limit=None):\n # get list of tickets on first page\n pdata = pdesk_get_tickets()\n ticklist = pdata['rows']\n\n if limit:\n limit = int(limit)\n\n mdig = MoonDig(nslist=[])\n\n tcount = 0\n for tt in ticklist:\n if re.search(r'autosuspend failure', tt['subject'], re.I):\n tcount += 1\n if tt['status'] == 'OPEN' and tt['owner'] == '':\n # parse user/server from subject\n smat = re.match(r'^\\[Autosuspend failure\\] ([^ ]+) ([^ ]+)', tt['subject'], re.I)\n tuser = smat.group(1).strip()\n tserv = smat.group(2).strip()\n turi = 'https://systemtasks.inmotionhosting.com/cgi-bin/staff.cgi?do=ticket&cid=%d' % (tt['id'])\n\n # check user status on server\n tacct = get_acct_data(tuser, tserv)\n tmxers = None\n if tacct is None:\n tsus = C.GRN + 'ACCOUNT DOES NOT EXIST' + C.OFF\n else:\n if tacct['suspended']:\n tsus = C.GRN + tacct['suspendreason'] + C.OFF\n else:\n tsus = C.RED + \"NOT SUSPENDED\" + C.OFF\n tmxers = domain_mx_check(tacct['domain'])\n try:\n mip = socket.gethostbyname('mail.'+tacct['domain'])\n tmailip = C.WHT+'mail.'+tacct['domain']+' -> '+mip+C.OFF\n except:\n tmailip = C.YEL+'NXDOMAIN'+C.OFF\n\n try:\n tdomip = socket.gethostbyname(tacct['domain'])\n except:\n tdomip = '<???>'\n\n if tdomip == tacct['ip']:\n tip_match = C.RED+'POINTED LOCAL'+C.OFF+' == '+tacct['ip']\n else:\n tip_match = C.GRN+'POINTED EXTERNAL'+C.OFF+' != '+tacct['ip']\n\n # check nameserver delegation\n try:\n soa_dele = mdig.query(tacct['domain'], 'SOA')[4]\n except:\n soa_dele = None\n\n print(\"** [%s] %s@%s -- %s \" % (C.WHT+str(tt['id'])+C.OFF, C.YEL+tuser+C.OFF, C.CYN+tserv+C.OFF, tsus))\n if tacct:\n print(\" %s --> %s [%s]\" % (C.YEL+tacct['domain']+C.OFF, C.WHT+tdomip+C.OFF, tip_match))\n if soa_dele is None:\n print(\" IN SOA %s\" % (C.RED+'SRVFAIL'+C.OFF))\n elif re.search('(inmotionhosting.com|webhostinghub.com|servconfig.com)', soa_dele, re.I):\n print(\" IN SOA %s\" % (C.GRN + soa_dele + C.OFF))\n else:\n print(\" IN SOA %s\" % (C.RED + soa_dele + C.OFF))\n if tmxers:\n print(\" %s\" % (tmailip))\n for tmm in tmxers:\n print(\" IN MX %s %s\" % (C.WHT+str(tmm[0])+C.OFF, C.CYN+tmm[1]+C.OFF))\n print(\" <%s>\" % (turi))\n if tacct and not tacct['suspended']:\n print(\" fab -H %s suspend_user:%s\" % (tserv, tuser))\n print(\"\")\n\n if limit:\n browser_open(turi)\n if tcount >= limit:\n break", "def goodmorning(host):", "def is_invulnerable(self) -> bool:\n return self.invul_timer != 0", "def pass_good_until(hours_good=config.HOURS_TO_GRANT_ACCESS, offset=0):\n pass_time = datetime.now() + 
timedelta(hours=hours_good)\n if offset > 0:\n pass_time = pass_time - timedelta(hours=offset)\n return pass_time", "def _check_if_statistics_calculation_is_needed():\n expiration_date = datetime.datetime.now(tz=datetime.timezone.utc) - datetime.timedelta(\n seconds=UploadHandler.EXPIRATION_TIME_IN_SECONDS)\n not_expired_data = UnprocessedData.objects.filter(uploaded_at__gte=expiration_date)\n sites_of_not_expired_data = not_expired_data.values_list('site_id', flat=True).distinct()\n all_sites = UnprocessedData.objects.filter(uploaded_at__lte=expiration_date).values_list('site_id',\n flat=True).distinct()\n for s in all_sites:\n if s not in sites_of_not_expired_data:\n from_date = UnprocessedData.objects.filter(site_id_id=s).order_by('from_date')[0].from_date\n to_date = UnprocessedData.objects.filter(site_id_id=s).order_by('-to_date')[0].to_date\n logger.info(\"should create stats for {} from {} to {}\".format(s, from_date, to_date))\n site_obj = get_object_or_404(Site, pk=s)\n UploadHandler.create_statistics(site=site_obj, from_date=from_date, to_date=to_date)\n UnprocessedData.objects.filter(site_id_id=s).delete()\n\n if len(sites_of_not_expired_data):\n Timer(UploadHandler.INTERVAL, UploadHandler._check_if_statistics_calculation_is_needed).start()\n else:\n UploadHandler.is_interval_running = False", "def precheck(self):\n # making sure it's a time for pull, otherwise just sleep\n if datetime.now() < self.startTime + timedelta(hours=int(self.newsFrequency)):\n logging.info(\"Didn't reach time to wakeup yet, going to sleep\")\n self.sleep()" ]
[ "0.6524886", "0.61418", "0.6007292", "0.5943196", "0.58323187", "0.5762643", "0.57544595", "0.5733186", "0.5719667", "0.56494826", "0.56424487", "0.5639762", "0.56145597", "0.56025016", "0.557354", "0.5539113", "0.55334616", "0.55320907", "0.5497794", "0.54934573", "0.5474352", "0.5457305", "0.54507405", "0.5442637", "0.5436399", "0.54256546", "0.5404", "0.53642946", "0.5349683", "0.53495085" ]
0.6332263
1
Updates all content fields of hacks executed by a specific user.
async def update_hacks_content(self, attacker_id: int) -> None:

    mycursor, db = await the_database()
    await mycursor.execute("UPDATE SlothSkills SET content = 'virus' WHERE user_id = %s", (attacker_id,))
    await db.commit()
    await mycursor.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def update_user_is_hacked(self, user_id: int, hacked: int) -> None:\n\n mycursor, db = await the_database()\n await mycursor.execute(\"UPDATE UserCurrency SET hacked = %s WHERE user_id = %s\", (hacked, user_id))\n await db.commit()\n await mycursor.close()", "def update_user():", "def update(self, user: U) -> None:\n ...", "def update_user():\n #TODO user update \n pass", "def modify_user(user_data):\r\n raise NotImplementedError()", "async def check_hacks(self) -> None:\n\n hacks = await self.get_expired_hacks()\n for h in hacks:\n await self.delete_skill_action_by_target_id_and_skill_type(h[3], 'hack')\n await self.update_user_is_hacked(h[3], 0)\n\n channel = self.bots_txt\n\n await channel.send(\n content=f\"<@{h[0]}>\",\n embed=discord.Embed(\n description=f\"**<@{h[3]}> updated his firewall so <@{h[0]}>'s hacking has no effect anymore! 💻**\",\n color=discord.Color.red()))", "def update_entry_curator(self, soft_id, user_id, connection):\n print('updating node ' + str(soft_id) + ' with curator ' + str(user_id))\n http = get_web_service(connection)\n\n # only send the changed part of the entry\n data = {'field_has_entry_curator': [{'target_id': user_id}],\n 'type': [{'target_id': 'software'}]}\n # print(json.dumps(data, indent=4))\n encoded_entry = json.dumps(data).encode('utf-8')\n req_update = http.request('PATCH', connection[\"url\"] + '/node/' + str(soft_id) + '?_format=json',\n body=encoded_entry)\n if not 'OK' in req_update.reason:\n print('!! error when updating curator of node ' + soft_id)", "def update_user_request(self, request):\n # enforce a default arg type, or check incoming?\n meta = json.dumps(request['request_meta'])\n USER_META = \"\"\"\n UPDATE user_request\n SET\n request_meta = :metadata,\n notes = :notes\n WHERE id = :id\n RETURNING id\n \"\"\"\n try:\n result = self.engine.execute(text(USER_META),{'metadata': meta, 'notes': request['notes'], 'id': request['id']})\n except Exception as e:\n logger.info(f\"Error updating user request {request['id']}: {e}\")\n return False\n return True", "def test_patch_user(self):\n pass", "def view_update_user(self, user, username, password):\r\n user.realm._checker.passwd(username, password, True)", "async def check_hacks(self) -> None:\n\n hacks = await self.get_expired_hacks()\n for h in hacks:\n await self.delete_skill_action_by_target_id_and_skill_type(h[3], 'hack')\n\n channel = self.bots_txt\n\n await channel.send(\n content=f\"<@{h[0]}>\",\n embed=discord.Embed(\n description=f\"**<@{h[3]}> updated his firewall so <@{h[0]}>'s hacking has no effect anymore! 
💻**\",\n color=discord.Color.red()))", "def update_user(self, user):\n query = TABELLE['id_users']['update']\n return self.execute(query,\n (user['admin'], user['tester'], user['loot_user'], user['loot_admin'], user['banned'],\n user['id']))", "def update(challenge, request):\n challenge.name = request.form['name']\n challenge.description = request.form['description']\n challenge.value = int(request.form.get('value', 0)) if request.form.get('value', 0) else 0\n challenge.max_attempts = int(request.form.get('max_attempts', 0)) if request.form.get('max_attempts', 0) else 0\n challenge.unlock_at = int(request.form.get('unlock_at', 0)) if request.form.get('unlock_at', 0) else 0\n challenge.category = request.form['category']\n challenge.hidden = 'hidden' in request.form\n db.session.commit()\n db.session.close()", "async def set_mod(request: Request, user: User) -> Message:\n user_id = user.user_id\n conn: Connection = request.state.db_conn\n async with conn.transaction():\n user_state = await conn.fetchrow(\"SELECT is_mod FROM users WHERE user_id = $1\", user_id)\n if user_state is None:\n return Message(message=f\"User with user_id {user_id} does not exist.\")\n elif user_state['is_mod']:\n return Message(message=f\"User with user_id {user_id} is already a mod.\")\n\n await conn.execute(\"UPDATE users SET is_mod = true WHERE user_id = $1\", user_id)\n return Message(message=f\"Successfully set user with user_id {user_id} to mod.\")", "def update_user_metrics(self,user_id:int)->None:\n with connection.cursor() as cursor:\n cursor.execute(f\"SELECT update_user_metrics({user_id})\")\n ##TODO: this should return something ", "def put_in_all_user_data(user: dict):\n all_user_data[user.id] = user", "def rescore(self, user=None):\r\n if user:\r\n self.q(css='input[id^=sd_fu_]').first.fill(user)\r\n self.q(css='section.staff-modal a#staff-debug-rescore').click()", "def user_edit(request):\n DEBUG = False\n\n if not has_permission('editUser', request.context, request):\n #print \"NOT has_permission !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\"\n request.message = \"You do not have permissions to edit this user!\"\n raise HTTPForbidden\n\n # if no user_id in URL and not logged in, tell user to login\n\n try:\n user_id = request.matchdict['user_id']\n except KeyError, ke:\n #print ke\n return HTTPFound(location=request.route_url('not_found'))\n\n user = User.get_by_user_id(user_id)\n\n if user is None:\n msg = \"User was not founf in database.\"\n return HTTPFound(location=request.route_url('not_found'))\n\n form = Form(request, schema=UserSettingsSchema, obj=user)\n\n if 'form.submitted' in request.POST and not form.validate():\n # form didn't validate\n request.session.flash('Please check the form below for errors!')\n if DEBUG: # pragma: no cover\n print \"submitted but not validated!\"\n\n if 'form.submitted' in request.POST and form.validate():\n # ready for changing database entries!\n request.session.flash('form validated!')\n if DEBUG: # pragma: no cover\n print \"the form was submitted and validated.\"\n\n if form.data['surname'] != user.surname:\n if DEBUG: # pragma: no cover\n request.session.flash('surname was not same --> changing')\n print \"changing surname\"\n user.surname = form.data['surname']\n if form.data['lastname'] != user.lastname:\n if DEBUG: # pragma: no cover\n request.session.flash('lastname was not same --> changing')\n print \"changing lastname\"\n user.lastname = form.data['lastname']\n if form.data['email'] != user.email:\n request.session.flash('email was not same --> 
changing')\n user.email = form.data['email']\n if form.data['phone'] != user.phone:\n request.session.flash('phone was not same --> changing')\n user.phone = form.data['phone']\n if form.data['fax'] != user.fax:\n request.session.flash('fax was not same --> changing')\n user.fax = form.data['fax']\n if form.data['street'] != user.street:\n request.session.flash('street was not same --> changing')\n user.street = form.data['street']\n if form.data['number'] != user.number:\n request.session.flash('number was not same --> changing')\n user.number = form.data['number']\n if form.data['city'] != user.city:\n request.session.flash('city was not same --> changing')\n user.city = form.data['city']\n if form.data['postcode'] != user.postcode:\n request.session.flash('postcode was not same --> changing')\n user.postcode = form.data['postcode']\n if form.data['country'] != user.country:\n request.session.flash('country was not same --> changing')\n user.country = form.data['country']\n\n if DEBUG: # pragma: no cover\n print \"returning the form\"\n return {\n 'the_user_id': user_id,\n 'the_username': user.username,\n 'form': FormRenderer(form),\n }", "def put(self, user_id):\r\n return update_user(request, user_id)", "def user_data(update, context):\n text = html.escape(str(context.user_data))\n if context.args and context.args[0] == 'clear' and len(context.args) > 1:\n context.user_data.pop(' '.join(context.args[1:]), None)\n send(text, update, context)", "def save_with_metadata(self, user):\r\n self.save()\r\n self.runtime.modulestore.update_item(self, user.id if user else None)", "def _fill_user_entries(self):\n # <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>\n # For every enabled verification parameter, set its value in its corresponding entry.\n for param in self.verify_params.enabled:\n self._fill_user_entry(self.computer, param)", "def update_user(id):\n pass", "def put(self, user_id):\n\n user_data, error = user_schema.load(api.payload['data'])\n\n user = User.objects.get_or_404(public_id=user_id)\n user.update(updated_at=datetime.utcnow, **user_data)\n \n return user_schema.dump(user)", "def user_changes(self, user, what=None):\n pass", "def test_040_update_user(self):\n\n testflow.step(\"Updating user %s\", TEST_USER2)\n assert USER_CLI.run(\n 'edit',\n TEST_USER2,\n attribute='firstName=userX2',\n )[0]", "def update_content(self):\n raise NotImplementedError", "def view_update_user(self, user, new_pw, old_pw):\r\n user.realm._checker.passwd(user.userID, new_pw, old_pw)", "def updateContent(content, **kwargs):", "def user_edit(request):\n\n if request.method != 'POST':\n return HttpResponseNotAllowed(['POST'])\n\n data = json.loads(request.body.decode('utf-8'))\n\n auth_token = str(data.get('auth_token', ''))\n edit_user_info = data.get('edit_user_info', '')\n username = str(edit_user_info.get('username', ''))\n\n try:\n if not verify_admin(auth_token):\n raise PlantalyticsAuthException(ADMIN_INVALID)\n\n message = (\n 'Attempting to edit info for user: {}.'\n ).format(username)\n logger.info(message)\n check_user_parameters(edit_user_info)\n cassy.edit_user(edit_user_info)\n message = (\n 'Successfully edited info for user: {}.'\n ).format(username)\n logger.info(message)\n body = {\n 'errors': {}\n }\n return HttpResponse(\n json.dumps(body),\n content_type='application/json'\n )\n except PlantalyticsException as e:\n message = (\n 'Error attempting to edit user info. 
Error code: {}'\n ).format(str(e))\n logger.warn(message)\n error = custom_error(str(e))\n return HttpResponseForbidden(error, content_type='application/json')\n except Exception as e:\n message = (\n 'Unknown error occurred while attempting to edit user info:'\n )\n logger.exception(message)\n error = custom_error(UNKNOWN, str(e))\n return HttpResponseServerError(error, content_type='application/json')" ]
[ "0.6449547", "0.5994832", "0.5917885", "0.59126174", "0.574175", "0.5576456", "0.53066003", "0.5300011", "0.5268399", "0.5226327", "0.5225666", "0.5216887", "0.52048266", "0.51805025", "0.5174077", "0.5142172", "0.5128559", "0.5101558", "0.5093439", "0.50742656", "0.5071988", "0.5060375", "0.5044387", "0.5044284", "0.5030198", "0.5023069", "0.5022973", "0.5009303", "0.5002853", "0.49981663" ]
0.65136653
0
Checks if the target member has a contagious virus in their hack.
async def check_virus(self, ctx: commands.Context, target: discord.Member) -> None:

    answer: discord.PartialMessageable = None
    if isinstance(ctx, commands.Context):
        answer = ctx.send
    else:
        answer = ctx.respond

    infected = ctx.author

    hack = await self.get_skill_action_by_target_id_and_skill_type(target.id, skill_type='hack')
    if hack[8] != 'virus' or hack[0] == infected.id:
        return

    effects = await self.get_user_effects(infected)
    if 'hacked' in effects:
        return

    if 'protected' in effects:
        return

    if 'reflect' in effects:
        attacker = await discord.utils.get(ctx.guild.members, id=hack[0])
        await self.reflect_attack(ctx, attacker, target, 'hack')

    try:
        current_timestamp = await utils.get_timestamp()
        # Don't need to store it, since it is forever
        await self.insert_skill_action(
            user_id=hack[0], skill_type="hack", skill_timestamp=current_timestamp,
            target_id=infected.id, channel_id=ctx.channel.id, content="virus"
        )
    except Exception as e:
        print('Failed virus', e)
    else:
        virus_embed = await self.get_virus_embed(
            channel=ctx.channel, perpetrator_id=hack[0], target_id=target.id, infected_id=infected.id)
        await answer(embed=virus_embed)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_vulnerability(self):\n\t\tpass", "def basic_check(word):\n if word[-1] == \"b\" or word[-1] == \"g\":\n return False\n consonant_counter = 0\n for char in word:\n if char in VOWELS:\n consonant_counter = 0\n else:\n consonant_counter += 1\n if consonant_counter >= 3:\n return False\n return True", "def is_virus(taxid):\n return taxid and taxid != '1' and (taxid == '10239' or is_virus(parents[taxid]))", "def malicious(self):\n return self.probably_malicious", "def is_vulnerable(self, data=None, proxies=None):\n raise NotImplementedError()", "def avoid_instr_is_valid(bv: BinaryView, addr: int):\n return addr not in bv.session_data.mui_avoid", "def DoMaliciousThings():\r\n\tprint(\"You are infected\")", "def check_antivirus():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<request><anti-virus><upgrade><check></check></upgrade></anti-virus></request>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def verify():", "def is_attack(self):\n\n return self.purpose == 'attack'", "def valid(self, target):", "async def hack(self, ctx, target: discord.Member = None) -> None:\n\n attacker = ctx.author\n\n if ctx.channel.id != bots_and_commands_channel_id:\n return await ctx.send(f\"**{attacker.mention}, you can only use this command in {self.bots_txt.mention}!**\")\n\n attacker_fx = await self.get_user_effects(attacker)\n\n if 'knocked_out' in attacker_fx:\n return await ctx.send(f\"**{attacker.mention}, you can't use your skill, because you are knocked-out!**\")\n\n if not target:\n return await ctx.send(f\"**Please, inform a target member, {attacker.mention}!**\")\n\n if attacker.id == target.id:\n return await ctx.send(f\"**{attacker.mention}, you cannot hack yourself!**\")\n\n if target.bot:\n return await ctx.send(f\"**{attacker.mention}, you cannot hack a bot!**\")\n\n target_sloth_profile = await self.get_sloth_profile(target.id)\n if not target_sloth_profile:\n return await ctx.send(f\"**You cannot hack someone who doesn't have an account, {attacker.mention}!**\")\n\n if target_sloth_profile[1] == 'default':\n return await ctx.send(f\"**You cannot hack someone who has a `default` Sloth class, {attacker.mention}!**\")\n\n target_fx = await self.get_user_effects(target)\n\n if 'protected' in target_fx:\n return await ctx.send(f\"**{attacker.mention}, {target.mention} is protected, you can't hack them!**\")\n\n if 'hacked' in target_fx:\n return await ctx.send(f\"**{attacker.mention}, {target.mention} is already hacked!**\")\n\n confirmed = await ConfirmSkill(f\"**{attacker.mention}, are you sure you want to hack {target.mention}?**\").prompt(ctx)\n if not confirmed:\n return await ctx.send(\"**Not hacking them, then!**\")\n\n if ctx.invoked_with == 'mirror':\n mirrored_skill = await self.get_skill_action_by_user_id_and_skill_type(user_id=attacker.id, skill_type='mirror')\n if not mirrored_skill:\n return await ctx.send(f\"**Something went wrong with this, {attacker.mention}!**\")\n else:\n _, exists = await Player.skill_on_cooldown(skill=Skill.ONE).predicate(ctx)\n\n try:\n current_timestamp = await utils.get_timestamp()\n # Don't need to store it, since it is forever\n await self.insert_skill_action(\n user_id=attacker.id, skill_type=\"hack\", skill_timestamp=current_timestamp,\n target_id=target.id, channel_id=ctx.channel.id\n )\n if ctx.invoked_with != 'mirror':\n if exists:\n await self.update_user_skill_ts(attacker.id, Skill.ONE, current_timestamp)\n else:\n await self.insert_user_skill_cooldown(attacker.id, Skill.ONE, current_timestamp)\n # Updates user's skills used 
counter\n await self.update_user_skills_used(user_id=attacker.id)\n hack_embed = await self.get_hack_embed(\n channel=ctx.channel, perpetrator_id=attacker.id, target_id=target.id)\n await ctx.send(embed=hack_embed)\n except Exception as e:\n print(e)\n return await ctx.send(f\"**Something went wrong and your `Hack` skill failed, {attacker.mention}!**\")\n else:\n if 'reflect' in target_fx:\n await self.reflect_attack(ctx, attacker, target, 'hack')", "def is_invulnerable(self) -> bool:\n return self.invul_timer != 0", "def verify_hack_key(self):\r\n\t\tself.percent_english = Dict_Control(self.my_code).check_key()\r\n\t\t#If more than half the words are english, the key will pass. \r\n\t\tif self.percent_english > 50:\r\n\t\t\tself.hack_plausible = True", "def violated(self) -> bool:\n ...", "def IsRegular(info):\n return (info.external_attr >> 28) == 010", "def unknown(self, w):\n\n return w not in self._palabrasvistas", "def verify_privileged(self):\n community_text = self.fetch(self.base_url + \"/community\")\n return \"You must be logged in to see this page.\" not in community_text", "def can_target(name):\n return False", "def _check_for_passively_detected_failures(self, target):\n return self._check_procmon_failures(target=target)", "def check():", "def test_verifyDamaged(self):\n self.testObject.content.setContent('garbage!')\n self.assertRaises(CorruptObject, self.testObject.verify)", "def ConfirmAllowedCopyrightHolder(holder):\n return holder in ALLOWED_COPYRIGHT_HOLDERS", "async def hack(self, ctx, target: discord.Member = None) -> None:\n\n attacker = ctx.author\n\n if ctx.channel.id != bots_and_commands_channel_id:\n return await ctx.send(f\"**{attacker.mention}, you can only use this command in {self.bots_txt.mention}!**\")\n\n if await self.is_user_knocked_out(attacker.id):\n return await ctx.send(f\"**{attacker.mention}, you can't use your skill, because you are knocked-out!**\")\n\n if not target:\n return await ctx.send(f\"**Please, inform a target member, {attacker.mention}!**\")\n\n if attacker.id == target.id:\n return await ctx.send(f\"**{attacker.mention}, you cannot hack yourself!**\")\n\n if target.bot:\n return await ctx.send(f\"**{attacker.mention}, you cannot hack a bot!**\")\n\n target_currency = await self.get_user_currency(target.id)\n if not target_currency:\n return await ctx.send(f\"**You cannot hack someone who doesn't have an account, {attacker.mention}!**\")\n\n if target_currency[7] == 'default':\n return await ctx.send(f\"**You cannot hack someone who has a `default` Sloth class, {attacker.mention}!**\")\n\n if await self.is_user_protected(target.id):\n return await ctx.send(f\"**{attacker.mention}, {target.mention} is protected, you can't hack them!**\")\n\n if await self.is_user_hacked(target.id):\n return await ctx.send(f\"**{attacker.mention}, {target.mention} is already hacked!**\")\n\n confirmed = await ConfirmSkill(f\"**{attacker.mention}, are you sure you want to hack {target.mention}?**\").prompt(ctx)\n if not confirmed:\n return await ctx.send(\"**Not hacking them, then!**\")\n\n await self.check_cooldown(user_id=attacker.id, skill_number=1)\n\n try:\n current_timestamp = await self.get_timestamp()\n # Don't need to store it, since it is forever\n await self.update_user_is_hacked(target.id, 1)\n await self.insert_skill_action(\n user_id=attacker.id, skill_type=\"hack\", skill_timestamp=current_timestamp,\n target_id=target.id, channel_id=ctx.channel.id\n )\n await self.update_user_action_skill_ts(attacker.id, current_timestamp)\n # Updates 
user's skills used counter\n await self.update_user_skills_used(user_id=attacker.id)\n hack_embed = await self.get_hack_embed(\n channel=ctx.channel, perpetrator_id=attacker.id, target_id=target.id)\n msg = await ctx.send(embed=hack_embed)\n except Exception as e:\n print(e)\n return await ctx.send(f\"**Something went wrong and your `Hack` skill failed, {attacker.mention}!**\")", "def hasVeryTrustedValue(self):", "def testWithoutNoise(self):\n self.checkMatching(self.references)", "def is_protected(target_id: str) -> bool:\n inventories = get_file(\"inventories\")\n return inventories[target_id][\"shield_active\"]", "def find_instr_is_valid(bv: BinaryView, addr: int):\n return addr not in bv.session_data.mui_find", "def check():\n suspicious_telemarketers = get_suspicious_telemarketers(calls, texts)\n outgoing = set()\n non_tele = set()\n for c in calls:\n outgoing.add(c[0])\n non_tele.add(c[1])\n for t in texts:\n non_tele.add(t[0])\n non_tele.add(t[1])\n telemarketers = sorted(outgoing - non_tele)\n if len(suspicious_telemarketers) == len(telemarketers):\n print('Pass')", "def check_dispel_magic(player):\n if \"Dispel Magic\" in player.spell_to_cast:\n return True\n return False" ]
[ "0.67220134", "0.5530193", "0.5503171", "0.5468125", "0.538155", "0.53553593", "0.53199047", "0.5271036", "0.5269808", "0.52576876", "0.5244865", "0.52445847", "0.522711", "0.52121764", "0.5185404", "0.5182956", "0.51672363", "0.5136818", "0.512856", "0.511059", "0.51015955", "0.5077775", "0.50752807", "0.5071438", "0.506556", "0.5054419", "0.5029383", "0.5026879", "0.50170916", "0.50071675" ]
0.6997657
0
Makes an embedded message for a virus infection skill action.
async def get_virus_embed(self, channel: discord.TextChannel, perpetrator_id: int, target_id: int, infected_id: int) -> discord.Embed: timestamp = await utils.get_timestamp() wire_embed = discord.Embed( title="Someone has been infected by a hacking!", timestamp=datetime.fromtimestamp(timestamp) ) wire_embed.description = f"**<@{infected_id}> got infected by <@{perpetrator_id}>'s virus through <@{target_id}>!** ⚜️" wire_embed.color = discord.Color.green() wire_embed.set_image(url='https://media1.tenor.com/images/df4840a6e3ddd163fd5cef6d678a57aa/tenor.gif?itemid=9991524') wire_embed.set_thumbnail(url="https://thelanguagesloth.com/media/sloth_classes/Cybersloth.png") wire_embed.set_footer(text=channel.guild, icon_url=channel.guild.icon.url) return wire_embed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def check_virus(self, ctx: commands.Context, target: discord.Member) -> None:\n\n answer: discord.PartialMessageable = None\n if isinstance(ctx, commands.Context):\n answer = ctx.send\n else:\n answer = ctx.respond\n\n infected = ctx.author\n\n hack = await self.get_skill_action_by_target_id_and_skill_type(target.id, skill_type='hack')\n if hack[8] != 'virus' or hack[0] == infected.id:\n return\n\n effects = await self.get_user_effects(infected)\n if 'hacked' in effects:\n return\n\n if 'protected' in effects:\n return\n\n if 'reflect' in effects:\n attacker = await discord.utils.get(ctx.guild.members, id=hack[0])\n await self.reflect_attack(ctx, attacker, target, 'hack')\n \n try:\n current_timestamp = await utils.get_timestamp()\n # Don't need to store it, since it is forever\n await self.insert_skill_action(\n user_id=hack[0], skill_type=\"hack\", skill_timestamp=current_timestamp,\n target_id=infected.id, channel_id=ctx.channel.id, content=\"virus\"\n ) \n\n except Exception as e:\n print('Failed virus', e)\n else:\n virus_embed = await self.get_virus_embed(\n channel=ctx.channel, perpetrator_id=hack[0], target_id=target.id, infected_id=infected.id)\n await answer(embed=virus_embed)", "def DoMaliciousThings():\r\n\tprint(\"You are infected\")", "def embed():", "async def skill(self, ctx, *, skill: str):\n\n try:\n skill = self.get_entry('Skill', skill.lower())\n except RuntimeError as e:\n return await ctx.send(e)\n\n name = skill['Name']\n\n embed = discord.Embed(title=name)\n embed.set_thumbnail(url='attachment://skill.png')\n embed.add_field(name='Learned', value=skill['Class/Rank'], inline=False)\n embed.add_field(name='Effect', value=skill['Effect'])\n\n await ctx.send(file=discord.File(f'xenox/skills/{name}.png', 'skill.png'), embed=embed)", "def event1926():\n header(1926)\n\n if_player_has_special_effect(1, SPEFFECT.RuinousHand)\n if_has_tae_event(1, CHR.Player, 675)\n if_event_flag_on(1, 11025405)\n if_condition_true(0, 1)\n\n spawner.shoot_projectile(CHR.Player, projectile_entity_id=CHR.Player, damipoly_id=1, behavior_id=2000)\n sound.play_sound_effect(CHR.Player, SoundType.s_sfx, 90010) # Bonfire resting sound.\n chr.set_special_effect(CHR.Player, SPEFFECT.RuinousHandPayment) # Lose two humanity.\n flag.disable_chunk(11025401, 11025405)\n if_does_not_have_tae_event(0, CHR.Player, 675)\n\n if_player_has_good(2, GOOD.PaleEyeOrb)\n if_event_flag_off(2, EVENT.BeyondWitness)\n skip_if_condition_false(2, 2)\n flag.enable(EVENT.BeyondWitness)\n message.status_explanation(TEXT.PaleEyeOrbQuivers)\n\n restart()", "def horde_message(self, message):", "async def waifu(self, ctx):\n e = discord.Embed(title=\"Here is a waifu image for you {}.\".format(ctx.author.name), color=discord.Color.magenta())\n e.set_image(url=nekos.img('waifu'))\n await ctx.send(embed=e)", "def inv(self, command):\n\n side = '|'\n blank = 30 * \" \"\n line = 30 * \"-\"\n diff2 = 11 * \" \"\n if not self.inventory:\n #check to see if the inventory is empty and if there is a weapon equipped\n if not self.weapon:\n print(\"Your inventory is empty and you have nothing in your hands.\")\n else:\n print('Your inventory is empty and you have a ' + self.weapon[0].name + ' in your hands')\n else:\n print(\"{}{}{}\".format('+', line, '+'))\n print(\"{}{}{} {}{}\".format(side, (\" \" * 12), 'Items', (\" \" * 12), side))\n for item in self.inventory:\n diff = (30 - len(item.name)) * \" \"\n print(\"{}{}{}{}\".format(side, item.name, diff, side))\n for thing in self.weapon:\n diff3 = (30 - len(thing.name)) * \" \"\n 
print(\"{}{}{}\".format('+', line, '+'))\n print(\"{}{}{}{}{}\".format(side, diff2, 'Equipped', diff2, side))\n print(\"{}{}{}{}\".format(side, thing.name, diff3, side))\n print(\"{}{}{}\".format('+', line, '+'))", "def trig_code(self, bot, source, target, trigger, argument):\n\t\treturn \"Hello, I'm a pyirkbot based on pynik. My code https://github.com/blueCommand/pyirkbot For feature requests use https://github.com/blueCommand/pyirkbot/issues beer is good also\"", "async def uwu(self, ctx, *, message):\n uwus = ['UwU', 'xwx', 'DwD', 'ÚwÚ', 'uwu', '☆w☆', '✧w✧',\n '♥w♥', '︠uw ︠u', '(uwu)', 'OwO', 'owo', 'Owo', 'owO', '( ͡° ͜ʖ ͡°)']\n res = message.replace(\"r\", \"w\").replace(\n \"l\", \"w\").replace(\"L\", \"W\").replace(\"R\", \"W\")\n res = res.replace(\"the \", \"da \").replace(\n \"The \", \"Da \").replace(\"THE \", \"DA \")\n res = res.replace(\"th\", \"d\").replace(\"TH\", \"D\")\n res = res.replace(\"\\n\", \" \" + random.choice(uwus) + \"\\n\")\n # and send one \"as\" da usew who invoked da command ÚwÚ\n await ctx.send(f\"{res + ' ' + random.choice(uwus)}\")", "def on_action_triggered(self):\n # TODO: not implemented yet\n button=QMessageBox.about(self, '帮助','这只是个摆设23333')", "def msg(self, message, **kwargs):\n self.crafter.msg(message, {\"type\": \"crafting\"})", "def event1923():\n header(1923)\n end_if_this_event_on()\n if_player_has_special_effect(0, SPEFFECT.ExileSoulEffect)\n item.award_item_to_host_only(ITEMLOT.ExileSoulReward)", "def apology(message, code=400):\n def escape(s):\n \"\"\"\n Escape special characters.\n\n https://github.com/jacebrowning/memegen#special-characters\n \"\"\"\n for old, new in [(\"-\", \"--\"), (\" \", \"-\"), (\"_\", \"__\"), (\"?\", \"~q\"),\n (\"%\", \"~p\"), (\"#\", \"~h\"), (\"/\", \"~s\"), (\"\\\"\", \"''\")]:\n s = s.replace(old, new)\n return s\n return render_template(\"apology.html\", top=code, bottom=escape(message)), code", "def apology(message, code=400):\n def escape(s):\n \"\"\"\n Escape special characters.\n\n https://github.com/jacebrowning/memegen#special-characters\n \"\"\"\n for old, new in [(\"-\", \"--\"), (\" \", \"-\"), (\"_\", \"__\"), (\"?\", \"~q\"),\n (\"%\", \"~p\"), (\"#\", \"~h\"), (\"/\", \"~s\"), (\"\\\"\", \"''\")]:\n s = s.replace(old, new)\n return s\n return render_template(\"apology.html\", top=code, bottom=escape(message)), code", "def apology(message, code=400):\n def escape(s):\n \"\"\"\n Escape special characters.\n\n https://github.com/jacebrowning/memegen#special-characters\n \"\"\"\n for old, new in [(\"-\", \"--\"), (\" \", \"-\"), (\"_\", \"__\"), (\"?\", \"~q\"),\n (\"%\", \"~p\"), (\"#\", \"~h\"), (\"/\", \"~s\"), (\"\\\"\", \"''\")]:\n s = s.replace(old, new)\n return s\n return render_template(\"apology.html\", top=code, bottom=escape(message)), code", "def apology(message, code=400):\n def escape(s):\n \"\"\"\n Escape special characters.\n\n https://github.com/jacebrowning/memegen#special-characters\n \"\"\"\n for old, new in [(\"-\", \"--\"), (\" \", \"-\"), (\"_\", \"__\"), (\"?\", \"~q\"),\n (\"%\", \"~p\"), (\"#\", \"~h\"), (\"/\", \"~s\"), (\"\\\"\", \"''\")]:\n s = s.replace(old, new)\n return s\n return render_template(\"apology.html\", top=code, bottom=escape(message)), code", "def apology(message, code=400):\n def escape(s):\n \"\"\"\n Escape special characters.\n\n https://github.com/jacebrowning/memegen#special-characters\n \"\"\"\n for old, new in [(\"-\", \"--\"), (\" \", \"-\"), (\"_\", \"__\"), (\"?\", \"~q\"),\n (\"%\", \"~p\"), (\"#\", \"~h\"), (\"/\", \"~s\"), (\"\\\"\", 
\"''\")]:\n s = s.replace(old, new)\n return s\n return render_template(\"apology.html\", top=code, bottom=escape(message)), code", "def apology(message, code=400):\n def escape(s):\n \"\"\"\n Escape special characters.\n\n https://github.com/jacebrowning/memegen#special-characters\n \"\"\"\n for old, new in [(\"-\", \"--\"), (\" \", \"-\"), (\"_\", \"__\"), (\"?\", \"~q\"),\n (\"%\", \"~p\"), (\"#\", \"~h\"), (\"/\", \"~s\"), (\"\\\"\", \"''\")]:\n s = s.replace(old, new)\n return s\n return render_template(\"apology.html\", top=code, bottom=escape(message)), code", "async def _aki(self, ctx):\r\n embed = discord.Embed(\r\n title=\":black_cat: Aki has come to see you!\", color=15383739\r\n )\r\n embed.set_image(url=choice(self.akiimage))\r\n await ctx.send(embed=embed)", "async def Codeblock(self, context,message):\n\t\tembed = discord.Embed(\n\t\t\tdescription=\"```\"+ message + \"```\",\n\t\t\tcolor=config[\"success\"]\n\t\t)\n\t\tawait context.send(embed=embed)", "def generate_msg(props, alert=False, user_pref=None, past=False):\n\t\tmessage = emojize(\":rocket:\", use_aliases=True)\n\t\tif past:\n\t\t\tmessage += ' Launch was held on: ' + props['when'].format('YYYY-MM-DD HH:mm:ss ZZ') + '.\\n'\n\t\t\tif props['holdreason']:\n\t\t\t\tmessage += 'The launch has been *held*. Reason: ' + props['holdreason'] + '\\n'\n\t\t\tif props['failreason']:\n\t\t\t\tmessage += 'Unfortunately, the launch *failed*. Reason: ' + props['failreason'] + '\\n'\n\t\telse:\n\t\t\tif alert:\n\t\t\t\tmessage += ' *Launch is going to happen in some minutes!* '\n\t\tmessage += ' *' + props['name'] + '*' + '\\n'\n\n\t\tif not alert and not past:\n\t\t\tmessage += 'A launch will happen _' + props['when'].humanize() + '_! \\n'\n\t\t\tmessage += 'I mean ' + props['when'].format('YYYY-MM-DD HH:mm:ss ZZ') + '\\n'\n\n\t\tif past:\n\t\t\tmessage += 'Taken from *'\n\t\telse:\n\t\t\tmessage += 'Taking from *'\n\n\t\tmessage += props['location'] + '*.\\n'\n\t\tdescr = Interface.generate_description(props['missions'])\n\t\tmessage += '*Mission description*\\n' + descr + '\\n' if descr else ''\n\t\tmessage += '\\n'\n\n\t\tif props['urls']:\n\t\t\tmessage += 'Watch it here: \\n' if not past else 'You could have watched it here: \\n'\n\t\t\tfor url in props['urls']:\n\t\t\t\tmessage += ' • [' + url + '](' + url +')\\n'\n\t\telse:\n\t\t\tmessage += 'Unfortunately there '\n\t\t\tmessage += 'are' if not past else 'were'\n\t\t\tmessage += ' no reported webcasts ' \\\n\t\t\t\t\t + emojize(':disappointed_relieved:', use_aliases=True)\n\n\t\treturn message", "async def say(self, ctx, *args):\n if not args:\n await ctx.send('did you want me to say something?')\n return\n message = ' '.join(args)\n message = profanity_filter(message)\n await ctx.send(message)", "def create_action_msg(self, action):\n raise NotImplementedError(\"Don't know how to translate the action to a msg\")", "def ire_imperfect_quiz(verb, pronoun):\n\n return functions.conjugate_imperfect_ire_verb(verb, pronoun, \"imperfetto\")", "def tr(self, message):\n # noinspection PyTypeChecker,PyArgumentList,PyCallByClass\n return QCoreApplication.translate('Isovist', message)", "async def tapir(self):\n tapir_list = self.config.get('tapirs', [])\n tapir = tapir_list[random.randrange(len(tapir_list))]\n try:\n await self.bot.say(tapir)\n except:\n await self.bot.whisper(tapir)", "def ere_imperfect_quiz(verb, pronoun):\n\n return functions.conjugate_imperfect_ere_verb(verb, pronoun, \"imperfetto\")", "async def embed(self, context,message):\n\t\tembed = 
discord.Embed(\n\t\t\tdescription=message,\n\t\t\tcolor=config[\"success\"]\n\t\t)\n\t\tawait context.send(embed=embed)", "def get_user_input(self, game, hand, message, allowed_actions):\n return 'hit'" ]
[ "0.61588365", "0.52967834", "0.5241235", "0.52001595", "0.505561", "0.5049495", "0.50357", "0.50017184", "0.49742955", "0.49708763", "0.49689567", "0.49649853", "0.49194732", "0.49093622", "0.49093622", "0.49093622", "0.49093622", "0.49093622", "0.49093622", "0.4882686", "0.48550507", "0.4847321", "0.4845111", "0.48407584", "0.48365656", "0.48135436", "0.48088905", "0.48084852", "0.4798255", "0.4790452" ]
0.5607754
1
Utility function loading an image, converting it into greyscale, and performing histogram equalization
def load_and_preprocess_image(path):
    img = cv2.imread(path, 0)  # Load image in greyscale
    img = cv2.equalizeHist(img)  # Histogram equalization
    return img
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def histo_image(image, verbose=False):\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n histo_global = cv2.equalizeHist(gray)\n\n _, histo = cv2.threshold(histo_global, thresh=250,\n maxval=255, type=cv2.THRESH_BINARY)\n\n if verbose:\n plt.imshow(histo, cmap='gray')\n plt.show()\n\n return histo", "def cs4243_histequ(image, grey_level=256):\n ###your code here####\n \n # get the original histogram\n x, y = image.shape\n hist = [0] * grey_level\n for i in range(x):\n for j in range(y):\n hist[image[i, j]]+=1\n ori_hist = hist\n \n # get the cumulative distribution function (CDF) normalised to image size\n cum_hist = [sum(ori_hist[:i+1]) for i in range(len(ori_hist))]\n cum_hist = np.array(cum_hist) / (x*y)\n \n # get the uniform histogram from normalised CDF\n uniform_hist = np.uint8((grey_level-1) * cum_hist)\n \n ###\n\n # Set the intensity of the pixel in the raw image to its corresponding new intensity \n height, width = image.shape\n res_image = np.zeros(image.shape, dtype='uint8') # Note the type of elements\n for i in range(height):\n for j in range(width):\n res_image[i,j] = uniform_hist[image[i,j]]\n \n uni_hist = np.bincount(res_image.flatten(), minlength=grey_level)\n return ori_hist, cum_hist, res_image, uni_hist", "def img_histogram(img):\n\n plt.figure()\n\n if len(img.shape) > 2:\n\n plt.subplot(3,1,1)\n plt.hist(img[:,:,0].ravel(),bins=range(257),color='b')\n plt.title('Image Histogram')\n plt.legend('Blue')\n plt.xlabel('Pixel Values')\n plt.ylabel('Frequency')\n\n plt.subplot(3,1,2)\n plt.hist(img[:,:,1].ravel(),bins=range(257),color='g')\n plt.legend('Green')\n plt.xlabel('Pixel Values')\n plt.ylabel('Frequency')\n\n plt.subplot(3,1,3)\n plt.hist(img[:,:,2].ravel(),bins=range(257),color='r')\n plt.legend('Red')\n plt.xlabel('Pixel Values')\n plt.ylabel('Frequency')\n\n plt.ion()\n plt.show()\n\n else:\n\n plt.hist(img[:,:].ravel(),bins=range(257))\n plt.title('Image Histogram - Grayscale')\n plt.xlabel('Pixel Values')\n plt.ylabel('Frequency')\n\n plt.ion()\n plt.show()", "def get_histogram(folder_name, image_name, save_location):\n print(\"Getting histogram for:\" + str(folder_name) + '/' + str(image_name))\n image = cv2.imread(folder_name + '/' + image_name, cv2.IMREAD_ANYDEPTH)\n plt.hist(image.ravel(), 256, [0, 65535])\n plt.xlabel('Pixel Intensity')\n plt.ylabel('Number of pixels')\n plt.title('Histogram of normalised reference image. 
Overnight2')\n plt.savefig(save_location + 'histogram.png')\n plt.savefig(save_location + 'histogram.eps', format='eps')\n # plt.show()", "def cs4243_histnorm(image, grey_level=256):\n res_image = image.copy()\n ##your code here ###\n min_pixel = np.amin(res_image)\n max_pixel = np.amax(res_image)\n res_image = (res_image - min_pixel) / (max_pixel - min_pixel) * (grey_level-1)\n ####\n return res_image", "def compute_histogram(self, image):\n\n hist = [0] * 256\n x, y = image.shape[:2]\n #print(image.shape)\n for i in range(x):\n for j in range(y):\n hist[image[i, j]] += 1\n\n return hist", "def OF1_CalculateRawHistogram(image):\n h = np.zeros(256, np.float_)\n for i in np.nditer(image):\n h[i - 1] = h[i - 1] + 1\n\n return h", "def compute_histogram(self, image):\n hist = [0] * 256\n [h, w] = image.shape\n print(h,w)\n i = 0\n while i < 256:\n for row in range(h):\n for col in range(w):\n if image[row, col] == i:\n hist[i] += 1\n #print(hist[i])\n i += 1\n\n return hist", "def compute_histogram(self, image):\n\n # in-built function to calculate histogram\n print(\"size of image: \", np.shape(image))\n print(\"number of pixels: \", np.shape(image)[0] * np.shape(image)[1])\n # hist1 = np.ravel(cv2.calcHist([image], [0], None, [256], [0, 256]))\n # hist = np.ravel(cv2.calcHist([image], [0], None, [256], [0, 256]))\n\n # created function to calculate histogram\n hist = np.zeros(256)\n [rows, columns] = np.shape(image)\n for k in range(256):\n count = 0\n for i in range(rows):\n for j in range(columns):\n if image[i, j] == k:\n count = count + 1\n hist[k] = count\n\n # print(\"Check if histogram is same: \", np.array_equal(hist, hist1))\n\n return hist", "def histogram(img):\n BINS = 8\n RANGE = np.tile(np.array([0, 255]), (3, 1))\n\n # histogram of the first image\n r = np.ravel(img[:, :, 0])\n g = np.ravel(img[:, :, 1])\n b = np.ravel(img[:, :, 2])\n hist, endpoints = np.histogramdd([r, g, b], bins = BINS, range = RANGE)\n\n # normalize the images\n return hist/np.sum(hist)", "def processhed(imagefile, algorithm):\n image = plt.imread(StringIO.StringIO(imagefile), format=\"JPG\")\n\n ihc_hed = rgb2hed(image)\n\n if algorithm == '01':\n result = plt.cm.gray(rescale_intensity(ihc_hed[:, :, 0],\n out_range=(0, 1)))\n elif algorithm == '02':\n result = plt.cm.gray(rescale_intensity(ihc_hed[:, :, 1],\n out_range=(0, 1)))\n elif algorithm == '03':\n result = plt.cm.gray(rescale_intensity(ihc_hed[:, :, 2],\n out_range=(0, 1)))\n else:\n result = image\n\n output = StringIO.StringIO()\n plt.imsave(output, result, format=\"PNG\")\n contents = output.getvalue()\n output.close()\n\n return contents", "def histograma(p):\n img = read_img(p)\n show_histograma(img.reshape((-1)))", "def preprocess_image(image):\n\n tmp_img = cv2.GaussianBlur(image, (3, 3), 5)\n img = cv2.addWeighted(image, 1.5, tmp_img, -0.5, 0)\n img = cv2.equalizeHist(img)\n return img", "def hist_equ(raw_img=None, file_name=None):\n\n if raw_img is None:\n raw_img = cv.imread(file_name, cv.IMREAD_GRAYSCALE)\n\n norm = Normalize(vmin=0, vmax=255)\n L = 2 ** 8\n bins = range(L + 1)\n # row, col = raw_img.shape\n\n # input_hist = np.zeros(L, int)\n # for i in raw_img.flat:\n # input_hist[i] += 1\n\n # input_hist = histogram(raw_img)\n input_hist, _ = np.histogram(raw_img.flat, bins=bins, density=True)\n # print(file_name, 'raw', np.count_nonzero(input_hist))\n\n # s = np.zeros(L, int)\n # for k in range(L):\n # s[k] = (L - 1) * sum(input_hist[:k + 1])\n\n s = np.array([(L - 1) * sum(input_hist[:k + 1]) for k in range(L)])\n\n out_img = 
np.array([s[r] for r in raw_img], int).reshape(raw_img.shape)\n # output_hist = histogram(out_img)\n output_hist, _ = np.histogram(out_img.flat, bins=bins, density=True)\n # print(file_name, 'equalized', np.count_nonzero(output_hist))\n\n # %% plots\n '''\n plt.subplot(121)\n plt.imshow(raw_img, cmap='gray', norm=norm)\n plt.title(\"Raw \" + file_name)\n\n plt.subplot(122)\n plt.imshow(out_img, cmap='gray', norm=norm)\n plt.title(\"Equalized \" + file_name)\n # plt.savefig(file_name + \"_comparison.png\")\n plt.show()\n\n plt.title(\"Histogram of \" + file_name)\n plt.bar(range(L), input_hist)\n plt.bar(range(L), output_hist)\n plt.legend(('raw image', 'equalized image'))\n # plt.savefig(file_name + \"_histogram.png\")\n plt.show()\n\n plt.plot(range(L), s)\n plt.title(\"Histogram equalization transformation for \" + file_name)\n plt.xlabel('$r_k$')\n plt.ylabel('$s_k$')\n plt.show()\n '''\n\n return out_img, output_hist, input_hist, s", "def calculate_histogram(img, channel):\n\n # histogram arrays for each channel\n hist_gs_or_red = np.zeros((256, 1), dtype=np.int32)\n hist_green = np.zeros((256, 1), dtype=np.int32)\n hist_blue = np.zeros((256, 1), dtype=np.int32)\n\n # Calculate the histogram for red channel for RGB images\n # or the the first channel for gray-scale of shape (M, N, 1) images.\n if channel == [0]:\n # one-dimensional array\n if img.ndim == 1:\n raise Exception('Cannot calculate the hist of one-dimensional array.')\n\n # if there is one channel, or in case of gray-scale images, it's OK!\n elif img.ndim == 2:\n for pixel in np.ceil(img.flatten()).astype(np.int):\n hist_gs_or_red[pixel] = hist_gs_or_red[pixel] + 1\n\n # an RGB image\n elif img.ndim == 3:\n for pixel in np.ceil(img[:, :, 0:1].flatten()).astype(np.int):\n hist_gs_or_red[pixel] = hist_gs_or_red[pixel] + 1\n\n # more than 3 dimensions\n else:\n raise Exception('Cannot calculate the hist of more than 3-dimensional array.')\n\n return hist_gs_or_red\n\n # Calculate the histogram of green channel for RGB images\n elif channel == [1]:\n # Not 3-D array that represent the image with 3 color channels.\n if img.ndim <= 2:\n raise Exception('Cannot calculate the hist of green channel for non-rgb images/ 3-D array')\n\n # If it's a 3-D array of 3 color channels\n elif img.ndim == 3:\n for pixel in np.ceil(img[:, :, 1:2].flatten()).astype(np.int):\n hist_green[pixel] = hist_green[pixel] + 1\n\n # more than 3 dimensions\n else:\n raise Exception('Cannot calculate the hist of more than 3-dimensional array.')\n return hist_green\n\n # Calculate the histogram of green channel for RGB images\n elif channel == [2]:\n if img.ndim <= 2:\n raise Exception('Cannot calculate the hist of blue channel for non-rgb images/ 3-D array')\n elif img.ndim == 3:\n for pixel in np.ceil(img[:, :, 2:].flatten()).astype(np.int):\n hist_blue[pixel] = hist_blue[pixel] + 1\n return hist_blue\n\n # Invalid value of channel parameter\n else:\n raise Exception('ValueError: only [0], [1], [2] are possible as value for the channel parameter.')", "def histogram_equalize(im_orig):\n\n color_flag = False\n image = im_orig\n\n\n if len(im_orig.shape) == 3: #RGB image\n color_flag = True\n y_im = rgb2yiq(im_orig)\n image = y_im[:, :, 0]\n\n image *= NORMALIZE\n hist_orig, bins = np.histogram(image, range(BINS))\n hist_cum = np.cumsum(hist_orig) #cumulative distribution function\n\n cum = ((hist_cum - hist_cum.min()) / ( hist_cum.max() - hist_cum.min())) * NORMALIZE\n\n im_eq = cum[image.astype(np.uint8)]\n\n hist_eq, bins = np.histogram(im_eq, range(BINS)) 
#before getting back to float64 does the histogram)\n\n im_eq /= NORMALIZE\n im_eq = im_eq.astype(np.float64)\n\n\n if color_flag:\n y_im[:, :, 0] = im_eq\n im_eq = yiq2rgb(y_im)\n\n im_eq = im_eq.clip(0,1)\n return [im_eq, hist_orig, hist_eq]", "def equalise_hist(image, bin_count=256):\n # TODO: your histogram equalization code\n #define arrays\n image = img_as_ubyte(image)\n row,col = image.shape\n new_image = np.zeros((row,col),dtype='uint8') \n\n # compute the value of each grayscale,and save in image_hist \n image_hist = np.bincount(image.flatten(), minlength=(bin_count))\n\n # normalise n[]\n norm_arr = (np.cumsum(image_hist)/(image.size))*(bin_count-1)\n norm_arr = norm_arr.astype('uint8')\n \n #Compute a normalized cumulative histogram\n for x in range(row):\n for y in range(col):\n new_image[x,y] = norm_arr[image[x,y]]\n \n return new_image", "def get_image_stats(image, out_dir, cur_file):\n # Output directory\n output_base = osp.join(out_dir, cur_file.split('.')[0])\n os.mkdir(output_base)\n # Print dimensions of the image\n width, height, color = image.shape\n print('The resolution of the image if of {}x{}x{}'.format(width,\n height,\n color))\n print('Total of {} pixels'.format(width * height * color))\n\n # Get histogram\n print('Calculating histogram')\n flat_img = image.mean(axis=2).flatten()\n counts, bins = np.histogram(flat_img, range(257))\n plt.bar(bins[:-1], counts, width=1, edgecolor='none')\n output_file = osp.join(out_dir, output_base, 'histogram.png')\n plt.xlabel('Intensidad')\n plt.ylabel('Número de pixeles')\n print('Saving histogram')\n plt.savefig(output_file, bbox_inches='tight')\n plt.close()\n\n # LAB space\n lab_image = cv2.cvtColor(image[8000:8500, 8000:8500, :], cv2.COLOR_BGR2LAB)\n output_file = osp.join(out_dir, output_base, 'lab.png')\n cv2.imwrite(output_file, lab_image)\n output_file = osp.join(out_dir, output_base, 'original.png')\n cv2.imwrite(output_file, image[8000:8500, 8000:8500, :])", "def read_image(image_path):\n return np.array(load_img(image_path, color_mode='grayscale')) / 255", "def image_equalise_hist(image: np.ndarray):\n #  Resize image to a shape of (48, 48)\n image = image_as_square(image)\n\n #  Equalize the histogram of the image\n image = equalizeHist(image)\n\n #  Resize the iamge back to a shape of (2304, )\n return image_as_array(image)", "def q_2(input_file, output_file):\n img = cv2.imread(input_file, cv2.IMREAD_COLOR)\n \n # Convert image to gray channel\n np_img = np.array(img)\n b = np_img[:,:,0]\n g = np_img[:,:,1]\n r = np_img[:,:,2]\n img_gray = 0.21 * b + 0.72 * g + 0.07 * r\n img_gray = np.array(img_gray, dtype='uint8')\n # Histogram equalization\n w,h=img_gray.shape\n H=count(img_gray)\n y=np.array([])\n # sap xep lai mang theo thu tu tu 0-255\n x=H.reshape(1,256)\n y=np.append(y,x[0,0])\n # T[i]=[i-1]+h[i]\n for i in range(255):\n k=x[0,i+1]+y[i]\n y=np.append(y,k)\n # chia theo cong thuc\n y=np.round(y/(w*h)*255)\n for i in range(w):\n for j in range(h):\n k=img_gray[i,j]\n img_gray[i,j]=y[k]\n cv2.imwrite(output_file, img_gray)", "def read_img(img): #X\n im = plt.imread(img)\n im = im[:, :, :3]\n if im.max()>200:\n im = im/255.\n return rgb_to_hsv(im)-0.5", "def hist(img):\n bottom_half = img[img.shape[0]//2:,:] # 0:img.shape[0]//2 is the top half\n histogram = bottom_half.sum(axis=0) \n \n return histogram", "def histogram_equalization(img):\n\n if len(img.shape) == 3:\n img_copy = np.copy(img)\n\n blue = img_copy[:,:,0]\n blue = histogram_equalize(blue)\n\n green = img_copy[:,:,1]\n green = 
histogram_equalize(green)\n\n red = img_copy[:,:,2]\n red = histogram_equalize(red)\n\n new_img = np.zeros(img_copy.shape)\n\n new_img[:,:,0] = blue\n new_img[:,:,1] = green\n new_img[:,:,2] = red\n\n return new_img\n\n else:\n return histogram_equalize(img)", "def compute_histogram(image, n_bins, color_space=\"RGB\"):\n\n n_channels = 1 if color_space == \"GRAY\" else image.shape[2]\n\n hist_channels = list(range(n_channels))\n hist_bins = [n_bins,]*n_channels\n hist_range = [0, 256]*n_channels\n\n hist = cv.calcHist([image], hist_channels, None, hist_bins,\n hist_range)\n hist = cv.normalize(hist, hist, alpha=0, beta=1,\n norm_type=cv.NORM_MINMAX).flatten() # change histogram range from [0,256] to [0,1]\n return hist", "def plot_pixel_intensity(image, path='./pixel_intensity_before_normalization.png'):\n\n plt.figure(figsize=(10, 5))\n plt.subplot(1, 2, 1)\n plt.imshow(image)\n plt.axis('off')\n histo = plt.subplot(1, 2, 2)\n histo.set_ylabel('Count')\n histo.set_xlabel('Pixel Intensity')\n n_bins = 30\n plt.hist(image[:, :, 0].flatten(), bins=n_bins, lw=0, color='r', alpha=0.5)\n plt.hist(image[:, :, 1].flatten(), bins=n_bins, lw=0, color='g', alpha=0.5)\n plt.hist(image[:, :, 2].flatten(), bins=n_bins, lw=0, color='b', alpha=0.5)\n plt.savefig(path)\n plt.show()", "def equalizeHist_gray(img):\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(2, 2))\n cl = clahe.apply(gray)\n hist = cv2.equalizeHist(cl)\n return hist[:, :, np.newaxis]", "def _histogram_equalize_image(image, hist_orig):\n cum_hist = np.cumsum(hist_orig)\n cum_hist = (cum_hist * 255) / cum_hist[-1]\n\n image = np.interp(image, np.linspace(0, 1, 256), np.round(cum_hist))\n\n return utils.normalize_image(image)", "def __get_color_histogram(self, image, seed, hist_res):\n \n L=[]\n N=len(seed)\n for i in range(N):\n \n L.append(image[seed[i][1],seed[i][0]])\n image_part=np.array(L)\n \n \n hist, bins= np.histogramdd(image_part,bins=hist_res,range=((0,255),(0,255),(0,255)) )\n #hist= ndimage.gaussian_filter(hist,sigma=7) # Gaussian smoothing\n\n return hist /np.linalg.norm(hist)", "def rgb_histogram(img, channels=[\"r\", \"g\", \"b\"]):\n hist = {}\n for ii, color in enumerate(channels):\n hist[color] = cv2.calcHist([img], [ii], None, [256], [0, 256])\n return hist" ]
[ "0.67547977", "0.67301863", "0.6704774", "0.6599597", "0.65497786", "0.6504103", "0.64119923", "0.6376085", "0.63556564", "0.6292844", "0.6277472", "0.62644446", "0.62549794", "0.62410015", "0.6209802", "0.6203148", "0.6193479", "0.61523557", "0.6127991", "0.6120134", "0.6074392", "0.60521716", "0.6049514", "0.6045511", "0.6043892", "0.6009917", "0.6008388", "0.6000463", "0.5997514", "0.5990986" ]
0.7406885
0
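
A minimal usage sketch for load_and_preprocess_image above. The file names are placeholders and cv2 (OpenCV) is assumed to be installed; the function is repeated so the sketch stays self-contained.

import cv2

def load_and_preprocess_image(path):
    img = cv2.imread(path, 0)  # load directly as a single-channel greyscale image
    img = cv2.equalizeHist(img)  # spread intensities across the full 0-255 range
    return img

if __name__ == "__main__":
    # Placeholder file names, used only for illustration.
    equalized = load_and_preprocess_image("sample_scan.png")
    cv2.imwrite("sample_scan_equalized.png", equalized)
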
Utility function processing all images in a given directory.
def bulk_process_images(inputpath, outputpath, extension):
    for dirpath, dirnames, filenames in os.walk(inputpath):
        structure = os.path.join(outputpath, dirpath[len(inputpath) + 1:])
        for file in filenames:
            if file.endswith(extension):
                src = os.path.join(dirpath, file)
                dest = os.path.join(structure, file)
                img = load_and_preprocess_image(src)
                cv2.imwrite(dest, img)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_images():\n create_dirs()\n for root, dirs, files in os.walk(IN):\n for name in files:\n if name[0] == '.':\n continue\n process_image(name)", "def process_imgdir(self,imgdir):\n #Write images into resultdir\n resultdir = os.path.join(imgdir, 'results')\n #Read images from input dir\n inputdir = os.path.join(imgdir, 'inputs')\n shutil.rmtree(resultdir)\n os.mkdir(resultdir)\n #Read files from input images\n for fullname in os.listdir(inputdir):\n filepath = os.path.join(inputdir, fullname)\n if os.path.isfile(filepath):\n basename = os.path.basename(filepath)\n image = cv2.imread(filepath, cv2.IMREAD_COLOR)\n if len(image.shape) == 3 and image.shape[2] == 3:\n print('Processing %s ...' % basename)\n else:\n sys.stderr.write('Skipping %s, not RGB' % basename)\n continue\n #Extract haze from the scene and then save the image\n dehazed = self.get_scene_radiance(image)\n cv2.imwrite(os.path.join(resultdir, basename), dehazed)\n return os.path.join(resultdir, basename)", "def process_files(self):\n for filename in self.temp_directory.iterdir():\n im = Image.open(str(filename))\n scaled = im.resize((640, 480))\n scaled.save(str(filename))", "def process_directory(dir, exiftool_path):\n for path_object in pathlib.Path(dir).glob(\"**/*\"):\n if path_object.is_file():\n verbose(f\"Processing file {path_object}\")\n process_file(path_object, exiftool_path)\n elif path_object.is_dir():\n verbose(f\"Processing directory {path_object}\")\n process_directory(path_object, exiftool_path)", "def preprocessfolder(self):\n imgs, _ = getFilesAndHdf(str(self.in_directory.text()))\n self.img_list = sorted(imgs)\n self.updateImageGroups()", "def processImages(self):\n for file in os.listdir(self.config[\"tempPath\"]):\n self.logger.debug(\"Calling generateImages for the file: {0}\".format(file))\n self.generateText(file)", "def preprocess_images(images_dir, image_dims, logger):\n find_str = images_dir + '/**/*.jpg'\n images = glob.glob(find_str, recursive=True)\n num_samples = get_num_samples(images_dir)\n\n # Load in the already processed file list\n proc_list_path = images_dir + '/processed_list.txt'\n if os.path.isfile(proc_list_path):\n with open(proc_list_path) as f:\n proc_list = f.read().split('\\n')\n else:\n proc_list = []\n \n i = 1\n for image in images:\n image_name = image.split('/')[-1]\n if image not in proc_list:\n logger.info(\"Processing %s\", \" {} - {}/{}\".format(\n image_name, i, num_samples))\n try:\n processed_image = ImageCheck.check_and_crop(image)\n except (ImageCheck.ObjectMissingError,\n ImageCheck.WormMissingError,\n ImageCheck.MultipleWormsError,\n ImageCheck.TooBlurryError) as e:\n logger.info(\"Processing Error: %s\",\n \"Image at: \\n{} \\n Produced error: {} \\n Removing\"\n \" image\".format(image, e))\n os.remove(image)\n i = i + 1\n continue\n cv2.imwrite(image, processed_image)\n with open(proc_list_path, 'a') as f:\n f.write(image + '\\n')\n else:\n logger.info(\"Skipping %s\", \" {} (already processed) - {}/{}\".format(\n image_name, i, num_samples))\n i = i + 1", "def preprocess_images(file_path, new_file_path):\n if not os.path.isdir(new_file_path):\n os.mkdir(new_file_path)\n i = 0\n for dir in listdir(file_path):\n j = 0\n for image_path in listdir(file_path + '/' + dir):\n image = open_image(image_path)\n cv2.imwrite(file_path + '/' + image_path + '/' str(i) + '/' +str(i) + '.jpg', image)\n j += 1\n i += 1", "def process_images(image_folder: Path) -> List[Dict]:\n images = []\n files = image_folder.glob(\"*.jpg\")\n\n for file_path in files:\n 
file_name = file_path.name\n file_id = file_name.split(\".jpg\")[0]\n file_id = file_id.split(\"in\")[-1]\n file_id = int(file_id)\n file_id = f\"{file_path.parent.parent.name}_{str(file_id)}\"\n\n width, height = imagesize.get(str(file_path))\n\n image_data = {\"id\": file_id,\n \"width\": width,\n \"height\": height,\n \"filename\": str(file_path)}\n images.append(image_data)\n\n return images", "def process(directory):\n files = []\n\n options = [\"Load\", \"Create\"]\n choice = options[int(ui.prompt(options=options))]\n\n for item in os.listdir(directory):\n if os.path.isfile(os.path.join(directory, item)):\n filename = os.path.join(directory, item)\n if choice == \"Load\" and item.endswith(\".png\"):\n files.append(filename)\n elif choice == \"Create\" and item.endswith(\".file\"):\n files.append(filename)\n\n filenames, pageNames = imagePages(files, choice)\n \n targets = [name.split('/')[-1][:5] for name in filenames]\n return pageNames, targets, filenames", "def get_images(directory=None): #import from mask.py\n \n if directory == None:\n directory = os.getcwd() # Use working directory if unspecified\n \n image_list = [] # Initialize aggregaotrs\n file_list = []\n \n directory_list = os.listdir(directory) # Get list of files\n for entry in directory_list:\n absolute_filename = os.path.join(directory, entry)\n try:\n image = PIL.Image.open(absolute_filename)\n file_list += [entry]\n image_list += [image]\n except IOError:\n pass # do nothing with errors tying to open non-images\n return image_list, file_list", "def _getImagesFromDirectory(self, directoryPath):\n files = [f for f in listdir(directoryPath)\n if isfile(join(directoryPath, f))]\n for filePath in files:\n self._imageDictionary[filePath] = image.load(\n self._formatPath(directoryPath, filePath))", "def _process_images(self, docname: pathlib.Path, images: List[nodes.image]) -> None:\n logger.debug(\"[nbtutorial]: Processing images for %s\", docname)\n\n if len(images) == 0:\n return\n\n img_dir = pathlib.Path(self.outdir, docname.parent, RESOURCE_DIR)\n\n if not img_dir.exists():\n img_dir.mkdir(parents=True)\n\n for img in images:\n fname = pathlib.Path(img[\"uri\"]).name\n\n source = pathlib.Path(self.app.confdir, img[\"uri\"])\n destination = pathlib.Path(img_dir, fname)\n\n shutil.copy(source, destination)", "def return_images(directory):\r\n allfiles = os.listdir(directory)\r\n image_list = [im for im in allfiles if '.jpg' in str(im)]\r\n image_list = [directory + im for im in image_list]\r\n return image_list", "def open_images_in(directory):\n\n files = [\n filename\n for filename in os.listdir(directory)\n if \"_\" in filename and not filename.startswith(\"joined\")\n ]\n tiles = []\n if len(files) > 0:\n i = 0\n for file in files:\n pos = get_image_column_row(file)\n im = Image.open(os.path.join(directory, file))\n\n position_xy = [0, 0]\n count = 0\n for a, b in zip(pos, im.size):\n position_xy[count] = a * b\n count = count + 1\n tiles.append(\n Tile(\n image=im,\n position=pos,\n number=i + 1,\n coords=position_xy,\n filename=file,\n )\n )\n i = i + 1\n return tiles", "def readImages(image_dir):\n extensions = ['bmp', 'pbm', 'pgm', 'ppm', 'sr', 'ras', 'jpeg',\n 'jpg', 'jpe', 'jp2', 'tiff', 'tif', 'png']\n\n search_paths = [os.path.join(image_dir, '*.' 
+ ext) for ext in extensions]\n image_files = sorted(sum(map(glob, search_paths), []))\n images = [cv2.imread(f, cv2.IMREAD_UNCHANGED | cv2.IMREAD_COLOR) for f in image_files]\n\n bad_read = any([img is None for img in images])\n if bad_read:\n raise RuntimeError(\n \"Reading one or more files in {} failed - aborting.\"\n .format(image_dir))\n\n return images", "def extract_from_dir(directory):\n image_regex = re.compile(r'.+\\.jpeg$')\n for root, _, files in os.walk(directory):\n for name in files:\n if image_regex.match(name) != None:\n filename = os.path.join(root, name)\n image = io.imread(filename)\n no_ext, _ = os.path.splitext(name)\n features = extract(image)\n yield (no_ext, features)", "def load_pic_in_directory(directory):\n return [Image.open(os.path.join(directory, img)) for img in os.listdir(directory)]", "def load_images(subdir):\n with perform(\n name='dbutils load_images',\n before='Loading images to gallery',\n fail='Error occured while loading images to gallery',\n after='Images succesfully loaded'\n ):\n load_dummy_images(subdir)", "def load_images_from_directory(input_dir, batch_shape):\n def input_filenames(input_dir):\n all_files = tf.gfile.Glob(os.path.join(input_dir, '*.png'))\n all_files.sort()\n return all_files\n\n\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n\n for filepath in input_filenames(input_dir):\n with tf.gfile.Open(filepath, mode='rb') as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n\n # This is a partial batch left over at end.\n # Note that images will still have the proper size.\n if idx > 0:\n yield filenames, images", "def run_images_analysis(filepath, ID, method):\n for path in filepath:\n try:\n Image.open(path)\n except IOError:\n msg = 'Please import images files, or just a single zip archive'\n else:\n filename, extension = get_file_name(path)\n\n # Save raw image to database\n msg = client.upload_file(ID, filename, extension, path)\n\n err, msg = check_msg(msg)\n\n if err is False: # if no error in uploading image\n # Request to process image\n client.process_image(ID, filename, method)\n return msg", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n existing_dirs = [os.path.basename(dir) for dir in os.listdir(FLAGS.output_dir)]\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.JPEG')):\n with tf.gfile.Open(filepath, 'rb') as f:\n image = np.array(Image.open(f).resize([FLAGS.image_height, FLAGS.image_width]).convert('RGB')).astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n if os.path.basename(os.path.normpath(input_dir))=='*':\n head, tail = os.path.split(filepath)\n dirname=os.path.basename(head)\n if dirname in existing_dirs:\n continue\n filename = os.path.join(dirname, tail)\n else:\n filename = os.path.basename(filepath)\n filenames.append(filename)\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_images(self, folder):\n cwd = os.getcwd()\n dir = cwd + '/' + folder\n files = 
os.listdir(dir)\n for file in files:\n img = pygame.image.load(dir + '/' + file)\n self.images.append(img)", "def parse_dir_imgs(root_pth):\n def visit(imgpths, pth, names):\n # Appends detected image filenames to a list.\n imgpths.extend([os.path.join(pth, name) for name in names\n if os.path.splitext(name)[1].lower() in img_exts])\n # Walk down directory tree and get the image file paths\n imgpaths = []\n for dp, foo, names in os.walk(root_pth):\n visit(imgpaths, dp, names)\n # Make lowercased list of imagefilenames\n imgnames = [os.path.split(pth)[1].lower() for pth in imgpaths]\n return imgnames, imgpaths", "def list_images(img_dir) -> Iterable[str]:\n extensions = (\".png\", \".jpg\", \".jpeg\", \".tif\", \".tiff\")\n\n paths = Path(img_dir).glob(\"**/*\")\n paths = filter(lambda p: p.is_file() and p.suffix.lower() in extensions, paths)\n return (str(p) for p in paths)", "def display_imgs(img_dir,img_list):\n for img in img_list:\n display_img(img_dir, img)", "def get_images(directory=None):\n \n if directory == None:\n directory = os.getcwd() # Use working directory if unspecified\n \n image_list = [] # Initialize aggregaotrs\n file_list = []\n \n directory_list = os.listdir(directory) # Get list of files\n for entry in directory_list:\n absolute_filename = os.path.join(directory, entry)\n try:\n image = PIL.Image.open(absolute_filename)\n file_list += [entry]\n image_list += [image]\n except IOError:\n pass # do nothing with errors tying to open non-images\n return image_list, file_list", "def index_files():\n\n print(\"Indexing files\")\n\n for root, _, files in os.walk(image_directory):\n for item in files:\n for file_type in file_types:\n if file_type in item:\n images_in_directory.append(os.path.join(root, item))\n\n print(f'Finished indexing {len(images_in_directory)} files')\n\n pass", "def preprocess_directory(data_path, label_path, damage_fn):\r\n\r\n file_names = os.listdir(data_path)\r\n os.mkdir(label_path)\r\n\r\n for file_name in file_names:\r\n file_path = data_path + \"/\" + file_name\r\n cur_label_path = label_path + \"/\" + file_name\r\n current_image = Image.open(file_path)\r\n label = damage_fn(current_image)\r\n label.save(cur_label_path, \"JPEG\")", "def loadimages(root):\n imgs = []\n\n def add_json_files(path, ):\n for imgpath in glob.glob(path + \"/*.png\"):\n if exists(imgpath) and exists(imgpath.replace('png', \"json\")):\n imgs.append((imgpath, imgpath.replace(path, \"\").replace(\"/\", \"\"),\n imgpath.replace('png', \"json\")))\n for imgpath in glob.glob(path + \"/*.jpg\"):\n if exists(imgpath) and exists(imgpath.replace('jpg', \"json\")):\n imgs.append((imgpath, imgpath.replace(path, \"\").replace(\"/\", \"\"),\n imgpath.replace('jpg', \"json\")))\n\n def explore(path):\n if not os.path.isdir(path):\n return\n folders = [os.path.join(path, o) for o in os.listdir(path)\n if os.path.isdir(os.path.join(path, o))]\n if len(folders) > 0:\n for path_entry in folders:\n explore(path_entry)\n else:\n add_json_files(path)\n\n explore(root)\n\n return imgs" ]
[ "0.813414", "0.72451013", "0.7199291", "0.7147753", "0.7090039", "0.70295995", "0.70114356", "0.69021356", "0.687131", "0.68625164", "0.6833119", "0.6790636", "0.6766119", "0.67627907", "0.67616725", "0.67560166", "0.6703966", "0.66968566", "0.6695718", "0.6682746", "0.6660913", "0.66302323", "0.6587821", "0.6585301", "0.65625054", "0.6561659", "0.65370834", "0.6536033", "0.6477729", "0.6447385" ]
0.73714215
1
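
A usage sketch for bulk_process_images above. The directory names and the .png extension are assumptions for illustration, and the function is assumed importable from a placeholder module; pre-creating the mirrored output folders is likewise an assumption about intended use, since cv2.imwrite does not create missing directories.

import os

from image_utils import bulk_process_images  # placeholder module name

INPUT_ROOT = "data/raw"        # placeholder dataset locations
OUTPUT_ROOT = "data/processed"

# Mirror the input folder tree first: cv2.imwrite, used inside the helper,
# fails quietly when the destination directory does not exist.
for dirpath, _, _ in os.walk(INPUT_ROOT):
    mirrored = os.path.join(OUTPUT_ROOT, dirpath[len(INPUT_ROOT) + 1:])
    os.makedirs(mirrored, exist_ok=True)

bulk_process_images(INPUT_ROOT, OUTPUT_ROOT, ".png")
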
Utility function augmenting all images in an input path, copying them into an output path
def bulk_augment_images(input_path, output_path, extension, augmentation, label_type, label_threshold=-1):
    for dir_path, dir_names, filenames in os.walk(input_path):
        structure = os.path.join(output_path, dir_path[len(input_path) + 1:])
        for file in filenames:
            if file.endswith(extension):
                src = os.path.join(dir_path, file)
                label = get_labels([src], label_type)[0]
                if label > label_threshold:
                    img = cv2.imread(src, 0)
                    f_name, f_ext = os.path.splitext(file)
                    if augmentation == 'flip':
                        img = np.flip(img, axis=-1)
                        file = f_name + "_flipped" + f_ext
                    elif augmentation == 'original':
                        file = f_name + "_original" + f_ext
                    elif augmentation == 'rotate_crop':
                        rotation = np.random.choice((-10, 10))
                        img = rotate_and_crop_image(img, rotation)
                        file = f_name + "_rotated" + f_ext
                    else:
                        raise ValueError(
                            "Invalid value for 'augmentation'. Value can be 'flip', 'original', 'rotate_crop', "
                            "value was: {}".format(augmentation))
                    dest = os.path.join(structure, file)
                    cv2.imwrite(dest, img)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def aug_imgs(in_path, out_path):\n names = os.listdir(in_path)\n aug = iaa.Sequential([iaa.AdditiveGaussianNoise(scale=0.01*255), iaa.Fliplr(p=0.5), iaa.Affine(shear=-10)],\n random_order=True)\n for i, name in enumerate(names):\n img = cv2.imread(in_path + name, 0)\n img = cv2.resize(img, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)\n img_new = aug.augment_image(img)\n cv2.imwrite(out_path + \"videoAug_neg_2_\"+str(i)+\".jpg\", img_new)\n\n return", "def bulk_process_images(inputpath, outputpath, extension):\n\n for dirpath, dirnames, filenames in os.walk(inputpath):\n structure = os.path.join(outputpath, dirpath[len(inputpath) + 1:])\n for file in filenames:\n if file.endswith(extension):\n src = os.path.join(dirpath, file)\n dest = os.path.join(structure, file)\n img = load_and_preprocess_image(src)\n cv2.imwrite(dest, img)", "def preprocess_images(file_path, new_file_path):\n if not os.path.isdir(new_file_path):\n os.mkdir(new_file_path)\n i = 0\n for dir in listdir(file_path):\n j = 0\n for image_path in listdir(file_path + '/' + dir):\n image = open_image(image_path)\n cv2.imwrite(file_path + '/' + image_path + '/' str(i) + '/' +str(i) + '.jpg', image)\n j += 1\n i += 1", "def augment_images(folder, augmenter, images, size = (224, 224), start_index=0, iterations=1):\n # Get the total number of images\n n = len(images)\n \n # Main iteration that applies random transformations to the images\n for i in range(iterations):\n # Apply transformations to the images\n images_augmented = augmenter(images=images)\n \n # Save the augmented images on the disk\n save_images_in_folder(folder=folder, images=images_augmented, size=size, start_index=i*n)", "def augment():\n print(\"augmenting......\")\n path1 = '../trainp1/'\n path2 = '../trainp2/'\n # path of pair1 and pair2 similar to img & mask task for segmentation\n p = Augmentor.Pipeline(path1) # pair1\n p.ground_truth(path2) # pair2\n p.rotate(probability=0.3, max_left_rotation=3, max_right_rotation=3) \n p.flip_left_right(probability=0.2) \n p.random_distortion(0.5, 2, 2, 2)\n p.zoom(probability=0.5, min_factor=0.95, max_factor=1.05)\n p.process()", "def augment(self, directory, crop_ratio=0.75):\n aug_dir = os.path.join(directory, self.AUGMENTED_DIR)\n \n if os.path.exists(aug_dir):\n clear_dir(aug_dir)\n else:\n create_dirs([aug_dir])\n\n images = self._load_dir(directory)\n\n aug_map = self._build_augmentation_map(images)\n\n for i in images:\n for transformation in aug_map[i.y]:\n transformation(i, aug_dir)", "def __augmented_images(self, info, start):\n count = start\n final_img_to_save = []\n for pair in info:\n processedImage = self.__processImage(os.path.join(WORKING_DIR, pair[0]))\n if processedImage == None:\n continue\n # translation is not that important since CNNs are resistant to image translations\n rotatedImages = self.__applyRotations(processedImage)\n\n rotCount = 1\n for img in rotatedImages:\n filename = str(count) + \"_\" + str(rotCount) + \".jpg\"\n # img.save(os.path.join(directory, filename))\n final_img_to_save.append((img, pair[1], filename))\n rotCount += 1\n\n print(\"Augmenting image: {:05}\".format(count))\n count += 1\n return final_img_to_save", "def bulk_crop_images(input_path, output_path, dims, extension):\n for dir_path, dir_names, filenames in os.walk(input_path):\n structure = os.path.join(output_path, dir_path[len(input_path) + 1:])\n for file in filenames:\n if file.endswith(extension):\n src = os.path.join(dir_path, file)\n width, height = Image.open(src).size\n if width > dims[0] or height > 
dims[1]:\n img = cv2.imread(src, 0)\n img = crop_around_center(img, dims[0], dims[1])\n dest = os.path.join(structure, file)\n cv2.imwrite(dest, img)", "def crop_and_resize_images(images_path, annotations_path, output_path):\n assert os.path.exists(images_path)\n assert os.path.exists(annotations_path)\n\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n image_file_names = util.get_images_at_path(images_path)\n\n # Filename of annotations file without path, e.g. annotations.json\n annotations_file_name, annotations_file_extension = os.path.splitext(os.path.basename(annotations_path))\n # Full path to new annotations file, e.g. /home/user/annotations/annotations_noisy.json\n modified_annotations_file_path = os.path.join(output_path, annotations_file_name + \"_cropped\" + annotations_file_extension)\n\n with open(annotations_path, \"r\") as json_file, open(modified_annotations_file_path, \"w\") as modified_json_file:\n json_data = json.load(json_file)\n new_json_data = {}\n\n for image_file_name in image_file_names:\n\n # Load the image to augment\n image = cv2.imread(os.path.join(images_path, image_file_name))\n width, height, channels = image.shape\n # At most 20% of the image width or height is used as offset\n x_offset = random.randint(0, width / 5)\n y_offset = random.randint(0, height / 5)\n # And width and height are between 80 and 100% of original\n crop_width = random.randint((4 * width) / 5, width)\n crop_height = random.randint((4 * height) / 5, height)\n # If we have 20% offset plus 100% width or height that's not gonna work\n crop_width = min(crop_width, width - x_offset)\n crop_height = min(crop_height, height - y_offset)\n\n print(\"Cropping image {} at ({}, {}) to ({}, {}).\".format(image_file_name, x_offset, y_offset, crop_width, crop_height))\n\n # Crop image\n cropped_image = image[y_offset:y_offset + crop_height, x_offset:x_offset + crop_width]\n image_file_base_name, extension = os.path.splitext(image_file_name)\n augmented_file_name = image_file_base_name + \"_cropped\" + extension\n augmented_file_path = os.path.join(output_path, augmented_file_name)\n cv2.imwrite(augmented_file_path, cropped_image)\n\n # Adjust the annotations file\n\n for key in json_data:\n if image_file_name in key:\n # We found the currently process image, now adjust bounding boxes and set the new sizes\n image_data = json_data[key]\n new_image_data = {\"base64_img_data\" : image_data[\"base64_img_data\"], \\\n \"fileref\" : image_data[\"fileref\"], \\\n \"file_attributes\" : image_data[\"file_attributes\"]}\n new_image_data[\"regions\"] = {}\n for region_key in image_data[\"regions\"]:\n region_data = image_data[\"regions\"][region_key]\n shape_attributes = region_data[\"shape_attributes\"]\n box_x, box_y, box_width, box_height = shape_attributes[\"x\"], shape_attributes[\"y\"], \\\n shape_attributes[\"width\"], shape_attributes[\"height\"]\n # Same as in conversion.py, we need to care for negative x and y (outside of image)\n box_x = max(box_x, 0)\n box_y = max(box_y, 0)\n # Adjust to regions that go beyond the image\n box_width = min(box_width, width - box_x)\n box_height = min(box_height, height - box_y)\n\n old_x, old_y, old_width, old_height = box_x, box_y, box_width, box_height\n\n box_too_far_left = box_x + box_width < x_offset\n box_too_far_right = box_x > x_offset + crop_width\n box_too_far_up = box_y + box_height < y_offset\n box_too_far_down = box_y > y_offset + crop_height\n if box_too_far_left or box_too_far_right or box_too_far_up or box_too_far_down:\n # If box is 
outside of cropping skip adding it\n continue\n\n # New x and y positions of the bounding box\n box_x = box_x - x_offset\n if box_x < 0:\n # Old x was outside of cropping, we need to adjust width, i.e. subtract the offset\n box_width = box_width + box_x\n box_x = 0\n if box_x + box_width > crop_width:\n # To care for the new box exceeding the image size\n box_width = crop_width - box_x\n\n box_y = box_y - y_offset\n if box_y < 0:\n # Same goes for height\n box_height = box_height + box_y\n box_y = 0\n if box_y + box_height > crop_height:\n box_height = crop_height - box_y\n \n shape_attributes[\"x\"] = box_x\n shape_attributes[\"y\"] = box_y\n shape_attributes[\"width\"] = box_width\n shape_attributes[\"height\"] = box_height\n new_image_data[\"regions\"][region_key] = region_data\n\n # Now adjust to the new file name\n split = key.split(image_file_name)\n statinfo = os.stat(augmented_file_path)\n new_key = augmented_file_name + str(statinfo.st_size)\n image_data = json_data[key]\n new_image_data[\"filename\"] = augmented_file_name\n new_image_data[\"size\"] = int(statinfo.st_size)\n new_json_data[new_key] = new_image_data\n\n json.dump(new_json_data, modified_json_file)", "def augment_image(images_folder_path, file_extension, rotation=True, flipping=True):\n\n def save_image(img_name, img):\n # this function save image into target folder\n cv2.imwrite(images_folder_path + img_name, img)\n\n def rotate(image_name, angle=90):\n \"\"\"\n Rotate the image\n :param image_name:\n :param angle: Rotation angle in degrees. Positive values mean\n counter-clockwise rotation (the coordinate origin is assumed to be the top-left corner).\n \"\"\"\n img = cv2.imread(images_folder_path + image_name)\n rotated = imutils.rotate_bound(img, angle)\n rotated_image_name = str(angle) + \"_\" + image_name\n return rotated_image_name, rotated\n\n def flip(image_name, vflip=False, hflip=False):\n \"\"\"\n Flip the image\n :param image_name:\n :param vflip: whether to flip the image vertically\n :param hflip: whether to flip the image horizontally\n \"\"\"\n save_name = \"\"\n img = cv2.imread(images_folder_path + image_name)\n if vflip:\n c = 1\n save_name = \"flip_v\"\n if hflip:\n c = 0\n save_name = \"flip_h\"\n if hflip and vflip:\n c = -1\n save_name = \"flip_hv\"\n\n flip_image = cv2.flip(img, flipCode=c)\n flip_image_name = save_name + \"_\" + image_name\n\n return flip_image_name, flip_image\n\n all_images_name = path.read_all_files_name_from(folder_path=images_folder_path,\n file_extension=file_extension)\n counter = 0\n\n # adding random noise to image.\n # img_noise = random_noise(img, mode= 's&p', clip=True)\n\n for image_name in all_images_name:\n # Perform the counter clockwise rotation holding at the center\n # 90 degrees\n if rotation:\n rotated_img_name, rotated_img = rotate(image_name, angle=90)\n save_image(rotated_img_name, rotated_img)\n rotated_img_name, rotated_img = rotate(image_name, angle=180)\n save_image(rotated_img_name, rotated_img)\n rotated_img_name, rotated_img = rotate(image_name, angle=270)\n save_image(rotated_img_name, rotated_img)\n\n if flipping:\n # is same as 180 rotation\n # flip_image_name, flip_image = flip(image_name, vflip=True, hflip=True)\n # save_image(flip_image_name, flip_image)\n flip_image_name, flip_image = flip(image_name, vflip=True, hflip=False)\n save_image(flip_image_name, flip_image)\n flip_image_name, flip_image = flip(image_name, vflip=False, hflip=True)\n save_image(flip_image_name, flip_image)\n\n if counter % 50 == 0:\n print(counter)\n counter = 
counter + 1", "def jarvis(input_path, output_path): \n \n if not os.path.exists(f'{output_path}'):\n os.makedirs(f'{output_path}')\n\n folder_list = [sample for sample in os.listdir(input_path) if os.path.isdir(f'{input_path}{sample}')]\n\n for folder in folder_list:\n\n file_list = [filename for filename in os.listdir(f'{input_path}{folder}/') if '.tif' in filename]\n mutant = '_'.join(folder.split(' '))\n\n for x, filename in enumerate(file_list):\n pathname = os.path.join(input_path, folder, filename)\n new_name = f'{output_path}{mutant}_{x}.tif'\n copyfile(pathname, new_name)\n # array_stack = skimage.io.imread(f'{pathname}').transpose(1, 2, 0)\n logger.info(f'{new_name}')", "def resize_multiple_images(src_path, dst_path):\n for filename in os.listdir(src_path):\n img=Image.open(src_path+'/'+filename)\n new_img = img.resize((96,96,))\n #new_img.resize(96,96,1)\n if not os.path.exists(dst_path):\n os.makedirs(dst_path)\n new_img.save(dst_path+'/'+filename)\n print('Resized and saved {} successfully.'.format(filename))", "def transform_images(img1,img2):", "def combine_images(args):\n\n # Read all images into a cube (TODO: think about the RAM)\n with fits.open(args.input[0]) as im0:\n lx, ly = im0[0].data.shape\n ref_hdr = im0[0].header\n\n headers = [fits.open(im_name)[0].header for im_name in args.input]\n cube = numpy.ma.zeros((len(args.input), lx, ly))\n cube.mask = numpy.zeros_like(cube.data)\n for ii, im_name in enumerate(args.input):\n with astroim.Astroim(im_name) as im:\n cube.data[ii, :,:] = im.chips[0].data\n if im.chips[0].mask is not None:\n cube.mask[ii,:,:] = im.chips[0].mask\n\n # Scale images\n scale_functions = {\"median\": numpy.ma.median,\n \"mean\": numpy.ma.mean,\n \"mode\": scipy.stats.mstats.mode,\n \"none\": lambda x: 1}\n for ii, im_name in enumerate(args.input):\n func = scale_functions[args.scale.lower()]\n cube[ii,:,:] /= func(cube[ii,:,:])\n\n\n # Reproject all images to the ref_hdr\n for ii, _ in enumerate(args.input):\n if ii == 0:\n continue\n cube.data[ii,:,:], footprint = reproject_interp((cube.data[ii,:,:], headers[ii]), ref_hdr)\n cube.mask[ii,:,:], footprint = reproject_interp((cube.mask[ii,:,:], headers[ii]), ref_hdr)\n #whr = numpy.isnan(cube.data[ii,:,:])\n #cube.mask[ii,:,:][whr] = True\n\n # Do average\n average_functions = {\"median\": numpy.ma.median, \"mean\": numpy.ma.mean, \"sum\": numpy.ma.sum}\n func = average_functions[args.average.lower()]\n final_image = func(cube, axis=0)\n ref_hdr[\"NCOMBINE\"] = len(args.input)\n\n mask_name = utilities.replace_extension(args.output, \".fits.msk\")\n mask_name_header = utilities.replace_extension(os.path.basename(args.output), \".fits.msk\")\n ref_hdr[\"MASK\"] = mask_name_header\n fits.writeto(args.output, final_image.data, ref_hdr, clobber=True )\n fits.writeto(mask_name, numpy.array(final_image.mask, dtype=int), clobber=True)\n\n return args.output", "def augmentation(element: str, output: str, factor: int) -> None:\n\n out_filename = get_output_filename(element, output, -1)\n\n try:\n os.makedirs(\"/\".join(out_filename.split(\"/\")[:-1]))\n except:\n pass\n\n im = ImageOperations.load(element)\n ImageOperations.save(im, path=out_filename)\n\n for i in range(factor):\n out_filename = get_output_filename(element, output, i)\n im_aug = copy.deepcopy(im)\n for operation in set(random.sample(operations, k=random.randint(0, len(operations)))):\n im_aug = operation(im_aug)\n\n ImageOperations.save(im_aug, path=out_filename)", "def jarvis(input_path, output_path): \n\n if not 
os.path.exists(f'{output_path}'):\n os.makedirs(f'{output_path}')\n\n file_list = [filename for filename in os.listdir(f'{input_path}') if '.tif' in filename]\n\n for filename in file_list:\n pathname = os.path.join(input_path, filename)\n new_name = f\"{output_path}{filename.replace('.lif - ', '_').replace('_5x-', '_')}\"\n copyfile(pathname, new_name)\n logger.info(f'{new_name}')", "def augment(im_path):\n # change directory to toplevel of repo (parent of augmentation)\n os.chdir(os.path.split(os.path.dirname(os.path.realpath(__file__)))[0])\n\n im_name, im_ext = os.path.splitext(im_path)\n if im_path not in os.listdir(\"data/raw\"):\n raise FileNotFoundError(f\"{im_path} could not be found in the list of raw images\")\n\n if im_name + \".json\" not in os.listdir(\"data/corrected\"):\n raise FileNotFoundError(f\"{im_name} has not been labelled yet! (no file '{im_name}.json' in corrected)\")\n\n with open(f\"data/corrected/{im_name}.json\") as read_file:\n im_label = json.loads(read_file.read(-1))\n persp = np.float32(im_label[\"perspective\"])\n\n im: Image.Image = Image.open(f\"data/raw/{im_path}\")\n # downscale image to reasonable height\n scale_factor = 500 / im.height\n persp = persp * scale_factor\n im.thumbnail([1000000, 500])\n im_cv = cv2.cvtColor(np.array(im), cv2.COLOR_RGB2BGR)\n\n # determine crop box\n crop_amount = (im.width - 500)\n left_crop = random.randint(crop_amount//4, 3 * crop_amount // 4)\n # left_crop = crop_amount//2\n right_crop = crop_amount - left_crop\n box = [\n left_crop,\n 0,\n im.width - right_crop,\n im.height\n ]\n\n # warp perspective\n # basic way: add gaussian noise to the 4 corner points\n warped_persp = persp.copy()\n for i in range(4):\n for j in range(2):\n v = warped_persp[i][j]\n v += random.gauss(0, 5)\n # ensure none of the perspective points will fall outside the cropped image\n v = max(box[j] + 5, v)\n v = min(box[j+2] - 5, v)\n warped_persp[i][j] = v\n\n matrix = cv2.getPerspectiveTransform(persp, warped_persp)\n warped_im = cv2.warpPerspective(im_cv, matrix, (im.width, im.height))\n warped_im = Image.fromarray(cv2.cvtColor(warped_im, cv2.COLOR_BGR2RGB))\n\n # run crop on warped image\n warped_im = warped_im.crop(box)\n # adjust warped coordinates according to crop\n for i in range(4):\n warped_persp[i][0] -= box[0]\n warped_persp[i][1] -= box[1]\n\n # scale down to final size\n warped_im = warped_im.resize((256, 256))\n for i in range(4):\n warped_persp[i][0] *= 256 / 500\n warped_persp[i][1] *= 256 / 500\n\n # adjust image colour balance, saturation and contrast\n warped_im = ImageEnhance.Color(warped_im).enhance(random.uniform(0.9, 1.2))\n warped_im = ImageEnhance.Contrast(warped_im).enhance(random.uniform(0.8, 1.2))\n warped_im = ImageEnhance.Brightness(warped_im).enhance(random.uniform(0.8, 1.2))\n\n # adjust image temperature\n # thanks to Mark Ransom (https://stackoverflow.com/a/11888449)\n temp_r, temp_g, temp_b = random.choice(KELVIN_TABLE)\n convert_matrix = (temp_r / 255.0, 0.0, 0.0, 0.0,\n 0.0, temp_g / 255.0, 0.0, 0.0,\n 0.0, 0.0, temp_b / 255.0, 0.0)\n warped_im = warped_im.convert(\"RGB\", convert_matrix)\n\n # add noise\n noise_strength = random.uniform(5, 10)\n warped_im_arr = np.float64(np.array(warped_im))\n warped_im_arr += np.random.normal(0, noise_strength, warped_im_arr.shape)\n warped_im_arr = np.clip(warped_im_arr, 0, 255)\n warped_im = Image.fromarray(np.uint8(warped_im_arr))\n\n fname = f\"{im_name}-{hex(random.randint(2**20, 2**24))[2:]}\"\n warped_im.save(f\"data/augmented/{fname}{im_ext}\")\n with 
open(f\"data/augmented/{fname}.json\", \"w\") as write_file:\n data = {\n \"darts\": im_label[\"darts\"],\n \"perspective\": warped_persp.tolist()\n }\n write_file.write(json.dumps(data))\n return warped_im, warped_persp", "def change_imagens(current_folder, destination_folder, name=\"crosswalk\", qtd=0, dim=(128, 64)):\n\n img_path = [os.path.join(current_folder, file) for file in os.listdir(current_folder)]\n qtd_img = 1\n\n for img in img_path:\n img_name = os.path.split(img)[1].split(\"/\")[0]\n extension = os.path.split(img_name)[1].split(\".\")[0]\n\n new_name = name\n saved_name = new_name + \"_\" + str(qtd_img + qtd)\n print(img_name + \" -> \" + saved_name + \".jpg\")\n\n try:\n saved_folder = destination + \"/\"\n\n # carrega a imagem\n img = Image.open(current_folder + \"/\" + img_name)\n # converte a imagem (PIL) para numpy array\n imgNp = np.array(img,'uint8')\n # redimensionar a imagem\n imgNp = cv2.resize(imgNp, dim)\n\n # Cria a pasta positivas_final e salva as imagens\n pathlib.Path(saved_folder).mkdir(parents=True, exist_ok=True)\n cv2.imwrite(saved_folder + saved_name + \".jpg\", imgNp)\n\n qtd_img += 1\n\n except ValueError:\n print('.')", "def processImage(fpaths_src, label_map, fnames_src, img_idx):\n global counter\n \n n_imgs = len(fpaths_src)\n print(\"Processing %s -- %s/%s (%s%%)\"%(fnames_src[img_idx],counter,n_imgs,round(100.*counter/n_imgs)))\n \n path = fpaths_src[img_idx]\n src_image_raw = Image.open(path, 'r')\n \n # size normalization of the image\n if not (args.resize == None):\n src_image_raw = src_image_raw.resize(size=(int(args.resize), int(args.resize)), resample=Image.BILINEAR)\n \n # convert to writable numpy array\n src_image = np.asarray(src_image_raw, dtype=np.uint8)\n src_image.setflags(write=True)\n \n # some dummy label\n label = -99.99\n # the labels\n if not (label_map == {}):\n # let the label start at 1, instead of 0\n label = int(label_map[fnames_src[img_idx]])+1\n else:\n # add a dummy label (between 0 and 1)\n label = np.random.rand()\n \n image_features = []\n \n # add the original\n image_features.append(generateFeatures(src_image,label,args.knn))\n \n if args.augment == 1:\n print \"Augmenting dataset...\"\n # data augmentation techniques\n rotation_angles = [i for i in xrange(36,360,36)] # samples are transformed by these rotation angles\n \n flip_x = True # data augmentation by flipping around x axis\n flip_y = True # data augmentation by flipping around y axis\n flip_xy= True # data augmentation by flipping around x AND y axis\n \n for angle in rotation_angles:\n rot_matrix = cv2.getRotationMatrix2D(\n (src_image.shape[1]/2.,src_image.shape[0]/2.),\n angle,\n 1.0)\n rot_sample_crop = np.array([])\n rot_sample_crop = cv2.warpAffine(src_image,\n rot_matrix,\n (src_image.shape[1],src_image.shape[0]),\n rot_sample_crop,\n cv2.INTER_LINEAR,\n cv2.BORDER_REFLECT_101)\n \n # add the sample to the dataset\n image_features.append(generateFeatures(rot_sample_crop,label,args.knn))\n \n # add 3 flipped copies\n if flip_x:\n rot_sample_crop_x = cv2.flip(rot_sample_crop,0)\n image_features.append(generateFeatures(rot_sample_crop_x,label,args.knn))\n if flip_y:\n rot_sample_crop_y = cv2.flip(rot_sample_crop,1)\n image_features.append(generateFeatures(rot_sample_crop_y,label,args.knn))\n if flip_xy:\n rot_sample_crop_xy = cv2.flip(rot_sample_crop,-1)\n image_features.append(generateFeatures(rot_sample_crop_xy,label,args.knn))\n \n counter+=1\n\n # return a nx128 or nxk matrix for the features of all modifications of this image\n feat_matrix = 
np.asarray(image_features)\n return feat_matrix", "def rename_images(_input_image_paths : list[str], _output_image_dir : str) -> None:\n #Starts at 0 to account for incrementing when seeing a non-alternative\n # (the 1st image can never be an alternative take)\n new_index = 0\n\n #Get the \"base name\" for the images - e.g., PICT, DCIM, IMG_, etc.\n #Assumes that each image has the same base name as the first one.\n #Also gets the \"base index\", e.g., 1, 001, 00018, etc.\n base_name, base_index = get_image_base_name_and_index(_input_image_paths[0])\n debug(f\"Base name ({base_name}) and index({base_index})\")\n\n #Since the first image can't be an alternative take, this lets us\n # get the proper index length for all of the images\n index_length = len(base_index)\n\n #For each image, rename based on the new indices\n for image in _input_image_paths:\n flag = get_alternative_flag(image)\n\n #increment the index if this was not an alternative-take image\n if flag == '':\n new_index += 1\n\n #Create an index padded with a sufficient number of prefixing '0's\n formatted_index = \"0\" * (index_length - len(str(new_index))) + str(new_index)\n\n #Get the file name's extension\n extension = os.path.splitext(image)[1]\n\n #Create the new file name based off of the current index\n new_filepath = _output_image_dir + base_name + formatted_index + flag + extension\n debug(f\"Saving {image} to {new_filepath}\")\n\n #Save the image with the updated path name\n with Image.open(image) as image_object:\n image_object.save(new_filepath)", "def _extract_images(source_path, target_path, merge_labels):\n\n images_path = os.path.join(source_path, 'imagesTr')\n labels_path = os.path.join(source_path, 'labelsTr')\n\n # Filenames have the form 'hippocampus_XX.nii.gz'\n filenames = [x for x in os.listdir(images_path) if x[:5] == 'hippo']\n\n # Create directories\n if not os.path.isdir(target_path):\n os.makedirs(target_path)\n\n for filename in filenames:\n\n # Extract only T2-weighted\n x = sitk.ReadImage(os.path.join(images_path, filename))\n x = sitk.GetArrayFromImage(x)\n y = sitk.ReadImage(os.path.join(labels_path, filename))\n y = sitk.GetArrayFromImage(y)\n\n # Shape expected: (35, 51, 35)\n # Average label shape: (24.5, 37.8, 21.0)\n assert x.shape == y.shape\n\n # No longer distinguish between hippocampus proper and subiculum\n if merge_labels:\n y[y == 2] = 1\n\n # Save new images so they can be loaded directly\n study_name = filename.replace('_', '').split('.nii')[0]\n sitk.WriteImage(sitk.GetImageFromArray(x), join_path([target_path, study_name + \".nii.gz\"]))\n sitk.WriteImage(sitk.GetImageFromArray(y), join_path([target_path, study_name + \"_gt.nii.gz\"]))", "def copy_images_to_new(lab_img, from_dir, to_dir):\n \n for img in lab_img:\n if not os.path.exists(join(todir, img)):\n shutil.copyfile(join(fromdir, img), join(todir, img)) \n print(\"Done\")", "def symlink_images(output_path):\n image_dest = Path(output_path / \"assets/images/integrations\")\n image_dest.mkdir(mode=0o755, parents=True, exist_ok=True)\n\n processed = set()\n for img_file in sorted(INTEGRATIONS_PATH.glob(\"**/img/*.png\")):\n if not img_file.is_file():\n continue\n\n dest_path = image_dest / img_file.name\n\n if img_file.name in processed:\n # print(f\"WARNING image file {img_file} has duplicate name\")\n continue\n processed.add(img_file.name)\n\n dest_path.symlink_to(img_file)", "def prepare_target_imagery(\n source_directory,\n destination_directory,\n include_path=lambda path: True,\n translate_path=lambda p: p,\n 
after_file_copy=lambda source_path, final_path: None,\n compress_imagery=True,\n hard_link=False):\n if not destination_directory.exists():\n destination_directory.mkdir()\n\n for source_file in source_directory.rglob('*'):\n # Skip hidden files and directories\n if source_file.name.startswith('.') or source_file.is_dir() or not include_path(source_file):\n continue\n\n rel_source_file = source_file.relative_to(source_directory)\n\n rel_target_path = translate_path(rel_source_file)\n\n absolute_target_path = destination_directory / rel_target_path\n\n output_paths = _copy_file(source_file, absolute_target_path, compress_imagery, hard_link=hard_link)\n\n after_file_copy(source_file, output_paths)", "def pad_images(_input_image_paths : list[str], _output_image_dir : str, \\\n _pad_colour : tuple[int,int,int]) -> None:\n for image in _input_image_paths:\n with Image.open(image) as image_object:\n\n #Rotate the image based on the EXIF data's orientation tag.\n #Ensures that images taller than they are wide are kept as such when padding\n image_object = PIL.ImageOps.exif_transpose(image_object)\n\n old_x,old_y = image_object.size\n bigger_dimension = max(old_x,old_y)\n\n #Figure out how much extra should be added to each of the four sides\n x_additive = y_additive = 0\n if old_x > old_y:\n y_additive = (old_x - old_y)//2\n\n elif old_y > old_x:\n x_additive = (old_y - old_x)//2\n\n #Create a new, larger image with the requested padding colour,\n # and then paste the original image overtop in the correct position\n new_canvas = Image.new(\"RGB\", (bigger_dimension,bigger_dimension), _pad_colour)\n new_canvas.paste(image_object, (x_additive, y_additive))\n new_canvas.save(_output_image_dir + os.path.basename(image))", "def compress_img():\n in_path = 'output/templates/rgb/'\n out_path = 'output/templates/imgs/'\n names = os.listdir(in_path)\n for i, name in enumerate(names):\n img = cv2.imread(in_path + name, 0)\n if any(np.array(img.shape) > 1000):\n img = cv2.resize(img, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)\n cv2.imwrite(out_path + name, img)\n\n return", "def appendpics(pathofimg, w_sub, h_sub, step):\n num = 0\n dirlist = []\n images = [] # images in each folder\n for root, dirs, fileswer in os.walk(pathofimg):\n if len(dirs)!= 0:\n for dir in dirs:\n dirlist.append(dir)\n for rooert, dirwerwes, files in os.walk(pathofimg+'/'+dir):\n for file in files:\n if(file.endswith('.png')):\n images.append(Image.open(pathofimg+'/'+dir+'/'+file))\n if(len(images)==81):\n break\n target = montage(images, w_sub, h_sub, step)\n target.save(pathofimg +'/'+ dir + '.png', quality=100)\n else:\n dir = 'Generated'\n for file in fileswer:\n if (file.endswith('.png')):\n images.append(Image.open(pathofimg +'/'+ file))\n target1 = montage(images, w_sub, h_sub, step)\n savepath = pathofimg +'/'+ 'generated'\n os.makedirs(savepath)\n target1.save(savepath +'/'+ dir + '.png', quality=100)", "def prepare_test_images(images_path, indices, aug_config, majority_voting=False):\n # Load images\n #NOTE: needed np.arrays to append that stuff easily\n images = np.array(extract_images(images_path, indices))\n \n if majority_voting:\n #This has been chosen so that the structure of images is\n #50 images no rotate | 50 images rotate 90 | 50 images rotate 180 | 50 images rotate 270 \n #instead of\n #img0 | img0 rotate 90 | img0 rotate 180 | img0 rotate 270 | img1 | img1 rotate 90 | ...\n #This is easier then to rotate back the images afters model eval\n r1 = np.array(rotate_images(images, [90]))\n r2 = 
np.array(rotate_images(images, [180]))\n r3 = np.array(rotate_images(images, [270]))\n images = np.concatenate((images, r1, r2, r3),axis=0)\n \n # Augment channels if necessary\n if aug_config.augment_channels:\n images = augment_channels(images, aug_config)\n return images", "def _crop_write_image(self, inroot, images, outroot):\n for image in images:\n inimage_path = osp.join(inroot, image)\n cvimg = cv2.imread(inimage_path)\n cvimg = cvimg[60:-30, 25:-25]\n h, w, _ = cvimg.shape\n assert h == w == 128\n outimage_path = osp.join(outroot, image)\n cv2.imwrite(outimage_path, cvimg)\n print(outimage_path)", "def augment(self, image):\n pass" ]
[ "0.72657603", "0.7063366", "0.6926456", "0.6781271", "0.6385733", "0.6332555", "0.63126224", "0.6292644", "0.62688696", "0.6258702", "0.62342423", "0.62127364", "0.6199814", "0.61439675", "0.61195624", "0.6092347", "0.60701877", "0.605984", "0.60366774", "0.60089594", "0.5961846", "0.59552646", "0.5940757", "0.59318596", "0.5918599", "0.58855563", "0.5839955", "0.5829714", "0.5797473", "0.5796701" ]
0.7562213
0
Threaded fetching of the Udemy course links from tutorialbar.com
def gatherUdemyCourseLinks(courses):
    thread_pool = Pool()
    results = thread_pool.map(getUdemyLink, courses)
    thread_pool.close()
    thread_pool.join()
    return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_course_page_urls(self,soup):\n\t\tcourse_links =[]\n\t\troot_url = 'http://onlinelearning.cornell.edu'\n\t\tfor link in soup.select('span.field-content a[href]'):\n\t\t\tnew_url = root_url + link['href']\n\t\t\tcourse_links.append(new_url)\n\t\t\tcourse_links.append(' \\n')\n\t\t\n\t\tself.new_list.append(course_links)\n\t\treturn course_links", "def download_course_interactive(self):\n self.domain_prompt()\n self.course_prompt()\n self.get_course_page()\n self.get_course_title()\n self.get_course_unit_titles()\n self.get_course_unit_slugs()\n self.get_course_unit_urls()\n\n print(\"\\nGenerating Path Slugs...\\n\")\n self.get_course_all_slugs()\n self.get_course_youtube_ids()\n self.download_course_videos()", "def get_course_all_slugs(self):\n\n unit_lessons_counter = 0\n # Unit Page -> Subunit Header + Subunit Block -> Lesson Block -> Lesson Title\n for course_unit_url, course_unit_slug in zip(\n self.course_unit_urls, self.course_unit_slugs\n ):\n\n unit_lessons_counter = 0\n # -> Unit Page\n try:\n course_unit_page = BeautifulSoup(\n requests.get(ROOT_URL + course_unit_url).text, \"lxml\"\n )\n except requests.ConnectionError as e:\n print(\"Error Connecting!\\n\", e)\n sys.exit(1)\n except requests.exceptions.HTTPError as errh:\n print(\"Http Error:\", errh)\n sys.exit(1)\n except requests.exceptions.ConnectionError as errc:\n print(\"Error Connecting:\", errc)\n sys.exit(1)\n except requests.exceptions.Timeout as errt:\n print(\"Timeout Error:\", errt)\n sys.exit(1)\n except requests.exceptions.RequestException as err:\n print(\"OOps: Something Else\", err)\n sys.exit(1)\n\n subunit_couter = 0\n\n # -> Subunit Header -> Subunit Block\n for course_subunit_title, course_subunit_body in zip(\n course_unit_page.find_all(attrs=COURSE_SUBUNIT_TITLE_ATTRS),\n course_unit_page.find_all(\n COURSE_SUBUNIT_BODY[\"tag\"], class_=COURSE_SUBUNIT_BODY[\"class\"]\n ),\n ):\n\n logging.debug(\"course_subunit_title:{}\".format(course_subunit_title))\n lesson_counter = 0\n # -> Lesson Block\n for course_lesson_body in course_subunit_body.find_all(\n COURSE_LESSON_BODY[\"tag\"],\n {\n \"class\": [\n COURSE_LESSON_BODY[\"class_i\"],\n COURSE_LESSON_BODY[\"class_ii\"],\n ]\n },\n ):\n course_lesson_span = course_lesson_body.find_all(\n COURSE_LESSON_SPAN[\"tag\"], class_=COURSE_LESSON_SPAN[\"class\"]\n )\n course_lesson_aria_label = course_lesson_span[0][\n COURSE_LESSON_LABEL\n ]\n logging.debug(\n \"course_lesson_aria_label:{}\".format(course_lesson_aria_label)\n )\n # -> Lesson Title\n # Check whether lesson block is a video\n if course_lesson_aria_label == \"Video\":\n lesson_title = course_lesson_body.find(\n COURSE_LESSON_TITLE[\"tag\"],\n class_=COURSE_LESSON_TITLE[\"class\"],\n )\n\n logging.debug(\n \"course_lesson_title:{}\".format(lesson_title.text)\n )\n self.lesson_titles.append(lesson_title.text)\n self.course_all_slugs.append(\n self.output_rel_path\n + course_unit_slug\n + \"/\"\n + str(subunit_couter)\n + \"_\"\n + course_subunit_title.text.replace(\" \", \"_\")\n + \"/\"\n + str(lesson_counter)\n + \"_\"\n + lesson_title.text.replace(\" \", \"_\")\n )\n\n lesson_counter += 1\n unit_lessons_counter += lesson_counter\n subunit_couter += 1\n self.unit_slugs_counter[course_unit_url] = unit_lessons_counter\n logging.info(\"Course - All slugs generated\")", "def thread_function(username: str, password: str):\n\n\n # Adding a dummy-course so it is possible to test pirka with data\n DatabaseInserter.add_user_has_course(username=username, course_code=\"DUMMYCOURSE\")\n 
DatabaseInserter.add_user_has_course(username=username, course_code=\"DUMMY2222\")\n DatabaseInserter.add_user_has_course(username=username, course_code=\"DUMMY3333\")\n\n\n\n # Scrapes for additional data that is user specific\n itslearning_scraper = ItsLearningScraper(username, password)\n blackboard_scraper = BlackboardScraper(username, password)\n\n # adds user-course relation to database\n itslearning_scraper.get_course_list()\n blackboard_scraper.get_course_list()\n\n # adds user's associated assignment data\n\n # the difference between scraping both user completed and\n # user incompleted assignment in contrast to only scrape user completed assignment\n # is negligible and therefore we scrape all content\n itslearning_scraper.get_all_assignments()\n blackboard_scraper.get_all_assignments()\n\n blackboard_scraper.close_driver()\n itslearning_scraper.close_driver()\n\n\n\n # add ical links\n #itslearning_scraper.get_calendar_feed()", "def getlinks(url):\n page = Linkfetcher(url)\n page.linkfetch()\n for i, url in enumerate(page):\n print(\"%d ==> %s\" % (i, url))", "def download_course_given(self, course_url: str):\n self.course_url = course_url\n self.get_course_page()\n self.get_course_title()\n self.get_course_unit_titles()\n self.get_course_unit_slugs()\n self.get_course_unit_urls()\n\n print(\"\\nGenerating Path Slugs...\\n\")\n self.get_course_all_slugs()\n self.get_course_youtube_ids()\n self.download_course_videos()", "def getURLs():", "def get_courses(self, selected_domain_url: str) -> Tuple[List[str], List[str]]:\n\n courses, courses_url = [], []\n print(\"\\nDownloading Courses...\\n\")\n try:\n selected_domain_page = BeautifulSoup(\n requests.get(selected_domain_url).text, \"lxml\"\n )\n except requests.ConnectionError as e:\n print(\"Error Connecting!\\n\", e)\n sys.exit(1)\n except requests.exceptions.HTTPError as errh:\n print(\"Http Error:\", errh)\n sys.exit(1)\n except requests.exceptions.ConnectionError as errc:\n print(\"Error Connecting:\", errc)\n sys.exit(1)\n except requests.exceptions.Timeout as errt:\n print(\"Timeout Error:\", errt)\n sys.exit(1)\n except requests.exceptions.RequestException as err:\n print(\"OOps: Something Else\", err)\n sys.exit(1)\n\n for course_header in selected_domain_page.find_all(\n COURSE_HEAD[\"tag\"], class_=COURSE_HEAD[\"class\"]\n ):\n course = course_header.find(\n COURSE_URL[\"tag\"], class_=COURSE_URL[\"class\"]\n ).text\n courses.append(course)\n\n course_link = course_header.find(\n COURSE_URL[\"tag\"], class_=COURSE_URL[\"class\"]\n )\n course_slug = course_link[\"href\"]\n courses_url.append(ROOT_URL + course_slug)\n return courses, courses_url", "def parseCourses(self, response):\n sel = Selector(response)\n courses = sel.xpath('//div[@class=\"course-info expandable\"]')\n for c in courses:\n item = CourseItem(response.request.meta[\"item\"])\n item['code'] += '-' + c.xpath('@id').get().strip()\n item['name'] = c.xpath('//a[@class=\"courselink\"]/text()').get().strip()\n # everything works up to here #\n href = c.xpath('div/h3/a/@href').get()\n url = urljoin('https://web-app.usc.edu', href)\n yield Request(url=url,callback=self.parseSection,meta={'item':item})", "def get_course_unit_urls(self):\n\n for url in self.course_page.find_all(attrs=COURSE_UNIT_TITLE):\n self.course_unit_urls.append(url[\"href\"])\n logging.debug(\"course_unit_urls:{}\".format(self.course_unit_urls))\n logging.info(\"Course unit urls retrieved\")", "def _get_courses(self) -> None:\n\n courses_content: NavigableString = self.soup.find(\"div\", \n 
{\"class\": \"coursesContent\"})\n course_items: ResultSet = courses_content.find_all(\"div\", \n {\"class\": \"courseItem\"})\n\n for item in course_items:\n course_name: str = item.a[\"href\"].split(\"/\")[-2].lower()\n course_data: ParseType = self._parse(item)\n self._update(course_name, course_data)", "def get_recipe_links(pages):\n recipe_links = []\n for page in xrange(1, pages+1):\n sleep(SCRAPING_REQUEST_STAGGER)\n recipe_links.extend(get_recipe_links_by_page(page))\n cuisine_recipes = get_recipe_details(list(set(recipe_links)))\n return cuisine_recipes", "def run(self):\n urls_to_download = self._get_links()\n results = ThreadPool(8).imap_unordered(self._download_url, urls_to_download)\n for path in results:\n print(path)", "def get_courses_info(url, headers):\n dash = get_page_contents(url, headers)\n soup = BeautifulSoup(dash)\n courses_soup = soup.find_all('article', 'course')\n courses = []\n for course_soup in courses_soup:\n course_id = None\n course_name = course_soup.h3.text.strip()\n course_url = None\n course_state = 'Not yet'\n try:\n # started courses include the course link in the href attribute\n course_url = BASE_URL + course_soup.a['href']\n if course_url.endswith('info') or course_url.endswith('info/'):\n course_state = 'Started'\n # The id of a course in edX is composed by the path\n # {organization}/{course_number}/{course_run]\n course_id = course_soup.a['href'][9:-5]\n except KeyError:\n pass\n courses.append(Course(id=course_id,\n name=course_name,\n url=course_url,\n state=course_state))\n return courses", "def get_course_page(self):\n\n print(\"Course URL: {}\".format(self.course_url))\n try:\n self.course_page = BeautifulSoup(requests.get(self.course_url).text, \"lxml\")\n except requests.ConnectionError as e:\n print(\"Error Connecting!\\n\", e)\n sys.exit(1)\n except requests.exceptions.HTTPError as errh:\n print(\"Http Error:\", errh)\n sys.exit(1)\n except requests.exceptions.ConnectionError as errc:\n print(\"Error Connecting:\", errc)\n sys.exit(1)\n except requests.exceptions.Timeout as errt:\n print(\"Timeout Error:\", errt)\n sys.exit(1)\n except requests.exceptions.RequestException as err:\n print(\"Oops: Something Else\", err)\n sys.exit(1)", "def extract_courses():\n if settings.XPRO_COURSES_API_URL:\n return requests.get(settings.XPRO_COURSES_API_URL, timeout=20).json()\n return []", "def gather_headlines(urls):\n pass", "def available_courses(self):\r\n def _get_course_name(el):\r\n # The first component in the link text is the course number\r\n _, course_name = el.text.split(' ', 1)\r\n return course_name\r\n\r\n return self.q(css='section.info > hgroup > h3 > a').map(_get_course_name).results", "def get_course_youtube_ids(self):\n\n with ProgressBar() as pb:\n for i, unit_url in zip(\n pb(range(len(self.course_unit_urls)), label=\"Collecting Youtube IDs:\"),\n self.course_unit_urls,\n ):\n unit_url = ROOT_URL + unit_url\n yt_dlp_opts = {\n \"logger\": MyLogger(),\n \"retries\": 20,\n \"ignoreerrors:\": True,\n \"skip_download\": True,\n }\n with yt_dlp.YoutubeDL(yt_dlp_opts) as ydl:\n lessons_counter = 0\n try:\n logging.debug(\n \"Collecting youtube ids for unit:{}\".format(unit_url)\n )\n info_dict = ydl.extract_info(unit_url, download=False)\n for video in info_dict[\"entries\"]:\n video_id = video.get(\"id\", None)\n self.lesson_youtube_ids.append(video_id)\n lessons_counter += 1\n except DownloadError as e:\n logging.debug(\n \"Collecting youtube ids for unit:{}\".format(unit_url)\n )\n info_dict = ydl.extract_info(\n unit_url, 
download=False, process=False\n )\n for video in info_dict[\"entries\"]:\n video_id = video.get(\"url\", None)\n self.lesson_youtube_ids.append(video_id)\n lessons_counter += 1\n except Exception as e:\n print(\"Youtube-dl: An error occured!\", e)\n sys.exit(1)\n\n self.unit_ids_counter[unit_url] = lessons_counter\n\n logging.info(\"Course - Collected Youtube IDs\")", "def get_links() -> list:\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36\",\n \"Accept\": \"text/html\",\n \"Accept-Encoding\": \"gzip, deflate\",\n }\n p = re.compile(r'\\d+.html')\n base_url = 'http://stateoftheunion.onetwothree.net/texts/'\n essay_url = base_url + 'index.html'\n res = requests.get(essay_url, headers=headers)\n soup = BeautifulSoup(res.content, 'html')\n links = soup.find_all('a')\n sotu_links = {link.text: base_url + link.get('href', '') for link in links if re.match(p, link.get('href', ''))}\n return sotu_links", "def get_lessons(self, course: str):\n\n lesson_link: Any = self.courses[course][\"link\"]\n lesson_data = self._parse_lesson(lesson_link)\n # self.courses[course][\"lessons\"] = lesson_data\n self.lessons = lesson_data", "def get_videos_urls(author):\n\tfoundAll = False\n\tind = 1\n\tvideos = []\n\twhile not foundAll:\n\t inp = urllib.urlopen(r'http://gdata.youtube.com/feeds/api/videos?start-index={0}&max-results=50&alt=json&orderby=published&author={1}'.format( ind, author ) )\n\t try:\n\t resp = json.load(inp)\n\t inp.close()\n\t returnedVideos = resp['feed']['entry']\n\t for video in returnedVideos:\n\t videos.append( video['link'][0]['href'] ) \n\n\t ind += 50\n\t if ( len( returnedVideos ) < 50 ):\n\t foundAll = True\n\t except:\n\t #catch the case where the number of videos in the channel is a multiple of 50\n\t print \"error\"\n\t foundAll = True\n\n\treturn videos", "def main(url):\n \n words = fetch_words(url)\n print_items(words)", "async def org_info_below_13(org_urls13):\n org_info_till13 = []\n project_urls_till13 = []\n for url in org_urls13:\n # General information about the org\n try:\n soup = await get_page(url)\n org_name = basename(url)\n org_info = soup.find_all('p')\n web_page = org_info[0].text.splitlines()[-1].strip()\n mailing_list = org_info[1].text.split(\":\")[-1].strip()\n detail = org_info[2].text\n org_info_till13.append({'name': org_name, 'about': detail,\n 'page': web_page, 'mail': mailing_list,\n 'link': url})\n project_urls_till13.extend(grab_project_links(soup))\n\n except IndexError:\n print(url)\n\n return org_info_till13, get_project_info(project_urls_till13)", "def main(url):\n words = fetch_words(url)\n print_items(words)", "def get_thread_urls(self, response):\n\n print(\"scraping {0}\".format(response.url))\n url_stories = []\n\n # <li_tags> is a list of all the <li> tags in the html doc with a certain class value.\n # This corresponds to all threads that are NOT sticky.\n li_tags = response.xpath(\"//li[@class='discussionListItem visible ']\")\n\n for thread_tag in li_tags:\n\n author_name = thread_tag.xpath('@data-author').extract_first()\n\n # Get the last post date for a thread ========================================================\n last_post_date = thread_tag.xpath(\".//dl[@class='lastPostInfo']//abbr/text()\").extract_first()\n if last_post_date is not None:\n last_post_date = datetime.strptime(last_post_date, \"%b %d, %Y at %I:%M %p\").replace(tzinfo=utc)\n else:\n # fix with line continuation.\n last_post_date = 
thread_tag.xpath(\".//span[@class='DateTime']/@title\").extract_first()\n last_post_date = datetime.strptime(last_post_date, \"%b %d, %Y at %I:%M %p\").replace(tzinfo=utc)\n\n # ============================================================================================\n\n author, created = Author.objects.get_or_create(name=author_name)\n if created:\n author.save()\n\n title = thread_tag.xpath(\".//h3[@class='title']/a/text()\").extract_first().encode('utf-8')\n story, created = Story.objects.get_or_create(title=title)\n\n # if created is true, then it's a brand new story, so make sure to save it.\n if created:\n story.save()\n story.authors.add(author)\n\n a_node = thread_tag.xpath(\"div/div/h3/a\")\n thread_url = a_node.xpath(\"@href\").extract_first()\n\n cur_date = datetime.now(tz=utc)\n oldest_date = datetime.min.replace(tzinfo=utc)\n\n created = False\n \"\"\"\n Over here, I am attempting to either update an existing storyhost\n object, OR I am creating a new one. It looks redundant, but I found that\n if I just used get_or_create, I was forced to set last_date automatically.\n\n I didn't always want to create a brand new object, so this verbose code\n was necessary.\n \"\"\"\n try:\n # TRY TO UPDATE EXISTING object\n storyhost = StoryHost.objects.get(host=self.HOST, story=story, url=thread_url)\n storyhost.save()\n except StoryHost.DoesNotExist:\n\n # CREATE BRAND NEW STORYHOST OBJECT\n storyhost, created = StoryHost.objects.get_or_create(host=self.HOST,\n story=story,\n url=thread_url,\n last_scraped=oldest_date)\n\n storyhost.save()\n\n \"\"\"\n Check if the last post date is more recent than the\n storyhost's last scraped date. If it's not, skip it.\n\n If it is, update the last scraped date, and add it to the\n list of url_stories to be returned at the end of this function.\n \"\"\"\n\n last_seg_date = self.get_last_seg_date(story)\n if thread_url is not None:\n if last_post_date > storyhost.last_scraped or last_seg_date < last_post_date:\n storyhost.last_scraped = cur_date\n storyhost.save()\n thread_link = response.urljoin(thread_url)\n\n # Add this story to two separate lists, one for updating, one for just\n # scraping.\n if created:\n url_stories.append((thread_link, story))\n else:\n self.update_list.append((\"{0}threadmarks\".format(thread_link), story))\n else:\n print(\"Skipping {0}\".format(storyhost.url))\n\n return url_stories", "def iter_links(self):", "def main(url):\n words = fetch_words(url)\n\n print_items(words)", "def get_courses_html():\r\n r = requests.get(URL_CS_ALL_REQ)\r\n if r.status_code == 200:\r\n return r.text\r\n else:\r\n return None", "def get_url_pages():\n url = \"https://swapi.co/api/people/\"\n pages_url = []\n \n while True:\n \n pages_url.append(url)\n \n r = requests.get(url)\n \n assert r.status_code == 200, \"There was a problem connecting with SWAPI.\"\n \n url = r.json()[\"next\"] # If there are more pages to check, this will update the URL accordingly.\n \n if url is None: # If there are no more pages to check, this finishes the function.\n \n print(\"\\n\")\n print(\"- - - All URLs were successfully retrieved. - - -\")\n \n return pages_url\n break\n \n print(\"Getting URL from page\", url[-1], \"...\")" ]
[ "0.64871687", "0.6117817", "0.6110424", "0.59748065", "0.59708416", "0.5970168", "0.5913486", "0.591276", "0.58788747", "0.58563405", "0.5855745", "0.58344626", "0.5743032", "0.57388294", "0.5734839", "0.5685268", "0.5638574", "0.5638217", "0.56250453", "0.55969715", "0.5592529", "0.5588122", "0.55824393", "0.55675834", "0.55629706", "0.5554113", "0.5549827", "0.55341804", "0.5532578", "0.55228317" ]
0.7585741
0
Returns whether a CORS-relevant header was used in the request. In that case the answer should include appropriate headers.
def _has_cors_header(self):
    return "Access-Control-Request-Method" in self.headers or "Access-Control-Request-Headers" in self.headers or "Origin" in self.headers
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_cors_headers(self, res):\r\n self.assertEqual(res.headers['access-control-allow-origin'], '*')\r\n self.assertEqual(\r\n res.headers['access-control-allow-headers'], 'X-Requested-With')", "def _check_cors_headers(self, res):\r\n self.assertEqual(res.headers['access-control-allow-origin'], '*')\r\n self.assertEqual(\r\n res.headers['access-control-allow-headers'], 'X-Requested-With')", "def preflight_checks_cors(self, method, rule):\n if options.http_cors and 'Access-Control-Request-Method' in self.request.headers:\n origin = self.request.headers.get('Origin')\n if origin:\n allowed_methods =list(rule.target.str_allowed_methods) + ['OPTIONS']\n self.set_header('Access-Control-Allow-Methods', '.'.join(allowed_methods))\n\n req_method = self.request.headers.get('Access-Control-Request-Method')\n req_headers = self.request.headers.get('Access-Control-Request-Headers')\n\n method_target = rule.target.get_method_target(HttpMethod[req_method])\n if req_method in allowed_methods and method_target:\n if '*' in method_target.origins:\n self.set_header('Access-Control-Allow-Origin', '*')\n elif origin in method_target.origins:\n self.add_header('Vary', 'Origin')\n self.set_header('Access-Control-Allow-Origin', origin)\n if '*' in method_target.allowed_headers:\n self.set_header('Access-Control-Allow-Headers', req_headers)\n else:\n self.set_header('Access-Contorl-Allow-Headers', '.'.join(method_target.allowed_headers))\n\n if method_target.exposed_headers:\n self.set_header('Access-Control-Expose-Headers', '.'.join(method_target.exposed_headers))\n if method_target.allow_credentials is not None:\n self.set_header('Access-Control-Allow-Credentials', method_target.allow_credentials)\n\n self.set_header('Access-Control-Max-Age', method_target.max_age)\n else:\n self.set_header('Access-Control-Max-Age', 86400)\n self.set_header('Access-Control-Allow-Origin', '*')\n self.set_header('Access-Control-Allow-Headers', req_headers)\n else:\n self.set_header('Allow', '.'.join(rule.target.str_methods + ['OPTIONS']))\n return False\n return True", "def cors_allow_any(request, response):\n origin = request.META.get('HTTP_ORIGIN')\n if not origin:\n return response\n\n # From the CORS spec: The string \"*\" cannot be used for a resource that supports credentials.\n response['Access-Control-Allow-Origin'] = origin\n patch_vary_headers(response, ['Origin'])\n response['Access-Control-Allow-Credentials'] = 'true'\n\n if request.method == 'OPTIONS':\n if 'HTTP_ACCESS_CONTROL_REQUEST_HEADERS' in request.META:\n response['Access-Control-Allow-Headers'] \\\n = request.META['HTTP_ACCESS_CONTROL_REQUEST_HEADERS']\n response['Access-Control-Allow-Methods'] = 'GET, POST, OPTIONS'\n\n return response", "def get_cors_headers():\n return {\n \"X-Requested-With\": '*',\n \"Access-Control-Allow-Headers\":\n 'Content-Type,X-Amz-Date,Authorization,X-Api-Key,x-requested-with',\n \"Access-Control-Allow-Origin\": '*',\n \"Access-Control-Allow-Methods\": 'POST,GET,OPTIONS'\n }", "def cors(self) -> Optional[pulumi.Input['CorsRulesArgs']]:\n return pulumi.get(self, \"cors\")", "def add_cors_header(resp):\n resp.headers['X-Content-Type-Options'] = os.environ.get(\"X_CONTENT_TYPE_OPTIONS\")\n resp.headers['Access-Control-Allow-Origin'] = os.environ.get(\"ACCESS_CONTROL_ALLOW_ORIGIN\")\n resp.headers['Access-Control-Allow-Headers'] = os.environ.get(\"ACCESS_CONTROL_ALLOW_HEADERS\")\n return resp", "def allow_headers(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"allow_headers\")", "def cors(self) -> 
pulumi.Output[Optional['outputs.CorsRulesResponse']]:\n return pulumi.get(self, \"cors\")", "def _add_CORS_header(self):\n origin = self.tabpy.get_access_control_allow_origin()\n if len(origin) > 0:\n self.set_header(\"Access-Control-Allow-Origin\", origin)\n logger.debug(\"Access-Control-Allow-Origin:{}\".format(origin))\n\n headers = self.tabpy.get_access_control_allow_headers()\n if len(headers) > 0:\n self.set_header(\"Access-Control-Allow-Headers\",headers)\n logger.debug(\"Access-Control-Allow-Headers:{}\".format(headers))\n\n methods = self.tabpy.get_access_control_allow_methods()\n if len(methods) > 0:\n self.set_header(\"Access-Control-Allow-Methods\",methods)\n logger.debug(\"Access-Control-Allow-Methods:{}\".format(methods))", "def cors(self) -> typing.Optional[typing.List[\"CorsRule\"]]:\n return self._values.get('cors')", "def allowed_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"allowed_headers\")", "def is_header_content(response, key, value):\n try:\n if response.headers[key].lower() == value:\n return True\n else:\n return False\n except:\n return False", "def add_cors_headers_to_response(response, request):\n opt_method_list = ','.join(request.allowed_method_list + ['OPTIONS'])\n response['Allow'] = opt_method_list\n response['Access-Control-Allow-Methods'] = opt_method_list\n response['Access-Control-Allow-Origin'] = request.META.get('Origin', '*')\n response['Access-Control-Allow-Headers'] = 'Authorization'\n response['Access-Control-Allow-Credentials'] = 'true'", "def http_header_access_control_allow_origin():\n return 'Access-Control-Allow-Origin'", "def add_cors_headers(resp):\n if 'headers' not in resp:\n resp['headers'] = dict()\n resp['headers']['Access-Control-Allow-Origin'] = '*',\n resp['headers']['Access-Control-Allow-Headers'] = 'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token',\n resp['headers']['Access-Control-Allow-Credentials'] = True,\n return resp", "def check_origin(self, origin):\n # import re\n # bool(re.match(r'^.*?\\.mydomain\\.com', origin))\n # allowed = super.check_origin(origin)\n if self.allow_origin == '*':\n return True\n\n host = self.request.headers.get(\"Host\")\n if origin is None:\n origin = self.request.headers.get(\"Origin\")\n\n # If no header is provided, assume we can't verify origin\n if origin is None:\n LOG.warning(\"user {0} Missing Origin header, rejecting WebSocket connection.\".format(self.client_id))\n return False\n if host is None:\n LOG.warning(\"user {0} Missing Host header, rejecting WebSocket connection.\".format(self.client_id))\n return False\n\n origin = origin.lower()\n origin_host = urlparse(origin).netloc\n\n # OK if origin matches host\n if origin_host == host:\n return True\n\n # Check CORS headers\n if self.allow_origin:\n allow = self.allow_origin == origin\n # elif self.allow_origin_pat:\n # allow = bool(self.allow_origin_pat.match(origin))\n else:\n # No CORS headers deny the request\n allow = False\n if not allow:\n LOG.warning(\"user {0} Blocking Cross Origin WebSocket Attempt. 
Origin: %s, Host: %s\",\n self.client_id, origin, host)\n return allow", "def test_cors(self):\r\n res = self.app.get('/api/app/1')\r\n err_msg = \"CORS should be enabled\"\r\n print res.headers\r\n assert res.headers['Access-Control-Allow-Origin'] == '*', err_msg\r\n methods = ['PUT', 'HEAD', 'DELETE', 'OPTIONS', 'GET']\r\n for m in methods:\r\n assert m in res.headers['Access-Control-Allow-Methods'], err_msg\r\n assert res.headers['Access-Control-Max-Age'] == '21600', err_msg\r\n headers = 'CONTENT-TYPE, AUTHORIZATION'\r\n assert res.headers['Access-Control-Allow-Headers'] == headers, err_msg", "def check_header(self, name, value):\r\n if value in self.headers.get(name, ''):\r\n return True\r\n return False", "def check_header(self, name, value):\r\n if value in self.headers.get(name, ''):\r\n return True\r\n return False", "def access_control_allow_credentials(self):\n return \"Access-Control-Allow-Credentials\" in self.headers", "def add_response_headers(self, request: HttpRequest) -> bool:\n if ADD_RESPONSE_HEADERS:\n return True\n if request.headers.get(\"X-GeoIP2-Debug\", False):\n return True\n if request.GET.get(\"geoip2\", False):\n return True\n return False", "def get_headers(self, environ):\n return [('Content-Type', 'text/html'),\n ('Access-Control-Allow-Origin', '*')]", "def allow_cors(response):\n response.headers['Access-Control-Allow-Origin'] = '*'\n return response", "def accepts_header(request: Request) -> str:\n return request.headers.get('ACCEPTS')", "def coors_handle(response: dict) -> dict:\n response.headers.add('Access-Control-Allow-Origin', '*')\n response.headers.add(\n 'Access-Control-Allow-Headers',\n 'Content-Type,Authorization')\n response.headers.add(\n 'Access-Control-Allow-Methods',\n 'GET,PUT,POST,DELETE, PATCH')\n return response", "def enable_cors_after_request_hook():\n\tadd_cors_headers()", "def set_allow_origin(resp):\r\n\r\n h = resp.headers\r\n\r\n # Allow crossdomain for other HTTP Verbs\r\n if request.method != 'OPTIONS' and 'Origin' in request.headers:\r\n h['Access-Control-Allow-Origin'] = request.headers['Origin']\r\n\r\n\r\n return resp", "def allowed_headers(self) -> typing.Optional[typing.List[str]]:\n return self._values.get('allowed_headers')", "def test_specific_headers_sent_with_request(self):\n req = self.httpbin.get_my_headers(dry_run=True)\n self.assertIn('All-Request-Headers', req.prepared_request.headers)\n request_data_headers = self.httpbin.client['get_my_headers']['headers']['All-Request-Headers']\n self.assertEqual(req.prepared_request.headers['All-Request-Headers'], request_data_headers)" ]
[ "0.77488124", "0.77488124", "0.6515614", "0.65088147", "0.64439636", "0.63956106", "0.6353866", "0.63124585", "0.6307344", "0.62298346", "0.6204528", "0.6182343", "0.6175846", "0.61416525", "0.612719", "0.60686135", "0.60395163", "0.6036721", "0.601054", "0.601054", "0.60086536", "0.60037315", "0.60032666", "0.59388787", "0.5935854", "0.5902062", "0.58722377", "0.5870966", "0.5844526", "0.5800465" ]
0.81310594
0
Returns the request Range start and end if specified. If the Range header is not specified, returns (None, None).
def _get_range_header(self):
    try:
        range_header = self.headers["Range"]
        if range_header == None:
            return (None, None)
        match = self.range_regex.match(range_header)
        if match == None:
            return (None, None)
    except:
        return (None, None)
    from_val = int(match.group(1))
    if match.group(2) != None:
        return (from_val, int(match.group(2)))
    else:
        return (from_val, None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_range_header(self):\n range_header = self.headers.getheader(\"Range\")\n if range_header is None:\n return (None, None)\n if not range_header.startswith(\"bytes=\"):\n print \"Not implemented: parsing header Range: %s\" % range_header\n return (None, None)\n regex = re.compile(r\"^bytes=(\\d+)\\-(\\d+)?\")\n rangething = regex.search(range_header)\n if rangething:\n from_val = int(rangething.group(1))\n if rangething.group(2) is not None:\n return (from_val, int(rangething.group(2)))\n else:\n return (from_val, None)\n else:\n print 'CANNOT PARSE RANGE HEADER:', range_header\n return (None, None)", "def get_range(self, start=None, end=None):\n\n # handle the case of no data\n if self.data.shape[0] == 0 or self.source.data[\"index\"].shape[0] == 0:\n return None, None\n\n first_source_idx = self.source.data[\"index\"][0]\n last_source_idx = self.source.data[\"index\"][-1]\n\n # convert to timestamp if necessary\n if isinstance(self.data.index, pd.DatetimeIndex):\n start = pd.to_datetime(start, unit=\"ms\")\n end = pd.to_datetime(end, unit=\"ms\")\n first_source_idx = pd.to_datetime(first_source_idx, unit=\"ms\")\n last_source_idx = pd.to_datetime(last_source_idx, unit=\"ms\")\n\n # get new start and end\n if start is not None:\n if start < first_source_idx:\n start = max(self.data.index[0], start)\n elif start > last_source_idx:\n start = min(self.data.index[-1], start)\n elif start < self.data.index[0]:\n start = self.data.index[0]\n elif start > self.data.index[-1]:\n start = self.data.index[-1]\n elif len(self.source.data[\"index\"]) > 0:\n start = first_source_idx\n else:\n start = self.data.index[0]\n\n if end is not None:\n if end < first_source_idx:\n end = max(self.data.index[0], end)\n elif end > last_source_idx:\n end = min(self.data.index[-1], end)\n elif end < self.data.index[0]:\n end = self.data.index[0]\n elif end > self.data.index[-1]:\n end = self.data.index[-1]\n elif len(self.source.data[\"index\"]) > 0:\n end = last_source_idx\n else:\n end = self.data.index[-1]\n\n return start, end", "def _query_range_get(self):\n return (self.query_start, self.query_end)", "def do_GET(self):\n self.range_from, self.range_to = self._get_range_header()\n if self.range_from is None:\n # nothing to do here\n return SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)\n print 'range request', self.range_from, self.range_to\n f = self.send_range_head()\n if f:\n self.copy_file_range(f, self.wfile)\n f.close()", "def getRange(self):\n return self.range", "def GetSRange(self):\n ...", "def _getBounds(self, request):\n start = _getBound(request.args, \"start\")\n stop = _getBound(request.args, \"stop\", self._collection.pageSize)\n return start, stop", "def start(self):\n return _uhd_swig.range_t_start(self)", "def test_get_range(self):\n pass", "def get_range(self) -> tuple[int, int]:\n return self.range_from, self.range_to", "def log(self, start=None, end=None):\n headers = {\"Accept\": \"application/octet-stream\"}\n if ((start is not None) and (end is None)) or ((start is None) and (end is not None)):\n raise ValueError(\"For a range [start] and [end] are needed.\")\n if start is not None:\n start = int(start)\n end = int(end)\n if (start >= 0) and (start < end):\n headers[\"Range\"] = \"bytes={}-{}\".format(start, end)\n else:\n raise ValueError(\"Value of [start] must be o or greater and [end] must be greater than [start].\")\n\n response = None\n try:\n response = self.get(\"log\", headers=headers, advanced_mode=True)\n response.raise_for_status()\n except HTTPError as 
e:\n # A 404 indicates that no log is present.\n if not e.response.status_code == 404:\n # Rethrow the exception\n raise\n return None\n\n if response is None:\n if start is None:\n return None\n return None, None\n\n if start is None:\n return response.content\n return (\n response.headers[\"Content-Range\"].split(\"/\")[1],\n response.content,\n )", "def _read_range(self, start=0, end=None):\n max_read_size = self.channel.connection.negotiate_response.max_read_size\n offset = start\n response_buffers = []\n while end is None or offset < end:\n if end is not None:\n max_read_size = min(end - offset, max_read_size)\n available = min(\n self.channel.connection.credits * smb2.BYTES_PER_CREDIT,\n max_read_size,\n )\n try:\n read_resp = self.channel.read(self, available, offset)\n response_buffers.append(read_resp)\n offset += len(read_resp)\n except ResponseError as re:\n if re.response.status == ntstatus.STATUS_END_OF_FILE:\n break\n raise\n read_buffer = b\"\".join(rb.tobytes() for rb in response_buffers)\n if read_buffer:\n self._offset = start + len(read_buffer)\n # update the EOF marker if we read past it\n self._end_of_file = max(self.end_of_file, self._offset)\n return read_buffer", "def start(self):\n return _uhd_swig.meta_range_t_start(self)", "def _hit_range_get(self):\n return (self.hit_start, self.hit_end)", "def getRange(self, p_int): # real signature unknown; restored from __doc__\n pass", "def _adjustRange(self, start, end):\n adjusted_start = start\n if self._start:\n if end < self._start:\n return None\n adjusted_start = max(self._start, start)\n \n adjusted_end = end\n if self._end:\n if self._end < start:\n return None\n adjusted_end = min(self._end, end)\n \n return (adjusted_start, adjusted_end)", "def range_(headers, data):\n\tcolumn_matrix=data.get_data(headers).getT() # get columns as rows, as this makes analysis much easier by just perfoming operations on column list directly\n\tif column_matrix==[]:\n\t\tprint \"wrong headers, not present in data Object\"\n\t\treturn []\n\tcolumn_max=column_matrix.max(1)\n\tcolumn_min=column_matrix.min(1)\n\tfinal=np.concatenate((column_min, column_max), axis=1)\n\t\n\trng=final.tolist()\n\treturn rng", "def range (self):\n return self._range", "def range (self):\n return self._range", "def range_inclusive(start, stop):\n return range(start, stop + 1)", "def getRange(self, chr, start, end, bins=2000, zoomlvl=-1, metric=\"AVG\", respType=\"DataFrame\"):\n try:\n iter = self.file.fetch(chr, start, end)\n # result = []\n # for x in iter:\n # returnBin = (x.reference_name, x.reference_start, x.reference_end, x.query_alignment_sequence, x.query_sequence)\n # result.append(returnBin)\n\n # if self.columns is None:\n # self.columns = [\"chr\", \"start\", \"end\", \"query_alignment_sequence\", \"query_sequence\"]\n\n # if respType is \"DataFrame\":\n # result = toDataFrame(result, self.columns)\n\n (result, _) = get_range_helper(self.toDF, self.get_bin,\n self.get_col_names, chr, start, end, iter, self.columns, respType)\n\n return result, None\n except ValueError as e:\n raise Exception(\"didn't find chromId with the given name\")", "def range(self):\n return (self._start, self._end)", "def range(self):\n \n return self._range", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> 
Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IDRangeArgs']]]]:\n return pulumi.get(self, \"ranges\")", "def fusion_api_get_ipv4_range(self, uri=None, param='', api=None, headers=None):\n return self.ipv4range.get(uri=uri, api=api, headers=headers, param=param)", "def getDataRange(self):\n return None if self._dataRange is None else tuple(self._dataRange)", "def getRange(self, epRange):\n epRange = list(map(int, epRange.split('-')))\n if len(epRange) > 1:\n return list(range(epRange[0], epRange[1]+1))\n else:\n return epRange" ]
[ "0.76051366", "0.64007", "0.63162076", "0.62205637", "0.6198985", "0.61585295", "0.6094305", "0.60727465", "0.60231185", "0.6018141", "0.6005612", "0.5990971", "0.5956472", "0.5912372", "0.5882862", "0.58700633", "0.5840941", "0.5808205", "0.5808205", "0.5799625", "0.5738819", "0.571711", "0.5704584", "0.5698296", "0.5698296", "0.5698296", "0.5698296", "0.56710255", "0.5645497", "0.5638689" ]
0.74889433
1
Creates a new ShadowSource. After the creation, a lens can be added with setupPerspectiveLens.
def __init__(self):
    self.index = self._generateUID()
    DebugObject.__init__(self, "ShadowSource-" + str(self.index))
    ShaderStructElement.__init__(self)
    self.valid = False
    self.camera = Camera("ShadowSource-" + str(self.index))
    self.cameraNode = NodePath(self.camera)
    self.cameraNode.reparentTo(Globals.render)
    self.resolution = 1024
    self.atlasPos = Vec2(0)
    self.doesHaveAtlasPos = False
    self.sourceIndex = 0
    self.mvp = UnalignedLMatrix4f()
    self.sourceIndex = -1
    self.nearPlane = 0.0
    self.farPlane = 1000.0
    self.converterYUR = None
    self.transforMat = TransformState.makeMat(
        Mat4.convertMat(Globals.base.win.getGsg().getInternalCoordinateSystem(), CSZupRight))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, scene = base.render, ambient = 0.2, hardness = 16, fov = 40, near = 10, far = 100):\n \n # Read and store the function parameters\n self.scene = scene\n self.__ambient = ambient\n self.__hardness = hardness\n \n # By default, mark every object as textured.\n self.flagTexturedObject(self.scene)\n \n # Create the buffer plus a texture to store the output in\n buffer = createOffscreenBuffer(-3)\n depthmap = Texture()\n buffer.addRenderTexture(depthmap, GraphicsOutput.RTMBindOrCopy, GraphicsOutput.RTPColor)\n \n # Set the shadow filter if it is supported\n if(base.win.getGsg().getSupportsShadowFilter()):\n depthmap.setMinfilter(Texture.FTShadow)\n depthmap.setMagfilter(Texture.FTShadow) \n \n # Make the camera\n self.light = base.makeCamera(buffer)\n self.light.node().setScene(self.scene)\n self.light.node().getLens().setFov(fov)\n self.light.node().getLens().setNearFar(near, far)\n\n # Put a shader on the Light camera.\n lci = NodePath(PandaNode(\"lightCameraInitializer\"))\n lci.setShader(loader.loadShader(\"caster.sha\"))\n self.light.node().setInitialState(lci.getState())\n\n # Put a shader on the Main camera.\n mci = NodePath(PandaNode(\"mainCameraInitializer\"))\n mci.setShader(loader.loadShader(\"softshadow.sha\"))\n base.cam.node().setInitialState(mci.getState())\n\n # Set up the blurring buffers, one that blurs horizontally, the other vertically\n #blurXBuffer = makeFilterBuffer(buffer, \"Blur X\", -2, loader.loadShader(\"blurx.sha\"))\n #blurYBuffer = makeFilterBuffer(blurXBuffer, \"Blur Y\", -1, loader.loadShader(\"blury.sha\"))\n\n # Set the shader inputs\n self.scene.setShaderInput(\"light\", self.light)\n #self.scene.setShaderInput(\"depthmap\", blurYBuffer.getTexture())\n self.scene.setShaderInput(\"depthmap\", buffer.getTexture())\n self.scene.setShaderInput(\"props\", ambient, hardness, 0, 1)", "def new_source(self, name):\n params = {\"name\": name}\n return JSONRPCRequest(self, \"newSource\", params)", "def new(name, source):", "def create_source_power(\n self,\n face_id,\n input_power=\"0W\",\n thermal_condtion=\"Total Power\",\n surface_heat=\"0irrad_W_per_m2\",\n temperature=\"AmbientTemp\",\n radiate=False,\n source_name=None,\n ):\n if not source_name:\n source_name = generate_unique_name(\"Source\")\n props = {}\n props[\"Faces\"] = [face_id]\n props[\"Thermal Condition\"] = thermal_condtion\n props[\"Total Power\"] = input_power\n props[\"Surface Heat\"] = surface_heat\n props[\"Temperature\"] = temperature\n props[\"Radiation\"] = OrderedDict({\"Radiate\": radiate})\n bound = BoundaryObject(self, source_name, props, \"SourceIcepak\")\n if bound.create():\n self.boundaries.append(bound)\n return bound", "def __generate_shadows(self):\n glEnable(GL_POLYGON_OFFSET_FILL)\n glPolygonOffset(3, 0)\n self.__sh.change_shader(vertex=1, fragment=1)\n\n light = self.__face.directed_light_cartesian\n self.__light_matrix = self.__get_rotation_matrix(\n (light[0], light[1], -light[2]), 2.0)\n\n glDisable(GL_CULL_FACE)\n self.__prepare_shaders(self.__model_matrix, self.__light_matrix, True)\n self.__sh.bind_fbo()\n glClear(GL_DEPTH_BUFFER_BIT)\n glDrawElements(GL_TRIANGLES, View.__triangles.size,\n GL_UNSIGNED_SHORT, View.__triangles)\n glFinish()\n\n glBindFramebuffer(GL_FRAMEBUFFER, 0)\n self.__sh.clear()", "def enable_shadows(self):\n self._render_passes.enable_shadow_pass()", "def create_scene(self, ):\n self.scene = create_scene(\n self.opt.splats_img_size, self.opt.splats_img_size, self.opt.fovy,\n self.opt.focal_length, self.opt.n_splats)", "def 
New(*args, **kargs):\n obj = itkMeshSourcePSD3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def create_project_funding_source(cls, name, description):\n project_funding_source = ProjectFundingSource.objects.create(\n name=name,\n description=description,\n )\n return project_funding_source", "def create_source(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.create_source_with_http_info(**kwargs)\n else:\n (data) = self.create_source_with_http_info(**kwargs)\n return data", "def New(*args, **kargs):\n obj = itkMeshSourcePSF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def create_from_source(self):\n create_statement = self.source.create_statement\n self.create_from_statement(create_statement)\n # Add constraints\n constraints = self.source.constraints\n self.add_constraints(constraints)\n\n # Add indexes\n indexes = self.source.indexes\n self.add_indexes(indexes)\n\n # Add the non-referenced foreign keys\n non_referenced_fks = [x for x in self.source.foreign_keys if not x.referenced]\n self.add_foreign_keys(non_referenced_fks, override_table=self.name)", "def create_factory() -> pygameng.GameObjectFactory:\n from Assets.inventory import images, sounds, assets, game_types\n factory = pygameng.GameObjectFactory(pygameng.ClassRegistrar.registry, images, sounds, assets, game_types)\n factory.set_layer_manager_asset_name(\"LayerManager\")\n return factory", "def New(*args, **kargs):\n obj = itkMeshSourcePSUS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkMeshSourcePSUS2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def makeSource(self, name):\n source = mock.Mock(spec=\"title description\".split())\n source.title = '%s title' % name\n source.description = '%s description' % name\n source.configurationView = '@%s_configuration' % name\n return source", "def New(*args, **kargs):\n obj = itkMeshSourcePSD2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkMeshSourcePSF2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def __init__(self, source, *args, **kwargs):\n super(self.__class__, self).__init__()\n self._source = source\n self.provides = source.provides", "def createStageWithNewLayer():\n\n # Simply create a proxy shape. Since it does not have a USD file associated\n # (in the .filePath attribute), the proxy shape base will create an empty\n # stage in memory. 
This will create the session and root layer as well.\n if hasattr(mayaUsd, 'ufe') and hasattr(mayaUsd.ufe, 'createStageWithNewLayer'):\n shapeNode = mayaUsd.ufe.createStageWithNewLayer('|world')\n cmds.select(shapeNode, replace=True)\n return shapeNode\n else:\n shapeNode = cmds.createNode('mayaUsdProxyShape', skipSelect=True, name='stageShape1')\n cmds.connectAttr('time1.outTime', shapeNode+'.time')\n cmds.select(shapeNode, replace=True)\n fullPath = cmds.ls(shapeNode, long=True)\n return fullPath[0]", "def objectShadowClass( objectPath, groundPath ):\r\n # define the source position of the light, relative to the objectPath\r\n lightPos = Vec3( 0,0,100 )\r\n # add shadows to the object\r\n sc = ShadowCasterClass(objectPath, groundPath, lightPos)\r\n \r\n from direct.task import Task\r\n import math\r\n \r\n def lightRotate( task, sc = sc ):\r\n \"\"\" rotate the light around the object\r\n \"\"\"\r\n global lightAngle\r\n lightAngle += math.pi / 180 * globalClock.getDt() * 10\r\n r = 50\r\n sc.setLightPos( Vec3( r * math.cos(lightAngle), r * math.sin(lightAngle), 100 ) )\r\n return Task.cont\r\n \r\n taskMgr.add(lightRotate, 'lightRotateTask')\r\n \r\n return sc", "def create(self, window):\r\n\r\n # Set the size of the editor area.\r\n if self.editor_area_size != (-1, -1):\r\n window.editor_area_size = self.editor_area_size\r\n\r\n # If the perspective has specific contents then add just those.\r\n if len(self.contents) > 0:\r\n self._add_contents(window, self.contents)\r\n\r\n # Otherwise, add all of the views defined in the window at their\r\n # default positions realtive to the editor area.\r\n else:\r\n self._add_all(window)\r\n\r\n # Activate the first view in every region.\r\n window.reset_views()\r\n \r\n return", "def New(*args, **kargs):\n obj = itkMeshSourcePSSS2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def add_source(self, name, position):#)*args, **kwargs):\n return self._add_object(name, Source, position)#*args, **kwargs)", "def New(*args, **kargs):\n obj = itkMeshSourceMUS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def new():\n source = os.environ.get('EVENT_SOURCE', 'commandline')\n if source not in ['filesystem', 'kinesis', 'environment', 'commandline']:\n eprint('docker-lambda.source.unknown source=%s' % source)\n sys.exit(1)\n else:\n log('docker-lambda.source.selected source=%s' % source)\n if source == 'filesystem':\n source = FilesystemEventSource()\n elif source == 'kinesis':\n source = KinesisEventSource()\n elif source == 'environment':\n source = EnvironmentEventSource()\n elif source == 'commandline':\n source = CommandLineEventSource()\n return source", "def New(*args, **kargs):\n obj = itkMeshSourceMUS2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def make(*args, **kwargs):\n return _uhd_swig.amsg_source_make(*args, **kwargs)", "def initialise_shadow_map(self):\n self.shadow_map = np.zeros( self.x_len + 1, np.int8)\n \n for i in range(1, self.x_len + 1):\n self.shadow_map[i] = int((math.tan(math.radians(15)) * i) * (1 / self.slab_ratio))", "def __init__(self, emap, light):\r\n super(ShadowCaster, self).__init__(\"shadow_caster\")\r\n # load shader for casting shadows and camera\r\n self.cshader = Shader(\"uv_flat\")\r\n self.mshader = Shader(\"mat_flat\")\r\n # keep copy of ElevationMap\r\n self.emap = emap\r\n self.emap.set_material((0.0, 0.0, 0.0)) # hide bits below ground\r\n #TODO doesn't cope with z light 
positions\r\n self.eye = [-500.0 * i for i in light.lightpos] # good distance away\r\n if self.eye[1] <= 0: # must have +ve y\r\n self.eye[1] = 500.0\r\n if abs(self.eye[0]) > abs(self.eye[2]): #x val is bigger than z val\r\n #change scale so map just fits on screen\r\n if self.eye[0] < 0:\r\n su, sv = 1.0, 1.0\r\n else:\r\n su, sv = -1.0, -1.0\r\n self.scaleu = float(self.iy) / self.emap.width\r\n self.scalev = float(self.ix)/ self.emap.depth\r\n self.eye[2] = 0\r\n self.scaleu = self.scaleu / self.eye[1] * (self.eye[0]**2 + self.eye[1]**2)**0.5\r\n self.emap.unif[50] = 1.0 #orientation flag\r\n self.emap.unif[53] = -3.0 * su / self.emap.width * self.eye[0] / self.eye[1] #height adjustment\r\n else:\r\n #change scale so map just fits on screen\r\n if self.eye[2] < 0:\r\n su, sv = 1.0, -1.0\r\n else:\r\n su, sv = -1.0, 1.0\r\n self.scaleu = float(self.iy) / self.emap.depth\r\n self.scalev = float(self.ix)/ self.emap.width\r\n self.eye[0] = 0\r\n self.scaleu = self.scaleu / self.eye[1] * (self.eye[2]**2 + self.eye[1]**2)**0.5\r\n self.emap.unif[50] = 0.0\r\n self.emap.unif[53] = -3.0 * su / self.emap.width * self.eye[2] / self.eye[1]\r\n if abs(self.scaleu) > abs(self.scalev):\r\n self.scale = 3.0 * self.scalev # multiplication factor to reduce pixeliness\r\n else:\r\n self.scale = 3.0 * self.scaleu\r\n self.scaleu = su * self.scale / self.scaleu # reused later in end_cast\r\n self.scalev = sv * self.scale / self.scalev\r\n self.camera0 = Camera() # default instance created as normal, just in case!\r\n self.camera = Camera(is_3d=False, eye=self.eye, scale=self.scale)\r\n # load shader for drawing map with shadows\r\n self.dshader = Shader(\"shadowcast\")" ]
[ "0.56471896", "0.54164994", "0.52801746", "0.51461595", "0.51001674", "0.5033836", "0.50236815", "0.4979795", "0.49654973", "0.49104264", "0.48959804", "0.489492", "0.48817372", "0.48636484", "0.4848264", "0.48168567", "0.48058757", "0.4802296", "0.47864646", "0.47845897", "0.47459304", "0.47346187", "0.4714807", "0.47134453", "0.46709645", "0.46602708", "0.46482477", "0.4629663", "0.46272615", "0.46099228" ]
0.6872107
0
Sets the film size of the source
def setFilmSize(self, size_x, size_y):
    self.lens.setFilmSize(size_x, size_y)
    self.rebuildMatrixCache()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_size(self, w, h):\n\t\tpass", "def set_size(self, size):\n \n self.width = size[0]\n self.height = size[1]", "def setsize(self, size):\n self.__size = size", "def setFrameSize(self, frame_size):\n \n self.frame_size = frame_size", "def set_size(self, size=None):\n if not size:\n size = self.output_size\n self.img = cv2.resize(self.img, size)\n self.update_image()\n self.update_size()", "def set_size(self, size):\n self.dtSize = size", "def setFilmWidth(self, width):\r\n if mxs.classOf(self._nativePointer) == mxs.VRayPhysicalCamera:\r\n self._nativePointer.film_width = float(width)\r\n elif mxs.classOf(self._nativePointer) == mxs.Physical:\r\n self._nativePointer.film_width_mm = float(width)\r\n return True", "def size(self, value):\n self.width = value", "def set_episode_size(self, episode_size):\n self.max_episode_steps = episode_size", "def set_frame_size(self, frame_size_selector):\n raise NotImplementedError", "def size(self, size):\n self._size = size", "def resize(self, size):\n self.instance.resize_volume(size)\n self.size = size", "def set_sizes(self, sizes):\n self._sizes = sizes", "def size(self, size):\n self.width = size\n self.height = size", "def size(self, size):\n self.width = size\n self.height = size", "def size(self, value):\n self.width = value\n self.height = value", "def set_size(self, value='S'):\n upper = value.upper()\n\n if upper == 'M': # Medium: double height\n # size = 0x01\n # charHeight = 48\n # maxColumn = 32\n self.double_height_on()\n self.double_width_off()\n elif upper == 'L': # Large: double width and height\n # size = 0x11\n # charHeight = 48\n # maxColumn = 16\n self.double_height_on()\n self.double_width_on()\n else: # Small: standard width and height\n # size = 0x00\n # charHeight = 24\n # maxColumn = 32\n self.double_width_off()\n self.double_height_off()\n # writeBytes(ASCII_GS, '!', size)\n # prevByte = '\\n' # Setting the size adds a linefeed", "def size(self, size):\n\n self._size = size", "def size(self, size):\n\n self._size = size", "def size(self, size):\n\n self._size = size", "def size(self, size):\n\n self._size = size", "def set_test_size(self, new_test_size=0.25):\n self.test_size = new_test_size", "def change_wafer_size(self, size):\n if size not in self.SIZES:\n raise ValueError(\"The wafer must be a valid size: {0}\".format(self.SIZES))\n \n self.size = size * self._MM_IN_MICRONS\n\n self._create_drawing_area()\n self.partition(self.rows, self.cols)", "def size(self, val):\n self.width = val\n self.height = val", "def set_size(self, new_bunch_size):\n self.bunch_size = new_bunch_size", "def set_wished_size(self, size):\n self.__wished_size = size", "def _set_size(self):\n if self.width_key is not None:\n width = config.get(self.width_key)\n height = config.get(self.height_key)\n self.window.resize(width, height)", "def __set_size(self, size):\n if not isinstance(size, int):\n raise TypeError('The size should be an integer')\n if size < 64 or size > 1500: # It should be in the Standard Ethernet Payload range\n raise ValueError('The size should be in the range of Standard Ethernet frames [64,1500] bytes')\n self.__size = size", "def set_frame_size(*args):\n return _ida_frame.set_frame_size(*args)", "def size(self, size: int):\n\n self._size = size" ]
[ "0.66870576", "0.6594383", "0.6515641", "0.6513604", "0.6441292", "0.6421563", "0.63714147", "0.6353532", "0.63481283", "0.63318676", "0.62815446", "0.6245005", "0.62030274", "0.61962676", "0.61962676", "0.6195462", "0.61918056", "0.6174559", "0.6174559", "0.6174559", "0.6174559", "0.61428434", "0.6127668", "0.6119195", "0.61070615", "0.60732394", "0.6062015", "0.602983", "0.5943631", "0.5933837" ]
0.76360935
0
Returns the assigned source index. The source index is the index of the ShadowSource in the ShadowSources array of the assigned Light.
def getSourceIndex(self):
    return self.sourceIndex
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetAMRBlockSourceIndex(self, p_int, p_int_1):\n ...", "def source_id(self):\n return self._source_id", "def source(self):\n if not self.set_source:\n return sb.NotSpecified\n return uint32_packer.unpack(self[4:8])[0]", "def _get_target_index(self):\n return (self.index + self.source_window * (not self.overlapping) +\n self.offset)", "def get_nth_ast_source(self, n):\n return self.get_nth_ast(n)['source']", "def source_id(self) -> str:\n return self._source_id", "def source_id(self) -> str:\n return self._source_id", "def get_state_src(self, cycle: int) -> int:\n state = self.get_cycle_state(cycle)\n src_id = state.get_metadata_by_key('src')\n return src_id", "def source_identifier(self) -> str:\n return pulumi.get(self, \"source_identifier\")", "def source_identifier(self) -> str:\n return pulumi.get(self, \"source_identifier\")", "def getLightIndex(gltf, idname):\n\n v3dExt = appendExtension(gltf, 'S8S_v3d_data', gltf)\n\n if v3dExt.get('lights') == None:\n return -1\n\n lights = v3dExt['lights']\n\n index = 0\n for light in lights:\n key = 'id' if light.get('id') != None else 'name'\n if light.get(key) == idname:\n return index\n\n index += 1\n\n return -1", "def source_id(self) -> Optional[str]:\n return pulumi.get(self, \"source_id\")", "def get_media_source_id(self, source_name):\n\t\tvalidation.required(source_name, 'source_name')\n\n\t\treturn self.media_sources.get(source_name, 1)", "def setSourceIndex(self, index):\n self.sourceIndex = index", "def __repr__(self):\n return \"ShadowSource[id=\" + str(self.index) + \"]\"", "def _extract_first_from(name, sources):\n for i, source in enumerate(sources):\n if not source:\n continue\n if name in source:\n return (i, source[name])\n raise KeyError(name)", "def get_source_index(self, index: QModelIndex) -> QModelIndex:\n if 0 <= index.row() < len(self._dataframe.values):\n if 0 <= index.column() < len(self._dataframe.columns):\n row = self.get_source_row(index.row())\n column = self.get_source_column(index.column())\n return self.index(row, column)\n return QModelIndex()", "def source(self):\n for source in self.coordinator.data.sources:\n if source.SourceID == self.zone.SourceID:\n return source.Name\n return None", "def grab_external_id(stix_object, source_name):\n for external_reference in stix_object.get(\"external_references\", []):\n if external_reference.get(\"source_name\") == source_name:\n return external_reference[\"external_id\"]", "def getSource(self):\n return self.source", "def get_source_count(self, stmt):\n return self.get_source_count_by_hash(stmt.get_hash(shallow=True))", "def get_source_class_id(self, class_id, source):\n info = self.class_info[class_id]\n assert info['source'] == source\n return info['id']", "def get_source_class_id(self, class_id, source):\n info = self.class_info[class_id]\n assert info['source'] == source\n return info['id']", "def colorssrc(self):\n return self[\"colorssrc\"]", "def colorssrc(self):\n return self[\"colorssrc\"]", "def firstItemIndex(self):\n # We cannot just call the same function of the source model\n # because the first node there may be hidden.\n source_root_index = self.sourceModel().rootIndex()\n proxy_root_index = self.mapFromSource(source_root_index)\n first_item_index = self.index(0, 0, proxy_root_index)\n return first_item_index", "def getSource(self):\n return self.__source", "def getSource():", "def get_source_counts(self):\n return deepcopy(self._source_counts)", "def get_source_count_by_hash(self, stmt_hash):\n return self._source_counts.get(stmt_hash, {})" ]
[ "0.64287513", "0.6268864", "0.5967637", "0.58983713", "0.585019", "0.57937014", "0.57937014", "0.57837826", "0.5761022", "0.5761022", "0.5639242", "0.5594638", "0.5589636", "0.5575055", "0.55677557", "0.5560122", "0.5557805", "0.55487514", "0.5494656", "0.5484412", "0.5480852", "0.54476374", "0.54476374", "0.5447205", "0.5447205", "0.5445358", "0.5432883", "0.54073876", "0.53821087", "0.5379269" ]
0.74652684
0
Sets the source index of this source. This is called by the light, as only the light knows at which position this source is in the Sources array.
def setSourceIndex(self, index): self.sourceIndex = index
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_source(self, source_name):\n self.source = source_name", "def source_id(self, source_id):\n\n self._source_id = source_id", "def source_id(self, source_id):\n\n self._source_id = source_id", "def set_source(self, source):\n self.data['source'] = source", "def source(self, source):\n\n self._close()\n self._source = source\n\n self.src = rasterio.open(source)\n\n idx = getattr(self, 'indexes', None)\n if idx is None:\n self.indexes = list(range(1, self.src.count+1))", "def source_id(self, source_id: str):\n\n self._source_id = source_id", "def source(self, source):\n\n self._source = source", "def source(self, source):\n\n self._source = source", "def source(self, source):\n\n self._source = source", "def source(self, source):\n\n self._source = source", "def source(self, source):\n\n self._source = source", "def source(self, source):\n\n self._source = source", "def source(self, source):\n\n self._source = source", "def source(self, source: Source):\n self._source = source", "def _set_source(self, source):\n if source != self._source:\n self._source = source\n self._channel = \"\"\n self._channel_name = \"\"\n self._is_forced_val = True\n self._forced_count = 0", "def sources(self, sources):\n\n self._sources = sources", "def set_data_source(self, source_id):\n self.data_source = source_id", "def set_flow_source(self, source):\n self._source = source", "def setSourcePath(self, sourcePath):\n self.__sourcePath = sourcePath", "def set_index(self, index):\n self.index = index", "def set_source(self, val: str) -> list:\n sources = self.source_control.list_sources()\n index = self.source().index(val)\n self.source_control.set_source(sources[index])\n return sources", "def _set_index(self):\n self.index = 0\n # If offset is negative, target window might start before 0\n self.index = -min(0, self._get_target_index())", "def set_source(self, source):\n self.qbpm = self.sources[source]\n self.title = self.qbpm.address\n self.setWindowTitle(self.title)", "def index(self, index):\n\n self._index = index", "def setSources(self, xsrc, zsrc):\n xsrc = ascontiguousarray(xsrc, float64)\n zsrc = ascontiguousarray(zsrc, float64)\n nsrc = len(xsrc)\n if (len(xsrc) != len(zsrc)):\n print(\"Inconsistent array lengths\")\n xsrcPointer = xsrc.ctypes.data_as(POINTER(c_double))\n zsrcPointer = zsrc.ctypes.data_as(POINTER(c_double))\n ierr = c_int(1)\n self.fteik2d.fteik_solver2d_setSources64f(nsrc,\n zsrcPointer, xsrcPointer,\n ierr)\n if (ierr.value != 0):\n print(\"Error setting sources\")\n return -1\n self.nsrc = nsrc\n return 0", "def getSourceIndex(self):\n return self.sourceIndex", "def set_index(self, nIndex):\n\t\tcall_sdk_function('PrlVmDev_SetIndex', self.handle, nIndex)", "def source_type(self, source_type):\n\n self._source_type = source_type", "def do_source(self, args):\n self.source = int(args)", "def update_source(self):\n if self.verbose:\n print(\"Updating source\")\n self.source.data = self.source_data\n if self.source.selected is not None:\n self.source.selected.indices = self.selection\n for c in self.callbacks[\"update_source\"]:\n c()\n self.pending_update = False\n if self.update_buffer is not None:\n self.context.doc.add_next_tick_callback(self.update_buffer)\n self.update_buffer = None" ]
[ "0.679036", "0.66052014", "0.66052014", "0.6591564", "0.6569787", "0.62374467", "0.6223604", "0.6223604", "0.6223604", "0.6223604", "0.6223604", "0.6223604", "0.6223604", "0.61710954", "0.61393267", "0.6092534", "0.6084271", "0.6015366", "0.5984961", "0.59811014", "0.5959025", "0.5913503", "0.5900009", "0.58334607", "0.58119", "0.5803889", "0.576728", "0.5764053", "0.5760215", "0.5731313" ]
0.8529315
0
Computes the modelViewProjection matrix for the lens. Actually, this is the worldViewProjection matrix, but for convenience it is called mvp.
def computeMVP(self): projMat = self.converterYUR modelViewMat = self.transforMat.invertCompose( Globals.render.getTransform(self.cameraNode)).getMat() return UnalignedLMatrix4f(modelViewMat * projMat)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modelview_matrix(self):\n camera = self.figure.scene.camera\n return camera.view_transform_matrix.to_array().astype(np.float32)", "def projection_matrix(self):\n scene = self.figure.scene\n scene_size = tuple(scene.get_size())\n aspect_ratio = float(scene_size[0]) / float(scene_size[1])\n p = scene.camera.get_perspective_transform_matrix(\n aspect_ratio, -1, 1).to_array().astype(np.float32)\n return p", "def get_proj(self):\n relev, razim = np.pi * self.elev/180, np.pi * self.azim/180\n\n xmin, xmax = self.get_xlim3d()\n ymin, ymax = self.get_ylim3d()\n zmin, zmax = self.get_zlim3d()\n\n # transform to uniform world coordinates 0-1.0,0-1.0,0-1.0\n worldM = proj3d.world_transformation(xmin, xmax,\n ymin, ymax,\n zmin, zmax)\n\n # look into the middle of the new coordinates\n R = np.array([0.5, 0.5, 0.5])\n\n xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist\n yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist\n zp = R[2] + np.sin(relev) * self.dist\n E = np.array((xp, yp, zp))\n\n self.eye = E\n self.vvec = R - E\n self.vvec = self.vvec / proj3d.mod(self.vvec)\n\n if abs(relev) > np.pi/2:\n # upside down\n V = np.array((0, 0, -1))\n else:\n V = np.array((0, 0, 1))\n zfront, zback = -self.dist, self.dist\n\n viewM = proj3d.view_transformation(E, R, V)\n perspM = proj3d.persp_transformation(zfront, zback)\n M0 = np.dot(viewM, worldM)\n M = np.dot(perspM, M0)\n return M", "def projection_matrix(self) -> TransformationMatrixType:\n if self._projection_matrix is None:\n if self.projection_mode == Projection.TOP_DOWN:\n self._projection_matrix = self.orthographic_matrix\n else:\n self._projection_matrix = self.perspective_matrix\n\n return self._projection_matrix", "def _ProjectionMatrix(near, far, fov, aspectRatio):\r\n # Matrices are considered to be M[row][col]\r\n # Use DirectX convention, so need to do rowvec*Matrix to transform\r\n size = 1 / tan(radians(fov)/2.0)\r\n M = [[0] * 4 for i in range(4)]\r\n M[0][0] = size/aspectRatio\r\n M[1][1] = size #negative value reflects scene on the Y axis\r\n M[2][2] = (far + near) / (far - near)\r\n M[2][3] = 1\r\n M[3][2] = -(2 * far * near)/(far - near)\r\n return array(M, dtype=float)", "def get_proj(self):\n relev, razim = np.pi * self.elev/180, np.pi * self.azim/180\n\n xmin, xmax = self.get_xlim3d()/self.pbaspect[0]\n ymin, ymax = self.get_ylim3d()/self.pbaspect[1]\n zmin, zmax = self.get_zlim3d()/self.pbaspect[2]\n\n # transform to uniform world coordinates 0-1.0,0-1.0,0-1.0 \n worldM = proj3d.world_transformation(xmin, xmax,\n ymin, ymax,\n zmin, zmax)\n\n # look into the middle of the new coordinates \n R = np.array([0.5, 0.5, 0.5])\n\n xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist\n yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist\n zp = R[2] + np.sin(relev) * self.dist\n E = np.array((xp, yp, zp))\n\n self.eye = E\n self.vvec = R - E\n self.vvec = self.vvec / proj3d.mod(self.vvec)\n\n if abs(relev) > np.pi/2:\n # upside down \n V = np.array((0, 0, -1))\n else:\n V = np.array((0, 0, 1))\n\n zfront, zback = -self.dist, self.dist\n\n viewM = proj3d.view_transformation(E, R, V)\n perspM = proj3d.persp_transformation(zfront, zback)\n M0 = np.dot(viewM, worldM)\n M = np.dot(perspM, M0)\n return M", "def get_projection_matrix(K, rvec, tvec):\n R = cv2.Rodrigues(np.float32(rvec))[0]\n Rt = np.zeros((3, 4))\n Rt[:, 0:3] = R\n Rt[:, 3] = tvec\n return K @ Rt", "def base_projection_matrix(self, fiber):\n return matrix(ZZ, fiber.vertices()).right_kernel_matrix()", "def world_projection(self, aspect):\n 
glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n if aspect < 1:\n gluOrtho2D(\n -self.scale,\n +self.scale,\n -self.scale / aspect,\n +self.scale / aspect)\n else:\n gluOrtho2D(\n -self.scale * aspect,\n +self.scale * aspect,\n -self.scale,\n +self.scale)\n\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n gluLookAt(\n self.x, self.y, +1.0,\n self.x, self.y, -1.0,\n sin(self.angle), cos(self.angle), 0.0)", "def getPerspectiveProjectionMatrix(l, r, b, t, n, f):\n e11 = 2 * n / (r - l)\n e13 = (r + l) / (r - l)\n e22 = (2 * n) / (t - b)\n e23 = (t + b) / (t - b)\n e33 = -1 * (f + n) / (f - n)\n e34 = (-2 * f * n) / (f - n)\n\n return MatrixExtended([\n [e11, 0, e13, 0],\n [0, e22, e23, 0],\n [0, 0, e33, e34],\n [0, 0, -1, 0]])", "def _get_proj_mat(self):\n if self._proj_mat is None:\n if self.symmetric:\n IP_mat = self.vec_space.compute_symmetric_inner_product_mat(\n self.basis_vec_handles)\n else:\n IP_mat = self.vec_space.compute_inner_product_mat(\n self.adjoint_basis_vec_handles, self.basis_vec_handles)\n self._proj_mat = np.linalg.inv(IP_mat)\n return self._proj_mat", "def get_view_matrix(self):\n return self.ptr.get_view_matrix()", "def cameraToWorld(self, p):\n result = self.camPos\n result += p[2] * self.camZ # result is now in the middle of the view-plane\n result += p[0] * self.camX # result is now in the middle-left of the view-plane\n result += p[1] * self.camY # result is now the world-space equivalent of p\n return result", "def perspective_matrix(self) -> TransformationMatrixType:\n z_near, z_far = self._clipping[self.projection_mode.value]\n return perspective_matrix(\n math.radians(self.fov), self.aspect_ratio, z_near, z_far\n )", "def get_projection_matrix(self, aspect):\n return self.ptr.get_projection_matrix(aspect)", "def _get_proj_mat(self): \n if self._proj_mat is None:\n if self.symmetric:\n IP_mat = self.vec_space.compute_symmetric_inner_product_mat(\n self.basis_vecs)\n else:\n IP_mat = self.vec_space.compute_inner_product_mat(\n self.adjoint_basis_vecs, self.basis_vecs)\n self._proj_mat = np.linalg.inv(IP_mat)\n return self._proj_mat", "def get_projection_matrix(left, right, bottom, top):\r\n zNear = -25.0\r\n zFar = 25.0\r\n inv_z = 1.0 / (zFar - zNear)\r\n inv_y = 1.0 / (top - bottom)\r\n inv_x = 1.0 / (right - left)\r\n mat = [[(2.0 * inv_x), 0.0, 0.0, (-(right + left) * inv_x)],\r\n [0.0, (2.0 * inv_y), 0.0, (-(top + bottom) * inv_y)],\r\n [0.0, 0.0, (-2.0 * inv_z), (-(zFar + zNear) * inv_z)],\r\n [0.0, 0.0, 0.0, 1.0]]\r\n return mat", "def compute_projection(M):\n P = torch.mm(M, torch.pinverse(M.T.matmul(M)).matmul(M.T))\n P = P.double()\n return P", "def set_modelview_from_camera(Rt):\n \n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n \n # rotate teapot 90 deg around x-axis so that z-axis is up\n Rx = np.array([[1,0,0],[0,0,-1],[0,1,0]])\n \n # set rotation to best approximation\n R = Rt[:,:3]\n U,S,V = np.linalg.svd(R)\n R = np.dot(U,V)\n R[0,:] = -R[0,:] # change sign of x-axis\n \n # set translation\n t = Rt[:,3]\n \n # setup 4*4 model view matrix\n M = np.eye(4)\n M[:3,:3] = np.dot(R,Rx)\n M[:3,3] = t\n\n # transpose and flatten to get column order\n M = M.T\n \n m = M.flatten()\n \n # replace model view with the new matrix\n glLoadMatrixf(m)", "def worldToCanonicalViewXform(self):\n return self.perspectiveNormalizationXform().dot(self.worldToCameraCentricXform())", "def model(voxels, transform_matrix, params, is_training):\n del is_training # Doesn't make a difference for projector\n # Rearrangement (batch, z, y, x, channel) --> (batch, y, z, x, 
channel).\n # By the standard, projection happens along z-axis but the voxels\n # are stored in a different way. So we need to switch the y and z\n # axis for transformation operation.\n voxels = tf.transpose(voxels, [0, 2, 1, 3, 4])\n z_near = params.focal_length\n z_far = params.focal_length + params.focal_range\n transformed_voxels = perspective_transform.transformer(\n voxels, transform_matrix, [params.vox_size] * 3, z_near, z_far)\n views = tf.reduce_max(transformed_voxels, [1])\n views = tf.reverse(views, [1])\n return views", "def m(self) -> np.ndarray:\n assert self._k is not None and self._r is not None and self._t is not None, \"camera must be calibrated\"\n return forge_projective_matrix(self._k, r=self._r, t=self._t)", "def create_ortho_matrices(self, offset, scale,\r\n window_dimensions, img_dimensions):\r\n window_width, window_height = window_dimensions\r\n object_x = int(window_width/2 - img_dimensions[0]*scale/2) + offset[0]\r\n object_y = int(window_height/2 - img_dimensions[1]*scale/2) + offset[1]\r\n model_matrix = get_4x4_transform(int(img_dimensions[0] * scale),\r\n int(img_dimensions[1] * scale),\r\n object_x,\r\n object_y, 1.0)\r\n proj_matrix = get_projection_matrix(0, window_width, 0, window_height)\r\n view_matrix = get_view_matrix(1.0, 1.0)\r\n return model_matrix, proj_matrix, view_matrix", "def orthographic_matrix(self) -> TransformationMatrixType:\n near, far = self._clipping[self.projection_mode.value]\n return orthographic_matrix(self.fov, self.aspect_ratio, near, far)", "def build(self):\n # Generate a 4x4 identity matrix, which will be the basis for the view matrix.\n vtm = np.identity( 4, float )\n # Generate a translation matrix to move the VRP to the origin and then premultiply the vtm by the translation matrix.\n t1 = np.matrix( [[1, 0, 0, -self.vrp[0, 0]],\n [0, 1, 0, -self.vrp[0, 1]],\n [0, 0, 1, -self.vrp[0, 2]],\n [0, 0, 0, 1] ] )\n\n vtm = t1 * vtm\n\n # Calculate the view reference axes tu, tvup, tvpn.\n tu = np.cross(self.vup, self.vpn)\n tvup = np.cross(self.vpn, tu)\n tvpn = self.vpn.copy()\n\n # Normalize the view axes tu, tvup, and tvpn to unit length.\n\n # if this doesn't work, create my own normalize function\n tu = self.normalize(tu)\n tvup = self.normalize(tvup)\n tvpn = self.normalize(tvpn)\n\n # Copy the orthonormal axes tu, tvup, and tvpn back to self.u, self.vup and self.vpn.\n self.u = tu.copy()\n self.vup = tvup.copy()\n self.vpn = tvpn.copy()\n\n # Use the normalized view reference axes to generate the rotation matrix \n # to align the view reference axes and then premultiply M by the rotation.\n r1 = np.matrix( [[ tu[0, 0], tu[0, 1], tu[0, 2], 0.0 ],\n [ tvup[0, 0], tvup[0, 1], tvup[0, 2], 0.0 ],\n [ tvpn[0, 0], tvpn[0, 1], tvpn[0, 2], 0.0 ],\n [ 0.0, 0.0, 0.0, 1.0 ] ] )\n\n vtm = r1 * vtm\n\n # Translate the lower left corner of the view space to the origin.\n # extent of the view volume in the X and Y view axes.\n vtm = self.T( 0.5*self.extent[0], 0.5*self.extent[1], 0 ) * vtm\n\n vtm = self.S( -self.screen[0] / self.extent[0], -self.screen[1] / self.extent[1], 1.0 / self.extent[2] ) * vtm\n\n vtm = self.T( self.screen[0] + self.offset[0], self.screen[1] + self.offset[1], 0 ) * vtm\n\n return vtm", "def parallel_projection(self):\n return self.camera.parallel_projection", "def projection(self):\n return self._map_projection", "def projection(self):\n return self._map_projection", "def projection(self) -> Projection:\n return self._projection", "def ortho(self):\r\n\r\n 
m11,m12,m13,m14,m21,m22,m23,m24,m31,m32,m33,m34,m41,m42,m43,m44 = self.mlist\r\n\r\n x = _vec3(m11, m21, m31)\r\n y = _vec3(m12, m22, m32)\r\n z = _vec3(m13, m23, m33)\r\n\r\n xl = x.length()\r\n xl*=xl\r\n y = y - ((x*y)/xl)*x\r\n z = z - ((x*z)/xl)*x\r\n\r\n yl = y.length()\r\n yl*=yl\r\n z = z - ((y*z)/yl)*y\r\n\r\n return mat4( x.x, y.x, z.x, m14,\r\n x.y, y.y, z.y, m24,\r\n x.z, y.z, z.z, m34,\r\n m41, m42, m43, m44)" ]
[ "0.70178586", "0.6966919", "0.6562363", "0.65267247", "0.64798117", "0.64471877", "0.63473505", "0.6164686", "0.61556834", "0.61460024", "0.6138456", "0.6125117", "0.60684174", "0.60494584", "0.60469806", "0.6032966", "0.6031657", "0.59044665", "0.5889228", "0.5841249", "0.5837033", "0.57958233", "0.5792841", "0.57872933", "0.576329", "0.5761961", "0.57340294", "0.57340294", "0.5669598", "0.56527334" ]
0.7526075
0
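The computeMVP entry above composes the camera's inverse world transform with the lens projection; a minimal sketch of the same idea in plain numpy, assuming a column-vector convention (the Panda3D code itself uses row vectors, so its multiplication order is the reverse) and illustrative camera parameters, with every name below being an assumption rather than part of the dataset:

    import numpy as np

    def look_at(eye, target, up):
        # Right-handed view matrix from camera position and orientation.
        f = target - eye
        f = f / np.linalg.norm(f)
        s = np.cross(f, up)
        s = s / np.linalg.norm(s)
        u = np.cross(s, f)
        view = np.eye(4)
        view[0, :3], view[1, :3], view[2, :3] = s, u, -f
        view[:3, 3] = -view[:3, :3] @ eye
        return view

    def perspective(fov_deg, aspect, near, far):
        # OpenGL-style perspective projection matrix.
        f = 1.0 / np.tan(np.radians(fov_deg) / 2.0)
        proj = np.zeros((4, 4))
        proj[0, 0] = f / aspect
        proj[1, 1] = f
        proj[2, 2] = (far + near) / (near - far)
        proj[2, 3] = 2.0 * far * near / (near - far)
        proj[3, 2] = -1.0
        return proj

    model = np.eye(4)  # object-to-world transform of the rendered node
    view = look_at(np.array([0.0, -10.0, 5.0]), np.zeros(3), np.array([0.0, 0.0, 1.0]))
    proj = perspective(90.0, 1.0, 0.1, 100.0)
    mvp = proj @ view @ model  # model-view-projection, column-vector order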
Assigns this source a position in the shadow atlas. This is called by the shadow atlas. Coordinates are floats from 0 .. 1
def assignAtlasPos(self, x, y): self.atlasPos = Vec2(x, y) self.doesHaveAtlasPos = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setPosition(self):\n # determine posX, posY for battle\n (x1,y1) = globals.battlemapQuadrants[self.systemGrid]\n self.posX = x1+self.setX\n self.posY = y1+self.setY", "def setzePosition(self, x, y):\n self.zielX = x\n self.zielY = y", "def set_position(self, x: float, y: float):\n self._shape.body.position.x = x\n self._shape.body.position.y = y", "def position(self, x, y):\n self.x = x \n self.y = y\n self.pos[0] = x \n self.pos[1] = y", "def setPosition(self,x):\n if x is None:\n self.x = Cartesian3DVector()\n else:\n if isinstance(x,Cartesian3DVector):\n self.x = Cartesian3DVector(x.x,x.y,x.z)\n else:\n raise CoordinateException(\"Initializing a particle with the incorrect position vector type.\")", "def set_location(self, x, y, z=0):\n self._rect.topleft = (x, y)\n self._z = z\n self._update()", "def start_cast(self, location=(0.0, 0.0, 0.0)):\r\n opengles.glClearColor(ctypes.c_float(0.0), ctypes.c_float(0.0), \r\n ctypes.c_float(0.0), ctypes.c_float(1.0))\r\n super(ShadowCaster, self)._start()\r\n self.camera.reset(is_3d=False, scale=self.scale)\r\n self.camera.position((location[0], 0, location[2]))\r\n self.location = location", "def set_position(self, x, y):\n self.position.x = x\n self.position.y = y\n self.rect.topleft = x, y", "def __init__(self):\n self.index = self._generateUID()\n\n DebugObject.__init__(self, \"ShadowSource-\" + str(self.index))\n ShaderStructElement.__init__(self)\n\n self.valid = False\n self.camera = Camera(\"ShadowSource-\" + str(self.index))\n self.cameraNode = NodePath(self.camera)\n self.cameraNode.reparentTo(Globals.render)\n self.resolution = 1024\n self.atlasPos = Vec2(0)\n self.doesHaveAtlasPos = False\n self.sourceIndex = 0\n self.mvp = UnalignedLMatrix4f()\n self.sourceIndex = -1\n self.nearPlane = 0.0\n self.farPlane = 1000.0\n self.converterYUR = None\n self.transforMat = TransformState.makeMat(\n Mat4.convertMat(Globals.base.win.getGsg().getInternalCoordinateSystem(),\n CSZupRight))", "def set_position(self, x, y, z):\n for sec in self.all:\n for i in range(int(nrn.n3d())):\n nrn.pt3dchange(i, \\\n x-self.x+nrn.x3d(i), \\\n y-self.y+nrn.y3d(i), \\\n z-self.z+nrn.z3d(i), \\\n nrn.diam3d(i))\n self.x = x; self.y = y; self.z = z", "def initialize_position(self):\n self.x = self.cell_xl + self.cell_dx * np.random.rand(1)[0]", "def teleport(self, x, y):\n self.rect.x = x\n self.rect.y = y", "def setPos(self, pos):\n self.cameraNode.setPos(pos)", "def set_local_coordinates(self, entity_id, x, y):\n position = getattr(self.entities[entitiy_id],\n self.local_position_system)\n position.x = x \n position.y = y", "def initialize_position(self):\n self.x = (self.cell_xl**3 +\n (self.cell_xr**3 - self.cell_xl**3) *\n np.random.rand(1)[0])**(1./3.)", "def set_position(self, x, y):\n self.tx = -x\n self.ty = -y", "def set_position( self, posx, posy ):\n\n self.__foodx = posx\n self.__foody = posy", "def position(self, x, y, z):\n self.curr_position = Vector([x, y, z])\n self.ptr.position(x, y, z)", "def setPosition(self):\n self.data['pos-x'] = \"%s\" % self.x()\n self.data['pos-y'] = \"%s\" % self.y()", "def position(self, position):\n\n self._position = position", "def position(self, position):\n\n self._position = position", "def position(self, position):\n\n self._position = position", "def position(self, position):\n\n self._position = position", "def position(self, position):\n\n self._position = position", "def position(self, position):\n\n self._position = position", "def position(self, position):\n\n self._position = position", "def 
position(self, position):\n\n self._position = position", "def set_node_position(self, node, x, y, z=0):\n pass", "def setX(self, value):\n self.position[0] = value", "def set_position(self, position):\n self.position = position" ]
[ "0.6156805", "0.60141057", "0.60129917", "0.6004218", "0.6000607", "0.5993239", "0.598556", "0.59563553", "0.58847815", "0.58847356", "0.5878124", "0.58766407", "0.58704585", "0.58442515", "0.58430034", "0.5831376", "0.58271724", "0.5809994", "0.5803376", "0.57787955", "0.57787955", "0.57787955", "0.57787955", "0.57787955", "0.57787955", "0.57787955", "0.57787955", "0.57691395", "0.5766457", "0.57541984" ]
0.6183983
0
Returns the assigned atlas pos, if present. Coordinates are floats from 0 .. 1
def getAtlasPos(self): return self.atlasPos
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_position(self):\n position = (self.position_x * SPRITE_SIZE, self.position_y * SPRITE_SIZE)\n return position", "def _getCoords(self):\n\n if self._coords is not None:\n return self._coords[self._acsi]", "def fixture_coord():\n\tEXAMPLE_FILE_FOLDER = str(MODULE_DIR) + \"/data/nail1/\"\n\tcoord_x, coord_y, coord = read.load_coord(EXAMPLE_FILE_FOLDER)\n\treturn coord", "def get_position(self, position):", "def get_pos(self) -> tuple:\n return self.rect.center", "def get_coord(self):\n return self.coord", "def get_aa_pos_on_screen(self,position,frame):\n position=position*3+float(frame)-1\n x,y=self.get_base_pos_on_screen(position)\n y=y+20.0+float(frame)*15.0\n return x,y", "def assignAtlasPos(self, x, y):\n self.atlasPos = Vec2(x, y)\n self.doesHaveAtlasPos = True", "def get_pixel_pos(self):\n\n c = self.get_center()\n\n return Tank.three_by_three(c[0],c[1])", "def get_DNApos_fromcoords(self,x,y):\n\n # Are we close to the DNA sequence?\n if abs(y-self.seq_row)>10:\n return None\n\n # ok, DNA it is\n pos=int(float(x-self.seq_xstart+4.0)/self.base_scale.get())\n return pos", "def get_position(self):\n response = self.__send_and_receive(protocol.GET_COOR)\n value = self.__gen_response_value(response)\n if value:\n parse_cmd = self.__parse_cmd(response, [\"x\", \"y\", \"z\"])\n coordinate = [parse_cmd[\"x\"], parse_cmd[\"y\"], parse_cmd[\"z\"]]\n return coordinate\n else:\n return False", "def s(self, position: Vector) -> float:\n return self.local_coordinates(position)[0]", "def _get_center_pos(self):\n if not hasattr(self, 'lon_center'):\n raise ValueError('ERROR: You need to specify first the center position!')\n d = np.abs((self.x.lon - self.lon_center) ** 2. + (self.x.lat - self.lat_center) ** 2.)\n dmin = d.min()\n m = d == dmin\n\n idx = np.indices(d.shape)\n i = idx[0][m][0]\n j = idx[1][m][0]\n\n if (np.abs(1. - self.x.lon[i, j] / self.lon_center) > 0.05) or (np.abs(1. 
- self.x.lat[i, j] / self.lat_center) > 0.05): # at least 5% acc.\n print 'lon: ', self.x.lon[i, j], self.lon_center\n print 'lat: ', self.x.lat[i, j], self.lat_center\n i = None\n j = None\n return i, j", "def spot_coords(self,spot):\n if spot == '1':\n return (330 - 60 ,335 - 15)\n if spot == '2':\n return (419 - 60, 335 - 15)\n if spot == '3':\n return (591 - 60, 159 - 15)\n if spot == '4':\n return (588 - 60, 248 - 15)", "def get_pos(self, off_w=0, off_l=0, off_h=0):\n try:\n return self.world_grid[self.w + off_w][self.l + off_l][self.h + off_h]\n except IndexError:\n return blocks['wall']", "def get_pix_pos(self):\r\n return vec((self.grid_pos[0]*self.app.cell_width)+TOP_BOTTOM_BUFFER//2+self.app.cell_width//2,\r\n (self.grid_pos[1]*self.app.cell_height) +\r\n TOP_BOTTOM_BUFFER//2+self.app.cell_height//2)\r\n # where Pac-Man starts relative to the board\r", "def initialCoordinates():\r\n return (-250,-250)", "def get_position():\n\n return character['Position']", "def local_coordinates(self, position: np.ndarray) -> Tuple[float, float]:\n raise NotImplementedError()", "def get_position(self):\n return self.position", "def get_position(self):\n return self.position", "def getTilePos(self, pos = None):\n\n if not pos:\n pos = self.actor.getPos()\n \n for i in range(len(pos)):\n pos[i] = int(math.floor( (pos[i] + self.dimensions[i]) / 2.0))\n #pos[i] = int(math.floor( pos[i] / 2.0))\n\n return pos", "def _player_loc():\n return _to_my_vec3(_get_mc().player.getTilePos())", "def get_position(self):\n return self.__position", "def __get_position(self, value):\r\n if len(self.__matrix) > 5:\r\n number = self.AminoAcids()\r\n else:\r\n number = self.Bases()\r\n\r\n if value.upper() == self.A:\r\n return number.A\r\n\r\n elif value.upper() == self.R:\r\n return number.R\r\n\r\n elif value.upper() == self.N:\r\n return number.N\r\n\r\n elif value.upper() == self.D:\r\n return number.D\r\n\r\n elif value.upper() == self.C:\r\n return number.C\r\n\r\n elif value.upper() == self.Q:\r\n return number.Q\r\n\r\n elif value.upper() == self.E:\r\n return number.E\r\n\r\n elif value.upper() == self.G:\r\n return number.G\r\n\r\n elif value.upper() == self.H:\r\n return number.H\r\n\r\n elif value.upper() == self.I:\r\n return number.I\r\n\r\n elif value.upper() == self.L:\r\n return number.L\r\n\r\n elif value.upper() == self.K:\r\n return number.K\r\n\r\n elif value.upper() == self.M:\r\n return number.M\r\n\r\n elif value.upper() == self.F:\r\n return number.F\r\n\r\n elif value.upper() == self.P:\r\n return number.P\r\n\r\n elif value.upper() == self.S:\r\n return number.S\r\n\r\n elif value.upper() == self.T:\r\n return number.T\r\n\r\n elif value.upper() == self.W:\r\n return number.W\r\n\r\n elif value.upper() == self.Y:\r\n return number.Y\r\n\r\n elif value.upper() == self.V:\r\n return number.V\r\n\r\n else:\r\n return number.Star", "def get_position(self):\n pos_or_org = self.position.to_object\n if pos_or_org is None:\n return None\n elif pos_or_org.portal_type == 'position':\n return pos_or_org\n else:\n return None", "def getBallPos(self) -> (int,int):\n return self.x, self.y", "def coord(self):\r\n return self.model.coord", "def GetInTextureCoord(self):\n ...", "def get_pos(self):\n return (self.x/3, 3**0.5*self.y/3, self.r/3)" ]
[ "0.66034317", "0.65444887", "0.6444638", "0.63149214", "0.6291102", "0.6274993", "0.6270446", "0.6253127", "0.6180184", "0.6170509", "0.61551327", "0.61384577", "0.60982853", "0.6077503", "0.60670394", "0.6055914", "0.6049513", "0.60348207", "0.60224485", "0.6008913", "0.6008913", "0.5969079", "0.5948909", "0.5938092", "0.59295183", "0.5927111", "0.5926871", "0.5919976", "0.59185845", "0.59172964" ]
0.7560469
0
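The assignAtlasPos / getAtlasPos entries above exchange coordinates as floats in 0 .. 1; a minimal sketch of how an atlas could derive such a value from an integer tile slot (the real ShadowAtlas allocation logic is not part of this data, so the helper below is hypothetical):

    def tile_to_atlas_pos(tile_x, tile_y, tile_size, atlas_size):
        # Convert an integer tile slot into normalized atlas coordinates (0 .. 1),
        # the range that assignAtlasPos / getAtlasPos work with above.
        return (tile_x * tile_size / atlas_size,
                tile_y * tile_size / atlas_size)

    # A 512x512 shadow map placed in the third column, second row of a 4096x4096 atlas:
    u, v = tile_to_atlas_pos(2, 1, 512, 4096)
    print(u, v)  # 0.25 0.125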
Returns whether this ShadowSource already has a position in the shadow atlas
def hasAtlasPos(self): return self.doesHaveAtlasPos
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def contains_origin(self):\n return self.contains(self.ambient_space().zero())", "def has_positions(self):\n return self.positions.exists()", "def has_pos(self) -> object:\n return self._has_pos", "def is_origin(self) -> bool:\n return self.x == 0 and self.y == 0", "def has_destroyed_ship(self):\n if self.mark == constants.DEAD_SHIP_MARK:\n return True\n return False", "def available(self, position):\n if position is not None:\n x, y = position\n return self.grid[x][y] == 0", "def exists(self):\n try:\n self.world.find(self.ehandle)\n except KeyError:\n return False\n else:\n return True", "def has_position(self):\n if 'position' not in self.attrs:\n return False\n\n pos = self.position()\n if pos is None:\n return False\n\n if isinstance(pos, SharedPosition) and pos.value is None:\n return False\n\n return True", "def is_destroyed(self) -> bool:\n return self._coords == self.damaged_cells", "def __contains__(self, pos):\n if pos in self._coordinates:\n return True\n return False", "def exposed(self, position):\r\n x, y, z = position\r\n for dx, dy, dz in FACES:\r\n if (x + dx, y + dy, z + dz) not in self.world:\r\n return True\r\n return False", "def isstart(self) -> bool:\n return len(self._pile) == 0", "def cell_in_shadow(self, y, x):\n orig_x = x\n orig_value = self.grid[y, orig_x]\n \n # Move to the left\n x = self.add_x(orig_x, -1)\n \n max_in_row = self.grid[y].max()\n \n while x != orig_x:\n # Get the height difference that's needed from the shadow map\n height_needed = self.shadow_map[ (orig_x - x) % self.x_len]\n if self.grid[y, x] - orig_value >= height_needed:\n return True\n elif orig_value + height_needed > max_in_row:\n return False\n # Move to the left\n x = self.add_x(x, -1)\n \n return False", "def calc_is_new_position(self, game_state: dict):\n current_position = game_state['self'][3]\n if current_position in self.positions:\n return False\n else:\n return True", "def is_island(self):\n return bool(not self.children.exists() and not self.parents.exists())", "def isSource(self):\n return (len(self.parents()) == 0)", "def has_previous(self):\n if self.idx < len(self.nodes):\n return True\n else:\n return False", "def collision_check(self):\n return True", "def guard_occupy_transition(self):\n if not self.get_free_positions:\n return True", "def check_position(self, position):\n x_axis, y_axis = position\n try:\n return bool(self.full_map[x_axis][y_axis] not in \"#\"\n and 0 <= x_axis <= self.x_axis\n and 0 <= y_axis <= self.y_axis)\n\n except IndexError:\n return False", "def is_reachable_from(self, position: np.ndarray) -> bool:\n s, r = self.local_coordinates(position)\n return 0 <= s < self.length + CAR_LENGTH and np.abs(r) <= 2 * self.width", "def is_standalone(self, prev_locations):\n if self._locations != prev_locations:\n return True\n elif self.size:\n return True\n elif self._count:\n return True\n return False", "def is_over(self):\n # If menu is over reset the offset\n if self.is_dead:\n Drawable.WINDOW_OFFSET = self._old_offset\n return self.is_dead", "def has_moved(self):\n return self.move_count > 0", "def is_ship_sunk(self, x, y):\n marker = self.markers[x][y]\n total_hits = self.ship_hits[marker]\n return total_hits == MarkerType.MAX_HITS[marker]", "def isInPosition(self, serial):\n return serial in self.in_game and self.position == self.in_game.index(serial)", "def hit(self):\n\n self.units.pop()\n return (len(self.units) == 0) # Returns True if the ship has been sunk", "def has_current_location(self):\n return self.location_set.current_location is not 
None", "def isSetOffset(self):\n return _libsbml.Unit_isSetOffset(self)", "def is_brush(self) -> bool:\n return len(self.solids) > 0" ]
[ "0.660991", "0.65274745", "0.64275694", "0.6206538", "0.6185759", "0.61701924", "0.6113514", "0.6070053", "0.6006791", "0.59578675", "0.59305793", "0.59122664", "0.5864909", "0.58581674", "0.5855908", "0.581775", "0.58165866", "0.5779871", "0.5778968", "0.57593817", "0.5746244", "0.57451284", "0.5721688", "0.5717089", "0.5710882", "0.56753653", "0.56695575", "0.564671", "0.56429476", "0.56293184" ]
0.6653766
0
Deletes the atlas coordinates; called by the atlas after the source has been removed from the atlas
def removeFromAtlas(self): self.doesHaveAtlasPos = False self.atlasPos = Vec2(0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removeCoordinatesDumpFile(self):\n os.remove(self.COORDINATES_DUMP_FNAME)", "def cleanAll(self):\n for i in range(len(self.asteroid_type) - 1, -1, -1):\n x, y = self.get_coords(self.asteroid_type[i])\n self.del_asteroid(i)\n\n for i in range(len(self.asteroid_id_e) - 1, -1, -1):\n x, y = self.get_coords(self.asteroid_id_e[i])\n self.del_asteroid_e(i)", "def del_points(self):\r\n del self._points", "def delX(self):\n del self.components[0]", "def delX(self):\n del self.components[0]", "def clear_geometries(self):", "def delete_reference_array(self):\r\n del self.pxarray\r\n return", "def Remove(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_RemoveLocations_Remove(self, *args)", "def delete(self):\n del self.shx.atoms[self.index]", "def delY(self):\n del self.components[1]", "def delY(self):\n del self.components[1]", "def delete_data(self, *pos):\n r, c = pos\n self._grid[r][c] = None", "def delCoordset(self, index):\n\n n_csets = self._n_csets\n if not n_csets:\n raise AttributeError('coordinates are not set')\n\n which = np.ones(n_csets, bool)\n which[index] = False\n which = which.nonzero()[0]\n if len(which) == 0:\n self._coords = None\n self._n_csets = 0\n self._acsi = None\n self._cslabels = None\n self._kdtrees = None\n else:\n self._coords = self._coords[which]\n self._n_csets = self._coords.shape[0]\n self._acsi = 0\n self._cslabels = [self._cslabels[i] for i in which]\n self._kdtrees = [self._kdtrees[i] for i in which]\n self._timestamps = self._timestamps[which]", "def delete_this_region(self):", "def __del__(self) -> None:\n self.map.solid_id.discard(self.id)", "def delete_grid(self):\n\n\t\tself.a_grid = None\t\t# Deletes the object from memory", "def delete(self):\n if self.shape is not None:\n self.shape.delete()\n if self in shared.obstacles:\n shared.obstacles.remove(self)", "def destroy(self, coords):\n\n block = blocks[self.get_block(coords)]\n self.set_block(coords, block.replace)\n self.set_metadata(coords, 0)", "def destroy(self):\n\t\tfor team in range(len(self.dots)): #will cycle through each team\n\t\t\tfor i in range(len(self.dots[team])): #will cycle through each member of the team\n\t\t\t\tdot = self.dots[team][i]\n\t\t\t\tdot.removeNode()\n\t\tself.mousePosition.removeNode()\n\t\tself.mapimage.removeNode()\n\t\tself.map.removeNode()", "def pre_delete_centroid(sender, instance, **kwargs):\n Link.objects.filter(origin=instance.id).delete()\n Link.objects.filter(destination=instance.id).delete()", "def __del__(self):\n\n # Delete sprite (if it has been defined)\n try:\n self.canvas.delete(self.sprite)\n except AttributeError:\n pass\n except tk.TclError:\n pass", "def delete(self):\n Texture2D.delete_glo(self._ctx, self._glo)\n self._glo.value = 0", "def delPosition(self):\n self.components = [0 for i in range(len(self.components))]", "def delete(self, x, y):\n pass", "def clear(self):\n self.pointscontroller.pop(self.currentlyadded)", "def delete(self):\n\t\tself.canvas.delete('node_'+self.identifier)\n\t\tself.canvas.tag_unbind('node_'+self.identifier,\"<Any>\")", "def GetResult(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_RemoveLocations_GetResult(self, *args)", "def delete(self):\n\t\tif self.hasUdim:\n\t\t\tfor a in self.udimPaths:\n\t\t\t\ta.delete()\n\t\telse:\n\t\t\tsuper( textureFile, self ).delete()", "def remove_coords(cube, unwanted_coords):\n if type(unwanted_coords) != list:\n unwanted_coords = [unwanted_coords]\n for coord in unwanted_coords:\n try:\n cube.remove_coord(coord)\n except iris.exceptions.CoordinateNotFoundError:\n 
continue\n \n return cube", "def clear(self):\n self._x_prev = None\n self._y_prev = None" ]
[ "0.65160275", "0.64323896", "0.64059365", "0.63009447", "0.63009447", "0.62701464", "0.6259166", "0.622845", "0.62146705", "0.61958385", "0.61958385", "0.6183421", "0.60780424", "0.60723054", "0.6050865", "0.6039283", "0.603769", "0.6031997", "0.60197896", "0.60166305", "0.6008265", "0.59791607", "0.59729576", "0.5965139", "0.59559464", "0.59344447", "0.59050745", "0.59036267", "0.58926666", "0.5873874" ]
0.7005352
0
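Taken together, the hasAtlasPos, assignAtlasPos and removeFromAtlas entries describe a small assign/query/release lifecycle; a hypothetical driver, using a stub in place of the real ShadowSource class:

    class _StubSource:
        # Minimal stand-in for the atlas-related ShadowSource methods shown above.
        def __init__(self):
            self._pos, self._has = (0.0, 0.0), False
        def hasAtlasPos(self):
            return self._has
        def assignAtlasPos(self, x, y):
            self._pos, self._has = (x, y), True
        def removeFromAtlas(self):
            self._pos, self._has = (0.0, 0.0), False

    def reassign(source, new_pos):
        # Release any previously held slot before taking the new one.
        if source.hasAtlasPos():
            source.removeFromAtlas()
        source.assignAtlasPos(*new_pos)

    src = _StubSource()
    reassign(src, (0.25, 0.125))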
Sets up a PerspectiveLens with a given nearPlane, farPlane and FoV. The FoV is a tuple in the format (Horizontal FoV, Vertical FoV)
def setupPerspectiveLens(self, near=0.1, far=100.0, fov=(90, 90)): # self.debug("setupPerspectiveLens(",near,",",far,",",fov,")") self.lens = PerspectiveLens() self.lens.setNearFar(near, far) self.lens.setFov(fov[0], fov[1]) self.camera.setLens(self.lens) self.nearPlane = near self.farPlane = far self.rebuildMatrixCache()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setupOrtographicLens(self, near=0.1, far=100.0, filmSize=(512, 512)):\n # self.debug(\"setupOrtographicLens(\",near,\",\",far,\",\",filmSize,\")\")\n self.lens = OrthographicLens()\n self.lens.setNearFar(near, far)\n self.lens.setFilmSize(*filmSize)\n self.camera.setLens(self.lens)\n self.nearPlane = near\n self.farPlane = far\n self.rebuildMatrixCache()", "def perspectiveFovLH(field_of_view, aspect, znear, zfar):\n h = 1 / tan(field_of_view / 2)\n w = h / aspect\n m = [\n [w, 0, 0, 0],\n [0, h, 0, 0],\n [0, 0, zfar / (zfar - znear), 1],\n [0, 0, (znear * zfar) / (znear - zfar), 0],\n ]\n return Matrix(m)", "def pinfPerspective( fov, aspect, near, far=None ):\n result = zeros( (4,4),'d')\n # need the cotangent of the field-of-view\n cotFOV = 1/tan(fov)\n result[0,0] = cotFOV/aspect\n result[1,1] = cotFOV\n result[2,2:4] = -1\n result[3,2] = -2*near\n return result", "def build_perspective_camera(field_of_view=60.0,\n aspect_ratio=1.0,\n near_plane=0.01,\n far_plane=1000.0,\n position=(0.0, 0.0, 5.0),\n enable_zoom=False):\n context = build_context()\n camera = context.THREE.PerspectiveCamera.new_object(field_of_view,\n aspect_ratio, near_plane,\n far_plane)\n camera.position.set(*position)\n controls = context.THREE.OrbitControls.new_object(camera)\n controls.enableZoom = enable_zoom\n return camera", "def perspective(self, fovy, aspect, near, far):\r\n\r\n top = near * math.tan(fovy * math.pi / 360.0)\r\n bottom = -top\r\n left = bottom * aspect\r\n right = top * aspect\r\n\r\n return self.frustum(left, right, bottom, top, near, far)", "def setFov(self,fov):\n self.light.node().getLens().setFov(fov)", "def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, 1)", "def project(self, win_width, win_height, fov, viewer_distance):\r\n factor = fov / (viewer_distance + self.z)\r\n x = self.x * factor + win_width / 2\r\n y = -self.y * factor + win_height / 2\r\n return Point3D(x, y, 1)", "def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, self.z)", "def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, self.z)", "def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, self.z)", "def project(self, win_width, win_height, fov, viewer_distance):\n\t\tfactor = fov / (viewer_distance + self.z)\n\t\tx = self.x * factor + win_width / 2\n\t\ty = -self.y * factor + win_height / 2\n\t\treturn Point3D(x, y, 1)", "def fov_setting(points, x, y, z, dist, h_fov, v_fov):\n\n if h_fov[1] == 180 and h_fov[0] == -180 and v_fov[1] == 2.0 and v_fov[0] == -24.9:\n return points\n\n if h_fov[1] == 180 and h_fov[0] == -180:\n return points[in_v_range_points(dist, z, v_fov)]\n elif v_fov[1] == 2.0 and v_fov[0] == -24.9:\n return points[in_h_range_points(x, y, h_fov)]\n else:\n h_points = in_h_range_points(x, y, h_fov)\n v_points = in_v_range_points(dist, z, v_fov)\n return points[np.logical_and(h_points, v_points)]", "def __init__(\n self,\n vmf_file: VMF,\n planes: List[Vec],\n des_id: int = -1,\n 
lightmap: int = 16,\n smoothing: int = 0,\n mat: str = 'tools/toolsnodraw',\n rotation: float = 0,\n uaxis: Optional[UVAxis] = None,\n vaxis: Optional[UVAxis] = None,\n disp_power: DispPower = 0,\n ) -> None:\n self.map = vmf_file\n if len(planes) != 3:\n raise ValueError('Must have only 3 planes!')\n self.planes = planes\n self.id = vmf_file.face_id.get_id(des_id)\n self.lightmap = lightmap\n self.smooth = smoothing\n self.mat = mat\n self.ham_rot = rotation\n self.uaxis = uaxis or UVAxis(0, 1, 0)\n self.vaxis = vaxis or UVAxis(0, 0, -1)\n\n self.disp_power = disp_power\n self.disp_flags = DispFlag.COLL_ALL\n self.disp_elevation = 0.0\n if disp_power > 0:\n self._disp_verts = [\n DispVertex(x, y)\n for y in range(self.disp_size)\n for x in range(self.disp_size)\n ]\n self.disp_pos = Vec()\n self.disp_allowed_vert = Array('i', (-1, ) * 10)\n else:\n self._disp_verts = self.disp_pos = self.disp_allowed_vert = None", "def test_vfov_from_hfov(self):\n width = 700\n height = 480\n hfov = 60\n\n # TODO(marcus): make sure these expected values are correct!\n actual = tesse_ros_bridge.utils.vfov_from_hfov(hfov, width, height)\n expected = 43.19696059328124\n self.assertEqual(actual, expected)", "def set_fov(self, spacing=None, corner=(0, 0, 0), size=None,\n npoints=None):\n if spacing is None and npoints is None:\n spacing = [1, 1, 1]\n if size is None:\n size = self._from_img.shape\n slicer = lambda c, s, sp:\\\n tuple([slice(c[i], s[i] + c[i], sp[i]) for i in range(3)])\n # Adjust spacing to match desired field of view size\n if spacing is not None:\n fov_data = self._from_img.get_data()[slicer(corner, size, spacing)]\n else:\n fov_data = self._from_img.get_data()[slicer(corner, size, [1, 1, 1])]\n spacing = ideal_spacing(fov_data, npoints=npoints)\n fov_data = self._from_img.get_data()[slicer(corner, size, spacing)]\n self._from_data = fov_data\n self._from_npoints = (fov_data >= 0).sum()\n self._from_affine = subgrid_affine(self._from_img.get_affine(),\n slicer(corner, size, spacing))\n # We cache the voxel coordinates of the clamped image\n self._from_spacing = spacing\n self._vox_coords =\\\n np.indices(self._from_data.shape).transpose((1, 2, 3, 0))", "def setup_localxyzs(self):\n self.localxyzs = [sp.vscl(1.0/v[2],v) for v in self.uvlclxyzs]\n self.fovsides = list()\n lastxyz = self.localxyzs[-1]\n for xyz in self.localxyzs:\n self.fovsides.append(FOVSIDE(xyz,lastxyz))\n lastxyz = xyz", "def test_f_from_hfov(self):\n width = 700\n height = 480\n hfov = 60\n vfov = 60\n\n # TODO(marcus): make sure these expected values are correct!\n actual = tesse_ros_bridge.utils.fx_from_hfov(hfov, width)\n expected = 606.2177826491071\n self.assertEqual(actual, expected)\n\n actual = tesse_ros_bridge.utils.fy_from_vfov(vfov, height)\n expected = 415.69219381653056\n self.assertEqual(actual, expected)", "def create_cam_fov(self, name):\n\n # Vertices of FOV\n V = [\n (0, 0, -self.SAT_PROPS[\"Alt\"]),\n tuple(self.CAM_PROPS[name][\"Intercepts\"][:, 0]),\n tuple(self.CAM_PROPS[name][\"Intercepts\"][:, 1]),\n tuple(self.CAM_PROPS[name][\"Intercepts\"][:, 2]),\n tuple(self.CAM_PROPS[name][\"Intercepts\"][:, 3])\n ]\n\n # Faces of FOV\n F = [(0, 1, 2), (0, 2, 3), (0, 3, 4), (0, 4, 1)]\n\n # Create building blocks of polydata\n cam = vtk.vtkPolyData()\n points = vtk.vtkPoints()\n polys = vtk.vtkCellArray()\n scalars = vtk.vtkFloatArray()\n\n # Load the point, cell and data attributes\n for i in range(5):\n points.InsertPoint(i, V[i])\n for i in range(4):\n polys.InsertNextCell( self.mkVtkIdList(F[i]))\n for i 
in range(5):\n scalars.InsertTuple1(i,i)\n\n # Assign the pieces to the vtkPolyData.\n cam.SetPoints(points)\n del points\n cam.SetPolys(polys)\n del polys\n cam.GetPointData().SetScalars(scalars)\n del scalars\n\n # Mapper\n mapper = vtk.vtkPolyDataMapper()\n mapper.SetInputData(cam)\n mapper.ScalarVisibilityOff()\n\n # Actor\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n actor.GetProperty().SetColor(0.5, 1, 0.5)\n actor.GetProperty().SetAmbient(0.5)\n actor.GetProperty().SetOpacity(0.1)\n\n return actor", "def test_register_fov(self):\n task = MesoscopeFOV(self.session_path, device_collection='raw_imaging_data', one=self.one)\n mlapdv = {'topLeft': [2317.2, -1599.8, -535.5], 'topRight': [2862.7, -1625.2, -748.7],\n 'bottomLeft': [2317.3, -2181.4, -466.3], 'bottomRight': [2862.7, -2206.9, -679.4],\n 'center': [2596.1, -1900.5, -588.6]}\n meta = {'FOV': [{'MLAPDV': mlapdv, 'nXnYnZ': [512, 512, 1], 'roiUUID': 0}]}\n with unittest.mock.patch.object(self.one.alyx, 'rest') as mock_rest:\n task.register_fov(meta, 'estimate')\n calls = mock_rest.call_args_list\n self.assertEqual(3, len(calls))\n\n args, kwargs = calls[1]\n self.assertEqual(('fields-of-view', 'create'), args)\n expected = {'data': {'session': None, 'imaging_type': 'mesoscope', 'name': 'FOV_00', 'stack': None}}\n self.assertEqual(expected, kwargs)\n\n args, kwargs = calls[2]\n self.assertEqual(('fov-location', 'create'), args)\n expected = ['field_of_view', 'default_provenance', 'coordinate_system', 'n_xyz', 'provenance', 'x', 'y', 'z',\n 'brain_region']\n self.assertCountEqual(expected, kwargs.get('data', {}).keys())\n self.assertEqual(5, len(kwargs['data']['brain_region']))\n self.assertEqual([512, 512, 1], kwargs['data']['n_xyz'])\n self.assertIs(kwargs['data']['field_of_view'], mock_rest().get('id'))\n self.assertEqual('E', kwargs['data']['provenance'])\n self.assertEqual([2317.2, 2862.7, 2317.3, 2862.7], kwargs['data']['x'])\n\n # Check dry mode with suffix input = None\n for file in self.session_path.joinpath('alf', 'FOV_00').glob('mpciMeanImage.*'):\n file.replace(file.with_name(file.name.replace('_estimate', '')))\n self.one.mode = 'local'\n with unittest.mock.patch.object(self.one.alyx, 'rest') as mock_rest:\n out = task.register_fov(meta, None)\n mock_rest.assert_not_called()\n self.assertEqual(1, len(out))\n self.assertEqual('FOV_00', out[0].get('name'))\n locations = out[0]['location']\n self.assertEqual(1, len(locations))\n self.assertEqual('L', locations[0].get('provenance', 'L'))", "def frustum(self, left, right, bottom, top, near, far):\r\n \r\n return mat4( (2.0*near)/(right-left), 0.0, float(right+left)/(right-left), 0.0,\r\n 0.0, (2.0*near)/(top-bottom), float(top+bottom)/(top-bottom), 0.0,\r\n 0.0, 0.0, -float(far+near)/(far-near), -(2.0*far*near)/(far-near),\r\n 0.0, 0.0, -1.0, 0.0)", "def __init__(self, position, focal_point, viewup):\n self._position = position\n self._focal_point = focal_point\n self._viewup = viewup", "def __set_perspective(self):\n\n src = np.float32([[(.42 * self.img_shape[1],.65 * self.img_shape[0] ),\n (.58 * self.img_shape[1], .65 * self.img_shape[0]),\n (0 * self.img_shape[1],self.img_shape[0]),\n (1 * self.img_shape[1], self.img_shape[0])]])\n\n dst = np.float32([[0,0],\n [self.img_shape[1],0],\n [0,self.img_shape[0]],\n [self.img_shape[1],self.img_shape[0]]])\n\n self.M = cv2.getPerspectiveTransform(src, dst)\n self.M_inv = cv2.getPerspectiveTransform(dst, src)", "def __init__(self,fovraws,ralohi=(),declohi=()\n ,obs_pos=None,obs_vel=None,obs_year=None\n ):\n ### Get count of 
items in FOV sequence; ensure it is 2 or more\n ### and ralohi and declohi are empty, or that fovraws is empty\n ### and ralohi and declohi have 2 values each\n (self.fovraws\n ,self.ralohi\n ,self.declohi\n ,self.obs_pos\n ,self.obs_vel\n ,self.obs_year\n ,)= fovraws,list(ralohi),list(declohi),obs_pos,obs_vel,obs_year\n self.L = len(fovraws)\n assert (1<self.L and not (self.ralohi+self.declohi)\n ) or (0==self.L and 2==len(self.ralohi) and 2==len(self.declohi)\n ), 'Invalid vertices in FOV'\n\n ################################\n ### Initialize: FOV RA,Dec pairs; FOV type (assume polygon); FOV\n ### vector triples; list of RA,Dec boxes\n self.radecdegs = list()\n self.fovtype = 1<self.L and FOV.POLYGONTYPE or FOV.RADECBOXTYPE\n self.uvfovxyzs,fovsum = list(),sp.vpack(0.,0.,0.)\n self.radec_boxes = list()\n rdba = self.radec_boxes.append ### Shorthand to append box to list\n\n ################################\n ### Parse list of vertices:\n ### - [list,float] => Circle (cone)\n ### - [list,list] => RA,Dec box\n ### - [list,list,list,...] => Polygon\n for vertex in fovraws:\n\n ### For second of two vertices ...\n if 1==len(self.radecdegs) and 2==self.L:\n ### Two-vertex items are either a conic FOV, or an [RA,Dec] box\n try:\n ### If second item in list is a float, then it's a half-angle\n ### of the cone\n self.hangdeg = float(vertex)\n assert self.hangdeg < 90.0,'Cone half-angle is not less than 90degrees'\n assert self.hangdeg > 0.0,'Cone half-angle is not greater than 0degrees'\n self.hangrad = self.hangdeg * rpd\n self.min_cosine = math.cos(self.hangrad)\n self.uv_cone_axis = self.uvfovxyzs[0]\n self.fovtype = FOV.CIRCLETYPE\n break\n except AssertionError as e:\n raise\n except:\n ### If the above fails, then it's the second corner of the box\n self.fovtype = FOV.RADECBOXTYPE\n\n ### Parse one vertex\n ra,dec,uvxyz = parse_inertial(vertex)\n\n ### Append RA,Dec and unit vector XYZ onto their resepective lists\n self.radecdegs.append((ra,dec,))\n self.uvfovxyzs.append(uvxyz)\n fovsum = sp.vadd(fovsum,uvxyz)\n\n ################################\n ### Calculate RA,DEC limits as list of [ralo,rahi,declo,dechi] boxes\n ### - .radec_boxes is a list; rdba is .radec_boxes.append\n ### - List will have multiple RA,Dec boxes if FOV crosses the Prime\n ### Meridian (PM) an even number of times.\n\n if self.fovtype == FOV.RADECBOXTYPE:\n ### RA,DEC box FOV: calculate limits; handle PM crossing\n if 2==self.L:\n ras,decs = zip(*self.radecdegs)\n ralo,rahi = sorted(ras)\n declo,dechi = sorted(decs)\n if 180 > (rahi-ralo):\n rdba([ralo,rahi,declo,dechi])\n else:\n rdba([0.0,ralo,declo,dechi])\n rdba([rahi,360.0,declo,dechi])\n else:\n if self.ralohi[1] > self.ralohi[0]:\n rdba(self.ralohi+self.declohi)\n else:\n rdba([self.ralohi[0],360.0]+self.declohi)\n rdba([0.0,self.ralohi[1]]+self.declohi)\n\n elif self.fovtype == FOV.CIRCLETYPE:\n ### Circular FOV: DEC limits determine RA limits; handle PM Xing\n ra,dec = self.radecdegs[0]\n fovdeclo = dec - self.hangdeg\n fovdechi = dec + self.hangdeg\n\n if fovdeclo < -90.0 or fovdechi > 90.0:\n ### A pole is in the FOV; use full RA range\n fovralo,fovrahi = 0.0,360.0\n fovdeclo,fovdechi = max([fovdeclo,-90.0]),min([fovdechi,+90.0])\n\n elif fovdeclo == -90.0 or fovdechi == 90.0:\n ### A pole is on the FOV circumference; RA range is 180 degrees\n fovralo,fovrahi = ra-90.0,ra+90.0\n\n else:\n ### The FOV excludes the poles; calculate the RA range, using\n ### the formula validated in script validate_delta_ra_formula.py\n tanhang,tandec = 
math.tan(self.hangrad),math.tan(dec*rpd)\n sinhang,cosdec = math.sin(self.hangrad),math.cos(dec*rpd)\n coshang = math.cos(self.hangrad)\n T = sinhang / math.sqrt(1.0 - ((tanhang*tandec)**2))\n deltara = dpr * math.atan(T / (cosdec * coshang))\n fovralo,fovrahi = ra-deltara,ra+deltara\n\n ### Ensure RA limits are within range [0:360] (N.B. inclusive)\n if fovralo < 0.0: fovralo += 360.0\n if fovrahi > 360.0: fovrahi -= 360.0\n\n if fovralo <= fovrahi:\n ### RA lo <= RA hi: no PM crosssing\n rdba([fovralo,fovrahi,fovdeclo,fovdechi])\n else:\n ### RA hi < RA hi: there is a PM crosssing\n rdba([0.0,fovrahi,fovdeclo,fovdechi])\n rdba([fovralo,360.,fovdeclo,fovdechi])\n\n else:\n assert self.fovtype == FOV.POLYGONTYPE\n ### Polygonal FOV: build frame where all vertices will be\n ### projected onto the plane Z=1\n\n ### .uvavg: unit vector = mean of all vertices, will be +Z\n self.uvavg = sp.vhat(fovsum)\n\n ### Create rotation matrix to FOV frame: +Z is mean of vertices'\n ### directions (.uvavg); +X will be a direction that is not\n ### parallel to any side of the polygon\n ### - Start with temporary matrix with +Z as defined above; +X\n ### toward vertex at largest angle from .uvavg\n vother = min([(sp.vdot(self.uvavg,v),list(v),) for v in self.uvfovxyzs])[1]\n tmpmtx = sp.twovec(self.uvavg,3,vother,1)\n ### - Rotate all vectors to that frame; scale Z components to 1.0\n vtmps = list()\n for v in self.uvfovxyzs:\n ### - Ensure all vertices are in the same hemisphere\n assert 0.0 < sp.vdot(self.uvavg,v),'All vertices are not in the same hemisphere'\n vtmp = sp.mxv(tmpmtx,v)\n vtmps.append(sp.vscl(1.0/vtmp[2],vtmp))\n\n ### Find largest azimuth gap between any two sides: that azimuth\n ### will be direction of +X in the final rotation matrix\n ### - Get azimuths of all sides of polygon, in range [-PI:PI]\n azimuths,vlast = list(),vtmps[-1]\n for v in self.uvfovxyzs:\n azimuths.append(numpy.arctan((v[1]-vlast[1])/(v[0]-vlast[0])))\n vlast = v\n ### - Sort angles and add [least angle plus PI] to end of list\n azimuths.sort()\n azimuths.append(azimuths[0]+sp.pi())\n ### - Find largest delta-azimuth and its index\n dazimuths = [hi-lo for hi,lo in zip(azimuths[1:],azimuths[:-1])]\n maxdaz = max(dazimuths)\n imaxdaz = dazimuths.index(maxdaz)\n ### - Calculate azimuth from to mean of that delta-azimuth,\n meanaz = azimuths[imaxdaz] + (maxdaz / 2.0)\n\n ### Final matrix: add rotation of tmpmtx around +Z by that angle\n self.mtxtofov = sp.mxm(sp.rotate(meanaz,3),tmpmtx)\n\n ### Apply final rotation matrix, store results in .uvlclxyzs\n tmpmtx = sp.twovec(self.uvavg,3,vother,1)\n self.uvlclxyzs = [self.rotate_to_local(v) for v in self.uvfovxyzs]\n\n ### Calculate upper and lower RA and Dec limits, with PM crossings\n los,his = list(),list()\n ### - Create [[RA,Dec],[X,Y,Z]] pairs list; ensure last is off PM\n pairs = list(zip(self.radecdegs,self.uvfovxyzs))\n pop_count = 0\n while pairs[-1][0][0] == 0.0:\n pop_count += 1\n assert pop_count < self.L,'All vertices are on the Prime Meridian'\n pairs.append(pairs.pop(0))\n\n ### Count PM crossings\n self.crossing_count = 0\n lastra = pairs[-1][0][0]\n zero_count = 0\n for (ra,dec,),xyz in pairs:\n if ra == 0.0:\n zero_count += 1\n if lastra > 180.0: ra = 360.0\n if 180 < abs(ra-lastra): self.crossing_count += 1\n lastra = ra\n\n if 0==self.crossing_count or 1==(1&self.crossing_count):\n ### If there are either no, or an odd number, of PM crossings,\n ### then use the pairs as-is for a single FOV\n subfovs = [pairs]\n if self.crossing_count:\n ### - For odd 
crossing count, one pole or the other must be\n ### in the FOV; init full RA range, that pole for Dec ranges\n ralo,rahi = 0.0,360.0\n if sp.vdot(self.uvavg,[0,0,1]) > 0.0: declo = dechi = +90.0\n else : declo = dechi = -90.0\n else:\n ### - For zero crossing count, initialize inverted ranges\n ralo,rahi = 360.0,0.0\n declo,dechi = +90.0,-90.0\n subranges = [[ralo,rahi,declo,dechi]]\n\n else:\n ### If there are an even, non-zero number of PM crossings, break\n ### them into two sub-FOVs, one on either side of the PM\n\n eastfov,westfov = list(),list()\n\n if zero_count:\n ### If there are any zero RA values, rotate the pairs to\n ### ensure a zero-RA pair is the first, so it and the non-zero\n ### last pair will be assigned to the correct side of the PM\n while pairs[0][0][0]!=0.0: pairs.append(pairs.pop(0))\n else:\n ### If there are no zero RA values, rotate the pairs to ensure\n ### a crossing occurs between the last and first pair, so the\n ### corresponding zero crossing will be assigned to the\n ### correct side of the PM\n while abs(pairs[0][0][0]-pairs[-1][0][0])<180:\n pairs.append(pairs.pop(0))\n\n ### Write vertices into the two sub-FOVs\n\n ### - Set last-vertex values for first item in pairs\n (lastra,lastdec,),lastxyz = pairs[-1]\n\n for pair in pairs:\n ### - Loop over vertex pairs ((RA,DEC,),Cartesian_Vector)\n (ra,dec,),xyz = pair\n\n if ra == 0.0:\n\n ### - When RA=0, the previous RA determines if it's 0 ar 360\n if lastra >= 180.0:\n ra = 360.0\n westfov.append([(ra,dec,),xyz])\n iswest = True\n else:\n eastfov.append(pair)\n iswest = False\n\n elif abs(lastra-ra) >= 180.0:\n\n ### - When the change in RA>=180, the PM is being crossed\n\n ### - Find the mid-vector where the PM is crossed\n k1 = -xyz[1] / (lastxyz[1]-xyz[1])\n midxyz = sp.vhat(sp.vlcom(1.0-k1,xyz,k1,lastxyz))\n middec = dpr * sp.recrad(midxyz)[2]\n\n ### - Add that mid-vector, with RA=360, to the west FOV\n westfov.append([(360.0,middec,),midxyz])\n\n ### - Determine if vector is west\n iswest = ra >= 180.0\n\n ### - Add that mid-vector, with RA=0, to the east FOV ...\n if (ra > 0.0) and (not iswest):\n ### - ... 
only if the ra is not already 0, as it will be\n ### added in the next step\n eastfov.append([(0.0,middec,),midxyz])\n\n ### Add the vector to either east or west FOV\n if iswest: westfov.append(pair)\n else : eastfov.append(pair)\n\n else:\n\n ### PM was not crossed, add vector to same FOV, as last time\n if iswest: westfov.append(pair)\n else : eastfov.append(pair)\n\n ### - Set last-vertex values for next item in pairs\n (lastra,lastdec,),lastxyz = (ra,dec,),xyz\n\n ### - Create subfovs list of east and west FOVs; set subranges\n subfovs = [eastfov,westfov]\n subranges = [[360.0,0.0,90.0,-90.0],[360.0,0.0,90.0,-90.0]]\n\n ### To here, we have list of FOV(s) and list of range(s); use them\n ### to determine RA,DEC box(es) to use for database query\n\n while subfovs:\n\n ### Get sub-FOV, sub-range; set last vertex's XYZ\n subfov,(ralo,rahi,declo,dechi,) = subfovs.pop(),subranges.pop()\n lastxyz = subfov[-1][-1]\n\n for pair in subfov:\n ### Each element of subfov comprises (RA,Dec) and vertex XYZ\n ### - xyz is a unit vector\n (ra,dec,),xyz = pair\n\n ### - Adjust RA limits as needed from RA of vertex\n if ra > rahi: rahi = ra\n elif ra < ralo: ralo = ra\n\n ### - Set Dec extrema from DEC of vertex\n maxdec = mindec = dec\n\n ### - Calculate Dec extrema from lastxyz to xyz\n ### -- Normal to plane of lastxyz and syz\n sidenormal = sp.vcrss(lastxyz,xyz)\n ### -- Z-rates along great circle at lastxyz and at xyz\n lastdz = sp.vcrss(sidenormal,lastxyz)[2]\n dz = sp.vcrss(sidenormal,xyz)[2]\n if 0.0 > (lastdz*dz):\n ### -- If sign of Z-rates differs, there should be an\n ### extreme value between lastxyz and xyz\n ### --- Get vector perpendicular to side normal on equator\n ### --- Use that to calculate the unit vector at Dec extreme\n equinox = sp.vcrss([0,0,1],sidenormal)\n vtoextremez = sp.ucrss(sidenormal,equinox)\n ### --- Cosine of angle between lastxyz and xyz\n mindot = sp.vdot(lastxyz,xyz)\n for none in [None,None]:\n ### --- Two cases: vtoextremez and -vtoextremez\n ### - Angles from vtoextremez to lastxyz and to xyz\n ### must be less than angle between lastxyz and xyz\n ### so cosines of those angles must be greater\n lastxyzdot = sp.vdot(lastxyz,vtoextremez)\n xyzdot = sp.vdot(xyz,vtoextremez)\n if lastxyzdot>mindot and xyzdot>mindot:\n ### --- Adjust maxdec and mindec as needed\n try : extremedec = dpr * math.asin(vtoextremez[2])\n except: extremedec = dpr * sp.recrad(vtoextremez)[2]\n if extremedec > maxdec: maxdec = extremedec\n elif extremedec < mindec: mindec = extremedec\n break\n ### --- Invert vtoextremez for next pass\n vtoextremez = sp.vminus(vtoextremez)\n\n ### - Adjust Dec limits as needed from Dec extrema of side\n if maxdec > dechi: dechi = maxdec\n if mindec < declo: declo = mindec\n lastxyz = xyz\n\n ### Append calculated RA,Dec box(es)\n rdba((ralo,rahi,declo,dechi,))\n\n ### Put None in .localxyzs, in .v_for_stellar_aberr, and in\n ### .v_for_parallax; if no stellar aberration or parallax is\n ### explicitly applied to define it later, then .localxyzs will be\n ### calculated on the fly\n self.localxyzs = None\n self.v_for_stellar_aberr = None\n self.v_for_parallax = None", "def set_hfov_dimension(self, hfov, aspect_ratio, near, far):\n r = near * np.tan(hfov / 2)\n l = -r\n t = r / aspect_ratio\n b = -t\n self.__fdim = l, r, b, t, near, far\n return self", "def fov(self, fov: float):\n assert type(fov) in (int, float)\n self._fov[self.projection_mode.value] = fov\n self._reset_matrix()", "def getPerspectiveProjectionMatrix(l, r, b, t, n, f):\n e11 = 2 * n / (r 
- l)\n e13 = (r + l) / (r - l)\n e22 = (2 * n) / (t - b)\n e23 = (t + b) / (t - b)\n e33 = -1 * (f + n) / (f - n)\n e34 = (-2 * f * n) / (f - n)\n\n return MatrixExtended([\n [e11, 0, e13, 0],\n [0, e22, e23, 0],\n [0, 0, e33, e34],\n [0, 0, -1, 0]])", "def proj_to_velo(calib_data):\n rect = calib_data[\"R0_rect\"].reshape(3, 3)\n #to transform a point from Lidar framce to camera frame\n #reshape the flat line with 12 elements to 3X4 matrix\n velo_to_cam = calib_data[\"Tr_velo_to_cam\"].reshape(3, 4)\n#print('velo2cam', velo_to_cam)\n inv_rect = np.linalg.inv(rect)\n #select all rows and only first three columns\n#print('velo_to_cam[:, :3]', velo_to_cam[:, :3])\n #select all rows and only first three columns\n inv_velo_to_cam = np.linalg.pinv(velo_to_cam[:, :3])\n return np.dot(inv_velo_to_cam, inv_rect)", "def __init__(self, eye, look_at, up, near, far,\n view_angle_h=45, view_angle_v=45):\n self.near = float(near)\n self.far = float(far)\n # The internally-stored view angles are in radians away from the\n # look vector.\n self.view_angle_h = math.pi*(view_angle_h/2.0)/180\n self.view_angle_v = math.pi*(view_angle_v/2.0)/180\n self.setPose(eye, look_at, up)", "def parse(cls, vmf_file: VMF, tree: Keyvalues) -> 'Side':\n # planes = \"(x1 y1 z1) (x2 y2 z2) (x3 y3 z3)\"\n verts = tree[\"plane\", \"(0 0 0) (0 0 0) (0 0 0)\"][1:-1].split(\") (\")\n if len(verts) != 3:\n raise ValueError('Wrong number of solid planes in \"' +\n tree['plane', ''] +\n '\"')\n planes = [\n Vec.from_str(verts[0]),\n Vec.from_str(verts[1]),\n Vec.from_str(verts[2]),\n ]\n\n side: Side = cls(\n vmf_file,\n planes,\n tree.int('id', -1),\n tree.int('lightmapscale', 16),\n tree.int('smoothing_groups'),\n tree['material', ''],\n tree.float('rotation'),\n UVAxis.parse(tree['uaxis', '[0 1 0 0] 0.25']),\n UVAxis.parse(tree['vaxis', '[0 0 -1 0] 0.25']),\n )\n\n try:\n disp_tree = tree.find_key('dispinfo')\n except LookupError: # Not a displacement.\n return side\n\n # Deal with displacements.\n disp_power = disp_tree.int('power', 4)\n if disp_power in (0, 1, 2, 3, 4):\n side.disp_power = disp_power # type: ignore\n else:\n raise ValueError(f'Invalid displacement power {disp_power}!')\n side.disp_pos = disp_tree.vec('startposition')\n side.disp_elevation = disp_tree.float('elevation')\n disp_flag_ind = disp_tree.int('flags')\n if 0 <= disp_flag_ind <= 16:\n side.disp_flags = _DISP_FLAG_TO_COLL[disp_flag_ind]\n else:\n raise ValueError(f'Invalid displacement flags {disp_flag_ind} in side {side.id}!')\n if disp_tree.bool('subdiv'):\n side.disp_flags |= DispFlag.SUBDIV\n\n # This always has a key of '10', with 10 '-1's...\n vert_key = disp_tree.find_key('allowed_verts')\n allowed_vert = Array('i', map(int, vert_key['10'].split()))\n if len(allowed_vert) != 10:\n raise ValueError(\n f'Displacement allowed_verts in side {side.id} '\n f'must be 10 long!'\n )\n side.disp_allowed_vert = allowed_vert\n\n size = side.disp_size\n side._disp_verts = [\n DispVertex(x, y)\n for y in range(size)\n for x in range(size)\n ]\n # Parse all the rows..\n side._parse_disp_vecrow(disp_tree, 'normals', 'normal')\n side._parse_disp_vecrow(disp_tree, 'offsets', 'offset')\n side._parse_disp_vecrow(disp_tree, 'offset_normals', 'offset_norm')\n\n for y, row in side._iter_disp_row(disp_tree, 'alphas', size):\n try:\n for x, alpha in enumerate(row):\n side._disp_verts[y * size + x].alpha = float(alpha)\n except ValueError as exc:\n raise ValueError(\n f'Displacement array for alpha in side {side.id}, '\n f'row {y} had invalid number: {exc.args[0]}'\n ) from 
None\n\n for y, row in side._iter_disp_row(disp_tree, 'distances', size):\n try:\n for x, alpha in enumerate(row):\n side._disp_verts[y * size + x].distance = float(alpha)\n except ValueError as exc:\n raise ValueError(\n f'Displacement array for distances in side {side.id}, '\n f'row {y} had invalid number: {exc.args[0]}'\n ) from None\n\n # Not the same, 1 less row and column since it's per-quad.\n tri_tags_count = 2 ** disp_power\n for y, row in side._iter_disp_row(disp_tree, 'triangle_tags', 2 * tri_tags_count):\n try:\n for x in range(tri_tags_count):\n vert = side._disp_verts[y * size + x]\n vert.triangle_a = TriangleTag(int(row[2 * x]))\n vert.triangle_b = TriangleTag(int(row[2 * x + 1]))\n except ValueError as exc:\n raise ValueError(\n f'Displacement array for triangle tags in side {side.id}, '\n f'row {y} had invalid number: {exc.args[0]}'\n ) from None\n\n if 'multiblend' not in disp_tree:\n return side\n # Else: Parse multiblend too.\n # First initialise this list.\n for vert in side._disp_verts:\n vert.multi_colors = [Vec(1, 1, 1), Vec(1, 1, 1), Vec(1, 1, 1), Vec(1, 1, 1)]\n for i in range(4):\n side._parse_disp_vecrow(disp_tree, 'multiblend_color_' + str(i), i)\n\n for y, split in side._iter_disp_row(disp_tree, 'multiblend', 4 * size):\n try:\n for x in range(size):\n side._disp_verts[y * size + x].multi_blend = Vec4(\n float(split[4*x]),\n float(split[4*x + 1]),\n float(split[4*x + 2]),\n float(split[4*x + 3]),\n )\n except ValueError as exc:\n raise ValueError(\n f'Displacement array for multiblend in side {side.id}, '\n f'row {y} had invalid number: {exc.args[0]}'\n ) from None\n\n for y, split in side._iter_disp_row(disp_tree, 'alphablend', 4 * size):\n try:\n for x in range(size):\n side._disp_verts[y * size + x].multi_alpha = Vec4(\n float(split[4*x]),\n float(split[4*x + 1]),\n float(split[4*x + 2]),\n float(split[4*x + 3]),\n )\n except ValueError as exc:\n raise ValueError(\n f'Displacement array for multiblend in side {side.id}, '\n f'row {y} had invalid number: {exc.args[0]}'\n ) from None\n return side" ]
[ "0.70291877", "0.66441834", "0.65732735", "0.65727365", "0.6233378", "0.6114747", "0.6030867", "0.6030412", "0.5988965", "0.5988965", "0.5988965", "0.5973305", "0.596118", "0.5778326", "0.5762084", "0.570791", "0.5695007", "0.5631295", "0.5628302", "0.5604736", "0.5587684", "0.5505666", "0.55005217", "0.5429213", "0.54151917", "0.5375394", "0.53682214", "0.5363884", "0.53538567", "0.53492963" ]
0.80055594
0
Setups a OrtographicLens with a given nearPlane, farPlane and filmSize. The filmSize is a tuple in the format (filmWidth, filmHeight) in world space.
def setupOrtographicLens(self, near=0.1, far=100.0, filmSize=(512, 512)): # self.debug("setupOrtographicLens(",near,",",far,",",filmSize,")") self.lens = OrthographicLens() self.lens.setNearFar(near, far) self.lens.setFilmSize(*filmSize) self.camera.setLens(self.lens) self.nearPlane = near self.farPlane = far self.rebuildMatrixCache()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setupPerspectiveLens(self, near=0.1, far=100.0, fov=(90, 90)):\n # self.debug(\"setupPerspectiveLens(\",near,\",\",far,\",\",fov,\")\")\n self.lens = PerspectiveLens()\n self.lens.setNearFar(near, far)\n self.lens.setFov(fov[0], fov[1])\n self.camera.setLens(self.lens)\n self.nearPlane = near\n self.farPlane = far\n self.rebuildMatrixCache()", "def build_perspective_camera(field_of_view=60.0,\n aspect_ratio=1.0,\n near_plane=0.01,\n far_plane=1000.0,\n position=(0.0, 0.0, 5.0),\n enable_zoom=False):\n context = build_context()\n camera = context.THREE.PerspectiveCamera.new_object(field_of_view,\n aspect_ratio, near_plane,\n far_plane)\n camera.position.set(*position)\n controls = context.THREE.OrbitControls.new_object(camera)\n controls.enableZoom = enable_zoom\n return camera", "def setFilmSize(self, size_x, size_y):\n self.lens.setFilmSize(size_x, size_y)\n self.rebuildMatrixCache()", "def _setUpCamera(self, planets):\n\n if not planets:\n self.camCenter = Vec2()\n self.camSize = MIN_CAM_SIZE\n return\n\n # Find most extreme points\n min_x = min([p.pos.x for p in planets])\n max_x = max([p.pos.x for p in planets])\n min_y = min([p.pos.y for p in planets])\n max_y = max([p.pos.y for p in planets])\n\n self.camCenter = Vec2((max_x + min_x) / 2, (max_y + min_y) / 2)\n self.camSize = max(max_x - min_x, max_y - min_y, MIN_CAM_SIZE)", "def set_fov(self, spacing=None, corner=(0, 0, 0), size=None,\n npoints=None):\n if spacing is None and npoints is None:\n spacing = [1, 1, 1]\n if size is None:\n size = self._from_img.shape\n slicer = lambda c, s, sp:\\\n tuple([slice(c[i], s[i] + c[i], sp[i]) for i in range(3)])\n # Adjust spacing to match desired field of view size\n if spacing is not None:\n fov_data = self._from_img.get_data()[slicer(corner, size, spacing)]\n else:\n fov_data = self._from_img.get_data()[slicer(corner, size, [1, 1, 1])]\n spacing = ideal_spacing(fov_data, npoints=npoints)\n fov_data = self._from_img.get_data()[slicer(corner, size, spacing)]\n self._from_data = fov_data\n self._from_npoints = (fov_data >= 0).sum()\n self._from_affine = subgrid_affine(self._from_img.get_affine(),\n slicer(corner, size, spacing))\n # We cache the voxel coordinates of the clamped image\n self._from_spacing = spacing\n self._vox_coords =\\\n np.indices(self._from_data.shape).transpose((1, 2, 3, 0))", "def camera(*args, aspectRatio: Union[float, bool]=0.0, cameraScale: Union[float, bool]=0.0,\n centerOfInterest: Union[float, bool]=0.0, clippingPlanes: bool=True, depthOfField:\n bool=True, displayFieldChart: bool=True, displayFilmGate: bool=True,\n displayFilmOrigin: bool=True, displayFilmPivot: bool=True, displayGateMask:\n bool=True, displayResolution: bool=True, displaySafeAction: bool=True,\n displaySafeTitle: bool=True, fStop: Union[float, bool]=0.0, farClipPlane:\n Union[float, bool]=0.0, farFocusDistance: Union[float, bool]=0.0, filmFit:\n Union[AnyStr, bool]=\"\", filmFitOffset: Union[float, bool]=0.0, filmRollOrder:\n Union[AnyStr, bool]=\"\", filmRollValue: Union[float, bool]=0.0, filmTranslateH:\n Union[float, bool]=0.0, filmTranslateV: Union[float, bool]=0.0, focalLength:\n Union[float, bool]=0.0, focusDistance: Union[float, bool]=0.0, homeCommand:\n Union[AnyStr, bool]=\"\", horizontalFieldOfView: Union[float, bool]=0.0,\n horizontalFilmAperture: Union[float, bool]=0.0, horizontalFilmOffset: Union[float,\n bool]=0.0, horizontalPan: Union[float, bool]=0.0, horizontalRollPivot: Union[float,\n bool]=0.0, horizontalShake: Union[float, bool]=0.0, journalCommand: bool=True,\n 
lensSqueezeRatio: Union[float, bool]=0.0, lockTransform: bool=True, motionBlur:\n bool=True, name: Union[AnyStr, bool]=\"\", nearClipPlane: Union[float, bool]=0.0,\n nearFocusDistance: Union[float, bool]=0.0, orthographic: bool=True,\n orthographicWidth: Union[float, bool]=0.0, overscan: Union[float, bool]=0.0,\n panZoomEnabled: bool=True, position: Union[List[float, float, float], bool]=None,\n postScale: Union[float, bool]=0.0, preScale: Union[float, bool]=0.0, renderPanZoom:\n bool=True, rotation: Union[List[float, float, float], bool]=None, shakeEnabled:\n bool=True, shakeOverscan: Union[float, bool]=0.0, shakeOverscanEnabled: bool=True,\n shutterAngle: Union[float, bool]=0.0, startupCamera: bool=True,\n stereoHorizontalImageTranslate: Union[float, bool]=0.0,\n stereoHorizontalImageTranslateEnabled: bool=True, verticalFieldOfView: Union[float,\n bool]=0.0, verticalFilmAperture: Union[float, bool]=0.0, verticalFilmOffset:\n Union[float, bool]=0.0, verticalLock: bool=True, verticalPan: Union[float, bool]=0.0,\n verticalRollPivot: Union[float, bool]=0.0, verticalShake: Union[float, bool]=0.0,\n worldCenterOfInterest: Union[List[float, float, float], bool]=None, worldUp:\n Union[List[float, float, float], bool]=None, zoom: Union[float, bool]=0.0, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass", "def set_camera(self, width, height):\n rightt = self.mat[:3, 0]\n upt = self.mat[:3, 1]\n pt = self.mat[:3, 2] * self.d\n t = self.mat[:3, 3]\n\n self.proj_mat = create_perspective_projection_matrix(np.radians(self.fov), 1. * width / height, self.near, self.far)\n gr3.setcameraprojectionparameters(self.fov, self.near, self.far)\n self.lookat_mat = create_look_at_matrix(pt + t, t, upt)\n gr3.cameralookat(pt[0] + t[0], pt[1] + t[1], pt[2] + t[2], t[0], t[1], t[2], upt[0], upt[1], upt[2])", "def from_parameters(size, M1, d1, R1, P1, M2, d2, R2, P2, R, T, E, F, Q):\n return StereoCamera(\n PinholeCamera(size, M1, d1, R1, P1),\n PinholeCamera(size, M2, d2, R2, P2),\n R, T, E, F, Q)", "def set_camera_fov(args_, client_, new_fov):\n\n args_.camera_bp.set_attribute(\"fov\", \"%s\" % new_fov)\n args_.camera_depth_bp.set_attribute(\"fov\", \"%s\" % new_fov)\n\n # destroy the original actor and make a new camera object\n args_.rgb_camera.camera_actor.stop()\n args_.depth_camera.camera_actor.stop()\n commands_ = [\n # destroy the previous actor first\n carla.command.DestroyActor(args_.depth_camera.camera_actor.id),\n carla.command.DestroyActor(args_.rgb_camera.camera_actor.id),\n # spawn the new actor\n carla.command.SpawnActor(\n args_.camera_bp, carla.Transform(), args_.spectator),\n carla.command.SpawnActor(\n args_.camera_depth_bp, carla.Transform(), args_.spectator),\n ]\n response_ = client_.apply_batch_sync(commands_)\n camera_actor_ids_ = [r.actor_id for r in response_[-2:]]\n camera_, camera_depth_ = world.get_actors(\n camera_actor_ids_)\n\n args_.rgb_camera = Camera(camera_, width=args_.width,\n height=args_.height,\n fov=new_fov,\n camera_type=\"rgb\")\n\n args_.depth_camera = Camera(\n camera_depth_, camera_type=\"depth\")\n\n args_.prev_camera_fov = new_fov", "def build_camera(config,\n aspect_ratio):\n return camera.Camera(\n origin=vector.Point(*config.origin),\n view_direction=vector.Vector(*config.view_direction),\n view_up=vector.Vector(*config.view_up),\n vertical_field_of_view=config.vertical_field_of_view,\n aspect_ratio=aspect_ratio,\n )", "def create_scene(width, height, fovy, focal_length, n_samples):\n # Create a splats rendering scene\n scene = 
copy.deepcopy(SCENE_SPHERE_HALFBOX_0)\n\n # Define the camera parameters\n scene['camera']['viewport'] = [0, 0, width, height]\n scene['camera']['fovy'] = np.deg2rad(fovy)\n scene['camera']['focal_length'] = focal_length\n\n return scene", "def set_camera_pose(dim_height, dim_width, camera_pos, camera_deg):\n # Ryan: fixed DR\n if ADD_DOM_RAND:\n cp = [float(i) for i in camera_pos.split(\",\")]\n cd = [float(i) for i in camera_deg.split(\",\")]\n else:\n cp = [rn(0.,scale=EPS),rn(0.,scale=EPS),rn(0.,scale=EPS)]\n cd = [rn(0.,scale=EPS),rn(0.,scale=EPS),rn(0.,scale=EPS)]\n\n # Select the camera and make it the active object so that we can manipulate it\n bpy.data.objects['Camera'].select = True\n bpy.context.scene.objects.active = bpy.data.objects['Camera']\n\n # https://blender.stackexchange.com/questions/86233/blender-resizing-my-image-in-half\n bpy.data.scenes['Scene'].render.resolution_percentage = 100.0\n bpy.context.scene.render.resolution_x = dim_width\n bpy.context.scene.render.resolution_y = dim_height\n\n # Set the x, y and z location (Top-down view). Daniel: height was 1.5 but let's do 1.45.\n bpy.context.object.location[0] = 0.5 + cp[0]\n bpy.context.object.location[1] = 0.5 + cp[1]\n bpy.context.object.location[2] = 1.45 + cp[2]\n\n # Set the x, y and z rotation (Top-down view).\n bpy.context.object.rotation_euler[0] = DEG_TO_RAD * (0 + cd[0])\n bpy.context.object.rotation_euler[1] = DEG_TO_RAD * (0 + cd[1])\n bpy.context.object.rotation_euler[2] = DEG_TO_RAD * (0 + cd[2])", "def construct_by_ellipse(a_xx, h_xy, b_yy, g_x, f_y, d, focal_length):\n gamma = - focal_length\n a = gamma**2 * a_xx\n b = gamma**2 * b_yy\n c = d\n d = gamma**2 * d\n f = -gamma*(f_y)\n g = -gamma*(g_x)\n h = gamma**2 * h_xy\n #Not needed\n u = gamma**2 * g_x\n v = gamma**2 * f_y\n w = -gamma*(d)\n return ConeCamera(a, b, c, f, g, h)", "def _build(self,\n size=_DEFAULT_PITCH_SIZE,\n goal_size=None,\n top_camera_distance=_TOP_CAMERA_DISTANCE,\n field_box=False,\n name='pitch'):\n super(Pitch, self)._build(name=name)\n self._size = size\n self._goal_size = goal_size\n self._top_camera_distance = top_camera_distance\n\n self._top_camera = self._mjcf_root.worldbody.add(\n 'camera',\n name='top_down',\n pos=[0, 0, top_camera_distance],\n zaxis=[0, 0, 1],\n fovy=_top_down_cam_fovy(self._size, top_camera_distance))\n\n self._mjcf_root.visual.headlight.set_attributes(\n ambient=[.4, .4, .4], diffuse=[.8, .8, .8], specular=[.1, .1, .1])\n\n # Ensure close up geoms are rendered by egocentric cameras.\n self._mjcf_root.visual.map.znear = 0.0005\n\n # Build groundplane.\n if len(self._size) != 2:\n raise ValueError('`size` should be a sequence of length 2: got {!r}'\n .format(self._size))\n self._ground_texture = self._mjcf_root.asset.add(\n 'texture',\n type='2d',\n builtin='checker',\n name='groundplane',\n rgb1=[0.3, 0.8, 0.3],\n rgb2=[0.1, 0.6, 0.1],\n width=300,\n height=300,\n mark='edge',\n markrgb=[0.8, 0.8, 0.8])\n self._ground_material = self._mjcf_root.asset.add(\n 'material', name='groundplane', texture=self._ground_texture)\n self._ground_geom = self._mjcf_root.worldbody.add(\n 'geom',\n type='plane',\n material=self._ground_material,\n size=list(self._size) + [max(self._size) * _GROUND_GEOM_GRID_RATIO])\n\n # Build walls.\n self._walls = []\n for wall_pos, wall_xyaxes in _wall_pos_xyaxes(self._size):\n self._walls.append(\n self._mjcf_root.worldbody.add(\n 'geom',\n type='plane',\n rgba=[.1, .1, .1, .8],\n pos=wall_pos,\n size=[1e-7, 1e-7, 1e-7],\n xyaxes=wall_xyaxes))\n\n # Build goal position 
detectors.\n # If field_box is enabled, offset goal by 1.0 such that ball reaches the\n # goal position detector before bouncing off the field_box.\n self._fb_offset = 0.5 if field_box else 0.0\n goal_size = self._get_goal_size()\n self._home_goal = props.PositionDetector(\n pos=(-self._size[0] + goal_size[0] + self._fb_offset, 0,\n goal_size[2]),\n size=goal_size,\n rgba=(0, 0, 1, 0.5),\n visible=True,\n name='home_goal')\n self.attach(self._home_goal)\n\n self._away_goal = props.PositionDetector(\n pos=(self._size[0] - goal_size[0] - self._fb_offset, 0, goal_size[2]),\n size=goal_size,\n rgba=(1, 0, 0, 0.5),\n visible=True,\n name='away_goal')\n self.attach(self._away_goal)\n\n # Build inverted field position detectors.\n self._field = props.PositionDetector(\n pos=(0, 0),\n size=(self._size[0] - 2 * goal_size[0],\n self._size[1] - 2 * goal_size[0]),\n rgba=(1, 0, 0, 0.1),\n inverted=True,\n visible=True,\n name='field')\n self.attach(self._field)\n\n # Build field box.\n self._field_box = []\n if field_box:\n for wall_pos, wall_xyaxes in _wall_pos_xyaxes(\n (self._field.upper - self._field.lower) / 2.0):\n self._field_box.append(\n self._mjcf_root.worldbody.add(\n 'geom',\n type='plane',\n rgba=[.3, .3, .3, .3],\n pos=wall_pos,\n size=[1e-7, 1e-7, 1e-7],\n xyaxes=wall_xyaxes))", "def setFov(self,fov):\n self.light.node().getLens().setFov(fov)", "def create_cam_fov(self, name):\n\n # Vertices of FOV\n V = [\n (0, 0, -self.SAT_PROPS[\"Alt\"]),\n tuple(self.CAM_PROPS[name][\"Intercepts\"][:, 0]),\n tuple(self.CAM_PROPS[name][\"Intercepts\"][:, 1]),\n tuple(self.CAM_PROPS[name][\"Intercepts\"][:, 2]),\n tuple(self.CAM_PROPS[name][\"Intercepts\"][:, 3])\n ]\n\n # Faces of FOV\n F = [(0, 1, 2), (0, 2, 3), (0, 3, 4), (0, 4, 1)]\n\n # Create building blocks of polydata\n cam = vtk.vtkPolyData()\n points = vtk.vtkPoints()\n polys = vtk.vtkCellArray()\n scalars = vtk.vtkFloatArray()\n\n # Load the point, cell and data attributes\n for i in range(5):\n points.InsertPoint(i, V[i])\n for i in range(4):\n polys.InsertNextCell( self.mkVtkIdList(F[i]))\n for i in range(5):\n scalars.InsertTuple1(i,i)\n\n # Assign the pieces to the vtkPolyData.\n cam.SetPoints(points)\n del points\n cam.SetPolys(polys)\n del polys\n cam.GetPointData().SetScalars(scalars)\n del scalars\n\n # Mapper\n mapper = vtk.vtkPolyDataMapper()\n mapper.SetInputData(cam)\n mapper.ScalarVisibilityOff()\n\n # Actor\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n actor.GetProperty().SetColor(0.5, 1, 0.5)\n actor.GetProperty().SetAmbient(0.5)\n actor.GetProperty().SetOpacity(0.1)\n\n return actor", "def perspectiveFovLH(field_of_view, aspect, znear, zfar):\n h = 1 / tan(field_of_view / 2)\n w = h / aspect\n m = [\n [w, 0, 0, 0],\n [0, h, 0, 0],\n [0, 0, zfar / (zfar - znear), 1],\n [0, 0, (znear * zfar) / (znear - zfar), 0],\n ]\n return Matrix(m)", "def test_register_fov(self):\n task = MesoscopeFOV(self.session_path, device_collection='raw_imaging_data', one=self.one)\n mlapdv = {'topLeft': [2317.2, -1599.8, -535.5], 'topRight': [2862.7, -1625.2, -748.7],\n 'bottomLeft': [2317.3, -2181.4, -466.3], 'bottomRight': [2862.7, -2206.9, -679.4],\n 'center': [2596.1, -1900.5, -588.6]}\n meta = {'FOV': [{'MLAPDV': mlapdv, 'nXnYnZ': [512, 512, 1], 'roiUUID': 0}]}\n with unittest.mock.patch.object(self.one.alyx, 'rest') as mock_rest:\n task.register_fov(meta, 'estimate')\n calls = mock_rest.call_args_list\n self.assertEqual(3, len(calls))\n\n args, kwargs = calls[1]\n self.assertEqual(('fields-of-view', 'create'), args)\n expected = 
{'data': {'session': None, 'imaging_type': 'mesoscope', 'name': 'FOV_00', 'stack': None}}\n self.assertEqual(expected, kwargs)\n\n args, kwargs = calls[2]\n self.assertEqual(('fov-location', 'create'), args)\n expected = ['field_of_view', 'default_provenance', 'coordinate_system', 'n_xyz', 'provenance', 'x', 'y', 'z',\n 'brain_region']\n self.assertCountEqual(expected, kwargs.get('data', {}).keys())\n self.assertEqual(5, len(kwargs['data']['brain_region']))\n self.assertEqual([512, 512, 1], kwargs['data']['n_xyz'])\n self.assertIs(kwargs['data']['field_of_view'], mock_rest().get('id'))\n self.assertEqual('E', kwargs['data']['provenance'])\n self.assertEqual([2317.2, 2862.7, 2317.3, 2862.7], kwargs['data']['x'])\n\n # Check dry mode with suffix input = None\n for file in self.session_path.joinpath('alf', 'FOV_00').glob('mpciMeanImage.*'):\n file.replace(file.with_name(file.name.replace('_estimate', '')))\n self.one.mode = 'local'\n with unittest.mock.patch.object(self.one.alyx, 'rest') as mock_rest:\n out = task.register_fov(meta, None)\n mock_rest.assert_not_called()\n self.assertEqual(1, len(out))\n self.assertEqual('FOV_00', out[0].get('name'))\n locations = out[0]['location']\n self.assertEqual(1, len(locations))\n self.assertEqual('L', locations[0].get('provenance', 'L'))", "def __init__(self, ox, oy, resolution, rr):\n\n self.min_x, self.min_y = None, None\n self.max_x, self.max_y = None, None\n self.x_width, self.y_width, self.obstacle_map = None, None, None\n self.resolution = resolution\n self.rr = rr\n self.calc_obstacle_map(ox, oy)\n self.motion = self.get_motion_model()", "def set_hfov_dimension(self, hfov, aspect_ratio, near, far):\n r = near * np.tan(hfov / 2)\n l = -r\n t = r / aspect_ratio\n b = -t\n self.__fdim = l, r, b, t, near, far\n return self", "def set_camera(di):\n di.cam_mode = di.FIXED\n di.cam_target.cart = vec3(0.1,-0.2,0)\n di.cam_eye.spheric = spheric3(5,0.6,-1.0)", "def fl_set_object_size(ptr_flobject, width, height):\n _fl_set_object_size = library.cfuncproto(\n library.load_so_libforms(), \"fl_set_object_size\", \\\n None, [cty.POINTER(xfdata.FL_OBJECT), xfdata.FL_Coord,\n xfdata.FL_Coord], \\\n \"\"\"void fl_set_object_size(FL_OBJECT * obj, FL_Coord w,\n FL_Coord h)\"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n i_width = library.convert_to_FL_Coord(width)\n i_height = library.convert_to_FL_Coord(height)\n library.keep_elem_refs(ptr_flobject, width, i_width, height, i_height)\n _fl_set_object_size(ptr_flobject, i_width, i_height)", "def setup_camera(self) -> None:\n self.world.camera.update(\n cam_base_pos=(0, -3, 0),\n cam_dist=1.2*self.world.env_dim,\n cam_yaw=0,\n cam_pitch=-60\n )", "def set_projection_from_camera(K, width, height):\n\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n \n fx = K[0,0]\n fy = K[1,1]\n fovy = 2*np.arctan(0.5*height/fy)*180/np.pi\n aspect = (width*fy)/(height*fx)\n\n # define the near and far clipping planes\n near = 0.1\n far = 100.0\n\n # set perspective\n # Need to apt-get install freeglut3 and freeglut3-dev\n # https://github.com/thecountoftuscany/PyTeapot-Quaternion-Euler-cube-rotation/issues/1\n gluPerspective(fovy,aspect,near,far)\n glViewport(0,0,width,height)", "def project(self, win_width, win_height, fov, viewer_distance):\r\n factor = fov / (viewer_distance + self.z)\r\n x = self.x * factor + win_width / 2\r\n y = -self.y * factor + win_height / 2\r\n return Point3D(x, y, 1)", "def __init__(self, graphics, plane):\n # screen / plane\n self.graphics = graphics\n\n 
self.view = [graphics.screen_width, graphics.screen_height]\n self.plane = list(plane)\n self.recalculate()\n # camera\n self.anchor = None\n self.pan = False # smooth scroll\n self.find_time = 0.5 # seconds\n self.pan_speed = 50.0 # pos/sec\n self.last_time = time.time()", "def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, 1)", "def setNearFar(self, near, far):\n self.light.node().getLens().setNearFar(near, far)", "def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, self.z)", "def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, self.z)" ]
[ "0.6712774", "0.6084861", "0.5635632", "0.55978215", "0.5591141", "0.5473505", "0.5404724", "0.54038227", "0.52609247", "0.52150947", "0.5176009", "0.51734596", "0.5155422", "0.5127603", "0.4930873", "0.49277773", "0.48962718", "0.4891529", "0.4888189", "0.4877433", "0.48719573", "0.48485836", "0.47847512", "0.4778747", "0.47785124", "0.47781646", "0.4772128", "0.4771063", "0.47616202", "0.47616202" ]
0.85450816
0
Gets called when shadow sources was updated
def onUpdated(self):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def refresh_source(self):\n pass", "def activate_source(self):\n pass", "def _update_modified_data_sources(self):\n new_last_imported = datetime.utcnow()\n self._update_modified_since(self.last_imported)\n self.last_imported = new_last_imported", "def handle_reload_toolbox(self):", "def update(src):", "def beforeUpdate(self):", "def onFrameUpdated(self):\n pass", "def set_resolved_sources(self, sources):\r\n self._resolved_sources = sources", "def on_refresh(self):\n pass", "def _update_modified_since(self, timestamp):\n for data_source in self.data_source_provider.get_data_sources_modified_since(timestamp):\n pillow_logging.info(f'updating modified registry data source: {data_source.domain}: {data_source._id}')\n self._add_or_update_data_source(data_source)", "def _admin_reload_media_sources(self):\n\t\tcur = self.app.blocking_db_con.cursor(cursor_factory=DictCursor)\n\t\tcur.execute(\"\"\"\n\t\t\tselect\n\t\t\t\tsource_name,\n\t\t\t\tsource_id\n\t\t\tfrom\n\t\t\t\tmedia_sources\n\t\t\t\"\"\", ())\n\t\trows = cur.fetchall()\n\t\tself.media_sources = {}\n\t\tfor r in rows:\n\t\t\tself.media_sources[r[0]] = r[1]\n\t\tcur.close()\n\t\tmsg = \"Loaded %d media_sources\" % len(self.media_sources.keys())\n\t\tself.log.debug(msg)\n\t\treturn msg", "def sources(self):\n raise NotImplementedError()", "def light_sync(self):", "def _update(self):\n pass", "def getChangeSources():", "def refresh_plugin(self):\n pass", "def _hook(self):", "def InjectSources(self):\n # This role is passed onto the injector\n self._injector.InjectSources(self)", "def on_load(self):\n pass", "def on_load(self):\n pass", "def update_source(self):\n if self.verbose:\n print(\"Updating source\")\n self.source.data = self.source_data\n if self.source.selected is not None:\n self.source.selected.indices = self.selection\n for c in self.callbacks[\"update_source\"]:\n c()\n self.pending_update = False\n if self.update_buffer is not None:\n self.context.doc.add_next_tick_callback(self.update_buffer)\n self.update_buffer = None", "def update(self):", "def update(self):", "def update(self):", "def update(self):\r\n pass", "def changed(self):\n\t\tpass", "def on_run(self):\r\n\r\n\t\tpass", "def on_load(self):", "def Sources():\n return _sources", "def update( ):\r\n pass" ]
[ "0.69026774", "0.59549236", "0.58609873", "0.5794801", "0.5756406", "0.57472444", "0.572068", "0.5648752", "0.5624924", "0.55939525", "0.5568056", "0.5460217", "0.5427892", "0.5391734", "0.53892064", "0.5381881", "0.5369996", "0.5367086", "0.5359593", "0.5359593", "0.5359411", "0.5344861", "0.5344861", "0.5344861", "0.5335635", "0.5333128", "0.53083783", "0.53059304", "0.53058743", "0.53057617" ]
0.604301
1
Initializes the public_id, secret_id and the redirect_uri so that init_user can be called anytime
def __init__(self) -> None: self._public_id = 'daf1fbca87e94c9db377c98570e32ece' self._secret_id = '1a674398d1bb44859ccaa4488df1aaa9' self._redirect_uri = 'https://pass-post.netlify.app'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init():\n create_user(app)\n get_all_user()", "def initialize(self, *a, **kw):\n webapp2.RequestHandler.initialize(self, *a, **kw)\n uid = self.read_secure_cookie('user_id')\n self.user = uid and User.by_id(int(uid))", "def initialize(self, *a, **kw):\n\n webapp2.RequestHandler.initialize(self, *a, **kw)\n uid = self.read_secure_cookie('user_id')\n self.user = uid and User.by_id(int(uid))", "def __init__(self, requestor, client_id, client_secret, redirect_uri=None):\n super(TrustedAuthenticator, self).__init__(requestor, client_id,\n redirect_uri)\n self.client_secret = client_secret", "def initialize(self, *a, **kw):\n webapp2.RequestHandler.initialize(self, *a, **kw)\n uid = self.get_uid_from_cookie()\n self.user = uid and User.get_by_id(int(uid))", "def init_user(self) -> Any:\n return \\\n spotipy.Spotify(auth_manager=spotipy.oauth2.SpotifyOAuth(scope=\"playlist-modify-public\",\n client_id=self._public_id, client_secret=self._secret_id,\n redirect_uri=self._redirect_uri))", "def initialize(self):\n self.login()", "def __init__(self):\n self.application_id = None\n self.secret = None\n self.token = {}", "def user_init(self):\n pass", "def __init__(self):\n self.authurl = Config().auth\n self.baseurl = Config().api\n self.s = Session()\n self.s.headers = {'Accept': 'application/json'}\n data = {\"grant_type\": \"client_credentials\", \"scope\": \"/read-public\", \"client_id\": Config().client_id,\n \"client_secret\": Config().client_secret}\n r = self.s.request(method=\"post\", url=self.authurl, data=data)\n self.s.headers = {'Accept': 'application/json', \"Access token\": r.json()[\"access_token\"]}", "def __init__(self, requestor, client_id, redirect_uri=None):\n self._requestor = requestor\n self.client_id = client_id\n self.redirect_uri = redirect_uri", "def __init__(self,\n client_id,\n client_secret):\n self.__client_id = client_id\n self.__client_secret = client_secret", "def __init__(__self__, *,\n client_id: pulumi.Input[str],\n secret: pulumi.Input[str]):\n pulumi.set(__self__, \"client_id\", client_id)\n pulumi.set(__self__, \"secret\", secret)", "def __init__(self):\n self.sp, self.user = self.init_auth_client()\n self.logger = logging.getLogger(__name__)", "def __init__(self, oauth=None, client_id=None):\n\t\tself.oauth = oauth\n\t\tself.client_id = client_id or self.default_client_id", "def __init__(self, client_auth_type, client_id, client_secret=None):\n self.client_auth_type = client_auth_type\n self.client_id = client_id\n self.client_secret = client_secret", "def __init__(self, auth_key, auth_secret):\n\n self._auth_key = auth_key\n self._auth_secret = auth_secret", "def __init__(self, callback_url):\n # Credientials\n self.URI_SCHEME = \"https\"\n self.API_ENDPOINT = \"rightsignature.com\"\n self.REQUEST_TOKEN_URL = \"/oauth/request_token\"\n self.ACCESS_TOKEN_URL = \"/oauth/access_token\"\n self.REDIRECT_URL = \"/oauth/authorize\"\n self.version = \"1.0\"\n self.signature_method = \"HMAC-SHA1\" # as I said\n self.BASE_URL = \"%s://%s\" % (self.URI_SCHEME, self.API_ENDPOINT)\n\n self.API_KEY = \"\"\n self.API_SECRET = \"\"\n self.CALLBACK_URL = callback_url\n self.request_token = None # that comes later\n self.access_token = None # that comes later and later\n\n self.request_token_secret = None\n self.access_token_secret = None\n\n self.verifier = None\n self.error = None\n\n self.request_oauth_nonce = None\n self.request_oauth_timestamp = None\n self.access_oauth_nonce = None\n self.access_oauth_timestamp = None\n self.request_oauth_error = None\n 
self.access_oauth_error = None", "def __init__(__self__, *,\n client_id: pulumi.Input[str],\n secret: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"client_id\", client_id)\n if secret is not None:\n pulumi.set(__self__, \"secret\", secret)", "def __init__(__self__, *,\n client_id: Optional[pulumi.Input[str]] = None,\n client_secret: Optional[pulumi.Input[str]] = None,\n metadata_url: Optional[pulumi.Input[str]] = None,\n scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):\n if client_id is not None:\n pulumi.set(__self__, \"client_id\", client_id)\n if client_secret is not None:\n pulumi.set(__self__, \"client_secret\", client_secret)\n if metadata_url is not None:\n pulumi.set(__self__, \"metadata_url\", metadata_url)\n if scopes is not None:\n pulumi.set(__self__, \"scopes\", scopes)", "def __init__(self):\n self.secret = None\n self.on_hook_init()", "def __init__(self, client_id=None, client_secret=None):\n self.client_id = client_id\n self.client_secret = client_secret\n self.access_token = None\n self.refresh_token = None\n self.token_expiration_time = None", "def __init__(self, client_id, client_secret):\n self.client_id = client_id\n self.client_secret = client_secret\n self.token = None\n self.request_time = None\n self._initialized = False", "def config(cls, clientId, clientSecret, redirectUri):\n\n # TODO: make redirectUri not mandatory.\n\n cls.clientId = clientId\n cls.clientSecret = clientSecret\n cls.redirectUri = redirectUri", "def __init__(self, host, access_key, secret_key):\n self._host = host\n self._access_key = access_key\n self._secret_key = secret_key", "def __init__(self, host, access_key, secret_key):\n self._host = host\n self._access_key = access_key\n self._secret_key = secret_key", "def __init__(self, host, access_key, secret_key):\n self._host = host\n self._access_key = access_key\n self._secret_key = secret_key", "def on_start(self):\n admin_user = os.environ['ADMIN_USER']\n admin_password = os.environ['ADMIN_PASSWORD']\n admin_domain_name = os.environ['ADMIN_DOMAIN_NAME']\n admin_project_id = os.environ['ADMIN_PROJECT_ID']\n HEADERS['X-Auth-Token'] = self._get_token(admin_user,\n admin_password,\n admin_domain_name,\n project_id=admin_project_id)\n # Create test user\n self.username = 'test_user'\n self.password = 'Password1'\n self.user_domain_id = 'default'\n self.user_domain_name = 'Default'\n self.project_id = self._create_project()['project']['id']\n self._create_user(self.username, self.password, self.user_domain_id,\n self.project_id)", "def __init__(self, user_id=None, access_token=None):\n default_attr = dict(user_id=str(),\n access_token=str())\n self.user_id = user_id\n self.access_token = access_token\n self._set_default_attr(default_attr)", "def __init__(self, access_key, secret_key, **kwargs):\r\n pass" ]
[ "0.6612536", "0.65369034", "0.6525771", "0.63945854", "0.6338878", "0.6335516", "0.62250316", "0.6218013", "0.61803603", "0.61792296", "0.6086227", "0.6085263", "0.597309", "0.5938068", "0.5929208", "0.59091866", "0.5891024", "0.58577573", "0.5855543", "0.58548415", "0.58487767", "0.5831338", "0.5816402", "0.57932323", "0.5792985", "0.5792985", "0.5792985", "0.5780873", "0.5779737", "0.57796705" ]
0.7228125
0
Initializes an instance of spotipy.Spotify that is logged in
def init_user(self) -> Any: return \ spotipy.Spotify(auth_manager=spotipy.oauth2.SpotifyOAuth(scope="playlist-modify-public", client_id=self._public_id, client_secret=self._secret_id, redirect_uri=self._redirect_uri))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, username):\n self.spotify = spotipy.Spotify(simple_auth_token(username))", "def auth(self):\n token = spotipy.util.prompt_for_user_token(self.username,\n self.scope,\n client_id = self.client_id,\n client_secret = self.client_secret,\n redirect_uri= self.redirect_uri)\n if token:\n self.spotify = spotipy.Spotify(auth=token)\n else:\n print(colored.stylize(\"\"\"\\n[*] \"\"\", colored.fg(\"light_red\")) + 'Cant get token for: %s\\n' % (self.username))\n exit()", "def init_auth_client(self):\n with open(\"config.yml\", 'r') as ymlfile:\n cfg = yaml.load(ymlfile)\n token = util.prompt_for_user_token(\n cfg['username'],\n scope=cfg['scope'],\n client_id=cfg['spotipy_client_id'],\n client_secret=cfg['spotipy_client_secret'],\n redirect_uri=cfg['spotipy_redirect_uri'])\n sp = spotipy.Spotify(auth=token)\n return sp, cfg['username']", "def _create_user_object(self) -> None:\n\n token = util.prompt_for_user_token(self._USERNAME, self.scope, self._CLIENT_ID, self._CLIENT_SECRET, self.redirect_uri)\n self.spotipyObject = spotipy.Spotify(auth=token)", "def authenticate_spotify_api(SPOTIPY_CLIENT_ID, SPOTIPY_CLIENT_SECRET):\r\n auth_manager = SpotifyClientCredentials(client_id = SPOTIPY_CLIENT_ID, \r\n client_secret=SPOTIPY_CLIENT_SECRET)\r\n \r\n return spotipy.Spotify(auth_manager=auth_manager)", "def Connect(self,scope):\n\n \"\"\"\n Calling util.prompt_for_user_token will open Spotify’s application authorization\n page in your browser (and require you to log in if you are not already logged in\n to spotify.com), unless a locally cached access token exist from a previous authorization/authentication.\n \"\"\"\n try:\n token = util.prompt_for_user_token(\n self.username,\n scope,\n self.client_id,\n self.secret_id,\n self.redirect_uri)\n except ImportError:\n self._isConnected = False\n print(\" onnecting to Spotify failed\") \n\n\n if token:\n sp = spotipy.Spotify(auth=token)\n self._isConnected = True\n return sp\n else:\n print(\"Can't get token for\", self.username)\n self._isConnected = False", "def __init__(\n self,\n clientID,\n secretID,\n redirctURI,\n username\n ):\n\n print('SpotifClient starts...')\n \n self.client_id = clientID\n self.secret_id = secretID\n self.redirect_uri = redirctURI\n self.username = username\n self._isConnected = False\n\n #self.Connect()", "async def initialize(self):\r\n self.access_token = await async_get_value(SPOTIFY_ACCESS_TOKEN)\r\n self.refresh_token = await async_get_value(SPOTIFY_REFRESH_TOKEN)\r\n self.should_poll = await async_get_value(SPOTIFY_SHOULD_POLL)\r\n request_code = self.get_currently_playing().status_code\r\n if request_code == requests.codes.ok or request_code == requests.codes.no_content:\r\n self.start_polling_and_refresh()\r\n return\r\n\r\n # Go through the oauth flow.\r\n self.auth_thread = StoppableThread(target=self.check_and_test_auth)\r\n self.auth_thread.start()\r\n return", "def get_spotify_token(self):\n scope = \"playlist-modify-public playlist-modify-private user-read-email user-library-modify playlist-read-private\"\n token = spotipy.util.prompt_for_user_token(\n username=self.username,\n scope=scope,\n client_id=secrets.client_id,\n client_secret=secrets.client_secret,\n redirect_uri=secrets.redirect_uri\n )\n sp = spotipy.Spotify(auth=token)\n return sp", "def __init__(self):\n self.sp, self.user = self.init_auth_client()\n self.logger = logging.getLogger(__name__)", "def initialize(self):\n self.login()", "def __init__(self):\r\n # create a session id\r\n self.session = ViSession()", "def 
authenticate(redirect_uri, client_cred_manager, username, scope,client_id,client_secret):\r\n\r\n sp = spotipy.Spotify(client_credentials_manager = client_cred_manager)\r\n token = util.prompt_for_user_token(username, scope, client_id, client_secret, redirect_uri)\r\n if token:\r\n sp = spotipy.Spotify(auth=token)\r\n else:\r\n print(\"Can't get token for\", username)\r\n return sp", "def __init__(self, ctx):\n # Debug log\n self.log = logging.getLogger('ipsv.login')\n\n self.cookiejar = cookiejar()\n self.cookies = {cookie.name: cookie.value for cookie in self.cookiejar}\n\n self.browser = Browser()\n self.browser.set_cookiejar(self.cookiejar)", "def __init__(self, kodi_helper, netflix_session):\n self.kodi_helper = kodi_helper\n self.netflix_session = netflix_session\n self.credentials = self.kodi_helper.get_credentials()\n self.profiles = []\n self.video_list_cache = {}\n self.prefetch_login()", "def __init__(self):\n self.token = None\n self.login()", "def __init__(self, username, password=False):\n self.username = username\n self.cookies, self.token = get_cookies_and_token()\n self._login(password)", "def init_session(self):\n pass", "def init_session(self):\n pass", "def __init__(self):\n self.authurl = Config().auth\n self.baseurl = Config().api\n self.s = Session()\n self.s.headers = {'Accept': 'application/json'}\n data = {\"grant_type\": \"client_credentials\", \"scope\": \"/read-public\", \"client_id\": Config().client_id,\n \"client_secret\": Config().client_secret}\n r = self.s.request(method=\"post\", url=self.authurl, data=data)\n self.s.headers = {'Accept': 'application/json', \"Access token\": r.json()[\"access_token\"]}", "def __init__(self, console, numeric=False):\n\n self.spotify = Spotify()\n self.console = console\n self.watson = WatsonIntegrator(self.console)\n self.running = True\n self.numeric = numeric #Boolean stating whether the user is supposed to answer the requests with options via numbers\n\n self.functionCodes = ['login', 'NewQueue', 'AddArtists', 'AddFromPlaylist', 'ShowUpcoming', 'Shuffle']\n # self.listIDs = {'mood:party': '2zJS01uA6baDkuyd3bpD8J', 'mood:motivation': '09S8u5CfsqNykVe4PS7y5x', 'mood:chill': '2gSm5ak3xfip096FV2MutF',\n # 'top:dea': '2pnMZd3r7IrqQVRBxe9CCj', 'top:ignacio': '1J7sfsybA99F8w2UOpQJlM', 'top:alejandro': '5iN04uNssYaPDtgrHCaYUY',\n # 'top:steffen': '1M4nNxSs4748wpBiufTan8', 'playlist:paolo': '1YGHkKQfOpEQHIO06j71Dy',\n # 'playlist:alvaro': '1FTlyHI9BQfiPgINi1zR7a', 'playlist:professor': '78sVdD9qWLWGwJZnioJ6xX'}\n self.listIDs = {'playlist:Alejandro:Christmas': '1hwDrMP1y3fn6QgDUmFysl', \"playlist:Paolo's Playlists\": '3Oev8yETOHlczbqhmURedk',\n 'artist:Mariah Carey': '5VfX5baCsv3QV5y3Z9W2s9', 'playlist:Dea:Traffic': '2pnMZd3r7IrqQVRBxe9CCj',\n \"playlist:Alvaro's Top\": '7lfLPKDPICPC3kffI2A69B'}", "def initialize(self, *a, **kw):\n webapp2.RequestHandler.initialize(self, *a, **kw)\n uid = self.get_uid_from_cookie()\n self.user = uid and User.get_by_id(int(uid))", "def initialize(self, *a, **kw):\n webapp2.RequestHandler.initialize(self, *a, **kw)\n uid = self.read_secure_cookie('user_id')\n self.user = uid and User.by_id(int(uid))", "def create_token():\n def token_helper():\n token = util.prompt_for_user_token(username=\"robbo1992\", scope='user-library-read playlist-modify-private playlist-modify',\n client_id=config[\"spotify\"][\"client_id\"], client_secret=config[\"spotify\"][\"secret_id\"],\n redirect_uri='http://localhost:8080', cache_path=spotify_cache)\n return token\n if token_helper():\n log.debug(\"Succesfully 
generated a spotify token for authentication\")\n return spotipy.Spotify(auth=token_helper())\n else:\n if motley.internet:\n if token_helper():\n log.debug(\"Succesfully generated a spotify token for authentication\")\n return spotipy.Spotify(auth=token_helper())\n else:\n log.error(\"Authentication error in create_token method.\")", "def __init__(self):\n self.auth()", "def get_spotify_instance(user_id, scope=None, client_id=None, client_secret=None, redirect_uri=None):\n\n token = util.prompt_for_user_token(\n user_id,\n scope=scope,\n client_id=client_id,\n client_secret=client_secret,\n redirect_uri=redirect_uri)\n\n return spotipy.Spotify(auth=token)", "def __init__(self):\n\n # TODO: Add login and data grab logic", "def initialize(self, *a, **kw):\n\n webapp2.RequestHandler.initialize(self, *a, **kw)\n uid = self.read_secure_cookie('user_id')\n self.user = uid and User.by_id(int(uid))", "def get_spotify(s_creds, usernum):\n # Authorize Spotify\n\n token = spotipy.util.prompt_for_user_token(\n s_creds[\"usernames\"][usernum],\n s_creds[\"scopes\"],\n s_creds[\"client_id\"],\n s_creds[\"client_secret\"],\n s_creds[\"redirect_uri\"],\n )\n\n return spotipy.Spotify(auth=token)", "def async_setup(hass, config):\n import spotipy.oauth2\n import json\n global AIS_SPOTIFY_TOKEN\n\n try:\n ws_resp = aisCloud.key(\"spotify_oauth\")\n json_ws_resp = ws_resp.json()\n spotify_redirect_url = json_ws_resp[\"SPOTIFY_REDIRECT_URL\"]\n spotify_client_id = json_ws_resp[\"SPOTIFY_CLIENT_ID\"]\n spotify_client_secret = json_ws_resp[\"SPOTIFY_CLIENT_SECRET\"]\n spotify_scope = json_ws_resp[\"SPOTIFY_SCOPE\"]\n try:\n ws_resp = aisCloud.key(\"spotify_token\")\n key = ws_resp.json()[\"key\"]\n AIS_SPOTIFY_TOKEN = json.loads(key)\n except:\n AIS_SPOTIFY_TOKEN = None\n _LOGGER.info(\"No AIS_SPOTIFY_TOKEN\")\n except Exception as e:\n _LOGGER.error(\"No spotify oauth info: \" + str(e))\n return False\n\n cache = hass.config.path(DEFAULT_CACHE_PATH)\n gate_id = ais_global.get_sercure_android_id_dom()\n oauth = spotipy.oauth2.SpotifyOAuth(spotify_client_id, spotify_client_secret, spotify_redirect_url,\n scope=spotify_scope, cache_path=cache, state=gate_id)\n token_info = oauth.get_cached_token()\n if not token_info:\n _LOGGER.info(\"no spotify token in cache;\")\n if AIS_SPOTIFY_TOKEN is not None:\n with open(cache, 'w') as outfile:\n json.dump(AIS_SPOTIFY_TOKEN, outfile)\n token_info = oauth.get_cached_token()\n if not token_info:\n _LOGGER.info(\"no spotify token; run configurator\")\n async_request_configuration(hass, config, oauth)\n return True\n\n if hass.data.get(DOMAIN):\n configurator = hass.components.configurator\n configurator.request_done(hass.data.get(DOMAIN))\n del hass.data[DOMAIN]\n\n # register services\n data = hass.data[DOMAIN] = SpotifyData(hass, oauth)\n\n # service = configured_service(hass)\n\n @asyncio.coroutine\n def search(call):\n _LOGGER.info(\"search \" + str(call))\n yield from data.process_search_async(call)\n\n def select_track_name(call):\n _LOGGER.info(\"select_track_name\")\n data.process_select_track_name(call)\n\n def change_serive(call):\n _LOGGER.info(\"change_serive\")\n data.change_serive(call)\n\n hass.services.async_register(DOMAIN, 'search', search)\n hass.services.async_register(DOMAIN, 'select_track_name', select_track_name)\n hass.services.async_register(DOMAIN, 'change_serive', change_serive)\n\n return True" ]
[ "0.8046805", "0.7582148", "0.74957615", "0.7105578", "0.6914381", "0.6837434", "0.6777287", "0.6466859", "0.6430304", "0.6380559", "0.62434363", "0.6243389", "0.61116755", "0.61068124", "0.60647935", "0.5974983", "0.5959601", "0.59238493", "0.59238493", "0.5872101", "0.58713", "0.58219856", "0.5813253", "0.5812896", "0.58079046", "0.5801208", "0.57868564", "0.5780477", "0.57715684", "0.57481474" ]
0.7950831
1
Return the audio features of a song
def get_song_features(self, song_id: str) -> List[float]: user = self.init_user() user.trace = True features = user.audio_features(song_id)[0] return [features['acousticness'], features['danceability'], features['energy'], features['duration_ms'], features['instrumentalness'], features['valence'], features['tempo'], features['liveness'], features['loudness'], features['speechiness'], features['key']]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_spotify_features(search):\n\t\n\t# Configure API credentials\n\tclient_credentials_manager = SpotifyClientCredentials(client_id=config.SPOTIFY_CID, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\tclient_secret=config.SPOTIFY_SECRET)\n\tsp = spotipy.Spotify(client_credentials_manager = client_credentials_manager)\n\t\n\t# Find song ID\n\tquery = sp.search(search)\n\tsong_id = query['tracks']['items'][0]['id']\n\n\t# Use song ID to pull metadata\n\taudio_feature = sp.audio_features(song_id)[0]\n\t\n\treturn audio_feature", "def songfeature_get(): # noqa: E501\n query = 'SELECT * FROM SongFeatures'\n results = query_to_dict(query)\n features_list = []\n for r in results:\n features_list.append(\n Songfeature(acousticness= r['Acousticness'],\n danceability= r['Danceability'],\n duration_ms= r['Duration_ms'],\n energy= r['Energy'],\n instrumentalness= r['Instrumentalness'],\n musicalkey= r['MusicalKey'],\n liveness= r['Liveness'],\n loudness= r['Loudness'],\n mode= r['Mode'],\n speechiness= r['Speechiness'],\n tempo= r['Tempo'],\n timesignature= r['Time_signature'],\n valence= r['Valence'],\n songid= r['SongID']))\n return features_list", "def get_audio_features_of_tracks(self, playlist_items: List[Dict]):\n audio_features_vectors = []\n for track_object in playlist_items:\n track_id = _get_id(track_object)\n track_features = self.spotify_client.get_audio_features(track_id)\n audio_features_vectors.append(list(track_features.values()))\n return np.array([vec for vec in audio_features_vectors])", "def get_song_features(tid):\n\n # dictionary of features to return\n spotify_track_data = SpotifyData[tid]\n\n features = {}\n features['name'] = spotify_track_data.name\n features['artists'] = spotify_track_data.artists\n features['popularity'] = spotify_track_data.popularity\n features['album'] = spotify_track_data.album_name\n features['danceability'] = spotify_track_data.danceability\n features['energy'] = spotify_track_data.energy\n features['key'] = spotify_track_data.key\n features['loudness'] = spotify_track_data.loudness\n features['mode'] = spotify_track_data.mode\n features['speechiness'] = spotify_track_data.speechiness\n features['acousticness'] = spotify_track_data.acousticness\n features['instrumentalness'] = spotify_track_data.instrumentalness\n features['liveness'] = spotify_track_data.liveness\n features['valence'] = spotify_track_data.valence\n features['tempo'] = spotify_track_data.tempo\n features['duration_ms'] = spotify_track_data.duration_ms\n features['time_signature'] = spotify_track_data.time_signature\n\n return features", "def get_features(file, song_index=0):\n\n chroma = get_chroma(file, song_index)\n timbre = get_timbre(file, song_index)\n max_loudness = get_max_loudness(file, song_index)\n\n # normalize to get ~ 0-1\n timbre = (timbre + 1000) / 1200\n max_loudness = (max_loudness + 70) / 70\n max_loudness = max_loudness.reshape(-1, 1)\n features = np.hstack([timbre, chroma, max_loudness])\n return features", "def _get_audio_features(self, sp, trackids):\n\n cols = ['acousticness', 'danceability', 'duration_ms', 'energy',\n 'instrumentalness', 'key', 'liveness', 'loudness', 'mode',\n 'speechiness', 'tempo', 'time_signature', 'valence', 'id']\n\n total_track = len(trackids)\n features = []\n start = 0\n while len(features) < total_track:\n end = start + 100 if start + 100 < total_track else total_track\n\n features += sp.audio_features(tracks=trackids[start: end])\n start = start + 100\n\n return pd.DataFrame.from_records(features, columns=cols)", "def get_features(filename, 
training=True):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n wav, _ = librosa.load(filename, \n sr=SAMPLE_RATE, \n mono=True,\n dtype=np.float64)\n energy = np.abs(wav)\n silence_threshold = np.percentile(energy, 95)\n offsets = np.where(energy > silence_threshold)[0]\n if training:\n audio_voice_only = wav[offsets[0]:offsets[-1]]\n else:\n #avoid cutting off too abruptly\n audio_voice_only = wav[offsets[0]:offsets[-1] + 4800]\n if training:\n if len(audio_voice_only) >= 160 * NUM_FRAMES:\n start_ = np.random.randint(len(audio_voice_only) - 160 * NUM_FRAMES + 1)\n end_ = start_ + 160 * NUM_FRAMES - 1\n audio_voice_only = audio_voice_only[start_:end_]\n else:\n return [0], [0]\n wav = librosa.util.normalize(audio_voice_only)\n #deep speaker uses preemphasis here, I do not, because I want the model to correctly transform lower\n #frequencies, too. I apply preemphasis to spectrum before putting data into model embedder instead.\n wav = lfilter([1., -PREEMPH], [1.], wav)[1:]\n #f0 extraction (most time consuming operation in this function)\n f0, timeaxis = pyworld.harvest(wav, SAMPLE_RATE, frame_period=FRAME_PERIOD, f0_floor=71.0, f0_ceil=800.0)\n sp = pyworld.cheaptrick(wav, f0, timeaxis, SAMPLE_RATE, fft_size=NFFT)\n ap = pyworld.d4c(wav, f0, timeaxis, SAMPLE_RATE, fft_size=NFFT)\n mfe = sp2mfe(sp)\n lmfe = np.log(mfe)\n mean = np.mean(lmfe)\n std = np.std(lmfe)\n nmfe = (lmfe - mean) / std\n \n if training:\n return nmfe.T, f0\n else:\n out_len = len(f0) // 4 * 4\n# out_len = len(f0)\n return nmfe[:out_len].T, mean, std, sp[:out_len], f0[:out_len], ap[:out_len]", "def songfeature_songid_get(songid): # noqa: E501\n query = \"SELECT * FROM SongFeatures WHERE SongID = '{}'\".format(songid)\n results = query_to_dict(query)\n features_list = []\n for r in results:\n features_list.append(\n Songfeature(acousticness= r['Acousticness'],\n danceability= r['Danceability'],\n duration_ms= r['Duration_ms'],\n energy= r['Energy'],\n instrumentalness= r['Instrumentalness'],\n musicalkey= r['MusicalKey'],\n liveness= r['Liveness'],\n loudness= r['Loudness'],\n mode= r['Mode'],\n speechiness= r['Speechiness'],\n tempo= r['Tempo'],\n timesignature= r['Time_signature'],\n valence= r['Valence'],\n songid= r['SongID']))\n return features_list", "def extract_features(audio_filename, args):\n #print(\"Extract_features\")\n spec_type = args['spec_type']\n\n if spec_type == 'cqt':\n bin_multiple = args['bin_multiple']\n max_midi = args['max_midi']\n min_midi = args['min_midi']\n note_range = max_midi - min_midi + 1\n sr = args['sr']\n hop_length = args['hop_length']\n window_size = args['window_size']\n\n bins_per_octave = 12 * bin_multiple # should be a multiple of 12\n n_bins = note_range * bin_multiple\n\n # down-sample,mono-channel\n y, _ = librosa.load(audio_filename, sr)\n # y: an np.ndarray[ shape=(n,) ] giving the audio time series. 
librosa.load automatically downsamples to the\n # required sample rate sr\n # doku on librosa.cqt:\n # https://librosa.github.io/librosa/generated/librosa.core.cqt.html?highlight=cqt#librosa.core.cqts\n S = librosa.cqt(y, fmin=librosa.midi_to_hz(min_midi), sr=sr, hop_length=hop_length,\n bins_per_octave=bins_per_octave, n_bins=n_bins)\n S = S.T\n S = np.abs(S)\n min_db = np.min(S)\n print(np.min(S), np.max(S), np.mean(S))\n S = np.pad(S, ((window_size // 2, window_size // 2), (0, 0)), 'constant', constant_values=min_db)\n\n windows = []\n\n # IMPORTANT NOTE:\n # Since we pad the the spectrogram frame,\n # the onset frames are actually `offset` frames.\n # To obtain a window of the center frame at each true index, we take a slice from i to i+window_size\n # starting at frame 0 of the padded spectrogram\n for i in range(S.shape[0] - window_size + 1):\n w = S[i:i + window_size, :]\n windows.append(w)\n\n # print inputs\n x = np.array(windows)\n return x\n\n else:\n print(\"WARNING: feature type \" + spec_type + \" not implemented.\")\n return 0", "def extract_audio_features(self, _dir_='/Volumes/TOSHIBA EXT/audio_samples10M', load=False):\n\t\tself.movie_aud_features = extract_Afeatures(_dir_=_dir_)\n\t\treturn self.movie_aud_features", "def extract_sound_features(metadata, audio_dataset_path):\n\n import vggish_input\n import vggish_params\n import vggish_slim\n\n with tf.Graph().as_default(), tf.Session() as sess:\n # load pre-trained vggish model\n vggish_slim.define_vggish_slim()\n vggish_slim.load_vggish_slim_checkpoint(sess, checkpoint_path)\n features_tensor = sess.graph.get_tensor_by_name(vggish_params.INPUT_TENSOR_NAME)\n embedding_tensor = sess.graph.get_tensor_by_name(\n vggish_params.OUTPUT_TENSOR_NAME\n )\n \n sound_features = []\n # loop through the dataset using information from the metadata file\n for index_num, row in tqdm(metadata.iterrows()):\n # get the file path \n file_name = os.path.join(os.path.abspath(audio_dataset_path),str(row['file_path']))\n \n # extract basic sound data\n audio, sample_rate = librosa.load(file_name, sr=SR, mono=True, offset=0.0, duration=None)\n\n # extract vgg features\n yt, index = librosa.effects.trim(audio, frame_length=FRAME_LEN, hop_length=HOP)\n input_batch = vggish_input.waveform_to_examples(yt, SR_VGG) \n [features_vgg] = sess.run(\n [embedding_tensor], feed_dict={features_tensor: input_batch}\n )\n features_vgg = sta_fun_2(features_vgg)\n features_vgg = features_vgg.reshape(features_vgg.shape[-1],)\n\n # extract hc features\n audio, sample_rate = librosa.load(file_name, res_type='kaiser_fast')\n features_hc = extract_features_hc(audio, sample_rate)\n\n # concat features\n features = np.concatenate((features_hc, features_vgg), axis=0)\n sound_features.append(features)\n\n return sound_features", "def supported_features(self):\n return (\n mp.const.MediaPlayerEntityFeature.VOLUME_SET\n | mp.const.MediaPlayerEntityFeature.VOLUME_STEP\n | mp.const.MediaPlayerEntityFeature.PLAY\n | mp.const.MediaPlayerEntityFeature.PAUSE\n | mp.const.MediaPlayerEntityFeature.TURN_OFF\n | mp.const.MediaPlayerEntityFeature.TURN_ON\n )", "def supported_features(self):\n return (\n mp.const.MediaPlayerEntityFeature.VOLUME_SET\n | mp.const.MediaPlayerEntityFeature.VOLUME_STEP\n | mp.const.MediaPlayerEntityFeature.PLAY\n | mp.const.MediaPlayerEntityFeature.PAUSE\n | mp.const.MediaPlayerEntityFeature.TURN_OFF\n | mp.const.MediaPlayerEntityFeature.TURN_ON\n )", "def get_artist_audio_features(q, interactive = False, genre_delimiter = '-!!-', to_file = '', client = 
None):\n query = client.search(q = q, type = \"artist\")\n items = query['artists']['items']\n\n if not items:\n raise Exception(\"No artists found\")\n\n if interactive:\n print(\"Select the artist to use...\")\n print(\"\\n\".join(\"[{}]: {}\".format(ii, entry['name']) for ii, entry in enumerate(items)))\n artist_indx = int(input(\"artist number: \").strip())\n if artist_indx > len(items):\n raise IndexError(\"Selected number higher than options available\")\n artist = items[artist_indx]\n else:\n artist = items[0]\n\n # get artist genres\n artist_genres = genre_delimiter.join(artist['genres']) if genre_delimiter else None\n\n # get artist albums\n albums = get_artist_albums(artist['id'])\n albums['artist_genres'] = artist_genres\n\n # get album popularity\n album_popularity = get_album_popularity(albums.id)\n\n # get album tracks\n tracks = get_album_tracks(albums.id)\n\n # get track audio features\n features = get_track_features(tracks.id)\n\n # get track popularity\n popularity = get_track_popularity(tracks.id)\n\n album_data = albums.merge(album_popularity, 'left', 'id')\n\n track_data = tracks \\\n .drop(columns = ['type']) \\\n .merge(popularity, 'left', 'id') \\\n .merge(features.drop(columns = ['uri', 'type', 'duration_ms']), 'left', 'id')\n\n\n merged = prefix_merge(album_data, track_data, ['album_', 'track_'], how = 'left', on = 'album_id')\n\n if to_file:\n merged.to_csv(to_file)\n\n return merged", "def get_audio_features(uri):\n try:\n uri = str(uri)\n res = re.findall(r':(?: *([\\w.-]+):)', uri)\n str_res = ' '.join([str(word) for word in res])\n\n if str_res in ['playlist', 'userplaylist']:\n # from the playlist get URIs for each artist\n artist_uris_total = get_artists_from(uri)\n # from artist uris get a list of album uris\n albums_uris_total = []\n for artist_uri in artist_uris_total:\n album_uris = get_albums_from(artist_uri)\n albums_uris_total.extend(album_uris)\n # from a list of albums get tracks\n track_uris_total = []\n for albums_uri in albums_uris_total:\n tracks_uris = get_tracks_from(albums_uri)\n track_uris_total.extend(tracks_uris)\n print(track_uris_total)\n for track_uri in track_uris_total:\n features_to_db(track_uri)\n\n elif str_res == 'artist':\n albums_uris_total = get_albums_from(uri)\n track_uris_total = []\n for albums_uri in albums_uris_total:\n tracks_uris = get_tracks_from(albums_uri)\n track_uris_total.extend(tracks_uris)\n print(track_uris_total)\n for track_uri in track_uris_total:\n features_to_db(track_uri)\n\n elif str_res == 'album':\n track_uris_total = get_tracks_from(uri)\n print(track_uris_total)\n for track_uri in track_uris_total:\n features_to_db(track_uri)\n\n elif str_res == 'track':\n features_to_db(uri)\n\n except Exception as e:\n print(\"Error processing {}: {}\".format(uri, e))\n raise e\n\n else:\n DB.session.commit()", "def make_returnn_audio_features_func():\n return _extract", "def features_from_label(audio_file, segment):\n duration = segment['end'] - segment['start']\n audio, sample_rate = librosa.core.load(\n audio_file,\n duration=duration,\n offset=segment['start']\n )\n features = fe.get_features(audio, sample_rate)\n return features", "def extract_features(wavfile, feature, sampling_rate=16000):\n\n raw_signal, sr = librosa.core.load(wavfile,\n sampling_rate,\n mono=True,\n dtype='float'\n )\n\n\n if feature == 'MFCC':\n feat_seq = librosa.feature.mfcc(raw_signal,\n sampling_rate,\n n_fft=400,\n hop_length=160,\n n_mfcc=13,\n fmin=75,\n fmax=5999\n )\n # Numerical Stability\n #feat_seq = np.where(feat_seq == 0, 
np.finfo(float).eps, feat_seq)\n\n\n elif feature == 'FBANK':\n feat_seq = librosa.feature.melspectrogram(raw_signal,\n sampling_rate,\n n_fft=400,\n hop_length=160,\n n_mels=13,\n fmin=75,\n fmax=5999\n )\n\n # Numerical Stability\n feat_seq = np.where(feat_seq == 0, np.finfo(float).eps, feat_seq)\n\n # 20 * log | convert to Me-Scale\n feat_seq = 20*np.log10(feat_seq)\n\n # z-norm: feature normalization\n feat_norm = preprocessing.scale(feat_seq, axis=1)\n\n return feat_norm", "def read_process_song(path, window=1, overlap=0, debug=True):\n\n arr_features = []\n\n signal, sr = librosa.load(path)\n signal = signal[:660000]\n\n # Debug process\n if debug:\n print(\"Reading file: {}\".format(path))\n\n # Split songs:\n samples = split_songs(signal, window, overlap)\n\n # Append the result to the data structure\n for s in samples:\n features = get_features(s, sr)\n arr_features.append(features)\n return arr_features", "def collect_features(self, wav_path, label_path):\n n_fft = 512\n window_length = 20\n\n sound, fs = librosa.core.load(wav_path, sr=16000)\n\n if fs != 16000:\n print(wav_path)\n\n # Preemphasis\n preemp_sound = np.append(sound[0], sound[1:] - 0.97 * sound[:-1])\n\n # STFT\n spect = librosa.core.stft(preemp_sound,\n n_fft=n_fft,\n win_length=window_length * int(fs / 1000),\n hop_length=window_length * int(fs / 2000),\n window=scipy.signal.hamming,\n center=True)\n\n spect = np.log10(np.transpose(abs(spect[:, 1:]) ** 2) + 1e-16)\n\n return spect", "def audio_features(self, track=None, tracks=None, with_cache=True, **kwargs):\n if track:\n _id = self._get_track_id(track)\n # pylint: disable=no-member\n return self._get(API.AUDIO_FEATURES_SINGLE.value.format(id=_id), **kwargs)\n\n tracks = list(map(self._get_track_id, tracks or []))\n cached_tracks = []\n if with_cache:\n with db_session:\n cached_tracks = select(a for a in AudioFeatures if a.id in tracks)[:]\n tracks = list(set(tracks) - {a.id for a in cached_tracks})\n batches = [tracks[i : i + 100] for i in range(0, len(tracks), 100)]\n audio_features = [\n self._get(API.AUDIO_FEATURES_MULTIPLE.value, ids=\",\".join(t), **kwargs)\n for t in batches\n ]\n with db_session:\n audio_features = [\n AudioFeatures.from_dict(t) for t in chain.from_iterable(audio_features)\n ] + cached_tracks\n return audio_features", "def test_audio_features(self):\n\n # 1ehPJRt49h6N0LoryqKZXq, 8737: How Far I'll Go (Alessia Cara Version) by Alessia Cara\n # 2fGFaTDbE8aS4f31fM0XE4, 5037: Pop 101 (feat. 
Anami Vice) by Marianas Trench\n targets = {8737: {'danceability': 0.317,\n 'energy': 0.562,\n 'key': 9,\n 'loudness': -9.609,\n 'mode': 1,\n 'speechiness': 0.395,\n 'acousticness': 0.124,\n 'instrumentalness': 0.000144,\n 'liveness': 0.0667,\n 'valence': 0.127,\n 'tempo': 181.100,\n 'duration_ms': 175507,\n 'time_signature': 4,\n },\n 5037: {'danceability': 0.756,\n 'energy': 0.658,\n 'key': 11,\n 'loudness': -6.128,\n 'mode': 0,\n 'speechiness': 0.202,\n 'acousticness': 0.0581,\n 'instrumentalness': 0,\n 'liveness': 0.0674,\n 'valence': 0.640,\n 'tempo': 120.018,\n 'duration_ms': 247829,\n 'time_signature': 4,\n },\n }\n\n results = {track.i_id: track for track in self.tracks if track.i_id in targets}\n\n for target, expecteds in targets.iteritems():\n result = results[target]\n for key, expected in expecteds.iteritems():\n self.assertEqual(result.__getattr__(key), expected)", "def track_features(tracks, authorizer, verbose=False):\n spotify_endpoint = 'https://api.spotify.com/v1/audio-features'\n headers = {\"Accept\":\"application/json\", \"Content-Type\":\"application/json\", \"Authorization\": \"Bearer {bearer}\".format(bearer=authorizer.bearer)}\n\n remainder = len(tracks)\n offset = 0\n stride = 100\n features = []\n while remainder > 0:\n params = {'ids': ','.join(tracks[offset:offset + stride])} # spotify can only process 100 tracks at a time\n\n response = requests.get(spotify_endpoint, params=params, headers=headers)\n\n if response.status_code == 200:\n features += response.json()['audio_features']\n offset += stride\n remainder -= stride\n elif response.status_code == 429:\n limit = int(response.headers['Retry-After'])\n print('Hit rate limit, waiting for {} seconds to continue'.format(limit))\n time.sleep(limit)\n elif response.status_code == 401:\n print('Access token expired, refreshing...')\n authorizer.refresh()\n headers = {\"Accept\":\"application/json\", \"Content-Type\":\"application/json\", \"Authorization\": \"Bearer {bearer}\".format(bearer=authorizer.bearer)}\n else:\n print('Error %d' % response.status_code)\n if verbose:\n print(json.loads(response.text))\n return None\n\n return zip(tracks, features)", "def forward(self, audio, feat_kinds=['sp','mcc','f0','ap','en']):\n device = audio.device\n audio = audio.detach().cpu().numpy()\n feat = dict()\n for feat_kind in feat_kinds:\n feat[feat_kind] = list()\n\n for x in audio:\n # Preprocess\n x = x * MAX_WAV_VALUE\n x = self.low_cut_filter(x, cutoff=self.cutoff_freq)\n # Extract f0\n f0, time_axis = pyworld.harvest(x, self.fs, f0_floor=self.minf0, f0_ceil=self.maxf0, frame_period=self.shiftms)\n\n # Extract sp \n sp = pyworld.cheaptrick(x, f0, time_axis, self.fs, fft_size=self.fft_size)\n if 'sp' in feat_kinds:\n feat['sp'].append(torch.from_numpy(sp).float().t())\n\n # Extract ap\n if 'ap' in feat_kinds:\n ap = pyworld.d4c(x, f0, time_axis, self.fs, fft_size=self.fft_size)\n feat['ap'].append(torch.from_numpy(ap).float().t())\n\n # Extract mcc\n if 'mcc' in feat_kinds:\n mcc = pysptk.sp2mc(sp, self.mcc_dim, self.mcc_alpha)\n feat['mcc'].append(torch.from_numpy(mcc).float().t())\n\n # Extract energy\n if 'en' in feat_kinds:\n mcc = pysptk.sp2mc(sp, self.mcc_dim, self.mcc_alpha)\n en = pysptk.mc2e(mcc, alpha=self.mcc_alpha, irlen=256)\n # en = np.clip(en, 1e-10, None)\n feat['en'].append(torch.from_numpy(en).float().view(-1)) \n\n # Fix f0\n if 'f0' in feat_kinds:\n f0[f0 < 0] = 0\n feat['f0'].append(torch.from_numpy(f0).float().view(-1))\n\n for key, val_list in feat.items():\n feat[key] = 
torch.cat([val.unsqueeze(0) for val in val_list],dim=0).to(device)\n\n return feat", "def get_audio_features(sample, audio_data, max_len, data_truncating, data_filling, audio_cfg):\n if len(audio_data) > max_len:\n if data_truncating == \"fusion\":\n # fusion\n mel = get_mel(audio_data, audio_cfg)\n # split to three parts\n chunk_frames = max_len // audio_cfg['hop_size']+1 # the +1 related to how the spectrogram is computed\n total_frames = mel.shape[0]\n if chunk_frames == total_frames:\n # there is a corner case where the audio length is\n # larger than max_len but smaller than max_len+hop_size.\n # In this case, we just use the whole audio.\n mel_fusion = np.stack([mel, mel, mel, mel], axis=0)\n longer = [[False]]\n else:\n ranges = np.array_split(list(range(0, total_frames-chunk_frames+1)), 3)\n # print('total_frames-chunk_frames:', total_frames-chunk_frames,\n # 'len(audio_data):', len(audio_data),\n # 'chunk_frames:', chunk_frames,\n # 'total_frames:', total_frames)\n if len(ranges[1]) == 0:\n # if the audio is too short, we just use the first chunk\n ranges[1] = [0]\n if len(ranges[2]) == 0:\n # if the audio is too short, we just use the first chunk\n ranges[2] = [0]\n # randomly choose index for each part\n idx_front = np.random.choice(ranges[0])\n idx_middle = np.random.choice(ranges[1])\n idx_back = np.random.choice(ranges[2])\n # select mel\n mel_chunk_front = mel[idx_front:idx_front+chunk_frames, :]\n mel_chunk_middle = mel[idx_middle:idx_middle+chunk_frames, :]\n mel_chunk_back = mel[idx_back:idx_back+chunk_frames, :]\n\n # shrink the mel\n # Output may differ between torchvision.transforms.Resize and numpy.resize.\n #mel_shrink_torch = torch.from_numpy(mel[None])\n #mel_shrink_torch = torchvision.transforms.Resize(size=[chunk_frames, 64])(mel_shrink_torch)[0]\n #mel_shrink_torch = mel_shrink_torch.to('cpu').detach().numpy().copy()\n mel_shrink_numpy = np.resize(mel[None], (chunk_frames, 64))\n # logging.info(f\"mel_shrink.shape: {mel_shrink.shape}\")\n\n # stack\n mel_fusion = np.stack([mel_chunk_front, mel_chunk_middle, mel_chunk_back, mel_shrink_numpy], axis=0)\n longer = [[True]]\n # random crop to max_len (for compatibility)\n overflow = len(audio_data) - max_len\n idx = np.random.randint(0, overflow + 1)\n audio_data = audio_data[idx: idx + max_len]\n\n else: # padding if too short\n if len(audio_data) < max_len: # do nothing if equal\n if data_filling == \"repeatpad\":\n n_repeat = int(max_len/len(audio_data))\n audio_data = np.tile(audio_data, n_repeat)\n # audio_data = audio_data.unsqueeze(0).unsqueeze(0).unsqueeze(0)\n # audio_data = F.interpolate(audio_data,size=max_len,mode=\"bicubic\")[0,0,0]\n audio_data = np.pad(audio_data, [(0, max_len - len(audio_data))], \"constant\")\n elif data_filling == \"pad\":\n audio_data = np.pad(audio_data, [(0, max_len - len(audio_data))], \"constant\")\n elif data_filling == \"repeat\":\n n_repeat = int(max_len/len(audio_data))\n audio_data = np.tile(audio_data, n_repeat+1)[:max_len]\n \n if data_truncating == 'fusion':\n mel = get_mel(audio_data, audio_cfg)\n mel_fusion = np.stack([mel, mel, mel, mel], axis=0)\n longer = [[False]]\n\n return longer, mel_fusion, audio_data", "def features_combine():\n\n\n\t# PROCESSING AUDIO", "def compute_chunk_features(mp3_file):\n # Extract MP3 file to a mono, 10kHz WAV file\n sox_command = \"/usr/local/bin/sox\"\n out_file = 'temp.wav'\n #cmd = sox_command % (out_file, mp3_file)\n temp2 = subprocess.call([sox_command, mp3_file,'-r 10000','-c 1',out_file])\n # Read in chunks of data from WAV 
file\n wav_data1, wav_data2 = read_wav(out_file)\n # We'll cover how the features are computed in the next section!\n return np.array(features(wav_data1)), np.array(features(wav_data2))", "def compute_chunk_features(mp3_file):\n # On convertit le fichier mp3 en un fichier wav mono, 1avec un sample rate de 10000Hertz: on utilise\n # On utilise l'application sox \"c:/Program Files (x86)/sox-14.4.0/sox\"\n\n sox_command = \"./sox-14.4.0/sox\"\n out_file = 'temp.wav'\n #cmd = sox_command % (out_file, mp3_file)\n temp2 = subprocess.call([sox_command, mp3_file,'-r 10000','-c 1',out_file])\n # Read in chunks of data from WAV file\n wav_data1, wav_data2 = read_wav(out_file)\n # We'll cover how the features are computed in the next section!\n return np.array(features(wav_data1)), np.array(features(wav_data2))", "def songfeature_filter_get(songid=None, genre=None, artist=None, name=None): # noqa: E501\n query = 'SELECT * FROM SongFeatures'\n multi_flag = \"WHERE\"\n if genre and not artist:\n query = \"\"\"\n SELECT Acousticness, Danceability, Duration_ms, Energy,Instrumentalness, MusicalKey,\n Liveness,Loudness,Mode, Speechiness,Tempo, Time_signature, Valence, Songs.SongID,\n Songs.SongName\n FROM SongFeatures\n JOIN Songs\n ON Songs.SongID = SongFeatures.SongID\n AND Songs.SongGenre = '{}'\n \"\"\".format(genre)\n multi_flag = \"AND\"\n\n if artist and not genre:\n #Query too complicated, separate entity\n query = \"\"\"\n SELECT Acousticness, Danceability, Duration_ms, Energy,Instrumentalness, MusicalKey,\n Liveness,Loudness,Mode, Speechiness,Tempo, Time_signature, Valence, Songs.SongID\n FROM SongFeatures\n JOIN Songs\n ON Songs.SongID = SongFeatures.SongID\n JOIN Artists\n ON Songs.ArtistID = Artists.ArtistID\n WHERE Artists.ArtistName = '{}'\n \"\"\".format(artist)\n multi_flag = \"AND\"\n\n if artist and genre:\n query = \"\"\"\n SELECT Acousticness, Danceability, Duration_ms, Energy,Instrumentalness, MusicalKey,\n Liveness,Loudness, Mode, Speechiness,Tempo, Time_signature, Valence, Songs.SongID\n FROM SongFeatures\n JOIN Songs\n ON Songs.SongID = SongFeatures.SongID\n JOIN Artists\n ON Songs.ArtistID = Artists.ArtistID\n WHERE Artists.ArtistName = '{}'\n AND Songs.SongGenre = '{}'\n \"\"\".format(artist, genre)\n\n if songid:\n query = query + \" {} SongFeatures.SongID = '{}'\".format(songid)\n\n if name:\n query = query + \" JOIN Songs ON Songs.SongID = SongFeatures.SongID WHERE Songs.SongName = '{}'\".format(name)\n\n results = query_to_dict(query)\n features_list = []\n\n for r in results:\n features_list.append(\n Songfeature(acousticness= r['Acousticness'],\n danceability= r['Danceability'],\n duration_ms= r['Duration_ms'],\n energy= r['Energy'],\n instrumentalness= r['Instrumentalness'],\n musicalkey= r['MusicalKey'],\n liveness= r['Liveness'],\n loudness= r['Loudness'],\n mode= r['Mode'],\n speechiness= r['Speechiness'],\n tempo= r['Tempo'],\n timesignature= r['Time_signature'],\n valence= r['Valence'],\n songid= r['SongID']))\n return features_list", "def get_song(self): \n\n song = self.tracks.sample(n=1).to_dict('index')\n return list(song.values())[0]" ]
[ "0.7809176", "0.73876536", "0.6952075", "0.6951485", "0.68658644", "0.6713561", "0.66311383", "0.6595504", "0.6544838", "0.64794326", "0.64385945", "0.63735366", "0.63735366", "0.63646275", "0.6312347", "0.62634575", "0.62581265", "0.62259", "0.6213748", "0.61638194", "0.6130454", "0.6121169", "0.6091201", "0.5993508", "0.5962358", "0.59319794", "0.58951104", "0.5882172", "0.58730894", "0.5853166" ]
0.7706416
1
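The feature extractor in the row above ends by normalizing log mel-band energies per utterance before returning them. A minimal standalone sketch of just that normalization step, using librosa's mel spectrogram in place of the WORLD spectral envelope and the sp2mfe helper used above (so absolute values will differ; the sample rate and band count here are assumptions):

import numpy as np
import librosa

def normalized_log_mel(wav, sr=16000, n_mels=80):
    # mel-band energies -> log -> zero-mean / unit-variance per utterance
    mel = librosa.feature.melspectrogram(y=wav, sr=sr, n_mels=n_mels)
    lmfe = np.log(mel + 1e-10)
    return (lmfe - np.mean(lmfe)) / np.std(lmfe)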
Given the user's playlist URL, return a list of track ids included in the playlist.
def get_song_ids(self, playlist_link: str) -> List[str]:
    user = self.init_user()
    playlist_id = self.parse_link_to_id(playlist_link)
    res = user.playlist_items(playlist_id,
                              offset=0,
                              fields='items.track.id',
                              additional_types=['track'])['items']
    return [item['track']['id'] for item in res]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_playlist_tracks_id(self, username, playlist_name):\n track_list = []\n playlist_id = self.get_playlist_id(username, playlist_name)\n tracks = self.spotify.playlist_tracks(playlist_id)\n for i in range(len(tracks['items'])):\n track_list.append(tracks['items'][i]['track']['id'])\n while tracks['next']: # If there are more tracks\n tracks = self.spotify.next(tracks)\n for i in range(len(tracks['items'])):\n track_list.append(tracks['items'][i]['track']['id'])\n return track_list", "def get_track_ids_of_playlist(self, playlist_id):\n def get_playlist_data(url):\n req = requests.get(url, headers=self.__header_bearer())\n return req.json() if req.status_code == 200 else False\n\n track_uris = []\n\n endpoint = f'/playlists/{playlist_id}/tracks'\n url = f'{self.api_base_url}{endpoint}'\n\n playlist_data = get_playlist_data(url)\n while True:\n if not playlist_data:\n break\n\n for track in playlist_data['items']:\n track_uris.append(track['track']['uri'])\n\n if not playlist_data['next']:\n break\n else:\n time.sleep(0.5)\n playlist_data = get_playlist_data(playlist_data['next'])\n return track_uris", "def playlist_track_ids(playlist_id, authorizer, verbose=False):\n spotify_endpoint = f'https://api.spotify.com/v1/playlists/{playlist_id}/tracks'\n params = {'fields':'items(track(id)),next,total'} # only get id's of tracks, and total number of tracks in playlist\n headers = {\"Accept\":\"application/json\", \"Content-Type\":\"application/json\", \"Authorization\": \"Bearer {bearer}\".format(bearer=authorizer.bearer)}\n\n tracks = None\n index = 0\n \n # stops when no more pages left\n while spotify_endpoint:\n response = requests.get(spotify_endpoint, params=params, headers=headers)\n\n if response.status_code == 200:\n data = response.json()\n \n # allocate array for tracks\n if tracks is None:\n tracks = [''] * data['total']\n \n # add tracks to array\n for track in data['items']:\n i = track['track']['id']\n tracks[index] = i\n index += 1\n\n # move forward in paging\n spotify_endpoint = data['next']\n elif response.status_code == 429:\n limit = int(response.headers['Retry-After'])\n print('Hit rate limit, waiting for {} seconds to continue'.format(limit))\n time.sleep(limit)\n elif response.status_code == 401:\n print('Access token expired, refreshing...')\n authorizer.refresh()\n else:\n print('Error %d' % response.status_code)\n if verbose:\n print(json.loads(response.text))\n return None\n\n return [t for t in tracks if t is not None] # filter out null tracks", "def get_playlist_tracks(playlist_id):\n\n results = spotifyObject.playlist_tracks(playlist_id)\n tracks = results['items']\n while results['next']:\n results = spotifyObject.next(results)\n tracks.extend(results['items'])\n return tracks", "def spotify_tracklist():\n sp = credentials()\n chart = chartdata()\n trackid_list = []\n #find a way to get track IDS\n for track in chart:\n searchQuery = track[0]\n searchResults = sp.search(q=searchQuery, limit=1, type='track', market=\"US\")\n trackid_list.append(searchResults['tracks']['items'][0]['uri'])\n return trackid_list", "def get_playlist_contents(playlist_id, user_id, limit=100):\n\ttoken = get_token()\n\theaders = {'Authorization': 'Bearer ' + token}\n\tbase_url = SPOTIFY_API_HOST + 'users/{0}/playlists/{1}/tracks?limit={2}'\n\turl = base_url.format(SPOTIFY_USER_ID, SPOTIFY_PLAYLIST_ID, limit)\n\tresponse = requests.get(url, headers=headers).json() # Todo: Handle errors here. 
Not using this function so ok for now.\n\n\turis = []\n\tfor item in response['items']:\n\t\turi_string = item['track']['uri']\n\t\turis.append(uri_string[uri_string.rfind(':')+1:])\n\treturn uris", "def getTracks(playlist_id):\n\n tracks = crud.getTracks(session, playlist_id)\n\n return tracks", "def list_pl_songs(self, pl_id, user=None):\n if user:\n res = self.sp.user_playlist_tracks(user, pl_id)\n else:\n res = self.sp.user_playlist_tracks(self.user, pl_id)\n song_uri_ls = [song['track']['uri'] for song in res['items']]\n song_ls = []\n for i, song in enumerate(res['items']):\n song_ls.append([i,\n song['track']['name'][0:20].strip(),\n song['track']['album']['name'][0:20].strip(),\n \"%0.2f\" % (song['track']['duration_ms'] / 60000),\n song['track']['popularity']])\n return song_uri_ls, song_ls", "def get_playlist_items(self):\n results = self.API.playlist(self.playlist_uri)\n return results[\"tracks\"][\"items\"]", "def get_playlist_tracks(user, playlist_id, limit=100):\n info_dict = spotify.user_playlist_tracks(user, playlist_id, limit=limit)\n items = info_dict[\"items\"]\n tracks = []\n for i in range(len(items)):\n album_name = items[i][\"track\"][\"album\"][\"name\"]\n album_type = items[i][\"track\"][\"album\"][\"album_type\"]\n artists_names = ', '.join([\n items[i][\"track\"][\"artists\"][index][\"name\"]\n for index in range(len(items[i][\"track\"][\"artists\"]))\n ])\n track_name = items[i][\"track\"][\"name\"]\n popularity = items[i][\"track\"][\"popularity\"]\n track_id = items[i][\"track\"][\"id\"]\n tracks.append({\"Album Name\": album_name,\n \"Album Type\": album_type,\n \"Artist(s)\": artists_names,\n \"Track Name\": track_name,\n \"Popularity\": popularity,\n \"Track ID\": track_id\n })\n tracks.sort(key=lambda d: d['Popularity'], reverse=True)\n return tracks", "def user_playlist_tracks(\n self,\n playlist_id,\n fields=None,\n limit=100,\n offset=0,\n market=\"from_token\",\n **kwargs,\n ):\n _id = self._get_playlist_id(playlist_id)\n # pylint: disable=no-member\n return self._get(\n API.PLAYLIST_TRACKS.value.format(playlist_id=_id),\n limit=limit,\n offset=offset,\n fields=fields,\n market=market,\n **kwargs,\n )", "def get_playlist_uuids(self, *args):\n\n rsp = rsp_codes[0]\n rsp['playlist_uuids'] = self.playlists.keys()\n return rsp", "def spotify_playlist_as_json_tracks(playlist_id: int, access_token: str) -> list:\n query_url = \"https://api.spotify.com/v1/playlists/{}/tracks\".format(playlist_id)\n query_headers = {\"Authorization\": \"Bearer {}\".format(access_token)}\n # Get playlist tracks\n tracks_response = requests.get(query_url, headers=query_headers)\n if tracks_response.status_code != 200:\n return tracks_response.reason\n tracks_json = tracks_response.json()\n if \"error_description\" in tracks_json:\n return []\n # Get list of tracks\n tracks = []\n while \"next\" in tracks_json and tracks_json[\"next\"] is not None:\n for t in tracks_json[\"items\"]:\n tracks.append(t[\"track\"])\n tracks_json = requests.get(tracks_json[\"next\"], headers=query_headers).json()\n return tracks", "def grab_playlist():\n sp = credentials()\n playlists = sp.current_user_playlists()\n for playlist in playlists['items']:\n if playlist['name'] == 'Billboard Hot 100':\n playlist_id = playlist['uri']\n return playlist_id", "def get_playlist_id_from_url(url):\n return match1(url, r'youku\\.com/playlist_show/id_([a-zA-Z0-9=]+)')", "def get_playlist_id(self, username, playlist_name):\n playlist_id = ''\n playlists = self.spotify.user_playlists(username)\n for playlist in 
playlists['items']:\n if playlist['name'] == playlist_name:\n playlist_id = playlist['id']\n return playlist_id\n while playlists['next']: # If there are more playlists\n playlists = self.spotify.next(playlists)\n for playlist in playlists['items']:\n if playlist['name'] == playlist_name:\n playlist_id = playlist['id']\n return playlist_id\n return playlist_id", "def get_video_ids(playlist_id):\n \n #search for all the videos given a playlist id\n search_response = youtube.playlistItems().list(part='contentDetails',maxResults=50,playlistId=playlist_id).execute()\n all_videos = search_response['items']\n video_ids = []\n for vid in all_videos:\n video_id = vid['contentDetails']['videoId']\n video_ids.append(video_id)\n\n return video_ids", "def get_playlists_for_user(self, request): \n user = Account.find_by_id(request.userid)\n playlists = Playlist.find_by_owner(user.key).fetch(20)\n return self.build_playlist_response(playlists)", "def getPlaylists():\n\n allPlaylistData = []\n\n spotifyPlaylistData = crud.getPlaylists(session)\n if 'items' in spotifyPlaylistData:\n allPlaylistData = spotifyPlaylistData['items']\n \n savedPlaylistIDs = crud.getSavedPlaylistIDsByUser(int(session['user_id']))\n\n regPlaylistData = [i for i in allPlaylistData if i['id'] not in savedPlaylistIDs]\n savedPlaylistData = [i for i in allPlaylistData if i['id'] in savedPlaylistIDs]\n\n data = {\n 'regPlaylistData': regPlaylistData,\n 'savedPlaylistData': savedPlaylistData\n }\n \n return data", "def get_playlist_songs(self, playlist_id):\n url = get_playlist_url(playlist_id)\n result = self.get_request(url)\n return result['result']['tracks'], result['result']['name']", "def playlistid(self, track_id=None):\n track_id = '' if track_id is None else track_id\n lines = yield from self.command('playlistid {}'.format(track_id))\n return parse_playlist(lines)", "def get_playlists(self):\n values = {\n 'action' : 'playlists',\n }\n root = self.__call_api(values)\n nodes = root.getElementsByTagName('playlist')\n if not nodes: # list is empty, reauth\n return None\n\n l = []\n try:\n for child in nodes:\n id = int(child.getAttribute('id'))\n name = child.getElementsByTagName('name')[0].childNodes[0].data\n owner = child.getElementsByTagName('owner')[0].childNodes[0].data\n items = int(child.getElementsByTagName('items')[0].childNodes[0].data)\n type = child.getElementsByTagName('type')[0].childNodes[0].data\n\n d = {\n 'id' : id,\n 'name' : name,\n 'items' : items,\n 'owner' : owner,\n 'type' : type,\n }\n l.append(d)\n except: #something failed\n traceback.print_exc()\n return []\n return l", "def get_all_playlist_videos( playlistURL ):\r\n \r\n request = youtube.playlistItems().list(\r\n part=\"contentDetails,id,snippet\",\r\n maxResults=50,\r\n playlistId=\"PLxgoClQQBFjgTMrhvedWk8Q_CVLWwy3ak\"\r\n )\r\n response = request.execute()", "def playlist_tracks(self, playlist_id: str, fields: str = None,\n market: str = 'from_token', limit: int = 100,\n offset: int = 0):\n return self._get(f'playlists/{playlist_id}/tracks', limit=limit,\n offset=offset, fields=fields, market=market)", "def get_playlists_from(category_id):\n playlist_uris = []\n for item in spotify.category_playlists(category_id)['playlists']['items']:\n playlist_uris.append(item['uri'])\n\n return playlist_uris", "def playlists(self):\n return self._playlists", "def playlist(self, playlist_id: str, fields: str = None,\n market: str = 'from_token'):\n return self._get('playlists/' + playlist_id,\n fields=fields, market=market)", "def 
search_for_tracks(album_id):\n \n track_results = spotifyObject.album_tracks(album_id)\n track_results = track_results['items']\n ids = [track['id'] for track in track_results]\n\n return ids", "def get_youtube_ids():\n global _id_list\n if _id_list is None:\n all_videos_in = urllib2.urlopen(\"http://www.khanacademy.org/api/internal/videos/localized/all\")\n try:\n all_videos = simplejson.load(all_videos_in)\n finally:\n all_videos_in.close()\n\n # Now get our CS videos that are not part of the content topic tree,\n # but are part of the scratchpad tutorials\n all_talkies_in = urllib2.urlopen(\n \"https://www.khanacademy.org/api/internal/talkthroughs/youtube_ids\")\n try:\n all_talkies = simplejson.load(all_talkies_in)\n finally:\n all_talkies_in.close()\n\n _id_list = all_talkies[:]\n for v in all_videos:\n _id_list += v[\"youtube_ids\"].values()\n\n return _id_list", "def get_playlist_info(self, username, playlist_name):\n playlist_info = []\n playlist_id = self.get_playlist_id(username, playlist_name)\n playlist_items = self.spotify.playlist_tracks(playlist_id)\n for i in range(len(playlist_items['items'])):\n print(playlist_items['items'][i])\n playlist_info.append([playlist_items['items'][i]['track']['name'], \n playlist_items['items'][i]['track']['artists'][0]['name'],\n playlist_items['items'][i]['track']['album']['name']])\n while playlist_items['next']: # If there are more tracks\n playlist_items = self.spotify.next(playlist_items)\n for i in range(len(playlist_items['items'])):\n playlist_info.append([playlist_items['items'][i]['track']['name'], \n playlist_items['items'][i]['track']['artists'][0]['name'],\n playlist_items['items'][i]['track']['album']['name']])\n return playlist_info" ]
[ "0.76453924", "0.74738145", "0.73954004", "0.69144875", "0.6867974", "0.6815433", "0.6652141", "0.663792", "0.66315997", "0.65984553", "0.65869725", "0.65690655", "0.64615995", "0.6455686", "0.6426979", "0.6400573", "0.6325174", "0.6292672", "0.6279484", "0.62740594", "0.62269384", "0.6179692", "0.61614543", "0.61425203", "0.61365896", "0.60598713", "0.6050489", "0.6046213", "0.60441786", "0.60318846" ]
0.7682564
0
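A usage sketch for get_song_ids above, assuming init_user() wraps an authenticated spotipy client; the playlist URL is a placeholder:

import spotipy
from spotipy.oauth2 import SpotifyClientCredentials

sp = spotipy.Spotify(auth_manager=SpotifyClientCredentials())  # credentials read from env vars
link = 'https://open.spotify.com/playlist/<playlist_id>?si=<share_token>'  # placeholder URL
playlist_id = link.split('/')[4].split('?')[0]  # same parsing as parse_link_to_id below
items = sp.playlist_items(playlist_id, offset=0, fields='items.track.id',
                          additional_types=['track'])['items']
track_ids = [item['track']['id'] for item in items]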
Given the playlist link, return the playlist id.
def parse_link_to_id(self, playlist_link: str) -> str:
    split_1 = playlist_link.split('/')[4]
    split_2 = split_1.split('?')
    return split_2[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_playlist_id(self, username, playlist_name):\n playlist_id = ''\n playlists = self.spotify.user_playlists(username)\n for playlist in playlists['items']:\n if playlist['name'] == playlist_name:\n playlist_id = playlist['id']\n return playlist_id\n while playlists['next']: # If there are more playlists\n playlists = self.spotify.next(playlists)\n for playlist in playlists['items']:\n if playlist['name'] == playlist_name:\n playlist_id = playlist['id']\n return playlist_id\n return playlist_id", "def get_playlist_id_from_url(url):\n return match1(url, r'youku\\.com/playlist_show/id_([a-zA-Z0-9=]+)')", "def get_song_ids(self, playlist_link: str) -> List[str]:\n user = self.init_user()\n playlist_id = self.parse_link_to_id(playlist_link)\n res = user.playlist_items(playlist_id,\n offset=0,\n fields='items.track.id',\n additional_types=['track'])['items']\n return [item['track']['id'] for item in res]", "def get_playlist_id(self, request, view, obj):\n # Note, use select_related to avoid making extra requests to get the classroom\n return obj.classroom.playlist_id", "def get_playlist_id(name):\n \n #search for the first playlist result given a drama name\n search_response = youtube.search().list(q=name,type=\"playlist\",part=\"id\",maxResults=1).execute()\n result = search_response.get(\"items\", [])\n playlist_id = result[0]['id']['playlistId']\n return playlist_id", "def grab_playlist():\n sp = credentials()\n playlists = sp.current_user_playlists()\n for playlist in playlists['items']:\n if playlist['name'] == 'Billboard Hot 100':\n playlist_id = playlist['uri']\n return playlist_id", "def playlistid(self, track_id=None):\n track_id = '' if track_id is None else track_id\n lines = yield from self.command('playlistid {}'.format(track_id))\n return parse_playlist(lines)", "def get_video_id_from_link(link):\n query_string = urlparse.urlparse(link).query\n qs_params = urlparse.parse_qs(query_string)\n return qs_params['v'][0]", "def get_id_regular_link(link = None):\n #Legacy compatibility\n choppedLink = legacy_check(link)\n # dont bother if we are none.\n if link == None:\n return link\n\n vid_url_params = choppedLink[3].split(\"&\")\n # Search the id in the list of elements of the url\n vid = search_video_id(vid_url_params)\n\n # And dont forget the links with hashtags #\n vid = vid.split(\"#\")[0]\n\n return vid # change this var names TODO", "def find_playlist(playlist_name):\n\n playlists = spotifyObject.user_playlists(config.USERNAME)\n\n for playlist in playlists['items']:\n if playlist['name'] == playlist_name:\n return playlist['id']\n \n raise PlaylistNotFoundException(\"The given playlist name was not found.\")", "def find_playlist(playlist_name):\n\n playlists = spotifyObject.user_playlists(config.USERNAME)\n\n for playlist in playlists['items']:\n if playlist['name'] == playlist_name:\n return playlist['id']\n \n raise PlaylistNotFoundException(f\"The playlist name: {playlist_name} was not found.\")", "def create_playlist(self):\n playlist=self.sp.user_playlist_create(user=self.username,name=self.nameOfPlaylist,description=self.description)\n return playlist['id']", "def get_yt_list_id(url):\n yt = re.search(_YT_LIST_PATTERN, url)\n if yt:\n return yt.group(1)", "def get_id_shortlink(link = None):\n choppedLink = legacy_check(link)\n id = None\n try:\n id = choppedLink[3] # or -1 instead of 3\n except:\n pass #dont care bout issues here\n return id", "def _id_from_url(url):\n url = re.sub(r'\\?.*', '', url)\n video_id = url.split('/')[-2]\n return video_id", "def 
get_video_id(self):\n \n if self.video_id:\n return self.video_id\n \n if not self.original_url:\n return ''\n \n if self.res.get('slideshow_id'):\n return self.res.get('slideshow_id')\n \n p = urlparse.urlparse(self.original_url)\n path = p.path\n if path.endswith('/'):\n path = path[:-1]\n path_list = path[1:].split('/')\n \n if len(path_list) == 3 and (p.path.startswith('/slideshow/embed_code')):\n # http://www.slideshare.net/slideshow/embed_code/1293644\n return path_list[2]\n elif len(path_list) == 2 and p.path.startswith('/swf'):\n # return -1 when url is like : http://static.slideshare.net/swf/ssplayer2.swf?doc=working-dogs-1201800078341935-2\n # FixMe :slideshare oembed api doesnt support this kind of url\n return -1\n return ''", "def current_playlist(self):\n if (self._playlists == {}):\n return None\n else:\n for title, id in self._playlists.items():\n if (id == self._current_playlist_id):\n return title\n return None", "def get_id(html):\n\ttry:\n\t\tsong_id = re.findall('soundcloud://sounds:(.*?)\"', html)[0]\n\t\treturn song_id\n\texcept IndexError:\n\t\tprint(\"\\033[91m✘ Could not find song ID\\033[0m\")\n\t\tsys.exit()", "def get_playlist_tracks_id(self, username, playlist_name):\n track_list = []\n playlist_id = self.get_playlist_id(username, playlist_name)\n tracks = self.spotify.playlist_tracks(playlist_id)\n for i in range(len(tracks['items'])):\n track_list.append(tracks['items'][i]['track']['id'])\n while tracks['next']: # If there are more tracks\n tracks = self.spotify.next(tracks)\n for i in range(len(tracks['items'])):\n track_list.append(tracks['items'][i]['track']['id'])\n return track_list", "def unique_id(self):\n if self._uuid != '':\n return \"linkplay_media_\" + self._uuid", "def get_id_attribution(link = None):\n log.debug(\"attribution link: \" + repr(link))\n choppedLink = legacy_check(link)\n id = None\n try:\n # First try to get the relevant part, that is encoded\n step1 = choppedLink[3][choppedLink[3].find(\"watch\"):]\n # Then stplit the other encoded params\n step2 = step1[12:].split(\"%\")\n # and get the good part\n step3 = step2[0]\n id = step3 # choppedLink[3][choppedLink[3].find(\"watch\"):][12:].split(\"%\")[0]\n except Exception as e:\n raise e # dont care 'bout issues here. 
all will be NotImplementedError \n\n # If we havent found a match, then this is not implemented.\n if id == \"\":\n raise Exception(\"no recognised kind of link\")\n\n return id", "def get_chunk_id_for_link(self, link):\n try:\n conn = psycopg2.connect(\"dbname='{0}'\".format(DATABASE))\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n cur.execute(\"SELECT chunk_id FROM link WHERE link = %s;\", (link,))\n result = cur.fetchall()\n cur.close()\n return result\n except Exception as e:\n print(e)", "def playlist_link(self, obj):\n if obj.playlist is None:\n return '\\N{EM DASH}'\n\n return format_html(\n '<a href=\"{}\">{}</a>',\n reverse('admin:mediaplatform_playlist_change', args=(obj.playlist.pk,)),\n obj.playlist.title if obj.playlist.title != '' else '[Untitled]'\n )", "def find_player_id(url):\r\n response = requests.get(url)\r\n result = PLAYER_ID_PATTERN.search(response.text)\r\n return result.group(1)", "def media_playlist(self):\n return self._table.active_playlist.name if self._table.active_playlist else None", "def _get_playlist_index_by_name(library_list, playlist_name):\n for playlist in library_list:\n if playlist['name'] == playlist_name:\n return library_list.index(playlist)\n return None", "def updatePlaylist():\n\n orig_playlist_id = request.json.get('orig_playlist_id')\n new_playlist_id = request.json.get('new_playlist_id')\n snapshot_id = crud.updatePlaylist(session, orig_playlist_id, new_playlist_id)\n\n return snapshot_id", "def get_video_id(url):\n\n if not url:\n return \"\"\n\n # If URL is embedded\n if \"embed\" in url:\n return url.split(\"/\")[-1]\n\n parse_result = urlparse(url)\n query = parse_qs(parse_result.query)\n return query[\"v\"][0]", "def get_video_id(self):\n \n if self.video_id:\n return self.video_id\n \n if not self.original_url:\n return ''\n \n p = urlparse.urlparse(self.original_url)\n params = cgi.parse_qs(p.query)\n \n if p.path.endswith('/video'):\n # url type http://www.livestream.com/xprize/video?clipId=pla_1a25a2ba-9ca4-4c3b-b1b1-ebd7d79ef6d2\n if 'clipId' in params:\n return params['clipId'][0]\n if p.path.startswith('/embed'):\n # url type http://cdn.livestream.com/embed/xprize?layout=4&amp;clip=pla_1a25a2ba-9ca4-4c3b-b1b1-ebd7d79ef6d2&amp;width=560&amp;autoplay=false\n if 'clip' in params:\n return params['clip'][0]\n \n return ''", "def get_id(self, url):\n return url.split('/')[-1]" ]
[ "0.7627802", "0.75714946", "0.7359392", "0.73437", "0.7227414", "0.71185565", "0.70467764", "0.66185874", "0.660244", "0.6591759", "0.65801316", "0.64638054", "0.6358947", "0.63445514", "0.62671155", "0.6256702", "0.6156125", "0.61192715", "0.60325164", "0.6021746", "0.599894", "0.5989025", "0.59565914", "0.59541416", "0.59048843", "0.58994216", "0.5898635", "0.5875969", "0.5868465", "0.58643675" ]
0.81976426
0
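Worked through on a placeholder share link, the two splits in parse_link_to_id above reduce to:

link = 'https://open.spotify.com/playlist/abc123XYZ?si=shareToken'
split_1 = link.split('/')[4]         # 'abc123XYZ?si=shareToken'
playlist_id = split_1.split('?')[0]  # 'abc123XYZ'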
Selects the given labels from a named SelectWidget control. (A functional replacement for the JavaScript used by this widget.)
def setSelectWidget(browser, name, labels):
    control = browser.getControl(name='%s.from' % name).mech_control
    form = control._form
    for label in labels:
        value = str(control.get(label=label))
        form.new_control('text', 'form.buyable_types', {'value': value})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def htmlSelect(labelText, parName, args, choiceList, hint=None, descriptionSeparator='::',\n labelAttr='', attr=''):\n snippet = htmlLabel(labelText,parName,labelAttr)\n default = args[parName] if parName in args else ''\n if not isinstance(default,list):\n default = [default]\n snippet += '<select name=\"%s\"%s>\\n' % (parName,sep(attr))\n if hint:\n snippet += '<option value=\"\">%s</option>\\n' % hint\n for c in choiceList:\n p = c.split(descriptionSeparator)\n if len(p)==2:\n (desc,val) = p\n else:\n (desc,val) = (c,c)\n if val in default:\n snippet += '<option selected=\"yes\" value=\"%s\">%s</option>\\n' % (val,desc)\n else:\n snippet += '<option value=\"%s\">%s</option>\\n' % (val,desc)\n snippet += '</select>\\n'\n return snippet", "def selectOptionByLabel(self, element_tuple, select_label):\n self.log_info(f\"Browser.selectOptionByLabel: Setting {element_tuple} to {select_label}\")\n Select(self.CORE.find_element(*self.format_element(element_tuple))).select_by_visible_text(select_label)\n return", "def select(self, label, component, config, name, options, default=0):\n\n index = self.setting(config, name)\n index = [x for x, option in enumerate(options) if option == default]\n\n # Derive default index\n default = index[0] if index else default\n\n return st.selectbox(label, options, index=default, key=component + name)", "def __init__(\n self, name: str, values: List[Dict], index: Optional[int] = 0,\n label: Optional[str] = None, help: Optional[str] = None,\n default: Optional[bool] = None, required: Optional[bool] = False,\n group: Optional[str] = None\n ):\n super(Select, self).__init__(\n dtype=PARA_SELECT,\n name=name,\n index=index,\n label=label,\n help=help,\n default=default,\n required=required,\n group=group\n )\n self.values = values", "def the_option_named(text: str) -> \"SelectByText\":\n return SelectByText(text)", "def test_render_value_label(self):\n self.check_html(\n self.widget(choices=self.beatles),\n \"beatles\",\n [\"John\"],\n html=(\n \"\"\"<select multiple name=\"beatles\">\n <option value=\"J\">John</option>\n <option value=\"P\">Paul</option>\n <option value=\"G\">George</option>\n <option value=\"R\">Ringo</option>\n </select>\"\"\"\n ),\n )", "def labels(self) -> list[\"Label\"]:\n _args: list[Arg] = []\n _ctx = self._select(\"labels\", _args)\n _ctx = Label(_ctx)._select_multiple(\n _name=\"name\",\n _value=\"value\",\n )\n return _ctx.execute_sync(list[Label])", "def choose_select(select_label, select_item=None):\n try:\n if select_item is not None:\n label = driver.find_element_by_xpath(\"//*[contains(text(), '%s')]\" % select_label)\n label_parent = label.find_element_by_xpath(\"..\")\n select = label_parent.find_element_by_tag_name('select')\n select.click()\n click_on(select_item, scope=select)\n wait()\n except Exception as e:\n return \"Error: \" + str(e)\n return \"Success\"", "def selector(rules):\n\n T = current.T\n\n selector = DIV(_class = \"anonymize-select\",\n )\n\n for rule in rules:\n\n name = rule.get(\"name\")\n if not name:\n continue\n\n title = T(rule.get(\"title\", name))\n\n selector.append(DIV(INPUT(value = \"on\",\n _name = s3_str(name),\n _type = \"checkbox\",\n _class = \"anonymize-rule\",\n ),\n LABEL(title),\n _class = \"anonymize-option\",\n ))\n\n return selector", "def selector(**kwargs):\n return kwargs", "def test_dbpa003_select(dash_duo):\n app = Dash()\n\n options = {\n \"OptionA\": \"Option 1\",\n \"OptionB\": \"Option 2\",\n \"OptionC\": \"Option 3\",\n }\n\n value = \"OptionB\"\n\n with_keywords = Select(\n 
options=options,\n value=value,\n id=\"with-keywords\",\n )\n without_keywords = Select(options, value, id=\"without-keywords\")\n\n app.layout = html.Div([with_keywords, without_keywords])\n\n dash_duo.start_server(app)\n\n # Check values\n assert [\n a.get_attribute(\"value\")\n for a in dash_duo.wait_for_element(\n \"#with-keywords\"\n ).find_elements_by_tag_name(\"option\")\n ] == [\n a.get_attribute(\"value\")\n for a in dash_duo.wait_for_element(\n \"#without-keywords\"\n ).find_elements_by_tag_name(\"option\")\n ]\n\n # Check labels\n assert [\n a.text\n for a in dash_duo.wait_for_element(\n \"#with-keywords\"\n ).find_elements_by_tag_name(\"option\")\n ] == [\n a.text\n for a in dash_duo.wait_for_element(\n \"#without-keywords\"\n ).find_elements_by_tag_name(\"option\")\n ]", "def add_selector(self, listing):\n # We will be able to select X-frames and its boundaries\n # will be stored in the given list\n\n def onselect(xmin, xmax):\n# indmin, indmax = np.searchsorted(x, (xmin, xmax))\n# indmax = min(len(x)-1, indmax)\n indmin = xmin\n indmax = xmax\n onselect.listing.append([indmin, indmax])\n print (onselect.listing)\n \n onselect.listing = listing\n \n # set useblit True on gtkagg for enhanced performance\n ax = self.axes\n span = SpanSelector(ax, onselect, 'horizontal', useblit=True,\n rectprops=dict(alpha=0.5, facecolor='red') )\n \n self.widget_list.append(span)", "def select(*args):", "def select(*args):", "def _create_selector_widget(self,frame,name,widget_options):\n #param = self.get_parameter_object(name)\n #self._update_translator(name,param)\n\n ## sort the range for display\n # CEBALERT: extend OptionMenu so that it\n # (a) supports changing its option list (subject of a previous ALERT)\n # (b) supports sorting of its option list\n # (c) supports selecting a new default\n new_range,widget_options = self._X(name,widget_options)\n tkvar = self._tkvars[name]\n\n # Combobox looks bad with standard theme on my ubuntu\n # (and 'changed' marker - blue text - not visible).\n w = Combobox(frame,textvariable=tkvar,\n values=new_range,state='readonly',\n **widget_options)\n\n # Combobox (along with Checkbutton?) somehow sets its\n # associated textvariable without calling that textvariable's\n # set() method. 
Therefore, to update the Selector's help text\n # when an item is selected, we bind to the\n # <<ComboboxSelected>> event.\n def _combobox_updated(event,name=name):\n w = self.representations[name]['widget']\n help_text = getdoc(\n self._string2object(\n name,\n self._tkvars[name]._original_get()))\n\n self.balloon.bind(w,help_text)\n\n w.bind(\"<<ComboboxSelected>>\",_combobox_updated)\n\n help_text = getdoc(self._string2object(name,tkvar._original_get()))\n self.balloon.bind(w,help_text)\n return w", "def setSelected(*args):", "def setSelected(*args):", "def form_SelectWithOtherChoice(request):\n schema = schemaish.Structure()\n schema.add('mySelect', schemaish.Integer())\n options = [(1,'a'),(2,'b'),(3,'c')]\n\n form = formish.Form(schema, 'form')\n form['mySelect'].widget = formish.SelectWithOtherChoice(options)\n return form", "def setLabel(*args):", "def setLabel(*args):", "def setLabel(*args):", "def setLabel(*args):", "def setLabel(*args):", "def select(self, field_paths):\n raise NotImplementedError(\"This should have been implemented.\")", "def _select_labels(self, segmentation, labels=None):\n\n logger.debug(\"select_labels() started with labels={}\".format(labels))\n if self.slab is not None and labels is not None:\n segmentation_out = select_labels(segmentation, labels, slab=self.slab)\n else:\n logger.warning(\"Nothing found for labels \" + str(labels))\n un = np.unique(segmentation)\n if len(un) < 2:\n logger.error(\"Just one label found in input segmenation\")\n segmentation_out = (segmentation > un[0]).astype(segmentation.dtype)\n return segmentation_out", "def select(self, value) -> str:", "def dropdown_multiple(id_, placeholder, size=\"200px\", text=None):\n components = []\n if text:\n components.append(html.Div(text, className=\"select-dropdown-text\"))\n components.append(\n dcc.Dropdown(id=id_, placeholder=placeholder, style={\"width\": size}, multi=True)\n )\n return html.Div(className=\"select-dropdown\", children=components)", "def form_SelectChoiceCallableOptions(request):\n schema = schemaish.Structure()\n schema.add('mySelect', schemaish.Integer())\n def _():\n options = [(1,'a'),(2,'b'),(3,'c')]\n for option in options:\n yield option\n\n form = formish.Form(schema, 'form')\n form['mySelect'].widget = formish.SelectChoice(_)\n return form", "def get_label():\n inp = option_text('Input label name (leave blank for no label):')\n add_to_collected('label', inp)\n OPTIONS['label'] = inp\n return", "def list_selector(title=None,\n text=None,\n members=None,\n controller=None,\n preselect=None,\n entry=False):\n combobox = list_selector_widget(members=members,\n preselect=preselect,\n entry=entry)\n\n d = gtk.Dialog(title=title,\n parent=None,\n flags=gtk.DIALOG_DESTROY_WITH_PARENT,\n buttons=( gtk.STOCK_OK, gtk.RESPONSE_OK,\n gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL ))\n\n if text is not None:\n l=gtk.Label(text)\n l.show()\n d.vbox.add(l)\n\n d.vbox.add(combobox)\n combobox.show_all()\n\n d.connect('key-press-event', dialog_keypressed_cb)\n\n d.show()\n center_on_mouse(d)\n res=d.run()\n retval=None\n if res == gtk.RESPONSE_OK:\n retval=combobox.get_current_element()\n d.destroy()\n return retval" ]
[ "0.62763876", "0.6042844", "0.6035751", "0.5838853", "0.58088744", "0.57668555", "0.5582798", "0.54629403", "0.545703", "0.5385097", "0.53836495", "0.5362602", "0.530068", "0.530068", "0.5267683", "0.510479", "0.510479", "0.50969267", "0.50787586", "0.50787586", "0.50787586", "0.50787586", "0.50787586", "0.5069769", "0.5067483", "0.50542676", "0.5042026", "0.50147766", "0.5004807", "0.50031906" ]
0.7560723
0
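A usage sketch for setSelectWidget above inside a zope.testbrowser functional test; the form URL and the 'Save' button label are placeholders, and 'form.buyable_types' matches the control name hard-coded in the helper:

from zope.testbrowser.browser import Browser

browser = Browser('http://nohost/plone/@@settings-form')  # placeholder edit-form URL
setSelectWidget(browser, 'form.buyable_types', ['Document', 'News Item'])
browser.getControl('Save').click()  # placeholder button label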
Returns the 'Add to Cart' button: on Plone 2.5 it is a link, but on Plone 3+ it is a control.
def getAddToCartControlOrLink(browser):
    try:
        browser.getControl('Add to Cart')
        return browser.getControl('Add to Cart')
    except LookupError:
        return browser.getLink('Add to Cart')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def goto_cart(self):\n self.driver.find_element(*BasePageLocators.GO_CART).click()\n return CartPage(self.driver)", "def checkout_btn(self):\n self._checkout_btn.click()", "def add_create_pl_btn(self):\n self.create_pl = QPushButton(\"Add to playlist\")\n self.create_pl.clicked.connect(self.pl_btn_push)\n self.hbtnbox.addWidget(self.create_pl)", "def add_view_pl_button(self):\n self.view_pl = QPushButton(\"View Playlist\")\n self.view_pl.clicked.connect(self.view_pl_btn_push)\n self.hbtnbox.addWidget(self.view_pl)", "def render_button(self):\n return self.widgets.get('button').render()", "def _create_add_ingredient_button(self):\n add_button = ttk.Button(\n master=self._frame,\n text=\"Add new ingredient\",\n command=self._handle_add_new_ingredient\n )\n return add_button", "def _render_link(self, context, name, label, extra=''):\n product = Product.select(self.env, where={'name' : name})\n if product:\n product = product[0]\n href = context.href.products(product.prefix)\n if 'PRODUCT_VIEW' in context.perm(product.resource):\n return tag.a(label, class_='product', href=href + extra)\n elif 'PRODUCT_CREATE' in context.perm('product', name):\n return tag.a(label, class_='missing product', \n href=context.href('products', action='new'),\n rel='nofollow')\n return tag.a(label, class_='missing product')", "def add_button(self, title, callback, display_opt=None):\n button = wx.Button(self.button_panel, -1, title)\n button.Bind(wx.EVT_BUTTON, callback)\n button.display_opt = display_opt\n self.buttons.append(button)\n self.button_sizer.Add(button, 0)", "def addButton(self, button):\n\t\tself.config._WITH_ACTIONS = True\n\t\tself.config.ACTIONS.append((\"button\", button))", "def test_add_to_cart_item(self, app, pm):\n logging.basicConfig(filename='/home/osboxes/pytest_mobile/logs/test.log', level=logging.DEBUG, filemode=\"w\")\n app.browser.tap_button(pm.start_page.get_skip_button())\n app.browser.tap_button(pm.main_page.get_cart_button())\n result = app.browser.get_text(pm.my_cart_page.get_cart_empty_cart())\n assert result == \"Your Cart is Empty\"\n app.browser.tap_button(pm.menu_items.get_back_button())", "def view_cart(request):\n \n return render(request, \"cart.html\" )", "def pol_to_cart():\n pass", "def add_play_btn(self):\n self.play_btn = QPushButton(\"Play Playlist\")\n self.play_btn.clicked.connect(self.play_btn_push)\n self.hbtnbox.addWidget(self.play_btn)", "def __create_button(self, parent, flag):\n btns = {\n wx.ID_OK: (MSG_ACTION_OK, \"ok\"),\n wx.ID_CANCEL: (MSG_ACTION_CANCEL, \"cancel\"),\n wx.ID_YES: (MSG_ACTION_YES, \"yes\"),\n wx.ID_NO: (MSG_ACTION_NO, \"no\"),\n wx.ID_APPLY: (MSG_ACTION_APPLY, \"apply\"),\n wx.ID_CLOSE: (MSG_ACTION_CLOSE, \"close-window\"),\n wx.ID_SAVE: (MSG_ACTION_SAVE, \"save\"),\n }\n btn = sppasBitmapTextButton(parent, label=btns[flag][0], name=btns[flag][1])\n btn.SetId(flag)\n\n if flag == wx.CANCEL:\n self.SetAffirmativeId(wx.ID_CANCEL)\n\n elif flag in (wx.CLOSE, wx.OK):\n btn.SetDefault()\n btn.SetFocus()\n self.SetAffirmativeId(flag)\n\n elif flag == wx.YES:\n self.SetAffirmativeId(wx.ID_YES)\n\n elif flag == wx.OK:\n btn.SetDefault()\n\n return btn", "def addNewCategoryButton(self):\n newIcon = self.getQIcon('money.png')\n newCategoryAction = QAction(newIcon, 'New Category', self)\n newCategoryAction.setShortcut('Ctrl+N')\n newCategoryAction.setStatusTip(\"Create a New Category.\")\n newCategoryAction.triggered.connect(self.newCategory)\n \n self.addAction(newCategoryAction)", "def add_view_songs_btn(self):\n self.view_songs = 
QPushButton(\"View Songs\")\n self.view_songs.clicked.connect(self.view_songs_push)\n self.hbtnbox.addWidget(self.view_songs)", "def html(self):\n dis = ('disabled' if not self._enabled else '')\n met = ('post' if self._html_post else 'get')\n act = escape(self._action)\n txt = escape(self._text)\n return '<button %s formaction=\"%s\" formmethod=\"%s\">%s</button>' % (dis, act, met, txt)", "def test_add_to_cart_button(driver, browser, mode, device, username, password):\n login_page = LoginPage(driver)\n login_page.perform_complete_login(username, password)\n product_list_page = ProductListPage(driver)\n\n # Get a random product\n products = product_list_page.get_all_product_elements()\n index = random.randrange(0, len(products))\n product = products[index]\n product_url = product.find_element_by_xpath(product_list_page.URL_XPATH).get_attribute(\"href\")\n driver.get(product_url)\n pdp = ProductDetailPage(driver)\n pdp.click_add_to_cart()\n assert pdp.get_number_cart_items() == 1", "def corner_buttons(self):\r\n buttons = []\r\n if c.user_is_loggedin:\r\n if c.user.name in g.admins:\r\n if c.user_is_admin:\r\n buttons += [NamedButton(\"adminoff\", False,\r\n nocname=not c.authorized_cname,\r\n target = \"_self\")]\r\n else:\r\n buttons += [NamedButton(\"adminon\", False,\r\n nocname=not c.authorized_cname,\r\n target = \"_self\")]\r\n\r\n buttons += [NamedButton('submit', sr_path = not c.default_sr,\r\n nocname=not c.authorized_cname)]\r\n if c.user.safe_karma >= g.discussion_karma_to_post:\r\n buttons += [NamedButton('meetups/new', False,\r\n nocname=not c.authorized_cname)]\r\n buttons += [NamedButton(\"prefs\", False,\r\n css_class = \"pref-lang\")]\r\n buttons += [NamedButton(\"logout\", False,\r\n nocname=not c.authorized_cname,\r\n target = \"_self\")]\r\n\r\n return NavMenu(buttons, base_path = \"/\", type = \"buttons\")", "def test_get_custom_button(self):\n pass", "def click_download_button(self):\n self._basket.click_download_button()", "def view_cart(request):\n return render(request, \"cart.html\")", "def view_cart(request):\n return render(request, \"cart.html\")", "def view_cart(request):\n return render(request, \"cart.html\")", "def item_link(self, obj):\n if obj.item is None:\n return '\\N{EM DASH}'\n\n return format_html(\n '<a href=\"{}\">{}</a>',\n reverse('admin:mediaplatform_mediaitem_change', args=(obj.item.pk,)),\n obj.item.title if obj.item.title != '' else '[Untitled]'\n )", "def single_item_receipt_template(self):\r\n return 'shoppingcart/receipt.html'", "def view_cart(request):\n\n return render(request, 'cart/cart.html')", "def view_cart(request):\n\n return render(request, 'cart/cart.html')", "def view_link(self, obj):\n if obj.cwr:\n url = reverse(\n 'admin:music_publisher_ackimport_change', args=(obj.id,))\n url += '?preview=true'\n return mark_safe(\n '<a href=\"{}\" target=\"_blank\">View CWR</a>'.format(url))", "def label_link(self, obj):\n if not obj.record_label:\n return None\n url = reverse(\n 'admin:music_publisher_label_change', args=[obj.record_label.id])\n link = '<a href=\"{}\">{}</a>'.format(url, obj.record_label)\n return mark_safe(link)" ]
[ "0.5797779", "0.5744916", "0.57158464", "0.55404997", "0.5528336", "0.54405314", "0.5308705", "0.5291106", "0.5275951", "0.52686775", "0.5261013", "0.5248891", "0.5233275", "0.52236503", "0.5206349", "0.520571", "0.5186595", "0.51761496", "0.51619565", "0.5154222", "0.51391506", "0.51374507", "0.51374507", "0.51374507", "0.5133013", "0.51116484", "0.5102504", "0.5102504", "0.5096798", "0.50949675" ]
0.7175834
0
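Because getAddToCartControlOrLink above returns either a control or a link, a test can click the result the same way on both Plone generations (assuming the Plone 3+ control is a submit button, so it exposes click() just as a link does):

add_to_cart = getAddToCartControlOrLink(browser)
add_to_cart.click()  # submits the form on Plone 3+, follows the link on Plone 2.5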
visualize all boxes in one image
def vis_all_boxes(im_array, boxes):
    import matplotlib.pyplot as plt
    from ..fio.load_ct_img import windowing_rev, windowing
    im = windowing_rev(im_array + config.PIXEL_MEANS, config.WINDOWING)
    im = windowing(im, [-175, 275]).astype(np.uint8)  # soft tissue window
    plt.imshow(im)
    color = (0., 1., 0.)
    for bbox in boxes:
        rect = plt.Rectangle((bbox[0], bbox[1]),
                             bbox[2] - bbox[0],
                             bbox[3] - bbox[1],
                             fill=False, edgecolor=color, linewidth=2)
        plt.gca().add_patch(rect)
        if boxes.shape[1] == 5:
            score = bbox[-1]
            plt.gca().text(bbox[0], bbox[1] - 2,
                           '{:s} {:.3f}'.format(name, score),
                           bbox=dict(facecolor=color, alpha=0.5),
                           fontsize=12, color='white')
    plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_boxes(self, im, boxes):\n for bbox in boxes:\n l = [int(x) for x in bbox[\"coords\"]]\n l = self.scalebox(l)\n icon = self.classes_to_icons[bbox[\"label\"]]\n overlay_im_to_background(im, icon, l[0], l[1] - icon.shape[0] - 5)\n cv2.rectangle(im,(l[0],l[1]),(l[2],l[3]),self.color,2)", "def vis_gt_boxes(self):\n import cv2\n num_images = len(self.gt)\n for i in range(num_images):\n im = cv2.imread(self.image_path_at(i))\n im = im[:, :, (2, 1, 0)]\n plt.cla()\n plt.imshow(im)\n gt_image = self.gt[i]\n for j in range(len(gt_image['boxes'])):\n bbox = gt_image['boxes'][j]\n c = gt_image['gt_classes'][j] \n plt.gca().add_patch(plt.Rectangle((float(bbox[0]), float(bbox[1])),\n float(bbox[2]) - float(bbox[0]),\n float(bbox[3]) - float(bbox[1]), fill=False,\n edgecolor='r', linewidth=3))\n x = (bbox[0] + bbox[2])/2\n y = bbox[1]\n s = '{}'.format(self.classes[c])\n plt.text(x, y, s, fontsize=14,horizontalalignment='center',weight='bold',backgroundcolor=(1,1,1))\n plt.show()", "def show_boxes(img, boundary_boxes, gt_boxes=None):\n\n for (x_tl, y_tl, x_br, y_br) in boundary_boxes:\n cv2.rectangle(img, (x_tl, y_tl),\n (x_br, y_br),\n (0, 0, 255), 2)\n\n if gt_boxes is not None:\n for (x_tl, y_tl, x_br, y_br) in gt_boxes:\n cv2.rectangle(img, (x_tl, y_tl),\n (x_br, y_br),\n (0, 255, 0), 2)\n\n cv2.imshow(\"img\", img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def draw_boxes(self, image, boxes):\n return draw_boxes(image, boxes, self.labels)", "def draw_box(image, boxes, box_color=(255, 255, 255)):\r\n for box in boxes:\r\n cv2.rectangle(image,\r\n (box[0], box[1]),\r\n (box[2], box[3]), box_color, 3)", "def draw_box(image, boxes, box_color=(255, 255, 255)):\r\n for box in boxes:\r\n cv2.rectangle(image,\r\n (box[0], box[1]),\r\n (box[2], box[3]), box_color)", "def _draw_boxes(self, image, boxes, classes, thickness=4):\n for i in range(len(boxes)):\n bot, left, top, right = boxes[i, ...]\n class_id = int(classes[i]) - 1\n color = self.COLOR_LIST[class_id]\n cv2.rectangle(image, (left, top), (right, bot), color=color, thickness=thickness)", "def visualize(scores, faces):\n pc_min, pc_max = np.min(scores, 0), np.max(scores, 0)\n pc_scaled = (scores - pc_min) / (pc_max - pc_min) \n fig, ax = plt.subplots()\n for i in range(len(faces)):\n imagebox = offsetbox.OffsetImage(faces[i, :].reshape(64,64).T, cmap=plt.cm.gray, zoom=0.5)\n box = offsetbox.AnnotationBbox(imagebox, pc_scaled[i, 0:2])\n ax.add_artist(box)\n plt.show()", "def __draw_boxes(self, img, bboxes, color=(128, 0, 0), thick=4):\n\n # Make a copy of the image\n imcopy = np.copy(img)\n # Iterate through the bounding boxes\n for bbox in bboxes:\n # Draw a rectangle given bbox coordinates\n cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)\n # Return the image copy with boxes drawn\n return imcopy", "def draw_boxes_v2(img_name, img, boxes, labels, scores, obj_list=None, figsize=(15,15)):\n fig,ax = plt.subplots(figsize=figsize)\n\n if isinstance(img, torch.Tensor):\n img = img.numpy().squeeze().transpose((1,2,0))\n # Display the image\n ax.imshow(img)\n\n # Create a Rectangle patch\n for box, label, score in zip(boxes, labels, scores):\n label = int(label)\n color = STANDARD_COLORS[label]\n x,y,w,h = box\n rect = patches.Rectangle((x,y),w,h,linewidth=1.5,edgecolor = color,facecolor='none')\n score = np.round(score, 3)\n if obj_list is not None:\n text = '{}: {}'.format(obj_list[label], str(score))\n else:\n text = '{}: {}'.format(label, str(score))\n plt.text(x, y-3,text, color = color, fontsize=15)\n # Add the patch to the Axes\n 
ax.add_patch(rect)\n plt.axis('off')\n plt.savefig(img_name,bbox_inches='tight')\n plt.close()", "def draw_boxes(image, bboxes, color=(0., 0., 1.0), thick=6):\n # make a copy of the image\n draw_img = np.copy(image)\n # draw each bounding box on your image copy using cv2.rectangle()\n for bbox in bboxes:\n # Draw a rectangle given bbox coordinates\n cv2.rectangle(draw_img, bbox[0], bbox[1], color, thick)\n # return the image copy with boxes drawn\n return draw_img", "def draw_boxs(img,boxs,width=3,color=(0,0,255)):\n box_img = copy.deepcopy(img)\n for i in range(boxs.shape[0]):\n # x1,y1,x2,y2=boxs[i]\n x1 = boxs[i][0]\n y1 = boxs[i][1]\n x2 = boxs[i][2]\n y2 = boxs[i][3]\n p1 = (int(round(x1)),int(round(y1)))\n p2 = (int(round(x2)),int(round(y2)))\n cv2.rectangle(box_img, p1, p2, color, width)\n\n return box_img", "def visualize(self, img, boxes, categories, figsize=(12, 12)):\n _, ax = plt.subplots(figsize=figsize)\n\n # Load image\n ax.imshow(img)\n\n # create boxes and categories by adding rectanges and texts\n for box, category in zip(boxes, categories):\n color = np.random.rand(3,)\n x, y, w, h = box\n rect = patches.Rectangle(\n (x, y), w, h, linewidth=2, edgecolor=color, facecolor='None')\n plt.text(\n x, y - 3, self.idx_cats[category], color=color, fontsize=20)\n ax.add_patches(rect)\n\n plt.show()", "def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):\n # make a copy of the image\n imcopy = np.copy(img)\n # draw each bounding box on your image copy using cv2.rectangle()\n # Iterate through the bounding boxes\n for bbox in bboxes:\n # Draw a rectangle given bbox coordinates\n cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)\n # return the image copy with boxes drawn\n return imcopy", "def generate_boxes(self, img):\r\n return [Box(left, top, img) for (left, top) in self.coords]", "def draw_boxes(bboxes: [[int]], img: 'np.array', line_width: int=2) -> 'np.array':\n for x, y, w, h in bboxes:\n cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), line_width)\n return img", "def _visualize(self, unnorm_image, class_ids, scores, bounding_boxes):\n ax = utils.viz.plot_bbox(unnorm_image,\n bounding_boxes[0],\n scores[0],\n class_ids[0],\n class_names=self._network.classes)\n fig = plt.gcf()\n fig.set_size_inches(14, 14)\n plt.show()", "def plot_boxes_cv2(img, trackers, boxes, colours, savename=None, class_names=None):\n img = np.copy(img)\n\n n_boats = 0\n for tracker in trackers:\n if class_names:\n x1 = int(tracker[0])\n y1 = int(tracker[1])\n x2 = int(tracker[2])\n y2 = int(tracker[3])\n\n # BGR color codes\n rgb = colours[int(tracker[4]) % 32]\n\n img = cv2.putText(\n img,\n \"boat (id: {0})\".format(tracker[4]),\n (x1, y1-6),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.5,\n rgb,\n 1,\n cv2.LINE_AA,\n )\n img = cv2.rectangle(img, (x1, y1), (x2, y2), rgb, 2)\n n_boats += 1\n\n # Infographics box\n sub_img = img[10:60, 10:230]\n white_rect = np.ones(sub_img.shape, dtype=np.uint8) * 255\n res = cv2.addWeighted(sub_img, 0.5, white_rect, 0.5, 1.0)\n img[10:60, 10:230] = res\n\n # Display number of boxes\n img = cv2.putText(\n img,\n 'Number of boats: {0}'.format(n_boats),\n (20, 30),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.5,\n (0, 0, 0),\n 1,\n cv2.LINE_AA,\n )\n\n if savename:\n print(\"save plot results to {}\".format(savename))\n cv2.imwrite(savename, img)\n return img", "def display_instances(image, boxes, masks, ids, names, scores):\r\n n_instances = boxes.shape[0]\r\n colors = random_colors(n_instances)\r\n\r\n if not n_instances:\r\n print('NO INSTANCES TO DISPLAY')\r\n else:\r\n assert 
boxes.shape[0] == masks.shape[-1] == ids.shape[0]\r\n\r\n for i, color in enumerate(colors):\r\n if not np.any(boxes[i]):\r\n continue\r\n\r\n y1, x1, y2, x2 = boxes[i]\r\n label = names[ids[i]]\r\n score = scores[i] if scores is not None else None\r\n caption = '{} {:.2f}'.format(label, score) if score else label\r\n mask = masks[:, :, i]\r\n\r\n image = apply_mask(image, mask, color)\r\n image = cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)\r\n image = cv2.putText(\r\n image, caption, (x1, y1), cv2.FONT_HERSHEY_COMPLEX, 0.7, color, 2\r\n )\r\n\r\n return image", "def _visualize_boxes_and_labels_on_image(\n self,\n boxes,\n classes,\n scores,\n category_index,\n instance_masks=None,\n use_normalized_coordinates=False,\n max_boxes_to_draw=20,\n min_score_thresh=0.5):\n # Create a display string (and color) for every box location, group any boxes\n # that correspond to the same location.\n box_to_display_str_map = collections.defaultdict(list)\n box_to_color_map = collections.defaultdict(str)\n box_to_instance_masks_map = {}\n\n if not max_boxes_to_draw:\n max_boxes_to_draw = boxes.shape[0]\n for i in range(min(max_boxes_to_draw, boxes.shape[0])):\n if scores is None or scores[i] > min_score_thresh:\n box = tuple(boxes[i].tolist())\n if instance_masks is not None:\n box_to_instance_masks_map[box] = instance_masks[i]\n display_str = ''\n if classes[i] in category_index.keys():\n class_name = category_index[classes[i]]['name']\n else:\n class_name = 'N/A'\n display_str = str(class_name)\n if not display_str:\n if scores is None:\n display_str = '?%'\n else:\n display_str = '{}%'.format(int(100*scores[i]))\n else:\n if scores is None:\n display_str = '{}: ?%'.format(display_str)\n else:\n display_str = '{}: {}%'.format(display_str, int(100*scores[i]))\n box_to_display_str_map[box].append(display_str)\n box_to_color_map[box] = self.STANDARD_COLORS[classes[i] % len(self.STANDARD_COLORS)]\n first = True\n mask = None\n # Draw all boxes onto image.\n for idx,(box, color) in enumerate(box_to_color_map.items()):\n ymin, xmin, ymax, xmax = box\n if instance_masks is not None:\n\n if self._shuffle:\n # draw mask for each object\n self._draw_mask_on_image(box_to_instance_masks_map[box]*(idx+1))\n else:\n # stack all masks and draw one big mask\n if first:\n first = False\n mask = box_to_instance_masks_map[box]*(idx+1)\n else:\n mask = np.bitwise_or(mask, box_to_instance_masks_map[box])\n\n self._draw_bounding_box_on_image(\n ymin,\n xmin,\n ymax,\n xmax,\n color=color,\n display_str_list=box_to_display_str_map[box],\n use_normalized_coordinates=use_normalized_coordinates)\n\n # Draw Masks on Image (only one color for all masks)\n if mask is not None and not self._shuffle:\n self._draw_mask_on_image(mask)", "def vis_one_image(\n im, im_name, output_dir, boxes, segms=None, keypoints=None, thresh=0.9,\n kp_thresh=2, dpi=200, box_alpha=0.0, dataset=None, show_class=False,\n ext='jpg'):\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n if isinstance(boxes, list):\n boxes, segms, keypoints, classes = convert_from_cls_format(\n boxes, segms, keypoints)\n\n if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:\n return\n\n if segms is not None:\n masks = mask_util.decode(segms)\n\n color_list = colormap.colormap(rgb=True) / 255\n\n # dataset_keypoints, _ = keypoint_utils.get_keypoints()\n # kp_lines = kp_connections(dataset_keypoints)\n # cmap = plt.get_cmap('rainbow')\n # colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]\n\n fig = 
plt.figure(frameon=False)\n fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)\n ax = plt.Axes(fig, [0., 0., 1., 1.])\n ax.axis('off')\n fig.add_axes(ax)\n ax.imshow(im)\n\n # Display in largest to smallest order to reduce occlusion\n areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])\n sorted_inds = np.argsort(-areas)\n\n mask_color_id = 0\n for i in sorted_inds:\n bbox = boxes[i, :4]\n # score = boxes[i, -1]\n actor_score = boxes[i, -3]\n action_score = boxes[i, -2]\n action_cls = int(boxes[i, -1])\n if actor_score < thresh or action_score < thresh:\n continue\n\n # print(dataset.classes[classes[i]], score)\n # show box (off by default, box_alpha=0.0)\n ax.add_patch(\n plt.Rectangle((bbox[0], bbox[1]),\n bbox[2] - bbox[0],\n bbox[3] - bbox[1],\n fill=False, edgecolor='g',\n linewidth=0.5, alpha=box_alpha))\n\n if show_class:\n ax.text(\n bbox[0], bbox[1] - 2,\n get_class_string(classes[i], actor_score, dataset, is_actor=True)\n + '--' + get_class_string(action_cls, action_score, dataset, is_actor=False),\n fontsize=3,\n family='serif',\n bbox=dict(\n facecolor='g', alpha=0.4, pad=0, edgecolor='none'),\n color='white')\n\n # show mask\n if segms is not None and len(segms) > i:\n img = np.ones(im.shape)\n color_mask = color_list[mask_color_id % len(color_list), 0:3]\n mask_color_id += 1\n\n w_ratio = .4\n for c in range(3):\n color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio\n for c in range(3):\n img[:, :, c] = color_mask[c]\n e = masks[:, :, i]\n\n _, contour, hier = cv2.findContours(\n e.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\n\n for c in contour:\n polygon = Polygon(\n c.reshape((-1, 2)),\n fill=True, facecolor=color_mask,\n edgecolor='w', linewidth=1.2,\n alpha=0.5)\n ax.add_patch(polygon)\n\n output_name = os.path.basename(im_name) + '.' 
+ ext\n fig.savefig(os.path.join(output_dir, '{}'.format(output_name)), dpi=dpi)\n plt.close('all')", "def draw_boxes_on_image(img, bboxes, color=(0, 0, 1), thick=6):\n imcopy = np.copy(img)\n\n for bbox in bboxes:\n cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)\n\n return imcopy", "def show_yolo_boxes(infile):\n try: # If image is a png RGBA, this throws no error\n img = np.asarray(Image.open(infile))\n height, width, _ = img.shape\n except: # Convert to RGBA and continue\n img = np.asarray(Image.open(infile).convert('RGBA'))\n height, width, _ = img.shape\n\n y_top, y_bottom, x_left, x_right = find_pixel_edges(img)\n x,y,w,h = find_yolo_coordinates(y_top, y_bottom, x_left, x_right, width, height)\n \n # Reload image to draw box on\n image = cv2.imread(infile)\n\n start_point = (x_left, y_top) # Top Left\n end_point = (x_left + int(width * w), y_top + int(height * h)) # Bottom Right\n \n color = (0, 255, 0) # Bounding box is Green - Arbitrary choice\n thickness = 2 # Line thickness: 2 pixels\n \n # Using cv2.rectangle() method, draw green rectangle \n image_with_box = cv2.rectangle(image, start_point, end_point, color, thickness)\n\n # Display the image \n cv2.imshow(infile, image_with_box) \n cv2.waitKey(200) # necessary otherwise the image doesn't render\n time.sleep(1)\n cv2.destroyAllWindows()", "def draw_boxes(image, gt_boxes_norm, pre_boxes_norm):\n # Load Image\n image = (image * 255.0).astype(np.uint8)\n image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\n #image = cv2.add(image,image)\n #image = cv2.bitwise_not(image)\n # Draw prediction boxes\n for pre_box_points in pre_boxes_norm:\n image_shape = np.flip(image.shape[0:2], axis=0)\n\n for pre_box_point_idx in range(len(pre_box_points)):\n\n pre_start_point = pre_box_points[pre_box_point_idx] * image_shape\n pre_end_point = pre_box_points[(pre_box_point_idx + 1) % 4] * image_shape\n\n pre_start_point = pre_start_point.astype(np.int32)\n pre_end_point = pre_end_point.astype(np.int32)\n\n cv2.line(\n image, tuple(pre_start_point),\n tuple(pre_end_point),\n (107,222,35), thickness=1)\n\n # Draw boxes if they exist\n if gt_boxes_norm is not None:\n for gt_box_points in gt_boxes_norm:\n for gt_box_point_idx in range(len(gt_box_points)):\n\n gt_start_point = gt_box_points[gt_box_point_idx] * image_shape\n gt_end_point = gt_box_points[(gt_box_point_idx + 1) % 4] * image_shape\n\n gt_start_point = gt_start_point.astype(np.int32)\n gt_end_point = gt_end_point.astype(np.int32)\n\n cv2.line(\n image, tuple(gt_start_point),\n tuple(gt_end_point),\n (0,0,205), thickness=1)\n\n return image", "def displayInstances(image, boxes, masks, ids, names, scores):\n n_instances = boxes.shape[0]\n colours = randomColours(n_instances)\n\n if not n_instances:\n print('NO INSTANCES TO DISPLAY')\n else:\n assert boxes.shape[0] == masks.shape[-1] == ids.shape[0]\n\n for i, colour in enumerate(colours):\n if not np.any(boxes[i]):\n continue\n\n y1, x1, y2, x2 = boxes[i]\n label = names[ids[i]]\n score = scores[i] if scores is not None else None\n caption = '{} {:.2f}'.format(label, score) if score else label\n mask = masks[:, :, i]\n\n image = applyMask(image, mask, colour)\n image = cv2.rectangle(image, (x1, y1), (x2, y2), colour, 1)\n image = cv2.putText(\n image, caption, (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, colour, 1\n )\n\n return image", "def draw_boxes(image, boxes, classes, thickness=4):\n draw = ImageDraw.Draw(image)\n for i in range(len(boxes)):\n bot, left, top, right = boxes[i, ...]\n class_id = int(classes[i])\n color = 'blueviolet'\n 
draw.line([(left, top), (left, bot), (right, bot), (right, top), (left, top)], width=thickness, fill=color)", "def draw_bboxes(img, bboxes):\n colors = tf.cast(np.array([[1, 0, 0, 1]] * 10), dtype=tf.float32)\n img_with_bounding_boxes = tf.image.draw_bounding_boxes(\n img,\n bboxes,\n colors\n )\n plt.figure()\n plt.imshow(img_with_bounding_boxes[0])\n plt.show()", "def visualize_boxes_and_labels_on_image_array(\n debug,\n folder_name,\n last_split_dict,\n image,\n boxes,\n classes,\n scores,\n category_index,\n instance_masks=None,\n instance_boundaries=None,\n keypoints=None,\n use_normalized_coordinates=False,\n max_boxes_to_draw=20,\n min_score_thresh=.5,\n agnostic_mode=False,\n line_thickness=4,\n groundtruth_box_visualization_color='black',\n skip_scores=False,\n skip_labels=False):\n # Create a display string (and color) for every box location, group any boxes\n # that correspond to the same location.\n box_to_display_str_map = collections.defaultdict(list)\n box_to_color_map = collections.defaultdict(str)\n box_to_instance_masks_map = {}\n box_to_instance_boundaries_map = {}\n box_to_keypoints_map = collections.defaultdict(list)\n if not max_boxes_to_draw:\n max_boxes_to_draw = boxes.shape[0]\n for i in range(min(max_boxes_to_draw, boxes.shape[0])):\n if scores is None or scores[i] > min_score_thresh:\n box = tuple(boxes[i].tolist())\n if instance_masks is not None:\n box_to_instance_masks_map[box] = instance_masks[i]\n if instance_boundaries is not None:\n box_to_instance_boundaries_map[box] = instance_boundaries[i]\n if keypoints is not None:\n box_to_keypoints_map[box].extend(keypoints[i])\n if scores is None:\n box_to_color_map[box] = groundtruth_box_visualization_color\n else:\n display_str = ''\n if not skip_labels:\n if not agnostic_mode:\n if classes[i] in category_index.keys():\n class_name = category_index[classes[i]]['name']\n else:\n class_name = 'N/A'\n display_str = str(class_name)\n if not skip_scores:\n if not display_str:\n display_str = '{}%'.format(int(100*scores[i]))\n else:\n display_str = '{}: {}%'.format(display_str, int(100*scores[i]))\n box_to_display_str_map[box].append(display_str)\n if agnostic_mode:\n box_to_color_map[box] = 'DarkOrange'\n else:\n box_to_color_map[box] = STANDARD_COLORS[\n classes[i] % len(STANDARD_COLORS)]\n \n h,w,_ = image.shape\n # Draw all boxes onto image.\n current_split_dict = {}\n count_detection = 0\n for box, color in box_to_color_map.items():\n ymin = int(box[0]*h)\n xmin = int(box[1]*w)\n ymax = int(box[2]*h)\n xmax = int(box[3]*w)\n \n current_split_dict[(xmin,ymin,xmax,ymax)] = image[xmin:xmax, ymin:ymax, :][:]\n stationary = check_stationary(image, (xmin,ymin,xmax,ymax) ,last_split_dict)\n \n if not stationary:\n count_detection += 1\n \n if debug:\n if stationary: #output bounding box in red\n image = cv2.rectangle(image, (xmin,ymin), (xmax,ymax), (255,0,0), line_thickness)\n cv2.putText(image, str(box_to_display_str_map[box][0][-3:-1]), (xmax, ymin), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 0, 0), lineType=cv2.LINE_AA) \n else:\n image = cv2.rectangle(image, (xmin,ymin), (xmax,ymax), (0,255,0), line_thickness)\n cv2.putText(image, str(box_to_display_str_map[box][0][-3:-1]), (xmax, ymin), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 255, 0), lineType=cv2.LINE_AA) \n \n if debug: \n cv2.imwrite(os.path.join(folder_name,'{}.jpg'.format(len(os.listdir(folder_name)))), cv2.cvtColor(image, cv2.COLOR_RGB2BGR))\n \n return count_detection, current_split_dict", "def draw_boxes(image, boxes, box_classes, class_names, scores=None):\n\n image 
= Image.fromarray(np.floor(image * 255 + 0.5).astype('uint8'))\n\n colors = get_colors_for_classes(len(class_names))\n\n for i, c in list(enumerate(box_classes)):\n box_class = class_names[c]\n box = boxes[i]\n color = colors[c]\n if scores is not None:\n score = float(scores[i])\n image = draw_box(image, box, box_class, color, score)\n else:\n image = draw_box(image, box, box_class, color)\n\n return np.array(image)", "def display_instances(image, boxes, masks, ids, names, scores):\n n_instances = boxes.shape[0]\n\n if not n_instances:\n print('NO INSTANCES TO DISPLAY')\n else:\n assert boxes.shape[0] == masks.shape[-1] == ids.shape[0]\n\n for i in range(n_instances):\n if not np.any(boxes[i]):\n continue\n\n y1, x1, y2, x2 = boxes[i]\n label = names[ids[i]]\n print(label)\n color = class_dict[label]\n score = scores[i] if scores is not None else None\n caption = '{} {:.2f}'.format(label, score) if score else label\n mask = masks[:, :, i]\n\n image = apply_mask(image, mask, color)\n image = cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)\n image = cv2.putText(\n image, caption, (x1, y1), cv2.FONT_HERSHEY_COMPLEX, 0.7, color, 2\n )\n\n return image" ]
[ "0.7867931", "0.76596415", "0.756796", "0.74380344", "0.7399842", "0.73794574", "0.71901476", "0.71162546", "0.70859426", "0.7064588", "0.70577294", "0.705543", "0.7016237", "0.6984557", "0.69161266", "0.6911", "0.6865214", "0.6836461", "0.67877877", "0.6779726", "0.6752084", "0.67354524", "0.6728501", "0.66996324", "0.6678656", "0.6662869", "0.6657611", "0.66542506", "0.665073", "0.66384333" ]
0.78662
1
Returns the query that was executed.
def query(self): return self.details[KEY_QUERY]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sql_query(self):\n return self._project.sql_query", "def query(self):\n return self._query", "def query(self):\n return self._query", "def query(self):\n return self._query", "def query(self):\n \n return self._query", "def query(self):\n return self.__query", "def getQuery(self):\n return self._query", "def get_query(self):\n return self.query_class(self)", "def query(self):\n return self.session.query", "def query(self) -> Optional[str]:\n return pulumi.get(self, \"query\")", "def query(self):\n return self.snowflake_options.query", "def query(self) -> pulumi.Output[Optional['outputs.JobQuery']]:\n return pulumi.get(self, \"query\")", "def determine_query():\n return query if query is not None \\\n else f\"SELECT * FROM '{table}';\"", "def get_query(self, row_id):\n return self.get(row_id).query", "def _get_query(self):\n\n endpoint = ENDPOINTS[self.sync_data.endpoint_index]\n marker = self.sync_data.markers.get(self.sync_data.endpoint_index, START_OF_TIME)\n\n query_string = \"select \" + \",\".join(ENDPOINT_QUERY_FIELDS[endpoint])\n\n # ZOQL does not support the `order by` sorting\n query_string = query_string + \" from {} where UpdatedDate > '{}'\".format(\n endpoint,\n marker\n )\n\n return query_string", "def base_query(self) -> Optional[str]:\n return pulumi.get(self, \"base_query\")", "def query(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"query\")", "def query(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"query\")", "def get_query_execution(QueryExecutionId=None):\n pass", "def q(cls) -> Query:\n if not cls.s:\n raise M2Error('No DB session defined')\n return cls.s.query(cls)", "def getLastQuery(self):\n return self.lastQuery", "def query(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"query\")", "def generate_query(self):\n return", "def get_query():\r\n table = query_queue_table\r\n\r\n s = table.select(order_by = sa.asc(table.c.date), limit = 1)\r\n s.append_whereclause(sa.and_(*[table.c.iden != i for i in running]))\r\n r = s.execute().fetchone()\r\n\r\n if r:\r\n return r.iden, r.query\r\n else:\r\n return None, None", "def query(self, query):\n cursor = self.database.cursor()\n cursor.execute(query)\n # If it's a query that's expected to return a value (EG: SELECT)\n if query.strip().lower().startswith('select'): return cursor.fetchall()", "def get_query(self):\r\n\r\n split = self.path_s.split(\"?\", 1)\r\n if len(split) == 1: return \"\"\r\n else: return split[1]", "def get_last_query(self):\n return self.query_history[-1][0] if self.query_history else None", "def construct_query(self):\n reader = QueryReader(filepath=self.filepath, filename=self.filename, raw_sql=self.raw_sql, params=self.params)\n return reader.sql", "def get_query(self, minimal: bool = False) -> Optional[str]:\n if minimal:\n return self.minimal_query\n return self.query", "def query(self):\n return self.event.get('queryStringParameters', dict())" ]
[ "0.8080436", "0.77075416", "0.77075416", "0.77075416", "0.76990044", "0.767134", "0.7641934", "0.7529619", "0.7150072", "0.71093875", "0.7002335", "0.7000758", "0.6994188", "0.69290036", "0.69209105", "0.6887981", "0.6861571", "0.6861571", "0.6849569", "0.684395", "0.6810813", "0.6749521", "0.6722769", "0.6695268", "0.6679644", "0.6673765", "0.6607311", "0.65559506", "0.6544827", "0.65337664" ]
0.7744766
1
Returns the question configuration item for this query.
def question(self): return self.details[KEY_QUESTION]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_question(self):\n question = self.raw_question\n if question is not None:\n return {\n \"question\": self.raw_question\n }", "def get_item(self, key):\n return self.config[key] if key in self.config.keys() else None", "def get_current_question(self):\n self.logger.info(\"Adapter: Get next question\")\n try:\n return self._current_question\n except Exception as e:\n self.logger.info(\"Error on get question : %s\" % e)\n return None", "def __getitem__(self, item):\n return self._config[item]", "def _get_question(self, question_id):\n return get_object_or_404(Question, pk=question_id)", "def config(self):\n return self[CONFIG_KEY]", "def config(self):\n return CurrentProject().config.config[self.key]", "def config(self):\r\n return skillConfig", "def get_queue_settings(qid):\r\n db = get_db()\r\n rows = query_db(GET_QUEUE_SETTINGS_BY_ID, (qid,))\r\n if (not rows) or (len(rows) == 0):\r\n raise sqlite3.Error('The queue does not exist.')\r\n return rows[0]", "def get_config(self, name):\n return self.configs[name][0]", "def detail(self):\n url = '/question/%d' % self.id\n d = req.get(url)\n return parser.detail(d)", "def _get_adapter_config(self):\n proxy = self.core.get_proxy('/')\n try:\n config = proxy.get('/adapters/' + self.adapter_name)\n return config\n except KeyError:\n return None", "def get_question(self, q_id: int) -> Optional[Questions]:\n try:\n queston = self.session.query(Questions).get(q_id)\n\n return queston\n except Exception as excpt:\n self.session.rollback()\n print(f'Could not get question: {excpt}')\n\n return None", "def __getitem__(self, item):\n return self._config.get(item, '') or os.environ.get(item, '')", "def config(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"config\")", "def get_config_connection(self):\n return self.m_connection.config", "def configuration(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"configuration\")", "def answer(self):\n try:\n return Answer.objects.filter(question=self).all()[0]\n except Answer.DoesNotExist, IndexError:\n return None", "def config(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"config\")", "def config(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"config\")", "def query_string_config(self) -> Optional['outputs.RuleRuleConditionQueryStringConfig']:\n return pulumi.get(self, \"query_string_config\")", "def _get_config(self):\n config_dict = self.CONFIG_REGISTRY.get(self._data_type)\n\n # If there is no config for this data_type, use default config and set\n # the query based on the data_type.\n if not config_dict:\n config_dict = self.DEFAULT_CONFIG\n config_dict['query'] = 'data_type:\"{0}\"'.format(self._data_type)\n\n config_dict['index'] = self._index\n config_dict['data_type'] = self._data_type\n return config_dict", "def question():\n\n # @todo: prep to populate question_l10n from question\n\n return s3_rest_controller(rheader = s3db.dc_rheader)", "def __getitem__(self, name):\n return self.config[name]", "def get_config(self):\n return self.config", "def get(query):\n global INITIALIZED\n global CONFIG\n global GLOBAL_CONFIG\n\n if not INITIALIZED:\n raise Exception('[XOS-Config] Module has not been initialized')\n\n val = Config.get_param(query, CONFIG)\n if not val:\n val = Config.get_param(query, GLOBAL_CONFIG)\n if not val:\n val = Config.get_param(query, default.DEFAULT_VALUES)\n if not val:\n # TODO if no val return none\n # raise Exception('[XOS-Config] Config does not have a 
value (or a default) parameter %s' % query)\n return None\n return val", "def configuration(self):\n return self._config", "def configuration(self) -> Optional[pulumi.Input['BrokerConfigurationArgs']]:\n return pulumi.get(self, \"configuration\")", "def configuration(self) -> Optional[pulumi.Input['BrokerConfigurationArgs']]:\n return pulumi.get(self, \"configuration\")", "def config(self):\n annotations = IAnnotations(self.context)\n return annotations.get(CONFIGURATION_KEY, {})" ]
[ "0.6725646", "0.6343248", "0.6231551", "0.62013745", "0.5762635", "0.5752337", "0.5736601", "0.5728157", "0.5649923", "0.5638383", "0.5637754", "0.5636276", "0.56350523", "0.56283134", "0.55637985", "0.5533542", "0.5525235", "0.55054075", "0.5498197", "0.5498197", "0.5478393", "0.54712707", "0.5429216", "0.54215", "0.5414192", "0.54041773", "0.539524", "0.5377708", "0.5377708", "0.5376509" ]
0.68976724
0
Returns the summary configuration item for this query.
def query_summary(self): return self.details[KEY_QUERY_SUMMARY]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def summary(self):\n if hasattr(self,\"_summary\"):\n return self._summary\n else:\n return {}", "def getSummary(self):\n return self.base.get(\"summary\", [])", "def getSummary(self):\n return self.summary", "def summary(self) -> str:\n return pulumi.get(self, \"summary\")", "def summary(self):\n return self._summary", "def summary(self):\n return self._summary", "def summary(self):\n return self._summary", "def summary(self):\n return self._summary", "def summary(self):\n return self._summary", "def summary(self):\n return self._summary", "def summary(self):\n return self._summary", "def summary(self):\r\n return summary.Summary(self.parent, self.object_id)", "def _obtain_summary(self):\n if self._summary is None:\n if self._metadata:\n self._summary = ResultSummary(\n self._connection.unresolved_address, **self._metadata\n )\n elif self._connection:\n self._summary = ResultSummary(\n self._connection.unresolved_address,\n server=self._connection.server_info\n )\n\n return self._summary", "def summary(self):\n response = self._get(self.uri_for(\"summary\"))\n return json_to_py(response)", "def get_summary(self):\n return self.model.summary()", "def summary(self) -> Optional[pulumi.Input['OutcomeSummary']]:\n return pulumi.get(self, \"summary\")", "def summary(self):\n res = \", \".join(\n elem[\"summary\"] for elem in self.status[\"health\"][\"summary\"]\n )\n if res:\n return res\n elif self.detail:\n return self.detail[0]\n return \"\"", "def summary(self):\n return self._fastqc_summary", "def summary(self, i):\n return self.__summaries[i]", "def summary(self):\n return get_paragraphs(self.description)[0]", "def get_account_summary(self):\r\n return self.get_object('GetAccountSummary', {}, SummaryMap)", "def summary(self):\n raise NotImplementedError", "def metadata(self) -> global___SummaryMetadata:", "def summary(self, checkid):\r\n return summary.Summary(self, checkid)", "def _get_summary(self):\n\n logger.warning('_get_summary() has been deprecated since 3.6.4. '\n 'Use the summary decorator instead')\n return self.summary", "def summary(self):\n return self.model.summary()", "def GetSummary(self):\n self.max_age = 0\n summary = rdfvalue.ClientSummary(client_id=self.urn)\n summary.system_info.node = self.Get(self.Schema.HOSTNAME)\n summary.system_info.system = self.Get(self.Schema.SYSTEM)\n summary.system_info.release = self.Get(self.Schema.OS_RELEASE)\n summary.system_info.version = str(self.Get(self.Schema.OS_VERSION, \"\"))\n summary.system_info.kernel = self.Get(self.Schema.KERNEL)\n summary.system_info.fqdn = self.Get(self.Schema.FQDN)\n summary.system_info.machine = self.Get(self.Schema.ARCH)\n summary.system_info.install_date = self.Get(\n self.Schema.INSTALL_DATE)\n summary.users = self.Get(self.Schema.USER)\n summary.interfaces = self.Get(self.Schema.LAST_INTERFACES)\n summary.client_info = self.Get(self.Schema.CLIENT_INFO)\n summary.serial_number = self.Get(self.Schema.HARDWARE_INFO).serial_number\n summary.timestamp = self.age\n\n return summary", "def summary(self) -> str:\n pass", "def get_summary(self, s, base=None):\n summary = summary_patt.search(s).group()\n if base is not None:\n self.params[base + \".summary\"] = summary\n return summary", "def summarize(self):\n\n if self.summarizer == None:\n return \"The summarizer has not been initialised\"\n elif not isinstance(self.summarizer, Summarizer):\n raise TypeError(\"summarizer is not a Summarizer object\")\n\n self.summary = self.summarizer.summarize(self.article)\n return self.summary" ]
[ "0.694487", "0.6819714", "0.68172693", "0.6718434", "0.66479325", "0.66479325", "0.66479325", "0.66479325", "0.66479325", "0.66479325", "0.66479325", "0.66351444", "0.65762407", "0.6470729", "0.6427356", "0.63207453", "0.62939316", "0.6261002", "0.6137292", "0.60124767", "0.5913395", "0.588587", "0.586223", "0.58588713", "0.5843496", "0.5837151", "0.5804426", "0.5798297", "0.57917154", "0.5786561" ]
0.7261878
0
Fills in the target_query attribute with observable value and time specification for correlation.
def build_target_query(self, observable: Observable, **kwargs) -> None: # XXX for some reason the self.target_query is getting cached when the same module runs for the same analysis # for different observables if '<O_VALUE>' not in self.target_query: self._reload_target_query() logging.debug(f"had to reset self.target_query to clear previous use") self.target_query = self.target_query.replace('<O_TYPE>', observable.type) \ .replace('<O_VALUE>', observable.value) # TODO property escape stuff source_time = kwargs.get('source_event_time') or observable.time or self.root.event_time_datetime # if we are going off of the event time, then we use the wide duration start_time = source_time - self.wide_duration_before stop_time = source_time + self.wide_duration_after # if observable time is available, we can narrow our time spec duration if observable.time is not None: start_time = source_time - self.narrow_duration_before stop_time = source_time + self.narrow_duration_after self.fill_target_query_timespec(start_time, stop_time)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fill_target_query_timespec(self, start_time: str or datetime.datetime, stop_time: str or datetime.datetime) -> None:\n pass", "async def test_transaction_specific_response_time_target(self):\n self.set_source_parameter(\"transaction_specific_target_response_times\", [\"[Bb]ar:150\"])\n response = await self.collect(get_request_json_return_value=self.GATLING_JSON)\n self.assert_measurement(response, value=\"1\", entities=self.expected_entities[:1])", "def join_target(self):\n df = self.get_all_data()\n target_df = self.get_target_df().copy(deep=True)\n target_df['ft_data_dt'] = target_df['ft_data_dt'].astype('datetime64[M]') - pd.DateOffset(months=2) + MonthEnd(1)\n df = df.merge(target_df, on=['idd', 'ft_data_dt'], how='left')\n values = {'target': 0}\n df['target'] = df['target'].replace(np.nan, 0)\n self.set_prep_data(df)", "def get_target_info(self, target, time_start=None, time_stop=None,\n time_interval=5):\n\n def _set_time(dtime):\n # Sets time to nice rounded value\n y, m ,d, hh, mm, ss = dtime.tuple()\n mm = mm - (mm % 5)\n return ephem.Date(datetime(y, m , d, hh, mm, 5, 0))\n\n def _set_data_range(time_start, time_stop, t_ival):\n # Returns numpy array of dates\n ss = _set_time(ephem.Date(ephem.Date(time_start) - t_ival))\n sr = _set_time(ephem.Date(ephem.Date(time_stop) + t_ival))\n return np.arange(ss, sr, t_ival)\n\n if time_start is None:\n # default for start time is sunset on the current date\n time_start = self.sunset()\n if time_stop is None:\n # default for stop time is sunrise on the current date\n time_stop = self.sunrise(date=time_start)\n\n t_range = _set_data_range(self.date_to_utc(time_start),\n self.date_to_utc(time_stop),\n time_interval * ephem.minute)\n #print('computing airmass history...')\n history = []\n\n # TODO: this should probably return a generator\n for ut in t_range:\n # ugh\n tup = ephem.Date(ut).tuple()\n args = tup[:-1] + (int(tup[-1]),)\n ut_with_tz = datetime(*args).replace(tzinfo=self.tz_utc)\n info = target.calc(self, ut_with_tz)\n history.append(info)\n #print(('computed airmass history', self.history))\n return history", "def __init__(self, target):\n\n if target <= 1:\n raise ValueError(f\"Target iteration of ETA must be > 1, got {target}\")\n\n self.targetIteration = target\n self._ti = None # initial time\n self._xi = None # iteration when initial time was measured", "def update_rainfall_obs(target_model, method, timestep, start_time, end_time):\n obs_start = datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S')\n try:\n\n # Connect to the database\n curw_obs_pool = get_Pool(host=con_params.CURW_OBS_HOST, user=con_params.CURW_OBS_USERNAME,\n password=con_params.CURW_OBS_PASSWORD, port=con_params.CURW_OBS_PORT,\n db=con_params.CURW_OBS_DATABASE)\n\n curw_obs_connection = curw_obs_pool.connection()\n\n curw_sim_pool = get_Pool(host=con_params.CURW_SIM_HOST, user=con_params.CURW_SIM_USERNAME,\n password=con_params.CURW_SIM_PASSWORD, port=con_params.CURW_SIM_PORT,\n db=con_params.CURW_SIM_DATABASE)\n\n TS = Timeseries(pool=curw_sim_pool)\n\n # [hash_id, station_id, station_name, latitude, longitude]\n active_obs_stations = extract_active_curw_obs_rainfall_stations(start_time=start_time, end_time=end_time)[1:]\n obs_stations_dict = { } # keys: obs station id , value: [hash id, name, latitude, longitude]\n\n for obs_index in range(len(active_obs_stations)):\n obs_stations_dict[active_obs_stations[obs_index][1]] = [active_obs_stations[obs_index][0],\n active_obs_stations[obs_index][2],\n active_obs_stations[obs_index][3],\n 
active_obs_stations[obs_index][4]]\n\n for obs_id in obs_stations_dict.keys():\n meta_data = {\n 'latitude': float('%.6f' % float(obs_stations_dict.get(obs_id)[2])),\n 'longitude': float('%.6f' % float(obs_stations_dict.get(obs_id)[3])),\n 'model': target_model, 'method': method,\n 'grid_id': 'rainfall_{}_{}'.format(obs_id, obs_stations_dict.get(obs_id)[1])\n }\n\n tms_id = TS.get_timeseries_id_if_exists(meta_data=meta_data)\n\n if tms_id is None:\n tms_id = TS.generate_timeseries_id(meta_data=meta_data)\n meta_data['id'] = tms_id\n TS.insert_run(meta_data=meta_data)\n\n TS.update_grid_id(id_=tms_id, grid_id=meta_data['grid_id'])\n\n obs_hash_id = obs_stations_dict.get(obs_id)[0]\n\n obs_timeseries = []\n\n if timestep == 5:\n ts = extract_obs_rain_5_min_ts(connection=curw_obs_connection, start_time=obs_start, end_time=end_time,\n id=obs_hash_id)\n if ts is not None and len(ts) > 1:\n obs_timeseries.extend(process_5_min_ts(newly_extracted_timeseries=ts, expected_start=obs_start)[1:])\n # obs_start = ts[-1][0]\n elif timestep == 15:\n ts = extract_obs_rain_15_min_ts(connection=curw_obs_connection, start_time=obs_start, end_time=end_time,\n id=obs_hash_id)\n if ts is not None and len(ts) > 1:\n obs_timeseries.extend(process_15_min_ts(newly_extracted_timeseries=ts, expected_start=obs_start)[1:])\n # obs_start = ts[-1][0]\n\n # for i in range(len(obs_timeseries)):\n # if obs_timeseries[i][1] == -99999:\n # obs_timeseries[i][1] = 0\n\n if obs_timeseries is not None and len(obs_timeseries) > 0:\n TS.replace_data(timeseries=obs_timeseries, tms_id=tms_id)\n\n except Exception as e:\n traceback.print_exc()\n logger.error(\"Exception occurred while updating obs rainfalls in curw_sim.\")\n finally:\n curw_obs_connection.close()\n destroy_Pool(pool=curw_sim_pool)\n destroy_Pool(pool=curw_obs_pool)", "def query_from(self, temporal):\n raise NotImplementedError()", "def update_target(self):\n with torch.no_grad():\n for target_q_param, q_param in zip(self.target_q_funcs.parameters(), self.q_funcs.parameters()):\n target_q_param.data.copy_(self.tau * q_param.data + (1.0 - self.tau) * target_q_param.data)\n for target_pi_param, pi_param in zip(self.target_policy.parameters(), self.policy.parameters()):\n target_pi_param.data.copy_(self.tau * pi_param.data + (1.0 - self.tau) * target_pi_param.data)", "def update_target_dqn(self):\n\n for learning_parameter in self.dqn.learning_parameters:\n dqn_value = self.dqn.get_value(learning_parameter, self.tf_session)\n if(dqn_value is not None):\n self.target_dqn.set_value(\n learning_parameter, dqn_value, self.tf_session)\n else:\n print(\"Impossible to set value: None\")", "def query_runtime(self, query_runtime):\n\n self._query_runtime = query_runtime", "def query(self, query):\n self._query = query", "def simulate(self, query):\n return self.master.simulate(query)", "def query(self, query):\n\n self._query = query", "def query(self, query):\n\n self._query = query", "def query(self, query):\n\n self._query = query", "def _query_end_set(self, value):\n self._query_end = self._prep_coord(value, \"query_start\", ge)", "def _update_target(self):\n self.target_dqn.load_state_dict(self.dqn.state_dict())", "def _reset_query(self):\n self.query = pysnow.QueryBuilder()\n self.desired_response_fields = list()", "def __init__(self, learn_q, target_estimator, td_loss_fcn=None):\n super(FitTargetQ, self).__init__()\n # unpack params\n self._q, self._target_estimator = learn_q, target_estimator\n if td_loss_fcn is None:\n td_loss_fcn = tf.square\n # need computed target Q 
values and selected action as input\n self._input_target_q = tf.placeholder(\n dtype=tf.float32, shape=[None], name=\"input_target_q\")\n self._input_action = tf.placeholder(\n dtype=tf.uint8, shape=[None], name=\"input_action\")\n self._input_sample_weight = tf.placeholder_with_default([1.0], shape=[None], name=\"input_weight\")\n op_q = learn_q.output().op\n num_actions = learn_q.output().op.shape.as_list()[-1]\n self.selected_q = tf.reduce_sum(\n tf.one_hot(self._input_action, num_actions) * op_q, axis=1)\n self._op_td = self.selected_q - self._input_target_q\n self._op_losses = td_loss_fcn(self._op_td)\n self._op_losses_weighted = self._op_losses * self._input_sample_weight\n self._sym_loss = tf.reduce_mean(self._op_losses_weighted)\n self._update_operation = network.MinimizeLoss(self._sym_loss, var_list=self._q.variables)", "def set_query(self, query):\n query = pylastica.query.Query.create(query)\n data = query.to_dict()\n return self.set_param('query', data['query'])", "def _refresh_target(self) -> None:\n self.target = self.combat.get_target(self.ability, self.actor)\n self.damage_calculator = DamageCalculator(self.target,\n self.actor,\n self.ability)", "def set_target_sequence(self):\n self.target_sequence = ''\n target_residues = []\n if len(self.target_residues) < 1:\n for i in range(0, self.target.size()):\n target_residues.append(i+1)\n self.target_residues = target_residues\n for resnum in self.target_residues:\n self.target_sequence += self.target.sequence(resnum, resnum)", "def observable(self, target, time_start, time_stop,\n el_min_deg, el_max_deg, time_needed,\n airmass=None, moon_sep=None):\n # set observer's horizon to elevation for el_min or to achieve\n # desired airmass\n if airmass != None:\n # compute desired altitude from airmass\n alt_deg = airmass2alt(airmass)\n min_alt_deg = max(alt_deg, el_min_deg)\n else:\n min_alt_deg = el_min_deg\n\n site = self.get_site(date=time_start, horizon_deg=min_alt_deg)\n\n d1 = self.calc(target, time_start)\n\n # TODO: worry about el_max_deg\n\n # important: ephem only deals with UTC!!\n time_start_utc = ephem.Date(self.date_to_utc(time_start))\n time_stop_utc = ephem.Date(self.date_to_utc(time_stop))\n #print(\"period (UT): %s to %s\" % (time_start_utc, time_stop_utc))\n\n if d1.alt_deg >= min_alt_deg:\n # body is above desired altitude at start of period\n # so calculate next setting\n time_rise = time_start_utc\n time_set = site.next_setting(target.body._body,\n start=time_start_utc)\n #print(\"body already up: set=%s\" % (time_set))\n\n else:\n # body is below desired altitude at start of period\n try:\n time_rise = site.next_rising(target.body._body,\n start=time_start_utc)\n time_set = site.next_setting(target.body._body,\n start=time_start_utc)\n except ephem.NeverUpError:\n return (False, None, None)\n\n #print(\"body not up: rise=%s set=%s\" % (time_rise, time_set))\n ## if time_rise < time_set:\n ## print(\"body still rising, below threshold\")\n ## # <-- body is still rising, just not high enough yet\n ## else:\n ## # <-- body is setting\n ## print(\"body setting, below threshold\")\n ## # calculate rise time backward from end of period\n ## #time_rise = site.previous_rising(target.body, start=time_stop_utc)\n ## pass\n\n if time_rise < time_start_utc:\n diff = time_rise - time_start_utc\n ## raise AssertionError(\"time rise (%s) < time start (%s)\" % (\n ## time_rise, time_start))\n print((\"WARNING: time rise (%s) < time start (%s)\" % (\n time_rise, time_start)))\n time_rise = time_start_utc\n\n # last observable 
time is setting or end of period,\n # whichever comes first\n time_end = min(time_set, time_stop_utc)\n # calculate duration in seconds (subtracting two ephem Date\n # objects seems to give a fraction in days)\n duration = (time_end - time_rise) * 86400.0\n # object is observable as long as the duration that it is\n # up is as long or longer than the time needed\n diff = duration - float(time_needed)\n #can_obs = diff > -0.001\n can_obs = duration > time_needed\n #print(\"can_obs=%s duration=%f needed=%f diff=%f\" % (\n # can_obs, duration, time_needed, diff))\n\n # convert times back to datetime's\n time_rise = self.date_to_local(time_rise.datetime())\n time_end = self.date_to_local(time_end.datetime())\n\n return (can_obs, time_rise, time_end)", "def update(self, target, query):\n node = self._data[target]\n name = \"%s node %.8s\" % (node['type'], target)\n\n query.update({\n 'type': node['type'],\n 'model': node['model']\n })\n\n logger.info(\"Validating query\")\n NodeValidator.validate(query)\n\n self._data[target] = dict_update(node, query, name)\n logger.info(\"Updated parameters above of %s\" % name)\n\n return {target: self._data[target]}", "def _create_target_network_update_op(self, q_network, target_q_network):\n variables = q_network.get_variables()\n target_variables = target_q_network.get_variables()\n # problem\n return tf.group([\n tf.assign(target_v, target_v + self.tau * (v - target_v)) # same as original arm\n for (target_v, v) in zip(target_variables, variables)\n ])", "def target(self, target):\n self.__target = float(target)", "def register_observation(self, target: DriverTarget) -> None:\n self.register_observed_target(target=target)", "def simulate(self, start_time = None, final_time = None, time = pd.DatetimeIndex([]), input = None, complete_res = False):\n \n # Number of input variables needed by the model\n Ninputs = len(self.inputs)\n \n # Check if the parameter time has been provided\n if len(time) == 0:\n # Take the time series: the first because now they are all the same\n time = self.inputs[0].get_data_series().index\n else:\n # Check that the type of the time vector is of type pd.DatetimeIndex\n if not isinstance(time, pd.DatetimeIndex):\n raise TypeError(\"The parameter time has to be a vector of type pd.DatetimeIndex\")\n \n # Define initial start time in seconds\n if start_time is None:\n start_time = time[0]\n else:\n # Check that the type of start time is of type datetime\n if not isinstance(start_time, datetime.datetime):\n raise TypeError(\"The parameter start_time is of type %s, it has to be of datetime.datetime type.\" % (str(start_time)))\n # Check if the start time is within the range\n if not (start_time >= time[0]) and (start_time <= time[-1]):\n raise IndexError(\"The value selected as initialization start time is outside the time frame\")\n \n # If the offset is defined, the start time in seconds needs to reference\n # the offset instead of the first time stamp\n if self.offset:\n start_time_sec = (start_time - self.offset).total_seconds()\n else:\n start_time_sec = (start_time - time[0]).total_seconds()\n \n # Define the final time in seconds\n if final_time == None:\n final_time = time[-1]\n else:\n # Check that the type of start time is of type datetime\n if not isinstance(final_time, datetime.datetime):\n raise TypeError(\"The parameter final_time is of type %s, it has to be of datetime.datetime type.\" % (str(start_time)))\n # Check if the final time is within the range\n if not (final_time >= time[0]) and (final_time <= time[-1]):\n 
raise IndexError(\"The value selected as initialization start time is outside the time frame\")\n # Check that the final time is after the start time\n if not (final_time >= start_time):\n raise IndexError(\"The final_time %s has to be after the start time %s.\" % \\\n (str(final_time), str(start_time)))\n \n # If the offset is defined, the final time in seconds needs to reference\n # the offset instead of the first time stamp\n if self.offset:\n final_time_sec = (final_time - self.offset).total_seconds()\n else:\n final_time_sec = (final_time - time[0]).total_seconds()\n \n # Transforms to seconds with respect to the first element, again\n # if the offset is defined it needs to be used as reference\n Npoints = len(time)\n time_sec = numpy.zeros((Npoints,1))\n for i in range(Npoints):\n if self.offset:\n time_sec[i,0] = (time[i] - self.offset).total_seconds()\n else:\n time_sec[i,0] = (time[i] - time[0]).total_seconds()\n \n # Convert to numpy matrix in case it will be stacked in a matrix\n time_sec = numpy.matrix(time_sec)\n \n # Reshape to be consistent\n time_sec = time_sec.reshape(-1, 1)\n \n if input is None:\n # Take all the data series\n inputMatrix = numpy.matrix(numpy.zeros((Npoints, Ninputs)))\n \n i = 0\n for inp in self.inputs:\n dataInput = numpy.matrix(inp.get_data_series().values).reshape(-1,1)\n inputMatrix[:, i] = dataInput[:,:]\n i += 1\n # Define the input trajectory\n V = numpy.hstack((time_sec, inputMatrix))\n \n else:\n # Reshape to be consistent\n input = input.reshape(-1, Ninputs)\n # Define the input trajectory\n V = numpy.hstack((time_sec, input))\n \n # The input trajectory must be an array, otherwise pyfmi does not work\n u_traj = numpy.array(V)\n \n # Create input object\n names = self.get_input_names()\n input_object = (names, u_traj)\n \n # Start the simulation\n simulated = False\n i = 0\n while not simulated and i < self.SIMULATION_TRIES:\n try:\n res = self.fmu.simulate(start_time = start_time_sec, input = input_object, final_time = final_time_sec, options = self.opts)\n simulated = True\n except ValueError:\n logger.debug(\"Simulation of the model from {0} to {1} failed, try again\".format(start_time_sec, final_time_sec))\n i += 1\n except Exception, e:\n logger.warn(\"Exception during simulation: {0}\".format(str(e)))\n logger.warn(\"Simulation of the model failed between {0} and {1}, try again\".format(start_time, final_time))\n i += 1 \n \n # Check if the simulation has been done, if not throw an exception\n if not simulated:\n logger.error(\"Not possible to simulate the model, more than {0} unsuccessful tries\".format(self.SIMULATION_TRIES))\n logger.error(\"Error log from PyFMI: {0}\".format(self.fmu.get_log()))\n raise Exception\n \n # Obtain the results\n # TIME in seconds has to be converted to datetime\n # and it has to maintain the same offset specified by the input time series in t[0]\n if self.offset:\n offset_res = self.offset - pd.to_datetime(0, unit = 's', utc = True)\n t = pd.to_datetime(res[fmu_util_strings.TIME_STRING], unit=\"s\", utc = True) + offset_res\n else:\n offset_res = time[0] - pd.to_datetime(res[fmu_util_strings.TIME_STRING][0], utc = True)\n t = pd.to_datetime(res[fmu_util_strings.TIME_STRING], unit=\"s\", utc = True) + offset_res\n \n # Get the results, either all or just the selected ones\n if complete_res is False:\n # OUTPUTS\n output_names = self.get_output_names()\n results = {}\n for name in output_names:\n results[name] = res[name]\n # STATES OBSERVED\n var_names = self.get_variable_names()\n for name in var_names:\n 
results[name] = res[name]\n # PARAMETERS\n par_names = self.get_parameter_names()\n for name in par_names:\n results[name] = res[name]\n \n # THE OVERALL STATE\n results[\"__ALL_STATE__\"]=self.get_state()\n results[\"__OBS_STATE__\"]=self.get_state_observed_values()\n results[\"__PARAMS__\"]=self.get_parameter_values()\n results[\"__OUTPUTS__\"]=self.get_measured_outputs_values()\n results[\"__ALL_OUTPUTS__\"]=self.get_outputs_values()\n \n else:\n # All the results are given back\n results = res\n \n # Return the results\n return t, results", "def get(self, query):\n\n if not isinstance(query, dict):\n raise ValueError('Query should be a python dict')\n\n ##set default query containing all values\n default_query = {\n 'cell_id': self.cell_id,\n 'treatment': self.treatments,\n 'time_point': self.time_points,\n 'replicate': self.replicates\n }\n default_query.update(query)\n\n for k, v in list(query.items()):\n if not isinstance(v, list):\n query[k] = [v]\n\n\n available_labels = ['treatment', 'cell_id', 'time_point', 'replicate']\n\n\n labels = list(default_query.keys())\n if len(labels) not in list(range(1, 5)):\n raise ValueError('labels must be a list of length 1, 2, 3, or 4')\n\n for label in labels:\n if label not in available_labels:\n raise ValueError('\"{}\" not in accepted list of labels. These are '\n 'accepted \"{}\"'.format(label, available_labels))\n\n\n if label is 'cell_id':\n cell_id_query = reduce(\n lambda x, y: \"{} or {}\".format(x, y),\n ['cell_id == \"{}\"'.format(i) for i in default_query[label]]\n )\n if label is 'treatment':\n treatment_query = reduce(\n lambda x, y: \"{} or {}\".format(x, y),\n ['treatment == \"{}\"'.format(i) for i in default_query[label]]\n )\n\n if label is 'time_point':\n time_query = reduce(\n lambda x, y: \"{} or {}\".format(x, y),\n ['time_point == {}'.format(i) for i in default_query[label]]\n )\n\n if label is 'replicate':\n replicate_query = reduce(\n lambda x, y: \"{} or {}\".format(x, y),\n ['replicate == {}'.format(i) for i in default_query[label]]\n )\n\n final_query = \"({}) and ({}) and ({}) and ({})\".format(\n cell_id_query, replicate_query, treatment_query,\n time_query\n )\n df = self.design.query(final_query)\n return self.data[self.data['Sample'].isin(df['Sample'])]", "def _history(self, target, phases=None, with_actual=True, y0_dict=None):\n # Include actual data or not\n with_actual = with_actual and target in self.VALUE_COLUMNS\n # Get tracking data\n df = self.track(phases=phases, with_actual=with_actual, y0_dict=y0_dict)\n if target not in df.columns:\n col_str = \", \".join(list(df.columns))\n raise KeyError(f\"@target must be selected from {col_str}, but {target} was applied.\")\n # Select the records of target variable\n return df.pivot_table(\n values=target, index=self.DATE, columns=self.SERIES, aggfunc=\"last\")" ]
[ "0.604419", "0.53452486", "0.52948", "0.5195184", "0.5151387", "0.51285744", "0.5099029", "0.5069341", "0.5056171", "0.50234467", "0.5007837", "0.49828333", "0.4980737", "0.4980737", "0.4980737", "0.49167588", "0.49151227", "0.4892735", "0.4863999", "0.4849976", "0.484692", "0.48247382", "0.48150575", "0.48123282", "0.4804629", "0.4794886", "0.47644165", "0.47587827", "0.47580287", "0.47557124" ]
0.75333405
0
Cycle through result keys in order to extract mapped observables and add to alert. REQUIRED in order to 'automatically' add observables from field mapping; recommended to use in self.query_results. Includes a call for each extracted observable to the optional process_field_mapping, which will simply pass if unimplemented.
def extract_result_observables(self, analysis, result: dict, observable: Observable = None, result_time: str or datetime.datetime = None) -> None: for result_field in result.keys(): if result[result_field] is None: continue # do we have this field mapped? if result_field in self.observable_mapping: observable = analysis.add_observable(self.observable_mapping[result_field], self.filter_observable_value(result_field, self.observable_mapping[result_field], result[result_field]), o_time=result_time) self.process_field_mapping(analysis, observable, result, result_field, result_time)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_field_mapping(self, analysis, observable: Observable, result, result_field, result_time=None) -> None:\n pass", "def process_query_results(self, query_results: dict or list, analysis, observable: Observable) -> None:\n pass", "def handle_result(self, results: List[Dict], **info):\n pass", "def _ProcessQueryResult(self, result):\n self.__more_results = result.more_results()\n\n if self.__keys_only:\n return [Key._FromPb(e.key()) for e in result.result_list()]\n else:\n return [Entity._FromPb(e) for e in result.result_list()]", "def process(self, results):\n raise NotImplementedError", "def _do_mapping(self):\n pass", "def analog_mapping_response(self, data):\n self.analog_mapping_query_results = data", "def aggregate_results(self):\n\n raise NotImplementedError", "def processSearchResult(self):", "def map(self, records, task):\n for key, json in records:\n record = happy.json.decode(json)\n if happy.flow.isIterable(self.aggkey):\n outkey = ''\n for ak in self.aggkey:\n if record.has_key(ak):\n outkey = outkey + record[ak] + \":\"\n task.collect(outkey, json) \n elif record.has_key(self.aggkey):\n if (record[self.aggkey]):\n task.collect(record[self.aggkey], json)", "def _set_result_mapping(self, provider_name, mapping):\n provider_mapping = self._result_mappings.setdefault(provider_name, {})\n if mapping:\n provider_mapping.update(mapping)\n # Ensure the reverse mapping/index is updated (for faster lookups).\n for name, index in provider_mapping.items():\n entries = self._reverse_mapping.setdefault(name, [])\n provider = _Provider(provider_name, index)\n if provider not in entries:\n entries.append(provider)", "def _check_all_results_provided(self, atom_name, container):\n result_mapping = self._result_mappings.get(atom_name)\n if not result_mapping:\n return\n for name, index in result_mapping.items():\n try:\n _item_from(container, index)\n except _EXTRACTION_EXCEPTIONS:\n LOG.warning(\"Atom '%s' did not supply result \"\n \"with index %r (name '%s')\", atom_name, index,\n name)", "def __format_results__(self, result_rows):\n for row in result_rows:\n self.return_dict['results']['items'].append(row)", "def on_trial_result(self, trial_id: str, result: Dict):\r\n pass", "def mapList(results, key):\n newResult = results.map(lambda x: ee.Dictionary(x).get(key))\n return newResult", "def generic_extract_result(self, request, result):\n return request, map(operator.itemgetter(0), result)", "def get_invalidation_keys(self, results):\r\n related_fields = self.queryset._related_fields\r\n for obj in results:\r\n for field, model_class in related_fields.iteritems():\r\n pk_name = model_class._meta.pk.attname\r\n cache_signals.register(model_class, pk_name, 'exact')\r\n for value in get_values(obj, field):\r\n invalidation_key = get_invalidation_key(\r\n model_class._meta.db_table, \r\n accessor_path = pk_name, \r\n value = value)\r\n yield invalidation_key", "def ProcessResultForPublishing(self, result, key): # pragma: no cover.\n # This method needs to get overwritten by subclasses FinditForCracas and\n # FinditForFracas.\n raise NotImplementedError()", "def add_results(self, results):\n if self.replication_counter < self.replication_num:\n for metric in self.metrics:\n self.metric_final_results[metric].append(results[metric])\n\n self.replication_counter += 1\n else:\n raise Exception(\"The requested metric collection call of {}/{} exceeds the number of pre-defined replication\".format(self.replication_counter, self.replication_num))", "def _extract_results(self) -> None:\n 
metric_name = self.metric.name\n for inference_name in ['train', 'test', 'opt']:\n # TODO: Extract information from self.search_results\n data = getattr(self.search_results, f'{inference_name}_metric_dict')[metric_name]\n if all([d is None for d in data]):\n if inference_name not in OPTIONAL_INFERENCE_CHOICES:\n raise ValueError(f\"Expected {metric_name} score for {inference_name} set\"\n f\" to not be None, but got {data}\")\n else:\n continue\n self.data[f'single::{inference_name}::{metric_name}'] = np.array(data)\n\n if self.ensemble_results.empty() or inference_name == 'opt':\n continue\n\n data = getattr(self.ensemble_results, f'{inference_name}_scores')\n if all([d is None for d in data]):\n if inference_name not in OPTIONAL_INFERENCE_CHOICES:\n raise ValueError(f\"Expected {metric_name} score for {inference_name} set\"\n f\" to not be None, but got {data}\")\n else:\n continue\n self.data[f'ensemble::{inference_name}::{metric_name}'] = np.array(data)", "def _read_group_fill_results(self, cr, uid, domain, groupby,\n remaining_groupbys, aggregated_fields,\n count_field, read_group_result,\n read_group_order=None, context=None):\n if groupby == 'week_number':\n WEEK_DICT = dict(self.WEEKS)\n for result in read_group_result:\n week = result['week_number']\n result['week_number'] = (week, WEEK_DICT.get(week))\n return super(calendar_event, self)._read_group_fill_results(\n cr, uid, domain, groupby, remaining_groupbys, aggregated_fields,\n count_field, read_group_result, read_group_order, context\n )", "def applyMapping(self):\n pass", "def _enrich_results(self, record, query):\n record['metadata.query_name'] = query['name']\n record['metadata.query_id'] = '{}_{}'.format(\n query['name'], self.run_tag)\n record['metadata.query_description'] = query['description']\n record['metadata.query_headers'] = query['headers']\n record['@timestamp'] = int(round(time.time() * 1000))\n return record", "def evaluate_mapped_inputs(self,value,**kwargs):\n result = {\"result\": value}\n return result", "def get_results_from_aggregation_sources(self, context):", "def process_results(self, response, results):\n return results", "def process_results(self, response, results):\n return results", "def __format_results__(self, result_rows):\n for row in result_rows:\n self.return_dict['results']['items'].append(\n {\n 'name': row['name'],\n 'link': self.request.route_url(\n 'api_v1_disease_resource',\n api_version=self.api_version_string,\n url_name=row['id']\n ),\n 'search_link': self.request.route_url(\n 'api_v1_disease_search',\n api_version=self.api_version_string,\n url_name=row['id']\n ),\n 'information_link': row['info_link']\n }\n )", "def extract_results(self, results: Any) -> dict:\n for nested_attribute in self.nested_results_parts:\n results = getattr(results, nested_attribute)\n return results if isinstance(results, dict) else results()", "def process_response(self, response):\n json = response.json()\n for resp in json[\"responses\"]:\n sub_qry = self._current_query.get(int(resp[\"id\"]))\n self.context.pending_request().map_json(resp[\"body\"], sub_qry.return_type)" ]
[ "0.69729936", "0.63311803", "0.53645015", "0.5282289", "0.525873", "0.5136312", "0.5067871", "0.50671", "0.506112", "0.50556207", "0.5048589", "0.500648", "0.4995515", "0.49854666", "0.4978949", "0.49533278", "0.49402848", "0.48627585", "0.4818297", "0.4792701", "0.4790801", "0.4782504", "0.47745368", "0.475116", "0.47322896", "0.47321394", "0.47321394", "0.47283924", "0.4725142", "0.46771246" ]
0.73603886
0
Called for each observable value added to analysis. Returns the observable value to add to the analysis. By default, the observable_value is returned as-is.
def filter_observable_value(self, result_field, observable_type, observable_value): return observable_value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def observation_value(self):\n pass", "def return_value(value) -> ObservableBase:\n from ..operators.observable.returnvalue import return_value\n return return_value(value)", "def value(self, observation, input_actions=None):\n action, value = self(observation, input_actions)\n return value", "def add_value(trajectories, val_func):\n for trajectory in trajectories:\n observes = trajectory['observes']\n values = val_func.predict(observes)\n trajectory['values'] = values", "def value(self,value):\n if math.isnan(value):\n return\n self.__append(value)", "def calculate_value(self, x: np.array) -> np.array:\n pass", "def __call__(self):\n return self.value", "def value(self) -> float:\n ...", "def value(self) -> float:\n ...", "def value(self) -> float:\n ...", "def add_aggregate_temp(self, value: float) -> float:\n # Check if aggregate samples are too old.\n if self.last_sample_time is not None:\n last_sample_time2 = datetime.fromtimestamp(self.last_sample_time)\n now = datetime.now()\n threshold_time = now - timedelta(hours=1)\n if last_sample_time2 < threshold_time:\n # Too old, clear samples.\n self.samples = []\n\n self.samples.append(value)\n self.samples = self.samples[-self.sample_size:]\n agg_value = reduce(\n lambda a, b: a + b,\n self.samples\n ) / len(self.samples)\n self.last_sample_time = datetime.now().timestamp()\n return agg_value", "def __call__(self, new_val: float) -> float:\n self._past_values.append(new_val)\n return self.do_filter()", "def value(self) -> float:", "def _get_value(self):\n \n return self._value", "def get_value(self) -> Any:\n raise NotImplementedError()", "def add(self, value: float) -> None:\n self.rawValue = self.momentum * self.rawValue + (1 - self.momentum) * value\n self.i += 1", "def _get_value(self):\n return self.__value", "def getValue(self):\n return self.__diastolic", "def value(self):\n return np.array([mv.value for mv in self])", "def onChecked_monitor(self, event):\n monitored_output = event.GetEventObject()\n self.temp[monitored_output.GetLabel()] = monitored_output.GetValue()\n return", "def value(self) -> any:\r\n\r\n return self.__value", "def value(self):\n current_value = self.initial_value * self.schedule(self.step / self.nvalues)\n self.step += 1.\n return current_value", "def compute_value(self, *args, **kwargs):\n\n return None", "def get_value(self):\n raise NotImplementedError", "def value(self, x):\n f = self._objective(\n time_series=self.observed_data,\n a=x[0],\n b=x[1],\n c=x[2],\n sigma=self.sigma\n )\n return f", "def _value(self):\n return self.device.value(*self._id[1:])", "def __call__(self, observation):\n # Validates that the state variable is a scalar with this float() call.\n current_val = float(observation[self.dict_key])\n retval = current_val - self.last_val\n self.last_val = current_val\n return retval", "def get_value(self):\n return self.value", "def get_value(self):\n return self.value", "def get_value(self):\n return self.value" ]
[ "0.6367385", "0.59634846", "0.59626716", "0.5945809", "0.57399595", "0.5591523", "0.5574882", "0.5561304", "0.5561304", "0.5561304", "0.5505398", "0.5402143", "0.53939724", "0.53933454", "0.5378962", "0.53695613", "0.5364363", "0.53551453", "0.53331333", "0.53300166", "0.5328645", "0.5326352", "0.531064", "0.5306581", "0.52980614", "0.5296637", "0.5282251", "0.52677", "0.52677", "0.52677" ]
0.60311365
1
Fills in query time specification dummy strings, such as the start and stop time placeholders. Adjusts the timezone and formatting of the start_time and stop_time variables initialized in build_target_query as needed and replaces the dummy variables in the configured query.
def fill_target_query_timespec(self, start_time: str or datetime.datetime, stop_time: str or datetime.datetime) -> None: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepareQuery(self, qid):\r\n \r\n connection = self.getConnection()\r\n cursor = connection.cursor()\r\n\r\n if self.granularity == 'day':\r\n extractTime = \"TO_CHAR(t.START_DATE, 'yyyy,mm,dd'), TO_CHAR(t.END_DATE, 'yyyy,mm,dd')\"\r\n elif self.granularity == 'year':\r\n extractTime = \"EXTRACT(YEAR FROM t.START_DATE), EXTRACT(YEAR FROM t.END_DATE)\"\r\n \r\n cursor.execute(\"SELECT t.TYPE, t.GEOMETRY.Get_WKT(), \" + extractTime + \",\" + \\\r\n\"t.DATE_TYPE, t.Z_MIN, t.Z_MAX FROM \" + self.queriesTable + \"\"\" t \r\nWHERE id = \"\"\" + qid + \"\"\" AND dataset = '\"\"\" + self.dataset.lower() + \"'\")\r\n\r\n self.qtype, self.wkt, self.start_date, self.end_date, self.timeType, self.ozmin, self.ozmax = cursor.fetchall()[0]\r\n\r\n if self.wkt is not None:\r\n self.wkt = str(self.wkt)\r\n connection.close()\r\n \r\n # Setting up the missing variables along with transformations to the time encoding. \r\n if self.granularity == 'day':\r\n if self.start_date is None and self.end_date is None:\r\n times = [[self.mint * self.scale, self.maxt * self.scale]]\r\n elif self.start_date is not None and self.end_date is not None:\r\n self.start_date = map(int, self.start_date.split(','))\r\n self.end_date = map(int, self.end_date.split(','))\r\n times = [[reader.daySinceEpoch(self.start_date[0], \r\n self.start_date[1], self.start_date[2]) * self.scale, \r\n reader.daySinceEpoch(self.end_date[0], \r\n self.end_date[1], self.end_date[2]) * self.scale]]\r\n elif self.end_date is None:\r\n self.start_date = map(int, self.start_date.split(','))\r\n times = [[reader.daySinceEpoch(self.start_date[0], self.start_date[1], self.start_date[2]) * self.scale, None]]\r\n else:\r\n if self.start_date is None and self.end_date is None:\r\n times = [[self.mint * self.scale, self.maxt * self.scale]]\r\n elif self.start_date is not None and self.end_date is not None:\r\n times = [[self.start_date * self.scale, self.end_date * self.scale]]\r\n elif self.end_date is None:\r\n times = [[self.start_date * self.scale, None]]\r\n\r\n if self.ozmin is None or self.ozmax is None: #no selectivity on z\r\n zmin = int(round((self.minz - self.offz)/self.scalez, 0))\r\n zmax = int(round((self.maxz - self.offz)/self.scalez, 0))\r\n else:\r\n zmin = int(round((self.ozmin - self.offz)/self.scalez, 0))\r\n zmax = int(round((self.ozmax - self.offz)/self.scalez, 0))\r\n\r\n # Preparing the different types of queries: Space and space - time\r\n continuous = True\r\n if self.wkt:\r\n if self.qtype.replace(' ', '').lower() != 'nn-search':\r\n ordinates = list(loads(self.wkt).exterior.coords)\r\n else:\r\n ordinates = list(loads(self.wkt).coords)\r\n \r\n if self.case == 1: #lxyt\r\n geometry = Polygon(self.list2ScaleOffset(ordinates)).wkt\r\n if self.qtype.lower() == 'space':\r\n coarser = self.params[0] #0, 0\r\n else:\r\n coarser = self.params[1] #4, 4\r\n \r\n elif self.case == 2: #lxyzt\r\n geometry = Polygon3D(Polygon(self.list2ScaleOffset(ordinates)), zmin, zmax)\r\n\r\n if self.qtype.lower() == 'space':\r\n coarser = self.params[2] #4, 4\r\n else:\r\n coarser = self.params[3] #3, 3\r\n\r\n elif self.case == 3: #dxyt\r\n geom = Polygon(self.list2ScaleOffset(ordinates)) \r\n if times[0][1] is None:\r\n continuous = False\r\n times[0][1] = times[0][0]\r\n coarser = self.params[4] #1, 8\r\n elif self.qtype.lower() == 'space':\r\n if times[0][0] == times[0][1]:\r\n continuous = False\r\n coarser = self.params[5] #-2, 1\r\n else:\r\n coarser = self.params[5] - 7\r\n elif self.timeType == 'continuous':\r\n coarser = self.params[6] 
#0, 2\r\n elif self.timeType == 'discrete':\r\n coarser = self.params[7] #3, 8\r\n \r\n if self.timeType == 'discrete' and (self.start_date is not None) and (self.end_date is not None):\r\n geometry = [dynamicPolygon(geom, times[0][0], times[0][0]),\r\n dynamicPolygon(geom, times[0][1], times[0][1])]\r\n else:\r\n geometry = dynamicPolygon(geom, times[0][0], times[0][1]) \r\n \r\n elif self.case == 4: #dxyzt\r\n geom = Polygon(self.list2ScaleOffset(ordinates))\r\n if times[0][1] == None:\r\n continuous = False\r\n coarser = self.params[8] #4, 9\r\n times[0][1] = times[0][0]\r\n elif self.qtype.lower() == 'space':\r\n if times[0][0] == times[0][1]:\r\n coarser = self.params[9] #0, 2\r\n else:\r\n coarser = self.params[9] - 4\r\n elif self.timeType == 'continuous':\r\n coarser = self.params[10] #0, 2\r\n elif self.timeType == 'discrete':\r\n coarser = self.params[11] #4, 9\r\n \r\n if self.timeType == 'discrete' and self.start_date is not None and self.end_date is not None:\r\n geometry = [Polygon4D(geom, zmin, zmax, times[0][0], times[0][0]),\r\n Polygon4D(geom, zmin, zmax, times[0][1], times[0][1])]\r\n else:\r\n geometry = Polygon4D(geom, zmin, zmax, times[0][0], times[0][1])\r\n \r\n else: #time queries\r\n if self.case == 1:\r\n geometry = []\r\n \r\n elif self.case == 2:\r\n geometry = []\r\n \r\n elif self.case == 3:\r\n temp_geom = self.list2ScaleOffset([(self.minx, self.miny), (self.maxx, self.maxy)])\r\n geom = box(temp_geom[0][0], temp_geom[0][1], temp_geom[1][0], temp_geom[1][1])\r\n \r\n if times[0][1] is None:\r\n times[0][1] = times[0][0]\r\n coarser = self.params[12] #3, 7\r\n continuous = False\r\n elif self.timeType == 'continuous':\r\n coarser = self.params[13] #0, 3\r\n else:\r\n coarser = self.params[14] #3, 8\r\n \r\n if self.timeType == 'discrete' and self.start_date is not None and self.end_date is not None:\r\n geometry = [dynamicPolygon(geom, times[0][0], times[0][0]),\r\n dynamicPolygon(geom, times[0][1], times[0][1])]\r\n else:\r\n geometry = dynamicPolygon(geom, times[0][0], times[0][1])\r\n\r\n elif self.case == 4:\r\n temp_geom = self.list2ScaleOffset([(self.minx, self.miny),(self.maxx, self.maxy)])\r\n geom = box(temp_geom[0][0], temp_geom[0][1], temp_geom[1][0], temp_geom[1][1])\r\n if times[0][1] is None:\r\n times[0][1] = times[0][0]\r\n coarser = self.params[15] #4, 12\r\n continuous = False\r\n elif self.timeType == 'continuous':\r\n coarser = self.params[16] #1, 3\r\n elif self.timeType == 'discrete':\r\n coarser = self.params[17] #4, 11\r\n \r\n if self.timeType == 'discrete' and self.start_date is not None and self.end_date is not None:\r\n geometry = [Polygon4D(geom, zmin, zmax, times[0][0], times[0][0]),\r\n Polygon4D(geom, zmin, zmax, times[0][1], times[0][1])]\r\n else: \r\n geometry = Polygon4D(geom, zmin, zmax, times[0][0], times[0][1])\r\n \r\n\r\n \"\"\"The final lines have to do with the way of posing the query to the \r\n database. Two options are possible:\r\n (a) sql: A SQL query is posed to the database. 
The number of ranges is\r\n limited by a maximum number.\r\n (b) join: The table is joined explicitly with a table containing the \r\n ranges.\"\"\"\r\n if geometry == []:\r\n mortonWhere, self.mortonJoinWhere, ranges, rangeTab, morPrep, insert, Levels = ('', '', 0, None, 0, 0, 0)\r\n else:\r\n if self.method == 'join':\r\n rangeTab = (self.rangeTable + qid).upper()\r\n ranges, morPrep, insert, Levels = self.join(geometry, coarser, rangeTab, continuous)\r\n mortonWhere = self.mortonJoinWhere\r\n elif self.method == 'sql':\r\n rangeTab, insert = None, 0\r\n mortonWhere, ranges, morPrep, Levels = self.sql(geometry, coarser, continuous)\r\n \r\n # if deep the time is in the morton code\r\n if self.integration == 'deep' or (self.start_date is None and self.end_date is None and self.integration == 'loose'): \r\n timeWhere = ''\r\n elif self.integration == 'loose': \r\n timeWhere = whereClause.addTimeCondition(times, 'time', self.timeType)\r\n \r\n return whereClause.getWhereStatement([timeWhere, mortonWhere]), ranges, morPrep, insert, Levels, rangeTab", "def time_settime(currenttime):\r\n\r\n time_query_times.append((getruntime(), currenttime))", "def build_target_query(self, observable: Observable, **kwargs) -> None:\n\n # XXX for some reason the self.target_query is getting cached when the same module runs for the same analysis\n # for different observables\n if '<O_VALUE>' not in self.target_query:\n self._reload_target_query()\n logging.debug(f\"had to reset self.target_query to clear previous use\")\n\n self.target_query = self.target_query.replace('<O_TYPE>', observable.type) \\\n .replace('<O_VALUE>', observable.value) # TODO property escape stuff\n\n source_time = kwargs.get('source_event_time') or observable.time or self.root.event_time_datetime\n # if we are going off of the event time, then we use the wide duration\n start_time = source_time - self.wide_duration_before\n stop_time = source_time + self.wide_duration_after\n\n # if observable time is available, we can narrow our time spec duration\n if observable.time is not None:\n start_time = source_time - self.narrow_duration_before\n stop_time = source_time + self.narrow_duration_after\n\n self.fill_target_query_timespec(start_time, stop_time)", "def _prep_times(self):\n self.test_times = 'diagonal'\n if hasattr(self, 'times'):\n self.train_times = self.times\n if hasattr(self, 'times_'):\n self.train_times_ = self.times_\n self.test_times_ = _DecodingTime()\n self.test_times_['slices'] = [[slic] for slic in\n self.train_times_['slices']]\n self.test_times_['times'] = [[tim] for tim in\n self.train_times_['times']]\n if hasattr(self, 'scores_'):\n self.scores_ = [[score] for score in self.scores_]\n if hasattr(self, 'y_pred_'):\n self.y_pred_ = [[y_pred] for y_pred in self.y_pred_]", "def __init__(__self__, *,\n end_time: pulumi.Input[str],\n start_time: pulumi.Input[str]):\n pulumi.set(__self__, \"end_time\", end_time)\n pulumi.set(__self__, \"start_time\", start_time)", "def fix_time_fields(self):\n time_fields = {\"Time of day\": lambda time: time.hour, \"Time of year (month)\": lambda time: time.month}\n for time_field in time_fields.keys():\n for i in range(self.df.shape[0]):\n value = self.df[time_field][i]\n if type(value) is datetime.time or type(value) is datetime.datetime:\n self.df[time_field].loc[i] = time_fields[time_field](value)", "def _get_unit_records(self, start_time):\r\n\r\n if self.optMTTF.get_active():\r\n _query = \"SELECT t2.fld_unit, t1.fld_incident_id, \\\r\n t1.fld_age_at_incident, t1.fld_failure, \\\r\n 
t1.fld_suspension, t1.fld_cnd_nff, \\\r\n t1.fld_occ_fault, t1.fld_initial_installation, \\\r\n t1.fld_interval_censored, t2.fld_request_date, \\\r\n t2.fld_hardware_id \\\r\n FROM rtk_incident_detail AS t1 \\\r\n INNER JOIN \\\r\n ( \\\r\n SELECT DISTINCT MIN(fld_unit, fld_request_date), \\\r\n fld_incident_id, fld_request_date, \\\r\n fld_unit, fld_hardware_id \\\r\n FROM rtk_incident \\\r\n GROUP BY fld_unit \\\r\n ) AS t2 \\\r\n ON t2.fld_incident_id=t1.fld_incident_id \\\r\n WHERE t1.fld_age_at_incident >= {0:f} \\\r\n ORDER BY t2.fld_unit ASC, \\\r\n t1.fld_age_at_incident ASC, \\\r\n t2.fld_request_date ASC\".format(start_time)\r\n\r\n elif self.optMTBBD.get_active():\r\n _query = \"SELECT t2.fld_unit, t1.fld_incident_id, \\\r\n t1.fld_age_at_incident, t1.fld_failure, \\\r\n t1.fld_suspension, t1.fld_cnd_nff, \\\r\n t1.fld_occ_fault, t1.fld_initial_installation, \\\r\n t1.fld_interval_censored, t2.fld_request_date, \\\r\n t2.fld_hardware_id \\\r\n FROM rtk_incident_detail AS t1 \\\r\n INNER JOIN \\\r\n ( \\\r\n SELECT fld_incident_id, fld_request_date, fld_unit, \\\r\n fld_hardware_id \\\r\n FROM rtk_incident \\\r\n GROUP BY fld_unit, fld_request_date \\\r\n ) AS t2 \\\r\n ON t2.fld_incident_id=t1.fld_incident_id \\\r\n WHERE t1.fld_age_at_incident >= {0:f} \\\r\n GROUP BY t2.fld_unit, t1.fld_age_at_incident \\\r\n ORDER BY t2.fld_unit ASC, \\\r\n t1.fld_age_at_incident ASC, \\\r\n t2.fld_request_date ASC\".format(start_time)\r\n\r\n elif self.optMTBF.get_active():\r\n _query = \"SELECT t2.fld_unit, t1.fld_incident_id, \\\r\n t1.fld_age_at_incident, t1.fld_failure, \\\r\n t1.fld_suspension, t1.fld_cnd_nff, \\\r\n t1.fld_occ_fault, t1.fld_initial_installation, \\\r\n t1.fld_interval_censored, t2.fld_request_date, \\\r\n t2.fld_hardware_id \\\r\n FROM rtk_incident_detail AS t1 \\\r\n INNER JOIN rtk_incident AS t2 \\\r\n ON t2.fld_incident_id=t1.fld_incident_id \\\r\n WHERE t1.fld_age_at_incident >= {0:f} \\\r\n ORDER BY t2.fld_unit ASC, \\\r\n t1.fld_age_at_incident ASC, \\\r\n t2.fld_request_date ASC\".format(start_time)\r\n\r\n (_results, _error_code, __) = self._dao.execute(_query, commit=False)\r\n\r\n return(_results, _error_code)", "def analysisStartTime(self, val: WQXTime) -> None:\r\n self.__analysisStartTime = None if val is None else WQXTime(val)", "def test_format_optional_time_field(self):\n formatted_time = jiratimereport.format_optional_time_field(99960, \"\")\n expected_result = \"27:46:00\"\n self.assertEqual(expected_result, formatted_time)", "def preprocess_date_and_time(params: Dict) -> None:\n start_date = date.fromisoformat(params[\"start_date\"])\n end_date = date.fromisoformat(params[\"end_date\"])\n\n if end_date < start_date:\n raise Exception(f\"End date is earlier than start date.\")\n \n start_time = time.fromisoformat(params[\"start_time\"])\n end_time = time.fromisoformat(params[\"end_time\"])\n\n if end_time != time.min and end_time <= start_time:\n raise Exception(\"End time is earlier or equal than start time\")\n \n actual_start = time(start_time.hour + 1 if start_time.minute + start_time.second + start_time.microsecond > 0 \n else start_time.hour)\n actual_end = time(end_time.hour)\n\n if actual_end == time.min and end_time != time.min:\n raise Exception(\"Non available blocks to use\")\n \n params.update({\n \"start_date\": start_date,\n \"end_date\": end_date,\n \"start_time\": actual_start,\n \"end_time\": actual_end\n })", "def parse_query(query, default_text='Time is up!'):\n try:\n regex = r'''\n ^((\n (?P<at>at\\ ) # at\n (?P<clock>\n 
(2[0-3]|[01]?[0-9]) # 0-23\n (:([0-5][0-9]))? # :0-59 (optional)\n )\n )| # OR\n ^(?P<time>\\d+) # 0-infinite digit\n (?P<measure>[mhs])? # mhs (optional, default: m)\n ) \n (?P<message>\\ .*)?$ # optional message\n '''\n m = re.match(regex, query, re.IGNORECASE | re.VERBOSE)\n \n if m.group('at') is not None:\n now = datetime.datetime.now()\n clock = m.group('clock').split(\":\")\n\n # if input has no minutes set to 0\n if(len(clock) == 1):\n clock.append(0)\n # calculate delta between now and inputed clock\n # if clock > now: set timer to next day\n time_sec = int((datetime.timedelta(hours=24) - (now - now.replace(hour=int(clock[0]), minute=int(clock[1])))).total_seconds() % (24 * 3600)) \n time_arg = m.group('clock')\n else:\n time_sec = int(m.group('time')) * TIME_MULT[(m.group('measure') or 'm').lower()]\n time_arg = m.group('time') + (m.group('measure') or \"\")\n\n message = m.group('message') or default_text\n\n return (time_sec, time_arg, message[1:])\n except Exception as e:\n raise ParseQueryError(str(e))", "def handle_time_filter(base_case: Optional[Dict[str, Any]] = None, unit_value: Optional[str] = None,\n amount_value: Optional[int] = None, time_from: Optional[str] = None,\n time_to: Optional[str] = None) -> Dict[str, Any]:\n if (time_from or time_to) and (unit_value or amount_value):\n raise DemistoException(ERROR_TOO_MANY_ARGS)\n elif (time_from and not time_to) or (amount_value and not unit_value):\n raise DemistoException(ERROR_NOT_ENOUGH_ARGS)\n\n if unit_value:\n if amount_value:\n # amount is only for relative time - defines a window of time from a given point of time in the past until now\n if unit_value not in RELATIVE_TIME_UNIT_OPTIONS:\n raise DemistoException(ERROR_RELATIVE_TIME_UNIT)\n return {'type': 'relative', 'value': {'amount': arg_to_number(amount_value), 'unit': unit_value}}\n\n else:\n # using to_now time - represents a window of time from the start of the time unit given until now\n if unit_value not in TO_NOW_TIME_UNIT_OPTIONS:\n raise DemistoException(ERROR_TO_NOW_TIME_UNIT)\n return {'type': 'to_now', 'value': unit_value}\n\n elif time_to:\n # using absolute time\n if time_from:\n return {'type': 'absolute', 'value': {'startTime': convert_date_to_unix(time_from),\n 'endTime': convert_date_to_unix(time_to)}}\n else:\n # alert dismissal requires only an end time in the future\n return {'type': 'absolute', 'value': {'endTime': convert_date_to_unix(time_to)}}\n\n return base_case or TIME_FILTER_BASE_CASE", "def ru_date_time_table_set(host_id, date_time_fields, date_time_param, user_name):\n global sqlalche_obj\n sqlalche_obj.sql_alchemy_db_connection_open()\n result = \"\"\n param = []\n form_name = ['Year', 'Month', 'Day', 'Hour', 'Minute', 'Second']\n dictarr = []\n resultarray = {}\n err1 = [0, 0, 0, 0, 0, 0]\n param.append('year.1')\n param.append('month.1')\n param.append('day.1')\n param.append('hour.1')\n param.append('min.1')\n param.append('sec.1')\n odu16_date_time_table = []\n device_param_list = sqlalche_obj.session.query(Hosts.snmp_version_id, Hosts.snmp_write_community, Hosts.ip_address, Hosts.snmp_port, Hosts.config_profile_id).\\\n filter(Hosts.host_id == host_id).all()\n odu16_date_time_table = sqlalche_obj.session.query(SetOdu16RUDateTimeTable).filter(\n SetOdu16RUDateTimeTable.config_profile_id == device_param_list[0][4]).all()\n for i in range(len(date_time_fields)):\n oidname = oid_name[date_time_fields[i]]\n oidtype = oid_type[date_time_fields[i]]\n oidvalue = date_time_param[i]\n result += snmp_set(\n device_param_list[0][0], 
device_param_list[0][\n 1], device_param_list[0][2],\n device_param_list[0][3], oidname, oidtype, oidvalue)\n err = error_odu16(result, param, err1)\n try:\n el = EventLog()\n if 1 in err1:\n el.log_event(\n \"Values Updated in UBR RU Date Time Form\", \"%s\" % (user_name))\n for j in range(0, len(date_time_fields)):\n dict = {}\n dict[\"name\"] = form_name[j]\n dict[\"value\"] = date_time_param[j]\n dict[\"textbox\"] = date_time_fields[j]\n dict[\"status\"] = err1[j]\n dictarr.append(dict)\n if err1[0] == 1:\n odu16_date_time_table[0].year = date_time_param[0]\n if err1[1] == 1:\n odu16_date_time_table[0].month = date_time_param[1]\n if err1[2] == 1:\n odu16_date_time_table[0].day = date_time_param[2]\n if err1[3] == 1:\n odu16_date_time_table[0].hour = date_time_param[3]\n if err1[4] == 1:\n odu16_date_time_table[0].min = date_time_param[4]\n if err1[5] == 1:\n odu16_date_time_table[0].sec = date_time_param[5]\n sqlalche_obj.session.commit()\n sqlalche_obj.sql_alchemy_db_connection_close()\n if err != '':\n raise Set_exception\n except Set_exception as e:\n resultarray[\"result\"] = dictarr\n resultarray[\"tableName\"] = 'SetOdu16RUDateTimeTable'\n resultarray[\"formAction\"] = 'RU_Date_Time.py'\n sqlalche_obj.sql_alchemy_db_connection_close()\n return str(resultarray)", "def times_filter(d, times, meets_criteria=matches_timestr):\n mapping = map(type, times)\n if [ str, type(None), type(None) ] == mapping and meets_criteria(times[0]):\n d1 = doytimestr_to_datetime('%d:%s:00' % (d[0].year, times[0].replace('/',':')))\n #return '%s' % d1\n return d1, d1, 0\n elif [ str, str, type(None) ] == mapping and meets_criteria(times[0]) and meets_criteria(times[1]):\n d1 = doytimestr_to_datetime('%d:%s:00' % (d[0].year, times[0].replace('/',':')))\n d2 = doytimestr_to_datetime('%d:%s:00' % (d[1].year, times[1].replace('/',':')))\n #return '%s to %s' % (d1, d2)\n return d1, d2, timedelta_hours(d2-d1)\n else:\n #return ''\n return None, None, None", "def change_time_ival(self, start, stop):\n if isinstance(start, datetime):\n self.start = start\n if isinstance(stop, datetime):\n self.stop = stop\n self.raw_results = {}", "def _get_query_timestamps(args=None):\r\n if args is None:\r\n return {'query_start': None,\r\n 'query_end': None,\r\n 'start_timestamp': None,\r\n 'end_timestamp': None,\r\n 'search_offset': 0}\r\n search_offset = int(args.get('search_offset', 0))\r\n\r\n start_timestamp = args.get('start_timestamp')\r\n if start_timestamp:\r\n start_timestamp = timeutils.parse_isotime(start_timestamp)\r\n start_timestamp = start_timestamp.replace(tzinfo=None)\r\n query_start = (start_timestamp -\r\n datetime.timedelta(minutes=search_offset))\r\n else:\r\n query_start = None\r\n\r\n end_timestamp = args.get('end_timestamp')\r\n if end_timestamp:\r\n end_timestamp = timeutils.parse_isotime(end_timestamp)\r\n end_timestamp = end_timestamp.replace(tzinfo=None)\r\n query_end = end_timestamp + datetime.timedelta(minutes=search_offset)\r\n else:\r\n query_end = None\r\n\r\n return {'query_start': query_start,\r\n 'query_end': query_end,\r\n 'start_timestamp': start_timestamp,\r\n 'end_timestamp': end_timestamp,\r\n 'search_offset': search_offset,\r\n }", "def _get_meas_times_sql(self, last_meas_time):\n #meas_times = dict()\n \n # check measurement time calc method\n if self._data['report_measurement_time_calc_method'] == 'sql':\n \n res = self._db.Query(\"\"\"SELECT measurement_time_calc_command.*\n FROM measurement_time_calc_command\n WHERE\n 
`measurement_time_calc_command_id`=%s\"\"\",(self._data['report_measurement_time_calc_command_id']))\n if not res:\n raise Exception(\"Measurement_time_calc_command not found\")\n meas_time_calc_command = self._db.record[0]\n\n named_placeholders = list()\n last_meas_time_arg = dict()#{'name': '', 'value': '', 'type': ''}\n last_meas_time_arg['name'] = 'last_measurement_time'\n last_meas_time_arg['value'] = last_meas_time.strftime('%Y-%m-%d %H:%M:%S')\n last_meas_time_arg['type'] = 'DATE'\n named_placeholders.append(last_meas_time_arg)\n \n # process segment data\n if meas_time_calc_command['segment_id']:\n if self._segment and self._segment_value:\n segment_arg = dict()\n #segment_arg['value'] = ''\n #segment_arg['type'] = ''\n segment_arg['name'] = self._segment['data_fetch_command_bind_parameter']\n if self._segment['partition_value_type'] == 'int':\n segment_arg['value'] = self._segment_value['value_int']\n segment_arg['type'] = 'INTEGER'\n elif self._segment['partition_value_type'] == 'varchar':\n segment_arg['value'] = self._segment_value['value_varchar']\n segment_arg['type'] = 'NVARCHAR'\n \n named_placeholders.append(segment_arg)\n else:\n raise Exception(\"Try to fetch segmented measurement times for non-segmented report\")\n \n meas_times = self._outer_conn.query(meas_time_calc_command['select_statement'], named_placeholders)\n else:\n meas_times = self._outer_conn.get_current_time()\n\n return meas_times", "def test_parse_time_special_values(self):\n now1 = datetime(2015, 2, 1, 0, 0, 0)\n now2 = datetime(2015, 1, 24, 10, 15, 25)\n self.assertEqual(parse_time(\"now\", now1), now1)\n self.assertEqual(parse_time(\"now\", now2), now2)\n self.assertEqual(\n parse_time(\"yesterday\", now1), datetime(2015, 1, 31, 0, 0, 0))\n self.assertEqual(\n parse_time(\"yesterday\", now2), datetime(2015, 1, 23, 10, 15, 25))\n self.assertEqual(parse_time(\"today\", now1), now1)\n self.assertEqual(parse_time(\"today\", now2), now2)\n self.assertEqual(\n parse_time(\"tomorrow\", now1), datetime(2015, 2, 2, 0, 0, 0))\n self.assertEqual(\n parse_time(\"tomorrow\", now2), datetime(2015, 1, 25, 10, 15, 25))", "def _temporal_subset_params(self, request: Request) -> list:\n if request.temporal:\n t = request.temporal\n start = t['start'].isoformat() if 'start' in t else None\n stop = t['stop'].isoformat() if 'stop' in t else None\n start_quoted = f'\"{start}\"' if start else ''\n stop_quoted = f'\"{stop}\"' if start else ''\n return [f'time({start_quoted}:{stop_quoted})']\n else:\n return []", "def _build_test_query01(self):\n\n\t\tquery = 'select \"GLOBALEVENTID\", \"SQLDATE\", \"MonthYear\", \"Year\" ' + \\\n\t\t\t\t\t 'from \"DEMOUSER00\".\"uni.vlba.gdelt.data::gdelt_dailyupdates\"'\t\t\n\n\t\treturn query", "def make_time_request(self, time_request=None, **kwargs):\n pass", "def setup( self ):\n super( TimeGraph, self ).setup()\n\n if 'span' in self.metadata and isinstance(self.metadata['span'], \\\n types.StringType):\n self.metadata['span'] = float(self.metadata['span'])\n\n vars = dict(self.vars)\n\n do_croptime = str(find_info('croptime', self.metadata, self.kw,False)).\\\n lower().find('t') >= 0\n if do_croptime:\n begin = numpy.inf; end = 0\n for pivot, groups in self.parsed_data.items():\n for timebin, data in groups.items():\n begin = min( to_timestamp(timebin), begin )\n end = max( to_timestamp(timebin), end )\n end += self.metadata.get('span', 0)\n else:\n begin = to_timestamp(find_info( self.starttime_str, vars,\n self.metadata, time.time()-24*3600))\n end = 
to_timestamp(find_info(self.endtime_str,vars, self.metadata,\n time.time()))\n\n self.begin = begin; self.end = end\n self.begin_datetime = datetime.datetime.utcfromtimestamp( float(begin) )\n self.end_datetime = datetime.datetime.utcfromtimestamp( float(end) )\n self.begin_num = date2num( self.begin_datetime )\n self.end_num = date2num( self.end_datetime )\n\n self.width = int(find_info('span', vars, self.metadata, self.time_interval() ))\n\n title = getattr( self, 'title', '' )\n self.title = self.add_time_to_title( title )", "def calculate_query_times(**kwargs):\n trim_size = int(kwargs[\"trim\"] * len(kwargs[\"total_times\"]))\n return {\n \"total_time_avg\": round(numpy.mean(kwargs[\"total_times\"]), 1),\n \"total_time_min\": round(numpy.min(kwargs[\"total_times\"]), 1),\n \"total_time_max\": round(numpy.max(kwargs[\"total_times\"]), 1),\n \"total_time_85\": round(numpy.percentile(kwargs[\"total_times\"], 85), 1),\n \"total_time_trimmed_avg\": round(\n numpy.mean(\n numpy.sort(kwargs[\"total_times\"])[trim_size:-trim_size]\n ),\n 1,\n )\n if trim_size\n else round(numpy.mean(kwargs[\"total_times\"]), 1),\n \"total_times\": kwargs[\"total_times\"],\n \"execution_time_avg\": round(numpy.mean(kwargs[\"execution_times\"]), 1),\n \"execution_time_min\": round(numpy.min(kwargs[\"execution_times\"]), 1),\n \"execution_time_max\": round(numpy.max(kwargs[\"execution_times\"]), 1),\n \"execution_time_85\": round(\n numpy.percentile(kwargs[\"execution_times\"], 85), 1\n ),\n \"execution_time_25\": round(\n numpy.percentile(kwargs[\"execution_times\"], 25), 1\n ),\n \"execution_time_std\": round(numpy.std(kwargs[\"execution_times\"]), 1),\n \"execution_time_trimmed_avg\": round(\n numpy.mean(\n numpy.sort(kwargs[\"execution_times\"])[trim_size:-trim_size]\n )\n )\n if trim_size > 0\n else round(numpy.mean(kwargs[\"execution_times\"]), 1),\n \"execution_time_trimmed_max\": round(\n numpy.max(\n numpy.sort(kwargs[\"execution_times\"])[trim_size:-trim_size]\n )\n )\n if trim_size > 0\n else round(numpy.max(kwargs[\"execution_times\"]), 1),\n \"execution_times\": kwargs[\"execution_times\"],\n \"connect_time_avg\": round(numpy.mean(kwargs[\"connect_times\"]), 1),\n \"connect_time_min\": round(numpy.min(kwargs[\"connect_times\"]), 1),\n \"connect_time_max\": round(numpy.max(kwargs[\"connect_times\"]), 1),\n \"connect_time_85\": round(\n numpy.percentile(kwargs[\"connect_times\"], 85), 1\n ),\n \"arrow_conversion_time_avg\": round(\n numpy.mean(kwargs[\"arrow_conversion_times\"]), 1\n ),\n \"arrow_conversion_time_min\": round(\n numpy.min(kwargs[\"arrow_conversion_times\"]), 1\n ),\n \"arrow_conversion_time_max\": round(\n numpy.max(kwargs[\"arrow_conversion_times\"]), 1\n ),\n \"arrow_conversion_time_85\": round(\n numpy.percentile(kwargs[\"arrow_conversion_times\"], 85), 1\n ),\n \"arrow_conversion_time_25\": round(\n numpy.percentile(kwargs[\"arrow_conversion_times\"], 25), 1\n ),\n \"arrow_conversion_time_std\": round(\n numpy.std(kwargs[\"arrow_conversion_times\"]), 1\n ),\n }", "def calculate_query_times(**kwargs):\n trim_size = int(kwargs[\"trim\"] * len(kwargs[\"total_times\"]))\n return {\n \"total_time_avg\": round(numpy.mean(kwargs[\"total_times\"]), 1),\n \"total_time_min\": round(numpy.min(kwargs[\"total_times\"]), 1),\n \"total_time_max\": round(numpy.max(kwargs[\"total_times\"]), 1),\n \"total_time_85\": round(numpy.percentile(kwargs[\"total_times\"], 85), 1),\n \"total_time_trimmed_avg\": round(\n numpy.mean(\n numpy.sort(kwargs[\"total_times\"])[trim_size:-trim_size]\n ),\n 1,\n )\n if 
trim_size\n else round(numpy.mean(kwargs[\"total_times\"]), 1),\n \"total_times\": kwargs[\"total_times\"],\n \"execution_time_avg\": round(numpy.mean(kwargs[\"execution_times\"]), 1),\n \"execution_time_min\": round(numpy.min(kwargs[\"execution_times\"]), 1),\n \"execution_time_max\": round(numpy.max(kwargs[\"execution_times\"]), 1),\n \"execution_time_85\": round(\n numpy.percentile(kwargs[\"execution_times\"], 85), 1\n ),\n \"execution_time_25\": round(\n numpy.percentile(kwargs[\"execution_times\"], 25), 1\n ),\n \"execution_time_std\": round(numpy.std(kwargs[\"execution_times\"]), 1),\n \"execution_time_trimmed_avg\": round(\n numpy.mean(\n numpy.sort(kwargs[\"execution_times\"])[trim_size:-trim_size]\n )\n )\n if trim_size > 0\n else round(numpy.mean(kwargs[\"execution_times\"]), 1),\n \"execution_time_trimmed_max\": round(\n numpy.max(\n numpy.sort(kwargs[\"execution_times\"])[trim_size:-trim_size]\n )\n )\n if trim_size > 0\n else round(numpy.max(kwargs[\"execution_times\"]), 1),\n \"execution_times\": kwargs[\"execution_times\"],\n \"connect_time_avg\": round(numpy.mean(kwargs[\"connect_times\"]), 1),\n \"connect_time_min\": round(numpy.min(kwargs[\"connect_times\"]), 1),\n \"connect_time_max\": round(numpy.max(kwargs[\"connect_times\"]), 1),\n \"connect_time_85\": round(\n numpy.percentile(kwargs[\"connect_times\"], 85), 1\n ),\n \"results_iter_time_avg\": round(\n numpy.mean(kwargs[\"results_iter_times\"]), 1\n ),\n \"results_iter_time_min\": round(\n numpy.min(kwargs[\"results_iter_times\"]), 1\n ),\n \"results_iter_time_max\": round(\n numpy.max(kwargs[\"results_iter_times\"]), 1\n ),\n \"results_iter_time_85\": round(\n numpy.percentile(kwargs[\"results_iter_times\"], 85), 1\n ),\n }", "def build_model_for_time_block(self,\n ndx: int,\n start_t: float,\n end_t: float,\n add_init_conditions: bool) -> Tuple[_BlockData,\n Sequence[_GeneralVarData],\n Sequence[_GeneralVarData]]:\n pass", "def test_as_specified(self):\n self.assertEqual(\n time_display._as_specified(\n datetime(2020, 7, 31, 23, 59, 30, 357921),\n '%Y-%m-%d %H:%M:%S'),\n '2020-07-31 23:59:30')\n self.assertEqual(\n time_display._as_specified(\n datetime(2020, 7, 31, 23, 59, 30, 357921),\n '%Y-%m-%d %H:%M:%S',\n with_msec=True),\n '2020-07-31 23:59:30.357')\n self.assertEqual(\n time_display._as_specified(\n datetime(2020, 7, 31, 23, 59, 30, 357921),\n '%Y-%m-%d %H:%M:%S',\n with_usec=True),\n '2020-07-31 23:59:30.357921')", "def location_time_based_query(proposition_base, time):\n display_env = WumpusEnvironment(agent.width, agent.height)\n start_time = clock()\n print \"Running queries for: {0}<x>_<y>_{1}\".format(proposition_base,time)\n for x in range(1,agent.width+1):\n for y in range(1,agent.height+1):\n query = expr('{0}{1}_{2}_{3}'.format(proposition_base,x,y,time))\n result = agent.kb.ask(query)\n if result == None:\n display_env.add_thing(Proposition(query,'?'),(x,y))\n else:\n display_env.add_thing(Proposition(query,result),(x,y))\n end_time = clock()\n print \" >>> time elapsed while making queries:\" \\\n + \" {0}\".format(end_time-start_time)\n print display_env.to_string(agent.time,\n title=\"All {0}<x>_<y>_{1} queries\".format(proposition_base,\n time))", "def __define_variable_time(self, initial_guess, minimum, maximum):\n i = 0\n for nlp in self.nlp:\n if isinstance(nlp[\"tf\"], self.CX):\n time_bounds = Bounds(minimum[i], maximum[i], interpolation=InterpolationType.CONSTANT)\n time_init = InitialConditions(initial_guess[i])\n Parameters._add_to_v(self, \"time\", 1, None, time_bounds, time_init, 
nlp[\"tf\"])\n i += 1", "def _parse_time_notes(self, has_end):\n return \"\" if has_end else \"Estimated 3 hour duration\"", "def _fill_results(self,spec,measurements,period,duration):\r\n logging.info(\"Fill measurements for spec {0}\".format(spec))\r\n \r\n if self._verb==mplane.model.VERB_QUERY:\r\n \"\"\"\r\n Query according to the time specified in the specification\r\n \"\"\"\r\n (first_time,last_time) = spec.when().datetimes()\r\n first_time=int(first_time.replace(tzinfo=datetime.timezone.utc).timestamp())\r\n last_time=int(last_time.replace(tzinfo=datetime.timezone.utc).timestamp())\r\n sleep_time = 0\r\n else:\r\n \"\"\"\r\n Query from NOW\r\n \"\"\"\r\n first_time = int(time.time())\r\n if (len(measurements[1])>0 or len(measurements[2])>0) and period<=self._pvsr_default_conf_check_cycle:\r\n #there are newly created or modified measurements\r\n first_time = first_time + self._pvsr_default_conf_check_cycle\r\n if first_time % period > 0:\r\n first_time = first_time - (first_time % period)\r\n last_time = first_time + int(duration / period) * period\r\n sleep_time = duration\r\n\r\n logging.debug(\"From: {0}, To: {1}\".format(datetime.datetime.fromtimestamp(first_time),datetime.datetime.fromtimestamp(last_time)))\r\n \r\n meas_data = {}\r\n\r\n while True:\r\n logging.info(\"Wait {0} seconds\".format(sleep_time))\r\n time.sleep(sleep_time)\r\n sleep_time = 30\r\n \r\n loaded_until=self._pvsr.getLastLoadedDataTimestamp(period)\r\n if int(loaded_until.timestamp())>=last_time or time.time()>last_time+period+300:\r\n for i in (0,1,2):\r\n for j in range(len(measurements[i])):\r\n self._fill_meas_result(measurements[i][j],first_time,last_time,meas_data)\r\n break\r\n else:\r\n logging.debug(\"last loaded is still {0}\".format(loaded_until))\r\n \r\n res = mplane.model.Result(specification=spec)\r\n res.set_when(mplane.model.When(a = datetime.datetime.utcfromtimestamp(first_time+period), b = datetime.datetime.utcfromtimestamp(last_time)))\r\n \r\n tmp_time=first_time+period\r\n row_index=0\r\n while tmp_time<=last_time:\r\n tmp_time2 = datetime.datetime.fromtimestamp(tmp_time)\r\n tmp_time3 = datetime.datetime.utcfromtimestamp(tmp_time)\r\n res.set_result_value(\"time\", tmp_time3, row_index)\r\n if tmp_time2 in meas_data:\r\n for mplane_name in meas_data[tmp_time2]:\r\n value = str(meas_data[tmp_time2][mplane_name])\r\n res.set_result_value(mplane_name, value, row_index)\r\n row_index+=1\r\n tmp_time+=period\r\n \r\n return res" ]
[ "0.6397164", "0.5582921", "0.552272", "0.5454462", "0.54289496", "0.5377006", "0.5312675", "0.5311064", "0.5248024", "0.5233668", "0.52072245", "0.5175267", "0.51748633", "0.5169004", "0.5168347", "0.51603746", "0.5153136", "0.513233", "0.5115868", "0.50992113", "0.50686836", "0.5064039", "0.5049733", "0.5049733", "0.50458276", "0.5039384", "0.50338584", "0.5016147", "0.49875832", "0.49791738" ]
0.72575843
0
Process the query results returned from execute_query. Suggestions for use here would be iterating through query results in order to build analysis results and add observables (use extract_result_observables if you have a mapping), etc.
def process_query_results(self, query_results: dict or list, analysis, observable: Observable) -> None: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process(self, results):\n raise NotImplementedError", "def handleQuery(self,query):\n results = None\n return results", "def _process_results(self, timestamp, results):\n\n topic_value = self.create_topic_values(results)\n\n _log.debug('Processing Results!')\n if mode:\n _log.debug(\"ACTUATE ON DEVICE.\")\n actuator_error = False\n if make_reservations and results.devices:\n results, actuator_error = self.actuator_request(results)\n if not actuator_error:\n self.actuator_set(topic_value)\n if make_reservations and results.devices and not actuator_error:\n self.actuator_cancel()\n\n for value in results.log_messages:\n _log.debug(\"LOG: {}\".format(value))\n for key, value in results.table_output.items():\n _log.debug(\"TABLE: {}->{}\".format(key, value))\n if output_file_prefix is not None:\n results = self.create_file_output(results)\n if command_output_file is not None:\n self.create_command_file_output(timestamp, topic_value)\n # if len(results.table_output.keys()):\n # results = self.publish_analysis_results(results)\n return results", "def handle_result(self, results: List[Dict], **info):\n pass", "def process_results(self, response, results):\n return results", "def process_results(self, response, results):\n return results", "def _ProcessQueryResult(self, result):\n self.__more_results = result.more_results()\n\n if self.__keys_only:\n return [Key._FromPb(e.key()) for e in result.result_list()]\n else:\n return [Entity._FromPb(e) for e in result.result_list()]", "def execute_query(self):\n query_sum = self.initialize_totals()\n data = []\n\n with tenant_context(self.tenant):\n query = self.query_table.objects.filter(self.query_filter)\n query_data = query.annotate(**self.annotations)\n group_by_value = self._get_group_by()\n\n query_group_by = [\"date\"] + group_by_value\n query_order_by = [\"-date\"]\n query_order_by.extend([self.order]) # add implicit ordering\n\n query_data = query_data.values(*query_group_by).annotate(**self.report_annotations)\n\n if self._limit and query_data:\n query_data = self._group_by_ranks(query, query_data)\n if not self.parameters.get(\"order_by\"):\n # override implicit ordering when using ranked ordering.\n query_order_by[-1] = \"rank\"\n\n # Populate the 'total' section of the API response\n if query.exists():\n aggregates = self._mapper.report_type_map.get(\"aggregates\")\n metric_sum = query.aggregate(**aggregates)\n query_sum = {key: metric_sum.get(key) for key in aggregates}\n\n query_data, total_capacity = self.get_cluster_capacity(query_data)\n if total_capacity:\n query_sum.update(total_capacity)\n\n if self._delta:\n query_data = self.add_deltas(query_data, query_sum)\n is_csv_output = self.parameters.accept_type and \"text/csv\" in self.parameters.accept_type\n\n query_data = self.order_by(query_data, query_order_by)\n\n if is_csv_output:\n if self._limit:\n data = self._ranked_list(list(query_data))\n else:\n data = list(query_data)\n else:\n # Pass in a copy of the group by without the added\n # tag column name prefix\n groups = copy.deepcopy(query_group_by)\n groups.remove(\"date\")\n data = self._apply_group_by(list(query_data), groups)\n data = self._transform_data(query_group_by, 0, data)\n\n sum_init = {\"cost_units\": self._mapper.cost_units_key}\n if self._mapper.usage_units_key:\n sum_init[\"usage_units\"] = self._mapper.usage_units_key\n query_sum.update(sum_init)\n\n ordered_total = {\n total_key: query_sum[total_key] for total_key in self.report_annotations.keys() if total_key in query_sum\n }\n 
ordered_total.update(query_sum)\n\n self.query_sum = ordered_total\n self.query_data = data\n return self._format_query_response()", "def _process_results(self, *args, **kwargs): # noqa: E501\n # Lock before processing results to prevent conflicts\n if not self._acquire_pr_lock():\n return\n\n # Get the future instance\n future = self.future\n\n # Skip if no Future\n if not future:\n return\n\n # Skip processing results if forget\n if self.forget:\n # Clean up client\n self.client.close()\n return\n\n try:\n # Get results using the client\n result = self.client.gather(future)\n except Exception as e:\n # Tell scheduler to stop sending updates about this key\n self.client.set_metadata(self.key, False)\n # Clean up client\n self.client.close()\n result = e\n log.warning(\n 'Exception encountered when retrieving results: \"{}\"'.format(str(e))\n )\n\n # Tell scheduler to stop sending updates about this key\n self.client.set_metadata(self.key, False)\n\n # Handle custom process results function\n if self.process_results_function:\n # Get the process_results_function in TethysJob and call it with the result retrived\n try:\n result = self.process_results_function(result)\n except Exception as e:\n log.exception(\"Process Results Function Error\")\n self._status = \"ERR\"\n result = str(e)\n\n # Serialize the result\n try:\n self.result = result\n except Exception:\n log.exception(\"Results Serialization Error\")\n self._status = \"ERR\"\n else:\n self._status = \"COM\" if self._status != \"ERR\" else \"ERR\"\n\n # Erase the key to avoid problem with dask recycle key\n self.key = \"\"\n\n # save the results or status in the database\n self.save()\n\n # Clean up client\n self.client.close()\n\n if client_fire_forget:\n client_fire_forget.close()\n\n self._release_pr_lock()", "def _compile_results(self):\n self.statements = stmts_from_json(self.__statement_jsons.values())\n if self.use_obtained_counts:\n self.__source_counts = get_available_source_counts(self.statements)\n self.__evidence_counts = get_available_ev_counts(self.statements)", "def process_result(self, result: Any) -> None:\n raise NotImplementedError()", "def execute_query(self, query_, return_results_=False):\n\n try:\n if return_results_:\n logging.info(\"Fetching results...\")\n self.query_results = self.cursor.execute(query_)\n else:\n self.cursor.execute(query_)\n logging.info(\"Query ran successfully.\")\n\n except Exception as e:\n if self.airflow:\n # For Airflow, forces task to fail and set it up for re-try\n raise AirflowException(\"Error running query. {}\"\n .format(str(e)))\n else:\n logging.exception(\"Error running query.\")\n raise e", "def update_results(self):\n try:\n results = self.shared_state[\"active_collection\"].query_source(\n self.query_str\n )\n self.results = results\n\n except Exception:\n self.results = {}\n self.status_textcontrol.text = \"(invalid query)\"\n else:\n self.formatted_results = self._apply_default_format(self.results)\n self.results_textcontrol.text = self.formatted_results\n self.index = 0\n self.status_textcontrol.text = (\n f\"showing {len(self.results)} of \"\n f\"{self.shared_state['active_collection'].df.shape[0]} records \"\n f\"syntax: https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html#query-string-syntax)\"\n )", "def post_process(self, raw_query_result, ts):\n prom_result = {}\n if raw_query_result[\"status\"] != \"success\":\n raise HTTPException(status_code=422, detail=\"Query did not succeed. 
Check your query template.\")\n elif \"data\" not in raw_query_result:\n return HTTPException(status_code=422, detail=\"Query did not succeed. Prometheus returned without data.\")\n elif raw_query_result[\"data\"]['resultType'] != 'vector':\n return HTTPException(status_code=422, detail=\"Query succeeded but returned with a non-vector result. Check your query template.\")\n else: # query succeeded and we have some proper data to work with\n results = raw_query_result[\"data\"][\"result\"]\n for result in results:\n version_id = self.get_version_id(result['metric'])\n if version_id:\n prom_result[version_id] = self.result_value_to_data_point(result['value'][1], ts)\n\n return prom_result", "def process(self, lists, subqueries):\n pass", "def __get_results(self, query):\n return self.mysql.query_multi_with_fetchall_as_dict(query)", "def _run_query(self, query):\n cursor = self.conn.cursor()\n cursor.execute(query)\n return cursor.fetchall()", "def aggregate_results(self):\n\n raise NotImplementedError", "def _process_results(self):\n self.portfolio.create_backtest_result_dataframe()\n stats = self._show_stats()\n return stats", "def execute_query(self):\n try:\n # get query and templates\n query = self.request.data.get(\"query\", None)\n templates = self.request.data.get(\"templates\", \"[]\")\n registries = self.get_registries()\n order_by_field = self.request.data.get(\"order_by_field\", \"\")\n\n if order_by_field:\n order_by_field = order_by_field.split(\",\")\n\n if query is None:\n content = {\"message\": \"Query should be passed in parameter.\"}\n return Response(content, status=status.HTTP_400_BAD_REQUEST)\n\n # prepare query\n raw_query = self.build_query(query, templates, registries)\n # execute query\n data_list = self.execute_json_query(raw_query, order_by_field)\n # build and return response\n return self.build_response(data_list)\n\n except Exception as api_exception:\n content = {\"message\": str(api_exception)}\n return Response(\n content, status=status.HTTP_500_INTERNAL_SERVER_ERROR\n )", "def processSearchResult(self):", "def parse_query_results(self):\n # TODO: nicely parsed needs defining; may depend on query\n return self.json_result", "def _Dynamic_RunQuery(self, query, query_result, request_id=None):\n if query.has_transaction():\n if not query.has_ancestor():\n raise apiproxy_errors.ApplicationError(\n datastore_pb.Error.BAD_REQUEST,\n 'Only ancestor queries are allowed inside transactions.')\n (filters, orders) = datastore_index.Normalize(query.filter_list(),\n query.order_list(), [])\n \n old_datastore_stub_util.FillUsersInQuery(filters)\n\n if not query.has_app():\n query.set_app(self.project_id)\n self.__ValidateAppId(query.app())\n\n self._RemoteSend(query, query_result, \"RunQuery\", request_id)\n results = query_result.result_list()\n for result in results:\n old_datastore_stub_util.PrepareSpecialPropertiesForLoad(result)\n\n last_cursor = None\n if query_result.has_compiled_cursor():\n last_cursor = query_result.compiled_cursor()\n\n if query_result.more_results():\n new_cursor = InternalCursor(query, last_cursor, len(results))\n cursor_id = self.__getCursorID()\n cursor = query_result.mutable_cursor()\n cursor.set_app(self.project_id)\n cursor.set_cursor(cursor_id)\n self.__queries[cursor_id] = new_cursor\n\n if query.compile():\n compiled_query = query_result.mutable_compiled_query()\n compiled_query.set_keys_only(query.keys_only())\n compiled_query.mutable_primaryscan().set_index_name(query.Encode())", "def _run_async_query(self, context):\n result = 
self._cb.get_object(self._doc_class.urlobject.format(self._cb.credentials.org_key))\n results = result.get(\"results\", [])\n self._total_results = len(results)\n self._count_valid = True\n return [self._doc_class(self._cb, item[\"id\"], item) for item in results]", "def process_results(self):\n return self._do_action_under_lock(self._process_results)", "def _process_query(self, query):\n query_search_pattern = r'\\nquery: (\\{.*\\}) nreturned'\n query_search_remove_pattern = r'(.*)(\\nquery: \\{.*\\} )( nreturned.*)'\n\n command_search_pattern = r'command: (\\{.*\\}) reslen'\n command_search_remove_pattern = r'(.*)(command: \\{.*\\})( reslen.*)'\n\n out = {}\n out['millis'] = query.get('millis', 0)\n out['ts'] = query.get('ts')\n\n out['org_info'] = query.get('info')\n\n info = query.get('info').split(' ')\n out['operation_type'] = info[0]\n out['collection'] = info[1]\n\n info = ' '.join(info[2:])\n mongo_query = re.search(query_search_pattern, info)\n mongo_command = re.search(command_search_pattern, info)\n\n if mongo_query:\n out['query'] = mongo_query.group(1)\n info = re.sub(query_search_remove_pattern, r'\\1\\3', info)\n\n elif mongo_command:\n out['query'] = mongo_command.group(1)\n info = re.sub(command_search_remove_pattern, r'\\1\\3', info)\n else:\n out['query'] = \"\"\n\n out['extra'] = info\n out['optimizations'] = ', '.join(self._should_optimize(out))\n\n return out", "def getResults():", "def on_result(self, result):\n # we create a self.results list to store the results as they come back from the process() method\n self.results.append(result)", "def _query_ned_and_add_results_to_database(\n self,\n batchCount):\n self.log.debug(\n 'starting the ``_query_ned_and_add_results_to_database`` method')\n\n tableName = self.dbTableName\n # ASTROCALC UNIT CONVERTER OBJECT\n converter = unit_conversion(\n log=self.log\n )\n\n # QUERY NED WITH BATCH\n totalCount = len(self.theseIds)\n print \"requesting metadata from NED for %(totalCount)s galaxies (batch %(batchCount)s)\" % locals()\n search = namesearch(\n log=self.log,\n names=self.theseIds.keys(),\n quiet=True\n )\n results = search.get()\n print \"results returned from ned -- starting to add to database\" % locals()\n\n # CLEAN THE RETURNED DATA AND UPDATE DATABASE\n totalCount = len(results)\n count = 0\n sqlQuery = \"\"\n dictList = []\n\n colList = [\"redshift_quality\", \"redshift\", \"hierarchy\", \"object_type\", \"major_diameter_arcmin\", \"morphology\", \"magnitude_filter\",\n \"ned_notes\", \"eb_v\", \"raDeg\", \"radio_morphology\", \"activity_type\", \"minor_diameter_arcmin\", \"decDeg\", \"redshift_err\", \"in_ned\"]\n\n if not len(results):\n for k, v in self.theseIds.iteritems():\n dictList.append({\n \"in_ned\": 0,\n \"primaryID\": v\n })\n for thisDict in results:\n\n thisDict[\"tableName\"] = tableName\n count += 1\n for k, v in thisDict.iteritems():\n if not v or len(v) == 0:\n thisDict[k] = \"null\"\n if k in [\"major_diameter_arcmin\", \"minor_diameter_arcmin\"] and (\":\" in v or \"?\" in v or \"<\" in v):\n thisDict[k] = v.replace(\":\", \"\").replace(\n \"?\", \"\").replace(\"<\", \"\")\n if isinstance(v, str) and '\"' in v:\n thisDict[k] = v.replace('\"', '\\\\\"')\n if \"Input name not\" not in thisDict[\"input_note\"] and \"Same object as\" not in thisDict[\"input_note\"]:\n if thisDict[\"ra\"] != \"null\" and thisDict[\"dec\"] != \"null\":\n thisDict[\"raDeg\"] = converter.ra_sexegesimal_to_decimal(\n ra=thisDict[\"ra\"]\n )\n thisDict[\"decDeg\"] = converter.dec_sexegesimal_to_decimal(\n 
dec=thisDict[\"dec\"]\n )\n else:\n thisDict[\"raDeg\"] = None\n thisDict[\"decDeg\"] = None\n thisDict[\"in_ned\"] = 1\n thisDict[\"eb_v\"] = thisDict[\"eb-v\"]\n\n row = {}\n row[\"primary_ned_id\"] = thisDict[\"input_name\"]\n\n try:\n row[\"primaryID\"] = self.theseIds[thisDict[\"input_name\"]]\n for c in colList:\n if thisDict[c] == \"null\":\n row[c] = None\n else:\n row[c] = thisDict[c]\n dictList.append(row)\n except:\n g = thisDict[\"input_name\"]\n self.log.error(\n \"Cannot find database table %(tableName)s primaryID for '%(g)s'\\n\\n\" % locals())\n dictList.append({\n \"in_ned\": 0,\n \"primary_ned_id\": thisDict[\"input_name\"]\n })\n\n else:\n dictList.append({\n \"primary_ned_id\": thisDict[\"input_name\"],\n \"in_ned\": 0,\n \"primaryID\": self.theseIds[thisDict[\"input_name\"]]\n })\n\n self.log.debug(\n 'completed the ``_query_ned_and_add_results_to_database`` method')\n return dictList", "def _parse_query(self, inv_obj, query_results, monitored_metrics):\n datapoints = []\n timestamp = int(time.time()) * 1000\n try:\n result = query_results[0]\n for metric in result.value:\n key = metric.id.counterId\n metric_name = monitored_metrics[key].name\n metric_type = monitored_metrics[key].metric_type\n dimensions = self._get_dimensions(inv_obj, metric)\n value = metric.value[0]\n if monitored_metrics[key].units == 'percent':\n value /= 100.0\n dp = self.Datapoint(metric_name, metric_type, value, dimensions, timestamp)\n datapoints.append(dp)\n except Exception as e:\n self._logger.error(\"Error while parsing query results: {0} : {1}\".format(query_results, e))\n\n return datapoints" ]
[ "0.73964363", "0.7046268", "0.69759685", "0.6945569", "0.6538649", "0.6538649", "0.65130275", "0.6506455", "0.6459518", "0.6354048", "0.6343065", "0.63208896", "0.63202345", "0.6278921", "0.6225547", "0.62215114", "0.62137306", "0.6207395", "0.6198005", "0.6193059", "0.618145", "0.61729866", "0.6169722", "0.61668926", "0.61520904", "0.6132736", "0.61280304", "0.6121455", "0.61094713", "0.6098421" ]
0.81553155
0
(Optional) Called each time an observable is created from the observable field mapping. The idea of this method is to perform any additional processing when an observable is extracted based on a field.
def process_field_mapping(self, analysis, observable: Observable, result, result_field, result_time=None) -> None: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, collection, field):\n pass", "def extract_result_observables(self, analysis, result: dict, observable: Observable = None, result_time: str or datetime.datetime =\n None) -> None:\n for result_field in result.keys():\n if result[result_field] is None:\n continue\n\n # do we have this field mapped?\n if result_field in self.observable_mapping:\n observable = analysis.add_observable(self.observable_mapping[result_field],\n self.filter_observable_value(result_field,\n self.observable_mapping[result_field],\n result[result_field]),\n o_time=result_time)\n\n self.process_field_mapping(analysis, observable, result, result_field, result_time)", "def filter_observable_value(self, result_field, observable_type, observable_value):\n return observable_value", "def on_get_field(self, ins, const, obj):\n pass", "def _set_up(self, observable):\n pass", "def handle_field(self, obj, field):\n value = field._get_val_from_obj(obj)\n if isinstance(field, GeometryField):\n self._current[field.name] = value\n else:\n super(Serializer, self).handle_field(obj, field)", "def filter(self, observable):", "def _do_mapping(self):\n pass", "def __post_init__(self):\n # Only do this if source_data already exists (not during its own initialization)\n if \"SOURCE_DATA\" in globals():\n for data_field in fields(self):\n setattr(self, data_field.name, getattr(SOURCE_DATA, data_field.name))", "def test_fields_updated_with_computed(self):\n pass", "def map_data(self, obj: object):\n pass", "def on_put_field(self, ins, const, obj, value):\n pass", "def __call__(self, field_name):\n return getattr(self, field_name)", "def post_build(self):", "def __call__(self, data):\n if not self.instance or self.instance.status not in self.mapping:\n return\n\n combiner = DataCombiner(self.instance, data, model=self.instance.__class__)\n\n editable_fields = self.mapping[self.instance.status]\n for field in combiner.data:\n if field not in editable_fields and self._has_changed(field, combiner):\n raise ValidationError({field: self.message})", "def process_field(self, field_value):\n\n if is_novabase(field_value):\n if not self.already_processed(field_value):\n self.process_object(field_value, False)\n key = self.get_cache_key(field_value)\n result = self.simple_cache[key]\n else:\n result = self.process_object(field_value, False)\n return result", "def populate_data_from_message(self, msg):\n for field in self:\n try:\n setattr(field, 'data', getattr(msg, field.name))\n except:\n continue", "def _process_plugin_data(self, fields, fetch_related_data=False):\n for field, default_value in fields:\n try:\n setattr(\n self.data,\n field,\n self.plugin_data.get(field, default_value)\n )\n except Exception:\n setattr(self.data, field, default_value)", "def write_field_lazy(self, name, value):\n\t\treader, writer, lazy_writer, bound_info=self.fields[name]\n\t\tlazy_writer(bound_info, value)", "def __init__(self, fieldFunction, geoObject, geoEvent):\n # TODO: restrict value pairs to geoObject\n pass", "def process_observation(self, observation):\n return observation", "def process_observation(self, observation):\n return observation", "def listener(self, proxy, changed_properties, invalidated_properties):\n metadata = changed_properties.lookup_value('Metadata')\n # do not signal if the metadata is empty\n self.process_metadata(metadata, False)", "def post_processor(self):", "def field_to_generator(self, field, field_values):\n if self.qualify:\n # Many of the fields are actually fields in\n # the named obj-type. 
Take a shot at seeing if that's\n # the case, and call a sample value collector\n\n if self.path_other:\n # These are typically an path|field names.\n parts = self.path_other.split('|')\n o_field = field\n if len(parts) > 1:\n o_field = parts[1]\n sample_path_field = 'sample_path_field'\n if self.is_no_command:\n sample_path_field = 'sample_no_path_field'\n return '+id.path_field(\"%s\", \"%s\")' \\\n % (sample_path_field, parts[0], o_field)\n\n python_field = field.replace(\"-\", \"_\")\n if self.path:\n sample_path_field = 'sample_path_field'\n if self.is_no_command:\n sample_path_field = 'sample_no_path_field'\n return '+id.%s(\"%s\", \"%s\")' \\\n % (sample_path_field, self.path, field)\n else:\n return '+id.%s()' % python_field\n\n elif field_values != None:\n # this can return None, when it does, the command can't continue to be generated\n # when the field does appear in field_values, remove it; field_values then\n # becomes a scoreboard to determine when the command has used up all the requested fields.\n field_string = field_values.get(field)\n if field_string == None:\n return None\n del field_values[field]\n return utif.quote_string(str(field_string))\n else:\n return \"<%s>\" % field", "def lookup(self, entry):\n entry.add_lazy_fields(self.lazy_loader, self.field_map)", "def post_build(self):\n pass", "def handle_m2m_field(self, obj, field):\n if field.rel.through._meta.auto_created:\n # self._start_relational_field(field)\n if self.use_natural_keys and hasattr(field.rel.to, 'natural_key'):\n # If the objects in the m2m have a natural key, use it\n def handle_m2m(value):\n natural = value.natural_key()\n nat_key = NATURAL_KEY_JOINER.join(natural)\n field_id = \"%s.%s\" % (obj.pk, nat_key)\n self._start_relational_field(field, field_id=field_id, keytype=\"natural\")\n self.xml.characters(smart_text(nat_key))\n self.xml.endElement(\"source\")\n self.indent(3)\n self.xml.endElement(\"trans-unit\")\n # Iterable natural keys are rolled out as subelements\n # self.xml.startElement(\"object\", {})\n # for key_value in natural:\n # self.xml.startElement(\"natural\", {})\n # self.xml.characters(smart_text(key_value))\n # self.xml.endElement(\"natural\")\n # self.xml.endElement(\"object\")\n else:\n def handle_m2m(value):\n field_id = \"%s.%s\" % (obj.pk, value._get_pk_val())\n self._start_relational_field(field, field_id)\n self.xml.characters(smart_text(value._get_pk_val()))\n self.xml.endElement(\"source\")\n self.indent(3)\n self.xml.endElement(\"trans-unit\")\n # self.xml.addQuickElement(\"object\", attrs={\n # 'pk' : smart_text(value._get_pk_val())\n # })\n for relobj in getattr(obj, field.name).iterator():\n handle_m2m(relobj)", "def __call__(self, obs):\n return self.mapping[obs]", "def observerRead(self, x):\n pass" ]
[ "0.58540356", "0.5765759", "0.5707929", "0.5624119", "0.55814207", "0.5354908", "0.51964396", "0.5195021", "0.50517035", "0.4980307", "0.49428532", "0.49420545", "0.4938556", "0.49383038", "0.49340662", "0.4906365", "0.49007678", "0.4898083", "0.48704743", "0.48574975", "0.4851272", "0.4851272", "0.48368502", "0.4831419", "0.481354", "0.480487", "0.4786017", "0.4785195", "0.4771315", "0.47642928" ]
0.7088125
0
Analysis module execution. See the base class for more information. In order for this method to run as expected, all required methods must be implemented in child classes (see the BaseAPIAnalyzer docstring). This method may be overridden if the analysis 'flow' must be drastically different (e.g., executing and correlating using multiple queries or even multiple APIs). However, most complex query processing can be handled without overriding this method by adding additional methods to be called from process_query_results. For an example, see QRadarAPIAnalyzer.process_qradar_event.
def execute_analysis(self, observable, **kwargs) -> bool or Analysis: analysis = observable.get_analysis(self.generated_analysis_type, instance=self.instance) if analysis is None: analysis = self.create_analysis(observable) analysis.question = self.config['question'] analysis.query_summary = self.config['summary'] if self.correlation_delay is not None: return self.delay_analysis(observable, analysis, seconds=self.correlation_delay.total_seconds()) self.build_target_query(observable, **kwargs) analysis.query = self.target_query logging.debug(f'Executing {self.api} query: {self.target_query}') try: analysis.query_results = self.execute_query() except Exception as e: logging.error(f'Error when executing {self.api} query: {e}') analysis.query_results = None analysis.query_error = e if analysis.query_results is None: return False logging.debug(f'Processing query results') self.process_query_results(analysis.query_results, analysis, observable) self.process_finalize(analysis, observable) if kwargs.get('return_analysis'): return analysis return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self):\n self.run_measurement()\n self.run_analysis()\n self.results = self.analysis.proc_data_dict['analysis_params_dict']\n if self.get_param_value('update'):\n self.run_update()\n self.dev.update_cancellation_params()\n\n if self.get_param_value('configure_mux_drive'):\n drive_lo_freqs = self.get_param_value('drive_lo_freqs')\n configure_qubit_mux_drive(self.qubits, drive_lo_freqs)", "def run_dataflow(self, *args, **kwargs):\n raise NotImplementedError", "def run_analysis(wf):\n if wf.analysis[\"type\"] == \"one_sample_tests\":\n start_one_sample_tests(wf)\n\n elif wf.analysis[\"type\"] == \"two_sample_tests\":\n start_two_sample_tests(wf)\n\n elif wf.analysis[\"type\"] == \"factorial_tests\":\n start_factorial_tests(wf)\n\n elif wf.analysis[\"type\"] == \"n_sample_tests\":\n start_n_sample_tests(wf)\n\n info(\"> Finished analysis\")", "def execute(self, flow_name, flow_arguments):\n for node_args in flow_arguments:\n if self.is_filter_query(node_args):\n for args in self.expand_filter_query(node_args):\n self.run_selinon_flow(flow_name, args)\n else:\n self.run_selinon_flow(flow_name, node_args)", "def execute(self) :\n \n raise NotImplementedError()", "def execute(self):\n raise NotImplementedError", "def execute(self):\n raise NotImplementedError", "def executeAnalysis(config, samples, visitor):\n # store cuts in \"info\" (re-created from TQCuts)\n # ROOT.xAOD.clearTransientTrees()\n #nEventsProcessed = 0\n\n CLI = config.getFolder(\"CLI+\")\n\n # flag indicating to run analysis in debug mode\n debug = CLI.getTagBoolDefault(\"debug\",False)\n # flag indicating to run a dummy analysis\n dummy = CLI.getTagBoolDefault(\"dummy\",False)\n\n downmerge = CLI.getTagBoolDefault(\"downmerge\",False)\n downmergeTo = CLI.getTagStandardStringDefault(\"downmergeTo\",\"\")\n\n pathselect = CLI.getTagVStandardString(\"pathselect\")\n\n if debug:\n maxEvents = 100\n else:\n maxEvents = config.getTagIntegerDefault(\"maxEvents\",-1)\n\n # proceed with analysis\n appname = QFramework.TQLibrary.getApplicationName().Data()\n visitor.setVisitTraceID(appname)\n if maxEvents > 0:\n QFramework.WARN(\"setting maximum number of events per sample to {:d}\".format(maxEvents))\n visitor.setMaxEvents(maxEvents)\n QFramework.TQLibrary.allowRedirection(False)\n timer = ROOT.TStopwatch()\n nsamples = 0\n if pathselect.size() > 0:\n paths = ROOT.TString(\",\".join(map(str,pathselect)))\n else:\n # Read in sample folder restrictions and convert to a single comma-\n # separated string, the same format as it would be passed in via CLI.\n # Can't use `join` since this is a vector<TString>\n # Can't read in the field as a single string with getTagString,\n # perhaps since it has commas\n paths = \"\"\n for path in config.getTagVString(\"restrict\"):\n paths += path.Data() + \",\"\n paths = ROOT.TString(paths[:-1])\n if paths.Length() != 0:\n if not dummy:\n nsamples = samples.visitSampleFolders(visitor,paths)\n QFramework.TQLibrary.recordMemory()\n QFramework.TQObservable.clearAll()\n QFramework.TQLibrary.recordMemory()\n if downmerge or downmergeTo:\n downmergeTargets = downmergeTo\n if not downmergeTargets:\n downmergeTargets = paths\n samples.setTag(\".generalize.histograms\",True,downmergeTargets)\n samples.setTag(\".generalize.cutflow\",True,downmergeTargets)\n else:\n QFramework.WARN(\"dummy run, skipping execution of cutbased analysis on paths '{:s}'\".format(pathselect))\n else:\n if not dummy:\n nsamples = samples.visitMe(visitor)\n QFramework.TQLibrary.recordMemory()\n else:\n QFramework.WARN(\"dummy 
run, skipping execution of cutbased analysis on root sample folder\")\n\n # TODO: put the rest of this in a separate function like for post processing?\n # right now nsamples is returned but nothing is done with it\n if nsamples > 0:\n if downmerge or downmergeTo:\n samples.generalizeObjects(\".generalize\")\n timer.Stop()\n\n # TODO: put this section in its own function (with cuts available)\n # just get cuts from visitor? (will need to provide a channel in the MCASV case I think)\n if config.getTagBoolDefault(\"checkRun\",True):\n\n if dummy:\n allevents = QFramework.TQCounter(\"dummy\",0,0,0)\n else:\n if isinstance(visitor,QFramework.TQAnalysisSampleVisitor):\n allevents = samples.getCounter(\".\",visitor.getBaseCut().GetName())\n elif isinstance(visitor,QFramework.TQMultiChannelAnalysisSampleVisitor):\n channels = config.getTagVString(\"channels\")\n allevents = samples.getCounter(\".\",visitor.getBaseCut(channels[0]).GetName())\n\n if nsamples > 0:\n # debugging printout\n # TODO: make separate method?\n if config.getTagBoolDefault(\"printCounterValues\",False):\n samples.printListOfCounters()\n printhists = config.getTagVString(\"printHistogramsASCII\")\n for hist in printhists:\n h = samples.getHistogram(\".\",hist)\n if h:\n QFramework.TQHistogramUtils.printHistogramASCII(h)\n else:\n QFramework.ERROR(\"unable to access histogram '{:s}'\".format(hist))\n\n else:\n QFramework.ERROR(\"execution of analysis finished but might have failed, no samples were visited successfully (they might simply be empty).\")\n runtime = config.getFolder(\"runtime+\")\n # store in runtime folder the fact that no samples were visited in the form of an error string\n analysisError = \"execution of analysis finished but might have failed, no samples were visited successfully (they might simply be empty).\"\n runtime.setTagString(\"analysisError\", analysisError)\n #don't quit just now, but instead we'll write an alternative output file later which basically states \"job didn't crash but there is a small chance something went wrong\"\n #quit()\n\n #return nEventsProcessed\n return nsamples", "def run(self):\n self.run_measurement()\n self.run_analysis()\n if self.get_param_value('update'):\n self.run_update()", "def run_analysis(self):\n\n self._apply_loads_to_framat_model()\n\n # ----- Run the FramAT analysis -----\n results = standard_run(args=StdRunArgs(filename=self.own_files['model_file'], verbose=True))\n self.last_solution = results\n\n # ----- Share loads -----\n logger.info(\"Sharing loads...\")\n frame = results['frame']\n self.shared.structure.def_fields = frame.deformation.get_displacement_fields(frame, n_sup=1000)", "def run(self):\n self.assign_inputs()\n self.execute()\n self.collect_outputs()", "def execute(self) -> None:\n raise NotImplementedError", "def run(self):\n\n input_args = {}\n self._execute(input_args, self.args)", "def run_analysis(self, argv):\n self._run_argparser(argv)\n self.run()", "def execute(self):\n raise NotImplementedError(\"Subclasses should override this method.\")", "def initiateAnalysis(self,):\n\n #\n # Imports\n #\n import os\n import sys\n\n #\n # get optional arguments from commandline\n #\n self.getComandLineOptions()\n \n #\n # for logmessages\n #\n tmpLogMessages = ['----------------\\n']\n tmpLogMessage = self.createLogHeader()\n tmpLogMessages.append(tmpLogMessage)\n #print tmpLogMessage\n \n #\n # check analysis path\n #\n if os.path.isdir(self.analysisPath):\n tmpLogMessage = 'WARNING: the analysis path already exists.\\n'\n print tmpLogMessage\n 
tmpLogMessages.append(tmpLogMessage)\n else:\n tmpLogMessage = 'Creating directory \"'+self.analysisPath+'\".\\n'\n #print tmpLogMessage\n tmpLogMessages.append(tmpLogMessage)\n os.makedirs(self.analysisPath)\n \n #\n # create the logfile\n #\n tmpLogMessages += self.openLogfileConnection()\n \n #\n # write tmpLogMessages to logfile\n #\n SEAseqPipeLine.logfile.write(''.join(tmpLogMessages))\n \n #\n # create the database\n #\n self.database.create()\n \n #\n # add run to runs table\n #\n self.database.addToRunsTable(self.startTimeStr, self.command, self.commandLine, True, MASTER)\n \n return 0", "def run_test(self):\n self.output_analytics = self.run_inference()\n self.output_df = pd.DataFrame(self.output_analytics)", "def execute(self):\n # Put your execute step code here before calling the '_doneExecution' method.\n self._doneExecution()", "def _execute(self):\n\n self.time_point(tag=\"execution\")\n\n main = self.import_engine_as_python_function()\n\n output_file = os.path.join(\n self.params[\"output_dir_path\"], self.params[\"output_file\"]\n )\n\n input_file = os.path.join(\n self.params[\"input_dir_path\"], self.params[\"input_file\"]\n )\n\n translations = self.params['translations']['_grouped_by_translated_key']\n\n pyqms_params = {\n \"PERCENTILE_FORMAT_STRING\": None,\n \"M_SCORE_THRESHOLD\": None,\n \"ELEMENT_MIN_ABUNDANCE\": None,\n \"MIN_REL_PEAK_INTENSITY_FOR_MATCHING\": None,\n \"REQUIRED_PERCENTILE_PEAK_OVERLAP\": None,\n \"MINIMUM_NUMBER_OF_MATCHED_ISOTOPOLOGUES\": None,\n \"INTENSITY_TRANSFORMATION_FACTOR\": None,\n \"UPPER_MZ_LIMIT\": None,\n \"LOWER_MZ_LIMIT\": None,\n \"MZ_TRANSFORMATION_FACTOR\": None,\n \"REL_MZ_RANGE\": None,\n \"REL_I_RANGE\": None,\n \"INTERNAL_PRECISION\": None,\n \"MAX_MOLECULES_PER_MATCH_BIN\": None,\n \"SILAC_AAS_LOCKED_IN_EXPERIMENT\": None,\n \"BUILD_RESULT_INDEX\": None,\n \"MACHINE_OFFSET_IN_PPM\": None,\n \"FIXED_LABEL_ISOTOPE_ENRICHMENT_LEVELS\": None,\n \"MZ_SCORE_PERCENTILE\": None,\n }\n sugarpy_params = {}\n sugarpy_params[\"charges\"] = list(\n range(\n self.params[\"translations\"][\"precursor_min_charge\"],\n self.params[\"translations\"][\"precursor_max_charge\"] + 1,\n )\n )\n\n for translated_key, translation_dict in translations.items():\n if translated_key == \"REL_MZ_RANGE\":\n if self.params[\"translations\"][\"ms_level\"] == 1:\n print(\n \"\"\"\n [ WARNING ] precursor_mass_tolerance_plus and precursor_mass_tolerance_minus\n [ WARNING ] need to be combined for SugarPy (use of symmetric tolerance window).\n [ WARNING ] The arithmetic mean is used.\n \"\"\"\n )\n pyqms_params[\"REL_MZ_RANGE\"] = (\n float(\n self.params[\"translations\"][\"precursor_mass_tolerance_plus\"]\n )\n + float(\n self.params[\"translations\"][\n \"precursor_mass_tolerance_minus\"\n ]\n )\n ) / 2.0\n if (\n self.params[\"translations\"][\"precursor_mass_tolerance_unit\"]\n == \"da\"\n ):\n pyqms_params[\n \"REL_MZ_RANGE\"\n ] = ursgal.ucore.convert_dalton_to_ppm(\n pyqms_params[\"REL_MZ_RANGE\"],\n base_mz=self.params[\"translations\"][\"base_mz\"],\n )\n else:\n pyqms_params[\"REL_MZ_RANGE\"] = self.params[\"translations\"][\n \"frag_mass_tolerance\"\n ]\n if self.params[\"translations\"][\"frag_mass_tolerance_unit\"] == \"da\":\n pyqms_params[\n \"REL_MZ_RANGE\"\n ] = ursgal.ucore.convert_dalton_to_ppm(\n pyqms_params[\"REL_MZ_RANGE\"],\n base_mz=self.params[\"translations\"][\"base_mz\"],\n )\n pyqms_params[\"REL_MZ_RANGE\"] = pyqms_params[\"REL_MZ_RANGE\"] * 1e-6\n elif translated_key in pyqms_params.keys():\n pyqms_params[translated_key] = 
list(translation_dict.values())[0]\n elif \"charge\" in translated_key:\n continue\n elif translated_key == \"mzml_file\":\n sugarpy_params[translated_key] = list(translation_dict.values())[0][0]\n elif len(translation_dict) == 1:\n sugarpy_params[translated_key] = list(translation_dict.values())[0]\n else:\n print(\n \"The translatd key \",\n translated_key,\n \" maps on more than one ukey, but no special rules have been defined\",\n )\n print(translation_dict)\n sys.exit(1)\n sugarpy_params[\"pyqms_params\"] = pyqms_params\n sugarpy_params[\"ident_file\"] = input_file\n sugarpy_params[\"output_file\"] = output_file\n sugarpy_params[\"force\"] = True\n\n out = main(**sugarpy_params)\n\n self.print_execution_time(tag=\"execution\")\n return out", "def execute(self):\n\n pass", "def execute(self):\n\n pass", "def execute(self):\n\n pass", "def execute(self):\n\n pass", "def async_analysis(kwargs):\n # we can't pickle our objects for remote works so we pickle the raw request\n # and then load it here.\n data = analysis_input_schema.load(kwargs).data\n return analysis(**data)", "def execute(self):\n raise NotImplementedError('execute')", "def execute(cls):\n pass", "def analyze(self, analysis_step=None):\n analysis_step = self.analyse(analysis_step)\n return analysis_step", "def execute(self):\n pass", "def execute(self):\n pass", "def execute(self):\n pass" ]
[ "0.62735236", "0.60627526", "0.59651273", "0.5931442", "0.5895053", "0.5891176", "0.5891176", "0.5877689", "0.5872152", "0.58661216", "0.58206034", "0.57702005", "0.57657474", "0.5761147", "0.5748475", "0.5739148", "0.57294863", "0.5725235", "0.5660845", "0.56297", "0.56297", "0.56297", "0.56297", "0.5615132", "0.5600247", "0.5561046", "0.55532384", "0.55424774", "0.55424774", "0.55424774" ]
0.6239604
1
Mapping the dialogue state, which contains the history of utterances and informed/requested slots up to this turn, into a vector so that it can be fed into the model. This mapping function uses the informed/requested slots that the user has informed and requested up to this turn.
def state_to_representation_last(self, state): # Current_slots rep. current_slots = copy.deepcopy(state["current_slots"]["inform_slots"]) current_slots.update(state["current_slots"]["explicit_inform_slots"]) current_slots.update(state["current_slots"]["implicit_inform_slots"]) current_slots.update(state["current_slots"]["proposed_slots"]) current_slots_rep = np.zeros(len(self.slot_set.keys())) for slot in current_slots.keys(): if current_slots[slot] == '1': current_slots_rep[self.slot_set[slot]] = 1.0 elif current_slots[slot] == '0': current_slots_rep[self.slot_set[slot]] = -1.0 else: current_slots_rep[self.slot_set[slot]] = -2.0 # Turn rep. turn_rep = np.zeros(self.parameter["max_turn"]) turn_rep[state["turn"]] = 1.0 ''' # Agent last request slot rep. agent_request_slots_rep = np.zeros(len(self.slot_set.keys())) try: agent_request_slots = copy.deepcopy(state["agent_action"]["request_slots"]) for slot in agent_request_slots.keys(): agent_request_slots_rep[self.slot_set[slot]] = 1.0 except: pass ''' # state_rep = np.hstack((current_slots_rep, wrong_diseases_rep, user_action_rep, user_inform_slots_rep, user_request_slots_rep, agent_action_rep, agent_inform_slots_rep, agent_request_slots_rep, turn_rep)) # state_rep = np.hstack((current_slots_rep, agent_request_slots_rep, turn_rep)) state_rep = np.hstack((current_slots_rep, turn_rep)) return state_rep
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepare_state_representation(self, state):\n\n user_action = state['user_action']\n current_slots = state['current_slots']\n agent_last = state['agent_action']\n\n ########################################################################\n # Create one-hot of acts to represent the current user action\n ########################################################################\n user_act_rep = np.zeros((1, self.act_cardinality))\n user_act_rep[0, self.act_set[user_action['diaact']]] = 1.0\n\n ########################################################################\n # Create bag of inform slots representation to represent the current user action\n ########################################################################\n user_inform_slots_rep = np.zeros((1, self.slot_cardinality))\n for slot in user_action['inform_slots'].keys():\n user_inform_slots_rep[0, self.slot_set[slot]] = 1.0\n\n ########################################################################\n # Create bag of request slots representation to represent the current user action\n ########################################################################\n user_request_slots_rep = np.zeros((1, self.slot_cardinality))\n for slot in user_action['request_slots'].keys():\n user_request_slots_rep[0, self.slot_set[slot]] = 1.0\n\n ########################################################################\n # Creat bag of filled_in slots based on the current_slots\n ########################################################################\n current_slots_rep = np.zeros((1, self.slot_cardinality))\n for slot in current_slots['inform_slots']:\n current_slots_rep[0, self.slot_set[slot]] = 1.0\n\n ########################################################################\n # Encode last agent act\n ########################################################################\n agent_act_rep = np.zeros((1, self.act_cardinality))\n if agent_last:\n agent_act_rep[0, self.act_set[agent_last['diaact']]] = 1.0\n\n ########################################################################\n # Encode last agent inform slots\n ########################################################################\n agent_inform_slots_rep = np.zeros((1, self.slot_cardinality))\n if agent_last:\n for slot in agent_last['inform_slots'].keys():\n agent_inform_slots_rep[0, self.slot_set[slot]] = 1.0\n\n ########################################################################\n # Encode last agent request slots\n ########################################################################\n agent_request_slots_rep = np.zeros((1, self.slot_cardinality))\n if agent_last:\n for slot in agent_last['request_slots'].keys():\n agent_request_slots_rep[0, self.slot_set[slot]] = 1.0\n\n # turn_rep = np.zeros((1, 1)) + state['turn'] / 10.\n turn_rep = np.zeros((1, 1))\n\n ########################################################################\n # One-hot representation of the turn count?\n ########################################################################\n turn_onehot_rep = np.zeros((1, self.max_turn))\n turn_onehot_rep[0, state['turn']] = 1.0\n\n self.final_representation = np.hstack(\n [\n user_act_rep,\n user_inform_slots_rep,\n user_request_slots_rep,\n agent_act_rep,\n agent_inform_slots_rep,\n agent_request_slots_rep,\n current_slots_rep,\n turn_rep,\n turn_onehot_rep\n ])\n return self.final_representation", "def act(self):\n channel_act = copy.deepcopy(self.observation)\n\n for user_act in channel_act['user_acts']:\n # Dialogue Act\n da_conf = self.generate_confidence()\n 
da_value = user_act[\"dialogue_act\"][\"value\"]\n\n if np.random.random() > da_conf:\n if da_value == UserAct.AFFIRM:\n da_value = UserAct.NEGATE\n elif da_value == UserAct.NEGATE:\n da_value == UserAct.AFFIRM\n else:\n pass\n\n user_act[\"dialogue_act\"][\"value\"] = da_value\n user_act[\"dialogue_act\"][\"conf\"] = self.generate_confidence()\n\n # Intent\n if \"intent\" in user_act:\n intent_value = user_act[\"intent\"][\"value\"]\n if self.intents[intent_value].get(\"speech\", False):\n intent_conf = 1.\n else:\n intent_conf = self.generate_confidence()\n intent_possible_values = self.slots[\"intent\"][\n \"possible_values\"].copy()\n\n if np.random.random() > intent_conf:\n intent_possible_values.remove(intent_value)\n intent_value = np.random.choice(intent_possible_values)\n\n user_act['intent']['value'] = intent_value\n user_act['intent']['conf'] = intent_conf\n\n # Slot Values\n for slot_dict in user_act.get('slots', list()):\n slot_name = slot_dict[\"slot\"]\n slot_value = slot_dict[\"value\"]\n\n if self.slots[slot_name][\"node\"] != \"BeliefNode\":\n slot_conf = 1.0\n else:\n slot_conf = self.generate_confidence()\n\n slot_possible_values = self.slots[slot_name].get(\n \"possible_values\")\n\n if slot_possible_values is None:\n slot_possible_values = list()\n\n slot_possible_values = slot_possible_values.copy()\n if len(slot_possible_values) and np.random.random() > slot_conf:\n slot_possible_values.remove(slot_value)\n slot_value = np.random.choice(slot_possible_values)\n\n slot_dict['conf'] = slot_conf\n\n channel_act[\"channel_utterance\"] = self.template_nlg(\n channel_act['user_acts'])\n return channel_act", "def result(self, state, action):\n state_after_act = [[0 for i in range(self.col)] for j in range(self.row)]\n for k in action:\n x = k[1][0]\n y = k[1][1]\n if k[0] == \"vaccinate\":\n state_after_act[x][y] = ('I', 1)\n else:\n state_after_act[x][y] = ('Q', 1)\n\n for i in range(self.row):\n for j in range(self.col):\n if state_after_act[i][j] == 0:\n if state[i][j][0] == 'U' or state[i][j][0] == 'I':\n state_after_act[i][j] = state[i][j]\n\n elif state[i][j][0] == 'S':\n if state[i][j][1] == 3:\n state_after_act[i][j] = ('H', 1)\n else:\n if state[i][j][1] == 1:\n state_after_act[i][j] = ('S', 2)\n elif state[i][j][1] == 2:\n state_after_act[i][j] = ('S', 3)\n\n elif state[i][j][0] == 'Q':\n if state[i][j][1] == 2:\n state_after_act[i][j] = ('H', 1)\n else:\n state_after_act[i][j] = ('Q', 2)\n\n elif state[i][j][0] == 'H':\n state_after_act[i][j] = self.healthy(i, j, state,state_after_act)\n state_after_act[i] = tuple(state_after_act[i])\n return tuple(state_after_act)", "def transform_state(state):\n # TODO: automate n_enemies calculation -> only valid fot n_enemies = n_friends\n n_agents = len(state.agents)\n n_enemies = n_agents // 2 # TODO: improve this\n states_v = torch.zeros(n_agents, 5 + n_enemies) # 5 = x, y, alive, ammo, aim, enemy visible ? 
(x n_enemies)\n for agent_idx, agent in enumerate(state.agents):\n states_v[agent_idx, 0] = state.position[agent][0] # x\n states_v[agent_idx, 1] = state.position[agent][1] # y\n states_v[agent_idx, 2] = state.alive[agent]\n states_v[agent_idx, 3] = state.ammo[agent] / 5 # args.ammo\n states_v[agent_idx, 4] = -1 if state.aim[agent] is None else state.aim[agent].id\n idx = 5\n for other in state.agents:\n if (agent, other) in state.visible:\n states_v[agent_idx, idx] = int(state.visible[(agent, other)])\n idx += 1\n return states_v", "def dialogue(self, user_input, state, user_preferences):\n \n if user_input in [\"configure formal\", \"configure delay\", \"configure informal\", \"configure no delay\"]:\n self.configure(user_input)\n user_input=\"\"\n self.dialogue(user_input,state,user_preferences)\n \n time.sleep(self.delay)\n self.statelog.append([user_input,state]) #tuple of user utterance and its associated state. We use this to keep track of state jumps.\n \n if state == \"exit\":\n print(\"Dialog Agent: \"+random.choice(self.responses.get(\"Goodbye\")))\n return\n \n if state in (\"init\"):\n user_preferences = [0,0,0]\n user_input = input(\"Dialog Agent: \"+random.choice(self.responses.get(\"Welcome\"))+\"User: \")\n state = self.classification(user_input)\n self.dialogue(user_input, state, user_preferences)\n return\n \n if state in (\"inform\", \"reqalts\", 'hello'):\n extracted_preferences = self.preference_extractor(user_input)\n for i,d in enumerate(user_preferences):\n if d == 0:\n user_preferences[i] = extracted_preferences[i]\n \n state=\"fill_blanks\" #if more slots to be filled\n self.suggestions=self.lookup(user_preferences)\n\n if (len(self.suggestions)==0) or (len(self.suggestions)==1):\n \n state=\"answer\" #if there is none or 1 restaurant to suggest\n self.dialogue(user_input, state, user_preferences)\n return \n \n \n if state == \"fill_blanks\": #ask user for area/foodtype/pricerange\n grounding=self.grounding(user_preferences)\n if user_preferences[0] == 0:\n user_input = input(\"Dialog Agent: \"+grounding+random.choice(self.responses.get(\"Area\"))+\"User: \")\n \n state = self.classification(user_input)\n if \"area\" not in user_input:\n user_input+=\" area\"\n if \"dont care\" in user_input:\n user_input='any area'\n elif user_preferences[1] == 0:\n user_input = input(\"Dialog Agent: \"+grounding+random.choice(self.responses.get(\"Price\"))+\"User: \")\n \n state = self.classification(user_input)\n if \"price\" not in user_input:\n user_input+=\" price\"\n if \"dont care\" in user_input:\n user_input='any price'\n elif user_preferences[2] == 0:\n user_input = input(\"Dialog Agent: \"+grounding+random.choice(self.responses.get(\"Food\"))+\"User: \")\n \n state = self.classification(user_input)\n if \"food\" not in user_input:\n user_input+=\" food\"\n if \"dont care\" in user_input:\n user_input='any food'\n else:\n state='ask_extra_preferences'\n self.dialogue(user_input, state, user_preferences)\n return\n \n \n if state== 'ask_extra_preferences':\n state=self.ask_extra_preferences(user_preferences)\n self.dialogue(user_input, state, user_preferences)\n return\n \n \n if state==\"confirmpreferences\":\n user_input = input(\"Dialog Agent: \"+random.choice(self.responses.get(\"AffirmPreferences\")).format(user_preferences[0],user_preferences[1],user_preferences[2])+\"User: \")\n accept = self.agree(user_input)\n if accept is True:\n self.suggestions = self.lookup(user_preferences)\n state = \"answer\"\n elif accept is False:\n state = \"inform\"\n user_input = 
\"\"\n user_preferences = [0,0,0]\n elif accept==\"reqalts\":\n user_preferences=[0,0,0]\n else: \n state = \"accept\"\n self.dialogue(user_input, state, user_preferences)\n return\n \n \n if state == \"answer\": \n if self.suggestions: #found at least 1 restaurant\n user_input=input(\"Dialog Agent: \"+self.suggest_restaurant()+\"User: \")\n state = self.classification(user_input)\n if state in [\"ack\", \"affirm\"]:\n state = \"goodbye\"\n elif state in [\"reqalts\", \"reqmore\", \"deny\", \"negate\"]:\n state = \"answer\"\n else: #no restaurants found. Search for alternatives\n alternatives=self.get_alternative_restaurants(self.alternative_preferences(user_preferences))#offer alternatives\n if len(alternatives)==1: #found 1 alternative\n print(\"Dialog Agent: \"+random.choice(self.responses.get(\"NoOptions\"))+\"Let me look for an alternative for you...\\n\")\n self.suggestions=alternatives\n self.recommendation=self.suggestions[0]\n user_input=input(\"Dialog Agent: \"+self.suggest_restaurant()+\"User: \")\n if self.agree(user_input):\n self.get_restaurant_contacts(self.recommendation)\n state=\"goodbye\"\n elif alternatives: #found multiple alternatives\n print(\"Dialog Agent: \"+random.choice(self.responses.get(\"NoOptions\"))+\"Here is a list of alternatives:\")\n for a in alternatives:\n print(\"Dialog Agent: \"+self.get_restaurant_info(a))\n user_input = input(\"Dialog Agent: \"+'Would you like to choose one (1) or change your preferences(2)?\\n'+\"User: \")\n if user_input==\"1\":\n user_input=input(\"Dialog Agent: \"+\"Which one would you like to choose?\\n\"+\"User: \")\n for alternative in alternatives:\n if dt(user_input.lower(), alternative.lower())<3:# take into account misspellings\n self.recommendation=alternative\n state=\"thankyou\"\n elif user_input==\"2\":\n user_preferences=[0,0,0]\n state='inform'\n elif user_input==\"exit\":\n state='exit'\n else:\n print(\"Dialog Agent: \"+\"Please choose one of the two options\")\n else:#didnt find any alternative\n print(\"Dialog Agent: \"+random.choice(self.responses.get(\"NoOptions\")))\n user_preferences=[0,0,0]\n state='inform'\n user_input=\"\"\n self.dialogue(user_input, state, user_preferences)\n return\n \n \n if state in [\"reqalts\",\"thankyou\", \"goodbye\", \"reset\"]:\n \n user_input=input(\"Dialog Agent: \"+self.get_restaurant_contacts(self.recommendation)+\". 
Would you like to finish here?\\n\"+\"User: \")\n\n if (self.classification(user_input) in (\"ack\",\"affirm\")):\n state=\"exit\"\n else:\n state=\"init\"\n self.dialogue(user_input, state, user_preferences)\n return\n \n \n if state == \"repeat\":\n try:\n user_input = self.statelog[len(self.statelog) - 3][0]\n state = self.statelog[len(self.statelog) - 3][1]\n except IndexError:\n print(\"Dialog Agent: \"+\"Nowhere to go back, starting again\\n\")\n state = \"init\"\n self.dialogue(user_input, state, user_preferences)\n return\n \n \n else:\n print(\"Dialog Agent: \"+\"I could not understand that, could you phrase it differently?\")#statelog[len(statelog) + 1][0]\n state = self.statelog[len(self.statelog) - 2][1]\n self.dialogue(user_input, state, user_preferences)\n return", "def actions(self, state):\n if (state == (3,3,1)): # if yes, send a missionary and a canniable to land B\n return (2,2,0)\n if (state == (2,2,0)): # if yes, send a missionary back to land A\n return (3,2,1)\n if (state == (3,2,1)): # if yes, send a missionary and a canniable to land B\n return (2,1,0)\n if (state == (2,1,0)): # if yes, send a missionary back to land A\n return (3,1,1)\n if (state == (3,1,1)): # if yes, send 2 missionary to land B\n return (1,1,0)\n if (state == (1,1,0)): # if yes, send a missionary and a canniable to land A\n return (2,2,1)\n if (state == (2,2,1)): # if yes, send 2 missionary to land B\n return (0,2,0)\n if (state == (0,2,0)): # if yes, send a missionary to land A\n return (1,2,1)\n if (state == (1,2,1)): # if yes, send a missionary and a canniable to land B\n return (0,1,0)\n if (state == (0,1,0)): # if yes, send a missionary to land A\n return (1,1,1)\n if (state == (1,1,1)): # if yes, send a missionary and a canniable to land B\n return (0,0,0)\n\n raise NotImplementedError", "def result(self, state, action):\n # clone the state\n new_state = state.myclone()\n\n\n\n if action==\"Pass\":\n new_state.maxs_turn = not state.maxs_turn\n new_state.numTurns = state.numTurns + 1\n new_state.stringified = new_state.__str__()\n return new_state\n\n # parse the details of the action\n action = action.rstrip().rsplit(\": \")\n type = action[0]\n details = action[1].rsplit(\" --> \")\n start = details[0].rsplit(\" @ \")\n who = start[0]\n source = start[1]\n source = source[1:len(source)-1]\n source = source.rsplit(\",\")\n source = (int(source[0]), int(source[1]))\n if type==\"Attack\":\n end = details[1].rsplit(\" @ \")\n victim = end[0]\n target = end[1]\n target = target[1:len(target)-1]\n target = target.rsplit(\",\")\n target = (int(target[0]), int(target[1]))\n else:\n target = details[1]\n target = target[1:len(target)-1]\n target = target.rsplit(\",\")\n target = (int(target[0]), int(target[1])) \n \n \n if type==\"Attack\":\n if victim==\"Sith\" or victim==\"Rebel\":\n if who==\"Rebel\" and target[0]==1:\n new_state.gameState[source] = ' '\n new_state.gameState[target] = 'J'\n new_state.numJedi += 1\n new_state.numRebels -= 1\n else:\n new_state.gameState[source] = ' '\n new_state.gameState[target] = who[0]\n if victim==\"Rebel\": new_state.numRebels -= 1\n if victim==\"Sith\": new_state.numSith -= 1\n else:\n new_state.gameState[target] = 'S'\n new_state.numSith += 1\n new_state.numJedi -= 1\n else:\n if who==\"Rebel\" and target[0]==1:\n new_state.gameState[source] = ' '\n new_state.gameState[target] = 'J'\n new_state.numJedi += 1\n new_state.numRebels -= 1\n else:\n new_state.gameState[source] = ' '\n new_state.gameState[target] = who[0]\n \n \n new_state.maxs_turn = not 
state.maxs_turn\n new_state.numTurns = state.numTurns + 1\n self._cache_winner(new_state)\n new_state.stringified = new_state.__str__()\n \n return new_state", "def Viterbi(self, sent):\n viterbi = defaultdict(dict)\n backpointer = defaultdict(dict)\n sent_tag = []\n pos_list = [end_token]\n viterbi['0'] = 1.0\n\n # Initialization step\n # This loop will run for all the tags of each first word (sent[1][0])(word next to <S>) in dictionary\n for tag in self.dictionary[sent[1][0]]:\n # if any sentance in our trained data starts with a word that has same tag as \"state\"\n if (start_token, tag) in self.transitions:\n viterbi[str(1)][tag] = self.transitions[(start_token, tag)] + self.emissions[(sent[1][0], tag)]\n else:\n viterbi[str(1)][tag] = -float('inf')\n backpointer[str(1)][tag] = start_token\n\n # Recursion step\n # This loop will run for rest of the tuples (word, pos) after first tuple in \"sent\"\n for i in xrange(2, len(sent)):\n # This loop will run for all the tags of each word (sent[idx][0]) in dictionary\n for tag in self.dictionary[sent[i][0]]:\n maximum_value = -float(\"inf\")\n maximum_loc = []\n # This loop will run for all the tags in previous word (sent[idx-1][0]) in dictionary\n for prev_tag in self.dictionary[sent[i - 1][0]]:\n # if any sentance in our trained data has (privious tag, current tag) or (pre_state, state) of given word\n if (prev_tag, tag) in self.transitions:\n t = viterbi[str(i - 1)][prev_tag] + self.transitions[(prev_tag, tag)]\n else:\n t = -float('inf')\n if t >= maximum_value:\n maximum_value = t\n maximum_loc = prev_tag\n\n viterbi[str(i)][tag] = maximum_value + self.emissions[(sent[i][0], tag)]\n backpointer[str(i)][tag] = maximum_loc\n\n t = end_token\n for i in xrange(1, len(sent)):\n t = backpointer[str(len(sent) - i)][t]\n pos_list.append(t)\n\n for tup in sent:\n sent_tag.append((tup[0], pos_list.pop()))\n\n #print \"viterbi:\", viterbi\n #print \"backpointer:\", backpointer\n #print \"sent_tagged\", sent_tag\n\n return sent_tag", "def actions(self, state):\n \"*** YOUR CODE HERE ***\"\n if state[2] == 0: # When agent is facing North\n state_fw = (state[0], state[1] + 1, 0)\n state_tr = (state[0], state[1], 3)\n state_tl = (state[0], state[1], 1)\n elif state[2] == 1: # When agent is facing West\n state_fw = (state[0] - 1, state[1], 1)\n state_tr = (state[0], state[1], 0)\n state_tl = (state[0], state[1], 2)\n elif state[2] == 2: # When agent is facing South\n state_fw = (state[0], state[1] - 1, 2)\n state_tr = (state[0], state[1], 1)\n state_tl = (state[0], state[1], 3)\n elif state[2] == 3: # When agent is facing East\n state_fw = (state[0] + 1, state[1], 3)\n state_tr = (state[0], state[1], 2)\n state_tl = (state[0], state[1], 0)\n else:\n raise Exception(\"This shouldn't be happening. 
Can't find heading\")\n \n shoot_loc_arr = [] # Initialize Array\n for allowed_state in self.allowed: # Iterate through all allowed states\n for goal_state in self.goals: # Iterate through all goal states\n if allowed_state[0] == goal_state[0] and allowed_state[1] < goal_state[1]: shoot_loc_arr.append((allowed_state[0], allowed_state[1], 0)) # X Matches, Head North\n if allowed_state[0] > goal_state[0] and allowed_state[1] == goal_state[1]: shoot_loc_arr.append((allowed_state[0], allowed_state[1], 1)) # Y Matches, Head West\n if allowed_state[0] == goal_state[0] and allowed_state[1] > goal_state[1]: shoot_loc_arr.append((allowed_state[0], allowed_state[1], 2)) # X Matches, Head South\n if allowed_state[0] < goal_state[0] and allowed_state[1] == goal_state[1]: shoot_loc_arr.append((allowed_state[0], allowed_state[1], 3)) # Y Matches, Head East \n\n dist_fw_arr, dist_tr_arr, dist_tl_arr = ([9999999] for i in range(3)) # Initialize to large values\n for goal in shoot_loc_arr: # Iterate through arrays\n if (state_fw[0],state_fw[1]) in self.allowed:\n dist_fw_arr.append(manhattan_distance_with_heading(state_fw, goal))\n dist_tr_arr.append(manhattan_distance_with_heading(state_tr, goal))\n dist_tl_arr.append(manhattan_distance_with_heading(state_tl, goal))\n\n if (min(dist_fw_arr) <= min(min(dist_tr_arr),min(dist_tl_arr))) and (state_fw[0],state_fw[1]) in self.allowed: return ['Forward']\n if min(dist_tr_arr) <= min(min(dist_fw_arr),min(dist_tl_arr)): return ['TurnRight']\n if min(dist_tl_arr) <= min(min(dist_tr_arr),min(dist_tr_arr)): return ['TurnLeft']\n raise Exception(\"This shouldn't be happening. Can't determine action\")", "def stateVector(self):\n simulator=Aer.get_backend('statevector_simulator')\n result=execute(self.circuit,backend=simulator).result()\n statevector=result.get_statevector(decimals=4) #\"decimals=4\" doesn't work in version 0.20.0 \n return statevector.tolist()", "def define_state_transition_SettlementStatusProcessingAdvice():\n\n state_chart_name = 'FSwiftSettStatusProcessAdviceIn'\n old_state_chart_name = ''\n state_chart = {\n 'Ready': {'Identified': 'Paired',\n 'NotIdentified': 'Unpaired'},\n\n 'Unpaired': {'Identified': 'Paired'},\n\n 'Paired': {'Acknowledge': 'Acknowledged',\n 'NoMatch': 'NotMatched',\n 'Match': 'Matched',\n 'Pending': 'PendingSettlement',\n 'Failing': 'FailingSettlement',\n 'Reject': 'Rejected',\n 'Cancel': 'Cancelled',},\n\n 'Acknowledged': {'NoMatch': 'NotMatched',\n 'Match': 'Matched',\n 'Pending': 'PendingSettlement',\n 'Failing': 'FailingSettlement',\n 'Cancel': 'Cancelled',\n 'Reject': 'Rejected',\n 'AmndCancRequest':'AmendCancelRequested',\n 'AmndCancPending':'AmendCancelPending',\n 'Done':'Processed'},\n\n 'Matched': {'Pending': 'PendingSettlement',\n 'Cancel': 'Cancelled',\n 'Reject': 'Rejected',\n 'Failing': 'FailingSettlement',\n 'AmndCancRequest':'AmendCancelRequested',\n 'AmndCancPending':'AmendCancelPending',\n 'Done':'Processed'\n },\n\n 'FailingSettlement':{'AmndCancRequest':'AmendCancelRequested',\n 'AmndCancPending':'AmendCancelPending',\n 'Reject': 'Rejected',\n 'Cancel': 'Cancelled',\n 'Done':'Processed'},\n\n 'NotMatched': {'Match': 'Matched',\n 'Pending': 'PendingSettlement',\n 'Failing': 'FailingSettlement',\n 'Cancel': 'Cancelled',\n 'Reject': 'Rejected',\n 'AmndCancRequest':'AmendCancelRequested',\n 'AmndCancPending':'AmendCancelPending',\n 'Done':'Processed'},\n\n 'AmendCancelRequested': {'AmndCancComplete':'AmendCancelCompleted',\n 'Done':'Processed'},\n\n 'AmendCancelPending': 
{'AmndCancComplete':'AmendCancelCompleted',\n 'Done':'Processed'},\n\n 'PendingSettlement': {'Failing': 'FailingSettlement',\n 'Cancel': 'Cancelled',\n 'AmndCancRequest':'AmendCancelRequested',\n 'AmndCancPending':'AmendCancelPending',\n 'Reject': 'Rejected',\n 'Done':'Processed'},\n 'Cancelled' : { 'Done':'Processed'},\n 'AmendCancelCompleted' : { 'Done':'Processed'},\n\n 'Rejected': {'Cancel': 'Cancelled',\n 'Done': 'Processed'}\n\n }\n\n co_string = 'Paired,73,-105;Cancelled,676,329;Not Match,696,-223;Match,1271,173;AmendCancelPending,728,189;Matched,347,-133;Unpaired,-178,-351;Acknowledge,435,-110;Reject,229,538;AmendCancelCompleted,862,90;Rejected,177,338;NotMatched,432,-283;Acknowledged,549,-429;Processed,1072,-131;Pending Settlement,1193,495;PendingSettlement,304,22;FailingSettlement,255,179;Ready,-237,-98;AmendCancelRequested,737,0;'\n #state_charts_list.append({state_chart_name: [state_chart] + [co_string]})\n\n return state_chart_name, state_chart, old_state_chart_name, co_string", "def toState(self, results):\n trues = HashSet()\n for result in results:\n trues.add(GdlPool.getRelation(self.TRUE, [None]*))\n return MachineState(trues)", "def get_new_gamestate(self):", "def convert_input(self, inlist):\n\n # First map the ordered state list from the simulation into a\n # state dictionary for the brain.\n self.state = {\n 'heat_cost':\t\tinlist[0],\n 'set_temp':\t\t\tinlist[1],\n 'room_temp':\t\tinlist[2],\n 'room_temp_change':\t\tinlist[3],\n 'outside_temp':\t\tinlist[4],\n 'outside_temp_change':\tinlist[5],\n }\n\n # To compute the reward function value we start by taking the\n # difference between the set point temperature and the actual\n # room temperature.\n tdiff = math.fabs(self.state['set_temp'] - self.state['room_temp'])\n\n # Raise the difference to the 0.4 power. The non-linear\n # function enhances the reward distribution near the desired\n # temperature range. Please refer to the Bonsai training\n # video on reward functions for more details.\n nonlinear_diff = pow(tdiff, 0.4)\n\n # Scale the nonlinear difference so differences in the range\n # +/- 2 degrees (C) map between 0 and 1.0.\n # 2 degree ^ 0.4 = 1.32\n scaled_diff = nonlinear_diff / 1.32\n\n # Since we need a positive going reward function, subtract the\n # scaled difference from 1.0. 
This reward value will be 1.0\n # when we are precisely matching the set point and will fall\n # to less than 0.0 when we exceed 2 degrees (C) from the set\n # point.\n self.reward = 1.0 - scaled_diff\n \n self.terminal = self.nsteps >= 240 or self.reward < 0.0\n\n if self.nsteps > 0:\n self.total_reward += self.reward\n\n return self.state, self.reward, self.terminal", "def Viterbi_Transition(words:Sequence[str], train_bag:Sequence[Tuple[str, str]]=train_tagged_words)-> Sequence[Tuple[str, str]]:\n state = []\n all_tags = list(set([pair[1] for pair in train_bag]))\n\n for word_idx, word in enumerate(words):\n # initialise list of probability column for a given observation\n p = []\n for tag in all_tags:\n if word_idx == 0:\n transition_p = tags_df.loc['.', tag]\n else:\n transition_p = tags_df.loc[state[-1], tag]\n\n # compute emission and state probabilities\n emission_p_parts = word_given_tag(word, tag)\n emission_p = emission_p_parts[0]/emission_p_parts[1]\n\n if word in V:\n state_probability = transition_p * emission_p\n else:\n state_probability = transition_p\n\n p.append(state_probability)\n\n p_max = max(p)\n # getting state for which probability is maximum\n state_max = all_tags[p.index(p_max)]\n state.append(state_max)\n return list(zip(words, state))", "def prehistory_recept(self, userdialog):\n # get text of prehistory\n # grasp datetimes mentioned before, the most recent datetimes are more confident estimations\n # print(\"21222222222222222222222222222222222222222222222222222222\")\n # import ipdb; ipdb.set_trace()\n\n usermessages = userdialog.list_user_messages()\n # search for the recent slot setting (from recent messages to oldest):\n for each_msg in reversed(usermessages):\n can_rec = self.can_recept(each_msg)\n if can_rec:\n results = self.recept(each_msg)\n return can_rec, results\n\n return False, None", "def gym_to_state(self, state) -> Dict[str, Any]:\n\n joint_speeds = self._env.unwrapped.robot.joint_speeds\n\n joints_at_limit = float(self._env.unwrapped.robot.joints_at_limit)\n\n potential = float(self._env.unwrapped.potential)\n if self.prev_potential is None:\n self.prev_potential = potential\n\n progress = potential - self.prev_potential\n\n self.bonsai_state = {\"obs\": state.tolist(),\n \"joint_speeds\": joint_speeds.tolist(),\n \"joints_at_limit\": joints_at_limit,\n \"progress\": progress}\n\n self.prev_potential = potential\n\n return self.bonsai_state", "def pack_experience(states, actions, rewards, next_states, dones):\n\n# pdb.set_trace()\n \n return (states.flatten(),\n actions.flatten(),\n rewards,\n next_states.flatten(),\n dones)", "def getFeatures(self, state, action):\n features = qutils.Qcounter()\n features['bias'] = 1.0\n\n if state is None:\n return features\n else:\n\n if self.id%2 == 0:\n plrCoords = state.board.plr_coords['r']\n oppCoords = state.board.plr_coords['b']\n else:\n plrCoords = state.board.plr_coords['b']\n oppCoords = state.board.plr_coords['r']\n\n goalState = GoalState(state.board.plr_coords['r'],state.board.plr_coords['b'],state.agents[self.id].hand,\n state.board.draft)\n if action['coords'] is not None:\n draftCoords = goalState.CardsToCoords([action['draft_card']])\n else:\n draftCoords = None\n\n features['euclideanDistanceCentroid'] = eucDist(action, plrCoords)\n features['neighbour'] = neighbour(action, plrCoords, oppCoords)\n features['heart'] = heart(action, plrCoords)\n features['blockHeart'] = blockHeart(action, oppCoords)\n features['eHorizontal'] = eHorizontal(state, action, plrCoords, oppCoords)\n 
features['eVertical'] = eVertical(state, action, plrCoords, oppCoords)\n features['eIandIIIDiag'] = eIandIIIDiagonal(state, action, plrCoords, oppCoords)\n features['eIIandIVDiag'] = eIIandIVDiagonal(state, action, plrCoords, oppCoords)\n features['draftHorizontal'] = draftHorizontal(state, plrCoords, oppCoords, draftCoords)\n features['draftVertical'] = draftVertical(state, plrCoords, oppCoords, draftCoords)\n features['draftDiagIandIII'] = draftDiagIandIII(state, plrCoords, oppCoords, draftCoords)\n features['draftDiagIIandIV'] = draftDiagIIandIV(state, plrCoords, oppCoords, draftCoords)\n features['draftJacks'] = DraftJacks(action)\n features['PlayCentre'] = PlayCentre(action)\n features['HeuristicValuePlace'] = HeuristicValue(action, goalState)\n features['HeuristicValueDraft'] = HeuristicValueDraft(action, goalState, draftCoords, self.gamma)\n return features", "def _logicGuess(self, confirmed:dict, values:list, candidates:list, history:list) -> tuple[bool, list]:\n cur_guess = [v for v in confirmed.values()]\n unconfirmed_slots = [i for i in confirmed if confirmed[i]==None]\n if not unconfirmed_slots:\n return [confirmed.values()]\n unconfirmed_values = [v for v in values if v not in confirmed.values()]\n \n if unconfirmed_values:\n # TODO:\n pass \n vacant_count = len(unconfirmed_slots) - len(unconfirmed_values)\n if vacant_count == 1:\n for value in candidates:\n cur_guess = [v if v else value for v in cur_guess]\n ok, fb = self._game.checkGuess(cur_guess)\n if ok:\n return (ok, cur_guess)\n elif vacant_count > 1:\n pick = candidates[:2]\n first = True\n for i,v in enumerate(cur_guess):\n if v == None:\n cur_guess[i] = candidates[0] if first else candidates[1]\n first = False\n ok, fb = self._game.checkGuess(cur_guess)\n if ok:\n return (ok, cur_guess)\n countActurateMatch = lambda x:sum([1 for v in x.value if v!=None])\n if fb.acurateMatch >= countActurateMatch(confirmed):\n\n\n \n return cur_guess", "def solve(self, current_state: dict) -> dict:", "def state_(state):\n return tuple( [ tuple( row ) for row in state ] )", "def fix_unpaired_events(self):\n\n if self.observationId:\n\n r, msg = project_functions.check_state_events_obs(self.observationId, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][self.observationId])\n if \"not PAIRED\" not in msg:\n QMessageBox.information(self, programName, \"All state events are already paired\",\n QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)\n return\n\n '''\n if self.playerType == VIEWER:\n # max time\n time_ = max(x[0] for x in self.pj[OBSERVATIONS][self.observationId][EVENTS])\n else:\n time_ = self.getLaps()\n '''\n\n w = dialog.JumpTo(self.timeFormat)\n w.setWindowTitle(\"Fix UNPAIRED state events\")\n w.label.setText(\"Fix UNPAIRED events at time\")\n\n if w.exec_():\n if self.timeFormat == HHMMSS:\n fix_at_time = utilities.time2seconds(w.te.time().toString(HHMMSSZZZ))\n elif self.timeFormat == S:\n fix_at_time = Decimal(str(w.te.value()))\n print(\"fix_at_time\", fix_at_time)\n\n events_to_add = project_functions.fix_unpaired_state_events(self.observationId,\n self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][self.observationId],\n fix_at_time - Decimal(\"0.001\")\n )\n if events_to_add:\n self.pj[OBSERVATIONS][self.observationId][EVENTS].extend(events_to_add)\n self.projectChanged = True\n self.pj[OBSERVATIONS][self.observationId][EVENTS].sort()\n self.loadEventsInTW(self.observationId)\n item = self.twEvents.item(\n [i for i, t in enumerate(self.pj[OBSERVATIONS][self.observationId][EVENTS]) if\n t[0] == fix_at_time][0], 0)\n 
self.twEvents.scrollToItem(item)\n\n # selected observations\n else:\n result, selected_observations = self.selectObservations(MULTIPLE)\n if not selected_observations:\n return\n\n # check if state events are paired\n out = \"\"\n not_paired_obs_list = []\n for obs_id in selected_observations:\n r, msg = project_functions.check_state_events_obs(obs_id, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obs_id])\n print(\"msg\", msg)\n if \"NOT PAIRED\" in msg.upper():\n fix_at_time = max(x[0] for x in self.pj[OBSERVATIONS][obs_id][EVENTS])\n events_to_add = project_functions.fix_unpaired_state_events(obs_id,\n self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obs_id],\n fix_at_time\n )\n if events_to_add:\n events_backup = self.pj[OBSERVATIONS][obs_id][EVENTS][:]\n self.pj[OBSERVATIONS][obs_id][EVENTS].extend(events_to_add)\n\n # check if modified obs if fixed\n r, msg = project_functions.check_state_events_obs(obs_id, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obs_id])\n if \"NOT PAIRED\" in msg.upper():\n out += \"The observation <b>{}</b> can not be automatically fixed.<br><br>\".format(obs_id)\n self.pj[OBSERVATIONS][obs_id][EVENTS] = events_backup\n else:\n out += \"<b>{}</b><br>\".format(obs_id)\n self.projectChanged = True\n if out:\n out = \"The following observations were modified to fix the unpaired state events:<br><br>\" + out\n self.results = dialog.Results_dialog()\n self.results.setWindowTitle(programName + \" - Fixed observations\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(out)\n self.results.pbSave.setVisible(False)\n self.results.pbCancel.setVisible(True)\n self.results.exec_()\n else:\n QMessageBox.information(self, programName, \"All state events are already paired\",\n QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)", "def _teacher_action(self, obs, ended):\n a = np.zeros(len(obs), dtype=np.int64)\n for i, ob in enumerate(obs):\n if ended[i]: # Just ignore this index\n a[i] = args.ignoreid\n else:\n for k, candidate in enumerate(ob['candidate']):\n if candidate['viewpointId'] == ob['teacher']: # Next view point\n a[i] = k\n break\n else: # Stop here\n assert ob['teacher'] == ob['viewpoint'] # The teacher action should be \"STAY HERE\"\n a[i] = len(ob['candidate'])\n return a", "def _teacher_action(self, obs, ended):\n a = np.zeros(len(obs), dtype=np.int64)\n for i, ob in enumerate(obs):\n if ended[i]: # Just ignore this index\n a[i] = args.ignoreid\n else:\n for k, candidate in enumerate(ob['candidate']):\n if candidate['viewpointId'] == ob['teacher']: # Next view point\n a[i] = k\n break\n else: # Stop here\n assert ob['teacher'] == ob['viewpoint'] # The teacher action should be \"STAY HERE\"\n a[i] = len(ob['candidate'])\n return a", "def result(self, state, action):\n \"*** YOUR CODE HERE ***\"\n new_x, new_y, new_heading = state\n if action == 'Forward':\n if state[2] == 0: new_y = state[1] + 1 #Forward North\n if state[2] == 1: new_x = state[0] - 1 #Forward West\n if state[2] == 2: new_y = state[1] - 1 #Forward South\n if state[2] == 3: new_x = state[0] + 1 #Forward East \n elif action == 'TurnLeft':\n if state[2] == 0: new_heading = 1 #Turn left to face West\n if state[2] == 1: new_heading = 2 #Turn left to face South \n if state[2] == 2: new_heading = 3 #Turn left to face East\n if state[2] == 3: new_heading = 0 #Turn left to face North\n elif action == 'TurnRight':\n if state[2] == 0: new_heading = 3 #Turn to face East\n if state[2] == 1: new_heading = 0 #Turn to face South\n if state[2] == 2: new_heading = 1 #Turn to face West\n 
if state[2] == 3: new_heading = 2 #Turn to face North\n new_state = (new_x,new_y,new_heading)\n return new_state", "def result(self, state, action):\n \"*** YOUR CODE HERE ***\"\n new_x, new_y, new_heading = state\n if action == 'Forward':\n if state[2] == 0: new_y = state[1] + 1 #Forward North\n if state[2] == 1: new_x = state[0] - 1 #Forward West\n if state[2] == 2: new_y = state[1] - 1 #Forward South\n if state[2] == 3: new_x = state[0] + 1 #Forward East \n elif action == 'TurnLeft':\n if state[2] == 0: new_heading = 1 #Turn left to face West\n if state[2] == 1: new_heading = 2 #Turn left to face South \n if state[2] == 2: new_heading = 3 #Turn left to face East\n if state[2] == 3: new_heading = 0 #Turn left to face North\n elif action == 'TurnRight':\n if state[2] == 0: new_heading = 3 #Turn to face East\n if state[2] == 1: new_heading = 0 #Turn to face South\n if state[2] == 2: new_heading = 1 #Turn to face West\n if state[2] == 3: new_heading = 2 #Turn to face North\n new_state = (new_x,new_y,new_heading)\n return new_state", "def step(self):\n\n \"\"\" First updates the variables values of the current time form the environment \"\"\"\n self.update_crispval(self.env.context)\n\n \"\"\"\n here the decision making of the agent\n to determine which activity to suggest to the patient\n i apply the creative controller to the current context\n \"\"\"\n curr_input = sample_inputs(False, 0, self.curr_interaction, self.variables_default_val, self.action_var,\n self.fuzzysets_values, self.variables_universe)\n c_out, rules_activations, is_cc_exception = self.creative_controller.computeOutput(curr_input, False)\n\n \"\"\" i obtain a number of ouput crisp values.\n i determine which one achieves the max expected output w.r.t. the a-rules \"\"\"\n best_a = None\n best_a_val = -1000\n best_a_exphapp = 5\n if self.verbose > Constants.VERBOSE_BASIC:\n print(\"rules activations\")\n for a in rules_activations:\n if rules_activations[a] > 0:\n print(str(a) + \"\\n\\t\\t\\t-> \" + str(rules_activations[a]))\n for item in c_out.items(): # for each pair <activity, crisp output>\n if self.verbose > Constants.VERBOSE_BASIC:\n print(item)\n if not item[\n 0] in self.curr_iter_suggestions: # if i didn't suggest the same activity already in the same interaction\n inputs = dict(curr_input) # I create a copy fo the dict\n inputs[item[0]] = item[1]\n assessor_id = self.actions_to_ti[item[0]]\n self.assessors[assessor_id].feed_inputs(inputs)\n is_ac_exception = False\n assout = []\n try:\n a_out, a_rules_activations, is_ac_exception = self.assessors[assessor_id].compute(verbose=False)\n assout = [a_out[ao] for ao in a_out]\n except:\n is_ac_exception = True\n traceback.print_exc()\n # todo the following assumes that every assessor controller has same eval var\n for v in self.eval_var:\n assout.append(self.variables_default_val[v])\n if len(assout) == 0:\n for v in self.eval_var:\n assout.append(self.variables_default_val[v])\n w_ta = self.weights_therapeutic_interventions[self.actions_to_ti[item[0]]]\n\n avg_credit_rules_that_suggested_action = 1.0\n nr_rules_that_suggested_action = 0\n for r in rules_activations:\n if (rules_activations[r] > 0) and (str(item[0]) in str(r)):\n avg_credit_rules_that_suggested_action = avg_credit_rules_that_suggested_action + \\\n self.rules_credits[str(r)]\n nr_rules_that_suggested_action = nr_rules_that_suggested_action + 1\n if nr_rules_that_suggested_action > 0:\n avg_credit_rules_that_suggested_action = (\n avg_credit_rules_that_suggested_action - 1.0) / 
nr_rules_that_suggested_action\n repetition_cost = 1.0\n a_val = (mean(assout) * w_ta * avg_credit_rules_that_suggested_action) / repetition_cost\n if (a_val > best_a_val) and (\n item[1] >= (self.variables_default_val[item[0]] + self.range_step[item[0]])):\n best_a = item\n best_a_val = a_val\n best_a_exphapp = mean(assout)\n\n \"\"\"I suggest the activity with best expected outcome and store the information to populate the interactions \n memory \"\"\"\n self.proposeActivity(best_a)\n if not best_a is None:\n if (self.verbose > Constants.VERBOSE_FALSE) and (self.verbose <= Constants.VERBOSE_BASIC):\n print(\"proposing activity\" + str(best_a) + \" which has expected feedback: \" + str(\n best_a_exphapp) + \", which weighted is \" + str(best_a_val))\n self.curr_iter_suggestions.append(best_a[0])\n self.last_suggestion = best_a\n else:\n if (self.verbose > Constants.VERBOSE_FALSE) and (self.verbose <= Constants.VERBOSE_BASIC):\n print(\"the activity proposed is \" + str(\n best_a) + \" so I don't suggest anything. I will ask a question instead\")\n self.last_suggestion = []\n self.expected_feedback = best_a_exphapp\n self.last_context = self.env.context.copy()\n self.last_rules_activations = rules_activations", "def required_slots(self,tracker) -> List[Text]:", "def _extract_state(self, state):\n extracted_state = {}\n\n legal_actions = [self.actions.index(a) for a in state[\"legal_actions\"]]\n extracted_state[\"legal_actions\"] = legal_actions\n\n public_card = state[\"public_card\"]\n hand = state[\"hand\"]\n obs = np.zeros(36)\n obs[self.card2index[hand]] = 1\n if public_card:\n obs[self.card2index[public_card] + 3] = 1\n obs[state[\"my_chips\"] + 6] = 1\n obs[state[\"all_chips\"][1] + 20] = 1\n extracted_state[\"obs\"] = obs\n\n if self.allow_raw_data:\n extracted_state[\"raw_obs\"] = state\n extracted_state[\"raw_legal_actions\"] = [a for a in state[\"legal_actions\"]]\n if self.record_action:\n extracted_state[\"action_record\"] = self.action_recorder\n\n return extracted_state" ]
[ "0.640888", "0.566024", "0.5613271", "0.5596102", "0.5474696", "0.54214483", "0.5382502", "0.52699685", "0.52173156", "0.51859146", "0.51858324", "0.51645875", "0.51623684", "0.5160781", "0.5158822", "0.51578665", "0.51083446", "0.51068085", "0.50694966", "0.5061151", "0.50550014", "0.50538117", "0.5048716", "0.50355625", "0.50355625", "0.50299263", "0.50299263", "0.5001525", "0.50006247", "0.49923155" ]
0.5790193
1
Building the Action Space for the RL-based Agent. All diseases are treated as actions.
def _build_action_space(self):
    feasible_actions = []
    # Adding the inform actions and request actions.
    for slot in sorted(self.slot_set.keys()):
        feasible_actions.append({'action': 'request', 'inform_slots': {},
                                 'request_slots': {slot: dialogue_configuration.VALUE_UNKNOWN},
                                 "explicit_inform_slots": {}, "implicit_inform_slots": {}})
    # Diseases as actions.
    for disease in sorted(self.disease_symptom.keys()):
        feasible_actions.append({'action': 'inform', 'inform_slots': {"disease": disease},
                                 'request_slots': {},
                                 "explicit_inform_slots": {}, "implicit_inform_slots": {}})
    return feasible_actions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def buildActionSpace(self):\n self.action_types = self.AGENT_TYPES\n self.action_space = Dict({\n \"action\": Discrete(len(self.AGENT_TYPES)), \n })\n self.action_space.shape = (len(self.action_types),)", "def set_up_discrete_action_space(self):\n self.action_list = [[self.torque, 0, 0, 0, 0, 0], [-self.torque, 0, 0, 0, 0, 0],\n [0, self.torque, 0, 0, 0, 0], [\n 0, -self.torque, 0, 0, 0, 0],\n [0, 0, self.torque, 0, 0, 0], [\n 0, 0, -self.torque, 0, 0, 0],\n [0, 0, 0, 0, 0, 0]]\n self.action_space = gym.spaces.Discrete(len(self.action_list))\n self.setup_keys_to_action()", "def action_space(self, curr_state):\n # Action space - allowed (position, value) combinations for the agent and environment given the current state\n\n agent_actions = list(product(self.allowed_positions(curr_state), self.allowed_values(curr_state)[0]))\n env_actions = list(product(self.allowed_positions(curr_state), self.allowed_values(curr_state)[1]))\n return (agent_actions, env_actions)", "def action_space(self, curr_state):\n\n agent_actions = product(self.allowed_positions(curr_state), self.allowed_values(curr_state)[0])\n env_actions = product(self.allowed_positions(curr_state), self.allowed_values(curr_state)[1])\n return (agent_actions, env_actions)", "def action_space(self, curr_state):\n\n agent_actions = product(self.allowed_positions(curr_state), self.allowed_values(curr_state)[0])\n env_actions = product(self.allowed_positions(curr_state), self.allowed_values(curr_state)[1])\n return (agent_actions, env_actions)", "def actions(self, agent_state):\n raise NotImplementedError(\"Don't know what actions are available\")", "def get_agent(env) -> DDPGAgent:\n assert len(env.action_space.shape) == 1\n nb_actions = env.action_space.shape[0]\n action_input = Input(shape=(nb_actions,), name='action_input')\n observation_input = Input(shape=(1,) + env.observation_space.shape, name='observation_input')\n\n range_action_input = 0.5 * (env.action_space.high - env.action_space.low)\n constantBias = 1\n lowb = env.action_space.low\n\n # actor = Flatten(input_shape=(1,) + env.observation_space.shape)(observation_input)\n y = Flatten()(observation_input)\n y = Dense(16)(y)\n y = BatchNormalization()(y)\n y = Activation('relu')(y)\n y = Dense(16)(y)\n y = BatchNormalization()(y)\n y = Activation('relu')(y)\n pht = Dense(1)(y)\n pht = BatchNormalization()(pht)\n pht = Activation('tanh')(pht)\n pht = Lambda(lambda a: (a + K.constant(constantBias)) * K.constant(range_action_input[0])\n + K.constant(lowb[0]))(pht)\n rht = Dense(1)(y)\n rht = BatchNormalization()(rht)\n rht = Activation('tanh')(rht)\n rht = Lambda(lambda a: (a + K.constant(constantBias)) * K.constant(range_action_input[1])\n + K.constant(lowb[1]))(rht)\n axn = Concatenate()([pht, rht])\n actor = Model(inputs=observation_input, outputs=axn)\n\n flattened_observation = Flatten()(observation_input)\n x = Concatenate()([action_input, flattened_observation])\n x = Dense(32)(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = Dense(32)(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = Dense(32)(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = Dense(1)(x)\n x = Activation('linear')(x)\n critic = Model(inputs=[action_input, observation_input], outputs=x)\n\n memory = SequentialMemory(limit=1000, window_length=1)\n\n random_process = OrnsteinUhlenbeckProcess(theta=.15, mu=0., sigma=.5, size=nb_actions)\n agent = DDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic, critic_action_input=action_input,\n 
memory=memory, nb_steps_warmup_critic=100, nb_steps_warmup_actor=100,\n gamma=.99, target_model_update=1e-3, random_process=random_process)\n agent.compile(Adam(lr=.001, clipnorm=1.), metrics=['mae'])\n return agent", "def get_actions(\n self, observations: Observations, action_space: gym.Space\n ) -> Actions:\n return super().get_actions(observations, action_space)", "def initiate_agent(self, env):\n from keras import Sequential\n from keras.optimizers import Adam\n from keras.layers import Dense, Dropout\n from rl.memory import SequentialMemory\n from rl.agents import DQNAgent\n\n self.env = env\n\n nb_actions = self.env.action_space.n\n\n model = Sequential()\n model.add(Dense(512, activation='relu', input_shape=env.observation_space))\n model.add(Dropout(0.2))\n model.add(Dense(512, activation='relu'))\n model.add(Dropout(0.2))\n model.add(Dense(512, activation='relu'))\n model.add(Dropout(0.2))\n model.add(Dense(nb_actions, activation='linear'))\n\n # Finally, we configure and compile our agent. You can use every built-in Keras optimizer and\n # even the metrics!\n memory = SequentialMemory(limit=memory_limit, window_length=window_length)\n policy = TrumpPolicy()\n from rl.core import Processor\n\n class CustomProcessor(Processor):\n \"\"\"he agent and the environment\"\"\"\n\n def process_state_batch(self, batch):\n \"\"\"\n Given a state batch, I want to remove the second dimension, because it's\n useless and prevents me from feeding the tensor into my CNN\n \"\"\"\n return np.squeeze(batch, axis=1)\n\n def process_info(self, info):\n processed_info = info['player_data']\n if 'stack' in processed_info:\n processed_info = {'x': 1}\n return processed_info\n\n nb_actions = env.action_space.n\n\n self.dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=nb_steps_warmup,\n target_model_update=1e-2, policy=policy,\n processor=CustomProcessor(),\n batch_size=batch_size, train_interval=train_interval, enable_double_dqn=enable_double_dqn)\n self.dqn.compile(Adam(lr=1e-3), metrics=['mae'])", "def make_action(self, game, node, action, moves):\n pass", "def transfer_actions(action, act_space):\n #print(action)\n action_spaces = []\n res = []\n for act in act_space.spaces:\n if act_space[act].__class__.__name__ == 'Discrete':\n action_spaces.append(act_space[act].n)\n res.append(action[act])\n elif act_space[act].__class__.__name__ == 'Enum':\n action_spaces.append(len(act_space[act].values))\n res.append(action[act])\n elif act == 'camera':\n res.append(camera_transform(action[act][0]))\n res.append(camera_transform(action[act][1]))\n action_spaces.append(36)\n action_spaces.append(36)\n\n return res", "def get_all_valid_actions(self):\r\n\r\n # Select, for each agent, the valid actions based on its position (state).\r\n agent_actions = self.searchenv.valid_actions[self.searchstate.positions[0]]\r\n\r\n #print(\"Agent Action: \",agent_actions)\r\n\r\n # Mask the rail transition actions for idle agents.\r\n if self.searchstate.actives == 0:\r\n agent_actions = [0, 0, 1, 0, 1] # STOP_MOVING, or MOVE_FORWARD.\r\n\r\n # Mask the rail transition actions for done agents.\r\n if self.agents_at_goal() == True:\r\n agent_actions = [1, 0, 0, 0, 0] # DO_NOTHING only.\r\n\r\n # Identify for each agent the IDs of the valid actions (i.e., [0, 1, 1, 0, 0] --> [1, 2])\r\n agent_action_list =[]\r\n for i in range(len(agent_actions)):\r\n if agent_actions[i] == 1:\r\n agent_action_list.append(i)\r\n\r\n # Return list containing for each agent, the IDs of the actions available to 
it.\r\n return agent_action_list", "def generate_action_grammar(self, actions):\n assert isinstance(actions, list), actions\n assert not isinstance(actions[0], list), \"Should be 1 long list of actions - {}\".format(actions[0])\n assert len(actions) > 0, \"Need to provide a list of at least 1 action\"\n assert isinstance(actions[0], int), \"The actions should be integers\"\n new_actions, all_rules, rule_usage, rules_episode_appearance_count = self.discover_all_rules_and_new_actions_representation(actions)\n action_usage = self.extract_action_usage_from_rule_usage(rule_usage, all_rules)\n rules_episode_appearance_count = self.extract_action_usage_from_rule_usage(rules_episode_appearance_count,\n all_rules)\n return new_actions, all_rules, action_usage, rules_episode_appearance_count", "def action_space(self):\n return gym.spaces.Discrete(self._action_dim)", "def _initialize_action_space(self) -> None:\n # Get effort limit\n command_limit = self.robot.command_limit\n\n # Replace inf bounds of the effort limit if requested\n if self.enforce_bounded_spaces:\n for motor_name in self.robot.motors_names:\n motor = self.robot.get_motor(motor_name)\n motor_options = motor.get_options()\n if not motor_options[\"enableCommandLimit\"]:\n command_limit[motor.joint_velocity_idx] = \\\n MOTOR_EFFORT_MAX\n\n # Set the action space\n action_scale = command_limit[self.robot.motors_velocity_idx]\n self.action_space = spaces.Box(\n low=-action_scale, high=action_scale, dtype=np.float64)", "def generate_actions(self):\n \n # For all state nodes\n action = 0\n \n for l in range(self.u0_n):\n for m in range(self.u1_n):\n \n u = np.array([ self.ud[0][l] , self.ud[1][m] ])\n \n # State and grid index based on node #\n self.actions_input[action,:] = u\n self.actions_index[action,:] = np.array([l,m])\n \n # Increment node number\n action = action + 1", "def getAction(self, gameState):\n\n # BEGIN_YOUR_CODE\n def G(gameState):\n return gameState.isWin() or gameState.isLose()\n\n def U(gameState):\n if gameState.isWin():\n return numpy.inf\n if gameState.isLose():\n return -numpy.inf\n\n def Turn(agent_index):\n if agent_index + 1 < gameState.getNumAgents():\n return agent_index + 1\n else:\n return 0\n\n def getDistribution(gameState, agent_index):\n # Read variables from state\n ghostState = gameState.getGhostState(agent_index)\n legalActions = gameState.getLegalActions(agent_index)\n pos = gameState.getGhostPosition(agent_index)\n isScared = ghostState.scaredTimer > 0\n\n speed = 1\n if isScared: speed = 0.5\n\n actionVectors = [Actions.directionToVector(a, speed) for a in legalActions]\n newPositions = [(pos[0] + a[0], pos[1] + a[1]) for a in actionVectors]\n pacmanPosition = gameState.getPacmanPosition()\n\n # Select best actions given the state\n distancesToPacman = [util.manhattanDistance(pos, pacmanPosition) for pos in newPositions]\n if isScared:\n bestScore = max(distancesToPacman)\n bestProb = 0.8\n else:\n bestScore = min(distancesToPacman)\n bestProb = 0.8\n bestActions = [action for action, distance in zip(legalActions, distancesToPacman) if distance == bestScore]\n\n # Construct distribution\n dist = util.Counter()\n for a in bestActions: dist[a] = bestProb / len(bestActions)\n for a in legalActions: dist[a] += (1 - bestProb) / len(legalActions)\n dist.normalize()\n return dist\n\n # The heuristic evaluation function\n evalFumc = self.evaluationFunction\n\n def GetExpectimaxAction(gameState, agent_index, depth):\n # we reached a win or a lose situation.\n if G(gameState):\n return (U(gameState), None, 
depth)\n # end of search depth.\n if depth == 0:\n return (evalFumc(gameState), None, depth)\n if agent_index == 0:\n # Pacmans turn\n CurrMax = -numpy.inf\n MaxAction = None\n maxDepth = -numpy.inf\n for move in gameState.getLegalActions(agent_index):\n # if there are no agents every call we should go one layer deeper.\n if gameState.getNumAgents() == 1:\n v = GetExpectimaxAction(gameState.generateSuccessor(agent_index, move), Turn(agent_index),\n depth - 1)\n else:\n v = GetExpectimaxAction(gameState.generateSuccessor(agent_index, move), Turn(agent_index),\n depth)\n if CurrMax < v[0] or (CurrMax == v[0] and maxDepth < v[2]):\n CurrMax = v[0]\n MaxAction = move\n maxDepth = v[2]\n return (CurrMax, MaxAction, maxDepth)\n else:\n # Ghosts turn\n values = []\n dist = getDistribution(gameState, agent_index)\n depths = []\n for action in dist:\n if Turn(agent_index) == 0:\n v = GetExpectimaxAction(gameState.generateSuccessor(agent_index, action),\n Turn(agent_index), depth - 1)\n else:\n v = GetExpectimaxAction(gameState.generateSuccessor(agent_index, action),\n Turn(agent_index), depth)\n values.append(dist[action] * v[0])\n depths.append(v[2])\n return (sum(values), None, max(depths))\n\n return GetExpectimaxAction(gameState, 0, self.depth)[1]\n # END_YOUR_CODE", "def generate_actions(self):\n \n # For all state nodes\n action = 0\n \n for k in range(self.u0_n):\n \n u = np.array([ self.ud[0][k] ])\n \n # State and grid index based on node #\n self.actions_input[action,:] = u\n self.actions_index[action,:] = k\n\n # Increment node number\n action = action + 1", "def set_up_continuous_action_space(self):\n self.action_space = gym.spaces.Box(shape=(self.action_dim,),\n low=-1.0,\n high=1.0,\n dtype=np.float32)\n self.action_high = self.torque * np.ones([self.action_dim])\n self.action_low = -self.action_high", "def step(self, actions):\n \n lastidx = 0\n for _i in range(self.nbehavior):\n action_tuple = ActionTuple()\n action_tuple.add_discrete(actions[lastidx:lastidx + self.n_each_agent[_i], :])\n self.env.set_actions(behavior_name=self.behavior_names[_i], action=action_tuple)\n lastidx = self.n_each_agent[_i]\n\n self.env.step()\n self.decision_steps = []\n self.terminal_steps = []\n\n for _i in range(self.nbehavior):\n d_s, t_s = self.env.get_steps(self.behavior_names[_i])\n self.decision_steps.append(d_s)\n self.terminal_steps.append(t_s)\n\n obs = []\n reward = []\n done = []\n info = {}\n\n for _i in range(self.nbehavior):\n _j = 0\n for o in self.reshape_obs(self.decision_steps[_i]):\n obs.append(o)\n reward.append(self.decision_steps[_i].reward[_j])\n done.append(False)\n _j += 1\n\n return obs, reward, done, info", "def get_action_space(self):\n return Discrete(len(self.get_actions()))", "def _get_actions_request(self):\n\n all_actions = {}\n service_description = self.description\n\n root = minidom.parseString(service_description)\n actions = root.getElementsByTagName('action')\n\n for action in actions:\n action_name = action.getElementsByTagName('name')[0].firstChild.nodeValue\n action_arguments = []\n\n # An action's argument list is only required if the action has parameters according to UPnP spec\n try:\n action_argument_list = action.getElementsByTagName('argumentList')[0]\n except IndexError:\n action_argument_list = None\n\n if action_argument_list:\n action_arguments_elements = action_argument_list.getElementsByTagName('argument')\n\n for argument in action_arguments_elements:\n argument_name = argument.getElementsByTagName('name')[0].firstChild.nodeValue\n 
argument_direction = argument.getElementsByTagName('direction')[0].firstChild.nodeValue\n\n # Argument return value is optional according to UPnP spec\n try:\n argument_return_value = argument.getElementsByTagName('retval')[0].firstChild.nodeValue\n except IndexError:\n argument_return_value = None\n\n argument_related_state_variable = argument.getElementsByTagName(\n 'relatedStateVariable'\n )[0].firstChild.nodeValue\n\n action_arguments.append(\n self.Action.Argument(\n argument_name,\n argument_direction,\n argument_return_value,\n argument_related_state_variable\n )\n )\n\n all_actions[action_name] = self.Action(action_name, action_arguments, self)\n\n self.actions = all_actions\n return all_actions", "def _generate_actions(self) -> list:\n pass", "def parseAction(self, action):\n action = self.AGENT_TYPES[action]\n\n\n full_action = {}\n full_action[\"action\"] = action\n if action == \"eli-kw\":\n keywords = self.dataset.getSuggestedKeywords()\n full_action[\"keywords\"] = keywords[:self.N]\n elif action == \"info\" or action == \"info-all\":\n full_action[\"function\"] = self.current_function\n\n elif action == \"sugg\" or action == \"sugg-info-all\":\n top_hit = self.dataset.getTopHits(1)\n if not top_hit:\n full_action[\"action\"] = \"eli-query\"\n else:\n functions = self.dataset.getTopHits(1, self.result_index)\n if functions:\n full_action[\"function\"] = functions[0]\n else:\n full_action[\"function\"] = \"\"\n\n self.result_index += 1\n\n elif action == \"sugg-all\":\n full_action[\"list\"] = self.dataset.getTopHits(self.K, self.result_index)\n\n elif action == \"change-page\":\n self.result_index += self.K\n full_action[\"list\"] = self.dataset.getTopHits(self.K, self.result_index)\n return full_action", "def gen_action(self, agent_list, observation, free_map=None):\n action_out = []\n # for i in agent_list:\n # action_out.append(self.random.randint(0, 5)) # choose random action\n #\n # return action_out\n if free_map is not None: self.free_map = free_map\n\n for idx, agent in enumerate(agent_list):\n # First choose a random direction to go into\n starting_action = self.random.randint(0, 5)\n # Initializing the direction based on the starting_action\n if starting_action == 1:\n self.heading_up[idx] = True\n elif starting_action == 2:\n self.heading_right[idx] = True\n # elif starting_action == 3:\n # self.heading_down[idx] = True\n # elif starting_action == 4:\n # self.heading_left[idx] = True\n\n a = self.roomba(agent, idx, observation)\n\n # if starting_action < 3:\n # a = self.roomba(agent, idx, observation)\n # else:\n # a = starting_action\n action_out.append(a)\n\n return action_out", "def actions(self, state):\n \"*** YOUR CODE HERE ***\"\n if state[2] == 0: # When agent is facing North\n state_fw = (state[0], state[1] + 1, 0)\n state_tr = (state[0], state[1], 3)\n state_tl = (state[0], state[1], 1)\n elif state[2] == 1: # When agent is facing West\n state_fw = (state[0] - 1, state[1], 1)\n state_tr = (state[0], state[1], 0)\n state_tl = (state[0], state[1], 2)\n elif state[2] == 2: # When agent is facing South\n state_fw = (state[0], state[1] - 1, 2)\n state_tr = (state[0], state[1], 1)\n state_tl = (state[0], state[1], 3)\n elif state[2] == 3: # When agent is facing East\n state_fw = (state[0] + 1, state[1], 3)\n state_tr = (state[0], state[1], 2)\n state_tl = (state[0], state[1], 0)\n else:\n raise Exception(\"This shouldn't be happening. 
Can't find heading\")\n \n shoot_loc_arr = [] # Initialize Array\n for allowed_state in self.allowed: # Iterate through all allowed states\n for goal_state in self.goals: # Iterate through all goal states\n if allowed_state[0] == goal_state[0] and allowed_state[1] < goal_state[1]: shoot_loc_arr.append((allowed_state[0], allowed_state[1], 0)) # X Matches, Head North\n if allowed_state[0] > goal_state[0] and allowed_state[1] == goal_state[1]: shoot_loc_arr.append((allowed_state[0], allowed_state[1], 1)) # Y Matches, Head West\n if allowed_state[0] == goal_state[0] and allowed_state[1] > goal_state[1]: shoot_loc_arr.append((allowed_state[0], allowed_state[1], 2)) # X Matches, Head South\n if allowed_state[0] < goal_state[0] and allowed_state[1] == goal_state[1]: shoot_loc_arr.append((allowed_state[0], allowed_state[1], 3)) # Y Matches, Head East \n\n dist_fw_arr, dist_tr_arr, dist_tl_arr = ([9999999] for i in range(3)) # Initialize to large values\n for goal in shoot_loc_arr: # Iterate through arrays\n if (state_fw[0],state_fw[1]) in self.allowed:\n dist_fw_arr.append(manhattan_distance_with_heading(state_fw, goal))\n dist_tr_arr.append(manhattan_distance_with_heading(state_tr, goal))\n dist_tl_arr.append(manhattan_distance_with_heading(state_tl, goal))\n\n if (min(dist_fw_arr) <= min(min(dist_tr_arr),min(dist_tl_arr))) and (state_fw[0],state_fw[1]) in self.allowed: return ['Forward']\n if min(dist_tr_arr) <= min(min(dist_fw_arr),min(dist_tl_arr)): return ['TurnRight']\n if min(dist_tl_arr) <= min(min(dist_tr_arr),min(dist_tr_arr)): return ['TurnLeft']\n raise Exception(\"This shouldn't be happening. Can't determine action\")", "def actions(self, states, agent_indices):\n return NotImplementedError()", "def step(self, actions): # actions is a list,\n\n assert len(actions) == len(self.agents), \"Number of actions (\" + str(\n len(actions)) + \") does not match number of agents (\" + str(self.n_agents) + \")\"\n\n # Process movement based on real states (not belief)\n\n\n rewards = [0.] 
* self.n_agents\n\n reward = 0.\n\n\n nextcells = [None] * self.n_agents\n rand_nums = self.rng.uniform(size=self.n_agents)\n\n for i in range(self.n_agents):\n\n currcell = self.tocellcoord[self.agents[i].state]\n if isinstance(actions,int):\n act = actions\n else:\n act = actions[i]\n direction = self.directions[act]\n\n if rand_nums[i] > 1/3: # pick action as intended\n if self.occupancy[tuple(currcell + direction)] == 0:\n nextcells[i] = self.tocellnum[tuple(currcell+direction)]\n else:\n nextcells[i] = self.tocellnum[tuple(currcell)] # wall collision\n # rewards[i] += self.collision_penalty\n\n else: # pick random action, except one initially intended\n adj_cells = self.adjacent_to(currcell) # returns list of tuples\n adj_cells.remove(tuple(currcell+direction))\n\n index = self.rng.choice(range(len(adj_cells)))\n new_cell = adj_cells[i]\n\n if self.occupancy[new_cell] == 0:\n nextcells[i] = self.tocellnum[new_cell]\n else:\n nextcells[i] = self.tocellnum[tuple(currcell)] # wall collision\n # rewards[i] += self.collision_penalty\n\n\n # check for inter-agent collisions:\n collisions = [c for c, count in Counter(nextcells).items() if count > 1]\n while(len(collisions) != 0): # While loop needed to handle edge cases\n for i in range(len(nextcells)):\n if nextcells[i] in collisions:\n nextcells[i] = self.agents[i].state # agent collided with another, so no movement\n\n\n collisions = [c for c, count in Counter(nextcells).items() if count > 1]\n\n\n for i in range(self.n_agents):\n if nextcells[i] == self.agents[i].state: # A collision happened for this agent\n rewards[i] += self.collision_penalty\n else:\n s = nextcells[i] # movement is valid\n self.agents[i].state = s\n if s in self.goals and s not in self.discovered_goals:\n rewards[i] += self.goal_reward\n self.discovered_goals.append(s)\n #rewards[i] += broadcasts[i]*self.broadcast_penalty\n\n\n self.currstate = tuple(nextcells)\n\n\n\n reward = np.sum(rewards)\n\n self.step_count += 1\n\n\n # If all goals were discovered, end episode\n done = len(self.discovered_goals) == len(self.goals)\n\n \n return reward, self.currstate, done, None", "def check_action_sanity(self):\n for action in crest.get_all_actions(self.model):\n assert action._name is not None, f\"There is an Action in {action._parent._name} ({action._parent.__class__.__name__}) whose name is 'None'\"\n assert action._name != \"\", f\"There is an Action in {action._parent._name} ({action._parent.__class__.__name__}) whose name is empty string\"\n\n assert isinstance(action.transition, crest.Transition), f\"Action {action._name}'s state is not a crest.Transition. It is: {action.transition} ({action.transition.__class__})\"\n assert action.state in crest.get_transitions(action._parent), f\"Action's transition {action.transition._name} ({action.transition}) is not in the transitions of entity {action._parent._name} ({action._parent})\"\n\n assert isinstance(action.target, crest.Port), f\"Action {action._name}'s target is not a crest.Port\"\n assert action.target in api.get_targets(action._parent), f\"Action's target {action.target._name} ({action.target}) is not in the targets of entity {action._parent._name} ({action._parent})\"\n\n assert isinstance(action.function, (crestml.LearnedFunction, types.FunctionType)), f\"Action {action._name}'s function needs to be of type types.FunctionType or crestdsl.ml.LearnedFunction\"\n assert 'self' in inspect.signature(action.function).parameters, f\"Action {action._name}'s function has no self parameter. 
entity: {action._parent._name} ({action._parent.__class__.__name__})\"\n assert len(inspect.signature(action.function).parameters) == 1, f\"An action should have only one one argument 'self'\"\n\n for port in SH.get_read_ports_from_update(action.function, action):\n assert port in api.get_sources(action._parent), f\"Action {action._name} seems to be reading a port {port._name} ({port}) which is not in the sources of its entity {action._parent._name} ({action._parent})\"", "def action_space(self, agent: AgentID) -> gymnasium.spaces.Space:\n warnings.warn(\n \"Your environment should override the action_space function. Attempting to use the action_spaces dict attribute.\"\n )\n return self.action_spaces[agent]" ]
[ "0.79722804", "0.66439545", "0.646315", "0.6356318", "0.6356318", "0.63392097", "0.6334569", "0.63179284", "0.62124294", "0.6210738", "0.62058586", "0.6205503", "0.6194105", "0.6186956", "0.6137761", "0.613573", "0.6117709", "0.60733235", "0.60228527", "0.6015301", "0.60044336", "0.5953245", "0.5952847", "0.5916537", "0.5914001", "0.59135306", "0.5898316", "0.5880001", "0.5879458", "0.5869555" ]
0.68322915
1
I'm the 'api' property.
def api(self): return self._api
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def api(self):\n return self.__api", "def get_api(self):\n return self.api", "def api(self):\n return self._api", "def api(self):\n return self._api", "def api(self):\n return self._api", "def api(self):\n return self._api", "def api(self) -> str:", "def api(self):\n if self._api is None:\n self._api = Api(self)\n return self._api", "def _setup_api_properties(self):\n self.implicit_api_logical_id = GeneratedLogicalId.implicit_http_api()\n self.implicit_api_condition = \"ServerlessHttpApiCondition\"\n self.api_event_type = \"HttpApi\"\n self.api_type = SamResourceType.HttpApi.value\n self.api_id_property = \"ApiId\"\n self.editor = OpenApiEditor", "def api(self) -> str:\n return self._api", "def api_access(self):\n return self._api_access", "def api(self) -> Optional[pulumi.Input['ApplicationApiArgs']]:\n return pulumi.get(self, \"api\")", "def api(self) -> Optional[pulumi.Input['ApplicationApiArgs']]:\n return pulumi.get(self, \"api\")", "def __init__(self, api_use=False):\n self.api_use = api_use", "def api(self, api):\n if self._running:\n raise ValueError('API cannot be modified while the server is running')\n\n self._api = api", "def getAPI(self):\n return self.api_url", "def Modifier_API(self):\n\t\tpass", "def get_api(self, ApiId: str) -> Dict:\n pass", "def __init__(self, api=None, properties=None):\n if not api is None:\n self.api = api", "def __init__(self, base_api: BaseApi):\n super().__init__(base_api, self.__class__.__name__)", "def api():\n\treturn \"The API call\"", "def api_properties(self) -> Optional['outputs.ApiPropertiesResponse']:\n return pulumi.get(self, \"api_properties\")", "def api_used(self, api_used):\n\n self._api_used = api_used", "def get_api(self):\n from geoffrey.utils import get_api\n return get_api(self.app.routes, prefix='/api')", "def prepare_api(self):\n return None", "def api(self) -> pulumi.Output[Optional['outputs.ApplicationApi']]:\n return pulumi.get(self, \"api\")", "def __api(self) -> IWorkspace:\n assert self.__internal_api is not None\n return self.__internal_api", "def apiurl(self):\n return self._apiurl", "def __init__(self, api_url):\n self.api_url = api_url", "def __init__(self, api):\n self.api = api\n self.data = None" ]
[ "0.764146", "0.7558748", "0.74769485", "0.74769485", "0.74769485", "0.74769485", "0.7213842", "0.70904386", "0.708594", "0.7075687", "0.69147086", "0.67615086", "0.67615086", "0.664098", "0.6583839", "0.65325695", "0.64373815", "0.64355713", "0.6426633", "0.6384219", "0.6328449", "0.6308562", "0.6283733", "0.6216522", "0.6213452", "0.6185925", "0.61472166", "0.61245155", "0.61220086", "0.6112075" ]
0.7629988
1
Calculate pairwise diversity given a list of genotype calls. Returns 0 for a monomorphic site, or sites with only one nonmissing call.
def pairwise_diversity(calls):
    # Count up the number of reference and alternate genotypes.
    if 0 in calls:
        ref_count = calls.count(0)
    else:
        return 0
    if 1 in calls:
        alt_count = calls.count(1)
    else:
        return 0
    # This sample size will change depending on how many non-missing genotypes
    # there are.
    total_count = ref_count + alt_count
    # Calculate the similarities based on the number of reference and
    # alternative genotypes. Calculate the number of pairwise comparisons
    # that were made.
    ref_sim = n_choose_r(ref_count, 2)
    alt_sim = n_choose_r(alt_count, 2)
    total_comp = n_choose_r(total_count, 2)
    # Then pairwise diversity is 1 - [(ref_sim + alt_sim) / total_comp]
    return 1 - ((ref_sim + alt_sim) / float(total_comp))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pairwise_diversity(self, samples=None):\n if samples is None:\n samples = self.samples()\n return float(\n self.diversity(\n [samples], windows=[0, self.sequence_length], span_normalise=False\n )[0]\n )", "def _coverage_of_diploid_alleles(\n cls,\n allele1,\n allele2,\n allele_combination_cov,\n allele_groups_dict,\n ):\n shared_cov = 0\n allele1_total_cov, allele2_total_cov = 0, 0\n\n for allele_key, coverage in allele_combination_cov.items():\n assert coverage >= 0\n allele_combination = allele_groups_dict[allele_key]\n has_allele_1 = allele1 in allele_combination\n has_allele_2 = allele2 in allele_combination\n if has_allele_1 and has_allele_2:\n shared_cov += coverage\n elif has_allele_1:\n allele1_total_cov += coverage\n elif has_allele_2:\n allele2_total_cov += coverage\n\n ## Perform the dispatching in ambiguous equiv classes ##\n if allele1_total_cov != 0 or allele2_total_cov != 0: # If both are zero, there is no dispatching to do.\n allele1_belonging = allele1_total_cov / (allele1_total_cov + allele2_total_cov)\n allele1_total_cov += allele1_belonging * shared_cov\n allele2_total_cov += (1 - allele1_belonging) * shared_cov\n return allele1_total_cov, allele2_total_cov", "def getDivisors(n):", "def d(n):\n return sum(divisors(n))", "def get_diversity(population):\n\t# average variance of each component\n\treturn np.average(np.var(population, axis = 0))\n\treturn np.average(np.std(population, axis = 0))\n\treturn np.average(np.std(population, axis = 0) / ((self.benchmarks.bound[1] - self.benchmarks.bound[0]) / 2))", "def _calcFs(pop_indivs, pop_counts):\n pop_names = pop_counts.keys()\n r = len(pop_names)\n n_i = []\n for pop_name in pop_names:\n n_i.append(len(pop_indivs[pop_name]))\n n = reduce(lambda x, y: x+y, n_i)\n n_bar = 1.0 * n / r\n n_c = n\n for ni in n_i:\n n_c -= 1.0*(ni**2)/n\n n_c = n_c / (r-1)\n\n alleles = _get_all_alleles(pop_counts)\n a = 0.0\n b = 0.0\n c = 0.0\n for allele in alleles:\n p_i = []\n for pop_name in pop_names:\n p_i.append(_get_allele_freq(pop_counts[pop_name], allele))\n p_bar = 0.0\n for i in range(len(p_i)):\n p_bar += n_i[i] * p_i[i]\n p_bar = 1.0 * p_bar / n\n s_2 = 0.0\n for i in range(len(p_i)):\n s_2 += n_i[i] * (p_i[i] - p_bar) * (p_i[i] - p_bar)\n h_bar = 0.0\n for i in range(len(p_i)):\n h_bar += _get_het_allele_freq(pop_indivs[pop_name], allele) *n_i[i]\n h_bar = 1.0 * h_bar / n\n a += n_bar / n_c * (s_2 - (p_bar * (1-p_bar) - (r - 1.0) / r * s_2 - h_bar / 4.0) / (n_bar - 1.0) )\n b += n_bar / (n_bar - 1) * (p_bar * (1-p_bar) - (r - 1.0) / r * s_2 - (2 * n_bar - 1) / (4.0 * n_bar) * h_bar )\n c += h_bar / 2.0\n if a + b + c == 0:\n fst = 0.0\n else:\n fst = a / (a + b + c)\n if a + b + c == 0:\n fit = 1.0\n else:\n fit = (1.0 - c) / (a + b + c)\n if b + c == 0:\n fis = 1.0\n else:\n fis = (1.0 - c) / (b + c)\n\n return fst, fit, fis", "def diversity_termination(population, num_generations, num_evaluations, args):\r\n min_diversity = args.setdefault('min_diversity', 0.001)\r\n cart_prod = itertools.product(population, population)\r\n distance = []\r\n for (p, q) in cart_prod:\r\n d = 0\r\n for x, y in zip(p.candidate, q.candidate):\r\n d += (x - y)**2\r\n distance.append(math.sqrt(d))\r\n return max(distance) < min_diversity", "def calculate_dop(points, iscore=np.array([0])):\n N = points.shape[0]\n if iscore.size == 1:\n iscore = np.ones(N)\n row_norm = np.linalg.norm(points, axis=1)\n coord_norm = np.broadcast_to(np.atleast_2d(row_norm).T, [N, 3])\n unit_vect = points/coord_norm\n G_norm = np.hstack((unit_vect, np.ones([N, 
1])))\n G_dash = np.atleast_2d(iscore).T*G_norm\n H_dash = np.linalg.inv(np.matmul(G_dash.T, G_dash))\n IDOP = np.sqrt(np.sum(np.diag(H_dash)))\n return IDOP, np.sum(iscore)", "def benchmark(self):\n nsites = []\n for m in self.methods:\n for name, structure in self.test_structures.items():\n cns = []\n if self.unique_sites:\n es = SpacegroupAnalyzer(structure).get_symmetrized_structure().equivalent_sites\n sites = [structure.index(x[0]) for x in es]\n else:\n sites = range(len(structure))\n\n for key, val in self.hi.items():\n if name == key:\n for j in sites:\n if isinstance(m, NearNeighbors):\n tmpcn = m.get_cn_dict(structure, j, self.use_weights)\n else:\n tmpcn = m.compute(structure, j)\n if tmpcn == \"null\":\n continue\n if self.nround:\n self._roundcns(tmpcn, self.nround)\n cns.append((structure[j].species_string, tmpcn))\n if self.cation_anion:\n for mat, cat in self.cations.items():\n if (name == mat) and cat:\n cns = self._popel(cns, cat)\n elif self.anion_cation:\n for mat, an in self.anions.items():\n if name == mat:\n cns = self._popel(cns, an)\n m._cns[name] = cns\n nsites.append(len(cns))\n self.nsites = max(nsites)", "def sum_pairwise_differences(vcf_file,chrom,start,end,mincov=0,maxcov=10000,inds=\"all\",bgzip=True,called=True,output=\"sum\",nb_ind_with_min_cov=\"all\"):\n\t###CHOOSE THE RIGHT VCF\n\tinput_vcf=vcf.Reader(fsock=None, filename=vcf_file, compressed=bgzip, prepend_chr=\"False\", strict_whitespace=False)#open the vcf parser\n\tif inds==\"all\" or inds==[\"all\"]:inds=input_vcf.samples# transform \"all\" in a list of all individuals in the vcf\n\t#Function\n\tsum_pairwise=0#iterator for sampling frequency\n\tnsites_ok=0\n\t###identify individual to remove when calculating stats\n\tinds_to_delete=[]\n\tfor i,ind in enumerate(input_vcf.samples):#check which ind is ion sample and compare it to our list of inds\n\t\t if ind not in inds:#delete this ind\n\t\t \tinds_to_delete.append(i)\n\t#go along the region\n\tif chrom!=\"all\":\n\t\tcheck=len(sh.tabix(vcf_file,str(chrom)+\":\"+str(start)+\"-\"+str(end)))\n\t\t#print \"check;' \",check,\"'\"\n\t\tif check==0: \n\t\t\tif output==\"sum\":\n\t\t\t\treturn 0\n\t\t\telif output==\"extended\":\n\t\t\t\treturn [0,0]\n\t\tfor record in input_vcf.fetch(chrom,start,end):# for every site\n\t\t\t#print \"HERE\"\n\t\t\t#print input_vcf,record,mincov,maxcov, inds, nb_ind_with_min_cov\n\t\t\t#raise Exception\n\t\t\tcond=checkSnp_Cov(input_vcf,record,mincov,maxcov,inds=inds,nalleles=[1,2],nb_ind_with_min_cov=nb_ind_with_min_cov)# check if the site respect our condition\n\t\t\tprint inds\n\t\t\t#print \"cond\",cond\n\t\t\t#print \"HERE2\"\n\t\t\tif cond:# if it does\n\t\t\t \tb= record.nucl_diversity\n\t\t\t \tfor index in sorted(inds_to_delete)[::-1]:#remove the individuals we do not want\n\t\t\t \t\tdel record.samples[index]\n\t\t\t \t\t#print record.nucl_diversity\n\t\t\t \t#print record.samples\n\t\t\t\tnsites_ok+=1\n\t\t\t\t#print record.nucl_diversity\n\t\t\t\tprint \"samples pairwise\", len(record.samples),record.nucl_diversity \n\t\t\t\tsum_pairwise+=record.nucl_diversity \n\t\t\t\t#if b!=record.nucl_diversity : print \" old and new diversity\", b,record.nucl_diversity\n\t\t\t#compute total information for the window\n\telif chrom==\"all\":\n\t\tfor record in input_vcf:# for every site\n\t\t\tcond=checkSnp_Cov(input_vcf,record,mincov,maxcov,inds=inds,nalleles=[1,2],nb_ind_with_min_cov=nb_ind_with_min_cov)# check if the site respect our condition\n\t\t\tif cond:# if it does\n\t\t\t \tfor index in 
sorted(inds_to_delete)[::-1]:#remove the individuals we do not want\n\t\t\t \t\tdel record.samples[index]\n\t\t\t \t#print record.samples\n\t\t\t\tnsites_ok+=1\n\t\t\t\tsum_pairwise+=record.nucl_diversity \n\t\t\t#compute total information for the window\n\tif output==\"sum\":\n\t\treturn sum_pairwise\n\telif output==\"extended\":\n\t\treturn [sum_pairwise,nsites_ok]\n\t#Go in normal vcf and count sites", "def diversity(\n self, sample_sets=None, windows=None, mode=\"site\", span_normalise=True\n ):\n return self.__one_way_sample_set_stat(\n self._ll_tree_sequence.diversity,\n sample_sets,\n windows=windows,\n mode=mode,\n span_normalise=span_normalise,\n )", "def dominance(counts):\n freqs = counts/float(counts.sum())\n return (freqs*freqs).sum()", "def dissimilarity(clusters):\n totDist = 0\n for c in clusters:\n totDist += c.variability()\n return totDist", "def sum_proper_divisors(n):\r\n return sum(proper_divisors(n))", "def innerProd(vcfResults):\n both = vcfResults.get(\"both\", 0)\n onlyX = vcfResults.get(\"onlyX\", 0)\n onlyY = vcfResults.get(\"onlyY\", 0)\n \n # return (both - onlyX - onlyY)/(both + onlyX + onlyY) \n return (both)/(both + onlyX + onlyY)\n # Distance heuristic = # correct variants / total \n # => % correct variants called", "def divisors(decomp):\n combine = lambda acc, p: set(a * (p ** e) for a in acc for e in xrange(decomp[p] + 1))\n return reduce(combine, decomp, {1})", "def segPDist(seg1, seg2, speeds):\n \n seg1Valid=speeds.loc[seg1].dropna()\n seg2Valid=speeds[seg1Valid.index].loc[seg2].dropna()/sum(speeds[seg1Valid.index].loc[seg2].dropna(),1)\n seg1Valid=seg1Valid[seg2Valid.index]/sum(seg1Valid[seg2Valid.index],1)\n if len(seg1Valid.values) == 0 or len(seg2Valid.values) == 0 : return 1 + (1-len(seg2Valid.index)/ speeds.columns.size)\n return np.mean((seg1Valid.values - seg2Valid.values)**2) + (1-len(seg2Valid.index)/ speeds.columns.size)", "def divisors(n):\n return tuple(_divisor_gen(n))", "def find_divisors(n):\n\n\tpd = [1]\n\n\tsqrtN = int(math.sqrt(n))\n\n\tfor d in range(2, sqrtN+1):\n\t\tif n % d == 0:\n\t\t\tpd.append(d)\n\t\t\tpair = int(n/d)\n\t\t\tif not pair == d:\n\t\t\t\tpd.append(pair)\n\n\treturn pd", "def test_calc_shared_phylotypes_pairwise(self):\r\n\r\n #self.assertEqual(_calc_shared_phylotypes_pairwise(self.otu_table, 0, 0), 5)\r\n #self.assertEqual(_calc_shared_phylotypes_pairwise(self.otu_table, 0, 1), 2)\r\n #self.assertEqual(_calc_shared_phylotypes_pairwise(self.otu_table, 0, 2), 3)\r\n #self.assertEqual(_calc_shared_phylotypes_pairwise(self.otu_table, 2, 2), 3)\r\n self.assertEqual(\r\n _calc_shared_phylotypes_pairwise(\r\n self.otu_table,\r\n 'S1',\r\n 'S1'),\r\n 5)\r\n self.assertEqual(\r\n _calc_shared_phylotypes_pairwise(\r\n self.otu_table,\r\n 'S1',\r\n 'S2'),\r\n 2)\r\n self.assertEqual(\r\n _calc_shared_phylotypes_pairwise(\r\n self.otu_table,\r\n 'S1',\r\n 'S3'),\r\n 3)\r\n self.assertEqual(\r\n _calc_shared_phylotypes_pairwise(\r\n self.otu_table,\r\n 'S3',\r\n 'S3'),\r\n 3)", "def doubleStartEndPoints(netlist, chip_to_occurrences=None):\n som = 0\n if chip_to_occurrences is None:\n chips_in_netlist = list(itertools.chain.from_iterable(netlist))\n occurrences = np.bincount(chips_in_netlist)\n for i in occurrences:\n if i > 1:\n som += i\n else:\n for i in chip_to_occurrences.values():\n if i > 1:\n som += i\n return som", "def divisions(self,domain,divisions):\n size = domain.height/divisions\n counter = []\n for i in range(divisions):\n count = ((self.z >= i*size) & (self.z < (i+1)*size)).sum()\n counter.append(count)\n 
return counter", "def answer(l):\n num_divisors = [0] * len(l)\n triple_count = 0\n for large in range(1, len(l)):\n for small in range (0, large):\n if l[large] % l[small] == 0:\n num_divisors[large] += 1\n triple_count += num_divisors[small]\n return triple_count", "def num_divisors_ii(n):\n set_pf = set(n)\n n_og = 2**(len(set_pf))\n n_div = n_og\n for pf in set_pf:\n x = n.count(pf)\n n_div += n_div//2 * (x - 1)\n return n_div", "def evaluate_diversity_single(indices, distances, weight=0.5):\n i, j = [e for e in zip(*itertools.combinations(indices, 2))]\n subset_distances = distances[i, j]\n minimum = np.min(subset_distances)\n mean = np.mean(subset_distances)\n diversity = (1 - weight) * minimum + weight * mean\n\n return [diversity]", "def findDivisors(n1, n2):\n divisors = () # the empty tuple\n for i in range(1, min(n1, n2) + 1):\n if n1%i == 0 and n2%i == 0:\n divisors = divisors + (i,)\n return divisors", "def num_divisors_iii(n):\n set_pf = set(n)\n n_div = 1\n for pf in set_pf:\n x = n.count(pf)\n n_div *= (1 + x)\n return n_div", "def main(pairs, freq):\n total_dominant_offspring = 0\n pr_dom = [\n pr_dominant_offpring(offspring_zygosity(parent_1, parent_2))\n for parent_1, parent_2 in pairs\n ]\n for freq, pr_dom in zip(freq, pr_dom):\n pair_offspring = freq * 2\n total_dominant_offspring += pr_dom * pair_offspring\n\n return total_dominant_offspring", "def calDominationCount(p,visitedPoints):\n isDominated = utils.MultiThread(utils.dominating, zip([visitedPoints[k].mean for k in visitedPoints],repeat(p.mean)))\n dominationCount = sum(isDominated)\n print('Please _cutils.calDominantionCount(). This method is too slow.')\n return dominationCount", "def sum_divisors(n):\r\n return sum(proper_divisors(n)) + n" ]
[ "0.58707273", "0.5370491", "0.5241782", "0.5118214", "0.50946355", "0.5012713", "0.49800998", "0.49774712", "0.49727434", "0.49480033", "0.49466312", "0.4924413", "0.4903285", "0.4879957", "0.4876379", "0.48494408", "0.48436004", "0.484338", "0.48357964", "0.4832953", "0.48318598", "0.48250172", "0.48200756", "0.47515142", "0.4743692", "0.47208574", "0.47119662", "0.47089007", "0.46883667", "0.4681543" ]
0.82642967
0
Read a VCF, and calculate pairwise diversity for each site. Returns a dictionary with genomic coordinate as key and pairwise diversity as the value.
def read_vcf(vcf):
    vcfdata = {}
    with open(vcf, 'r') as f:
        for line in f:
            if line.startswith('#'):
                continue
            else:
                tmp = line.strip().split()
                pos = int(tmp[1])
                sample_info = tmp[9:]
                # Get the genotype calls from the sample info fields
                gt = [t.split(':')[0] for t in sample_info]
                hap_calls = []
                for g in gt:
                    if g == '0/0':
                        hap_calls.append(0)
                    elif g == '1/1':
                        hap_calls.append(1)
                    else:
                        hap_calls.append('NA')
                site_pi = pairwise_diversity(hap_calls)
                vcfdata[pos] = site_pi
    return vcfdata
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_vc(mdf, genomecol, vccol, verbose=False):\n vc = {}\n with open(mdf, 'r') as fin:\n for li in fin:\n p = li.strip().split(\"\\t\")\n vc[p[genomecol]] = p[vccol]\n if verbose:\n sys.stderr.write(f\"Found {len(vc)} virus clusters in {mdf}\\n\")\n return vc", "def read_csvs(files_to_read: Dict):\n res = {team: {} for team in TEAM_MAP[CFD]}\n for k, v in files_to_read.items():\n rating_system, file_data = get_csv_data_for_path(v)\n team_name_map_for_rating_system = TEAM_MAP[rating_system]\n for row in file_data:\n team, rtg = row[:2]\n standardized_team_name = team_name_map_for_rating_system[team][CFD]\n res[standardized_team_name].update({rating_system: float(rtg)})\n\n return res", "def readCC(Ped_File, vcfIndivs):\n\n case = {} # case hash table: Key = ID Value = Sex\n control = {} # control hash table: Key = ID Value = Sex\n caseControl = {} # cases and controls hash table: Key = ID Value = index in vcf\n\n indivSet = Set(vcfIndivs) # convert array to Set to decrease lookup time.\n\n with open(Ped_File) as file:\n for line in file:\n field = line.strip().split('\\t')\n\n indiv_ID = field[1]\n father_ID = field[2]\n mother_ID = field[3]\n ptype = field[5] # case/control status: 1=control, 2=case\n\n if indiv_ID not in indivSet:\n sys.stderr.write('Individual {} is not in vcf.\\n'.format(indiv_ID))\n continue\n\n if field[4] == '1':\n sex = 'male'\n elif field[4] == '2':\n sex = 'female'\n else:\n sex = 'NA'\n\n if(father_ID != '0' or mother_ID != '0'):\n continue\n\n elif(ptype == '2'):\n case[indiv_ID] = sex\n caseControl[indiv_ID] = vcfIndivs.index(indiv_ID)\n\n elif(ptype == '1'):\n control[indiv_ID] = sex\n caseControl[indiv_ID] = vcfIndivs.index(indiv_ID)\n\n print 'Number of cases in hash table = {}.'.format(len(case))\n print 'Number of controls in hash table = {}.'.format(len(control))\n return case, control, caseControl", "def readFamily(Ped_File, vcfIndivs, unaff_Flag):\n\n family = {} # family hash table Key = ID Value = (Father ID, Mother ID, Sex)\n indivs = {} # individuals in VCF Key = ID Value = index in the vcf\n\n indivSet = Set(vcfIndivs) # Convert array to Set to decrease lookup time.\n\n with open(Ped_File) as file:\n for line in file:\n field = line.strip().split('\\t')\n\n family_ID = field[0]\n indiv_ID = field[1]\n father_ID = field[2]\n mother_ID = field[3]\n\n if indiv_ID not in indivSet:\n sys.stderr.write('Individual {} is not in vcf.\\n'.format(indiv_ID))\n continue\n\n if field[4] == '1':\n sex = 'male'\n elif field[4] == '2':\n sex = 'female'\n else:\n sex = 'NA'\n\n # Parents, cases, and controls will not have parental IDs.\n if(father_ID == '0' or mother_ID == '0'):\n continue\n\n # Check to see if the parents are in the vcf.\n if father_ID not in indivSet or mother_ID not in indivSet:\n sys.stderr.write('Family {} is incomplete.\\n'.format(family_ID))\n continue\n\n # If we only want affected probands.\n if not unaff_Flag:\n if field[5] != '2':\n continue\n # If we are only looking at unaffected probands.\n else:\n if field[5] != '1':\n continue\n\n # Family dictionary is in the form: {child_ID} = [Dad_ID, Mom_ID, Sex]\n family[indiv_ID] = (father_ID, mother_ID, sex)\n indivs[indiv_ID] = vcfIndivs.index(indiv_ID)\n indivs[father_ID] = vcfIndivs.index(father_ID)\n indivs[mother_ID] = vcfIndivs.index(mother_ID)\n\n print 'Number of families in hash table = {}.'.format(len(family))\n return family, indivs", "def readProcessedFCD():\n procFcdDict = {}\n pqDateDict = {} # each date is a period / quota tupel assigned\n simDate = '2007-07-18 '\n day = 
0\n # create keys for the procFcdDict\n for p in period:\n for q in quota:\n day += 86400\n date, time = calcTime.getDateFromDepart(day).split(\" \")\n pqDateDict.setdefault(date, (p, q))\n procFcdDict.setdefault((p, q), {})\n # print date,p,q\n\n inputFile = open(path.FQprocessedFCD, 'r')\n for line in inputFile:\n timestamp, edge, speed, cover, id = line.split('\\t')\n date, time = calcTime.getNiceTimeLabel(timestamp).split(\" \")\n # add values to actual Dict\n timestep = calcTime.getTimeInSecs(simDate + time)\n procFcdDict[pqDateDict[date]].setdefault(\n timestep, []).append((id, edge, float(speed) / 3.6))\n inputFile.close()\n\n return procFcdDict", "def sum_pairwise_differences(vcf_file,chrom,start,end,mincov=0,maxcov=10000,inds=\"all\",bgzip=True,called=True,output=\"sum\",nb_ind_with_min_cov=\"all\"):\n\t###CHOOSE THE RIGHT VCF\n\tinput_vcf=vcf.Reader(fsock=None, filename=vcf_file, compressed=bgzip, prepend_chr=\"False\", strict_whitespace=False)#open the vcf parser\n\tif inds==\"all\" or inds==[\"all\"]:inds=input_vcf.samples# transform \"all\" in a list of all individuals in the vcf\n\t#Function\n\tsum_pairwise=0#iterator for sampling frequency\n\tnsites_ok=0\n\t###identify individual to remove when calculating stats\n\tinds_to_delete=[]\n\tfor i,ind in enumerate(input_vcf.samples):#check which ind is ion sample and compare it to our list of inds\n\t\t if ind not in inds:#delete this ind\n\t\t \tinds_to_delete.append(i)\n\t#go along the region\n\tif chrom!=\"all\":\n\t\tcheck=len(sh.tabix(vcf_file,str(chrom)+\":\"+str(start)+\"-\"+str(end)))\n\t\t#print \"check;' \",check,\"'\"\n\t\tif check==0: \n\t\t\tif output==\"sum\":\n\t\t\t\treturn 0\n\t\t\telif output==\"extended\":\n\t\t\t\treturn [0,0]\n\t\tfor record in input_vcf.fetch(chrom,start,end):# for every site\n\t\t\t#print \"HERE\"\n\t\t\t#print input_vcf,record,mincov,maxcov, inds, nb_ind_with_min_cov\n\t\t\t#raise Exception\n\t\t\tcond=checkSnp_Cov(input_vcf,record,mincov,maxcov,inds=inds,nalleles=[1,2],nb_ind_with_min_cov=nb_ind_with_min_cov)# check if the site respect our condition\n\t\t\tprint inds\n\t\t\t#print \"cond\",cond\n\t\t\t#print \"HERE2\"\n\t\t\tif cond:# if it does\n\t\t\t \tb= record.nucl_diversity\n\t\t\t \tfor index in sorted(inds_to_delete)[::-1]:#remove the individuals we do not want\n\t\t\t \t\tdel record.samples[index]\n\t\t\t \t\t#print record.nucl_diversity\n\t\t\t \t#print record.samples\n\t\t\t\tnsites_ok+=1\n\t\t\t\t#print record.nucl_diversity\n\t\t\t\tprint \"samples pairwise\", len(record.samples),record.nucl_diversity \n\t\t\t\tsum_pairwise+=record.nucl_diversity \n\t\t\t\t#if b!=record.nucl_diversity : print \" old and new diversity\", b,record.nucl_diversity\n\t\t\t#compute total information for the window\n\telif chrom==\"all\":\n\t\tfor record in input_vcf:# for every site\n\t\t\tcond=checkSnp_Cov(input_vcf,record,mincov,maxcov,inds=inds,nalleles=[1,2],nb_ind_with_min_cov=nb_ind_with_min_cov)# check if the site respect our condition\n\t\t\tif cond:# if it does\n\t\t\t \tfor index in sorted(inds_to_delete)[::-1]:#remove the individuals we do not want\n\t\t\t \t\tdel record.samples[index]\n\t\t\t \t#print record.samples\n\t\t\t\tnsites_ok+=1\n\t\t\t\tsum_pairwise+=record.nucl_diversity \n\t\t\t#compute total information for the window\n\tif output==\"sum\":\n\t\treturn sum_pairwise\n\telif output==\"extended\":\n\t\treturn [sum_pairwise,nsites_ok]\n\t#Go in normal vcf and count sites", "def load_variants_from_vcf( vcf_file ):\n\t\n\tsnps_per_chr = {}\n\tindels_per_chr = {}\n\t\n\ttri_counter 
= 0\n\t\n\twith open( vcf_file, \"r\" ) as f:\n\t\tline = f.readline()\n\t\twhile line:\n\t\t\tif line[0] != '#':\n\t\t\t\tparts = line.strip().split('\\t')\n\t\t\t\tif not \",\" in parts[4]:\t#only biallelic variants\n\t\t\t\t\tif len( parts[3] ) == len( parts[4] ) and len( parts[3] ) == 1:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tsnps_per_chr[ parts[0] ].append( parts[1] )\n\t\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\t\tsnps_per_chr.update( { parts[0]: [ parts[1] ] } )\n\t\t\t\t\t\t\n\t\t\t\t\telif len( parts[3] ) != len( parts[4] ):\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tindels_per_chr[ parts[0] ].append( parts[1] )\n\t\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\t\tindels_per_chr.update( { parts[0]: [ parts[1] ] } )\n\t\t\t\telse:\t#count triallelic variants\n\t\t\t\t\ttri_counter += 1\n\t\t\t\t\t\t\n\t\t\tline = f.readline()\n\tprint \"number of triallelic variants: \" + str( tri_counter )\n\t\n\treturn snps_per_chr, indels_per_chr", "def get_cve(device_list):\n\n # A list of devices has to be passed in to match device names in strings. Nipper does not do a good job of making a\n # unique identifier for an affected host to a CVE. They just write a sentence with the device name in it.\n\n cves = {}\n\n cve_element = nipper_xml.find(\"./report/part/[@ref='VULNAUDIT']\")\n\n for section in cve_element.findall('./section'):\n # print section.get('title')\n if section.get('title').startswith('CVE-'): # Look only at CVE sections\n cve = section.get('title')\n cves[cve] = {}\n if DEBUG:\n print info + \"CVE: %s\" % cve\n # CVSS_v2 Score\n cvss_score = section.find(\"./infobox/infodata/[@label='CVSSv2 Score']\").text\n cves[cve].update({'CVSSv2_Score': cvss_score})\n if DEBUG:\n print \"\\t\" + info + \"CVSSv2 Score: %s\" % cvss_score\n # Single Devices\n for ad_section in section.findall(\"./section/[@title='Affected Device']\"):\n for d in device_list:\n for i in ad_section.find(\"./text\").text.split():\n if d == i:\n cves[cve].update({'Hosts': [d]})\n if DEBUG:\n print \"\\t\" + note + \"Single Device: %s\" % d\n # Multiple Devices\n ad_list = []\n for ad_section in section.findall(\"./section/[@title='Affected Devices']/list/listitem\"):\n ad_list.append(ad_section.text.split(\" - \")[1].rstrip(\";\").rstrip(\".\"))\n if len(ad_list) > 0:\n cves[cve].update({'Hosts': ad_list})\n if DEBUG:\n print \"\\t\" + note + \"Multiple Devices: %s\" % ad_list\n if DEBUG:\n print info + \"CVE Object:\"\n print cves\n raw_input(warn + \"Press enter to continue\")\n return cves", "def vcf_parsed (vcf_file, sam_dic):\n #real SNPs localiation - snp localiation in Cg_Nara5 genome\n #snps variation - the real modification between Ck and UG1 and UG2\n\n vcf_file = open(arg.vcf_infile)\n vcf_dic = {}\n snps_variation_dic ={}\n counter = 0\n counter_1 = 0\n for line in vcf_file:\n if line.startswith(\"##\"):\n pass\n elif line.startswith(\"#CHROM\"):\n pass\n else:\n line_information = line.strip().split()\n\n loci_chrom = line_information[0].split()\n loci_pos = line_information[1]\n snps_variation = line_information [3]\n snps_variation_2 = line_information [4]\n\n for i in loci_chrom:\n if i not in vcf_dic:\n vcf_dic[i] = [int(loci_pos)]\n snps_variation_dic[i] = [(snps_variation, snps_variation_2)]\n\n else:\n vcf_dic[i].append(int(loci_pos))\n snps_variation_dic[i].append((snps_variation, snps_variation_2))\n\n for locus, position_list in vcf_dic.items():\n\n if locus in sam_dic:\n sam_dic[locus][\"loci_position\"] = position_list\n counter += len(position_list)\n\n real_snp = []\n real_snp_cr = []\n for snp in 
position_list:\n if sam_dic[locus][\"reading_frame\"] == \"0\":\n real_snp_loc = (sam_dic[locus][\"start\"] + snp)\n real_snp.append(real_snp_loc)\n sam_dic[locus][\"real_snp_localization\"] = real_snp\n else:\n sequence_length = len(sam_dic[locus][\"sequence_locus\"])\n real_snp_loc_cr = ((sequence_length - snp) + sam_dic[locus][\"start\"]) + 1\n real_snp.append(real_snp_loc_cr)\n sam_dic[locus][\"real_snp_localization\"] = real_snp\n\n for locus, variation in snps_variation_dic.items():\n if locus in sam_dic:\n sam_dic[locus][\"snp_variation\"]= snps_variation_dic[locus]\n\n\n\n\n\n print (\"Number of SNPs mapped on Cg: {}\".format(counter))\n\n print (\"Step 2 - Parse the .vcf file -- Done\")\n\n # The sam_dic return: Key - loci name; value - the same information previously\n # described as well as the real snp localization and the snp variation\n # present in the initial dataset (Ck vs UG1/2)\n\n\n\n return sam_dic", "def chrompos_from_vcf_file(vcf_filename):\n vcf_file = open(vcf_filename, 'r')\n snps = {}\n snp_index = 0\n for line in vcf_file:\n if line[0] != VCF_HEADER:\n snp_index += 1\n snps[snp_index] = [] # Create dictionary item for each SNP\n snps[snp_index].append(str(snp_index)) # index\n snps[snp_index].append(line.split()[VCF_CHROM_COL]) # CHROM\n snps[snp_index].append(line.split()[VCF_POS_COL]) # POS\n vcf_file.close()\n return snps", "def read_annovar_vcf(input_vcf):\n hash_table = {}\n vcf_reader = vcf.Reader(filename=input_vcf)\n\n for i, r in enumerate(vcf_reader):\n hash_variant = {}\n\n hash_fields = dict(r.INFO)\n hash_fields.update(dict(zip(r.samples[0].data._fields, r.samples[0].data)))\n\n chrom = r.CHROM\n pos = str(r.POS)\n ref = str(r.REF)\n alt = str(r.ALT[0])\n l_samples = len(r.samples)\n\n if r.FILTER == []:\n hash_variant['FILTER'] = \"PASS\"\n else:\n hash_variant['FILTER'] = str(r.FILTER)\n\n hash_variant['QUAL'] = str(r.QUAL)\n\n hash_variant['chr'] = chrom.strip()\n hash_variant['pos'] = pos.strip()\n hash_variant['ref'] = ref.strip()\n hash_variant['alt'] = alt.strip()\n hash_variant['Func.refGene'] = str(hash_fields.get('Func.refGene', '.')[0])\n hash_variant['Gene.refGene'] = str(hash_fields.get('Gene.refGene', '.')[0])\n hash_variant['GeneDetail.refGene'] = str(hash_fields.get('GeneDetail.refGene', '.')[0])\n hash_variant['ExonicFunc.refGene'] = str(hash_fields.get('ExonicFunc.refGene', '.')[0])\n hash_variant['AAChange.refGene'] = str(hash_fields.get('AAChange.refGene', '.')[0])\n hash_variant['cytoBand'] = str(hash_fields.get('cytoBand', '.')[0])\n hash_variant['ExAC_ALL'] = str(hash_fields.get('ExAC_ALL', '.'))\n hash_variant['ExAC_AFR'] = str(hash_fields.get('ExAC_AFR', '.'))\n hash_variant['ExAC_AMR'] = str(hash_fields.get('ExAC_AMR', '.'))\n hash_variant['ExAC_EAS'] = str(hash_fields.get('ExAC_EAS', '.'))\n hash_variant['ExAC_FIN'] = str(hash_fields.get('ExAC_FIN', '.'))\n hash_variant['ExAC_NFE'] = str(hash_fields.get('ExAC_NFE', '.'))\n hash_variant['ExAC_OTH'] = str(hash_fields.get('ExAC_OTH', '.'))\n hash_variant['ExAC_SAS'] = str(hash_fields.get('ExAC_SAS', '.'))\n hash_variant['avsnp147'] = str(hash_fields.get('avsnp147', '.')[0])\n hash_variant['SIFT_score'] = str(hash_fields.get('SIFT_score', '.')[0])\n hash_variant['SIFT_pred'] = str(hash_fields.get('SIFT_pred', '.')[0])\n hash_variant['Polyphen2_HDIV_score'] = str(hash_fields.get('Polyphen2_HDIV_score', '.')[0])\n hash_variant['Polyphen2_HDIV_pred'] = str(hash_fields.get('Polyphen2_HDIV_pred', '.')[0])\n hash_variant['Polyphen2_HVAR_score'] = 
str(hash_fields.get('Polyphen2_HVAR_score', '.')[0])\n hash_variant['Polyphen2_HVAR_pred'] = str(hash_fields.get('Polyphen2_HVAR_pred', '.')[0])\n hash_variant['LRT_score'] = str(hash_fields.get('LRT_score', '.')[0])\n hash_variant['LRT_pred'] = str(hash_fields.get('LRT_pred', '.')[0])\n hash_variant['MutationTaster_score'] = str(hash_fields.get('MutationTaster_score', '.')[0])\n hash_variant['MutationTaster_pred'] = str(hash_fields.get('MutationTaster_pred', '.')[0])\n hash_variant['MutationAssessor_score'] = str(hash_fields.get('MutationAssessor_score', '.')[0])\n hash_variant['MutationAssessor_pred'] = str(hash_fields.get('MutationAssessor_pred', '.')[0])\n hash_variant['FATHMM_score'] = str(hash_fields.get('FATHMM_score', '.')[0])\n hash_variant['FATHMM_pred'] = str(hash_fields.get('FATHMM_pred', '.')[0])\n hash_variant['PROVEAN_score'] = str(hash_fields.get('PROVEAN_score', '.')[0])\n hash_variant['PROVEAN_pred'] = str(hash_fields.get('PROVEAN_pred', '.')[0])\n hash_variant['VEST3_score'] = str(hash_fields.get('VEST3_score', '.')[0])\n hash_variant['CADD_raw'] = str(hash_fields.get('CADD_raw', '.')[0])\n hash_variant['CADD_phred'] = str(hash_fields.get('CADD_phred', '.')[0])\n hash_variant['DANN_score'] = str(hash_fields.get('DANN_score', '.')[0])\n hash_variant['fathmm-MKL_coding_score'] = str(hash_fields.get('fathmm-MKL_coding_score', '.')[0])\n hash_variant['fathmm-MKL_coding_pred'] = str(hash_fields.get('fathmm-MKL_coding_pred', '.')[0])\n hash_variant['MetaSVM_score'] = str(hash_fields.get('MetaSVM_score', '.')[0])\n hash_variant['MetaSVM_pred'] = str(hash_fields.get('MetaSVM_pred', '.')[0])\n hash_variant['MetaLR_score'] = str(hash_fields.get('MetaLR_score', '.')[0])\n hash_variant['MetaLR_pred'] = str(hash_fields.get('MetaLR_pred', '.')[0])\n hash_variant['integrated_fitCons_score'] = str(hash_fields.get('integrated_fitCons_score', '.')[0])\n hash_variant['integrated_confidence_value'] = str(hash_fields.get('integrated_confidence_value', '.')[0])\n hash_variant['GERP++_RS'] = str(hash_fields.get('GERP++_RS', '.')[0])\n hash_variant['phyloP7way_vertebrate'] = str(hash_fields.get('phyloP7way_vertebrate', '.')[0])\n hash_variant['phyloP20way_mammalian'] = str(hash_fields.get('phyloP20way_mammalian', '.')[0])\n hash_variant['phastCons7way_vertebrate'] = str(hash_fields.get('phastCons7way_vertebrate', '.')[0])\n hash_variant['phastCons20way_mammalian'] = str(hash_fields.get('phastCons20way_mammalian', '.')[0])\n hash_variant['SiPhy_29way_logOdds'] = str(hash_fields.get('SiPhy_29way_logOdds', '.')[0])\n\n l_samples = r.samples[::]\n l_sample_ids = []\n for sample in l_samples:\n sample_id = sample.sample\n sample_gt = sample.data.GT\n hash_variant[sample_id] = sample_gt\n l_sample_ids.append(sample_id)\n\n hash_table[(chrom, pos, alt)] = hash_variant\n\n return hash_table, l_sample_ids", "def create_information_dictionary_for_sites(hpo_dfs, selected_hpo_names,\n most_popular_race_cids):\n\n racial_percentages = {}\n\n # want to get the percentages for each of the race concept IDs\n for race_concept_id in most_popular_race_cids:\n race_percentage_list = []\n\n # want to look at the sites in parallel - access their dataframe\n for hpo in selected_hpo_names:\n df = hpo_dfs[hpo]\n temp = df.loc[df['race_concept_id'] == race_concept_id]\n\n if temp.empty:\n race_percentage_list.append(0)\n else:\n val = float(temp['percent_of_site_persons']) # convert to float\n race_percentage_list.append(val)\n\n racial_percentages[race_concept_id] = race_percentage_list\n\n return racial_percentages", 
"def load_vcf_data(vcf_file):\n \n if(vcf_file[-3:]==\".gz\"):\n vcf_data=gzip.open(vcf_file, \"r\")\n else:\n vcf_data=open(vcf_file, \"r\")\n \n snp_names=[]\n snp_pos=[]\n genotype_data=[]\n\n missing=0\n \n for line in vcf_data:\n\n if line[0:2] == '##':\n continue\n elif line[0:1] == '#':\n data=line[1:-1]\n data=data.split(\"\\t\")\n if data[0:9]==[\"CHROM\", \"POS\", \"ID\", \"REF\", \"ALT\", \"QUAL\", \"FILTER\", \"INFO\", \"FORMAT\"]:\n sample_names=data[9:]\n else:\n print data[0:9]\n raise Exception(\"Bad vcf header line\")\n else:\n data=line[:-1]\n data=data.split(\"\\t\")\n\n if len(data[4].split(\",\"))>1: \n print \"Warning: ignoring multi alleleic site at \" + data[0]+\":\"+data[1] \n continue # multi-allelic sites. \n\n if data[2] != \".\":\n snp_names.append(data[2])\n else:\n snp_names.append(data[0]+\":\"+data[1])\n\n snp_pos.append(int(data[1]))\n\n if not all([(x[0]==\".\" and x[2]==\".\") or (x[0] in [\"0\", \"1\"] and x[2] in [\"0\", \"1\"]) for x in data[9:]]):\n raise Exception(\"Could not read line: \" + line) \n \n genotype_data.append([ 3 if x[0]==\".\" and x[2]==\".\" else int(x[0])+int(x[2]) for x in data[9:] ])\n\n return {\"sample_names\":sample_names, \"snp_names\":snp_names, \"snp_pos\":snp_pos, \"genotype_data\":genotype_data}", "def extract_pi_region(vcf_file,chrom,start,end,mincov=0,maxcov=10000,inds=\"all\",bgzip=True,min_nsites=0,min_variants=0,verbose=\"min\",called=True,output=\"pi\"):\n\tinput_vcf=vcf.Reader(fsock=None, filename=vcf_file, compressed=bgzip, prepend_chr=\"False\", strict_whitespace=False)#open the vcf parser\n\tif inds==\"all\" or inds==[\"all\"]:inds=input_vcf.samples# transform \"all\" in a list of all individuals in the vcf\n\t#Function\n\tpi_values=[]#list \n\tnsites_considered=0#iterator for sampling frequency\n\ttotal_nsites=0\n\tnvariants=0# iterator for sites that are varying\n\t###identify individual to remove when calculating stats\n\tinds_to_delete=[]\n\tfor i,ind in enumerate(input_vcf.samples):#check which ind is ion sample and compare it to our list of inds\n\t\t if ind not in inds:#delete this ind\n\t\t \tinds_to_delete.append(i)\n\t#go along the region\n\tif chrom!=\"all\":\n\t\tfor record in input_vcf.fetch(chrom,start,end):# for every site\n\t\t\tcond=checkRecord_Cov(input_vcf,record,mincov,maxcov,inds=inds,called=True,nalleles=[1,2])# check if the site respect our condition\n\t\t\ttotal_nsites+=1\n\t\t\tif cond:# if it does\n\t\t\t\tnsites_considered+=1 \n\t\t\t \tif total_nsites%100000==0: print total_nsites,\"sites\",nsites_considered,\"sites passed filter\"\n\t\t\t \tfor index in sorted(inds_to_delete)[::-1]:#remove the individuals we do not want\n\t\t\t \t\tdel record.samples[index]\n\t\t\t \tif verbose==True:print record.POS\n\t\t\t \tif verbose==True:print \"inds\",inds\t\t \t\n\t\t\t \tif verbose==True:print \"GT\",[sample[\"GT\"] for sample in record.samples] \n\t\t\t \tif verbose==True:print \"DP\",[sample[\"DP\"] for sample in record.samples]\n\t\t\t\tpi_values.append(record.nucl_diversity)#calculate pi\n\t\t\t\tif record.nucl_diversity>0.0:nvariants+=1\n\t\t\t#compute total information for the window\n\telif chrom==\"all\":\n\t\tfor record in input_vcf:# for every site\n\t\t\tcond=checkRecord_Cov(input_vcf,record,mincov,maxcov,inds=inds,called=True,nalleles=[1,2])# check if the site respect our condition\n\t\t\ttotal_nsites+=1\n\t\t\tif cond:# if it does\n\t\t\t\tnsites_considered+=1\n\t\t\t \tif total_nsites%100000==0: print total_nsites,\"sites\",nsites_considered,\"sites passed filter\"\n\t\t\t 
\tfor index in sorted(inds_to_delete)[::-1]:#remove the individuals we do not want\n\t\t\t \t\tdel record.samples[index]\n\t\t\t \tif verbose==True:print record.POS\n\t\t\t \tif verbose==True:print \"inds\",inds\t\t \t\n\t\t\t \tif verbose==True:print \"GT\",[sample[\"GT\"] for sample in record.samples] \n\t\t\t \tif verbose==True:print \"DP\",[sample[\"DP\"] for sample in record.samples]\n\t\t\t\tpi_values.append(record.nucl_diversity)#calculate pi\n\t\t\t\tif record.nucl_diversity>0.0:nvariants+=1\n\tif verbose==True or verbose==\"min\":print \"nvariants:\",nvariants,\"nsites_considered:\",nsites_considered\n\tif output==\"pi\":\n\t\tif nsites_considered>=min_nsites and nvariants>=min_variants and len(pi_values):\n\t\t\tpi_value=sum(pi_values)/nsites_considered\t\t\n\t\t\treturn pi_value\n\t\telse:\n\t\t\treturn \"NA\"\n\telif output==\"extended\":\n\t\tif nsites_considered>=min_nsites and nvariants>=min_variants and len(pi_values):\n\t\t\tpi_value=sum(pi_values)/nsites_considered\t\t\n\t\t\treturn [nvariants,nsites_considered,pi_value]\n\t\telse:\n\t\t\treturn [nvariants,nsites_considered,\"NA\"]\n\telse:\n\t\traise Exception(\"incorrect output argumnent, should be pi or extended\")", "def _extractGloveVects():\n \n embeddings_index = {}\n\n with open(GLOVE_CORPUS_FILE) as f:\n for line in f:\n values = line.split()\n word = values[0].lower()\n if word not in _cachedStopWords:\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n\n return embeddings_index", "def process_VCF(input_vcf, targets_file, out_vcf = None) :\n\n\tfVCF_OUT = None\n\tif out_vcf is not None :\n\t\tfVCF_OUT = open(out_vcf, 'w')\n\tfDUP_OUT = open(targets_file, 'w')\n\n\tvariants_dict = {}\n\tvariants_list = []\n\tnum_redundant, num_kept = 0, 0\n\tfINVCF = open(input_vcf, 'r')\n\tfor line in fINVCF :\n\t\tif line.startswith('#') :\n\t\t\tif line.startswith(\"#CHROM\") :\n\t\t\t\tindividuals = re.split('\\t', line.strip())[9:]\n\t\t\t\tstdout.write(\"%d individuals included in the VCF file: %s\\n\" %(len(individuals), input_vcf))\n\t\t\tif fVCF_OUT :\n\t\t\t\tfVCF_OUT.write(line)\n\t\telse :\n\t\t\ttmp_line = re.split('\\t', line.strip())\n\t\t\tref_base = tmp_line[3]\n\t\t\talt_base = tmp_line[4]\n\t\t\tchrom_id = tmp_line[0]\n\t\t\tchrom_pos = tmp_line[1]\n\t\t\tqual = tmp_line[5]\n\t\t\tfilter = tmp_line[6]\t\t\t\t\t# PASS or FILTERED by VQSR #\n\t\t\t# fix sites having different types of calls: redundant calls #\n\t\t\tif not variants_dict.has_key(chrom_id+':'+chrom_pos) :\n\t\t\t\tvariants_dict[chrom_id+':'+chrom_pos] = line.strip()\n\t\t\t\tvariants_list.append(chrom_id+':'+chrom_pos)\n\t\t\telse :\n\t\t\t\tnum_redundant += 1\n\t\t\t\tsame_site_diff_call = re.split('\\t', variants_dict[chrom_id+':'+chrom_pos])\n\t\t\t\ttmp_qual = same_site_diff_call[5]\n\t\t\t\ttmp_filter = same_site_diff_call[6]\n\t\t\t\ttmp_alt_base = same_site_diff_call[4]\n\t\t\t\tfDUP_OUT.write(\"%s\\n%s\\n\" %(variants_dict[chrom_id+':'+chrom_pos], line.strip()))\n\t\t\t\tif (tmp_filter != \"PASS\" and filter != \"PASS\") or (filter == \"PASS\" and tmp_filter == \"PASS\") :\t\t# if two different call both passed the VQSR or both not, we remove it from the final call set #\t\n\t\t\t\t\tvariants_dict.pop(chrom_id+':'+chrom_pos)\n\t\t\t\t\tvariants_list.remove(chrom_id+':'+chrom_pos)\n\t\t\t\t\tif filter == \"PASS\" :\n\t\t\t\t\t\tstdout.write(chrom_id+\" \"+chrom_pos+\" both pass\\n\")\n\t\t\t\t\telse :\n\t\t\t\t\t\tstdout.write(chrom_id+\" \"+chrom_pos+\" both filtered\\n\")\n\t\t\t\telif filter == \"PASS\" 
and tmp_filter != filter :\n\t\t\t\t\tstdout.write(chrom_id+\" \"+chrom_pos + \" second kept\\n\")\n\t\t\t\t\tvariants_dict[chrom_id+':'+chrom_pos] = line.strip()\n\t\t\t\t\tnum_kept += 1\n\t\t\t\telif tmp_filter == \"PASS\" and tmp_filter != filter :\n\t\t\t\t\tstdout.write(chrom_id+\" \"+chrom_pos + \" first kept\\n\")\n\t\t\t\t\tnum_kept += 1\n\tstdout.write(\"%d\\t%d\\n\" %(num_redundant, num_kept))\n\n\tif fVCF_OUT :\n\t\tfor i in range(len(variants_list)) :\n\t\t\tfVCF_OUT.write(\"%s\\n\" %(variants_dict[variants_list[i]]))\n\t\tfVCF_OUT.close()\n\tfINVCF.close()", "def _generate_tfs_dfs(self) -> dict:\n tfs, dfs = {}, {}\n\n for file in os.listdir(self.processed_path):\n doc_path = f\"{self.processed_path}/{file}\"\n if doc_path not in tfs:\n tfs[doc_path] = {}\n with open(doc_path, 'r') as f:\n text = f.readline()\n terms = set(text.split())\n for term in terms:\n tfs[doc_path][term] = text.count(term)\n\n if term not in dfs:\n dfs[term] = 1\n else:\n dfs[term] += 1\n\n return tfs, dfs", "def read_varscan_vcf(vcf_file, min_depth):\n vcf = {}\n with open(vcf_file) as fh:\n for line in fh:\n row = line.split('\\t')\n chrom = row[0]\n bp = row[1]\n ref = row[3]\n alt = row[4]\n info = row[7]\n\n read_depth = int(info.partition('DP=')[-1].partition(';')[0])\n allele_freq = float(info.partition('AF1=')[-1].partition(';')[0])\n\n if read_depth >= min_depth:\n vcf['{0} {1}'.format(chrom, bp)] = '{0}:{1}:{2}'.format(allele_freq, ref, alt)\n\n else:\n vcf['{0} {1}'.format(chrom, bp)] = 'NA'\n\n return vcf", "def load_data(self, f):\n D = {}\n P = {}\n v = 1\n with open(f) as fp:\n lines = fp.read().split(\"\\n\")\n for line in lines[1:]:\n if(len(line.strip()) > 0):\n parts = line.split(\" \")\n P[v] = (decimal.Decimal(parts[0]), decimal.Decimal(parts[1]))\n v += 1\n\n\n for p in P:\n D[p] = {}\n p1 = P[p]\n for d in P:\n #if d == p:\n # continue\n #else:\n p2 = P[d]\n D[p][d] = math.sqrt(math.pow(p1[0] - p2[0], 2) + math.pow(p1[1] - p2[1], 2))\n\n return P, D", "def fCZs(self) -> Dict[Tuple[int, ...], Optional[float]]:\n return {tuple(es.targets): es.fCZ for es in self.edges_specs}", "def getcontactcongressdict(ccdump):\n d = {}\n for line in ccdump.strip().split('\\n'):\n (district, name, party, dc_office, dc_voice, district_voice, email_form, website) = line.split('\\t')\n dist = ''.join( (district[:2], '-', district[2:]) )\n d[dist] = email_form\n return d", "def write_vcf(snps_dict):\n # Header of the vcf file\n header = f\"\"\"#REF: {REFERENCE_FILE}\n#READS: {READS_FILE}\n#K: {K_VALUE}\n#MAX_SUBST: {H_VALUE}\n#MIN_ABUNDANCE: {M_VALUE}\n\"\"\"\n\n with open(OUTPUT_FILE, 'w') as vcf:\n vcf.write(header)\n for position in sorted(snps_dict.keys()): # For each snp position found,\n # count for each nucleotid the number of time it was found in reads mapped\n # at this position\n nA = 0\n nT = 0\n nC = 0\n nG = 0\n for nucleotid in snps_dict[position]:\n if nucleotid == \"A\":\n nA += 1\n elif nucleotid == \"T\":\n nT += 1\n elif nucleotid == \"G\":\n nG += 1\n else:\n nC += 1\n if nA >= int(M_VALUE): # If the same nucleotid was found more than M_VALUE time\n # in reads mapped at this position, write it in the vcf file.\n vcf.write(f\"{position}\\t{GENOME[position]}\\tA\\t{nA}\\n\")\n if nT >= int(M_VALUE):\n vcf.write(f\"{position}\\t{GENOME[position]}\\tT\\t{nT}\\n\")\n if nG >= int(M_VALUE):\n vcf.write(f\"{position}\\t{GENOME[position]}\\tG\\t{nG}\\n\")\n if nC >= int(M_VALUE):\n vcf.write(f\"{position}\\t{GENOME[position]}\\tC\\t{nC}\\n\")", "def get_site_vecs(struct: Structure):\n vecs 
= get_soap_vec(struct)\n site_vecs = []\n for i, site in enumerate(struct):\n site_vecs.append(SiteVec(species=site.species_string, site=site, vec=vecs[i]))\n return site_vecs", "def read_passages():\n bad_passages = pd.read_csv(\"project_files/passages_in_v2.csv\", sep=\";\")\n bad_passages.drop(columns='index',inplace=True)\n # Get all the ships\n df_vessels = bad_passages[['ShipID', 'CEMTKlasse', 'Width', 'Length', 'Height']].drop_duplicates(inplace=False)\n df_vessels = df_vessels.reset_index(drop=True)\n\n vessel_dict = {}\n vessels = []\n\n # Iterate over the vessels\n for index, row in df_vessels.iterrows():\n # Make a new object\n\n vessel = Vessel(row['ShipID'], row['Length'], row['Width'], row['Height'], row['CEMTKlasse'])\n vessel_dict[row['ShipID']] = vessel\n\n # Connect the route\n for index, row in bad_passages.iterrows():\n # Get the corresponding vessel\n ship_id = row['ShipID']\n trajectory_data = row\n\n vessel = vessel_dict.get(ship_id)\n vessel.trajectory_route.append(trajectory_data)\n\n return vessel_dict", "def calc_div_bed(vcfdata, bed):\n avg_pairwise_divs = []\n with open(bed, 'r') as f:\n for line in f:\n tmp = line.strip().split()\n # Add 1 to the end, for 0-based BED coords\n start = int(tmp[1])\n end = int(tmp[2])\n # Some intervals are of 0 length - return 0 for these.\n if start == end:\n continue\n # Get the pi values that are between the current BED interval\n in_interval = [\n vcfdata[k]\n for k in vcfdata.keys()\n if (k >= start and k <= end)]\n # Next, find the average pairwise diversity. We will just sum the\n # diversities of the individual variants, then divide by the\n # number of sites in the interval.\n pair_div = sum(in_interval) / float(end - start - len(in_interval))\n # Then, append it to the list\n avg_pairwise_divs.append((str(start), str(end), str(pair_div)))\n return avg_pairwise_divs", "def compute_covar_from_instance_centroids(instance_centroids):\n\n cov_mat_allStructures = {}\n radii_allStructures = {}\n ellipsoid_matrix_allStructures = {}\n for name_s, centroids in sorted(instance_centroids.items()):\n centroids2 = np.array(centroids)\n cov_mat = np.cov(centroids2.T)\n cov_mat_allStructures[name_s] = cov_mat\n u, s, vt = np.linalg.svd(cov_mat)\n # print name_s, u[:,0], u[:,1], u[:,2],\n radii_allStructures[name_s] = np.sqrt(s)\n ellipsoid_matrix_allStructures[name_s] = vt\n\n return cov_mat_allStructures, radii_allStructures, ellipsoid_matrix_allStructures", "def _detectors3(self, hdr):\n # Called AnalysisParam in OpenMIMS, only last part\n d = {}\n d['TIC'] = self._electron_multiplier(hdr)\n\n for n in range(1, 8):\n det = 'Detector {}'.format(n)\n d[det] = {}\n d[det]['fc background setup positive'], \\\n d[det]['fc background setup negative'] = \\\n unpack(self._bo + '2i', hdr.read(8))\n\n for n in range(1, 8):\n det = 'Detector {}'.format(n)\n det_type = unpack(self._bo + 'i', hdr.read(4))[0]\n d[det]['detector'] = _detectors.get(det_type, str(det_type))\n return d", "def pairwise_diversity(calls):\n # Count up the number of reference and alternate genotypes.\n if 0 in calls:\n ref_count = calls.count(0)\n else:\n return 0\n if 1 in calls:\n alt_count = calls.count(1)\n else:\n return 0\n # This sample size will change depending on how many non-missing genotypes\n # there are.\n total_count = ref_count + alt_count\n # Calculate up the similarities based on the number of reference and\n # alternative genotypes. 
Calculate the number of pairwise comparisons\n # that were made.\n ref_sim = n_choose_r(ref_count, 2)\n alt_sim = n_choose_r(alt_count, 2)\n total_comp = n_choose_r(total_count, 2)\n # Then pairwise diversity is 1-[(ref_sim + alt_sim)/total_comp]\n return 1 - ((ref_sim + alt_sim) / float(total_comp))", "def parse_vnf_descriptor(self):\n charms = {}\n\n # You'd think this would be explicit, but it's just an incremental\n # value that should be consistent.\n vnf_member_index = 0\n\n \"\"\"Get all vdu and/or vdu config in a descriptor.\"\"\"\n config = self.get_config()\n for cfg in config:\n if 'juju' in cfg:\n\n # Get the name to be used for the deployed application\n application_name = n2vc.vnf.N2VC().FormatApplicationName(\n self.ns_name,\n self.vnf_name,\n str(vnf_member_index),\n )\n\n charm = {\n 'application-name': application_name,\n 'proxy': True,\n 'vnf-member-index': vnf_member_index,\n 'vnf-name': self.vnf_name,\n 'name': None,\n 'initial-config-primitive': {},\n 'config-primitive': {},\n }\n\n juju = cfg['juju']\n charm['name'] = juju['charm']\n\n if 'proxy' in juju:\n charm['proxy'] = juju['proxy']\n\n if 'initial-config-primitive' in cfg:\n charm['initial-config-primitive'] = \\\n cfg['initial-config-primitive']\n\n if 'config-primitive' in cfg:\n charm['config-primitive'] = cfg['config-primitive']\n\n charms[application_name] = charm\n\n # Increment the vnf-member-index\n vnf_member_index += 1\n\n self.charms = charms", "def _CalculateDissimilarities(self):\n print 'Calculating dissimilarities'\n num_bins = len(self._mix_bins)\n\n for left_cluster_id, left_bin_collection in (self\n ._left_bin_collection_by_cluster_id.iteritems()):\n for right_cluster_id, right_bin_collection in (self\n ._right_bin_collection_by_cluster_id.iteritems()):\n # This operation can be easily parallelized via multiprocessing.\n d = self._CalculateDissimilarityBetweenClusters(\n left_cluster_id, left_bin_collection, right_cluster_id,\n right_bin_collection)\n print 'Left cluster: %s, Right cluster: %s, dissimilarity: %s' % (\n d.left_cluster_id, d.right_cluster_id, d.dissimilarity_score)\n self._dissimilarities.append(d)\n\n print 'Dissimilarities are calculated'" ]
[ "0.55010855", "0.5443325", "0.5411451", "0.5253437", "0.52351487", "0.5203242", "0.51977223", "0.51912194", "0.51445794", "0.5135873", "0.5078174", "0.507168", "0.5062789", "0.50090235", "0.50023085", "0.4940941", "0.4905916", "0.48750815", "0.48076475", "0.47970852", "0.47717917", "0.4757438", "0.47439772", "0.4717558", "0.46868274", "0.46686247", "0.46662092", "0.46645615", "0.46255094", "0.4625124" ]
0.6337871
0
Read a BED file, and for each interval, calculate the average pairwise diversity in it. Print the interval start, end, and the average pairwise diversity.
def calc_div_bed(vcfdata, bed): avg_pairwise_divs = [] with open(bed, 'r') as f: for line in f: tmp = line.strip().split() # Add 1 to the end, for 0-based BED coords start = int(tmp[1]) end = int(tmp[2]) # Some intervals are of 0 length - return 0 for these. if start == end: continue # Get the pi values that are between the current BED interval in_interval = [ vcfdata[k] for k in vcfdata.keys() if (k >= start and k <= end)] # Next, find the average pairwise diversity. We will just sum the # diversities of the individual variants, then divide by the # number of sites in the interval. pair_div = sum(in_interval) / float(end - start - len(in_interval)) # Then, append it to the list avg_pairwise_divs.append((str(start), str(end), str(pair_div))) return avg_pairwise_divs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parseBed(fname):\n \n handle=open(fname,'r')\n for line in handle:\n if line.startswith(\"#\"):\n continue\n if line.startswith(\"track\") or line.startswith(\"browser\"):\n continue\n vals=line.rstrip().split(\"\\t\")\n chr = vals[0]\n start = int(vals[1])\n end = int(vals[2])\n if len(vals)>=3:\n strand = vals[5]\n score = float(vals[4])\n name = vals[3]\n res = Interval(chr,start,end)\n if len(vals)>3:\n res.strand = strand\n res.score = score\n res.name = name\n res = Interval(chr,start,end,strand=strand,score=score,name=name)\n if len(vals)>6:\n res = SplicedInterval(res.chr,res.start,res.end,res.strand,score=res.score,name=res.name,exonLengths=vals[10],exonOffsets=vals[11])\n #res=dict(zip(bed_fields,vals))\n #res['start'],res['end'],res['score'] = int(res['start']),int(res['end']),int(res['score'])\n yield res", "def get_nodes_and_weight_from_file(file, template_start, template_end):\n with open(file, \"rt\") as f:\n data = f.readlines()\n edge_list = [] # This list stores the tuples of valid start and end regions\n edge_duration_dict = {} # Per tuple as key, the dict stores durations per travel as value\n # The following for loop calculates the duration per travel and adds it to edge_duration_dict\n for line in data[1:]: # Starts from 1 since there is a header line\n split_line = line.split(\",\")\n # I found out the some lines have wrong hours figures, such as 25h, so I elimenate them\n try: # I found out the some lines have wrong hours figures, such as 25h, so I elimenate them\n start_time = datetime.datetime.strptime(split_line[1], template_start)\n end_time = datetime.datetime.strptime(split_line[3], template_end)\n travel_duration = end_time-start_time\n except:\n pass\n if split_line[0] in VALID_NODES and split_line[2] in VALID_NODES:\n nodes_frm_to = (split_line[0], split_line[2]) # Defining the start and end regions as the edge\n # If the edge not in edge_list yet, append to the list, and add its duration to duration dict values\n if nodes_frm_to not in edge_list:\n edge_list.append(nodes_frm_to)\n edge_duration_dict[nodes_frm_to] = [travel_duration]\n # If the edge already in edge_list yet, just add its duration to duration dict values\n else:\n edge_duration_dict[nodes_frm_to].append(travel_duration)\n # At this point we have pairs of regions with a list of durations per each\n edge_and_duration_dict = {} # This dictionary stores the mean duration per edge\n for edge, durations in edge_duration_dict.items():\n duration_array = np.array(durations) # Turning the duration list into an array for time saving\n edge_and_duration_dict[edge] = duration_array.mean()\n # At this point we have pairs of regions (from - to) with the mean travel duration for each pair\n # The rest of the code prepares the information to be aligned with Node and Graph definitions\n neighbors_by_region = {}\n start_regions = set([tup[0] for tup in edge_list]) # These are the 'from' regions of every edge\n # For each one of the 'from' regions, make neighbors dict(region and duration)\n # and stores in neighbors_by_region\n for region in start_regions:\n neighbors_dict = {}\n for edge, duration in edge_and_duration_dict.items():\n if edge[0] == region:\n neighbors_dict[edge[1]] = duration\n neighbors_by_region[region] = neighbors_dict\n\n return neighbors_by_region", "def readSegmotion(inDir, inSuffix, startTime, endTime):\n\t\n\tnumTrackTimes = 0\n\ttotNumCells = 0\n\tstormCells = {}\n\tdates = [] \n\t\n\t# Read in Segmotion files\n\tfor root, dirs, files in os.walk(inDir):\n\t\tif inSuffix != '' 
and not (files and not dirs and os.path.split(root)[-1] == inSuffix): continue\n\t\tfor trackFile in files:\n\t\t\tif trackFile.endswith('.xml'):\n\t\t\t\t\n\t\t\t\t# Skip hidden files\n\t\t\t\tif trackFile.startswith('._'): continue\n\t\t\t\t\n\t\t\t\t# Check if file falls in date range\n\t\t\t\ttry:\n\t\t\t\t\tfileDate = datetime.datetime.strptime(str(trackFile).split('.')[0], '%Y%m%d-%H%M%S')\n\t\t\t\texcept ValueError:\n\t\t\t\t\tprint('File ' + str(trackFile) + ' has an invalid name. Expected format YYYYMMDD-hhmmss.xml...')\n\t\t\t\t\tcontinue\n\t\t\t\tif not startTime <= fileDate < endTime:\n\t\t\t\t\tcontinue\n\t\t\t\tif fileDate.date() not in dates: dates.append(fileDate.date())\n\t\t\t\t\n\t\t\t\t# Open file\n\t\t\t\tf = open(root + '/' + trackFile)\n\t\t\t\tlines = BeautifulSoup(f, 'html.parser').find_all('datacolumn')\n\t\t\t\tf.close()\n\t\t\t\t\n\t\t\t\tprint(trackFile)\n\t\t\t\tnumTrackTimes += 1\n\t\t\t\t\n\t\t\t\tnumCells = len(lines[2].find_all('item'))\n\t\t\t\t\n\t\t\t\tfor i in range(0, numCells):\n\t\t\t\t\ttime = fileDate\n\t\t\t\t\tlatr = float(str(lines[4].find_all('item')[i]).split('\"')[1])\n\t\t\t\t\tlat = float(str(lines[5].find_all('item')[i]).split('\"')[1])\n\t\t\t\t\tlonr = float(str(lines[6].find_all('item')[i]).split('\"')[1])\n\t\t\t\t\tlon = float(str(lines[7].find_all('item')[i]).split('\"')[1])\n\t\t\t\t\torientation = float(str(lines[12].find_all('item')[i]).split('\"')[1])\n\t\t\t\t\ttrack = str(lines[13].find_all('item')[i]).split('\"')[1]\n\t\t\t\t\t\n\t\t\t\t\tcellID = totNumCells\n\t\t\t\t\tstormCells[cellID] = {'time': time, 'latr': latr, 'lat': lat, 'lonr': lonr, 'lon': lon, \n\t\t\t\t\t\t\t\t\t\t\t'orientation': orientation, 'track': track + '_' + str(fileDate.date()), 'old_track': track}\n\t\t\t\t\ttotNumCells += 1\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\n\treturn [stormCells, totNumCells, numTrackTimes, dates]", "def parseGalaxyCons(fname):\n handle=open(fname,'r')\n for line in handle:\n if line.startswith(\"#\"):\n continue\n if line.startswith(\"track\") or line.startswith(\"browser\"):\n continue\n vals=line.rstrip().split(\"\\t\")\n chr = vals[0]\n start = int(vals[1])\n end = int(vals[2])\n strand = vals[5]\n #Field[6] contains the average phastCons score\n score = float(vals[6])\n name = vals[3]\n res = Interval(chr,start,end,strand=strand,score=score,name=name)\n #res=dict(zip(bed_fields,vals))\n #res['start'],res['end'],res['score'] = int(res['start']),int(res['end']),int(res['score'])\n yield res", "def BEDreader(fname):\n\n bed_score = dict() \n bfh = open(fname)\n for line in bfh:\n line = line.strip('\\n\\r').split('\\t')\n assert len(line) == 5, '\\t'.join(line)\n bed_score[float(line[3])] = 1\n bfh.close()\n return bed_score.keys()", "def test_merge_intervals():\n\n a = pybedtools.example_bedtool(\"a.bed\") # path to test file a\n # This file looks like this:\n # chr1\t1\t100\tfeature1\t0\t+\n # chr1\t100\t200\tfeature2\t0\t+\n # chr1\t150\t500\tfeature3\t0\t-\n # chr1 900\t950\tfeature4\t0\t+\n\n assert len(a) == 4\n\n b = pybedtools.example_bedtool(\"b.bed\") # path to test file b\n # This file looks like this:\n # chr1\t155\t200\tfeature5\t0\t-\n # chr1\t800\t901\tfeature6\t0\t+\n\n assert len(b) == 2\n\n merged_bed = merge_intervals([a, b])\n assert len(merged_bed) == 2\n # Merged file looks like this:\n # chr1\t1\t500\n # chr1\t800\t950", "def make_bed_from_intervals(intdir):\n intfiles = [f for f in fs(intdir) if f.endswith('.list')]\n for intfile in intfiles:\n num = intfile.split(\"_\")[-1].replace(\".list\", 
\"\")\n lines = []\n with open(intfile, 'r') as o:\n text = o.read().split(\"\\n\")\n for line in text:\n scaff, span = line.split(\":\")\n start, stop = span.split(\"-\")\n start, stop = (int(start) - 1, int(stop) - 1)\n lines.append((scaff, start, stop))\n make_bed(lines, num)\n print('\\t\\tcreated %s bedfiles for %s from interval files' % (len(intfiles), ref))", "def diff(args):\n from jcvi.utils.cbook import SummaryStats\n\n p = OptionParser(diff.__doc__)\n opts, args = p.parse_args(args)\n\n if len(args) != 1:\n sys.exit(not p.print_help())\n\n (simplefile,) = args\n fp = open(simplefile)\n data = [x.split() for x in fp]\n spans = []\n for block_id, ab in groupby(data[1:], key=lambda x: x[0]):\n a, b = list(ab)\n aspan, bspan = a[4], b[4]\n aspan, bspan = int(aspan), int(bspan)\n spans.append((aspan, bspan))\n aspans, bspans = zip(*spans)\n dspans = [b - a for a, b, in spans]\n s = SummaryStats(dspans)\n print(\"For a total of {0} blocks:\".format(len(dspans)), file=sys.stderr)\n print(\"Sum of A: {0}\".format(sum(aspans)), file=sys.stderr)\n print(\"Sum of B: {0}\".format(sum(bspans)), file=sys.stderr)\n print(\"Sum of Delta: {0} ({1})\".format(sum(dspans), s), file=sys.stderr)", "def solution():\n file = get_source()\n results = []\n for c in range(int(file.readline())):\n grades = sorted([int(v) for v in file.readline().split()][1:])\n average = float(sum(grades))/len(grades)\n first = next((i for i,g in enumerate(grades) if g > average), len(grades))\n people_above_average = len(grades) - first\n results.append(people_above_average * 100.0 / len(grades))\n for r in results:\n print '%.3f%%' % r", "def sum_pairwise_differences(vcf_file,chrom,start,end,mincov=0,maxcov=10000,inds=\"all\",bgzip=True,called=True,output=\"sum\",nb_ind_with_min_cov=\"all\"):\n\t###CHOOSE THE RIGHT VCF\n\tinput_vcf=vcf.Reader(fsock=None, filename=vcf_file, compressed=bgzip, prepend_chr=\"False\", strict_whitespace=False)#open the vcf parser\n\tif inds==\"all\" or inds==[\"all\"]:inds=input_vcf.samples# transform \"all\" in a list of all individuals in the vcf\n\t#Function\n\tsum_pairwise=0#iterator for sampling frequency\n\tnsites_ok=0\n\t###identify individual to remove when calculating stats\n\tinds_to_delete=[]\n\tfor i,ind in enumerate(input_vcf.samples):#check which ind is ion sample and compare it to our list of inds\n\t\t if ind not in inds:#delete this ind\n\t\t \tinds_to_delete.append(i)\n\t#go along the region\n\tif chrom!=\"all\":\n\t\tcheck=len(sh.tabix(vcf_file,str(chrom)+\":\"+str(start)+\"-\"+str(end)))\n\t\t#print \"check;' \",check,\"'\"\n\t\tif check==0: \n\t\t\tif output==\"sum\":\n\t\t\t\treturn 0\n\t\t\telif output==\"extended\":\n\t\t\t\treturn [0,0]\n\t\tfor record in input_vcf.fetch(chrom,start,end):# for every site\n\t\t\t#print \"HERE\"\n\t\t\t#print input_vcf,record,mincov,maxcov, inds, nb_ind_with_min_cov\n\t\t\t#raise Exception\n\t\t\tcond=checkSnp_Cov(input_vcf,record,mincov,maxcov,inds=inds,nalleles=[1,2],nb_ind_with_min_cov=nb_ind_with_min_cov)# check if the site respect our condition\n\t\t\tprint inds\n\t\t\t#print \"cond\",cond\n\t\t\t#print \"HERE2\"\n\t\t\tif cond:# if it does\n\t\t\t \tb= record.nucl_diversity\n\t\t\t \tfor index in sorted(inds_to_delete)[::-1]:#remove the individuals we do not want\n\t\t\t \t\tdel record.samples[index]\n\t\t\t \t\t#print record.nucl_diversity\n\t\t\t \t#print record.samples\n\t\t\t\tnsites_ok+=1\n\t\t\t\t#print record.nucl_diversity\n\t\t\t\tprint \"samples pairwise\", len(record.samples),record.nucl_diversity 
\n\t\t\t\tsum_pairwise+=record.nucl_diversity \n\t\t\t\t#if b!=record.nucl_diversity : print \" old and new diversity\", b,record.nucl_diversity\n\t\t\t#compute total information for the window\n\telif chrom==\"all\":\n\t\tfor record in input_vcf:# for every site\n\t\t\tcond=checkSnp_Cov(input_vcf,record,mincov,maxcov,inds=inds,nalleles=[1,2],nb_ind_with_min_cov=nb_ind_with_min_cov)# check if the site respect our condition\n\t\t\tif cond:# if it does\n\t\t\t \tfor index in sorted(inds_to_delete)[::-1]:#remove the individuals we do not want\n\t\t\t \t\tdel record.samples[index]\n\t\t\t \t#print record.samples\n\t\t\t\tnsites_ok+=1\n\t\t\t\tsum_pairwise+=record.nucl_diversity \n\t\t\t#compute total information for the window\n\tif output==\"sum\":\n\t\treturn sum_pairwise\n\telif output==\"extended\":\n\t\treturn [sum_pairwise,nsites_ok]\n\t#Go in normal vcf and count sites", "def getMinMaxIntron(inputBed, percentBetweenLow, percentBetweenHigh):\n\n minIntron = 1000000\n maxIntron = 0\n countTotal = 0\n countBetween = 0\n sizeList = []\n exonSizeList = []\n numGenes = 0\n for line in open(inputBed):\n if line.startswith(\"track\"):\n continue\n\n numGenes += 1\n pieces = line.split()\n sizes = [int(x) for x in pieces[10].split(\",\")[:-1]]\n exonSizeList.extend(sizes)\n starts = [int(x) for x in pieces[11].split(\",\")[:-1]]\n for i in range(1, len(starts)):\n countTotal += 1\n intronSize = starts[i] - starts[i-1] - sizes[i-1]\n sizeList.append(intronSize)\n if intronSize >= percentBetweenLow and intronSize <= percentBetweenHigh:\n countBetween += 1\n minIntron = min(minIntron, intronSize)\n maxIntron = max(maxIntron, intronSize)\n\n print \"The largest intron was %s and the smallest intron was %s\" % (maxIntron, minIntron)\n print \"There were %s introns total in %s genes, and %s (%s%%) were between %s and %s (inclusive)\" % (countTotal, numGenes, countBetween, \n (countBetween*100.0/countTotal), percentBetweenLow, percentBetweenHigh)\n print \"The average intron size is %.2f\" % ( float(sum(sizeList))/len(sizeList))\n print \"Average number of introns per gene is %.2f\" % ( float(countTotal) / numGenes) \n\n print \"Average exon size is %.2f\" % ( float(sum(exonSizeList)) / len(exonSizeList))", "def Read_MapGen(filename,stats = False):\n from numpy import array\n with open(filename,'rt') as file_:\n data = [s.strip() for s in file_.readlines()]\n\n Shorelines = []\n segment = []\n for line in data:\n if line == \"# -b\": #New segment beginning\n if segment: Shorelines.append(array(segment))\n segment = []\n else:\n segment.append(map(float,string.split(line)))\n if segment: Shorelines.append(array(segment))\n\n if stats:\n NumSegments = len(Shorelines)\n NumPoints = False\n for segment in Shorelines:\n NumPoints = NumPoints + len(segment)\n AvgPoints = NumPoints / NumSegments\n print(\"Number of Segments: \", NumSegments)\n print(\"Average Number of Points per segment: \", AvgPoints)\n\n return Shorelines", "def read(self, filePath):\n \n result = {\n 'coordinates': {\n 'count': 0,\n 'nodes': []\n },\n 'element_groups': { \n 'number_of_elements': 0,\n 'count': 0,\n 'groups': []\n },\n 'bars': [],\n 'materials': {\n 'count': 0,\n 'materials': []\n },\n 'geometric_properties': {\n 'count': 0\n },\n 'bcnodes': {\n 'count': 0\n },\n 'loads': {\n 'count': 0\n }\n }\n # print(result['coordinates']['nodes'])\n \n with open(filePath,'r') as f:\n lines = f.readlines()\n elementCounter = 0\n groupCounter = 0\n geometricCounter = 0\n\n for line in lines:\n line = line.strip()\n el = line.split(' ')\n 
\n if len(line) == 0:\n continue\n\n if len(line) != 0 and line[0] == \"*\":\n section = line[1:].lower()\n continue\n \n if section == 'coordinates':\n if len(el) == 1 :\n result[section]['count'] = el[0]\n else:\n result[section]['nodes'].append(Node(int(el[0]), float(el[1]), float(el[2])))\n \n elif section == 'element_groups':\n if len(line) == 1:\n result[section]['count'] = int(el[0])\n else: \n result[section]['groups'].append(Group(el[0], el[1], el[2]))\n result[section]['number_of_elements'] += int(el[1])\n\n elif section == 'incidences':\n groups = result['element_groups']['groups']\n nodes = result['coordinates']['nodes']\n print(el)\n\n currentGroup = groups[groupCounter]\n if (currentGroup.amount == 0):\n groupCounter += 1\n currentGroup = groups[groupCounter]\n \n print(\"Group n: {} count: {}\".format(currentGroup.n, currentGroup.amount))\n \n bar = Bar(el[0], nodes[int(el[1])-1], nodes[int(el[2])-1], groups[groupCounter])\n print(\n \"\"\"\n Bar {} created \n Start node: {} End Node: {} Group: {}\n \"\"\".format(bar.id, bar.startNode.n, bar.endNode.n, bar.group))\n result['bars'].append(bar)\n currentGroup.amount -= 1\n \n elif section == 'materials':\n if len(el) == 1:\n result[section]['count'] = el[0]\n groupCounter = 0\n else:\n material = Material(el[0], el[1], el[2])\n result[section]['materials'].append(material)\n result['element_groups']['groups'][groupCounter].setMaterial(material)\n groupCounter += 1\n\n elif section == 'geometric_properties':\n if geometricCounter == 0:\n result[section]['count'] = el[0]\n else:\n result['element_groups']['groups'][geometricCounter - 1].setSectionArea(\n el[0]\n )\n geometricCounter += 1\n\n elif section == 'bcnodes':\n if len(el) == 1:\n result[section]['count'] = el[0]\n else:\n nodeIndex = next((e for e, item in enumerate(\n result['coordinates']['nodes']) if item.n == int(el[0])), None\n )\n result['coordinates']['nodes'][nodeIndex].setRestriction(int(el[1]))\n\n elif section == 'loads':\n if len(el) == 1:\n result[section]['count'] = el[0]\n else:\n load = Load(el[1], el[2])\n nodeIndex = next((e for e, item in enumerate(\n result['coordinates']['nodes']) if item.n == int(el[0])), None\n )\n result['coordinates']['nodes'][nodeIndex].addLoad(load)\n\n for bar in result['bars']:\n bar.createLocalArray()\n\n print('---------- Parsing complete! 
----------')\n pprint(result)\n print('---------------------------------------')\n\n return result", "def processbins(filechromdict, chromlist, start, segments):\n chrombinaverage = []\n for key, value in filechromdict.iteritems():\n startchrom = []\n segmentchrom = []\n for i, row in enumerate(chromlist):\n if key == row:\n startchrom.append(start[i])\n segmentchrom.append(segments[i])\n if startchrom:\n startchromfull = np.array(startchrom).astype(float)\n segmentchromfull = np.array(segmentchrom).astype(float)\n bins = np.linspace(0, value, 5)\n digitized = np.digitize(startchromfull, bins)\n means = []\n keylist = []\n for i in range(1, len(bins)):\n temp = []\n keylist.append(key)\n for index, value in enumerate(digitized):\n if i == value:\n temp.append(segmentchromfull[index])\n if temp:\n means.append([str(bins[i])] + [str(np.array(temp).mean())])\n else:\n means.append([str(bins[i])] + [str(np.nan)])\n for i, item in enumerate(keylist):\n chrombinaverage.append([item] + means[i]) \n return chrombinaverage", "def read_file(filename):\n with open(filename, 'r') as file:\n # Read the first line containing number of\n # beds and persons\n amount_persons = int(file.readline())\n size_persons1 = int(file.readline())\n size_beds1 = int(file.readline())\n size_persons2 = amount_persons - size_persons1\n size_beds2 = amount_persons - size_beds1\n\n lines = file.readlines()\n\n # Create a list of first set of Person objects storing their\n # x,y coordinates and names\n persons_set1 = lines[:size_persons1]\n persons_set1 = list(map((lambda x: (Person(x.split()[0], float(x.split()[1]), float(x.split()[2])))),\n persons_set1))\n\n # Create a list of first set of Bed objects storing their\n # x,y coordinates and names\n beds_set1 = lines[size_persons1:size_persons1+size_beds1]\n beds_set1 = list(map((lambda x: (Bed(x.split()[0], float(x.split()[1]), float(x.split()[2]), int(x.split()[3])\n ))), beds_set1))\n\n # Create a list of second set of Person objects storing their\n # x,y coordinates and names\n persons_set2 = lines[size_persons1+size_beds1:size_persons1+size_beds1+size_persons2]\n persons_set2 = list(map((lambda x: (Person(x.split()[0], float(x.split()[1]), float(x.split()[2])))),\n persons_set2))\n\n # Create a list of second set of Bed objects storing their\n # x,y coordinates and names\n beds_set2 = lines[size_persons1+size_beds1+size_persons2:size_persons1+size_beds1+size_persons2+size_beds2]\n beds_set2 = list(map((lambda x: (Bed(x.split()[0], float(x.split()[1]), float(x.split()[2]), int(x.split()[3])\n ))), beds_set2))\n\n return persons_set1, beds_set1, persons_set2, beds_set2", "def segment(args):\n from jcvi.formats.base import SetFile\n\n p = OptionParser(segment.__doc__)\n p.add_option(\n \"--chain\",\n default=1,\n type=\"int\",\n help=\"Allow next N genes to be chained\",\n )\n opts, args = p.parse_args(args)\n\n if len(args) != 2:\n sys.exit(not p.print_help())\n\n idsfile, bedfile = args\n bed = Bed(bedfile)\n order = bed.order\n ids = SetFile(idsfile)\n losses = Grouper()\n skip = opts.chain\n for i, a in enumerate(bed):\n a = a.accn\n for j in range(i + 1, i + 1 + skip):\n if j >= len(bed):\n break\n b = bed[j].accn\n if a in ids:\n losses.join(a, a)\n if a in ids and b in ids:\n losses.join(a, b)\n\n losses = list(losses)\n singletons = [x for x in losses if len(x) == 1]\n segments = [x for x in losses if len(x) > 1]\n ns, nm, nt = len(singletons), len(segments), len(losses)\n assert ns + nm == nt\n\n # Summary for all segments\n for x in sorted(singletons) + 
sorted(segments):\n print(\n \"\\t\".join(\n str(x)\n for x in (\"|\".join(sorted(x)), len(x), estimate_size(x, bed, order))\n )\n )\n\n # Find longest segment stretch\n if segments:\n mx, maxsegment = max([(len(x), x) for x in segments])\n print(\"Longest stretch: run of {0} genes\".format(mx), file=sys.stderr)\n print(\" {0}\".format(\"|\".join(sorted(maxsegment))), file=sys.stderr)\n seg_asize = sum(estimate_size(x, bed, order) for x in segments)\n seg_bsize = sum(\n estimate_size(x, bed, order, conservative=False) for x in segments\n )\n else:\n seg_asize = seg_bsize = 0\n\n sing_asize = sum(estimate_size(x, bed, order) for x in singletons)\n sing_bsize = sum(\n estimate_size(x, bed, order, conservative=False) for x in singletons\n )\n total_asize = sing_asize + seg_asize\n total_bsize = sing_bsize + seg_bsize\n print(\n \"Singleton ({0}): {1} - {2} bp\".format(ns, sing_asize, sing_bsize),\n file=sys.stderr,\n )\n print(\n \"Segment ({0}): {1} - {2} bp\".format(nm, seg_asize, seg_bsize), file=sys.stderr\n )\n print(\n \"Total ({0}): {1} - {2} bp\".format(nt, total_asize, total_bsize),\n file=sys.stderr,\n )\n print(\n \"Average ({0}): {1} bp\".format(nt, (total_asize + total_bsize) / 2),\n file=sys.stderr,\n )", "def read_iers_EOP(input_file):\n #-- read data file splitting at line breaks\n with open(input_file,'r') as f:\n file_contents = f.read().splitlines()\n #-- number of data lines\n n_lines = len(file_contents)\n dinput = {}\n dinput['MJD'] = np.zeros((n_lines))\n dinput['x'] = np.zeros((n_lines))\n dinput['y'] = np.zeros((n_lines))\n #-- for each line in the file\n flag = 'I'\n counter = 0\n while (flag == 'I'):\n line = file_contents[counter]\n i = 2+2+2+1; j = i+8\n dinput['MJD'][counter] = np.float(line[i:j])\n i = j+1\n flag = line[i]\n i += 2; j = i+9\n dinput['x'][counter] = np.float(line[i:j])\n i = j+10; j = i+9\n dinput['y'][counter] = np.float(line[i:j])\n counter += 1\n #-- reduce to data values\n dinput['MJD'] = dinput['MJD'][:counter]\n dinput['x'] = dinput['x'][:counter]\n dinput['y'] = dinput['y'][:counter]\n #-- return the date, flag and polar motion values\n return dinput", "def parse(filehandle):\n\n intervalstart = False\n candidatestart = False\n candidates = []\n for line in filehandle:\n elems = [x.strip() for x in line.split('=')]\n # Filter useless lines\n if len(elems) < 2:\n continue\n if not intervalstart:\n if elems[0] == 'Object class':\n objectclass = elems[1]\n\n elif elems[0] == 'xmin':\n xmin = float(elems[1])\n\n elif elems[0] == 'xmax':\n xmax = float(elems[1])\n\n elif elems[0] == 'nx':\n size = int(elems[1])\n\n elif elems[0] == 'dx':\n shift = float(elems[1])\n\n elif elems[0] == 'x1':\n start = float(elems[1])\n\n elif elems[0] == 'ceiling':\n ceiling = int(elems[1])\n\n elif elems[0] == 'maxnCandidates':\n maxnCandidates = int(elems[1])\n\n elif elems[0] == 'intensity':\n # Iteration never returns here because intervalstart is True\n intervalstart = True\n intTier = Tier(xmin, xmax, size, '\"Intensity\"')\n text = '\"' + elems[1] + '\"'\n intTier.shift = shift\n\n # Set begin and end so they can be used in the next iteration\n # to set the first Interval for the pitch Tier\n begin = 0.0\n end = start\n intTier.addInterval(Interval(begin, end, text))\n pitchTier = Tier(xmin, xmax, size, objectclass)\n pitchTier.shift = shift\n # Prepare candidate list for first Interval\n # First iteration skips the intensity condition below\n candidates.append((0, 0))\n\n elif intervalstart:\n\n if elems[0] == 'intensity':\n begin = intTier[-1].xmax\n end 
= begin + shift\n text = '\"' + elems[1] + '\"'\n intTier.addInterval(Interval(begin, end, text))\n candidates = []\n\n elif elems[0] == 'nCandidates':\n nc = int(elems[1])\n candidatestart = True\n\n elif candidatestart:\n\n if elems[0] == \"frequency\":\n freq = float(elems[1])\n elif elems[0] == \"strength\":\n strength = float(elems[1])\n candidates.append((freq, strength))\n\n if len(candidates) == nc:\n # Candidate are ranked according to a decoding algorithm\n # First candidate is most likely, but we parse them all\n pitchTier.addInterval(\n Interval(begin, end, '\"' + str(candidates[0][0]) + '\"'))\n candidatestart = False\n\n return (pitchTier, intTier)", "def read_pendf_xs(file,start,finish):\n with open(file) as f:\n e = []\n cs = []\n\n break_outer = False\n\n for i,line in enumerate(f):\n # -------------------------------\n # Stop the loop once finish is reached\n # -------------------------------\n if i == finish:\n break\n if i >= start-1:\n \t# -------------------------------\n \t# Only include first 66 columns, split on space\n \t# and convert to an array of strings\n \t# -------------------------------\n word_len = 11\n word_start = 0\n for j in range(6):\n word = line[word_start:word_start+11]\n\n if( j%2 == 0 ):\n # -------------------------------\n # Grab the energies, convert to readable format\n # -------------------------------\n if( word == ' ' ):\n break_outer = True\n break # end of TAB1\n e.append(word.replace('-','e-').replace('+','e+'))\n else:\n # -------------------------------\n # Grab cross section, convert to readable format\n # -------------------------------\n if( word == ' ' ):\n break_outer = True\n break # end of TAB1\n cs.append(word.replace('-','e-').replace('+','e+'))\n word_start+=word_len\n\n if( break_outer ):\n break # end of TAB1\n \n # -------------------------------\n # Convert to floats\n # -------------------------------\n e = np.array(e).astype(float)\n cs = np.array(cs).astype(float)\n\n # -------------------------------\n # Stack them into a numpy array\n # -------------------------------\n pointwise_cs = np.array([e,cs])\n \n return pointwise_cs", "def part_b(filename):\n\n data = np.genfromtxt(get_filepath(filename), names=['abs_time', 'key'], delimiter=\",\")\n\n rel_time = [curr - last for last, curr in zip(np.concatenate(([0], data['abs_time'])), data['abs_time'])]\n rel_time_squared = [x * x for x in rel_time]\n\n return np.mean(rel_time_squared)", "def readEdgeDump():\n edgeDumpDict = {}\n begin = False\n interval = 0\n inputFile = open(path.FQedgeDump, 'r')\n for line in inputFile:\n words = line.split('\"')\n if not begin and words[0].find(\"<end>\") != -1:\n words = words[0].split(\">\")\n interval = int(words[1][:-5])\n edgeDumpDict.setdefault(interval, [])\n elif words[0].find(\"<interval\") != -1 and int(words[1]) >= simStartTime:\n interval = int(words[1])\n begin = True\n if begin and words[0].find(\"<edge id\") != -1:\n edge = words[1]\n if edge[0] != ':':\n speed = float(words[13])\n entered = int(words[15])\n # if no vehicle drove of the edge ignore the edge\n if entered == 0:\n continue\n edgeDumpDict.setdefault(interval, []).append((edge, speed))\n inputFile.close()\n return edgeDumpDict", "def readProf(fname, wdir='.'):\n\n fname = path.join(wdir, fname)\n x, y = [], []\n\n with open(fname) as f:\n lines = f.readlines()\n\n for line in lines:\n elements = line.split()\n\n if elements[0] == '#':\n pass\n else:\n x.append(float(elements[0]))\n y.append(float(elements[1]))\n\n return x, y", "def 
get_overlaps(file_name):\r\n\r\n place = {}\r\n size = {}\r\n sap = {}\r\n overlapping = []\r\n active_list = []\r\n max_width = 0\r\n\r\n with open(file_name + \".scl\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if line.split()[0] == \"Sitespacing\":\r\n sitespacing = line.split()[2]\r\n if line.split()[0] == \"SubrowOrigin\":\r\n starting_x = line.split()[2]\r\n ending_x = int(starting_x) + int(sitespacing) * int(line.split()[5])\r\n if ending_x > max_width:\r\n max_width = ending_x\r\n\r\n divider = max_width // 10\r\n\r\n with open(file_name + \".nodes\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if re.match(r'[a-z]{1}[0-9]+', line.split()[0]):\r\n if len(line.split()) == 3:\r\n size[line.split()[0]] = [line.split()[1], line.split()[2]]\r\n\r\n with open(file_name + \".pl\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if re.match(r'[a-z]{1}[0-9]+', line.split()[0]):\r\n if line.split()[0] in size:\r\n place[line.split()[0]] = [line.split()[1], line.split()[2]]\r\n sap_num = int(line.split()[1]) // divider\r\n if sap_num not in sap.keys():\r\n sap[sap_num] = []\r\n sap[sap_num].append([line.split()[0], int(line.split()[1]),\r\n int(line.split()[1]) + int(size[line.split()[0]][0]), int(line.split()[2]),\r\n \"start\"])\r\n\r\n sap[sap_num].append([line.split()[0], int(line.split()[1]),\r\n int(line.split()[1]) + int(size[line.split()[0]][0]),\r\n int(line.split()[2]) + int(size[line.split()[0]][1]), \"end\"])\r\n\r\n for lista in sap.values():\r\n lista.sort(key=lambda x: x[3])\r\n lista.sort(key=lambda x: x[4], reverse=True)\r\n for element in lista:\r\n if element[4] == \"start\":\r\n if len(active_list) == 0:\r\n active_list.append(element[0])\r\n else:\r\n for node in active_list:\r\n if int(place[node][0]) < int(place[element[0]][0]) + int(size[element[0]][0]) \\\r\n and int(place[node][0]) + int(size[node][0]) > int(place[element[0]][0]) \\\r\n and int(place[node][1]) < int(place[element[0]][1]) + int(size[element[0]][1]) \\\r\n and int(place[node][1]) + int(size[node][1]) > int(place[element[0]][1]):\r\n overlap = (node, element[0])\r\n overlapping.append(overlap)\r\n active_list.append(element[0])\r\n else:\r\n active_list.remove(element[0])\r\n return overlapping", "def get_lengths(filename, means, stds):\n\n with open(filename, 'rt') as f:\n for line in f:\n \n if line.startswith('Spring EE distance'):\n\n line = next(f)\n means.append(float(line.split()[0]))\n stds.append(float(line.split()[1]))\n break\n\n else:\n raise EOFError('No spring EE distance found')", "def getDiscriminativeEmissions(file, k=1):\n\temissions = {}\n\tforward_emissions = {}\n\tbackward_emissions = {}\n\tforward2_emissions = {}\n\tbackward2_emissions = {}\n\twords = []\n\ttags = []\n\twith open(file, encoding=\"utf-8\") as f:\n\t\tfor line in f:\n\t\t\ttemp = line.strip()\n\n\t\t\t# ignore empty lines\n\t\t\tif len(temp) == 0:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tlast_space_index = temp.rfind(\" \")\n\t\t\t\tword = temp[:last_space_index].lower()\n\t\t\t\ttag = temp[last_space_index + 1:]\n\t\t\t\t\n\t\t\t\twords.append(word)\n\t\t\t\ttags.append(tag)\n\n\tfor i in range(0, len(words)):\n\t\t# update count(y->x)\n\t\tword = words[i]\n\t\tprev_word = words[i-1] if i > 0 else 'START_WORD'\n\t\tprev2_word = words[i-2] if i > 1 else 'START_WORD2'\n\t\tnext_word = words[i+1] if i < len(words)-1 else 'END_WORD'\n\t\tnext2_word = words[i+2] if i < len(words)-2 else 'END_WORD2'\n\t\ttag 
= tags[i]\n\n\t\taddCount(word, tag, emissions)\n\t\taddCount(prev_word, tag, forward_emissions)\n\t\taddCount(next_word, tag, backward_emissions)\n\t\taddCount(prev2_word, tag, forward2_emissions)\n\t\taddCount(next2_word, tag, backward2_emissions)\n\n\tfor word, tagCountDict in emissions.items():\n\t\tcount = sum(tagCountDict.values())\n\t\tfor tag, tagCount in tagCountDict.items():\n\t\t\ttagCountDict[tag] = tagCount / count\n\t\n\tfor word, tagCountDict in forward_emissions.items():\n\t\tcount = sum(tagCountDict.values())\n\t\tfor tag, tagCount in tagCountDict.items():\n\t\t\ttagCountDict[tag] = tagCount / count\n\t\n\tfor word, tagCountDict in backward_emissions.items():\n\t\tcount = sum(tagCountDict.values())\n\t\tfor tag, tagCount in tagCountDict.items():\n\t\t\ttagCountDict[tag] = tagCount / count\n\t\n\tfor word, tagCountDict in forward2_emissions.items():\n\t\tcount = sum(tagCountDict.values())\n\t\tfor tag, tagCount in tagCountDict.items():\n\t\t\ttagCountDict[tag] = tagCount / count\n\t\n\tfor word, tagCountDict in backward2_emissions.items():\n\t\tcount = sum(tagCountDict.values())\n\t\tfor tag, tagCount in tagCountDict.items():\n\t\t\ttagCountDict[tag] = tagCount / count\n\t\n\tunique_tags = set(tags)\n\n\ttag_counts = {}\n\n\tfor tag in tags:\n\t\tif tag in tag_counts:\n\t\t\ttag_counts[tag] += 1\n\t\telse:\n\t\t\ttag_counts[tag] = 1\n\n\ttotal_count = sum(tag_counts.values())\n\tfor key, count in tag_counts.items():\n\t\ttag_counts[key] = count/total_count\n\n\tfor tag in unique_tags:\n\t\temissions[\"#UNK#\"] = tag_counts\n\t\tforward_emissions[\"#UNK#\"] = tag_counts\n\t\tbackward_emissions[\"#UNK#\"] = tag_counts\n\t\tforward2_emissions[\"#UNK#\"] = tag_counts\n\t\tbackward2_emissions[\"#UNK#\"] = tag_counts\n\n\t# replace with unk\t\t\n\n\treturn emissions, forward_emissions, backward_emissions, forward2_emissions, backward2_emissions, unique_tags, tag_counts", "def bin_spec_y(self, start, end):\n #print(self.spec_x.tolist())\n start_spec_x = closest_value_index(start, self.spec_x.tolist())\n i = 0\n bin_sum = 0\n while(start_spec_x + i < len(self.spec_x) and self.spec_x[start_spec_x + i] <= end):\n bin_sum += self.spec_y[start_spec_x + i]\n i += 1\n average = bin_sum / (i+1)\n return average", "def glitr_range_to_bed(in_range, out_bed):\n summit_size = cfg.get('peaks', 'peak_summit_size')\n with open(in_range) as infile:\n with open(out_bed, 'w') as outfile:\n with open(out_bed + '_summits.%s_around' % summit_size, 'w') \\\n as outfile_summits:\n for i, line in enumerate(infile):\n fields = line.strip('\\n').split('\\t')\n chrom, start, stop = parse_ucsc_range(fields[0])\n start = max(0, start)\n foldchange = fields[3]\n outfile.write('\\t'.join([chrom, str(start), str(stop),\n 'GLITR_peak_%s'%(i+1),\n str(int(float(foldchange))),'+'])\n + '\\n')\n # take bases around center as summit\n center = start + (stop - start) / 2\n center_start = center - summit_size / 2\n center_stop = center + summit_size / 2\n outfile_summits.write('\\t'.join([chrom, str(center_start),\n str(center_stop), 'GLITR_peak_%s'%(i+1),\n str(int(float(foldchange))),'+']) + '\\n')", "def parse():\n file = open(INPUT, 'r')\n\n expect_eff = False\n expect_vout = False\n\n eff_dict = {}\n vout_dict = {}\n\n for line in file:\n if line.startswith('PCC'):\n id = line.strip()\n expect_eff = True\n elif expect_eff:\n if line.startswith('efficiency'):\n eff_str = line.strip().split(':')[1]\n # get rid of % symbol\n eff = int(eff_str.split('%')[0])\n eff_dict[id] = .01 * eff\n\n expect_vout = True\n\n 
expect_eff = False\n elif expect_vout:\n if line.startswith('output voltage'):\n vout_str = line.strip().split(':')[1]\n vout = int(vout_str)\n vout_dict[id] = vout\n\n expect_vout = False\n\n with open(EFF_OUTPUT, 'w') as f:\n json.dump(eff_dict, f)\n\n with open(VOUT_OUTPUT, 'w') as f:\n json.dump(vout_dict, f)\n\n # plot stats of eff and vout\n plot_hist(eff_dict.values(), 'Efficiency', 'eff', bins=50)\n plot_hist(vout_dict.values(), 'V_out', 'vout', bins=50)", "def make_stride_dict(filename):\r\n MAX_ACC=getMAXASA()\r\n stride = {}\r\n handle = open(filename, \"r\")\r\n #print \"herxxxxxxxxxxxxxxxxxxxxxxxxxxe\"\r\n try:\r\n #kk=0\r\n for l in handle.readlines():\r\n #kk=kk+1\r\n #print kk\r\n sl = l.split()\r\n if sl[0] != \"ASG\": #if not detailed secondary structure record\r\n continue\r\n #REM |---Residue---| |--Structure--| |-Phi-| |-Psi-| |-Area-| ~~~~\r\n #ASG ALA A 1 1 C Coil 360.00 -35.26 120.7 ~~~~\r\n #0 1 2 3 4 5 6 7 8 9 10 \r\n # In cases where stride cannot recognize the residue type, it puts a '-' there\r\n # However, Bio.PDB uses ' ' so convert between the two \r\n if sl[2]=='-':\r\n sl[2]=' '\r\n \r\n resid=(sl[2],sl[3])\r\n aa=sl[1]\r\n ss=sl[5].upper() #There was b and B both from Bridge\r\n phi=float(sl[7])\r\n psi=float(sl[8])\r\n asa=float(sl[9])\r\n try:\r\n rasa=asa/MAX_ACC[aa]\r\n if rasa > 1.0: # we do get values greater than 1\r\n rasa=1.0\r\n except KeyError:\r\n rasa=np.nan\r\n stride[resid]=(aa,ss,phi,psi,asa,rasa)\r\n #construct a key,value pair\r\n #pdb.set_trace() \r\n finally:\r\n handle.close()\r\n return stride\r\n #return dssp, keys\r", "def read(self, file):\n text = open(file, 'r')\n text.readline() # header crap\n text.readline()\n text.readline()\n self.__xmin = float(text.readline().rstrip().split()[2])\n self.__xmax = float(text.readline().rstrip().split()[2])\n text.readline()\n m = int(text.readline().rstrip().split()[2]) # will be self.__n soon\n text.readline()\n for i in range(m): # loop over grids\n text.readline()\n if text.readline().rstrip().split()[2] == '\"IntervalTier\"': \n # inam = text.readline().rstrip().split()[2][1:-1]\n inam = text.readline().split('=')[1].strip().strip('\"') # Joseph Keshet: handle space in the tier name\n imin = float(text.readline().rstrip().split()[2])\n imax = float(text.readline().rstrip().split()[2])\n itie = IntervalTier(inam, imin, imax) # redundant FIXME\n n = int(text.readline().rstrip().split()[3])\n for j in range(n):\n text.readline().rstrip().split() # header junk\n jmin = float(text.readline().rstrip().split()[2])\n jmax = float(text.readline().rstrip().split()[2])\n # MS changed, to account for intervals where label\n # begins with spacing\n #jmrk = text.readline().rstrip().split()[2][1:-1]\n jmrk = getMark(text)\n #\n itie.append(Interval(jmin, jmax, jmrk))\n \n self.append(itie) \n else: # pointTier\n # inam = text.readline().rstrip().split()[2][1:-1]\n inam = text.readline().split('=')[1].strip().strip('\"') # Joseph Keshet: handle space in the tier name\n imin = float(text.readline().rstrip().split()[2])\n imax = float(text.readline().rstrip().split()[2])\n itie = PointTier(inam, imin, imax) # redundant FIXME\n n = int(text.readline().rstrip().split()[3])\n for j in range(n):\n text.readline().rstrip() # header junk\n jtim = float( text.readline().rstrip().split()[2])\n jmrk = text.readline().rstrip().split()[2][1:-1]\n itie.append(Point(jtim, jmrk))\n self.append(itie)\n text.close()" ]
[ "0.58007795", "0.5407008", "0.52349705", "0.5167778", "0.51037353", "0.5056159", "0.5043961", "0.5008997", "0.49668044", "0.49005035", "0.48916578", "0.48865697", "0.48817754", "0.48588818", "0.48307395", "0.48247167", "0.48014796", "0.4761774", "0.47577602", "0.47458568", "0.47455612", "0.47410256", "0.47365215", "0.47328517", "0.47302002", "0.46722302", "0.46487558", "0.46474138", "0.4630091", "0.46283245" ]
0.70150334
0
This performs the second strategy, chooses a random envelope for you.
def perform_strategy(self): number = randint(0, 111) myEnvelope = self._envelopeList[number] print(myEnvelope)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_envelope_routed(self):\n addr_1 = self.connection_client_1.address\n addr_2 = self.connection_client_2.address\n\n msg = DefaultMessage(\n dialogue_reference=(\"\", \"\"),\n message_id=1,\n target=0,\n performative=DefaultMessage.Performative.BYTES,\n content=b\"hello\",\n )\n envelope = Envelope(\n to=addr_2,\n sender=addr_1,\n protocol_specification_id=DefaultMessage.protocol_specification_id,\n message=DefaultSerializer().encode(msg),\n )\n\n self.multiplexer_client_1.put(envelope)\n delivered_envelope = self.multiplexer_client_2.get(block=True, timeout=20)\n\n assert delivered_envelope is not None\n assert delivered_envelope.to == envelope.to\n assert delivered_envelope.sender == envelope.sender\n assert (\n delivered_envelope.protocol_specification_id\n == envelope.protocol_specification_id\n )\n assert delivered_envelope.message == envelope.message", "def test_envelope_routed(self):\n addr_1 = self.connection_client_1.address\n addr_2 = self.connection_client_2.address\n\n msg = DefaultMessage(\n dialogue_reference=(\"\", \"\"),\n message_id=1,\n target=0,\n performative=DefaultMessage.Performative.BYTES,\n content=b\"hello\",\n )\n envelope = Envelope(\n to=addr_2,\n sender=addr_1,\n protocol_specification_id=DefaultMessage.protocol_specification_id,\n message=DefaultSerializer().encode(msg),\n )\n\n self.multiplexer_client_1.put(envelope)\n delivered_envelope = self.multiplexer_client_2.get(block=True, timeout=20)\n\n assert delivered_envelope is not None\n assert delivered_envelope.to == envelope.to\n assert delivered_envelope.sender == envelope.sender\n assert (\n delivered_envelope.protocol_specification_id\n == envelope.protocol_specification_id\n )\n assert delivered_envelope.message == envelope.message", "def test_strategy(self):\n self.responses_test([], [], [C], random_seed=1)\n self.responses_test([], [], [D], random_seed=2)", "def test_envelope_echoed_back(self):\n addr_1 = self.connection_client_1.address\n addr_2 = self.connection_client_2.address\n\n msg = DefaultMessage(\n dialogue_reference=(\"\", \"\"),\n message_id=1,\n target=0,\n performative=DefaultMessage.Performative.BYTES,\n content=b\"hello\",\n )\n original_envelope = Envelope(\n to=addr_2,\n sender=addr_1,\n protocol_specification_id=DefaultMessage.protocol_specification_id,\n message=DefaultSerializer().encode(msg),\n )\n\n self.multiplexer_client_1.put(original_envelope)\n delivered_envelope = self.multiplexer_client_2.get(block=True, timeout=10)\n assert delivered_envelope is not None\n\n delivered_envelope.to = addr_1\n delivered_envelope.sender = addr_2\n\n self.multiplexer_client_2.put(delivered_envelope)\n echoed_envelope = self.multiplexer_client_1.get(block=True, timeout=5)\n\n assert echoed_envelope is not None\n assert echoed_envelope.to == original_envelope.sender\n assert delivered_envelope.sender == original_envelope.to\n assert (\n delivered_envelope.protocol_specification_id\n == original_envelope.protocol_specification_id\n )\n assert delivered_envelope.message == original_envelope.message", "def test_envelope_echoed_back(self):\n addr_1 = self.connection_client_1.address\n addr_2 = self.connection_client_2.address\n\n msg = DefaultMessage(\n dialogue_reference=(\"\", \"\"),\n message_id=1,\n target=0,\n performative=DefaultMessage.Performative.BYTES,\n content=b\"hello\",\n )\n original_envelope = Envelope(\n to=addr_2,\n sender=addr_1,\n protocol_specification_id=DefaultMessage.protocol_specification_id,\n message=DefaultSerializer().encode(msg),\n )\n\n 
self.multiplexer_client_1.put(original_envelope)\n delivered_envelope = self.multiplexer_client_2.get(block=True, timeout=10)\n assert delivered_envelope is not None\n\n delivered_envelope.to = addr_1\n delivered_envelope.sender = addr_2\n\n self.multiplexer_client_2.put(delivered_envelope)\n echoed_envelope = self.multiplexer_client_1.get(block=True, timeout=5)\n\n assert echoed_envelope is not None\n assert echoed_envelope.to == original_envelope.sender\n assert delivered_envelope.sender == original_envelope.to\n assert (\n delivered_envelope.protocol_specification_id\n == original_envelope.protocol_specification_id\n )\n assert delivered_envelope.message == original_envelope.message", "def throw(self):\n self.side = random.randint(1, self.num_sides)", "def test_exact_supercontrolled_decompose_phase_1_use_random(self, seed):\n state = np.random.default_rng(seed)\n basis_k1 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n basis_k2 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n basis_phase = state.random() * 2 * np.pi\n basis_b = state.random() * np.pi / 4\n basis_unitary = np.exp(1j * basis_phase) * basis_k1 @ Ud(np.pi / 4, basis_b, 0) @ basis_k2\n decomposer = TwoQubitBasisDecomposer(UnitaryGate(basis_unitary))\n\n tgt_k1 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n tgt_k2 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n tgt_phase = state.random() * 2 * np.pi\n\n tgt_unitary = np.exp(1j * tgt_phase) * tgt_k1 @ Ud(np.pi / 4, basis_b, 0) @ tgt_k2\n self.check_exact_decomposition(tgt_unitary, decomposer, num_basis_uses=1)", "def random_cloud(envelope, seed=None, impulse=False, events=None, do_amp=True, do_mask=False):\n\n (N_X, N_Y, N_frame) = envelope.shape\n amps = 1.\n if impulse:\n fx, fy, ft = get_grids(N_X, N_Y, N_frame)\n phase = -2*np.pi*(N_X/2*fx + N_Y/2*fy + N_frame/2*ft)\n F_events = np.exp(1j * phase)\n elif events is None:\n np.random.seed(seed=seed)\n phase = 2 * np.pi * np.random.rand(N_X, N_Y, N_frame)\n F_events = np.exp(1j * phase)\n if do_amp:\n # see Galerne, B., Gousseau, Y. & Morel, J.-M. Random phase textures: Theory and synthesis. IEEE Transactions in Image Processing (2010). URL http://www.biomedsearch.com/nih/Random-Phase-Textures-Theory-Synthesis/20550995.html. (basically, they conclude \"Even though the two processes ADSN and RPN have different Fourier modulus distributions (see Section 4), they produce visually similar results when applied to natural images as shown by Fig. 11.\")\n F_events *= np.random.randn(N_X, N_Y, N_frame)\n else:\n F_events = np.fft.fftn( events[:, :, :] )\n F_events = np.fft.fftshift(F_events)\n\n Fz = F_events * envelope\n\n # de-centering the spectrum\n Fz = np.fft.ifftshift(Fz)\n Fz[0, 0, 0] = 0. 
# removing the DC component\n z = np.fft.ifftn(Fz).real\n if do_mask:\n fx, fy, ft = get_grids(N_X, N_Y, N_frame)\n z *= get_mask(fx, fy, ft)\n\n return z", "def test_burst_order(self):\n addr_1 = self.connection_client_1.address\n addr_2 = self.connection_client_2.address\n\n sent_envelopes = [\n self._make_envelope(addr_1, addr_2, i, i - 1)\n for i in range(1, self.NB_ENVELOPES + 1)\n ]\n for envelope in sent_envelopes:\n self.multiplexer_client_1.put(envelope)\n\n received_envelopes = []\n for _ in range(1, self.NB_ENVELOPES + 1):\n envelope = self.multiplexer_client_2.get(block=True, timeout=20)\n received_envelopes.append(envelope)\n\n # test no new message is \"created\"\n with pytest.raises(Empty):\n self.multiplexer_client_2.get(block=True, timeout=1)\n\n assert len(sent_envelopes) == len(\n received_envelopes\n ), f\"expected number of envelopes {len(sent_envelopes)}, got {len(received_envelopes)}\"\n for expected, actual in zip(sent_envelopes, received_envelopes):\n assert expected.message == actual.message, (\n \"message content differ; probably a wrong message \"\n \"ordering on the receiving end\"\n )", "async def randompack(self, ctx):\r\n if ctx.message.author == self.bot.user:\r\n return\r\n packs = ['Get Together', 'Get to Work', 'City Life',\r\n 'Luxury Party Stuff', 'Perfect Patio Stuff', 'Cool Kitchen Stuff', 'Spooky Stuff', 'Movie Hangout Stuff', 'Romantic Garden Stuff', 'Kids Room Stuff', 'Backyard Stuff',\r\n 'Outdoor Retreat', 'Spa Day', 'Dine Out']\r\n pack = random.choice(packs)\r\n if pack == 'City Life':\r\n await self.bot.say('{}, you should buy **{}** when it comes out.'.format(ctx.message.author.mention, pack))\r\n else:\r\n await self.bot.say('{}, you should buy **{}**.'.format(ctx.message.author.mention, pack))", "def _choose_sample(self):\n\n \t #periodically generate a new reconstruction for the purposes of sampling", "def mseed_2_Party(wav_dir, cat, temp_cat, lowcut, highcut, filt_order,\n process_length, prepick):\n\n partay = Party()\n # Get templates first\n temp_tup = [(ev, str(ev.resource_id).split('/')[-1].split('_')[0])\n for ev in cat\n if str(ev.resource_id).split('/')[-1].split('_')[-1]=='self']\n temp_evs, temp_ids = zip(*temp_tup)\n temp_evs = list(temp_evs)\n wav_files = ['%s/%s.mseed' % (wav_dir, str(ev.resource_id).split('/')[-1])\n for ev in temp_evs]\n temp_wavs = [read(wav) for wav in wav_files if os.path.isfile(wav)]\n for temp_wav, temp_ev in zip(temp_wavs, temp_evs):\n #Create a Template object, assign it to Family and then to Party\n tid = str(temp_ev.resource_id).split('/')[-1].split('_')[0]\n if len([ev for ev in temp_cat\n if str(ev.resource_id).split('/')[-1] == tid]) > 0:\n temp_ev = [ev for ev in temp_cat\n if str(ev.resource_id).split('/')[-1] == tid][0]\n tmp = Template(name=tid, st=temp_wav, lowcut=lowcut, highcut=highcut,\n samp_rate=temp_wav[0].stats.sampling_rate,\n filt_order=filt_order, process_length=process_length,\n prepick=prepick, event=temp_ev)\n fam_det_evs = [ev for ev in cat\n if str(ev.resource_id).split('/')[-1].split('_')[-1]!='self'\n and str(ev.resource_id).split('/')[-1].split('_')[0]==tid]\n fam_dets = [Detection(template_name=str(ev.resource_id).split('/')[-1].split('_')[0],\n detect_time=UTCDateTime([com.text.split('=')[-1]\n for com in ev.comments\n if com.text.split('=')[0]=='det_time'][0]),\n no_chans=len(ev.picks),\n chans=[pk.waveform_id.station_code\n for pk in ev.picks],\n detect_val=float([com.text.split('=')[-1]\n for com in ev.comments\n if com.text.split('=')[0]=='detect_val'][0]),\n 
threshold=float([com.text.split('=')[-1]\n for com in ev.comments\n if com.text.split('=')[0]=='threshold'][0]),\n typeofdet='corr',\n threshold_type='MAD',\n threshold_input=8.0,\n event=ev, id=str(ev.resource_id).split('/')[-1])\n for ev in fam_det_evs]\n fam_cat = Catalog(events=[det.event for det in fam_dets])\n fam = Family(template=tmp, detections=fam_dets, catalog=fam_cat)\n partay.families.append(fam)\n return partay", "def test_exact_supercontrolled_decompose_phase_2_use_random(self, seed):\n state = np.random.default_rng(seed)\n decomposer = self.make_random_supercontrolled_decomposer(state)\n\n tgt_k1 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n tgt_k2 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n tgt_phase = state.random() * 2 * np.pi\n tgt_a, tgt_b = state.random(size=2) * np.pi / 4\n tgt_unitary = np.exp(1j * tgt_phase) * tgt_k1 @ Ud(tgt_a, tgt_b, 0) @ tgt_k2\n self.check_exact_decomposition(tgt_unitary, decomposer, num_basis_uses=2)", "def test_envelope_echoed_back_node_agent(self):\n addr_1 = self.connection_client_1.address\n addr_n = self.connection_node_2.address\n\n msg = DefaultMessage(\n dialogue_reference=(\"\", \"\"),\n message_id=1,\n target=0,\n performative=DefaultMessage.Performative.BYTES,\n content=b\"hello\",\n )\n original_envelope = Envelope(\n to=addr_n,\n sender=addr_1,\n protocol_specification_id=DefaultMessage.protocol_specification_id,\n message=DefaultSerializer().encode(msg),\n )\n\n self.multiplexer_client_1.put(original_envelope)\n delivered_envelope = self.multiplexer_node_2.get(block=True, timeout=10)\n assert delivered_envelope is not None\n\n delivered_envelope.to = addr_1\n delivered_envelope.sender = addr_n\n\n self.multiplexer_node_2.put(delivered_envelope)\n echoed_envelope = self.multiplexer_client_1.get(block=True, timeout=5)\n\n assert echoed_envelope is not None\n assert echoed_envelope.to == original_envelope.sender\n assert delivered_envelope.sender == original_envelope.to\n assert (\n delivered_envelope.protocol_specification_id\n == original_envelope.protocol_specification_id\n )\n assert delivered_envelope.message == original_envelope.message", "def random_advice(message):\n advice = requests.get(\"https://api.adviceslip.com/advice\").json()['slip']['advice']\n\n return advice", "def crossover_one_point_saes(solver, par1, par2, xo_chance=None, point=None):\n assert len(par1.es_params) == len(par1.trace)\n assert len(par2.es_params) == len(par2.trace)\n if xo_chance is None:\n xo_chance = solver.alg_params.crossover_rate\n\n r = random.random()\n if r < xo_chance:\n if point is None:\n point = random.randrange(0, min(len(par1.trace), len(par2.trace))+1)\n new_trace = par1.trace[:point] + par2.trace[point:]\n new_es_params = par1.es_params[:point] + par2.es_params[point:]\n t = solver.create_solution_with_raw_trace(new_trace)\n new_es_params = fix_saes_params(solver, t.trace, new_es_params)\n t.es_params = new_es_params\n else:\n t = copy.deepcopy(random.choice([par1, par2]))\n assert len(t.es_params) == len(t.trace)\n return t", "def random():\n fake = Faker()\n \n group_header = GroupHeader(fake.bothify(text='MSGID?????????'),fake.date_time(),fake.date_object(),'CLRG')\n originator = Participant(fake.lexify(text='????',letters='ABCDEFGRHIJKL') + fake.bank_country() + 'PPXXX',\n fake.iban(),fake.name())\n beneficiary = Participant(fake.lexify(text='????',letters='ABCDEFGRHIJKL') + fake.bank_country() + 'PPXXX',\n fake.iban(),fake.name())\n transation = 
Transaction(beneficiary,\n str(round(random.uniform(1,2), 2)),\n fake.bothify(text='ENDTOEND?????????'),\n fake.bothify(text='TXID?????????'),\n fake.date_time(),\n fake.bothify(text='REF?????????'),\n fake.bothify(text='REMINF?????????'))\n \n return SCTInst(group_header,originator,transation)", "def test_envelope_echoed_back_node_agent(self):\n addr_1 = self.connection_client_1.address\n addr_n = self.connection_node.address\n\n msg = DefaultMessage(\n dialogue_reference=(\"\", \"\"),\n message_id=1,\n target=0,\n performative=DefaultMessage.Performative.BYTES,\n content=b\"hello\",\n )\n original_envelope = Envelope(\n to=addr_n,\n sender=addr_1,\n protocol_specification_id=DefaultMessage.protocol_specification_id,\n message=DefaultSerializer().encode(msg),\n )\n\n self.multiplexer_client_1.put(original_envelope)\n delivered_envelope = self.multiplexer_node.get(block=True, timeout=10)\n assert delivered_envelope is not None\n\n delivered_envelope.to = addr_1\n delivered_envelope.sender = addr_n\n\n self.multiplexer_node.put(delivered_envelope)\n echoed_envelope = self.multiplexer_client_1.get(block=True, timeout=5)\n\n assert echoed_envelope is not None\n assert echoed_envelope.to == original_envelope.sender\n assert delivered_envelope.sender == original_envelope.to\n assert (\n delivered_envelope.protocol_specification_id\n == original_envelope.protocol_specification_id\n )\n assert delivered_envelope.message == original_envelope.message", "def test_approx_supercontrolled_decompose_phase_2_use_random(self, seed, delta=0.01):\n state = np.random.default_rng(seed)\n basis_k1 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n basis_k2 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n basis_phase = state.random() * 2 * np.pi\n basis_b = 0.4 # how to safely randomize?\n basis_unitary = np.exp(1j * basis_phase) * basis_k1 @ Ud(np.pi / 4, basis_b, 0) @ basis_k2\n decomposer = TwoQubitBasisDecomposer(UnitaryGate(basis_unitary), basis_fidelity=0.99)\n\n tgt_k1 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n tgt_k2 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n tgt_phase = state.random() * 2 * np.pi\n tgt_a, tgt_b = 0.3, 0.2 # how to safely randomize?\n d1, d2, d3 = state.random(size=3) * delta\n tgt_unitary = np.exp(1j * tgt_phase) * tgt_k1 @ Ud(tgt_a + d1, tgt_b + d2, d3) @ tgt_k2\n self.check_approx_decomposition(tgt_unitary, decomposer, num_basis_uses=2)", "def successive_poisson(tau1, tau2, size=1):\r\n # Draw samples out of first exponential distribution: t1\r\n t1 = np.random.exponential(tau1,size=tau1)\r\n\r\n # Draw samples out of second exponential distribution: t2\r\n t2 = np.random.exponential(tau2,size=tau2)\r\n\r\n return t1 + t2", "def create_ge_envelopes(sample_rate,\n gate_time,\n envelope_args,\n modulation_args=None,\n quantization_args=None,\n upsampling_args=None,\n noise_args=None):\n xs, times = dsp_utils.create_custom_signal(\n\t\t\t x_envelope_ge,\n sample_rate,\n gate_time,\n\t envelope_args=envelope_args,\n \t\t\t modulation_args=modulation_args,\n quantization_args=quantization_args,\n upsampling_args=upsampling_args,\n noise_args=noise_args)\n ys, _ = dsp_utils.create_custom_signal(\n\t\t\t y_envelope_ge,\n sample_rate,\n gate_time,\n\t envelope_args=envelope_args,\n \t\t\t modulation_args=modulation_args,\n quantization_args=quantization_args,\n upsampling_args=upsampling_args,\n noise_args=noise_args)\n dets, _ = 
dsp_utils.create_custom_signal(\n\t\t\t det_envelope_ge,\n sample_rate,\n gate_time,\n\t envelope_args=envelope_args,\n \t\t\t modulation_args=modulation_args,\n quantization_args=quantization_args,\n upsampling_args=upsampling_args,\n noise_args=noise_args)\n return times, xs, ys, dets", "def find_curve(self):\n self.set_a()\n while True:\n while not self.check_a():\n self.seed_update()\n self.set_a()\n self.seed_update()\n self.set_b()\n while not self.check_b():\n self.seed_update()\n self.set_b()\n if not self.secure():\n self.seed_update()\n continue\n self.generate_generator()\n break", "def alternate_response(self, particle, f): \n\t\tintersection = self.get_intersection(particle)\n\t\tif np.random.random() < f:\n\t\t\tinteraction.diffuse_reflection(particle.specie, self.normal, intersection)\n\t\telse:\n\t\t\tinteraction.specular_reflection(particle, self.normal, intersection)", "async def random(self, ctx):\n response = await self.api.random()\n await ctx.send(embed=self._build_embed(response))", "def test_escalation_of_an_article_twice(self):\n token = self.user1.token()\n self.client.credentials(\n HTTP_AUTHORIZATION='Bearer ' + token)\n resp = self.escalate_an_article_twice()\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(resp.data[\"error\"], self.report_twice)", "def make_random_supercontrolled_decomposer(self, seed):\n state = np.random.default_rng(seed)\n basis_k1 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n basis_k2 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n basis_phase = state.random() * 2 * np.pi\n basis_b = state.random() * np.pi / 4\n basis_unitary = np.exp(1j * basis_phase) * basis_k1 @ Ud(np.pi / 4, basis_b, 0) @ basis_k2\n decomposer = TwoQubitBasisDecomposer(UnitaryGate(basis_unitary))\n return decomposer", "def get_random_phrase():\n return random.choices(PHRASES, WEIGHTS, k=1)[0]", "def test_sampling2 () :\n delta = 2 * np.pi / 3\n r = Reward(partial(stepFunction, \n xRange=(-delta/2, delta/2), \n yRange=(-delta/2, delta/2)), \n (-1, 0))\n states = []\n xs = np.arange(-np.pi, np.pi, delta)\n ys = np.arange(-np.pi, np.pi, delta)\n for x, y in product(xs, ys) : \n states.append(\n toExternalStateRep([x + delta / 2, y + delta / 2, 0, 0]).astype(float)\n )\n agent = findOptimalAgent(r)\n vals = estimateValueFromAgent(states, agent, r)\n for s, v in zip(states, vals) : \n print(toInternalStateRep(s)[:2], v)", "def test_envelope_sent(self):\n addr_1 = self.connection_client_1.address\n addr_2 = self.connection_client_2.address\n envelope = self._make_envelope(addr_1, addr_2)\n\n # make the send to fail\n with mock.patch.object(\n self.connection_client_1.logger, \"exception\"\n ) as _mock_logger, mock.patch.object(\n self.connection_client_1._node_client, \"_write\", side_effect=Exception\n ):\n self.multiplexer_client_1.put(envelope)\n delivered_envelope = self.multiplexer_client_2.get(block=True, timeout=20)\n _mock_logger.assert_has_calls(\n [\n call(\n \"Exception raised on message send. Try reconnect and send again.\"\n )\n ]\n )\n\n assert delivered_envelope is not None\n assert delivered_envelope.to == envelope.to\n assert delivered_envelope.sender == envelope.sender\n assert (\n delivered_envelope.protocol_specification_id\n == envelope.protocol_specification_id\n )\n assert delivered_envelope.message == envelope.message", "def pick(self, mess, args):\n return random.choice(args)" ]
[ "0.5605532", "0.5605532", "0.5256275", "0.5127899", "0.5127899", "0.50189346", "0.50167304", "0.5000255", "0.495632", "0.49257392", "0.48593804", "0.4853922", "0.48520026", "0.48400712", "0.48328158", "0.48325053", "0.47939473", "0.47607374", "0.47501016", "0.47105208", "0.46978077", "0.46977708", "0.46892908", "0.4680635", "0.46710205", "0.4660178", "0.46560428", "0.46448937", "0.4630149", "0.46293443" ]
0.7433414
0
Combine results into a single dataframe and save to disk as a .csv file
def save_results(self): results = pd.concat([ pd.DataFrame(self.IDs.cpu().numpy(), columns= ['ID']), pd.DataFrame(self.predicted_labels.cpu().numpy(), columns= ['predicted_label']), pd.DataFrame(self.correct_predictions.cpu().numpy(), columns= ['correct_prediction']), pd.DataFrame(self.epistemic_uncertainty.cpu().numpy(), columns= ['epistemic_uncertainty']), pd.DataFrame(self.aleatoric_uncertainty.cpu().numpy(), columns= ['aleatoric_uncertainty']), pd.DataFrame(self.total_uncertainty.cpu().numpy(), columns= ['total_uncertainty']), ], axis=1) create_results_directory() results.to_csv('results/{}_{}_results.csv'.format(self.__class__.__name__, datetime.datetime.now().replace(microsecond=0).isoformat()), index=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log_results(self, path):\n pd.DataFrame(self.results).to_csv(path)", "def save_combined_clean_data(self):\n df = []\n for data in self.clean_data:\n df.append(data.df)\n df = pd.concat(df, axis=0, join='outer', ignore_index=False, keys=None,\n levels=None, names=None, verify_integrity=False, copy=True)\n file_name = \"../data/clean_data/\" + \"combined_clean_data + \" + '.csv'\n df.to_csv(file_name, sep=\";\", index=False)\n\n return(df)", "def __create_output_csv(self, df, score_list, elapsed_list):\n df['Similar']=score_list\n df['Elapsed']=elapsed_list\n df.to_csv('Output.csv',index=False)\n return df", "def response_to_df_csv():\n results = api.call_api()\n df = t.get_dataframe(results)\n t.save_csv(df)\n return df", "def convert_to_csv(self, branch):\n names = [\"CSE_results.csv\", \"IT_results.csv\"]\n self.results = {\"ROLL_NO\": self.roll_nos, \"Name\": self.names, \"SGPA\": self.sgpa}\n print(self.results)\n df = DataFrame.from_dict(self.results)\n df.to_csv(names[branch], index=False)", "def save_to_csv(self):\n path = partial(os.path.join, 'datasets')\n save_name = self.name.lower().replace(' ', '_')\n self.df['values'].sum(axis=1).to_csv(path('{0}_values.csv'.format(save_name)))\n self.df['allocations'].to_csv(path('{0}_allocations.csv'.format(save_name)))\n self.df['returns'].to_csv(path('{0}_returns.csv'.format(save_name)))\n self.trades.to_csv(path('{0}_trades.csv'.format(save_name)))", "def to_csv(self, out_folder):\n import pandas as pd\n\n df = pd.DataFrame(zip(self.results['cids'],\n self.results['differences'],\n self.results['experimental_values']),\n columns=['cids', 'differences',\n 'experimental_values'])\n df.to_csv(out_folder, index=False)", "def to_csv(self, path):\n for table in ['datasets', 'dataruns', 'hyperpartitions', 'classifiers']:\n df = pd.read_sql('SELECT * FROM %s' % table, self.session.bind)\n df.to_csv(os.path.join(path, '%s.csv' % table), index=False)", "def create_query_csv(self):\n\n self.query_df.to_csv(self.query_output_file)", "def dwn_saved_result_csv(request):\n source_id = request.GET.get('source_id')\n data = []\n objs = ExtractedRelation.objects.filter(source=source_id)\n s = Source.objects.filter(source_id=source_id)[0]\n for i in objs:\n data.append((i.sentence, i.head, i.tail, i.pred_relation, i.sentiment, i.conf, s.source, i.rel_id, os.path.basename(i.ckpt)))\n \n df = pd.DataFrame(data, columns=['Sentence', 'Head', 'Tail', 'Predicted Relation', 'Predicted Sentiment', 'Confidence', 'Source', 'rel_id', 'Checkpoint'])\n df.to_csv(\"temp/analysis_results.csv\", index=False)\n \n return FileResponse(open('temp/analysis_results.csv','rb'))", "def write_results(self, results, fname, folder=None):\n folder = folder or self.output_dir\n\n if not os.path.exists(folder):\n logger.info(\"creating {} (did not exist)\".format(folder))\n os.makedirs(folder)\n\n out_file = os.path.join(folder, fname)\n logger.info(\"writing to {}\".format(out_file))\n df = pd.DataFrame(results)\n df.to_csv(out_file, index=False)\n \n return None", "def write_results(results):\n with RESULTS_PATH.open(\"w\") as writer:\n csvwriter = csv.writer(writer)\n csvwriter.writerows(results)", "def calculated_data_to_csv(transmissivity_calculated, conductivity_calculated,\n confirmed_wells, feature_class_name):\n utm_e = [i[0][0] for i in confirmed_wells]\n utm_n = [i[0][1] for i in confirmed_wells]\n np.set_printoptions(suppress=True) #removes scientific notation\n location = np.array([utm_e, utm_n])\n location = location.transpose()\n transmissivity_calculated = 
np.array(transmissivity_calculated)\n conductivity_calculated = np.array(conductivity_calculated)\n joined_data = np.concatenate((location, transmissivity_calculated, conductivity_calculated), axis = 1)\n my_df = pd.DataFrame(joined_data)\n header_list = ['UTME', 'UTMN', 'T_min', 'T_raw', 'T_max', 'K_min', 'K_raw', 'K_max', 'Well ID']\n raw_csv_name = f\"{feature_class_name}.csv\"\n my_df.to_csv(raw_csv_name, index = False, header = header_list)\n return my_df, raw_csv_name", "def consolidate_results(path='./Data'):\n model_files = [load(os.path.join(path, f)) \n for f in os.listdir(path) if os.path.isfile(os.path.join(path, f)) and f.startswith('model_')]\n df_final = pd.DataFrame(columns=['model_name','train_accuracy','test_accuracy',\n 'macro_avg_precision','macro_avg_recall',\n 'macro_avg_f1-score','weighted_avg_precision',\n 'weighted_avg_recall','weighted_avg_f1-score'])\n for model_file in model_files:\n results = model_file['model_results']\n class_report = classification_report(results.category, results.pred, output_dict=True)\n df_final = df_final.append({'model_name':model_file['model_name'],\n 'train_accuracy':'{0:.2f}'.format(model_file['model_CV'].best_score_),\n 'test_accuracy':'{0:.2f}'.format(class_report['accuracy']),\n 'macro_avg_precision':class_report['macro avg']['precision'],\n 'macro_avg_recall':class_report['macro avg']['recall'],\n 'macro_avg_f1-score':class_report['macro avg']['f1-score'],\n 'weighted_avg_precision':class_report['weighted avg']['precision'],\n 'weighted_avg_recall':class_report['weighted avg']['recall'],\n 'weighted_avg_f1-score':class_report['weighted avg']['f1-score']\n },ignore_index=True)\n return(df_final)", "def create_csv(self):\n try:\n # Convert List of Lists to DataFrame and write it to a CSV\n pd.DataFrame(self.data, columns=self.header) \\\n .to_csv(os.path.join(self.file_path, self.file_name), index=False)\n self.successful_run = True\n except:\n # TODO create Exception Handling\n raise", "def to_csv(self):\n if not self._fitted:\n self.fit()\n #self._message(\"Saving results into a csv (comma separated values) file.\")\n v=np.array([list(self.initialConcentration.values()),\n list(self.fitting_error.values()),\n list(self.k.values()),\n list(self.Fb.values()),\n list(self.slope.values())]).T\n k=list(self.initialConcentration.keys())\n d=pd.DataFrame(v,columns=['Initial Concentration','Fitting Error','k','Fb','Slope'],index=k)\n fn=get_valid_fname(self.ID)\n self.csvname=\"%s_initial_concentrations.csv\"%(fn)\n self.fullcsvname=\"%s/%s_initial_concentrations.csv\"%(self.info['resultsdir'],fn)\n self.info['csvname_initialConcentration']=self.csvname\n print(self.csvname)\n d.to_csv('%s/%s'%(self.info['resultsdir'],self.csvname))", "def to_csv(self, path):\n results = self.all()\n if self.stop_check is not None and self.stop_check():\n return\n results.to_csv(path)", "def write_tocsv(file_name, dataframe) :\n print(\"\\nSaved result to {}\\n\".format(file_name))\n dataframe.to_csv(file_name, mode='a', header=False,index=False)", "def write_matches(matches: List[Result],out: str):\n data = pd.DataFrame(matches)\n data.to_csv(out,sep=\"\\t\",index=False)", "def dwn_all_saved_results(request):\n \n sources = []\n for i in Source.objects.filter(user=request.user):\n sources.append((i.source_id, i.datetime_extracted.strftime('%d/%m/%Y %H:%M'), i.source))\n \n data = []\n for s, timee, s_name in sources:\n objs = ExtractedRelation.objects.filter(source=s)\n for i in objs:\n data.append((i.sentence, i.head, i.tail, i.pred_relation, 
i.sentiment, i.conf, timee, s_name, i.rel_id, os.path.basename(i.ckpt)))\n \n df = pd.DataFrame(data, columns=['Sentence', 'Head', 'Tail', 'Predicted Relation', 'Predicted Sentiment', 'Confidence', 'Extraction Time', 'Source', 'rel_id', 'Checkpoint'])\n df.to_csv(\"temp/all_analysis_results.csv\", index=False)\n \n return FileResponse(open('temp/all_analysis_results.csv','rb'))", "def save_results_to_csv(save_file_path, append=True, tmp_file_path=tmp_file_path, datefmt='%d/%m/%Y %H:%M:%S'):\n # load tmp results\n res_summary = open_json(tmp_file_path, data_format=pd.DataFrame)\n\n # calculate average scores\n combis = list(product(\n ['CV', 'Val'], \n ['precision', 'recall', 'f1', 'exact match', 'loss', \n 'precision_CE', 'recall_CE', 'f1_CE', 'exact match_CE']\n ))\n for combi in combis:\n get_average(res_summary, combi)\n\n # calculate end time\n end = datetime.now()\n res_summary['endtime'] = end.strftime(datefmt)\n res_summary['timetaken'] = end - \\\n datetime.strptime(res_summary['starttime'][0], datefmt)\n\n if append and os.path.isfile(save_file_path):\n # load old file\n old_summary = pd.read_csv(save_file_path)\n # append below\n res_summary = pd.concat([old_summary, res_summary], axis=0)\n\n # save final and delete tmp file\n res_summary.to_csv(save_file_path, index=False)\n os.remove(tmp_file_path)", "def dwn_analysis_csv(request):\n data = []\n for i in results:\n data.append((i['sentence'], i['head'], i['tail'], i['pred_relation'], i['sent'], i['conf']))\n df = pd.DataFrame(data, columns=['Sentence', 'Head', 'Tail', 'Predicted Relation', 'Predicted Sentiment', 'Confidence'])\n df.to_csv(\"temp/analysis_results.csv\", index=False)\n \n return FileResponse(open('temp/analysis_results.csv','rb'))", "def to_csv(data_path):\n news_df, price_df = load_data(data_path)\n\n combined_df = combine_stock_news(news_df, price_df)\n\n combined_df.to_csv(data_path + \"news_price_df.csv\")", "def write_to_csv(results, filename):\r\n fieldnames = ('datetime_utc', 'distance_au', 'velocity_km_s',\r\n 'designation', 'name', 'diameter_km',\r\n 'potentially_hazardous')\r\n\r\n with open(filename, 'w') as outfile:\r\n writer = csv.writer(outfile)\r\n writer.writerow(fieldnames)\r\n for row in results:\r\n r = [row.time, row.distance, row.velocity, row.neo.designation,\r\n row.neo.name, row.neo.diameter, row.neo.hazardous]\r\n writer.writerow(r)", "def get_csv_string(self):\n df = None\n for d in self.data:\n if df is None:\n df = d.as_dataframe()\n else:\n df = df.append(d.as_dataframe())\n\n if df is None:\n return \"\"\n else:\n return df.to_csv(index=False)", "def write_df_to_csv(output_df,file_path):\n output_df\\\n .coalesce(1)\\\n .write\\\n .format(\"csv\")\\\n .option(\"header\",\"true\")\\\n .mode(\"overwrite\")\\\n .save(file_path)", "def output_csv(df):\n # remove existing plot\n if os.path.exists(\"files/converted.csv\"):\n os.remove(\"files/converted.csv\")\n # save csv\n df.to_csv('files/converted.csv')", "def combine_results(voting = 'hard',clf_list = ['test_small','rt_small','test2_small']):\n \n start = time.clock()\n df = load_all_dfs(clf_list)\n\n print('combining the data and voting ', voting)\n\n if voting == 'hard':\n print('voting')\n\n label_tupel_list = list(df.groupby(level=['id'])['std'].idxmax())#idmax \n num_samples = len(label_tupel_list)\n index = [label_tupel_list[i][0] for i in range(num_samples)]\n df.index\n time_need = []\n t2 = 0\n\n print(\"doing god's work\")\n df_new = df.ix[index]\n df_new = df.ix[label_tupel_list]\n end = time.clock()\n print('done', 
end-start)\n #return df_new\n \n \n cols = ['Class_1',\n 'Class_2',\n 'Class_3',\n 'Class_4',\n 'Class_5',\n 'Class_6',\n 'Class_7',\n 'Class_8',\n 'Class_9']\n df_new2 = df_new.reset_index()\n del df_new2['std']\n del df_new2['id']\n del df_new2['df']\n\n print('zero')\n try:\n print('first')\n clf_names = 'with_'\n print('second')\n for i in range(len(clf_list)):\n print(clf_list[i])\n clf_names = clf_names + '_' + clf_list[i]\n \n df_new2.to_csv('Pikki'+clf_names+ '.csv',header = cols,index_label = ['id'])\n \n df_new2.index +=1\n\n print('written to')\n print('Pikki'+clf_names+ '.csv')\n \n df_new2.to_csv('combined_Pikki'+clf_names+ '.csv',header = cols,index_label = ['id'])\n except:\n df_new2.to_csv('combined_Pikki.csv',header = cols,index_label = ['id'])\n return df_new", "def save_results(self, path):\n create_folder(path)\n self.get_scores().to_csv(path + r'/scores.csv', index=False)\n self.get_results().to_csv(path + r'/results.csv', index=False)\n self.get_pivot_last_epoch().to_csv(path + r'/pivot_last_epoch.csv', index=True)", "def to_csv(self, \n last_match_id, \n first_match_id = 0, \n file_count = 20, \n start_file = 0, \n matches_per_file = 20000):\n for i in range(start_file, start_file + file_count):\n print(i)\n last_match_id_current = last_match_id - i * matches_per_file\n file_name = 'rawdata_' + str(i) + '.csv'\n currunt_dataframe = self.mine_data(file_name = file_name,\n first_match_id = first_match_id,\n last_match_id = last_match_id_current,\n stop_at = matches_per_file)\n currunt_dataframe.to_csv('rawdata_' + str(i) + '.csv')" ]
[ "0.7109894", "0.7028306", "0.7024764", "0.69350445", "0.6878669", "0.6859569", "0.6832618", "0.6804342", "0.6681769", "0.66685516", "0.6637697", "0.660774", "0.6546744", "0.65395343", "0.65183043", "0.65171605", "0.6508093", "0.6489798", "0.6484335", "0.647765", "0.64603716", "0.6451568", "0.644045", "0.64308965", "0.64274883", "0.6417843", "0.6367094", "0.63664174", "0.6364839", "0.6348115" ]
0.7250397
0
Starts the add command and prompts the user for a category
def command_add(message, bot): chat_id = message.chat.id user_bills['user_telegram_id'] = chat_id markup = types.ReplyKeyboardMarkup(one_time_keyboard=True) markup.row_width = 2 for c in spend_categories: markup.add(c) markup.add("Cancel") msg = bot.reply_to(message, 'Select Category \nSelect Cancel to abort.', reply_markup=markup) # print('category', message.text) bot.register_next_step_handler(msg, post_category_selection, bot)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_category():\n if request.method == 'POST':\n result = USERS[session['username']].add_recipe_category(\n request.form['title'])\n if result == 'recipe_category added':\n flash(result, 'info')\n else:\n flash(result, 'warning')\n return redirect(url_for('categories'))\n return render_template('add_category.html')", "def add_category():\n if 'user' not in session:\n flash(\"You need to be logged in to create a category.\")\n return redirect(url_for(\"login\"))\n\n if request.method == \"POST\":\n category = {\n \"category_name\": request.form.get(\"category_name\"),\n \"created_by\": session[\"user\"]\n }\n mongo.db.categories.insert_one(category)\n flash(\"Category Successfully Added\")\n return redirect(url_for(\"get_categories\"))\n\n categories = mongo.db.categories.find().sort(\n \"category_name\", 1)\n return render_template(\"add_category.html\",\n categories=categories)", "def add():\r\n\r\n add = True\r\n\r\n form = CategoryForm()\r\n if form.validate_on_submit():\r\n category = Category(name=form.name.data,\r\n description=form.description.data)\r\n try:\r\n # add category to the database\r\n db.session.add(category)\r\n db.session.commit()\r\n flash('You have successfully added a new category.')\r\n except:\r\n # in case category name already exists\r\n flash('Error: category name already exists.')\r\n\r\n # redirect to category page\r\n return redirect(url_for('category.list'))\r\n\r\n # load category template\r\n return render_template('category/form.html', action=\"Add\",\r\n add=add, form=form,\r\n title=\"Add Category\")", "def add_category(self, category):\n if category not in self.categories and category.strip() != \"\":\n self.categories.append(category.strip())", "async def add(self, ctx, cat: str, *, item: str):\n c = check_category(cat)\n if not c[0]:\n await ctx.send(\"`{}` isn't a category you can add things to.\".format(cat))\n return\n else:\n data = c[1]\n datafile = c[2]\n cat = c[3]\n user = ctx.author.name + \"#\" + ctx.author.discriminator\n if user not in data:\n data[user] = []\n \n things = [x.strip() for x in item.split(',')]\n mesg = \"\"\n for thing in things:\n data[user].append(thing.title())\n mesg += \"`{}` was added to your {} list.\\n\".format(thing.title(), cat)\n await ctx.send(mesg)\n pickle.dump(data, open(datafile, \"wb\"))", "def addCategory():\r\n # authentication\r\n if 'username' not in login_session:\r\n flash('Please login to add category')\r\n return redirect(url_for('showCategories'))\r\n\r\n if request.method == 'POST':\r\n name = request.form['name']\r\n description = request.form['description']\r\n # validation\r\n if not name:\r\n flash('Add CategoryError: Name can\\'t be empty')\r\n return redirect(url_for('showCategories'))\r\n # create operation\r\n newCategory = Category(name=name, description=description,\r\n user_id=login_session['user_id'])\r\n session.add(newCategory)\r\n session.commit()\r\n flash('Added Category \\'{}\\' Successfully!'.format(newCategory.name))\r\n return redirect(url_for('showCategories'))\r\n else:\r\n # serve GET request with form\r\n return render_template(\"addCategory.html\")", "def add_category(self):\n name = self.caregoryName.text()\n if name == '':\n return\n parent = self.categoryParent.currentText()\n\n addition = self.orm.add_category(name, parent)\n if not addition:\n show_warning(\"Category already exists.\")\n else:\n self.show_categories()\n if parent == '':\n self.show_available_parents()", "def create(self) -> dict:\n\n questions = [\n Text(name=\"name\", message=\"Enter 
category name\"),\n ]\n\n return prompt(questions)", "def add_category():\n # Verify user login. If not, redirect to login page.\n login_status = None\n if 'email' in login_session:\n login_status = True\n else:\n flash('Please log in.')\n return redirect(url_for('home'))\n if request.method == 'POST':\n # Get form fields\n new_category_name = request.form['new_category_name']\n # Get user's database ID for the item's database entry\n user_db_id = (session.query(Users)\n .filter_by(email=login_session['email'])\n .one()).id\n print(\"Current user's database primary key id is {}.\"\n .format(user_db_id))\n # Flash messages for incomplete item info\n if not request.form['new_category_name']:\n flash('Please add category name.')\n return redirect(url_for('add_category'))\n # Query database for item name\n category_name_in_db = (session.query(Categories.name)\n .filter_by(name=new_category_name)\n .all())\n # If the category name is already in the database, don't add\n if category_name_in_db:\n print('Category name \"{}\" already in database.'\n .format(new_category_name))\n flash('Category name \"{}\" already in database.'\n .format(new_category_name))\n return redirect(url_for('add_category'))\n # If user is logged in, and all info provided, add category\n new_category = Categories(\n name=new_category_name,\n creator_db_id=user_db_id)\n session.add(new_category)\n session.commit()\n print('Category {} successfully created.'.format(new_category_name))\n # Return to homepage\n return redirect(url_for('home'))\n else:\n # Render webpage\n return render_template('add_category.html',\n login_status=login_status)", "async def category(self,ctx):\n await ctx.send(\"Yes this is a category.\")", "def add_category(self, category):\n if category not in self.categories and category.strip() != \"\" and category is not None:\n self.categories.append(category.strip())", "def add_category(category_name):\n category_name = category_name.lower()\n db.categories.insert_one({\"name\": category_name})\n return jsonify({\"success\": True, \"message\": f\"{category_name} added successfully.\"})", "def add_category():\n\n form = AddOrEditCategoryForm()\n if form.validate_on_submit():\n new_category = Category(name=form.name.data, owner=current_user._get_current_object())\n try:\n db.session.add(new_category)\n db.session.commit()\n except:\n flash(\n (\"Failed to add category \\\"%s\\\".\"\n \" Make sure that the category name is unique.\")\n % new_category.name)\n else:\n flash(\"A new category \\\"%s\\\" has been added.\" % new_category.name)\n finally:\n return redirect(url_for('.index'))\n return render_template('add_or_edit.html', form=form)", "def cb_new_category(self, event):\n self.main_frame.new_category_dialog(None)", "def main(self) -> dict:\n\n questions = [\n Checkbox(\n name=\"main\",\n message=\"SELECT A CATEGORY OPTION:\",\n choices=[\"CREATE\", \"READ\", \"UPDATE\", \"DELETE\"])\n ]\n\n return prompt(questions)", "def add_category(self, category: str) -> None:\n for letter in self.data:\n if not self.data[letter].get(category):\n self.data[letter][category] = []\n print(f'Categoria: {category} adicionada ao dicionário.')\n self.save()\n self.beautify_json()", "def add_category(self, category):\n raise NotImplementedError()", "def prompt_categories(self, name, default_category=None):\n if not self.categories:\n raise AurploaderError(\"no categories\")\n if default_category not in self.categories:\n default_category = None\n while True:\n print('Select category for {}'.format(name))\n for n in 
sorted(self.categories):\n print(' {:2d}) {}'.format(n, self.categories[n]))\n print('Enter \"x\" to skip this package.')\n if default_category:\n category = input('Category [{}]: '.format(default_category))\n else:\n category = input('Category: ')\n if category.lower() == 'x':\n return None\n elif not category and default_category:\n return default_category\n else:\n try:\n category = int(category)\n if category in self.categories:\n return category\n except ValueError:\n continue", "def add_category():\n\n form = CategoryForm()\n form.name.current_user_id = current_user.id\n\n if form.validate_on_submit():\n new_category = Category(\n name=form.name.data.capitalize(), user_id=current_user.id)\n db.session.add(new_category)\n db.session.commit()\n flash(\"Successfully created new category '{}'\".format(\n form.name.data.capitalize()), category='success')\n return redirect(url_for('url.index'))\n\n return render_template(\n 'forms/form.html',\n form_title=\"Add Category\",\n form=form,\n form_name='category',\n action=url_for('url.add_category')\n )", "def add_category(self, cid: str, cat: str):\n self.logging.info(f\"adding category: {cat} with it {cid}\")\n if self.sess.query(exists().where(Category.category_id == cid or Category.category == cat)).scalar():\n return\n genre = Genre(cid=uuid4().hex,\n categorey_id=cid,\n category=cat)\n self.sess.add(genre)\n self.sess.commit()", "def _add_entry(self, cat_entry):\n\n # run through category apps and add orphans to Desktop\n # database, add DM and categories to database\n models.cat_apps(cat_entry)\n\n # run through and categories to database\n models.cat_list(cat_entry.categories)\n\n # create new - models.py \n cat_record = models.Categories(category=cat_entry.category) \n\n # fill in values \n cat_record.fill_record(cat_entry) \n\n BaseInfo.session.add(cat_record)\n\n try:\n BaseInfo.session.commit( )\n except exc.SQLAlchemyError:\n logger.error(\"Commit error\")", "def test_add_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n rv = self.category('Breakfast')\n self.assertIn(b'Category created', rv.data)", "def insert_recipe_category():\r\n\r\n # validates request form\r\n form = request.form\r\n error_list = validate_form(form, 'recipe_category')\r\n\r\n if error_list == []:\r\n # validates image URL\r\n image_URL = validate_image(form['img_link'])\r\n\r\n # inserts recipe category\r\n recipe_category = {\r\n 'name': request.form.get('name'),\r\n 'img_link': image_URL,\r\n 'number_of_recipes': 0\r\n }\r\n mongo.db.recipe_categories.insert_one(recipe_category)\r\n\r\n # redirects to the recipe category search\r\n return redirect(url_for(\r\n 'search',\r\n collection='recipe_categories')\r\n )\r\n\r\n else:\r\n # initializes page title and header\r\n page_title = 'Add recipe category'\r\n page_header = 'Add a new recipe category:'\r\n\r\n # sends error list back to the form to correct mistakes\r\n return render_template(\r\n 'add_form.html',\r\n errors=error_list,\r\n form=form,\r\n page_title=page_title,\r\n page_header=page_header\r\n )", "def category_choice_input(self):\n self.category = input(fr.FR[8])\n try:\n if self.category == \"q\":\n self.leave_category_choice -= 1\n elif 1 <= int(self.category) <= len(config.CATEGORIES):\n print(self.category)\n self.products = self.product_table.get_list_product(\n self.category)\n self.products_sub = self.product_table.get_list_product(\n self.category)\n self.choice_product()\n 
self.leave_category_choice -= 1\n except ValueError:\n print(fr.FR[10])", "def category_add(request: HttpRequest) -> HttpResponse:\n if request.method == 'POST':\n # Process with adding category\n form = CategoryForm(request.POST)\n category_existed = Category.objects.filter(name=form.data.get('name')).exists()\n if form.is_valid() and not category_existed:\n logger.debug('Adding a category to database %s', form.cleaned_data)\n form.save()\n messages.success(request, 'Category successfully added')\n else:\n messages.error(request, 'Could not create category. Category might already existed')\n return redirect('rss-index')", "def new_category():\n if request.method == 'POST':\n genre = Category(name=request.form['new_category'],\n user_id=login_session['user_id'])\n session.add(genre)\n try:\n session.commit()\n except:\n session.rollback()\n flash(\"Error: Cannot have two categories with the same name!\")\n return redirect(url_for('show_categories'))\n else:\n return render_template(\"newcategory.html\")", "def cli(category):\n if category in urls or category == 'all':\n if category == 'all':\n for category in urls:\n req_and_save(category)\n else:\n req_and_save(category)\n else:\n click.echo(f\"{click.style(category, bg='yellow', fg='red')} not valid\")", "def add_category(self, name, user_id):\r\n category_id, message = self._db_manager.add_category(name, user_id)\r\n flash(message)\r\n return category_id", "def test_add_category(self):\n self.add_success(self.test_data['pants'])", "def add(**kwargs):\n logger.info('add dream command')\n instance = {}\n instance['date'] = kwargs['date'].date()\n instance['tags'] = list(kwargs['tags'])\n instance['drtype'] = kwargs['type']\n instance['title'] = kwargs['title']\n launch_gui(instance)" ]
[ "0.67735505", "0.67442906", "0.66510326", "0.66429573", "0.6636363", "0.662902", "0.6562078", "0.6550507", "0.6453305", "0.645305", "0.640512", "0.6403172", "0.6363379", "0.63466406", "0.6299047", "0.6298222", "0.62930804", "0.6286906", "0.6272263", "0.62220436", "0.6192982", "0.6155349", "0.6108863", "0.60897803", "0.6060874", "0.6051089", "0.5996325", "0.5969007", "0.5961365", "0.59598225" ]
0.7265265
0
Asks the user if they want to split the bill with another user
def get_sharing_details(message, bot): markup = types.ReplyKeyboardMarkup(one_time_keyboard=True) markup.row_width = 3 markup.add("Yes") markup.add("No") markup.add("Cancel") bot.send_message(message.chat.id, 'Do you want to split this bill with any other users?', reply_markup=markup) bot.register_next_step_handler(message, post_sharing_selection, bot)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ask_user():\r\n while True:\r\n if bj.player1.double_down is True and bj.player1.split is True and bj.player1.went_split is False:\r\n p_choice = input(\"Hit, Stand, Double Down or Split?\\n\")\r\n if p_choice != \"hit\" and p_choice != \"stand\" and p_choice != \"dd\" and p_choice != \"double\" and p_choice != \"double down\" and p_choice != \"split\":\r\n print(\"Wrong input.\\n\")\r\n continue\r\n else:\r\n return p_choice\r\n elif bj.player1.split is True and bj.player1.went_split is False: # various input prompts depending on available player choices\r\n p_choice = input(\"Hit, Stand or Split?\\n\")\r\n if p_choice != \"hit\" and p_choice != \"stand\" and p_choice != \"split\":\r\n print(\"Wrong input.\\n\")\r\n continue\r\n else:\r\n return p_choice\r\n elif bj.player1.double_down is True:\r\n p_choice = input(\"Hit, Stand or Double Down?\\n\")\r\n if p_choice != \"hit\" and p_choice != \"stand\" and p_choice != \"dd\" and p_choice != \"double\" and p_choice != \"double down\":\r\n print(\"Wrong input.\\n\")\r\n continue\r\n else:\r\n return p_choice\r\n else:\r\n p_choice = input(\"Hit or Stand?\\n\")\r\n if p_choice != \"hit\" and p_choice != \"stand\":\r\n print(\"Wrong input.\\n\")\r\n continue\r\n else:\r\n return p_choice", "def handle_user_id_input_for_sharing(message, bot):\n chat_id = message.chat.id\n username = str(message.text)\n\n if username == \"Cancel\":\n bot.send_message(message.chat.id, 'Cancelling Record!!')\n display_text = \"\"\n for c in commands: # generate help text out of the commands dictionary defined at the top\n display_text += \"/\" + c + \": \"\n display_text += commands[c] + \"\\n\"\n bot.send_message(chat_id, 'Please select a menu option from below:')\n bot.send_message(chat_id, display_text)\n return\n\n bot.send_message(chat_id, \"User {} will be sent an update about the split\".format(username))\n\n if 'shared_with' in user_bills:\n user_bills['shared_with'].append(username)\n else:\n user_bills['shared_with'] = [username]\n\n get_sharing_details(message, bot)\n\n # TODO: Add message queue to handle multiple requests\n asyncio.run(send_update_to_user_about_expense(message, user_bills, bot))", "def handlenames():\n\n while True:\n name = input(\"Please enter full name or list> \")\n if name == \"\":\n print(\"Please enter valid name.\\n\")\n elif name == \"list\":\n printdonorlist()\n break\n elif name in donor_db:\n getdonationamount(name)\n printthankyou(name)\n break\n else:\n addnewdonordonation(name)\n getdonationamount(name)\n printthankyou(name)\n break", "def prompt_user_for_starting_balance():\n print('What starting account balance do you want to have for your new account?')\n return input()", "def progress_confirm() -> str:\r\n message_display = \"Batch is not being Currently Brewed\"\r\n #Catches batch number user has entered on UI\r\n user_batch = request.args.get(\"move_number\")\r\n #Catches tank user has entered on UI\r\n next_tank = request.args.get(\"tank_choice\")\r\n print(user_batch)\r\n for brew_section in current_brewings:\r\n if brew_section[0] == user_batch:\r\n #Finding the list in current_brewings where the batch\r\n #the user is interested in is stored\r\n batch_index = current_brewings.index(brew_section)\r\n message_display = stage_progresser(batch_index, next_tank)\r\n break\r\n\r\n return render_template(\"progress_confirm.html\",\r\n display_message=message_display)", "def UserMenu(self):\n prompt = \"\"\"\n (CD) Certificate of Deposit\n (MM) Money Market\n (MS) Money Savings\n (C) Checking\n Enter Account 
Type: \"\"\"\n done = 0\n while not done:\n choice = 0\n while not choice:\n try:\n option = raw_input(prompt).strip().upper()\n m = re.search(r'CD|MM|MS|C',option)\n if m:\n print \" Your preferred account type is \",option\n prompt2 = \"\"\"\n (=>) WithDrawal\n (<=) Deposit\n (-) Debit\n (+) Credit\n Enter Choice :\"\"\"\n else:\n print \"Invalid Transaction\"\n except(EOFError, KeyboardInterrupt):\n option = 'C'\n if option == 'E':\n choice = 1\n try:\n option1 = raw_input(prompt2).strip().upper()\n except(KeyboardInterrupt, EOFError):\n option1 = 'E'\n m1 = re.search(r'=>|<=|-|+',option1)\n if not m1:\n print \"Invalid option.. Try again\"\n else:\n choice = 1\n if option1 == '=>': self.Deposit()\n if option1 == '<=': self.Withdrawal()\n if option1 == '-': self.Debit()\n if option1 == '+': self.Credit()\n if option1 == 'E': done = 1", "def withdraw_by_username(self,amount,username):\r\n pass", "def login_menu(self):\n print(\"\\n**** LOGIN MENU ****\")\n print(\"1. Login as BDO \\n2. Login as GPM \\n3. Login as Member\\n4. Exit\")\n choice = input(\"Choose: \")\n\n if choice == '1':\n BlockDevelopmentOfficer(self.conn).login_bdo()\n elif choice == '2':\n GramPanchayatMember(self.conn).login_gpm()\n elif choice == '3':\n Member(self.conn).login_member()\n elif choice == '4':\n print(\"\\nExiting...\")\n self.conn.close()\n else:\n print(\"\\nWrong Input! Try again.\")\n\n if choice != '4':\n self.login_menu()\n\n return True", "def allowed(self, user, amount):\n return True", "def handle_selection_main(self):\n choice = self.get_input()\n if choice == '1':\n self.display_cust()\n elif choice == '2':\n self.is_user = False\n self.display_eng()", "def send_thankyou():\n \n # Loop if user selects list\n full_name = 'list'\n while full_name.lower() == 'list':\n # Create prompt menu\n full_name = input('Please input your Full Name\\n'\n '\\t or list if you would like to see a list of donors >> ')\n \n # Check user input and perform appropriate action \n if full_name.lower() == 'list':\n # Query database for list of donors\n try:\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n sorted_donors = (Donation\n .select(Donation.donor_name)\n .group_by(Donation.donor_name)\n .order_by(Donation.donor_name))\n for donor in sorted_donors:\n print(donor.donor_name)\n \n except Exception as e:\n logger.info(f'Error retrieving donors last donation from database')\n logger.info(e)\n \n finally:\n database.close()\n else:\n prompt_donation(full_name)\n break", "def check_balance():\n print(\"\\n\")\n print(messages.check_balance)\n u_id = pyip.inputInt(\"Your Id: \", greaterThan=0)\n password = pyip.inputPassword(\"Your Password: \")\n\n credentials = {\"id\":u_id, \"password\":password}\n result = BankOperationsBackend.check_balance(credentials)\n start_again() if result else BankOperationsUi.check_balance()", "async def confirm_user(self, name, splits_list=None):\n\n # Generates a list if not provided \n if splits_list is None:\n splits_list = await self._get_all_splits()\n\n # Returns True if name exists, False if name does not\n index = await self._exact_search(name, splits_list)\n return index != -1", "def get_bill_amt():\n\n return float(input(\"How much was your total bill: \"))", "def open_account():\n print(\"\\n\")\n print(messages.open_account)\n u_id = pyip.inputInt(\"Id: \", greaterThan=0)\n name = pyip.inputCustom(raiseNameError, prompt=\"Name: \")\n address = pyip.inputCustom(raiseAddressError, prompt=\"Address: \")\n email = pyip.inputEmail(\"Email: \")\n balance = 
pyip.inputInt(\"Balance: \", min=0)\n password = pyip.inputPassword(\"Password: \")\n\n user_data = [u_id, name, address, balance, email, password]\n result = BankOperationsBackend.open_account(user_data)\n\n start_again() if result else BankOperationsUi.open_account()", "def draw_money(name, bank_id, password):\n amount = int(raw_input(\"Enter Amount to withdraw:\"))\n for i in range(0, len(MY_MEMBER)):\n if MY_MEMBER[i].Name == name and \\\n MY_MEMBER[i].Password == password and \\\n MY_MEMBER[i].BankID == bank_id:\n if MY_MEMBER[i].balance >= amount:\n MY_MEMBER[i].balance -= amount\n new_balance = MY_MEMBER[i].balance\n print\"*************************\"\n print\"****Withdrawing Cash*****\"\n print\"your New Bank balance: %r\" % new_balance\n print\"Amount Withdraw: %r\" % amount\n print\"*************************\"\n\n else:\n print\"your Account Balance is low!! \"\n print\"Transaction Failed...\"\n what_to_do(name, bank_id, password)\n return\n what_to_do(name, bank_id, password)", "def handle_selection_cust(self):\n choice = self.get_input()\n if choice == '1':\n self.display_cust_unlock()\n elif choice == '2':\n self.display_return_car()\n elif choice == '3':\n self.display_main()", "def select_user_and_add_transaction(self):\n def add_transaction(to_user):\n print(\"Amount of transaction:\")\n amount = input()\n new_transaction = transaction.Transaction(amount)\n to_user.add_transaction(new_transaction)\n\n try:\n selected_user = self.prompt_user_selection()\n add_transaction(selected_user)\n except ValueError:\n print(\"No changes made.\")", "def handle_selection_cust_unlock(self):\n choice = self.get_input()\n if choice == '1':\n self.login_menu()\n # elif choice == '2':\n # ...\n elif choice == '3':\n self.display_cust()", "def post_amount_input(message, bot):\n # print(message.text)\n try:\n chat_id = message.chat.id\n amount_entered = message.text\n if amount_entered=='Cancel':\n raise Exception(\"Cancelling record!!\")\n amount_value = validate_entered_amount(amount_entered) # validate\n if amount_value == 0: # cannot be $0 spending\n raise Exception(\"Spent amount has to be a non-zero number.\")\n\n user_bills['cost'] = float(amount_value)\n # print(user_bills)\n # print(user_bills['cost'])\n\n user_bills['timestamp'] = datetime.now()\n # print(user_bills['timestamp'])\n # print(count)\n # print(user_çcbills['number'])\n\n user_history = db.user_bills.find({'user_telegram_id' : message.chat.id})\n maximum = 0\n for rec in user_history:\n maximum = max(maximum, rec['number'])\n # print(maximum)\n # print('done')\n\n # global count_\n user_bills['number'] = maximum+1\n # count_ += 1\n\n get_sharing_details(message, bot)\n\n except Exception as e:\n bot.reply_to(message,str(e))\n display_text = \"\"\n for c in commands: # generate help text out of the commands dictionary defined at the top\n display_text += \"/\" + c + \": \"\n display_text += commands[c] + \"\\n\"\n bot.send_message(chat_id, 'Please select a menu option from below:')\n bot.send_message(chat_id, display_text)", "def confirm():\r\n if(PsdEntry.get() == \"Psd\" and UserEntry.get() == \"User\"):\r\n open()\r\n else:\r\n messagebox.showerror(\"Error\",\"Invalid Username\")", "def prompt_user_account_to_withdrawl():\n print('What account do you want to withdrawl from?:')\n return input()", "def bardear(channel, user):\n name = vos_quien_sos(user)\n if name in THE_BAD_GUYS.keys():\n if ebrios['Office1'] or ebrios['Office2']:\n msg = 'Ah no, mirá quién se suma! '\n else:\n msg = 'PERO MIRÁ QUIEN ORGANIZA! 
'\n msg += '<@' + name + '>! ' + choice(THE_BAD_GUYS[name])\n postea(channel, msg)\n return True\n return False", "def simple_banking_management_functional():\n create_user('private', **USERS['Andreas'])\n create_user('company', **USERS['carrot_inc'])\n\n result = search_private_user('Andreas', 'Gustafsson')\n result_2 = search_company_user('carrot')\n\n register_account('savings', USERS['Andreas']['id_nr'])\n register_account('salary', USERS['Andreas']['id_nr'])\n\n deposit('savings', 100, USERS['Andreas']['id_nr'])\n deposit('salary', 20, USERS['Andreas']['id_nr'])\n\n withdraw('savings', 50, USERS['Andreas']['id_nr'])\n withdraw('salary', 30, USERS['Andreas']['id_nr'])\n\n print(BANK[USERS['Andreas']['id_nr']])", "def send_thank_you():\n full_name = input(\"Enter the donor's full name > \")\n \n donors = donor_names()\n \n if full_name == 'list':\n #Print the donor names and restart the function\n for name in donors:\n print(name)\n \n send_thank_you()\n \n if full_name not in donors:\n donor_db.append([(full_name)])\n \n amount = float( input(\"Enter the donation amount > \") )\n \n for index, row in enumerate(donor_db):\n if row[0] == full_name:\n new_row = row + [amount]\n \n donor_db.pop(index)\n donor_db.append((new_row,))\n \n \n print(\"Thank you, {}! Your generous donation of ${:.2f} will go a long way to feed the needy.\".format(full_name, amount))", "def send_single_thank_you():\n update_lists()\n donor_name = get_name_input()\n\n if donor_name == \"quit\":\n print(\"No donor name entered, exiting to menu\")\n else:\n donor_amount = check_number_input()\n\n if donor_name not in donor_totals_list:\n firstname, lastname = donor_name.split(\" \")\n add_donor(firstname, lastname, donor_name)\n add_donation(donor_name, donor_amount)\n else:\n for donor in donor_totals_list:\n if donor.fullname == donor_name:\n add_donation(donor_name, donor_amount)\n print('\\nDear {},'.format(donor_name))\n print('''\\tThank you for your generous donation of ${:,.2f}\\n\n Sincerely, \\nThe ChickTech Donations Department\\n'''.format(\n donor_amount))\n update_lists()", "def get_player_bet(self) -> None:\n print(\"Please enter the amount you want to bet.\")\n while self.user.bet == 0:\n input_ = input(\">>> \")\n try:\n input_ = float(input_)\n self.user.bet = input_\n except ValueError as e:\n print(str(e))\n continue", "def prompt_user_account_to_deposit():\n print('What account do you want to deposit to?:')\n return input()", "async def process_bj_game(self, ctx, amount, user_id):\n if amount >= 0:\n if not await self.check_in_game(user_id, ctx):\n if amount > await ex.u_currency.get_balance(user_id):\n await ctx.send(f\"> **{ctx.author}, you can not bet more than your current balance.**\")\n else:\n return True\n else:\n await ctx.send(f\"> **{ctx.author}, you can not bet a negative number.**\")", "def main():\n user_answer = prompt_user_what_to_do_next()\n while 'q' != user_answer:\n list_of_all_accounts_known = ATMBankAccount.read_in_account_numbers_and_balances()\n if '1' == user_answer:\n starting_account_balance_ammount = prompt_user_for_starting_balance()\n create_an_account_for_user(list_of_all_accounts_known, int(starting_account_balance_ammount))\n elif '2' == user_answer:\n print_out_account_balances(list_of_all_accounts_known)\n elif '3' == user_answer:\n user_to_account_deposit = prompt_user_account_to_deposit()\n user_money_to_deposit = prompt_user_money_to_deposit()\n ATMBankAccount.deposit_to_account(list_of_all_accounts_known, user_to_account_deposit, user_money_to_deposit)\n 
print_out_account_balances(list_of_all_accounts_known)\n elif '4' == user_answer:\n user_to_account_withdrawl = prompt_user_to_withdrawl()\n user_money_to_withdrawl = prompt_user_money_to_withdrawl()\n ATMBankAccount.withdrawl_fund_from_account(list_of_all_accounts_known, user_to_account_withdrawl, user_money_to_withdrawl)\n print_out_account_balances(list_of_all_accounts_known)\n elif '5' == user_answer:\n user_account_to_get_interest = prompt_user_account_to_get_interest()\n ATMBankAccount.calculate_half_percent_interest_on_account(list_of_all_accounts_known, user_account_to_get_interest)\n print_out_account_balances(list_of_all_accounts_known)\n user_answer = prompt_user_what_to_do_next()\n break\n ATMBankAccount.write_out_account_numbers_and_balances(list_of_all_accounts_known)" ]
[ "0.5349768", "0.5122272", "0.51155144", "0.5085779", "0.5071575", "0.5061397", "0.5043871", "0.49912462", "0.49727002", "0.4970632", "0.4967183", "0.49505353", "0.4948263", "0.4942679", "0.49339795", "0.49235326", "0.492086", "0.49188274", "0.4916647", "0.49121168", "0.49118537", "0.49102482", "0.48687303", "0.48534346", "0.4841657", "0.48411313", "0.4838291", "0.48161066", "0.48010427", "0.4792659" ]
0.552782
0
Template method for executing the command.
def execute_command(self): raise Exception("Not implemented")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _process_command(self, **kwargs):\n return self.run_command(**kwargs)", "def execute(self) :\n \n raise NotImplementedError()", "def execute(self):\r\n pass", "def execute_command(self, command):\n raise NotImplementedError", "def execute(self):\n pass", "def execute(self):\n pass", "def execute(self):\n pass", "def execute(self):\n pass", "def execute(self):\n pass", "def execute(self):\n pass", "def execute(self):\n\n pass", "def execute(self):\n\n pass", "def execute(self):\n\n pass", "def execute(self):\n\n pass", "def execute(self):\n\t\tpass", "def do_command(self, args):\n pass", "async def _run_command(self, command, *args, **kwargs):\n pass", "def execute(self, *args, **kwargs):\n pass", "def runCommand(self): \\\n # pylint: disable=no-self-use", "def execute(self):\n raise NotImplementedError", "def execute(self):\n raise NotImplementedError", "def execute():\n pass", "def execute(self) -> None:\n self.command(self.target)", "def execute(self) -> None:\n raise NotImplementedError", "def execute_command(self):\n return ''", "def __call__(self):\n context = Context()\n return self.recipe.execute(context, self.cmd, self.cmd_args)", "def execute(self):\n raise NotImplementedError(\"Subclasses should override this method.\")", "def _execute(self, _):\r\n pass", "def execute(self) -> None:\n pass # Implement in Executors", "def execute(self):\n raise NotImplementedError('execute')" ]
[ "0.7968591", "0.7832499", "0.77924705", "0.77703875", "0.7767006", "0.7767006", "0.7767006", "0.7767006", "0.7767006", "0.7767006", "0.77667135", "0.77667135", "0.77667135", "0.77667135", "0.77194923", "0.77103496", "0.77051127", "0.7691909", "0.768377", "0.7665118", "0.7665118", "0.76581186", "0.7648969", "0.7634001", "0.758396", "0.7575497", "0.75701636", "0.75677913", "0.75042355", "0.74908423" ]
0.8045764
0
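The positive document in the record above is a bare template-method stub. A minimal sketch of how such a stub is normally completed — the `Command`/`PrintCommand` class names and the `run` wrapper below are illustrative assumptions, not part of the dataset — might look like this:

import logging

class Command:
    def run(self):
        # Shared orchestration lives in the base class; the hook is deferred.
        self.execute_command()

    def execute_command(self):
        # Subclasses are expected to override this hook.
        raise Exception("Not implemented")

class PrintCommand(Command):
    def execute_command(self):
        # Concrete behaviour supplied by the subclass.
        print("executing")

PrintCommand().run()  # prints "executing"; calling Command().run() would raise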
Get the detections from the model using the generator.
def _get_detections(args, generator, model, score_threshold=0.05, max_detections=100, save_path=None): all_detections = [[None for i in range(generator.num_classes()) if generator.has_label(i)] for j in range(generator.size())] detection_out = np.zeros([generator.size(),512,512,3]) # detection_out = np.zeros([generator.size(),512,512]) attention_out = np.zeros([generator.size(),512,512]) mask_out = np.zeros([generator.size(),512,512]) for i in tqdm(range(generator.size()), desc='Running network: '): raw_image = generator.load_image(i) # image = np.expand_dims(raw_image.copy(), axis=-1) # image = np.repeat(image, 3, axis=-1) # image = generator.preprocess_image(image) image = generator.preprocess_image(raw_image.copy()) image, scale = generator.resize_image(image) if keras.backend.image_data_format() == 'channels_first': image = image.transpose((2, 0, 1)) # run network # boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))[:3] boxes, scores, labels, masks, attention_map = model.predict_on_batch(np.expand_dims(image, axis=0)) # print('scores:', scores.shape) # print('labels',labels.shape) # correct boxes for image scale boxes /= scale # select indices which have a score above the threshold indices = np.where(scores[0, :] > score_threshold)[0] # print('indices', indices) scores = scores.numpy() boxes = boxes.numpy() labels = labels.numpy() masks = masks.numpy() attention_map = attention_map.numpy() # select those scores scores = scores[0][indices] # find the order with which to sort the scores scores_sort = np.argsort(-scores)[:max_detections] # print(scores_sort) # select detections image_boxes = boxes[0, indices[scores_sort], :] image_scores = scores[scores_sort] image_labels = labels[0, indices[scores_sort]] image_detections = np.concatenate([image_boxes, np.expand_dims(image_scores, axis=1), np.expand_dims(image_labels, axis=1)], axis=1) if save_path is not None: draw_annotations(raw_image, generator.load_annotations(i), label_to_name=generator.label_to_name) draw_detections(raw_image, image_boxes, image_scores, image_labels, score_threshold=args.detection_threshold, label_to_name=generator.label_to_name) detection_out[i, :, :] = raw_image attention_map[np.where(attention_map < args.attention_threshold)] = 0 # attention_out[i, :, :] = cv2.flip( cv2.resize(np.squeeze(np.uint8(attention_map * 255)), (origin_shape[1], origin_shape[0])), 0) attention_out[i, :, :] = cv2.resize(np.squeeze(np.uint8(attention_map * 255)), (512, 512)) masks[masks < args.segmentation_threshold] = 0 masks = cv2.resize(np.squeeze(np.uint8(masks * 255)), (512, 512)) mask_out[i, :, :] = masks # copy detections to all_detections for label in range(generator.num_classes()): if not generator.has_label(label): continue all_detections[i][label] = image_detections[image_detections[:, -1] == label, :-1] if save_path is not None: detection_out = sitk.GetImageFromArray(detection_out) sitk.WriteImage(detection_out, os.path.join(save_path, 'detection_result.nii.gz')) attention_out = sitk.GetImageFromArray(attention_out) sitk.WriteImage(attention_out, os.path.join(save_path, 'attention_result.nii.gz')) mask_out = sitk.GetImageFromArray(mask_out) sitk.WriteImage(mask_out, os.path.join(save_path, 'masks_result.nii.gz')) return all_detections
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_detections(self):\n self.rescue_model.setInput(self.human_blob)\n self.predictions = self.rescue_model.forward()", "def get_detections(self):\n frame = self.get_still()\n return detector.process_frame(frame, False)", "def extract_face_detections(self):\n self.detector.setInput(self.image_blob)\n self.detections = self.detector.forward()", "def get_test_fetch(synth_placeholder,model):\n with tf.variable_scope(\"G\"):\n generated_images = model.generator(synth_placeholder,FLAGS.channels)\n return generated_images", "def get_detections(self, image):\n self.img = jetson.utils.cudaFromNumpy(image)\n self.width = image.shape[1]\n self.height = image.shape[0]\n detections = self._net.Detect(self.img, self.width, self.height)\n print(\"The inference is happening at \" + str(self._net.GetNetworkFPS()) + \" FPS\")\n return detections, jetson.utils.cudaToNumpy(self.img)", "def detection(self, model_infos, trained_images=None):\n # Index of the class in the list is its ID. For example, to get ID of\n class_names = ['BG', 'red_s', 'red_m', 'red_l', 'yellow_s', 'yellow_m', 'yellow_l', 'green_s', 'green_m',\n 'green_l', 'blue_s', 'blue_m', 'blue_l', 'orange_s', 'orange_m', 'orange_l']\n config = ShapesConfig()\n detect_model = modellib.MaskRCNN(mode=\"inference\", model_dir=MODEL_DIR, config=config, model_info=model_infos)\n # Load weights trained on current model\n cur_model_path = os.path.join(model_infos[0], model_infos[1]+'.h5')\n cur_model_weights = os.path.join(MODEL_DIR, cur_model_path)\n detect_model.load_weights(cur_model_weights, by_name=True)\n # Traverse all the packages(the pool)\n result_of_detection = {}\n for package in self.images_pool:\n image_dir = os.path.join(DATA_DIR, package)\n images_in_package = os.listdir(image_dir)\n # import ground truth to check out the detection result\n instance_nums_of_images = self.count_instances_in_images(package)\n for img in images_in_package:\n # Skip detection of those images that already used for training\n if trained_images:\n if img in trained_images:\n continue\n image = skimage.io.imread(os.path.join(image_dir, img), as_gray=False)\n # Run detection\n results = detect_model.detect([image], verbose=0)\n r = results[0]\n \"\"\"\n # average entropy model\n total_entropy = 0\n for prob in r['scores']:\n total_entropy -= prob * math.log2(prob) + (1 - prob) * math.log2(1 - prob)\n result_of_detection[img] = total_entropy / len(r['scores']) if r['scores'] != [] else total_entropy\n \"\"\"\n # use dict to save the info of the detected instances of each images\n # min detection model\n\n gt_instances = instance_nums_of_images[img.split('.')[0]]\n result_of_detection[img] = abs(len(r['scores']) - gt_instances)\n\n # print(result_of_detection)\n print(\"+++++++detection finished\")\n del detect_model\n del config\n return result_of_detection", "def detected(self):\n return self.detections", "def __detect_objs(self):\n while True:\n # Wait for input images\n if (not self.__predict_start) or \\\n (self.__img is None):\n continue\n\n # Client for detection\n client = vision.ImageAnnotatorClient()\n\n # Encode image to binary\n _, img_buffer = cv2.imencode(\".jpg\", self.__img)\n img_bytes = img_buffer.tobytes()\n\n # Change to vision Image type\n image = vision.Image(content=img_bytes)\n # Detect Person\n self.__detect_info = client.object_localization(image=image,\n max_results=self.__max_results\n ).localized_object_annotations\n cv2.waitKey(30)", "def get_object_detections(self):\n detections = self.__get_cropped_detections(self.image)\n 
return detections", "def detect_objects(self, image):\n # Feed the input image to the model\n self.set_input_tensor(image)\n self.model.invoke()\n\n # Get all outputs from the model\n boxes = self.get_output_tensor(0)\n classes = self.get_output_tensor(1)\n scores = self.get_output_tensor(2)\n count = int(self.get_output_tensor(3))\n\n results = []\n for i in range(count):\n result = {\n 'bounding_box': boxes[i],\n 'class_id': int(classes[i]),\n 'score': scores[i]\n }\n results.append(result)\n return results", "def evaluate(\n args,\n generator,\n model,\n iou_threshold=0.5,\n score_threshold=0.05,\n max_detections=100,\n save_path=None\n):\n # gather all detections and annotations\n all_detections = _get_detections(args, generator, model, score_threshold=score_threshold, max_detections=max_detections, save_path=save_path)\n all_annotations = _get_annotations(generator)\n # print('all detections:', all_detections)\n # print('all all_annotations:', all_annotations)\n average_precisions = {}\n\n # all_detections = pickle.load(open('all_detections.pkl', 'rb'))\n # all_annotations = pickle.load(open('all_annotations.pkl', 'rb'))\n # pickle.dump(all_detections, open('all_detections.pkl', 'wb'))\n # pickle.dump(all_annotations, open('all_annotations.pkl', 'wb'))\n\n # process detections and annotations\n # print('###########')\n # print(generator.num_classes())\n\n for label in range(generator.num_classes()):\n if not generator.has_label(label):\n print('generator has not label')\n continue\n\n false_positives = np.zeros((0,))\n true_positives = np.zeros((0,))\n false_negatives = np.zeros((0,))\n scores = np.zeros((0,))\n num_annotations = 0.0\n\n for i in range(generator.size()):\n detections = all_detections[i][label]\n annotations = all_annotations[i][label]\n num_annotations += annotations.shape[0]\n detected_annotations = []\n\n for d in detections:\n scores = np.append(scores, d[4])\n\n if annotations.shape[0] == 0:\n # print('NO bbox annos')\n # print(d)\n false_positives = np.append(false_positives, 0)\n true_positives = np.append(true_positives, 0)\n false_negatives = np.append(false_negatives, 1)\n continue\n\n overlaps = compute_overlap(np.expand_dims(d, axis=0), annotations)\n assigned_annotation = np.argmax(overlaps, axis=1)\n max_overlap = overlaps[0, assigned_annotation]\n\n if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:\n false_positives = np.append(false_positives, 0)\n true_positives = np.append(true_positives, 1)\n detected_annotations.append(assigned_annotation)\n else:\n false_positives = np.append(false_positives, 1)\n true_positives = np.append(true_positives, 0)\n\n # no annotations -> AP for this class is 0 (is this correct?)\n if num_annotations == 0:\n average_precisions[label] = 0, 0\n continue\n\n # print(num_annotations)\n # print(all_detections[0][label].shape)\n # print(all_detections[0][label])\n TP = sum(true_positives)\n FP = sum(false_positives)\n FN = sum(false_negatives)\n # sort by score\n indices = np.argsort(-scores)\n false_positives = false_positives[indices]\n true_positives = true_positives[indices]\n\n # compute false positives and true positives\n false_positives = np.cumsum(false_positives)\n true_positives = np.cumsum(true_positives)\n\n # compute recall and precision\n recall = true_positives / num_annotations\n precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)\n\n\n print('TP:', TP)\n print('FP:', FP)\n print('FN:', FN)\n print('Recall:', TP / (TP + FN))\n 
print('Precision:', TP / (TP + FP))\n\n # compute average precision\n average_precision = _compute_ap(recall, precision)\n average_precisions[label] = average_precision, num_annotations\n\n return average_precisions", "def generate_images(self, count):\n # Generate images from the currently loaded model\n noise = np.random.normal(0, 1, (count, self.dimensions_noise))\n return self.generator.predict(noise)", "def get_model_detection_function(model):\n\n @tf.function\n def detect_fn(image):\n \"\"\"Detect objects in image.\"\"\"\n\n image, shapes = model.preprocess(image)\n prediction_dict = model.predict(image, shapes)\n detections = model.postprocess(prediction_dict, shapes)\n\n return detections, prediction_dict, tf.reshape(shapes, [-1])\n\n return detect_fn", "def evaluate(\n model,\n generator,\n iou_threshold=0.5,\n):\n # 类别\n classes = generator.get_categories()\n # mAP 用来保存类别对应的AP值\n average_precisions = {}\n # 记录每个类别的标准框数量\n classes_num_annotations = {}\n # 得到批次大小\n batch_size = generator.batch_size\n # 得到所有图片数量\n all_image_num = len(generator)*batch_size\n # 得到空的detections and annotations\n all_detections = [[None for i in range(generator.num_classes())] for j in range(all_image_num)]\n all_annotations = [[None for i in range(generator.num_classes())] for j in range(all_image_num)]\n all_scores = [[None for i in range(generator.num_classes())] for j in range(all_image_num)]\n # 循环每张图片\n for i in tqdm(range(len(generator))):\n batch_imgs, batch_metas, batch_bboxes, batch_labels = generator[i]\n preds = model.predict(batch_imgs, batch_metas, box_mapping_back=False)\n\n # 一个批次可能有多张图片\n for j,pred in enumerate(preds):\n # 取出不为0的标签位置\n idx = np.where(batch_labels[j]!=0)\n # 取出不为0的真实标签\n gt_boxes = batch_bboxes[j,idx]\n # 取出不为0的真实标注框\n gt_labels = batch_labels[j,idx]\n # 预测结果不是空值\n if len(pred['class_ids'])!=0:\n # 预测概率\n scores = pred['scores']\n # 预测类别\n pred_labels = pred['class_ids']\n # 预测框\n pred_boxes = pred['rois']\n # 循环每个类别\n for label in range(generator.num_classes()):\n # 保存每张图片的检测框预测结果\n all_detections[i*batch_size+j][label] = pred_boxes[pred_labels == label, :]\n # 保存每张图片的真实标注框坐标\n all_annotations[i*batch_size+j][label] = gt_boxes[gt_labels == label, :]\n # 保存每张图片的预测框概率值\n all_scores[i*batch_size+j][label] = scores[pred_labels == label] \n else:\n # 循环每个类别\n for label in range(generator.num_classes()):\n # 保存每张图片的检测框预测结果\n all_detections[i*batch_size+j][label] = None\n # 保存每张图片的真实标注框坐标\n all_annotations[i*batch_size+j][label] = gt_boxes[gt_labels == label, :]\n # 保存每张图片的预测框概率值\n all_scores[i*batch_size+j][label] = 0 \n\n # 循环每个类别\n for label in range(generator.num_classes()):\n # 假正例\n false_positives = np.zeros((0,))\n # 真正例\n true_positives = np.zeros((0,))\n # 保存概率值\n scores = np.zeros((0,))\n # 真实标注框数量\n num_annotations = 0.0\n # 循环所有图片\n for i in range(all_image_num):\n # 预测框\n detections = all_detections[i][label]\n # 真实标注框\n annotations = all_annotations[i][label]\n # 真实标注框数量\n num_annotations += annotations.shape[0]\n # 用来保存检测到的真实标注框索引\n detected_annotations = []\n # 循环预测框\n for j,d in enumerate(detections):\n if d is not None:\n # 保存改预测框的概率值\n scores = np.append(scores, all_scores[i][label][j])\n # 如果该类别真实没有真实标注框\n if annotations.shape[0] == 0:\n # 假正例1个\n false_positives = np.append(false_positives, 1)\n # 真正例0个\n true_positives = np.append(true_positives, 0)\n continue\n # 计算预测框与真实标注框交并比\n overlaps = iou.compute_overlaps(np.expand_dims(d, axis=0), annotations)\n # 变成numpy数据\n overlaps = overlaps.numpy()\n # 求预测框最大交并比对应的真实标注的索引\n assigned_annotation = 
np.argmax(overlaps, axis=1)\n # 得到预测框与真实标注框的最大交并比\n max_overlap = overlaps[0, assigned_annotation]\n # 如果iou大于阈值,并且改索引不在记录索引的list中\n if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:\n # 假正例0个\n false_positives = np.append(false_positives, 0)\n # 真正例1个\n true_positives = np.append(true_positives, 1)\n # 把该真实标注框的索引加入list中\n detected_annotations.append(assigned_annotation)\n else:\n # 假正例1个\n false_positives = np.append(false_positives, 1)\n # 真正例0个\n true_positives = np.append(true_positives, 0)\n # 关于该类别的假正例和真正例都统计完成后\n # 如果真实标注框的数量为0,那么该类别的AP等于0,可能是有bug\n if num_annotations == 0:\n average_precisions[classes[label]] = 0\n # 存入字典\n classes_num_annotations[classes[label]] = 0\n continue\n\n # 对预测框分数从大到小进行排序\n indices = np.argsort(-scores)\n # 根据新的索引取出假正例和真正例\n false_positives = false_positives[indices]\n true_positives = true_positives[indices]\n\n # cumsum逐次累加\n false_positives = np.cumsum(false_positives)\n true_positives = np.cumsum(true_positives)\n\n # 计算召回率,召回率是越来越高的\n recall = true_positives / num_annotations\n # np.finfo(np.float64).eps,2.22e-16防止分母为0\n # 计算精确率,精确率是上下起伏的\n precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)\n\n # 计算AP\n average_precision = compute_ap(recall, precision)\n # 存入字典\n average_precisions[classes[label]] = average_precision \n # 存入字典\n classes_num_annotations[classes[label]] = num_annotations\n\n return average_precisions,classes_num_annotations", "def get_model_detection_function(model):\r\n\r\n @tf.function\r\n def detect_fn(image):\r\n \"\"\"Detect objects in image.\"\"\"\r\n\r\n image, shapes = model.preprocess(image)\r\n prediction_dict = model.predict(image, shapes)\r\n detections = model.postprocess(prediction_dict, shapes)\r\n\r\n return detections, prediction_dict, tf.reshape(shapes, [-1])\r\n\r\n return detect_fn", "def get_model_detection_function(model):\n\n @tf.function\n def detect_fn(image):\n \"\"\"Detect objects in image.\"\"\"\n\n # image= tf.convert_to_tensor(image, dtype=tf.float32)\n\n image, shapes = model.preprocess(image)\n prediction_dict = model.predict(image, shapes)\n detections = model.postprocess(prediction_dict, shapes)\n\n return detections, prediction_dict, tf.reshape(shapes, [-1])\n\n return detect_fn", "def dataset(self):\n for d in dirlist(os.path.join(self.datadir)):\n for f in imlist(d):\n yield ImageDetection(filename=f).category(filebase(d))", "def fetch_models(self):\n if self.model_pool is None:\n print(\"Please train a model first.\", file=STDE)\n EXIT(1)\n else:\n return [copy.deepcopy(m.steps[-1][-1]) for m in self.model_pool]", "def get_models():\n D = Discriminator(num_channels=params.num_channels,\n conv_dim=params.d_conv_dim,\n image_size=params.image_size,\n num_gpu=params.num_gpu,\n num_extra_layers=params.num_extra_layers,\n use_BN=False)\n G = Generator(num_channels=params.num_channels,\n z_dim=params.z_dim,\n conv_dim=params.g_conv_dim,\n image_size=params.image_size,\n num_gpu=params.num_gpu,\n num_extra_layers=params.num_extra_layers,\n use_BN=True)\n\n # init weights of models\n D.apply(init_weights)\n G.apply(init_weights)\n\n # restore model weights\n if params.d_model_restore is not None and \\\n os.path.exists(params.d_model_restore):\n D.load_state_dict(torch.load(params.d_model_restore))\n if params.g_model_restore is not None and \\\n os.path.exists(params.g_model_restore):\n G.load_state_dict(torch.load(params.g_model_restore))\n\n # check if cuda is available\n if torch.cuda.is_available():\n cudnn.benchmark = 
True\n D.cuda()\n G.cuda()\n\n print(D)\n print(G)\n\n return D, G", "def get_model_detection_function(model):\n\n @tf.function\n def detect_fn(image):\n \"\"\"Detect objects in image.\"\"\"\n\n image, shapes = model.preprocess(image)\n prediction_dict = model.predict(image, shapes)\n detections = model.postprocess(prediction_dict, shapes)\n\n return detections, prediction_dict, tf.reshape(shapes, [-1])\n\n return detect_fn", "def sample(self, detections):\n\n raise NotImplementedError", "def generate_detections(encoder, mot_dir, output_dir, detection_dir=None):\n if detection_dir is None:\n detection_dir = mot_dir\n try:\n os.makedirs(output_dir)\n except OSError as exception:\n if exception.errno == errno.EEXIST and os.path.isdir(output_dir):\n pass\n else:\n raise ValueError(\n \"Failed to created output directory '%s'\" % output_dir)\n\n for sequence in os.listdir(mot_dir):\n print(\"Processing %s\" % sequence)\n sequence_dir = os.path.join(mot_dir, sequence)\n\n # image_dir = os.path.join(sequence_dir, \"img1\")\n image_dir = sequence_dir\n image_filenames = {\n int(f[6:10]): os.path.join(image_dir, f) \n for f in os.listdir(image_dir) if os.path.isfile(os.path.join(image_dir, f))}\n\n detection_file = os.path.join(\n detection_dir, sequence, \"det/det.txt\")\n detections_in = np.loadtxt(detection_file, delimiter=' ')\n detections_out = []\n\n frame_indices = detections_in[:, 0].astype(np.int)\n min_frame_idx = frame_indices.astype(np.int).min()\n max_frame_idx = frame_indices.astype(np.int).max()\n for frame_idx in range(min_frame_idx, max_frame_idx + 1):\n print(\"Frame %05d/%05d\" % (frame_idx, max_frame_idx))\n mask = frame_indices == frame_idx\n rows = detections_in[mask]\n\n if frame_idx not in image_filenames:\n print(\"WARNING could not find image for frame %d\" % frame_idx)\n continue\n bgr_image = cv2.imread(\n image_filenames[frame_idx], cv2.IMREAD_COLOR)\n features = encoder(bgr_image, rows[:, 2:6].copy())\n detections_out += [np.r_[(row, feature)] for row, feature\n in zip(rows, features)]\n\n output_filename = os.path.join(output_dir, \"%s.npy\" % sequence)\n np.save(\n output_filename, np.asarray(detections_out), allow_pickle=False)", "def min_detection_strategy(self, init_model_infos):\n model_folder = 'min_detection_model_v2'\n result = self.detection(init_model_infos)\n # here add some methods to make select more 'clever'\n rank_hard_images = sorted(result.items(), key=lambda item:item[1], reverse=True)\n total_amount = 30\n trained_images = []\n # Select most hard images (30 as a step)\n # Start training with select images\n while total_amount < 150:\n al_model = TrainingProcess()\n al_model_data = []\n \"\"\"\n # CEAL to get better result pick 15 most hard and 15 most easy\n for item in rank_hard_images[:20]:\n al_model_data.append(item[0])\n trained_images.append(item[0])\n for item in rank_hard_images[-10:]:\n al_model_data.append(item[0])\n trained_images.append(item[0])\n print('select images are:', al_model_data)\n \"\"\"\n # To keep the distribution same, take the package that have the most hard images for training\n package_distrib = [0] * 11\n for item in rank_hard_images[:30]:\n package_distrib[(int(item[0].split('.')[0]) -1) // 30] += 1\n package_id = package_distrib.index(max(package_distrib))\n image_to_package_dir = os.path.join(DATA_DIR, \"package%s\" % package_id)\n al_model_data = os.listdir(image_to_package_dir)\n print('select package are:', package_id)\n print('select images are:', al_model_data)\n total_amount += 30\n if total_amount == 60:\n 
last_model_info = init_model_infos\n else:\n last_model_info = al_model_info\n last_model_path = os.path.join(last_model_info[0], last_model_info[1] + '.h5')\n last_model_weights = os.path.join(MODEL_DIR, last_model_path)\n al_model_info = [model_folder, '%s_images_model' % total_amount]\n al_model.train_model(al_model_data, al_model_info, self.dataset_val, cur_model_path=last_model_weights)\n al_model.mAP_of_model(al_model_info, self.dataset_val)\n result = self.detection(al_model_info, trained_images)\n rank_hard_images = sorted(result.items(), key=lambda item:item[1], reverse=True)\n del al_model\n print(\"Ending selection\")", "def im_list_detections(model, im_list):\n _t = Timer()\n num_images = len(im_list)\n im_list_boxes = [[] for _ in range(num_images)]\n im_list_scores = [[] for _ in range(num_images)]\n im_list_ids = [[] for _ in range(num_images)]\n im_list_classes = [[] for _ in range(num_images)]\n # create anchors for each level\n anchors = create_cell_anchors()\n for i in range(num_images):\n im_list_ids[i] = im_list[i]['id']\n im = cv2.imread(im_list[i]['image'])\n with c2_utils.NamedCudaScope(0):\n _t.tic()\n im_list_boxes[i], im_list_scores[i], im_list_classes[i] = \\\n im_detections(model, im, anchors)\n _t.toc()\n logger.info(\n 'im_detections: {:d}/{:d} {:.3f}s'.format(\n i + 1, num_images, _t.average_time))\n return im_list_boxes, im_list_scores, im_list_classes, im_list_ids", "def detect(self, images, verbose=0):\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n #assert len(images) == self.config.BATCH_SIZE, \"len(images) must be equal to BATCH_SIZE\"\n\n if verbose:\n log(\"Processing {} images\".format(len(images)))\n for image in images:\n log(\"image\", image)\n\n # Mold inputs to format expected by the neural network\n molded_images = []\n for img in images:\n molded_images.append(mold_image(img))\n\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape,\\\n \"After resizing, all images must have the same size. 
Check IMAGE_RESIZE_MODE and image sizes.\"\n\n molded_images = np.asarray(molded_images)\n if verbose:\n log(\"molded_images\", molded_images)\n # Run object detection\n y = self.keras_model.predict([molded_images], verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(images):\n results.append(y[i][0][0][1])\n return results", "def load_detections(det_dir):\n detections = []\n\n for curd in os.listdir(det_dir):\n sub_dir = os.path.join(det_dir, curd)\n if os.path.isdir(sub_dir):\n\n for f in os.listdir(sub_dir):\n filepath = os.path.join(sub_dir, f)\n\n if os.path.isfile(filepath) and filepath.endswith('txt'):\n # find detection and read it\n with open(filepath, 'r') as rf:\n # filename = rf.readline().strip('\\n')\n # dets2read = int(rf.readline())\n data = []\n\n for l in rf.readlines():\n # for i in range(dets2read):\n # x, y, w, h, c = rf.readline().split()\n x, y, w, h, c = l.split()\n data.append({'x':float(x), 'y':float(y), 'w':float(w)-float(x), 'h':float(h)-float(y), 'c':float(c)})\n #detections.append({'filename': filename, 'data': data})\n detections.append({'filename': os.path.splitext(f)[0], 'data': data})\n\n return detections", "def update(self):\n self.detections = []\n return self.detections", "def detect_objects(self, image):\n preprocessed_image = self._preprocess_image(image)\n\n # Feed the input image to the model\n self._set_input_tensor(preprocessed_image)\n self._interpreter.invoke()\n\n # Get all outputs from the model\n boxes = self._get_output_tensor(0)\n classes = self._get_output_tensor(1)\n scores = self._get_output_tensor(2)\n count = int(self._get_output_tensor(3))\n\n results = []\n for i in range(count):\n if scores[i] >= self.THRESHOLD:\n result = {\n 'bounding_box': boxes[i],\n 'class_id': classes[i],\n 'score': scores[i]\n }\n results.append(result)\n return results", "def detect_fn(image):\n\n image, shapes = model.preprocess(image)\n prediction_dict = model.predict(image, shapes)\n detections = model.postprocess(prediction_dict, shapes)\n\n return detections, prediction_dict, tf.reshape(shapes, [-1])", "def get_preds_and_labels(model, generator):\n preds = []\n labels = []\n for _ in range(int(np.ceil(generator.samples / BATCH_SIZE))):\n x, y = next(generator)\n preds.append(model.predict(x))\n labels.append(y)\n # Flatten list of numpy arrays\n return np.concatenate(preds).ravel(), np.concatenate(labels).ravel()" ]
[ "0.6862919", "0.65428936", "0.6540743", "0.63694745", "0.62906307", "0.6126367", "0.6088731", "0.60303426", "0.6010036", "0.59128106", "0.5887276", "0.58750725", "0.58260036", "0.58221745", "0.58056706", "0.5803134", "0.5769824", "0.5766133", "0.5759714", "0.5739925", "0.5706238", "0.5692298", "0.56838006", "0.56398636", "0.56039226", "0.55696094", "0.5565203", "0.5537138", "0.5527984", "0.5526018" ]
0.7306675
0
Logger name of this loggable object.
def logger_name(self): return self.__class__.__name__
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log_name(self) -> Optional[str]:\n return self._log_name", "def logPrefix(self):\n return self.__class__.__name__", "def logger_name( self ):\n return Constants.LogKeys.steps", "def log_group_name(self) -> str:\n return jsii.get(self, \"logGroupName\")", "def log_group_name(self) -> str:\n return jsii.get(self, \"logGroupName\")", "def log_group_name(self) -> str:\n ...", "def logging_id(self) -> str:\n return getattr(self, '_logging_id_', self.__class__.__qualname__)", "def log_stream_name(self) -> str:\n ...", "def logger(self):\n return logging.getLogger(self.logger_name)", "def logger(self) -> logging.Logger:\n cls = type(self)\n return logging.getLogger(cls.__module__ + \".\" + cls.__name__)", "def logger(self):\n my_id = id(self)\n name = self.__class__.__name__\n logger_name = '{name}.{my_id}'.format(my_id=my_id, name=name)\n\n logger = self.sdk.loggers.get(logger_name)\n if logger is None:\n self.sdk.loggers[logger_name] = getLogger(\n '{name}'.format(name=logger_name))\n\n return self.sdk.loggers[logger_name]", "def __getattr__(self, name):\n return getattr(self.logger, name)", "def _log_name():\n return os.path.splitext(os.path.basename(__file__))[0]", "def logger(self):\n return self.logging", "def log(self):\n if self._log is None:\n self._log = Logger().get_logger(self.__class__.__name__)\n return self._log", "def _logger(self) -> logging.Logger:\n return logging.getLogger(\n type(self).__name__\n )", "def getLogFile(self):\r\n return LOG.getLogFile().name", "def logger(self) -> 'Logger':\n return self.Logger", "def logger(self):\n return logging", "def get_name(self):\n return \"{0}: \".format(self.__class__.__name__)", "def logger(self):\r\n return self._logger", "def getlogger(self):\n return self.logger", "def get_logger(self):\n return self.__logger", "def logger(self):\n pass", "def name(self):\n names = {}\n for key, logger in self._loggers.items():\n names[key] = logger.name\n return names", "def get(name):\r\n log = logging.getLogger(\"%s.%s\" % (ROOT_NAME, name))\r\n return log", "def logger(self):\n return self._pinnacle.logger", "def get_logger(self):\n return self.logger", "def get_logger(self):\n return self.logger", "def logger(self):\n return self._logger" ]
[ "0.7734195", "0.74924845", "0.74675226", "0.7257177", "0.7257177", "0.71899426", "0.70215774", "0.69762486", "0.69282496", "0.6895209", "0.6883889", "0.68143374", "0.67916155", "0.67857605", "0.6767836", "0.6763246", "0.66945565", "0.6686084", "0.66589147", "0.6648707", "0.6643365", "0.6627692", "0.6626159", "0.66260433", "0.66189104", "0.661483", "0.66141826", "0.660038", "0.660038", "0.6583805" ]
0.8728791
0
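A short usage sketch for the `logger_name` method in the record above — the `Loggable`/`Worker` class names and the `get_logger` helper are assumptions added for illustration only:

import logging

class Loggable:
    def logger_name(self):
        # Mirrors the positive document above: default to the concrete class name.
        return self.__class__.__name__

    def get_logger(self):
        return logging.getLogger(self.logger_name())

class Worker(Loggable):
    pass

Worker().get_logger().warning("hello")  # emitted under the 'Worker' logger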
Plots the sample cost and rate of a given fault over the injection times defined in the app sampleapproach
def samplecost(app, endclasses, fxnmode, samptype='std', title=""): associated_scens=[] for phase in app.phases: associated_scens = associated_scens + app.scenids.get((fxnmode, phase), []) costs = np.array([endclasses[scen]['cost'] for scen in associated_scens]) times = np.array([time for phase, timemodes in app.sampletimes.items() if timemodes for time in timemodes if fxnmode in timemodes.get(time)] ) rates = np.array(list(app.rates_timeless[fxnmode].values())) tPlot, axes = plt.subplots(2, 1, sharey=False, gridspec_kw={'height_ratios': [3, 1]}) phasetimes_start =[times[0] for phase, times in app.phases.items()] phasetimes_end =[times[1] for phase, times in app.phases.items()] ratetimes =[] ratesvect =[] phaselocs = [] for (ind, phasetime) in enumerate(phasetimes_start): axes[0].axvline(phasetime, color="black") phaselocs= phaselocs +[(phasetimes_end[ind]-phasetimes_start[ind])/2 + phasetimes_start[ind]] axes[1].axvline(phasetime, color="black") ratetimes = ratetimes + [phasetimes_start[ind]] + [phasetimes_end[ind]] ratesvect = ratesvect + [rates[ind]] + [rates[ind]] #axes[1].text(middletime, 0.5*max(rates), list(app.phases.keys())[ind], ha='center', backgroundcolor="white") #rate plots axes[1].set_xticks(phaselocs) axes[1].set_xticklabels(list(app.phases.keys())) axes[1].plot(ratetimes, ratesvect) axes[1].set_xlim(phasetimes_start[0], phasetimes_end[-1]) axes[1].set_ylim(0, np.max(ratesvect)*1.2 ) axes[1].set_ylabel("Rate") axes[1].set_xlabel("Time ("+str(app.units)+")") axes[1].grid() #cost plots axes[0].set_xlim(phasetimes_start[0], phasetimes_end[-1]) axes[0].set_ylim(0, 1.2*np.max(costs)) if samptype=='fullint': axes[0].plot(times, costs, label="cost") else: if samptype=='quadrature' or samptype=='pruned piecewise-linear': sizes = 1000*np.array([weight if weight !=1/len(timeweights) else 0.0 for phase, timeweights in app.weights[fxnmode].items() for time, weight in timeweights.items() if time in times]) axes[0].scatter(times, costs,s=sizes, label="cost", alpha=0.5) axes[0].stem(times, costs, label="cost", markerfmt=",", use_line_collection=True) axes[0].set_ylabel("Cost") axes[0].grid() if title: axes[0].set_title(title) elif type(fxnmode[0])==tuple: axes[0].set_title("Cost function of "+str(fxnmode)+" over time") else: axes[0].set_title("Cost function of "+fxnmode[0]+": "+fxnmode[1]+" over time") #plt.subplot_adjust() plt.tight_layout()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw(self):\r\n dt = m.get_instance().dt\r\n self.perception_history = m.get_instance().larvae[0].history\r\n t = np.arange(0,len(self.perception_history)*dt,dt)\r\n plt.plot(t,self.perception_history)\r\n plt.title('Perception History')\r\n plt.xlabel('Time (s)')\r\n plt.ylabel('Perception (uM)')\r\n plt.show()", "def plot_tcv(self):\n self.plot_profiles(0, title='Shot #{:d} @ t={:.2f} s'.format(self.shot, self.t))", "def example3():\n arrive_time=example2() # Get packets arrive time using example1\n time_series.plot_time_series(arrive_time) # Plot time series using packets arrive time", "def visualize(self, time, pred, true):\n plt.plot(time, true, label='Actual')\n plt.plot(time, pred, label='Predicted')\n plt.xlabel('Time')\n plt.ylabel('Price ($)')\n plt.legend(bbox_to_anchor=(0.1, 1), loc=2, borderaxespad=0.,\n prop={'size': 14})\n plt.show()", "def plot_speed_hist(dlc_df, cam_times, trials_df, feature='paw_r', cam='left', legend=True):\r\n # Threshold the dlc traces\r\n dlc_df = likelihood_threshold(dlc_df)\r\n # For pre-GPIO sessions, remove the first few timestamps to match the number of frames\r\n cam_times = cam_times[-len(dlc_df):]\r\n if len(cam_times) != len(dlc_df):\r\n raise ValueError(\"Camera times length and DLC length are inconsistent\")\r\n # Get speeds\r\n speeds = get_speed(dlc_df, cam_times, camera=cam, feature=feature)\r\n # Windows aligned to align_to\r\n start_window, end_window = plt_window(trials_df['stimOn_times'])\r\n start_idx = insert_idx(cam_times, start_window)\r\n end_idx = np.array(start_idx + int(WINDOW_LEN * SAMPLING[cam]), dtype='int64')\r\n # Add speeds to trials_df\r\n trials_df[f'speed_{feature}'] = [speeds[start_idx[i]:end_idx[i]] for i in range(len(start_idx))]\r\n # Plot\r\n times = np.arange(len(trials_df[f'speed_{feature}'].iloc[0])) / SAMPLING[cam] + WINDOW_LAG\r\n # Need to expand the series of lists into a dataframe first, for the nan skipping to work\r\n correct = trials_df[trials_df['feedbackType'] == 1][f'speed_{feature}']\r\n incorrect = trials_df[trials_df['feedbackType'] == -1][f'speed_{feature}']\r\n plt.plot(times, pd.DataFrame.from_dict(dict(zip(correct.index, correct.values))).mean(axis=1),\r\n c='k', label='correct trial')\r\n plt.plot(times, pd.DataFrame.from_dict(dict(zip(incorrect.index, incorrect.values))).mean(axis=1),\r\n c='gray', label='incorrect trial')\r\n plt.axvline(x=0, label='stimOn', linestyle='--', c='r')\r\n plt.title(f'{feature.capitalize()} speed trial avg\\n({cam.upper()} cam)')\r\n plt.xticks([-0.5, 0, 0.5, 1, 1.5])\r\n plt.xlabel('time [sec]')\r\n plt.ylabel('speed [px/sec]')\r\n if legend:\r\n plt.legend()\r\n\r\n return plt.gca()", "def overviewCommand(self):\n plt.figure(11)\n plt.clf()\n ax = plt.subplot(211)\n plt.plot(self.raw['OPDC'].data.field('TIME'),\n 1e6*self.raw['OPDC'].data.field('FUOFFSET'),\n color='r', label='FUOFFSET',\n linewidth=1, alpha=1) \n plt.plot(self.raw['OPDC'].data.field('TIME'),\n 1e6*(self.raw['OPDC'].data.field(self.DLtrack)-\n self.raw['OPDC'].data.field('PSP')),\n color='r', linewidth=3, alpha=0.5,\n label=self.DLtrack+'-PSP')\n plt.legend()\n plt.subplot(212, sharex=ax)\n plt.plot(self.raw['OPDC'].data.field('TIME'),\n 1e6*self.raw['OPDC'].data.field('FUOFFSET')-\n 1e6*(self.raw['OPDC'].data.field(self.DLtrack)-\n self.raw['OPDC'].data.field('PSP')),\n color='k', label='$\\Delta$',\n linewidth=1, alpha=1) \n \n signal = self.raw['OPDC'].data.field('FUOFFSET')\n plt.figure(12)\n plt.clf()\n ax2 = plt.subplot(111)\n Fs = 
1e6/np.diff(self.raw['OPDC'].data.field('TIME')).mean()\n print Fs\n ax2.psd(signal[:50000], NFFT=5000, Fs=Fs, label='FUOFFSET',scale_by_freq=0)\n plt.legend()", "def showPlot2():\n interested_in = list(range(1,10))\n proc_sim_data = []\n for item in interested_in:\n len_sim_data = []\n raw_sim_data = runSimulation(item, 1.0, 25, 25, 0.75, 100, Robot, False)\n for mes in raw_sim_data:\n len_sim_data.append(len(mes))\n proc_sim_data.append(sum(len_sim_data)/len(len_sim_data))\n plot(interested_in, proc_sim_data)\n title('Dependence of cleaning time on number of robots')\n xlabel('number of robots (tiles)')\n ylabel('mean time (clocks)')\n show()", "def H_perform_plot(performance, hurricane):\n fig = plt.figure(figsize = (15, 10))\n for i in range(len(performance)):\n temp1 = performance[i]\n temp2 = hurricane[i]\n plt.plot(np.arange(0, len(temp1), 1), temp1, color = temp2.c, label = temp2.name)\n plt.xlabel('Time Step')\n plt.xticks(np.arange(0, len(temp1), 30))\n plt.ylabel('Performance')\n plt.legend(bbox_to_anchor=(1, 1), loc='upper left', ncol=1, frameon = 0)\n plt.grid(True)", "def Traffic_Perform_Plot(performance, hurricanes):\n fig = plt.figure(figsize = (8, 6))\n for i in range(len(performance)):\n temp1 = performance[i]\n temp2 = hurricanes[i]\n perform = sf.Normalize(temp1, Type = 'max')\n plt.plot(np.arange(0, len(perform), 1), perform, color = temp2.c, label = temp2.name, marker = 'o')\n plt.xlabel('Time Step')\n plt.xticks(np.arange(0, len(perform), 1))\n plt.ylabel('Performance')\n plt.legend(bbox_to_anchor=(1, 1), loc='upper left', ncol=1, frameon = 0)\n plt.grid(True)", "def showPlot1():\n\n interested_in = list(range(5,30,5))\n proc_sim_data = []\n for item in interested_in:\n len_sim_data = []\n raw_sim_data = runSimulation(1, 1.0, item, item, 0.75, 100, Robot, False)\n for mes in raw_sim_data:\n len_sim_data.append(len(mes))\n proc_sim_data.append(sum(len_sim_data)/len(len_sim_data))\n plot(interested_in, proc_sim_data)\n title('Dependence of cleaning time on room size')\n xlabel('area of the room (tiles)')\n ylabel('mean time (clocks)')\n show()", "def plot_data_stats(data_dict, data_bxtxn, data_dt):\n print(onp.mean(onp.sum(data_bxtxn, axis=1)), \"spikes/second\")\n f = plt.figure(figsize=(12,4))\n plt.subplot(141)\n plt.hist(onp.mean(data_bxtxn, axis=1).ravel()/data_dt);\n plt.xlabel('spikes / sec')\n plt.subplot(142)\n plt.imshow(data_dict['hiddens'][0,:,:].T)\n plt.xlabel('time')\n plt.ylabel('neuron #')\n plt.title('Sample trial rates')\n plt.subplot(143);\n plt.imshow(data_bxtxn[0,:,:].T)\n plt.xlabel('time')\n plt.ylabel('neuron #')\n plt.title('spikes')\n plt.subplot(144)\n plt.stem(onp.mean(onp.sum(data_bxtxn, axis=1), axis=0));\n plt.xlabel('neuron #')\n plt.ylabel('spikes / sec');\n return f", "def throughputLinePlot(self):\n\t\tplot(self.window_size, self.throughput, label = \"Throughput vs Window Size\")\n\t\txticks(self.window_size)\n\t\txlim(0, 21)\n\t\txlabel('Window Size (No. 
of Packets)')\n\t\tylabel('Throughput in Mbps')\n\t\tlegend(loc=0)\n\t\tsavefig('ThroughputLinePlot.png')", "def _plot(self, step, rewards, losses):\n plt.figure(figsize=(20, 5))\n plt.subplot(131)\n plt.title('Total Episode Reward')\n plt.plot(rewards)\n plt.subplot(132)\n plt.title('MSE Loss')\n plt.plot(losses)\n plt.show()", "def bench_plotter(self):\n\n # plot random as histogram, upper en lower bound as a red line\n minima = []\n for i in range(1, 4):\n cost_list = []\n with open(f\"../output_runs/text_info_random{i}_10k.txt\", \"r\") as f:\n text = f.read().split('\\n')\n counter = 0\n for number in text:\n counter += 1\n if number is not \"\":\n cost_list.append(int(number))\n if counter == 1000:\n break\n minim = min(cost_list)\n minima.append(minim)\n maxim = max(cost_list)\n print(\"random:\", minim, maxim)\n plt.axvline(x=53188, color='r')\n plt.axvline(x=103030, color=\"r\")\n plt.hist(cost_list, bins=20, alpha=0.5, label=f\"Random walk\")\n\n # plot histogram of priority and hillclimber\n cost_list = []\n with open(f\"../output_runs/text_info_prior_hill{i}_\\\n 1k.txt\", \"r\") as f:\n text = f.read().split('\\n')\n for number in text:\n if number is not \"\":\n cost_list.append(int(number))\n minim = min(cost_list)\n minima.append(minim)\n maxim = max(cost_list)\n print(\"prior hill:\", minim, maxim)\n plt.hist(cost_list, bins=20, alpha=0.5, label=f\"Priority + Hill\")\n\n # plot histogram of simulated annealing\n cost_list = []\n with open(f\"../output_runs/simulated_annealing{i}_1000.txt\",\n \"r\") as f:\n text = f.read().split('\\n')\n for number in text:\n if number is not \"\":\n cost_list.append(int(number))\n minim = min(cost_list)\n minima.append(minim)\n maxim = max(cost_list)\n print(\"random+anneal:\", minim, maxim)\n plt.hist(cost_list, bins=20, alpha=0.5,\n label=f\"Random + sim anneal\")\n\n # plot histogram of random plus hillclimber\n cost_list = []\n with open(f\"../output_runs/random_hill{i}_1000.txt\", \"r\") as f:\n text = f.read().split('\\n')\n for number in text:\n if number is not \"\":\n cost_list.append(int(number))\n minim = min(cost_list)\n minima.append(minim)\n maxim = max(cost_list)\n print(\"random+hill:\", minim, maxim)\n plt.hist(cost_list, bins=100, alpha=0.5,\n label=f\"Random + Hillclimber\")\n\n # plot histogram of kmeans plus hillclimber\n cost_list = []\n with open(f\"../output_runs/text_k-means_hill{i}_\\\n 1000.txt\", \"r\") as f:\n text = f.read().split('\\n')\n for number in text:\n if number is not \"\":\n cost_list.append(int(number))\n plt.hist(cost_list, bins=20, alpha=0.5,\n label=f\"Kmean and hill {i}\")\n totalmin = min(minima)\n plt.axvline(x=totalmin, color=\"g\")\n plt.title(f\"4 algorithms Wijk {i}, lowest cost: {totalmin}\")\n plt.xlabel(\"Cost\")\n plt.ylabel(\"Frequency\")\n plt.legend(loc='upper right')\n plt.show()", "def plot_sample(self):\n print(u'plot_sample()')\n data_set = self.data_sets[1]\n scenario = u'Greedy Search'\n titles = [u'Collaborative Filtering', u'Content-based']\n fig, axes = plt.subplots(1, 2, figsize=(10, 5))\n for i, rec_type in enumerate(data_set.missions):\n graph = data_set.folder_graphs + rec_type + '_' + str(15) + u'.txt'\n for strategy in Strategy.strategies:\n m = data_set.missions[rec_type][graph][strategy][scenario]\n m.compute_stats()\n ppl.plot(axes[i], np.arange(STEPS_MAX + 1),\n m.stats, label=strategy, linewidth=2)\n axes[i].set_xlabel(u'#Hops')\n axes[i].set_ylabel(u'Success Ratio')\n axes[i].set_ylim(0, 85)\n axes[i].set_xlim(0, STEPS_MAX * 1.01)\n 
axes[i].set_title(titles[i])\n ppl.legend(axes[i], loc=0)\n\n\n # plt.suptitle(u'Greedy Search on the BookCrossing for N=15',\n # size='xx-large', x=0.5)\n fig.subplots_adjust(left=0.08, right=0.97, top=0.9)\n\n plt.savefig('plots/sample.png')\n plt.savefig('plots/sample.pdf')", "def plot_table(self):\r\n q = dict(sorted(decorator.arr.items(), key=lambda item: item[1]))\r\n print(\"PROGRAM | RANK | TIME ELAPSED\")\r\n count = 1\r\n for i in q:\r\n print(i[0], \"\\t\", count, \"\\t\", float(q[i]) * 1000, \"ms\")\r\n count += 1", "def visualize(epc_data: List[EmissionPerCapita],\r\n prediction_year: int, title: str, frame_rate: int) -> None:\r\n\r\n # Set fit with 2 graphs.\r\n fig = make_subplots(rows=2, cols=1,\r\n subplot_titles=('Emission Per Capita (in thousand metric tons)',\r\n 'Average Emission Per Capita (in thousand metric tons)'))\r\n\r\n colors = assign_colors(epc_data) # assign colors to each element.\r\n\r\n # Initialize the two graphs.\r\n # PS: We believe there is no error in the marker_color line but\r\n # somehow pycharm insists there is.(We have tried a demo from\r\n # the official plotly library and pycharm still highlights it.)\r\n initial_sorted_top_10 = sort_top_10(epc_data, epc_data[0].start_year)\r\n initial_sorted_colors = get_sorted_colors(colors, initial_sorted_top_10[0])\r\n fig.add_trace(go.Bar(x=initial_sorted_top_10[0], y=initial_sorted_top_10[1],\r\n text=initial_sorted_top_10[0],\r\n hoverinfo='none', textposition='outside',\r\n texttemplate='%{x}<br>%{y:s}', cliponaxis=False,\r\n name='Per Capita in: ' + str(epc_data[0].start_year),\r\n marker_color=initial_sorted_colors\r\n ), row=1, col=1)\r\n\r\n x_axis = list(range(epc_data[0].start_year, epc_data[0].end_year + prediction_year + 1))\r\n fig.add_trace(go.Scatter(x=x_axis, y=[0],\r\n name='Average Per Capita: ' + str(epc_data[0].start_year)\r\n ), row=2, col=1)\r\n\r\n # Produce each frame presented in the animation.\r\n list_of_frames = []\r\n average_emission_so_far = []\r\n for i in range(epc_data[0].start_year, epc_data[0].end_year + prediction_year + 1, frame_rate):\r\n\r\n # Get the sorted top 10 and their corresponding colors for the current frame.\r\n sorted_top_10 = sort_top_10(epc_data, i)\r\n sorted_colors = get_sorted_colors(colors, sorted_top_10[0])\r\n\r\n # Append the current year average emission per capita to the accumulator.\r\n list.append(average_emission_so_far, average_emission(epc_data, i))\r\n\r\n # Append the current frame to list_of_frames using the following style.\r\n # PS: the same situation happens in this marker_color, too.\r\n list_of_frames.append(go.Frame(data=[go.Bar(x=sorted_top_10[0], y=sorted_top_10[1],\r\n text=sorted_top_10[0],\r\n hoverinfo='none', textposition='outside',\r\n texttemplate='%{x}<br>%{y:s}', cliponaxis=False,\r\n name='Per Capita in: ' + str(i),\r\n marker_color=sorted_colors),\r\n go.Scatter(x=x_axis, y=average_emission_so_far,\r\n name='Average Per Capita in: ' + str(i))],\r\n traces=[0, 1]))\r\n\r\n fig.frames = list_of_frames\r\n\r\n # Set the layout of the two graphs.\r\n fig.update_layout(updatemenus=[{'type': 'buttons',\r\n 'showactive': False,\r\n 'y': 0,\r\n 'x': 1.05,\r\n 'xanchor': 'left',\r\n 'yanchor': 'bottom',\r\n 'buttons': [{'label': 'Play',\r\n 'method': 'animate',\r\n 'args': [None]}]}],\r\n width=1400, height=750,\r\n font={'size': 20},\r\n title=title + ' (Predicted after year: ' + str(epc_data[0].end_year) + ')')\r\n fig.show()", "def plot_latency_throughput(latency):\n init()\n time = list(item['Latency Timestamp'] for item in 
latency)\n delay = list(item['Transfer Delay'] for item in latency)\n rate = list(item['Data Rate'] for item in latency)\n y = [delay, rate]\n xlabel = 'Time (ns)'\n ylabels = ['Transfer Delay (ns)', 'Throughput (Gbit/s)']\n titles = ['Overall Transfer Delay', 'Overall Throughput']\n width, height = latency_throughput_size[0], latency_throughput_size[1]\n fig, axes = plt.subplots(2, 1, figsize=(width, height), dpi=300)\n for i in range(2):\n sns.lineplot(x=time, y=y[i], markers=True,\n dashes=False, ax=axes[i], lw=1,\n color=sns.color_palette('RdBu_r')[0])\n axes[i].set_ylabel(ylabels[i])\n axes[i].set_xlabel(xlabel)\n axes[i].xaxis.grid(True)\n axes[i].yaxis.grid(True)\n axes[i].set_title(titles[i], fontsize=13)\n fig.tight_layout()\n return fig", "def plot_example_psds(example,rate):\r\n plt.figure()\r\n \r\n ##YOUR CODE HERE \r\n \r\n return", "def Usage():\n print \"\"\"\n To plot the result using the iter number of the x axis:\n\n plot_sdcard.py -i /tmp/data.txt\n\n To plot the result using time for the x axis:\n\n plot_sdcard.py -t /tmp/data.txt\n\n To plot the result from the profiler:\n\n profile_sdcard.sh\n plot_sdcard.py -p\n\n \"\"\"\n sys.exit(2)", "def visualizations():\r\n raise NotImplementedError\r\n # df = pandas.read_csv('accidents_by_hour.csv', index_col=0, header=0)\r\n # plt.plot(0, 0, data=df)\r\n # plt.show()\r", "def plot_progress(fig, costs, learning_rate, lambda_reg):\n ax = fig.add_subplot(111)\n ax.plot(\n np.arange(len(costs)),\n costs,\n alpha=0.8,\n label=\"LR: \" + str(learning_rate) + \" __ Lambda: \" + str(lambda_reg),\n )\n\n ax.legend(\n bbox_to_anchor=(0.0, 1.02, 1.0, 0.102),\n loc=\"best\",\n ncol=4,\n mode=\"expand\",\n borderaxespad=0.0,\n )", "def showPlot5():\n interested_in = list(range(1,10))\n proc_sim_data = []\n for item in interested_in:\n len_sim_data = []\n raw_sim_data = runSimulation(item, 1.0, 25, 25, 0.75, 100, Robot, False)\n len_sim_data2 = []\n raw_sim_data2 = runSimulation(item, 1.0, 25, 25, 0.75, 100, RandomWalkRobot, False)\n for mes in raw_sim_data:\n len_sim_data.append(len(mes))\n for mes in raw_sim_data2:\n len_sim_data2.append(len(mes))\n overa = [sum(len_sim_data)/len(len_sim_data), sum(len_sim_data2)/len(len_sim_data2)]\n proc_sim_data.append(overa)\n plot(interested_in, proc_sim_data)\n title('performance comparision of the two types of bots')\n xlabel('number of robots')\n ylabel('mean time (clocks)')\n show()", "def visualise_food_consumption(data: LogData, directory: Path):\n\n figure, axes = plot.subplots()\n\n food_history = get_food_history(data)\n\n axes.plot(food_history.keys(), food_history.values(), label=\"Food\", color=\"blue\", **{\"ls\": \"--\"})\n\n axes.legend(loc=\"upper left\")\n axes.set_xlim(0, data.duration_secs())\n axes.set_xlabel(\"Time (seconds)\")\n axes.set_ylabel(\"Amount\")\n axes.set_title(\"Food availability\")\n\n plot.savefig(directory / Path(\"food_consumption.png\"))\n plot.close()", "def costovertime(endclasses, app, costtype='expected cost'):\n costovertime = cost_table(endclasses, app)\n plt.plot(list(costovertime.index), costovertime[costtype])\n plt.title('Total '+costtype+' of all faults over time.')\n plt.ylabel(costtype)\n plt.xlabel(\"Time (\"+str(app.units)+\")\")\n plt.grid()", "def results_plot_fuel_reactor(self):\n \n import matplotlib.pyplot as plt \n\n # Total pressure profile\n P = []\n for z in self.MB_fuel.z:\n P.append(value(self.MB_fuel.P[z]))\n fig_P = plt.figure(1)\n plt.plot(self.MB_fuel.z, P)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Total 
Pressure [bar]\") \n\n # Temperature profile\n Tg = []\n Ts = []\n# Tw = []\n for z in self.MB_fuel.z:\n Tg.append(value(self.MB_fuel.Tg[z] - 273.15))\n Ts.append(value(self.MB_fuel.Ts[z] - 273.15))\n# Tw.append(value(self.MB_fuel.Tw[z]))\n fig_T = plt.figure(2)\n plt.plot(self.MB_fuel.z, Tg, label='Tg')\n plt.plot(self.MB_fuel.z, Ts, label='Ts')\n# plt.plot(self.MB_fuel.z, Tw, label='Tw')\n plt.legend(loc=0,ncol=2)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Temperature [C]\") \n \n # Superficial gas velocity and minimum fluidization velocity\n vg = []\n umf = []\n for z in self.MB_fuel.z:\n vg.append(value(self.MB_fuel.vg[z]))\n umf.append(value(self.MB_fuel.umf[z]))\n fig_vg = plt.figure(3)\n plt.plot(self.MB_fuel.z, vg, label='vg')\n plt.plot(self.MB_fuel.z, umf, label='umf')\n plt.legend(loc=0,ncol=2)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Superficial gas velocity [m/s]\")\n \n # Gas components molar flow rate\n for j in self.MB_fuel.GasList:\n F = []\n for z in self.MB_fuel.z:\n F.append(value(self.MB_fuel.F[z,j]))\n fig_F = plt.figure(4)\n plt.plot(self.MB_fuel.z, F, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.GasList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Gas component molar flow rate, F [mol/s]\") \n \n # Bulk gas phase total molar flow rate\n Ftotal = []\n for z in self.MB_fuel.z:\n Ftotal.append(value(self.MB_fuel.Ftotal[z]))\n fig_Ftotal = plt.figure(5)\n plt.plot(self.MB_fuel.z, Ftotal)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Total molar gas flow rate [mol/s]\") \n\n # Solid components mass flow rate\n for j in self.MB_fuel.SolidList:\n M = []\n for z in self.MB_fuel.z:\n M.append(value(self.MB_fuel.Solid_M[z,j]))\n fig_M = plt.figure(6)\n plt.plot(self.MB_fuel.z, M, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.SolidList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Solid components mass flow rate [kg/s]\")\n \n # Bulk solid phase total molar flow rate\n Mtotal = []\n for z in self.MB_fuel.z:\n Mtotal.append(value(self.MB_fuel.Solid_M_total[z]))\n fig_Mtotal = plt.figure(7)\n plt.plot(self.MB_fuel.z, Mtotal)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Solid total mass flow rate [kg/s]\") \n \n # Gas phase concentrations\n for j in self.MB_fuel.GasList:\n Cg = []\n for z in self.MB_fuel.z:\n Cg.append(value(self.MB_fuel.Cg[z,j]))\n fig_Cg = plt.figure(8)\n plt.plot(self.MB_fuel.z, Cg, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.GasList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Concentration [mol/m3]\") \n \n # Gas phase mole fractions\n for j in self.MB_fuel.GasList:\n y = []\n for z in self.MB_fuel.z:\n y.append(value(self.MB_fuel.y[z,j]))\n fig_y = plt.figure(9)\n plt.plot(self.MB_fuel.z, y, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.GasList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"y [-]\") \n \n # Solid phase mass fractions\n for j in self.MB_fuel.SolidList:\n x = []\n for z in self.MB_fuel.z:\n x.append(value(self.MB_fuel.x[z,j]))\n fig_x = plt.figure(10)\n plt.plot(self.MB_fuel.z, x, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.SolidList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"x [-]\") \n\n # Total mass fraction\n xtot = []\n for z in self.MB_fuel.z:\n xtot.append(value(self.MB_fuel.xtot[z]))\n fig_xtot = plt.figure(11)\n plt.plot(self.MB_fuel.z, xtot)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Total mass fraction [-]\") \n \n # # Gas mix 
density\n # rhog = []\n # for z in self.MB_fuel.z:\n # rhog.append(value(self.MB_fuel.rho_vap[z]))\n # fig_rhog = plt.figure(23)\n # plt.plot(self.MB_fuel.z, rhog)\n # plt.grid()\n # plt.xlabel(\"Bed height [-]\")\n # plt.ylabel(\"Gas mix density [kg/m3]\") \n \n # Fe conversion\n X_Fe = []\n for z in self.MB_fuel.z:\n X_Fe.append(value(self.MB_fuel.X[z])*100)\n fig_X_Fe = plt.figure(13)\n plt.plot(self.MB_fuel.z, X_Fe)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Fraction of metal oxide converted [%]\")", "def plotter(self, Result, outcome):\n # Plot results time histories\n fig, axs = plt.subplots(2, 3, figsize=(20, 10))\n axs = axs.reshape(-1)\n axs[0].plot(Result.time, Result.velocity)\n axs[1].plot(Result.time, Result.mass)\n axs[2].plot(Result.time, Result.angle)\n axs[3].plot(Result.time, Result.altitude)\n axs[4].plot(Result.time, Result.distance)\n axs[5].plot(Result.time, Result.radius)\n axs[0].set_title('velocity (m/s) vs time (s)', fontsize=16)\n axs[1].set_title('mass (kg) vs time (s)', fontsize=16)\n axs[2].set_title('angle (rad) vs time (s)', fontsize=16)\n axs[3].set_title('altitude (m) vs time (s)', fontsize=16)\n axs[4].set_title('distance (m) vs time (s)', fontsize=16)\n axs[5].set_title('radius (m) vs time (s)', fontsize=16)\n plt.tight_layout()\n\n # Plot energy deposition curve\n fig, ax = plt.subplots(1, 1, figsize=(8, 8))\n ax.plot(Result.dedz, Result.altitude / 1e3)\n ax.set_xlabel('Energy per unit height [Kt/km]', fontsize=14)\n ax.set_ylabel('Altitude [km]', fontsize=14)\n plt.show()", "def test():\n data1 = resources_vs_time(0.0, 50)\n data2 = resources_vs_time(1.0, 10)\n data3 = resources_vs_time(2.0, 10)\n data4 = resources_vs_time(0.5, 10)\n print data1\n simpleplot.plot_lines(\"Growth\", 600, 600, \"time\", \"total resources\", [data1])", "def show_dcr_results(dg):\n cycle = dg.fileDB['cycle'].values[0]\n df_dsp = pd.read_hdf(f'./temp_{cycle}.h5', 'opt_dcr')\n # print(df_dsp.describe()) \n\n # compare DCR and A/E distributions\n fig, (p0, p1) = plt.subplots(2, 1, figsize=(8, 8))\n \n elo, ehi, epb = 0, 25000, 100\n \n # aoe distribution\n # ylo, yhi, ypb = -1, 2, 0.1\n # ylo, yhi, ypb = -0.1, 0.3, 0.005\n ylo, yhi, ypb = 0.05, 0.08, 0.0005\n nbx = int((ehi-elo)/epb)\n nby = int((yhi-ylo)/ypb)\n h = p0.hist2d(df_dsp['trapEmax'], df_dsp['aoe'], bins=[nbx,nby],\n range=[[elo, ehi], [ylo, yhi]], cmap='jet',\n norm=LogNorm())\n # p0.set_xlabel('Energy (uncal)', ha='right', x=1)\n p0.set_ylabel('A/E', ha='right', y=1)\n\n # dcr distribution\n # ylo, yhi, ypb = -20, 20, 1 # dcr_raw\n # ylo, yhi, ypb = -5, 2.5, 0.1 # dcr = dcr_raw / trapEmax\n # ylo, yhi, ypb = -3, 2, 0.1\n ylo, yhi, ypb = 0.9, 1.08, 0.001\n ylo, yhi, ypb = 1.034, 1.0425, 0.00005 # best for 64.4 us pz\n # ylo, yhi, ypb = 1.05, 1.056, 0.00005 # best for 50 us pz\n # ylo, yhi, ypb = 1.016, 1.022, 0.00005 # best for 100 us pz\n nbx = int((ehi-elo)/epb)\n nby = int((yhi-ylo)/ypb)\n h = p1.hist2d(df_dsp['trapEmax'], df_dsp['dcr'], bins=[nbx,nby],\n range=[[elo, ehi], [ylo, yhi]], cmap='jet',\n norm=LogNorm())\n p1.set_xlabel('Energy (uncal)', ha='right', x=1)\n p1.set_ylabel('DCR', ha='right', y=1)\n \n # plt.show()\n plt.savefig(f'./plots/dcr_cyc{cycle}.png', dpi=300)\n plt.cla()", "def updatefig(*args):\n p1.set_array(turn(grid))\n p2.set_data(tally['time'], tally['sickos'])\n p3.set_data(tally['time'], tally['immune'])\n p4.set_data(tally['time'], tally['dead'])\n ax2.set_xlim(0, max(tally['time']))\n # ax2.set_ylim(0, max(max(sickos), max(immune)))\n # End sim if the disease is 
gone\n if tally['sickos'][-1] == 0:\n ani.event_source.stop()\n end_time = time.process_time()\n show_summary()\n print(\"Process time:\", end_time - start_time)\n return p1, p2, p3, p4," ]
[ "0.58707404", "0.58563536", "0.58295834", "0.5811372", "0.58079165", "0.57968676", "0.57574296", "0.5750948", "0.57313925", "0.57307285", "0.57214856", "0.5688989", "0.56813246", "0.5665063", "0.5632812", "0.5622377", "0.5619761", "0.56086326", "0.5604528", "0.56001186", "0.55777055", "0.5566906", "0.5556824", "0.5544519", "0.55431867", "0.5535885", "0.5535253", "0.5527088", "0.55206305", "0.55191004" ]
0.5999519
0
Plots the total cost or total expected cost of faults over time.
def costovertime(endclasses, app, costtype='expected cost'):
    costovertime = cost_table(endclasses, app)
    plt.plot(list(costovertime.index), costovertime[costtype])
    plt.title('Total '+costtype+' of all faults over time.')
    plt.ylabel(costtype)
    plt.xlabel("Time ("+str(app.units)+")")
    plt.grid()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_cost(self):\n steps = np.arange(len(self.cost_values))\n plt.plot(steps, self.cost_values, '-o')\n plt.xlabel(\"Steps\")\n plt.ylabel(\"Cost value\")\n plt.title(\"Cost value per step using Gradient Descent\")\n plt.show()", "def plot_costs(j_history):\n plt.figure(figsize=(14, 8))\n plt.plot(range(len(j_history)), j_history)\n plt.grid(True)\n plt.title('J (Cost)')\n plt.xlabel('Iteration')\n plt.ylabel('Cost function')\n plt.xlim([0, 1.05 * ITERATIONS])\n plt.ylim([4, 7])\n plt.show()\n plt.close()", "def plot_progress(fig, costs, learning_rate, lambda_reg):\n ax = fig.add_subplot(111)\n ax.plot(\n np.arange(len(costs)),\n costs,\n alpha=0.8,\n label=\"LR: \" + str(learning_rate) + \" __ Lambda: \" + str(lambda_reg),\n )\n\n ax.legend(\n bbox_to_anchor=(0.0, 1.02, 1.0, 0.102),\n loc=\"best\",\n ncol=4,\n mode=\"expand\",\n borderaxespad=0.0,\n )", "def plot_progress(self):\n plt.plot(-self.training_average_reward, label='negative average reward')\n plt.plot(self.training_average_electricity_cost_in_euros, label='electricity cost in euros')\n plt.legend()\n plt.xlabel('Epoch')\n plt.ylabel('cost in euros')\n plt.title('Average electricity cost in euros and reward')\n plt.show()", "def plot_cost(c_v, c_t, save_plots_path):\n\n plt.figure()\n plt.plot(c_v, label='Validation loss')\n plt.plot(c_t, label='Training loss')\n plt.legend()\n title = 'Loss per epoch'\n plt.title(title)\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Loss\")\n plt.savefig(save_plots_path + \"swag_loss_plot.png\")", "def plot_graph(costs):\n plt.figure()\n for i in range(len(np.array(costs).T)):\n plt.plot(np.array(costs)[:, i])\n plt.title(\"Costs\")\n plt.show()", "def plot_results(outputs_table_totals, elec_benefits, gas_benefits):\n summer_months = [6, 7, 8, 9]\n shoulder_months = [3, 4, 5, 10]\n winter_months = [11, 12, 1, 2]\n peak_hours = [16, 17, 18, 19, 20]\n pct_hours_in_summer = 2928 / 8760\n pct_hours_in_shoulder = 2952 / 8760\n pct_hours_in_winter = 2880 / 8760\n\n trc_costs_record = outputs_table_totals[\"TRC Costs ($)\"]\n pac_costs_record = outputs_table_totals[\"PAC Costs ($)\"]\n trc_record = outputs_table_totals[\"TRC\"]\n pac_record = outputs_table_totals[\"PAC\"]\n lifecycle_net_mwh = outputs_table_totals[\"Electricity Lifecycle Net Savings (MWh)\"]\n lifecycle_net_therms = outputs_table_totals[\"Gas Lifecycle Net Savings (Therms)\"]\n lifecycle_net_ghg = outputs_table_totals[\"Total Lifecycle GHG Savings (Tons)\"]\n\n # Getting variables for plots\n elec_benefits_cols = (\n [\"hourly_savings\"] + ACC_COMPONENTS_ELECTRICITY + [\"av_csts_levelized\"]\n )\n\n elec_benefits_hour_month_year = (\n elec_benefits.groupby([\"hour_of_day\", \"year\", \"month\"])\n .agg(\n {\n **{component: \"sum\" for component in ACC_COMPONENTS_ELECTRICITY},\n **{\n \"hourly_savings\": \"sum\",\n \"marginal_ghg\": \"sum\",\n \"av_csts_levelized\": \"mean\",\n },\n }\n )\n .reset_index()\n )\n\n total_benefits = list(\n elec_benefits_hour_month_year.groupby([\"hour_of_day\"])[\"total\"].sum()\n )\n\n summer_benefits = list(\n elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(summer_months))\n ]\n .groupby([\"hour_of_day\"])[\"total\"]\n .sum()\n )\n summer_peak_benefits = elec_benefits_hour_month_year[\"total\"][\n (elec_benefits_hour_month_year[\"month\"].isin(summer_months))\n & (elec_benefits_hour_month_year[\"hour_of_day\"].isin(peak_hours))\n ].sum()\n shoulder_benefits = list(\n elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(shoulder_months))\n ]\n 
.groupby([\"hour_of_day\"])[\"total\"]\n .sum()\n )\n winter_benefits = list(\n elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(winter_months))\n ]\n .groupby([\"hour_of_day\"])[\"total\"]\n .sum()\n )\n total_savings = list(\n elec_benefits_hour_month_year.groupby([\"hour_of_day\"])[\"hourly_savings\"].sum()\n )\n summer_savings = list(\n elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(summer_months))\n ]\n .groupby([\"hour_of_day\"])[\"hourly_savings\"]\n .sum()\n )\n shoulder_savings = list(\n elec_benefits_hour_month_year[\n ((elec_benefits_hour_month_year[\"month\"].isin(shoulder_months)))\n ]\n .groupby([\"hour_of_day\"])[\"hourly_savings\"]\n .sum()\n )\n summer_peak_savings = elec_benefits_hour_month_year[\"hourly_savings\"][\n (elec_benefits_hour_month_year[\"month\"].isin(summer_months))\n & (elec_benefits_hour_month_year[\"hour_of_day\"].isin(peak_hours))\n ].sum()\n winter_savings = list(\n elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(winter_months))\n ]\n .groupby([\"hour_of_day\"])[\"hourly_savings\"]\n .sum()\n )\n total_av_csts_avg = list(\n elec_benefits_hour_month_year.groupby([\"hour_of_day\"])[\n \"av_csts_levelized\"\n ].mean()\n )\n summer_av_csts_avg = list(\n pct_hours_in_summer\n * elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(summer_months))\n ]\n .groupby([\"hour_of_day\"])[\"av_csts_levelized\"]\n .mean()\n )\n summer_peak_av_csts_avg = elec_benefits_hour_month_year[\"av_csts_levelized\"][\n (elec_benefits_hour_month_year[\"month\"].isin(summer_months))\n & (elec_benefits_hour_month_year[\"hour_of_day\"].isin(peak_hours))\n ].mean()\n shoulder_av_csts_avg = list(\n pct_hours_in_shoulder\n * elec_benefits_hour_month_year[\n ((elec_benefits_hour_month_year[\"month\"].isin(shoulder_months)))\n ]\n .groupby([\"hour_of_day\"])[\"av_csts_levelized\"]\n .mean()\n )\n winter_av_csts_avg = list(\n pct_hours_in_winter\n * elec_benefits_hour_month_year[\n (elec_benefits_hour_month_year[\"month\"].isin(winter_months))\n ]\n .groupby([\"hour_of_day\"])[\"av_csts_levelized\"]\n .mean()\n )\n\n elec_benefits_sum_by_hod = (\n elec_benefits[elec_benefits_cols].groupby(elec_benefits[\"hour_of_day\"]).sum()\n )\n elec_benefits_hoy = (\n elec_benefits[elec_benefits_cols]\n .groupby(elec_benefits[\"hour_of_year\"])\n .sum()\n .cumsum()\n .reset_index()\n )\n sav_avcsts_288 = (\n elec_benefits.groupby([\"hour_of_day\", \"month\"])\n .agg(\n {\n **{component: \"sum\" for component in ACC_COMPONENTS_ELECTRICITY},\n **{\n \"hourly_savings\": \"sum\",\n \"marginal_ghg\": \"sum\",\n \"av_csts_levelized\": \"mean\",\n },\n }\n )\n .reset_index()\n )\n sav_avcsts_288 = sav_avcsts_288[\n [\"hour_of_day\", \"month\", \"hourly_savings\", \"total\", \"marginal_ghg\"]\n ]\n ghgsav = sav_avcsts_288.pivot(\"hour_of_day\", \"month\", \"marginal_ghg\")\n sav = sav_avcsts_288.pivot(\"hour_of_day\", \"month\", \"hourly_savings\")\n avcsts = sav_avcsts_288.pivot(\"hour_of_day\", \"month\", \"total\")\n\n # savings load shape plot\n fig0, (ax1, ax2, ax3) = plt.subplots(\n 1, 3, figsize=(18, 5), sharex=True, sharey=True\n )\n plt.subplots_adjust(wspace=0, hspace=0)\n axs = [ax1, ax2, ax3]\n hod = elec_benefits_sum_by_hod.index\n legend_labels1 = [\"Summer\"]\n legend_labels2 = [\"Shoulder\"]\n legend_labels3 = [\"Winter\"]\n\n ax1.plot(\n hod,\n summer_savings,\n c=\"firebrick\",\n linewidth=5,\n marker=\"$\\u25EF$\",\n markersize=13,\n linestyle=\"-\",\n )\n ax2.plot(\n 
hod,\n shoulder_savings,\n c=\"royalblue\",\n linewidth=5,\n marker=\"$\\u2206$\",\n markersize=13,\n linestyle=\"-\",\n )\n ax3.plot(\n hod,\n winter_savings,\n c=\"green\",\n linewidth=5,\n marker=\"$\\u25A1$\",\n markersize=13,\n linestyle=\"-\",\n )\n ax1.axhline(y=0, color=\"gray\", linewidth=1, linestyle=\"--\")\n ax2.axhline(y=0, color=\"gray\", linewidth=1, linestyle=\"--\")\n ax3.axhline(y=0, color=\"gray\", linewidth=1, linestyle=\"--\")\n # Shade peak region\n ax1.axvspan(16, 21, alpha=0.2, color=\"grey\")\n\n leg1 = ax1.legend(legend_labels1, fontsize=14, loc=\"upper left\", frameon=False)\n for line, text in zip(leg1.get_lines(), leg1.get_texts()):\n text.set_color(line.get_color())\n leg2 = ax2.legend(legend_labels2, fontsize=14, loc=\"upper left\", frameon=False)\n for line, text in zip(leg2.get_lines(), leg2.get_texts()):\n text.set_color(line.get_color())\n leg3 = ax3.legend(legend_labels3, fontsize=14, loc=\"upper left\", frameon=False)\n for line, text in zip(leg3.get_lines(), leg3.get_texts()):\n text.set_color(line.get_color())\n\n ax1.set_ylabel(\"Savings (MWh/hr)\", size=16)\n ax2.set_xlabel(\"Hour of Day\", size=16)\n\n if max(summer_savings + shoulder_savings + winter_savings) < 0:\n ymax = 0\n else:\n ymax = max(summer_savings + shoulder_savings + winter_savings)\n if min(summer_savings + shoulder_savings + winter_savings) > 0:\n ymin = 0\n else:\n ymin = min(summer_savings + shoulder_savings + winter_savings)\n\n # Tick and lebel parameters\n ax1.set_ylim(ymin * 1.08, ymax * 1.08)\n ax1.set_yticks(\n np.arange(\n ymin * 1.08,\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax2.set_yticks(\n np.arange(\n ymin * 1.08,\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax3.set_yticks(\n np.arange(\n ymin * 1.08,\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax1.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax2.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax3.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax1.yaxis.set_minor_locator(AutoMinorLocator())\n ax1.set_xticks(np.arange(0, 24, step=4))\n ax1.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=7, width=2, labelsize=14\n )\n ax1.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax1.xaxis.set_minor_locator(AutoMinorLocator())\n ax2.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=7, width=2, labelsize=14\n )\n ax2.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax2.xaxis.set_minor_locator(AutoMinorLocator())\n ax3.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=7, width=2, labelsize=14\n )\n ax3.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax3.xaxis.set_minor_locator(AutoMinorLocator())\n\n # Set plot title, size, and position\n ax1.set_title(\"Seasonal Savings Load Shapes\", size=18, loc=\"left\").set_position(\n [0, 1.03]\n )\n\n # benefits_seasonal_shape_plot\n fig1, (ax1, ax2, ax3) = plt.subplots(\n 1, 3, figsize=(18, 5), sharex=True, sharey=True\n )\n plt.subplots_adjust(wspace=0, hspace=0)\n axs = [ax1, ax2, ax3]\n hod = elec_benefits_sum_by_hod.index\n legend_labels1 = [\"Summer\"]\n legend_labels2 = [\"Shoulder\"]\n legend_labels3 = [\"Winter\"]\n\n ax1.plot(\n hod,\n summer_benefits,\n 
c=\"firebrick\",\n linewidth=5,\n marker=\"$\\u2B24$\",\n markersize=13,\n linestyle=\":\",\n )\n ax2.plot(\n hod,\n shoulder_benefits,\n c=\"royalblue\",\n linewidth=5,\n marker=\"$\\u25B2$\",\n markersize=13,\n linestyle=\":\",\n )\n ax3.plot(\n hod,\n winter_benefits,\n c=\"green\",\n linewidth=5,\n marker=\"$\\u25A0$\",\n markersize=13,\n linestyle=\":\",\n )\n ax1.axhline(y=0, color=\"gray\", linewidth=1, linestyle=\"--\")\n ax2.axhline(y=0, color=\"gray\", linewidth=1, linestyle=\"--\")\n ax3.axhline(y=0, color=\"gray\", linewidth=1, linestyle=\"--\")\n # Shade peak region\n ax1.axvspan(16, 21, alpha=0.2, color=\"grey\")\n\n leg1 = ax1.legend(legend_labels1, fontsize=15, loc=\"upper left\", frameon=False)\n for line, text in zip(leg1.get_lines(), leg1.get_texts()):\n text.set_color(line.get_color())\n leg2 = ax2.legend(legend_labels2, fontsize=15, loc=\"upper left\", frameon=False)\n for line, text in zip(leg2.get_lines(), leg2.get_texts()):\n text.set_color(line.get_color())\n leg3 = ax3.legend(legend_labels3, fontsize=15, loc=\"upper left\", frameon=False)\n for line, text in zip(leg3.get_lines(), leg3.get_texts()):\n text.set_color(line.get_color())\n\n ax1.set_ylabel(\"TRC Benefits ($/hr)\", size=16)\n ax2.set_xlabel(\"Hour of Day\", size=16)\n\n if max(summer_benefits + shoulder_benefits + winter_benefits) < 0:\n ymax = 0\n else:\n ymax = max(summer_benefits + shoulder_benefits + winter_benefits)\n if min(summer_benefits + shoulder_benefits + winter_benefits) > 0:\n ymin = 0\n else:\n ymin = min(summer_benefits + shoulder_benefits + winter_benefits)\n\n # Tick and label parameters\n ax1.set_ylim(ymin * 1.08, ymax * 1.08)\n ax1.set_yticks(\n np.arange(\n ymin * 1.08,\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax2.set_yticks(\n np.arange(\n ymin * 1.08,\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax3.set_yticks(\n np.arange(\n ymin * 1.08,\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax1.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax2.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax3.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax1.yaxis.set_minor_locator(AutoMinorLocator())\n ax1.set_xticks(np.arange(0, 24, step=4))\n ax1.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=7, width=2, labelsize=14\n )\n ax1.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax1.xaxis.set_minor_locator(AutoMinorLocator())\n ax2.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=7, width=2, labelsize=14\n )\n ax2.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax2.xaxis.set_minor_locator(AutoMinorLocator())\n ax3.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=7, width=2, labelsize=14\n )\n ax3.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax3.xaxis.set_minor_locator(AutoMinorLocator())\n\n # Set plot title, size, and position\n ax1.set_title(\n \"Seasonal TRC Benefits by Hour ($)\", size=18, loc=\"left\"\n ).set_position([0, 1.03])\n\n # sum_hourly_plot\n fig2 = plt.figure(figsize=(12, 7), dpi=250)\n ax = fig2.gca()\n colors = [\n \"royalblue\",\n \"black\",\n \"pink\",\n \"firebrick\",\n \"gray\",\n \"darkviolet\",\n \"darkorange\",\n \"green\",\n \"saddlebrown\",\n ]\n legend_labels 
= []\n x = 1\n while x <= len(ACC_COMPONENTS_ELECTRICITY[1:]):\n if x == 1:\n ax.bar(\n hod,\n elec_benefits_sum_by_hod[ACC_COMPONENTS_ELECTRICITY[x]],\n color=colors[x - 1],\n )\n legend_labels.append(\n re.findall(\n \".*Name: (.*),\",\n str(elec_benefits_sum_by_hod[ACC_COMPONENTS_ELECTRICITY[x]]),\n )[0]\n )\n x += 1\n else:\n ax.bar(\n hod,\n elec_benefits_sum_by_hod[ACC_COMPONENTS_ELECTRICITY[x]],\n bottom=elec_benefits_sum_by_hod.iloc[:, 2 : x + 1].sum(axis=1),\n color=colors[x - 1],\n )\n legend_labels.append(\n re.findall(\n \".*Name: (.*),\",\n str(elec_benefits_sum_by_hod[ACC_COMPONENTS_ELECTRICITY[x]]),\n )[0]\n )\n x += 1\n\n # Set x and y limits based on min and max values\n ymax = elec_benefits_sum_by_hod.iloc[:, 2:x].sum(axis=1).max()\n if elec_benefits_sum_by_hod.iloc[:, 2:x].sum(axis=1).min() > 0:\n ymin = 0\n else:\n ymin = elec_benefits_sum_by_hod.iloc[:, 2:x].sum(axis=1).min()\n\n ax.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax.set_ylim(ymin * 1.1, ymax * 1.08)\n\n # Set x and y axis labels\n ax.set_xlabel(\"Hour of Day\", size=17, labelpad=5)\n ax.set_ylabel(\"$ Avoided Costs\", size=17)\n\n # Set plot title, size, and position\n ax.set_title(\n \"Sum of Electric Avoided Costs by Component and Hour of Day\",\n size=17,\n loc=\"left\",\n )\n\n # Tick and lebel parameters\n ax.tick_params(bottom=True, top=False, left=True, right=False)\n ax.set_xticks(np.arange(0, 24, step=4))\n ax.set_yticks(\n np.arange(\n int(round(ymin * 1.1, 0)),\n ymax * 1.08,\n step=max(round(ymax - ymin, 2) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n\n # Minor ticks\n ax.xaxis.set_minor_locator(AutoMinorLocator())\n ax.yaxis.set_minor_locator(AutoMinorLocator())\n\n # Legend\n plt.legend(\n legend_labels,\n bbox_to_anchor=(1, 1),\n fontsize=12,\n loc=\"upper left\",\n frameon=False,\n )\n\n # avoided_cost_summary_plot\n fig3, (ax1, ax2, ax3) = plt.subplots(\n 3, 1, figsize=(6, 10), sharex=True, sharey=False\n )\n axs = [ax1, ax2, ax3]\n hod = elec_benefits_sum_by_hod.index\n legend_labels = [\"Total\", \"Summer\", \"Shoulder\", \"Winter\"]\n\n ax1.plot(\n hod,\n total_benefits,\n c=\"royalblue\",\n marker=\"$\\u25EF$\",\n markersize=10,\n linewidth=3,\n linestyle=\"-\",\n )\n ax1.plot(hod, summer_benefits, c=\"darkorchid\", linewidth=1, linestyle=\"--\")\n ax1.plot(hod, shoulder_benefits, c=\"olivedrab\", linewidth=1, linestyle=\":\")\n ax1.plot(hod, winter_benefits, c=\"teal\", linewidth=1, linestyle=\"-\")\n ax2.plot(\n hod,\n total_savings,\n c=\"firebrick\",\n marker=\"$\\u2206$\",\n markersize=10,\n linewidth=3,\n linestyle=\"-\",\n )\n ax2.plot(hod, summer_savings, c=\"darkorchid\", linewidth=1, linestyle=\"--\")\n ax2.plot(hod, shoulder_savings, c=\"olivedrab\", linewidth=1, linestyle=\":\")\n ax2.plot(hod, winter_savings, c=\"teal\", linewidth=1, linestyle=\"-\")\n ax3.plot(\n hod,\n total_av_csts_avg,\n c=\"green\",\n marker=\"$\\u25A0$\",\n markersize=10,\n linewidth=3,\n linestyle=\"-\",\n )\n ax3.plot(hod, summer_av_csts_avg, c=\"darkorchid\", linewidth=1, linestyle=\"--\")\n ax3.plot(hod, shoulder_av_csts_avg, c=\"olivedrab\", linewidth=1, linestyle=\":\")\n ax3.plot(hod, winter_av_csts_avg, c=\"teal\", linewidth=1, linestyle=\"-\")\n\n leg1 = ax1.legend(legend_labels, fontsize=11, loc=\"upper left\", frameon=False)\n for line, text in zip(leg1.get_lines(), 
leg1.get_texts()):\n text.set_color(line.get_color())\n leg2 = ax2.legend(legend_labels, fontsize=11, loc=\"upper left\", frameon=False)\n for line, text in zip(leg2.get_lines(), leg2.get_texts()):\n text.set_color(line.get_color())\n leg3 = ax3.legend(legend_labels, fontsize=11, loc=\"upper left\", frameon=False)\n for line, text in zip(leg3.get_lines(), leg3.get_texts()):\n text.set_color(line.get_color())\n\n ax3.set_xticks(np.arange(0, 24, step=4))\n ax3.set_xlabel(\"Hour of Day\", size=14, labelpad=5)\n ax3.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=12\n )\n ax3.set_xlim(hod.min() - hod.max() * 0.04, hod.max() * 1.04)\n ax3.xaxis.set_minor_locator(AutoMinorLocator())\n\n ax1.set_ylabel(\"TRC Benefits ($)\", size=14)\n ax2.set_ylabel(\"Savings (MWh)\", size=14)\n ax3.set_ylabel(\"Av. Cost ($/MWh)\", size=14)\n\n if max(total_benefits + summer_benefits + shoulder_benefits + winter_benefits) < 0:\n ymax1 = 0\n else:\n ymax1 = max(\n total_benefits + summer_benefits + shoulder_benefits + winter_benefits\n )\n if min(total_benefits + summer_benefits + shoulder_benefits + winter_benefits) > 0:\n ymin1 = 0\n else:\n ymin1 = min(\n total_benefits + summer_benefits + shoulder_benefits + winter_benefits\n )\n if max(total_savings + summer_savings + shoulder_savings + winter_savings) < 0:\n ymax2 = 0\n else:\n ymax2 = max(total_savings + summer_savings + shoulder_savings + winter_savings)\n if min(total_savings + summer_savings + shoulder_savings + winter_savings) > 0:\n ymin2 = 0\n else:\n ymin2 = min(total_savings + summer_savings + shoulder_savings + winter_savings)\n if (\n max(\n total_av_csts_avg\n + summer_av_csts_avg\n + shoulder_av_csts_avg\n + winter_av_csts_avg\n )\n < 0\n ):\n ymax3 = 0\n else:\n ymax3 = max(\n total_av_csts_avg\n + summer_av_csts_avg\n + shoulder_av_csts_avg\n + winter_av_csts_avg\n )\n if (\n min(\n total_av_csts_avg\n + summer_av_csts_avg\n + shoulder_av_csts_avg\n + winter_av_csts_avg\n )\n > 0\n ):\n ymin3 = 0\n else:\n ymin3 = min(\n total_av_csts_avg\n + summer_av_csts_avg\n + shoulder_av_csts_avg\n + winter_av_csts_avg\n )\n\n # Tick and lebel parameters\n ax1.set_ylim(ymin1 * 1.08, ymax1 * 1.08)\n ax2.set_ylim(ymin2 * 1.08, ymax2 * 1.08)\n ax3.set_ylim(ymin3 * 1.08, ymax3 * 1.08)\n\n ax1.set_yticks(\n np.arange(\n ymin1 * 1.08,\n ymax1 * 1.08,\n step=max(round(ymax1 - ymin1, 3) / 5, int((round(ymax1 - ymin1, 0)) / 4)),\n )\n )\n ax2.set_yticks(\n np.arange(\n ymin2 * 1.08,\n ymax2 * 1.08,\n step=max(round(ymax2 - ymin2, 3) / 5, int((round(ymax2 - ymin2, 0)) / 4)),\n )\n )\n ax3.set_yticks(\n np.arange(\n ymin3 * 1.08,\n ymax3 * 1.08,\n step=max(round(ymax3 - ymin3, 3) / 5, int((round(ymax3 - ymin3, 0)) / 4)),\n )\n )\n\n ax1.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=12\n )\n ax2.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=12\n )\n ax3.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=12\n )\n\n # Shade peak region\n ax1.axvspan(16, 21, alpha=0.2, color=\"grey\")\n ax2.axvspan(16, 21, alpha=0.2, color=\"grey\")\n ax3.axvspan(16, 21, alpha=0.2, color=\"grey\")\n\n # Print key information\n plt.annotate(\n \"Electric Benefits = $\" + str(round(elec_benefits[\"total\"].sum(), 2)),\n xy=(350, 530),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Gas Benefits = $\" + str(round(gas_benefits, 2)),\n xy=(350, 505),\n xycoords=\"axes points\",\n fontsize=18,\n )\n 
plt.annotate(\n \"Total Benefits = $\"\n + str(round(elec_benefits[\"total\"].sum() + gas_benefits, 2)),\n xy=(350, 480),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"TRC Costs = $\" + str(trc_costs_record),\n xy=(350, 455),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"PAC Costs = $\" + str(pac_costs_record),\n xy=(350, 430),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"TRC = \" + str(trc_record),\n xy=(350, 405),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"PAC = \" + str(pac_record),\n xy=(350, 380),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Net Lifecycle Electric Savings = \" + str(lifecycle_net_mwh) + \" MWh\",\n xy=(350, 335),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Net Lifecycle Gas Savings = \" + str(lifecycle_net_therms) + \" Therms\",\n xy=(350, 310),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Net Lifecycle GHG Savings = \" + str(lifecycle_net_ghg) + \" Tons\",\n xy=(350, 285),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n str(round(100 * ((summer_peak_savings) / sum(total_savings)), 1))\n + \"% MWh savings during summer peak period\",\n xy=(350, 260),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n str(round(100 * ((summer_peak_benefits) / sum(total_benefits)), 1))\n + \"% Electric TRC benefits from summer peak period\",\n xy=(350, 235),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Electric Benefits per MWh = $\"\n + str(round(elec_benefits[\"total\"].sum() / lifecycle_net_mwh, 2)),\n xy=(350, 210),\n xycoords=\"axes points\",\n fontsize=18,\n )\n plt.annotate(\n \"Typical Avoided Cost per MWh = $\"\n + str(round(elec_benefits[\"av_csts_levelized\"].mean(), 2)),\n xy=(350, 145),\n xycoords=\"axes points\",\n fontsize=18,\n )\n\n # Set plot title, size, and position\n ax1.set_title(\n \"Savings and Avoided Cost Profiles\", size=16, loc=\"left\"\n ).set_position([0, 1.03])\n\n # marginal_ghg_savings_plot\n cmp = sns.diverging_palette(16, 260, l=35, n=25, as_cmap=True)\n\n fig4 = plt.figure(figsize=(8, 6), dpi=100)\n ax1 = fig4.gca()\n y_ticks = [\n 0,\n \"\",\n 2,\n \"\",\n 4,\n \"\",\n 6,\n \"\",\n 8,\n \"\",\n 10,\n \"\",\n 12,\n \"\",\n 14,\n \"\",\n 16,\n \"\",\n 18,\n \"\",\n 20,\n \"\",\n 22,\n ]\n hmp = sns.heatmap(ghgsav, cmap=cmp, ax=ax1, yticklabels=y_ticks, center=0.00)\n ax1.set_xlabel(\"Month\", size=15)\n ax1.set_ylabel(\"Hour of Day\", size=15)\n ax1.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=13\n )\n ax1.tick_params(\n which=\"major\",\n axis=\"y\",\n direction=\"out\",\n length=6,\n width=2,\n labelsize=13,\n rotation=0,\n )\n ax1.set_title(\"Electric GHG Savings by Month and Hour\", size=15, loc=\"left\", pad=8)\n cbar1 = hmp.collections[0].colorbar\n cbar1.ax.tick_params(labelsize=14)\n plt.annotate(\"Sum GHG\", xy=(370, 352), xycoords=\"axes points\", fontsize=12)\n plt.annotate(\"Savings (Tons)\", xy=(370, 336), xycoords=\"axes points\", fontsize=12)\n\n # month_hour_savings_benefits_plot\n fig5, (ax1, ax2) = plt.subplots(1, 2, figsize=(21, 10), dpi=200)\n y_ticks = [\n 0,\n \"\",\n 2,\n \"\",\n 4,\n \"\",\n 6,\n \"\",\n 8,\n \"\",\n 10,\n \"\",\n 12,\n \"\",\n 14,\n \"\",\n 16,\n \"\",\n 18,\n \"\",\n 20,\n \"\",\n 22,\n ]\n fleft = sns.heatmap(sav, cmap=cmp, ax=ax1, yticklabels=y_ticks, center=0.00)\n fright = sns.heatmap(avcsts, cmap=cmp, ax=ax2, yticklabels=y_ticks, center=0.00)\n 
ax1.set_xlabel(\"Month\", size=22)\n ax1.set_ylabel(\"Hour of Day\", size=22)\n ax2.set_xlabel(\"Month\", size=22)\n ax2.set_ylabel(\"Hour of Day\", size=22)\n ax1.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=18\n )\n ax1.tick_params(\n which=\"major\",\n axis=\"y\",\n direction=\"out\",\n length=6,\n width=2,\n labelsize=18,\n rotation=0,\n )\n ax2.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=18\n )\n ax2.tick_params(\n which=\"major\",\n axis=\"y\",\n direction=\"out\",\n length=6,\n width=2,\n labelsize=18,\n rotation=0,\n )\n ax1.set_title(\n \"MWh Savings by Month and Hour\", size=24, loc=\"left\", pad=15\n ).set_position([0, 1.1])\n ax2.set_title(\"$ Benefits by Month and Hour\", size=24, loc=\"left\", pad=15)\n fig4.tight_layout(pad=2.0)\n cbar1 = fleft.collections[0].colorbar\n cbar1.ax.tick_params(labelsize=18)\n cbar2 = fright.collections[0].colorbar\n cbar2.ax.tick_params(labelsize=18)\n plt.annotate(\"Sum MWh\", xy=(-200, 585), xycoords=\"axes points\", fontsize=20)\n plt.annotate(\"Savings\", xy=(-193, 560), xycoords=\"axes points\", fontsize=20)\n plt.annotate(\"Sum TRC\", xy=(435, 585), xycoords=\"axes points\", fontsize=20)\n plt.annotate(\"Benefits\", xy=(442, 560), xycoords=\"axes points\", fontsize=20)\n\n # savings_benefits_cumulative_sum_plot\n fig6 = plt.figure(figsize=(12, 7), dpi=250)\n ax1 = fig6.gca()\n ax1.plot(\n elec_benefits_hoy[\"hour_of_year\"],\n elec_benefits_hoy[\"hourly_savings\"],\n color=\"royalblue\",\n linewidth=3,\n )\n ax2 = ax1.twinx()\n ax2.plot(\n elec_benefits_hoy[\"hour_of_year\"],\n elec_benefits_hoy[\"total\"],\n color=\"firebrick\",\n linewidth=3,\n linestyle=\"--\",\n )\n ax2.axhline(y=0, color=\"gray\", linewidth=0.7, linestyle=\"--\")\n\n # Set x and y limits based on min and max values\n\n if (\n elec_benefits_hoy[\"hourly_savings\"].max() >= 0\n and elec_benefits_hoy[\"total\"].max() >= 0\n ):\n ymax1 = elec_benefits_hoy[\"hourly_savings\"].max()\n ymax2 = elec_benefits_hoy[\"total\"].max()\n elif (\n elec_benefits_hoy[\"hourly_savings\"].max() < 0\n and elec_benefits_hoy[\"total\"].max() < 0\n ):\n ymax1 = 0\n ymax2 = 0\n elif (\n elec_benefits_hoy[\"hourly_savings\"].max() < 0\n and elec_benefits_hoy[\"total\"].max() > 0\n ):\n ymax1 = (\n -1\n * elec_benefits_hoy[\"hourly_savings\"].min()\n * (\n elec_benefits_hoy[\"total\"].max()\n / (elec_benefits_hoy[\"total\"].max() - elec_benefits_hoy[\"total\"].min())\n )\n / (\n 1\n - elec_benefits_hoy[\"total\"].max()\n / (elec_benefits_hoy[\"total\"].max() - elec_benefits_hoy[\"total\"].min())\n )\n )\n ymax2 = elec_benefits_hoy[\"total\"].max()\n else:\n ymax1 = 0\n ymax2 = (\n -1\n * elec_benefits_hoy[\"total\"].min()\n * (\n elec_benefits_hoy[\"hourly_savings\"].max()\n / (\n elec_benefits_hoy[\"hourly_savings\"].max()\n - elec_benefits_hoy[\"hourly_savings\"].min()\n )\n )\n )\n\n if (\n elec_benefits_hoy[\"hourly_savings\"].min() <= 0\n and elec_benefits_hoy[\"total\"].min() <= 0\n ):\n ymin1 = elec_benefits_hoy[\"hourly_savings\"].min()\n ymin2 = elec_benefits_hoy[\"total\"].min()\n elif (\n elec_benefits_hoy[\"hourly_savings\"].min() > 0\n and elec_benefits_hoy[\"total\"].min() > 0\n ):\n ymin1 = 0\n ymin2 = 0\n elif (\n elec_benefits_hoy[\"hourly_savings\"].min() > 0\n and elec_benefits_hoy[\"total\"].min() < 0\n ):\n ymin1 = (\n -1\n * elec_benefits_hoy[\"hourly_savings\"].max()\n * (\n elec_benefits_hoy[\"total\"].min()\n / (elec_benefits_hoy[\"total\"].min() - 
elec_benefits_hoy[\"total\"].max())\n )\n / (\n 1\n - elec_benefits_hoy[\"total\"].min()\n / (elec_benefits_hoy[\"total\"].min() - elec_benefits_hoy[\"total\"].max())\n )\n )\n ymin2 = elec_benefits_hoy[\"total\"].min()\n else:\n ymin1 = 0\n ymin2 = (\n -1\n * elec_benefits_hoy[\"total\"].min()\n * (\n elec_benefits_hoy[\"hourly_savings\"].min()\n / (\n elec_benefits_hoy[\"hourly_savings\"].min()\n - elec_benefits_hoy[\"hourly_savings\"].min()\n )\n )\n )\n\n # Set x and y axis limits\n ax1.set_xlim(-340, 9000)\n ax1.set_ylim(ymin1 * 1.08, ymax1 * 1.08)\n ax2.set_ylim(ymin2 * 1.08, ymax2 * 1.08)\n\n # Set x and y axis labels\n ax1.set_xlabel(\"Hour of Year\", size=17, labelpad=5)\n ax1.set_ylabel(\"Net Lifecycle Savings (MWh)\", size=17)\n ax2.set_ylabel(\"$ TRC Benefits\", size=17, rotation=-90, labelpad=20)\n\n # Set plot title, size, and position\n ax1.set_title(\n \"Cumulative Savings and TRC Benefits by Hour of Year\",\n size=17,\n loc=\"left\",\n pad=8,\n )\n\n # Tick and lebel parameters\n ax1.set_xticks(np.arange(0, 8760, step=1000))\n ax1.set_yticks(\n np.arange(\n int(round(ymin1 * 1.1, 0)),\n ymax1 * 1.08,\n step=max(round(ymax1 - ymin1, 2) / 5, int((round(ymax1 - ymin1, 0)) / 4)),\n )\n )\n ax1.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax1.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n\n ax2.set_xticks(np.arange(0, 8760, step=1000))\n ax2.set_yticks(\n np.arange(\n int(round(ymin2 * 1.1, 0)),\n ymax2 * 1.08,\n step=max(round(ymax2 - ymin2, 2) / 5, int((round(ymax2 - ymin2, 0)) / 4)),\n )\n )\n ax2.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax2.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n\n # Minor ticks\n ax1.xaxis.set_minor_locator(AutoMinorLocator())\n ax1.yaxis.set_minor_locator(AutoMinorLocator())\n ax2.yaxis.set_minor_locator(AutoMinorLocator())\n\n # Legend\n ax1.legend(\n [\"Savings\"],\n fontsize=12,\n bbox_to_anchor=(0.02, 1),\n loc=\"upper left\",\n frameon=False,\n )\n ax2.legend(\n [\"TRC Beneftis\"],\n fontsize=12,\n bbox_to_anchor=(0.02, 0.95),\n loc=\"upper left\",\n frameon=False,\n )\n\n fig7 = plt.figure(figsize=(12, 7), dpi=250)\n ax = fig7.gca()\n colors1 = [\n \"black\",\n \"royalblue\",\n \"black\",\n \"pink\",\n \"firebrick\",\n \"gray\",\n \"darkviolet\",\n \"darkorange\",\n \"green\",\n \"saddlebrown\",\n ]\n legend_labels2 = []\n\n ax.plot(\n elec_benefits_hoy[\"hour_of_year\"],\n elec_benefits_hoy[ACC_COMPONENTS_ELECTRICITY[0]],\n color=colors1[0],\n linewidth=3,\n )\n legend_labels2.append(ACC_COMPONENTS_ELECTRICITY[0])\n x = 1\n while x <= len(ACC_COMPONENTS_ELECTRICITY) - 2:\n ax.plot(\n elec_benefits_hoy[\"hour_of_year\"],\n elec_benefits_hoy[ACC_COMPONENTS_ELECTRICITY[x]],\n color=colors1[x],\n )\n legend_labels2.append(ACC_COMPONENTS_ELECTRICITY[x])\n x += 1\n\n # Set x and y limits based on min and max values\n if max(elec_benefits_hoy.iloc[:, 2:x].max()) < 0:\n ymax = 0\n else:\n ymax = max(elec_benefits_hoy.iloc[:, 2:x].max())\n if min(elec_benefits_hoy.iloc[:, 2:x].min()) > 0:\n ymin = 0\n else:\n ymin = min(elec_benefits_hoy.iloc[:, 2:x].min())\n\n ax.set_xlim(-340, 9000)\n ax.set_ylim(ymin * 1.1, ymax * 1.08)\n\n # Set x and y axis labels\n ax.set_xlabel(\"Hour of Year\", size=17, labelpad=5)\n ax.set_ylabel(\"$ TRC Benefits\", size=17)\n\n # Set plot title, size, and position\n ax.set_title(\n \"Sum of Avoided Costs 
by Component and Hour of Day\", size=17, loc=\"left\"\n )\n\n # Tick and lebel parameters\n ax.set_xticks(np.arange(0, 8760, step=1000))\n ax.set_yticks(\n np.arange(\n int(round(ymin * 1.1, 0)),\n ymax * 1.08,\n step=max(round(ymax - ymin, 3) / 5, int((round(ymax - ymin, 0)) / 4)),\n )\n )\n ax.tick_params(\n which=\"major\", axis=\"x\", direction=\"out\", length=6, width=2, labelsize=14\n )\n ax.tick_params(\n which=\"major\", axis=\"y\", direction=\"out\", length=6, width=2, labelsize=14\n )\n\n # Minor ticks\n ax.xaxis.set_minor_locator(AutoMinorLocator())\n ax.yaxis.set_minor_locator(AutoMinorLocator())\n\n # Legend\n plt.legend(\n legend_labels2,\n bbox_to_anchor=(1, 1),\n fontsize=12,\n loc=\"upper left\",\n frameon=False,\n )", "def visualize(self, time, pred, true):\n plt.plot(time, true, label='Actual')\n plt.plot(time, pred, label='Predicted')\n plt.xlabel('Time')\n plt.ylabel('Price ($)')\n plt.legend(bbox_to_anchor=(0.1, 1), loc=2, borderaxespad=0.,\n prop={'size': 14})\n plt.show()", "def plot_costs(self, threshold=0):\n epochs_range = np.arange(threshold, len(self.costs), 1)\n plt.plot(epochs_range, self.costs[threshold:], color='green', marker='o')\n plt.title('Cost function plot. Eta={:.2f} Lambda={:2.2f}'.format(self.eta, self.lambda_r))\n plt.xlabel('Epochs')\n plt.ylabel('Cost')\n plt.grid(True)\n plt.show()", "def plot_stress_time(F_tot, response_t, coords, t_range):\n section = np.where((t>t_range[0]) & (t<t_range[1]))[0]\n fig, ax1 = plt.subplots(figsize=[15,5])\n ax2 = ax1.twinx()\n# ax1.set_title('Load and response at '+str(coords),fontsize = 14)\n ax1.set_xlim(t_range)\n ax1.set_xlabel('t [s]')\n resp = ax1.plot(t[section], response_t[section]/10**6, color=\"#00A6D6\",\n label='Equivalent gate stress')\n ax1.set_ylabel('Stress [MPa]', fontsize=12)\n d_max = 1.2*max(response_t[section])/10**6\n d_mean = np.mean(response_t[section])/10**6\n ax1.set_ylim(d_mean-d_max,d_max)\n ax1.legend()\n\n force = ax2.plot(t[section], F_tot[section]/1000, color=\"#c3312f\", label = 'Wave force')\n ax2.set_ylabel('Integrated wave force [$kN/m$]', fontsize = 12)\n F_lim = 1.2*max(F_tot[section])/1000\n F_mean = np.mean(F_tot[section]/1000)\n ax2.set_ylim(F_mean-F_lim,F_lim)\n\n lines = resp + force\n labs = [l.get_label() for l in lines]\n ax1.grid(lw=0.25)\n ax1.legend(lines,labs, fontsize = 12)\n return fig", "def H_perform_plot(performance, hurricane):\n fig = plt.figure(figsize = (15, 10))\n for i in range(len(performance)):\n temp1 = performance[i]\n temp2 = hurricane[i]\n plt.plot(np.arange(0, len(temp1), 1), temp1, color = temp2.c, label = temp2.name)\n plt.xlabel('Time Step')\n plt.xticks(np.arange(0, len(temp1), 30))\n plt.ylabel('Performance')\n plt.legend(bbox_to_anchor=(1, 1), loc='upper left', ncol=1, frameon = 0)\n plt.grid(True)", "def results_plot_fuel_reactor(self):\n \n import matplotlib.pyplot as plt \n\n # Total pressure profile\n P = []\n for z in self.MB_fuel.z:\n P.append(value(self.MB_fuel.P[z]))\n fig_P = plt.figure(1)\n plt.plot(self.MB_fuel.z, P)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Total Pressure [bar]\") \n\n # Temperature profile\n Tg = []\n Ts = []\n# Tw = []\n for z in self.MB_fuel.z:\n Tg.append(value(self.MB_fuel.Tg[z] - 273.15))\n Ts.append(value(self.MB_fuel.Ts[z] - 273.15))\n# Tw.append(value(self.MB_fuel.Tw[z]))\n fig_T = plt.figure(2)\n plt.plot(self.MB_fuel.z, Tg, label='Tg')\n plt.plot(self.MB_fuel.z, Ts, label='Ts')\n# plt.plot(self.MB_fuel.z, Tw, label='Tw')\n plt.legend(loc=0,ncol=2)\n plt.grid()\n plt.xlabel(\"Bed 
height [-]\")\n plt.ylabel(\"Temperature [C]\") \n \n # Superficial gas velocity and minimum fluidization velocity\n vg = []\n umf = []\n for z in self.MB_fuel.z:\n vg.append(value(self.MB_fuel.vg[z]))\n umf.append(value(self.MB_fuel.umf[z]))\n fig_vg = plt.figure(3)\n plt.plot(self.MB_fuel.z, vg, label='vg')\n plt.plot(self.MB_fuel.z, umf, label='umf')\n plt.legend(loc=0,ncol=2)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Superficial gas velocity [m/s]\")\n \n # Gas components molar flow rate\n for j in self.MB_fuel.GasList:\n F = []\n for z in self.MB_fuel.z:\n F.append(value(self.MB_fuel.F[z,j]))\n fig_F = plt.figure(4)\n plt.plot(self.MB_fuel.z, F, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.GasList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Gas component molar flow rate, F [mol/s]\") \n \n # Bulk gas phase total molar flow rate\n Ftotal = []\n for z in self.MB_fuel.z:\n Ftotal.append(value(self.MB_fuel.Ftotal[z]))\n fig_Ftotal = plt.figure(5)\n plt.plot(self.MB_fuel.z, Ftotal)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Total molar gas flow rate [mol/s]\") \n\n # Solid components mass flow rate\n for j in self.MB_fuel.SolidList:\n M = []\n for z in self.MB_fuel.z:\n M.append(value(self.MB_fuel.Solid_M[z,j]))\n fig_M = plt.figure(6)\n plt.plot(self.MB_fuel.z, M, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.SolidList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Solid components mass flow rate [kg/s]\")\n \n # Bulk solid phase total molar flow rate\n Mtotal = []\n for z in self.MB_fuel.z:\n Mtotal.append(value(self.MB_fuel.Solid_M_total[z]))\n fig_Mtotal = plt.figure(7)\n plt.plot(self.MB_fuel.z, Mtotal)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Solid total mass flow rate [kg/s]\") \n \n # Gas phase concentrations\n for j in self.MB_fuel.GasList:\n Cg = []\n for z in self.MB_fuel.z:\n Cg.append(value(self.MB_fuel.Cg[z,j]))\n fig_Cg = plt.figure(8)\n plt.plot(self.MB_fuel.z, Cg, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.GasList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Concentration [mol/m3]\") \n \n # Gas phase mole fractions\n for j in self.MB_fuel.GasList:\n y = []\n for z in self.MB_fuel.z:\n y.append(value(self.MB_fuel.y[z,j]))\n fig_y = plt.figure(9)\n plt.plot(self.MB_fuel.z, y, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.GasList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"y [-]\") \n \n # Solid phase mass fractions\n for j in self.MB_fuel.SolidList:\n x = []\n for z in self.MB_fuel.z:\n x.append(value(self.MB_fuel.x[z,j]))\n fig_x = plt.figure(10)\n plt.plot(self.MB_fuel.z, x, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.SolidList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"x [-]\") \n\n # Total mass fraction\n xtot = []\n for z in self.MB_fuel.z:\n xtot.append(value(self.MB_fuel.xtot[z]))\n fig_xtot = plt.figure(11)\n plt.plot(self.MB_fuel.z, xtot)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Total mass fraction [-]\") \n \n # # Gas mix density\n # rhog = []\n # for z in self.MB_fuel.z:\n # rhog.append(value(self.MB_fuel.rho_vap[z]))\n # fig_rhog = plt.figure(23)\n # plt.plot(self.MB_fuel.z, rhog)\n # plt.grid()\n # plt.xlabel(\"Bed height [-]\")\n # plt.ylabel(\"Gas mix density [kg/m3]\") \n \n # Fe conversion\n X_Fe = []\n for z in self.MB_fuel.z:\n X_Fe.append(value(self.MB_fuel.X[z])*100)\n fig_X_Fe = plt.figure(13)\n plt.plot(self.MB_fuel.z, X_Fe)\n plt.grid()\n plt.xlabel(\"Bed 
height [-]\")\n plt.ylabel(\"Fraction of metal oxide converted [%]\")", "def plot_cost(self, cost_tr_data: [[int]], cost_test_data: [[int]], title: str=\"Cost\"):\n plot_title, img_title = self.prep_titles(title)\n test_legend = ['training data', 'test data']\n\n # Data for plotting x- and y-axis\n x = np.arange(1, CFG.EPOCHS + 1)\n y = [cost_tr_data, cost_test_data]\n\n # prints x and y-axis values\n print(f'COST:')\n print(f'x: {x}')\n print(f'y: {y}')\n max_y1 = np.amax(y[0]) + 0.1\n max_y2 = np.amax(y[1]) + 0.1\n max_y = max(max_y1, max_y2)\n\n plt.figure(figsize=(CFG.FIG_WIDTH, CFG.FIG_HEIGHT))\n\n # Create the lineplot\n for line in range(2):\n ax = sns.lineplot(x=x, y=y[line], color=CFG.COLOR_COST[line], label=test_legend[line])\n\n ax.legend(loc='best')\n ax.set(xlabel='Epochs',\n ylabel='Cost',\n title=plot_title,\n xlim=(1, CFG.EPOCHS),\n ylim=(0, max_y))\n\n self.save_plot(img_title)\n plt.show()", "def cost_profile_plot(cost_values):\n \n ax = plt.figure(figsize = (7.5,4.5)).gca()\n cost_values = np.array(cost_values)\n span = np.arange(1,len(cost_values)+1)\n ax.plot(span,cost_values, color = 'k', alpha = 0.7)\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n ax.set_xlabel('Epoch')\n ax.set_ylabel('Cost (MSE) value')\n plt.show()\n plt.close('all')", "def Traffic_Perform_Plot(performance, hurricanes):\n fig = plt.figure(figsize = (8, 6))\n for i in range(len(performance)):\n temp1 = performance[i]\n temp2 = hurricanes[i]\n perform = sf.Normalize(temp1, Type = 'max')\n plt.plot(np.arange(0, len(perform), 1), perform, color = temp2.c, label = temp2.name, marker = 'o')\n plt.xlabel('Time Step')\n plt.xticks(np.arange(0, len(perform), 1))\n plt.ylabel('Performance')\n plt.legend(bbox_to_anchor=(1, 1), loc='upper left', ncol=1, frameon = 0)\n plt.grid(True)", "def _plot(self, step, rewards, losses):\n plt.figure(figsize=(20, 5))\n plt.subplot(131)\n plt.title('Total Episode Reward')\n plt.plot(rewards)\n plt.subplot(132)\n plt.title('MSE Loss')\n plt.plot(losses)\n plt.show()", "def timePiePlot(self, pctM=0.04):\n names = ['Voronoi Tesselation', 'Connectivity of Cluster Cells']\n dict = {}\n for i,j in zip(names, self.times):\n dict[i] = j\n\n total = sum(dict.values())\n title = 'Time Consumption per Step of Voronoi Analysis - Total [s]= ' + str(round(total, 3))\n labels = []\n values = []\n for v in dict.keys():\n if dict[v] / total > pctM:\n labels.append(v + ' - ' + str(round(dict[v], 3)) + ' (' + str(round(dict[v] * 100 / total, 2)) + ' %)')\n else:\n labels.append(v)\n values.append(dict[v] / total)\n labdis = 1.07\n cmap = plt.get_cmap(\"plasma\")\n c = np.arange(len(dict.keys())) / len(dict.keys())\n colors = cmap(c)\n\n fig = plt.figure()\n fig.set_size_inches(12, 7)\n plt.title(title, fontsize=22)\n plt.pie(dict.values(), labels=labels, shadow=True, startangle=0, labeldistance=labdis, colors=colors)\n plt.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.", "def plot_cost_vs_clusters(cost,num_clusters,filename):\n fig, ax = plt.subplots()\n ax.plot(num_clusters,cost)\n ax.grid()\n ax.set_xlabel(\"Number of clusters\")\n ax.set_ylabel(\"Cost of dropping off TAs\")\n fig.savefig(filename)\n plt.close()", "def draw_bonus_error(error):\n f, ax = plt.subplots()\n vertices = np.arange(10, 50)\n ax.plot(vertices, error[10:], 'b', label='Error')\n plt.xlabel('Rounds')\n plt.ylabel('Misclassification Error')\n plt.title('Misclassification Error: l = 10, m = 20, n = 40')\n plt.legend(loc='upper left')\n plt.grid(True)\n plt.show()", "def 
plot_stop_and_cost(opt_res, scale='log'):\n nplots=len(opt_res)\n ncols=2\n nrows=int(np.ceil(nplots/ncols))\n _, axes = plt.subplots(nrows, ncols, figsize=(20, 5*nrows))\n for n in range(nplots):\n i = int(np.floor(n / ncols))\n j=n % ncols\n axes[i,j].plot(opt_res[n]['stop'], label='stopping criterion', color='b')\n axes[i,j].set_title('Outer loop # '+ str(n))\n axes[i,j].set_yscale(scale)\n # axes[i,j].set_xscale('log')\n axes[i,j].grid(True)\n axes[i,j].set_ylabel('stop crit')\n ax_2=axes[i,j].twinx()\n ax_2.plot(opt_res[n]['obj'], label='Total cost', color='g')\n ax_2.set_ylabel('total cost')\n ax_2.set_yscale(scale)\n ax_2.set_yticks([np.min(opt_res[n]['obj']), np.max(opt_res[n]['obj'])])\n # axes[i,j].set_ylim([10**2,10**8])\n # axes[i,j].set_xlim([0,100])\n axes[i,j].legend()\n ax_2.legend()", "def visualize_raw_dat_cost(self, dat):\n print('Cost of Games')\n\n plt.hist(dat['Price'].dropna())\n plt.xlabel('Price')\n plt.ylabel('Count')\n plt.title('Price Histogram')\n plt.show()\n\n print('Previous graph also has a long-tail. Will narrow the scope again')\n print('~99% of results in following graph')\n\n plt.hist(dat[(dat['Price'] < 15)]['Price'].dropna())\n plt.xlabel('Price')\n plt.ylabel('Count')\n plt.title('Price of Apps')\n plt.show()\n\n print('''84% of apps are free, skewing the results further. \n Looking into only non-free apps for a clearer picture on what remains''')\n\n plt.hist(dat[(dat['Price'] < 15) & (dat['Price'] != 0)]['Price'].dropna())\n plt.xlabel('Price')\n plt.ylabel('Count')\n plt.title('Price of non-free Apps')\n plt.show()", "def showPlot2():\n interested_in = list(range(1,10))\n proc_sim_data = []\n for item in interested_in:\n len_sim_data = []\n raw_sim_data = runSimulation(item, 1.0, 25, 25, 0.75, 100, Robot, False)\n for mes in raw_sim_data:\n len_sim_data.append(len(mes))\n proc_sim_data.append(sum(len_sim_data)/len(len_sim_data))\n plot(interested_in, proc_sim_data)\n title('Dependence of cleaning time on number of robots')\n xlabel('number of robots (tiles)')\n ylabel('mean time (clocks)')\n show()", "def plotTime(self):\n plt.figure()\n t = [i for i in range(len(self.nodes_infected))]\n print(t)\n plt.title('Nodos infectados vs Tiempo')\n plt.xlabel('Instantes de tiempo')\n plt.ylabel('# de nodos infectados')\n plt.plot(t, self.nodes_infected)\n plt.grid(True)\n plt.show()", "def plot_stop_and_cost_output(opt_res, path, scale='log', nframes=4):\n import matplotlib\n font = {'size' : 15, 'weight': 'normal'}\n matplotlib.rc('font', **font)\n\n nplots=len(opt_res)\n ncols=2\n nrows=int(np.ceil(nframes/ncols))\n _, axes = plt.subplots(nrows, ncols, figsize=(25, 5*nrows))\n for n in range(nframes):\n i = int(np.floor(n / ncols))\n j=n % ncols\n axes[i,j].plot(opt_res[n]['stop'], label='Stopping criterion', color='b')\n axes[i,j].set_title('Outer loop # '+ str(n))\n axes[i,j].set_yscale(scale)\n # axes[i,j].set_xscale('log')\n axes[i,j].grid(True)\n axes[i,j].set_ylabel('Stopping Criterion')\n ax_2=axes[i,j].twinx()\n ax_2.plot(opt_res[n]['obj'], label='Total cost', color='g')\n ax_2.set_ylabel('Total cost')\n ax_2.set_yscale(scale)\n # axes[i,j].set_ylim([10**2,10**8])\n # axes[i,j].set_xlim([0,100])\n axes[i,j].legend(loc=1)\n ax_2.legend(loc=2)\n plt.savefig(path,transparent=True, dpi=400)", "def arrr_starrr_graph(self):\n\n plt.figure()\n total_cost = 0\n\n # plot batteries\n counter = 0\n for batt in self.grid.batteries:\n plt.plot(batt.x, batt.y, marker='x',\n color=colors[counter], markersize=10)\n counter += 1\n\n # iterate over houses and 
path\n for house in self.grid.houses:\n battery = self.grid.batteries[house.connection]\n\n # get path coordinates\n path_data = house.path\n\n # plot path and house\n plt.plot(path_data[0][0], path_data[0][1],\n color=colors[house.connection], linewidth=.3)\n plt.plot(house.x, house.y, marker='p',\n color=colors[house.connection])\n total_cost += path_data[1]\n plt.draw()\n plt.pause(0.000000001)\n\n plt.title(f\"total cost = {total_cost}\")", "def SA_data_display(opt_df, all_df):\n fig, axs = plt.subplots(2, 3)\n\n axs[0,0].set_title(\"Optimal rewire attempts for circularity\")\n axs[0,0].set_ylabel(\"Percent waste %\")\n axs[0,0].set_xlabel(\"Time (s)\")\n axs[0,0].plot(opt_df[\"Time (s)\"], opt_df[\"Percent waste (%)\"])\n\n axs[0,1].set_title(\"Optimal rewire attempts acceptance probability\")\n axs[0,1].set_ylabel(\"Acceptance Probability\")\n axs[0,1].set_xlabel(\"Time (s)\") # time??\n axs[0,1].scatter(opt_df[\"Time (s)\"], opt_df[\"Probability\"])\n\n axs[0,2].set_title(\"Optimal rewire attempts temperature decrease\")\n axs[0,2].set_ylabel(\"Temperature\")\n axs[0,2].set_xlabel(\"Time (s)\") # time??\n axs[0,2].plot(opt_df[\"Time (s)\"], opt_df[\"Temperature\"])\n\n axs[1,0].set_title(\"All rewire attempts for circularity\")\n axs[1,0].set_ylabel(\"Percent waste %\")\n axs[1,0].set_xlabel(\"Time (s)\")\n axs[1,0].plot(all_df[\"Time (s)\"], all_df[\"Percent waste (%)\"])\n\n axs[1,1].set_title(\"All rewire attempts acceptance probability\")\n axs[1,1].set_ylabel(\"Acceptance Probability\")\n axs[1,1].set_xlabel(\"Time (s)\") # time??\n axs[1,1].scatter(all_df[\"Time (s)\"], all_df[\"Probability\"])\n\n axs[1,2].set_title(\"All rewire attempts temperature decrease\")\n axs[1,2].set_ylabel(\"Temperature\")\n axs[1,2].set_xlabel(\"Time (s)\") # time??\n axs[1,2].plot(all_df[\"Time (s)\"], all_df[\"Temperature\"])\n\n return plt.show()", "def plot_graph(error_rates, avg_hits):\n plt.xlabel(\"Error rates (σ)\")\n plt.ylabel(\"Average pins hit\")\n plt.plot(error_rates, avg_hits)\n plt.show()", "def plot_regression_results(ax, y_true, y_pred, title, scores, elapsed_time):\n ax.plot([y_true.min(), y_true.max()],\n [y_true.min(), y_true.max()],\n '--r', linewidth=2)\n ax.scatter(y_true, y_pred, alpha=0.2)\n\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.get_xaxis().tick_bottom()\n ax.get_yaxis().tick_left()\n ax.spines['left'].set_position(('outward', 10))\n ax.spines['bottom'].set_position(('outward', 10))\n ax.set_xlim([y_true.min(), y_true.max()])\n ax.set_ylim([y_true.min(), y_true.max()])\n ax.set_xlabel('Measured')\n ax.set_ylabel('Predicted')\n extra = plt.Rectangle((0, 0), 0, 0, fc=\"w\", fill=False,\n edgecolor='none', linewidth=0)\n ax.legend([extra], [scores], loc='upper left')\n title = title + '\\n Evaluation in {:.2f} seconds'.format(elapsed_time)\n ax.set_title(title)", "def plotLoss():\n # ssr\n ssr = np.log(gradientDescent(X, y)[1])\n # number of iterations \n iterations = np.log(np.arange(1, len(ssr) + 1, 1))\n # plot reduction of ssr\n plt.plot(iterations, ssr)\n # xlabel\n plt.xlabel(\"Iteration\")\n # ylabel\n plt.ylabel(\"SSR\")\n # title\n plt.title(\"Reduction of SSR by number of Iterations\")\n # show plot \n plt.show()", "def showPlot1():\n\n interested_in = list(range(5,30,5))\n proc_sim_data = []\n for item in interested_in:\n len_sim_data = []\n raw_sim_data = runSimulation(1, 1.0, item, item, 0.75, 100, Robot, False)\n for mes in raw_sim_data:\n len_sim_data.append(len(mes))\n 
proc_sim_data.append(sum(len_sim_data)/len(len_sim_data))\n plot(interested_in, proc_sim_data)\n title('Dependence of cleaning time on room size')\n xlabel('area of the room (tiles)')\n ylabel('mean time (clocks)')\n show()" ]
[ "0.66773504", "0.6464759", "0.641111", "0.63844514", "0.62017936", "0.6200148", "0.6091807", "0.60301834", "0.59970903", "0.59831995", "0.5969821", "0.59258425", "0.58940935", "0.5876098", "0.5858264", "0.5832031", "0.58274806", "0.58000296", "0.5795744", "0.5789539", "0.57780945", "0.5776379", "0.5770956", "0.5749664", "0.57484543", "0.573918", "0.573611", "0.57064074", "0.5692671", "0.5678414" ]
0.7189957
0
Clean seleniumwire captured requests to avoid accumulating too many requests after many GET calls
def cleanCapturedRequests(driver):
    # driver.requests = None
    if hasattr(driver, "requests"):
        del driver.requests
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cancelRequests(self):\n self.get_bmc_website()\n self.__cancelMyRequest = Cancel(self.browser)\n self.__cancelMyRequest.cancelRequest()", "def _invalidate_http_cache(self):\n self._requests_cache = {}", "def reset_all_requests(self):\n self._send_request(\"/reset\")", "def tearDown(self):\n clear_url_caches()", "def tearDown(self):\n clear_url_caches()", "def __call__(self):\n self.page1() # GET web (request 101)\n\n grinder.sleep(1000)\n self.page2() # GET web (request 201)\n\n grinder.sleep(1000)\n self.page3() # GET web (request 301)\n\n grinder.sleep(1000)\n self.page4() # GET web (request 401)\n\n grinder.sleep(1000)\n self.page5() # GET web (request 501)\n\n grinder.sleep(1000)\n self.page6() # GET web (request 601)\n\n grinder.sleep(1000)\n self.page7() # GET web (request 701)\n\n grinder.sleep(1000)\n self.page8() # GET web (request 801)\n\n grinder.sleep(1000)\n self.page9() # GET web (request 901)\n\n grinder.sleep(1000)\n self.page10() # GET web (request 1001)\n\n grinder.sleep(1000)\n self.page11() # GET web (request 1101)\n\n grinder.sleep(1000)\n self.page12() # GET web (request 1201)\n\n grinder.sleep(1000)\n self.page13() # GET web (request 1301)\n\n grinder.sleep(1000)\n self.page14() # GET web (request 1401)\n\n grinder.sleep(1000)\n self.page15() # GET web (request 1501)\n\n grinder.sleep(1000)\n self.page16() # GET web (request 1601)\n\n grinder.sleep(1000)\n self.page17() # GET web (request 1701)\n\n grinder.sleep(1000)\n self.page18() # GET web (request 1801)\n\n grinder.sleep(1000)\n self.page19() # GET web (request 1901)\n\n grinder.sleep(1000)\n self.page20() # GET web (request 2001)\n\n grinder.sleep(1000)\n self.page21() # GET web (request 2101)\n\n grinder.sleep(1000)\n self.page22() # GET web (request 2201)\n\n grinder.sleep(1000)\n self.page23() # GET web (request 2301)\n\n grinder.sleep(1000)\n self.page24() # GET web (request 2401)\n\n grinder.sleep(1000)\n self.page25() # GET web (request 2501)\n\n grinder.sleep(1000)\n self.page26() # GET web (request 2601)\n\n grinder.sleep(1000)\n self.page27() # GET web (request 2701)\n\n grinder.sleep(1000)\n self.page28() # GET web (request 2801)\n\n grinder.sleep(1000)\n self.page29() # GET web (request 2901)\n\n grinder.sleep(1000)\n self.page30() # GET web (request 3001)\n\n grinder.sleep(1000)\n self.page31() # GET web (request 3101)\n\n# grinder.sleep(1000)\n# self.page32() # POST downloads (request 3201)\n\n# grinder.sleep(1000)\n# self.page33() # GET goog-malware-shavar_s_10501-10520.10501.10502-10520: (request 3301)\n\n grinder.sleep(1000)\n self.page34() # GET web (request 3401)\n\n grinder.sleep(1000)\n self.page35() # GET web (request 3501)\n# self.page36() # GET goog-malware-shavar_a_9606-9610.9606-9609.9610: (request 3601)\n\n# grinder.sleep(1000)\n# self.page37() # GET goog-phish-shavar_s_36981-36985.36981-36985.: (request 3701)\n\n# grinder.sleep(1000)\n# self.page38() # GET goog-phish-shavar_s_36986-36990.36986-36987.36988-36990: (request 3801)\n\n# grinder.sleep(1000)\n# self.page39() # GET goog-phish-shavar_a_46491-46500.46491-46499.46500: (request 3901)\n\n grinder.sleep(1000)\n self.page40() # GET web (request 4001)\n\n grinder.sleep(1000)\n self.page41() # GET web (request 4101)\n\n grinder.sleep(1000)\n self.page42() # GET web (request 4201)\n\n grinder.sleep(1000)\n self.page43() # GET web (request 4301)\n\n grinder.sleep(1000)\n self.page44() # GET web (request 4401)\n\n grinder.sleep(1000)\n self.page45() # GET web (request 4501)\n\n grinder.sleep(1000)\n self.page46() # GET web 
(request 4601)\n\n grinder.sleep(1000)\n self.page47() # GET web (request 4701)\n\n grinder.sleep(1000)\n self.page48() # GET web (request 4801)\n\n grinder.sleep(1000)\n self.page49() # GET web (request 4901)\n\n grinder.sleep(1000)\n self.page50() # GET web (request 5001)\n\n grinder.sleep(1000)\n self.page51() # GET web (request 5101)\n\n grinder.sleep(1000)\n self.page52() # GET web (request 5201)\n\n grinder.sleep(1000)\n self.page53() # GET web (request 5301)", "def _clear_request(self):\n self._request = None", "def clear_requests(self, path):\n body = {}\n body['path'] = path\n data = json.dumps(body)\n self._send_request(\"/clear\", data)", "def clean_stale_issues():\n from security_monkey.common.audit_issue_cleanup import clean_stale_issues\n clean_stale_issues()", "def selenium_teardown():\n families_to_delete, visits_to_delete, responses_to_delete = [], [], []\n\n families_to_delete.extend(Family.objects.filter(study_id_number=59638))\n families_to_delete.extend(Family.objects.filter(study_id_number=83695))\n for f in families_to_delete:\n visits_to_delete.extend(f.visit_set.all())\n for v in visits_to_delete:\n responses_to_delete.extend(v.response_set.all())\n\n for r in responses_to_delete:\n r.delete()\n for v in visits_to_delete:\n v.delete()\n for f in families_to_delete:\n f.delete()", "def erase_captured_urls(url_list):\n if gs.local:\n erase_captured_urls_local(url_list)\n else:\n erase_captured_urls_aws(url_list)", "def __call__(self):\n self.page1() # GET supercars.do (requests 101-111)\n\n grinder.sleep(2117)\n self.page2() # GET cars.do (requests 201-202)\n\n grinder.sleep(1867)\n self.page3() # GET car.do (request 301)\n\n grinder.sleep(4351)\n self.page4() # GET enquire.do (requests 401-402)\n\n grinder.sleep(16341)\n self.page5() # POST enquire.do (request 501)\n\n grinder.sleep(1309)\n self.page6() # GET supercars.do (request 601)\n\n grinder.sleep(669)\n self.page7() # GET cars.do (requests 701-702)\n\n grinder.sleep(1260)\n self.page8() # GET car.do (request 801)\n\n grinder.sleep(837)\n self.page9() # GET car.do (request 901)\n\n grinder.sleep(1108)\n self.page10() # GET search.do (request 1001)\n\n grinder.sleep(3146)\n self.page11() # POST search.do (requests 1101-1102)\n\n grinder.sleep(2822)\n self.page12() # POST search.do (request 1201)\n\n grinder.sleep(1333)\n self.page13() # GET sell.do (request 1301)\n\n grinder.sleep(17417)\n self.page14() # POST sell.do (request 1401)\n\n grinder.sleep(6680)\n self.page15() # GET insurance.do (request 1501)\n\n grinder.sleep(600)\n self.page16() # GET about.do (requests 1601-1602)\n\n grinder.sleep(584)\n self.page17() # GET supercars.do (request 1701)\n\n grinder.sleep(1049)\n self.page18() # GET cars.do (requests 1801-1802)\n\n grinder.sleep(2901)\n self.page19() # GET car.do (request 1901)\n\n grinder.sleep(1441)\n self.page20() # GET car.do (request 2001)\n\n grinder.sleep(791)\n self.page21() # GET supercars.do (request 2101)\n\n grinder.sleep(1365)\n self.page22() # GET cars.do (request 2201)\n\n grinder.sleep(1067)\n self.page23() # GET supercars.do (request 2301)\n\n grinder.sleep(1284)\n self.page24() # GET cars.do (request 2401)\n\n grinder.sleep(879)\n self.page25() # GET supercars.do (request 2501)\n\n grinder.sleep(1066)\n self.page26() # GET cars.do (request 2601)\n\n grinder.sleep(974)\n self.page27() # GET supercars.do (request 2701)", "def remove_unreachable_urls(list_of_urls):\n list_of_reachable_url = []\n for url in list_of_urls:\n try:\n f = requests.get(url)\n print('\\t',url, 
'status_code:', f.status_code)\n list_of_reachable_url.append(url)\n except:\n print('\\t',url, 'not reachable -- > removed')\n\n return list_of_reachable_url", "def _clean_outdated(self):\n now = _now()\n outdated = []\n for request_no, request_info in self._current_requests.items():\n if now - request_info.start_time > self._force_clean_after:\n outdated.append(request_no)\n if outdated:\n logging.error(\"There are {} requests which were started but haven't \"\n \"been finished in more than {}s.\"\n .format(len(outdated), self._force_clean_after))\n for request_no in outdated:\n del self._current_requests[request_no]\n self._last_autoclean_time = now", "def tearDown(self):\n self.driver.delete_all_cookies()", "def clear_async_requests(self):\n\t\tself._async_transforms.clear()\n\t\tself._async_http_requests.clear()\n\t\treturn self", "def tearDown(self):\n api.clear_cache()", "def tearDown(self):\n api.clear_cache()", "def cleanup_your_mess(self):\n\n # TODO: I believe this works, but urllib3.connectionpool retries to\n # connect 3 times after close. Might be fine.\n output(\"Closing Chrome driver\")\n driver = getattr(getattr(cmds, 'packtbook'), 'driver')\n\n if driver:\n driver.quit()", "def clear_all_cookies():", "def reset():\n from . import core\n core.http.reset()", "def cleanup_incomplete_bugs_without_response(self):\n raise NotImplementedError(\"not yet done\")", "def curent_sesion_cleanup(self):\r\n\r\n for key,value in self.curent_sesion.items():\r\n for idx in value:\r\n requests.delete(key + str(idx), headers=self.headers)\r\n for check in requests.get(key,headers=self.headers).json()['results']:\r\n if idx in check.values():\r\n return False\r\n self.curent_sesion[key].clear()\r\n return True", "def test_remove_expired(self):\n req1 = FakeRequest(1, True)\n req2 = FakeRequest(2, False)\n req3 = FakeRequest(3, True)\n req4 = FakeRequest(4, True)\n req5 = FakeRequest(5, False)\n self.request_buffer.append(req1)\n self.request_buffer.append(req2)\n self.request_buffer.append(req3)\n self.request_buffer.append(req4)\n self.request_buffer.append(req5)\n\n self.request_buffer.remove_expired()\n\n self.assertTrue(\n req2 in self.request_buffer.requests and\n req5 in self.request_buffer.requests\n )", "def clean_registries(self):\n registry = self.connection.get_finished_registry(name=self.name)\n registry.cleanup()\n registry = self.connection.get_started_registry(name=self.name)\n registry.cleanup()", "def clearDownloadQueue(self):\n #print(\"CLEAR DOWNLOAD QUEUE\")\n self.downloadQueue = []\n self.clearEvents()", "def reset_http(self):\n self.service = None", "def extract_dynamic_urls(self, url):\n\n # delete previous requests and visit the given URL\n del self.driver.requests\n self.driver.get(url)\n\n # iterate over all JS event attributes\n for event_attribute in EVENTS:\n # find all HTML elements with the current attribute\n try:\n elements = self.driver.find_elements_by_xpath(\"//*[@%s]\" % event_attribute)\n except Exception as e:\n if \"unexpected alert open:\" in str(e):\n continue\n raise e\n\n # run the javascript of every eventful HTML element\n if elements:\n for element in elements:\n # try submit and click events directly and other attributes via a workaround\n try:\n if event_attribute == \"onsubmit\":\n element.submit()\n elif event_attribute == \"onclick\":\n element.click()\n else:\n self.driver.execute_script(\"arguments[0].%s()\" % event_attribute, element)\n # except any errors and ignore them\n except:\n pass\n\n # go back to the original URL by going back 
in history\n # if that fails, try to revisit the original URL directly\n i = -1\n while True:\n try:\n if self.driver.current_url != url:\n break\n else:\n self.driver.execute_script(\"window.history.go(%d)\" % i)\n i -= 1\n except selenium.common.exceptions.UnexpectedAlertPresentException as e:\n for j in range(5):\n try:\n self.driver.get(url)\n break\n except selenium.common.exceptions.UnexpectedAlertPresentException:\n time.sleep(j)\n\n # if for some reason, the original URL could not be visited again, stop completely\n if self.driver.current_url != url:\n break\n\n # extract URLs, POST params and cookies by inspecting requests made by the Selenium driver\n visited_urls = set()\n for request in self.driver.requests:\n if \"url\" in request.__dir__():\n req_url = request.url\n else:\n req_url = request.path\n\n # add as path instance if POST parameters are available\n if self.url_has_netloc(req_url) and request.method == \"POST\" and request.body:\n try:\n body = request.body.decode()\n post_params = get_query_params(body)\n if post_params:\n parsed_url = urllib.parse.urlparse(req_url)\n get_params = get_query_params(parsed_url.query)\n # extract cookies sent by the Selenium driver\n cookie_strs = request.headers[\"Cookie\"].split(\";\")\n cookies = {}\n for cookie_str in cookie_strs:\n k, v = cookie_str.strip(), \"\"\n if \"=\" in cookie_str:\n k, v = cookie_str.strip().split(\"=\")\n cookies[k] = v\n # finally, add as instance of the visited website path\n self.add_path_instance(parsed_url.path, get_params, post_params, cookies)\n except:\n pass\n visited_urls.add(req_url)\n\n del self.driver.requests\n return visited_urls", "def __del__(self):\n for req in self._outbox:\n req.Wait()", "def stop(self):\n if self._real_send:\n requests.Session.send = self._real_send\n self._real_send = None" ]
[ "0.5872074", "0.5758299", "0.5635539", "0.560575", "0.560575", "0.55663204", "0.55220544", "0.54572016", "0.54330814", "0.54221976", "0.5397869", "0.5388094", "0.53824675", "0.5355474", "0.53215826", "0.531101", "0.5293863", "0.5293863", "0.5276583", "0.52676547", "0.52650756", "0.5259553", "0.5254778", "0.52391446", "0.52340984", "0.52287763", "0.52270603", "0.5214139", "0.52116877", "0.52038324" ]
0.8178762
0
Override default values for random initial topic assignment, set to "seed" instead.
def set_seed(self,seed): assert seed.dtype==np.int and seed.shape==(self.N,) self.topic_seed = seed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_seed(self,seed):\n\n\t\tassert seed.dtype==np.int and seed.shape==(self.samples,self.N)\n\t\tself.topic_seed = seed", "def initialize(self, number_of_topics, random=False):\n print(\"Initializing...\")\n\n self.initialize_randomly(number_of_topics)\n\n #print(\"pi\", self.document_topic_prob)\n #print(\"p(w|theta)\", self.topic_word_prob)", "def initialize(self, number_of_topics, random=False):\n print(\"Initializing...\")\n\n if random:\n self.initialize_randomly(number_of_topics)\n else:\n self.initialize_uniformly(number_of_topics)", "def generate_initial_topics(self):\n initial_topics = random.sample(self.remaining_topics, self.num_topics)\n self.remaining_topics = [topic for topic in self.remaining_topics if topic not in initial_topics]\n return initial_topics", "def random_init(self, docs):\n for di in xrange(len(docs)):\n doc = docs[di]\n topics = np.random.randint(self.n_topic, size=len(doc))\n self.topic_assignment.append(topics)\n\n for wi in xrange(len(doc)):\n topic = topics[wi]\n word = doc[wi]\n self.TW[topic, word] += 1\n self.sum_T[topic] += 1\n self.DT[di, topic] += 1", "def initialize(self, seed=None):\r\n self.seed(seed)", "def initialize_randomness(seed):", "def set_seed(self):\n self.set_scikit_learn_seed()\n self.set_torch_seed()\n self.set_python_random_seed()", "def init_seed(seed=None):\n if seed is None:\n seed = int(time.time())\n\n LOGGER.info(\"Using seed=%d\", seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n random.seed(seed)", "def set_torch_seed(self):\n torch.manual_seed(42)", "def set_seed(self,seed):\r\n if seed is None:\r\n warnings.warn(\r\n \"Initializing player with seed from Axelrod module random number generator. \"\r\n \"Results may not be seed reproducible.\")\r\n self._seed = _module_random.random_seed_int()\r\n else:\r\n self._seed = seed\r\n self._random = RandomGenerator(seed=self._seed)\r\n self.base._random = self._random\r\n self.trust._random = self._random\r\n self.conviction._random = self._random\r\n \r\n self.generator = torch.Generator()\r\n self.generator.manual_seed(int(seed))", "def setUp(self):\n # record the randomness used in case the test fails:\n self.rand_seed = int(time.time())\n sr.seed(self.rand_seed)\n print(\"seed for this test: \" + str(self.rand_seed))", "def set_manual_seed(seed):\n\n random.seed(seed)\n torch.manual_seed(seed)\n\n print('Using manual seed: {seed}'.format(seed=seed))", "def set_seed(self, seed):\n self.seed = seed", "def seed(self, seed: Optional[int]) -> None:\n ...", "def set_seed(self, seed=None):\n super().set_seed(seed=seed)\n for t in self.policy_list:\n t.set_seed(self._random.random_seed_int())", "def random_seed(self) -> None:\n self.seed = random.SeedSequence().entropy", "def setUp(self):\n # record the randomness used in case the test fails:\n rand_seed = int(time.time())\n sr.seed(rand_seed)\n print(\"seed for this test: \" + str(rand_seed))", "def worker_init_fn(worker_id):\r\n base_seed = torch.IntTensor(1).random_().item()\r\n #print(worker_id, base_seed)\r\n np.random.seed(base_seed + worker_id)", "def pre_randomize(self, seed):\n super(ReseedingRandomizer, self).pre_randomize(seed)\n self.seed(seed=seed)", "def seed(self, seed=None):\n raise NotImplementedError()", "def seed(self, seed=None):\n raise NotImplementedError()", "def set_scikit_learn_seed(self):\n np.random.seed(42)", "def __init__(self, topic):\n super().__init__(topic)", "def __init__(self, topics_to_test):\n super().__init__()\n self.topics_to_test = topics_to_test", "def set_seed(cls, seed: Any) -> 
None:\n cls.rand = Random(seed)", "def __init__(self, topic):\n self.topic = topic", "def set_python_random_seed(self):\n random.seed(42)", "def set_global_seeds(seed):\n \n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)", "def _init_seeding(cls, seed_type=int(SeedType.NONE), seeds=None):\n seed_type = int(seed_type)\n \n if seed_type == (SeedType.NONE):\n assert seeds is None, \"Seed type set to NONE, therefore seed cannot be set.\"\n elif seed_type == (SeedType.CONSTANT):\n assert seeds is not None, \"Seed set to constant seed, so seed must be specified.\"\n cls._seed_generator = [int(x) for x in seeds.split(\",\") if x]\n elif seed_type == (SeedType.GENERATED):\n assert seeds is not None, \"Seed set to generated seed, so initial seed must be specified.\"\n cls._seed_generator = Random(int(seeds))\n elif seed_type == (SeedType.SPECIFIED):\n cls._seed_generator = ([[str(x) for x in s.split(\",\") if x] for s in seeds.split(\"-\") if s])\n else:\n raise TypeError(\"Seed type {} not supported\".format(seed_type))\n \n cls._seed_type = seed_type" ]
[ "0.7856412", "0.6859348", "0.6736005", "0.6576959", "0.6419191", "0.6385298", "0.6314003", "0.6161125", "0.61501825", "0.6149955", "0.61064327", "0.6061132", "0.60445714", "0.6027036", "0.6016285", "0.60146075", "0.5998351", "0.599772", "0.5957093", "0.59457964", "0.59443516", "0.59443516", "0.5937208", "0.5931089", "0.59301513", "0.59280723", "0.585808", "0.5840598", "0.5836459", "0.58335227" ]
0.7798357
1
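Editor's note on the set_seed record above: the method expects an integer vector with one topic id per token (shape (N,)) and stores it as the sampler's initial topic assignment. The real class, its other attributes, and the surrounding Gibbs sampler are not part of this record, so the stand-in below is only an illustration of that calling contract; the class name and constructor arguments are assumptions.

    import numpy as np

    class SeededSamplerStub:
        # Toy stand-in that mirrors only the set_seed contract shown in the record above.
        def __init__(self, n_tokens, n_topics):
            self.N = n_tokens          # number of tokens in the corpus
            self.K = n_topics          # number of topics
            self.topic_seed = None

        def set_seed(self, seed):
            # same idea as the record: integer dtype, exactly one entry per token
            assert np.issubdtype(seed.dtype, np.integer) and seed.shape == (self.N,)
            self.topic_seed = seed

    stub = SeededSamplerStub(n_tokens=12, n_topics=4)
    stub.set_seed(np.random.randint(0, 4, size=12))   # one topic id per token
    print(stub.topic_seed)

Passing a vector of the wrong length or a float dtype would trip the same kind of assertion the original method uses.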
Compute the term-topic matrix from sampled_topics.
def tt_comp(self,sampled_topics): samples = sampled_topics.shape[0] tt = np.zeros((self.V,self.K,samples)) for s in xrange(samples): tt[:,:,s] = samplers_lda.tt_comp(self.tokens, sampled_topics[s,:], self.N, self.V, self.K, self.beta) return tt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_topic_matrix(self):\n print('get topic matrix')\n\n topic_words_dict = self.config['topic_words']\n\n topic_matrix = np.empty((0, self.wordvec.embedding_dim))\n\n topic_id = 0\n for topic in topic_words_dict.keys():\n topic_words = topic_words_dict[topic]\n topic_vector = self.wordvec.avg_words_vector(topic_words)\n\n topic_matrix = np.append(topic_matrix, topic_vector, axis=0)\n\n self.id2topic[str(topic_id)] = topic\n topic_id += 1\n\n return topic_matrix", "def get_topic_terms_df(self, topics):\n labels = list(topics.keys())\n \n topic_terms = []\n for topic, top_n_words in topics.items():\n top_n_words = sorted(top_n_words, key=lambda x: x[1], reverse=True)[:self.num_words]\n terms = [term for term, c_tf_idf in top_n_words]\n terms = \", \".join(terms)\n topic_terms.append(terms)\n\n topic_terms_df = pd.DataFrame()\n topic_terms_df['id'] = labels\n topic_terms_df['Topic terms'] = topic_terms\n return topic_terms_df", "def get_topics(self):\n topics = self.word_topics\n return topics / topics.sum(axis=1)[:, None]", "def dt_comp(self,sampled_topics):\n\n\t\tsamples = sampled_topics.shape[0]\n\t\tdt = np.zeros((self.D,self.K,samples))\n\n\t\tfor s in xrange(samples):\n\t\t\tdt[:,:,s] = samplers_lda.dt_comp(self.docid, sampled_topics[s,:], self.N, self.K, self.D, self.alpha)\n\n\t\treturn dt", "def _initialize(self):\n for doc_index, doc in enumerate(self.document):\n temp_word_topic_matrix = []\n for word in doc:\n if word in self.word2id.keys():\n start_topic_index = np.random.randint(0, self.K)\n temp_word_topic_matrix.append(start_topic_index)\n self.doc_topic_matrix[doc_index, start_topic_index] += 1\n self.topic_word_matrix[start_topic_index, self.word2id[word]] += 1\n self.topic_matrix[start_topic_index] += 1\n self.current_word_topic_matrix.append(temp_word_topic_matrix)", "def set_sampled_topics(self,sampled_topics):\n\n\t\tassert sampled_topics.dtype == np.int and len(sampled_topics.shape) <= 2 \t\n\n\t\tif len(sampled_topics.shape) == 1: self.sampled_topics = sampled_topics.reshape(1,sampled_topics.shape[0])\n\t\telse: self.sampled_topics = sampled_topics\n\n\t\tself.samples = self.sampled_topics.shape[0]\n\n\t\tself.tt = self.tt_comp(self.sampled_topics)\n\t\tself.dt = self.dt_comp(self.sampled_topics)", "def get_topic_words(self, topics):\n topic_words = []\n for topic, top_n_words in topics.items():\n words = [word for word, c_tf_idf in top_n_words]\n topic_words.append(words)\n return topic_words", "def get_topic_terms(self, topicid, topn=10):\n topic = self.var_lambda[topicid, :]\n topic = topic / topic.sum() # normalize to probability distribution\n bestn = matutils.argsort(topic, topn, reverse=True)\n return [(id, topic[id]) for id in bestn]", "def lda_models(doc_term_matrix, n_topics, vectorizer, rand_start):\n\n perplexity_values = []\n lda_time = []\n topics_list = []\n \n i = rand_start \n for num_topics in n_topics:\n \n # create model\n t1 = time.time()\n lda_model = LatentDirichletAllocation(n_components=num_topics, doc_topic_prior = 1/num_topics, \n topic_word_prior=0.1, n_jobs=39, random_state = i) \n lda_model.fit_transform(doc_term_matrix)\n t2 = time.time()\n lda_time.append(t2-t1)\n print(f\" Model time: {t2-t1}\", flush = True)\n \n # compute perplexity\n perplexity_values.append(lda_model.bound_)\n \n # create list of topics\n topics = list_topics(lda_model.components_, vectorizer, top_n=10)\n topics_list.append(topics)\n \n # output completion message\n i = i+1\n print('Number of topics =', num_topics, \"complete.\", flush = True)\n\n return 
perplexity_values, lda_time, topics_list", "def test_extract_topics(base_bertopic):\n nr_topics = 5\n documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n base_bertopic._update_topic_size(documents)\n c_tf_idf = base_bertopic._extract_topics(documents, topic_reduction=False)\n freq = base_bertopic.get_topics_freq()\n\n assert c_tf_idf.shape[0] == 5\n assert c_tf_idf.shape[1] > 100\n assert isinstance(freq, pd.DataFrame)\n assert nr_topics == len(freq.Topic.unique())\n assert freq.Count.sum() == len(documents)\n assert len(freq.Topic.unique()) == len(freq)", "def random_init(self, docs):\n for di in xrange(len(docs)):\n doc = docs[di]\n topics = np.random.randint(self.n_topic, size=len(doc))\n self.topic_assignment.append(topics)\n\n for wi in xrange(len(doc)):\n topic = topics[wi]\n word = doc[wi]\n self.TW[topic, word] += 1\n self.sum_T[topic] += 1\n self.DT[di, topic] += 1", "def get_topics_strings(\n topics_words, mu, sigma, vocabulary, topics_to_print=10, words_per_topic=30\n):\n mu = np.squeeze(mu, axis=0)\n sigma = np.squeeze(sigma, axis=0)\n # Use a stable sorting algorithm so that when alpha is fixed\n # we always get the same topics.\n highest_weight_topics = np.argsort(-mu, kind=\"mergesort\")\n top_words = np.argsort(-topics_words, axis=1)\n\n res = []\n # try:\n for topic_idx in highest_weight_topics[:topics_to_print]:\n lst = [\n \"index={} mu={:.2f} sigma={:.2f}\".format(\n topic_idx, mu[topic_idx], sigma[topic_idx]\n )\n ]\n lst += [vocabulary[word] for word in top_words[topic_idx, :words_per_topic]]\n res.append(\" \".join(lst))\n # except:\n # res.append('')\n\n return np.array(res)", "def transform(self):\n result = []\n for item in self.doc_topic_matrix:\n result.append(item / np.sum(item))\n result = np.array(result)\n return result", "def dtm_vis(self, time, corpus):\n doc_topic = self.gammas / self.gammas.sum(axis=1)[:, np.newaxis]\n\n def normalize(x):\n return x / x.sum()\n\n topic_term = [\n normalize(np.exp(chain.e_log_prob.T[time]))\n for k, chain in enumerate(self.topic_chains)\n ]\n\n doc_lengths = []\n term_frequency = np.zeros(self.vocab_len)\n for doc_no, doc in enumerate(corpus):\n doc_lengths.append(len(doc))\n\n for term, freq in doc:\n term_frequency[term] += freq\n\n vocab = [self.id2word[i] for i in range(len(self.id2word))]\n\n return doc_topic, np.array(topic_term), doc_lengths, term_frequency, vocab", "def doc_topics(self, doc_number):\n doc_topic = self.gammas / self.gammas.sum(axis=1)[:, np.newaxis]\n return doc_topic[doc_number]", "def display_topics_svd(model_fit, terms, num_top_words, topics = None):", "def test_extract_topics():\n nr_topics = 5\n documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n model = BERTopic()\n model._update_topic_size(documents)\n model._extract_topics(documents)\n freq = model.get_topic_freq()\n\n assert model.c_tf_idf.shape[0] == 5\n assert model.c_tf_idf.shape[1] > 100\n assert isinstance(freq, pd.DataFrame)\n assert nr_topics == len(freq.Topic.unique())\n assert freq.Count.sum() == len(documents)\n assert len(freq.Topic.unique()) == len(freq)", "def main_topic_doc(ldamodel, corpus=corpus): \n \n doc_topics = pd.DataFrame()\n\n for i, row in enumerate(ldamodel[corpus]):\n row = sorted(row, key=lambda x: (x[1]), reverse=True)\n\n for j, (topic_num, prop_topic) in enumerate(row):\n if j == 
0:\n wp = ldamodel.show_topic(topic_num)\n topic_keywords = \"' \".join([word for word, prop in wp])\n doc_topics = doc_topics.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True)\n else:\n break\n doc_topics.columns = ['Dominant_Topic', 'Percent_Contrib', 'Topic_keywords']\n return doc_topics", "def get_topics_table_by_id(self, topic_id):\n words = self._topics_words(MAX_WORDS)\n weights = self._topics_weights(MAX_WORDS)\n if topic_id >= len(words):\n raise ValueError(\"Too large topic ID.\")\n\n num_words = len(words[topic_id])\n\n data = np.zeros((num_words, 2), dtype=object)\n data[:, 0] = words[topic_id]\n data[:, 1] = weights[topic_id]\n\n metas = [StringVariable(self.topic_names[topic_id]),\n ContinuousVariable(\"Topic{}_weights\".format(topic_id + 1))]\n metas[-1]._out_format = '%.2e'\n\n domain = Domain([], metas=metas)\n t = Topic.from_numpy(domain,\n X=np.zeros((num_words, 0)),\n metas=data)\n t.W = data[:, 1]\n t.name = 'Topic_{}'.format(topic_id + 1)\n return t", "def test_extract_topics_custom_cv():\n nr_topics = 5\n documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n\n cv = CountVectorizer(ngram_range=(1, 2))\n model = BERTopic(vectorizer=cv)\n model._update_topic_size(documents)\n model._extract_topics(documents)\n freq = model.get_topic_freq()\n\n assert model.c_tf_idf.shape[0] == 5\n assert model.c_tf_idf.shape[1] > 100\n assert isinstance(freq, pd.DataFrame)\n assert nr_topics == len(freq.Topic.unique())\n assert freq.Count.sum() == len(documents)\n assert len(freq.Topic.unique()) == len(freq)", "def test_extract_topics_custom_cv(base_bertopic_custom_cv):\n nr_topics = 5\n documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n base_bertopic_custom_cv._update_topic_size(documents)\n c_tf_idf = base_bertopic_custom_cv._extract_topics(documents, topic_reduction=False)\n freq = base_bertopic_custom_cv.get_topics_freq()\n\n assert c_tf_idf.shape[0] == 5\n assert c_tf_idf.shape[1] > 100\n assert isinstance(freq, pd.DataFrame)\n assert nr_topics == len(freq.Topic.unique())\n assert freq.Count.sum() == len(documents)\n assert len(freq.Topic.unique()) == len(freq)", "def fake_data(n_docs, n_words, n_sent_length, n_topics):\n # These are log ratios for the doc & word topics\n doc_topics = orthogonal_matrix([n_docs, n_topics])\n wrd_topics = orthogonal_matrix([n_topics, n_words])\n # Multiply log ratios and softmax to get prob of word in doc\n doc_to_wrds = softmax(np.dot(doc_topics, wrd_topics))\n # Now sample from doc_to_wrd to get realizations\n indices = np.arange(n_words).astype('int32')\n sentences = []\n for doc_to_wrd in doc_to_wrds:\n words = sample(indices, doc_to_wrd, n_sent_length)\n sentences.append(words)\n\n return np.array(sentences, dtype=np.int32)", "def query(self,query_samples):\n\n\t\tself.sampled_topics = np.zeros((self.samples,self.N), dtype = np.int)\n\n\t\tfor s in xrange(self.samples):\n\n\t\t\tself.sampled_topics[s,:] = samplers_lda.sampler_query(self.docid, self.tokens, self.topic_seed,\n\t\t\t\t\t\t\t\t\t\t\tnp.ascontiguousarray(self.tt[:,:,s], dtype=np.float),\n\t\t\t\t\t\t\t\t\t\t\tself.N, self.K, self.D, self.alpha, query_samples)\n\n\t\t\tprint(\"Sample %d queried\" % s)\n\n\t\tself.dt = np.zeros((self.D,self.K,self.samples))\n\n\t\tfor s in xrange(self.samples):\n\t\t\tself.dt[:,:,s] = 
samplers_lda.dt_comp(self.docid,self.sampled_topics[s,:], self.N, self.K, self.D, self.alpha)", "def _topics_words(self, num_of_words):\n x = self.model.show_topics(-1, num_of_words, formatted=False)\n # `show_topics` method return a list of `(topic_number, topic)` tuples,\n # where `topic` is a list of `(word, probability)` tuples.\n return [[i[0] for i in topic[1]] for topic in x]", "def build_term_doc_matrix(self):\n\n print(\"Inside build_term_doc_matrix >>> \")\n self.term_doc_matrix = np.zeros([self.number_of_documents,self.vocabulary_size])\n for kVal in range(0, self.number_of_documents):\n for lVal,wordVocab in enumerate(self.vocabulary):\n wrd_doc = 0\n for oVal in range(0, len(self.documents[kVal])):\n if (wordVocab == self.documents[kVal][oVal]):\n wrd_doc = wrd_doc +1\n self.term_doc_matrix[kVal][lVal] = wrd_doc\n #print(\"term_doc_matrix >>> \" + self.term_doc_matrix)", "def build_topics_fn(\n batch_size, n_words=55, n_topics=3, dtype=np.float64, correlated=False\n):\n mean = tf.random.normal([n_topics], dtype=dtype)\n\n if correlated:\n dof = n_topics * (n_topics + 1) // 2\n ell = tf.random.normal([dof], dtype=dtype)\n mdl = tfd.MultivariateNormalTriL(\n loc=mean, scale_tril=tfb.FillScaleTriL(diag_shift=tf.cast(1e-5, dtype))(ell)\n )\n else:\n ell = tf.random.normal([n_topics], dtype=dtype)\n mdl = tfd.MultivariateNormalDiag(loc=mean, scale_diag=tf.nn.softplus(ell))\n\n eta = mdl.sample(batch_size)\n topics = tfd.Multinomial(1, probs=tf.nn.softmax(eta)).sample()\n\n topics_words = np.zeros((n_topics, n_words), dtype=dtype)\n for i in range(n_topics):\n topics_words[i][i * n_words // n_topics : (i + 1) * n_words // n_topics] = 1\n\n word_probs = tf.matmul(topics, topics_words).numpy()\n vocabulary = [str(i) for i in range(n_words)]\n\n return (\n *_get_fns(word_probs, batch_size),\n vocabulary,\n word_probs,\n mean,\n ell,\n eta,\n topics,\n topics_words,\n )", "def initialize(self):\n # Initializing the counter and distribution.\n for k in range(0, self.topic_number,1):\n self.topic_term_count_matrix[k]= [0.0] * self.term_number\n self.topic_distribution_over_term[k] = [0.0] * self.term_number\n self.sum_topic_by_term_count[k] = 0.0\n for m in range(0, self.document_number,1):\n self.document_topic_count_matrix[m] = [0.0] * self.topic_number\n self.document_distribution_over_topic[m] = [0.0] * self.topic_number\n self.sum_document_by_topic_count[m] = 0.0\n\n # Initializing topics assigned to all words of all documents.\n for m in range(0, self.document_number, 1):\n N = len(self.documents[m])\n self.word_topic_assignment[m] = [-1] * N\n for n in range(0, N,1):\n topic = int(random.uniform(0,1) * self.topic_number)\n self.document_topic_count_matrix[m][topic] += 1.0\n self.topic_term_count_matrix[topic][self.documents[m][n]] += 1.0\n self.sum_topic_by_term_count[topic] += 1.0\n self.word_topic_assignment[m][n] = topic\n self.sum_document_by_topic_count[m] = N", "def __init__(self,corpus,topic_number=10,iteration_number=1000,burn_in=500,update_cycle=100,alpha=None,beta=None):\n # documents, key: id of document, value: list of word in an specific document.\n self.documents = corpus.documents\n # number of iteration when using Gibbs Sampling.\n self.iteration_number = iteration_number\n self.topic_number = topic_number\n self.burn_in = burn_in\n self.update_cycle = update_cycle\n # number of terms.\n self.term_number = len(corpus.word_id)\n # number of documents.\n self.document_number = len(self.documents)\n # if alpha and beta is None, then assign values to them.\n if alpha == 
None:\n self.alpha = [2.0] * self.topic_number\n else:\n self.alpha = alpha\n if beta == None:\n self.beta = [0.5] * self.term_number\n else:\n self.beta = beta\n # The sum of elements in beta.\n self.sum_beta = sum(self.beta)\n # The sum of elements in alpha.\n self.sum_alpha = sum(self.alpha)\n # counter, [m][k] refers to the number of times that topic k has been observed with a word in document m.\n self.document_topic_count_matrix = {}\n # counter, [k][t] refers to the number of times that term t has been observed with topic k.\n self.topic_term_count_matrix = {}\n # distribution matrix, [m][k] refers the probability that assigning topic k to document m.\n self.document_distribution_over_topic = {}\n # distribution matrix, [k][t] refers the probability that assigning topic k to term t.\n self.topic_distribution_over_term = {}\n # counter, [m] refers the number of times that all topics have been observed with a word in document m.\n # also, [m] equals to the number of words in document m.\n self.sum_document_by_topic_count = {}\n # counter, [k] refers the number of times that all terms have been observed with topic k.\n self.sum_topic_by_term_count = {}\n # topic assigned to an word in a document. [m][n] refers to the topic that assigned to the n th word in document\n # m.\n self.word_topic_assignment = {}\n # the number of times that the distribution has been updated.\n self.update_number = 0.0", "def initialize(self):\n self.n_words = len(self.vocab)\n self.n_docs = len(self.documents)\n\n # Initialize the three count matrices.\n # The (i,j) entry of self.nmz is the number of words in document i assigned to topic j.\n self.nmz = np.zeros((self.n_docs, self.n_topics))\n # The (i,j) entry of self.nzw is the number of times term j is assigned to topic i.\n self.nzw = np.zeros((self.n_topics, self.n_words))\n # The (i)-th entry is the number of times topic i is assigned in the corpus.\n self.nz = np.zeros(self.n_topics)\n\n # Initialize the topic assignment dictionary.\n self.topics = {} # key-value pairs of form (m,i):z\n\n for m in range(self.n_docs):\n for i in self.documents[m]:\n # Get random topic assignment, i.e. z is a random integer in the range of topics\n z = np.random.randint(self.n_topics)\n # Increment count matrices\n self.nmz[m,z] += 1\n self.nzw[z,self.documents[m][i]] += 1\n self.nz[z] += 1\n # Store topic assignment\n self.topics[(m,i)] = z", "def initialize(self, corpus):\n if self.id2word is None:\n logger.info(\"no word id mapping provided; initializing from corpus, assuming identity\")\n self.id2word = utils.dict_from_corpus(corpus)\n self.num_terms = len(self.id2word)\n elif self.id2word:\n self.num_terms = 1 + max(self.id2word)\n else:\n self.num_terms = 0\n\n shape = self.num_topics, self.num_terms\n logger.info(\"constructing %s random matrix\", str(shape))\n # Now construct the projection matrix itself.\n # Here i use a particular form, derived in \"Achlioptas: Database-friendly random projection\",\n # and his (1) scenario of Theorem 1.1 in particular (all entries are +1/-1).\n randmat = 1 - 2 * np.random.binomial(1, 0.5, shape) # convert from 0/1 to +1/-1\n # convert from int32 to floats, for faster multiplications\n self.projection = np.asfortranarray(randmat, dtype=np.float32)\n # TODO: check whether the Fortran-order shenanigans still make sense. In the original\n # code (~2010), this made a BIG difference for np BLAS implementations; perhaps now the wrappers\n # are smarter and this is no longer needed?" ]
[ "0.7314734", "0.64872533", "0.64799446", "0.63049644", "0.6254123", "0.62255585", "0.61655533", "0.6160577", "0.61441", "0.6136241", "0.60976034", "0.6075077", "0.60740954", "0.6068708", "0.6040718", "0.60375553", "0.6037457", "0.60017866", "0.5994458", "0.59844494", "0.5982392", "0.5976876", "0.5972493", "0.5909515", "0.5907604", "0.59006757", "0.58897376", "0.58747864", "0.58492094", "0.5844604" ]
0.6976323
1
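Editor's note on the tt_comp record above: the loop delegates the per-sample work to a compiled helper (samplers_lda.tt_comp), so the arithmetic itself is not visible. The standard quantity such a helper returns is a smoothed term-topic proportion matrix, p(term | topic) = (count + beta) / (topic count + V * beta). The sketch below is a plain-NumPy reconstruction of that idea for a single sample; it is an assumption about what the compiled routine computes, not its source.

    import numpy as np

    def term_topic_single(tokens, topics, V, K, beta):
        # Smoothed p(term | topic) for one sample of token-level topic assignments.
        counts = np.zeros((V, K))
        np.add.at(counts, (tokens, topics), 1)                    # term/topic co-occurrence counts
        return (counts + beta) / (counts.sum(axis=0) + V * beta)  # normalise each topic column

    # toy data: 8 tokens over a 5-term vocabulary, 3 topics
    tokens = np.array([0, 1, 2, 2, 3, 4, 0, 1])
    topics = np.array([0, 0, 1, 1, 2, 2, 0, 1])
    tt = term_topic_single(tokens, topics, V=5, K=3, beta=0.05)
    print(tt.shape, tt.sum(axis=0))                               # (5, 3), each column sums to 1

Stacking one such matrix per stored sample along a third axis gives the V x K x samples array the record allocates.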
Compute perplexity for each sample.
def perplexity(self): return samplers_lda.perplexity_comp(self.docid,self.tokens,self.tt,self.dt,self.N,self.K,self.samples)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def perplexity(self):\n raise NotImplementedError(\"To be implemented\")", "def perplexity(self, sents):\n return 2 ** self.cross_entropy(sents)", "def perplexity(self, sents):\n # total words seen\n M = 0\n for sent in sents:\n M += len(sent)\n # cross-entropy\n l = 0\n print('Computing Perplexity on {} sents...\\n'.format(len(sents)))\n for sent in sents:\n l += self.sent_log_prob(sent) / M\n return pow(2, -l)", "def perplexity(self, corpus):\n sum_pro = 0.0\n total_words = 0\n for sentence in corpus:\n sen_pro = self.sentence_logprob(sentence)\n sum_pro += sen_pro\n total_words += len(sentence)\n\n \n\n l = sum_pro/total_words\n w = 0.0\n w = 2**(-l)\n\n return w", "def perplexity(y_true, y_pred):\n cross_entropy = K.categorical_crossentropy(y_true, y_pred)\n perplexity = K.pow(2.0, cross_entropy)\n return perplexity", "def perplexity(self, corpus):\n M = 0\n prob = 0\n\n for line in corpus:\n M += len(line)\n M += 1 # consider \"STOP\"\n prob += self.sentence_logprob(line)\n result = 2**(-(prob/M))\n\n return result", "def _perplexity(self, X, log_w):\n return np.exp(-log_w/X.sum())", "def perplexity(model, data):\n probs = [model.get_prob(word) for word in data] # get word's probability\n probs_log = [\n log2(word_prob) if word_prob > 0 else log2(float_info.epsilon)\n for word_prob in probs\n ] # log the probabilities. using epsilon when the probability is 0\n sum_probs = reduce(lambda a, b: a + b, probs_log) # sum all\n power_val = (-1 * sum_probs) / len(probs_log) # divide by n and neg all\n return 2 ** power_val", "def compute_perplexity(self, train_corpus: str, test_corpus: str, n: int):\n N = len(''.join(test_corpus.split()))\n y = -1 / N\n likelihood = self.compute_likelihood(train_corpus, test_corpus, n)\n return likelihood ** y", "def perplexity(self, text_ngrams):\n return pow(\n 2.0, self.entropy(progress(text_ngrams, desc=\"Calculating Perplexity\") if self.verbose else text_ngrams)\n )", "def perplexity(self, corpus):\n l = 0\n total_word_count = 0\n for sentence in corpus :\n l += self.sentence_logprob(sentence)\n # 2 extra START tokens and 1 extra STOP token\n total_word_count += len(sentence)\n l /= total_word_count\n return math.pow(2, -l)", "def perplexity_fn(args: StepFunctionArgs) -> SingleScorePerStepTensor:\n return 2 ** crossentropy_fn(args)", "def compute_perplexity(self,loss: float):\n return math.exp(loss)", "def score_samples(self, x):\n n = x.shape[0]\n logp = np.log(self.mix_weight)\n logpz = np.zeros((n, self.ncomponents))\n\n for i in range(self.ncomponents):\n logpz[:, i] = logp[i] + multivariate_student.logpdf(x, self.cond_proba.mean[i], self.cond_proba.cov[i],\n self.cond_proba.df)\n\n logpz, ll = normalize_logspace(logpz)\n pz = np.exp(logpz)\n return pz, ll", "def score_samples(self, x):\n n = x.shape[0]\n logp = np.log(self.mix_weight)\n logpz = np.zeros((n, self.ncomponents))\n\n for i in range(self.ncomponents):\n logpz[:, i] = logp[i] + multivariate_normal.logpdf(x, self.cond_proba.mean[i], self.cond_proba.cov[i])\n\n logpz, ll = normalize_logspace(logpz)\n pz = np.exp(logpz)\n return pz, ll", "def compute_perplexity(self, sess, name):\n\n total_loss = 0\n total_predict_count = 0\n\n while True:\n try:\n loss, predict_count, batch_size = self.eval(sess)\n total_loss += loss * batch_size\n total_predict_count += predict_count\n except tf.errors.OutOfRangeError:\n break\n\n perplexity = utils.safe_exp(total_loss / total_predict_count)\n self.logger.info(\"{} perplexity: %.2f\".format(name, perplexity))\n \n return perplexity", "def 
perplexity(filepath, model):\n log_prob, count = log_prob_of_file(filepath, model)\n perplexity = math.exp((-1.0/count) * log_prob)\n return perplexity", "def score_samples(self, x):\n n, dim = x.shape\n logp = np.log(self.mix_weight)\n logt = np.log(self.cond_proba.T + np.finfo(np.float64).eps)\n lijk = np.zeros((n, dim, self.ncomponents))\n\n x = canonize_labels(x)\n\n for i in range(dim):\n ndx = ~np.isnan(x[:, i])\n lijk[ndx, i] = logt[:, x[ndx, i], i].T\n logpz = logp + np.squeeze(np.sum(lijk, 1))\n\n logpz, ll = normalize_logspace(logpz)\n pz = np.exp(logpz)\n return pz, ll", "def sample_propensities(mutated_params: torch.Tensor) -> torch.Tensor:\n return torch.softmax(mutated_params, -1)", "def calculate_perplexity(loss):\n return math.exp(float(loss)) if loss < 300 else float(\"inf\")", "def P_init(X, perplexity):\n\n n, _ = X.shape\n x = np.sum(X ** 2, axis=1)\n y = np.sum(X ** 2, axis=1)[:, np.newaxis]\n z = np.matmul(X, X.T)\n D = x - 2 * z + y\n np.fill_diagonal(D, 0.)\n P = np.zeros((n, n))\n betas = np.ones((n, 1))\n H = np.log2(perplexity)\n\n return D, P, betas, H", "def ppf(self,x):\n return self.categoricalDist.ppf(x)", "def evaluate_perplexity(learner, text_list):\n num_words = 0\n log_prob = 0\n num_unk = 0\n log_prob_unk = 0\n\n for text in tqdm(text_list):\n learner.model.reset()\n word_tensor, y = learner.data.one_item(\"xxbos\")\n\n for word in str(text).split()[1:]:\n idx = learner.data.vocab.stoi[word]\n predicted_probs = learner.pred_batch(batch=(word_tensor, y))[0][-1]\n\n log_prob += log10(predicted_probs[idx])\n num_words += 1\n\n if learner.data.vocab.itos[idx] == \"xxunk\":\n num_unk += 1\n log_prob_unk += log10(predicted_probs[idx])\n\n word_tensor = word_tensor.new_tensor([idx])[None]\n\n perplexity = 10 ** (- log_prob / num_words)\n\n # Contribution of OOV words to perplexity - to compare with irstlm's PPwp (compile-lm.cpp:406)\n # This implementation is a bit incorrect since the final number of words in PPwp should be\n # num_words - num_unk, but to enable comparison, we stick to original implementation.\n perplexity_oov = perplexity * (1 - 10 ** (log_prob_unk / num_words))\n\n return perplexity, num_unk / num_words, perplexity_oov", "def compute_perplexity(model, sess, name, eval_handle):\n\n def aggregate_all_summaries(original, updates):\n for key in updates:\n if key not in original:\n original[key] = 0.0\n original[key] += updates[key]\n return original\n\n total_loss = 0\n total_predict_count = 0\n start_time = time.time()\n aggregated_summaries = {}\n batch_processed = 0\n while True:\n try:\n loss, all_summaries, predict_count, batch_size = model.eval(\n sess, eval_handle)\n total_loss += loss * batch_size\n batch_processed += 1\n total_predict_count += predict_count\n aggregated_summaries = aggregate_all_summaries(aggregated_summaries,\n all_summaries)\n except tf.errors.OutOfRangeError:\n break\n\n perplexity = utils.safe_exp(total_loss / total_predict_count)\n for key in aggregated_summaries:\n if key not in set(\n [\"eval_dialogue_loss1\", \"eval_dialogue_loss2\", \"eval_action_loss3\"]):\n aggregated_summaries[key] /= batch_processed\n utils.print_time(\" eval %s: perplexity %.2f\" % (name, perplexity),\n start_time)\n return perplexity, aggregated_summaries", "def probability(self, samples):\n pass", "def eval_pp(net, z, zt):\n if net.name != 'lbl':\n Im = zt['IM']\n else:\n Im = None\n pp = lm_tools.perplexity(net, zt['ngrams'], z['word_dict'], Im=Im, context=net.context)\n print 'PERPLEXITY: ' + str(pp)", "def 
calculate_perplexity_new(self, sentence_probability, input_y, words_to_idx):\n \n batch_size = sentence_probability.shape[0]\n sentence_length = sentence_probability.shape[1]\n perplexity = np.zeros(batch_size)\n pad_index = words_to_idx['<pad>'] # index of pad\n \n for sentence_i in range(batch_size):\n word_index = 1 # start at 1 to discard bos\n log_sum = 0\n while ((word_index < sentence_length) and (input_y[sentence_i][word_index] != pad_index)): # stop when the first pad token is reached\n \n log_sum += np.log2(sentence_probability[sentence_i, word_index, \n input_y[sentence_i][word_index]])\n word_index += 1 \n word_index -= 1 # remove one count because of discarded bos\n try:\n # catch sentence with all pad's\n perplexity[sentence_i] = np.power(2, -(log_sum/word_index))\n except:\n print(input_y[sentence_i])\n print('sencentece with just pads')\n perplexity[sentence_i] = np.nan \n \n return perplexity", "def prob1(n):\n#raise NotImplementedError(\"Problem 1 Incomplete\")\n if n == 0 :\n raise ValueError(\"Sampling 0 points is not defined.\")\n total = 0\n for i in xrange(n) :\n if np.random.normal() > 3 :\n total += 1\n return float(total)/n", "def probability(cpts, term, obs):\r\n \r\n \r\n # term is a list e.g., ['x_1', '0']\r\n # flip refers to the assignment either '0' false or '1' true\r\n flip = term[1]\r\n # the term itself\r\n term = term[0]\r\n # accumulator variable\r\n answer = 0\r\n # this loop locates where in the CPT we're looking\r\n for clause in range(len(cpts)):\r\n if cpts[clause][0] == term:\r\n index = clause\r\n # focus on our term\r\n cpt = cpts[index]\r\n # this loop checks if there are no preconditions\r\n # if not, then we immediately know the probability and can return\r\n for m in range(len(cpt[1])):\r\n if cpt[1][m][-2][1] == '1':\r\n if cpt[1][m][0] == [[]]:\r\n answer = cpt[1][m][-1]\r\n # list of the variables we have observed\r\n have = []\r\n if obs != []:\r\n for k in obs:\r\n have.append(k[0])\r\n # list of variables we need to know in order to calculate the probability\r\n needed = []\r\n for prob in range(len(cpt[1])):\r\n for j in cpt[1][prob][0]:\r\n if j != []:\r\n if j[0] not in needed:\r\n needed.append(j[0])\r\n # conditional logic based on the known variables\r\n for required in needed:\r\n if required not in have:\r\n # deep copy our observations list\r\n obs2 = []\r\n obs3 = []\r\n for observs in obs:\r\n obs2.append(observs)\r\n obs3.append(observs)\r\n # if we need to know a variable but don't have it\r\n # then we allow it to be either 0 or 1\r\n obs3.append([required,'1'])\r\n obs2.append([required,'0'])\r\n # computes probability if the unknown term is true, times \r\n # the probability that the unknown term is true, plus the\r\n # probability if the unknown term is false, times the \r\n # probability that the unknown term is false\r\n answer = (probability(cpts, [term,flip], obs3) * probability(cpts, [required,'1'], obs)) + (probability(cpts, [term,flip], obs2) * (probability(cpts, [required,'0'], obs)))\r\n # this loop looks complicated but all it's doing is finding the correct\r\n # line in the CPT\r\n if cpt[1][prob][-2][1] == '1':\r\n count = 1\r\n for i in range(len(cpt[1][prob][0])):\r\n if cpt[1][prob][0][i] in obs:\r\n count *= 1\r\n else:\r\n count = 0\r\n if count == 1:\r\n answer += cpt[1][prob][-1]\r\n\r\n\r\n # this computes the probability that the term is true, so if we asked \r\n # for the probability that it is false, just return 1 - answer\r\n if flip == '0':\r\n return 1 - answer\r\n return answer", "def 
perplexity(p: np.ndarray, q: np.ndarray, eps: float = 1e-10) -> List[float]:\n kl_div_pq = kl_div(p, q, eps)[0]\n perplexity_pq = np.exp(-kl_div_pq)\n return [perplexity_pq]" ]
[ "0.8005421", "0.7128528", "0.69695795", "0.68535006", "0.6853017", "0.6775191", "0.67121816", "0.668699", "0.65877926", "0.65655065", "0.6539731", "0.6510954", "0.64906204", "0.64209616", "0.6399191", "0.6285764", "0.62789685", "0.6167774", "0.60993886", "0.60989195", "0.6094348", "0.60791427", "0.60707474", "0.6005147", "0.5993785", "0.5993739", "0.59867895", "0.5978392", "0.59342664", "0.5919781" ]
0.7709719
1
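Editor's note on the perplexity record above: it also calls into a compiled routine, but the usual definition behind held-out LDA perplexity is exp of the negative mean token log-likelihood, where each token's probability mixes the document-topic and term-topic proportions. The helper below is a hedged reconstruction under that assumption, using the dt (documents x topics) and tt (terms x topics) layouts the neighbouring records suggest; it is not the compiled routine's code.

    import numpy as np

    def lda_perplexity(tokens, docid, tt, dt):
        # p(token i) = sum_k dt[doc_i, k] * tt[term_i, k]; perplexity = exp(-mean log p)
        token_probs = np.einsum('nk,nk->n', dt[docid], tt[tokens])
        return np.exp(-np.mean(np.log(token_probs)))

    # toy example: 2 documents, 4 terms, 2 topics
    tt = np.array([[0.4, 0.1], [0.3, 0.2], [0.2, 0.3], [0.1, 0.4]])  # columns sum to 1
    dt = np.array([[0.7, 0.3], [0.2, 0.8]])                          # rows sum to 1
    tokens = np.array([0, 1, 2, 3])
    docid = np.array([0, 0, 1, 1])
    print(lda_perplexity(tokens, docid, tt, dt))

In the record's setting this would be evaluated once per stored Gibbs sample, giving one perplexity value per sample.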
Keep subset of samples. If index is an integer, keep last N=index samples. If index is a list, keep the samples corresponding to the index values in the list.
def samples_keep(self,index): if isinstance(index, (int, long)): index = range(self.samples)[-index:] self.sampled_topics = np.take(self.sampled_topics,index,axis=0) self.tt = np.take(self.tt,index,axis=2) self.dt = np.take(self.dt,index,axis=2) self.samples = len(index)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _index_select_nd(source: torch.Tensor, index: torch.Tensor) -> torch.Tensor:\n index_size = index.size() # (num_atoms/num_bonds, max_num_bonds)\n suffix_dim = source.size()[1:] # (hidden_size,)\n final_size = index_size + suffix_dim # (num_atoms/num_bonds, max_num_bonds, hidden_size)\n\n target = source.index_select(dim=0, index=index.view(\n -1)) # (num_atoms/num_bonds * max_num_bonds, hidden_size)\n target = target.view(\n final_size) # (num_atoms/num_bonds, max_num_bonds, hidden_size)\n\n return target", "def test_subset_by_index(self):\n\n this_satellite_dict = satellite_io.subset_by_index(\n satellite_dict=copy.deepcopy(SATELLITE_DICT_ALL_EXAMPLES),\n desired_indices=DESIRED_INDICES\n )\n\n self.assertTrue(compare_satellite_dicts(\n this_satellite_dict, SATELLITE_DICT_SUBSET_BY_INDEX\n ))", "def random_sampling(self, n_subset):\n t = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n print(\"[INFO] {} - Random sampling with replacement ...\".format(t))\n subset_list = []\n training_set = self\n subset_size = math.ceil(training_set.n_samples / n_subset)\n # create subsets\n for i in range(n_subset):\n # run a permutation to mix all samples (sampling with replacement)\n self.permutation()\n # always draw the first samples\n start_idx = 0\n stop_idx = subset_size\n subset = deepcopy(training_set)\n subset.data = subset.data[start_idx:stop_idx][:]\n subset.labels = subset.labels[start_idx:stop_idx][:]\n subset.labels_onehot = subset.labels_onehot[start_idx:stop_idx][:]\n subset.n_samples = stop_idx - start_idx\n subset.true_distribution = subset._get_true_distribution()\n subset.set_batch_size(training_set.batch_size)\n subset_list.append(subset)\n print(\"\\tSubset shape {}\".format(subset.data.shape))\n return subset_list", "def _select_batch(self, index):\n swap = np.random.binomial(1, 0.5, 1)\n if swap:\n triplet_labels = self.triplet_label[::-1]\n # sample along columns of crosstab\n classes_selected = self.class_ref.T.sample(self.classes_in_batch)\n else:\n triplet_labels = self.triplet_label\n classes_selected = self.class_ref.sample(self.classes_in_batch)\n classes1_selected = classes_selected.index.values\n classes2_selected = classes_selected.loc[:, classes_selected.sum() > 0].columns.values\n batch = self.data.loc[self.data[triplet_labels[0]].isin(classes1_selected), :].\\\n groupby(triplet_labels[0]).apply(lambda x: self._select_samples_for_class(x,\n classes2_selected,\n triplet_labels[1]))\n return batch", "def pop(self, idx=None):\n if not idx:\n samples = np.copy(self.data[:self.idx])\n self.data[:] = np.empty(self.data.shape)\n self.idx = 0\n else:\n if idx > self.idx:\n raise ValueError()\n samples = np.copy(self.data[:idx])\n data = np.copy(self.data[idx:self.idx])\n self.data[:] = np.empty(self.data.shape)\n self.data[:self.idx - idx] = data\n self.idx -= idx\n return samples", "def selectOfSample(self, indexes):\n index_set = set()\n for idx in indexes:\n i = list(self.sample[self.sample['masked'] == False].index)[idx]\n index_set.add(i)\n for ind in list(self.sample[self.sample['masked'] == False].index):\n if ind not in index_set:\n self.sample.at[ind, 'masked'] = True\n return index_set", "def pull_n_samples(dset, n):\n return list(dset[i] for i in random.sample(range(len(dset)), n))", "def subsampleData(self, count):\n size = 0\n for block in self.blocks: size += len(block[1])\n subset = numpy.random.permutation(size)[:count]\n subset.sort()\n\n pos = 0\n index = 0\n ret = Dataset()\n for block in self.blocks:\n while subset[index]<(pos+len(block[1])):\n loc 
= subset[index] - pos\n ret.add(block[0][loc,:], block[1][loc])\n index += 1\n if index==subset.shape[0]: return ret\n pos += len(block[1])\n \n return ret", "def subset_from_indices(self, indices):\n return self.extract_inds(indices)", "def slice_by_index(lst, indices):\r\n slicer = itemgetter(*indices)(lst)\r\n if len(indices) == 1:\r\n return [slicer]\r\n return list(slicer)", "def slice_by_index(lst, indexes):\r\n if not lst or not indexes:\r\n return []\r\n slice_ = itemgetter(*indexes)(lst)\r\n if len(indexes) == 1:\r\n return [slice_]\r\n return list(slice_)", "def subsample(self, dataset):\n sample_idx = np.random.choice(\n dataset.shape[0], self.sample_size, replace=True)\n sample = dataset[sample_idx,...]\n return sample", "def create_sample(df: pd.DataFrame, indices: list, n: int = 2) -> list:\r\n samples = []\r\n for idx in indices:\r\n if idx <= n:\r\n continue\r\n\r\n samples.append([\r\n ' '.join(df.loc[idx - n:idx - 1, 'article'].to_list()),\r\n df.loc[idx, 'article']\r\n ])\r\n return samples", "def cut_sample(whole_audio_data, num_samples):\n len_audio_data = len(whole_audio_data)\n if num_samples >= len_audio_data:\n raise Exception(\"Length of to be generated signal cannot be greater and equal to original audio signal\")\n sys.exit(-1)\n\n # generate a random number which is used as a first index to cut off\n ind = random.randint(0, len_audio_data-num_samples)\n gen_data = whole_audio_data[ind:ind+num_samples]\n return gen_data", "def index_to_slices(index):\r\n\r\n #contruct the return structure\r\n ind = np.asarray(index,dtype=np.int64)\r\n ret = [[] for i in range(ind.max()+1)]\r\n\r\n #find the switchpoints\r\n ind_ = np.hstack((ind,ind[0]+ind[-1]+1))\r\n switchpoints = np.nonzero(ind_ - np.roll(ind_,+1))[0]\r\n\r\n [ret[ind_i].append(slice(*indexes_i)) for ind_i,indexes_i in zip(ind[switchpoints[:-1]],zip(switchpoints,switchpoints[1:]))]\r\n return ret", "def index_to_slices(index):\r\n\r\n #contruct the return structure\r\n ind = np.asarray(index,dtype=np.int64)\r\n ret = [[] for i in range(ind.max()+1)]\r\n\r\n #find the switchpoints\r\n ind_ = np.hstack((ind,ind[0]+ind[-1]+1))\r\n switchpoints = np.nonzero(ind_ - np.roll(ind_,+1))[0]\r\n\r\n [ret[ind_i].append(slice(*indexes_i)) for ind_i,indexes_i in zip(ind[switchpoints[:-1]],zip(switchpoints,switchpoints[1:]))]\r\n return ret", "def select(individuals, n):\r\n # return selBest(individuals, n)\r\n return individuals[:n]", "def batched_index_select(input, dim, index):\n views = [input.shape[0]] + [1 if i != dim else -1 for i in range(1, len(input.shape))]\n expanse = list(input.shape)\n expanse[0] = -1\n expanse[dim] = -1\n index = index.view(views).expand(expanse)\n return torch.gather(input, dim, index)", "def filter_workload_index(\n only: tp.Optional[WorkloadSet], index: WorkloadIndex\n) -> tp.Generator[tp.List[Command], None, None]:\n\n keys = [k for k in index if k and ((only and (k & only)) or (not only))]\n for k in keys:\n yield index[k]", "def keep_n(self, n=100):\n before = self.item_count()\n\n item_count = self.item_count()\n if item_count > n: self.filter(self.sample(n))\n\n after = self.item_count()\n with msg(f'Keeping (at most) {n} items: {after} of {before}', done=False, enabled=self.output):pass", "def random_subset(array, count):\n indices = np.random.permutation(len(array))[:count]\n return array[indices]", "def slice_bands(self, band_idx):\n new_eigenvals = self.eigenvals.T[sorted(band_idx)].T\n return type(self)(kpoints=self.kpoints, eigenvals=new_eigenvals)", "def sampleNo(xvar, yvar, N, 
avoididx):\n\n allidx = np.arange(0, len(xvar)*len(yvar)) # flattened array of all indices in mesh\n noidx = np.setxor1d(allidx, avoididx) #allidx - avoididx\n #noidx = np.array(list(set(allidx) - set(avoididx)))\n nosampleidx = np.random.choice(noidx, size=N,replace=False)\n newavoididx = np.sort(np.hstack((avoididx, nosampleidx)))\n rowidx,colidx = np.unravel_index(nosampleidx, (len(yvar), len(xvar)))\n samples = []\n for row,col in zip(rowidx, colidx):\n xp = xvar[col]\n yp = yvar[row]\n samples.append((xp, yp))\n\n return (samples, newavoididx)", "def take(self, lists_indices):\n return self.d_series.map_partitions(\n lambda s: s.list.take(lists_indices), meta=self.d_series._meta\n )", "def remove_from_bad(arr,index_to_remove):\n \n newarr=arr\n set_rank_to_remove=np.where(arr==index_to_remove)[0]\n if len(set_rank_to_remove)!=0:\n rank_to_remove=set_rank_to_remove[0]\n newarr=np.delete(arr,rank_to_remove)\n return newarr", "def set_subset(self):\r\n if self._random_subset:\r\n perm = torch.randperm(len(self._indices))\r\n self._subset = self._indices[perm][:self._subset_size]\r\n else:\r\n self._subset = torch.Tensor(self._indices[:self._subset_size])", "def select_at_indexes(indexes, tensor):\n dim = len(indexes.shape)\n assert indexes.shape == tensor.shape[:dim]\n num = indexes.numel()\n t_flat = tensor.view((num,) + tensor.shape[dim:])\n s_flat = t_flat[torch.arange(num), indexes.view(-1)]\n return s_flat.view(tensor.shape[:dim] + tensor.shape[dim + 1:])", "def _sample(self, n=1):\n return [self[i] for i in np.random.choice(self.length, n, replace=False)]", "def pop(self, index):\n self._sets.pop(index)", "def set_index_list(index_a):\n indexes = list(range(len(game_data.data)))\n if index_a != -1:\n indexes.pop(index_a)\n return indexes" ]
[ "0.60203993", "0.59574664", "0.5886324", "0.5867899", "0.5816982", "0.57350016", "0.5727632", "0.5652907", "0.56261337", "0.56184375", "0.56039095", "0.5409709", "0.53960425", "0.5390497", "0.5389218", "0.5389218", "0.5365618", "0.5363418", "0.535475", "0.53461355", "0.53373736", "0.53141606", "0.52867824", "0.5282918", "0.5270323", "0.52694106", "0.52681524", "0.52595764", "0.52415764", "0.52308595" ]
0.7132457
0
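Editor's note on the samples_keep record above: it accepts either an integer (keep only the last N stored samples) or an explicit list of sample indices, and the slicing itself is just np.take along the sample axis; the isinstance(index, (int, long)) check marks the code as Python 2. The snippet below shows the same two calling conventions on a toy array; the shapes are assumptions chosen only to match the V x K x samples layout visible in the other records.

    import numpy as np

    tt = np.random.rand(5, 3, 10)                 # V=5 terms, K=3 topics, 10 stored samples

    # integer form: keep the last 4 samples
    last4 = np.take(tt, list(range(10))[-4:], axis=2)

    # list form: keep an explicit subset of samples
    subset = np.take(tt, [0, 2, 9], axis=2)

    print(last4.shape, subset.shape)              # (5, 3, 4) (5, 3, 3)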
Compute the average term-topic matrix, and print it to file if print_output=True.
def tt_avg(self, print_output=True, output_file = "tt.csv"): avg = self.tt.mean(axis=2) if print_output: np.savetxt(output_file, avg, delimiter = ",") return avg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def topic_content(self,W,output_file = \"topic_description.csv\"):\n\n\t\ttopic_top_probs = []\n\t\ttopic_top_words = []\n\n\t\ttt = self.tt_avg(False)\n\n\t\tfor t in xrange(self.K):\n\t\t\ttop_word_indices = tt[:,t].argsort()[-W:][::-1]\n\t\t\ttopic_top_probs.append(np.round(np.sort(tt[:,t])[-W:][::-1],3))\n\t\t\ttopic_top_words.append([self.token_key.keys()[self.token_key.values().index(i)] for i in top_word_indices])\n\n\t\twith codecs.open(output_file,\"w\",\"utf-8\") as f:\n\t\t\tfor t in xrange(self.K):\n\t\t\t\twords = ','.join(topic_top_words[t])\n\t\t\t\tprobs = ','.join([str(i) for i in topic_top_probs[t]])\n\t\t\t\tf.write(\"topic\" + str(t) + ',')\n\t\t\t\tf.write(\"%s\\n\" % words)\n\t\t\t\tf.write(\" \" + ',')\n\t\t\t\tf.write(\"%s\\n\" % probs)", "def output(query,lda,features):\n roles = get_mostcommon(path,5000)\n all_roles = len(roles)\n irrelevant = irrelevant_features(features)\n #with open(\"guesses.txt\", \"w\") as text_file:\n # text_file.write('role:')\n # text_file.write('\\t')\n # text_file.write(\"guess: \")\n # text_file.write('\\t')\n # text_file.write(\"smatch: \")\n # text_file.write('\\n')\n for query in roles:\n #text_file.write(str(query))\n #text_file.write('\\t')\n guess = guess_topic(ilda,query,features, irrelevant)\n #smatch = try_normaliser(query)\n #if guess != smatch:\n # diff += 1\n print(query)\n # print(guess, '\\t' , smatch )\n print(guess)\n print()\n #text_file.write(str(guess))\n #text_file.write('\\t')\n #text_file.write(str(smatch))\n #print('guess: ', str(guess), '\\n')\n #print('smatch: ', str(smatch))\n #text_file.write('\\t')\n #text_file.write(str(smatch))\n #text_file.write('\\n')\n #text_file.write('\\n')", "def print_avg():", "def main():\n vocab = str.split(file(sys.argv[1]).read())\n testlambda = numpy.loadtxt(sys.argv[2])\n testlambda = topN(testlambda, int(sys.argv[3]))\n words_per_topic = 20\n\n for k in range(0, len(testlambda)):\n lambdak = list(testlambda[k, :])\n lambdak = lambdak / sum(lambdak)\n temp = zip(lambdak, range(0, len(lambdak)))\n temp = sorted(temp, key=lambda x: x[0], reverse=True)\n\n print 'topic %d:' % (k)\n # feel free to change the \"53\" here to whatever fits your screen nicely.\n for i in range(0, words_per_topic):\n print '%s:%.4f' % (vocab[temp[i][1]], temp[i][0])\n print", "def save_topic_terms(self, topics, output_path):\n topic_terms_df = self.get_topic_terms_df(topics)\n topic_terms_df.to_string(output_path, index=False)", "def get_average_scores(self):\n models = self.eval_parameters['average_experiment']['models']\n metrics = self.eval_parameters['average_experiment']['metrics_list']\n metrics_keys = list(metrics.keys())\n print(r\"\\begin{table}[]\")\n print(\"\\centering\")\n print(r\"\\tiny\")\n print(\"\\caption{Average results over 85 topics. Each row represents a different run (top 10 runs of each model). 
Each column represents a different assessments aggregation.}\")\n print(\"\\label{tab:average_results}\")\n print(r\"\\begin{tabular}{@{}\"+''.join(['l']*(len(metrics_keys)+1))+\"@{}}\")\n print(\"runid\", '&'.join(metric.replace('_','\\_')for metric in metrics_keys),sep='&')\n print(r\"\\\\ \\midrule\")\n for model in models:\n runs = self.get_list_files(self.eval_parameters['average_experiment']['runs_folder'] + model+\"/\")\n for file in runs:\n val = []\n for metric_id in metrics_keys:\n out1 = subprocess.check_output(\n ['../trec_eval-master/trec_eval', '-m', metrics[metric_id]['metric'],\n metrics[metric_id]['qrels'], file])\n val += [str(out1.rstrip().split()[2]).replace('b\\'', '').replace('\\'', '')]\n print(file.replace(self.eval_parameters['average_experiment']['runs_folder'],'').replace('_','\\_'),'&', '&'.join(val),r\"\\\\\")\n print(r\"\\bottomrule\")\n print(r\"\\end{tabular}\")\n print(r\"\\end{table}\")", "def print_topic_word_distribution(corpus, number_of_topics, topk, filepath):\n\tV = len(corpus.vocabulary) # size of vocabulary\n\tassert(topk < V)\n\tf = open(filepath, \"w\")\n\tfor k in range(number_of_topics):\n\t\tword_prob = corpus.topic_word_prob[k, ] # word probability given a topic\n\t\t# print word_prob\n\t\tword_index_prob = []\n\t\tfor i in range(V):\n\t\t\tword_index_prob.append([i,corpus.vocabulary[i],word_prob[i]])\n\t\tword_index_prob = sorted(word_index_prob, key=itemgetter(1), reverse=True) # sort by word count\n\t\twith open('word_index_prob.txt',\"a+\") as f2:\n\t\t\tf2.write(str(word_index_prob)+'\\n')\n\t\t\tf2.close()\n\t\tf.write(\"Topic #\" + str(k) + \":\\n\")\n\t\tfor i in range(topk):\n\t\t\tindex = word_index_prob[i][0]\n\t\t\tf.write(corpus.vocabulary[index] + \" \")\n\t\tf.write(\"\\n\")\n\tprint \"Written topic-word distribution to file: \" + filepath \n\tf.close()", "def dt_avg(self, print_output=True, output_file = \"dt_query.csv\"):\t\n\n\t\tavg = self.dt.mean(axis=2)\n\t\tif print_output: np.savetxt(output_file, avg, delimiter = \",\")\n\t\treturn avg", "def dt_avg(self, print_output=True, output_file = \"dt.csv\"):\t\n\n\t\tavg = self.dt.mean(axis=2)\n\t\tif print_output: np.savetxt(output_file, avg, delimiter = \",\")\n\t\treturn avg", "def main():\n with read_std_files(OUT_FILE) as (qrys_file, docs_file, out_file):\n doc_count, token_count, word_map = map_docs(docs_file)\n avg_doc_len = token_count / float(doc_count)\n for doc_id, doc_tokens in tokenize(docs_file):\n doc_len = len(doc_tokens)\n doc_dct = dictify(doc_tokens)\n for query_id, query_tokens in tokenize(qrys_file):\n query_dct = dictify(query_tokens)\n similarity = tfidf(query_dct, doc_dct, doc_len, doc_count, avg_doc_len, word_map)\n log(out_file, query_id, doc_id, similarity)", "def main(result_directory, d_best, min_precision, targeted_files,\n null_distance):\n\n # paths\n path_log = os.path.join(result_directory, \"log_final_reduced\")\n\n # get data\n df_log = pd.read_csv(path_log, header=0, encoding=\"utf-8\", sep=\";\",\n index_col=False)\n path_tfidf = os.path.join(result_directory, \"tfidf.npz\")\n tfidf = load_sparse_csr(path_tfidf)\n print(\"df_log shape :\", df_log.shape)\n print(\"tfidf shape :\", tfidf.shape, \"\\n\")\n\n # compute topic space\n w, auc, precision, recall, threshold = compute_best_topic_space(\n result_directory, tfidf, df_log, d_best)\n print(\"--------------------------------------------\")\n print(\"w shape :\", w.shape)\n print(\"best auc :\", auc, \"(%i topics)\" % d_best[\"n_topics\"], \"\\n\")\n\n # fit the neighborhood\n radius, 
i_radius = find_radius(precision, recall, threshold, min_precision)\n neigh = define_neighbors(result_directory, w, radius, d_best[\"norm\"])\n print(\"recommendation radius :\", radius, \"\\n\")\n\n # plot precision recall curve\n graph_precision_recall(auc, recall, precision, i_radius)\n\n # find all neighborhoods\n # all_neighbors(result_directory, df_log, w, neigh)\n\n # get a 2D plan from the topic space\n (w_reduced_2d, variance_explained_2d, w_reduced_3d,\n variance_explained_3d) = dimensionality_reduction(result_directory, w)\n\n # plot reduced topic space\n plot_topic_space_reduced(result_directory, w_reduced_3d,\n variance_explained_3d)\n\n # reset new directories for neighbors plots\n create_specific_neighbors_directory(result_directory)\n\n # get information for a specific file\n for i_target in targeted_files:\n indices, distances = describe_one_file(result_directory, df_log, w,\n neigh, i_target, null_distance)\n\n # plot neighbors 3D\n graph_neighbors_3d_local(result_directory, df_log, w, indices,\n i_target, radius, str(i_target))\n\n return", "def print_document_topic_distribution(corpus, number_of_topics, topk, filepath):\n\t# print topk, number_of_topics\n\tassert(topk < number_of_topics)\n\tf = open(filepath, \"w\")\n\tD = len(corpus.documents) # number of documents\n\tfor d in range(D):\n\t\ttopic_prob = corpus.document_topic_prob[d, ] # topic probability given a document\n\t\ttopic_index_prob = []\n\t\tfor i in range(number_of_topics):\n\t\t\ttopic_index_prob.append([i, topic_prob[i]])\n\t\ttopic_index_prob = sorted(topic_index_prob, key=itemgetter(1), reverse=True)\n\t\tf2 = open('topic_index_prob.txt',\"a+\")\n\t\tf2.write(str(topic_index_prob)+'\\n')\n\t\tf2.close()\n\t\tf.write(\"Document #\" + str(d) + \":\\n\")\n\t\tfor i in range(topk):\n\t\t\tindex = topic_index_prob[i][0]\n\t\t\tf.write(\"topic\" + str(index) + \" \")\n\t\tf.write(\"\\n\")\n\tprint \"Written document-topic distribution to file: \" + filepath \n\tf.close()", "def write_topic(bag, output_file, topic_name, column_names):\n column_mapping = dict(zip(column_names, range(0, len(column_names))))\n\n \"\"\" Go through every message for a given topic, extract its data fields,\n and write it to the output file\n \"\"\"\n msg_count = 1\n for _, msg, _ in bag.read_messages(topics=topic_name):\n sys.stdout.write('\\t\\tWriting message %u%s' % (msg_count, \"\\r\"))\n msg_count += 1\n column_values = {}\n \"\"\" Build a dictionary of field names and their values. 
The field names\n match the column headers.\n \"\"\"\n find_field_value('', msg, column_values, column_mapping)\n \"\"\" write the discovered values out to the file \"\"\"\n write_topic_line(output_file, column_mapping, column_values)\n\n sys.stdout.write('\\t\\tProcessed %u messages\\n' % (msg_count - 1))", "def show_topic_model_textually(seed_gensim_topic_model, seed_gensim_corpus,\n texts_to_analyze, num_topics):\n print(\"alpha =\", seed_gensim_topic_model.alpha)\n print(seed_gensim_topic_model)\n print(seed_gensim_topic_model.print_topics(num_topics))\n print()", "def generateMatrix(self):\n if self.tokenWeights and self.extraFeatures:\n nFeatures = self.wordId + self.wordId2 + len(self.EXTRA_WEIGHTS_LABELS)\n logging.info('Exporting TOKEN WEIGHTS AND EXTRA FEATURES %dx%d'%(self.docId, nFeatures))\n mtrx = np.zeros((self.docId, nFeatures))\n \n for docId, doc in self.documents.iteritems():\n # iterate through 1st sentence\n for wId, val in doc['s1'].iteritems():\n mtrx[docId, wId] = val\n # then iterate thru 2nd sentence, store on 2ND PARTITION\n for wId, val in doc['s2'].iteritems():\n mtrx[docId, self.wordId + wId] = val\n # finally extra features values stored at the end of the vector\n for label, val in doc['extraFeatures'].iteritems():\n mtrx[docId, self.wordId + self.wordId2 + self.EXTRA_WEIGHTS_LABELS.index(label)] = val\n\n elif self.tokenWeights and not self.extraFeatures:\n nFeatures = self.wordId + self.wordId2\n logging.info('Exporting TOKEN WEIGHTS %dx%d'%(self.docId, nFeatures))\n mtrx = np.zeros((self.docId, nFeatures))\n \n for docId, doc in self.documents.iteritems():\n # iterate through 1st sentence\n for wId, val in doc['s1'].iteritems():\n mtrx[docId, wId] = val\n # then iterate thru 2nd sentence, store on 2ND PARTITION\n for wId, val in doc['s2'].iteritems():\n mtrx[docId, self.wordId + wId] = val\n else:\n nFeatures = len(self.EXTRA_WEIGHTS_LABELS)\n logging.info('Exporting EXTRA FEATURES %dx%d'%(self.docId, nFeatures))\n mtrx = np.zeros((self.docId, nFeatures))\n \n for docId, doc in self.documents.iteritems():\n for label, val in doc['extraFeatures'].iteritems():\n mtrx[docId, self.EXTRA_WEIGHTS_LABELS.index(label)] = val\n logging.info('Matrix generated')\n logging.info(mtrx.shape)\n return mtrx", "def summarize_corpus():\n\t\n\t# get metadata\n\t#get_metadata.from_TEIP5(wdir, corpus_inpath, \"metadata\", md_mode)\n\t\n\t# visualize some metadata\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"author-continent\")\n\tvisualize_metadata.describe_corpus(wdir, md_csv, \"author-country\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"language\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"subgenre_hist\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"subgenre_x\")\n\tvisualize_metadata.plot_pie(wdir, md_csv, \"subgenre\")\n\n\tvisualize_metadata.describe_corpus(wdir, md_csv, \"subgenre\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"gender\")\n\t\n\t# make some counts\n\tmd_table = pd.DataFrame.from_csv(os.path.join(wdir, md_csv), header=0)\n\tnum_texts = len(md_table)\n\t#num_language = len(md_table.groupby([\"language\"]))\n\t#num_continent = len(md_table.groupby([\"author-continent\"]))\n\t#num_countries = len(md_table.groupby([\"author-country\"]))\n\t#num_authors = len(md_table.groupby([\"author-name\"]))\n\tnum_authors = len(md_table.groupby([\"author-name\"]))\n\tnum_subgenre = len(md_table.groupby([\"subgenre\"]))\n\t#num_subgenre_x = len(md_table.groupby([\"subgenre_x\"]))\n\t#fr_subgenre_hist = 
md_table.groupby([\"subgenre_hist\"]).count()\n\t#num_historical = fr_subgenre_hist[\"idno\"][\"historical\"]\n\t#num_not_historical = fr_subgenre_hist[\"idno\"][\"not_historical\"]\n\t\n\t\n\td = {\"texts\":[num_texts], \n\t#\"languages\":[num_language],\n\t#\"continents\":[num_continent],\n\t#\"countries\":[num_countries],\n\t\"authors\":[num_authors],\n\t#\"subgenre_x\":[num_subgenre_x],\n\t\"subgenre\":[num_subgenre]}\n\t#\"num_historical\":[num_historical],\n\t#\"num_not_historical\":[num_not_historical]}\n\t\n\t\n\t\n\tcount_fr = pd.DataFrame(d)\n\tcount_fr.to_csv(os.path.join(wdir, \"corpus-description.csv\"), sep=\",\", header=True)\n\tprint(\"Done: summarize corpus\")", "def make_term_doc_matrix(self):\n print \"\\ngenerating term-frequency matrix:\"\n\n try:\n len_of_code_book = self.len_of_code_book\n except AttributeError as e:\n print \">>> temp histogram method not run. exit()\"\n sys.exit(1) # change this to serch for the longest histgoram in the directory ??\n\n list_of_histograms = []\n for d_cnt, date in sorted(enumerate(os.listdir(self.hist_path))):\n directory = os.path.join(self.hist_path, date)\n print \" >\", date\n for recording in sorted(os.listdir(directory)):\n list_of_histograms.append((recording, directory, len_of_code_book))\n\n if self.config['hists']['parallel']:\n num_procs = mp.cpu_count()\n pool = mp.Pool(num_procs)\n chunk_size = int(np.ceil(len(list_of_histograms)/float(num_procs)))\n results = pool.map(h.worker_padd, list_of_histograms, chunk_size)\n pool.close()\n pool.join()\n else: # for sequential debugging:\n results = []\n for cnt, event in enumerate(list_of_histograms):\n print \"adding to feature space: \", event[0]\n results.append(h.worker_padd(event))\n\n uuids = [uuid for (uuid, hist) in results]\n f = open(self.accu_path + \"/list_of_uuids.p\", \"w\")\n pickle.dump(uuids, f)\n f.close()\n\n # features = np.vstack(results)\n features = np.vstack([hist for (uuid, hist) in results])\n new_features = h.recreate_data_with_high_instance_graphlets(self.accu_path, features, self.config['hists']['low_instances'])\n return True", "def print_topics(self, num_topics=10, num_words=10):\n\n topic_modeler = LatentDirichletAllocation(n_topics=num_topics, learning_method='online')\n\n topic_modeler.fit(self.comments_vectorized)\n\n word_list = self.vectorizer.get_feature_names()\n\n for topic_number, topic in enumerate(topic_modeler.components_):\n top_ten = np.argsort(-topic)[:num_words]\n\n words_ranked = \", \".join([word_list[i] for i in top_ten])\n\n print(\"Topic {}: {}\".format(topic_number, words_ranked))", "def produce_all_term_data(self):\n # remove cold start records if requested\n test = self.test.copy()\n test = self.handle_cold_start(test)\n\n outputs = self.output()\n trainf, testf = outputs['train'], outputs['test']\n with trainf.open('w') as ftrain, testf.open('w') as ftest:\n self.write_libfm_data(ftrain, ftest, self.train, test)\n\n # Write the term-to-id guide\n test = test.sort(('termnum'))\n test['rownum'] = np.arange(len(test))\n guide = test.groupby('termnum').max()['rownum']\n with self.output()['guide'].open('w') as f:\n guide.to_csv(f, index_label='termnum', header=True)", "def print_topic(self, topic, time=0, top_terms=20):\n topic = self.topic_chains[topic].e_log_prob\n topic = np.transpose(topic)\n topic = np.exp(topic[time])\n topic = topic / topic.sum()\n bestn = matutils.argsort(topic, top_terms, reverse=True)\n beststr = [(self.id2word[id_], topic[id_]) for id_ in bestn]\n return beststr", "def 
writeNormScore(self,fin,fout):\n\n for line in fin:\n [sv, en, score] = re.split(r'\\t|,',line)\n self.count[sv][en] += float(score)\n self.en_sum[en] += float(score)\n self.sv_sum[sv] += float(score)\n\n for sv, ens in self.count.iteritems():\n for en in ens.keys():\n fout.write(sv + \",\" + en + \"\\t\" + str(self.count[sv][en] / self.sv_sum[sv] * self.en_sum[en]) + \"\\n\")", "def print_report(\n m, X_valid, y_valid, t=0.5, X_train=None, y_train=None, show_output=True\n):\n # X_train = X_train.values\n # X_valid = X_valid.values\n\n if isinstance(m, list):\n probs_valid = predict_ensemble(m, X_valid)\n y_val_pred = adjusted_classes(probs_valid, t)\n\n if X_train is not None:\n probs_train = predict_ensemble(m, X_train)\n y_train_pred = adjusted_classes(probs_train, t)\n else:\n probs_valid = m.predict_proba(X_valid)[:, 1]\n y_val_pred = adjusted_classes(probs_valid, t)\n\n if X_train is not None:\n probs_train = m.predict_proba(X_train)[:, 1]\n y_train_pred = adjusted_classes(probs_train, t)\n\n res = [\n roc_auc_score(y_valid, probs_valid),\n f1_score(y_valid, y_val_pred),\n confusion_matrix(y_valid, y_val_pred),\n ]\n result = f\"AUC valid: {res[0]} \\nF1 valid: {res[1]}\"\n\n if X_train is not None:\n res += [\n roc_auc_score(y_train, probs_train),\n f1_score(y_train, y_train_pred),\n ]\n result += f\"\\nAUC train: {res[3]} \\nF1 train: {res[4]}\"\n\n acc_train = m.score(X_train, y_train)\n acc_valid = m.score(X_valid, y_valid)\n\n if show_output:\n logging.info(f\"train acc: {acc_train}\")\n logging.info(f\"test acc: {acc_valid} \")\n\n logging.info(result)\n plot_confusion_matrix(\n m, X_valid, y_valid, display_labels=y_valid.unique()\n )\n logging.info(classification_report(y_valid, y_val_pred))\n plt.show()\n return {\n \"train\": {\"AUC\": res[3], \"F1\": res[4], \"acc\": acc_train},\n \"test\": {\"AUC\": res[0], \"F1\": res[1], \"acc\": acc_valid},\n }", "def get_topic_matrix(self):\n print('get topic matrix')\n\n topic_words_dict = self.config['topic_words']\n\n topic_matrix = np.empty((0, self.wordvec.embedding_dim))\n\n topic_id = 0\n for topic in topic_words_dict.keys():\n topic_words = topic_words_dict[topic]\n topic_vector = self.wordvec.avg_words_vector(topic_words)\n\n topic_matrix = np.append(topic_matrix, topic_vector, axis=0)\n\n self.id2topic[str(topic_id)] = topic\n topic_id += 1\n\n return topic_matrix", "def maximization_step(self, number_of_topics, verbose):\n if verbose:\n print(\"M step:\")\n\n self.topic_word_prob = np.zeros((number_of_topics, len(self.vocabulary)))\n self.topic_word_prob_collection_specific = []\n\n for k in range(self.number_of_collections):\n topic_word_prob_collection_specific = np.zeros((number_of_topics, len(self.vocabulary)))\n for i in range(self.number_of_documents):\n # update P(w | z)\n\n # ############################\n\n self.topic_word_prob = np.add(self.topic_word_prob,\n np.transpose(np.multiply(np.multiply(np.multiply(self.term_doc_matrix[k][i], 1 - self.topic_prob_B[k][i]), self.topic_prob_j[k][i]), self.topic_prob_C[k][i])))\n\n topic_word_prob_collection_specific = np.add(self.topic_word_prob,\n np.transpose(np.multiply(np.multiply(np.multiply(self.term_doc_matrix[k][i], 1 - self.topic_prob_B[k][i]), self.topic_prob_j[k][i]), 1 - self.topic_prob_C[k][i])))\n\n # update P(z | d)\n\n # ############################\n\n matrix = np.dot(np.transpose(self.term_doc_matrix[k][i]), self.topic_prob_j[k][i])\n self.document_topic_prob[k][i] = normalize_row(matrix)\n topic_word_prob_collection_specific = 
normalize_row(topic_word_prob_collection_specific)\n self.topic_word_prob_collection_specific.append(topic_word_prob_collection_specific)\n\n self.topic_word_prob = normalize_row(self.topic_word_prob)\n\n #print(\"pi:\")\n #print(self.document_topic_prob)\n #print(\"p(w|theta):\")\n #print(self.topic_word_prob)", "def print_output():\n print(\"count: [primary: \"+str(primary_shards)+\", replica: \"+str(secondary_shards)+\"]\")\n print(\"size: [primary: \"+pretty_print_storage(total_size_primary)+\", replica: \"+pretty_print_storage(total_size_secondary)+\"]\")\n print(\"disk-max-node: \"+max_size_node_name)\n print(\"watermark-breached: \"+str(watermark_breached))", "def write(self, output):\n\n keys = self._motifs.keys()\n keys.sort()\n\n thre = self._motifs['threshold'] # threshold used for motif detection\n align = self._motifs[\"align\"] # alignment score used to detect motifs\n\n with open(output, \"w\") as o:\n\n # Precise time of the computation and print the whole results file\n print >> o, \"Launched:{} GMT Threshold used: {} AlignThreshold: {}\\n\" \\\n .format(strftime(\"%a, %d %b %Y %H:%M:%S\", gmtime()), thre, align)\n for k in keys:\n if k != \"threshold\" and k != \"size\" and k != \"align\":\n smk = self._motifs[k] # motif number X, SMK stands for\n # Self._Motifs[K]\n # print the general information for this particular motif\n print >> o, \"\\n{} Start: {} Stop: {} \".format(k, smk[\"start\"], smk[\"stop\"]-1)\\\n + \"AvgPhylogeneticScore: {} AvgAlignScore: {} Size: {}\\n\"\\\n .format(smk[\"score\"], smk[\"align\"], smk[\"size\"])\n\n sub = smk.keys()\n sub.sort() # to have always the same order of sequences\n # print sequences\n for s in sub:\n if s != \"start\" and s != \"stop\" and s != \"score\" \\\n and s != \"align\" and s != \"size\":\n print >> o, \"{0:20}\\t{1:20}\\t{2}\".format(s, smk[s][\"start\"], smk[s][\"seq\"].upper())", "def alpha_prof_out(\n cluster,\n fileout,\n **kwargs,\n):\n\n m_mean, m_hist, dm, alpha, ealpha, yalpha, eyalpha = mass_function(\n cluster,\n **kwargs,\n )\n lrprofn, aprof, dalpha, edalpha, ydalpha, eydalpha = alpha_prof(\n cluster,\n **kwargs,\n )\n\n fileout.write(\"%f %f %f %f %f \" % (cluster.tphys, alpha, ealpha, yalpha, eyalpha))\n for i in range(0, len(m_mean)):\n fileout.write(\"%f \" % m_mean[i])\n for i in range(0, len(dm)):\n fileout.write(\"%f \" % dm[i])\n for i in range(0, len(lrprofn)):\n fileout.write(\"%f \" % lrprofn[i])\n for i in range(0, len(aprof)):\n fileout.write(\"%f \" % aprof[i])\n\n fileout.write(\"%f %f %f %f\\n\" % (dalpha, edalpha, ydalpha, eydalpha))", "def output():\n\n if args.top and not args.tfidf and not args.svd:\n most_frequent(vector).to_csv(path_or_buf=\"top{}_vectorfile.csv\".format(args.top))\n\n elif args.top and args.tfidf and not args.svd:\n tfidf_transform(most_frequent(vector)).to_csv(path_or_buf=\"tfidf_top{}.csv\".format(args.top))\n\n elif args.top and args.tfidf and args.svd:\n svd_transform(tfidf_transform(most_frequent(vector)), indexes).to_csv(path_or_buf=\"svd{}_tfidf_topn.csv\".format(args.svd))\n\n elif args.tfidf and not args.top and not args.svd:\n tfidf_transform(vector).to_csv(path_or_buf=\"tfidf.csv\")\n\n elif args.svd and not args.top and not args.tfidf:\n svd_transform(vector, indexes).to_csv(path_or_buf=\"svd{}_vector.csv\".format(args.svd))\n\n elif args.tfidf and args.svd and not args.top:\n svd_transform(tfidf_transform(vector), indexes).to_csv(path_or_buf=\"svd{}_tfidf.csv\".format(args.svd))\n\n else:\n vector.to_csv(path_or_buf=\"vectorfile.csv\")", "def 
export(self, outdir, topic_docs = None):\n parsed_topics_fn = outdir+'/parsed_topics_'+self.name+'.csv'\n parsed_topics = self.parse_topics()\n with open(parsed_topics_fn, 'wb') as outfile:\n writer = csv.writer(outfile)\n writer.writerow(['topic index', 'top words'])\n for i,t in enumerate(parsed_topics):\n writer.writerow([i]+t)\n if topic_docs is not None:\n doc_tops_fn = outdir+'/doc_topics_'+self.name+'.csv'\n pnos,texts = topic_docs\n doc_tops = self.doc_topics(texts)\n with open(doc_tops_fn, 'wb') as outfile:\n writer = csv.writer(outfile)\n writer.writerow(['pno', 'top 10 topics'])\n for pno,dts in zip(pnos, doc_tops):\n writer.writerow([pno]+dts)\n visualize_fn = outdir+'/vis'+self.name+'.html'\n self.visualize(visualize_fn)", "def writeMatlabOutput(self, output, prettyname):\n self.writeHeaderOutput((\"%\",\"%\"), output, prettyname)\n for d in self.data:\n # now just print it out\n line = d.buildMatlabDeclaration(\"\", \"\", root=self.matlabRoot)\n if len(line) != 0:\n output.write(line+\"\\n\\n\")" ]
[ "0.5921104", "0.57090473", "0.56463623", "0.55301774", "0.54683155", "0.54337686", "0.53767115", "0.5324321", "0.52994114", "0.52380234", "0.52355903", "0.52273804", "0.51805234", "0.51471543", "0.5144186", "0.5143176", "0.5133842", "0.51320434", "0.51258814", "0.5087141", "0.50683683", "0.5061348", "0.50500524", "0.5043", "0.50316876", "0.5026071", "0.50185233", "0.5012636", "0.5008574", "0.5008522" ]
0.6552503
0
Override default values for random initial topic assignment, set to "seed" instead. seed is 2d array (number of samples in LDA model x number of tokens in LDA model)
def set_seed(self,seed):
    assert seed.dtype==np.int and seed.shape==(self.samples,self.N)
    self.topic_seed = seed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_seed(self,seed):\n\n\t\tassert seed.dtype==np.int and seed.shape==(self.N,)\n\t\tself.topic_seed = seed", "def initialize(self, number_of_topics, random=False):\n print(\"Initializing...\")\n\n self.initialize_randomly(number_of_topics)\n\n #print(\"pi\", self.document_topic_prob)\n #print(\"p(w|theta)\", self.topic_word_prob)", "def random_init(self, docs):\n for di in xrange(len(docs)):\n doc = docs[di]\n topics = np.random.randint(self.n_topic, size=len(doc))\n self.topic_assignment.append(topics)\n\n for wi in xrange(len(doc)):\n topic = topics[wi]\n word = doc[wi]\n self.TW[topic, word] += 1\n self.sum_T[topic] += 1\n self.DT[di, topic] += 1", "def initialize(self, number_of_topics, random=False):\n print(\"Initializing...\")\n\n if random:\n self.initialize_randomly(number_of_topics)\n else:\n self.initialize_uniformly(number_of_topics)", "def set_scikit_learn_seed(self):\n np.random.seed(42)", "def init_seed(seed=None):\n if seed is None:\n seed = int(time.time())\n\n LOGGER.info(\"Using seed=%d\", seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n random.seed(seed)", "def test_lda_topic_model_generator_dimensions( ):\n N = 1000\n D = 1000\n K = 10\n W = 100\n\n tm = LDATopicModel.generate( K, D, a0 = 15 )\n assert( tm.topics.shape == (D, K) )\n assert( tm.weights.shape == (K,) )\n assert( sc.allclose( tm.alphas.sum(), 15 ) )\n\n docs = tm.sample( N, words = W, n_views = 3 )\n # Each document is a row\n for v in docs:\n assert( v.shape == (N, D) )", "def set_manual_seed(seed):\n\n random.seed(seed)\n torch.manual_seed(seed)\n\n print('Using manual seed: {seed}'.format(seed=seed))", "def __init__(self,nback=1,ntokens_pm=2,ntokens_og=3,stimdim=2,seed=99):\n np.random.seed(seed)\n tr.manual_seed(seed)\n self.nback = nback\n # embedding\n self.ntokens_pm = ntokens_pm\n self.ntokens_og = ntokens_og\n self.stimdim = stimdim\n # emat\n self.randomize_emat()\n return None", "def __init__(self, corpus, seed=None):\n super().__init__()\n self.corpus = corpus\n self.seed = seed\n self.idxs = list(range(len(corpus)))\n self.shuffle(seed)", "def _initialize(self):\n for doc_index, doc in enumerate(self.document):\n temp_word_topic_matrix = []\n for word in doc:\n if word in self.word2id.keys():\n start_topic_index = np.random.randint(0, self.K)\n temp_word_topic_matrix.append(start_topic_index)\n self.doc_topic_matrix[doc_index, start_topic_index] += 1\n self.topic_word_matrix[start_topic_index, self.word2id[word]] += 1\n self.topic_matrix[start_topic_index] += 1\n self.current_word_topic_matrix.append(temp_word_topic_matrix)", "def set_seed(seed):\n torch.manual_seed(seed)\n random.seed(seed)\n np.random.seed(seed)", "def _lda(self):\n self.ldamodel = gensim.models.ldamodel.LdaModel(self.gensim_corpus, \n num_topics=self.n_topics, \n id2word=self.id_map, \n passes=self.n_passes,\n random_state=42)\n \n self.topic_matrix = self.ldamodel.print_topics(num_topics=self.n_topics, \n num_words=self.n_words)", "def set_seed(seed: int = None):\n\n if seed is not None:\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(0)", "def set_seed(seed: int):\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)", "def initialize(self, seed=None):\r\n self.seed(seed)", "def set_seed(seed: int):\n np.random.seed(seed)\n torch.manual_seed(seed)", "def set_seed(self):\n self.set_scikit_learn_seed()\n self.set_torch_seed()\n self.set_python_random_seed()", "def set_seed(seed: int):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)", "def 
generate_initial_topics(self):\n initial_topics = random.sample(self.remaining_topics, self.num_topics)\n self.remaining_topics = [topic for topic in self.remaining_topics if topic not in initial_topics]\n return initial_topics", "def set_global_seeds(seed):\n \n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)", "def _set_random_seed(seed):\r\n if seed is not None and seed > 0:\r\n random.seed(seed)\r\n np.random.seed(seed)\r\n torch.manual_seed(seed)\r\n if torch.cuda.device_count() > 0:\r\n mpu.model_parallel_cuda_manual_seed(seed)\r\n else:\r\n raise ValueError('Seed ({}) should be a positive integer.'.format(seed))", "def initialize(self):\n # Initializing the counter and distribution.\n for k in range(0, self.topic_number,1):\n self.topic_term_count_matrix[k]= [0.0] * self.term_number\n self.topic_distribution_over_term[k] = [0.0] * self.term_number\n self.sum_topic_by_term_count[k] = 0.0\n for m in range(0, self.document_number,1):\n self.document_topic_count_matrix[m] = [0.0] * self.topic_number\n self.document_distribution_over_topic[m] = [0.0] * self.topic_number\n self.sum_document_by_topic_count[m] = 0.0\n\n # Initializing topics assigned to all words of all documents.\n for m in range(0, self.document_number, 1):\n N = len(self.documents[m])\n self.word_topic_assignment[m] = [-1] * N\n for n in range(0, N,1):\n topic = int(random.uniform(0,1) * self.topic_number)\n self.document_topic_count_matrix[m][topic] += 1.0\n self.topic_term_count_matrix[topic][self.documents[m][n]] += 1.0\n self.sum_topic_by_term_count[topic] += 1.0\n self.word_topic_assignment[m][n] = topic\n self.sum_document_by_topic_count[m] = N", "def set_seed():\n np.random.seed(1423)", "def initialize(self):\n self.n_words = len(self.vocab)\n self.n_docs = len(self.documents)\n\n # Initialize the three count matrices.\n # The (i,j) entry of self.nmz is the number of words in document i assigned to topic j.\n self.nmz = np.zeros((self.n_docs, self.n_topics))\n # The (i,j) entry of self.nzw is the number of times term j is assigned to topic i.\n self.nzw = np.zeros((self.n_topics, self.n_words))\n # The (i)-th entry is the number of times topic i is assigned in the corpus.\n self.nz = np.zeros(self.n_topics)\n\n # Initialize the topic assignment dictionary.\n self.topics = {} # key-value pairs of form (m,i):z\n\n for m in range(self.n_docs):\n for i in self.documents[m]:\n # Get random topic assignment, i.e. 
z is a random integer in the range of topics\n z = np.random.randint(self.n_topics)\n # Increment count matrices\n self.nmz[m,z] += 1\n self.nzw[z,self.documents[m][i]] += 1\n self.nz[z] += 1\n # Store topic assignment\n self.topics[(m,i)] = z", "def set_seed(seed):\n\ttorch.manual_seed(seed)\n\ttorch.cuda.manual_seed_all(seed)\n\tnp.random.seed(seed)", "def initialize_randomness(seed):", "def set_torch_seed(self):\n torch.manual_seed(42)", "def setup_seed(seed):\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n random.seed(seed)\n torch.backends.cudnn.deterministic = True", "def test_initialize(self):\n\n for m in self.models:\n start_docs = turicreate.SArray(self.docs.tail(3))\n m = topic_model.create(\n start_docs,\n num_topics=20,\n method=\"cgs\",\n alpha=0.1,\n beta=0.01,\n num_iterations=1,\n print_interval=1,\n )\n start_topics = turicreate.SFrame(m.topics.head(100))\n m2 = topic_model.create(\n self.docs,\n num_topics=20,\n initial_topics=start_topics,\n method=\"cgs\",\n alpha=0.1,\n beta=0.01,\n num_iterations=0,\n print_interval=1,\n )\n\n # Check that the vocabulary of the new model is the same as\n # the one we used to initialize the model.\n self.assertTrue(\n (start_topics[\"vocabulary\"] == m2.topics[\"vocabulary\"]).all()\n )\n\n # Check that the previously most probable word is still the most\n # probable after 0 iterations, i.e. just initialization.\n old_prob = start_topics[\"topic_probabilities\"].vector_slice(0)\n new_prob = m2.topics[\"topic_probabilities\"].vector_slice(0)\n self.assertTrue(np.argmax(list(old_prob)) == np.argmax(list(new_prob)))" ]
[ "0.79058135", "0.7000231", "0.6963045", "0.6539895", "0.64373744", "0.63780665", "0.63570637", "0.63285434", "0.6327561", "0.63219947", "0.63108164", "0.6261182", "0.6256775", "0.62072074", "0.6185912", "0.6178263", "0.61493504", "0.6143526", "0.6107175", "0.61040074", "0.60863894", "0.60837185", "0.60820645", "0.60803235", "0.6004944", "0.5995366", "0.59949887", "0.59928846", "0.59664834", "0.59576046" ]
0.80255085
0
Query docs with query_samples number of Gibbs sampling iterations.
def query(self,query_samples):
    self.sampled_topics = np.zeros((self.samples,self.N), dtype = np.int)
    for s in xrange(self.samples):
        self.sampled_topics[s,:] = samplers_lda.sampler_query(self.docid, self.tokens, self.topic_seed,
                                                              np.ascontiguousarray(self.tt[:,:,s], dtype=np.float),
                                                              self.N, self.K, self.D, self.alpha, query_samples)
        print("Sample %d queried" % s)
    self.dt = np.zeros((self.D,self.K,self.samples))
    for s in xrange(self.samples):
        self.dt[:,:,s] = samplers_lda.dt_comp(self.docid,self.sampled_topics[s,:], self.N, self.K, self.D, self.alpha)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_samples(self, n_samples):", "def generate_samples(self, n_samples):", "def test_search_samples(self):\n self.login()\n\n page_size = 20\n query = 'batch8'\n\n # hit the API endpoint\n data = {'q': query,\n 'page': 1,\n 'page_size': page_size}\n response = self.client.get(reverse('searchsamples'), data, format='json')\n\n expected = Sample.objects.filter(batch__icontains=query).order_by(\"-received\")\n\n # format queryset into json for returning\n serializer = SampleSerializer(expected, many=True)\n\n context = {\n 'data': serializer.data,\n 'more': False\n }\n\n self.assertEqual(response.json(), context)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def gibbs_sample(self):\n # Initialize the initial state of Markov Chain.\n self.initialize()\n # Gibbs Sampling.\n for iteration_index in range(0, self.iteration_number, 1):\n for m in range(0,self.document_number,1):\n for n in range(0, len(self.documents[m]), 1):\n # Change the state of word_m_n according to it's full conditional probability.\n self.sample_by_full_condition(m=m,n=n)\n print 'iteration:', iteration_index,datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n if iteration_index > self.burn_in and iteration_index % self.update_cycle == 0:\n # Update the distribution after burn in.\n self.update_distribution()\n else:\n pass\n # calculate the final distribution.\n self.get_distribution()", "def search_samples():\n r = req('GET', SUB_API + 'search/samples', params=apply_search_filters())\n samples = []\n for sample in demisto.get(r.json(), 'data.items'):\n samples.append({\n 'ID': demisto.get(sample, 'result'),\n 'Details': demisto.get(sample, 'details')\n })\n demisto.results({\n 'Type': entryTypes['note'],\n 'EntryContext': {'ThreatGrid.Sample': samples},\n 'HumanReadable': tableToMarkdown('ThreatGrid - Sample Search', samples, ['Result', 'Details']),\n 'ContentsFormat': formats['json'],\n 'Contents': r.json()\n })", "def do_query(documents, config_file=None, logger=None, context=None):\n num_documents = documents.count()\n return {\"num_documents\": num_documents}", "def sample(self, num_samples, **kwargs):\n pass", "def evaluate(self, query_samples):\n n = query_samples.shape[0]\n query_bin_proportions, query_bin_assignments = self.__calculate_bin_proportions(query_samples)\n # print(query_bin_proportions)\n different_bins = NDB.two_proportions_z_test(self.bin_proportions, self.ref_sample_size, query_bin_proportions,\n n, significance_level=self.significance_level,\n z_threshold=self.z_threshold)\n ndb = np.count_nonzero(different_bins)\n js = NDB.jensen_shannon_divergence(self.bin_proportions, query_bin_proportions)\n return ndb, js", "def query(self, n_instances):\n\n pool_idx = get_pool_idx(self.X, self.train_idx)\n\n n_instances = min(n_instances, len(pool_idx))\n\n # If the model is not trained, choose random papers.\n if not self.model_trained:\n query_idx, _ = random_sampling(\n None, X=self.X, pool_idx=pool_idx, n_instances=n_instances,\n query_kwargs=self.query_kwargs)\n\n else:\n # Make a query from the pool.\n query_idx, _ = self.learner.query(\n X=self.X,\n pool_idx=pool_idx,\n n_instances=n_instances,\n query_kwargs=self.query_kwargs\n )\n return query_idx", "def get_all(self, q=None, limit=None):\r\n q = q or []\r\n\r\n if limit and limit < 0:\r\n raise ClientSideError(_(\"Limit must be positive\"))\r\n kwargs = _query_to_kwargs(q, storage.SampleFilter.__init__)\r\n f = storage.SampleFilter(**kwargs)\r\n return map(Sample.from_db_model,\r\n 
pecan.request.storage_conn.get_samples(f, limit=limit))", "def get_samples(**project_filters):\n p = get_project(**project_filters)\n return Sample.query.filter_by(_project_id=p.id).all()", "def test_get_all_samples(self):\n self.login()\n\n page_size = 20\n\n # hit the API endpoint for both pages\n for page in range(1, 3):\n\n data = {'page': page,\n 'page_size': page_size}\n response = self.client.get(reverse('searchsamples'), data, format='json')\n\n expected = Sample.objects.all().order_by(\"-received\")\n\n paginator = Paginator(expected, page_size)\n res = paginator.page(page)\n\n # format queryset into json for returning\n serializer = SampleSerializer(res, many=True)\n\n context = {\n 'data': serializer.data,\n 'more': (page == 1)\n }\n\n self.assertEqual(response.json(), context)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def produce_query_batches(self):\n pass", "def query(self, bytes_gen: Iterator[bytes] = None, **kwargs):\n self._call_client(bytes_gen, mode='query', **kwargs)", "def sample(self, bqm, **parameters):\n return self.child.sample(bqm, **parameters)", "def _gibbs_sampling_iteration(self):\n for m, dm in enumerate(self.corpus):\n for n, w_mn in enumerate(dm):\n k = self.z_mn[m, n]\n self.n_mk[m, k] -= 1\n self.n_m[m] -= 1\n self.n_kt[k, w_mn] -= 1\n self.n_k[k] -= 1\n k = self._conditional_z(\n self.n_components, self.alpha, self.beta,\n self.n_mk, self.n_kt, m, w_mn, self.beta_sum, self.n_k)\n self.z_mn[m, n] = k\n self.n_mk[m, k] += 1\n self.n_m[m] += 1\n self.n_kt[k, w_mn] += 1\n self.n_k[k] += 1", "def _collection_samples(collection_query, limit, config):\n just_cid = lambda obj : obj.get('meta', {}).get('concept-id')\n found_collections = scom.search_by_page(\"collections\",\n query=collection_query,\n filters=just_cid,\n page_state=scom.create_page_state(limit=limit),\n config=config)\n return found_collections[:limit]", "def produce_query_batches(self):\n self.__generate_queries()\n return self.__bobs", "def sample(self, bqm, fixed_variables=None, **parameters):\n return super().sample(bqm, fixed_variables=fixed_variables, **parameters)", "def samples(self):\n pass", "def query(self, queries):\n times = []\n for q in queries:\n # print(\"Starting \" + q)\n t_start = time.time()\n self.solr.search(\"text:\" + q, rows=self.n_rows)\n times.append(time.time()-t_start)\n return {\"times_query\": np.mean(times)}", "def get_top_docs(self, query_vectors, n_docs):\n raise NotImplementedError", "def run_sampler(query, nc_lcs=1000, store_path='../data/lc_output/'):\n\n # Unpack & load cadences\n sim_table = generator.simulation.load_table() #complete simulation table\n sim_table_pointing = generator.fetch_cadence_info(sim_table, query['coordinates'][0], query['coordinates'][1], filter_band=query['sample_bands'])\n\n for i in tqdm(range(nc_lcs)):\n sim_table_gen = generator.simple_mjd_sampler(sim_table_pointing, time_separation=query['survey_duration'], mode='random') # should have all the epochs of observation withiin that time frame\n #phase = sim_table_gen['mjd'] - sim_table_gen['mjd'][0] # starting from the first detection\n\n return calc_lc_band(sim_table_gen)\n #Select each filter?\n\n\n\n\n # TODO: Clean\n #model_M = np.zeros(shape=(5000,6))\n #lim_5s = np.zeros(shape=(5000,6)) # no more than >1000 pointings per field (set to 5k to be safe); mark as nan the empty ones\n #snr = np.zeros(shape=(5000,6))\n #sigma_band = np.zeros(shape=(5000,6))\n #for index, filter in tqdm(enumerate(lsst_bands)):\n # 
model_M[0:len(phase[sim_table_gen['filter']==filter]), index] = models.photometricmodel(phase[sim_table_gen['filter']==filter]).fourier_cos(*query['model_theta'][index])\n # model_M[:,index][model_M[:,index]==0] = np.inf # mask as infinity TODO -- numpy mask instead", "def likelihood_sample(self, trial_count):\n count = 0\n\n sum_query_weights = 0\n sum_total_weights = 0\n\n for i in xrange(trial_count):\n values = {}\n\n sample_weight = 1.0\n\n for letter in self.letters:\n prob = self.variables[letter].get_prob(values)\n\n # Fix the evidence variables\n if letter in self.query.evidence:\n values[letter] = self.query.evidence[letter]\n\n if (values[letter]):\n sample_weight *= prob\n else:\n sample_weight *= (1 - prob)\n else:\n values[letter] = self.sample(prob)\n\n if values[self.query.variable]:\n sum_query_weights += sample_weight\n\n sum_total_weights += sample_weight\n\n return float(sum_query_weights) / sum_total_weights", "def sampleDocuments(sample_size = 250):\n\n\tdocuments_csv_filepath = getScriptDirectory() + \"/result/documents.csv\"\n\tsampled_documents_filepath = getScriptDirectory() + \"/result/sampled_documents.csv\"\n\tsample_statistics_filepath = getScriptDirectory() + \"/result/statistics.csv\"\n\n\t# Check if the sample already exists\n\tif(os.path.isfile(sampled_documents_filepath)):\n\t\tprint \"Sample already exists. Moving on to getting metadata.\"\n\n\t\treturn\n\n\t# Check if result folder has been made\n\tresult_folder = getScriptDirectory() + \"/result\"\n\n\tif not os.path.exists(result_folder):\n\t\tos.makedirs(result_folder)\n\n\n\t# Read in the existing documents\n\tdocuments = pandas.read_csv(documents_csv_filepath)\n\n\tunique_mns = pandas.unique(documents['authoritativeMN'])\n\tsampled_documents = pandas.DataFrame({'identifier' : [], 'authoritativeMN' : []})\n\n\tfor mn in unique_mns:\n\t\tdf_subset = documents[documents.authoritativeMN == mn]\n\t\tnrows = df_subset.shape[0]\n\n\t\tprint(\" Member node \" + mn + \" has \" + str(nrows) + \" rows\")\n\n\t\tif nrows is 0:\n\t\t\tcontinue\n\t\telif nrows is 1:\n\t\t\tsampled_rows = [0]\n\t\telse:\n\t\t\tif nrows > sample_size:\n\t\t\t\trows_to_sample = range(0, nrows)\n\t\t\t\tsampled_rows = numpy.random.choice(rows_to_sample, sample_size)\n\t\t\telse:\n\t\t\t\tsampled_rows = range(0, nrows)\n\n\t\tdf_subset_filtered = df_subset.iloc[sampled_rows,:]\n\n\t\tsampled_documents = pandas.concat([sampled_documents, df_subset_filtered])\n\n\tsampled_documents.groupby([\"authoritativeMN\"]).aggregate(['count']).to_csv(sample_statistics_filepath, encoding = \"utf-8\")\n\tsampled_documents.to_csv(sampled_documents_filepath, index = False, encoding = \"utf-8\")\n\n\treturn", "def bm25_algorithm(self, query, k1 = 2, b = 0.75):\n\t\tdocsInfo = self.get_needed_inf(query)\n\t\tavgdl = sum(x['len'] for x in docsInfo) / float(len(docsInfo))\n\t\t# print(docsInfo)\n\t\tdocScore = self.count_score(docsInfo, avgdl, k1, b)\n\t\treturn docScore", "def sample_refine(self, **kwargs):", "def generate_samples(self, config, num_samples):\n tic = time.time()\n\n generator = GMM(**config)\n weights = torch.rand(config.num_components)\n generator.component_weights.set_(weights / weights.sum())\n generator.gaussian.means.set_(torch.randn(config.num_components, config.num_features))\n\n if config.covariance == 'diag':\n generator.gaussian.covars.set_(torch.rand(config.num_components, config.num_features))\n\n samples = generator.sample(num_samples)\n\n toc = time.time()\n print(f\"Generated {num_samples:,} samples in {toc-tic:.2f} 
seconds.\")\n\n return samples", "def run(self, nsamples):\n if isinstance(nsamples, int) and nsamples > 0:\n self.nsamples = nsamples\n else:\n raise RuntimeError(\"UQpy: nsamples must be a positive integer.\")\n\n if self.nsamples <= self.samples.shape[0]:\n raise NotImplementedError('UQpy Error: The number of requested samples must be larger than the existing '\n 'sample set.')\n\n self.run_rss()", "def get_samples(self, min_samples):\n raise NotImplementedError" ]
[ "0.6119865", "0.6119865", "0.6033776", "0.59654444", "0.59214705", "0.5798614", "0.56682414", "0.5615993", "0.5610056", "0.5603541", "0.55298346", "0.55129606", "0.5500481", "0.5475288", "0.5471387", "0.5451142", "0.5441666", "0.5428866", "0.539851", "0.5390029", "0.53861856", "0.5373401", "0.5372711", "0.5370026", "0.5366848", "0.5362989", "0.53608733", "0.5339649", "0.5317546", "0.5298436" ]
0.64173937
0
param observation: nparray with shape (n, windowobs_dim); return: observation nparray with shape (n, disc_dim)
def get_disc_obs(self, observation):
    temp = [self.convertToMocap(s.reshape((self.disc_window, -1))).reshape(-1) for s in observation]
    return np.asarray(temp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def observation(self, obs):\n\n# import pdb;pdb.set_trace()\n return np.moveaxis(obs, 2, 0)", "def observation_space():", "def observation_func(\n self,\n state: np.ndarray,\n observation_noise: Optional[np.ndarray] = None,\n control_vect: Optional[np.ndarray] = None\n ) -> np.ndarray:\n pass", "def _get_observation(self, observation):", "def observation(self, observation):\n final_obs = []\n for obs in observation:\n o = []\n o.extend(obs['left_team'].flatten())\n o.extend(obs['left_team_direction'].flatten())\n o.extend(obs['right_team'].flatten())\n o.extend(obs['right_team_direction'].flatten())\n\n # If there were less than 11vs11 players we backfill missing values with\n # -1.\n # 88 = 11 (players) * 2 (teams) * 2 (positions & directions) * 2 (x & y)\n if len(o) < 88:\n o.extend([-1] * (88 - len(o)))\n\n # ball position\n o.extend(obs['ball'])\n # ball direction\n o.extend(obs['ball_direction'])\n # one hot encoding of which team owns the ball\n if obs['ball_owned_team'] == -1:\n o.extend([1, 0, 0])\n if obs['ball_owned_team'] == 0:\n o.extend([0, 1, 0])\n if obs['ball_owned_team'] == 1:\n o.extend([0, 0, 1])\n\n active = [0] * 11\n if obs['active'] != -1:\n active[obs['active']] = 1\n o.extend(active)\n\n game_mode = [0] * 7\n game_mode[obs['game_mode']] = 1\n o.extend(game_mode)\n final_obs.append(o)\n return np.array(final_obs, dtype=np.float32)", "def observe_np(\n state: np.ndarray,\n impulse: np.ndarray,\n):\n state_shape = state.shape\n if len(state_shape) < 3:\n raise ValueError(\n \"State must be at least 3D (`[width, height, channel]`) but got {}\"\n \"\".format(state.shape))\n\n state_shape = [-1 if dim is None else dim for dim in state_shape]\n\n # State now has single batch dimension, has shape\n # `[batch_size, height, width, channel]`.\n state = np.reshape(state, [-1] + state_shape[-3:])\n\n state = numpy_convolution.convolve_2d(state, impulse, padding=\"SAME\")\n\n # Retrieve original `batch_dimensions`.\n return np.reshape(state, state_shape[:-1] + [impulse.shape[-1]])", "def observe(self, observation):\n # shallow copy observation (deep copy can be expensive)\n obs = observation.copy()\n batch_idx = self.opt.get('batchindex', 0)\n self.observation = obs\n #self.answers[batch_idx] = None\n return obs", "def window_data(X, window_length):\n return X[int(len(X)/2-window_length/2):int(len(X)/2+window_length/2)]", "def calc_observables(samples):\n n = samples.shape[1]\n obs = np.zeros((samples.shape[0], n+n*(n-1)//2))\n \n k = 0\n for i in range(n):\n obs[:,i] = samples[:,i]\n for j in range(i+1,n):\n obs[:,n+k] = samples[:,i] * samples[:,j]\n k += 1\n return obs", "def get_observation_(self):\n obs = np.zeros(self.STATE_SIZE, dtype=np.float)\n obs[:,:,0] = np.array(self.input_img_.data).reshape((self.STATE_SIZE[0:2]))\n\n if self.debug_:\n self.debugger_.show_input_occ_grid(self.input_img_)\n self.debugger_.show_input_image(obs[:,:,0])\n return obs", "def unflatten_n(self, obs):\n dims = [c.flat_dim for c in self.spaces]\n flat_obs = np.split(obs, np.cumsum(dims)[:-1], axis=-1)\n unflat_obs = [\n c.unflatten_n(xi) for c, xi in zip(self.spaces, flat_obs)\n ]\n unflat_obs_grouped = list(zip(*unflat_obs))\n return unflat_obs_grouped", "def _window_function(arr: np.ndarray, border: int = 0) -> np.ndarray:\n ndata = len(arr)\n nwind = ndata - 2 * border\n w = np.zeros(ndata)\n for i in range(nwind):\n w[i + border] = np.sin(np.pi * (i + 1.0) / (nwind + 1.0))\n return w", "def _process_observation(self, observation: Tensor) -> Tensor:\n # partition the input between 
Experts uniformly [batch_size * num_agents, hidden_size] and process it it\n obs = observation.view(self.num_experts * self.batch_size, self.input_size_per_expert)\n obs_processed = self.input_net_norm(self.activation(self.input_net(obs)))\n return obs_processed", "def obs_action_shape(env):\n obs_space = env.observation_space\n return np.append(np.ravel(obs_space.high)+1, [env.action_space.n])", "def _get_observation_dimension(self):\n return len(self._get_observation_np())", "def flatten_n(self, obs):\n obs_regrouped = [[x[i] for x in obs] for i in range(len(obs[0]))]\n flat_regrouped = [\n c.flatten_n(xi) for c, xi in zip(self.spaces, obs_regrouped)\n ]\n return np.concatenate(flat_regrouped, axis=-1)", "def process_observation(self, observation):\n return observation", "def process_observation(self, observation):\n return observation", "def observation_space(self):\n raise NotImplementedError", "def observations(self):\n\n ret = np.zeros_like(self._observations)\n for input in range(len(self._observations)):\n ret[input] = self._observations[input].getSlice(0)\n\n return ret", "def observations(self):\n\n ret = np.zeros_like(self._observations)\n for input in range(len(self._observations)):\n ret[input] = self._observations[input].getSlice(0)\n\n return ret", "def calc_observables(samples):\n n = samples.shape[1]\n obs = np.zeros((samples.shape[0],n*(n-1)//2))\n \n k = 0\n for i in range(n):\n for j in range(i+1,n):\n obs[:,k] = samples[:,i]*samples[:,j]\n k += 1\n return obs", "def preprocess_observation(input_observation, prev_processed_observation, input_dimensions):\n processed_observation = input_observation[35:195] # crop\n processed_observation = downsample(processed_observation)\n processed_observation = remove_color(processed_observation)\n processed_observation = remove_background(processed_observation)\n processed_observation[processed_observation != 0] = 1 # everything else (paddles, ball) just set to 1\n # Convert from 80 x 80 matrix to 1600 x 1 matrix\n #print(processed_observation.shape)\n processed_observation = processed_observation.astype(np.float).ravel()\n processed_observation = processed_observation.reshape((1,input_dimensions))\n #print(processed_observation.shape)\n # subtract the previous frame from the current one so we are only processing on changes in the game\n if prev_processed_observation is not None:\n input_observation = processed_observation - prev_processed_observation\n else:\n input_observation = np.zeros( (1, input_dimensions) )\n # store the previous frame so we can subtract from it next time\n prev_processed_observations = processed_observation\n #print(\"input observation size \", input_observation.shape)\n #print(\"RETURNING %s\" % input_observation.shape)\n return input_observation, prev_processed_observations", "def movingWindow(rawData, n):\n data = np.array([rawData[i:i+n] for i in range(rawData.shape[0] - (n-1))])\n return data", "def dwindow(window):\r\n \r\n h=window\r\n nh=len(h)\r\n lh=(nh-1)/2\r\n stepheight=(h[0]+h[-1])/2.\r\n ramp=float((h[-1]-h[0]))/nh\r\n h2=np.zeros(nh+2)\r\n h2[1:nh+1]=h-stepheight-ramp*np.arange(start=-lh,stop=lh+1,step=1)\r\n \r\n dwin=(h2[2:nh+2]-h2[0:nh])/2.+ramp\r\n dwin[0]=dwin[0]+stepheight\r\n dwin[-1]=dwin[-1]-stepheight\r\n \r\n return dwin", "def observation_spec(self):\n return ArraySpec(shape=(23,), dtype=np.float32)", "def read_window(ds, window):\n\n img = ds.read(window=window)\n img = np.nan_to_num(img)\n return np.dstack(img)", "def observation_space(self):\n pass", "def 
process_observation(self, observation):\n #print(\"start_process_obs\")\n processed_observation = np.zeros((NB_AGENTS, OBSERVATION_SIZE))\n\n goliath_type = getattr(env, 'Terran_Goliath')\n battlecruiser_type = getattr(env, 'Terran_Battlecruiser')\n '''\n goliath and battlecruiser type:\n hp_max: 125\n armor: 1\n cooldown_max: 22\n acceleration: 1\n top_speed: 4.57\n damage_amount: 12\n damage_factor: 1\n weapon_range: 192\n sight_range: 256\n seek_range: 160\n\n hp_max: 500\n energy_max: 200\n armor: 3\n cooldown_max: 30\n acceleration: 27\n top_speed: 2.5\n damage_amount: 25\n damage_factor: 1\n weapon_range: 192\n sight_range: 352\n '''\n #print(\"goliath and battlecruiser type:\")\n #print(goliath_type)\n #print(battlecruiser_type)\n\n for i, agent in enumerate(observation.my_unit):\n if agent.hp <= 0:\n continue\n my_x = agent.pos_x\n my_y = agent.pos_y\n my_type_str = agent.unit_type\n my_type = goliath_type if my_type_str == 'Terran_Goliath' else print(\"error in the my_type\")\n t1 = [agent.hp + agent.shield, agent.cooldown, math.atan2(agent.velocity_y, agent.velocity_x),\n math.sqrt((agent.velocity_x) ** 2 + (agent.velocity_y) ** 2), agent.angle,\n 1 if agent.accelerating else -1 if agent.braking else 0, agent.attacking, agent.is_attack_frame]\n t2 = [self.last_action[i] / (env.action_space[1] - 1)]\n t3 = [i.nearest_obstacle_dist for i in agent.pos_info]\n t4 = []\n t5 = []\n t4_max = []\n t5_max = []\n for idx, enemy in enumerate(observation.en_unit):\n en_type_str = enemy.unit_type\n if en_type_str == 'Terran_Battlecruiser':\n en_type = battlecruiser_type\n else:\n continue \n if enemy.hp <= 0:\n t4.extend([0,0,0,0,0,0,0,0,0,0])\n else:\n t4.extend([math.atan2(enemy.pos_y - my_y, enemy.pos_x - my_x), math.sqrt((enemy.pos_x - my_x) ** 2 + (enemy.pos_y - my_y) ** 2),\n math.atan2(enemy.velocity_y, enemy.velocity_x), math.sqrt((enemy.velocity_x) ** 2 + (enemy.velocity_y) ** 2),\n enemy.cooldown, enemy.hp + enemy.shield, enemy.angle, 1 if agent.accelerating else -1 if agent.braking else 0, agent.attacking, agent.is_attack_frame])\n t4_max.extend([math.pi, 320, math.pi, en_type.top_speed, en_type.cooldown_max, en_type.hp_max + en_type.shield_max, math.pi, 1, 1, 1])\n for idx, ally in enumerate(observation.my_unit):\n if i == idx:\n continue\n if ally.hp <= 0:\n t5.extend([0,0,0,0,0])\n else:\n t5.extend([math.atan2(ally.pos_y - my_y, ally.pos_x - my_x), math.sqrt((ally.pos_x - my_x) ** 2 + (ally.pos_y - my_y) ** 2),\n math.atan2(ally.velocity_y, ally.velocity_x), math.sqrt((ally.velocity_x) ** 2 + (ally.velocity_y) ** 2), ally.hp + ally.shield])\n ally_type = goliath_type\n t5_max.extend([math.pi, 320, math.pi, ally_type.top_speed, ally_type.hp_max + ally_type.shield_max])\n if my_type_str == 'Terran_Goliath':\n t1_max = [my_type.hp_max + my_type.shield_max, 1, math.pi, my_type.top_speed, math.pi, 1, 1, 1]\n else:\n t1_max = [my_type.hp_max + my_type.shield_max, my_type.cooldown_max, math.pi, my_type.top_speed, math.pi, 1, 1, 1]\n #t4_max = [math.pi, 320, math.pi, en_type.top_speed, en_type.cooldown_max, en_type.hp_max + en_type.shield_max, math.pi, 1, 1, 1]\n #t5_max = [math.pi, 320, math.pi, ally_type.top_speed, ally_type.hp_max + ally_type.shield_max]\n\n #t5_max = [32, 32, type.hp_max + type.shield_max, type.cooldown_max,\n #32, 32, type.hp_max + type.shield_max, type.cooldown_max,\n #32, 32, type.hp_max + type.shield_max, type.cooldown_max,\n #32, 32, type.hp_max + type.shield_max, math.pi,\n #32, 32, type.hp_max + type.shield_max, math.pi,\n #32, 32, type.hp_max + 
type.shield_max, math.pi]\n\n t1 = np.divide(t1, t1_max) # runtime warning\n t2 = np.array(t2) / 320\n t3 = np.array(t3) / 320\n t4 = np.divide(t4, t4_max)\n t5 = np.divide(t5, t5_max)\n\n processed_observation[i] = np.concatenate([t1, t2, t3, t4, t5])\n\n self.last_my_unit_cnt.append(np.sum(np.array([u.hp+u.shield for u in observation.my_unit]) > 0))\n self.last_enemy_unit_cnt.append(np.sum(np.array([u.hp+u.shield for u in observation.en_unit]) > 0))\n self.last_enemy_unit_hp.append(sum([u.hp + u.shield for u in observation.en_unit]))\n self.accumulated_observation.append(processed_observation)\n\n\n return processed_observation", "def observationsMatchingBatchDim(self):\n ret = []\n for inp in range(len(self._observations)):\n all_obs = self._observations[inp].getSlice(0)\n processed = all_obs\n # If we have more than 1 observation per state\n if self._batch_dimensions[inp][0] > 1 and len(all_obs) > 0:\n obs_per_state = self._batch_dimensions[inp][0]\n processed = np.zeros((len(all_obs), obs_per_state, ) + all_obs.shape[1:])\n # for every observation, we create a state\n for i in range(all_obs.shape[0]):\n state = np.zeros((obs_per_state,) + all_obs.shape[1:])\n # everything before state_start_idx is all_obs[0]\n state_start_idx = 0\n\n # start index in all_obs\n start_idx = i - obs_per_state\n\n # if we're in the first obs_per_state observations, we need to fill the first\n # -start_idx elements with all_obs[0]\n if start_idx < 0:\n n_to_fill = -start_idx\n state[0:n_to_fill] = np.repeat(all_obs[0][None, :, :], n_to_fill, axis=0)\n\n # start of where to fill the rest\n state_start_idx = n_to_fill\n\n # new start_idx for\n start_idx = 0\n state[state_start_idx:] = all_obs[start_idx+1:i+1]\n processed[i] = state\n\n ret.append(processed)\n return ret" ]
[ "0.6400762", "0.6013691", "0.58461124", "0.564084", "0.55929255", "0.5563894", "0.55621684", "0.54093623", "0.5378842", "0.53710353", "0.5322694", "0.52799237", "0.5259906", "0.5259592", "0.52363986", "0.52172846", "0.5196161", "0.5196161", "0.5165882", "0.51658237", "0.51658237", "0.5157002", "0.5133151", "0.5120078", "0.50907177", "0.50696456", "0.5059369", "0.5032255", "0.502", "0.49989882" ]
0.72958225
0
Tested on Inception and MobileNet. Please edit model_file to suit your model pb file and label_file to specify your label text file. Please see function "detectGate" for further customization.
def addOpenFile():
    model_file = "mobile_graph.pb"
    label_file = "mobile_labels.txt"
    graph = load_graph(model_file)
    filename = filedialog.askopenfilename(initialdir="/",title="Select File",filetypes=[("JPEG Files",".jpeg .jpg")])
    print("Selected file: %s" % filename)
    image = ImageTk.PhotoImage(Image.open(filename))
    canvas.create_image(50,50,anchor=tk.NW,image=image)
    imgfile = filename
    #recognize(filename)
    # this line is the most important: it passes the model file and label file parameters
    detectGate(graph,label_file,filename)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def detectGate(graph,label_file,file_name):\n input_height = 192\n input_width = 192\n input_mean = 0\n input_std = 255\n input_layer = \"Placeholder\"\n output_layer = \"final_result\"\n \n \n \n\n \n\n \n t = read_tensor_from_image_file(\n file_name,\n input_height=input_height,\n input_width=input_width,\n input_mean=input_mean,\n input_std=input_std)\n \n input_name = \"import/\" + input_layer\n output_name = \"import/\" + output_layer\n input_operation = graph.get_operation_by_name(input_name)\n output_operation = graph.get_operation_by_name(output_name)\n\n with tf.Session(graph=graph) as sess:\n results = sess.run(output_operation.outputs[0], {\n input_operation.outputs[0]: t\n })\n results = np.squeeze(results)\n\n top_k = results.argsort()[-5:][::-1]\n labels = load_labels(label_file)\n #for i in top_k:\n # print(labels[i], results[i])\n \n gresults = float(\"{:.4f}\".format(results[top_k[0]]))\n \n labelandimage = \"{0} \\r\\n {1} - {2}\".format(file_name,labels[top_k[0]],gresults)\n \n label = tk.Label(canvas,text=labelandimage,bg=\"gray\")\n label.pack()\n\n print (labels[top_k[0]], results[top_k[0]])", "def __init__(self,\n model_name: str = 'Sample_Model',\n graph_name: str = 'detect.tflite',\n labelmap_name: str = 'labelmap.txt',\n min_conf_threshold: int = 0.5,\n use_tpu: str = '',\n distance_threshold: int = 150,\n debug: bool = False\n ):\n\n self._min_conf_threshold = min_conf_threshold\n self.distance_threshold = distance_threshold\n\n self.do_detect = True\n\n # Import TensorFlow libraries\n # If tflite_runtime is installed, import interpreter from tflite_runtime, else import from regular tensorflow\n # If using Coral Edge TPU, import the load_delegate library\n pkg = importlib.util.find_spec('tflite_runtime')\n if pkg:\n from tflite_runtime.interpreter import Interpreter\n if use_tpu:\n from tflite_runtime.interpreter import load_delegate\n else:\n from tensorflow.lite.python.interpreter import Interpreter\n if use_tpu:\n from tensorflow.lite.python.interpreter import load_delegate\n\n # If using Edge TPU, assign filename for Edge TPU model\n if use_tpu:\n # If user has specified the name of the .tflite file, use that name, otherwise use default 'edgetpu.tflite'\n if (graph_name == 'detect.tflite'):\n graph_name = 'edgetpu.tflite'\n\n # Get path to current working directory\n self._cwd_path = os.getcwd()\n\n # Path to .tflite file, which contains the model that is used for object detection\n path_to_ckpt = os.path.join(self._cwd_path, model_name, graph_name)\n\n # Path to label map file\n path_to_labels = os.path.join(self._cwd_path, model_name, labelmap_name)\n\n # Load the label map\n with open(path_to_labels, 'r') as f:\n self._labels = [line.strip() for line in f.readlines()]\n\n # Have to do a weird fix for label map if using the COCO \"starter model\" from\n # https://www.tensorflow.org/lite/models/object_detection/overview\n # First label is '???', which has to be removed.\n if self._labels[0] == '???':\n del (self._labels[0])\n\n # Load the Tensorflow Lite model.\n # If using Edge TPU, use special load_delegate argument\n if use_tpu:\n self._interpreter = Interpreter(model_path=path_to_ckpt,\n experimental_delegates=[load_delegate('libedgetpu.so.1.0')])\n print(path_to_ckpt)\n else:\n self._interpreter = Interpreter(model_path=path_to_ckpt)\n\n self._interpreter.allocate_tensors()\n\n # Get model details\n self._input_details = self._interpreter.get_input_details()\n self._output_details = self._interpreter.get_output_details()\n self._height = 
self._input_details[0]['shape'][1]\n self._width = self._input_details[0]['shape'][2]\n\n self._floating_model = (self._input_details[0]['dtype'] == np.float32)\n self.input_mean = 127.5\n self.input_std = 127.5\n\n # Variable to hold focal width of used camera lens\n self.focal_value = 0", "def __init__(self, mode, path):\n\n\t\tmodel = load_model('data/model.h5') \n\n\t\tif mode == \"test\":\n\n\t\t\tX_test, Y_test = self._load_dataset(path)\n\t\t\tpreds = model.evaluate(X_test, Y_test)\n\t\t\tprint (\"Loss = \" + str(preds[0]))\n\t\t\tprint (\"Test Accuracy = \" + str(preds[1]))\n\n\n\t\telif mode == \"predict\":\t\t\t\n\t\t\t\n\t\t\tlabel_dict = {'airplane':0, 'automobile':1, 'bird':2, 'cat':3, 'deer':4,\n\t\t\t'dog':5, 'frog':6, 'horse':7, 'ship':8, 'truck':9}\n\n\t\t\timg = image.load_img(path, target_size=(64, 64))\n\t\t\tx = image.img_to_array(img)\n\t\t\tx = np.reshape(x, (1,64,64,3))\n\t\t\ttemp_pred = model.predict(x)\n\t\t\tidx = np.argmax(temp_pred)\n\t\t\t\n\t\t\tprint(\"The object detected in the picture is a(n) : \" + \n\t\t\t\tlist(label_dict.keys())[list(label_dict.values()).index(idx)])", "def detect_model(model_path,filetype = '.bin'):\n paths = [\n os.path.join(model_path,'cameras{}'.format(filetype)),\n os.path.join(model_path,'images{}'.format(filetype)),\n os.path.join(model_path,'points3D{}'.format(filetype)),\n ]\n for path in paths:\n if not os.path.exists(path):\n return False\n return True", "def get_joint_detection_model(model_path, model_type):\n # config_file_path = '/usr/local/bin/config'\n if model_type == 'Foot_detection':\n # with open('/usr/local/bin/src/config.ini','w') as f:\n # f.write('[anchor_parameters]\\nsizes = 32 64 128 256 512 1024\\nstrides = 8 16 32 64 128 256\\nratios = 1.2 1.5 2 2.5 3\\nscales =1 1.5 2\\n')\n\n model, training_model, prediction_model = create_models(\n backbone_retinanet=backbone('resnet50').retinanet,\n num_classes=5,\n weights=None,\n multi_gpu=False,\n freeze_backbone=True,\n lr=1e-3,\n config=read_config_file('/usr/local/bin/Config files/config_foot.ini'))\n\n training_model.load_weights(model_path)\n infer_model = convert_model(training_model, anchor_params = parse_anchor_parameters(read_config_file('/usr/local/bin/Config files/config_foot.ini')))\n\n elif model_type == 'Hand_detection':\n # with open('/usr/local/bin/src/config.ini','w') as f:\n # f.write('[anchor_parameters]\\nsizes = 32 64 128 256 512 1024\\nstrides = 8 16 32 64 128 256\\nratios = 1 1.5 2 2.5 3\\nscales = 1 1.2 1.6\\n')\n\n model, training_model, prediction_model = create_models(\n backbone_retinanet=backbone('resnet50').retinanet,\n num_classes=6,\n weights=None,\n multi_gpu=False,\n freeze_backbone=True,\n lr=1e-3,\n config=read_config_file('/usr/local/bin/Config files/config_hand.ini'))\n training_model.load_weights(model_path)\n infer_model = convert_model(training_model, anchor_params = parse_anchor_parameters(read_config_file('/usr/local/bin/Config files/config_hand.ini')))\n \n return infer_model", "def __init__(self):\n self.classes_to_detect = ['person']\n # Load lebel_map\n self._load_label(PATH_TO_LABELS, NUM_CLASSES, use_disp_name=True)\n\n # Load Tensorflow model into memory\n self.detection_graph = tf.Graph()\n with self.detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(GRAPH_PATH, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n with self.detection_graph.as_default():\n self.sess = 
tf.Session(graph=self.detection_graph, config=tf_config)\n # Definite input and output Tensors for detection_graph\n self.image_tensor = self.detection_graph.get_tensor_by_name(\n 'image_tensor:0')\n # Each box represents a part of the image where a particular\n # object was detected.\n self.detection_boxes = self.detection_graph.get_tensor_by_name(\n 'detection_boxes:0')\n # Each score represent how level of confidence for each of\n # the objects. Score is shown on the result image, together\n # with the class label.\n self.detection_scores = self.detection_graph.get_tensor_by_name(\n 'detection_scores:0')\n self.detection_classes = self.detection_graph.get_tensor_by_name(\n 'detection_classes:0')\n self.num_detections = self.detection_graph.get_tensor_by_name(\n 'num_detections:0')\n\n logger.info('Model graph loaded.')", "def detect_labels(path):\n from google.cloud import vision\n client = vision.ImageAnnotatorClient()\n\n # [START vision_python_migration_label_detection]\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.types.Image(content=content)\n\n response = client.label_detection(image=image)\n labels = response.label_annotations\n ss=labels[0].description \n ss.split('/')[0]\n os.system(\"./ILOVEAPPLE/sort {} {}\".format(ss, path))\n # [END vision_python_migration_label_detection]", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n # INFO:tensorflow: name = input_ids, shape = (?, 180)\n # INFO:tensorflow: name = input_mask, shape = (?, 180)\n # INFO:tensorflow: name = is_real_example, shape = (?,)\n # INFO:tensorflow: name = label_ids, shape = (?,)\n # INFO:tensorflow: name = masked_lm_ids, shape = (?, 180)\n # INFO:tensorflow: name = masked_lm_positions, shape = (?, 180)\n # INFO:tensorflow: name = masked_lm_weights, shape = (?, 180)\n # INFO:tensorflow: name = segment_ids, shape = (?, 180)\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n masked_lm_positions = features[\"masked_lm_positions\"]\n masked_lm_ids = features[\"masked_lm_ids\"]\n masked_lm_weights = features[\"masked_lm_weights\"]\n #next_sentence_labels = features[\"next_sentence_labels\"]\n \n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n \n gcn_embedding = build_gcn_output(adj_mat, w2n, n2w, model.get_embedding_table(), bert_config, is_training)\n \n (masked_lm_loss,\n masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output(\n bert_config, model.get_sequence_output(), gcn_embedding,\n masked_lm_positions, masked_lm_ids, masked_lm_weights)\n\n\n masked_lm_loss = tf.identity(masked_lm_loss, name=\"masked_lm_loss\")\n\n\n total_loss = masked_lm_loss\n\n total_loss = tf.identity(total_loss, name='total_loss')\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint and (not FLAGS.use_horovod or hvd.rank() == 0):\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n 
return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n if not FLAGS.use_horovod or hvd.rank() == 0:\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu, FLAGS.use_horovod)\n\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op)\n return output_spec\n elif mode == tf.estimator.ModeKeys.PREDICT:\n\n #def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n # masked_lm_weights):#, next_sentence_example_loss,\n #next_sentence_log_probs, next_sentence_labels):\n \"\"\"Computes the loss and accuracy of the model.\"\"\"\n #masked_lm_log_probs = tf.reshape(masked_lm_log_probs,\n # [-1, masked_lm_log_probs.shape[-1]])\n masked_lm_predictions = tf.argmax(\n masked_lm_log_probs, axis=-1, output_type=tf.int32)\n # values=next_sentence_example_loss)\n\n predictions = {\n \"input_ids\": tf.reshape(input_ids, [-1]),\n \"predictions\": masked_lm_log_probs\n }\n\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions)\n #eval_metric_ops=eval_metrics)\n return output_spec\n else:\n def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n masked_lm_weights):\n \"\"\"Computes the loss and accuracy of the model.\"\"\"\n masked_lm_log_probs = tf.reshape(masked_lm_log_probs,\n [-1, masked_lm_log_probs.shape[-1]])\n masked_lm_predictions = tf.argmax(\n masked_lm_log_probs, axis=-1, output_type=tf.int32)\n masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])\n masked_lm_ids = tf.reshape(masked_lm_ids, [-1])\n masked_lm_weights = tf.reshape(masked_lm_weights, [-1])\n masked_lm_accuracy = tf.metrics.accuracy(\n labels=masked_lm_ids,\n predictions=masked_lm_predictions,\n weights=masked_lm_weights)\n masked_lm_mean_loss = tf.metrics.mean(\n values=masked_lm_example_loss, weights=masked_lm_weights)\n\n return {\n \"masked_lm_accuracy\": masked_lm_accuracy,\n \"masked_lm_loss\": masked_lm_mean_loss,\n }\n\n eval_metrics = metric_fn(\n masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n masked_lm_weights)\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metric_ops=eval_metrics)\n\n return output_spec", "def main(model_folder, override=False):\n model_description_file = os.path.join(model_folder, \"info.yml\")\n # Read the model description file\n with open(model_description_file) as ymlfile:\n model_description = yaml.safe_load(ymlfile)\n\n project_root = utils.get_project_root()\n # Read the feature description file\n feature_folder = os.path.join(project_root, model_description[\"data-source\"])\n with open(os.path.join(feature_folder, \"info.yml\")) as ymlfile:\n feature_description = yaml.safe_load(ymlfile)\n # Get a list of all used features\n feature_list = features.get_features(feature_description[\"features\"])\n # Get the dimension of the feature vector\n input_features = sum(n.get_dimension() for n in feature_list)\n logger.info(\"Number of features: %i\", input_features)\n\n # Analyze model\n logger.info(model_description[\"model\"])\n if model_description[\"model\"][\"type\"] != 
\"mlp\":\n return\n create_model(\n model_folder,\n model_description[\"model\"][\"type\"],\n model_description[\"model\"][\"topology\"],\n override,\n )\n utils.create_run_logfile(model_folder)", "def detection_cam(network_path, xml_path):\n\n files = os.listdir(network_path)\n\n networks = [load_network(network_path + files[k]) for k in range(len(files))]\n\n cap = cv2.VideoCapture(0)\n\n known_images = load_vector_database(\"P:/coding_weeks/machine_learning/repo/database/training_database.vdb\")\n\n known_labels = []\n\n for label in known_images:\n known_labels.append(label)\n\n while True:\n # Capture image par image\n ret, frame = cap.read()\n\n rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\n boxes, faces = face_detection(rgb, xml_path)\n\n names = []\n\n for face in faces:\n face = cv2.resize(face, (128, 128))\n face = cv2.cvtColor(face, cv2.COLOR_RGB2GRAY)\n vector_list = hog(face, orientations=8, pixels_per_cell=(8, 8), cells_per_block=(1, 1))\n\n vector = numpy.zeros((len(vector_list), 1))\n\n for k in range(len(vector_list)):\n vector[k, 0] = vector_list[k]\n\n # guess = network.forward_propagation(vector)\n #\n # max_index = 0\n # max_value = guess[0, 0]\n #\n # for k in range(len(known_labels)):\n # if guess[k, 0] > max_value:\n # max_index = k\n # max_value = guess[k, 0]\n #\n # if max_value < 0.3:\n # names.append(\"UNKNOWN\" + str(max_value))\n #\n # else:\n # names.append(known_labels[max_index] + str(max_value))\n #\n # print(\"GUESS {} | TRUSTED {}\".format(known_labels[max_index], str(100.0 * max_value)[:5]))\n\n labels = []\n\n for network in networks:\n guess = network.forward_propagation(vector)\n\n max_index = 0\n max_value = guess[0, 0]\n\n for k in range(len(known_labels)):\n if guess[k, 0] > max_value:\n max_index = k\n max_value = guess[k, 0]\n\n labels.append(known_labels[max_index])\n\n labels.sort()\n\n d = {}\n\n for label in labels:\n if label not in d:\n d[label] = 1\n else:\n d[label] += 1\n\n max = 0\n label = \"\"\n\n for l in d:\n if d[l] > max:\n max = d[l]\n label = l\n\n if max >= 0.8 * len(files):\n names.append(label)\n else:\n names.append(\"UNKNOWN\")\n\n for ((x_beginning, y_beginning, face_width, face_height), name) in zip(boxes, names):\n cv2.rectangle(frame, (x_beginning, y_beginning), (x_beginning + face_width, y_beginning + face_height), (0, 255, 0), 2)\n\n cv2.putText(frame, name, (x_beginning, y_beginning), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 2)\n\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n cap.release()\n cv2.destroyAllWindows()", "def load(self, name=\"\"):\n\n self.constructed = True\n if name == \"\":\n name = \"/home/unai/Escritorio/MultiNetwork/model/model\"\n\n network_descriptors = {\"Generic\": GenericDescriptor, \"Decoder\": DecoderDescriptor, \"Discrete\": DiscreteDescriptor, \"Convolution\": ConvolutionDescriptor}\n\n if not os.path.isfile(name):\n print(\"Error at loading the model\")\n return None\n\n f = open(name, \"r+\")\n\n lines = f.readlines()\n\n i = 0\n while lines[i] != \"\\n\": # Each component is stored in a line\n ident, n_inp, kind, n_hidden, layers, init, act, cond_rand, taking, producing, depth, reachable, belows = lines[i][:-1].split(\"_\")\n kwargs = {}\n if int(ident[1:]) > self.last_net:\n self.last_net = int(ident[1:])\n\n self.reachable[ident] = reachable.split(\",\")\n self.comps_below[ident] = belows.split(\",\")\n\n if \"onv\" in kind: # Not working right now\n filters, sizes, layers, strides = layers.split(\"*\")\n sizes = sizes.split(\",\")\n s = 
np.array([[int(sz) for sz in szs.split(\"/\")] for szs in sizes])\n desc = network_descriptors[kind](int(inp), int(outp), int(n_inp), layers.split(\",\"), filters.split(\",\"), [int(x) for x in strides.split(\",\")], s, [int(x) for x in act.split(\",\")], [int(x) for x in init.split(\",\")], kwargs)\n else:\n if len(kwargs) > 0: # Not working right now\n kwargs = kwargs.split(\"-\")\n kwargs[0] = [int(x) for x in kwargs[0].split(\".\") if len(x) > 0]\n kwargs[1] = [int(x) for x in kwargs[1].split(\".\") if len(x) > 0]\n if len(cond_rand) > 0:\n cond_rand = cond_rand.split(\"-\")\n cond_rand[0] = [int(x) for x in cond_rand[0].split(\",\") if len(x) > 0]\n cond_rand[1] = [int(x) for x in cond_rand[1].split(\",\") if len(x) > 0]\n kwargs[\"conds\"] = cond_rand\n desc = network_descriptors[kind](int(taking.split(\",\")[0]), int(producing.split(\",\")[0]), int(n_inp), int(n_hidden), [int(x) for x in layers.split(\",\") if x != \"-1\"], init_functions[[int(x) for x in init.split(\",\") if x != \"-1\"]],\n act_functions[[int(x) for x in act.split(\",\") if x != \"-1\"]], **kwargs)\n\n # print(\"ident\", ident, \"n_inp\", n_inp, \"kind\", kind, \"inp\", inp, \"outp\", outp, \"layers\", layers, \"init\", init, \"act\", act, \"taking\", taking, \"producing\", producing, \"depth\", depth, \"kwargs\", kwargs)\n net = NetworkComp(desc, InOut(size=int(taking.split(\",\")[0]), data_type=taking.split(\",\")[1]), InOut(data_type=producing.split(\",\")[1], size=int(producing.split(\",\")[0])), int(depth))\n\n self.add_net(net, ident)\n i += 1\n\n i += 1\n\n while lines[i] != \"\\n\": # Inputs\n\n ident, size, kind, depth = lines[i].split(\"_\")\n\n self.inputs[ident] = ModelComponent(None, InOut(size=int(size), data_type=kind), int(depth))\n i += 1\n\n i += 1\n\n while lines[i] != \"\\n\": # Outputs\n\n ident, size, kind, depth, belows = lines[i].split(\"_\")\n\n self.outputs[ident] = ModelComponent(InOut(size=int(size), data_type=kind), None, int(depth))\n self.comps_below[ident] = belows.split(\",\")\n i += 1\n\n i += 1\n\n while i < len(lines): # Connections\n name, inp, outp, kind, size = lines[i].split(\"_\")\n\n if int(name[1:]) > self.last_con:\n self.last_con = int(name[1:])\n\n self.connections[name] = Connection(inp, outp, InOut(kind, int(size)), name)\n i += 1\n self.update_below()", "def detect(model, dataset_dir, subset):\n print(\"Running on {}\".format(dataset_dir))\n # Create directory\n if not os.path.exists(RESULTS_DIR):\n os.makedirs(RESULTS_DIR)\n #submit_dir = \"submit_{:%Y%m%dT%H%M%S}\".format(datetime.datetime.now())\n submit_dir = os.path.join(RESULTS_DIR, \"submit\")\n #os.makedirs(submit_dir)\n\n # Read dataset\n img_ids = []\n dataset_dir = os.path.join(dataset_dir, subset)\n image_file = os.listdir(dataset_dir)\n #submission = []\n for img in image_file:\n if not img.startswith('.'):\n img_file = os.path.join(dataset_dir, img)\n image = skimage.io.imread(img_file)\n # If grayscale. Convert to RGB for consistency.\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image)\n # Detect object\n\t\t\t\n r = model.detect([image])[0]\n # Encode image to RLE. 
Returns a string of multiple lines\n source_id = img.split(\".\")[0]\n #rle = mask_to_rle(source_id, r[\"masks\"], r[\"scores\"])\n #submission.append(rle)\n # Save image with masks\n visualize.display_instances(\n image, r['rois'], r['masks'], r['class_ids'],\n class_names, r['scores'],\n #show_bbox=False, show_mask=False,\n title=\"Predictions\")\n plt.savefig(\"{}/{}.png\".format(submit_dir, source_id))\n\n\n\t\t\n # Save to csv file", "def _process_features(self, limit):\n\n if self.testMode:\n g = self.testgraph\n else:\n g = self.graph\n model = Model(g)\n raw = '/'.join((self.rawdir, 'feature'))\n logger.info(\"building labels for features\")\n\n line_counter = 0\n with open(raw, 'r') as f:\n filereader = csv.reader(f, delimiter='\\t', quotechar='\\\"')\n f.readline() # read the header row; skip\n for line in filereader:\n (feature_id, dbxref_id, organism_id, name, uniquename,\n residues, seqlen, md5checksum, type_id, is_analysis,\n timeaccessioned, timelastmodified) = line\n\n feature_key = feature_id\n if re.search(r'[\\|\\s\\[\\]\\{\\}\\\\<\\>]', uniquename):\n # some uniquenames have pipes or other nasty chars!\n # for example: FB||||FBrf0133242|Hugh-u1\n feature_id = self._makeInternalIdentifier(\n 'feature', feature_key)\n else:\n feature_id = 'FlyBase:'+uniquename\n self.idhash['feature'][feature_key] = feature_id\n self.feature_types[feature_key] = type_id\n self.label_hash[feature_id] = name\n\n if feature_key not in self.feature_to_organism_hash:\n self.feature_to_organism_hash[feature_key] = set()\n self.feature_to_organism_hash[feature_key].add(organism_id)\n\n # HACK - FBgn are genes, and therefore classes,\n # all else be individuals\n is_gene = False\n if re.search(r'(FBgn|FBog)', feature_id):\n self.idhash['gene'][feature_key] = feature_id\n is_gene = True\n elif re.search(r'FBa[lb]', feature_id):\n self.idhash['allele'][feature_key] = feature_id\n elif re.search(r'FBt[ip]', feature_id):\n self.idhash['feature'][feature_key] = feature_id\n\n if self.testMode and \\\n int(feature_key) not in self.test_keys['gene'] + \\\n self.test_keys['allele'] + self.test_keys['feature']:\n continue\n\n # now do something with it!\n # switch on type_id\n if name.strip() == '':\n name = uniquename\n\n type_key = type_id\n type_id = self.idhash['cvterm'][type_key]\n\n # skip some features by type\n types_to_skip = [\n 'SO:0000316', # CDS\n 'SO:0000696', # oligos\n 'SO:0000358', # polypeptide\n 'SO:0000234', # transcripts\n ]\n\n type_keys_to_skip = [\n 596, # pcr_product\n 57096, # mature peptide\n 57097, # signal_peptide\n 57270, # repeat masker\n 58210, # alignment\n 59643, # cDNA_clone\n 60006, # uncharacterized_change_in_nucleotide_sequence\n 61351, # oligo\n 61467, # polypeptide_domain\n 257, # exon\n 286, # intron\n ]\n\n organisms_to_skip = [\n 2 # computational result\n ]\n\n if type_id in types_to_skip \\\n or int(type_key) in type_keys_to_skip\\\n or int(organism_id) in organisms_to_skip:\n continue\n\n line_counter += 1\n\n if int(type_key) == 604: # RNAi_reagent\n # TODO add other reagents?\n self.idhash['reagent'][feature_key] = feature_id\n\n # deal with the taxonomy\n # only get taxa for features that are actually used in our set\n tax_internal_id = self._makeInternalIdentifier(\n 'organism', organism_id)\n if organism_id not in self.checked_organisms:\n # will get the NCBITax if necessary\n tax_id = self._get_organism_id(organism_id)\n self.checked_organisms.add(organism_id)\n else:\n tax_id = self.idhash['organism'][organism_id]\n\n tax_label = 
self.label_hash.get(tax_id)\n if not re.search(r'FBog', feature_id) \\\n and re.search(r'Drosophila', tax_label):\n # make only fly things leaders\n model.makeLeader(feature_id)\n\n if not self.testMode \\\n and limit is not None and line_counter > limit:\n pass\n else:\n if is_gene:\n model.addClassToGraph(\n feature_id, name, type_id)\n g.addTriple(\n feature_id, model.object_properties['in_taxon'],\n tax_id)\n else:\n if re.search('FBa[lb]', feature_id):\n type_id = Genotype.genoparts['allele']\n model.addIndividualToGraph(feature_id, name, type_id)\n\n # stop adding what we do not appreciate\n # if is_obsolete == 't':\n # if is_gene:\n # model.addDeprecatedClass(feature_id)\n # else:\n # model.addDeprecatedIndividual(feature_id)\n # self.deprecated_features.add(feature_key)\n\n model.addClassToGraph(tax_id)\n if tax_id != tax_internal_id:\n model.addEquivalentClass(tax_id, tax_internal_id)\n\n model.addComment(\n feature_id,\n self._makeInternalIdentifier('feature', feature_key))\n\n # TODO save checked_organisms fbid to ncbitax mapping to\n # a local file to speed up subsequent searches\n\n return", "def get_detect_model():\n x, conv_layer, conv_vars = convolutional_layers()\n \n # Fourth layer\n W_fc1 = weight_variable([8 * 32 * 128, 2048])\n W_conv1 = tf.reshape(W_fc1, [8, 32, 128, 2048])\n b_fc1 = bias_variable([2048])\n h_conv1 = tf.nn.relu(conv2d(conv_layer, W_conv1,\n stride=(1, 1), padding=\"VALID\") + b_fc1) \n # Fifth layer\n W_fc2 = weight_variable([2048, 1 + 15 * len(common.CHARS)])\n W_conv2 = tf.reshape(W_fc2, [1, 1, 2048, 1 + 15 * len(common.CHARS)])\n b_fc2 = bias_variable([1 + 15 * len(common.CHARS)])\n h_conv2 = conv2d(h_conv1, W_conv2) + b_fc2\n\n return (x, h_conv2, conv_vars + [W_fc1, b_fc1, W_fc2, b_fc2])", "def __init__(self, threshold = 0.65):\n \n p = os.path.dirname(os.path.realpath(__file__)) + '/models/'\n self.face_detector = cv2.dnn.readNetFromTensorflow(p + \"opencv_face_detector_uint8.pb\",\n p + \"opencv_face_detector.pbtxt\")\n self.align_predictor = dlib.shape_predictor(p +'shape_predictor_68_face_landmarks.dat')\n self.gender_svm = joblib.load(p + 'svm_classifier.joblib')\n self.vgg_feature_extractor = VGGFace(include_top = False, input_shape = (224, 224, 3), pooling ='avg')\n self.threshold = threshold", "def __init__(self,\n model_name: Text,\n ckpt_path: Text,\n model_params: Dict[Text, Any] = None):\n self.model_name = model_name\n self.ckpt_path = ckpt_path\n self.params = hparams_config.get_detection_config(model_name).as_dict()\n if model_params:\n self.params.update(model_params)\n self.params.update(dict(is_training_bn=False))\n self.label_map = self.params.get('label_map', None)", "def yolo_detection(raw_image):\n class_ids = []\n confidences = []\n boxes = []\n height , width ,c= raw_image.shape\n blob = cv2.dnn.blobFromImage(raw_image, 0.00392, (416,416), (0,0,0), True, crop=False)\n net.setInput(blob)\n outs = net.forward(output_layers)\n\n for out in outs:\n for detection in out:\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n if confidence > 0.4:\n center_x = int(detection[0]*width)\n center_y = int(detection[1]*height)\n w = int(detection[2]*width)\n h = int(detection[3]*height)\n ##Rectangle Draw\n topleft_x = int(center_x-(w/2))\n topleft_y = int(center_y-(h/2))\n\n boxes.append([topleft_x,topleft_y,w,h])\n confidences.append(float(confidence))\n class_ids.append(class_id)\n indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)\n #DISPLAY DETECTION\n total_detections = len(boxes)\n for i 
in range(total_detections):\n if i in indexes:\n topleft_x, topleft_y, w,h = boxes[i]\n label = detection_classes[class_ids[i]]\n cv2.rectangle(raw_image, (topleft_x,topleft_y), (topleft_x+w,topleft_y+h), (0,100,255), 1)\n cv2.putText(raw_image, label, (topleft_x, topleft_y),cv2.FONT_HERSHEY_COMPLEX,1,(0,165,255))\n\n\n return raw_image", "def classifier():\n\tprint(\"Classifying\")\n\t#initialize important variables\n\tminConfidence = 0.5\n\tthresholdValue = 0.3\n\t\n\t\"\"\"\n\tfile = request.files#['image']\n\tfile.save(\"./classifier_image.jpg\")\n\tframe = cv2.imread(\"./classifier_image.jpg\")\n\t\"\"\"\n\tfile = request.json\n\tframe = np.array(file[\"Frame\"], dtype = \"uint8\") \n\n\t#file = request.files['image']\n\t#file.save(\"./classifier_image.jpg\")\n\t#frame = cv2.imread(\"./classifier_image.jpg\")\n\t#file = request.json\n\t#frame = np.array(file[\"contour\"], dtype=\"uint8\")\n\t\n\t#Get Image dimensions\n\timage = cv2.copyMakeBorder(frame, 30, 30, 30, 30, cv2.BORDER_CONSTANT, value=255)\n\t(H, W) = image.shape[:2]\n\t\n\t#Get the output layers parameters\n\tln = net.getLayerNames()\n\tln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\t\n\t#Create a blob to do a forward pass\n\tblob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)\n\tnet.setInput(blob)\n\t#print(H, W)\n\tlayerOutputs = net.forward(ln)\n\tprint(type(net))\n\tboxes = []\n\tconfidences = []\n\tclassIDs = []\n\tfor output in layerOutputs:\n\t\tprint(\"detecting\")\n\t\t#loop over each detection\n\t\tfor detection in output:\n\t\t\t# extract the class ID and confidence (i.e., probability) of\n\t\t\t# the current object detection\n\t\t\tscores = detection[5:]\n\t\t\tclassID = np.argmax(scores)\n\t\t\tconfidence = scores[classID]\n\n\t\t\t# filter out weak predictions by ensuring the detected\n\t\t\t# probability is greater than the minimum probability\n\t\t\tif confidence > minConfidence:\n\t\t\t\t# scale the bounding box coordinates back relative to the\n\t\t\t\t# size of the image, keeping in mind that YOLO actually\n\t\t\t\t# returns the center (x, y)-coordinates of the bounding\n\t\t\t\t# box followed by the boxes' width and height\n\t\t\t\tbox = detection[0:4] * np.array([W, H, W, H])\n\t\t\t\t(centerX, centerY, width, height) = box.astype(\"int\")\n\n\t\t\t\t# use the center (x, y)-coordinates to derive the top and\n\t\t\t\t# and left corner of the bounding box\n\t\t\t\tx = int(centerX - (width / 2))\n\t\t\t\ty = int(centerY - (height / 2))\n\n\t\t\t\t# update our list of bounding box coordinates, confidences,\n\t\t\t\t# and class IDs\n\t\t\t\tboxes.append([x, y, int(width), int(height)])\n\t\t\t\tconfidences.append(float(confidence))\n\t\t\t\tclassIDs.append(classID)\n\n\t# apply non-maxima suppression to suppress weak, overlapping bounding\n\t# boxes\n\tidxs = cv2.dnn.NMSBoxes(boxes, confidences, minConfidence, thresholdValue)\n\n\t# ensure at least one detection exists\n\tif len(idxs) > 0:\n\t\toutput = json.load(open(outputFile))\n\t\t# loop over the indexes we are keeping\n\t\tfor i in idxs.flatten():\n\t\t\t# extract the bounding box coordinates\n\t\t\t(x, y) = (boxes[i][0], boxes[i][1])\n\t\t\t(w, h) = (boxes[i][2], boxes[i][3])\n\n\t\t\tprint(LABELS[classIDs[i]], output[LABELS[classIDs[i]]]+1, confidences[i])\n\t\t\toutput[LABELS[classIDs[i]]]+=1\n\t\t\n\t\tjson.dump(output, open(outputFile, \"w\"))\n\t\treturn LABELS[classIDs[i]]\n\telse:\n\t\treturn Response(status=200)", "def detect_labels(path):\n client = vision.ImageAnnotatorClient()\n with 
io.open(path, 'rb') as image_file:\n content = image_file.read()\n image = vision.types.Image(content=content)\n response = client.label_detection(image=image)\n labels = response.label_annotations\n print('Labels:')\n return response", "def _analyze(self) -> None:\n self.config['loader']['save_path'] = self.outpufolder\n if self.threshold_entry.get():\n if float(self.threshold_entry.get()) > 1.0 or float(self.threshold_entry.get()) < 0.0:\n raise ValueError('Treshold value should be in range 0.0 and 1.0')\n self.config['threshold'] = float(self.threshold_entry.get())\n self.config['loader']['img_path'] = self.folderpath[0]\n self.config['loader']['extentions'] = self.radio_btn_var.get()\n self.config['model']['name'] = self.radio_btn_network_var.get()\n loader = Loader(self.config)\n self.config['loader']['loader'] = loader\n process = Process(self.config)\n self.config['process'] = process\n model = Model(self.config)\n self.config['model']['model'] = model\n\n if self.config['loader']['img_path'] == 0:\n model.predict_camera()\n else:\n dest = Path(self.config['loader']['save_path'])\n if not dest.exists():\n dest.mkdir()\n\n source = Path(self.config['loader']['img_path'])\n\n if source.is_dir():\n for img_path in tqdm(source.rglob('**/*')):\n if img_path.suffix in self.config['loader']['extentions']:\n model.predict_img(str(img_path))\n elif source.is_file():\n if source.suffix == '.avi':\n model.predict_video(str(source))\n elif source.suffix in self.config['loader']['extentions']:\n model.predict_img(str(source))", "def load_model():\n \n _files = training_file()\n \n predictor_path = _files.model_file(LANDMARKS_WEIGHTS)\n face_rec_model_path = _files.model_file(RESNET_WEIGHTS)\n \n detector = dlib.get_frontal_face_detector()\n sp = dlib.shape_predictor(predictor_path)\n facerec = dlib.face_recognition_model_v1(face_rec_model_path)\n \n return (detector, sp, facerec)", "def __init__(self):\r\n self.label = \"OVL to Feature\"\r\n self.description = \"OVL to Feature converts an OVL file from CPOF, C2PC, GCCS or similar system and converts it to a series of Feature Class for Point, Line, and Polygons.\"\r\n self.canRunInBackground = False", "def detect_labels(path):\n client = vision.ImageAnnotatorClient()\n\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.types.Image(content=content)\n\n response = client.label_detection(image=image)\n labels = response.label_annotations\n #print('Labels:')\n\n #for label in labels:\n # print(label.description)\n return labels", "def detect(self, features):\n pass # TODO", "def recognize_person(known_face_encodings, known_face_names):\n\n # Initialize model for body detection\n detection_graph = tf.Graph()\n with detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n label_map = label_map_util.load_labelmap(PATH_TO_LABELS)\n categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,\n use_display_name=True)\n\n category_index = label_map_util.create_category_index(categories)\n\n # Initialize connect with server\n credentials = pika.PlainCredentials(USER, PASSWORD)\n parameters = pika.ConnectionParameters(IP, PORT, credentials=credentials)\n connection = pika.BlockingConnection(parameters)\n channel = connection.channel()\n\n # Initialize parameters for logging\n last_visible = 
np.array([False for _ in range(0, len(known_face_names))], dtype=np.bool)\n last_visible_time = [datetime.datetime.min for _ in range(0, len(known_face_names))]\n\n last_no_face = False\n last_no_face_time = datetime.datetime.min\n\n last_unknown = False\n last_unknown_time = datetime.datetime.min\n\n last_update_face_base = datetime.datetime(1, 1, 1, 0, 0, 0)\n update_time = time.time() + TIMEOUT_UPDATE\n\n process_this_frame = True\n\n # Get video stream and processed frame\n camera = cv2.VideoCapture(CAMERA_ID)\n\n with detection_graph.as_default():\n with tf.Session(graph=detection_graph) as sess:\n while True:\n # Check for timeout for updating database\n if time.time() > update_time:\n update_time = time.time() + TIMEOUT_UPDATE\n if (datetime.datetime.now() - last_update_face_base).days >= TIME_TO_UPDATE:\n known_face_encodings, known_face_names = read_known_faces()\n last_update_face_base = datetime.datetime.now()\n\n # Get picture from stream\n ret, frame = camera.read()\n small_frame = cv2.resize(frame, (0, 0), fx=1/DECREASING_LEVEL, fy=1/DECREASING_LEVEL)\n rgb_small_frame = small_frame[:, :, ::-1]\n\n if process_this_frame:\n # Get detected objects (bodies and faces)\n image_np_expanded = np.expand_dims(frame, axis=0)\n image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n scores = detection_graph.get_tensor_by_name('detection_scores:0')\n classes = detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = detection_graph.get_tensor_by_name('num_detections:0')\n (boxes, scores, classes, num_detections) = sess.run(\n [boxes, scores, classes, num_detections],\n feed_dict={image_tensor: image_np_expanded})\n\n n_body = 0\n for i in range(0, scores.shape[1]):\n if scores[0][i] > 0.5:\n n_body += 1\n else:\n break\n\n # Get coordinates of box around faces\n face_locations = face_recognition.face_locations(rgb_small_frame)\n\n now_no_face = False\n\n # Check number of detected faces and bodies\n n_faces = len(face_locations)\n if n_body > n_faces:\n # Send alarm if anybody try to hide face\n now_no_face = True\n now = datetime.datetime.now()\n if not last_no_face:\n last_no_face_time = now\n else:\n if last_no_face_time != datetime.datetime.min:\n delta = now - last_no_face_time\n if delta.seconds > TIMEOUT:\n with open(\"logging.txt\", \"a+\") as log_file:\n user_id = None\n send_data = {\"userId\": user_id,\n \"cameraId\": str(CAMERA_ID)}\n json_send_data = json.dumps(send_data)\n\n channel.basic_publish(exchange='', routing_key='users', body=json_send_data)\n\n log_file.write(\"\\nALARM NO FACE at \" + now.strftime(\"%H:%M:%S %d-%m-%Y\"))\n last_no_face_time = datetime.datetime.min\n\n # Get identified faces embeddings\n face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)\n face_names = []\n now_visible = np.array([False for _ in range(0, len(known_face_names))], dtype=np.bool)\n now_unknown = False\n\n # Find similar face from database\n for face_encoding in face_encodings:\n name = \"Unknown\"\n matches = face_recognition.compare_faces(known_face_encodings, face_encoding)\n\n face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)\n best_match_index = np.argmin(face_distances)\n if matches[best_match_index]:\n # Current face was recognized - send record about it\n name = known_face_names[best_match_index]\n now_visible[best_match_index] = True\n now = datetime.datetime.now()\n if not last_visible[best_match_index]:\n 
last_visible_time[best_match_index] = now\n else:\n if last_visible_time[best_match_index] != datetime.datetime.min:\n delta = now - last_visible_time[best_match_index]\n if delta.seconds > TIMEOUT:\n with open(\"logging.txt\", \"a+\") as log_file:\n user_id = name.split('_')[0]\n send_data = {\"userId\": user_id, \"cameraId\": CAMERA_ID}\n json_send_data = json.dumps(send_data)\n\n channel.basic_publish(exchange='', routing_key='users', body=json_send_data)\n\n log_file.write(\n \"\\nRecognize \" + name + \" at \" + now.strftime(\"%H:%M:%S %d-%m-%Y\"))\n last_visible_time[best_match_index] = datetime.datetime.min\n else:\n # Current face was NOT recognized - send alarm about it\n now_unknown = True\n now = datetime.datetime.now()\n if not last_unknown:\n last_unknown_time = now\n else:\n if last_unknown_time != datetime.datetime.min:\n delta = now - last_unknown_time\n if delta.seconds > TIMEOUT:\n with open(\"logging.txt\", \"a+\") as log_file:\n user_id = None\n send_data = {\"userId\": user_id, \"cameraId\": CAMERA_ID}\n json_send_data = json.dumps(send_data)\n\n channel.basic_publish(exchange='', routing_key='users', body=json_send_data)\n\n log_file.write(\"\\nALARM at \" + now.strftime(\"%H:%M:%S %d-%m-%Y\"))\n last_unknown_time = datetime.datetime.min\n\n face_names.append(name)\n\n last_visible = copy.deepcopy(now_visible)\n last_no_face = now_no_face\n last_unknown = now_unknown\n\n process_this_frame = not process_this_frame\n\n # Visualize box around person\n vis_util.visualize_boxes_and_labels_on_image_array(frame, np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores), category_index,\n use_normalized_coordinates=True,\n line_thickness=8, skip_labels=True,\n skip_scores=True)\n\n # Visualize box around face with name\n for (face_top, face_right, face_bottom, face_left), name in zip(face_locations, face_names):\n face_coordinates = {\"top\": face_top * DECREASING_LEVEL,\n \"right\": face_right * DECREASING_LEVEL,\n \"bottom\": face_bottom * DECREASING_LEVEL,\n \"left\": face_left * DECREASING_LEVEL\n }\n\n if name == \"Unknown\":\n color = RED_COLOR\n else:\n color = BLUE_COLOR\n\n # Get face's coordinates\n cv2.rectangle(frame, (face_coordinates[\"left\"], face_coordinates[\"top\"]),\n (face_coordinates[\"right\"], face_coordinates[\"bottom\"]), color, 2)\n\n # Visualize person's name if he was recognized\n text_coordinates = get_text_coordinates(name, face_coordinates)\n cv2.rectangle(frame, (text_coordinates[\"left\"] - 5, face_coordinates[\"bottom\"]),\n (text_coordinates[\"right\"] + 5, text_coordinates[\"bottom\"] + 8),\n color, cv2.FILLED)\n cv2.putText(frame, name, (text_coordinates[\"left\"], text_coordinates[\"bottom\"] + 4),\n TEXT_FONT, 1.0, WHITE_COLOR, 1)\n\n cv2.imshow('Video', frame)\n\n # Press 'q' to quit\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n process_this_frame = not process_this_frame\n\n connection.close()\n camera.release()\n cv2.destroyAllWindows()\n\n return known_face_encodings, known_face_names", "def object_detect(filename):\n cv2.ocl.setUseOpenCL(False)\n just_fname = filename.split(\".\")[0]\n image = cv2.imread('./static/uploads/' + filename)\n bbox, label, conf = cv.detect_common_objects(image)\n output_image = draw_bbox(image, bbox, label, conf)\n plt.imshow(output_image)\n plt.savefig(os.path.join('./static/output/', just_fname + '.png'))\n d = Counter(label)\n if not label:\n return \"No objects detected\"\n labelstr = \", \".join('{} {}'.format(v, k) for k, v in d.items())\n return labelstr", "def 
detect(model, dataset_dir, subset):\n print(\"Running on {}\".format(dataset_dir))\n\n # Create directory\n if not os.path.exists(RESULTS_DIR):\n os.makedirs(RESULTS_DIR)\n submit_dir = \"submit_{:%Y%m%dT%H%M%S}\".format(datetime.datetime.now())\n submit_dir = os.path.join(RESULTS_DIR, submit_dir)\n os.makedirs(submit_dir)\n\n # Read dataset\n dataset = TamperDataset()\n dataset.load_tamper(dataset_dir, subset)\n dataset.prepare()\n # Load over images\n submission = []\n f1 = 0\n print(len(dataset.image_ids))\n # for image_id in dataset.image_ids:\n # # Load image and run detection\n # image = dataset.load_image(image_id)\n # # Detect objects\n # r = model.detect([image], verbose=0)[0]\n\n # # Encode image to RLE. Returns a string of multiple lines\n # source_id = dataset.image_info[image_id][\"id\"]\n # rle = mask_to_rle(source_id, r[\"masks\"], r[\"scores\"])\n # submission.append(rle)\n # # Save image with masks\n\n # N = r[\"scores\"].shape[0]\n # if not N:\n # \tH, W, C = image.shape\n # \tmask = np.zeros((H,W))\n\n \t\n # else:\n\n # H, W, C = image.shape\n\n # idx = np.argsort(-r[\"scores\"])\n # mask = r[\"masks\"][:,:,idx[0]].astype(np.float32)\n\n # bbox = r[\"rois\"][idx[0], :4]\n\n # y1, x1, y2, x2 = bbox\n\n\n\n # mask = dense_crf(image, mask)\n\n # mask = np.where(mask >= 0.5, 255, 0)\n\n # H, W, C = image.shape\n\n # full_mask = np.zeros((H, W))\n # full_mask[y1:y2, x1:x2] = mask\n\n for image_id in dataset.image_ids:\n # Load image and run detection\n image = dataset.load_image(image_id)\n # ela=dataset.load_ela(image_id)\n # Detect objects\n # r = model.detect([image],[ela], verbose=0)[0]\n r = model.detect([image],verbose=0)[0]\n\n # Encode image to RLE. Returns a string of multiple lines\n source_id = dataset.image_info[image_id][\"id\"]\n rle = mask_to_rle(source_id, r[\"masks\"], r[\"scores\"])\n submission.append(rle)\n # Save image with masks\n\n N = r[\"scores\"].shape[0]\n if not N:\n H, W, C = image.shape\n mask = np.zeros((H,W))\n\n \n else:\n idx = np.argsort(-r[\"scores\"])\n mask = r[\"masks\"][:,:,idx[0]].astype(np.uint8)\n\n # save_image(mask, submit_dir, name=dataset.image_info[image_id][\"id\"]) \n\n\n annotation = dataset.load_annaation(image_id)\n annotation = np.where(annotation >= 0.5, 1, 0) \n f = get_FM(mask, annotation)\n f1 += f\n\n print(f1/len(dataset.image_ids))\n\n\n\n\n # save_image(mask, submit_dir, name=dataset.image_info[image_id][\"id\"]) \n\n # visualize.display_instances(\n # image, r['rois'], r['masks'], r['class_ids'],\n # dataset.class_names, r['scores'],\n # show_bbox=False, show_mask=False,\n # title=\"Predictions\")\n # plt.savefig(\"{}/{}.png\".format(submit_dir, dataset.image_info[image_id][\"id\"]))\n\n # Save to csv file\n # submission = \"ImageId,EncodedPixels\\n\" + \"\\n\".join(submission)\n # file_path = os.path.join(submit_dir, \"submit.csv\")\n # with open(file_path, \"w\") as f:\n # f.write(submission)\n print(\"Saved to \", submit_dir)", "def load_model_label(model_path, label_bin_path):\n # load the model and label binarizer\n print(\"[INFO] loading network and label binarizer...\")\n model = load_model(model_path)\n lb = pickle.loads(open(label_bin_path, \"rb\").read())\n return model, lb", "def model_fn(features,labels,mode,params):\n tf.logging.info('*** Features ***')\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s ,shape = %s\" % (name,features[name].shape))\n\n input_ids = features['input_ids']\n input_mask = features['input_mask']\n segment_ids = features['segment_ids']\n label_ids = 
features['label_ids']\n if 'is_real_example' in features:\n is_real_example = tf.cast(features['is_real_example'],dtype=tf.float32)\n else:\n is_real_example = tf.ones(tf.shape(label_ids),dtype=tf.float32)\n\n is_training = (mode == tf_estimator.estimator.ModeKeys.TRAIN)\n\n (total_loss,per_example_loss,probabilities,predictions) = \\\n create_model(albert_config,is_training,input_ids,input_mask,\n segment_ids,label_ids,num_labels,\n use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n if init_checkpoint:\n (assignment_map,initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars,init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint,assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf_estimator.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(total_loss,\n learning_rate,\n num_train_steps,\n num_warmup_steps,use_tpu=False)\n output_spec = tf_estimator.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n )\n elif mode == tf_estimator.estimator.ModeKeys.EVAL:\n def metric_fn(per_example_loss,label_ids,logits,is_real_example):\n accuracy = tf.metrics.accuracy(\n labels=label_ids,predictions=predictions,\n weights=is_real_example\n )\n loss = tf.metrics.mean(\n values=per_example_loss,weights=is_real_example\n )\n return {\n 'eval_accuracy':accuracy,\n 'eval_loss':loss,\n }\n\n eval_metrics = metric_fn(per_example_loss,label_ids,predictions,is_real_example)\n output_spec = tf_estimator.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metric_ops=eval_metrics\n )\n else:\n output_spec = tf_estimator.estimator.EstimatorSpec(\n mode=mode,\n predictions={\n 'probabilities':probabilities,\n 'predictions':predictions,\n },\n )\n\n return output_spec", "def __init__(self,\n model_name: Text,\n ckpt_path: Text,\n batch_size: int = 1,\n use_xla: bool = False,\n min_score_thresh: float = None,\n max_boxes_to_draw: float = None,\n line_thickness: int = None,\n model_params: Dict[Text, Any] = None):\n self.model_name = model_name\n self.ckpt_path = ckpt_path\n self.batch_size = batch_size\n\n self.params = hparams_config.get_detection_config(model_name).as_dict()\n\n if model_params:\n self.params.update(model_params)\n self.params.update(dict(is_training_bn=False))\n self.label_map = self.params.get('label_map', None)\n\n self.signitures = None\n self.sess = None\n self.use_xla = use_xla\n\n self.min_score_thresh = min_score_thresh\n self.max_boxes_to_draw = max_boxes_to_draw\n self.line_thickness = line_thickness" ]
[ "0.68174416", "0.62827706", "0.5758899", "0.5696043", "0.56804377", "0.5678108", "0.5578127", "0.5561133", "0.5548295", "0.55313677", "0.5493445", "0.5478988", "0.5474024", "0.5455986", "0.54302204", "0.54249835", "0.54225004", "0.54104656", "0.5408392", "0.5408276", "0.5397128", "0.5387242", "0.5352594", "0.5337217", "0.5332834", "0.53273493", "0.5323962", "0.5312703", "0.5295689", "0.5289872" ]
0.6969177
0
Rule for the definition of "available quantity" based on 1 quant object.
def qty_available(quant) -> float: return quant.quantity - quant.reserved_quantity
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_product_quantity(item, qty):\n return True", "def sum_availability(val, quant) -> float:\n return val + qty_available(quant)", "def availability(self):\n # TODO: These lookups are highly inefficient. However, we'll wait with optimizing\n # until Django 1.8 is released, as the following feature might make it a\n # lot easier:\n # https://docs.djangoproject.com/en/1.8/ref/models/conditional-expressions/\n # TODO: Test for interference with old versions of Item-Quota-relations, etc.\n # TODO: Prevent corner-cases like people having ordered an item before it got\n # its first variationsadded\n quotalookup = (\n ( # Orders for items which do not have any variations\n Q(variation__isnull=True)\n & Q(item__quotas__in=[self])\n ) | ( # Orders for items which do have any variations\n Q(variation__quotas__in=[self])\n )\n )\n\n paid_orders = OrderPosition.objects.current.filter(\n Q(order__status=Order.STATUS_PAID)\n & quotalookup\n ).count()\n\n if paid_orders >= self.size:\n return Quota.AVAILABILITY_GONE, 0\n\n pending_valid_orders = OrderPosition.objects.current.filter(\n Q(order__status=Order.STATUS_PENDING)\n & Q(order__expires__gte=now())\n & quotalookup\n ).count()\n if (paid_orders + pending_valid_orders) >= self.size:\n return Quota.AVAILABILITY_ORDERED, 0\n\n valid_cart_positions = CartPosition.objects.current.filter(\n Q(expires__gte=now())\n & quotalookup\n ).count()\n if (paid_orders + pending_valid_orders + valid_cart_positions) >= self.size:\n return Quota.AVAILABILITY_RESERVED, 0\n\n return Quota.AVAILABILITY_OK, self.size - paid_orders - pending_valid_orders - valid_cart_positions", "def _update_reserved_quantity(self, product_id, location_id, quantity, lot_id=None, package_id=None, owner_id=None,\n strict=False):\n self = self.sudo()\n rounding = product_id.uom_id.rounding\n quants = self._gather(product_id, location_id, lot_id=lot_id, package_id=package_id, owner_id=owner_id,\n strict=strict)\n reserved_quants = []\n\n if float_compare(quantity, 0, precision_rounding=rounding) > 0:\n # if we want to reserve\n available_quantity = self._get_available_quantity(product_id, location_id, lot_id=lot_id,\n package_id=package_id, owner_id=owner_id, strict=strict)\n if float_compare(quantity, available_quantity, precision_rounding=rounding) > 0:\n raise UserError(_('It is not possible to reserve more products of %s than you have in stock.',\n product_id.display_name))\n elif float_compare(quantity, 0, precision_rounding=rounding) < 0:\n # if we want to unreserve\n available_quantity = sum(quants.mapped('reserved_quantity'))\n # if float_compare(abs(quantity), available_quantity, precision_rounding=rounding) > 0:\n # raise UserError(_('It is not possible to unreserve more products of %s than you have in stock.',\n # product_id.display_name))\n else:\n return reserved_quants\n\n for quant in quants:\n if float_compare(quantity, 0, precision_rounding=rounding) > 0:\n max_quantity_on_quant = quant.quantity - quant.reserved_quantity\n if float_compare(max_quantity_on_quant, 0, precision_rounding=rounding) <= 0:\n continue\n max_quantity_on_quant = min(max_quantity_on_quant, quantity)\n quant.reserved_quantity += max_quantity_on_quant\n reserved_quants.append((quant, max_quantity_on_quant))\n quantity -= max_quantity_on_quant\n available_quantity -= max_quantity_on_quant\n else:\n max_quantity_on_quant = min(quant.reserved_quantity, abs(quantity))\n quant.reserved_quantity -= max_quantity_on_quant\n reserved_quants.append((quant, -max_quantity_on_quant))\n quantity += 
max_quantity_on_quant\n available_quantity += max_quantity_on_quant\n\n if float_is_zero(quantity, precision_rounding=rounding) or float_is_zero(available_quantity,\n precision_rounding=rounding):\n break\n return reserved_quants", "def _validate_qty(values: dict):\n\n if not (quantity := values.get('quantity')):\n raise ValueError(\"Quantity attribute is required.\")\n\n if not (symbol := values.get('symbol')):\n raise ValueError(\"Symbol attribute is required.\")\n\n filter = symbol.filters.lot_size_filter\n # if ONE :=1 and not filter.min_qty <= quantity <= filter.max_qty:\n # ValueError(\"The quantity is not in valid range.\")\n\n if filter.step_size and not is_valid_significant_digits(\n quantity,\n symbol.qty_decimal_precision\n ):\n raise ValueError(\"The quantity precision is not valid.\")\n\n return values", "def quantity_available(quantity):\n available = False\n\n try:\n code = int(quantity)\n if (code in name_given_code.keys()):\n available = True\n except:\n name = quantity.lower()\n if (name in code_given_name.keys()):\n available = True\n\n return available", "def _update_available_quantity(self, product_id, location_id, quantity, lot_id=None, package_id=None, owner_id=None,\n in_date=None):\n if not in_date:\n manual_validate_date_time = self._context.get('manual_validate_date_time', False)\n if manual_validate_date_time:\n in_date = fields.Datetime.from_string(manual_validate_date_time)\n return super(StockQuant, self)._update_available_quantity(product_id, location_id, quantity, lot_id=lot_id,\n package_id=package_id, owner_id=owner_id,\n in_date=in_date)", "def check_stock(self):\n if self.quantity > self.item.quantity:\n return \"%s Please adjust your cart.\" % CartItem.get_insufficient_stock_msg(self.item.quantity)\n return None", "def is_quantity(x):\n return isinstance(x, Quantity)", "def clean(self):\n cleaned_data = super().clean()\n variant = cleaned_data.get('variant')\n quantity = cleaned_data.get('quantity')\n if variant and quantity is not None:\n try:\n variant.check_quantity(quantity)\n except InsufficientStock as e:\n error = forms.ValidationError(\n pgettext_lazy(\n 'Add item form error',\n 'Could not add item. '\n 'Only %(remaining)d remaining in stock.' 
%\n {'remaining': e.item.quantity_available}))\n self.add_error('quantity', error)\n return cleaned_data", "def quantities_available(quantities):\n available = []\n for q in quantities:\n available.append(quantity_available(q))\n return available", "def check_restrictions(self):\n from .signals import determine_availability\n\n responses = determine_availability.send(\n self.item.event, item=self.item,\n variations=[self.to_variation_dict()], context=None,\n cache=self.item.event.get_cache()\n )\n price = self.default_price if self.default_price is not None else self.item.default_price\n for receiver, response in responses:\n if 'available' in response[0] and not response[0]['available']:\n return False\n elif 'price' in response[0] and response[0]['price'] is not None and response[0]['price'] < price:\n price = response[0]['price']\n return price", "def test_product_available_by_stock(self):\n product = ProductFactory(stock_amount=10)\n self.assertEqual(product.left_in_stock, 10)\n self.assertTrue(product.is_available())", "def stock_availability():\n\tdef update_reserved_qty(bin_data, updates):\n\t\tfor k, v in updates.items():\n\t\t\tif k in bin_data:\n\t\t\t\told_reserved = bin_data[k][\"reserved\"]\n\t\t\t\tnew_reserved = old_reserved + v\n\t\t\t\tbin_data[k][\"reserved\"] = new_reserved\n\t\treturn bin_data\n\n\ttry:\n\t\tstock_for_so = []\n\t\tquery = \"\"\"\n\t\t\tselect so.name, so.customer, soi.item_code, (soi.qty - soi.delivered_qty) as qty\n\t\t\tfrom `tabSales Order` so left join `tabSales Order Item` soi\n\t\t\ton so.name = soi.parent\n\t\t\twhere so.status not in ('Closed', 'Stopped') and so.docstatus = 1\n\t\t\tgroup by so.name, soi.item_code order by so.creation\n\t\t\"\"\"\n\t\tso_data = frappe.db.sql(query, as_dict=True)\n\n\t\t# formatting: sales_data => {\"sales_order\": [{\"item_code\": \"qty\"}]}\n\t\tsales_data = {}\n\t\tfor so in so_data:\n\t\t\tif so.get(\"name\") not in sales_data:\n\t\t\t\tsales_data[so.name] = [{so.item_code: so.qty}]\n\t\t\telse:\n\t\t\t\texisting = sales_data[so.name]\n\t\t\t\texisting.append({so.item_code:so.qty})\n\t\t\t\tsales_data[so.name] = existing\n\n\t\t# available stock\n\t\tbin_data = frappe.db.sql(\"\"\"select item_code, sum(actual_qty) as actual_qty\n\t\t\tfrom `tabBin` group by item_code\"\"\")\n\n\t\t# {\"item_code\": {\"bin_qty\", \"reserved\"}}\n\t\tbin_qty = { b[0]:{\"qty\": b[1], \"reserved\": 0} for b in bin_data if b[1] > 0}\n\n\t\t# check sales order wise availability\n\t\tfor so, items in sales_data.items():\n\t\t\tif not frappe.db.get_value(\"Sales Order\", so, \"stock_availability_mail\"):\n\t\t\t\titem_qty = {}\n\t\t\t\tis_stock_available = True\n\t\t\t\tfor item in items:\n\t\t\t\t\titem_code, qty = item.keys()[0], item.values()[0]\n\t\t\t\t\tif item_code in bin_qty:\n\t\t\t\t\t\tif qty <= bin_qty[item_code][\"qty\"] - bin_qty[item_code][\"reserved\"]:\n\t\t\t\t\t\t\titem_qty[item_code] = qty\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tis_stock_available = False\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tis_stock_available = False\n\t\t\t\t\t\tbreak\n\t\t\t\tif is_stock_available:\n\t\t\t\t\t# update_bit_qty_reserved\n\t\t\t\t\tbin_qty = update_reserved_qty(bin_qty, item_qty)\n\t\t\t\t\tstock_for_so.append(so)\n\t\tif len(stock_for_so):\n\t\t\tstock_availability_mail(stock_for_so)\n\texcept Exception as e:\n\t\tfrappe.log_error(message=frappe.get_traceback(), title=\"Stock availability Scheduler failed\")", "def _check_capacity_limit(self, res, amt, balance, meta, raven_vars, dispatch, t, level):\n # note \"amt\" has 
units of AMOUNT not RATE (resource, not resource per second)\n sign = np.sign(amt)\n # are we storing or providing?\n #print('DEBUGG supposed current level:', level)\n if sign < 0:\n # we are being asked to consume some\n cap, meta = self.get_capacity(meta, raven_vars, dispatch, t)\n available_amount = cap[res] - level\n #print('Supposed Capacity, Only calculated ins sign<0 (being asked to consumer)',cap)\n else:\n # we are being asked to produce some\n available_amount = level\n # the amount we can consume is the minimum of the requested or what's available\n delta = sign * min(available_amount, abs(amt))\n return {res: delta}, meta", "def check_quotas(self):\n if self.properties.count() > 0: # NOQA\n raise ValueError('Do not call this directly on items which have properties '\n 'but call this on their ItemVariation objects')\n return min([q.availability() for q in self.quotas.all()])", "def filter_available(self, queryset, name, value):\n if str2bool(value):\n # The 'quantity' field is greater than the calculated 'allocated' field\n # Note that the item must also be \"in stock\"\n return queryset.filter(StockItem.IN_STOCK_FILTER).filter(Q(quantity__gt=F('allocated')))\n else:\n # The 'quantity' field is less than (or equal to) the calculated 'allocated' field\n return queryset.filter(Q(quantity__lte=F('allocated')))", "def test_shortage_quantity(self):\n shortages = self._uncertain_demand.shortages\n avg_order = sum([int(item) for item in self._data_set.values()]) //len(self._data_set)\n variance = [(item - avg_order) for item in self._data_set.values()]\n stdev = pow(sum([pow(j, 2) for j in variance]) / len(self._data_set), 0.5)\n cal_safety = lambda x, y, z: x * y * (z ** 0.5)\n safety_stock = cal_safety(float(self._z_value), float(stdev), float(self._lead_time))\n cal_reorder_level = lambda x, y, z: ((x ** 0.5) * y) + z\n reorder = cal_reorder_level(float(self._lead_time), avg_order, float(safety_stock))\n cal_shortages = lambda l, k, j, x, y: round(abs(((j + (j - k)) - l) + x)) if l < k else 0\n test_shortage = cal_shortages(\n self._quantity_on_hand,\n safety_stock, reorder,\n self._quantity_on_hand,\n self._backlog\n )\n self.assertEqual(shortages, test_shortage)", "def available(self):\n return self.stock_level - self.in_order_book", "def capacity_used(self):\n raise NotImplementedError()", "def stocks(self):\n return self.quantity - self.reserved", "def get_max_quantity(self):\n try:\n additional_json = self.get_additional_json()\n return additional_json['ActionPanel']['isModel']['remainQty']\n except Exception as error:\n return None", "def test_excess_quantity(self):\n excess = self._uncertain_demand.excess_stock\n avg_order = sum([int(item) for item in self._data_set.values()]) //len(self._data_set)\n variance = [(item - avg_order) for item in self._data_set.values()]\n stdev = pow(sum([pow(j, 2) for j in variance]) / len(self._data_set), 0.5)\n cal_safety = lambda x, y, z: x * y * (z ** 0.5)\n safety_stock = cal_safety(float(self._z_value), float(stdev), float(self._lead_time))\n cal_reorder_level = lambda x, y, z: ((x ** 0.5) * y) + z\n reorder = cal_reorder_level(float(self._lead_time), avg_order, float(safety_stock))\n cal_excess = lambda x, y, z: round(x - (y + (y - z)), 0) if x > y + (y - z) else 0\n test_excess = cal_excess(self._quantity_on_hand, reorder, safety_stock)\n self.assertEqual(int(excess), int(test_excess))", "def test_single_quant_assign_correct_quant(self):\n Quant = self.env[\"stock.quant\"]\n\n # Create a bunch of identical quants in the same location\n 
quants = Quant.browse()\n for i in range(5):\n quants |= self.create_quant(self.apple.id, self.test_stock_location_01.id, 10)\n self.assertEqual(len(quants), 5)\n\n quant = quants[2]\n pick = quant.create_picking(self.picking_type_pick, confirm=True, assign=True)\n self.assertEqual(pick.state, \"assigned\")\n self.assertEqual(quant.reserved_quantity, 10)", "def Available(self) -> int:", "def Available(self) -> int:", "def Available(self) -> int:", "def check_restrictions(self):\n if self.properties.count() > 0: # NOQA\n raise ValueError('Do not call this directly on items which have properties '\n 'but call this on their ItemVariation objects')\n from .signals import determine_availability\n\n vd = VariationDict()\n responses = determine_availability.send(\n self.event, item=self,\n variations=[vd], context=None,\n cache=self.event.get_cache()\n )\n price = self.default_price\n for receiver, response in responses:\n if 'available' in response[0] and not response[0]['available']:\n return False\n elif 'price' in response[0] and response[0]['price'] is not None and response[0]['price'] < price:\n price = response[0]['price']\n return price", "def make_quantity(string):\n pass", "def ingredient_used(self, item, quantity):\n logger.info('ReleaseDiscard ingredient used initiated')\n try:\n quantity = Decimal(quantity).quantize(Decimal('0.11'))\n inventory_list = self.Inventory.search([('location', '=', self.kitchen.id)]\n , order=[('batch_number', 'ASC')])\n product = self.Product.search([('name', '=', item),\n ('description', '=', 'Stock'),\n ('type', '=', 'goods')])[-1]\n done = False\n today = date.today()\n for i in inventory_list:\n for j in i.lines:\n if j.product.template.name == item:\n expiry = j.expiry_date\n if expiry:\n if expiry >= today:\n if Decimal(j.quantity) >= Decimal(quantity):\n j.quantity = Decimal(j.quantity) - Decimal(quantity)\n self.move(from_location=self.kitchen, to_location=self.used, item=product,\n quantity=quantity,\n batch_number=i.batch_number)\n self.store_inventory(location=self.used, inventory_stock=j,\n quantity=quantity, batch=i.batch_number)\n j.save()\n self.check_and_delete(i)\n done = True\n else:\n quantity = Decimal(quantity) - Decimal(j.quantity)\n self.move(from_location=self.kitchen, to_location=self.used, item=product,\n quantity=j.quantity, batch_number=i.batch_number)\n self.store_inventory(location=self.used, inventory_stock=j,\n quantity=j.quantity, batch=i.batch_number)\n j.quantity = 0\n j.save()\n self.check_and_delete(i)\n # transaction.cursor.commit()\n i.save()\n if done:\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False" ]
[ "0.66404766", "0.6625551", "0.65198964", "0.6518039", "0.6365163", "0.63387996", "0.6244827", "0.6101653", "0.6098047", "0.607263", "0.5913213", "0.58860075", "0.5862092", "0.5858698", "0.5850056", "0.5831736", "0.5807448", "0.5775316", "0.5773819", "0.5766712", "0.57523704", "0.57513076", "0.57361615", "0.5698054", "0.567565", "0.567565", "0.567565", "0.56635594", "0.56583047", "0.56480104" ]
0.77095944
0
Filter function that flags untracked quants to be filtered.
def filter_untracked(quant) -> bool: return quant.lot_id is None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter(self, *q, **kwargs):\n return self._filter_or_exclude(*q, **kwargs)", "def filter_tracked(self, queryset, name, value):\n q_batch = Q(batch=None) | Q(batch='')\n q_serial = Q(serial=None) | Q(serial='')\n\n if str2bool(value):\n return queryset.exclude(q_batch & q_serial)\n else:\n return queryset.filter(q_batch & q_serial)", "def filter_all(_):\n return True", "def filter_tracked(quant) -> bool:\n return quant.lot_id is not None", "def untracked(self):\n\n # Getting all scopes.\n day_scope = get_scope_by_name(Scope.DAY.value)()\n week_scope = get_scope_by_name(Scope.WEEK.value)()\n month_scope = get_scope_by_name(Scope.MONTH.value)()\n year_scope = get_scope_by_name(Scope.YEAR.value)()\n\n return self.exclude(\n Q(scope=day_scope.name, track_events__created__date__range=(day_scope.start, day_scope.end)) |\n Q(scope=week_scope.name, track_events__created__date__range=(week_scope.start, week_scope.end)) |\n Q(scope=month_scope.name, track_events__created__date__range=(month_scope.start, month_scope.end)) |\n Q(scope=year_scope.name, track_events__created__date__range=(year_scope.start, year_scope.end))\n )", "def filter(self, *args, **kwargs):", "def filter(self, filters):", "def step_filter(self, qs):\n return qs", "def filter(self, *args, **kwargs):\n self._not_support_combined_queries(\"filter\")\n return self._filter_or_exclude(False, args, kwargs)", "def filter_clear(client, args):\n client.context.set_query([])", "def _FilterProtonsAndElectrons(self):\n self.reactants = filter(lambda c: c.compound.kegg_id not in \n ['C00080', 'C05359'], self.reactants)", "def test_filter_with_empty_filters(mockdata, qfilter):\n assert len(qfilter.filter(mockdata)) == 100", "def filter_flag_present(self, req, qs):\n return qs", "def filter(self, *args, **kwargs):\n return FilteredQuery(self, F(*args, **kwargs))", "def filter(self, included_suites=None, included_tests=None,\n included_tags=None, excluded_tags=None):\n self.visit(Filter(included_suites, included_tests,\n included_tags, excluded_tags))", "def reset_filter(self):\n arlen = len(self.variant_list)\n self.filter = np.zeros((arlen, arlen)) == 0", "def pre_filter(self, qs):\n return qs", "def passes_cutoff(self, filter_code):\r\n try:\r\n filterset_dict = {\"all_positions\":[True],\r\n \"all_variants\":[self.is_variant == True],\r\n \"actionable_variants\":[self.is_variant == True, \r\n self.in_blacklist == \"WHITE\", \r\n \"exon\" in self.loc, # and \"exonic_nc\" not in self.loc, \r\n \"syn\" not in self.func, \r\n \"ref\" not in self.func, \r\n self.ir_version == \"14\" or int(self.FAO)>50,\r\n int(self.FRO)+int(self.FAO)>500, \r\n self.FR == \".\"],\r\n \r\n \r\n \"indels\":[self.is_variant == True, self.type == \"del\" or self.type == \"in\" , \"exon\" in self.loc]\r\n }\r\n return all(filterset_dict[filter_code])\r\n \r\n except:\r\n return False", "def exclude(self, *args, **kwargs):\n return self.filter(~F(*args, **kwargs))", "def filter(self, filter_dict):\n pass", "def filter_depleted(self, queryset, name, value):\n if str2bool(value):\n return queryset.filter(quantity__lte=0)\n else:\n return queryset.exclude(quantity__lte=0)", "def filter(q_words):\n filtered_words = [\"how\",\"what\"]\n for word in q_words:\n if word in filtered_words:\n q_words.remove(word)", "def filters(self):\n\t\treturn self.local_filter", "def empty_filter(item, *args, **kwargs):\n return True", "def filter_selection_set(info: GraphQLResolveInfo):\n from graphql import Location\n from .pyutils import unfreeze\n\n excluded_field_nodes = []\n\n def 
_should_include(field_node: FieldNode):\n if not field_node.name:\n # Unknown field_node type\n return True\n if field_node.name.value == \"subscription_id\":\n return True\n\n # Location is a highly nested AST type\n excluded_field_nodes.append(unfreeze(field_node, ignore_types=[Location]))\n return False\n\n info.field_nodes[0].selection_set.selections = [\n x for x in info.field_nodes[0].selection_set.selections if _should_include(x)]\n\n return excluded_field_nodes", "def filter_in_stock(self, queryset, name, value):\n if str2bool(value):\n return queryset.filter(StockItem.IN_STOCK_FILTER)\n else:\n return queryset.exclude(StockItem.IN_STOCK_FILTER)", "def build_feature_filter(self):\n if self.features == [\"*\"]:\n random_iso = list(self.data.keys())[0]\n self.features = set()\n for lang_features in self.data.values():\n self.features |= set(lang_features.keys())\n self.features = list(self.features)\n if self.exclusions:\n self.features = [f for f in self.features if f not in self.exclusions]\n self.feature_filter = set(self.features)", "async def filter(self, **kwargs):\n\n pass", "def filter(self) -> Optional[pulumi.Input['FilterArgs']]:\n return pulumi.get(self, \"filter\")", "def filterTrackingCookies(self):\n return self.__filterTrackingCookies" ]
[ "0.6373299", "0.624071", "0.6236699", "0.60885334", "0.6035009", "0.5997121", "0.596575", "0.5934492", "0.5920947", "0.59118086", "0.58876425", "0.58635277", "0.5812935", "0.5790246", "0.5780941", "0.57521236", "0.57398605", "0.5689652", "0.5659296", "0.5656991", "0.5649688", "0.5648241", "0.56428754", "0.5617588", "0.55845386", "0.5544698", "0.5519244", "0.5496508", "0.5494006", "0.5475426" ]
0.6342002
1
Returns a list of available quantities based on the tracking status. Without tracking, all of the quantities are summed together. With tracking, the quantities are summed per lot. This does not provide a mapping between the quantities and the lot numbers. Use the quantities_per_lot() function for that. availability_by_tracking("none", (stock.quant(1), stock.quant(2))) [56.0] availability_by_tracking("lot", (stock.quant(1), stock.quant(2), stock.quant(3))) [45.2, 34.9]
def availability_by_tracking(tracking, quants) -> List[float]:
    if tracking == "none":
        return [reduce(sum_availability, quants, 0)]
    return list(quantities_per_lot(quants).values())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def quantities_available(quantities):\n available = []\n for q in quantities:\n available.append(quantity_available(q))\n return available", "def sum_availability(val, quant) -> float:\n return val + qty_available(quant)", "def high_availability(self):\n from django.db import connection\n from models import Order, Publishing\n cursor = connection.cursor()\n\n # FIXME: following query do not work for listings composed by multiple product\n cursor.execute(\"\"\"\n SELECT DISTINCT A.listing_id FROM\n (SELECT\n \"listings_publishing\".\"listing_id\",\n \"listings_listingset\".\"product_id\",\n \"listings_listingset\".\"quantity\",\n \"listings_publishing\".\"available_units\"\n AS \"needed\"\n FROM \"listings_publishing\"\n JOIN \"listings_listingset\"\n ON (\"listings_publishing\".\"listing_id\" = \"listings_listingset\".\"listing_id\")\n -- following where selects main publishings\n WHERE \"listings_publishing\".\"pub_date\" IN (\n SELECT MAX(\"A0\".\"pub_date\") AS \"max_date\"\n FROM \"listings_publishing\" AS \"A0\"\n WHERE NOT (\"A0\".\"status\" = %s )\n AND \"listings_publishing\".\"listing_id\" = \"A0\".\"listing_id\"\n AND \"listings_publishing\".\"store_id\" = \"A0\".\"store_id\"\n GROUP BY \"A0\".\"listing_id\", \"A0\".\"store_id\"\n )\n AND NOT EXISTS (\n SELECT *\n FROM \"listings_publishing\" AS \"B0\"\n WHERE \"B0\".\"status\" = %s\n AND \"listings_publishing\".\"listing_id\" = \"B0\".\"listing_id\"\n AND \"listings_publishing\".\"store_id\" = \"B0\".\"store_id\"\n )\n ) AS A\n JOIN\n (SELECT\n \"listings_listingset\".\"product_id\",\n COALESCE(\"warehouse_stock\".\"quantity\", 0) -\n SUM(\"listings_listingset\".\"quantity\" *\n COALESCE(\"listings_order\".\"quantity\", 0)) as \"available\"\n FROM \"listings_listingset\"\n JOIN \"listings_publishing\"\n ON \"listings_publishing\".\"listing_id\" = \"listings_listingset\".\"listing_id\"\n LEFT JOIN \"listings_order\"\n ON (\n \"listings_order\".\"publishing_id\" = \"listings_publishing\".\"id\" AND\n \"listings_order\".\"status\" = %s\n )\n LEFT JOIN \"warehouse_stock\"\n ON \"warehouse_stock\".\"product_id\" = \"listings_listingset\".\"product_id\"\n GROUP BY \"listings_listingset\".\"product_id\", \"warehouse_stock\".\"quantity\") AS B\n ON A.product_id = B.product_id\n WHERE A.needed < DIV(B.available, A.quantity)\n \"\"\", [Publishing.ACTIVE_PUBLISHING, Publishing.ACTIVE_PUBLISHING, Order.ORDER_PENDING])\n\n res = cursor.fetchall()\n return [r[0] for r in res]", "def availability(self):\n # TODO: These lookups are highly inefficient. 
However, we'll wait with optimizing\n # until Django 1.8 is released, as the following feature might make it a\n # lot easier:\n # https://docs.djangoproject.com/en/1.8/ref/models/conditional-expressions/\n # TODO: Test for interference with old versions of Item-Quota-relations, etc.\n # TODO: Prevent corner-cases like people having ordered an item before it got\n # its first variationsadded\n quotalookup = (\n ( # Orders for items which do not have any variations\n Q(variation__isnull=True)\n & Q(item__quotas__in=[self])\n ) | ( # Orders for items which do have any variations\n Q(variation__quotas__in=[self])\n )\n )\n\n paid_orders = OrderPosition.objects.current.filter(\n Q(order__status=Order.STATUS_PAID)\n & quotalookup\n ).count()\n\n if paid_orders >= self.size:\n return Quota.AVAILABILITY_GONE, 0\n\n pending_valid_orders = OrderPosition.objects.current.filter(\n Q(order__status=Order.STATUS_PENDING)\n & Q(order__expires__gte=now())\n & quotalookup\n ).count()\n if (paid_orders + pending_valid_orders) >= self.size:\n return Quota.AVAILABILITY_ORDERED, 0\n\n valid_cart_positions = CartPosition.objects.current.filter(\n Q(expires__gte=now())\n & quotalookup\n ).count()\n if (paid_orders + pending_valid_orders + valid_cart_positions) >= self.size:\n return Quota.AVAILABILITY_RESERVED, 0\n\n return Quota.AVAILABILITY_OK, self.size - paid_orders - pending_valid_orders - valid_cart_positions", "def qty_available(quant) -> float:\n return quant.quantity - quant.reserved_quantity", "def availability(self) -> list:\n availability = self._availability\n return availability", "def _product_available(self, cr, uid, ids, field_names=None, arg=False, context=None):\n if not field_names:\n field_names = []\n if context is None:\n context = {}\n res = {}\n \n for product_loc in self.browse(cr, uid, ids):\n c = context.copy()\n c.update({ 'states': ('done',), 'what': ('in', 'out'), 'location': product_loc.location_id.id})\n stock = self.pool.get('product.product').get_product_available(cr, uid, [product_loc.product_id.id], context=c)\n res[product_loc.id] = stock.get(product_loc.product_id.id, 0.0)\n return res", "def stock_availability():\n\tdef update_reserved_qty(bin_data, updates):\n\t\tfor k, v in updates.items():\n\t\t\tif k in bin_data:\n\t\t\t\told_reserved = bin_data[k][\"reserved\"]\n\t\t\t\tnew_reserved = old_reserved + v\n\t\t\t\tbin_data[k][\"reserved\"] = new_reserved\n\t\treturn bin_data\n\n\ttry:\n\t\tstock_for_so = []\n\t\tquery = \"\"\"\n\t\t\tselect so.name, so.customer, soi.item_code, (soi.qty - soi.delivered_qty) as qty\n\t\t\tfrom `tabSales Order` so left join `tabSales Order Item` soi\n\t\t\ton so.name = soi.parent\n\t\t\twhere so.status not in ('Closed', 'Stopped') and so.docstatus = 1\n\t\t\tgroup by so.name, soi.item_code order by so.creation\n\t\t\"\"\"\n\t\tso_data = frappe.db.sql(query, as_dict=True)\n\n\t\t# formatting: sales_data => {\"sales_order\": [{\"item_code\": \"qty\"}]}\n\t\tsales_data = {}\n\t\tfor so in so_data:\n\t\t\tif so.get(\"name\") not in sales_data:\n\t\t\t\tsales_data[so.name] = [{so.item_code: so.qty}]\n\t\t\telse:\n\t\t\t\texisting = sales_data[so.name]\n\t\t\t\texisting.append({so.item_code:so.qty})\n\t\t\t\tsales_data[so.name] = existing\n\n\t\t# available stock\n\t\tbin_data = frappe.db.sql(\"\"\"select item_code, sum(actual_qty) as actual_qty\n\t\t\tfrom `tabBin` group by item_code\"\"\")\n\n\t\t# {\"item_code\": {\"bin_qty\", \"reserved\"}}\n\t\tbin_qty = { b[0]:{\"qty\": b[1], \"reserved\": 0} for b in bin_data if b[1] > 0}\n\n\t\t# check 
sales order wise availability\n\t\tfor so, items in sales_data.items():\n\t\t\tif not frappe.db.get_value(\"Sales Order\", so, \"stock_availability_mail\"):\n\t\t\t\titem_qty = {}\n\t\t\t\tis_stock_available = True\n\t\t\t\tfor item in items:\n\t\t\t\t\titem_code, qty = item.keys()[0], item.values()[0]\n\t\t\t\t\tif item_code in bin_qty:\n\t\t\t\t\t\tif qty <= bin_qty[item_code][\"qty\"] - bin_qty[item_code][\"reserved\"]:\n\t\t\t\t\t\t\titem_qty[item_code] = qty\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tis_stock_available = False\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tis_stock_available = False\n\t\t\t\t\t\tbreak\n\t\t\t\tif is_stock_available:\n\t\t\t\t\t# update_bit_qty_reserved\n\t\t\t\t\tbin_qty = update_reserved_qty(bin_qty, item_qty)\n\t\t\t\t\tstock_for_so.append(so)\n\t\tif len(stock_for_so):\n\t\t\tstock_availability_mail(stock_for_so)\n\texcept Exception as e:\n\t\tfrappe.log_error(message=frappe.get_traceback(), title=\"Stock availability Scheduler failed\")", "def getGearAvailableForPurchase(self):\n\t\turl = \"https://habitica.com/api/v3/user/inventory/buy\"\n\t\treturn(getUrl(url, self.credentials))", "def stocks(self):\n return self.quantity - self.reserved", "def list_availability_definition(self):\n return self._get(path='availability')", "def yoga_trackings():\n return analytics.select_rows(\n analytics.trackings_table(),\n 0,\n 1)", "def test_all_available(self):\n response = self.client.get(\n reverse('bookings', kwargs={'facility': 'g'}))\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 200)\n\n self.assertEqual(context[\"username\"], self.user)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA)\n self.assertEqual(context[\"display_first_week\"], True)\n\n self.assertEqual(bookings[0].calendar_week,\n self.current_week.calendar_week)\n self.assertEqual(bookings[1].calendar_week,\n self.current_week.calendar_week + 1)\n self.assertEqual(bookings[2].calendar_week,\n self.current_week.calendar_week + 2)\n self.assertEqual(bookings[3].calendar_week,\n self.current_week.calendar_week + 3)\n\n for week in bookings:\n for row in week.rows:\n for block in row.blocks:\n self.assertEqual(type(block), BlockAvailable)", "def available_list(cls, num):\n return cls.objects.filter(status=0)[:num]", "def get_available_vehicles(self):\n return np.sum([self.env.acc[region][self.env.time] for region in self.env.region])", "def unavailable_ids(self):\n from django.db import connection\n from models import Order, Publishing\n cursor = connection.cursor()\n\n cursor.execute(\"\"\"\n SELECT DISTINCT A.listing_id FROM\n (SELECT\n \"listings_publishing\".\"listing_id\",\n \"listings_listingset\".\"product_id\",\n \"listings_publishing\".\"available_units\" * \"listings_listingset\".\"quantity\"\n AS \"needed\"\n FROM \"listings_publishing\"\n JOIN \"listings_listingset\"\n ON \"listings_publishing\".\"listing_id\" = \"listings_listingset\".\"listing_id\"\n -- following where selects main publishings\n WHERE \"listings_publishing\".\"pub_date\" IN (\n SELECT MAX(\"A0\".\"pub_date\") AS \"max_date\"\n FROM \"listings_publishing\" AS \"A0\"\n WHERE NOT (\"A0\".\"status\" = %s )\n AND \"listings_publishing\".\"listing_id\" = \"A0\".\"listing_id\"\n AND \"listings_publishing\".\"store_id\" = \"A0\".\"store_id\"\n GROUP BY \"A0\".\"listing_id\", \"A0\".\"store_id\"\n )\n AND NOT EXISTS (\n SELECT *\n FROM \"listings_publishing\" AS \"B0\"\n WHERE \"B0\".\"status\" = %s\n AND \"listings_publishing\".\"listing_id\" = 
\"B0\".\"listing_id\"\n AND \"listings_publishing\".\"store_id\" = \"B0\".\"store_id\"\n )\n ) AS A\n JOIN\n (SELECT\n \"listings_listingset\".\"product_id\",\n COALESCE(\"warehouse_stock\".\"quantity\", 0) -\n SUM(\"listings_listingset\".\"quantity\" *\n COALESCE(\"listings_order\".\"quantity\", 0)) as \"available\"\n FROM \"listings_listingset\"\n JOIN \"listings_publishing\"\n ON \"listings_publishing\".\"listing_id\" = \"listings_listingset\".\"listing_id\"\n LEFT JOIN \"listings_order\"\n ON (\n \"listings_order\".\"publishing_id\" = \"listings_publishing\".\"id\" AND\n \"listings_order\".\"status\" = %s\n )\n LEFT JOIN \"warehouse_stock\"\n ON \"warehouse_stock\".\"product_id\" = \"listings_listingset\".\"product_id\"\n GROUP BY \"listings_listingset\".\"product_id\", \"warehouse_stock\".\"quantity\") AS B\n ON A.product_id = B.product_id\n WHERE A.needed > B.available\n \"\"\", [Publishing.ACTIVE_PUBLISHING, Publishing.ACTIVE_PUBLISHING, Order.ORDER_PENDING])\n\n res = cursor.fetchall()\n return [r[0] for r in res]", "def required_slots(tracker: Tracker) -> List[Text]:\n\n return [\"bug\", \"beverage\", \"second_person_plural\", \n \"cot_caught\", \"rain_sun\", \"crawfish\", \"halloween\",\n \"sandwich\", \"side_road\", \"shoes\", \"highway\", \"yard_sale\",\n \"rubbernecking\", \"frosting\", \"lawyer\", \"kitty_corner\",\n \"firefly\", \"verge\", \"brew_thru\", \"water_fountain\"]", "def get_product_available(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n\n location_obj = self.pool.get('stock.location')\n warehouse_obj = self.pool.get('stock.warehouse')\n shop_obj = self.pool.get('sale.shop')\n\n user_obj = self.pool.get('res.users').browse(cr, 1, uid)\n\n states = context.get('states',[])\n what = context.get('what',())\n if not ids:\n ids = self.search(cr, uid, [])\n res = {}.fromkeys(ids, 0.0)\n if not ids:\n return res\n\n if context.get('shop', False) and context['shop']:\n warehouse_id = shop_obj.read(cr, 1, int(context['shop']), ['warehouse_id'])['warehouse_id'][0]\n if warehouse_id:\n context['warehouse'] = warehouse_id\n\n if context.get('warehouse', False) and context['warehouse']:\n lot_id = warehouse_obj.read(cr, 1, int(context['warehouse']), ['lot_stock_id'])['lot_stock_id'][0]\n if lot_id:\n context['location'] = lot_id\n\n if context.get('location', False) and context['location']:\n if type(context['location']) == type(1):\n location_ids = [context['location']]\n elif type(context['location']) in (type(''), type(u'')):\n location_ids = location_obj.search(cr, 1, [('name','ilike',context['location'])], context=context)\n else:\n location_ids = context['location']\n else:\n location_ids = []\n #wids = warehouse_obj.search(cr, uid, [], context=context)\n #for w in warehouse_obj.browse(cr, uid, wids, context=context):\n # location_ids.append(w.lot_stock_id.id)\n lids = location_obj.search(cr, 1, [])\n #print(lids, 'todas os locais', user_obj.company_id.id)\n for lo in location_obj.browse(cr, 1, lids, context=context):\n #print(lo.id, lo.company_id, lo.company_ids, user_obj.company_id.id)\n if lo.company_id and user_obj.company_id.id == lo.company_id.id:\n location_ids.append(lo.id)\n else:\n for co in lo.company_ids:\n if user_obj.company_id.id == co.id:\n location_ids.append(lo.id)\n\n # build the list of ids of children of the location given by id\n if context.get('compute_child', True):\n if len(location_ids) == 0:\n raise osv.except_osv(u'Atenção!', u'Não há local de estoque definido para a empresa/unidade!')\n\n child_location_ids = 
location_obj.search(cr, 1, [('location_id', 'child_of', location_ids)])\n location_ids = child_location_ids or location_ids\n\n # this will be a dictionary of the UoM resources we need for conversion purposes, by UoM id\n uoms_o = {}\n # this will be a dictionary of the product UoM by product id\n product2uom = {}\n for product in self.browse(cr, 1, ids, context=context):\n product2uom[product.id] = product.uom_id.id\n uoms_o[product.uom_id.id] = product.uom_id\n\n results = []\n results2 = []\n\n from_date = context.get('from_date',False)\n to_date = context.get('to_date',False)\n date_str = False\n date_values = False\n where = [tuple(location_ids),tuple(location_ids),tuple(ids),tuple(states)]\n if from_date and to_date:\n date_str = \"date>=%s and date<=%s\"\n where.append(tuple([from_date]))\n where.append(tuple([to_date]))\n elif from_date:\n date_str = \"date>=%s\"\n date_values = [from_date]\n elif to_date:\n date_str = \"date<=%s\"\n date_values = [to_date]\n if date_values:\n where.append(tuple(date_values))\n\n prodlot_id = context.get('prodlot_id', False)\n prodlot_clause = ''\n if prodlot_id:\n prodlot_clause = ' and prodlot_id = %s '\n where += [prodlot_id]\n elif 'prodlot_id' in context and not prodlot_id:\n prodlot_clause = 'and prodlot_id is null '\n\n # TODO: perhaps merge in one query.\n if 'in' in what:\n # all moves from a location out of the set to a location in the set\n cr.execute(\n 'select sum(product_qty), product_id, product_uom '\\\n 'from stock_move '\\\n 'where location_id NOT IN %s '\\\n 'and location_dest_id IN %s '\\\n 'and product_id IN %s '\\\n 'and state IN %s ' + (date_str and 'and '+date_str+' ' or '') +' '\\\n + prodlot_clause +\n 'group by product_id,product_uom',tuple(where))\n results = cr.fetchall()\n if 'out' in what:\n # all moves from a location in the set to a location out of the set\n cr.execute(\n 'select sum(product_qty), product_id, product_uom '\\\n 'from stock_move '\\\n 'where location_id IN %s '\\\n 'and location_dest_id NOT IN %s '\\\n 'and product_id IN %s '\\\n 'and state in %s ' + (date_str and 'and '+date_str+' ' or '') + ' '\\\n + prodlot_clause +\n 'group by product_id,product_uom',tuple(where))\n results2 = cr.fetchall()\n\n # Get the missing UoM resources\n uom_obj = self.pool.get('product.uom')\n uoms = map(lambda x: x[2], results) + map(lambda x: x[2], results2)\n if context.get('uom', False):\n uoms += [context['uom']]\n uoms = filter(lambda x: x not in uoms_o.keys(), uoms)\n if uoms:\n uoms = uom_obj.browse(cr, 1, list(set(uoms)), context=context)\n for o in uoms:\n uoms_o[o.id] = o\n\n #TOCHECK: before change uom of product, stock move line are in old uom.\n context.update({'raise-exception': False})\n # Count the incoming quantities\n for amount, prod_id, prod_uom in results:\n amount = uom_obj._compute_qty_obj(cr, 1, uoms_o[prod_uom], amount,\n uoms_o[context.get('uom', False) or product2uom[prod_id]], context=context)\n res[prod_id] += amount\n # Count the outgoing quantities\n for amount, prod_id, prod_uom in results2:\n amount = uom_obj._compute_qty_obj(cr, 1, uoms_o[prod_uom], amount,\n uoms_o[context.get('uom', False) or product2uom[prod_id]], context=context)\n res[prod_id] -= amount\n\n for prod_id in res:\n if isinstance(res[prod_id], D):\n res[prod_id] = float(res[prod_id])\n return res", "def available(self):\n return self.stock_level - self.in_order_book", "def filter_available(self, queryset, name, value):\n if str2bool(value):\n # The 'quantity' field is greater than the calculated 'allocated' field\n # 
Note that the item must also be \"in stock\"\n return queryset.filter(StockItem.IN_STOCK_FILTER).filter(Q(quantity__gt=F('allocated')))\n else:\n # The 'quantity' field is less than (or equal to) the calculated 'allocated' field\n return queryset.filter(Q(quantity__lte=F('allocated')))", "def quantities_by_good_id(self) -> GoodHoldings:\n assert self._quantities_by_good_id is not None, \"GoodHoldings not set!\"\n return copy.copy(self._quantities_by_good_id)", "def required_slots(tracker):\n print(tracker.get_slot('order_number'))\n return [\"order_number\"]", "def available_spots_for(self, tile):\n return [spot for spot in self.available_spots() if self.tile_fits(spot, tile)]", "def available_items(self):\n return [item for item in self.all_items.values() if self.is_available(item)]", "def available_stock_level(cls, sku):\n stock_level_info = cls.stock_level_info(sku)\n return stock_level_info.available", "def getOccupied(self):\n occupiedList = []\n for spot in self.parkingSpots:\n if spot.status == 'occupied':\n occupiedList.append(spot)\n return occupiedList", "def stock_report(request, *args, **kwargs):\n checkout_locations = Location.objects.checkout().order_by('name').notRetiredOrAdmin()\n checkin_locations = Location.objects.checkin().order_by('name').notRetiredOrAdmin()\n washlocation = Location.objects.checkin().get(retired=False, washing_location=True)\n hqLocation = Location.objects.checkin().get(retired=False, headquarters=True)\n\n neighborhoods = list(x.__str__() for x in Neighborhood.objects.all())\n checkout_data = []\n checkin_data = []\n\n for loc in checkout_locations:\n if \"Testing Location\" not in loc.name and \"Test Location\" not in loc.name:\n checkout_data.append(dict(name=loc.name, count=loc.get_estimated_stock(), avg_weekly_usage=loc.avg_weekly_usage_over_past_4_weeks, address=loc.address, latitude=loc.latitude, longitude=loc.longitude, neighborhood=loc.neighborhood, minimum_boxes=loc.minimum_boxes, maximum_boxes=loc.maximum_boxes, error_avg_difference=loc.error_avg_difference))\n\n for loc in checkin_locations:\n if \"Testing Location\" not in loc.name and \"Test Location\" not in loc.name:\n checkin_data.append(dict(name=loc.name, count=loc.get_estimated_stock(), avg_weekly_usage=loc.avg_weekly_usage_over_past_4_weeks, address=loc.address, latitude=loc.latitude, longitude=loc.longitude, neighborhood=loc.neighborhood))\n\n def get_estimated_at_checkout():\n count = sum([l.get_estimated_stock() for l in\n Location.objects.checkout()])\n return count\n\n def get_estimated_at_checkin():\n count = sum([l.get_estimated_stock() for l in\n checkin_locations])\n return count\n\n def get_estimated_checkedout():\n count = sum([s.boxes_currently_out for s in\n Subscription.objects.active()])\n return count\n\n def get_estimated_at_wash():\n count = washlocation.get_estimated_stock()\n return count\n\n def get_estimated_at_hq():\n count = hqLocation.get_estimated_stock()\n return count\n\n cycle_data = {\n \"labels\": [\n \"Clean at restaurants\",\n \"Checked out\",\n \"Dirty at return stations\", #this should expand to 3 categories, two of them is below\n \"Dirty and being washed\",\n \"Clean at G2G HQ\",\n ],\n \"count\": [\n get_estimated_at_checkout(),\n get_estimated_checkedout(),\n get_estimated_at_checkin(),\n get_estimated_at_wash(),\n get_estimated_at_hq(),\n ],\n }\n\n total = get_estimated_at_checkout() + get_estimated_checkedout() + get_estimated_at_checkin() + get_estimated_at_wash() + get_estimated_at_hq()\n\n return render(\n request, 
\"admin/stock_report.html\",\n {\n \"data_json\":json.dumps(dict(\n neighborhoods=neighborhoods,\n checkin=checkin_data, \n checkout=checkout_data,\n total=total,\n clean=get_estimated_at_hq(),\n wash=get_estimated_at_wash(),\n cycle=cycle_data\n ))}\n )", "def inventoryByYear(self):\n\n\t\tcurrentYear = date.today().year\n\t\tinventory = {}\n\n\t\tfor bottle in self.bottles:\n\t\t\tif bottle.consumption == None:\n\t\t\t\tholdYear = max(currentYear, bottle.hold_until)\n\t\t\t\tif holdYear not in inventory:\n\t\t\t\t\tinventory[holdYear] = 1\n\n\t\t\t\telse:\n\t\t\t\t\tinventory[holdYear] += 1\n\n\t\treturn inventory", "def required_slots(tracker: Tracker) -> List[Text]:\n print(\"required_slots(tracker: Tracker)\")\n return [\"name\",\"roomcount\",\"roomtype\"]", "def yoga_habits_trackings():\n return analytics.select_rows(\n analytics.habits_trackings_table(),\n 0,\n 1)" ]
[ "0.59363973", "0.5828805", "0.54700196", "0.5159181", "0.5094753", "0.5089247", "0.50791717", "0.5024315", "0.48572624", "0.48141047", "0.47759804", "0.475829", "0.47470802", "0.47466728", "0.47046915", "0.4694997", "0.46340385", "0.45999828", "0.4578999", "0.45616272", "0.45428926", "0.45280057", "0.45236886", "0.45137665", "0.44924414", "0.4483456", "0.44629532", "0.44566163", "0.445202", "0.44417945" ]
0.86029047
0
minimax is a recursive function that uses the current node to search child nodes until a leaf is found. The function keeps track of the alpha and beta variables as it traverses the tree. When a leaf node is found, the leaf_nodes variable is updated to keep track of the number of leaves examined.
def minimax(node, isMaximizingPlayer, alpha, beta):
    if node.name.isdigit():  # leaf nodes contain numbers not letters
        global leaf_nodes
        leaf_nodes += 1
        return int(node.name)
    if isMaximizingPlayer:  # max node
        bestVal = -math.inf
        for child in node.children:
            value = minimax(child, False, alpha, beta)
            bestVal = max(bestVal, value)
            alpha = max(alpha, bestVal)
            if beta <= alpha:
                break
        return bestVal
    else:  # min node
        bestVal = math.inf
        for child in node.children:
            value = minimax(child, True, alpha, beta)
            bestVal = min(bestVal, value)
            beta = min(beta, bestVal)
            if beta <= alpha:
                break
        return bestVal
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def minimax(node: GameState, depth: int, alpha: int, beta: int, maximizingPlayer: bool):\n\n # static evaluation of a GameState if at the bottom of the tree\n if depth == 0:\n node.buildGameStateFromID()\n if node.possible:\n node.compute_score()\n return node.score\n\n # recursively computing the best move\n if maximizingPlayer:\n node.score = -1000\n for child in node.children:\n curVal = minimax(child, depth-1, alpha, beta, False)\n node.score = max(node.score, curVal)\n alpha = max(alpha, curVal)\n if beta <= alpha:\n break\n\n return node.score\n\n else:\n node.score = 1000\n for child in node.children:\n curVal = minimax(child, depth-1, alpha, beta, True)\n node.score = min(node.score, curVal)\n beta = min(beta, curVal)\n if beta <= alpha:\n break\n\n return node.score", "def minimax_alpha_beta(node,alpha,beta,depth=0):\n node.display(2,\" \"*depth,\"minimax_alpha_beta(\",node.name,\", \",alpha, \", \", beta,\")\")\n best=None # only used if it will be pruned\n if node.isLeaf():\n node.display(2,\" \"*depth,\"returning leaf value\",node.evaluate())\n return node.evaluate(),None\n elif node.isMax:\n for C in node.children():\n score,path = minimax_alpha_beta(C,alpha,beta,depth+1)\n if score >= beta: # beta pruning\n node.display(2,\" \"*depth,\"pruned due to beta=\",beta,\"C=\",C.name)\n return score, None \n if score > alpha:\n alpha = score\n best = C.name, path\n node.display(2,\" \"*depth,\"returning max alpha\",alpha,\"best\",best)\n return alpha,best\n else:\n for C in node.children():\n score,path = minimax_alpha_beta(C,alpha,beta,depth+1)\n if score <= alpha: # alpha pruning\n node.display(2,\" \"*depth,\"pruned due to alpha=\",alpha,\"C=\",C.name)\n return score, None\n if score < beta:\n beta=score\n best = C.name,path\n node.display(2,\" \"*depth,\"returning min beta\",beta,\"best=\",best)\n return beta,best", "def minimax(self, board, depth, self_color, alpha, beta):\r\n\r\n # Reached terminal node, evaluate and pass up the tree.\r\n # Terminal nodes are either those at max depth, or the last ones we have time for.\r\n if depth == self.minimax_max_depth or (time.time() - self.start_time > self.time_limit and depth != 0):\r\n return self.evaluate(board)\r\n\r\n # Reached transient node, keep searching.\r\n else:\r\n\r\n possible_moves = self.find_possible_moves(board, self_color)\r\n\r\n if possible_moves:\r\n\r\n # Self makes a move\r\n if depth % 2 == 0:\r\n\r\n children_nodes = {}\r\n value = -10000000\r\n\r\n for move in possible_moves:\r\n\r\n updated_board = self.update_board(board, self_color, move)\r\n children_nodes[move] = self.minimax(updated_board, depth + 1, self.get_opponent_color(self_color), alpha, beta)\r\n\r\n temp_value = max(children_nodes[move], alpha)\r\n\r\n # Alpha-beta pruning\r\n if temp_value > value:\r\n value = temp_value\r\n if temp_value >= beta:\r\n break\r\n if temp_value > alpha:\r\n alpha = temp_value\r\n\r\n if depth == 0:\r\n # Tree has been searched, return all possible moves with their respective worth\r\n return children_nodes\r\n\r\n # Else, just pass current node's worth up the tree\r\n return value\r\n\r\n # Opponent makes a move\r\n else:\r\n\r\n children_nodes = {}\r\n value = 10000000\r\n\r\n for move in possible_moves:\r\n\r\n updated_board = self.update_board(board, self_color, move)\r\n children_nodes[move] = self.minimax(updated_board, depth + 1, self.get_opponent_color(self_color), alpha, beta)\r\n\r\n temp_value = min(children_nodes[move], beta)\r\n\r\n # Alpha-beta pruning\r\n if temp_value < value:\r\n value = 
temp_value\r\n if temp_value <= alpha:\r\n break\r\n if temp_value < beta:\r\n beta = temp_value\r\n\r\n # Else, just pass current node's worth up the tree\r\n return value\r\n\r\n # Return something even if all hell freezes over.\r\n return 0", "def alphabeta(node, depth, alpha, beta, maximizingPlayer, board):\n if depth == 0:\n return node\n\n if maximizingPlayer: # This is us\n best_node = float(\"-inf\"), []\n\n # Get child nodes and put them in the proper form\n possible_moves = get_possible_moves_white(node, board)\n child_nodes = []\n for set_moves in possible_moves:\n child_nodes.append([get_total_score(set_moves, board), set_moves])\n\n if len(child_nodes) == 0:\n return [float(\"-inf\"), node[1]] # We get put check mate (crash if node[1] == [])\n\n best_nodes_list = []\n i = randrange(len(child_nodes))\n for child in child_nodes[i:] + child_nodes[:i]:\n rec_child = alphabeta(child, depth - 1, alpha, beta, False, board)\n if best_node[0] < rec_child[0]:\n best_nodes_list = [rec_child]\n best_node = rec_child\n alpha = max(alpha, best_node[0])\n if alpha >= beta:\n if alpha == float(\"inf\"):\n return best_node\n return [float(\"inf\"), []] # Pruning\n elif best_node[0] == rec_child[0]:\n best_nodes_list.append(rec_child)\n return random.choice(best_nodes_list)\n else: # minimizingPlayer\n worst_node = float(\"inf\"), []\n\n # Get child nodes and put them in the proper form\n possible_moves = get_possible_moves_black(node, board)\n child_nodes = []\n for set_moves in possible_moves:\n child_nodes.append([get_total_score(set_moves, board), set_moves])\n\n if len(child_nodes) == 0:\n if is_checkmate(node[1], board):\n return [float(\"inf\"), node[1]] # Check-mate\n else:\n return [float(\"-inf\"), node[1]] # Draw\n\n worst_nodes_list = []\n for child in child_nodes:\n rec_child = alphabeta(child, depth - 1, alpha, beta, True, board)\n if worst_node[0] > rec_child[0]:\n worst_nodes_list = [rec_child]\n worst_node = rec_child\n beta = min(beta, worst_node[0])\n if alpha >= beta:\n return [float(\"-inf\"), []] # Pruning\n elif worst_node[0] == rec_child[0]:\n worst_nodes_list.append(rec_child)\n return random.choice(worst_nodes_list)", "def minimax_ab(\r\n board: np.ndarray, alpha: np.int, beta: np.int, player: BoardPiece, depth: np.int, max_player: bool, heuristic\r\n) -> np.array:\r\n from agents.connectn.common import apply_player_action\r\n from agents.connectn.common import pretty_print_board\r\n\r\n # other_player variable is used to alternate player pieces as depth increases\r\n if player == 1:\r\n other_player = 2\r\n else:\r\n other_player = 1\r\n\r\n # should define which are columns are free here to reduce computation time\r\n node_num = np.shape(board)[1] # number of moves initially allowed\r\n\r\n # pretty_print_board(board)\r\n\r\n if depth == 0: # if a terminal node, then use the heuristic\r\n return heuristic(board, player)\r\n\r\n if max_player: # maximizing player level\r\n node_value = np.full(node_num, int(-1e10)) # empty array for node values, negative inf\r\n for node in range(node_num): # for each child node, maximize\r\n board_copy = apply_player_action(board, node, player, copy=True)\r\n comparison = np.min(minimax_ab(board_copy, alpha, beta, player, depth-1, False, heuristic))\r\n if comparison > node_value[node]:\r\n node_value[node] = comparison\r\n # alpha-beta pruning:\r\n if comparison >= beta:\r\n return node_value\r\n if comparison > alpha:\r\n alpha = comparison\r\n return node_value\r\n\r\n else: # minimizing player level\r\n node_value = 
np.full(node_num, int(1e10)) # empty array for node values, positive inf\r\n for node in range(node_num): # for each child node, minimize\r\n board_copy = apply_player_action(board, node, other_player, copy=True)\r\n comparison = np.max(minimax_ab(board_copy, alpha, beta, other_player, depth-1, True, heuristic))\r\n if comparison < node_value[node]:\r\n node_value[node] = comparison\r\n # alpha-beta pruning\r\n if comparison <= alpha:\r\n return node_value\r\n if comparison < beta:\r\n beta = comparison\r\n return node_value", "def alphabeta(self, game, depth, alpha=float(\"-inf\"), beta=float(\"inf\"), maximizing_player=True):\n if self.time_left() < self.TIMER_THRESHOLD:\n raise Timeout()\n\n \"\"\n legal_moves = game.get_legal_moves()\n\n best_move = (-1, -1)\n if maximizing_player:\n score = float(\"-inf\")\n else:\n score = float(\"inf\")\n\n #At bottom of minimax tree\n if depth == 1:\n for each_move in legal_moves:\n new_game = game.forecast_move(each_move)\n new_score = self.score(new_game, self)\n if maximizing_player:\n #Pruning of max nodes\n if new_score >= beta:\n return beta, each_move\n if new_score > score:\n score = new_score\n best_move = each_move\n else:\n #Pruning of min nodes\n if new_score <= alpha:\n return alpha, each_move\n if new_score < score:\n score = new_score\n best_move = each_move\n\n return score, best_move\n\n #Not at bottom of minimax tree\n for each_move in legal_moves:\n #Return each_move that gets the highest score\n new_game = game.forecast_move(each_move)\n new_score , new_move = self.alphabeta(new_game, depth-1, alpha, beta, not maximizing_player)\n if maximizing_player:\n #Pruning of max nodes\n if new_score >= beta:\n return beta, each_move\n elif new_score > alpha:\n #Update alpha for pruning in next for iteration\n alpha = new_score\n\n if new_score > score:\n score = new_score\n best_move = each_move\n else:\n #Pruning of min nodes\n if new_score <= alpha:\n return alpha, each_move\n elif new_score < beta:\n #Update beta for pruning in next for iteration\n beta = new_score\n\n if new_score < score:\n score = new_score\n best_move = each_move\n\n return score, best_move", "def minimax(self, state, is_max, depth = 0, alpha=float(\"-inf\"), beta=float(\"inf\")):\n # Terminate \n if self.terminate(depth, state):\n return None, Utility.utility_function(state) \n \n # Recursive\n possible_moves = state.current_player_possible_moves()\n return self.search(is_max, possible_moves, state, depth, alpha, beta)", "def alphabeta_value(self, game, depth, alpha=-math.inf, beta=math.inf):\n if self.time_left() < self.TIMER_THRESHOLD:\n raise SearchTimeout()\n\n # If the current node is a terminal node or we've reached the depth limit,\n # return the score of the current node.\n if depth == 0 or game.is_winner(self) or game.is_loser(self):\n return self.score(game, self)\n\n moves = game.get_legal_moves(game.active_player)\n scores = []\n\n # If this is a max node, search for the highest score.\n if game.active_player == self:\n\n for move in moves:\n score = self.alphabeta_value(game.forecast_move(move), depth - 1, alpha, beta)\n scores.append(score)\n # In a max node, the parent min node won't use a score\n # greater than beta, so there's no point in looking for\n # a larger one.\n if score >= beta:\n return score\n # Update alpha so that next child I search can prune\n # itself if it knows it's going to return a lower value.\n if score > alpha:\n alpha = score\n\n return max(scores)\n # If this is a min node, search for the lowest score.\n else:\n\n for 
move in moves:\n score = self.alphabeta_value(game.forecast_move(move), depth - 1, alpha, beta)\n scores.append(score)\n # In a min node, the parent max node won't use a score\n # lower than alpha, so there's no point in looking for\n # a smaller one.\n if score <= alpha:\n return score\n # Update beta so that the next child I search can prune\n # itself if it knows it will return a higher value.\n if score < beta:\n beta = score\n\n return min(scores)", "def minimax(self, game_node: ConnectFourGame, search_depth: int, alpha: int, beta: int):\n\n # Returns heuristic value when game reaches a terminal state\n if search_depth == 0 or game_node.winner_id is not None or game_node.grid.is_grid_full():\n return self.heuristic_function(game_node, search_depth)\n\n # AI player wants to maximize gains while opponent wants to minimize it\n maximizing_player = game_node.current_player == self.ai_player_id\n value = -inf if maximizing_player else inf\n\n # Maximizing player will want to maximize values, minimizing player will want to do the opposite\n if maximizing_player:\n value = -inf\n for _, child_node in self._get_game_node_children(game_node):\n value = max(value, self.minimax(child_node, search_depth - 1, alpha, beta))\n alpha = max(alpha, value)\n if alpha >= beta:\n break # Beta cutoff\n else:\n value = inf\n for _, child_node in self._get_game_node_children(game_node):\n value = min(value, self.minimax(child_node, search_depth - 1, alpha, beta))\n beta = min(beta, value)\n if beta <= alpha:\n break # Alpha cutoff\n\n return value", "def minimax(board: bytearray,\n depth: int, # depth of current plies (0 = deepest)\n alpha: int, \n beta: int, \n max_player: bool, # is this maximzing player?\n at_top: bool=False, # are we the top call to this?\n ):\n \n # base cases:\n # - Player will win w/this board (since AI loses, return massive negative)\n # - AI will win w/this board (return massive positive)\n # - reached the depth of recursion (score this leaf for AI)\n # - board is full (no score for this leaf)\n\n won_by: int = 0\n for c1, c2, c3, c4 in _WINDOWS:\n if board[c1] and (board[c1] == board[c2] == board[c3] == board[c4]):\n won_by = board[c1]\n break\n \n if won_by == _PLAYER:\n # subtract depth so we delay loss as much as possible \n return -1000 - depth \n if won_by == _AI:\n # add depth so faster wins are more attractive\n return 1000 + depth \n if depth == 0:\n score = score_position(board, _AI) \n return score\n\n # (these are arranged _CENTER-out to help make a/b pruning most efficient)\n valid_locations: list[int] = [\n col % 7 for col in (38,37,39,36,40,35,41) if board[col] == _EMPTY]\n\n if not valid_locations:\n return 0\n \n # minimax strategy: alternate if AI is looking for best move (maximizing)\n # or the most harmful response from the player (minimizing).\n value: int\n new_col: int\n row: int\n \n if max_player:\n value = _MINUS_INFINITY\n new_col = random.choice(valid_locations)\n for col in valid_locations: \n for r in (0, 1, 2, 3, 4, 5):\n if board[7 * r + col] == 0:\n row = r\n break\n b_copy = bytearray(board)\n b_copy[row * 7 + col] = _AI\n new_score: int = minimax(b_copy, depth-1, alpha, beta, False)\n if new_score > value:\n value = new_score\n new_col = col\n alpha = max(alpha, value)\n if alpha >= beta:\n break\n\n else:\n value = _INFINITY\n for col in valid_locations:\n for r in (0, 1, 2, 3, 4, 5):\n if board[7 * r + col] == 0:\n row = r\n break\n b_copy = bytearray(board)\n b_copy[row * 7 + col] = _PLAYER\n new_score: int = minimax(b_copy, depth-1, alpha, 
beta, True)\n if new_score < value:\n value = new_score\n beta = min(beta, value)\n if alpha >= beta:\n break\n\n # For speed, return simple score except when exiting first recursive call,\n # and return the chosen column & the score in that case.\n if not at_top:\n return value\n else:\n return new_col % 7, value", "def minimax(board, depth, alpha, beta, maximizingPlayer):\n valid_locations = get_valid_locations(board)\n\n if is_terminal_node(board) or depth == 0:\n if is_terminal_node(board):\n if winning_move(board, AI_PIECE):\n return None, 10000000\n elif winning_move(board, PLAYER_PIECE):\n return None, -10000000\n else:\n return None, 0\n\n else:\n return None, score_position(board, AI_PIECE)\n\n if maximizingPlayer:\n value = -math.inf\n column = random.choice(valid_locations)\n\n for col in valid_locations:\n row = get_next_open_row(board, col)\n bord_p = copy.deepcopy(board)\n drop_piece(bord_p, row, col, AI_PIECE)\n new_score = minimax(bord_p, depth - 1, alpha, beta, False)[1]\n\n if new_score > value:\n value = new_score\n column = col\n\n alpha = max(alpha, value)\n if alpha >= beta:\n break\n\n return column, value\n\n else:\n value = math.inf\n\n for col in valid_locations:\n row = get_next_open_row(board, col)\n bord_p = copy.deepcopy(board)\n drop_piece(bord_p, row, col, PLAYER_PIECE)\n new_score = minimax(bord_p, depth - 1, alpha, beta, True)[1]\n\n if new_score < value:\n value = new_score\n column = col\n\n beta = min(beta, value)\n if alpha >= beta:\n break\n\n return column, value", "def test_minimax(self):\n h, w = 7, 7 # board size\n starting_location = (2, 3)\n adversary_location = (0, 0) # top left corner\n iterative_search = False\n method = \"minimax\"\n\n # The agent under test starts at position (2, 3) on the board, which\n # gives eight (8) possible legal moves [(0, 2), (0, 4), (1, 1), (1, 5),\n # (3, 1), (3, 5), (4, 2), (4, 4)]. The search function will pick one of\n # those moves based on the estimated score for each branch. 
The value\n # only changes on odd depths because even depths end on when the\n # adversary has initiative.\n value_table = [[0] * w for _ in range(h)]\n value_table[1][5] = 1 # depth 1 & 2\n value_table[4][3] = 2 # depth 3 & 4\n value_table[6][6] = 3 # depth 5\n heuristic = makeEvalTable(value_table)\n\n # These moves are the branches that will lead to the cells in the value\n # table for the search depths.\n expected_moves = [set([(1, 5)]),\n set([(3, 1), (3, 5)]),\n set([(3, 5), (4, 2)])]\n\n # Expected number of node expansions during search\n counts = [(8, 8), (24, 10), (92, 27), (418, 32), (1650, 43)]\n\n # Test fixed-depth search; note that odd depths mean that the searching\n # player (student agent) has the last move, while even depths mean that\n # the adversary has the last move before calling the heuristic\n # evaluation function.\n for idx in range(5):\n test_depth = idx + 1\n agentUT, board = self.initAUT(test_depth, heuristic,\n iterative_search, method,\n loc1=starting_location,\n loc2=adversary_location)\n\n # disable search timeout by returning a constant value\n agentUT.time_left = lambda: 1e3\n _, move = agentUT.minimax(board, test_depth)\n\n num_explored_valid = board.counts[0] == counts[idx][0]\n num_unique_valid = board.counts[1] == counts[idx][1]\n\n self.assertTrue(num_explored_valid, WRONG_NUM_EXPLORED.format(\n method, test_depth, counts[idx][0], board.counts[0]))\n\n self.assertTrue(num_unique_valid, UNEXPECTED_VISIT.format(\n method, test_depth, counts[idx][1], board.counts[1]))\n\n self.assertIn(move, expected_moves[idx // 2], WRONG_MOVE.format(\n method, test_depth, expected_moves[idx // 2], move))", "def minimax(self, depth: int, alpha: float, beta: float, maximizing_player: bool) -> float:\r\n if self.check_if_win('x' if maximizing_player is True else 'o'):\r\n return -10 if maximizing_player else 10\r\n if self.full_board():\r\n return 1\r\n if depth == 0:\r\n return 0\r\n\r\n available_moves = self.check_for_moves()\r\n\r\n if maximizing_player:\r\n max_eval = -INFINITY\r\n for move in available_moves:\r\n self.tags[move[0]][move[1]] = 'o'\r\n evaluation = self.minimax(depth - 1, alpha, beta, False)\r\n self.tags[move[0]][move[1]] = None\r\n max_eval = max(max_eval, evaluation)\r\n alpha = max(alpha, evaluation)\r\n if beta <= alpha:\r\n break\r\n return max_eval\r\n\r\n else:\r\n min_eval = INFINITY\r\n for move in available_moves:\r\n self.tags[move[0]][move[1]] = 'x'\r\n evaluation = self.minimax(depth - 1, alpha, beta, True)\r\n self.tags[move[0]][move[1]] = None\r\n min_eval = min(min_eval, evaluation)\r\n beta = min(beta, evaluation)\r\n if beta <= alpha:\r\n break\r\n return min_eval", "def alphabeta(self, game, depth, alpha=float(\"-inf\"), beta=float(\"inf\"), maximizing_player=True):\n #logging.debug(\"alphabeta(%d, %f, %f, %s)\", depth, alpha, beta, maximizing_player)\n\n if self.time_left() < self.TIMER_THRESHOLD:\n raise Timeout()\n\n if depth <= 0: # last row to search so return score of this board\n score = self.score(game, self)\n return score,(-1,-1)\n \n # Otherwise search the next layer\n legal_moves = game.get_legal_moves()\n\n # Check for some legal moves - if none return score of this board\n if len(legal_moves) == 0:\n return self.score(game, self),(-1,-1)\n\n # Perform max layer search \n if maximizing_player:\n value = -inf\n best_move_so_far = (-1,-1)\n for m in legal_moves:\n logging.debug(\" Max layer - trying this move: %s\", str(m))\n this_value, _ = self.alphabeta(game.forecast_move(m), depth-1, alpha, beta, not 
maximizing_player)\n if this_value > value:\n value = this_value\n best_move_so_far = m\n if value >= beta:\n return value,m\n alpha = max(alpha, value)\n return value, best_move_so_far\n \n # Perform min layer search \n else:\n value = inf\n best_move_so_far = (-1,-1)\n for m in legal_moves:\n logging.debug(\" Min layer - trying this move: %s\", str(m))\n this_value, _ = self.alphabeta(game.forecast_move(m), depth-1, alpha, beta, not maximizing_player)\n if this_value < value:\n value = this_value\n best_move_so_far = m\n if value <= alpha:\n return value,m\n beta = min(beta, value)\n return value, m", "def alphabeta(self, game, depth, alpha=float(\"-inf\"), beta=float(\"inf\"), maximizing_Player = True):\n def max_value(self, game, depth, alpha, beta):\n \"\"\"This is helper function for alpha-beta prunnig on minimax\n Min_value (self, game, depth, alpha, beta)\n\n Parameters:\n game: game state\n depth: search depth\n alpha: search upper limit\n beta: search lower limit\n\n Find maximum score of each game state corresponding to its legal moves\n Set new alpha (search upper limit) if find score higher than current limit\n Return score of that state when search complete.\n \"\"\"\n \n # Timeout Check\n if self.time_left() < self.TIMER_THRESHOLD:\n raise SearchTimeout()\n\n # Get legal moves\n valid_moves = game.get_legal_moves()\n # Best possible score -> initiated at inf, the highest score possible\n best_value = float(\"-inf\")\n \n # Terminal State:\n # When search reaches search limit or no legal moves left\n # Return score of terminal state\n if (depth == 0) or (not valid_moves):\n return self.score(game, self)\n \n # Search each move in legal moves\n for move in valid_moves:\n\n # Update best possible value with current best or search value \n best_value = max(best_value, min_value(self, game.forecast_move(move), depth-1, alpha, beta))\n \n # Update beta when best bossible value is equal or higher than beta\n if (best_value >= beta):\n return best_value\n\n # Update alpha if best possible value is higher than alpha\n alpha = max(best_value, alpha)\n \n # Return best value (in this case max value) \n return best_value\n \n def min_value(self, game, depth, alpha, beta):\n \"\"\"This is helper function for alpha-beta prunnig on minimax\n Min_value (self, game, depth, alpha, beta)\n\n Parameters:\n game: game state\n depth: search depth\n alpha: search upper limit\n beta: search lower limit\n\n Find minimum score of each game state corresponding to its legal moves\n Set new beta (search lower limit) if find score lower than current limit\n Return score of that state when search complete.\n \"\"\"\n \n # Timeout Check\n if self.time_left() < self.TIMER_THRESHOLD:\n raise SearchTimeout()\n \n # Get legal moves\n valid_moves = game.get_legal_moves()\n # Best possible score -> initiated at inf, the highest score possible\n best_value = float(\"inf\")\n \n # Terminal State:\n # When search reaches search limit or no legal moves left\n # Return score of terminal state\n if (depth == 0) or (not valid_moves):\n return self.score(game, self)\n \n # Search each move in legal moves\n for move in valid_moves:\n \n # Update best possible value with current best or search value \n best_value = min(best_value, max_value(self, game.forecast_move(move), depth-1, alpha, beta))\n \n # Update beta when best bossible value is equal or lower than alpha\n if (best_value <= alpha):\n return best_value\n \n # Update alpha if best possible value is lower than beta\n beta = min(best_value, beta)\n \n return 
best_value\n \n # Timeout Check\n if self.time_left() < self.TIMER_THRESHOLD:\n raise SearchTimeout()\n\n # Main MiniMax Function\n # Get legal moves\n valid_moves = game.get_legal_moves()\n\n # Best possible move -> initiated at (-1,-1)\n # Best possible score -> initiated at -inf, the lowest score possible\n best_score = float(\"-inf\")\n best_move = (-1, -1)\n\n # Terminal State:\n # When no legal moves left return (-1, -1) move to forfeit\n if (depth == 0) or (not valid_moves):\n return (-1, -1)\n \n # Search best move from each move in legal moves\n # Using minimax by first call min_value (helper function)\n # While searching, if any move return better score than current best\n # core, set that move and corresponding score as new target also set new upper limit with maximum score so far\n # Search ends when score is higher than beta\n for move in valid_moves:\n score = min_value(self, game.forecast_move(move), depth -1, alpha, beta)\n if (score > best_score):\n best_score = score\n best_move = move\n alpha = max(alpha, score)\n if best_score >= beta:\n return best_move\n\n return best_move", "def __mini_max_ab(self,\n board: Board,\n depth: int,\n is_max: bool,\n alpha: int,\n beta: int,\n states: List[Board]\n ) -> Tuple[int, Board]:\n self.nodes_count += 1\n if depth == 0:\n return self.__moves_available(board), states[0]\n if self.get_num_of_moves(board, self.opponent_color) == 0:\n return 9999, states[0]\n all_moves = self.get_all_moves(board, self.player_color)\n\n if is_max:\n best = (-9999, board)\n for move in all_moves:\n next_state = self.__mini_max_ab(move, depth - 1, False, alpha, beta, states + [move])\n best = max(best, next_state, key=lambda x: x[0])\n alpha = max(alpha, best[0])\n if beta <= alpha:\n self.pruning_count += 1\n break\n return best\n else:\n best = (9999, board)\n for move in all_moves:\n next_state = self.__mini_max_ab(move, depth - 1, True, alpha, beta, states + [move])\n best = min(best, next_state, key=lambda x: x[0])\n beta = min(beta, best[0])\n if beta <= alpha:\n self.pruning_count += 1\n break\n return best", "def min_value(self, game, depth, alpha, beta):\n if self.time_left() < self.TIMER_THRESHOLD: # Timeout check\n raise SearchTimeout()\n\n if game.is_loser(self) or game.is_winner(self) or depth == 0: # Terminal test, checks base cases\n return self.score(game,self) # returns the score, UTILITY of the current state\n\n legal_moves = game.get_legal_moves() # obtain all the available moves on the board\n best_score = math.inf # abstraction assignment of infinity\n\n for m in legal_moves: # iterate through available moves - ACTIONS available to the state\n new_state = game.forecast_move(m)\n # for each move - ACTION, create the outcome of that move - RESULT of each ACTION resulting in a new state\n score = self.max_value(new_state, depth - 1, alpha, beta) # recursive call to max - using new state, alpha and beta\n best_score = min(best_score, score) # calculate min between best_score and score\n if best_score <= alpha: # check if best score is less than or equal to alpha\n return best_score # return best score\n beta = min(beta, best_score) # calculate min between alpha and best_score\n return best_score # propagate min and return its value", "def alphabeta(self, game, depth, alpha=float(\"-inf\"), beta=float(\"inf\"), maximizing_player=True, tab='\\t'):\n if self.time_left() < self.TIMER_THRESHOLD:\n raise Timeout()\n\n floor = alpha\n ceiling = beta\n legal_moves = game.get_legal_moves(game.active_player)\n if legal_moves is not None and 
len(legal_moves)>0:\n if depth>0: # Recursive case:\n if maximizing_player: # MAXIMIZING ply\n score, move = None, None\n for i,m in enumerate(legal_moves):\n newscore, _ = self.alphabeta(game.forecast_move(m), depth-1, floor, ceiling, maximizing_player=not maximizing_player, tab=tab+'\\t')\n if score is None or newscore > score:\n score, move = newscore, m\n \n # Alphabeta bookkeeping:\n if score > floor:\n floor = score # Constrains children at the next (minimizing) layer to be above this value\n if score >= ceiling: # No need to search any more if we've crossed the upper limit at this max layer already\n break\n else: # MINIMIZING ply\n# print (tab + \"MINIMIZING: (({})) {} < score < {} || Moves: {}\".format(depth, floor, ceiling, legal_moves))\n score, move = None, None\n for i,m in enumerate(legal_moves):\n newscore, _ = self.alphabeta(game.forecast_move(m), depth-1, floor, ceiling, maximizing_player=not maximizing_player, tab=tab+'\\t')\n if score is None or newscore < score:\n score, move = newscore, m\n \n # Alphabeta bookkeeping:\n if score < ceiling:\n ceiling = score # Constrains children at the next (maximizing) layer to be below this value\n if score <= floor: # No need to search any more if we've crossed the lower limit at this min layer already\n break\n else: # Base case (depth==0)\n score, move = self.score(game, self), None\n else: # We are at a DEAD-END here\n score, move = self.score(game, self), (-1, -1)\n\n return score, move", "def minimax(node,depth):\n if node.isLeaf():\n return node.evaluate(),None\n elif node.isMax:\n max_score = float(\"-inf\")\n max_path = None\n for C in node.children():\n score,path = minimax(C,depth+1)\n if score > max_score:\n max_score = score\n max_path = C.name,path\n return max_score,max_path\n else:\n min_score = float(\"inf\")\n min_path = None\n for C in node.children():\n score,path = minimax(C,depth+1)\n if score < min_score:\n min_score = score\n min_path = C.name,path\n return min_score,min_path", "def alphabeta(self, game, depth, alpha=float(\"-inf\"), beta=float(\"inf\")):\n if self.time_left() < self.TIMER_THRESHOLD:\n raise SearchTimeout()\n\n # TODO: finish this function!\n # raise NotImplementedError\n legal_moves = game.get_legal_moves() # obtain list of all available moves on the board\n best_move = (-1, -1) # initialise best move in case of error\n best_score = -math.inf # abstraction assignment of infinity\n\n for m in legal_moves: # iterate through all moves available to the board - ACTIONS\n new_state = game.forecast_move(m) # capture new game state - RESULT for each move - ACTION\n score = self.min_value(new_state, depth - 1, alpha, beta) # recursive call to min_value to test new state along with alpha and beta\n if score > best_score: # check to see if score of move is better than current best score\n best_move = m # update best move to the current move\n best_score = score # update best score to current state's score\n if best_score >= beta: # check best move against lower bound\n return m # return move\n alpha = max(alpha, best_score) # calculate max between alpha and best score, work out higher bound for current state\n return best_move # return best move for player", "def alphabeta(self, depth, isMax, alpha, beta, depth_flag):\n # Check for endgame condition:\n if self.EndGame() == True:\n # print(self.player.getCurrentPieceList())\n result = {\n \"Score\": '',\n \"Piece\": '',\n \"Pos\": ''\n }\n pass\n # Get score of pieceList\n result['Score'] = ((99999999) * self.player.getCurrentPlayer(), depth + 1)\n 
return result\n\n # Check for depth = 0\n if depth == 0:\n # print(\"Depth 0\")\n result = {\n \"Score\": '',\n \"Piece\": '',\n \"Pos\": ''\n }\n pass\n\n # Get score of pieceList\n result['Score'] = (self.player.getPieceListScore(self.pieceList), depth + 1)\n return result\n\n # Change current player\n self.player.changeCurrentPlayer()\n\n # Get possible move of the current player\n possibleMoves = self.player.getAllPossibleMoves()\n eval_result = list()\n\n # Loop through possible move and create new piecelist with each move\n for i in range(len(possibleMoves)):\n # Collect inputs (piece and piece's position)\n piece = possibleMoves[i][0]['Symbol']\n to_pos = possibleMoves[i][1]\n\n # Create new pieceList\n nextState = self.nextState(piece, to_pos)\n\n # Recall alpha beta to collect score from lower depth\n alpha_beta_result = nextState.alphabeta(depth - 1, not isMax, alpha, beta, depth_flag)\n\n try:\n score = alpha_beta_result['Score']\n\n except:\n print(\"\\nWrong\\n\")\n\n result = {\n \"Score\": score,\n \"Piece\": piece,\n \"Pos\": to_pos\n }\n\n eval_result.append(result)\n if isMax:\n alpha = max(alpha, score[0])\n\n else:\n beta = min(beta, score[0])\n # Check if depth is at top depth. Don't check beta <= alpha to avoid wrong pruning\n if depth == depth_flag and (score[0] != 99999999 or score[0] != -99999999):\n pass\n\n else:\n if beta <= alpha:\n # print(\"Prun\")\n return eval_result[-1]\n\n # Get score from eval result\n score_list = list()\n id = ''\n for j in range(len(eval_result)):\n score_list.append(eval_result[j]['Score'])\n\n # Find index, check max depth , base on max or min value of tuple score\n if isMax:\n id = score_list.index(max(filter(lambda t: t[0] == max(score_list,\n key=itemgetter(0))[0], score_list),\n key=itemgetter(1)))\n else:\n id = score_list.index(max(filter(lambda t: t[0] == min(score_list,\n key=itemgetter(0))[0], score_list),\n key=itemgetter(1)))\n\n return eval_result[id]", "def _alphabeta_search(self, depth, white, black, playing, move_sequence,\n alpha=float('-inf'), beta=float('inf'), max_depth=1000,\n time_left=1000):\n self.nodes_searched += 1 # keep track of nodes searched for displaying\n start_time = clock() # keep track of time to stop the thread\n action = None\n\n state = (tuple(sorted(white, key=lambda x: x[0] + 10 * x[1])),\n tuple(sorted(black, key=lambda x: x[0] + 10 * x[1])), playing)\n\n if state in move_sequence: return 0 # check if state was previously encountered in branch\n move_sequence.add(state)\n\n # check if node is database of stored nodes at the correct depth\n if state in self.nodes and self.nodes[state][2] >= depth and \\\n abs(self.nodes[state][0]) < 1000:\n move_sequence.remove(state)\n if depth == max_depth:\n return (self.nodes[state][0], self.nodes[state][1])\n else:\n return self.nodes[state][0]\n if playing == 1 and is_winning(white): # check that state is not terminal\n val = 100000 + depth\n elif playing == 0 and is_winning(black):\n val = -100000 - depth\n elif depth == 0: # if depth is zero return heuristic approximation\n val = self._heuristic(white, black, playing)\n else:\n if not self.big:\n xstart = 1\n ystart = 1\n n = 5\n m = 4\n else: # this whole set of statements is just to only consider the inner squares\n # of a big board for the first few moves.\n if self.turn > 5:\n xstart = 1\n ystart = 1\n n = 7\n m = 6\n else:\n xstart = 2\n ystart = 2\n n = 6\n m = 5\n\n if playing == 0: # maximizer\n val = float(\"-inf\")\n\n for a in get_actions(white, black, n, m, xstart, ystart):\n modified_cell 
= self._apply_action(a, white)\n v = self._alphabeta_search(depth - 1, white, black, 1, move_sequence, alpha, beta) # continue searching\n if v > val: # update value and action if a better path has been found\n val = v\n action = a\n self._undo_action(a, white, modified_cell)\n\n alpha = max(alpha, val) # alpha-beta prunning\n if beta < alpha: break\n if clock() - start_time > time_left: return # end search if out of time\n else: # minimizer\n val = float(\"inf\")\n\n for a in get_actions(black, white, n, m, xstart, ystart):\n modified_cell = self._apply_action(a, black)\n v = self._alphabeta_search(depth - 1, white, black, 0, move_sequence, alpha, beta)\n if v < val:# update value and action if a better path has been found\n val = v\n action = a\n\n self._undo_action(a, black, modified_cell)\n\n beta = min(beta, val) # alpha-beta prunning\n if beta < alpha: break\n if clock() - start_time > time_left: break # end search if out of time\n\n self.nodes[state] = (val, action, depth) # update state database\n move_sequence.remove(state)\n if max_depth == depth: # only return the action if at root node\n return (val, action)\n else:\n return val", "def alphabeta(self, game, depth, alpha=float('-inf'), beta=float('inf')):\n if self.time_left() < self.TIMER_THRESHOLD:\n raise SearchTimeout()\n\n best_score = float('-inf')\n\n player_moves = get_legal_moves_from_pos(game, game.get_player_location(game.active_player))\n\n if len(player_moves) == 0:\n # DEBUG_PRINT\n # print('D={} BEST MOVE=> {}, TERM, maxD={}'.format(depth, best_move, stats['max_search_depth']))\n return (-1, -1)\n else:\n # initialise best move so we don't return a forfeit\n best_move = player_moves[0]\n\n # Order move evaluation by highest number of legal moves first\n player_moves.sort(key=lambda p_move: len(get_legal_moves_from_pos(game, p_move)), reverse=True)\n\n\n for move in player_moves:\n score = self.__min_play(game=game.forecast_move(move), depth=depth - 1, alpha=alpha, beta=beta)\n\n # prune min branch if score is higher than or equal to beta\n if score >= beta:\n return move\n\n # otherwise update best score and move\n if score > best_score:\n best_move = move\n best_score = score\n\n # update alpha (highest in max path) to highest_score if score > alpha\n alpha = max(alpha, best_score)\n\n # DEBUG_PRINT\n # if best_score not in [float('inf'), float('-inf')]:\n # print(' D={} BEST MOVE=> {}, score={}, maxD={}\\n'.format(depth, best_move, best_score, stats['max_search_depth']))\n\n return best_move", "def alpha_beta_search(self, game_state, depth):\r\n alpha = float(\"-inf\")\r\n beta = float(\"inf\")\r\n best_score = float(\"-inf\")\r\n best_move = None\r\n for a in game_state.actions():\r\n vv = self.min_value(game_state.result(a), alpha, beta, depth)\r\n alpha = max(alpha, vv)\r\n if vv > best_score:\r\n best_score = vv\r\n best_move = a\r\n return best_move", "def alphaBeta(self,state,alpha,beta,depth=0):\n \n #print(\"NextState (depth \"+str(depth)+\"):\")\n #print(\"Action: \"+state.get_action())\n if state in self.__explored:\n return self.__explored[state.get_hashable_state()]\n \n if state.is_end_state() or depth >= (self.__max_depth-1):\n #Return terminal state's utility value\n self.__explored[state.get_hashable_state()] = state.get_utility_value()\n return state.get_utility_value()\n \n is_max_turn = state.get_max_turn()\n childList = state.get_successors()\n \n if is_max_turn:\n for c in childList:\n #if c in self.__explored.keys():\n # continue\n alpha = max(alpha, self.alphaBeta(c,alpha,beta,depth+1)) \n 
if beta <= alpha:\n break \n self.__explored[state.get_hashable_state()] = alpha\n return alpha\n else:\n for c in childList:\n #if c in self.__explored.keys():\n # continue\n beta = min(beta, self.alphaBeta(c,alpha,beta,depth+1)) \n if beta <= alpha:\n break \n self.__explored[state.get_hashable_state()] = beta\n return beta", "def iterative_minimax_strategy(game: Any) -> Any:\n s = Stack()\n id0 = 0\n d = {0: Tree([id0, game, None])}\n s.add(0)\n\n while not s.is_empty():\n id1 = s.remove()\n item = [id1]\n if d[id1].children == []:\n for move in d[id1].value[1].current_state.get_possible_moves():\n game1 = copy.deepcopy(d[id1].value[1])\n game1.current_state = game1.current_state.make_move(move)\n id0 += 1\n d[id0] = Tree([id0, game1, None])\n d[id1].children.append(id0)\n item.append(id0)\n else:\n item.extend(d[id1].children)\n for num in item:\n if d[num].value[1].is_over(d[num].value[1].current_state):\n d[num].value[2] = -1\n elif d[num].children != [] and all(d[x].value[2] is not None\n for x in d[num].children):\n d[num].value[2] = max([(-1) * d[y].value[2]\n for y in d[num].children])\n else:\n s.add(num)\n i = 0\n for q in d[0].children:\n if d[q].value[2] == -1:\n i = d[0].children.index(q)\n return game.current_state.get_possible_moves()[i]", "def alphabeta_helper(self, game, depth, alpha=float(\"-inf\"), \\\n beta=float(\"inf\"), maximizing_player=True):\n if self.time_left() < self.TIMER_THRESHOLD:\n raise SearchTimeout()\n\n # Get all the available moves at the current state\n legal_moves = game.get_legal_moves()\n\n # FIRST, test to see if stopped-out\n if len(legal_moves) == 0 or depth == 0:\n if maximizing_player:\n return (self.score(game, game.active_player), (-1, -1))\n else:\n return (self.score(game, game.inactive_player), (-1, -1))\n\n this_move = legal_moves[0]\n if maximizing_player: # This is the active player at max ply\n this_score = float(\"-inf\")\n for move in legal_moves:\n next_ply_state = game.forecast_move(move)\n next_ply_score, next_ply_move \\\n = \\\n self.alphabeta_helper(next_ply_state, \\\n depth-1, alpha, beta, False)\n\n if next_ply_score >= this_score:\n this_score = next_ply_score\n\n if this_score >= beta:\n return this_score, move\n\n if this_score > alpha:\n this_move = move\n alpha = this_score\n\n else: # This is active player with the next min ply for opponent\n this_score = float(\"inf\")\n for move in legal_moves:\n next_ply_state = game.forecast_move(move)\n next_ply_score, next_ply_move \\\n = \\\n self.alphabeta_helper(next_ply_state, \\\n depth-1, alpha, beta, True)\n\n if next_ply_score <= this_score:\n this_score = next_ply_score\n\n if this_score <= alpha:\n return this_score, move\n\n \n if this_score < beta:\n this_move = move\n beta = this_score\n\n return this_score, this_move", "def alphabeta(self, game, depth, alpha=float(\"-inf\"), beta=float(\"inf\"), maximizing_player=True):\n if self.time_left() < self.TIMER_THRESHOLD:\n raise Timeout()\n\n # Initialise variable for no legal moves\n no_legal_moves = (-1, -1)\n best_move = no_legal_moves\n best_utility = float('-inf') if maximizing_player else float('inf')\n current_player = game.active_player if maximizing_player else game.inactive_player\n remaining_legal_moves = game.get_legal_moves(game.active_player)\n\n # Recursion function termination conditions when legal moves exhausted or no plies left\n if not remaining_legal_moves:\n return game.utility(current_player), no_legal_moves\n elif depth == 0:\n return self.score(game, current_player), 
remaining_legal_moves[0]\n\n # Recursively alternate between Maximise and Minimise calculations for decrementing depths\n for move in remaining_legal_moves:\n # Obtain successor of current state by creating copy of board and applying a move.\n next_state = game.forecast_move(move)\n forecast_utility, _ = self.alphabeta(next_state, depth - 1, alpha, beta, not maximizing_player)\n\n if maximizing_player:\n if forecast_utility > best_utility:\n best_utility, best_move = forecast_utility, move\n\n # Prune next successor node if possible\n if best_utility >= beta:\n break\n alpha = max(alpha, best_utility)\n else:\n if forecast_utility < best_utility:\n best_utility, best_move = forecast_utility, move\n\n # Prune next successor node if possible\n if best_utility <= alpha:\n break\n beta = min(beta, best_utility)\n\n return best_utility, best_move", "def minimax(self, board, depth, alpha, beta, maximizing_player, computer, human):\n valid_moves = board.get_valid_moves()\n\n # if the computer has one after one move then he must definitely make that move\n if depth == 3 and board.game_won() == computer:\n return (None, 3000000000000000000000)\n\n # if the computer can win in two moves then the score has to be high but lower than the previous case\n if depth == 1 and board.game_won() == computer:\n return (None, 30000000000)\n\n is_terminal = self.is_terminal_node(board)\n if depth == 0 or is_terminal:\n if is_terminal:\n # the game gets won by the human in the future moves\n if board.game_won() == human:\n return (None, float(\"-inf\"))\n # the game stops because there are no more possible moves on the board (it is full)\n else:\n return (None, 0)\n else:\n # We have reached the full depth of the tree -> we return the heuristic score of the leaf nodes\n # by calculating the score of the participant at that point in the game considering his pieces on the board\n return (None, board.score_position(computer))\n\n # ALPHA BETA PRUNING FOR OPTIMIZATION\n\n # the maximizing computer = Computer\n if maximizing_player:\n score = float('-inf')\n column = random.choice(valid_moves)\n for col in valid_moves:\n ai_board = copy.deepcopy(board)\n ai_board.move_on_board(col, computer)\n new_score = self.minimax(ai_board, depth - 1, alpha, beta, False, computer, human)[1]\n if new_score > score:\n score = new_score\n column = col\n alpha = max(alpha, score)\n if alpha >= beta:\n break\n\n return column, score\n # the minimizing computer = Human\n else:\n score = float('inf')\n column = random.choice(valid_moves)\n for col in valid_moves:\n ai_board = copy.deepcopy(board)\n ai_board.move_on_board(col, computer - 1)\n new_score = self.minimax(ai_board, depth - 1, alpha, beta, True, computer, human)[1]\n if new_score < score:\n score = new_score\n column = col\n beta = min(beta, score)\n if alpha >= beta:\n break\n\n return column, score", "def minimax_alpha_beta(board, depth, alpha, beta, maximizing_player):\n available_moves = [column for column in range(len(board[0])) if\n is_valid_move(board, column)]\n shuffle(available_moves)\n is_terminal, row, column = is_terminal_node(board)\n if depth == 0 or is_terminal is True:\n if is_terminal is True:\n if board[row][column] == 2:\n # computer wins\n return None, float('inf')\n elif board[row][column] == 1:\n # player wins\n return None, float('-inf')\n else: # draw\n return None, 0\n else:\n # depth == 0\n return None, get_move_score(board, 2)\n\n if maximizing_player: # computer\n value = float('-inf')\n col = randrange(len(board[0]))\n for column in 
available_moves:\n board_copy = deepcopy(board)\n board_copy, _ = make_move(board_copy, column, 2)\n _, new_score = minimax_alpha_beta(board_copy, depth - 1, alpha,\n beta, False)\n if new_score > value:\n value = new_score\n col = column\n alpha = max(alpha, value)\n if alpha >= beta:\n break\n return col, value\n else: # player\n value = float('inf')\n col = randrange(len(board[0]))\n for column in available_moves:\n board_copy = deepcopy(board)\n board_copy, _ = make_move(board_copy, column, 1)\n _, new_score = minimax_alpha_beta(board_copy, depth - 1, alpha,\n beta, True)\n if new_score <= value:\n value = new_score\n col = column\n beta = min(beta, value)\n if beta <= alpha:\n break\n return col, value" ]
[ "0.73440564", "0.7331566", "0.7307859", "0.69187623", "0.6834855", "0.68207544", "0.66435844", "0.65970844", "0.65479726", "0.6534145", "0.64557296", "0.6412116", "0.63775927", "0.63698417", "0.6361018", "0.6336017", "0.6331006", "0.63172877", "0.62967336", "0.6285531", "0.6216108", "0.62069684", "0.62058634", "0.6192517", "0.61588144", "0.6142655", "0.61348754", "0.61154914", "0.60855263", "0.6076428" ]
0.7741564
0
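The candidate snippets in the row above all revolve around depth-limited minimax with alpha-beta pruning. Below is a minimal, self-contained sketch of that pattern for readers skimming the dump; the Node class and the toy tree are illustrative assumptions, not taken from any row in this dataset.

from dataclasses import dataclass
from typing import Tuple

@dataclass
class Node:
    # Toy game-tree node: leaves carry a static score, internal nodes carry children.
    score: int = 0
    children: Tuple["Node", ...] = ()

def alphabeta(node: Node, depth: int, alpha: float, beta: float, maximizing: bool) -> float:
    """Depth-limited minimax with alpha-beta pruning over a toy tree."""
    if depth == 0 or not node.children:
        return node.score                      # depth cut-off or terminal: static evaluation
    if maximizing:
        value = float("-inf")
        for child in node.children:
            value = max(value, alphabeta(child, depth - 1, alpha, beta, False))
            alpha = max(alpha, value)
            if alpha >= beta:                  # beta cut-off: the minimizer avoids this branch
                break
        return value
    else:
        value = float("inf")
        for child in node.children:
            value = min(value, alphabeta(child, depth - 1, alpha, beta, True))
            beta = min(beta, value)
            if beta <= alpha:                  # alpha cut-off
                break
        return value

if __name__ == "__main__":
    leaf = lambda s: Node(score=s)
    tree = Node(children=(Node(children=(leaf(3), leaf(5))),
                          Node(children=(leaf(2), leaf(9)))))
    print(alphabeta(tree, 2, float("-inf"), float("inf"), True))  # prints 3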
Add callback which will be called after reconnect.
def add_reconnect_callback(self, callback: Callable[[], None]) -> None: self._on_reconnect_callbacks.add(callback)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def callback_connect(self):\n pass", "def callback_connect(self):\n pass", "def callback_connect(self):\n pass", "def register_connect_changed_callback(self, callback=None):\r\n return self._arm.register_connect_changed_callback(callback=callback)", "def add_on_connection_close_callback(self):\n self.logger.info('Adding connection close callback')\n self._connection.add_on_close_callback(self.on_connection_closed)", "def add_on_connection_close_callback(self):\n logger.info('Adding connection close callback')\n self._connection.add_on_close_callback(self.on_connection_closed)", "def registerCallback(self, cb, *args):\n\n conn = len(self.callbacks)\n self.callbacks[conn] = (cb, args)\n return conn", "def add_callback(self, on_msg_cb):\n self.callbacks.add(on_msg_cb)", "def after_connect(self):\n pass", "def connect(self, reconnect=True, *args, **kwargs):\n pass", "def register_callback(self):\n raise Exception('not implemented')", "async def on_connect(self) -> None:", "def register_callback(self, func):\n self.callback = func", "def register_callback(self, callback):\n self.callbacks.add(callback)", "def subscribe(self, callback: Callable) -> None:\n self.callbacks.add(callback)", "async def on_connect(self):\n pass", "def add_callbacks(self):\n logger.debug('-' * 50)\n logger.debug('Connecting callbacks (should only ever happen once during a session)')\n logger.debug('-' * 50)\n\n nuke.addOnCreate(nuke.localizationPanelSignals.register_node)\n localization.addFileCallback(self.model.file_cb_updates_row)\n localization.addReadCallback(self.model.update_node_status)\n nuke.localizationPanelSignals.newItem.connect(self.model.add_item)\n nuke.addOnDestroy(self.model.remove_node)", "def add_callback(self, callback):\n if callback is not None:\n self.callbacks.append(callback)", "def register(self, callback):\n self.callback = callback", "def addCallback(self,newCallback):\n self.callback.append(newCallback)", "def _callback(func):\n def wrapper(self, *args, **kwargs):\n logging.debug(\"Is connected: {}\".format(self.crestron_client.is_connected))\n if not self.crestron_client.is_connected:\n self.crestron_connect()\n while not self.crestron_client.is_connected:\n time.sleep(1)\n func(self, *args, **kwargs)\n return wrapper", "def _onconnect(self):\n\n pass", "def _connect_callback(self, future):\n if future.exception() is None:\n self._ws_connection = future.result()\n self._on_connection_success()\n self._read_messages()\n else:\n self._on_connection_error(future.exception())", "def add_on_connect_handler(self, handler):\n self._on_connect_handlers.append(handler)", "def onConnect(self, fetcher, connectionRespInfo): #$NON-NLS-1$\r", "def add_conn_hook(self, hook):\n self._conn_hooks.append(hook)", "def onConnect(self, request_or_response):", "def register_callback(self, callback: Callable[[], None]) -> None:\r\n print(\"register callback called\")\r\n self._callbacks.add(callback)", "def add_callback(self, callback) -> None:\r\n self._callbacks.append(callback)", "def on_connect( client, userdata, flag, rc ):\n if ( rc == 0 ):\n client.connected_flag = True\n logging.info( \"Connected to Broker! Returned code: %s\\n\" %rc )\n else:\n logging.info( \"Failed to connect. Returned code: %s\\n\" %rc )" ]
[ "0.74931026", "0.74931026", "0.74931026", "0.69970465", "0.6915597", "0.6901249", "0.6890013", "0.6638505", "0.6606058", "0.6539018", "0.6511781", "0.65081155", "0.64518017", "0.6449474", "0.6388555", "0.63857764", "0.6376846", "0.63707376", "0.6370163", "0.6357641", "0.6346638", "0.63445395", "0.63391846", "0.63089997", "0.6306784", "0.6300949", "0.6293074", "0.62745935", "0.626528", "0.6264145" ]
0.84650695
0
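The positive document in the row above registers a reconnect callback on a connection object. A minimal sketch of how such a hook might be wired up and fired follows; the Connection class, its _reconnect method, and the example callback are illustrative assumptions rather than any library's actual API.

from typing import Callable, Set

class Connection:
    """Hypothetical connection that notifies registered callbacks after a reconnect."""

    def __init__(self) -> None:
        self._on_reconnect_callbacks: Set[Callable[[], None]] = set()

    def add_reconnect_callback(self, callback: Callable[[], None]) -> None:
        # Same shape as the positive document above: just register the callable.
        self._on_reconnect_callbacks.add(callback)

    def _reconnect(self) -> None:
        # ... re-establish the underlying transport here (omitted) ...
        for callback in self._on_reconnect_callbacks:
            callback()  # notify every subscriber once the connection is back

if __name__ == "__main__":
    conn = Connection()
    conn.add_reconnect_callback(lambda: print("resubscribing after reconnect"))
    conn._reconnect()  # prints the message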
Sets a consumption task for this connection and schedules it to run.
def set_and_schedule_consumption_task( self, consumption_task: Callable[[], Awaitable[None]] ) -> None: self._consumption_task = consumption_task if self._consumption_task: self._running_task = asyncio.ensure_future(self._consumption_task())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def put_task(self, task):\n # Check if current task is valid\n if not task.connect(self): # Check if task can be used\n return # Drop current task\n self.queue.put(task) # Add current task in schedule queue", "async def _start_cron_task(self):\n pass", "def setup_periodic_tasks(sender, **kwargs):\n sender.add_periodic_task(60, scheduled_task.s(), name='A scheduled task')", "def apply_task(self, task):\n self.tasks.add(task)", "def schedule_task(self, task, date):\n return self.connection.schedule_task(task, date)", "def task(self, task):\n if not task:\n return\n\n from stalker import Task\n\n if not isinstance(task, Task):\n raise TypeError(\n \"%s.task should be a stalker.Task instance, not %s\"\n % (self.__class__.__name__, task.__class__.__name__)\n )\n self._task = task\n\n self.fill_ui()", "async def inbound_task_call(self):\n from cocotb.triggers import Timer\n await Timer(0, units=\"ps\")", "def register(self, task, schedule, minutes: int = None):\n self.task_list.append(ScheduledTask(task, schedule, minutes))", "def schedule(self):\n\n self.task_cls.original_apply_async(*self.task_args,\n **self.task_kwargs)", "def tasks(self, tasks):\n\n self._tasks = tasks", "def setTask(self, value):\n return self._call_java(\"setTask\", value)", "def process(self, task):\n # Predict timestamp for the first run\n _, date = task.trigger(wait=False, **task.trigger_args)\n\n # Adding the task in schedule queue\n self.task_manager.schedule_task(task, date)", "def schedule_task(self, name, date):\n pass", "def _schedule_task(self, task: _Task) -> None:\n if isinstance(task.interval, (int, float)):\n task.next = self.sys_loop.call_later(task.interval, self._run_task, task)\n elif isinstance(task.interval, time):\n today = datetime.combine(date.today(), task.interval)\n tomorrow = datetime.combine(date.today() + timedelta(days=1), task.interval)\n\n # Check if we run it today or next day\n if today > datetime.today():\n calc = today\n else:\n calc = tomorrow\n\n task.next = self.sys_loop.call_at(calc.timestamp(), self._run_task, task)\n else:\n _LOGGER.critical(\n \"Unknown interval %s (type: %s) for scheduler %s\",\n task.interval,\n type(task.interval),\n task.id,\n )", "def call(self, task, **options):\n pass", "def _run_task(self, task: _Task) -> None:\n\n async def _wrap_task():\n \"\"\"Run schedule task and reschedule.\"\"\"\n try:\n if self.sys_core.state == CoreState.RUNNING:\n await task.coro_callback()\n finally:\n if task.repeat and self.sys_core.state not in (\n CoreState.STOPPING,\n CoreState.CLOSE,\n ):\n self._schedule_task(task)\n else:\n self._tasks.remove(task)\n\n task.job = self.sys_create_task(_wrap_task())", "def schedule_task(self, task):\n if self.time_based:\n minimum_wait_server = float('inf')\n for uid, server in self.all_servers.items():\n if server.status:\n if minimum_wait_server > server.waiting_time:\n target_server = server\n minimum_wait_server = server.waiting_time\n try:\n target_server.jobs.append(task)\n target_server.waiting_time += task.task_time\n self.servers_jobs_list[target_server.server_id].append(task)\n except Exception:\n print(\"There are no servers left to reassign\")\n raise Exception(\"################# CHAOS MONKEY WON ####################\")\n else:\n minimum_jobs = float('inf')\n for uid, server in self.all_servers.items():\n if server.status:\n if minimum_jobs > len(server.jobs):\n minimum_jobs = len(server.jobs)\n target_server = server\n try:\n target_server.jobs.append(task)\n target_server.waiting_time += task.task_time\n 
self.servers_jobs_list[target_server.server_id].append(task)\n except Exception:\n print(\"There are no servers left to reassign\")\n raise Exception(\"################# CHAOS MONKEY WON ####################\")", "def tasks_start(sender, **kwargs):\n sender.add_periodic_task(5.0, get_heartbeat.s())\n sender.add_periodic_task(5.0, monitor_resource_util.s())", "def task(self, value):\n \n if value['job_type'] == \"UPDATE\" or value['job_type'] == \"MAINTAIN\":\n self._queue.put(value)\n\n if 'focused_task' in value:\n if value['focused_task'] == 1:\n logging.info(\"Focused task is ON\\n\")\n self.finishing_task = True\n else:\n self.finishing_task = False\n logging.info(\"Focused task is OFF\\n\")\n else:\n self.finishing_task = False\n logging.info(\"focused task is OFF\\n\")\n \n self._task = value\n self.run()", "def config(self, frequency, duration):\n if 0 > frequency:\n raise ValueError('Baste frequency must be >= 0')\n\n if 0 >= duration:\n raise ValueError('Baste duration must be > 0')\n\n self._duration = duration\n self._frequency = frequency\n\n if self._baste_periodic_handle:\n self._baste_periodic_handle.stop()\n self._baste_periodic_handle = None\n\n if self._baste_off_handle:\n tornado.ioloop.IOLoop.instance().remove_timeout(\n self._baste_off_handle\n )\n self._baste_off_handle = None\n\n self._baste_off()\n\n if frequency > 0:\n\n self._baste_periodic_handle = tornado.ioloop.PeriodicCallback(\n self._baste,\n frequency * 60 * 1000)\n self._baste_periodic_handle.start()\n self._baste()", "def execute_task(self):\n raise NotImplementedError(\"Execute Task method not implemented\")", "def task(self, callable, name=None):\n if name is None:\n name = callable.__name__\n if self.name:\n name = '%s.%s' % (self.name, name)\n if name in self.tasks:\n raise ValueError('task %r conflicts with existing task' % name)\n self.tasks[name] = callable\n return callable", "def _begin_consuming(self):\n self._consuming = True\n loop = asyncio.get_event_loop()\n self._message_queue = asyncio.Queue(\n maxsize=self.app.settings['SQS_PREFETCH_LIMIT'],\n loop=loop,\n )\n loop.create_task(self._consume())", "def __init__(self, scheduler_name, task, interval, delay=0):\n\n self.scheduler_name = scheduler_name\n self.task = task\n self.interval = interval\n self.delay = delay\n self.scheduler = sched.scheduler(time.time, time.sleep)\n self.__running = False\n super(Scheduler, self).__init__(name=self.scheduler_name)\n self.setDaemon(True)", "def run_task(self) -> Task:", "def _set_task(self, task_idx):\n self.task_idx = task_idx", "def add_wakeup_task(self, task):\n self.wakeup_tasks.append(task)\n # make sure to run this at least once\n self.wake_up()", "def execute_task(self, task):\n t = threading.Thread(target=task)\n t.start()", "def execute_task(self, task_name):\n self.busy = True\n self.pipe_start.send((\"EXECUTE\",task_name))", "def _schedule_run(cls, workbook, task, outbound_context):\n\n def run_delayed_task():\n \"\"\"Runs the delayed task. Performs all the steps required to setup\n a task to run which are not already done. 
This is mostly code\n copied over from convey_task_result.\n \"\"\"\n db_api.start_tx()\n try:\n execution_id = task['execution_id']\n execution = db_api.execution_get(execution_id)\n\n # Change state from DELAYED to IDLE to unblock processing.\n\n WORKFLOW_TRACE.info(\"Task '%s' [%s -> %s]\"\n % (task['name'],\n task['state'], states.IDLE))\n\n db_task = db_api.task_update(task['id'],\n {\"state\": states.IDLE})\n task_to_start = [db_task]\n data_flow.prepare_tasks(task_to_start, outbound_context)\n db_api.commit_tx()\n finally:\n db_api.end_tx()\n\n if not states.is_stopped_or_finished(execution[\"state\"]):\n cls._run_tasks(task_to_start)\n\n task_spec = workbook.tasks.get(task['name'])\n retries, break_on, delay_sec = task_spec.get_retry_parameters()\n if delay_sec > 0:\n # Run the task after the specified delay.\n eventlet.spawn_after(delay_sec, run_delayed_task,\n context=auth_context.ctx())\n else:\n LOG.warn(\"No delay specified for task(id=%s) name=%s. Not \"\n \"scheduling for execution.\" % (task['id'], task['name']))" ]
[ "0.5458914", "0.54185814", "0.53717726", "0.53485686", "0.5340436", "0.5327154", "0.5307498", "0.51731163", "0.51555216", "0.5129518", "0.51186925", "0.5094137", "0.5083912", "0.50817496", "0.50254023", "0.50215", "0.5017692", "0.4945982", "0.4923733", "0.48934317", "0.4891426", "0.48837942", "0.4869764", "0.48537302", "0.48390797", "0.48299396", "0.48296794", "0.48270392", "0.4808875", "0.48052076" ]
0.8098527
0
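The positive document in the row above stores a coroutine factory and schedules it with asyncio.ensure_future. A small sketch of that asyncio pattern follows; the Consumer class and the toy consume loop are assumptions made for illustration, not part of the original codebase.

import asyncio
from typing import Awaitable, Callable, Optional

class Consumer:
    """Hypothetical holder that schedules a consumption coroutine on the running loop."""

    def __init__(self) -> None:
        self._consumption_task: Optional[Callable[[], Awaitable[None]]] = None
        self._running_task: Optional[asyncio.Future] = None

    def set_and_schedule_consumption_task(
        self, consumption_task: Callable[[], Awaitable[None]]
    ) -> None:
        # Mirrors the positive document: remember the factory, then schedule it to run.
        self._consumption_task = consumption_task
        if self._consumption_task:
            self._running_task = asyncio.ensure_future(self._consumption_task())

async def main() -> None:
    async def consume() -> None:
        for i in range(3):
            print("consumed message", i)
            await asyncio.sleep(0)

    consumer = Consumer()
    consumer.set_and_schedule_consumption_task(consume)
    if consumer._running_task:
        await consumer._running_task  # wait for the scheduled task to finish

if __name__ == "__main__":
    asyncio.run(main())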
Paginate the results of the base query. We use limit/offset as the results need to be ordered by date and not the primary key.
def _paginate(cls, context, query): marker = int(context.marker or 0) limit = int(context.limit or CONF.metadatas_page_size) # order by 'updated DESC' to show the most recent metadatas first query = query.order_by(desc(DBMetadata.updated)) # Apply limit/offset query = query.limit(limit) query = query.offset(marker) # check if we need to send a marker for the next page if query.count() < limit: marker = None else: marker += limit return query.all(), marker
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def paginate(self, page_size=20, **q_options):\n cursor = self._get_cursor(self.page, page_size, **q_options)\n results, cursor, more = self.query.fetch_page(page_size,\n start_cursor=cursor,\n **q_options)\n self.has_next = more\n return results, cursor, more", "def paginate(self, data, **kwargs):\n\n where_query = self.build_conditions(data)\n args = []\n limit = kwargs.pop(\"end\", None)\n\n if kwargs.get(\"start\", None):\n where_query.append(\" id >=?\")\n args.append(kwargs.pop('start'))\n\n if len(where_query) > 0:\n data[\"query\"] += \" WHERE \"\n data[\"query\"] += \" AND \".join(where_query)\n cursor = self.get_connector().cursor()\n\n # a hook for ordering\n data[\"query\"] += \" ORDER BY id ASC\"\n\n if limit:\n data[\"query\"] += \" LIMIT {0}\".format(limit)\n\n cursor.execute(data[\"query\"].format(self.ressource_config['table']),\n tuple(args)\n )\n\n objs = []\n for elem in cursor.fetchall():\n objs.append(dict(zip(self.fields, elem)))\n\n return objs", "def paginate_queryset(self, queryset, request, view=None):\n self.page_number_pagination = None\n if request.GET.get('q'):\n self.page_number_pagination = CustomPageNumberPagination()\n return self.page_number_pagination.paginate_queryset(\n queryset, request, view=view\n )\n\n self.base_url = request.build_absolute_uri()\n self.ordering = self.get_ordering(request, queryset, view)\n\n self.cursor = self.decode_cursor(request)\n if self.cursor is None:\n (offset, reverse, current_position) = (0, False, None)\n else:\n (offset, reverse, current_position) = self.cursor\n\n # Cursor pagination always enforces an ordering.\n if reverse:\n queryset.add_sort(*_reverse_ordering(self.ordering))\n else:\n queryset.add_sort(*self.ordering)\n\n # If we have a cursor with a fixed position then filter by that.\n if current_position is not None:\n order = self.ordering[0]\n is_reversed = order.startswith('-')\n order_attr = order.lstrip('-')\n\n # Test for: (cursor reversed) XOR (queryset reversed)\n if self.cursor.reverse != is_reversed:\n kwargs = {order_attr: {'lt': current_position}}\n else:\n kwargs = {order_attr: {'gt': current_position}}\n\n queryset.add_pagination_filter(kwargs)\n\n # If we have an offset cursor then offset the entire page by that amount.\n # We also always fetch an extra item in order to determine if there is a\n # page following on from this one.\n queryset = queryset[offset:offset + self.page_size + 1]\n logger.info('ES query: %s', json.dumps(queryset._s.to_dict()))\n results = queryset.execute()\n\n self.page = results[:self.page_size]\n if reverse:\n self.page = list(reversed(self.page))\n\n # Determine the position of the final item following the page.\n if len(results) > len(self.page):\n has_following_position = True\n following_position = self._get_position_from_instance(\n results[-1], self.ordering\n )\n else:\n has_following_position = False\n following_position = None\n\n if reverse:\n # If we have a reverse queryset, then the query ordering was in reverse\n # so we need to reverse the items again before returning them to the user.\n\n # Determine next and previous positions for reverse cursors.\n self.has_next = (current_position is not None) or (offset > 0)\n self.has_previous = has_following_position\n if self.has_next:\n self.next_position = current_position\n if self.has_previous:\n self.previous_position = following_position\n else:\n # Determine next and previous positions for forward cursors.\n self.has_next = has_following_position\n self.has_previous = (current_position is not None) or 
(offset > 0)\n if self.has_next:\n self.next_position = following_position\n if self.has_previous:\n self.previous_position = current_position\n\n # Display page controls in the browsable API if there is more\n # than one page.\n if (self.has_previous or self.has_next) and self.template is not None:\n self.display_page_controls = True\n\n return results, self.page", "def paginate_query(self, query, current_page, rows_per_page):\n explanation = self.explain_query(query)\n\n num_rows = explanation['num_rows']\n time_cost = explanation['time_cost']\n byte_width = explanation['byte_width']\n total_pages = 1 + (num_rows / rows_per_page)\n\n # set first page that a user can navigate to\n start_page = current_page - 5\n if start_page < 1:\n start_page = 1\n\n # set the last page that a user can navigate to\n end_page = start_page + 10\n if end_page > total_pages:\n end_page = total_pages\n\n # set the offset\n offset = (current_page - 1) * rows_per_page\n\n # add limit and offset for select queries\n res = self.limit_and_offset_select_query(\n query=query, limit=rows_per_page, offset=offset)\n select_query = res['select_query']\n query = res['query']\n\n # actually make the query\n column_names = None # top columns\n rows = None # in tuple form\n\n res = self.execute_sql(query)\n\n # determine the column_names and rows\n if select_query or res['row_count'] > 0: # normal case\n column_names = [field['name'] for field in res['fields']]\n rows = res['tuples']\n else: # query just returned a bool\n column_names = ['status']\n rows = [['success' if res['status'] else res['error']]]\n\n result = {\n 'num_rows': num_rows,\n 'time_cost': time_cost,\n 'byte_width': byte_width,\n 'total_pages': total_pages,\n 'start_page': start_page,\n 'end_page': end_page,\n 'column_names': column_names,\n 'rows': rows,\n 'select_query': select_query\n }\n\n return result", "def pagination(self, lastValue=None, sortKey=\"_id\", limit=10, asc=\"ASC\"):\n comparison = \"\"\n if lastValue is not None:\n comparison = sortKey + \" > \" + sanitize_value(lastValue)\n limit = int(limit)\n if asc != \"ASC\" and asc != \"DESC\":\n asc = \"ASC\"\n results = self.__run(\n pagination_template.substitute(\n tablename=self.tablename,\n comparison=comparison,\n sortKey=sortKey,\n asc=asc,\n limit=limit\n ),\n )\n return results", "def paginate_cursor(cls, **kwargs):\n return PaginationCursor(cls.objects, **kwargs)", "def paginate_queryset(self, queryset, request, view=None):\n self.request = request\n\n try:\n self.page_number = int(request.query_params.get(\n self.page_query_param, 1\n ))\n except ValueError:\n self.page_number = 1\n\n if self.page_number > self.max_page:\n raise NotFound('Result page number too high.')\n\n offset = (self.page_number - 1) * self.page_size\n queryset = queryset[offset:offset + self.page_size]\n self.results = queryset.execute()\n\n self.page = self.results[:self.page_size]\n\n return self.results, self.page", "def _paginate_query(query, model, limit, sort_keys, marker=None,\n sort_dir=None, sort_dirs=None):\n\n if 'id' not in sort_keys:\n # TODO(justinsb): If this ever gives a false-positive, check\n # the actual primary key, rather than assuming its id\n LOG.warn(_LW('Id not in sort_keys; is sort_keys unique?'))\n\n assert (not (sort_dir and sort_dirs)) # nosec\n # nosec: This function runs safely if the assertion fails.\n\n # Default the sort direction to ascending\n if sort_dir is None:\n sort_dir = 'asc'\n\n # Ensure a per-column sort direction\n if sort_dirs is None:\n sort_dirs = [sort_dir] * 
len(sort_keys)\n\n assert (len(sort_dirs) == len(sort_keys)) # nosec\n # nosec: This function runs safely if the assertion fails.\n if len(sort_dirs) < len(sort_keys):\n sort_dirs += [sort_dir] * (len(sort_keys) - len(sort_dirs))\n\n # Add sorting\n for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):\n sort_dir_func = {\n 'asc': sqlalchemy.asc,\n 'desc': sqlalchemy.desc,\n }[current_sort_dir]\n\n try:\n sort_key_attr = getattr(model, current_sort_key)\n except AttributeError:\n raise exception.InvalidSortKey()\n query = query.order_by(sort_dir_func(sort_key_attr))\n\n default = '' # Default to an empty string if NULL\n\n # Add pagination\n if marker is not None:\n marker_values = []\n for sort_key in sort_keys:\n v = getattr(marker, sort_key)\n if v is None:\n v = default\n marker_values.append(v)\n\n # Build up an array of sort criteria as in the docstring\n criteria_list = []\n for i in range(len(sort_keys)):\n crit_attrs = []\n for j in range(i):\n model_attr = getattr(model, sort_keys[j])\n default = _get_default_column_value(\n model_attr.property.columns[0].type)\n attr = sa_sql.expression.case([(model_attr != None,\n model_attr), ],\n else_=default)\n crit_attrs.append((attr == marker_values[j]))\n\n model_attr = getattr(model, sort_keys[i])\n default = _get_default_column_value(\n model_attr.property.columns[0].type)\n attr = sa_sql.expression.case([(model_attr != None,\n model_attr), ],\n else_=default)\n if sort_dirs[i] == 'desc':\n crit_attrs.append((attr < marker_values[i]))\n elif sort_dirs[i] == 'asc':\n crit_attrs.append((attr > marker_values[i]))\n else:\n raise ValueError(_(\"Unknown sort direction, \"\n \"must be 'desc' or 'asc'\"))\n\n criteria = sa_sql.and_(*crit_attrs)\n criteria_list.append(criteria)\n\n f = sa_sql.or_(*criteria_list)\n query = query.filter(f)\n\n if limit is not None:\n query = query.limit(limit)\n\n return query", "def paging_results(self):\n\n return 30", "def paginate(self, request, queryset, max_results):\n page = request.GET.get('page')\n paginator = Paginator(queryset, max_results)\n try:\n return paginator.page(page)\n except PageNotAnInteger:\n return paginator.page(1)\n except EmptyPage:\n return paginator.page(paginator.num_pages)", "def paginate_queryset(self, queryset, page_size):\n limit = self.kwargs.get('limit') or self.request.REQUEST.get('limit')\n try:\n limit = int(limit)\n except (TypeError, ValueError):\n limit = page_size\n limit = min(limit, getattr(settings, \"MAX_LIMIT_PER_PAGE\", 500))\n orphans = self.kwargs.get('orphans') or self.request.REQUEST.get('orphans') or getattr(settings, \"ORPHANS\", 0)\n paginator = self.get_paginator(queryset, limit, orphans=orphans, allow_empty_first_page=self.get_allow_empty())\n offset = self.kwargs.get('offset') or self.request.REQUEST.get('offset', 0)\n try:\n page = paginator.page_by_offset(offset)\n except OffsetNotAnInteger:\n # If page is not an integer, deliver first page.\n page = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 
9999), deliver last page of results.\n page = paginator.page(paginator.num_pages)\n return (paginator, page, page.object_list, page.has_other_pages())", "def get_list(self, **kwargs):\n self.fields = self.get_fields(**kwargs)\n fields = \", \".join(self.fields)\n kwargs[\"query\"] = 'SELECT {0}'.format(fields)\n start = kwargs.pop(\"offset\", None)\n end = kwargs.pop(\"count\", None)\n data = self.filter(**kwargs)\n\n return self.paginate(data, start=start, end=end)", "def _generate_paginate_query(context, session, paginate_type, marker,\n limit, sort_keys, sort_dirs, filters,\n offset=None\n ):\n get_query, process_filters, get = PAGINATION_HELPERS[paginate_type]\n\n sort_keys, sort_dirs = process_sort_params(sort_keys,\n sort_dirs,\n default_dir='desc')\n query = get_query(context, session=session)\n\n if filters:\n query = process_filters(query, filters)\n if query is None:\n return None\n\n marker_object = None\n if marker is not None:\n marker_object = get(context, marker, session)\n\n return sqlalchemyutils.paginate_query(query, paginate_type, limit,\n sort_keys,\n marker=marker_object,\n sort_dirs=sort_dirs,\n offset=offset)", "def paginate(self, page_num=1, page_size=100):\n from .database import Page\n count = self.count()\n pages_total = int(ceil(count / float(page_size)))\n if page_num == -1:\n page_num = pages_total\n elif page_num < 1:\n raise ValueError('Invalid page number: %d' % page_num)\n offset = (page_num - 1) * page_size\n return Page(\n objects=list(self[offset : offset + page_size]),\n number_of_objects=count,\n pages_total=pages_total,\n number=page_num,\n page_size=page_size\n )", "def get_all(self, start_at, limit, order=None):", "def page(self):\r\n limit = self.get_limit()\r\n offset = self.get_offset()\r\n count = self.get_count()\r\n objects = self.get_slice(limit, offset)\r\n meta = {\r\n 'offset': offset,\r\n 'limit': limit,\r\n 'total_count': count}\r\n\r\n if limit:\r\n meta['previous'] = self.get_previous(limit, offset)\r\n meta['next'] = self.get_next(limit, offset, count)\r\n\r\n return {\r\n self.collection_name: objects, 'meta': meta}", "def paginate_queryset(self, queryset, request, view=None):\n # Needed for other methods of this class.\n self.request = request\n\n page_size = self.get_page_size(request)\n page_number = request.query_params.get(self.page_query_param, 1)\n\n original_page_number = page_number\n page_number = self._get_page_number(page_number)\n\n if page_number <= 0:\n msg = self.invalid_page_message.format(\n page_number=original_page_number,\n message=_(\"Invalid page\"),\n )\n raise NotFound(msg)\n\n start = (page_number - 1) * page_size\n end = page_number * page_size\n\n result = []\n total_count = 0\n total_pages = 1\n\n if queryset:\n result = queryset[start:end].execute()\n total_count = result.hits.total[\"value\"]\n hits = max(1, total_count)\n total_pages = ceil(hits / page_size)\n\n if total_pages > 1 and self.template is not None:\n # The browsable API should display pagination controls.\n self.display_page_controls = True\n\n # Needed for other methods of this class.\n self.page = PaginatorPage(\n page_number=page_number,\n total_pages=total_pages,\n count=total_count,\n )\n\n return result", "def _paginatedRequest(allPages, *args):\n data = []\n currentPage = 0\n while True:\n newData = Gw2Spidy._request(*(args + (str(currentPage),)))\n if not allPages:\n return newData['results']\n data.extend(newData['results'])\n currentPage = currentPage + 1\n if newData['page'] == newData['last_page']:\n break\n return data", "def 
_make_paged_query(\n conn, search_base, search_scope, ad_query, attr_list, page_size\n):\n result = []\n page_result_control = SimplePagedResultsControl(\n size=page_size,\n cookie=''\n )\n\n msgid = conn.search_ext(\n search_base,\n search_scope,\n ad_query,\n attr_list,\n serverctrls=[page_result_control],\n )\n\n while True:\n r_type, r_data, r_msgid, serverctrls = conn.result3(msgid)\n result.extend(r_data)\n\n if serverctrls:\n if serverctrls[0].cookie:\n page_result_control.size = page_size\n page_result_control.cookie = serverctrls[0].cookie\n\n msgid = conn.search_ext(\n search_base,\n search_scope,\n ad_query,\n attr_list,\n serverctrls=[page_result_control],\n )\n else:\n break\n\n return result", "def paginate_queryset(self, queryset):\n if self.paginator is None:\n return None\n return self.paginator.paginate_queryset(\n queryset, self.request, view=self, count=self.data_count\n )", "def paginate_queryset(self, queryset, request, view=None):\n self.count = queryset.count()\n self.request = request\n try:\n self.page, self.next_page, self.has_next = queryset.fetch_page(\n self.get_page_size(request), start_cursor=Cursor(urlsafe=self.get_page_token()))\n except InvalidPage:\n raise NotFound('Requested page not found')\n except BadValueError as err:\n raise BadRequest(str(err))\n\n return list(self.page)", "def _simple_paginate(data_frame, start=None, end=None, orders=()):\n if orders:\n sort, ascending = _apply_sorting(orders)\n data_frame = data_frame.sort_values(by=sort, ascending=ascending)\n\n return data_frame[start:end]", "def get_slice(self, limit, offset):\n # Always get the first page\n return super(NoLimitPaginator, self).get_slice(0, 0)", "def _paginate_issues_with_cursor(page_url,\n request,\n query,\n cursor,\n limit,\n template,\n extra_nav_parameters=None,\n extra_template_params=None):\n issues, next_cursor, has_more = query.fetch_page(limit, start_cursor=cursor)\n nav_parameters = {}\n if extra_nav_parameters:\n nav_parameters.update(extra_nav_parameters)\n nav_parameters['cursor'] = next_cursor.urlsafe() if next_cursor else ''\n\n params = {\n 'limit': limit,\n 'cursor': nav_parameters['cursor'],\n 'nexttext': 'Next',\n }\n if has_more:\n params['next'] = _url(page_url, **nav_parameters)\n if extra_template_params:\n params.update(extra_template_params)\n return _inner_paginate(request, issues, template, params)", "def _paginate_issues_with_cursor(page_url,\n request,\n query,\n cursor,\n limit,\n template,\n extra_nav_parameters=None,\n extra_template_params=None):\n issues, next_cursor, has_more = query.fetch_page(limit, start_cursor=cursor)\n nav_parameters = {}\n if extra_nav_parameters:\n nav_parameters.update(extra_nav_parameters)\n nav_parameters['cursor'] = next_cursor.urlsafe() if next_cursor else ''\n\n params = {\n 'limit': limit,\n 'cursor': nav_parameters['cursor'],\n 'nexttext': 'Next',\n }\n if has_more:\n params['next'] = _url(page_url, **nav_parameters)\n if extra_template_params:\n params.update(extra_template_params)\n return _inner_paginate(request, issues, template, params)", "def get_paginate_data(self, *args, **kwargs):\n pass", "def paginated(self) -> global___Snippet.Paginated:", "def paginate(self, page=1, per_page=20, item_schema: marshmallow.Schema = None):\n items = self.limit(per_page).offset((page - 1) * per_page).all()\n\n if not items and page != 1:\n return utils.Pagination(self, page, per_page, 0, [], item_schema)\n\n if page == 1 and len(items) < per_page:\n total = len(items)\n else:\n total = self.order_by(None).count()\n\n 
return utils.Pagination(self, page, per_page, total, items, item_schema)", "def paginate(self, *args, **kwargs):\n result = {}\n result.update(self.get_paginate_data(*args, **kwargs))\n result.update(self.get_objects_data())\n return result", "def paginate_queryset(self, queryset, request, view=None):\n self.count = self.get_count(queryset)\n self.start_index = 0\n self.end_index = self.start_index + self.page_size - 1\n\n # TODO: this logic is repeated below...\n if self.end_index > self.count - 1:\n self.end_index = self.count - 1 if self.count else 0\n\n range_string = request.GET.get(self.range_query_param)\n\n if range_string:\n try:\n page_range = json.loads(range_string)\n except json.JSONDecodeError:\n return None\n\n if len(page_range) != 2:\n return None\n\n self.start_index, self.end_index = [pagination._positive_int(x) for x in page_range]\n\n if self.end_index > self.count - 1:\n self.end_index = self.count - 1 if self.count else 0\n\n if self.start_index > self.end_index:\n self.start_index = self.end_index\n\n return list(queryset[self.start_index:self.end_index + 1])" ]
[ "0.7076282", "0.70332694", "0.7004154", "0.6953918", "0.687327", "0.68525463", "0.6833838", "0.67465377", "0.6690049", "0.664417", "0.65346265", "0.6503121", "0.6495013", "0.6476076", "0.6464674", "0.64421767", "0.6423931", "0.64225787", "0.6421544", "0.64159894", "0.63912034", "0.63901407", "0.63866115", "0.6381123", "0.6381123", "0.6376573", "0.6365631", "0.62649393", "0.6257209", "0.6254691" ]
0.70889175
0
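The positive document in the row above paginates a date-ordered query with limit/offset and hands back a marker for the next page. The same idea, reduced to plain Python lists so it runs without SQLAlchemy, is sketched below; the record shape and helper name are assumptions for illustration only.

from datetime import date
from typing import List, Optional, Sequence, Tuple

def paginate(records: Sequence[dict], marker: Optional[int], limit: int) -> Tuple[List[dict], Optional[int]]:
    """Order by 'updated' descending, then slice with offset/limit as in the snippet above."""
    offset = marker or 0
    ordered = sorted(records, key=lambda r: r["updated"], reverse=True)  # newest first
    page = ordered[offset:offset + limit]
    # Only advertise a next page when this page came back full, mirroring the snippet's marker logic.
    next_marker = None if len(page) < limit else offset + limit
    return page, next_marker

if __name__ == "__main__":
    rows = [{"id": i, "updated": date(2024, 1, i + 1)} for i in range(5)]
    page, marker = paginate(rows, marker=None, limit=2)
    print([r["id"] for r in page], marker)   # [4, 3] 2
    page, marker = paginate(rows, marker=marker, limit=2)
    print([r["id"] for r in page], marker)   # [2, 1] 4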
Split SubFind FOF groups into different subsets This includes the full FOF group, the main (background/central) subhalo, satellite subhaloes, and diffuse material This allows for more flexible comparison between other algorithms, which may include different subsets of particles in haloes and/or subhaloes
def splitSFgroups(halodata, pids_fof, coords_fof, vels_fof, pids, coords, vels, nhalo, nsubhalo): firstSub = halodata['Group/GroupFirstSub'] numSubs = halodata['Group/GroupNsubs'] group_offset = halodata['Group/GroupOffsetType'][:,1] group_npart = halodata['Group/GroupLen'] sub_npart = halodata['Subhalo/SubhaloLen'] sub_offset = halodata['Subhalo/SubhaloOffsetType'][:,1] central_mask = np.where(firstSub != -1)[0] # FOF groups with a central subhalo ncentrals = central_mask.size sat_mask = np.where(halodata['Subhalo/SubhaloRankInGr'] > 0)[0] nsats = sat_mask.size # Arrays to hold different subsets of FOF group particles pids_central = np.empty(nhalo, dtype = 'object') coords_central = np.empty(nhalo, dtype = 'object') vels_central = np.empty(nhalo, dtype = 'object') pids_sat = np.empty(nsubhalo, dtype = 'object') coords_sat = np.empty(nsubhalo, dtype = 'object') vels_sat = np.empty(nsubhalo, dtype = 'object') pids_background = np.empty(nhalo, dtype = 'object') coords_background = np.empty(nhalo, dtype = 'object') vels_background = np.empty(nhalo, dtype = 'object') pids_diffuse = np.empty(nhalo, dtype = 'object') coords_diffuse = np.empty(nhalo, dtype = 'object') vels_diffuse = np.empty(nhalo, dtype = 'object') # Split FOF group into central subhalo, diffuse material, and # background (central + diffuse) for i in range(ncentrals): ihalo = central_mask[i] # Central subhalo IDs and coordinates id_arr = pids_fof[ihalo][0:sub_npart[firstSub[ihalo]]] coord_arr = coords_fof[ihalo][0:sub_npart[firstSub[ihalo]]] vel_arr = vels_fof[ihalo][0:sub_npart[firstSub[ihalo]]] pids_central[ihalo] = id_arr coords_central[ihalo] = coord_arr vels_central[ihalo] = vel_arr # Now add in diffuse particles (not bound in substructure) lastSub = firstSub[ihalo] + numSubs[ihalo] - 1 id_unbound = np.array([part for part in pids[(sub_offset[lastSub] + sub_npart[lastSub]): (group_offset[ihalo] + group_npart[ihalo])]]) coord_unbound = np.array([coord for coord in coords[(sub_offset[lastSub] + sub_npart[lastSub]): (group_offset[ihalo] + group_npart[ihalo])]]) vel_unbound = np.array([vel for vel in vels[(sub_offset[lastSub] + sub_npart[lastSub]): (group_offset[ihalo] + group_npart[ihalo])]]) # There are diffuse particles in this FOF group if id_unbound.size > 0: # Populate diffuse particle arrays pids_diffuse[ihalo] = id_unbound coords_diffuse[ihalo] = coord_unbound vels_diffuse[ihalo] = vel_unbound # Create background particle (central subhalo + diffuse) arrays id_arr = np.concatenate((id_arr, id_unbound)) coord_arr = np.concatenate((coord_arr, coord_unbound)) vel_arr = np.concatenate((vel_arr, vel_unbound)) # Populate background particle arrays pids_background[ihalo] = id_arr coords_background[ihalo] = coord_arr vels_background[ihalo] = vel_arr # Split FOF into (satellite) subhaloes linds = sub_offset uinds = linds + sub_npart for i in range(nsats): isub = sat_mask[i] pids_sat[isub] = np.array([part for part in pids[linds[isub]:uinds[isub]]]) coords_sat[isub] = np.array([coord for coord in coords[linds[isub]:uinds[isub]]]) vels_sat[isub] = np.array([vel for vel in vels[linds[isub]:uinds[isub]]]) # Construct FOF components dictionary fofdata = {} fofdata['Background/PIDs'] = pids_background fofdata['Background/Coordinates'] = coords_background fofdata['Background/Velocities'] = vels_background fofdata['Central/PIDs'] = pids_central fofdata['Central/Coordinates'] = coords_central fofdata['Central/Velocities'] = vels_central fofdata['Diffuse/PIDs'] = pids_diffuse fofdata['Diffuse/Coordinates'] = coords_diffuse 
fofdata['Diffuse/Velocities'] = vels_diffuse fofdata['Satellite/PIDs'] = pids_sat fofdata['Satellite/Coordinates'] = coords_sat fofdata['Satellite/Velocities'] = vels_sat return fofdata
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def splitVRdata(partdata, halodata, pids_halos, pids, coords, vels, nhalo, nsubhalo):\n\n\t# Arrays to hold different subsets of FOF group particles\n\tpids_background = np.array(pids_halos[:nhalo], dtype = 'object')\n\tcoords_background = np.empty(nhalo, dtype = 'object')\n\tvels_background = np.empty(nhalo, dtype = 'object')\n\tpids_sub = np.array(pids_halos[nhalo:], dtype = 'object')\n\tcoords_sub = np.empty(nsubhalo, dtype = 'object')\n\tvels_sub = np.empty(nsubhalo, dtype = 'object')\n\n\t# Create analogues to pids_halos (i.e. array where each\n\t# entry is the coordinates and velocities for each particle\n\t# in that (sub)halo)\n\tpids_all = np.concatenate(pids_halos)\n\tpid_idx = np.argsort(pids)\n\tpid_sorted = pids[pid_idx]\n\tmatch_idx = np.searchsorted(pid_sorted, pids_all)\n\tidxs = pid_idx[match_idx]\n\tcoords_halos = coords[idxs]\n\tvels_halos = vels[idxs]\n\n\t# Indices that mark the first and last particle in each (sub)halo\n\tlinds = partdata['Offset'] + partdata['Offset_unbound']\n\tuinds = linds + partdata['Npart']\n\n\t# (Field) haloes\n\tfor ihalo in range(nhalo):\n\t\tcoords_background[ihalo] = np.array([coord for coord in coords_halos[linds[ihalo]:uinds[ihalo]]])\n\t\tvels_background[ihalo] = np.array([vel for vel in vels_halos[linds[ihalo]:uinds[ihalo]]])\n\n\t# Subhaloes\n\tfor isub in range(nhalo, nhalo + nsubhalo):\n\t\tidx = isub - nhalo\n\t\tcoords_sub[idx] = np.array([coord for coord in coords_halos[linds[isub]:uinds[isub]]])\n\t\tvels_sub[idx] = np.array([vel for vel in vels_halos[linds[isub]:uinds[isub]]])\n\n\t# Get PIDs of all subhaloes hosted by each field halo to create\n\t# arrays containing ALL particles in the FOF group\n\thostHaloID = halodata['hostHaloID']\n\tpids_fof = np.empty(nhalo, dtype = 'object')\n\tcoords_fof = np.empty(nhalo, dtype = 'object')\n\tvels_fof = np.empty(nhalo, dtype = 'object')\n\tfor ihalo in range(nhalo):\n\t\tsubs = np.where(hostHaloID == ihalo + 1)[0] - nhalo\n\t\tif subs.size > 0:\n\t\t\t#print(pids_sub[subs])\n\t\t\t#print(np.concatenate(pids_sub[subs]))\n\t\t\tpids_fof[ihalo] = np.concatenate((pids_background[ihalo], np.concatenate(pids_sub[subs])))\n\t\t\tcoords_fof[ihalo] = np.concatenate((coords_background[ihalo], np.concatenate(coords_sub[subs])))\n\t\t\tvels_fof[ihalo] = np.concatenate((vels_background[ihalo], np.concatenate(vels_sub[subs])))\n\t\telse: # This halo hosts no subhaloes\n\t\t\tpids_fof[ihalo] = pids_background[ihalo]\n\t\t\tcoords_fof[ihalo] = coords_background[ihalo]\n\t\t\tvels_fof[ihalo] = vels_background[ihalo]\n\n\t# Construct FOF components dictionary\n\tfofdata = {}\n\tfofdata['FOF/PIDs'] = pids_fof\n\tfofdata['FOF/Coordinates'] = coords_fof\n\tfofdata['FOF/Velocities'] = vels_fof\n\tfofdata['Background/PIDs'] = pids_background\n\tfofdata['Background/Coordinates'] = coords_background\n\tfofdata['Background/Velocities'] = vels_background\n\tfofdata['Satellite/PIDs'] = pids_sub\n\tfofdata['Satellite/Coordinates'] = coords_sub\n\tfofdata['Satellite/Velocities'] = vels_sub\n\n\treturn fofdata", "def getSFpartData(opt, z, halodata, pids, coords, vels, nhalo, nsubhalo):\n\n\tpids_fof = np.empty(nhalo, dtype = 'object')\n\tcoords_fof = np.empty(nhalo, dtype = 'object')\n\tvels_fof = np.empty(nhalo, dtype = 'object')\n\n\t# Lower and upper indices (first and last particle) for each halo\n\tlinds = halodata['Group/GroupOffsetType'][:,1]\n\tuinds = linds + halodata['Group/GroupLen']\n\n\t# PIDs, coords, vels for all particles in each FOF group (including substructure)\n\tfor ihalo in 
range(nhalo):\n\t\tpids_fof[ihalo] = np.array([part for part in pids[linds[ihalo]:uinds[ihalo]]])\n\t\tcoords_fof[ihalo] = np.array([coord for coord in coords[linds[ihalo]:uinds[ihalo]]])\n\t\tvels_fof[ihalo] = np.array([vel for vel in vels[linds[ihalo]:uinds[ihalo]]])\n\n\t# Fix wraparound for haloes located near box edges\n\tcommon.fixWraparound(opt, coords_fof, z)\n\n\treturn pids_fof, coords_fof, vels_fof", "def subdivide_mesh(obj, n_subdiv=2):\n thisfunc = thisfile + '->subdivide_mesh()'\n\n scene = bpy.context.scene\n\n # All objects need to be in 'OBJECT' mode to apply modifiers -- maybe a Blender bug?\n for o in bpy.data.objects:\n scene.objects.active = o\n bpy.ops.object.mode_set(mode='OBJECT')\n o.select = False\n obj.select = True\n scene.objects.active = obj\n\n bpy.ops.object.modifier_add(type='SUBSURF')\n obj.modifiers['Subsurf'].subdivision_type = 'CATMULL_CLARK'\n obj.modifiers['Subsurf'].levels = n_subdiv\n obj.modifiers['Subsurf'].render_levels = n_subdiv\n\n # Apply modifier\n bpy.ops.object.modifier_apply(modifier='Subsurf', apply_as='DATA')\n\n # Scene update necessary, as matrix_world is updated lazily\n scene.update()\n\n logging.info(\"%s: Subdivided mesh of '%s'\", thisfunc, obj.name)", "def split_test_data():\n outputvis = ROOT_DIR + 'test_imaging/test_split_1eb.ms'\n targ = TARGETS['NGC1333IRAS4A']\n spw = '{0}:236~276'.format(SPWS[targ.name]['NH3_11'].spw_id)\n split(\n vis=get_vis_name(targ),\n outputvis=outputvis,\n field=targ.name,\n spw=spw,\n )", "def split(self, fractions=[0.8, 0.2]):\n\n if sum(fractions) > 1.0 or sum(fractions) <= 0:\n raise ValueError(\"the sum of fractions argument should be between 0 and 1\")\n\n # random indices\n idx = np.arange(self.n_samples)\n np.random.shuffle(idx)\n\n # insert zero\n fractions.insert(0, 0)\n\n # gte limits of the subsets\n limits = (np.cumsum(fractions) * self.n_samples).astype(np.int32)\n\n subsets = []\n # create output dataset\n for i in range(len(fractions) - 1):\n subsets.append(\n Dataset(self.inputs[idx[limits[i]:limits[i + 1]]], self.targets[idx[limits[i]:limits[i + 1]]]))\n\n return subsets", "def split_data(self, verbose=False):\n # group sample by patient and body part\n tmp = self.data_info.groupby(['patientID', 'body_part']).max()\n # get the index (i.e. patient and bodypart) where none of the body part XR of a given patient are abnormal\n idx_list_normal = tmp[tmp.body_part_abnormal == 0].index.to_list()\n # get the index (i.e. patient and bodypart) where at least one but not all of the body part XR of a given patient are abnormal\n idx_list_mixt = tmp[tmp.body_part_abnormal == 0.5].index.to_list()\n # get the index (i.e. patient and bodypart) where all one of the body part XR of a given patient are abnormal\n idx_list_abnormal = tmp[tmp.body_part_abnormal == 1].index.to_list()\n total = len(idx_list_normal)+len(idx_list_mixt)+len(idx_list_abnormal)\n train_size = self.train_frac*total\n assert train_size < len(idx_list_normal), f'There are not enough normal sample for the given train_frac : {self.train_frac}. 
\\\n There are {len(idx_list_normal)} normal sample over {total} total samples.'\n valid_size = (1-self.train_frac)*0.5*total\n test_size = (1-self.train_frac)*0.5*total\n # randomly pick (1-ratio_known_abnormal)*train_frac*total from the normal index for the train set\n train_idx_normal, remain = train_test_split(idx_list_normal, \\\n train_size=int((1-self.ratio_known_abnormal)*train_size),\\\n random_state=self.random_state)\n # split the rest equally in the validation and test set\n valid_idx_normal, test_idx_normal = train_test_split(remain, test_size=0.5, random_state=self.random_state)\n # add ratio_known_abnormal*train_frac*total from the abnormal index\n if self.ratio_known_abnormal == 0.0:\n train_idx_abnormal, remain = [], idx_list_abnormal\n else:\n train_idx_abnormal, remain = train_test_split(idx_list_abnormal, \\\n train_size=int(self.ratio_known_abnormal*train_size),\\\n random_state=self.random_state)\n # split the rest equally in the validation and test set\n valid_idx_abnormal, test_idx_abnormal = train_test_split(remain, test_size=0.5, random_state=self.random_state)\n # split the mixt between test and validation and consider them as abnormal patients bodypart\n valid_idx_mixt, test_idx_mixt = train_test_split(idx_list_mixt, test_size=0.5, random_state=self.random_state)\n valid_idx_abnormal += valid_idx_mixt\n test_idx_abnormal += test_idx_mixt\n # get the known and unknown index for each sets\n # get a fraction of normal known\n if self.ratio_known_normal == 0.0:\n train_idx_known, train_idx_unknown = [], train_idx_normal\n valid_idx_known, valid_idx_unknown = [], valid_idx_normal\n test_idx_known, test_idx_unknown = [], test_idx_normal\n else:\n train_idx_known, train_idx_unknown = train_test_split(train_idx_normal, \\\n train_size=int(self.ratio_known_normal*train_size),\\\n random_state=self.random_state)\n valid_idx_known, valid_idx_unknown = train_test_split(valid_idx_normal, \\\n train_size=int(self.ratio_known_normal*valid_size),\\\n random_state=self.random_state)\n test_idx_known, test_idx_unknown = train_test_split(test_idx_normal, \\\n train_size=int(self.ratio_known_normal*test_size), \\\n random_state=self.random_state)\n # get the abnormal known\n # all abnormal in train are known\n train_idx_known += train_idx_abnormal\n if self.ratio_known_abnormal == 0.0:\n valid_idx_unknown += valid_idx_abnormal\n test_idx_unknown += test_idx_abnormal\n else:\n valid_idx_known_abnormal, valid_idx_unknown_abnormal = train_test_split(valid_idx_abnormal, \\\n train_size=int(self.ratio_known_abnormal*valid_size), \\\n random_state=self.random_state)\n valid_idx_known += valid_idx_known_abnormal\n valid_idx_unknown += valid_idx_unknown_abnormal\n test_idx_known_abnormal, test_idx_unknown_abnormal = train_test_split(test_idx_abnormal, \\\n train_size=int(self.ratio_known_abnormal*test_size),\\\n random_state=self.random_state)\n test_idx_known += test_idx_known_abnormal\n test_idx_unknown += test_idx_unknown_abnormal\n\n # get the subsample dataframe with semi-label\n train_df = self.generate_semisupervized_label(train_idx_known, train_idx_unknown)\n valid_df = self.generate_semisupervized_label(valid_idx_known, valid_idx_unknown)\n test_df = self.generate_semisupervized_label(test_idx_known, test_idx_unknown)\n # shuffle the dataframes\n self.subsets['train'] = train_df.sample(frac=1, random_state=self.random_state).reset_index(drop=True)\n self.subsets['valid'] = valid_df.sample(frac=1, random_state=self.random_state).reset_index(drop=True)\n self.subsets['test'] = 
test_df.sample(frac=1, random_state=self.random_state).reset_index(drop=True)\n # Print summary\n if verbose:\n self.print_stat()", "def GetMatchedSubContourLists(\n scListRef,\n scList,\n allValsByFrame,\n orderOfSCsByValue,\n splitLength=1,\n fixedNumInteriorPoints=None,\n):\n ## NOT DONE! MERGE LATER??\n return simplifiedSCListRef, simplifiedSCList", "def _split(self, split, randomise=False, **kwargs):\r\n # Copy split to prevent modifying outside arguments\r\n split = split.copy()\r\n # Compute total\r\n total = sum(split.values())\r\n # If split contains floats, convert to integers\r\n if isinstance(total, float):\r\n assert_msg = 'Not enough data! ' \\\r\n + f'Split requires a total of {total*100}%. ' \\\r\n + 'Split should not exceed 100%.'\r\n assert total <= 1, assert_msg\r\n # Add 'rest' subset if not all data is used in split\r\n if total < 1:\r\n split['rest'] = 1 - total\r\n split = self._float_split_to_int(split)\r\n total = sum(split.values())\r\n # Create subsets based off integer values\r\n if isinstance(total, int):\r\n assert_msg = 'Not enough data! ' \\\r\n + f'Split requires a total of {total} data entries ' \\\r\n + f'but only {len(self.data)} are available.'\r\n assert total <= len(self.data), assert_msg\r\n # Add 'rest' subset if not all data is used in split\r\n if total < len(self.data):\r\n split['rest'] = len(self.data) - total\r\n # Create subsets\r\n index = 0\r\n for name, length in split.items():\r\n subset_name = f'{self.name}.{name}'\r\n subset_data = self.data[index:index + length]\r\n subset_seed = self.seed\r\n if self.seed is not None:\r\n subset_seed += sum([ord(c) for c in name]) + length\r\n subset = self._make_subset(subset_name,\r\n subset_data,\r\n randomise=randomise,\r\n seed=subset_seed,\r\n **kwargs\r\n )\r\n setattr(self, name, subset)\r\n index += length\r\n # Replace data with references to subsets\r\n self.data = []\r\n for name in split.keys():\r\n self.data.append(getattr(self, name, None))\r\n # Indicate that this is a superset\r\n self.is_superset = True", "def _get_subgroups(self):\n groups = [] # array of arrays\n for i in range(self.filter.shape[0]):\n for j in range(i):\n if self.filter[i][j]:\n if len(groups) < 1:\n groups.append([j, i])\n continue\n found = False\n for group_i, _ in enumerate(groups):\n if i in groups[group_i]:\n if j not in groups[group_i]:\n groups[group_i].append(j)\n found = True\n elif j in groups[group_i]:\n if i not in groups[group_i]:\n groups[group_i].append(i)\n found = True\n if not found:\n groups.append([i, j])\n return groups", "def test_n_group_split(self):\n # Test 2 groups like HalfSplitter first\n hs = NGroupPartitioner(2)\n\n for isreversed, splitter in enumerate((hs, hs)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in hs.generate(self.data) ]\n self.assertTrue(len(splits) == 2)\n\n for i, p in enumerate(splits):\n self.assertTrue( len(p) == 2 )\n self.assertTrue( p[0].nsamples == 50 )\n self.assertTrue( p[1].nsamples == 50 )\n\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n\n # check if it works on pure odd and even chunk ids\n moresplits = [ list(spl.generate(p)) for p in 
hs.generate(splits[0][0])]\n\n for split in moresplits:\n self.assertTrue(split[0] != None)\n self.assertTrue(split[1] != None)\n\n # now test more groups\n s5 = NGroupPartitioner(5)\n\n # get the splits\n for isreversed, s5splitter in enumerate((s5, s5)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in s5splitter.generate(self.data) ]\n\n # must have 10 splits\n self.assertTrue(len(splits) == 5)\n\n # check split content\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [2, 3, 4, 5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [2, 3])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 4, 5, 6, 7, 8, 9])\n # ...\n assert_array_equal(splits[4][1-isreversed].sa['chunks'].unique,\n [8, 9])\n assert_array_equal(splits[4][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4, 5, 6, 7])\n\n\n # Test for too many groups\n def splitcall(spl, dat):\n return list(spl.generate(dat))\n s20 = NGroupPartitioner(20)\n self.assertRaises(ValueError,splitcall,s20,self.data)", "def split(self):\n sub_images = []\n\n for region in regionprops(self.cells):\n minr, minc, maxr, maxc = region.bbox\n sub_image = self.image_raw[max(0, minr - 10):maxr, max(0, minc - 10):maxc, :]\n\n sub_images.append(FQimage(data=sub_image))\n\n return sub_images", "def group_boundary_elements(self,force=False):\n if force or self._bc_groups is None:\n # This part is the same as in waq_scenario\n g=self.grid()\n if g is None:\n return super(SunHydro,self).group_boundary_elements()\n\n self.infer_2d_elements()\n\n poi=self.pointers\n bc_sel = (poi[:,0]<0)\n bc_elts = np.unique(self.seg_to_2d_element[ poi[bc_sel,1]-1 ])\n\n groups=np.zeros(self.n_2d_elements,self.group_dtype)\n groups['id']-=1\n\n gforce=forcing.GlobalForcing(sun=self.sun)\n sun_g=self.sun.grid()\n\n def node_sun_to_g(n):\n return g.select_nodes_nearest(sun_g.points[n])\n\n # map group id as returned by this method to a dict with items \n # like which shapefile did it come from, index in that shapefile,\n # and fields from the feature.\n # note that it is possible for two boundary flows to enter the same\n # cell - only the first will be marked, with the second feature\n # skipped in both groups and bc_group_mapping\n # self.bc_group_mapping={} \n ngroups=0\n\n for flow_shp in self.flow_shps:\n flows=wkb2shp.shp2geom(flow_shp)\n sun_groups=gforce.add_groups_bulk(defs=flows)\n\n for feat_id in range(len(flows)):\n grp=sun_groups[feat_id]\n if grp.cell_based():\n sun_cells=grp.cells\n cells=[]\n for cell in sun_cells:\n g_nodes=[node_sun_to_g(n)\n for n in sun_g.cells[cell]]\n cells.append( g.nodes_to_cell(g_nodes) )\n\n cells=np.array(cells)\n else:\n # for the purposes of bc_groups, figure out the\n # respective cells\n cells=[]\n for sun_e in grp.edges:\n sun_e_nodes=sun_g.edges[sun_e,:2]\n e=g.nodes_to_edge(node_sun_to_g(sun_e_nodes[0]),\n node_sun_to_g(sun_e_nodes[1]))\n assert e is not None\n cells.append(g.edge_to_cells(e))\n cells=np.array(cells)\n cells=cells[cells>=0]\n\n details=dict(flow_shp=flow_shp,\n feat_id=feat_id)\n for n in flows.dtype.names:\n details[n]=flows[n][feat_id]\n\n # limit this to cells which are not already marked, but *are*\n # in bc_elts\n cells=[c for c in cells\n if (groups['id'][c]<0) and (c in bc_elts) ] \n if len(cells):\n groups['id'][cells] = ngroups\n 
groups['name'][cells]=details.get('name','group %d'%ngroups)\n groups['attrs'][cells] = details\n # self.bc_group_mapping[ngroups]=details\n ngroups+=1\n else:\n self.log.warning(\"Feature %d from %s (name=%s) overlaps another flow or wasn't\" \n \" found as a boundary, \"\n \" and will be skipped\"%(feat_id,flow_shp,\n details.get('name','n/a')))\n\n # anything not marked already then gets grouped by adjacency and marked\n # the same way as before - see waq_scenario.py for more comments\n def adjacent_cells(g,c,candidates):\n a=list(g.cell_to_adjacent_boundary_cells(c))\n b=list(g.cell_to_cells(c))\n nbrs=filter(lambda cc: cc in candidates,a+b)\n return np.unique(nbrs)\n def trav(c,mark):\n groups['id'][c]=mark\n groups['name'][c]=\"group %d\"%mark\n for nbr in adjacent_cells(g,c,bc_elts):\n if groups['id'][nbr]<0:\n trav(nbr,mark)\n\n ngroups=1+groups['id'].max()\n\n for bc_elt in bc_elts:\n if groups['id'][bc_elt]<0:\n # This is the part where if there are other cells \n # which are part of the same forcing group, they should\n # all get this value\n trav(bc_elt,ngroups)\n ngroups+=1\n self._bc_groups=groups\n return self._bc_groups", "def createSubListOfSpeciesFeatures(self):\n return _libsbml.MultiSpeciesPlugin_createSubListOfSpeciesFeatures(self)", "def split_bottleExpansion((nuW,nuEF,nuEB,TE), (n1,n2), pts): \n #Define grid to use\n xx = yy = dadi.Numerics.default_grid(pts)\n \n #phi for equilibrium ancestral population\n phi = dadi.PhiManip.phi_1D(xx)\n \n \n #The ancestral population splits into the West and East, and the East undergoes a second bottleneck followed by an exponential population size change.\n phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)\n \n #Function for the Eastern population describing an second bottleneck followed by exponential population growth until present\n nuE_func = lambda t: nuEB*numpy.exp(numpy.log(nuEF/nuEB) * t/TE)\n\n # function for growth in west\n nuW_func = lambda t: numpy.exp(numpy.log(nuW) * t/TE)\n\n # integrate the two populations\n phi = dadi.Integration.two_pops(phi,xx,TE, nu1=nuW_func, nu2=nuE_func)\n \n #Return frequency spectrum\n fs = dadi.Spectrum.from_phi(phi, (n1,n2), (xx,yy))\n return fs", "def get_subgroups(group, characteristic):\n # Get the dataframe and group by the characteristic\n dataframe = group['dataframe']\n groupby = dataframe.groupby(by=characteristic)\n\n # Create the new subgroups\n subgroups = []\n hca = OntologyConversorHCA()\n scea = OntologyConversorSCAE()\n for value, subgroup in groupby:\n # If the group does not have enough cells skip it\n if len(subgroup) < 25:\n continue\n value_0 = parse_concrete(value)\n value_hca = hca.parse_word(value)\n value_scea = scea.parse_word(value)\n \n if value_hca == value_scea:\n value = value_hca\n elif value_hca != value_0:\n value = value_hca\n else:\n value = value_scea\n \n # Creaete the subgroup from the group\n new_subgroup = group.copy()\n new_subgroup['dataframe'] = subgroup\n new_subgroup[characteristic] = value\n subgroups = subgroups + [new_subgroup]\n\n return subgroups", "def _group_fc_accordingto_fctype(sel, data_all):\n colname = pd.Series(data_all.columns)\n colname = colname[1:]\n fcname = [n.split('--') for n in colname]\n\n idx = []\n for i, nn in enumerate(fcname):\n pn = [n.split(' ')[1] for n in nn]\n if pn[0] == pn[1]:\n idx.append(i)\n colname_intra = colname.iloc[idx]\n colname_intre = list(set(data_all.columns) - set(colname_intra))\n colname_intra = list(colname_intra.iloc[:])\n colname_intra.append(sel._huename)\n\n data_all_intra = 
data_all[colname_intra]\n data_all_inter = data_all[colname_intre]\n return data_all_intra, data_all_inter", "def split_per(folderin, folderout, split_col='ECO_ID', colNms=['i_h100','i_cd',\n 'doy','i_wflen','i_acqdate','b1','vcf','ECO_NAME','ECO_ID','BIOME','geometry']):\n\n split_files = glob.glob(folderin + '*.shp')\n\n for filename in split_files:\n print(filename)\n basename = os.path.splitext(os.path.basename(filename))[0]\n dfa = gpd.read_file(filename)\n df = dfa.astype({split_col: 'int32'}) \n ecoNames = list(np.unique(df[split_col]))#get list of unique ecoregions \n \n for eco in ecoNames:\n #create new df with just columns I want\n df2 = gpd.GeoDataFrame(df, columns=colNms)\n ID = str(eco)\n df_eco = df.loc[df2[split_col]==eco, colNms]\n df_eco.to_file(folderout + '/{}_eco_{}.shp'.format(basename, ID))", "def split(features, groundtruths, n_split):\n\n if n_split == 1:\n return features, groundtruths\n\n tags = list(set(groundtruths))\n new_index = {}\n for tag in tags:\n new_index[tag] = []\n for index, gt in enumerate(groundtruths):\n new_index[gt].append(index)\n new_feats = []\n new_gts = []\n for i in range(0, n_split):\n indexes = []\n for tag in tags:\n ref = len(new_index[tag])/n_split\n indexes.append(new_index[tag][ref*i:ref*(i+1)])\n \"\"\"\n ..todo:: manage multiple tags!\n \"\"\"\n indexes = indexes[0] + indexes[1]\n # print(features[:5])\n # print(len(indexes))\n # print(len(indexes[0]))\n # print(len(indexes[1]))\n # sys.exit()\n indexes.sort()\n new_gts.append([groundtruths[j] for j in indexes])\n new_feats.append([features[j] for j in indexes])\n return new_feats, new_gts", "def split():\n flag = 0\n for chromosome in region:\n for inf in region[chromosome]:\n if flag == 0:\n if chromosome not in test_set:\n test_set[chromosome] = [inf]\n else:\n test_set[chromosome].append(inf)\n else:\n if chromosome not in train_set:\n train_set[chromosome] = [inf]\n else:\n train_set[chromosome].append(inf)\n\n flag += 1\n flag %= 10", "def split(self, splits, catchall=False):\r\n raise NotImplementedError()", "def testA_FileSplitting(self):\n splitter = SplitterFactory()\n\n oneSetSubscription = self.createSubscription(nFiles=10, lumisPerFile=1)\n jobFactory = splitter(package=\"WMCore.WMBS\", subscription=oneSetSubscription)\n\n jobGroups = jobFactory(lumis_per_job=3, halt_job_on_file_boundaries=True, performance=self.performanceParams)\n self.assertEqual(len(jobGroups), 1)\n self.assertEqual(len(jobGroups[0].jobs), 10)\n for job in jobGroups[0].jobs:\n self.assertTrue(len(job['input_files']), 1)\n self.assertEqual(job['estimatedJobTime'], 100 * 12)\n self.assertEqual(job['estimatedDiskUsage'], 100 * 400)\n self.assertEqual(job['estimatedMemoryUsage'], 2300)\n\n twoLumiFiles = self.createSubscription(nFiles=5, lumisPerFile=2)\n jobFactory = splitter(package=\"WMCore.WMBS\", subscription=twoLumiFiles)\n jobGroups = jobFactory(lumis_per_job=1, halt_job_on_file_boundaries=True, performance=self.performanceParams)\n self.assertEqual(len(jobGroups), 1)\n self.assertEqual(len(jobGroups[0].jobs), 10)\n for job in jobGroups[0].jobs:\n self.assertEqual(len(job['input_files']), 1)\n self.assertEqual(job['estimatedJobTime'], 50 * 12)\n self.assertEqual(job['estimatedDiskUsage'], 50 * 400)\n self.assertEqual(job['estimatedMemoryUsage'], 2300)\n\n wholeLumiFiles = self.createSubscription(nFiles=5, lumisPerFile=3)\n jobFactory = splitter(package=\"WMCore.WMBS\", subscription=wholeLumiFiles)\n jobGroups = jobFactory(lumis_per_job=2, halt_job_on_file_boundaries=True, 
performance=self.performanceParams)\n self.assertEqual(len(jobGroups), 1)\n # 10 because we split on run boundaries\n self.assertEqual(len(jobGroups[0].jobs), 10)\n jobList = jobGroups[0].jobs\n for idx, job in enumerate(jobList, start=1):\n # Have should have one file, half two\n self.assertEqual(len(job['input_files']), 1)\n if idx % 2 == 0:\n self.assertEqual(job['estimatedJobTime'], (1.0 * round(100 / 3)) * 12)\n self.assertEqual(job['estimatedDiskUsage'], (1.0 * round(100 / 3)) * 400)\n else:\n self.assertEqual(job['estimatedJobTime'], (2.0 * round(100 / 3)) * 12)\n self.assertEqual(job['estimatedDiskUsage'], (2.0 * round(100 / 3)) * 400)\n self.assertEqual(job['estimatedMemoryUsage'], 2300)\n\n mask0 = jobList[0]['mask'].getRunAndLumis()\n self.assertEqual(mask0, {0: [[0, 1]]})\n mask1 = jobList[1]['mask'].getRunAndLumis()\n self.assertEqual(mask1, {0: [[2, 2]]})\n mask2 = jobList[2]['mask'].getRunAndLumis()\n self.assertEqual(mask2, {1: [[100, 101]]})\n mask3 = jobList[3]['mask'].getRunAndLumis()\n self.assertEqual(mask3, {1: [[102, 102]]})\n\n j0 = Job(id=jobList[0]['id'])\n j0.loadData()\n self.assertEqual(j0['mask'].getRunAndLumis(), {0: [[0, 1]]})\n\n # Do it with multiple sites\n twoSiteSubscription = self.createSubscription(nFiles=5, lumisPerFile=2, twoSites=True)\n jobFactory = splitter(package=\"WMCore.WMBS\",\n subscription=twoSiteSubscription)\n jobGroups = jobFactory(lumis_per_job=1,\n halt_job_on_file_boundaries=True,\n performance=self.performanceParams)\n self.assertEqual(len(jobGroups), 2)\n self.assertEqual(len(jobGroups[0].jobs), 10)\n for job in jobGroups[0].jobs:\n self.assertEqual(len(job['input_files']), 1)\n self.assertEqual(job['estimatedJobTime'], 50 * 12)\n self.assertEqual(job['estimatedDiskUsage'], 50 * 400)\n self.assertEqual(job['estimatedMemoryUsage'], 2300)", "def split_large_groups(ctx):\n asyncio.run(split_large_groups_impl(ctx.obj[\"config\"]))", "def split_diagnoses(\n formatted_data_path,\n n_test=100,\n subset_name=\"test\",\n MCI_sub_categories=True,\n p_age_threshold=0.80,\n p_sex_threshold=0.80,\n categorical_split_variable=None,\n ignore_demographics=False,\n verbose=0,\n):\n commandline_to_json(\n {\n \"output_dir\": formatted_data_path,\n \"n_test\": n_test,\n \"subset_name\": subset_name,\n \"MCI_sub_categories\": MCI_sub_categories,\n \"p_age_threshold\": p_age_threshold,\n \"p_sex_threshold\": p_sex_threshold,\n \"categorical_split_variable\": categorical_split_variable,\n \"ignore_demographics\": ignore_demographics,\n },\n filename=\"split.json\",\n )\n\n # Read files\n results_path = formatted_data_path\n\n train_path = path.join(results_path, \"train\")\n if path.exists(train_path):\n shutil.rmtree(train_path)\n if n_test > 0:\n os.makedirs(train_path)\n\n if categorical_split_variable is None:\n categorical_split_variable = \"diagnosis\"\n\n test_path = path.join(results_path, subset_name)\n if path.exists(test_path):\n shutil.rmtree(test_path)\n os.makedirs(test_path)\n\n diagnosis_df_paths = os.listdir(results_path)\n diagnosis_df_paths = [x for x in diagnosis_df_paths if x.endswith(\".tsv\")]\n diagnosis_df_paths = [\n x for x in diagnosis_df_paths if not x.endswith(\"_baseline.tsv\")\n ]\n\n MCI_special_treatment = False\n\n if \"MCI.tsv\" in diagnosis_df_paths and n_test > 0:\n if MCI_sub_categories:\n diagnosis_df_paths.remove(\"MCI.tsv\")\n MCI_special_treatment = True\n elif \"sMCI.tsv\" in diagnosis_df_paths or \"pMCI.tsv\" in diagnosis_df_paths:\n logger.warning(\n \"MCI special treatment was deactivated though MCI 
subgroups were found.\"\n \"Be aware that it may cause data leakage in transfer learning tasks.\"\n )\n\n # The baseline session must be kept before or we are taking all the sessions to mix them\n for diagnosis_df_path in diagnosis_df_paths:\n diagnosis_df = pd.read_csv(path.join(results_path, diagnosis_df_path), sep=\"\\t\")\n interest_columns = diagnosis_df.columns.values\n diagnosis = diagnosis_df_path.split(\".\")[0]\n logger.info(f\"Running split for diagnosis {diagnosis}\")\n if n_test > 0:\n train_df, test_df = create_split(\n diagnosis,\n diagnosis_df,\n categorical_split_variable,\n n_test=n_test,\n p_age_threshold=p_age_threshold,\n p_sex_threshold=p_sex_threshold,\n ignore_demographics=ignore_demographics,\n )\n # Save baseline splits\n train_df.to_csv(\n path.join(train_path, f\"{diagnosis}_baseline.tsv\"),\n sep=\"\\t\",\n index=False,\n )\n test_df.to_csv(\n path.join(test_path, f\"{diagnosis}_baseline.tsv\"), sep=\"\\t\", index=False\n )\n\n long_train_df = retrieve_longitudinal(train_df, diagnosis_df)\n long_train_df.to_csv(\n path.join(train_path, f\"{diagnosis}.tsv\"), sep=\"\\t\", index=False\n )\n long_test_df = retrieve_longitudinal(test_df, diagnosis_df)\n long_test_df.to_csv(\n path.join(test_path, f\"{diagnosis}.tsv\"), sep=\"\\t\", index=False\n )\n\n else:\n baseline_df = extract_baseline(diagnosis_df)\n test_df = baseline_df[interest_columns]\n test_df.to_csv(\n path.join(test_path, f\"{diagnosis}_baseline.tsv\"), sep=\"\\t\", index=False\n )\n long_test_df = retrieve_longitudinal(test_df, diagnosis_df)\n long_test_df.to_csv(\n path.join(test_path, f\"{diagnosis}.tsv\"), sep=\"\\t\", index=False\n )\n\n if MCI_special_treatment:\n\n # Extraction of MCI subjects without intersection with the sMCI / pMCI train\n diagnosis_df = pd.read_csv(path.join(results_path, \"MCI.tsv\"), sep=\"\\t\")\n MCI_df = diagnosis_df.set_index([\"participant_id\", \"session_id\"])\n baseline_df = extract_baseline(MCI_df, set_index=False)\n\n if n_test > 1:\n n_test = int(n_test)\n else:\n n_test = int(n_test * len(baseline_df))\n\n MCI_df, supplementary_diagnoses = remove_sub_labels(\n MCI_df, [\"sMCI\", \"pMCI\"], diagnosis_df_paths, results_path\n )\n if len(supplementary_diagnoses) == 0:\n raise ValueError(\n \"The MCI_sub_categories flag is not needed as there are no intersections with\"\n \"MCI subcategories.\"\n )\n\n # Construction of supplementary train\n supplementary_train_df = pd.DataFrame()\n for diagnosis in supplementary_diagnoses:\n sup_baseline_train_df = pd.read_csv(\n path.join(train_path, f\"{diagnosis}_baseline.tsv\"), sep=\"\\t\"\n )\n supplementary_train_df = pd.concat(\n [supplementary_train_df, sup_baseline_train_df]\n )\n sub_df = (\n supplementary_train_df.reset_index()\n .groupby(\"participant_id\")[\"session_id\"]\n .nunique()\n )\n logger.debug(\n f\"supplementary_train_df {len(sub_df)} subjects, {len(supplementary_diagnoses)} scans\"\n )\n\n supplementary_train_df.reset_index(drop=True, inplace=True)\n\n # MCI selection\n MCI_df.reset_index(inplace=True)\n baseline_df = extract_baseline(MCI_df)\n\n train_df, test_df = create_split(\n \"MCI\",\n baseline_df,\n categorical_split_variable,\n n_test=n_test,\n p_age_threshold=p_age_threshold,\n p_sex_threshold=p_sex_threshold,\n ignore_demographics=ignore_demographics,\n supplementary_train_df=supplementary_train_df,\n )\n\n # Write selection of MCI\n train_df.to_csv(\n path.join(train_path, \"MCI_baseline.tsv\"), sep=\"\\t\", index=False\n )\n test_df.to_csv(path.join(test_path, \"MCI_baseline.tsv\"), 
sep=\"\\t\", index=False)\n\n long_train_df = retrieve_longitudinal(train_df, diagnosis_df)\n long_train_df.to_csv(path.join(train_path, \"MCI.tsv\"), sep=\"\\t\", index=False)\n long_test_df = retrieve_longitudinal(test_df, diagnosis_df)\n long_test_df.to_csv(path.join(test_path, \"MCI.tsv\"), sep=\"\\t\", index=False)", "def split_all_closed_faces(self, max_tol=0.01, precision=0.01, num_splits=1):\n divider = ShapeUpgrade_ShapeDivideClosed(self.topods_shape())\n divider.SetPrecision(precision)\n divider.SetMinTolerance(0.1 * max_tol)\n divider.SetMaxTolerance(max_tol)\n divider.SetNbSplitPoints(num_splits)\n ok = divider.Perform()\n if not ok:\n # Splitting failed or there were no closed faces to split\n # Return the original solid\n return self\n return Solid(divider.Result())", "def read_subfind_group(self, subnr, datasets=None):\n\n offset = self.cat[\"SubOffset\"][subnr]\n count = self.cat[\"SubLen\"][subnr]\n return self.read_particle_range(offset, count, datasets)", "def go(self):\n num_fofs = self.fofs['fofid'].max()\n fof_splits = split.get_splits(num_fofs, self['chunksize'])\n\n for isplit,fof_split in enumerate(fof_splits):\n logger.info('%s %s' % (isplit,fof_split))\n self._write_split(isplit, fof_split)", "def split_data_by_refl(self, test_fraction=0.5):\n if BaseModel.is_laue(self.inputs):\n harmonic_id = BaseModel.get_harmonic_id(self.inputs)\n test_idx = (np.random.random(harmonic_id.max()+1) <= test_fraction)[harmonic_id]\n train, test = self.split_laue_data_by_mask(test_idx)\n #return self.get_tf_dataset(train), self.get_tf_dataset(test)\n return train, test\n\n test_idx = np.random.random(len(self.inputs[0])) <= test_fraction\n train, test = self.split_mono_data_by_mask(test_idx)\n #return self.get_tf_dataset(train), self.get_tf_dataset(test)\n return train, test", "def fetch_group_lasso_datasets():\n\n # helper functions\n\n def find_interaction_index(seq, subseq,\n alphabet=\"ATGC\",\n all_possible_len_n_interactions=None):\n n = len(subseq)\n alphabet_interactions = \\\n [set(p) for\n p in list(itertools.combinations_with_replacement(alphabet, n))]\n\n num_interactions = len(alphabet_interactions)\n if all_possible_len_n_interactions is None:\n all_possible_len_n_interactions = \\\n [set(interaction) for\n interaction in\n list(itertools.combinations_with_replacement(seq, n))]\n\n subseq = set(subseq)\n\n group_index = num_interactions * \\\n all_possible_len_n_interactions.index(subseq)\n value_index = alphabet_interactions.index(subseq)\n\n final_index = group_index + value_index\n return final_index\n\n def create_group_indicies_list(seqlength=7,\n alphabet=\"ATGC\",\n interactions=[1, 2, 3],\n include_extra=True):\n alphabet_length = len(alphabet)\n index_groups = []\n if include_extra:\n index_groups.append(0)\n group_count = 1\n for inter in interactions:\n n_interactions = comb(seqlength, inter)\n n_alphabet_combos = comb(alphabet_length,\n inter,\n repetition=True)\n\n for x1 in range(int(n_interactions)):\n for x2 in range(int(n_alphabet_combos)):\n index_groups.append(int(group_count))\n\n group_count += 1\n return index_groups\n\n def create_feature_vector_for_sequence(seq,\n alphabet=\"ATGC\",\n interactions=[1, 2, 3]):\n feature_vector_length = \\\n sum([comb(len(seq), inter) *\n comb(len(alphabet), inter, repetition=True)\n for inter in interactions]) + 1\n\n feature_vector = np.zeros(int(feature_vector_length))\n feature_vector[0] = 1.0\n for inter in interactions:\n # interactions at the current level\n cur_interactions = \\\n [set(p) for p in 
list(itertools.combinations(seq, inter))]\n interaction_idxs = \\\n [find_interaction_index(\n seq, cur_inter,\n all_possible_len_n_interactions=cur_interactions) + 1\n for cur_inter in cur_interactions]\n feature_vector[interaction_idxs] = 1.0\n\n return feature_vector\n\n positive_url = \\\n \"http://genes.mit.edu/burgelab/maxent/ssdata/MEMset/train5_hs\"\n negative_url = \\\n \"http://genes.mit.edu/burgelab/maxent/ssdata/MEMset/train0_5_hs\"\n\n pos_file = tempfile.NamedTemporaryFile() #bufsize=0)\n neg_file = tempfile.NamedTemporaryFile() #bufsize=0)\n\n posreq = urllib.request.Request(positive_url)\n with urllib.request.urlopen(posreq) as posresponse:\n pos_page = posresponse.read().decode(\"utf-8\")\n\n negreq = urllib.request.Request(negative_url)\n with urllib.request.urlopen(negreq) as negresponse:\n neg_page = negresponse.read().decode(\"utf-8\")\n\n positive_sequences = [str(line.strip().upper()) for idx, line in\n enumerate(pos_page.strip().split('\\n'))\n if \">\" not in line and idx < 2 * 8000]\n\n negative_sequences = [str(line.strip().upper()) for idx, line in\n enumerate(neg_page.strip().split('\\n'))\n if \">\" not in line and\n idx < 2 * len(positive_sequences)]\n\n assert len(positive_sequences) == len(negative_sequences), \\\n \"lengths were not the same: p={pos} n={neg}\" \\\n .format(pos=len(positive_sequences), neg=len(negative_sequences))\n\n positive_vector_matrix = np.array([create_feature_vector_for_sequence(s)\n for s in positive_sequences])\n negative_vector_matrix = np.array([create_feature_vector_for_sequence(s)\n for s in negative_sequences])\n\n df = pd.DataFrame(data=np.vstack((positive_vector_matrix,\n negative_vector_matrix)))\n df.loc[0:positive_vector_matrix.shape[0], \"Label\"] = 1.0\n df.loc[positive_vector_matrix.shape[0]:, \"Label\"] = 0.0\n\n design_matrix = df\n groups = create_group_indicies_list()\n\n return design_matrix, groups", "def spectral_clustering_division(E, geoms, split_type=\"threshold\", normalize_geoms=True):\n global INTERNAL_PARAMETERS\n n_pts, n_vec = E.shape\n _n, _d = geoms.shape\n assert _n == n_pts and _d == 3, \"Invalid geoms (%s)\" % (str(geoms.shape))\n # limit on tube sizes\n mts = int(INTERNAL_PARAMETERS['min_tube_size'])\n Mts = int(INTERNAL_PARAMETERS['max_tube_size'])\n # lower limit on the number of clusters\n min_n_clusters = int(INTERNAL_PARAMETERS['min_k'])\n # max allowed node depth\n max_depth = int(INTERNAL_PARAMETERS['max_depth'])\n # min eigenvector amplitude for split\n min_evect_amplitude = float(INTERNAL_PARAMETERS['min_evect_amplitude'])\n # number of thresholds to try when using thresholding splits\n n_threshs = int(INTERNAL_PARAMETERS['n_threshs'])\n\n # check degenerate case: just issue a warning and lower mts\n if n_pts <= 2 * min_n_clusters * mts:\n n_mts = int(max(1, n_pts / (2.0 * min_n_clusters)))\n sys.stderr.write(\"WARNING: small video\" +\n \"({} <= {}) \".format(n_pts, 2 * min_n_clusters * mts) +\n \": changing min_leaf_size to {}.\\n\".format(n_mts))\n mts = n_mts\n\n # get the normalized spatio-temporal positions\n if normalize_geoms:\n nrlz = np.array([640., 480., 1e2])\n ngeoms = geoms.astype(np.float) / nrlz[np.newaxis, :]\n ngeoms -= ngeoms.mean(axis=0)[np.newaxis, :]\n else:\n ngeoms = geoms - geoms.mean(axis=0)[np.newaxis, :]\n\n # initialize the tree structure\n stree = SpectralTree(\n E, ngeoms, mts, Mts, min_n_clusters, max_depth, min_evect_amplitude,\n split_type, n_threshs)\n # recursively split the leaves in depth-first left-to-right order\n stree.build()\n\n return 
stree.labels, stree.int_paths", "def sbc_groups():\n cam = \"sbc\"\n for light, lens, window in [(True, True, True),\n (True, True, False),\n (True, False, False),\n (False, False, False)]: \n filenames = flatfiles(cam)\n filenames = get_light_sbc(filenames, light)\n filenames = get_lens(filenames, lens)\n filenames = get_window_sbc(filenames, window)\n images = valid_images(filenames)\n process_images(images, cam, (light, lens, window))" ]
[ "0.6487616", "0.592714", "0.5788769", "0.5654306", "0.5585296", "0.55509764", "0.5434781", "0.543442", "0.5431795", "0.5424935", "0.54144394", "0.5367812", "0.53305584", "0.53304404", "0.52642953", "0.5258931", "0.522978", "0.5192073", "0.5165952", "0.51431376", "0.5128682", "0.51247436", "0.5116633", "0.5115435", "0.5108764", "0.5107557", "0.51035094", "0.51010215", "0.50956804", "0.5094092" ]
0.76197255
0
Split VELOCIraptor FOF groups into different subsets. This includes the full FOF group, the background (field) halo, and subhaloes. This allows for more flexible comparison with other algorithms, which may include different subsets of particles in haloes and/or subhaloes.
def splitVRdata(partdata, halodata, pids_halos, pids, coords, vels, nhalo, nsubhalo): # Arrays to hold different subsets of FOF group particles pids_background = np.array(pids_halos[:nhalo], dtype = 'object') coords_background = np.empty(nhalo, dtype = 'object') vels_background = np.empty(nhalo, dtype = 'object') pids_sub = np.array(pids_halos[nhalo:], dtype = 'object') coords_sub = np.empty(nsubhalo, dtype = 'object') vels_sub = np.empty(nsubhalo, dtype = 'object') # Create analogues to pids_halos (i.e. array where each # entry is the coordinates and velocities for each particle # in that (sub)halo) pids_all = np.concatenate(pids_halos) pid_idx = np.argsort(pids) pid_sorted = pids[pid_idx] match_idx = np.searchsorted(pid_sorted, pids_all) idxs = pid_idx[match_idx] coords_halos = coords[idxs] vels_halos = vels[idxs] # Indices that mark the first and last particle in each (sub)halo linds = partdata['Offset'] + partdata['Offset_unbound'] uinds = linds + partdata['Npart'] # (Field) haloes for ihalo in range(nhalo): coords_background[ihalo] = np.array([coord for coord in coords_halos[linds[ihalo]:uinds[ihalo]]]) vels_background[ihalo] = np.array([vel for vel in vels_halos[linds[ihalo]:uinds[ihalo]]]) # Subhaloes for isub in range(nhalo, nhalo + nsubhalo): idx = isub - nhalo coords_sub[idx] = np.array([coord for coord in coords_halos[linds[isub]:uinds[isub]]]) vels_sub[idx] = np.array([vel for vel in vels_halos[linds[isub]:uinds[isub]]]) # Get PIDs of all subhaloes hosted by each field halo to create # arrays containing ALL particles in the FOF group hostHaloID = halodata['hostHaloID'] pids_fof = np.empty(nhalo, dtype = 'object') coords_fof = np.empty(nhalo, dtype = 'object') vels_fof = np.empty(nhalo, dtype = 'object') for ihalo in range(nhalo): subs = np.where(hostHaloID == ihalo + 1)[0] - nhalo if subs.size > 0: #print(pids_sub[subs]) #print(np.concatenate(pids_sub[subs])) pids_fof[ihalo] = np.concatenate((pids_background[ihalo], np.concatenate(pids_sub[subs]))) coords_fof[ihalo] = np.concatenate((coords_background[ihalo], np.concatenate(coords_sub[subs]))) vels_fof[ihalo] = np.concatenate((vels_background[ihalo], np.concatenate(vels_sub[subs]))) else: # This halo hosts no subhaloes pids_fof[ihalo] = pids_background[ihalo] coords_fof[ihalo] = coords_background[ihalo] vels_fof[ihalo] = vels_background[ihalo] # Construct FOF components dictionary fofdata = {} fofdata['FOF/PIDs'] = pids_fof fofdata['FOF/Coordinates'] = coords_fof fofdata['FOF/Velocities'] = vels_fof fofdata['Background/PIDs'] = pids_background fofdata['Background/Coordinates'] = coords_background fofdata['Background/Velocities'] = vels_background fofdata['Satellite/PIDs'] = pids_sub fofdata['Satellite/Coordinates'] = coords_sub fofdata['Satellite/Velocities'] = vels_sub return fofdata
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def splitSFgroups(halodata, pids_fof, coords_fof, vels_fof, pids, coords, vels, nhalo, nsubhalo):\n\n\tfirstSub = halodata['Group/GroupFirstSub']\n\tnumSubs = halodata['Group/GroupNsubs']\n\tgroup_offset = halodata['Group/GroupOffsetType'][:,1]\n\tgroup_npart = halodata['Group/GroupLen']\n\tsub_npart = halodata['Subhalo/SubhaloLen']\n\tsub_offset = halodata['Subhalo/SubhaloOffsetType'][:,1]\n\n\tcentral_mask = np.where(firstSub != -1)[0] # FOF groups with a central subhalo\n\tncentrals = central_mask.size\n\tsat_mask = np.where(halodata['Subhalo/SubhaloRankInGr'] > 0)[0]\n\tnsats = sat_mask.size\n\n\t# Arrays to hold different subsets of FOF group particles\n\tpids_central = np.empty(nhalo, dtype = 'object')\n\tcoords_central = np.empty(nhalo, dtype = 'object')\n\tvels_central = np.empty(nhalo, dtype = 'object')\n\tpids_sat = np.empty(nsubhalo, dtype = 'object')\n\tcoords_sat = np.empty(nsubhalo, dtype = 'object')\n\tvels_sat = np.empty(nsubhalo, dtype = 'object')\n\tpids_background = np.empty(nhalo, dtype = 'object')\n\tcoords_background = np.empty(nhalo, dtype = 'object')\n\tvels_background = np.empty(nhalo, dtype = 'object')\n\tpids_diffuse = np.empty(nhalo, dtype = 'object')\n\tcoords_diffuse = np.empty(nhalo, dtype = 'object')\n\tvels_diffuse = np.empty(nhalo, dtype = 'object')\n\n\t# Split FOF group into central subhalo, diffuse material, and \n\t# background (central + diffuse)\n\tfor i in range(ncentrals):\n\t\tihalo = central_mask[i]\n\t\t# Central subhalo IDs and coordinates\n\t\tid_arr = pids_fof[ihalo][0:sub_npart[firstSub[ihalo]]]\n\t\tcoord_arr = coords_fof[ihalo][0:sub_npart[firstSub[ihalo]]]\n\t\tvel_arr = vels_fof[ihalo][0:sub_npart[firstSub[ihalo]]]\n\t\tpids_central[ihalo] = id_arr\n\t\tcoords_central[ihalo] = coord_arr\n\t\tvels_central[ihalo] = vel_arr\n\n\t\t# Now add in diffuse particles (not bound in substructure)\n\t\tlastSub = firstSub[ihalo] + numSubs[ihalo] - 1\n\t\tid_unbound = np.array([part for part in pids[(sub_offset[lastSub] + sub_npart[lastSub]):\n\t\t\t\t\t\t\t\t\t\t\t\t\t(group_offset[ihalo] + group_npart[ihalo])]])\n\t\tcoord_unbound = np.array([coord for coord in coords[(sub_offset[lastSub] + sub_npart[lastSub]):\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t(group_offset[ihalo] + group_npart[ihalo])]])\n\t\tvel_unbound = np.array([vel for vel in vels[(sub_offset[lastSub] + sub_npart[lastSub]):\n\t\t\t\t\t\t\t\t\t\t\t\t\t(group_offset[ihalo] + group_npart[ihalo])]])\n\t\t# There are diffuse particles in this FOF group\n\t\tif id_unbound.size > 0:\n\t\t\t# Populate diffuse particle arrays\n\t\t\tpids_diffuse[ihalo] = id_unbound\n\t\t\tcoords_diffuse[ihalo] = coord_unbound\n\t\t\tvels_diffuse[ihalo] = vel_unbound\n\t\t\t# Create background particle (central subhalo + diffuse) arrays\n\t\t\tid_arr = np.concatenate((id_arr, id_unbound))\n\t\t\tcoord_arr = np.concatenate((coord_arr, coord_unbound))\n\t\t\tvel_arr = np.concatenate((vel_arr, vel_unbound))\n\t\t# Populate background particle arrays\n\t\tpids_background[ihalo] = id_arr\n\t\tcoords_background[ihalo] = coord_arr\n\t\tvels_background[ihalo] = vel_arr\n\n\t# Split FOF into (satellite) subhaloes\n\tlinds = sub_offset\n\tuinds = linds + sub_npart\n\tfor i in range(nsats):\n\t\tisub = sat_mask[i]\n\t\tpids_sat[isub] = np.array([part for part in pids[linds[isub]:uinds[isub]]])\n\t\tcoords_sat[isub] = np.array([coord for coord in coords[linds[isub]:uinds[isub]]])\n\t\tvels_sat[isub] = np.array([vel for vel in vels[linds[isub]:uinds[isub]]])\n\n\t# Construct FOF components dictionary\n\tfofdata = 
{}\n\tfofdata['Background/PIDs'] = pids_background\n\tfofdata['Background/Coordinates'] = coords_background\n\tfofdata['Background/Velocities'] = vels_background\n\tfofdata['Central/PIDs'] = pids_central\n\tfofdata['Central/Coordinates'] = coords_central\n\tfofdata['Central/Velocities'] = vels_central\n\tfofdata['Diffuse/PIDs'] = pids_diffuse\n\tfofdata['Diffuse/Coordinates'] = coords_diffuse\n\tfofdata['Diffuse/Velocities'] = vels_diffuse\n\tfofdata['Satellite/PIDs'] = pids_sat\n\tfofdata['Satellite/Coordinates'] = coords_sat\n\tfofdata['Satellite/Velocities'] = vels_sat\n\n\treturn fofdata", "def split_test_data():\n outputvis = ROOT_DIR + 'test_imaging/test_split_1eb.ms'\n targ = TARGETS['NGC1333IRAS4A']\n spw = '{0}:236~276'.format(SPWS[targ.name]['NH3_11'].spw_id)\n split(\n vis=get_vis_name(targ),\n outputvis=outputvis,\n field=targ.name,\n spw=spw,\n )", "def getSFpartData(opt, z, halodata, pids, coords, vels, nhalo, nsubhalo):\n\n\tpids_fof = np.empty(nhalo, dtype = 'object')\n\tcoords_fof = np.empty(nhalo, dtype = 'object')\n\tvels_fof = np.empty(nhalo, dtype = 'object')\n\n\t# Lower and upper indices (first and last particle) for each halo\n\tlinds = halodata['Group/GroupOffsetType'][:,1]\n\tuinds = linds + halodata['Group/GroupLen']\n\n\t# PIDs, coords, vels for all particles in each FOF group (including substructure)\n\tfor ihalo in range(nhalo):\n\t\tpids_fof[ihalo] = np.array([part for part in pids[linds[ihalo]:uinds[ihalo]]])\n\t\tcoords_fof[ihalo] = np.array([coord for coord in coords[linds[ihalo]:uinds[ihalo]]])\n\t\tvels_fof[ihalo] = np.array([vel for vel in vels[linds[ihalo]:uinds[ihalo]]])\n\n\t# Fix wraparound for haloes located near box edges\n\tcommon.fixWraparound(opt, coords_fof, z)\n\n\treturn pids_fof, coords_fof, vels_fof", "def split_data(self, verbose=False):\n # group sample by patient and body part\n tmp = self.data_info.groupby(['patientID', 'body_part']).max()\n # get the index (i.e. patient and bodypart) where none of the body part XR of a given patient are abnormal\n idx_list_normal = tmp[tmp.body_part_abnormal == 0].index.to_list()\n # get the index (i.e. patient and bodypart) where at least one but not all of the body part XR of a given patient are abnormal\n idx_list_mixt = tmp[tmp.body_part_abnormal == 0.5].index.to_list()\n # get the index (i.e. patient and bodypart) where all one of the body part XR of a given patient are abnormal\n idx_list_abnormal = tmp[tmp.body_part_abnormal == 1].index.to_list()\n total = len(idx_list_normal)+len(idx_list_mixt)+len(idx_list_abnormal)\n train_size = self.train_frac*total\n assert train_size < len(idx_list_normal), f'There are not enough normal sample for the given train_frac : {self.train_frac}. 
\\\n There are {len(idx_list_normal)} normal sample over {total} total samples.'\n valid_size = (1-self.train_frac)*0.5*total\n test_size = (1-self.train_frac)*0.5*total\n # randomly pick (1-ratio_known_abnormal)*train_frac*total from the normal index for the train set\n train_idx_normal, remain = train_test_split(idx_list_normal, \\\n train_size=int((1-self.ratio_known_abnormal)*train_size),\\\n random_state=self.random_state)\n # split the rest equally in the validation and test set\n valid_idx_normal, test_idx_normal = train_test_split(remain, test_size=0.5, random_state=self.random_state)\n # add ratio_known_abnormal*train_frac*total from the abnormal index\n if self.ratio_known_abnormal == 0.0:\n train_idx_abnormal, remain = [], idx_list_abnormal\n else:\n train_idx_abnormal, remain = train_test_split(idx_list_abnormal, \\\n train_size=int(self.ratio_known_abnormal*train_size),\\\n random_state=self.random_state)\n # split the rest equally in the validation and test set\n valid_idx_abnormal, test_idx_abnormal = train_test_split(remain, test_size=0.5, random_state=self.random_state)\n # split the mixt between test and validation and consider them as abnormal patients bodypart\n valid_idx_mixt, test_idx_mixt = train_test_split(idx_list_mixt, test_size=0.5, random_state=self.random_state)\n valid_idx_abnormal += valid_idx_mixt\n test_idx_abnormal += test_idx_mixt\n # get the known and unknown index for each sets\n # get a fraction of normal known\n if self.ratio_known_normal == 0.0:\n train_idx_known, train_idx_unknown = [], train_idx_normal\n valid_idx_known, valid_idx_unknown = [], valid_idx_normal\n test_idx_known, test_idx_unknown = [], test_idx_normal\n else:\n train_idx_known, train_idx_unknown = train_test_split(train_idx_normal, \\\n train_size=int(self.ratio_known_normal*train_size),\\\n random_state=self.random_state)\n valid_idx_known, valid_idx_unknown = train_test_split(valid_idx_normal, \\\n train_size=int(self.ratio_known_normal*valid_size),\\\n random_state=self.random_state)\n test_idx_known, test_idx_unknown = train_test_split(test_idx_normal, \\\n train_size=int(self.ratio_known_normal*test_size), \\\n random_state=self.random_state)\n # get the abnormal known\n # all abnormal in train are known\n train_idx_known += train_idx_abnormal\n if self.ratio_known_abnormal == 0.0:\n valid_idx_unknown += valid_idx_abnormal\n test_idx_unknown += test_idx_abnormal\n else:\n valid_idx_known_abnormal, valid_idx_unknown_abnormal = train_test_split(valid_idx_abnormal, \\\n train_size=int(self.ratio_known_abnormal*valid_size), \\\n random_state=self.random_state)\n valid_idx_known += valid_idx_known_abnormal\n valid_idx_unknown += valid_idx_unknown_abnormal\n test_idx_known_abnormal, test_idx_unknown_abnormal = train_test_split(test_idx_abnormal, \\\n train_size=int(self.ratio_known_abnormal*test_size),\\\n random_state=self.random_state)\n test_idx_known += test_idx_known_abnormal\n test_idx_unknown += test_idx_unknown_abnormal\n\n # get the subsample dataframe with semi-label\n train_df = self.generate_semisupervized_label(train_idx_known, train_idx_unknown)\n valid_df = self.generate_semisupervized_label(valid_idx_known, valid_idx_unknown)\n test_df = self.generate_semisupervized_label(test_idx_known, test_idx_unknown)\n # shuffle the dataframes\n self.subsets['train'] = train_df.sample(frac=1, random_state=self.random_state).reset_index(drop=True)\n self.subsets['valid'] = valid_df.sample(frac=1, random_state=self.random_state).reset_index(drop=True)\n self.subsets['test'] = 
test_df.sample(frac=1, random_state=self.random_state).reset_index(drop=True)\n # Print summary\n if verbose:\n self.print_stat()", "def split(self, fractions=[0.8, 0.2]):\n\n if sum(fractions) > 1.0 or sum(fractions) <= 0:\n raise ValueError(\"the sum of fractions argument should be between 0 and 1\")\n\n # random indices\n idx = np.arange(self.n_samples)\n np.random.shuffle(idx)\n\n # insert zero\n fractions.insert(0, 0)\n\n # gte limits of the subsets\n limits = (np.cumsum(fractions) * self.n_samples).astype(np.int32)\n\n subsets = []\n # create output dataset\n for i in range(len(fractions) - 1):\n subsets.append(\n Dataset(self.inputs[idx[limits[i]:limits[i + 1]]], self.targets[idx[limits[i]:limits[i + 1]]]))\n\n return subsets", "def test_n_group_split(self):\n # Test 2 groups like HalfSplitter first\n hs = NGroupPartitioner(2)\n\n for isreversed, splitter in enumerate((hs, hs)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in hs.generate(self.data) ]\n self.assertTrue(len(splits) == 2)\n\n for i, p in enumerate(splits):\n self.assertTrue( len(p) == 2 )\n self.assertTrue( p[0].nsamples == 50 )\n self.assertTrue( p[1].nsamples == 50 )\n\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n\n # check if it works on pure odd and even chunk ids\n moresplits = [ list(spl.generate(p)) for p in hs.generate(splits[0][0])]\n\n for split in moresplits:\n self.assertTrue(split[0] != None)\n self.assertTrue(split[1] != None)\n\n # now test more groups\n s5 = NGroupPartitioner(5)\n\n # get the splits\n for isreversed, s5splitter in enumerate((s5, s5)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in s5splitter.generate(self.data) ]\n\n # must have 10 splits\n self.assertTrue(len(splits) == 5)\n\n # check split content\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [2, 3, 4, 5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [2, 3])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 4, 5, 6, 7, 8, 9])\n # ...\n assert_array_equal(splits[4][1-isreversed].sa['chunks'].unique,\n [8, 9])\n assert_array_equal(splits[4][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4, 5, 6, 7])\n\n\n # Test for too many groups\n def splitcall(spl, dat):\n return list(spl.generate(dat))\n s20 = NGroupPartitioner(20)\n self.assertRaises(ValueError,splitcall,s20,self.data)", "def subdivide_mesh(obj, n_subdiv=2):\n thisfunc = thisfile + '->subdivide_mesh()'\n\n scene = bpy.context.scene\n\n # All objects need to be in 'OBJECT' mode to apply modifiers -- maybe a Blender bug?\n for o in bpy.data.objects:\n scene.objects.active = o\n bpy.ops.object.mode_set(mode='OBJECT')\n o.select = False\n obj.select = True\n scene.objects.active = obj\n\n bpy.ops.object.modifier_add(type='SUBSURF')\n obj.modifiers['Subsurf'].subdivision_type = 'CATMULL_CLARK'\n obj.modifiers['Subsurf'].levels = n_subdiv\n obj.modifiers['Subsurf'].render_levels = n_subdiv\n\n # Apply modifier\n 
bpy.ops.object.modifier_apply(modifier='Subsurf', apply_as='DATA')\n\n # Scene update necessary, as matrix_world is updated lazily\n scene.update()\n\n logging.info(\"%s: Subdivided mesh of '%s'\", thisfunc, obj.name)", "def split_data_by_refl(self, test_fraction=0.5):\n if BaseModel.is_laue(self.inputs):\n harmonic_id = BaseModel.get_harmonic_id(self.inputs)\n test_idx = (np.random.random(harmonic_id.max()+1) <= test_fraction)[harmonic_id]\n train, test = self.split_laue_data_by_mask(test_idx)\n #return self.get_tf_dataset(train), self.get_tf_dataset(test)\n return train, test\n\n test_idx = np.random.random(len(self.inputs[0])) <= test_fraction\n train, test = self.split_mono_data_by_mask(test_idx)\n #return self.get_tf_dataset(train), self.get_tf_dataset(test)\n return train, test", "def split_data(df, split_method='fo', test_size=.2, random_state=42):\n if split_method == 'fo':\n train_set, test_set = _split_fo(df,\n test_size=test_size,\n random_state=random_state)\n elif split_method == 'tfo':\n train_set, test_set = _split_tfo(df, test_size=test_size)\n elif split_method == 'ufo':\n train_set, test_set = _split_ufo(df,\n test_size=test_size,\n random_state=random_state)\n elif split_method == 'utfo':\n train_set, test_set = _split_utfo(df, test_size=test_size)\n else:\n raise HuitreError('Invalid data_split value, expect: ufo, utfo')\n train_set = train_set.reset_index(drop=True)\n test_set = test_set.reset_index(drop=True)\n return train_set, test_set", "def group_boundary_elements(self,force=False):\n if force or self._bc_groups is None:\n # This part is the same as in waq_scenario\n g=self.grid()\n if g is None:\n return super(SunHydro,self).group_boundary_elements()\n\n self.infer_2d_elements()\n\n poi=self.pointers\n bc_sel = (poi[:,0]<0)\n bc_elts = np.unique(self.seg_to_2d_element[ poi[bc_sel,1]-1 ])\n\n groups=np.zeros(self.n_2d_elements,self.group_dtype)\n groups['id']-=1\n\n gforce=forcing.GlobalForcing(sun=self.sun)\n sun_g=self.sun.grid()\n\n def node_sun_to_g(n):\n return g.select_nodes_nearest(sun_g.points[n])\n\n # map group id as returned by this method to a dict with items \n # like which shapefile did it come from, index in that shapefile,\n # and fields from the feature.\n # note that it is possible for two boundary flows to enter the same\n # cell - only the first will be marked, with the second feature\n # skipped in both groups and bc_group_mapping\n # self.bc_group_mapping={} \n ngroups=0\n\n for flow_shp in self.flow_shps:\n flows=wkb2shp.shp2geom(flow_shp)\n sun_groups=gforce.add_groups_bulk(defs=flows)\n\n for feat_id in range(len(flows)):\n grp=sun_groups[feat_id]\n if grp.cell_based():\n sun_cells=grp.cells\n cells=[]\n for cell in sun_cells:\n g_nodes=[node_sun_to_g(n)\n for n in sun_g.cells[cell]]\n cells.append( g.nodes_to_cell(g_nodes) )\n\n cells=np.array(cells)\n else:\n # for the purposes of bc_groups, figure out the\n # respective cells\n cells=[]\n for sun_e in grp.edges:\n sun_e_nodes=sun_g.edges[sun_e,:2]\n e=g.nodes_to_edge(node_sun_to_g(sun_e_nodes[0]),\n node_sun_to_g(sun_e_nodes[1]))\n assert e is not None\n cells.append(g.edge_to_cells(e))\n cells=np.array(cells)\n cells=cells[cells>=0]\n\n details=dict(flow_shp=flow_shp,\n feat_id=feat_id)\n for n in flows.dtype.names:\n details[n]=flows[n][feat_id]\n\n # limit this to cells which are not already marked, but *are*\n # in bc_elts\n cells=[c for c in cells\n if (groups['id'][c]<0) and (c in bc_elts) ] \n if len(cells):\n groups['id'][cells] = ngroups\n 
groups['name'][cells]=details.get('name','group %d'%ngroups)\n groups['attrs'][cells] = details\n # self.bc_group_mapping[ngroups]=details\n ngroups+=1\n else:\n self.log.warning(\"Feature %d from %s (name=%s) overlaps another flow or wasn't\" \n \" found as a boundary, \"\n \" and will be skipped\"%(feat_id,flow_shp,\n details.get('name','n/a')))\n\n # anything not marked already then gets grouped by adjacency and marked\n # the same way as before - see waq_scenario.py for more comments\n def adjacent_cells(g,c,candidates):\n a=list(g.cell_to_adjacent_boundary_cells(c))\n b=list(g.cell_to_cells(c))\n nbrs=filter(lambda cc: cc in candidates,a+b)\n return np.unique(nbrs)\n def trav(c,mark):\n groups['id'][c]=mark\n groups['name'][c]=\"group %d\"%mark\n for nbr in adjacent_cells(g,c,bc_elts):\n if groups['id'][nbr]<0:\n trav(nbr,mark)\n\n ngroups=1+groups['id'].max()\n\n for bc_elt in bc_elts:\n if groups['id'][bc_elt]<0:\n # This is the part where if there are other cells \n # which are part of the same forcing group, they should\n # all get this value\n trav(bc_elt,ngroups)\n ngroups+=1\n self._bc_groups=groups\n return self._bc_groups", "def temp_split(filename):\n filename, ext = filename.rsplit('.')\n data = np.load(filename + \".\" + ext)\n # define basic constants from parent\n A = data['a']\n A_SIZE = A.shape[0]\n A_SHAPE = A.shape\n ORIGINAL_SIZE = data['original_size']\n B = data['b']\n # basics\n ki, kj, m = np.sum(A, 1), np.sum(A, 0), np.sum(np.sum(A, 1))\n # eval & evec\n eval, evec = linalg.eigh(B)\n # split\n g1_order, g1_arrays, g2_order, g2_arrays = create_g(A, evec)\n g1, g2 = create_g_matrix(g1_order, g1_arrays), create_g_matrix(g2_order, g2_arrays)\n # threshold (q)\n q1 = create_q(A_SIZE, B, g1_order, m)\n q2 = create_q(A_SIZE, B, g2_order, m)\n # B of G\n b1 = create_b_of_g(B, g1_order)\n b2 = create_b_of_g(B, g2_order)\n # a_elems\n a1_elems = []\n a2_elems = []\n original_elems = data['a_elems']\n for i in g1_order:\n a1_elems.append(original_elems[i])\n for i in g2_order:\n a2_elems.append(original_elems[i])\n return Part(filename + ',1', ext, q1, g1.shape[0], ','.join([str(x) for x in a1_elems])), \\\n Part(filename + ',2', ext, q2, g2.shape[0], ','.join([str(x) for x in a2_elems]))", "def split_groups(self, num_of_groups=None, verbose=False):\n if num_of_groups is None:\n num_of_groups = self.number_of_groups\n self.training_file.split_groups(num_of_groups=num_of_groups,\n verbose=verbose)", "def split(self, splits, catchall=False):\r\n raise NotImplementedError()", "def SplitCurves(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_FaceDivide_SplitCurves(self, *args)", "def split():\n flag = 0\n for chromosome in region:\n for inf in region[chromosome]:\n if flag == 0:\n if chromosome not in test_set:\n test_set[chromosome] = [inf]\n else:\n test_set[chromosome].append(inf)\n else:\n if chromosome not in train_set:\n train_set[chromosome] = [inf]\n else:\n train_set[chromosome].append(inf)\n\n flag += 1\n flag %= 10", "def split_for_cv(all_data, split_ratio, split_axis=default.DEFAULT_CV_AXIS, seed=default.DEFAULT_CV_RANDOM_SEED):\n\n check.argument_numeric(split_ratio, 0, 1)\n check.argument_enum(split_axis, [0, 1], allow_none=True)\n\n # Split the priors into gold standard based on axis (flatten if axis=None)\n if split_axis is None:\n priors_data, gold_standard = _split_flattened(all_data, split_ratio, seed=seed)\n else:\n priors_data, gold_standard = _split_axis(all_data, split_ratio, axis=split_axis, seed=seed)\n\n return priors_data, gold_standard", "def 
spectral_clustering_division(E, geoms, split_type=\"threshold\", normalize_geoms=True):\n global INTERNAL_PARAMETERS\n n_pts, n_vec = E.shape\n _n, _d = geoms.shape\n assert _n == n_pts and _d == 3, \"Invalid geoms (%s)\" % (str(geoms.shape))\n # limit on tube sizes\n mts = int(INTERNAL_PARAMETERS['min_tube_size'])\n Mts = int(INTERNAL_PARAMETERS['max_tube_size'])\n # lower limit on the number of clusters\n min_n_clusters = int(INTERNAL_PARAMETERS['min_k'])\n # max allowed node depth\n max_depth = int(INTERNAL_PARAMETERS['max_depth'])\n # min eigenvector amplitude for split\n min_evect_amplitude = float(INTERNAL_PARAMETERS['min_evect_amplitude'])\n # number of thresholds to try when using thresholding splits\n n_threshs = int(INTERNAL_PARAMETERS['n_threshs'])\n\n # check degenerate case: just issue a warning and lower mts\n if n_pts <= 2 * min_n_clusters * mts:\n n_mts = int(max(1, n_pts / (2.0 * min_n_clusters)))\n sys.stderr.write(\"WARNING: small video\" +\n \"({} <= {}) \".format(n_pts, 2 * min_n_clusters * mts) +\n \": changing min_leaf_size to {}.\\n\".format(n_mts))\n mts = n_mts\n\n # get the normalized spatio-temporal positions\n if normalize_geoms:\n nrlz = np.array([640., 480., 1e2])\n ngeoms = geoms.astype(np.float) / nrlz[np.newaxis, :]\n ngeoms -= ngeoms.mean(axis=0)[np.newaxis, :]\n else:\n ngeoms = geoms - geoms.mean(axis=0)[np.newaxis, :]\n\n # initialize the tree structure\n stree = SpectralTree(\n E, ngeoms, mts, Mts, min_n_clusters, max_depth, min_evect_amplitude,\n split_type, n_threshs)\n # recursively split the leaves in depth-first left-to-right order\n stree.build()\n\n return stree.labels, stree.int_paths", "def split_data(basedir, data_split=0.80):\n manip = data_manipulator(basedir)\n manip.train_test_split(data_split=data_split)", "def sbc_groups():\n cam = \"sbc\"\n for light, lens, window in [(True, True, True),\n (True, True, False),\n (True, False, False),\n (False, False, False)]: \n filenames = flatfiles(cam)\n filenames = get_light_sbc(filenames, light)\n filenames = get_lens(filenames, lens)\n filenames = get_window_sbc(filenames, window)\n images = valid_images(filenames)\n process_images(images, cam, (light, lens, window))", "def split(features, groundtruths, n_split):\n\n if n_split == 1:\n return features, groundtruths\n\n tags = list(set(groundtruths))\n new_index = {}\n for tag in tags:\n new_index[tag] = []\n for index, gt in enumerate(groundtruths):\n new_index[gt].append(index)\n new_feats = []\n new_gts = []\n for i in range(0, n_split):\n indexes = []\n for tag in tags:\n ref = len(new_index[tag])/n_split\n indexes.append(new_index[tag][ref*i:ref*(i+1)])\n \"\"\"\n ..todo:: manage multiple tags!\n \"\"\"\n indexes = indexes[0] + indexes[1]\n # print(features[:5])\n # print(len(indexes))\n # print(len(indexes[0]))\n # print(len(indexes[1]))\n # sys.exit()\n indexes.sort()\n new_gts.append([groundtruths[j] for j in indexes])\n new_feats.append([features[j] for j in indexes])\n return new_feats, new_gts", "def go(self):\n num_fofs = self.fofs['fofid'].max()\n fof_splits = split.get_splits(num_fofs, self['chunksize'])\n\n for isplit,fof_split in enumerate(fof_splits):\n logger.info('%s %s' % (isplit,fof_split))\n self._write_split(isplit, fof_split)", "def split(self,i):\n alpha = 0.6\n eps = 2.6\n\n if self.n > self.maxn-3:\n print \"cannot refine any further\"\n return False\n \n # The son \n self.m[i] = self.m[i] / 4.0\n #self.h[i] = self.h[i] * alpha\n\n # Daughter 1\n self.r[self.n] = self.r[i] + eps*np.array([0,1])\n self.m[self.n] = 
self.m[i] \n self.v[self.n] = self.v[i]\n \n # Daughter 2\n self.r[self.n+1] = self.r[i] + eps*np.array([0.866025,-0.5])\n self.m[self.n+1] = self.m[i] \n self.v[self.n+1] = self.v[i]\n \n # Daughter 3\n self.r[self.n+2] = self.r[i] + eps*np.array([-0.866025,-0.5])\n self.m[self.n+2] = self.m[i] \n self.v[self.n+2] = self.v[i]\n \n self.n = self.n+3\n #print \"There are now \",self.n,\"particles\"\n return True", "def make_door_split(bm, face, size, off, **kwargs):\n return split(bm, face, size.y, size.x, off.x, off.y, off.z)", "def split(self):\n sub_images = []\n\n for region in regionprops(self.cells):\n minr, minc, maxr, maxc = region.bbox\n sub_image = self.image_raw[max(0, minr - 10):maxr, max(0, minc - 10):maxc, :]\n\n sub_images.append(FQimage(data=sub_image))\n\n return sub_images", "def split_large_groups(ctx):\n asyncio.run(split_large_groups_impl(ctx.obj[\"config\"]))", "def split(self, train_mask, test_mask):\n train = self.dataset.loc[train_mask]\n test = self.dataset.loc[test_mask]\n return PandasTrainTestSplit.from_dfs(train, test, self.fguide)", "def split(self, train_fraction=0.8, val_fraction=0.2, test_fraction=0, seed=1):\n if self.is_initialized():\n return\n self.ensure_fraction_sum(train_fraction, val_fraction, test_fraction)\n np.random.seed(seed)\n self.samples = sorted(self.samples)\n np.random.shuffle(self.samples)\n train_idx = ceil(train_fraction*(len(self.samples)))\n val_idx = train_idx + ceil(val_fraction*(len(self.samples)))\n test_idx = val_idx + ceil(test_fraction*(len(self.samples)))\n indices = list(range(len(self.samples)))\n self.indices[TRAIN_SUBSET] = indices[:train_idx]\n self.indices[VAL_SUBSET] = indices[train_idx:val_idx]\n self.indices[TEST_SUBSET] = indices[val_idx:test_idx]", "def split_per(folderin, folderout, split_col='ECO_ID', colNms=['i_h100','i_cd',\n 'doy','i_wflen','i_acqdate','b1','vcf','ECO_NAME','ECO_ID','BIOME','geometry']):\n\n split_files = glob.glob(folderin + '*.shp')\n\n for filename in split_files:\n print(filename)\n basename = os.path.splitext(os.path.basename(filename))[0]\n dfa = gpd.read_file(filename)\n df = dfa.astype({split_col: 'int32'}) \n ecoNames = list(np.unique(df[split_col]))#get list of unique ecoregions \n \n for eco in ecoNames:\n #create new df with just columns I want\n df2 = gpd.GeoDataFrame(df, columns=colNms)\n ID = str(eco)\n df_eco = df.loc[df2[split_col]==eco, colNms]\n df_eco.to_file(folderout + '/{}_eco_{}.shp'.format(basename, ID))", "def _split(self, split, randomise=False, **kwargs):\r\n # Copy split to prevent modifying outside arguments\r\n split = split.copy()\r\n # Compute total\r\n total = sum(split.values())\r\n # If split contains floats, convert to integers\r\n if isinstance(total, float):\r\n assert_msg = 'Not enough data! ' \\\r\n + f'Split requires a total of {total*100}%. ' \\\r\n + 'Split should not exceed 100%.'\r\n assert total <= 1, assert_msg\r\n # Add 'rest' subset if not all data is used in split\r\n if total < 1:\r\n split['rest'] = 1 - total\r\n split = self._float_split_to_int(split)\r\n total = sum(split.values())\r\n # Create subsets based off integer values\r\n if isinstance(total, int):\r\n assert_msg = 'Not enough data! 
' \\\r\n + f'Split requires a total of {total} data entries ' \\\r\n + f'but only {len(self.data)} are available.'\r\n assert total <= len(self.data), assert_msg\r\n # Add 'rest' subset if not all data is used in split\r\n if total < len(self.data):\r\n split['rest'] = len(self.data) - total\r\n # Create subsets\r\n index = 0\r\n for name, length in split.items():\r\n subset_name = f'{self.name}.{name}'\r\n subset_data = self.data[index:index + length]\r\n subset_seed = self.seed\r\n if self.seed is not None:\r\n subset_seed += sum([ord(c) for c in name]) + length\r\n subset = self._make_subset(subset_name,\r\n subset_data,\r\n randomise=randomise,\r\n seed=subset_seed,\r\n **kwargs\r\n )\r\n setattr(self, name, subset)\r\n index += length\r\n # Replace data with references to subsets\r\n self.data = []\r\n for name in split.keys():\r\n self.data.append(getattr(self, name, None))\r\n # Indicate that this is a superset\r\n self.is_superset = True", "def _split_flattened(data, split_ratio, seed=default.DEFAULT_CV_RANDOM_SEED):\n\n check.argument_numeric(split_ratio, 0, 1)\n\n pc = np.sum(data.values != 0)\n gs_count = int(split_ratio * pc)\n idx = _make_shuffled_index(pc, seed=seed)\n\n pr_idx = data.values[data.values != 0].copy()\n gs_idx = data.values[data.values != 0].copy()\n\n pr_idx[idx[0:gs_count]] = 0\n gs_idx[idx[gs_count:]] = 0\n\n gs = data.values.copy()\n pr = data.values.copy()\n\n gs[gs != 0] = gs_idx\n pr[pr != 0] = pr_idx\n\n priors_data = pd.DataFrame(pr, index=data.index, columns=data.columns)\n gold_standard = pd.DataFrame(gs, index=data.index, columns=data.columns)\n\n return priors_data, gold_standard" ]
[ "0.77059865", "0.62634546", "0.5937793", "0.5880467", "0.58631194", "0.5844169", "0.5616146", "0.5584471", "0.55817026", "0.5554655", "0.5537077", "0.5516713", "0.5505204", "0.5493369", "0.5489576", "0.5452154", "0.5451404", "0.54258513", "0.5421155", "0.54158276", "0.5400885", "0.53891104", "0.5385367", "0.5384305", "0.5377447", "0.5376395", "0.5362492", "0.5354603", "0.533989", "0.53373075" ]
0.7033883
1
Extracts a single face image from a video sequence.
def extract_face(seq):
    img, locations = extract_image(seq)
    if img is None:
        # No frame with a face was found.
        return None
    else:
        # We found a frame with a face.
        # If there are multiple faces, choose the largest.
        loc = get_largest_face(locations)
        cropped = crop_face(img, loc, ZOOMOUT_FACTOR)
        return cropped
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_frame(face_cc, video_capture, voice):\n if video_capture.isOpened():\n _, frame = video_capture.read()\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n try:\n faces = face_cc.detectMultiScale(\n gray,\n scaleFactor=1.1,\n minNeighbors=5,\n minSize=(30, 30),\n ## TODO research where this is now? flags=cv2.cv.CV_HAAR_SCALE_IMAGE\n )\n\n if debug:\n # Draw a rectangle around faces\n for (x, y, w, h) in faces:\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)\n except:\n raise\n\n return frame, faces", "def request():\n return face_client.face.detect_with_stream(image=open(\"frame.png\", 'rb'),\n return_face_attributes=[emotion_attribute],\n recognition_model='recognition_02')", "def get_frame(self):\n\n # Grabs, decodes, and returns the next video frame\n ret, frame = self.video.read()\n\n # Convert image into grayscale\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # Find all the faces in the current frame of video\n face_locations = face_recognition.face_locations(gray)\n\n # For each face detected, put bounding box around face detected\n # and display smile prediction\n for face_location in face_locations:\n count = 0\n\n # Pixel location for face bounding box\n top, right, bottom, left = face_location\n box = (left, top, right, bottom)\n\n # Call predict_smile() method to predict probabilities for smile\n # vs no smile\n predictions = predict_smile(gray, box, count)\n\n # Draw a box around the face on current frame\n cv2.rectangle(frame, (left, top), (right, bottom),\n (255, 255, 255), 2)\n\n # Round predictions and display on current frame\n smile_prob = str(round((predictions[0][1])*100, 1))\n cv2.putText(frame, f'{smile_prob}% smiling', (left, bottom+35),\n cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)\n\n count += 1\n\n # Encodes and return an image into a memory buffer\n ret, jpeg = cv2.imencode('.jpg', frame)\n return jpeg.tobytes()", "def video_handle_for_demo():\n frame = cv2.imread(\"vision.png\")\n\n return frame", "def track_face(video):\n\n # Get the cascade classifier with pre-trained classifiers\n face_cascade = cv2.CascadeClassifier('frontal_face.xml')\n\n # initialize a previous face with zeros\n prev_face = (0, 0, 0, 0)\n result = []\n\n # Loop until frame from video capture returns None\n while True:\n _, img = video.read()\n if img is None:\n break\n\n # convert to gray scale\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # Do the face detection. 
Set minNeighbors relatively high\n # to eliminate false positive faces.\n faces = face_cascade.detectMultiScale(gray, 1.3, 5,\n cv2.cv.CV_HAAR_SCALE_IMAGE)\n\n # If the number of faces found is 0, use face from previous frame\n # as a good indicator of where face in the current frame should be.\n # Has inherent error but works well.\n if len(faces) == 0:\n faces = [prev_face]\n\n # Sort faces by width of bounding box in reverse order\n sort_face = sorted(faces, key=lambda x: x[2], reverse=True)\n\n # Get bounding box of largest face, assumes smaller ones are\n # false positives.\n x, y, w, h = sort_face[0]\n\n # append to results\n result.append((x, y, x + w, y + h))\n # set previous face\n prev_face = faces[0]\n\n if VISUALIZE:\n cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\n roi_gray = gray[y: y + h, x: x + w]\n roi_color = img[y: y + h, x: x + w]\n cv2.imshow('img', img)\n cv2.waitKey(30)\n\n return result", "def detect(self, frame): \n return self.__detect_faces(frame)", "def get_pose():\n files = {'file': ('image.jpg', open(\n 'assets/image.jpg', 'rb'), 'images/jpeg')}\n result = requests.post(URL, files=files).json()\n img = cv2.imread('assets/image.jpg')[:, :, ::-1]\n return result, img", "def get_frame(self, frame: int) -> BaseImage:\n return self.sequence[frame]", "def video_by_frame(video):\n cap = cv2.VideoCapture(video)\n\n while True:\n ret, im = cap.read()\n yield im", "def convertDetectFrame(self):\n \n self.processDetectFrame()\n try:\n img = QImage(self.currentDetectFrame,\n self.currentDetectFrame.shape[1],\n self.currentDetectFrame.shape[0],\n QImage.Format_RGB888\n )\n return img\n except:\n return None", "def extract_faces(frame_path, out_path, face_path, processes=1):\n if os.path.exists(out_path):\n msg = '[extract_faces] Skipping extraction since faces already exist at {}'\n print(msg.format(out_path))\n return\n\n from faceoff.faceswap_api import FaceSwapInterface\n\n os.makedirs(out_path)\n print('[extract_faces] Starting on {}'.format(frame_path))\n start_time = time.time()\n\n api = FaceSwapInterface()\n api.extract(frame_path, out_path, face_path, processes)", "def face_detection(frame):\n if frame is None :\n return 0,0,0,0\n \n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n faces = faceCascade.detectMultiScale(\n gray,\n scaleFactor=1.1,\n minNeighbors=5,\n minSize=(30, 30),\n flags=cv2.CASCADE_SCALE_IMAGE\n )\n\n # Draw a rectangle around the faces\n position_x, position_y ,width,height = 0, 0, 0, 0\n for x, y, w, h in faces:\n position_x, position_y ,width,height = x, y, w, h\n\n return position_x, position_y,width,height", "def runFaceRecognition(useHOG=False):\n #Open a handler for the camera\n video_capture = cv2.VideoCapture(CAMERA_DEVICE_ID)\n\n #Setup database\n database = setupDatabase()\n\n skipFrame = 0\n\n while video_capture.isOpened():\n #Skip every 2 frames to increase frame rate\n if (skipFrame < 2):\n skipFrame += 1\n continue\n else:\n skipFrame = 0\n\n #Read frame from camera and check that it went ok\n ok, frame = video_capture.read()\n if not ok:\n print(\"\\n[!] Error reading frame from camera. 
\", end=\"\")\n print(\"Video capture stopped.\\n\")\n break\n\n #Run facial detection and recognition on image\n detectAndRecognizeFacesInImage(frame,\n database, useHOG)\n\n #Display the resulting image\n cv2.imshow('Video', frame)\n\n #Hit 'q' on the keyboard to quit!\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n #Release handle to the webcam\n video_capture.release()\n cv2.destroyAllWindows()", "def get_image(self, frame):\n msec = frame * config.MS_PER_FRAME\n frame = msec // 250\n return self.frames[frame % self.num_frames]", "def get_frame(self):\r\n\r\n # Reading the Video and grasping the Frames\r\n _, frame = self.video.read()\r\n\r\n # Converting the Color image to Gray Scale\r\n gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n\r\n # Image size is reduced by 30% at each image scale.\r\n scaleFactor = 1.3\r\n\r\n # 5 neighbors should be present for each rectangle to be retained.\r\n minNeighbors = 5\r\n\r\n # Detect the Faces in the given Image and store it in faces.\r\n faces = facec.detectMultiScale(gray_frame, scaleFactor, minNeighbors)\r\n\r\n # Iterating through all the faces detected\r\n for (x, y, w, h) in faces:\r\n\r\n # Taking the Face part in the Image as Region of Interest.\r\n roi = gray_frame[y:y+h, x:x+w]\r\n\r\n # Let us resize the Image accordingly to use pretrained model.\r\n roi = cv2.resize(roi, (48, 48))\r\n\r\n # Let us make the Prediction of Emotion present in the Image.\r\n prediction = model.predict_emotion(\r\n roi[np.newaxis, :, :, np.newaxis])\r\n\r\n # Custom Symbols to print with text of emotion.\r\n Symbols = {\"Happy\": \":)\", \"Sad\": \":}\", \"Surprise\": \"!!\",\r\n \"Angry\": \"?\", \"Disgust\": \"#\", \"Neutral\": \".\", \"Fear\": \"~\"}\r\n\r\n # Defining the Parameters for putting Text on Image\r\n Text = str(prediction) + Symbols[str(prediction)]\r\n Text_Color = (180, 105, 255)\r\n\r\n Thickness = 2\r\n Font_Scale = 1\r\n Font_Type = cv2.FONT_HERSHEY_SIMPLEX\r\n\r\n # Inserting the Text on Image\r\n cv2.putText(frame, Text, (x, y), Font_Type,\r\n Font_Scale, Text_Color, Thickness)\r\n\r\n # Finding the Coordinates and Radius of Circle\r\n xc = int((x + x+w)/2)\r\n yc = int((y + y+h)/2)\r\n radius = int(w/2)\r\n\r\n # Drawing the Circle on the Image\r\n cv2.circle(frame, (xc, yc), radius, (0, 255, 0), Thickness)\r\n\r\n # Encoding the Image into a memory buffer\r\n _, jpeg = cv2.imencode('.jpg', frame)\r\n\r\n # Returning the image as a bytes object\r\n return jpeg.tobytes()", "def detect_face(self, img):\n #convert the test image to gray image as opencv face detector expects gray images\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n #let's detect multiscale (some images may be closer to camera than others) images\n #result is a list of faces\n faces = self.face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5);\n\n #if no faces are detected then return None\n if (len(faces) == 0):\n return None, None\n\n #under the assumption that there will be only one face,\n #extract the face area\n (x, y, w, h) = faces[0]\n\n #return only the face part of the image\n return gray[y:y+w, x:x+h], faces[0]", "def get_data(self):\n global CAM\n count = 0\n while CAM.isOpened():\n count += 1\n print('COUNT' + str(count))\n _, frame = CAM.read()\n\n # cropped face\n cropped_face, bbox_coordinate, anchor_coordinate = detect_faces(frame)\n if cropped_face is None:\n print(\"NONE FACE DETECTED\")\n sleep(1)\n continue\n\n # get fake face\n fake_face, profile_feature_vector = generate_frontal_face(cropped_face)\n\n cropped_face = 
cv2.cvtColor(cropped_face, cv2.COLOR_BGR2RGB)\n fake_face = cv2.cvtColor(fake_face, cv2.COLOR_BGR2RGB)\n\n # face matching\n face_matcher = FaceMatcher()\n matched_face, matched_name, matched_front_fake_face, matched_diff = \\\n face_matcher.match(cropped_face, fake_face, profile_feature_vector)\n\n matched_face = cv2.cvtColor(matched_face, cv2.COLOR_BGR2RGB)\n matched_front_fake_face = cv2.cvtColor(matched_front_fake_face, cv2.COLOR_BGR2RGB)\n\n _, cropped_face_jpeg = cv2.imencode('.jpg', cropped_face)\n _, fake_face_jpeg = cv2.imencode('.jpg', fake_face)\n _, matched_face_jpeg = cv2.imencode('.jpg', matched_face)\n _, matched_front_fake_face_jpeg = cv2.imencode('.jpg', matched_front_fake_face)\n\n encoded_cropped_face = \"data:image/jpg;base64,\" + str(\n base64.b64encode(cropped_face_jpeg.tobytes()).decode())\n encoded_fake_face = \"data:image/jpg;base64,\" + str(\n base64.b64encode(fake_face_jpeg.tobytes()).decode())\n\n encoded_matched_face = \"data:image/jpg;base64,\" + str(\n base64.b64encode(matched_face_jpeg.tobytes()).decode())\n encoded_matched_front_fake_face = \"data:image/jpg;base64,\" + str(\n base64.b64encode(matched_front_fake_face_jpeg.tobytes()).decode())\n\n # get detection model return here and send to face frontalization model\n SIO.emit('detection', {'cropped_face': encoded_cropped_face,\n 'fake_face': encoded_fake_face,\n 'matched_face': encoded_matched_face,\n 'matched_name': matched_name,\n 'matched_front_fake_face': encoded_matched_front_fake_face,\n 'id': uuid.uuid4().hex},\n namespace='/detections')\n sleep(self.delay)", "def detect_face(face_file, max_results=4):\n client = vision.ImageAnnotatorClient()\n\n content = face_file.read()\n image = types.Image(content=content)\n\n return client.face_detection(image=image).face_annotations", "def detector(videoframe, facedetection, maskdetection):\n (h, w) = videoframe.shape[:2]\n blobimage = cv2.dnn.blobFromImage(videoframe, 1.0, (224, 224), (104.0, 177.0, 123.0))\n\n facedetection.setInput(blobimage)\n ffinding = facedetection.forward()\n\n face_list = []\n locations = []\n predictions = []\n\n for i in range(0, ffinding.shape[2]):\n credence = ffinding[0, 0, i, 2]\n if credence > 0.6:\n case = ffinding[0, 0, i, 3:7] * np.array([w, h, w, h])\n (x_start, y_start, x_end, y_end) = case.astype(\"int\")\n (x_start, y_start) = (max(0, x_start), max(0, y_start))\n (x_end, y_end) = (min(w - 1, x_end), min(h - 1, y_end))\n\n image = videoframe[y_start:y_end, x_start:x_end]\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = cv2.resize(image, (224, 224))\n image = img_to_array(image)\n image = preprocess_input(image)\n face_list.append(image)\n locations.append((x_start, y_start, x_end, y_end))\n\n if len(face_list) > 0:\n face_list = np.array(face_list, dtype=\"float32\")\n predictions = maskdetection.predict(face_list, batch_size=32)\n return (locations, predictions)", "def captureNextFrame(self):\r\n mainls = []\r\n\r\n\r\n ret, readFrame = self.capture.read()\r\n\r\n if (ret == True):\r\n self.currentFrame = cv2.cvtColor(readFrame, cv2.COLOR_BGR2RGB)\r\n self.faceDetection(self.currentFrame)\r\n self.currentFrame = self.bbFrame", "def video_faces(self, video_path, coord_path):\n\n\t\tcap = cv2.VideoCapture(video_path)\n\t\t\n\t\t# Check if camera opened successfully\n\t\tif not cap.isOpened():\n\t\t\tprint(\"Error opening video stream or file\")\n\t\t\treturn None\n\t\t\n\t\tvideo_fps = cap.get(cv2.CAP_PROP_FPS)\n\t\tread_fps = 30\n\t\tdiv = video_fps / read_fps\n\t\t\n\t\tvideo_frames = 
int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\t\t\n\t\tcoords_present = os.path.exists(coord_path)\n\t\t\t\n\t\tif coords_present:\n\t\t\tall_coords = load_from_file(coord_path)\n\t\telse:\n\t\t\tall_coords = np.empty(shape=(video_frames, 4), dtype=np.int64)\n\n\t\tfaces = []\n\t\t\n\t\ti = -1\n\t\twhile cap.isOpened():\n\t\t\tret, frame = cap.read()\n\t\t\tif ret:\n\t\t\t\ti += 1\n\t\t\t\tif not i % div:\n\t\t\t\t\tframe = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\t\t\t\t\tif not coords_present:\n\t\t\t\t\t\tall_coords[i] = self.get_face_coords(frame)\n\t\t\t\t\tc = all_coords[i]\n\t\t\t\t\tif (c == -1).all():\n\t\t\t\t\t\tcontinue\n\t\t\t\t\telse:\n\t\t\t\t\t\tface = frame[c[0]:c[1], c[2]:c[3]]\n\t\t\t\t\t\tface = cv2.resize(face, (self.input_size[0], self.input_size[1])).astype('float32')\n\t\t\t\t\t\tfaces.append(face)\n\t\t\telse:\n\t\t\t\tbreak\n\t\tcap.release()\n\t\t\n\t\tfaces = np.array(faces)\n\t\t\t\n\t\treturn faces, all_coords", "def extract_faces(image_path: str, pk: int):\n image = Image.open(image_path)\n image = np.array(image)\n\n if image.shape[0] <= 0 or image.shape[1] <= 0:\n return None\n\n import mtcnn\n\n # detect faces from image\n face_detector = mtcnn.MTCNN()\n detections = face_detector.detect_faces(image)\n\n if len(detections) < 1:\n return None\n\n from deepface.basemodels.Facenet import InceptionResNetV2\n\n # load InceptionResNet model provided by deepface\n facenet_model = InceptionResNetV2()\n facenet_model.load_weights(get_weights(\"facenet\"))\n\n # normalize faces and get embeddings\n faces = [normalize_face(image, face) for face in detections]\n embeddings = facenet_model.predict(np.vstack(faces), batch_size=len(faces))\n\n for i in range(len(faces)):\n person_id = recognize_person(embeddings[i])\n print(person_id, flush=True)\n face_obj = models.Face.objects.create(\n confidence=detections[i]['confidence'],\n left=detections[i]['box'][0],\n top=detections[i]['box'][1],\n width=detections[i]['box'][2],\n height=detections[i]['box'][3],\n photo_id=pk,\n person_id=person_id\n )\n\n save_embeddings(embeddings[i], face_obj.id, person_id)", "def start_video_feed_parsing(img_processor='hog'):\n\n video_capture = cv2.VideoCapture(0)\n\n while True:\n _, frame = video_capture.read()\n\n frame = IMG_PROCESSORS[img_processor].mark_up_faces(frame)\n cv2.imshow('Video', frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n # When everything is done, release the capture\n video_capture.release()\n cv2.destroyAllWindows()", "def detection():\n faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + \"haarcascade_frontalface_default.xml\")\n faces = faceCascade.detectMultiScale(\n gray,\n scaleFactor=1.3,\n minNeighbors=3,\n minSize=(30, 30)\n )\t#Haar-cascade: A Face detection algorithm\n\n area = faces[:,2] * faces[:,3]\n faces = np.c_[faces,area]\t#concatenates area values to last column of 'face' array.\n\n print('All detected faces\\n',faces)\n i,j = unravel_index(faces.argmax(), faces.shape)\t# gets the position of maximum value from 'face' array.\n print(i,j)\n print(\"Found %d Face%s!\" %(len(faces),\"s\"[len(faces)==1:]))\n\n X = faces[i,0]\n Y = faces[i,1]\n W = faces[i,2]\n H = faces[i,3]\n \n cv2.rectangle(image, (X, Y), (X + W, Y + H), (0, 255, 0), 2)\n roi_color = image[Y:Y + H, X:X + W] \n print(\"Face(largest) Extracted.\")\n cv2.imwrite('Extracted_face.jpg', roi_color)\t#Image Extraction.\n status = cv2.imwrite('Output.jpg', image)\n print(\"Image Output.jpg written to filesystem: \", status)", "def get_image():\n bgr = np.frombuffer(\n 
stream.read_frame().get_buffer_as_uint8(), dtype=np.uint8\n ).reshape(RESOLUTIONY, RESOLUTIONX, 3)\n rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)\n return rgb", "def take_picture(path):\n cascadePath = \"haarcascade_frontalface_default.xml\"\n faceCascade = cv2.CascadeClassifier(cascadePath)\n video_capture = cv2.VideoCapture(0)\n ret, frame = video_capture.read()\n video_capture.release()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n #\n #cv2.imwrite(path,frame)\n #\n if detect_face(frame):\n cv2.imwrite(path,gray)\n return True\n return False", "def __call__(self, video_path, per_frames = 1 , offset = None):\n \n cap = cv2.VideoCapture(video_path)\n \n if not cap.isOpened():\n raise Exception(\"Video file does not exist or is invalid\")\n\n \n if offset:\n cap.set(cv2.CAP_PROP_POS_MSEC, offset)\n \n \n info = []\n\n while cap.isOpened():\n ret, frame = cap.read()\n if ret:\n if cap.get(cv2.CAP_PROP_POS_FRAMES) % per_frames == 0:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n faces_info = self.detect_faces_from_image(frame,\n desired_width=224, desired_height=224) \n if faces_info:\n for element in faces_info:\n face_img = image.img_to_array(element[1])\n\n face_img = utils.preprocess_input(face_img, version=1)\n face_img = np.expand_dims(face_img, axis=0)\n\n features = self.vgg_feature_extractor.predict(face_img)\n label = self.gender_svm.predict(features)[0]\n decision_value = round(self.gender_svm.decision_function(features)[0], 3)\n\n bounding_box = element[0][0]\n detection_score = round(element[5], 3)\n bbox_length = bounding_box.bottom() - bounding_box.top()\n\n info.append([\n cap.get(cv2.CAP_PROP_POS_FRAMES), bounding_box, (bbox_length, bbox_length), label,\n decision_value, detection_score\n ])\n\n else:\n break\n cap.release()\n info = pd.DataFrame.from_records(info, columns = ['frame', 'bb', 'size','label', 'decision', 'conf'])\n return info", "def get_movie_frame(movie_file, frame=0):\n movie = cv2.VideoCapture(movie_file)\n _, image = movie.read() \n height, width, _ = image.shape\n filename = os.path.splitext(movie_file)[0] + f'_{frame}.jpg'\n cv2.imwrite(filename, image)\n \n return filename, height, width", "def get_pose_from_file(f, d):\n filepath = 'assets/images/' + str(d) + '/' + str(f)\n\n with open(filepath, 'rb') as name:\n _, ext = str(f).split('.')\n frame = cv2.imread(filepath)[:, :, ::-1]\n files = {'file': (filepath, name, 'images/' + ext)}\n result = requests.post(URL, files=files).json()\n\n return result, frame", "def detect_face(face_file, max_results=10):\n client = vision.ImageAnnotatorClient()\n\n content = face_file.read()\n image = types.Image(content=content)\n\n return client.face_detection(image=image).face_annotations" ]
[ "0.648261", "0.6263333", "0.6202552", "0.6190077", "0.61684835", "0.60556257", "0.5997593", "0.59863806", "0.59741646", "0.5964527", "0.594866", "0.5927054", "0.5911717", "0.590194", "0.5895345", "0.58833265", "0.5880981", "0.5844954", "0.5831536", "0.5815507", "0.58091724", "0.5803979", "0.57947165", "0.5783663", "0.5783354", "0.57421184", "0.5735059", "0.5713849", "0.56900847", "0.5685426" ]
0.7714512
0