Dataset columns:

    query            string   (length 9 to 9.05k)
    document         string   (length 10 to 222k)
    metadata         dict
    negatives        sequence (length 30)
    negative_scores  sequence (length 30)
    document_score   string   (length 4 to 10)
    document_rank    string   (2 distinct values)
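Each row pairs a natural-language query (a docstring) with the code document that implements it, together with 30 negative code snippets, their scores, and the score and rank of the positive document. A minimal sketch of loading and inspecting such a dataset with the Hugging Face `datasets` library follows; the repository id is a placeholder, not the actual name of this dataset:

from datasets import load_dataset

# Hypothetical repository id; substitute the real one for this dataset.
ds = load_dataset("org/code-search-triplets", split="train")

row = ds[0]
print(row["query"])             # natural-language docstring
print(row["document"][:200])    # matching code snippet
print(len(row["negatives"]))    # 30 negative snippets
print(row["document_score"], row["document_rank"])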
Returns halo (row of data) given a ``nodeIndex``
def get_halo(self, index):
    try:
        halo = self.data.loc[index]
    except KeyError:
        raise IndexError(
            "Halo id %d not found in %s" % (index, self.filename)
        )
    return halo
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_node(self, index):\r\n\t\tself._validate_index(index)\r\n\t\treturn self._traverse(lambda i, list: i < index)[\"node\"]", "def halo_host(self, index):\n halo = self.get_halo(index)\n return (\n halo\n if halo.name == halo[\"hostIndex\"]\n else self.halo_host(self.get_halo(halo[\"hostIndex\"]).name)\n )", "def halo_host(self, index):\n halo = self.get_halo(index)\n return (\n halo\n if halo.name == halo[\"hostIndex\"]\n else self.halo_host(self.get_halo(halo[\"hostIndex\"]).name)\n )", "def get(self, node_index):\n return self.nodes[node_index + self.low_idx]", "def __getitem__(self, index):\n node = self.head\n index += 1\n for level in reversed(range(self.max_levels)):\n while node.width[level] <= index:\n index -= node.width[level]\n node = node.next[level]\n return node.value", "def get(self, index):\n if self.head is None:\n raise Exception('Node vide')\n else:\n return self.leonardo_recurs(index, self.head)", "def right_child(self, index):\n return 2 * index + 1", "def __get_row(self, index: int) -> int:\n return index // self.columns", "def _get_node_at(self, index):\n assert isinstance(index, int) \n if index >= 0: \n steps = index \n else:\n steps = self.size() + index\n if steps < 0:\n return None \n node = self.head\n while steps > 0 and node is not None:\n node = node.next_node\n steps -= 1 \n return node", "def read_index(self, index):\n current = self.head\n if index == 0:\n return current.data\n elif index >= self.size() :\n return None\n else:\n position = 0\n while position < index:\n current = current.next_node\n position += 1\n return current.data", "def TestWay2(index):\n data_len = len(data)\n if index < -data_len or index > data_len - 1:\n return None\n return data[index]", "def nthnode(self, nl_p=None, index=0):\n # TODO: create a method called nthnodename\n if not nl_p:\n nl_p = self.getnetnodes()\n # (const nodelist_bn* nodes, int index)\n cnetica.NthNode_bn.argtypes = [c_void_p, c_int]\n cnetica.NthNode_bn.restype = c_void_p\n return cnetica.NthNode_bn(nl_p, index) # node_p", "def right_child(self, index):\n return 2 * index + 2", "def __get_node_at_index(self, index: int) -> Node[T]:\n if 0 <= index and index < len(self):\n current = self.head\n for i in range(index):\n current = current.link\n return current\n else:\n raise ValueError(\"Index out of bounds\")", "def _get_node_at(self, index):\n assert isinstance(index, int)\n from_head = True if index >= 0 else False \n if from_head: \n node = self.head\n steps = index \n else:\n node = self.tail \n steps = abs(index) -1 \n while steps > 0 and node is not None:\n node = node.next_node if from_head else node.prev_node \n steps -= 1 \n return node", "def get_node_from_index(self, index):\n curr = self.head\n for i in range(index):\n curr = curr.next\n return curr", "def get_index(self, index):\n return self.get_node_from_index(index).data", "def left_child(self, index):\n return 2 * index", "def get(self, index):\n return self._get_node(index)", "def getNeuron(self, index):\n\t\treturn self.loader.getNeuron(index)", "def get_node_from_index(self, i, j):\n if 0 <= i < self.resized_height and 0 <= j < self.resized_width:\n return self.nodes[i, j]\n else:\n return None", "def leonardo_recurs(self, index, node):\n print(index, node)\n if node is None:\n return node\n if index == 0:\n return node\n else:\n return self.leonardo_recurs(index - 1, node.next)", "def TestWay(index):\n if index < -len(data) or index > len(data) - 1:\n return None\n return data[index]", "def left_child(self, index):\n return 2 * 
index + 1", "def getNode(self, index):\n return self.nodes[index]", "def get(self, index):\r\n if index >= self.length():\r\n print(\"ERROR\")\r\n return None\r\n current_index = 0\r\n current_node = self.head\r\n while True:\r\n current_node = current_node.next\r\n if current_index == index: return current_node.data\r\n current_index += 1", "def right(self, index):\n try:\n if index == self.root_index():\n index = self.adjacency_list[index][1]\n else:\n index = self.adjacency_list[index][2]\n return index\n except IndexError:\n return -1", "def at(self, index):\n if index < 0 or index >= self.length:\n raise \"Index out of range\"\n node = self.head\n i=0\n while i<=index:\n if i==index:\n return node\n node = node.next\n i = i+1", "def FindLeafNode(self, node, index):\n if node.start > index or node.end() <= index:\n if self.debug:\n print node.ToPrettyString();\n print index;\n raise ValueError(\"Node don't contain index\");\n if node.start == index and node.level == 0: return node;\n if not node.children:\n raise ValueError(\"Didn't find the index\");\n for child in node.children:\n if child.start <= index and child.end() > index:\n return self.FindLeafNode(child, index);\n if self.debug:\n print node.ToPrettyString();\n print index;\n print \"node.start=%d\" % node.start;\n print \"node.end=%d\" % node.end();\n raise ValueError(\"Shouldn't reach the end\");", "def read_single_halo(self, treenum, halonum, **kwargs):\n return self.read_single_tree(treenum, halonum=halonum, **kwargs)" ]
[ "0.5902977", "0.5608498", "0.5608498", "0.5394057", "0.5361975", "0.53427607", "0.5313705", "0.52938735", "0.526928", "0.5263665", "0.5258102", "0.5251677", "0.5238716", "0.52234507", "0.52188444", "0.52096754", "0.51846796", "0.51694787", "0.51663357", "0.516497", "0.5148196", "0.5144117", "0.5116902", "0.5089499", "0.5054708", "0.50516045", "0.5048964", "0.5024094", "0.4995002", "0.49736083" ]
0.6535571
1
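The positive document above retrieves a halo as a row of a pandas DataFrame indexed by ``nodeIndex``. A minimal, self-contained sketch of the same pattern, with a toy table standing in for the merger-tree data (the wrapper class and the values are illustrative, not the original implementation):

import pandas as pd

class HaloCatalogue:
    def __init__(self, data, filename="<in-memory>"):
        # `data` is expected to be indexed by nodeIndex.
        self.data = data
        self.filename = filename

    def get_halo(self, index):
        try:
            return self.data.loc[index]
        except KeyError:
            raise IndexError(
                "Halo id %d not found in %s" % (index, self.filename)
            )

data = pd.DataFrame(
    {"hostIndex": [1, 1, 3], "particleNumber": [500, 120, 80]},
    index=pd.Index([1, 2, 3], name="nodeIndex"),
)
catalogue = HaloCatalogue(data)
print(catalogue.get_halo(2)["hostIndex"])   # -> 1
# catalogue.get_halo(99) would raise IndexError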
Finds indices of all progenitors of a halo, recursively.
def halo_progenitor_ids(self, index):
    _progenitors = []

    def rec(i):
        _progenitor_ids = self.data[self.data["descendantHost"] == i][
            "hostIndex"
        ].unique()
        logging.debug(
            "Progenitors recursion: %d > %d (%d progenitors)",
            index,
            i,
            len(_progenitor_ids),
        )
        if len(_progenitor_ids) == 0:
            return
        for _progenitor_id in _progenitor_ids:
            # if _progenitor_id not in _progenitors:  # TODO: this only eliminates fly-byes
            _progenitors.append(_progenitor_id)
            rec(_progenitor_id)

    rec(index)

    logging.info(
        "%d progenitors found for halo %d", len(_progenitors), index
    )
    return _progenitors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def halo_progenitor_ids(self, index):\n _progenitors = []\n\n def rec(i):\n _progenitor_ids = self.data[self.data[\"descendantHost\"] == i][\n \"hostIndex\"\n ].unique()\n logging.debug(\n \"Progenitors recursion: %d > %d (%d progenitors)\",\n index,\n i,\n len(_progenitor_ids),\n )\n if len(_progenitor_ids) == 0:\n return\n for _progenitor_id in _progenitor_ids:\n # TODO: this only eliminates fly-bys:\n # if _progenitor_id not in _progenitors:\n _progenitors.append(_progenitor_id)\n rec(_progenitor_id)\n\n rec(index)\n\n logging.info(\n \"%d progenitors found for halo %d\", len(_progenitors), index\n )\n return _progenitors", "def find_halos(pos, ngrid, log, level=3000):\n print('Binning particles', file=log)\n cells = get_cells(pos, ngrid, log)\n count = bincount(cells, minlength=ngrid**3)\n count.shape = (ngrid,ngrid,ngrid)\n print('Count in', count.min(), count.max(), file=log)\n idx = flatnonzero(count>level)\n print('Number of cells above', level, 'is', len(idx), file=log)\n \n \n labels, num_features = ndimage.label(count>level)\n print('Number fo features', num_features, file=log)\n print('Labels in', labels.min(), labels.max(), file=log)\n locations = ndimage.find_objects(labels)\n\n dense_regions = []\n\n for i in range(num_features):\n loc = locations[i]\n hw = max(l.stop - l.start for l in loc) * 0.5 /ngrid\n hw_padded = hw + 0.0/ngrid\n\n ctr =[(0.5/ngrid)*(l.stop + l.start) for l in loc]\n count_i = count[loc][labels[loc]==(i+1)].sum()\n print('Count', count_i, file=log)\n dense_regions.append((count_i, ctr, hw_padded))\n\n # sort by number of particles in the region\n dense_regions = sorted(dense_regions, key = lambda num_ctr_hw :num_ctr_hw[0], reverse=True)\n\n return dense_regions", "def get_main_branch_indices(self):\n\n assert self.halt is not None\n prog_main_index = self.halt_index\n prog_main_indices = self.halt.prop(\n 'progenitor.main.indices', self.halt_index)\n self.main_branch_indices = prog_main_indices\n return prog_main_indices", "def neighborhood(G,n,o):\n base = G[n]\n neighbors = {}\n neighbors[n] = 0\n newNodes = set(neighbors.keys())\n for i in range(1,o+1):\n #for node in neighbors.keys():\n nodes = newNodes.copy()\n newNodes = set()\n for node in nodes:\n branch = G[node]\n for node in branch:\n if node not in neighbors:\n newNodes.add(node)\n neighbors[node]=i\n return neighbors", "def hoggar_indices():\n return list(product([0,1], repeat=6))", "def get_indices(waves):\n prob_ = np.abs(waves)**2\n # batch\n prob = [np.sum(prob_[i:i+4,:], axis=0) for i in range(0, len(waves[:,0]), 4)]\n prob = np.asarray(prob)\n prob_tot = np.sum(prob, axis=0)\n \n # cutoff\n length = np.size(prob[:,0])\n len10 = int(length/10)\n flags = np.zeros((prob.shape[1]), dtype=int)\n # hinges\n # 50% within 10% of corners\n\n # surface\n # 50% within 10% of surfaces\n # not already labelled hinges\n prob_left = np.sum(prob[0:len10,:], axis=0)\n frac_left = prob_left/prob_tot\n\n prob_right = np.sum(prob[length-len10:length,:], axis=0)\n frac_right = np.divide(prob_right, prob_tot)\n\n for i in range(len(flags)):\n if frac_left[i]>0.5 or frac_right[i]>0.5:\n flags[i] = 1\n \n indices = [i for i, x in enumerate(flags) if x == 1]\n indices0 = [i for i, x in enumerate(flags) if x == 0]\n \n return indices, indices0", "def getNeighbours(seg,meta,inversedIndex):\n return np.unique(np.fromiter( (inversedIndex[x] for x in np.concatenate([meta.loc[seg]['ins'],meta.loc[seg]['outs']])),dtype=np.int))", "def iter_all_hypo_isomorphic(hypo_indicator, nhypo):\n hypo_ind = [i for i in 
range(nhypo)]\n for permuted in uperm(hypo_ind):\n perm_hypo_indicator = []\n for li in hypo_indicator:\n if len(li) >= 1:\n perm_li = [permuted[v] for v in li]\n perm_hypo_indicator.append(sorted(perm_li))\n elif len(li) == 0:\n perm_hypo_indicator.append(li)\n yield perm_hypo_indicator", "def findall(l, o):\n return [i for i, u in enumerate(l) if u==o]", "def iter_hypo_indicator(nhypo, n_pattern, n_overlap):\n # for i, x in enumerate(iter_hypo_indicator(2,6,5)):\n # print(i, x)\n base_bag = [[]]\n base_count = 0\n additional_bag =[[]]\n additional_count = 0\n for hypo_base in pattern_hypo_product_space(nhypo, n_pattern):\n if hypo_indicator_filter(hypo_base, nhypo, base_bag):\n base_bag.append([])\n base_count += 1\n base_bag[base_count] = hypo_base\n # print(base_bag)\n for hypo_overlap in pattern_powerhypo_product_space(nhypo-1, n_pattern):\n if overlap_filter(hypo_overlap, n_overlap):\n hypo_overlap = remap_overlap_indicator(hypo_overlap, hypo_base, nhypo)\n hypo_indicator = concatenate_hypo_indicators(hypo_base, hypo_overlap)\n if not is_hypobag_isomorphic(additional_bag, hypo_indicator, nhypo):\n additional_bag.append([])\n additional_count += 1\n additional_bag[additional_count] = hypo_indicator\n # print(additional_bag)\n yield hypo_indicator", "def search(self) -> int:\n # crete node list\n for x in range(self.n):\n for y in range(self.n):\n if not self.grid[y][x] == 0:\n self.all_nodes.append((x, y))\n # recursively create paths\n i = 0\n paths = [[(0, 0)]]\n while i < self.n * self.n:\n paths = self.generate_paths(paths)\n if isinstance(paths, int):\n return paths\n i += 1\n\n return -1", "def childWellIndices(self):\n return self._wellIndices", "def indices(self):\n _indices = []\n for h in self.miller.indices():\n _indices.append(self.indices_hkl(*h)[0])\n return _indices", "def build_hals_main_branch_indices(self, do_host=False):\n\n def get_mmp_index(my_id, previous_hal, mmp_prop='vel.circ.max'):\n # first get the indices in the previous catalog where the descendant is my ID\n if not len(previous_hal):\n return -2**31\n\n progenitor_indices = np.where(\n previous_hal.prop('descendant.id') == my_id)[0]\n if not progenitor_indices.size:\n return -2**31\n\n # then get the sorting values of the progenitor halos\n progenitor_mmp_prop = previous_hal.prop(\n mmp_prop, progenitor_indices)\n\n # then return the index of the one that's the biggest\n return progenitor_indices[np.argmax(progenitor_mmp_prop)]\n\n assert self.hals is not None\n if do_host == True or do_host == 'host' or do_host == 'host1':\n starting_index = self.hals[-1].prop('host.index')[0]\n store_name = 'host_hals_mb_indices'\n elif do_host == 'host2' or do_host == '2':\n starting_index = self.hals[-1].prop('host2.index')[0]\n store_name = 'host2_hals_mb_indices'\n else:\n assert self.index is not None\n starting_index = self.index\n store_name = 'hals_mb_indices'\n\n res = np.empty(len(self.hals), dtype=int)\n res.fill(-2**31)\n\n current_snapshot_index = len(self.hals) - 1\n my_index = starting_index\n while my_index >= 0:\n res[current_snapshot_index] = my_index\n\n my_id = self.hals[current_snapshot_index].prop('id', my_index)\n my_index = get_mmp_index(\n my_id, self.hals[current_snapshot_index-1])\n current_snapshot_index -= 1\n\n self.__dict__[store_name] = res\n return res", "def vitoria_1(tab,jog):\r\n for i in range(1,4):\r\n win = [(0,jog,jog), (jog,0,jog), (jog,jog,0)]\r\n coluna = obter_coluna(tab, i)\r\n linha = obter_linha(tab, i) \r\n if coluna in win:\r\n return i+3*win.index(coluna)\r\n elif linha 
in win:\r\n return 3*i-2+win.index(linha) \r\n if i!=3:\r\n diagonal = obter_diagonal(tab, i)\r\n if diagonal in win:\r\n if i==1:\r\n return i+4*win.index(diagonal)\r\n\r\n else:\r\n return 7-2*win.index(diagonal)", "def get_main_points(neuron):\n (branch_index,) = np.where(neuron.branch_order[neuron.n_soma:] == 2)\n (endpoint_index,) = np.where(neuron.branch_order[neuron.n_soma:] == 0)\n selected_index = np.union1d(branch_index + neuron.n_soma,\n endpoint_index + neuron.n_soma)\n selected_index = np.append(range(neuron.n_soma), selected_index)\n return selected_index", "def findIndices(g):\r\n change = [0]\r\n seen = [g[0]]\r\n for i in range(1, len(g)):\r\n if not g[i] in seen:\r\n change.append(i)\r\n seen.append(g[i])\r\n return change", "def _list_of_elements_in_H(self):\n N = self.level()\n if N != 1:\n gcd = arith.gcd\n H = [x for x in range(1, N) if gcd(x, N) == 1]\n else:\n H = [1]\n\n return H", "def iter_leaf_idx(self):\n def leaf_idx(tree, total):\n total[0] += 1\n for elem in tree[1:]:\n if isinstance(elem, Tree):\n for elem2 in leaf_idx(elem, total):\n yield total[0]\n else:\n yield total[0]\n total[0] += 1\n return leaf_idx(self, [0])", "def get_gt_hom_idxs(alt_num):\n last = -1\n hom_idxs = []\n for a in range(alt_num + 1):\n last = last + (a + 1)\n hom_idxs.append(last)\n return hom_idxs", "def SectionIndicesConnectedToSoma(self):\n indices = []\n index = 0\n for each_section in self._section_list:\n if each_section.ParentId() == -1:\n indices.append(index)\n index += 1\n return indices", "def _occupation_set(index):\n indices = set()\n\n # For bit manipulation we need to count from 1 rather than 0\n index += 1\n\n indices.add(index - 1)\n parent = index & (index - 1)\n index -= 1\n while index != parent:\n indices.add(index - 1)\n # Remove least significant one from index\n # E.g. 00010100 -> 00010000\n index &= index - 1\n return indices", "def get_pent_idx(pent):\n pidx = 0\n for i in range(pent.shape[0]):\n for j in range(pent.shape[1]):\n if pent[i][j] != 0:\n pidx = pent[i][j]\n break\n if pidx != 0:\n break\n if pidx == 0:\n return -1\n return pidx - 1", "def oprime_inds(self, obj_inds):\n if type(obj_inds) == set:\n obj_inds = list(obj_inds)\n try:\n common_intent = self.np_table[obj_inds[0], :].copy()\n except IndexError:\n return set(range(len(self.attributes)))\n else:\n for obj_ind in obj_inds[1:]:\n common_intent &= self.np_table[obj_ind, :]\n return common_intent.nonzero()[0]", "def vir_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==0:\n indices.append(index)\n return indices", "def jw_number_indices(n_electrons, n_qubits):\n occupations = itertools.combinations(range(n_qubits), n_electrons)\n indices = [sum([2**n for n in occupation]) for occupation in occupations]\n return indices", "def occ_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==1:\n indices.append(index)\n return indices", "def match(head_list_at_crossing, wall_color_at_crossing, p):\n num_index2 = int((p-1)/2)\n indicies_of_heads = []\n # print(num_index2)\n for i in range(1, num_index2+1):\n # print(i)\n # print(indicies_of_heads)\n indicies_of_heads += find(i, head_list_at_crossing)\n # print(indicies_of_heads)\n return indicies_of_heads", "def filter_halo_pnum(data, Ncut=1000):\n npart = np.array(data['np'][0])\n ind =np.where(npart > Ncut)[0]\n print(\"# of halos:\",len(ind))\n return ind", "def omega(self):\n return [coset for coset in range(len(self.p)) if self.p[coset] == coset]" ]
[ "0.6863077", "0.5833046", "0.5638713", "0.5561099", "0.5542297", "0.5519623", "0.54976434", "0.54312", "0.537776", "0.53390366", "0.53045774", "0.5278391", "0.5255487", "0.52501196", "0.5239766", "0.522312", "0.52182275", "0.5186579", "0.5170028", "0.51365596", "0.5129794", "0.50402755", "0.5028078", "0.50225174", "0.5009092", "0.500398", "0.49896482", "0.49632493", "0.49525642", "0.49225065" ]
0.6898143
0
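The recursion above walks the merger tree backwards from a host: it collects every halo whose ``descendantHost`` points at the current index, then recurses into each of those progenitors. A small worked example on a toy table, assuming the same column names (``descendantHost``, ``hostIndex``) used throughout these snippets:

import pandas as pd

data = pd.DataFrame(
    {
        # halo 10 descends into host 1; halos 20 and 21 descend into 10
        "hostIndex":      [1, 10, 20, 21],
        "descendantHost": [-1, 1, 10, 10],
    },
    index=pd.Index([1, 10, 20, 21], name="nodeIndex"),
)

def halo_progenitor_ids(data, index):
    progenitors = []

    def rec(i):
        ids = data[data["descendantHost"] == i]["hostIndex"].unique()
        for pid in ids:
            progenitors.append(int(pid))
            rec(pid)

    rec(index)
    return progenitors

print(halo_progenitor_ids(data, 1))   # -> [10, 20, 21]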
Finds the host of a halo. Recursively continues until it hits the main halo, in case of multiply embedded subhaloes.
def halo_host(self, index):
    halo = self.get_halo(index)
    return (
        halo
        if halo.name == halo["hostIndex"]
        else self.halo_host(self.get_halo(halo["hostIndex"]).name)
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updateSubhalos(host,file, host2sub):\n if not (host.ID in host2sub):\n return\n g = open(file,'r')\n for posn in host2sub[host.ID]:\n g.seek(posn)\n line = g.readline()\n sub = MTH.MTHalo(line)\n if sub.pid != host.ID:\n print 'WARNING: ERROR: halo not sub of host! Proceeding anyway'\n tree = MT.MergerTree(file,sub.ID)\n tree.haloList.append(sub)\n if sub.num_prog==0:\n tree.progenitors.append(sub)\n # Now deal with all other halos in the tree\n index = 1\n line = g.readline()\n while line !='' and line[0:5] != '#tree':\n halo = MTH.MTHalo(line)\n tree.haloList.append(halo)\n if halo.num_prog ==0:\n tree.progenitors.append(halo)\n updateLinks(tree.haloList, index)\n line = g.readline()\n index += 1\n host.subhalos.append(sub)\n g.close()", "def updateSubhalos_old(host, file):\n f = open(file, 'r')\n line = f.readline()\n i = 0\n while line != '':\n if line[0:5] == \"#tree\":\n #if i%10000 == 0:\n #print 'subhalo finder scanned ', i, ' trees'\n i+=1\n num = int(line[6::])\n # Deal with a=0 halo independently\n line = f.readline()\n sub = MTH.MTHalo(line)\n if sub.pid == host.ID: # not upid. only subhalos, not subsub etc.\n #build tree, add to subhalo list of host\n tree = MT.MergerTree(file, num)\n tree.haloList.append(sub)\n if sub.num_prog ==0:\n tree.progenitors.append(sub)\n\n # Now deal with all other halos in the tree\n index = 1\n line = f.readline()\n while line !='' and line[0:5] != '#tree':\n halo = MTH.MTHalo(line)\n tree.haloList.append(halo)\n if halo.num_prog ==0:\n tree.progenitors.append(halo)\n updateLinks(tree.haloList, index)\n line = f.readline()\n index +=1\n # add a=1 subhalo to subhalo list of host (maybe should add tree?)\n host.subhalos.append(sub)\n else:\n line = f.readline()\n else:\n line = f.readline()\n f.close()", "def build_hals_main_branch_indices(self, do_host=False):\n\n def get_mmp_index(my_id, previous_hal, mmp_prop='vel.circ.max'):\n # first get the indices in the previous catalog where the descendant is my ID\n if not len(previous_hal):\n return -2**31\n\n progenitor_indices = np.where(\n previous_hal.prop('descendant.id') == my_id)[0]\n if not progenitor_indices.size:\n return -2**31\n\n # then get the sorting values of the progenitor halos\n progenitor_mmp_prop = previous_hal.prop(\n mmp_prop, progenitor_indices)\n\n # then return the index of the one that's the biggest\n return progenitor_indices[np.argmax(progenitor_mmp_prop)]\n\n assert self.hals is not None\n if do_host == True or do_host == 'host' or do_host == 'host1':\n starting_index = self.hals[-1].prop('host.index')[0]\n store_name = 'host_hals_mb_indices'\n elif do_host == 'host2' or do_host == '2':\n starting_index = self.hals[-1].prop('host2.index')[0]\n store_name = 'host2_hals_mb_indices'\n else:\n assert self.index is not None\n starting_index = self.index\n store_name = 'hals_mb_indices'\n\n res = np.empty(len(self.hals), dtype=int)\n res.fill(-2**31)\n\n current_snapshot_index = len(self.hals) - 1\n my_index = starting_index\n while my_index >= 0:\n res[current_snapshot_index] = my_index\n\n my_id = self.hals[current_snapshot_index].prop('id', my_index)\n my_index = get_mmp_index(\n my_id, self.hals[current_snapshot_index-1])\n current_snapshot_index -= 1\n\n self.__dict__[store_name] = res\n return res", "def hal_9000(self, hal_board):\r\n\r\n # if HAL can win, HAL swiftly eliminates Dave\r\n for num in range(0, 8):\r\n pit_board = self.copy_board(hal_board)\r\n if self.check_if_empty(pit_board, num):\r\n pit_board[num] = \"O\"\r\n if self.check_for_win(pit_board, 
\"O\"):\r\n return num\r\n\r\n # if Dave can win on next move, block him\r\n for num in range(0, 8):\r\n pit_board = self.copy_board(hal_board)\r\n if self.check_if_empty(pit_board, num):\r\n pit_board[num] = \"X\"\r\n if self.check_for_win(pit_board, \"X\"):\r\n return num\r\n\r\n # if the middle is open, HAL takes it\r\n if self.check_if_empty(hal_board, 4):\r\n return 4\r\n\r\n # if one of the corners are open, HAL takes it\r\n for num in [0, 2, 6, 8]:\r\n if self.check_if_empty(hal_board, num):\r\n return num\r\n\r\n # if all else fails, HAL will choose a random free space\r\n for num in [1, 3, 5, 7]:\r\n if self.check_if_empty(hal_board, num):\r\n return num", "def check_main_branches(df):\n if not df.scale.is_monotonic_decreasing:\n raise RuntimeError(\"`df.scale` is not descending.\")\n\n # First halo as no descendants and the last halo has no progenitor.\n desc_ids = df.desc_id.values[1:]\n halo_ids = df.id.values[:-1]\n # The desc_id of the last halo should be the next halo_id\n if np.all(desc_ids == halo_ids):\n return None\n else:\n # Return ID of the last correct halo\n return df.id.values[np.argmin(desc_ids == halo_ids)]", "def _setup_children(self):\n\n for i in xrange(self._nhalos):\n self._halos[i + 1].properties['children'] = []\n\n for i in xrange(self._nhalos):\n host = self._halos[i + 1].properties.get('hostHalo', -2)\n if host > -1:\n try:\n self._halos[host + 1].properties['children'].append(i + 1)\n except KeyError:\n pass", "def _setup_children(self):\n\n for i in xrange(self._nhalos):\n self._halos[i+1].properties['children'] = []\n\n for i in xrange(self._nhalos):\n host = self._halos[i+1].properties.get('hostHalo', -2)\n if host > -1:\n try:\n self._halos[host+1].properties['children'].append(i+1)\n except KeyError:\n pass", "def find_sandwich_bottom(blk):\n # Always follow the main branch of a flow: the last connection.\n _blk = blk.connections[len(blk.connections) - 1]\n while _blk is not None:\n if _blk.name in ['sandwichtop', 'sandwichtop_no_label',\n 'sandwichtop_no_arm', 'sandwichtop_no_arm_no_label']:\n return None\n if _blk.name in COLLAPSIBLE:\n return _blk\n _blk = _blk.connections[len(_blk.connections) - 1]\n return None", "def findNextSite(container):\n while container:\n if IContainmentRoot.providedBy(container):\n return None\n try:\n container = get_parent(container)\n if container is None:\n return None\n except TypeError:\n return None\n if ISite.providedBy(container):\n return container", "def get_framework_neighbours(atom, useH=True):\n neighbourlist = []\n for atom2 in atom.partner[:5]:\n #if not 'H(' in atom2.name and np.linalg.norm(atom.cart-atom2.cart)<=1.6:\n if np.linalg.norm(atom.cart - atom2.cart) <= float(covalence_radius[atom.element]) + float(\n covalence_radius[atom2.element]) + .1:\n if not 'H' == atom2.element or useH:\n neighbourlist.append(atom2)\n return neighbourlist", "def _find_hilight(self):\n traverse = deque([self.item])\n while traverse:\n item = traverse.popleft()\n if item.hilight:\n return item.hilight\n traverse.extendleft(reversed(item.subitems))", "def get_local_hypervisor(self):\n # Look up hypervisors available filtered by my hostname\n host = self.get_my_hostname()\n hyp = self.get_all_hypervisor_ids(filter_by_host=host)\n if hyp:\n return hyp[0]", "def particleLastHalo(particle, halo):\n for i in range(0, len(halo.fullParents)):\n if halo.fullParents[i].containsParticle(particle):\n return halo.fullParents[i]\n return -1", "def build_halo_mask(fixed_depth=30, margin=21, min_fragment=10):\n assert margin % 2 is 
not 0, \"Margin should be odd\"\n\n rr, cc = circle(margin / 2, margin / 2, margin / 2 + 1, shape=(margin, margin))\n structure_element = numpy.zeros((margin, margin))\n structure_element[rr, cc] = 1\n structure_element = numpy.repeat(numpy.expand_dims(numpy.expand_dims(structure_element, 0), 0), fixed_depth, 0)\n\n sel = torch.from_numpy(structure_element).float().to(device)\n\n def f(label):\n \"\"\"\n \n :param label: batch of instance levels each instance must have unique id\n :return: labels, masks and object_lists used by halo loss\n \"\"\"\n back = numpy.zeros((label.shape[0], fixed_depth, label.shape[1], label.shape[2]))\n object_list = []\n for i in range(label.shape[0]):\n bincount = numpy.bincount(label[i].flatten())\n pixels = numpy.where(bincount > min_fragment)[0]\n if len(pixels) > fixed_depth:\n pixels = pixels[:fixed_depth]\n warnings.warn(\"Not all objects fits in fixed depth\", RuntimeWarning)\n\n for l, v in enumerate(pixels):\n back[i, l, label[i] == v] = 1.\n object_list.append(numpy.array(range(l + 1)))\n\n labels = torch.from_numpy(back).float().to(device)\n masks = F.conv2d(labels, sel, groups=fixed_depth, padding=margin / 2)\n \n masks[masks > 0] = 1.\n masks[labels > 0] = 2.\n masks[:, 0, :, :] = 1.\n \n weights=masks.sum(-1,keepdim=True).sum(-2,keepdim=True)\n weights[weights==0.]=1.\n \n masks = masks/weights\n \n return labels, masks, object_list\n\n return f", "def build_host( self, node ):\n\n try:\n\n if node.url is not None:\n \n NodeId = node.url\n if self.state[ 'probe' ].resolve_a( node ) is None: return node\n\n elif node.a_records is not None:\n\n NodeId = node.a_records[0]\n if self.state[ 'probe' ].resolve_ptr( node ) is None: return node \n\n else:\n self._log( 'build_host', 'DEBUG', 'Empty host object detected, unable to process {}'.format( node ) )\n\n # Pull Coords If Geoip Available\n if self.state[ 'geoip' ] is not None:\n self.state[ 'probe' ].pull_geoip( node, self.state[ 'geoip' ] )\n\n # Ignore everything without an exchange\n if self.state[ 'probe' ].resolve_mx( node ) is None: return None \n\n # Pull down our TXT records\n if self.state[ 'probe' ].resolve_txt( node ) is None: return node\n\n except:\n self._log( 'build_host', 'DEBUG', 'Lookup has failed for {}'.format( NodeId ) )\n\n return node", "def coalesce_hosts(self, host_node):\n c_hosts = host_node.children\n assert len(c_hosts) == 2, \"Error: function assumes binary tree\"\n h1, h2 = c_hosts\n\n # label the ancestral host node\n host_node.name = h1.name + '_' + h2.name\n\n # extract affected pathogen lineages in each descendant (H1, H2) of the host node\n p1 = filter(lambda x: x.host == h1, self.extant_p)\n p2 = filter(lambda x: x.host == h2, self.extant_p)\n\n if len(p1)>0 and len(p2) > 0 and random.uniform(0,1) < self.p_cospec:\n # cospeciation - pathogen lineages carried in H1 and H2 coalesce in host node\n # TODO: What if there are multiple pathogen lineages in H1 and/or H2?\n # Possibilities: (1) select one random pair of pathogen lineages to coalesce (only 1 cospeciation)\n # (2) every pair of pathogen lineages in H1/H2 has probability of cospeciation\n # This makes it possible for 3 or more path. 
lineages to coalesce at once\n # Current implementation (below) assumes (1).\n\n pick1 = random.sample(p1, 1)[0] # returns a list\n pick2 = random.sample(p2, 1)[0]\n pick1.host = host_node # relocate these pathogen lineages to ancestral host\n pick2.host = host_node\n to_coalesce = [pick1, pick2]\n self.coalesce_paths(to_coalesce, t0=host_node.height)\n\n # carry over all lineages to the ancestral host\n for node in p1+p2:\n node.host = host_node\n\n # update host lists\n self.extant_h.remove(h1)\n self.not_extant_h.append(h1)\n self.extant_h.remove(h2)\n self.not_extant_h.append(h2)\n self.extant_h.append(host_node)", "def check_one_neighbor_present(duthosts, per_host, asic, neighbor, nbrhosts, all_cfg_facts):\n cfg_facts = all_cfg_facts[per_host.hostname][asic.asic_index]['ansible_facts']\n\n neighs = cfg_facts['BGP_NEIGHBOR']\n inband_info = get_inband_info(cfg_facts)\n local_ip = neighs[neighbor]['local_addr']\n\n if local_ip == inband_info['ipv4_addr'] or local_ip == inband_info['ipv6_addr']:\n # skip inband neighbors\n return\n\n # Check neighbor on local linecard\n local_port = get_port_by_ip(cfg_facts, local_ip)\n if local_port is None:\n logger.error(\"Did not find port for this neighbor %s, must skip\", local_ip)\n return\n\n neigh_mac = get_neighbor_info(neighbor, nbrhosts)['mac']\n if neigh_mac is None:\n logger.error(\"Could not find neighbor MAC, must skip. IP: %s, port: %s\", local_ip, local_port)\n\n local_dict = check_local_neighbor(per_host, asic, neighbor, neigh_mac, local_port)\n logger.info(\"Local_dict: %s\", local_dict)\n\n # Check the same neighbor entry on the supervisor nodes\n slotname = cfg_facts['DEVICE_METADATA']['localhost']['hostname']\n asicname = cfg_facts['DEVICE_METADATA']['localhost']['asic_name']\n\n if per_host.is_multi_asic and len(duthosts.supervisor_nodes) == 0:\n check_voq_neighbor_on_sup(per_host, slotname, asicname, local_port,\n neighbor, local_dict['encap_index'], neigh_mac)\n else:\n for sup in duthosts.supervisor_nodes:\n check_voq_neighbor_on_sup(sup, slotname, asicname, local_port,\n neighbor, local_dict['encap_index'], neigh_mac)\n\n # Check the neighbor entry on each remote linecard\n for rem_host in duthosts.frontend_nodes:\n\n for rem_asic in rem_host.asics:\n if rem_host == per_host and rem_asic == asic:\n # skip remote check on local host\n continue\n rem_cfg_facts = all_cfg_facts[rem_host.hostname][rem_asic.asic_index]['ansible_facts']\n remote_inband_info = get_inband_info(rem_cfg_facts)\n if remote_inband_info == {}:\n logger.info(\"No inband configuration on this asic: %s/%s, will be skipped.\", rem_host.hostname,\n rem_asic.asic_index)\n continue\n remote_inband_mac = get_sonic_mac(rem_host, rem_asic.asic_index, remote_inband_info['port'])\n check_voq_remote_neighbor(rem_host, rem_asic, neighbor, neigh_mac, remote_inband_info['port'],\n local_dict['encap_index'], remote_inband_mac)", "def _get_halo(self,i):\n if self._order is False:\n if self._subs is True:\n #this needs to be tested again on a snapshot that is not ordered!\n x = Halo(i, self, self.base, np.where(np.in1d(self.base['iord'], self.ids[self._subhalodat['sub_off'][i]:self._subhalodat['sub_off'][i]+self._subhalodat['sub_len'][i]] )))\n else:\n x = Halo(i, self, self.base, np.where(np.in1d(self.base['iord'], self.ids[self._halodat['group_off'][i]:self._halodat['group_off'][i]+self._halodat['group_len'][i]] )))\n \n else:\n if self._subs is False: #to use groups as halos:\n x = Halo(i, self, self.base, 
self.ids[self._halodat['group_off'][i]:self._halodat['group_off'][i]+self._halodat['group_len'][i]] ) \n else:\n x=Halo(i, self, self.base, self.ids[self._subhalodat['sub_off'][i]:self._subhalodat['sub_off'][i]+self._subhalodat['sub_len'][i]] )\n \n x._descriptor = \"halo_\"+str(i)\n x.properties.update(self.get_halo_properties(i))\n return x", "def find_host_key(self, value):\n for key in self:\n if value in key.hosts:\n return key\n return None", "def in_hsts_preload(host: typing.AnyStr) -> bool:\n\n if isinstance(host, str):\n host = host.encode(\"ascii\")\n labels = host.lower().split(b\".\")\n\n # Fast-branch for gTLDs that are registered to preload all sub-domains.\n if labels[-1] in _GTLD_INCLUDE_SUBDOMAINS:\n return True\n\n with open_pkg_binary(\"hstspreload.bin\") as f:\n for layer, label in enumerate(labels[::-1]):\n # None of our layers are greater than 4 deep.\n if layer > 3:\n return False\n\n # Read the jump table for the layer and label\n jump_info = _JUMPTABLE[layer][_crc8(label)]\n if jump_info is None:\n # No entry: host is not preloaded\n return False\n\n # Read the set of entries for that layer and label\n f.seek(jump_info[0])\n data = bytearray(jump_info[1])\n f.readinto(data)\n\n for is_leaf, include_subdomains, ent_label in _iter_entries(data):\n # We found a potential leaf\n if is_leaf:\n if ent_label == host:\n return True\n if include_subdomains and host.endswith(b\".\" + ent_label):\n return True\n\n # Continue traversing as we're not at a leaf.\n elif label == ent_label:\n break\n else:\n return False\n return False", "def in_hsts_preload(host: typing.AnyStr) -> bool:\n\n if isinstance(host, str):\n host = host.encode(\"ascii\")\n labels = host.lower().split(b\".\")\n\n # Fast-branch for gTLDs that are registered to preload all sub-domains.\n if labels[-1] in _GTLD_INCLUDE_SUBDOMAINS:\n return True\n\n with open_pkg_binary(\"hstspreload.bin\") as f:\n for layer, label in enumerate(labels[::-1]):\n # None of our layers are greater than 4 deep.\n if layer > 3:\n return False\n\n # Read the jump table for the layer and label\n jump_info = _JUMPTABLE[layer][_crc8(label)]\n if jump_info is None:\n # No entry: host is not preloaded\n return False\n\n # Read the set of entries for that layer and label\n f.seek(jump_info[0])\n data = bytearray(jump_info[1])\n f.readinto(data)\n\n for is_leaf, include_subdomains, ent_label in _iter_entries(data):\n # We found a potential leaf\n if is_leaf:\n if ent_label == host:\n return True\n if include_subdomains and host.endswith(b\".\" + ent_label):\n return True\n\n # Continue traversing as we're not at a leaf.\n elif label == ent_label:\n break\n else:\n return False\n return False", "def in_hsts_preload(host: typing.AnyStr) -> bool:\n\n if isinstance(host, str):\n host = host.encode(\"ascii\")\n labels = host.lower().split(b\".\")\n\n # Fast-branch for gTLDs that are registered to preload all sub-domains.\n if labels[-1] in _GTLD_INCLUDE_SUBDOMAINS:\n return True\n\n with open_pkg_binary(\"hstspreload.bin\") as f:\n for layer, label in enumerate(labels[::-1]):\n # None of our layers are greater than 4 deep.\n if layer > 3:\n return False\n\n # Read the jump table for the layer and label\n jump_info = _JUMPTABLE[layer][_crc8(label)]\n if jump_info is None:\n # No entry: host is not preloaded\n return False\n\n # Read the set of entries for that layer and label\n f.seek(jump_info[0])\n data = bytearray(jump_info[1])\n f.readinto(data)\n\n for is_leaf, include_subdomains, ent_label in _iter_entries(data):\n # We found a potential 
leaf\n if is_leaf:\n if ent_label == host:\n return True\n if include_subdomains and host.endswith(b\".\" + ent_label):\n return True\n\n # Continue traversing as we're not at a leaf.\n elif label == ent_label:\n break\n else:\n return False\n return False", "def in_hsts_preload(host: typing.AnyStr) -> bool:\n\n if isinstance(host, str):\n host = host.encode(\"ascii\")\n labels = host.lower().split(b\".\")\n\n # Fast-branch for gTLDs that are registered to preload all sub-domains.\n if labels[-1] in _GTLD_INCLUDE_SUBDOMAINS:\n return True\n\n with open_pkg_binary(\"hstspreload.bin\") as f:\n for layer, label in enumerate(labels[::-1]):\n # None of our layers are greater than 4 deep.\n if layer > 3:\n return False\n\n # Read the jump table for the layer and label\n jump_info = _JUMPTABLE[layer][_crc8(label)]\n if jump_info is None:\n # No entry: host is not preloaded\n return False\n\n # Read the set of entries for that layer and label\n f.seek(jump_info[0])\n data = bytearray(jump_info[1])\n f.readinto(data)\n\n for is_leaf, include_subdomains, ent_label in _iter_entries(data):\n # We found a potential leaf\n if is_leaf:\n if ent_label == host:\n return True\n if include_subdomains and host.endswith(b\".\" + ent_label):\n return True\n\n # Continue traversing as we're not at a leaf.\n elif label == ent_label:\n break\n else:\n return False\n return False", "def coalesce_within_root(self, host_node):\n height = host_node.height\n while len(self.extant_p) > 1 and len(self.choices) >= 1:\n if self.verbose:\n print self\n self.get_pairs()\n if len(self.choices) == 0:\n #\n return\n height += random.expovariate(len(self.choices)*self.c_rate)\n cpaths = random.choice(self.choices.keys())\n self.coalesce_paths(cpaths, t0=height)\n if self.verbose:\n print self", "def get_halite_move(game, ship, args = None):\n if args is None:\n args = {}\n\n move = \"o\"\n\n if DEBUG & (DEBUG_NAV): logging.info(\"Nav - ship {} is getting a density based move\".format(ship.id))\n\n moves = []\n for blocks in game.game_map.get_cell_blocks(ship.position, 3, 3): # returns array of tuples [(direction), CellBlock]\n directional_offset = blocks[0]\n block = blocks[1]\n\n if block.get_max() > Mining_threshold:\n moves.append((directional_offset, block, block.get_mean()))\n\n sorted_blocks = sorted(moves, key=lambda item: item[2], reverse=True)\n\n if not sorted_blocks:\n move = get_random_move(game, ship) # ToDo: would be better to try a large search radius?\n if DEBUG & (DEBUG_NAV): logging.info(\"Nav - ship {} All surrounding cells have halite < threshold({}) . 
Returning random move: {}\".format(ship.id, Mining_threshold, move))\n return move\n\n best_bloc_data = sorted_blocks[0] # (directional_offset, block, block mean value)\n max_cell_amount = best_bloc_data[1].get_max()\n\n if DEBUG & (DEBUG_NAV): logging.info(\"Nav - ship {} found {} valid halite cells with a the max cell containing {} halite\".format(ship.id, len(sorted_blocks), max_cell_amount))\n\n for best_cell in best_bloc_data[1].get_cells():\n if best_cell.halite_amount == max_cell_amount:\n break\n\n move_offset = best_bloc_data[0]\n\n new_position = game.game_map.normalize(ship.position.directional_offset(move_offset))\n\n normalized_position = game.game_map.normalize(new_position)\n\n cell = game.game_map[normalized_position]\n\n if DEBUG & (DEBUG_NAV): logging.info(\"Nav - Ship {} best cell new_position: {}, offset: {}, value: {}\".format(ship.id, normalized_position, move_offset, cell.halite_amount))\n\n #\n # collision resolution\n #\n if cell.is_occupied:\n game.collisions.append((ship, cell.ship, Direction.convert(move_offset), normalized_position, resolve_halite_move)) # args = alt moves?\n if DEBUG & (DEBUG_NAV): logging.info(\"Nav - ship {} collided with ship {} at {} while moving {}\".format(ship.id, cell.ship.id, normalized_position, Direction.convert(move_offset)))\n return None\n\n #\n # success\n #\n cell.mark_unsafe(ship)\n move = Direction.convert(move_offset)\n\n # blocks are guaranteed to have at least 1 cell that is minable. This is critical to avoid getting stuck\n # between two blocks each of which is never modified. For a 3x3 block, add 'plus_one' assures that we move\n # far enough to reach the modifiable cell, thus preventing an endless movement between blocks\n move_plus_one = Position(best_cell.position.x, best_cell.position.y) # go one more move in the same direction\n if cell.position != move_plus_one:\n if DEBUG & (DEBUG_NAV): logging.info(\"Nav - Ship {} has a plus_one move of {}, halite: {}\".format(ship.id, move_plus_one, max_cell_amount))\n ship.path.append(move_plus_one)\n\n return move", "def host(self, host):\n for p, c in self.configs_:\n if host in c.hosts_:\n return c.host(host)\n return {}", "def solve_h(ene, channel, dipole, n, h):\n\n (n0, l0, l1) = channel\n v = lambda x: -1.0/x + l1*(l1+1)*0.5/(x*x)\n \n if(dipole == \"length\"):\n driv_term = HAtom(1.0).length(n0, l0, l1)\n elif(dipole == \"velocity\"):\n driv_term = HAtom(1.0).velocity(n0, l0, l1)\n else:\n raise(Exception(\"dipole must be length or velocity\"))\n \n s = lambda x: driv_term.at(x)\n return solve_driv(v, ene, s, n, h)", "def find_sandwich_top(blk):\n # Always follow the main branch of a flow: the first connection.\n _blk = blk.connections[0]\n while _blk is not None:\n if _blk.name in COLLAPSIBLE:\n return None\n if _blk.name in ['repeat', 'if', 'ifelse', 'forever', 'while']:\n if blk != _blk.connections[len(_blk.connections) - 1]:\n return None\n if _blk.name in ['sandwichtop', 'sandwichtop_no_label',\n 'sandwichtop_no_arm', 'sandwichtop_no_arm_no_label']:\n return _blk\n blk = _blk\n _blk = _blk.connections[0]\n return None", "def same_host_descent(G, host_species):\n nodes = set() # nodes that are involved in human-human transmission.\n for sc, sk, d in G.edges(data=True):\n sc_host = G.node[sc]['host_species']\n sk_host = G.node[sk]['host_species']\n \n sc_subtype = G.node[sc]['subtype']\n sk_subtype = G.node[sk]['subtype']\n \n subtype_exclusions = ['Mixed', 'mixed']\n \n not_mixed = (sc_subtype not in subtype_exclusions) and (sk_subtype not in subtype_exclusions)\n 
is_clonal = d['edge_type'] == 'full_complement'\n \n if sc_host == host_species and sk_host == host_species and not_mixed and is_clonal:\n nodes.add(sc)\n nodes.add(sk)\n\n return nodes", "def get_hnodes(self,h):\n t_nodes = self.get_h(h)\n for t_node in t_nodes:\n t_node = self.tree.get_node(t_node)\n self.check_childs(t_node.identifier)" ]
[ "0.56107956", "0.54044807", "0.53122574", "0.52616286", "0.524105", "0.51986015", "0.5194676", "0.5027002", "0.49752408", "0.49468756", "0.49244332", "0.49071574", "0.48990795", "0.4883024", "0.48149812", "0.47726056", "0.47632203", "0.47600392", "0.47526672", "0.47278097", "0.47278097", "0.47278097", "0.47278097", "0.47053036", "0.46668738", "0.46597758", "0.46552837", "0.46507588", "0.46407896", "0.46192312" ]
0.63591355
1
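``halo_host`` climbs the subhalo hierarchy: a halo is its own host when its row label equals its ``hostIndex``; otherwise the lookup recurses on the host, so doubly (or deeper) embedded subhaloes still resolve to the top-level host. A worked toy example under the same conventions (pandas index = ``nodeIndex``, ``.name`` = the row label); the values are made up for illustration:

import pandas as pd

data = pd.DataFrame(
    # halo 3 is a subhalo of 2, which is itself a subhalo of the main halo 1
    {"hostIndex": [1, 1, 2]},
    index=pd.Index([1, 2, 3], name="nodeIndex"),
)

def halo_host(data, index):
    halo = data.loc[index]
    return (
        halo
        if halo.name == halo["hostIndex"]
        else halo_host(data, data.loc[halo["hostIndex"]].name)
    )

print(halo_host(data, 3).name)   # -> 1 (the main halo)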
Finds mass of central halo and all subhaloes.
def halo_mass(self, index):
    return self.data[self.data["hostIndex"] == index][
        "particleNumber"
    ].sum()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_center_of_mass_allies(self,obs):", "def center_of_mass(self, entity, geometric=False):\n\n # Structure, Model, Chain, Residue\n if isinstance(entity, Entity.Entity):\n atom_list = entity.get_atoms()\n # List of Atoms\n elif hasattr(entity, \"__iter__\") and [x for x in entity if x.level == \"A\"]:\n atom_list = entity\n # Some other weirdo object\n else:\n raise ValueError(\n f\"Center of Mass can only be calculated from the following objects:\\n\"\n f\"Structure, Model, Chain, Residue, list of Atoms.\"\n )\n\n masses = []\n positions = [[], [], []] # [ [X1, X2, ..] , [Y1, Y2, ...] , [Z1, Z2, ...] ]\n\n for atom in atom_list:\n masses.append(atom.mass)\n\n for i, coord in enumerate(atom.coord.tolist()):\n positions[i].append(coord)\n\n # If there is a single atom with undefined mass complain loudly.\n if \"ukn\" in set(masses) and not geometric:\n raise ValueError(\n f\"Some atoms don't have an element assigned.\\n\"\n f\"Try adding them manually or calculate the geometrical center of mass instead.\"\n )\n\n if geometric:\n return [sum(coord_list) / len(masses) for coord_list in positions]\n else:\n w_pos = [[], [], []]\n for atom_index, atom_mass in enumerate(masses):\n w_pos[0].append(positions[0][atom_index] * atom_mass)\n w_pos[1].append(positions[1][atom_index] * atom_mass)\n w_pos[2].append(positions[2][atom_index] * atom_mass)\n\n return [sum(coord_list) / sum(masses) for coord_list in w_pos]", "def calculate_molecular_mass(symbols):\n\n mass = 0\n for atom in symbols:\n mass += atom_weigths[atom]\n\n return mass", "def get_center_of_mass_enemies(self,obs):", "def test_molar_mass():\n first = molar_mass({\"H\":2, \"O\":1})\n assert first == approx(18.01528)\n second = parse_formula(\"C6H6\")\n test2 = molar_mass(second)\n assert test2 == approx(78.11184)\n third = parse_formula(\"PO4H2(CH2)12CH3\")\n test3 = molar_mass(third)\n assert test3 == approx(280.34072)", "def HI_mass(mhalo,aa):\n zp1 = 1.0/aa\n zz = zp1-1\n # Set the parameters of the HOD, using the \"simple\" form.\n # MHI ~ M0 x^alpha Exp[-1/x] x=Mh/Mmin\n # from the Appendix of https://arxiv.org/pdf/1804.09180.pdf, Table 6.\n # Fits valid for 1<z<6:\n mcut= 1e10*(6.11-1.99*zp1+0.165*zp1**2)\n alp = (1+2*zz)/(2+2*zz)\n # Work out the HI mass/weight per halo -- ignore prefactor.\n xx = mhalo/mcut+1e-10\n mHI = xx**alp * np.exp(-1/xx)\n # Scale to some standard number in the right ball-park.\n mHI*= 2e9*np.exp(-1.9*zp1+0.07*zp1**2)\n # Return the HI masses.\n return(mHI)\n #", "def calculate_center_of_mass(symbols, coordinates):\n\n total_mass = calculate_molecular_mass(symbols)\n\n mass_array = np.zeros([len(symbols),1])\n\n for i in range(len(symbols)):\n mass_array[i] = atomic_weights[symbols[i]]\n\n center_of_mass = sum(coordinates * mass_array) / total_mass\n\n return center_of_mass", "def calc_mass(self):\n\n star = self.star\n\n M, K, N = star.mesh_size\n ph = star.phi_coords\n mu = star.mu_coords\n r = star.r_coords\n\n def Q1(j, k):\n sum = 0\n\n for i in range(0, M - 2, 2):\n sum += (1 / 6) * (ph[i + 2] - ph[i]) * (star.rho[i, j, k] +\n 4 *\n star.rho[i + 1, j, k]\n + star.rho[i + 2, j, k])\n\n return 2 * sum\n\n def Q2(k):\n sum = 0\n\n for j in range(0, K - 2, 2):\n sum += (1 / 6) * (mu[j + 2] - mu[j]) * \\\n (Q1(j, k) + 4 * Q1(j + 1, k) + Q1(j + 2, k))\n\n return 2 * sum\n\n mass = 0\n\n for k in range(0, N - 2, 2):\n mass += (1 / 6) * (r[k + 2] - r[k]) * (r[k]**2 * Q2(k) +\n 4 * r[k + 1]**2 * Q2(k + 1) +\n r[k + 2]**2 * Q2(k + 2))\n\n return mass", "def _get_molecule_center_of_mass(self):\n 
center_of_mass = np.zeros([3], dtype=float)\n masses = self._prmtop[\"MASS\"]\n for atom_ind in range(len(self._crd)):\n center_of_mass += masses[atom_ind] * self._crd[atom_ind]\n total_mass = masses.sum()\n if total_mass == 0:\n raise RuntimeError(\"zero total mass\")\n return center_of_mass / total_mass", "def test_get_center_of_mass(self):\n symbols = ['C', 'H', 'H', 'H', 'H']\n coords = np.array([[0.0000000, 0.0000000, 0.0000000],\n [0.6269510, 0.6269510, 0.6269510],\n [-0.6269510, -0.6269510, 0.6269510],\n [-0.6269510, 0.6269510, -0.6269510],\n [0.6269510, -0.6269510, -0.6269510]], np.float64)\n center_of_mass = get_center_of_mass(coords=coords, symbols=symbols)\n for cm_coord in center_of_mass:\n self.assertEqual(cm_coord, 0.0)\n\n symbols = ['O', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H']\n coords = np.array([[1.28706525, 0.52121353, 0.04219198],\n [0.39745682, -0.35265044, -0.63649234],\n [0.36441173, -1.68197093, 0.08682400],\n [-0.59818222, 0.10068325, -0.65235399],\n [0.74799641, -0.48357798, -1.66461710],\n [0.03647269, -1.54932006, 1.12314420],\n [-0.31340646, -2.38081353, -0.41122551],\n [1.36475837, -2.12581592, 0.12433596],\n [2.16336803, 0.09985803, 0.03295192]], np.float64)\n center_of_mass = get_center_of_mass(coords=coords, symbols=symbols)\n self.assertAlmostEqual(center_of_mass[0], 0.7201, 3)\n self.assertAlmostEqual(center_of_mass[1], -0.4880, 3)\n self.assertAlmostEqual(center_of_mass[2], -0.1603, 3)\n\n numbers = [6, 6, 8, 1, 1, 1, 1, 1, 1]\n coords = np.array([[1.1714680, -0.4048940, 0.0000000],\n [0.0000000, 0.5602500, 0.0000000],\n [-1.1945070, -0.2236470, 0.0000000],\n [-1.9428910, 0.3834580, 0.0000000],\n [2.1179810, 0.1394450, 0.0000000],\n [1.1311780, -1.0413680, 0.8846660],\n [1.1311780, -1.0413680, -0.8846660],\n [0.0448990, 1.2084390, 0.8852880],\n [0.0448990, 1.2084390, -0.8852880]], np.float64)\n center_of_mass = get_center_of_mass(coords=coords, numbers=numbers)\n self.assertAlmostEqual(center_of_mass[0], -0.0540, 3)\n self.assertAlmostEqual(center_of_mass[1], -0.0184, 3)\n self.assertAlmostEqual(center_of_mass[2], -0.0000, 3)", "def get_effective_mass():\n\n H_BAR = 6.582119514e-16 # eV*s\n M_0 = 9.10938356e-31 # kg\n N_KPTS = 6 # Number of k-points included in the parabola.\n\n spin_up = Spin(1)\n\n band_structure = Vasprun('vasprun.xml').get_band_structure()\n\n # Locations of CBM and VBM in band_structure.bands\n cbm_band_index = band_structure.get_cbm()['band_index'][spin_up][0]\n cbm_kpoint_index = band_structure.get_cbm()['kpoint_index'][0]\n\n vbm_band_index = band_structure.get_vbm()['band_index'][spin_up][0]\n vbm_kpoint_index = band_structure.get_vbm()['kpoint_index'][0]\n\n k = {'electron': {'left': [], 'right': []},\n 'hole': {'left': [], 'right': []}}\n E = {'electron': {'left': [], 'right': []},\n 'hole': {'left': [], 'right': []}}\n\n e_ref_coords = band_structure.kpoints[cbm_kpoint_index]._ccoords\n h_ref_coords = band_structure.kpoints[vbm_kpoint_index]._ccoords\n\n for n in range(-N_KPTS, 1):\n e_coords = band_structure.kpoints[cbm_kpoint_index + n]._ccoords\n h_coords = band_structure.kpoints[vbm_kpoint_index + n]._ccoords\n\n k['electron']['left'].append(\n ((e_coords[0] - e_ref_coords[0])**2 +\n (e_coords[1] - e_ref_coords[1])**2 +\n (e_coords[2] - e_ref_coords[2])**2)**0.5\n )\n k['hole']['left'].append(\n ((h_coords[0] - h_ref_coords[0])**2 +\n (h_coords[1] - h_ref_coords[1])**2 +\n (h_coords[2] - h_ref_coords[2])**2)**0.5\n )\n\n e_energy = band_structure.bands[\n spin_up][cbm_band_index][cbm_kpoint_index + n]\n h_energy = 
band_structure.bands[\n spin_up][vbm_band_index][vbm_kpoint_index + n]\n\n E['electron']['left'].append(e_energy)\n E['hole']['left'].append(h_energy)\n\n for n in range(1, 1 + N_KPTS):\n e_coords = band_structure.kpoints[cbm_kpoint_index + n]._ccoords\n h_coords = band_structure.kpoints[vbm_kpoint_index + n]._ccoords\n\n k['electron']['right'].append(\n ((e_coords[0] - e_ref_coords[0])**2 +\n (e_coords[1] - e_ref_coords[1])**2 +\n (e_coords[2] - e_ref_coords[2])**2)**0.5\n )\n k['hole']['right'].append(\n ((h_coords[0] - h_ref_coords[0])**2 +\n (h_coords[1] - h_ref_coords[1])**2 +\n (h_coords[2] - h_ref_coords[2])**2)**0.5\n )\n\n e_energy = band_structure.bands[\n spin_up][cbm_band_index][cbm_kpoint_index + n]\n h_energy = band_structure.bands[\n spin_up][vbm_band_index][vbm_kpoint_index + n]\n\n E['electron']['right'].append(e_energy)\n E['hole']['right'].append(h_energy)\n\n # 2nd order fits\n e_l_fit = np.poly1d(\n np.polyfit(k['electron']['left'], E['electron']['left'], 2))\n e_r_fit = np.poly1d(\n np.polyfit(k['electron']['right'], E['electron']['right'], 2))\n h_l_fit = np.poly1d(\n np.polyfit(k['hole']['left'], E['hole']['left'], 2))\n h_r_fit = np.poly1d(\n np.polyfit(k['hole']['right'], E['hole']['right'], 2))\n\n # Curvatures\n e_l_curvature = e_l_fit.deriv().deriv()[0]\n e_r_curvature = e_r_fit.deriv().deriv()[0]\n h_l_curvature = h_l_fit.deriv().deriv()[0]\n h_r_curvature = h_r_fit.deriv().deriv()[0]\n\n # Unit conversion\n e_m_eff_l = 10 * ((H_BAR ** 2) / e_l_curvature) / M_0\n e_m_eff_r = 10 * ((H_BAR ** 2) / e_r_curvature) / M_0\n h_m_eff_l = -10 * ((H_BAR ** 2) / h_l_curvature) / M_0\n h_m_eff_r = -10 * ((H_BAR ** 2) / h_r_curvature) / M_0\n\n return {'electron': {'left': e_m_eff_l, 'right': e_m_eff_r},\n 'hole': {'left': h_m_eff_l, 'right': h_m_eff_r}}", "def cal_mass(self):\n\n if not self.check_def(['E','px','py','pz']):\n sys.exit('Particle error: Quadri impulsion not define (error for mass routine)')\n\n\n \n if self.E**2-self.px**2-self.py**2-self.pz**2>1e-7: #precision problem\n self.mass=math.sqrt(self.E**2-self.px**2-self.py**2-self.pz**2)\n else:\n self.mass=0", "def filter_halo_mass(data, Mcut=None):\n m = np.array(data['m'][0])\n ind =np.where(m > Mcut)[0]\n print(\"# of halos:\",len(ind))\n return ind", "def mass_tot_rho(self):\n\n dm = np.zeros(self.nzon)\n dm[0] = 4. * np.pi / 3. * (self.r[0] ** 3 - self.r_cen ** 3) * self.rho[0]\n for i in range(1, self.nzon):\n dm[i] = 4. / 3. 
* np.pi * (self.r[i] ** 3 - self.r[i - 1] ** 3) * self.rho[i]\n # print(f' M_tot(Density) = {np.sum(dm)/phys.M_sun:.3f}')\n return np.sum(dm)", "def populate(centres, masses, halomodel=None, profile=None, hodmod=None, edges=None):\r\n if halomodel is not None:\r\n profile = halomodel.halo_profile\r\n hodmod = halomodel.hod\r\n\r\n masses = np.array(masses)\r\n\r\n # Define which halos have central galaxies.\r\n cgal = np.random.binomial(1, hodmod.central_occupation(masses))\r\n cmask = cgal > 0\r\n central_halos = np.arange(len(masses))[cmask]\r\n\r\n if hodmod._central:\r\n masses = masses[cmask]\r\n centres = centres[cmask]\r\n\r\n # Calculate the number of satellite galaxies in halos\r\n # Using ns, rather than ns, gives the correct answer for both central condition and not.\r\n # Note that other parts of the algorithm also need to be changed if central condition is not true.\r\n # if hodmod._central:\r\n # sgal = poisson.rvs(hodmod.ns(masses[cmask]))\r\n # else:\r\n sgal = poisson.rvs(hodmod.ns(masses))\r\n\r\n # Get an array ready, hopefully speeds things up a bit\r\n ncen = np.sum(cgal)\r\n nsat = np.sum(sgal)\r\n\r\n pos = np.empty((ncen + nsat, 3))\r\n halo = np.empty(ncen+nsat)\r\n\r\n # Assign central galaxy positions\r\n halo[:ncen] = central_halos\r\n if hodmod._central:\r\n pos[:ncen, :] = centres\r\n else:\r\n pos[:ncen, :] = centres[cmask]\r\n\r\n\r\n smask = sgal > 0\r\n # if hodmod._central:\r\n # sat_halos = central_halos[np.arange(len(masses[cmask]))[smask]]\r\n # else:\r\n if hodmod._central:\r\n sat_halos = central_halos[np.arange(len(masses))[smask]]\r\n else:\r\n sat_halos = np.arange(len(masses))[smask]\r\n\r\n sgal = sgal[smask]\r\n centres = centres[smask]\r\n masses = masses[smask]\r\n\r\n # Now go through each halo and calculate galaxy positions\r\n start = time.time()\r\n halo[ncen:] = np.repeat(sat_halos,sgal)\r\n indx = np.concatenate(([0],np.cumsum(sgal))) + ncen\r\n\r\n# print \"SMASHING THIS NOW\"\r\n def fill_array(i):\r\n m,n,ctr = masses[i], sgal[i],centres[i]\r\n pos[indx[i]:indx[i+1],:] = profile.populate(n, m, ba=1, ca=1, centre=ctr)\r\n\r\n if HAVE_POOL:\r\n mp.ProcessingPool(mp.cpu_count()).map(fill_array,list(range(len(masses))))\r\n else:\r\n for i in range(len(masses)):\r\n fill_array(i)\r\n\r\n nhalos_with_gal = len(set(central_halos.tolist()+sat_halos.tolist()))\r\n\r\n print(\"Took \", time.time() - start, \" seconds, or \", (time.time() - start)/nhalos_with_gal, \" each halo.\")\r\n print(\"NhalosWithGal: \", nhalos_with_gal, \", Ncentrals: \", ncen,\", NumGal: \", len(halo), \", MeanGal: \", float(\r\n len(halo))/nhalos_with_gal, \", MostGal: \", sgal.max() + 1 if len(sgal)>0 else 1)\r\n\r\n if edges is None:\r\n pass\r\n elif np.isscalar(edges):\r\n edges = np.array([[0, 0, 0], [edges, edges, edges]])\r\n elif np.array(edges).shape == (2,):\r\n edges = np.array([[edges[0]]*3, [edges[1]]*3])\r\n\r\n if edges is not None:\r\n for j in range(3):\r\n d = pos[:, j] - edges[0][j]\r\n pos[d < 0, j] = edges[1][j] + d[d < 0]\r\n d = pos[:, j] - edges[1][j]\r\n pos[d > 0, j] = edges[0][j] + d[d > 0]\r\n\r\n return pos, halo.astype(\"int\"), ncen", "def get_mol_masses(mol):\n return np.array([a.GetMass() for a in mol.GetAtoms()])", "def solid_surface_density_nHill_given_physical_catalog(sssp_per_sys, sssp, max_core_mass=10., n=10.):\n a_all = sssp_per_sys['a_all'][sssp_per_sys['a_all'] > 0]\n core_mass_all = np.copy(sssp_per_sys['mass_all'])\n core_mass_all[core_mass_all > max_core_mass] = max_core_mass\n sigma_all = 
solid_surface_density_nHill(core_mass_all, sssp_per_sys['a_all'], Mstar=sssp['Mstar_all'][:,None], n=n)[sssp_per_sys['a_all'] > 0]\n return sigma_all, a_all", "def calc_mass(self):\n\n star = self.star\n\n K, N = star.mesh_size\n mu = star.mu_coords\n r = star.r_coords\n\n def Q1(j):\n return np.sum((mu[2::2] - mu[:-2:2]) *\n (star.rho[:-2:2, j] + 4 * star.rho[1:-1:2, j] +\n star.rho[2::2, j])) / 6\n\n mass = 0\n\n for j in range(0, N - 2, 2):\n mass += (r[j + 2] - r[j]) * (r[j]**2 * Q1(j) +\n 4 * r[j + 1]**2 * Q1(j + 1) +\n r[j + 2]**2 * Q1(j + 2))\n\n return 2 / 3 * np.pi * mass", "def getMolecularMass(self):\n dataDict = self.__dict__\n # get formula dictionary\n dd = {}\n for ca in self.chemAtoms:\n if isinstance(ca, ChemAtom):\n ss = ca.elementSymbol\n ii = dd.get(ss)\n if ii is None:\n dd[ss] = 1\n else:\n dd[ss] = ii + 1\n \n # calculate mass\n xx = self.root.currentChemElementStore\n result = sum(ii * xx.findFirstChemElement(symbol=ss).mass for (ss, ii) in dd.iteritems())\n return result", "def centre_of_mass(mol):\n\n numatoms = mol.GetNumAtoms()\n conf = mol.GetConformer()\n if not conf.Is3D():\n return 0\n # get coordinate of each atoms\n pts = np.array([list(conf.GetAtomPosition(atmidx)) for atmidx in range(numatoms)])\n atoms = [atom for atom in mol.GetAtoms()]\n mass = Descriptors.MolWt(mol)\n # get center of mass\n center_of_mass = np.array(np.sum(atoms[i].GetMass() * pts[i] for i in range(numatoms))) / mass\n return center_of_mass", "def get_mass(atomic_symbol: str) -> float:\n\n if atomic_symbol in _masses.keys():\n return _masses[atomic_symbol]\n\n else:\n return 0", "def center_of_mass(self, tolerance=1e-9):\n props = GProp_GProps()\n brepgprop_VolumeProperties(self.topods_solid(), props, tolerance)\n com = props.CentreOfMass()\n return geom_utils.gp_to_numpy(com)", "def calc_gasSD_inside_half_mass(galObj, gas_m, gas_pos, halfMassR='gas'):\n if halfMassR == 'gas':\n half_mass_radius = galObj.radii['gas_half_mass'].in_units('kpc')\n elif halfMassR == 'stars':\n half_mass_radius = galObj.radii['stellar_half_mass'].in_units('kpc')\n\n extent = np.sqrt(gas_pos[:, 0]**2 + gas_pos[:, 1]**2 + gas_pos[:, 2]**2)\n mask = extent <= half_mass_radius\n gasSD = np.sum(gas_m[mask])/np.pi/(half_mass_radius*1.e3)**2\n print(\"gas SD from particles within half-mass: \")\n print(gasSD)\n print(\"gas SD from global gas mass: \")\n print(galObj.masses['gas'] / np.pi / (half_mass_radius*1.e3)**2)\n\n print(galObj.masses['gas'])\n print(np.sum(gas_m[mask]))\n # hmmm.....\n # import pdb; pdb.set_trace()\n return gasSD", "def test_atomic_masses():\n first = get_atomic_mass(\"As\")\n assert first == 74.9216\n \n second = get_atomic_mass(\"Be\")\n assert second == 9.012182\n\n third = get_atomic_mass(\"Li\")\n assert third == 6.941", "def _compute_mass(box_size, evo_config):\n\n # ensure format\n standard_volume = evo_config['individuals']['standard_volume']\n if isinstance(box_size, list):\n if len(box_size) == 1: # sphere\n box_size = box_size[0]\n box_size = np.asarray(box_size)\n\n if np.prod(box_size.shape) < 2: # sphere\n return 4 / 3 * np.pi * box_size**3 / standard_volume\n else: # box\n if np.ndim(box_size) == 1:\n return np.prod(box_size * 2) / standard_volume\n else:\n return np.prod(box_size * 2, axis=1) / standard_volume", "def mass(self):\n self.check_symbols()\n return self._tree_mass(self._tokentree())", "def calculate_molar_mass(collector):\n avg_temp = collector.get_average_temperature()\n avg_acceleration = collector.get_average_acceleration()\n ground_pressure = 
collector.get_ground_pressure()\n numerator = 0\n denominator = 0\n for altitude, pressure in\\\n collector.get_iter('altitude', 'pressure'):\n try:\n numerator -= (Calculator.R * avg_temp /\n avg_acceleration / altitude *\n math.log(pressure / ground_pressure))\n except ZeroDivisionError:\n pass\n else:\n denominator += 1\n if denominator == 0:\n raise NoDataError('No altitude/pressure to calculate molar mass')\n return numerator / denominator", "def centerOfMass(data):\r\n dd = []\r\n for d in data:\r\n dd.append(d.coordinate)\r\n\r\n data = dd\r\n data = np.array(data)\r\n n = len(data)\r\n x = sum(data[:,0])\r\n y = sum(data[:,1])\r\n z = sum(data[:,2])\r\n x/=n\r\n y/=n\r\n z/=n\r\n return x,y,z,n", "def mass(self):\n\t\treturn self.volume*self.density", "def molar_mass_dry_air():\n return 28.9647" ]
[ "0.62661475", "0.61996317", "0.618251", "0.6090707", "0.60871905", "0.5922938", "0.5916971", "0.5912088", "0.59059227", "0.5891885", "0.5858639", "0.5802172", "0.5787271", "0.578331", "0.57702184", "0.5754956", "0.5754098", "0.57494813", "0.5742618", "0.5713476", "0.5711955", "0.5711495", "0.5694094", "0.5687913", "0.56755453", "0.56673837", "0.5654649", "0.5636952", "0.5621792", "0.5609333" ]
0.6344772
1
Calculates mass assembly history for a given halo. Tree-based approach has been abandoned for performance reasons.
def collapsed_mass_history(self, index, nfw_f): logging.debug("Looking for halo %d", index) halo = self.get_halo(index) if halo["hostIndex"] != halo.name: raise ValueError("Not a host halo!") m_0 = self.halo_mass(index) progenitors = pd.concat( [ self.data.loc[index], self.data.loc[self.halo_progenitor_ids(index)], ] ) logging.debug( "Built progenitor sub-table for halo %d of mass %d with %d members", index, m_0, progenitors.size, ) progenitors = progenitors[progenitors["particleNumber"] > nfw_f * m_0] cmh = progenitors.groupby("snapshotNumber", as_index=False)[ "particleNumber" ].sum() cmh["nodeIndex"] = index logging.info( "Aggregated masses of %d valid progenitors of halo %d", progenitors.size, index, ) return cmh
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collapsed_mass_history(self, index, nfw_f):\n\n logging.debug(\"Looking for halo %d\", index)\n halo = self.get_halo(index)\n if halo[\"hostIndex\"] != halo.name:\n raise ValueError(\"Not a host halo!\")\n m_0 = self.halo_mass(index)\n\n progenitors = pd.concat(\n [\n self.data.loc[index],\n self.data.loc[self.halo_progenitor_ids(index)],\n ]\n )\n logging.debug(\n \"built prog sub-table [%d] (m=%d, %d progs)\",\n index,\n m_0,\n progenitors.size,\n )\n\n progenitors = progenitors[progenitors[\"particleNumber\"] > nfw_f * m_0]\n cmh = progenitors.groupby(\"snapshotNumber\", as_index=False)[\n \"particleNumber\"\n ].sum()\n cmh[\"nodeIndex\"] = index\n logging.info(\n \"Aggregated masses of %d valid progenitors of halo %d\",\n progenitors.size,\n index,\n )\n\n return cmh", "def history(self, hash):\n txs = self._t.get(hash, max_transactions=10000)['transactions']\n tree = defaultdict(list)\n number_editions = 0\n\n for tx in txs:\n _tx = self._t.get(tx['txid'])\n txid = _tx['txid']\n verb_str = BlockchainSpider.check_script(_tx['vouts'])\n verb = Spoolverb.from_verb(verb_str)\n from_address, to_address, piece_address = BlockchainSpider._get_addresses(_tx)\n timestamp_utc = _tx['time']\n action = verb.action\n\n edition_number = 0\n if action != 'EDITIONS':\n edition_number = verb.edition_number\n else:\n number_editions = verb.num_editions\n\n tree[edition_number].append({'txid': txid,\n 'verb': verb_str,\n 'from_address': from_address,\n 'to_address': to_address,\n 'piece_address': piece_address,\n 'timestamp_utc': timestamp_utc,\n 'action': action,\n 'number_editions': number_editions,\n 'edition_number': edition_number})\n\n # lets update the records with the number of editions of the piece since we do not know\n # this information before the EDITIONS transaction\n for edition, chain in tree.items():\n [d.update({'number_editions': number_editions}) for d in chain]\n return dict(tree)", "def createHistoryCollection(step, pickleName, maximumSkips=5, cutoffDistance=2, minStellarMass=1e8, contaminationTolerance=0.05, \\\n\tminDarkParticles=1e4, requireBH=True, emailAddress=None, computeRamPressure=True, computeMergers=True, massForRatio='Mstar', \\\n\tbhString=\"bh('BH_mass', 'max', 'BH_central')\"):\n\n\t#Time the calculation\n\tt_start = time.time()\n\n\t#Obtain halos that meet the requirements.\n\thaloList = getSuitableHalos(step, requireBH=requireBH, minStellarMass=minStellarMass, contaminationTolerance=contaminationTolerance, \\\n\tminDarkParticles=minDarkParticles)\n\thistoryCollection = {}\n\tfailedHaloNumbers = []\n\n\t#Loop through and find histories.\n\tfor h_index in range(len(haloList)):\n\t\thaloNumber = haloList[h_index].halo_number\n\t\tprint \"Processing halo_number {0}, halo {1} of {2}.\".format(haloNumber, h_index+1, len(haloList))\n\t\ttry:\n\t\t\thistoryBook = makeHistory(haloList[h_index], maximumSkips=maximumSkips, cutoffDistance=cutoffDistance, bhString=bhString)\n\t\t\thistoryCollection[haloNumber] = historyBook\n\t\texcept NoResultsError:\n\t\t\t#The galaxy lacks one of the items asked for, probably a BH.\n\t\t\tprint \" FAILED\"\n\t\t\tfailedHaloNumbers.append(haloNumber)\n\t\t\tpass\n\t\tif computeMergers:\n\t\t\tmergerTimes, mergerRatios = stitched_merger_finder(haloList[h_index], maximumSkips=maximumSkips, \\\n cutoffDistance=cutoffDistance, massForRatio=massForRatio)\n historyCollection[haloNumber]['mergerTimes'] = mergerTimes\n historyCollection[haloNumber]['mergerRatios'] = mergerRatios\n\n\tif step.simulation.basename == 'h1.cosmo50':\n\t\t#Adding 
one new key: The distance from the cluster center\n\t\ttry:\n\t\t\tclusterCoordinates = historyCollection[1]['SSC']\n\t\t\tclusterRadius = historyCollection[1]['R200']\n\t\t\tprint \"Computing cluster distances.\"\n\t\t\tfor h_index in range(len(haloList)):\n\t\t\t\thaloNumber = haloList[h_index].halo_number\n\t\t\t\tif (haloNumber == 1) | (haloNumber in failedHaloNumbers):\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tdisplacement = historyCollection[haloNumber]['SSC'] - clusterCoordinates\n\t\t\t\t\tdistance = np.sqrt(np.array([np.dot(displacement[:,i],displacement[:,i]) for i in range(displacement.shape[1])]))\n\t\t\t\t\thistoryCollection[haloNumber]['clusterDistance'] = distance / clusterRadius\n\t\texcept KeyError:\n\t\t\t#The cluster coordinates aren't in this set.\n\t\t\tpass\n\n\t\t#Adding another key: ram pressure\n\t\tif computeRamPressure:\n\t\t\tcp = ClusterProfiler(step)\n\t\t\tclusterVelocity = historyCollection[1]['Vcom']\n\t\t\tfor h_index in range(len(haloList)):\n\t\t\t\thaloNumber = haloList[h_index].halo_number\n\t\t\t\tif (haloNumber == 1) | (haloNumber in failedHaloNumbers):\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tclusterDensity = cp.computeGasDensity(historyCollection[haloNumber]['clusterDistance'], \\\n\t\t\t\t\thistoryCollection[haloNumber]['time'])\n\t\t\t\t\trelativeVelocities = historyCollection[haloNumber]['Vcom'] - clusterVelocity\n\t\t\t\t\trelativeSpeedSquared = np.array([np.dot(relativeVelocities[:,i],relativeVelocities[:,i]) for i in range(relativeVelocities.shape[1])])\n\t\t\t\t\thistoryCollection[haloNumber]['ramPressure'] = clusterDensity * relativeSpeedSquared * constants.M_sun / (constants.pc * 1e3)**3 * 1e6\n\t\n\t#Pickle the output\n\thistoryCollection['failedHaloNumbers'] = failedHaloNumbers\n\twith open(pickleName, 'w') as myfile:\n\t\tpickle.dump(historyCollection, myfile)\n\t\n\tt_end = time.time()\n\n\tprint \"Process complete after {0:3.2f} hours.\".format((t_end-t_start)/60/60)\n\tprint \"Saved to {0}.\".format(pickleName)\n\n\tif emailAddress is not None:\n\t\t#Send an optional email alert.\n\t\tmsg = MIMEText(\"Hey there!\\n\\nIt's me, your friend the HistoryMaker. I'm just emailing to let you know that the collection you asked for is done. 
It took me {0:3.2f} hours to complete.\\n\\nRegards,\\nHM\".format((t_end-t_start)/60/60))\n\t\tmsg['From'] = \"HistoryMaker\"\n\t\tmsg['To'] = emailAddress\n\t\tmsg['Subject'] = \"History Collection Complete\"\n\n\t\ts = smtplib.SMTP('localhost')\n\t\ts.sendmail(\"HistoryMaker\", [emailAddress], msg.as_string())\n\t\ts.quit()", "def build_branches(self, mpi):\n\n if not mpi.Rank:\n print(\" -Reading tree properties\")\n self.index = np.zeros((len(self.halos), self.Nsnaps), dtype=np.int) - 1\n self.M500c = (\n np.zeros((len(self.halos), self.Nsnaps), dtype=np.float) + 6.774e-11\n )\n self.M200c = (\n np.zeros((len(self.halos), self.Nsnaps), dtype=np.float) + 6.774e-11\n )\n self.M200m = (\n np.zeros((len(self.halos), self.Nsnaps), dtype=np.float) + 6.774e-11\n )\n self.Mvir = np.zeros((len(self.halos), self.Nsnaps), dtype=np.float) + 6.774e-11\n for j in range(0, len(self.halos), 1):\n branch = self.tree.get_main_branch(\n self.Isnap,\n self.sub_tab.FirstSub[j],\n keysel=[\n \"Group_M_Crit500\",\n \"Group_M_Crit200\",\n \"Group_M_Mean200\",\n \"Group_M_TopHat200\",\n \"SubhaloGrNr\",\n ],\n )\n\n if len(branch.SubhaloGrNr) >= self.Nsnaps:\n tmp = self.Nsnaps\n else:\n tmp = len(branch.SubhaloGrNr)\n self.index[j, :tmp] = branch.SubhaloGrNr[:tmp]\n self.M500c[j, :tmp] = branch.Group_M_Crit500[:tmp]\n self.M200c[j, :tmp] = branch.Group_M_Crit200[:tmp]\n self.M200m[j, :tmp] = branch.Group_M_Mean200[:tmp]\n self.Mvir[j, :tmp] = branch.Group_M_TopHat200[:tmp]\n\n # Convert code units to astro\n self.M500c *= 1.0e10 / self.sub_tab.hub # [Msun]\n self.M200c *= 1.0e10 / self.sub_tab.hub # [Msun]\n self.M200m *= 1.0e10 / self.sub_tab.hub # [Msun]\n self.Mvir *= 1.0e10 / self.sub_tab.hub # [Msun]\n return", "def metric_halstats(hal_filename, reference_id=\"ref\"):\n \n # Get the list of dicts of per-genome stats.\n status_list = get_halstats_stats(hal_filename)\n \n # Throw out non-leaves\n status_list = [entry for entry in status_list if entry[\"NumChildren\"] == 0]\n \n # Grab all the genome names\n genome_names = [entry[\"GenomeName\"] for entry in status_list]\n \n # Get the dict from genome name to total bases from that genome aligned to\n # the reference at all, and the dict of N compositions, in parallel.\n coverage_dict, basecomp_dict = in_parallel(\n lambda: get_halstats_coverage(hal_filename, genome_names, reference_id),\n lambda: get_halstats_basecomps(hal_filename, genome_names))\n \n for entry in status_list:\n # For each genome, we want the coverage against the reference.\n \n # Grab the genome name\n genome_name = entry[\"GenomeName\"]\n \n if not coverage_dict.has_key(genome_name):\n # This is probably the root sequence and didn't get a coverage for\n # some reason. 
At any rate, the root sequence would be all Ns\n continue\n \n # Figure out how much of it is not Ns\n non_n = basecomp_dict[genome_name]\n \n # How many bases are eligible?\n eligible = float(entry[\"Length\"] * non_n)\n \n if eligible == 0:\n # No coverage is defined\n entry[\"Coverage\"] = float(\"NaN\")\n continue\n \n # Compute and save the coverage for each entry, by dividing bases\n # aligned by bases eligible.\n entry[\"Coverage\"] = coverage_dict[genome_name] / eligible\n \n # Return the results\n return status_list", "def get_mean_halo_growths(mass_params, z_range):\r\n\r\n halo_mass_range = np.arange(mass_params[0], mass_params[1], mass_params[2])\r\n N = halo_mass_range.size - 1\r\n\r\n mean_halo_tracks = {\r\n \"mass_bin\": [],\r\n \"mass_track\": []\r\n }\r\n\r\n for i in range(N):\r\n mean_halo_tracks[\"mass_bin\"].append([halo_mass_range[i], halo_mass_range[i+1]])\r\n mean_mass = (halo_mass_range[i] + halo_mass_range[i+1]) /2.\r\n mean_halo_tracks[\"mass_track\"].append(Mass_acc_history_VDB_FS(mean_mass, z_range, Cosmo.h, Cosmo.Ob0))\r\n\r\n return mean_halo_tracks", "def MAH_Hearin_2021(halo_mass_t0, cosmic_t):\r\n\r\n #U_a_early = 2.5\r\n #U_a_early_late = 0.3\r\n #log10tau_c = 1.25\r\n\r\n k = 3.5\r\n\r\n a_late_early = 2.5-0.3 #np.log( np.power(np.e, U_a_early_late) + 1. )\r\n a_early = 2.5 #np.log( np.power(np.e, U_a_early) + 1. )\r\n tau_c = 1.25 #np.power(10., log10tau_c)\r\n alpha = a_early + a_late_early / (1. + np.exp(-k*(cosmic_t - tau_c)) )\r\n\r\n MAH = np.log10( 10.**halo_mass_t0 * np.power(cosmic_t / Cosmo.age(0), alpha) )\r\n\r\n return MAH", "def MAH(self, mobs, zobs, mseed=None, set_history=False):\n lgmobs = log10(mobs)\n lgzobs = log10(zobs+1.)\n if ((lgmobs > self.lgmmax) or \n (lgmobs < self.lgmmin)):\n print(\"lgmobs beyond range lgmobs %10.3f lgmmin %10.3f lgmmax %10.3f \"%(lgmobs, self.lgmmin, self.lgmmax))\n raise cex.ParameterOutsideDefaultRange(mobs)\n if ((lgzobs > self.lgzmax) or \n (lgzobs < self.lgzmin)):\n raise cex.ParameterOutsideDefaultRange(zobs)\n # starting mass\n if mseed is None:\n lgmseed = lgmobs - 2.0\n else:\n lgmseed = log10(mseed)\n if (lgmseed < self.lgmmin):\n print(\"lgmmin too large lgmseed %10.3f lgmmin %10.3f\"%(lgmseed, self.lgmmin))\n raise cex.ParameterOutsideDefaultRange(mseed)\n # for concentration\n m_magic = mobs*self.frac_magic\n lgmmagic = log10(m_magic)\n if (lgmmagic < lgmseed):\n raise cex.ParameterOutsideDefaultRange(m_magic)\n\n lgz_magic, lgm_history, lgz_history = self._MAH_lg(\n lgmobs, lgzobs, lgmseed, lgmmagic)\n t_magic = self.age(10.**lgz_magic-1.)\n t_obs = self.age(zobs)\n# t_magic = self._age(10.**lgz_magic-1.)\n# t_obs = self._age(zobs)\n cvir = self._cvir_fit(t_magic, t_obs)\n if set_history:\n m_history = np.power(10., lgm_history)\n z_history = np.power(10., lgz_history)-1.\n mah_history = np.vstack([m_history, z_history])\n return(cvir, mah_history)\n else:\n return(cvir)", "def __build_history(self, obj: Object) -> dict:\n previous_history = obj.history\n return {**previous_history, self.__get_timestamp(): {'type_id': obj.type_id, 'redshift': obj.redshift}}", "def __build_history(self, obj: Object) -> dict:\n previous_history = dict(obj.history)\n return {**previous_history, self.__get_timestamp(): {'type_id': obj.type_id, 'redshift': obj.redshift}}", "def compute_audit(self):\r\n \r\n time = datetime.now()\r\n H0_dist = []\r\n Ha_dist = []\r\n\r\n for i in range(0, self.m):\r\n #print(\"CURRENT H0 dist: \", H0_dist)\r\n #try:\r\n H0_dist = self.next_round_dist(True, H0_dist, i)\r\n Ha_dist = 
self.next_round_dist(False, Ha_dist, i)\r\n '''\r\n except Exception as e:\r\n \r\n print(e)\r\n self.bad = H0_dist\r\n self.bad2 = Ha_dist\r\n return\r\n '''\r\n self.decide_k_min(H0_dist, Ha_dist, i)\r\n #print('ROUND INDEX: ',i,'kminschedl: ',self.k_min_sched[i])\r\n\r\n #self.truncate_dist(H0_dist, i)\r\n H0_dist = H0_dist[:self.k_min_sched[i]]\r\n #self.truncate_dist(Ha_dist, i)\r\n Ha_dist = Ha_dist[:self.k_min_sched[i]]\r\n \r\n #print(\"The outputs: k_mins, LR denominator, LR numerator, 1 / LR (or alpha').\")\r\n #print(self.k_min_sched, '\\n', self.pr_H0_sched, '\\n', self.pr_Ha_sched, '\\n', \r\n #self.risk_sched)\r\n #print(\"Output suppressed. Use instance variables k_min_sched, pr_H0_sched, pr_Ha_sched, risk_sched\")\r\n\r\n #print(\"Time elapsed:\", datetime.now() - time)\r", "def test_find_homologs(self):\r\n\r\n formatdb_cmd = 'formatdb -p F -o T -i %s' % self.subjectdb_fp\r\n system(formatdb_cmd)\r\n self._paths_to_clean_up.append(\"formatdb.log\")\r\n for suffix in [\"nhr\", \"nin\", \"nsd\", \"nsi\", \"nsq\"]:\r\n self._paths_to_clean_up.append(\".\".join(\r\n [self.subjectdb_fp, suffix]))\r\n\r\n blast_output, hit_ids, removed_hit_ids =\\\r\n find_homologs(self.query_fp, self.subjectdb_fp, e_value=1e-4,\r\n max_hits=100, working_dir=\"./\", blast_mat_root=None,\r\n wordsize=28, percent_aligned=0.98, DEBUG=False)\r\n\r\n self.assertEqual(hit_ids, set([\"bth:BT_0001\", \"hsa:8355\"]))\r\n self.assertEqual(removed_hit_ids, set())\r\n\r\n i = 0\r\n for line in blast_output:\r\n\r\n if line.startswith(\"#\"):\r\n i += 1\r\n continue # depends on tmpfilename, skip testing\r\n\r\n self.assertEqual(blast_output[i], EXP_BLAST_OUTPUT[i])\r\n i += 1\r\n\r\n # Ensure low % alignment seqs are removed\r\n blast_output, hit_ids, removed_hit_ids =\\\r\n find_homologs(self.query2_fp, self.subjectdb_fp,\r\n e_value=1e-4, max_hits=100, working_dir=\"./\",\r\n blast_mat_root=None, wordsize=28, percent_aligned=1.00,\r\n DEBUG=False)\r\n\r\n self.assertEqual(hit_ids, set([\"bth:BT_0001\"]))\r\n self.assertEqual(removed_hit_ids, set([\"hsa:8355_tweaked\"]))\r\n\r\n # Ensure high % alignment seqs are not removed\r\n blast_output, hit_ids, removed_hit_ids =\\\r\n find_homologs(self.query2_fp, self.subjectdb_fp,\r\n e_value=1e-4, max_hits=100, working_dir=\"./\",\r\n blast_mat_root=None, wordsize=28, percent_aligned=0.75,\r\n DEBUG=False)\r\n\r\n self.assertEqual(hit_ids, set([\"bth:BT_0001\", \"hsa:8355_tweaked\"]))\r\n self.assertEqual(removed_hit_ids, set())", "def accumulateSubgridMassHistory(self,q):\n pass", "def analyze_data(O, nstates, nsamples=1000, nobservations=None):\n\n # Time interval.\n tau = 0.001 # time interval (s) for plotting\n\n # Truncate O to number of observations.\n if nobservations:\n print \"Using only %d observations\" % nobservations\n O = [ o_t[0:nobservations] for o_t in O ]\n else:\n nobservations = len(O[0])\n\n # Generate MLHMM.\n print \"Generating MLHMM...\"\n estimator = bhmm.MLHMM(O, nstates)\n\n print \"Initial guess:\"\n print str(estimator.hmm.output_model)\n print estimator.hmm.transition_matrix\n print estimator.hmm.stationary_distribution\n\n # Plot initial guess.\n s_t = None\n o_t = O[0]\n filename = os.path.join('figures', 'synthetic-three-state-model-guess-nstates%(nstates)d-nobs%(nobservations)d.pdf' % vars())\n plots.plot_state_assignments(estimator.hmm, s_t, o_t, time_units='s', obs_label='force / pN', tau=tau, pdf_filename=filename)\n\n print \"Fitting HMM...\"\n mle = estimator.fit()\n\n # Plot.\n s_t = mle.hidden_state_trajectories[0]\n 
import numpy as np\n o_t = O[0]\n filename = os.path.join('figures', 'synthetic-three-state-model-mlhmm-nstates%(nstates)d-nobs%(nobservations)d.pdf' % vars())\n plots.plot_state_assignments(mle, s_t, o_t, time_units='s', obs_label='force / pN', tau=tau, pdf_filename=filename)\n\n # Initialize BHMM with MLHMM model.\n print \"Sampling models from BHMM...\"\n sampler = bhmm.BHMM(O, nstates, initial_model=mle)\n bhmm_models = sampler.sample(nsamples=nsamples, save_hidden_state_trajectory=False)\n\n # Generate a sample saving a hidden state trajectory.\n final_models = sampler.sample(nsamples=1, save_hidden_state_trajectory=True)\n\n # Plot final BHMM sample.\n model = final_models[0]\n s_t = model.hidden_state_trajectories[0]\n o_t = O[0]\n filename = os.path.join('figures', 'synthetic-three-state-model-bhmm-nstates%(nstates)d-nobs%(nobservations)d.pdf' % vars())\n plots.plot_state_assignments(model, s_t, o_t, time_units='s', obs_label='force / pN', tau=tau, pdf_filename=filename)\n\n return [mle, bhmm_models]", "def buildCache(ham: Dict[str, Any]) -> None:\n\n # Initialize the Hamiltonian\n clearCache(ham)\n\n # Build operators and sequences\n buildOperatorCache(ham)\n buildSequenceCache(ham)", "def caculate_prob(self):\n t_H = self.tree.depth()\n t_h = 1\n while(t_h <= t_H):\n t_hnodes = self.get_h(t_h)\n t_sum = 0\n t_hpro = []\n t_cpro = []\n for t_n in t_hnodes:\n t_sum = self.tree.get_node(t_n).data[0] + t_sum\n t_node = self.tree.get_node(t_n)\n if t_node.is_leaf():\n t_node.data.append(0)\n continue\n t_childrens = self.tree.children(t_n)\n t_shang = 0\n for child in t_childrens:\n t_shang = t_shang + (child.data[0]/t_node.data[0])*np.log(child.data[0]/t_node.data[0])\n t_node.data.append(-t_shang)\n for t_n in t_hnodes:\n t_node = self.tree.get_node(t_n)\n t_parentnode = self.tree.parent(t_n)\n if t_h > 1:\n t_node.data.append((t_node.data[0] / t_sum) * (t_node.data[0]/t_parentnode.data[0]))\n t_hpro.append((t_node.data[0]/t_sum) * (t_node.data[0]/t_parentnode.data[0]))\n else:\n t_node.data.append((t_node.data[0] / t_sum))\n t_hpro.append((t_node.data[0] / t_sum))\n\n t_cpro.append(t_node.data[1])\n t_ndata = np.array(t_hpro)\n mean = np.mean(t_ndata)\n std = np.std(t_ndata,ddof=1)\n t_sdata = np.array(t_cpro)\n mean_s = np.mean(t_sdata)\n std_s = np.std(t_sdata,ddof=1)\n for t_n in t_hnodes:\n t_node = self.tree.get_node(t_n)\n if(std != 0):\n t_node.data[2] = (t_node.data[2] - mean)/std\n else:\n t_node.data[2] = (t_node.data[2] - mean)\n if(mean_s == 0 and std_s ==0):\n t_node.data[1] = -100.0\n continue\n t_node.data[1] = (t_node.data[1] - mean_s)/std_s\n t_h = t_h + 1", "def HI_mass(mhalo,aa):\n zp1 = 1.0/aa\n zz = zp1-1\n # Set the parameters of the HOD, using the \"simple\" form.\n # MHI ~ M0 x^alpha Exp[-1/x] x=Mh/Mmin\n # from the Appendix of https://arxiv.org/pdf/1804.09180.pdf, Table 6.\n # Fits valid for 1<z<6:\n mcut= 1e10*(6.11-1.99*zp1+0.165*zp1**2)\n alp = (1+2*zz)/(2+2*zz)\n # Work out the HI mass/weight per halo -- ignore prefactor.\n xx = mhalo/mcut+1e-10\n mHI = xx**alp * np.exp(-1/xx)\n # Scale to some standard number in the right ball-park.\n mHI*= 2e9*np.exp(-1.9*zp1+0.07*zp1**2)\n # Return the HI masses.\n return(mHI)\n #", "def agg_history(self):\n cd_list, cr_list = zip(*self._history)\n return pd.concat(cd_list), pd.concat(cr_list)", "def algo(segregatedJob):\n global total\n rho = computeRho(segregatedJob)\n r = len(rho);\n\n S = [[0 for x in range(r)] for y in range(r)]\n k = 0\n #implementaion of scheduling algorithm\n while(k<len(S)):\n for j in 
range(k, len(S)):\n if k == j and j != 0 and segregatedJob[j].noOfChildren < 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][k - 1], S[j - 1][k - 1])\n\n elif j > k and j != 0 and segregatedJob[j].noOfChildren >= 4:\n S[j][k] = S[j - 1][k]\n\n elif k == j and j != 0 and segregatedJob[j].noOfChildren >= 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][rho[k]], S[j - 1][k - 1])\n\n elif j > k and j != 0 and segregatedJob[j].noOfChildren < 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][k], S[j - 1][k])\n else:\n pass\n S[k][j] = S[j][k]\n k += 1\n length = len(S)\n\n #Adding the max pay for every individual field in the matrix\n total += S[length-1][length-1]", "def get_mergers_of_major_progenitor(input_halo):\n redshift = []\n ratio = []\n halo = []\n while input_halo is not None:\n mergers = db.relation_finding.MultiHopMostRecentMergerStrategy(input_halo, order_by='weight').all()\n if len(mergers)>0 :\n for m in mergers[1:]:\n redshift.append(mergers[0].timestep.next.redshift)\n halo.append((mergers[0], m))\n ratio.append(float(mergers[0].NDM)/m.NDM)\n input_halo = mergers[0]\n else:\n input_halo = None\n\n return np.array(redshift), np.array(ratio), halo", "def assemble_stats(lma_sum, mma_sum, hma_sum, peer_lma_sum, peer_mma_sum, peer_hma_sum):\n lma_pct = 0.0\n mma_pct = 0.0\n hma_pct = 0.0\n\n peer_lma_pct = 0.0\n peer_mma_pct = 0.0\n peer_hma_pct = 0.0\n\n stats = {}\n\n target_lar_total = lma_sum + mma_sum + hma_sum\n if target_lar_total:\n lma_pct = round(1.0 * lma_sum / target_lar_total, 3)\n mma_pct = round(1.0 * mma_sum / target_lar_total, 3)\n hma_pct = round(1.0 * hma_sum / target_lar_total, 3)\n maj_pct = round(mma_pct + hma_pct, 3)\n stats.update({\n 'lma': lma_sum, \n 'lma_pct': lma_pct, \n 'mma': mma_sum,\n 'mma_pct': mma_pct,\n 'hma': hma_sum,\n 'hma_pct': hma_pct,\n 'maj_pct': maj_pct,\n 'lar_total': target_lar_total\n })\n else:\n stats.update({\n 'lar_total': 0,\n 'lma': 0, \n 'lma_pct': 0, \n 'mma': 0,\n 'mma_pct': 0,\n 'hma': 0,\n 'hma_pct': 0\n })\n #assemble peer data\n peer_lar_total = peer_lma_sum + peer_mma_sum + peer_hma_sum\n if peer_lar_total:\n peer_lma_pct = round(1.0 * peer_lma_sum / peer_lar_total, 3)\n peer_mma_pct = round(1.0 * peer_mma_sum / peer_lar_total, 3)\n peer_hma_pct = round(1.0 * peer_hma_sum / peer_lar_total, 3)\n peer_maj_pct = round(peer_mma_pct + peer_hma_pct, 3)\n stats.update({\n 'peer_lma': peer_lma_sum, \n 'peer_lma_pct': peer_lma_pct, \n 'peer_mma': peer_mma_sum,\n 'peer_mma_pct': peer_mma_pct,\n 'peer_hma': peer_hma_sum,\n 'peer_hma_pct': peer_hma_pct,\n 'peer_maj_pct': peer_maj_pct,\n 'peer_lar_total': peer_lar_total\n })\n else:\n stats.update({\n 'peer_lma': 0,\n 'peer_lma_pct': 0, \n 'peer_mma': 0, \n 'peer_mma_pct': 0,\n 'peer_hma': 0,\n 'peer_hma_pct': 0,\n 'peer_lar_total': 0\n })\n odds_lma = odds_ratio(lma_pct, peer_lma_pct)\n odds_mma = odds_ratio(mma_pct, peer_mma_pct)\n odds_hma = odds_ratio(hma_pct, peer_hma_pct)\n odds_maj = odds_ratio(mma_pct+hma_pct, peer_mma_pct+peer_hma_pct)\n stats.update({\n 'odds_lma':odds_lma,\n 'odds_mma':odds_mma,\n 'odds_hma':odds_hma,\n 'odds_maj':odds_maj\n })\n return stats", "def updateSubhalos_old(host, file):\n f = open(file, 'r')\n line = f.readline()\n i = 0\n while line != '':\n if line[0:5] == \"#tree\":\n #if i%10000 == 0:\n #print 'subhalo finder scanned ', i, ' trees'\n i+=1\n num = int(line[6::])\n # Deal with a=0 halo independently\n line = f.readline()\n sub = MTH.MTHalo(line)\n if sub.pid == host.ID: # not upid. 
only subhalos, not subsub etc.\n #build tree, add to subhalo list of host\n tree = MT.MergerTree(file, num)\n tree.haloList.append(sub)\n if sub.num_prog ==0:\n tree.progenitors.append(sub)\n\n # Now deal with all other halos in the tree\n index = 1\n line = f.readline()\n while line !='' and line[0:5] != '#tree':\n halo = MTH.MTHalo(line)\n tree.haloList.append(halo)\n if halo.num_prog ==0:\n tree.progenitors.append(halo)\n updateLinks(tree.haloList, index)\n line = f.readline()\n index +=1\n # add a=1 subhalo to subhalo list of host (maybe should add tree?)\n host.subhalos.append(sub)\n else:\n line = f.readline()\n else:\n line = f.readline()\n f.close()", "def branchize(self):\n near_ones = self.get_near_ones()\n\n if self.current_branch[\"g_score\"] == 31:\n return\n\n for item in near_ones:\n\n if self.current_branch.get(\"move\") and self.current_branch[\"move\"] == item:\n continue\n\n self.change(item)\n\n if self.astar:\n serialized = self.serialize()\n if serialized in self.previous_branches:\n self.change(item)\n continue\n else:\n self.previous_branches.append(serialized)\n\n a_branch = {\n \"status\" : True,\n \"move\" : item,\n \"g_score\" : self.current_branch[\"g_score\"] + 1,\n \"h_score\" : self.calc_manhattan(self.goal),\n \"branches\" : [],\n \"parent\" : self.current_branch\n }\n a_branch[\"f_score\"] = a_branch[\"g_score\"] + a_branch[\"h_score\"]\n\n self.current_branch[\"branches\"].append(a_branch)\n self.score_scheme.append((a_branch[\"f_score\"], a_branch))\n self.change(item)\n\n self.score_scheme.sort(key=lambda x: x[0])", "def buildOperatorCache(ham: Dict[str, Any]) -> None:\n sysLevel = ham[\"circuit\"][\"sys_level\"]\n qubitNum = ham[\"circuit\"][\"qubits\"]\n\n # Generator the operator for all of the drift terms\n for key in ham[\"drift\"]:\n drifts = ham[\"drift\"][key]\n operator = generateOperator(drifts[\"on_qubits\"], drifts[\"matrices\"], sysLevel, qubitNum) * drifts[\"amp\"]\n ham[\"cache\"][\"operator\"][\"drift\"][key] = operator\n\n # Sum all the drift terms and save to the cache.\n if isinstance(sysLevel, int):\n driftTotal = numpy.zeros((sysLevel ** qubitNum, sysLevel ** qubitNum), dtype=complex)\n elif isinstance(sysLevel, list):\n dim = 1\n for i in sysLevel:\n dim = dim * i\n driftTotal = numpy.zeros((dim, dim), dtype=complex)\n for key in ham[\"cache\"][\"operator\"][\"drift\"]:\n driftTotal = driftTotal + ham[\"cache\"][\"operator\"][\"drift\"][key]\n ham[\"cache\"][\"matrix_of_drift\"] = driftTotal\n\n # Generator the pulse sequences for all of the control terms.\n for key in ham[\"control\"]:\n ctrls = ham[\"control\"][key]\n operator = generateOperator(ctrls[\"on_qubits\"], ctrls[\"matrices\"], sysLevel, qubitNum)\n ham[\"cache\"][\"operator\"][\"control\"][key] = operator", "def get_history(self, mas=[], value='close'):\n\n if self.history is None:\n self._get_history()\n\n if len(mas) > 0:\n for ma in mas:\n self.history['ma%i' % ma] = self.history[value].rolling(center=False, window=ma).mean()\n\n return self.history", "def calc_carbon_tree(height, diameter=25, age = 10):\n \"\"\"Height in meter, diameter in cm, age in years\"\"\"\n \"\"\"This includes habits: Tree, Bamboo\"\"\"\n \n #convert to imperial\n height = height/3.281 #feet\n diameter = diameter/2.54 #inches\n \n #calculate green weight of tree: (above-ground weight) * 1.2\n if diameter < 11:\n green_weight = (0.25 * diameter**2 * height) * 1.2\n else:\n green_weight = (0.15 * diameter**2 * height) * 1.2\n \n #dry weight: average tree is 72.5 dry matter \n dry_weight = 
0.725 * green_weight\n \n #weight of carbon: 50% of tree dry weight\n c_weight = 0.5 * dry_weight\n \n #weight of CO2 sequestered\n co2_weight = 3.67 * c_weight\n \n return co2_weight/2.205/age #convert from lbs to kg and divide by age", "def frequentist_MC(bolo_name, exposure):\n\n\thdata, file_data = PyRPl.open_ROOT_object(\"../ROOT_files/Axion/Spec/\" + bolo_name + \"_spec_perkeV.root\", \"h_\" + bolo_name)\n\tfmodel, file_model = PyRPl.open_ROOT_object(\"../ROOT_files/Axion/Model/\" + bolo_name + \"_model_perkeV.root\", bolo_name + \"_FidGamma\")\n\tfflux, file_flux = PyRPl.open_ROOT_object(\"../ROOT_files/Axion/CBRD_convolved/\" + bolo_name + \"_flux.root\", bolo_name + \"_flux_mass_0\")\n\n\t#Histograms parameters\n\tbin_X, min_X, max_X = hdata.GetNbinsX(), hdata.GetBinCenter(1) - 0.5*hdata.GetBinWidth(0), hdata.GetBinCenter(hdata.GetNbinsX()) + 0.5 * hdata.GetBinWidth(0)\n\n\tclass Flux:\n\t\tdef __call__( self, x, par ):\n\t\t\treturn exposure*fflux.Eval(x[0]) + par[0]\n\tnorm_flux = TF1(\"flux\", Flux(), min_X, max_X, 1)\n\tnorm_flux.SetParameter(0,0)\n\n\t#Create bckg model histo in c/keV\n\thbckg = TH1F(\"hbckg\", \"hbckg\", bin_X, min_X, max_X)\n\tfor i in range(1,bin_X+1) :\n\t\thbckg.SetBinContent(i, fmodel.Eval(hbckg.GetBinCenter(i)))\n\n\tfor gAe in [14, 0.5] :\n\t\tlist_result = []\n\t\tfor nsimu in range(100) :\n\t\t\tprint gAe, nsimu\n\t\t\t#Simulate fake data \n\t\t\thdata_sim = TH1F(\"hdata_sim\"+str(nsimu)+str(gAe), \"hdata_sim\"+str(nsimu)+str(gAe), bin_X, min_X, max_X)\n\t\t\tfor i in range(1, bin_X+1): \n\t\t\t\thdata_sim.SetBinContent(i, hbckg.GetBinContent(i) + np.random.poisson(np.power(gAe*1E-12,4)*norm_flux.Eval(hbckg.GetBinCenter(i))))\n\n\t\t\thdata_sim.SetLineColor(kRed)\n\t\t\t# hsignal.SetLineColor(kGreen-3)\n\t\t\thbckg.Draw()\n\t\t\thdata_sim.Add(hbckg,-1)\n\t\t\thdata_sim.Draw(\"same\")\n\t\t\tprint hdata_sim.Integral()*hdata_sim.GetBinWidth(10)\n\t\t\t# hsignal.Draw(\"same\")\n\t\t\traw_input()\n\n\t\t\t#Build likelihood\n\t\t\tclass likelihood_hist:\n\t\t\t\tdef __call__( self, x, par ):\n\t\t\t\t\tlikelihood =0\n\t\t\t\t\tfor i in range(1,bin_X+1):\n\t\t\t\t\t\tN_expected =hbckg.GetBinContent(i)+np.power(x[0]*1E-12,4)*norm_flux.Eval(hbckg.GetBinCenter(i)) #hsignal.GetBinContent(i)\n\t\t\t\t\t\tN_obs = hdata_sim.GetBinContent(i)\n\t\t\t\t\t\tif N_expected>0:\n\t\t\t\t\t\t\tlikelihood +=-N_expected+N_obs*np.log(N_expected)-log_factorial(N_obs)\n\t\t\t\t\treturn -(likelihood + par[0])\n\n\t\t\tnll = TF1(\"nll\", likelihood_hist(), 1E-2,1E3, 1)\n\t\t\tnll.SetParameter(0,0)\n\t\t\tlist_result.append(nll.GetMinimumX())\n\t\tpl.hist(list_result, histtype = \"step\", bins = 30)\n\t\tpl.show()\n\t\traw_input()\n\n\t# #Create signal histo in c/keV, gAe set to 1\n\t# hsignal = TH1F(\"hsignal\", \"hsignal\", bin_X, min_X, max_X)\n\t# for i in range(1, 301) :\n\t# \thsignal.SetBinContent(i, np.power(1E-12,4)*norm_flux.Eval(hsignal.GetBinCenter(i)))\n\n\t# hbckg.SetLineColor(kRed)\n\t# hsignal.SetLineColor(kGreen-3)\n\t# hbckg.Draw()\n\t# hdata.Draw(\"same\")\n\t# hsignal.Draw(\"same\")\n\t# raw_input()\n\n\n\n\t\n\n\traw_input()", "def baum_welch(self, O):\n alpha, beta, gamma, B, _Z_ = self.forward_backward(O)\n #\n \"\"\"\n gamma_tr = numpy.zeros([len(O) - 1], len(self), len(self)])\n for t in range(gamma_tr.shape[0]):\n for i in range(gamma_tr.shape[1]):\n for j in range(gamma_tr.shape[1]):\n gamma_tr[t, i, j] = alpha[t, i] + self.A.get_log_prob(i, j) + B[j, t + 1] + beta[t + 1,j] - gamma[t, i]\n #\n for i in range(gamma_tr.shape[1]):\n for j in 
range(gamma_tr.shape[1]):\n _weight_ = HMM.log_add(gamma_tr[:, i, j])\n temp_hmm.A.accumulate_transition(i, j, numpy.exp(_weight_)) # This line is candidate to be modified for accumulating logarithms\n #\n \"\"\"\n\n # UPDATE OF THE STATE-TRANSITION PROBABILITIES\n if len(O) > 1:\n for i in range(len(self)):\n _log_den_ = HMM.log_add(gamma[ : -1, i]) # sum(t = 1..T-1, gamma[t, i])\n for j in range(len(self)):\n gamma_tr = numpy.zeros(len(O) - 1)\n for t in range(gamma_tr.shape[0]):\n gamma_tr[t] = alpha[t, i] + self.A.get_log_prob(i, j) + B[j, t + 1] + beta[t + 1, j] - _Z_\n _weight_ = numpy.exp(HMM.log_add(gamma_tr[:]) - _log_den_)\n self.A.accumulate_transition(i, j, value = _weight_) # This line is candidate to be modified for accumulating logarithms\n #\n # UDPDATE OF THE STATE STARTING PROBABILITIES\n if self.P_accumulator is not None:\n self.P_accumulator[:] += gamma[0, :]\n\n # UDPDATE OF THE OUTPUT PROBABILITIES\n if self.modality in ['Discrete']:\n #\n for i in range(gamma.shape[1]):\n #\n _log_den_ = HMM.log_add(gamma[:, i]) # sum(t = 1..T, gamma[t, i])\n _den_ = numpy.exp(_log_den_)\n #\n for k in numpy.unique(O): # range(self.num_symbols)\n _log_num_ = HMM.log_add(gamma[O == k, i])\n _weight_ = numpy.exp(_log_num_ - _log_den_)\n self.S[i].accumulate_sample(k, _weight_, numpy.exp(_log_num_), _den_) # This line is candidate to be modified for accumulating logarithms\n #\n elif self.modality in ['Continuous']:\n #\n for j in range(len(self)):\n #\n _log_denominator_ = HMM.log_add(gamma[:, j]) # sum(t = 1..T, gamma[t, i])\n _denominator_ = numpy.exp(_log_denominator_)\n #\n _log_densities_ = numpy.zeros([len(O), self.S[j].gmm.n_components])\n for t in range(len(O)):\n _log_densities_[t, :] = self.S[j].gmm.log_densities(O[t]) # log(c_j_k * g_j_k(O_t))\n #\n log_xi = numpy.zeros(len(O)) # A one-dimensional vector for computing _xi_t_j_k_ for fixed 'j' and 'k'\n for k in range(_log_densities_.shape[1]):\n log_xi[0] = self.log_P[j] + _log_densities_[0, k] + beta[0, j] # _xi_0_j_k_\n #\n for t in range(1, len(O)):\n _temp_ = numpy.zeros(len(self))\n for i in range(len(self)): # For all the states in the HMM\n _temp_[i] = alpha[t - 1, i] + self.A.get_log_prob(i, j) + _log_densities_[t, k] + beta[t, j]\n log_xi[t] = HMM.log_add(_temp_) # _xi_t_j_k_ for all t > 0\n #\n log_xi -= _Z_ # Dividing by P(O|lambda)\n #\n _xi_t_j_k_ = numpy.exp(log_xi)\n #\n # In the following lines the code of Baum-Welch directly modifies the accumulators\n # of the GMM of each state 'j'\n #\n self.S[j].gmm_accumulator.acc_posteriors[k] += _xi_t_j_k_.sum() # This value is correct because is used as the denominator for updating mean vectors and covariance matrices\n self.S[j].gmm_accumulator.acc_sample_counter[k] += _denominator_ / self.S[j].gmm_accumulator.n_components\n #\n for t in range(len(O)):\n self.S[j].gmm_accumulator.mu[k] += _xi_t_j_k_[t] * O[t]\n if self.S[j].gmm_accumulator.covar_type in GMM.covar_diagonal_types:\n self.S[j].gmm_accumulator.sigma[k] += _xi_t_j_k_[t] * (O[t] * O[t]) # numpy.diagonal(O[t] * O[t])\n else:\n self.S[j].gmm_accumulator.sigma[k] += _xi_t_j_k_[t] * numpy.outer(O[t], O[t])\n else:\n raise Exception('Modality ' + self.modality + ' is not valid or not implemented yet!')", "def get_history(self, taxlot_view):\n history = []\n\n def record_dict(log):\n filename = None if not log.import_filename else path.basename(log.import_filename)\n if filename:\n # Attempt to remove NamedTemporaryFile suffix\n name, ext = path.splitext(filename)\n pattern = 
re.compile('(.*?)(_[a-zA-Z0-9]{7})$')\n match = pattern.match(name)\n if match:\n filename = match.groups()[0] + ext\n return {\n 'state': TaxLotStateSerializer(log.state).data,\n 'date_edited': convert_to_js_timestamp(log.created),\n 'source': log.get_record_type_display(),\n 'filename': filename,\n # 'changed_fields': json.loads(log.description) if log.record_type == AUDIT_USER_EDIT else None\n }\n\n log = TaxLotAuditLog.objects.select_related('state', 'parent1', 'parent2').filter(\n state_id=taxlot_view.state_id\n ).order_by('-id').first()\n master = {\n 'state': TaxLotStateSerializer(log.state).data,\n 'date_edited': convert_to_js_timestamp(log.created),\n }\n\n # Traverse parents and add to history\n if log.name in ['Manual Match', 'System Match', 'Merge current state in migration']:\n done_searching = False\n while not done_searching:\n if (log.parent1_id is None and log.parent2_id is None) or log.name == 'Manual Edit':\n done_searching = True\n elif log.name == 'Merge current state in migration':\n record = record_dict(log.parent1)\n history.append(record)\n if log.parent1.name == 'Import Creation':\n done_searching = True\n else:\n tree = log.parent1\n log = tree\n else:\n tree = None\n if log.parent2:\n if log.parent2.name in ['Import Creation', 'Manual Edit']:\n record = record_dict(log.parent2)\n history.append(record)\n elif log.parent2.name == 'System Match' and log.parent2.parent1.name == 'Import Creation' and \\\n log.parent2.parent2.name == 'Import Creation':\n # Handle case where an import file matches within itself, and proceeds to match with\n # existing records\n record = record_dict(log.parent2.parent2)\n history.append(record)\n record = record_dict(log.parent2.parent1)\n history.append(record)\n else:\n tree = log.parent2\n if log.parent1.name in ['Import Creation', 'Manual Edit']:\n record = record_dict(log.parent1)\n history.append(record)\n else:\n tree = log.parent1\n\n if not tree:\n done_searching = True\n else:\n log = tree\n elif log.name == 'Manual Edit':\n record = record_dict(log.parent1)\n history.append(record)\n elif log.name == 'Import Creation':\n record = record_dict(log)\n history.append(record)\n\n return history, master", "def writestat(self, snapshot, halos, statoutfile, hubble=None):\n s = snapshot\n mindarkmass = min(s.dark['mass'])\n\n if hubble is None:\n hubble = s.properties['h']\n\n outfile = statoutfile\n logger.info(\"Writing stat file to %s\" % statoutfile)\n fpout = open(outfile, \"w\")\n header = \"#Grp N_tot N_gas N_star N_dark Mvir(M_sol) Rvir(kpc) GasMass(M_sol) StarMass(M_sol) DarkMass(M_sol) V_max R@V_max VelDisp Xc Yc Zc VXc VYc VZc Contam Satellite? False? ID_A\"\n print >> fpout, header\n nhalos = halos._nhalos\n for ii in xrange(nhalos):\n h = halos[ii + 1].properties # halo index starts with 1 not 0\n # 'Contaminated'? 
means multiple dark matter particle masses in halo)\"\n icontam = np.where(halos[ii + 1].dark['mass'] > mindarkmass)\n if (len(icontam[0]) > 0):\n contam = \"contam\"\n else:\n contam = \"clean\"\n # may want to add implement satellite test and false central\n # breakup test.\n\n n_dark = h['npart'] - h['n_gas'] - h['n_star']\n M_dark = h['mass'] - h['M_gas'] - h['M_star']\n ss = \" \" # can adjust column spacing\n outstring = str(int(h['halo_id'])) + ss\n outstring += str(int(h['npart'])) + ss + str(int(h['n_gas'])) + ss\n outstring += str(int(h['n_star'])) + ss + str(int(n_dark)) + ss\n outstring += str(h['mass'] / hubble) + ss + \\\n str(h['Rvir'] / hubble) + ss\n outstring += str(h['M_gas'] / hubble) + ss + \\\n str(h['M_star'] / hubble) + ss\n outstring += str(M_dark / hubble) + ss\n outstring += str(h['Vmax']) + ss + str(h['Rmax'] / hubble) + ss\n outstring += str(h['sigV']) + ss\n # pos: convert kpc/h to mpc (no h).\n outstring += str(h['Xc'] / hubble / 1000.) + ss\n outstring += str(h['Yc'] / hubble / 1000.) + ss\n outstring += str(h['Zc'] / hubble / 1000.) + ss\n outstring += str(h['VXc']) + ss + \\\n str(h['VYc']) + ss + str(h['VZc']) + ss\n outstring += contam + ss\n outstring += \"unknown\" + \\\n ss # unknown means sat. test not implemented.\n outstring += \"unknown\" + ss # false central breakup.\n print >> fpout, outstring\n fpout.close()\n return 1" ]
[ "0.5746956", "0.53531224", "0.51111126", "0.5078", "0.5021775", "0.49615657", "0.4913952", "0.48098966", "0.4727419", "0.4717308", "0.46857807", "0.4666251", "0.46613628", "0.46605286", "0.46548498", "0.46313113", "0.4621329", "0.45925197", "0.45755017", "0.4556348", "0.45549068", "0.45292795", "0.45108697", "0.4508669", "0.44863328", "0.4485914", "0.4465646", "0.44639456", "0.44600958", "0.44600925" ]
0.565708
1
Number of progenitors at a given snapshot z1
def find_progenitors_at_z(self, SH, mtree, z1, z2): for ss in range(z1, z2): # nodes at redshift ss ss_indx = np.where(mtree.data.snapshotNumber.values == ss) nodeID = mtree.data.index.values[ss_indx] nodeID_desc = mtree.data.descendantIndex.values[ss_indx] # find number of progenitors for nodes at redshift ss if ss != z1: _progcounts = np.zeros(len(nodeID)) for ii in range(len(nodeID_past_desc)): if nodeID_past_desc[ii] in nodeID: indx = np.where(nodeID == nodeID_past_desc[ii]) _progcounts[indx] = count[ii] nodeID_desc_unique, count = np.unique(nodeID_desc, return_counts=True) nodeID_desc_unique=nodeID_desc_unique[1:]; count=count[1:] nodeID_past = nodeID nodeID_past_desc = nodeID_desc_unique if ss != z1: _progcounts_past = _progcounts print('_progcounts', _progcounts)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tot_num_of_progenitors_at_z(self, SH, mtree, z1, z2):\n \n for ss in range(z1, z2+1):\n print('redshift:', ss)\n # nodes at redshift ss\n ss_indx = np.where(mtree.data.snapshotNumber.values == ss)\n nodeID = mtree.data.index.values[ss_indx]\n nodeID_desc = mtree.data.descendantIndex.values[ss_indx]\n \n # find number of progenitors for nodes at redshift ss\n if ss != z1:\n progcounts = np.zeros(len(nodeID), dtype=int)\n for ii in range(len(nodeID_past_desc)):\n if nodeID_past_desc[ii] in nodeID:\n indx = np.where(nodeID == nodeID_past_desc[ii])\n progcounts[indx] = count[ii]\n\n nodeID_desc_unique, count = np.unique(nodeID_desc, return_counts=True)\n nodeID_desc_unique=nodeID_desc_unique[1:]; count=count[1:]\n \n # add progenitors of progenitors\n if ss != z1:\n for ii in range(len(nodeID)):\n if progcounts[ii] > 1:\n indx = np.where(nodeID_desc_unique == nodeID_desc[ii])\n count[indx] += progcounts[ii] - 1\n\n nodeID_past = nodeID\n nodeID_past_desc = nodeID_desc_unique\n return nodeID, progcounts", "def find_progenitors_until_z(self, mtree, nodeID, z1, z2):\n snapcount = 0\n print('from %d until %d' % (z2, z1))\n for ss in range(z2, z1, -1):\n if ss == z2:\n df_target = pd.DataFrame({'nodeID':nodeID})\n _indx = np.where(mtree.data.snapshotNumber.values == ss-1)\n nodeID_prog = mtree.data.index.values[_indx]\n nodeID_prog_desc = mtree.data.descendantIndex.values[_indx]\n _indx = np.where((nodeID_prog_desc < 1e15) &\n (nodeID_prog_desc > 1e11))\n nodeID_prog = nodeID_prog[_indx]\n nodeID_prog_desc = nodeID_prog_desc[_indx]\n\n df_prog = pd.DataFrame({'nodeID' : nodeID_prog,\n 'nodeID_target' : nodeID_prog_desc})\n\n # Initiliaze Output Array\n progcounts = np.zeros((df_target['nodeID'].size, z2-z1))\n\n # nodeID_prog_desc_unic is sorted\n nodeID_prog_desc_unic, count = np.unique(nodeID_prog_desc,\n return_counts=True)\n # remove -1's\n nodeID_prog_desc_unic=nodeID_prog_desc_unic[1:]; count=count[1:]\n\n # Nr. of progenitors for sub-&halos at snapshot z2\n s = pd.Index(df_target['nodeID'].tolist())\n _indx_now = s.get_indexer(list(nodeID_prog_desc_unic))\n now_sort_indx = np.argsort(df_target['nodeID'].values[_indx_now])\n pro_sort_indx = np.argsort(nodeID_prog_desc_unic)\n progcounts[_indx_now[now_sort_indx], snapcount] = count[pro_sort_indx]\n \n else:\n df_now = df_prog\n _indx = np.where(mtree.data.snapshotNumber.values == ss-1)\n nodeID_prog = mtree.data.index.values[_indx]\n nodeID_prog_desc = mtree.data.descendantIndex.values[_indx]\n #_indx = np.where((nodeID_prog_desc < 1e15) &\n # (nodeID_prog_desc > 1e10))\n #nodeID_prog = nodeID_prog[_indx]\n #nodeID_prog_desc = nodeID_prog_desc[_indx]\n df_prog = pd.DataFrame({'nodeID' : nodeID_prog})\n \n progcounts_local = np.zeros(df_now['nodeID'].size)\n nodeID_prog_desc_unic, count = np.unique(nodeID_prog_desc,\n return_counts=True)\n # remove -1's\n nodeID_prog_desc_unic=nodeID_prog_desc_unic[1:]; count=count[1:]\n \n # progenitors for snapshot ss\n s = pd.Index(df_now['nodeID'].tolist())\n _indx_now = s.get_indexer(list(nodeID_prog_desc_unic))\n now_sort_indx = np.argsort(df_now['nodeID'].values[_indx_now])\n pro_sort_indx = np.argsort(nodeID_prog_desc_unic)\n progcounts_local[_indx_now[now_sort_indx]] = count[pro_sort_indx]\n df_now['progcount'] = pd.Series(progcounts_local,\n index=df_now.index, dtype=int)\n\n # Nr. 
of progenitors for sub-&halos at snapshot z2\n df_inter = df_now.groupby(['nodeID_target'],\n as_index=False)['progcount'].sum()\n # only real progeniteurs\n df_inter = df_inter[(df_inter['nodeID_target'] > 1e10) & \n (df_inter['nodeID_target'] < 1e15)]\n df_inter = df_inter.drop_duplicates(subset=['nodeID_target'],\n keep='first')\n \n s = pd.Index(df_target['nodeID'].tolist())\n _indx_now = s.get_indexer(df_inter['nodeID_target'].tolist())\n now_sort_indx = np.argsort(df_target['nodeID'].values[_indx_now])\n pro_sort_indx = np.argsort(df_inter['nodeID_target'].values)\n progcounts[_indx_now[now_sort_indx], snapcount] = df_inter['progcount'].values[pro_sort_indx]\n\n # sort nodeID_prog to nodeID\n #s = pd.Index(df_now['nodeID'].tolist())\n #_indx_now = s.get_indexer(list(nodeID_prog_desc_unic))\n #df_now['nodeID_target'].values[_indx_now]\n \n obs_ref_local = np.zeros(df_prog['nodeID'].size)\n for ii in range(len(nodeID_prog_desc_unic)):\n tarID = df_now.loc[\n df_now['nodeID'] == nodeID_prog_desc_unic[ii],\n 'nodeID_target'].values.astype(int)\n if tarID:\n _indx = np.where(\n nodeID_prog_desc == nodeID_prog_desc_unic[ii])\n obs_ref_local[_indx] = tarID\n df_prog['nodeID_target'] = pd.Series(obs_ref_local,\n index=df_prog.index)\n\n snapcount += 1\n del nodeID_prog_desc\n del df_now, df_inter, df_prog\n return np.asarray(df_target['nodeID'].tolist()), progcounts", "def count():", "def count_gates(qobj, basis, qubits):\n\n #TO DO\n pass", "def count_pegs(self):\r\n count = 0\r\n\r\n for i in range(0, len(self.matrix)):\r\n for j in range(0, len(self.matrix[i])):\r\n if self.matrix[i][j] == \"1\":\r\n count += 1\r\n\r\n return count", "def find_progenitors_until_z_EAGLE(self, mtree, nodeID, z1, z2):\n snapcount = 0\n print(':Read MergerTree from %d until %d' % (z2, z1))\n for ss in range(z2, z1, -1):\n if ss == z2:\n df_target = pd.DataFrame({'nodeID':nodeID})\n _indx = np.where(mtree.data.snapnum.values == ss-1)\n nodeID_prog = mtree.data.index.values[_indx]\n nodeID_prog_desc = mtree.data.descendantIndex.values[_indx]\n _indx = np.where((nodeID_prog_desc < 1e15) &\n (nodeID_prog_desc > 1e11))\n nodeID_prog = nodeID_prog[_indx]\n nodeID_prog_desc = nodeID_prog_desc[_indx]\n\n df_prog = pd.DataFrame({'nodeID' : nodeID_prog,\n 'nodeID_target' : nodeID_prog_desc})\n\n # Initiliaze Output Array\n progcounts = np.zeros((df_target['nodeID'].size, z2-z1))\n\n # nodeID_prog_desc_unic is sorted\n nodeID_prog_desc_unic, count = np.unique(nodeID_prog_desc,\n return_counts=True)\n # remove -1's\n nodeID_prog_desc_unic=nodeID_prog_desc_unic[1:]; count=count[1:]\n\n # Nr. 
of progenitors for sub-&halos at snapshot z2\n s = pd.Index(df_target['nodeID'].tolist())\n _indx_now = s.get_indexer(list(nodeID_prog_desc_unic))\n now_sort_indx = np.argsort(df_target['nodeID'].values[_indx_now])\n pro_sort_indx = np.argsort(nodeID_prog_desc_unic)\n progcounts[_indx_now[now_sort_indx], snapcount] = count[pro_sort_indx]\n \n else:\n df_now = df_prog\n _indx = np.where(mtree.data.snapnum.values == ss-1)\n nodeID_prog = mtree.data.index.values[_indx]\n nodeID_prog_desc = mtree.data.descendantIndex.values[_indx]\n #_indx = np.where((nodeID_prog_desc < 1e15) &\n # (nodeID_prog_desc > 1e10))\n #nodeID_prog = nodeID_prog[_indx]\n #nodeID_prog_desc = nodeID_prog_desc[_indx]\n df_prog = pd.DataFrame({'nodeID' : nodeID_prog})\n \n progcounts_local = np.zeros(df_now['nodeID'].size)\n nodeID_prog_desc_unic, count = np.unique(nodeID_prog_desc,\n return_counts=True)\n # remove -1's\n nodeID_prog_desc_unic=nodeID_prog_desc_unic[1:]; count=count[1:]\n \n # progenitors for snapshot ss\n s = pd.Index(df_now['nodeID'].tolist())\n _indx_now = s.get_indexer(list(nodeID_prog_desc_unic))\n now_sort_indx = np.argsort(df_now['nodeID'].values[_indx_now])\n pro_sort_indx = np.argsort(nodeID_prog_desc_unic)\n progcounts_local[_indx_now[now_sort_indx]] = count[pro_sort_indx]\n df_now['progcount'] = pd.Series(progcounts_local,\n index=df_now.index, dtype=int)\n\n # Nr. of progenitors for sub-&halos at snapshot z2\n df_inter = df_now.groupby(['nodeID_target'],\n as_index=False)['progcount'].sum()\n # only real progeniteurs\n df_inter = df_inter[(df_inter['nodeID_target'] > 1e10) & \n (df_inter['nodeID_target'] < 1e15)]\n df_inter = df_inter.drop_duplicates(subset=['nodeID_target'],\n keep='first')\n \n s = pd.Index(df_target['nodeID'].tolist())\n _indx_now = s.get_indexer(df_inter['nodeID_target'].tolist())\n now_sort_indx = np.argsort(df_target['nodeID'].values[_indx_now])\n pro_sort_indx = np.argsort(df_inter['nodeID_target'].values)\n progcounts[_indx_now[now_sort_indx], snapcount] = df_inter['progcount'].values[pro_sort_indx]\n\n # sort nodeID_prog to nodeID\n #s = pd.Index(df_now['nodeID'].tolist())\n #_indx_now = s.get_indexer(list(nodeID_prog_desc_unic))\n #df_now['nodeID_target'].values[_indx_now]\n \n obs_ref_local = np.zeros(df_prog['nodeID'].size)\n for ii in range(len(nodeID_prog_desc_unic)):\n tarID = df_now.loc[\n df_now['nodeID'] == nodeID_prog_desc_unic[ii],\n 'nodeID_target'].values.astype(int)\n if tarID:\n _indx = np.where(\n nodeID_prog_desc == nodeID_prog_desc_unic[ii])\n obs_ref_local[_indx] = tarID\n df_prog['nodeID_target'] = pd.Series(obs_ref_local,\n index=df_prog.index)\n\n snapcount += 1\n del nodeID_prog_desc\n del df_now, df_inter, df_prog\n return np.asarray(df_target['nodeID'].tolist()), progcounts", "def part1(mem):\n return len(paint_panels(mem, 0))", "def count(self,val):\n return sum(1 for e in self.frontierpq if e[0]==val)", "def get_num_carn_landscape(self):\n return len(self.carn_pop)", "def count(self, volume):\n\n countResult = 0\n\n for x in range(volume.shape[0]):\n for y in range(volume.shape[1]):\n for z in range(volume.shape[2]):\n if self.isMember(volume[x,y,z]):\n countResult += 1\n\n return countResult", "def Nprofiles(self):\n return self._nprofiles", "def get_neighbor_live_count(cart):\n count = 0\n for i in range(6):\n cart2 = (cart[0] + dxv[i],cart[1] + dyv[i],cart[2] + dzv[i])\n if check_cart(cart2) and voxel_data[cart_to_loc(cart2)] == 1:\n count += 1\n return count", "def resultCounter(detections):\n counter = 0\n for attribute, value in 
classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter", "def get_num_herb_landscape(self):\n return len(self.herb_pop)", "def report(self):\n print('total 1', len(self.videoids1))\n print('total 2', len(self.videoids2))\n print('total of repeats in_1', len(self.videoids_dict_repeats1))\n print('total of repeats in_2', len(self.videoids_dict_repeats2))\n print('total in_1_missing_in_2', len(self.in_1_missing_in_2))\n print('total in_2_missing_in_1', len(self.in_2_missing_in_1))", "def get_marble_count(self):", "def get_controls_snapshots_count(selenium, src_obj):\n controls_ui_service = webui_service.ControlsService(selenium)\n return {\n \"controls_tab_count\": controls_ui_service.get_count_objs_from_tab(\n src_obj=src_obj),\n \"controls_count\": len(controls_ui_service.get_list_objs_from_tree_view(\n src_obj=src_obj))}", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def numProcs(reportname):\n with open(reportname, \"rb\") as f:\n data = json.load(f)\n numProcesses = len(data[\"behavior\"][\"processes\"])\n return numProcesses", "def pobj_counts(pcode_obj):\n pcode = (pcode_obj.asDict())['pcode'][0] # no multiple pcode blocks - no delimiter\n counts = {'galleries': 0, 'spreads': 0, 'layouts': 0, 'panelgroups': 0}\n # , 'panels': 0, 'skips': 0 }\n galleries = pcode.pop('gallery', '')\n counts['galleries'] = len(galleries)\n for gallery in galleries:\n spreads = gallery.pop('spread', '')\n counts['spreads'] += len(spreads)\n for spread in spreads:\n layouts = spread.pop('layout', '')\n counts['layouts'] += len(layouts)\n for layout in layouts:\n panelgroups = layout.pop('panelgroup', '')\n counts['panelgroups'] += len(panelgroups)\n return counts", "def count_support(projection):\n\tprev_id = -1\n\tsize = 0\n\tfor p in projection:\n\t\tif prev_id != p.id:\n\t\t\tprev_id = p.id\n\t\t\tsize += 1\n\treturn size", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def count_objects_of_size(self, n: int, **parameters: int) -> int:", "def count(self):\n return self.vcount" ]
[ "0.7110273", "0.6287449", "0.5909766", "0.57706916", "0.57402396", "0.5601302", "0.55882317", "0.5510065", "0.5481831", "0.54731274", "0.54057765", "0.5389035", "0.53697664", "0.53530174", "0.5347362", "0.53414553", "0.5321019", "0.53112924", "0.53112924", "0.53112924", "0.53112924", "0.5301699", "0.5271157", "0.5256057", "0.52416265", "0.52416265", "0.52416265", "0.52416265", "0.5238646", "0.52336204" ]
0.6796147
1
Schedule WB category export on Scrapinghub.
def category_export(url: str, chat_id: int, spider='wb', priority=2) -> str: logger.info(f'Export {url} for chat #{chat_id}') client, project = init_scrapinghub() scheduled_jobs = scheduled_jobs_count(project, spider) max_scheduled_jobs = env('SCHEDULED_JOBS_THRESHOLD', cast=int, default=1) if priority < 3 and scheduled_jobs > max_scheduled_jobs: raise Exception('Spider wb has more than SCHEDULED_JOBS_THRESHOLD queued jobs') job = project.jobs.run(spider, priority=priority, job_args={ 'category_url': url, 'callback_url': env('WILDSEARCH_JOB_FINISHED_CALLBACK') + f'/{spider}_category_export', 'callback_params': f'chat_id={chat_id}', }) logger.info(f'Export for category {url} will have job key {job.key}') return 'https://app.scrapinghub.com/p/' + job.key
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def daily_task():\n global CATEGORIES_PAGES\n global BROWSER\n global DATE\n global OBSERVATION\n log.info('Scraper started')\n # Refresh date\n DATE = str(datetime.date.today())\n OBSERVATION = 0\n # Initiate headless web browser\n log.info('Initializing browser')\n BROWSER = webdriver.Chrome(executable_path=PROJECT_PATH + \"/bin/chromedriver\",\n options=OPTIONS)\n # Download topsite and get categories directories\n base_file_name = \"All_cat_\" + DATE + \".html\"\n fetch_html(BASE_URL, base_file_name, PATH_HTML, attempts_limit=1000)\n html_file = open(PATH_HTML + base_file_name).read()\n CATEGORIES_PAGES = get_category_list(html_file)\n log.info('Found ' + str(len(CATEGORIES_PAGES)) + ' categories')\n # Read each categories pages and scrape for data\n for cat in track(CATEGORIES_PAGES,\n description = \"[green]Scraping...\",\n total = len(CATEGORIES_PAGES)):\n cat_file = \"cat_\" + cat['name'] + \"_\" + DATE + \".html\"\n download = fetch_html(cat['directlink'], cat_file, PATH_HTML)\n if download:\n scrap_data(cat)\n # Close browser\n BROWSER.close()\n BROWSER.service.process.send_signal(signal.SIGTERM)\n BROWSER.quit()", "def run(self):\n page = self.fetch_data(self.url)\n stock_list = self.pop_stock_list(page)\n self.write_csv(stock_list)", "def cron_main():\n\t_, club_urls = get_all_clubs()\n\t\n\tclub_info_full =[]\n\tcat_full =[]\n\n\tfor url in club_urls:\n\t\tclub_info, cat_arr = get_club_info(url)\n\t\tclub_info_full.append(club_info)\n\t\tcat_full.extend(cat_arr)\n\n\twith open('clubs.csv', 'w+') as f:\n\t newfile = csv.writer(f, delimiter='|',quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\t for club in infoClubs:\n\t \tclubStuff = []\n\t \tfor info in club:\n\t \t\tclubStuff.append(info)\n\t newfile.writerow(clubStuff)\n\n\twith open('clubsCat.csv', 'w+') as fn:\n\t catFile = csv.writer(fn, delimiter=',',quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\n\t catFile.writerow(['club_id','club_tag'])\n\t for lol in categoryArr:\n\t catFile.writerow(lol)", "def main():\n base_url = \"https://books.toscrape.com/index.html\"\n category_links = scrap_site_category_links(base_url)\n\n\n for category_link in category_links:\n book_links = scrap_book_links(category_link)\n\n headings = [\"product_page_url\", \"upc\", \"title\", \"price_including_tax\", \"price_excluding_tax\",\n \"number_available\", \"product_description\", \"category\", \"review_rating\", \"image_url\"]\n\n with open(get_category_name(category_link) + \".csv\", 'w', encoding=\"utf-8-sig\") as csv_file:\n writer = csv.DictWriter(csv_file, delimiter=';', fieldnames=headings)\n writer.writeheader()\n\n for book_link in book_links:\n book = scrap_book_info(book_link)\n writer.writerow(book)\n\n image_downloader(book[\"image_url\"], \"./Books_to_Scrape_Images\", book[\"upc\"] + \".png\")", "def start_continuous_export():\n pass", "def genScheduleCSV():\r\n try: \r\n printSchedule()\r\n save_class_list()\r\n print(\"\\nSchedule generated, check working directory\")\r\n except Exception as e:\r\n print(\"Exception found\" + str(e))", "def run(self):\n empty = False\n while not empty:\n try:\n # Grab fields\n url = self.genre_urls.get()\n namestamp = \"{}.csv\".format(str(int(round(time.time() * 1000000))))\n # GET request\n self.logger.info('Attempting to request %s', url)\n self.crawler.set_url(url)\n series = self.crawler.get_series()\n self.logger.info('Attempting to write %s', url)\n # Grab writer -> writes series\n csv_dir = './{}/{}'.format(self.directory, namestamp)\n writer = csv.writer(open(csv_dir, 'wb'))\n 
writer.writerow(Series.fields)\n for s in series:\n writer.writerow(s.to_line())\n self.logger.info('Wrote %s', namestamp)\n except Exception, e: # pylint: disable=W0703\n print e\n finally:\n self.genre_urls.task_done()\n empty = self.genre_urls.empty()", "def _apply_schedule_exporting(self):\n\n # generate synthetic apps from the workload set\n synthetic_apps = self.workload_set.generate_synapps_from_workloads()\n\n # set up the synthetic workload (from synapps and config)\n sa_workload = SyntheticWorkload(self.config, synthetic_apps)\n\n # Apply individual scaling factors to the time-series of the synthetic apps\n sc_facts = self.config.model[\"schedule_exporting\"].get('scaling_factors')\n if sc_facts:\n sa_workload.scaling_factors = sc_facts\n\n # Apply global scaling factor to the synthetic apps (including #CPU's, etc..)\n sc_facts_g = self.config.model[\"schedule_exporting\"].get('global_scaling_factor', 1.0)\n if sc_facts_g:\n sa_workload.glob_scaling_factor = sc_facts_g\n\n # Then export each synthetic app of the workload\n kschedule_path = os.path.join(self.config.dir_output, self.config.kschedule_filename)\n sa_workload.export_kschedule(kschedule_path)", "def run(self):\n while True:\n self.logger.info(\"Scraping...\")\n print('scraping...')\n decks = []\n try:\n decks.extend(scrape_decks())\n except Exception as e:\n print('scraping exception' + str(e))\n self.logger.exception(\n 'Scraper for TappedOut raised an exception'\n )\n\n self.insert_decks(decks)\n\n self.logger.info(\n \"Done scraping, sleeping for {} days\".format(self.interval)\n )\n time.sleep(self.interval * (60 * 60 * 24))", "def run(self):\n\n # creation of directory named after the category name if it doesn't already exist\n if not os.path.exists(\"results/{}/\".format(self.category_name)):\n os.mkdir(\"results/{}\".format(self.category_name))\n\n # creation of books_img directory if it doesn't already exist\n if not os.path.exists(\"results/{}/books_img/\".format(self.category_name)):\n os.mkdir(\"results/{}/books_img\".format(self.category_name))\n\n with open(\"results/{}/{}.csv\".format(self.category_name, self.category_name), \"w\") as file:\n file.write(self.columns_headers)\n\n for book_page_url in self.url_list:\n\n # data extraction from books pages\n\n book_request = requests.get(book_page_url)\n book_request.encoding = \"utf-8\"\n\n if book_request.ok:\n book_soup = BeautifulSoup(book_request.text, \"html.parser\")\n book_title = book_soup.find(\"h1\").get_text()\n rate_and_product_description = book_soup.findAll(\"p\")\n rate = rate_and_product_description[2][\"class\"][1]\n rate = self.rate_dict[rate]\n product_description = rate_and_product_description[3].get_text()\n # We replace \";\" with \":\" in product_description\n product_description = self.regex_wrong_separator.sub(\":\", product_description)\n img_url = book_soup.find(\"img\")\n img_url = img_url[\"src\"].replace(\"../../\", self.target_url)\n information_table = book_soup.findAll(\"td\")\n\n # list which contains all data about the book (sorted in order)\n info_list = [book_page_url]\n\n # loop which gets data from the information tab and adds them in the info_list\n for information_line in information_table:\n information_line = information_line.get_text()\n info_list.append(information_line)\n # remove the field \"Books\"\n del info_list[2]\n # remove the field \"tax\"\n del info_list[4]\n info_list.insert(2, \"{}\".format(book_title))\n info_list.insert(6, product_description)\n info_list.insert(7, self.category_name)\n info_list[8] 
= rate\n info_list.append(img_url)\n\n # extraction of the number of books available\n info_list[5] = self.regex_extract_number.findall(info_list[5])[0]\n # convert info_list in str using \";\" as separator before writing it in cvs file\n info = \";\".join(info_list)\n\n file.write(\"\\n\" + info)\n\n # We replace \"/\" by \"-\" in book title\n book_title = self.regex_title_correction.sub(\"-\", book_title)\n urllib.request.urlretrieve(img_url, \"results/{}/books_img/{}.png\".format(\n self.category_name, book_title))", "def download_multimonth_csv(out_dir, year_start, month_start, year_end, month_end, station, product='water_level', datum='STND', time_zone='GMT'):\n\n # add trailing slash to directory name, if necessary\n if out_dir[-1] is not ('/' or '\\\\'):\n out_dir = out_dir+'/'\n\n # create directory if necessary\n if not os.path.isdir(out_dir):\n os.mkdir(out_dir)\n\n file_prefix = station+'_'+product+'_'\n\n years = range(int(year_start),int(year_end)+1)\n\n for year in years:\n if year == years[0]:\n mon_range = range(month_start,13)\n elif year == years[-1]:\n mon_range = range(1,month_end+1)\n else:\n mon_range = range(1,13)\n\n for mon in mon_range:\n mon_str = str(mon).zfill(2)\n\n # file to write\n filename = file_prefix+str(year)+mon_str+'.csv'\n out_file = os.path.join(out_dir,filename)\n\n # determine number of days in month\n ndays = monthrange(year, mon)[1]\n ndays_str = str(ndays).zfill(2)\n\n begin_date = str(year)+mon_str+'01'\n end_date = str(year)+mon_str+ndays_str\n\n download_6min_csv(out_file, begin_date, end_date, station, product, datum, time_zone)", "def _process(self):\n export_collect_data(self.kwargs[\"collect\"])", "def final_series():\n tickers = pd.read_excel(os.path.abspath(os.path.dirname(__file__)) +\"./codigos.xlsx\", \n header=[0]).values.flatten()\n # tickers = pd.read_excel(\"./codigos.xlsx\", \n # header=[0]).values.flatten()\n ls = fetch_series(list(set(tickers)))\n net_series = [s for s in ls if _cleasing(s, [\"D\", \"M\"]) is not None]\n p = os.path.abspath(os.path.dirname(__file__))\n with open(p + \"/series_bcb\", \"wb\") as f:\n pickle.dump(net_series, f)\n # with open(\"./series_bcb\", \"wb\") as f:\n # pickle.dump(net_series, f) ", "def _after_export(self, *args, **kwargs):\n return", "def crawl_meta_data(api, limit=None):\n app_lists = _discover_apps(api)\n for app_list in app_lists:\n sub_category = app_list.subcategory.proto\n for app in app_list:\n app.proto.category.CopyFrom(sub_category)\n LOGGER.info(app)\n app.write_to_file()\n LOGGER.info(sub_category)", "def download_all():\n task = load_jt('task.json')\n data = load_jt('data.json')\n spider = Crawler()\n \n for _, v in task.iteritems():\n disease_name = v['data']['disease_name']\n data.setdefault(disease_name, {})\n for url, v1 in ignore_iteritems(v, ignore = ['data']):\n print url\n html = spider.html(url)\n if html:\n soup = BS4(html)\n div = soup.find('div', id='main-content')\n data[disease_name].setdefault(v1['data']['category'], str(div))\n dump_jt(data, 'data.json', fastmode = True, replace = True)", "def export_ads(results,out_folder):\n try:\n os.makedirs(out_folder)\n except OSError:\n LOG.debug('Creating output file folder ...')\n \n export_ad_counter = 1 # assign unique number to ads for export to mturk\n #short_listed_companies = ['google adsense', 'doubleclick']\n with open(os.path.join(out_folder,'ad_labelling.csv'), 'w') as fwtr:\n # write the titles\n fwtr.write('#{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}\\n'.format(\\\n 'Ad#', 'Company', 'FileType', 
'Ad-Category', 'Website-URL',\\\n 'Refresh-Num','Training-Topic', 'Context-of-site', 'Total', 'Ad-src'))\n # make sure we only add one ad\n for train_category, cat_dict in results.items():\n for test_site, bug_dict_list in cat_dict.items():\n for refresh_num in range(len(bug_dict_list)):\n bug_dict = bug_dict_list[refresh_num]\n for bug, bugcount in bug_dict.items():\n if not bug.is_ad():\n #TODO check bug_type in ffext\n continue\n if bug.get_filetype() in ['swf', 'png', 'gif', 'jpg']:\n file_name = '%d.%s' % (export_ad_counter, bug.get_filetype())\n new_location = os.path.join(out_folder, file_name)\n copy2(bug.get_filepath(), new_location)\n fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7} , {8}, {9},\\\n \\n'.format(file_name, bug.get_name(), bug.get_filetype(),\n '' ,test_site, refresh_num, train_category, 'N/A', bugcount,\n bug.get_src()))\n export_ad_counter += 1", "def test_scrape(self):\n simfiles = scrape_category.get_category_from_ziv(\"category_test\", self.CATEGORY_URL)\n compare_simfile_records(simfiles, EXPECTED_SIMFILES)", "def save_new_rss_subscription_task(feed_obj):\n save_new_rss_subscription(feed_obj)\n logger.info(\"Entries for new Feed subcription\")", "def download_report():\n entities = get_names()\n save_csv(entities)", "def __init__(self, sleep=1, path_driver=None, headless=True, date_format='%Y-%m-%d'):\n # Current directory\n self.dir = os.getcwd()\n # Define download folder for browser:\n if os.name == 'nt':\n self.download_path = self.dir + r'\\tmp'\n else:\n self.download_path = self.dir + '/tmp'\n # Create a temporary folder in case it does not exist yet\n if not os.path.isdir('tmp'):\n os.mkdir('tmp')\n # Define the path to the downloaded csv-files (this is where the trends are saved)\n if os.name == 'nt':\n self.filename = 'tmp\\\\multiTimeline.csv'\n else:\n self.filename = './tmp/multiTimeline.csv'\n # Whether the browser should be opened in headless mode\n self.headless = headless\n # Path to the driver of Google Chrome\n self.path_driver = path_driver\n # Initialize the browser variable\n self.browser = None\n # Sleep time used during the scraping procedure\n self.sleep = sleep\n # Maximal number of consecutive days scraped\n self.max_days = 200\n # Format of the date-strings\n self.date_format = date_format\n # Format of dates used by google\n self._google_date_format = '%Y-%m-%d'\n # Lunch the browser\n self.start_browser()", "def download_opdashboard_csv():\n try:\n csv = driver.find_element_by_xpath('//*[@id=\"content\"]/div/div/div/div[2]/div/div[3]/div[2]/div/div[2]/div/div[2]/div[2]')\n csv.click()\n wait(wait_time=10)\n except Exception as e:\n return \"Error: \" + str(e)\n return \"Success\"", "def main():\n\n df_links = pd.read_csv('data/tagged/ns-stories-full.csv', header=None)\n df_links.columns = ['source', 'date', 'label', 'url']\n\n df_dj = df_links[df_links['source'] == 'Digital Journal'].reset_index(drop=True)\n df_dj['date'] = pd.to_datetime(df_dj['date'].apply(lambda x: x[:9]))\n\n dj_csv = pd.DataFrame(columns = [\"url\",\"date\",\"title\", \"author\", \"content\",\"tag\", \"label\"])\n for i, row in tqdm(df_dj.iterrows()):\n try:\n url = row[\"url\"]\n html_soup = get_article(url)\n title = get_title(html_soup)\n tag = get_tags(html_soup)\n content = get_content(html_soup)\n author = get_author(html_soup)\n dj_csv = dj_csv.append({\"url\": url, \"date\": row[\"date\"], \"title\": title, \"author\": author, \n \"content\": content,\"tag\": tag, \n \"label\": row[\"label\"]}, \n ignore_index = True)\n except:\n print(i)\n 
time.sleep(10)\n try:\n url = row[\"url\"]\n html_soup = get_article(url)\n title = get_title(html_soup)\n tag = get_tags(html_soup)\n content = get_content(html_soup)\n author = get_author(html_soup)\n dj_csv = dj_csv.append({\"url\": url, \"date\": row[\"date\"], \"title\": title, \"author\": author, \n \"content\": content,\"tag\": tag, \n \"label\": row[\"label\"]}, \n ignore_index = True)\n except:\n continue\n \n dj_csv.to_csv(\"digital_journal.csv\", index=False)", "def test_create_cwb(self):\n \"\"\"burst-cwb.txt\"\"\"\n time.sleep(SLEEP_TIME)\n eventFile = os.path.join(testdatadir, \"burst-cwb.txt\")\n r = gracedb.createEvent(\"Test\", \"CWB\", eventFile)\n self.assertEqual(r.status, 201) # CREATED\n cwb_event = r.json()\n self.assertEqual(cwb_event['group'], \"Test\")\n self.assertEqual(cwb_event['pipeline'], \"CWB\")\n self.assertEqual(float(cwb_event['gpstime']), 1042312876.5090)", "def csv_export(df_to_export, norm_term, strain_info):\n \n # Makes a list of unique and sorted 'Condition' values to use for the iteration processes.\n i_term = 0\n cond_list = df_to_export.index.get_level_values('Condition')\n cond_list = sorted(list(set(cond_list)))\n \n # Checks for presence of './archive' directory, and creates it if it doesn't exist.\n if not os.path.isdir('./archive'):\n os.mkdir('archive')\n\n # Generate files for all condition sets with 'Temp' = -80.\n for idx, condi in df_to_export[-80].groupby('Condition'):\n csv_f_name = './archive/' + strain_info + '_' + cond_list[i_term] + '_' + '-80_' + norm_term + '.csv'\n condi.to_csv(path_or_buf=csv_f_name, index_label=['Condition', 'Dose'])\n i_term += 1\n\n # Generate files for all condition sets with 'Temp' = 'RT'.\n i_term = 0\n for idx, condi in df_to_export['RT'].groupby('Condition'):\n csv_f_name = './archive/' + strain_info + '_' + cond_list[i_term] + '_' + 'RT_' + norm_term + '.csv'\n condi.to_csv(path_or_buf=csv_f_name, index_label=['Condition', 'Dose'])\n i_term += 1", "def _process(self):\n export_collect_medias(self.kwargs[\"collect\"])", "def __init__(self):\n self.label = \"Export 330 charts\"\n self.description = \"Exports all charts for 330 squadron\"\n self.canRunInBackground = True", "def main(csv_name):\n BASE_URL = \"https://www.thrashermagazine.com/{}\"\n COVER_URL = BASE_URL.format(\"covers/\")\n\n page = requests.get(COVER_URL)\n\n soup = BeautifulSoup(page.content, 'html.parser')\n image_spans = soup.find_all(\"span\", class_=\"icons\")\n date_spans = soup.find_all(\"span\", class_=\"coverDate\")\n\n cover_urls = []\n month_urls = []\n year_urls = []\n\n for (date_info, cover) in zip(date_spans, image_spans):\n date = date_info.text\n # track month info\n month = get_month(date)\n month_urls.append(month)\n # track year info\n year = get_year(date)\n year_urls.append(year)\n # track cover info\n img_path = cover.find_all('a')[0]['href']\n cover_urls.append(BASE_URL.format(img_path))\n\n cover_df = pd.DataFrame({'month': month_urls, \n 'year': year_urls,\n 'cover_url': cover_urls})\n\n # save data to csv\n cover_df.to_csv(op.join(\"..\", \"data\", csv_name), index=False)", "def __download(self, year, month, day):\n print 'Download...'\n logging.info('[download]->Download...')\n t = datetime.datetime(year, month, day)\n spdata.download(stime=t, stations=self.aodSetting.stations, ftp_dir=self.aodSetting.ftp_root, data_dir=self.aodSetting.dd_dir, ftp_ip=self.aodSetting.ftp_ip,\n user=self.aodSetting.ftp_user, pword=self.aodSetting.ftp_psw)\n print 'Download Done!'\n logging.info('[download]->Download Done!')", 
"def create_book_tickers_channel(self) -> str:" ]
[ "0.62322336", "0.5496554", "0.5323388", "0.5212413", "0.51680547", "0.5047314", "0.50356627", "0.5001191", "0.4996804", "0.4940439", "0.49359438", "0.48962823", "0.48792583", "0.48306614", "0.47664198", "0.47485566", "0.4733813", "0.47102794", "0.46897477", "0.4680858", "0.46699458", "0.46634978", "0.46582645", "0.46540245", "0.4648477", "0.46089798", "0.45961052", "0.45811886", "0.45756102", "0.45718503" ]
0.5811483
1
Train a simple conv net
img_h = sentence length (padded where necessary)
img_w = word vector length (300 for word2vec)
filter_hs = filter window sizes
hidden_units = [x,y] x is the number of feature maps (per filter window), and y is the penultimate layer
sqr_norm_lim = s^2 in the paper
lr_decay = adadelta decay parameter
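A quick sanity check on these settings is to reproduce the shape bookkeeping that the function below performs once the topical embedding is appended to each word vector. This is only an illustrative sketch; the topic count, topic dimension and sentence length used here are assumed values, not taken from this record.

# illustrative shape check (assumed: num_topics=50, topic_dim=10, img_h=64)
word_dim = 300                              # img_w before topics are appended (word2vec size)
num_topics, topic_dim = 50, 10              # assumed; the real code reads these from U_Topical.shape
img_h = 64                                  # assumed padded sentence length
filter_hs = [3, 4, 5]
feature_maps = 100                          # hidden_units[0]

img_w = word_dim + num_topics * topic_dim   # widened token vector: 300 + 500 = 800
filter_w = img_w                            # each filter spans the full (word + topic) width
filter_shapes = [(feature_maps, 1, h, filter_w) for h in filter_hs]
pool_sizes = [(img_h - h + 1, img_w - filter_w + 1) for h in filter_hs]
print(filter_shapes)                        # [(100, 1, 3, 800), (100, 1, 4, 800), (100, 1, 5, 800)]
print(pool_sizes)                           # [(62, 1), (61, 1), (60, 1)]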
def train_conv_net(datasets, datasets_weights, U, U_Topical,
                   img_w=300, filter_hs=[3,4,5], hidden_units=[100,2], dropout_rate=[0.5],
                   shuffle_batch=True, n_epochs=25, batch_size=50, lr_decay=0.95,
                   conv_non_linear="relu", use_valid_set=True, show_states=False,
                   activations=[Iden], sqr_norm_lim=9, non_static=True):
    rng = np.random.RandomState(3435)
    img_h = len(datasets[0][0])-1
    U_Topical.dtype = "float32"
    (num_topics, topic_dim) = U_Topical.shape
    word_w = img_w
    img_w = int(img_w + num_topics*topic_dim)
    filter_w = img_w
    feature_maps = hidden_units[0]
    filter_shapes = []
    pool_sizes = []
    for filter_h in filter_hs:
        filter_shapes.append((feature_maps, 1, filter_h, filter_w))  # 100 1 3 300
        pool_sizes.append((img_h-filter_h+1, img_w-filter_w+1))  # size of words samples one
    parameters = [("image shape", img_h, img_w), ("filter shape", filter_shapes),
                  ("hidden_units", hidden_units), ("dropout", dropout_rate),
                  ("batch_size", batch_size), ("non_static", non_static),
                  ("learn_decay", lr_decay), ("conv_non_linear", conv_non_linear),
                  ("non_static", non_static), ("sqr_norm_lim", sqr_norm_lim),
                  ("shuffle_batch", shuffle_batch)]
    # print parameters
    # define model architecture
    index = T.lscalar()
    x = T.matrix('x')
    y = T.ivector('y')
    x_topic = T.tensor3('x_topic')
    Words = theano.shared(value=U, name="Words")
    Topics = theano.shared(value=U_Topical, name="Topics")
    zero_vec_tensor = T.vector()
    zero_vec = np.zeros(word_w, dtype='float32')
    set_zero = theano.function([zero_vec_tensor],
                               updates=[(Words, T.set_subtensor(Words[0,:], zero_vec_tensor))])
    layer0_input_words = Words[T.cast(x.flatten(), dtype="int32")].reshape((x.shape[0], 1, x.shape[1], Words.shape[1]))
    layer0_inputs_topics = []
    for i in range(num_topics):
        sin_topic = x_topic[:,:,i]
        Topic = Topics[i].reshape((1, Topics[i].shape[0]))
        weights = sin_topic.flatten()
        weights = weights.reshape((weights.shape[0], 1))
        layer0_inputs_topics.append(T.dot(weights, Topic))
    layer0_input_topics = T.concatenate(layer0_inputs_topics, 1)
    layer0_input_topics = layer0_input_topics.reshape((x_topic.shape[0], 1, x_topic.shape[1], num_topics*topic_dim))
    layer0_input = T.concatenate([layer0_input_words, layer0_input_topics], 3)
    conv_layers = []
    layer1_inputs = []
    for i in xrange(len(filter_hs)):
        filter_shape = filter_shapes[i]
        pool_size = pool_sizes[i]
        conv_layer = LeNetConvPoolLayer(rng, input=layer0_input, image_shape=(batch_size, 1, img_h, img_w),
                                        filter_shape=filter_shape, poolsize=pool_size, non_linear=conv_non_linear)
        layer1_input = conv_layer.output.flatten(2)
        conv_layers.append(conv_layer)
        layer1_inputs.append(layer1_input)
    layer1_input = T.concatenate(layer1_inputs, 1)
    hidden_units[0] = feature_maps*len(filter_hs)
    classifier = MLPDropout(rng, input=layer1_input, layer_sizes=hidden_units,
                            activations=activations, dropout_rates=dropout_rate)
    # define parameters of the model and update functions using adadelta
    params = classifier.params
    for conv_layer in conv_layers:
        params += conv_layer.params
    if non_static:
        # if word vectors are allowed to change, add them as model parameters
        params += [Words]  # params are model parameters
        params += [Topics]  # Topics embedding are adjusted
    cost = classifier.negative_log_likelihood(y)
    dropout_cost = classifier.dropout_negative_log_likelihood(y)
    grad_updates = sgd_updates_adadelta(params, dropout_cost, lr_decay, 1e-6, sqr_norm_lim)

    # shuffle dataset and assign to mini batches. if dataset size is not a multiple of mini batches, replicate
    # extra data (at random)
    np.random.seed(3435)
    if datasets[0].shape[0] % batch_size > 0:
        extra_data_num = batch_size - datasets[0].shape[0] % batch_size
        random_index = np.random.permutation(np.arange(datasets[0].shape[0]))
        random_index.astype('int32')
        train_set = datasets[0][random_index,:]
        train_set_weights = datasets_weights[0][random_index,:,:]
        extra_data = train_set[:extra_data_num]
        extra_data_weights = train_set_weights[:extra_data_num]
        new_data = np.append(datasets[0], extra_data, axis=0)
        new_data_weights = np.append(datasets_weights[0], extra_data_weights, axis=0)
    else:
        new_data = datasets[0]
        new_data_weights = datasets_weights[0]
    random_index = np.random.permutation(np.arange(new_data.shape[0]))
    random_index.astype('int32')
    new_data = new_data[random_index]
    new_data_weights = new_data_weights[random_index]
    n_batches = new_data.shape[0]/batch_size
    n_train_batches = int(np.round(n_batches*0.9))
    test_set_x = np.asarray(datasets[1][:,:img_h], "float32")
    test_set_x_topic = np.asarray(datasets_weights[1][:,:img_h,:], "float32")
    test_set_y = np.asarray(datasets[1][:,-1], "int32")
    if use_valid_set:
        train_set = new_data[:n_train_batches*batch_size,:]
        train_set_weights = new_data_weights[:n_train_batches*batch_size,:,:]
        val_set = new_data[n_train_batches*batch_size:,:]
        val_set_weights = new_data_weights[n_train_batches*batch_size:,:,:]
        train_set_x, train_set_x_topic, train_set_y = shared_dataset((train_set[:,:img_h], train_set_weights, train_set[:,-1]))
        val_set_x, val_set_x_topic, val_set_y = shared_dataset((val_set[:,:img_h], val_set_weights, val_set[:,-1]))
        n_val_batches = n_batches - n_train_batches
        val_model = theano.function([index], classifier.errors(y),
            givens={
                x: val_set_x[index * batch_size: (index + 1) * batch_size],
                x_topic: val_set_x_topic[index * batch_size: (index + 1) * batch_size],
                y: val_set_y[index * batch_size: (index + 1) * batch_size]})
    else:
        train_set = new_data[:,:]
        train_set_x, train_set_x_topic, train_set_y = shared_dataset((train_set[:,:img_h], train_set_weights, train_set[:,-1]))
    # make theano functions to get train/val/test errors
    test_model = theano.function([index], classifier.errors(y),
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size],
            x_topic: train_set_x_topic[index * batch_size: (index + 1) * batch_size],
            y: train_set_y[index * batch_size: (index + 1) * batch_size]})
    train_model = theano.function([index], cost, updates=grad_updates,
        givens={
            x: train_set_x[index*batch_size:(index+1)*batch_size],
            x_topic: train_set_x_topic[index * batch_size: (index + 1) * batch_size],
            y: train_set_y[index*batch_size:(index+1)*batch_size]})
    test_pred_layers = []
    test_size = test_set_x.shape[0]
    test_layer0_input_words = Words[T.cast(x.flatten(), dtype="int32")].reshape((test_size, 1, img_h, Words.shape[1]))
    test_layer0_inputs_topics = []
    for i in range(num_topics):
        sin_topic = x_topic[:,:,i]
        Topic = Topics[i].reshape((1, Topics[i].shape[0]))
        weights = sin_topic.flatten()
        weights = weights.reshape((weights.shape[0], 1))
        test_layer0_inputs_topics.append(T.dot(weights, Topic))
    test_layer0_input_topics = T.concatenate(test_layer0_inputs_topics, 1)
    test_layer0_input_topics = test_layer0_input_topics.reshape((test_size, 1, img_h, num_topics*topic_dim))
    test_layer0_input = T.concatenate([test_layer0_input_words, test_layer0_input_topics], 3)
    for conv_layer in conv_layers:
        test_layer0_output = conv_layer.predict(test_layer0_input, test_size)
        test_pred_layers.append(test_layer0_output.flatten(2))
    test_layer1_input = T.concatenate(test_pred_layers, 1)
    test_y_pred = classifier.predict(test_layer1_input)
    test_error = T.mean(T.neq(test_y_pred, y))
    test_model_all = theano.function([x, x_topic, y], test_error)

    # start training over mini-batches
    print '... training'
    epoch = 0
    best_val_perf = 0
    val_perf = 0
    test_perf = 0
    cost_epoch = 0
    while (epoch < n_epochs):
        epoch = epoch + 1
        if shuffle_batch:
            for minibatch_index in np.random.permutation(range(n_train_batches)):
                cost_epoch = train_model(minibatch_index)
                set_zero(zero_vec)
        else:
            for minibatch_index in xrange(n_train_batches):
                cost_epoch = train_model(minibatch_index)
                set_zero(zero_vec)
        train_losses = [test_model(i) for i in xrange(n_train_batches)]
        train_perf = 1 - np.mean(train_losses)
        if use_valid_set:
            val_losses = [val_model(i) for i in xrange(n_val_batches)]
            val_perf = 1 - np.mean(val_losses)
            if val_perf >= best_val_perf:
                params_conv = []
                params_output = {}
                test_loss = test_model_all(test_set_x, test_set_x_topic, test_set_y)
                test_perf = 1 - test_loss
                best_val_perf = val_perf
                for conv_layer in conv_layers:
                    params_conv.append(conv_layer.get_params())
                params_output = classifier.get_params()
                word_vec = Words.get_value()
                Topic_vec = Topics.get_value()
        else:
            val_perf = 0
        if show_states:
            print('epoch %i, train perf %f %%, val perf %f' % (epoch, train_perf * 100., val_perf*100.))
    if not use_valid_set:
        params_conv = []
        params_output = {}
        test_loss = test_model_all(test_set_x, test_set_x_topic, test_set_y)
        test_perf = 1 - test_loss
        for conv_layer in conv_layers:
            params_conv.append(conv_layer.get_params())
        params_output = classifier.get_params()
        word_vec = Words.get_value()
        Topic_vec = Topics.get_value()
    return test_perf, [params_conv, params_output, word_vec, Topic_vec]
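The adadelta step itself is delegated to sgd_updates_adadelta, which this record references but does not define. As a rough, framework-agnostic sketch (plain numpy rather than Theano shared-variable updates; the function name and the axis of the norm constraint are assumptions of mine), one adadelta update with the squared-norm limit sqr_norm_lim = s^2 mentioned above looks like this:

import numpy as np

def adadelta_step(param, grad, acc_grad, acc_delta, rho=0.95, eps=1e-6, sqr_norm_lim=9):
    # decaying average of squared gradients (rho plays the role of lr_decay above)
    acc_grad[:] = rho * acc_grad + (1 - rho) * grad ** 2
    # scale the gradient by RMS(previous updates) / RMS(gradients)
    step = -np.sqrt(acc_delta + eps) / np.sqrt(acc_grad + eps) * grad
    # decaying average of squared updates
    acc_delta[:] = rho * acc_delta + (1 - rho) * step ** 2
    param = param + step
    # max-norm constraint: rescale weight vectors whose squared L2 norm exceeds s^2
    # (the row-wise axis chosen here is illustrative)
    if param.ndim == 2:
        norms = np.sqrt((param ** 2).sum(axis=1, keepdims=True))
        limit = np.sqrt(sqr_norm_lim)
        param = param * np.where(norms > limit, limit / (norms + eps), 1.0)
    return param

In the Theano version the same bookkeeping is expressed as a list of shared-variable updates handed to theano.function, so each call to train_model applies one such step to every entry of params.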
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cnn_for_text(input_data, conv_window, filter_num, pooling_window, activation_fn=tf.nn.relu,\n padding_vec=None, use_batch_norm=False, bias_initializer=tf.zeros_initializer,\n keep_prob=1, is_training=False):\n # Do padding\n input_shape = tf.shape(input_data)\n if padding_vec is not None:\n padding_tensor = tf.expand_dims(tf.expand_dims(padding_vec, 0), 0) # (1, 1, len(padding))\n padding_tensor = tf.tile(padding_tensor, scalar_array_to_tensor([input_shape[0], int(math.floor(conv_window[0]/2)), 1]))\n input_data = tf.concat(1, [padding_tensor, input_data])\n input_data = tf.concat(1, [input_data, padding_tensor])\n\n if use_batch_norm:\n bias_initializer = None\n\n temp_input = tf.expand_dims(input_data, -1) # expand the channel dimension (last dimension)\n num_layers = len(conv_window)\n for i in range(num_layers):\n with tf.variable_scope(\"Layer_{0}\".format(i)):\n # Convolution\n if i == 0:\n filter_shape = [conv_window[i], filter_num[i], 1, filter_num[i+1]]\n else:\n filter_shape = [conv_window[i], 1, filter_num[i], filter_num[i+1]] # height is 1 after the first conv\n\n # New style\n # temp_conv_output is of size (batch_size, curr_seq_len, 1, curr_filter_num)\n temp_conv_output = tf.contrib.layers.convolution2d(\n inputs=temp_input, num_outputs=filter_num[i+1], kernel_size=filter_shape[:2], stride=[1, 1],\n padding='VALID', activation_fn=activation_fn,\n normalizer_fn=None,\n biases_initializer=bias_initializer,\n weights_initializer=tf.contrib.layers.xavier_initializer_conv2d(uniform=False),\n # biases_initializer=tf.random_uniform_initializer(-config.conv_init_scale, config.conv_init_scale),\n # weights_initializer=tf.random_uniform_initializer(-config.conv_init_scale, config.conv_init_scale),\n )\n if use_batch_norm:\n temp_conv_output = tf.contrib.layers.batch_norm(\n temp_conv_output, decay=0.999, center=True, scale=True, updates_collections=None, epsilon=0.001,\n is_training=is_training, trainable=True, scope=\"Batch_Norm\")\n\n # Old style\n # temp_filter = tf.get_variable(\n # name=\"filter\", shape=filter_shape,\n # initializer=tf.contrib.layers.xavier_initializer_conv2d(uniform=False)\n # )\n # b = tf.get_variable(\n # name=\"bias\", initializer=tf.zeros_initializer(shape=[filter_num[i + 1]])\n # )\n # temp_conv_output = tf.nn.conv2d(temp_input, temp_filter, strides=[1, 1, 1, 1], padding=\"VALID\")\n\n # tmp_seq_len = tmp_seq_len - conv_window[i] + 1\n\n # Pooling\n # final temp_pooling_output is of size (batch_size, 1, 1, final_conv_kernel_num)\n if pooling_window[i] == -1:\n # temp_pooling_output = tf.nn.max_pool(\n # temp_conv_output, ksize=[1, tmp_seq_len, 1, 1], strides=[1, 1, 1, 1], padding=\"VALID\")\n temp_pooling_output = tf.reduce_max(temp_conv_output, reduction_indices=[1], keep_dims=True)\n elif pooling_window[i] > 0:\n temp_pooling_output = tf.nn.max_pool(\n temp_conv_output, ksize=[1, pooling_window[i], 1, 1], strides=[1, 1, 1, 1], padding=\"VALID\")\n else: # no pooling\n temp_pooling_output = temp_conv_output\n\n\n # Old style\n # temp_pooling_output = tf.nn.bias_add(temp_pooling_output, b)\n # temp_input = activation_fn(temp_pooling_output)\n\n # New style\n temp_input = temp_pooling_output\n\n if is_training and keep_prob < 1:\n temp_input = tf.nn.dropout(temp_input, keep_prob)\n\n return tf.squeeze(temp_input, squeeze_dims=[1, 2])", "def train(self, x_train, y_train, w2v_size=300, w2v_window=5, w2v_min_count=1,\n w2v_epochs=100, k_max_sequence_len=500, k_batch_size=128, k_epochs=32, k_lstm_neurons=128,\n k_hidden_layer_neurons=(128, 64, 32), 
verbose=1):\n # Set variables\n self.w2v_size = w2v_size\n self.w2v_window = w2v_window\n self.w2v_min_count = w2v_min_count\n self.w2v_epochs = w2v_epochs\n self.k_max_sequence_len = k_max_sequence_len\n self.k_batch_size = k_batch_size\n self.k_epochs = k_epochs\n self.k_lstm_neurons = k_lstm_neurons\n self.k_hidden_layer_neurons = k_hidden_layer_neurons\n\n # split text in tokens\n x_train = [gensim.utils.simple_preprocess(text) for text in x_train]\n\n logging.info(\"Build & train Word2Vec model\")\n self.w2v_model = gensim.models.Word2Vec(min_count=self.w2v_min_count, window=self.w2v_window,\n size=self.w2v_size,\n workers=multiprocessing.cpu_count())\n self.w2v_model.build_vocab(x_train)\n self.w2v_model.train(x_train, total_examples=self.w2v_model.corpus_count, epochs=self.w2v_epochs)\n w2v_words = list(self.w2v_model.wv.vocab)\n logging.info(\"Vocabulary size: %i\" % len(w2v_words))\n logging.info(\"Word2Vec trained\")\n\n logging.info(\"Fit LabelEncoder\")\n self.label_encoder = LabelEncoder()\n y_train = self.label_encoder.fit_transform(y_train)\n self.num_classes = len(self.label_encoder.classes_)\n y_train = utils.to_categorical(y_train, self.num_classes)\n\n logging.info(\"Fit Tokenizer\")\n self.tokenizer = Tokenizer()\n self.tokenizer.fit_on_texts(x_train)\n x_train = keras.preprocessing.sequence.pad_sequences(self.tokenizer.texts_to_sequences(x_train),\n maxlen=self.k_max_sequence_len)\n num_words = len(self.tokenizer.word_index) + 1\n logging.info(\"Number of unique words: %i\" % num_words)\n\n logging.info(\"Create Embedding matrix\")\n word_index = self.tokenizer.word_index\n vocab_size = len(word_index) + 1\n embedding_matrix = np.zeros((vocab_size, self.w2v_size))\n for word, idx in word_index.items():\n if word in w2v_words:\n embedding_vector = self.w2v_model.wv.get_vector(word)\n if embedding_vector is not None:\n embedding_matrix[idx] = self.w2v_model.wv[word]\n logging.info(\"Embedding matrix: %s\" % str(embedding_matrix.shape))\n\n logging.info(\"Build Keras model\")\n logging.info('x_train shape: %s' % str(x_train.shape))\n logging.info('y_train shape: %s' % str(y_train.shape))\n\n self.k_model = Sequential()\n self.k_model.add(Embedding(vocab_size,\n self.w2v_size,\n weights=[embedding_matrix],\n input_length=self.k_max_sequence_len,\n trainable=False))\n self.k_model.add(LSTM(self.k_lstm_neurons, dropout=0.5, recurrent_dropout=0.2))\n for hidden_layer in self.k_hidden_layer_neurons:\n self.k_model.add(Dense(hidden_layer, activation='relu'))\n self.k_model.add(Dropout(0.2))\n if self.num_classes > 1:\n self.k_model.add(Dense(self.num_classes, activation='softmax'))\n else:\n self.k_model.add(Dense(self.num_classes, activation='sigmoid'))\n\n self.k_model.compile(loss='categorical_crossentropy' if self.num_classes > 1 else 'binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n logging.info(self.k_model.summary())\n\n # Callbacks\n early_stopping = EarlyStopping(monitor='acc', patience=6, verbose=0, mode='max')\n rop = ReduceLROnPlateau(monitor='acc', factor=0.1, patience=3, verbose=1, epsilon=1e-4, mode='max')\n callbacks = [early_stopping, rop]\n\n logging.info(\"Fit Keras model\")\n self.k_model.fit(x_train, y_train,\n batch_size=self.k_batch_size,\n epochs=self.k_epochs,\n callbacks=callbacks,\n verbose=verbose)\n\n logging.info(\"Done\")", "def init_cnn(model_file, hidden_units, num_filters, filter_hs, dropout_rate, n_words, n_dim):\n assert len(num_filters) == len(filter_hs)\n filter_shapes = []\n pool_sizes = []\n for filter_h in filter_hs:\n 
filter_shapes.append((filter_h, n_dim))\n pool_sizes.append((n_words - filter_h + 1, 1))\n\n l_in = LL.InputLayer(shape=(None, 1, n_words, n_dim))\n\n layer_list = []\n for i in range(len(filter_hs)):\n l_conv = LL.Conv2DLayer(l_in, num_filters=num_filters[i], filter_size=filter_shapes[i],\n nonlinearity=L.nonlinearities.rectify,\n W=L.init.HeNormal(gain='relu'))\n l_pool = LL.MaxPool2DLayer(l_conv, pool_size=pool_sizes[i])\n layer_list.append(l_pool)\n\n mergedLayer = LL.ConcatLayer(layer_list)\n\n l_hidden1 = LL.DenseLayer(mergedLayer, num_units=hidden_units[0],\n nonlinearity=L.nonlinearities.tanh,\n W=L.init.HeNormal(gain='relu'))\n l_hidden1_dropout = LL.DropoutLayer(l_hidden1, p=dropout_rate[0])\n\n l_hidden2 = LL.DenseLayer(l_hidden1_dropout, num_units=hidden_units[1],\n nonlinearity=L.nonlinearities.tanh,\n W=L.init.HeNormal(gain='relu'))\n l_hidden2_dropout = LL.DropoutLayer(l_hidden2, p=dropout_rate[1])\n\n l_output = LL.DenseLayer(l_hidden2_dropout, num_units=hidden_units[2],\n nonlinearity=L.nonlinearities.tanh)\n\n net_output = theano.function([l_in.input_var], LL.get_output(l_output, deterministic=True))\n\n with np.load(model_file) as f:\n param_values = [f['arr_%d' % i] for i in range(len(f.files))]\n LL.set_all_param_values(l_output, param_values)\n\n return net_output", "def forward_train(self, imgs, label, token_ids=None, segment_ids=None, input_mask=None, \n mlm_label=None, dvae_imgs=None, v_token_mask=None, hog_features=None, img_metas=None, **kwargs): \n # (batch_size, num_clips*num_crops, channel, num_segments, h, w) -> (batch_size*num_clips*num_crops, channel, num_segments, h, w)\n imgs = imgs.reshape((-1, ) + imgs.shape[2:]) \n if self.from_scratch:\n imgs = imgs / 255.0\n # text reshape: (batch_size, num_candidates, seq_length) -> (batch_size * num_candidates, seq_length)\n token_ids = token_ids.reshape((-1, ) + token_ids.shape[2:])\n text_input_mask = input_mask.reshape((-1, ) + input_mask.shape[2:])\n if mlm_label is not None:\n mlm_label = mlm_label.reshape((-1, ) + mlm_label.shape[2:])\n\n\n visual_token = self.extract_visual_feat(imgs) # b, d, T, h, w\n\n B, D, T, H, W = visual_token.shape\n losses = dict()\n # -------------- nce loss ------------------- #\n if hasattr(self, 'ssl_head'):\n input_ssl_ids = torch.where(mlm_label == -100, token_ids.clone(), mlm_label.clone())\n input_ssl_mask = text_input_mask.clone()\n text_only_out = self.text_backbone(input_ssl_ids, input_ssl_mask)\n # ------------ complete T -------------- #\n text_out_no_mask = text_only_out['last_hidden_state']\n visual_emb, text_emb = self.ssl_head(visual_token, text_out_no_mask, input_ssl_mask, input_ssl_ids)\n\n\n # ------------ complete V ---------------- #\n visual_token = visual_token.view(B, D, T, -1).permute(0, 2, 3, 1)\n\n\n # ---------- foward mask text input ---------- # \n text_out_with_mask = self.text_backbone(token_ids, text_input_mask)\n text_out_last_hidden_state = text_out_with_mask['last_hidden_state']\n\n # ---------- forward mask v input ------------ #\n visual_token_with_mask, v_mask = self.extract_visual_feat(imgs.clone(), v_token_mask) # b, d, T, h, w\n visual_token_mask = visual_token_with_mask.view(B, D, T, -1).permute(0, 2, 3, 1)\n \n v_fusion_output = self.multimodal_backbone(visual_token=visual_token_mask, text_input_mask=text_input_mask.clone(), text_input_embeds=text_out_no_mask.clone())\n \n t_fusion_output = self.multimodal_backbone(visual_token=visual_token, text_input_mask=text_input_mask, text_input_embeds=text_out_last_hidden_state)\n # for mlm #\n 
t_last_hidden_state = t_fusion_output['t_last_hidden_state']\n\n\n\n\n\n # ------------ MLM loss ------------ #\n\n if mlm_label is not None and self.mlm_head is not None:\n # we use mask text for MLM\n # because we doubt there will be miss interaction between wrong img-text pair \n # and the model not learn good relationship between vision and language\n # -------- forward masked text ----------- #\n mlm_prediction_score = self.mlm_head(t_last_hidden_state)\n \n if self.mlm_loss_func is not None:\n mlm_label_idx = torch.where(mlm_label.view(-1) != -100)\n mlm_prediction_mask_score = mlm_prediction_score.view(-1, self.text_vocab_size)[mlm_label_idx[0], :]\n mlm_label_mask = mlm_label.view(-1)[mlm_label_idx]\n mlm_loss = self.mlm_loss_func(mlm_prediction_mask_score, mlm_label_mask)\n else:\n mlm_loss = self.loss_func(mlm_prediction_score.view(-1, self.text_vocab_size), mlm_label.view(-1))\n losses['mlm_loss'] = mlm_loss\n\n\n # ------- Tri-modal alignment with mask sample and ranking --------- #\n if self.mlm_ssl_V_head is not None:\n mlm_visual_feat = v_fusion_output['t_last_hidden_state'][:, 0]\n mask_visual_recon_emb = self.mlm_ssl_V_head(mlm_visual_feat)\n mask_word_emb = self.ssl_head.forward_text(text_out_last_hidden_state) if self.use_Cmask else None\n loss_cvt_rank = self.ssl_loss(visual_emb, text_emb, mask_word_emb, mask_visual_recon_emb)\n losses.update(loss_cvt_rank)\n\n\n if self.symmetry_rank:\n mlm_word_feat = t_last_hidden_state[:, 0]\n mask_word_recon_emb = self.mlm_ssl_T_head(mlm_word_feat)\n\n mask_visual_emb = self.ssl_head.forward_vision(visual_token_with_mask) if self.use_Cmask else None\n \n loss_ctv_rank = self.ssl_loss(text_emb, visual_emb, mask_visual_emb, mask_word_recon_emb)\n loss_ctv_rank['v_nce_loss'] = loss_ctv_rank.pop('nce_loss')\n \n if self.ssl_loss.use_rank:\n loss_ctv_rank['rank_v_vm_loss'] = loss_ctv_rank.pop('rank_t_tm_loss')\n\n \n\n losses.update(loss_ctv_rank)\n\n\n\n return losses", "def train(trial_num, image_num, filter_num, filter_size, input_size, channel_num, pooling_rate, left_upper_padding, right_lower_padding):\n\n input_batch_num = 1\n batch_num = 2\n\n init_filters = np.array(np.random.normal(size=filter_num * channel_num *\n filter_size*filter_size), dtype=\"float32\")\n #init_filters = np.array([1.0] * filter_num * channel_num * filter_size * filter_size, dtype=\"float32\")\n init_filters = 0.01 * init_filters.reshape(filter_num, channel_num*filter_size*filter_size)\n\n init_hbias = np.array([-0.1] * filter_num, dtype=\"float32\").reshape(filter_num, 1)\n\n init_vbias = np.array([0.0] * channel_num, dtype=\"float32\").reshape(channel_num, 1)\n\n libnvcrbm = __import__(\"nvcrbm\")\n cur_filters = libnvcrbm.init(filter_num, filter_size, \n input_batch_num, input_size, channel_num,\n pooling_rate, left_upper_padding, right_lower_padding,\n init_filters, init_hbias, init_vbias)\n\n imgs = cPickle.load(open(\"../data/kyoto_large_train.pkl\", \"r\"))\n img_size = imgs[0].shape[0]\n\n for trial_idx in xrange(trial_num):\n for img_idx in xrange(image_num):\n for batch_idx in xrange(batch_num):\n row_idx = np.arange(0, input_size) + np.random.random_integers(img_size - 2 * filter_size - input_size) + filter_size - 1\n col_idx = np.arange(0, input_size) + np.random.random_integers(img_size - 2 * filter_size - input_size) + filter_size - 1\n #row_idx = np.arange(0, input_size) + 200\n #col_idx = np.arange(0, input_size) + 200\n\n batch_data = imgs[img_idx][row_idx][:,col_idx]\n batch_data = batch_data - batch_data.mean()\n batch_data = 
np.asarray(batch_data.reshape(1, input_size * input_size), dtype=\"float32\")\n \n libnvcrbm.run_batch(trial_idx, img_idx, batch_idx, batch_data)\n\n libnvcrbm.print_result()\n cur_filters = libnvcrbm.get_gpu_filters()\n dump_filter_image(cur_filters, \"../data/kyoto/filters/trial_%d.png\" % trial_idx)\n\n first_layer = {}\n first_layer[\"filters\"] = cur_filters\n first_layer[\"bias\"] = libnvcrbm.get_gpu_hbias()\n cPickle.dump(first_layer, open(\"../data/first_layer.dat\", \"w+\"))", "def train_word2vec(self, size = 50, window = 20, min_count = 5, epochs = 40):\n\n\n # Read the entire previous data for training\n full_data = pd.read_csv(self.path_full_data, encoding = \"ISO-8859-1\")\n\n # Also read the column which we are performing analysis for\n col_data = pd.read_csv(self.path_data_col\n , encoding = \"ISO-8859-1\"\n , usecols = [self.id_col_name, self.col_name])\n \n\n # Clean the data in the column\n col_data[self.col_name] = self.cln.clean(col_data[self.col_name], typo = self.typo_ind)\n col_data.replace(np.nan, '', inplace = True)\n col_name_list = list(col_data[self.col_name].apply(lambda x: str(x).split(' ')))\n\n\n # Make a list of lists of the data\n input_list = list(full_data['response'].apply(lambda x: x.split(' ')))\n input_list = input_list + col_name_list\n\n # Remove the responses having only one or two words\n input_list = [x for x in input_list if len(x) > 1]\n\n # Build vocabulary and train model\n model = gensim.models.Word2Vec(\n input_list,\n size = size,\n window = window,\n min_count = min_count)\n\n model.train(input_list, total_examples = len(input_list), epochs = epochs)\n\n return model", "def hnet_bsd(args, x, train_phase):\n # Sure layers weight & bias\n order = 1\n nf = int(args.n_filters)\n nf2 = int((args.filter_gain)*nf)\n nf3 = int((args.filter_gain**2)*nf)\n nf4 = int((args.filter_gain**3)*nf)\n bs = args.batch_size\n fs = args.filter_size\n nch = args.n_channels\n nr = args.n_rings\n tp = train_phase\n std = args.std_mult\n\n x = tf.reshape(x, shape=[bs,args.height,args.width,1,1,3])\n fm = {}\n\n # Convolutional Layers\n with tf.name_scope('stage1') as scope:\n cv1 = hl.conv2d(x, nf, fs, stddev=std, padding='SAME', n_rings=nr, name='1_1')\n cv1 = hl.non_linearity(cv1, name='1_1')\n\n cv2 = hl.conv2d(cv1, nf, fs, stddev=std, padding='SAME', n_rings=nr, name='1_2')\n cv2 = hl.batch_norm(cv2, tp, name='bn1')\n mags = to_4d(hl.stack_magnitudes(cv2))\n fm[1] = linear(mags, 1, 1, name='sw1')\n\n with tf.name_scope('stage2') as scope:\n cv3 = hl.mean_pooling(cv2, ksize=(1,2,2,1), strides=(1,2,2,1))\n cv3 = hl.conv2d(cv3, nf2, fs, stddev=std, padding='SAME', n_rings=nr, name='2_1')\n cv3 = hl.non_linearity(cv3, name='2_1')\n\n cv4 = hl.conv2d(cv3, nf2, fs, stddev=std, padding='SAME', n_rings=nr, name='2_2')\n cv4 = hl.batch_norm(cv4, train_phase, name='bn2')\n mags = to_4d(hl.stack_magnitudes(cv4))\n fm[2] = linear(mags, 1, 1, name='sw2')\n\n with tf.name_scope('stage3') as scope:\n cv5 = hl.mean_pooling(cv4, ksize=(1,2,2,1), strides=(1,2,2,1))\n cv5 = hl.conv2d(cv5, nf3, fs, stddev=std, padding='SAME', n_rings=nr, name='3_1')\n cv5 = hl.non_linearity(cv5, name='3_1')\n\n cv6 = hl.conv2d(cv5, nf3, fs, stddev=std, padding='SAME', n_rings=nr, name='3_2')\n cv6 = hl.batch_norm(cv6, train_phase, name='bn3')\n mags = to_4d(hl.stack_magnitudes(cv6))\n fm[3] = linear(mags, 1, 1, name='sw3')\n\n with tf.name_scope('stage4') as scope:\n cv7 = hl.mean_pooling(cv6, ksize=(1,2,2,1), strides=(1,2,2,1))\n cv7 = hl.conv2d(cv7, nf4, fs, stddev=std, padding='SAME', 
n_rings=nr, name='4_1')\n cv7 = hl.non_linearity(cv7, name='4_1')\n\n cv8 = hl.conv2d(cv7, nf4, fs, stddev=std, padding='SAME', n_rings=nr, name='4_2')\n cv8 = hl.batch_norm(cv8, train_phase, name='bn4')\n mags = to_4d(hl.stack_magnitudes(cv8))\n fm[4] = linear(mags, 1, 1, name='sw4')\n\n with tf.name_scope('stage5') as scope:\n cv9 = hl.mean_pooling(cv8, ksize=(1,2,2,1), strides=(1,2,2,1))\n cv9 = hl.conv2d(cv9, nf4, fs, stddev=std, padding='SAME', n_rings=nr, name='5_1')\n cv9 = hl.non_linearity(cv9, name='5_1')\n\n cv10 = hl.conv2d(cv9, nf4, fs, stddev=std, padding='SAME', n_rings=nr, name='5_2')\n cv10 = hl.batch_norm(cv10, train_phase, name='bn5')\n mags = to_4d(hl.stack_magnitudes(cv10))\n fm[5] = linear(mags, 1, 1, name='sw5')\n\n fms = {}\n side_preds = []\n xsh = tf.shape(x)\n with tf.name_scope('fusion') as scope:\n for key in fm.keys():\n fms[key] = tf.image.resize_images(fm[key], tf.stack([xsh[1], xsh[2]]))\n side_preds.append(fms[key])\n side_preds = tf.concat(axis=3, values=side_preds)\n\n fms['fuse'] = linear(side_preds, 1, 1, bias_init=0.01, name='side_preds')\n return fms", "def train_sentence_dm(model, sentence, lbls, alpha, work=None, neu1=None, train_words=True, train_lbls=True):\n lbl_indices = [lbl.index for lbl in lbls if lbl is not None]\n lbl_sum = np_sum(model.syn0[lbl_indices], axis=0)\n lbl_len = len(lbl_indices)\n neg_labels = []\n if model.negative:\n # precompute negative labels\n neg_labels = zeros(model.negative + 1)\n neg_labels[0] = 1.\n\n for pos, word in enumerate(sentence):\n if word is None:\n continue # OOV word in the input sentence => skip\n reduced_window = random.randint(model.window) # `b` in the original doc2vec code\n start = max(0, pos - model.window + reduced_window)\n window_pos = enumerate(sentence[start : pos + model.window + 1 - reduced_window], start)\n word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)]\n l1 = np_sum(model.syn0[word2_indices], axis=0) + lbl_sum # 1 x layer1_size\n if word2_indices and model.cbow_mean:\n l1 /= (len(word2_indices) + lbl_len)\n neu1e = train_cbow_pair(model, word, word2_indices, l1, alpha, neg_labels, train_words, train_words)\n if train_lbls:\n model.syn0[lbl_indices] += neu1e\n\n return len([word for word in sentence if word is not None])", "def forward_train(self, imgs, label, token_ids=None, segment_ids=None, input_mask=None, ans_ids=None, ans_mask=None, **kwargs):\n # (batch_size, num_clips*num_crops, channel, num_segments, h, w) -> (batch_size*num_clips*num_crops, channel, num_segments, h, w)\n imgs = imgs.reshape((-1, ) + imgs.shape[2:]) \n if self.from_scratch:\n imgs = imgs / 255.0\n B_text = token_ids.shape[0]\n # text reshape: (batch_size, num_candidates, seq_length) -> (batch_size * num_candidates, seq_length)\n token_ids = token_ids.reshape((-1, ) + token_ids.shape[2:])\n segment_ids = segment_ids.reshape((-1, ) + segment_ids.shape[2:])\n input_mask = input_mask.reshape((-1, ) + input_mask.shape[2:])\n losses = dict()\n visual_token = self.extract_visual_feat(imgs) # b, d, T, h, w\n B, D, T, H, W = visual_token.shape\n if B_text != B:\n visual_token = visual_token.view(B_text, -1, D, T, H, W)\n visual_token = visual_token.mean(dim=1)\n \n # text feature #\n text_out_with_mask = self.text_backbone(token_ids, input_mask)\n text_out_last_hidden_state = text_out_with_mask['last_hidden_state']\n\n # contrastive type finetuning retrieval #\n if self.task == 'retrieval':\n # text_only_out = self.text_backbone(token_ids, input_mask)\n visual_emb, 
text_emb = self.ssl_head(visual_token, text_out_last_hidden_state, input_mask, token_ids)\n nce_loss = self.loss_func(visual_emb, text_emb)\n losses['retrieval_nce_loss'] = nce_loss \n elif self.task == 'video_qa' or self.task == 'FIB':\n B, D, T, H, W = visual_token.shape\n visual_token = visual_token.view(B, D, T, -1).permute(0, 2, 3, 1)\n if hasattr(self.qa_head, 'num_labels'):\n num_choices = self.qa_head.num_labels\n visual_token_all = visual_token\n else:\n num_choices = int(token_ids.shape[0] / B)\n visual_token_all = visual_token.unsqueeze(1).expand(-1, num_choices, -1, -1, -1).flatten(0,1)\n\n output = self.multimodal_backbone(visual_token=visual_token_all, text_input_mask=input_mask, text_input_embeds=text_out_last_hidden_state)\n \n if self.answer_mask:\n mask_idx = torch.where(token_ids == 103)\n itm_output = output['t_last_hidden_state'][mask_idx]\n elif self.answer_cls:\n if 'cls_last_hidden_state' in output:\n itm_output = output['cls_last_hidden_state'].squeeze()\n else:\n itm_output = output['t_last_hidden_state'][:, 0]\n if self.itm_head is not None:\n itm_output = self.itm_head(itm_output)\n\n else:\n all_cls_emb = output['last_hidden_state'][:, 0]\n itm_output = self.itm_head(all_cls_emb)\n \n if self.qa_head is not None:\n final_output = self.qa_head(itm_output).view(-1, num_choices)\n final_label = label\n else:\n final_output = itm_output[:, 1]\n final_label = label\n\n\n qa_loss = self.loss_func(final_output, final_label.view(-1))\n losses['qa_loss'] = qa_loss\n\n\n\n return losses", "def build_character_cnn(model_hyperparameters=None, verbose=None):\r\n if model_hyperparameters is None:\r\n model_hyperparameters = _dutils.load_dictionary('model_hyperparameters.json')\r\n '''\r\n Load hyperparameter-specific values from JSON file.\r\n '''\r\n #The size of the characater vocabulary\r\n vocabulary_size = model_hyperparameters.get(\"vocabulary_size\")\r\n #The max length of the text. Set as 1014 in the original.\r\n text_length = model_hyperparameters.get(\"text_length\")\r\n #Number of filters for each convolutional layer\r\n num_filters = model_hyperparameters.get(\"num_filters\")\r\n #The threshold for the ReLU activation layers\r\n threshold = model_hyperparameters.get(\"relu_threshold\")\r\n #Dropout probability for Dropout layers\r\n dropout_p = model_hyperparameters.get(\"dropout_percent\")\r\n #Embedding output dimension. Implementation sets it equal to vocabulary_size\r\n embed_dim = model_hyperparameters.get(\"embedding_dimension\")\r\n '''\r\n Values below specify the architecture.\r\n These aren't stored in the JSON file due to\r\n architectutre constraints with layers and\r\n kernel sizes.\r\n '''\r\n #The number of units for each dense layer minus output layer\r\n fully_connected_layers = [128,64]\r\n '''\r\n conv_layers is a list of pairs.\r\n First component refers to kernel size.\r\n Second component refers to the size of\r\n the MaxPooling1D layer (-1 indicates said layer is not present).\r\n '''\r\n conv_layers = [[7, 3], [3,-1], [3,-1], [3,-1], [3, 3]]\r\n #Input layer\r\n inputs = Input(shape=(text_length,), name='sent_input', dtype='int32')\r\n #Embedding layers\r\n x = Embedding(vocabulary_size + 1, embed_dim, input_length=text_length, mask_zero=True)(inputs)\r\n #Convolution layers\r\n '''\r\n First Conv1D layer + MaxPooling is separate in case\r\n changes are made upstream. 
Also it was used to test out\r\n TimeDistributed functionality.\r\n '''\r\n x = (Convolution1D(num_filters, 7))(x)\r\n x = (MaxPooling1D(3))(x)\r\n for cl in conv_layers:\r\n x = (Convolution1D(num_filters, cl[0]))(x)\r\n x = ThresholdedReLU(threshold)(x)\r\n if cl[1] != -1:\r\n x = (MaxPooling1D(cl[1]))(x)\r\n\r\n x = Flatten()(x)\r\n # #Fully connected layers\r\n for fl in fully_connected_layers:\r\n '''\r\n Original architecture did not use L2 regularization.\r\n However, empirical results show that, for my dataset\r\n it works well in handling overfitting.\r\n '''\r\n x = Dense(fl, kernel_regularizer=regularizers.l2(0.0001))(x)\r\n x = ThresholdedReLU(threshold)(x)\r\n '''\r\n Original architecture had dropout at 50%.\r\n This seemed to be too high for my dataset, and\r\n it resulted in underfitting.\r\n '''\r\n x = Dropout(dropout_p)(x)\r\n # #Output layer\r\n predictions = Dense(vocabulary_size, activation='softmax')(x)\r\n # Build and compile model\r\n model = Model(inputs=inputs, outputs=predictions) \r\n if verbose:\r\n model.summary()\r\n return model", "def conv(self,\n x,\n k_h,\n k_w,\n c_o,\n s_h,\n s_w,\n name,\n relu,\n group=1,\n bias_term=False,\n padding=\"SAME\",\n trainable=True):\n with tf.name_scope(name), tf.variable_scope(name):\n # Get the input channel\n c_i = x.get_shape()[-1]/group\n # Create the weights, with shape [k_h, k_w, c_i, c_o]\n weights = self.make_cpu_variables(\"weights\", [k_h, k_w, c_i, c_o], trainable=trainable)\n # Create a function for convolution calculation\n def conv2d(i, w):\n return tf.nn.conv2d(i, w, [1, s_h, s_w, 1], padding)\n # If we don't need to divide this convolutional layer\n if group == 1:\n outputs = conv2d(x, weights)\n # If we need to divide this convolutional layer\n else:\n # Split the input and weights\n group_inputs = tf.split(x, group, 3, name=\"split_inputs\")\n group_weights = tf.split(weights, group, 3, name=\"split_weights\")\n group_outputs = [conv2d(i, w) for i, w in zip(group_inputs, group_weights)]\n # Concatenate the groups\n outputs = tf.concat(group_outputs, 3)\n if bias_term:\n # Create the biases, with shape [c_o]\n biases = self.make_cpu_variables(\"biases\", [c_o], trainable=trainable)\n # Add the biases\n outputs = tf.nn.bias_add(outputs, biases)\n if relu:\n # Nonlinear process\n outputs = tf.nn.relu(outputs)\n # Return layer's output\n return outputs", "def __init__(self, embed_size, dropout=0.5, image_model='resnet101', simple=False, pretrained=True):\n super(EncoderCNNFeatures, self).__init__()\n resnet = globals()[image_model](pretrained=pretrained)\n\n self.linear = nn.Sequential(nn.Conv2d(resnet.fc.in_features, embed_size, kernel_size=1, padding=0),\n nn.Dropout2d(dropout))\n\n self.simple = simple\n if simple:\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))", "def bottleneck( inputs, filters, kernel, e, s, squeeze, nl):\n def _relu6( x):\n \"\"\"Relu 6\n \"\"\"\n return K.relu(x, max_value=6.0)\n def _hard_swish( x):\n \"\"\"Hard swish\n \"\"\"\n return x * K.relu(x + 3.0, max_value=6.0) / 6.0\n \n def _return_activation(x, nl):\n \"\"\"Convolution Block\n This function defines a activation choice.\n\n # Arguments\n x: Tensor, input tensor of conv layer.\n nl: String, nonlinearity activation type.\n\n # Returns\n Output tensor.\n \"\"\"\n if nl == 'HS':\n x = Activation(_hard_swish)(x)\n if nl == 'RE':\n x = Activation(_relu6)(x)\n return x\n \n def _conv_block( inputs, filters, kernel, strides, nl):\n \"\"\"Convolution Block\n This function defines a 2D convolution operation with BN and 
activation.\n\n # Arguments\n inputs: Tensor, input tensor of conv layer.\n filters: Integer, the dimensionality of the output space.\n kernel: An integer or tuple/list of 2 integers, specifying the\n width and height of the 2D convolution window.\n strides: An integer or tuple/list of 2 integers,\n specifying the strides of the convolution along the width and height.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n nl: String, nonlinearity activation type.\n # Returns\n Output tensor.\n \"\"\"\n channel_axis = 1 if K.image_data_format() == 'channels_first' else -1\n x = Conv2D(filters, kernel, padding='same', strides=strides)(inputs)\n x = BatchNormalization(axis=channel_axis)(x)\n return _return_activation(x, nl)\n\n def _squeeze( inputs):\n \"\"\"Squeeze and Excitation.\n This function defines a squeeze structure.\n # Arguments\n inputs: Tensor, input tensor of conv layer.\n \"\"\"\n input_channels = int(inputs.shape[-1])\n\n x = GlobalAveragePooling2D()(inputs)\n x = Dense(input_channels, activation='relu')(x)\n x = Dense(input_channels, activation='hard_sigmoid')(x)\n return x\n \n \n channel_axis = 1 if K.image_data_format() == 'channels_first' else -1\n input_shape = K.int_shape(inputs)\n tchannel = input_shape[channel_axis] * e\n x = _conv_block(inputs, tchannel, (1, 1), (1, 1), nl)\n\n x = DepthwiseConv2D(kernel, strides=(s, s), depth_multiplier=1, padding='same')(x)\n x = BatchNormalization(axis=channel_axis)(x)\n if squeeze:\n x = Lambda(lambda x: x * _squeeze(x))(x)\n x = _return_activation(x, nl)\n x = Conv2D(filters, (1, 1), strides=(1, 1), padding='same')(x)\n x = BatchNormalization(axis=channel_axis)(x)\n return x", "def build_text_gen(len_encoded_str, len_padded_str=300, lr=1e-4):\n\n # First: let's build the Image Processing head of the model\n image_input_dimension = (1500, 1500, 1)\n image_processing_head_input = keras.Input(shape=image_input_dimension, name='Generator_Input')\n image_processing_head = layers.SeparableConv2D(filters=64, kernel_size=3, strides=(2, 2), activation='relu',\n name='Image_Processing_Conv2D_1')(image_processing_head_input)\n image_processing_head = layers.Dropout(0.1, name='Image_Processing_Dropout_1')(image_processing_head)\n image_processing_head = layers.SeparableConv2D(filters=128, kernel_size=3, strides=(2, 2), activation='relu',\n name='Image_Processing_Conv2D_2')(image_processing_head)\n image_processing_head = layers.Dropout(0.1, name='Image_Processing_Dropout_2')(image_processing_head)\n image_processing_head = layers.SeparableConv2D(filters=512, kernel_size=3, strides=(2, 2), activation='relu',\n name='Image_Processing_Conv2D_3')(image_processing_head)\n image_processing_head = layers.Dropout(0.1, name='Image_Processing_Dropout_3')(image_processing_head)\n image_processing_head = layers.SeparableConv2D(filters=1024, kernel_size=3, strides=(2, 2), activation='relu',\n name='Image_Processing_Conv2D_4')(image_processing_head)\n image_processing_head = layers.Dropout(0.1, name='Image_Processing_Dropout_4')(image_processing_head)\n image_processing_head = layers.SeparableConv2D(filters=len_padded_str, kernel_size=3,\n strides=(2, 2), activation='relu',\n name='Image_Processing_Conv2D_5')(image_processing_head)\n\n image_processed_shape = K.int_shape(image_processing_head)\n image_processing_head = layers.Reshape(target_shape=(image_processed_shape[1] * image_processed_shape[2],\n image_processed_shape[3]))(image_processing_head)\n\n image_processing_head = tf.transpose(image_processing_head, perm=[0, 2, 
1])\n\n image_processing_head = layers.LSTM(units=1024, return_sequences=True,\n name='Image_Processing_LSTM_1')(image_processing_head)\n\n # Fifth: Join outputs from the input heads and process into encoded strings\n combined_input_processed_1_1 = layers.LSTM(units=1024, return_sequences=True, dropout=0.1,\n name='Combined_Input_LSTM_1')(image_processing_head)\n combined_input_processed_1_2 = layers.LSTM(units=1024, return_sequences=True, dropout=0.1,\n name='Combined_Input_LSTM_2')(combined_input_processed_1_1)\n combined_input_processed_1_3 = layers.LSTM(units=1024, return_sequences=True, dropout=0.1,\n name='Combined_Input_LSTM_3')(combined_input_processed_1_2)\n combined_input_processed_1_4 = layers.LSTM(units=1024, return_sequences=True, dropout=0.1,\n name='Combined_Input_LSTM_4')(combined_input_processed_1_3)\n\n # Sixth: Define each output tail and compile the model\n inchi_name_output_str = layers.LSTM(units=len_encoded_str, activation='tanh', return_sequences=True,\n name='InChI_Name_Str')(combined_input_processed_1_4)\n\n inchi_name_output_num = layers.LSTM(units=1, activation=None, return_sequences=True,\n name='InChI_Name_Num')(combined_input_processed_1_4)\n\n inchi_name_model = models.Model(inputs=image_processing_head_input,\n outputs=[inchi_name_output_str, inchi_name_output_num], name=\"InChI_Name_Generator\")\n\n # optimizer = tf.keras.optimizers.RMSprop(lr)\n # losses = {\n # 'InChI_Name_Str': tf.losses.BinaryCrossentropy(),\n # 'InChI_Name_Num': tf.losses.MeanSquaredError()\n # }\n # losses_weights = {\"InChI_Name_Str\": 1.0, \"InChI_Name_Num\": 0.5}\n #\n # inchi_name_model.compile(optimizer=optimizer, loss=losses, loss_weights=losses_weights)\n\n print(\"\\n\\n\")\n inchi_name_model.summary()\n print(\"\\n\\n\")\n\n return inchi_name_model", "def __init__(self, logger, p, h, \n save_folder, folder_name,\n word_embedding_weights, \n filters=[3,4,5,6,7], n_filter_out=30,\n capsule_num=5, capsule_dim=6, routings=3, \n lr=None, dr_rate=0.2, \n voca_size=50003, lstm_layer_num=1, lstm_hidden_unit=256, draw_summary_network=True):\n self.logger = logger\n self.p = p\n self.h = h\n self.save_folder = save_folder\n self.folder_name = folder_name\n self.word_embedding_weights = word_embedding_weights\n self.filters = filters\n self.n_filter_out = n_filter_out \n self.capsule_num = capsule_num\n self.capsule_dim = capsule_dim\n self.routings = routings\n self.lr = lr\n self.dr_rate = dr_rate \n self.voca_size = voca_size\n self.lstm_layer_num = lstm_layer_num\n self.lstm_hidden_unit = lstm_hidden_unit\n self.draw_summary_network = draw_summary_network\n \n self.loss = []\n self.metrics = {}\n self.optimizer = adam #adam rmsprop", "def conv_layer(n_in_filters, n_filters, ker_size, stride=1, \n depthwise=False, zero_bn=False, act=True) :\n bn = nn.BatchNorm2d(n_filters)\n nn.init.constant_(bn.weight, 0. 
if zero_bn else 1.)\n conv = nn.Conv2d(n_in_filters, n_filters, ker_size, stride=stride,padding=ker_size//2, \n bias=False,groups = n_in_filters if depthwise else 1)\n layer = [conv, bn]\n if act: layer += [Swish()]\n return nn.Sequential(*layer)", "def SmallResNet(n = 9, filters = [16, 32, 64],\n include_top=True, weights=None,\n input_tensor=None, input_shape=None,\n pooling='avg', regularizer=regularizers.l2(0.0002), activation = 'relu',\n top_activation='softmax',\n conv_shortcut=False, bn=True,\n classes=100, name=None):\n \n # Determine proper input shape\n if input_shape is None:\n if K.image_data_format() == 'channels_first':\n input_shape = (3, 32, 32) if include_top and pooling is None else (3, None, None)\n else:\n input_shape = (32, 32, 3) if include_top and pooling is None else (None, None, 3)\n\n # Build network\n if input_tensor is None:\n img_input = Input(shape=input_shape)\n else:\n if not K.is_keras_tensor(input_tensor):\n img_input = Input(tensor=input_tensor, shape=input_shape)\n else:\n img_input = input_tensor\n if K.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n\n x = Conv2D(filters[0], (3, 3), padding='same', name='conv0', kernel_regularizer = regularizer)(img_input)\n if bn:\n x = BatchNormalization(axis=bn_axis, name='bn0')(x)\n x = Activation(activation)(x)\n \n x = unit(x, [filters[0], filters[0]], n, '1-', kernel_size = 3, stride = 1, regularizer=regularizer, activation=activation, conv_shortcut=conv_shortcut, bn=bn)\n for i in range(1, len(filters)):\n x = unit(x, [filters[i-1], filters[i]], n, str(i+1)+'-', kernel_size = 3, stride = 2, regularizer=regularizer, activation=activation, conv_shortcut=conv_shortcut, bn=bn)\n\n if pooling == 'avg':\n x = GlobalAveragePooling2D(name='avg_pool')(x)\n elif pooling == 'max':\n x = GlobalMaxPooling2D(name='max_pool')(x)\n\n if include_top:\n x = Dense(classes, activation=top_activation, name = 'embedding' if top_activation is None else 'prob', kernel_regularizer = regularizer)(x)\n\n # Ensure that the model takes into account\n # any potential predecessors of `input_tensor`.\n if input_tensor is not None:\n inputs = get_source_inputs(input_tensor)\n else:\n inputs = img_input\n # Create model.\n model = Model(inputs, x, name='cifar-resnet{}'.format(2*len(filters)*n) if name is None else name)\n\n # load weights\n if weights is not None:\n model.load_weights(weights)\n\n if K.image_data_format() == 'channels_first' and K.backend() == 'tensorflow':\n warnings.warn('You are using the TensorFlow backend, yet you '\n 'are using the Theano '\n 'image data format convention '\n '(`image_data_format=\"channels_first\"`). '\n 'For best performance, set '\n '`image_data_format=\"channels_last\"` in '\n 'your Keras config '\n 'at ~/.keras/keras.json.')\n return model", "def __init__(self, \n input_dim=(3, 32, 32), \n num_filters = (32, 64), filter_sizes = (7, 7), conv_param = {\"stride\": 1, \"pad\": 3},\n hidden_dim= 100, num_classes=10, weight_scale=1e-3, reg=0.0,\n dtype=np.float32\n ):\n self.params = {}\n self.reg = reg\n self.dtype = dtype\n self.conv_param = conv_param\n self.filter_sizes = filter_sizes\n self.num_layers = 4\n ############################################################################\n # TODO: Initialize weights and biases for the three-layer convolutional #\n # network. Weights should be initialized from a Gaussian with standard #\n # deviation equal to weight_scale; biases should be initialized to zero. 
#\n # All weights and biases should be stored in the dictionary self.params. #\n ############################################################################\n \n C, H, W = input_dim\n filter_size1, filter_size2 = filter_sizes\n num_filters1, num_filters2 = num_filters\n\n # conv layer 1: (N, C, H, W) -> (N, num_filters1, H, W)\n self.params['W1'] = np.random.normal(0, weight_scale, [num_filters1, C, filter_size1, filter_size1]) # square filter\n self.params['b1'] = np.zeros((num_filters1, ))\n self.params[\"sbnGamma1\"] = np.ones((num_filters1, )) # scale parameter one for each color channel during spatial batch norm\n self.params[\"sbnBeta1\"] = np.zeros((num_filters1, )) # shift parameter one for each color channel during spatial batch norm\n\n # conv layer 2: (N, num_filters1, H, W) -> (N, num_filters2, H, W)\n self.params['W2'] = np.random.normal(0, weight_scale, [num_filters2, num_filters1, filter_size2, filter_size2]) # square filter\n self.params['b2'] = np.zeros((num_filters2, ))\n self.params[\"sbnGamma2\"] = np.ones((num_filters2, ))\n self.params[\"sbnBeta2\"] = np.zeros((num_filters2, ))\n\n # (2, 2, 2) maxpool: (N, num_filters2, H, W) -> (N, num_filters2, H/2. W/2)\n # maxpool layer contributes nothing to self.params that need to be updated.\n self.maxpool_params = {\"pool_height\": 2, \"pool_width\": 2, \"stride\": 2}\n\n # affine layer 3: (N, num_filters2, H/2. W/2) -> (N, hidden_dim)\n self.params['W3'] = np.random.normal(0, weight_scale, [num_filters2 * (H / 2) * (W / 2), hidden_dim])\n self.params['b3'] = np.zeros((hidden_dim, ))\n self.params[\"bnGamma3\"] = np.ones((hidden_dim, ))\n self.params[\"bnBeta3\"] = np.zeros((hidden_dim, ))\n\n # output affine - sfmx layer 4: (N, hidden_dim) -> (N, num_classes)\n self.params['W4'] = np.random.normal(0, weight_scale, [hidden_dim, num_classes])\n self.params['b4'] = np.zeros((num_classes, ))\n\n self.bn_params = [{\"mode\": \"train\"} for _ in range(self.num_layers)]\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n for k, v in self.params.iteritems():\n self.params[k] = v.astype(dtype)", "def create_cnn(num_half_rows, num_half_columns, num_channels):\n\n error_checking.assert_is_integer(num_half_rows)\n error_checking.assert_is_integer(num_half_columns)\n error_checking.assert_is_integer(num_channels)\n\n error_checking.assert_is_greater(num_half_rows, 0)\n error_checking.assert_is_greater(num_half_columns, 0)\n error_checking.assert_is_greater(num_channels, 0)\n\n regularizer_object = keras.regularizers.l1_l2(l1=L1_WEIGHT, l2=L2_WEIGHT)\n\n num_grid_rows = 2 * num_half_rows + 1\n num_grid_columns = 2 * num_half_columns + 1\n input_layer_object = keras.layers.Input(\n shape=(num_grid_rows, num_grid_columns, num_channels)\n )\n\n current_num_filters = None\n current_layer_object = None\n\n # Add convolutional layers.\n for _ in range(NUM_CONV_LAYER_SETS):\n for _ in range(NUM_CONV_LAYERS_PER_SET):\n\n if current_num_filters is None:\n current_num_filters = (\n num_channels * NUM_CHANNELS_TO_FIRST_NUM_FILTERS)\n this_input_layer_object = input_layer_object\n\n else:\n current_num_filters *= 2\n this_input_layer_object = current_layer_object\n\n current_layer_object = keras.layers.Conv2D(\n filters=current_num_filters,\n kernel_size=(NUM_CONV_FILTER_ROWS, NUM_CONV_FILTER_COLUMNS),\n strides=(1, 1), padding='valid', data_format='channels_last',\n dilation_rate=(1, 1), activation=None, 
use_bias=True,\n kernel_initializer='glorot_uniform', bias_initializer='zeros',\n kernel_regularizer=regularizer_object\n )(this_input_layer_object)\n\n current_layer_object = keras.layers.LeakyReLU(\n alpha=SLOPE_FOR_RELU\n )(current_layer_object)\n\n if CONV_LAYER_DROPOUT_FRACTION is not None:\n current_layer_object = keras.layers.Dropout(\n rate=CONV_LAYER_DROPOUT_FRACTION\n )(current_layer_object)\n\n if USE_BATCH_NORMALIZATION:\n current_layer_object = keras.layers.BatchNormalization(\n axis=-1, center=True, scale=True\n )(current_layer_object)\n\n current_layer_object = keras.layers.MaxPooling2D(\n pool_size=(NUM_POOLING_ROWS, NUM_POOLING_COLUMNS),\n strides=(NUM_POOLING_ROWS, NUM_POOLING_COLUMNS),\n padding='valid', data_format='channels_last'\n )(current_layer_object)\n\n these_dimensions = numpy.array(\n current_layer_object.get_shape().as_list()[1:], dtype=int)\n num_features = numpy.prod(these_dimensions)\n\n current_layer_object = keras.layers.Flatten()(current_layer_object)\n\n # Add intermediate dense layers.\n _, num_outputs_by_dense_layer = (\n architecture_utils.get_dense_layer_dimensions(\n num_input_units=num_features, num_classes=NUM_CLASSES,\n num_dense_layers=NUM_DENSE_LAYERS)\n )\n\n for k in range(NUM_DENSE_LAYERS - 1):\n current_layer_object = keras.layers.Dense(\n num_outputs_by_dense_layer[k], activation=None, use_bias=True,\n kernel_initializer='glorot_uniform', bias_initializer='zeros',\n kernel_regularizer=regularizer_object\n )(current_layer_object)\n\n current_layer_object = keras.layers.LeakyReLU(\n alpha=SLOPE_FOR_RELU\n )(current_layer_object)\n\n if DENSE_LAYER_DROPOUT_FRACTION is not None:\n current_layer_object = keras.layers.Dropout(\n rate=DENSE_LAYER_DROPOUT_FRACTION\n )(current_layer_object)\n\n if USE_BATCH_NORMALIZATION:\n current_layer_object = keras.layers.BatchNormalization(\n axis=-1, center=True, scale=True\n )(current_layer_object)\n\n # Add output layer (also dense).\n current_layer_object = keras.layers.Dense(\n NUM_CLASSES, activation=None, use_bias=True,\n kernel_initializer='glorot_uniform', bias_initializer='zeros',\n kernel_regularizer=regularizer_object\n )(current_layer_object)\n\n current_layer_object = keras.layers.Activation(\n 'softmax'\n )(current_layer_object)\n\n if DENSE_LAYER_DROPOUT_FRACTION is not None and NUM_DENSE_LAYERS == 1:\n current_layer_object = keras.layers.Dropout(\n rate=DENSE_LAYER_DROPOUT_FRACTION\n )(current_layer_object)\n\n # Put the whole thing together and compile.\n cnn_model_object = keras.models.Model(\n inputs=input_layer_object, outputs=current_layer_object)\n cnn_model_object.compile(\n loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adam(),\n metrics=LIST_OF_METRIC_FUNCTIONS)\n\n cnn_model_object.summary()\n return cnn_model_object", "def model(data, train=None):\n\n conv = tf.nn.conv2d(data, layer1_filter, [1, 1, 1, 1], padding='SAME')\n hidden = tf.nn.relu(conv + layer1_biases)\n pool = tf.nn.max_pool(hidden, [1,2,2,1], [1,1,1,1], 'SAME')\n if train:\n pool = tf.nn.dropout(pool,.6)\n\n conv = tf.nn.conv2d(pool, layer2_filter, [1, 1, 1, 1], padding='SAME')\n hidden = tf.nn.relu(conv + layer2_biases)\n pool = tf.nn.max_pool(hidden, [1,2,2,1], [1,2,2,1], 'SAME')\n if train:\n pool = tf.nn.dropout(pool,.6)\n\n conv = tf.nn.conv2d(pool, layer3_filter, [1, 1, 1, 1], padding='SAME')\n hidden = tf.nn.relu(conv + layer3_biases)\n pool = tf.nn.max_pool(hidden, [1,2,2,1], [1,1,1,1], 'SAME')\n if train:\n pool = tf.nn.dropout(pool,.6)\n\n conv = tf.nn.conv2d(pool, layer6_filter, 
[1, 1, 1, 1], padding='SAME')\n hidden = tf.nn.relu(conv + layer6_biases)\n pool = tf.nn.max_pool(hidden, [1,2,2,1], [1,2,2,1], 'SAME')\n if train:\n pool = tf.nn.dropout(pool,.6)\n\n conv = tf.nn.conv2d(pool, layer7_filter, [1, 1, 1, 1], padding='SAME')\n hidden = tf.nn.relu(conv + layer7_biases)\n pool = tf.nn.max_pool(hidden, [1,2,2,1], [1,1,1,1], 'SAME')\n if train:\n pool = tf.nn.dropout(pool,.6)\n\n conv = tf.nn.conv2d(pool, layer8_filter, [1, 1, 1, 1], padding='SAME')\n hidden = tf.nn.relu(conv + layer8_biases)\n pool = tf.nn.max_pool(hidden, [1,2,2,1], [1,2,2,1], 'SAME')\n if train:\n pool = tf.nn.dropout(pool,.6)\n\n shape = pool.get_shape().as_list()\n reshape = tf.reshape(pool, [shape[0], shape[1] * shape[2] * shape[3]])\n hidden = tf.nn.relu(tf.matmul(reshape, layer4_weights) + layer4_biases)\n\n return (tf.matmul(hidden, layer5_weights1) + layer5_biases1), (tf.matmul(hidden, layer5_weights2) + layer5_biases2), \\\n (tf.matmul(hidden, layer5_weights3) + layer5_biases3), (tf.matmul(hidden, layer5_weights4) + layer5_biases4), \\\n (tf.matmul(hidden, layer5_weights5) + layer5_biases5)", "def convnet_layers( inputs, widths, mode ):\n\n training = (mode == \"train\")\n \n with tf.variable_scope( \"convnet\" ): # h,w\n \n #print(inputs.shape)\n x = conv_layer( inputs, layer_params[0], training ) \n #print(x.shape)\n x = conv_layer( x, layer_params[1], training ) \n #print(x.shape)\n x = pool_layer( x, 2, 'valid', 'pool2' )\n #print(x.shape)\n x = conv_layer( x, layer_params[2], training ) \n x = conv_layer( x, layer_params[3], training )\n #print(x.shape)\n x = pool_layer( x, 2, 'valid', 'pool4' )\n #print(x.shape)\n x = conv_layer( x, layer_params[4], training ) \n x = conv_layer( x, layer_params[5], training )\n #print(x.shape)\n x = pool_layer( x, 2, 'valid', 'pool6') \n #print(x.shape)\n x = conv_layer( x, layer_params[6], training ) \n x = conv_layer( x, layer_params[7], training )\n \n x = tf.layers.max_pooling2d( x, [2, 1], [2, 1], \n padding='valid', \n name='pool8' ) \n\n #print(x.shape)\n\n # squeeze row dim\n x = tf.squeeze( x, axis=1, name='features' )\n\n #print(x.shape)\n\n sequence_length = get_sequence_lengths( widths ) \n\n return x, sequence_length", "def main():\n\n gpu_id = 1\n d_batch = 64\n d_embed = 256\n d_hidden = 256\n d_image_size = 256\n device = torch.device(f'cuda:{gpu_id}' if torch.cuda.is_available() else 'cpu')\n dataset, train_loader = get_default_flickr30k_loader(d_batch=d_batch, d_image_size=d_image_size)\n model = Img2Txt(dataset.d_vocab, d_embed, d_hidden, dataset.start_token, dataset.end_token).to(device)\n\n train(model, dataset, train_loader, device)", "def __init__(self, E, U, height, width, filter_hs, conv_non_linear,\n hidden_units, batch_size, non_static, dropout_rates,subspace_size=None,\n activations=[Iden]):\n rng = np.random.RandomState(3435)\n feature_maps = hidden_units[0]\n self.batch_size = batch_size\n\n # define model architecture\n self.index = T.lscalar()\n self.x = T.matrix('x') \n self.y = T.ivector('y') \n self.Words = theano.shared(value=E, name=\"Words\") \n self.Users = None \n self.u = None\n self.subspace_size = subspace_size\n zero_vec_tensor = T.vector()\n self.zero_vec = np.zeros(width)\n # reset Words to 0?\n self.set_zero = theano.function([zero_vec_tensor],\n updates=[(self.Words, T.set_subtensor(self.Words[0,:],zero_vec_tensor))],\n allow_input_downcast=True)\n # inputs to the ConvNet go to all convolutional filters:\n layer0_input = self.Words[T.cast(self.x.flatten(), dtype=\"int32\")].reshape(\n 
(self.x.shape[0], 1, self.x.shape[1], self.Words.shape[1]))\n self.conv_layers = [] \n \n # outputs of convolutional filters\n layer1_inputs = []\n image_shape = (batch_size, 1, height, width)\n filter_w = width \n for filter_h in filter_hs: \n filter_shape = (feature_maps, 1, filter_h, filter_w)\n pool_size = (height-filter_h+1, width-filter_w+1)\n conv_layer = LeNetConvPoolLayer(rng, input=layer0_input,\n image_shape=image_shape,\n filter_shape=filter_shape,\n poolsize=pool_size,\n non_linear=conv_non_linear)\n layer1_input = conv_layer.output.flatten(2)\n self.conv_layers.append(conv_layer)\n layer1_inputs.append(layer1_input)\n # inputs to the MLP\n layer1_input = T.concatenate(layer1_inputs, 1)\n if U is not None:\n print \"Will use user embeddings\"\n self.u = T.ivector('u')\n self.Users = theano.shared(value=U, name=\"Users\")\n them_users = self.Users[self.u]\n if self.subspace_size:\n print \"and subspace\"\n # set_trace()\n self.subspace = HiddenLayer(rng, them_users, U.shape[1], subspace_size, Sigmoid)\n self.peep = theano.function([self.x, self.u],[self.subspace.output,layer1_input],allow_input_downcast=True)\n\n layer1_input = T.concatenate((layer1_input,T.nnet.sigmoid(self.subspace.output)),1)\n layer_sizes = [feature_maps*len(filter_hs)+subspace_size] \n # layer1_input = T.concatenate((layer1_input,them_users),1)\n # layer_sizes = [feature_maps*len(filter_hs)+U.shape[1]]\n\n else:\n layer1_input = T.concatenate((layer1_input,them_users),1)\n layer_sizes = [feature_maps*len(filter_hs)+U.shape[1]]\n\n else:\n print \"NO user embeddings\"\n layer_sizes = [feature_maps*len(filter_hs)]\n layer_sizes += hidden_units[1:]\n \n super(ConvNet, self).__init__(rng, input=layer1_input,\n layer_sizes=layer_sizes,\n activations=activations,\n dropout_rates=dropout_rates)\n\n # add parameters from convolutional layers\n for conv_layer in self.conv_layers:\n self.params += conv_layer.params\n if non_static:\n # if word vectors are allowed to change, add them as model parameters\n self.params += [self.Words]\n if U is not None:\n # if self.subspace_size is None:\n self.params += [self.Users]", "def conv(x, filter_height, filter_width, num_filters, stride_y, stride_x, name,\n padding='SAME', trainable=True, log_weights=False):\n # Get number of input channels\n input_channels = int(x.get_shape()[-1])\n\n # Create lambda function for the convolution\n\n with tf.variable_scope(name) as scope:\n\n regularizer = tf.contrib.layers.l2_regularizer(scale=0.01)\n\n weights = tf.get_variable(name='weights',\n shape=[filter_height, filter_width,\n input_channels, num_filters],\n initializer=tf.glorot_uniform_initializer(),\n regularizer=regularizer,\n trainable=trainable)\n\n biases = tf.get_variable(name='biases',\n shape=[num_filters],\n initializer=tf.zeros_initializer(),\n trainable=trainable)\n\n out = tf.nn.conv2d(x, weights, strides=[1, stride_y, stride_x, 1],\n padding=padding)\n # Add biases\n out = tf.nn.bias_add(out, biases)\n\n # Apply relu function\n out = tf.nn.relu(out, name=scope.name)\n\n if log_weights == True:\n tf.summary.image('weights', weights[tf.newaxis,:,:,0,0,tf.newaxis])\n tf.summary.histogram('weights', weights)\n tf.summary.histogram('biases', biases)\n\n return out, weights, biases", "def __init__(self, embeddings, embeddings_freeze, slen, output_size,\n dropout_p=0.1,\n kernel_sizes=(3, 4, 5),\n channels_outs=(100, 100, 100),\n hidden_size=100,\n activation='relu',\n alpha_dropout=False):\n super(TextCNN, self).__init__()\n self.nk = len(kernel_sizes)\n self.dim_e = 
embeddings.shape[1]\n self.dim_sum_filter = sum(channels_outs) # sum of all channels_out\n self.hidden_size = hidden_size\n\n if activation == 'relu':\n activation_function = F.relu\n elif activation == 'elu':\n activation_function = F.elu\n elif activation == 'gelu':\n activation_function = F.gelu\n\n # embedding\n self.embed = nn.Embedding.from_pretrained(torch.Tensor(embeddings),\n freeze=embeddings_freeze)\n\n # Convolution Block\n # by default pytorch uses Lecun Intialization for convolutions\n self.conv_blocks = List([ConvBlock(self.dim_e, slen, f, k, activation)\n for k, f in zip(kernel_sizes, channels_outs)])\n\n # dropout\n self.dropout = None\n self.dropout2 = None\n if alpha_dropout:\n self.dropout = nn.AlphaDropout(dropout_p)\n else:\n self.dropout = nn.Dropout(dropout_p)\n\n # a fc hidden layer to squeeze into a desired size\n if hidden_size > 0:\n self.fc = nn.Linear(self.dim_sum_filter, self.hidden_size)\n if alpha_dropout:\n self.dropout2 = nn.AlphaDropout(dropout_p)\n else:\n self.dropout2 = nn.Dropout(dropout_p)\n self.fc_act = activation_function\n # output\n self.output = nn.Linear(self.hidden_size, output_size)\n # no squeezing\n else:\n self.output = nn.Linear(self.dim_sum_filter, output_size)", "def square_attack_linf_old(sess, model, x, y, eps, n_iters, p_init, print_every=50):\n np.random.seed(0) # important to leave it here as well\n min_val, max_val = -0.5, 0.5 \n h, w, c = x.shape[1:]\n n_features = c*h*w\n n_ex_total = x.shape[0]\n\n test_in = tf.placeholder(tf.float32, (1, h, w, c), 'x')\n test_pred = model.predict(test_in)\n \n \n # Vertical stripes initialization\n x_best = np.clip(x + np.random.choice([-eps, eps], size=[x.shape[0], 1, w, c]), min_val, max_val)\n # print(x_best.shape)\n # print('y', y[0])\n logits = sess.run(test_pred, feed_dict={test_in: x_best})[0]\n margin_min = loss(y[0], logits)\n # print('logits', logits)\n print(margin_min)\n n_queries = np.ones(x.shape[0]) # ones because we have already used 1 query\n # print('n_queries', n_queries)\n time_start = time.time()\n # metrics = np.zeros([n_iters, 7])\n for i_iter in range(n_iters - 1):\n idx_to_fool = margin_min > 0\n x_curr, x_best_curr = x, x_best\n y_curr, margin_min_curr = y, margin_min\n deltas = x_best_curr - x_curr\n\n p = p_selection(p_init, i_iter, n_iters)# 1/n_features#\n \n for i_img in range(x_best_curr.shape[0]):\n s = int(round(np.sqrt(p * n_features / c)))\n s = min(max(s, 1), h-1) # at least c x 1 x 1 window is taken and at most c x h-1 x h-1\n center_h = np.random.randint(0, h - s)\n center_w = np.random.randint(0, w - s)\n\n x_curr_window = x_curr[i_img, center_h:center_h+s, center_w:center_w+s, :]\n x_best_curr_window = x_best_curr[i_img, center_h:center_h+s, center_w:center_w+s, :]\n # prevent trying out a delta if it doesn't change x_curr (e.g. 
an overlapping patch)\n while np.sum(np.abs(np.clip(x_curr_window + deltas[i_img, center_h:center_h+s, center_w:center_w+s, :], \n min_val, max_val)\n - x_best_curr_window) \n < 10**-7) == c*s*s:\n # the updates are the same across all elements in the square\n deltas[i_img, center_h:center_h+s, center_w:center_w+s, :] = np.random.choice([-eps, eps], size=[s, s, c])\n\n x_new = np.clip(x_curr + deltas, min_val, max_val)\n\n logits = sess.run(test_pred, feed_dict={test_in: x_new})[0]\n margin = loss(y_curr[0], logits)\n # print('margin',margin, 'logits', logits)\n # print(margin)\n idx_improved = margin < margin_min_curr\n # print('====idx', margin_min[idx_to_fool], margin_min)\n margin_min[idx_to_fool] = idx_improved * margin + ~idx_improved * margin_min_curr\n idx_improved = np.reshape(idx_improved, [-1, *[1]*len(x.shape[:-1])])\n x_best[idx_to_fool] = (idx_improved) * x_new + (~idx_improved) * x_best_curr\n n_queries[idx_to_fool] += 1\n\n # different metrics to keep track of\n acc = margin_min[0]\n time_total = time.time() - time_start\n if np.mod(i_iter, print_every)==0:\n print('[L1] {}: margin={} (n_ex={}, eps={:.3f}, {:.2f}s)'.\n format(i_iter+1, acc, x.shape[0], eps, time_total))\n\n if acc<=0:\n break\n return n_queries, x_best", "def sentmodel(sent_data):\n\n # with tf.variable_scope(\"sent\", reuse=tf.AUTO_REUSE):\n with tf.variable_scope(\"sent\"):\n sent_data = tf.expand_dims(sent_data, -1)\n filter_sizes = [2, 3, 5]\n filter_bitsent = mul_filtercnn(filter_sizes, sent_data, 'sent')\n \n fc_sent = tf.identity(tf.layers.conv1d(\\\n inputs=filter_bitsent,\\\n filters=1,\\\n kernel_size=1,\\\n padding=\"same\",\\\n activation=tf.nn.sigmoid),name=\"fc_sent\")\n return fc_sent", "def __init__(self, voc_size=8000, embed_size=100, hid_size=100, trunc=4,\n model=None):\n\n self.log = logging.getLogger(\"TEST.Embed\")\n self.log.setLevel(logging.INFO)\n\n self.unknown_token = \"UNKNOWN_TOKEN\"\n self.sentence_start_token = \"SENTENCE_START\"\n self.sentence_end_token = \"SENTENCE_END\"\n\n if model is None:\n self.log.info(\"Initializing RNN parameters and functions...\")\n\n self.vocabulary_size = voc_size\n self.embed_size = embed_size\n self.hidden_size = hid_size\n self.bptt_truncate = trunc\n\n # Instantiate the network weights\n # I feel like the first and third are switched for some reason...\n # but it's pretty consistent in the example code. 
Perhaps it's\n # backwards for a purpose\n # The weights going from the input layer to the word embedding\n # layer (E, in tutorial)\n weights_ie = np.random.uniform(-np.sqrt(1./voc_size),\n np.sqrt(1./voc_size),\n (embed_size, voc_size))\n\n # The weights going from input layer to hidden layer\n # (U, in tutorial)\n weights_eh = np.random.uniform(-np.sqrt(1./voc_size),\n np.sqrt(1./voc_size),\n (3, hid_size, embed_size))\n\n # The weights going from hidden layer to hidden layer\n # (W, in tutorial)\n weights_hh = np.random.uniform(-np.sqrt(1./voc_size),\n np.sqrt(1./voc_size),\n (3, hid_size, hid_size))\n\n # The weights going from hidden layer to output layer\n # (V, in tutorial)\n weights_ho = np.random.uniform(-np.sqrt(1./voc_size),\n np.sqrt(1./voc_size),\n (voc_size, hid_size))\n\n # The bias for the hidden units (no bias applied to embedding layer)\n bias = np.zeros((3, hid_size))\n\n # The bias for the output units\n out_bias = np.zeros(voc_size)\n\n self.weights_ie = theano.shared(\n name='weights_ie',\n value=weights_ie.astype(theano.config.floatX))\n\n self.weights_eh = theano.shared(\n name='weights_eh',\n value=weights_eh.astype(theano.config.floatX))\n\n self.weights_hh = theano.shared(\n name='weights_hh',\n value=weights_hh.astype(theano.config.floatX))\n\n self.weights_ho = theano.shared(\n name='weights_ho',\n value=weights_ho.astype(theano.config.floatX))\n\n self.bias = theano.shared(\n name='bias',\n value=bias.astype(theano.config.floatX))\n\n self.out_bias = theano.shared(\n name='out_bias',\n value=out_bias.astype(theano.config.floatX))\n\n self.cache_ie = theano.shared(\n name='cache_ie',\n value=np.zeros(weights_ie.shape).astype(theano.config.floatX))\n\n self.cache_eh = theano.shared(\n name='cache_eh',\n value=np.zeros(weights_eh.shape).astype(theano.config.floatX))\n\n self.cache_hh = theano.shared(\n name='cache_hh',\n value=np.zeros(weights_hh.shape).astype(theano.config.floatX))\n\n self.cache_ho = theano.shared(\n name='cache_ho',\n value=np.zeros(weights_ho.shape).astype(theano.config.floatX))\n\n self.cache_bias = theano.shared(\n name='cache_bias',\n value=np.zeros(bias.shape).astype(theano.config.floatX))\n\n self.cache_out_bias = theano.shared(\n name='cache_out_bias',\n value=np.zeros(out_bias.shape).astype(theano.config.floatX))\n\n self.vocabulary = []\n self.word_to_index = {}\n self.index_to_word = []\n else:\n self.log.info(\"Loading model parameters from saved model...\")\n\n with open(model, \"rb\") as modelFile:\n params = cPickle.load(modelFile)\n\n self.vocabulary_size = params[0]\n self.embed_size = params[1]\n self.hidden_size = params[2]\n self.bptt_truncate = params[3]\n\n self.weights_ie = params[4]\n self.weights_eh = params[5]\n self.weights_hh = params[6]\n self.weights_ho = params[7]\n\n self.vocabulary = params[8]\n if not self.vocabulary[-1] == self.unknown_token:\n self.log.info(\"Appending unknown token\")\n self.vocabulary[-1] = self.unknown_token\n self.index_to_word = params[9]\n self.word_to_index = params[10]\n\n self.bias = params[11]\n self.out_bias = params[12]\n\n self.cache_ie = params[13]\n self.cache_eh = params[14]\n self.cache_hh = params[15]\n self.cache_ho = params[16]\n self.cache_bias = params[17]\n self.cache_out_bias = params[18]\n # End of if statement\n\n # Symbolic representation of one input sentence\n input = T.ivector('sentence')\n\n # Symbolic representation of the one output sentence\n output = T.ivector('sentence')\n\n # Symbolic representation of the cache decay for RMSprop\n decay = 
T.scalar('decay')\n\n # Stochastic Gradient Descent step\n learning_rate = T.scalar('learning_rate')\n\n def forward_propagate(word, previous_state):\n \"\"\"\n Vertically propagates one of the words.\n\n :type word: int\n :param word: the index of the current input word\n\n :type previous_state: T.dvector()\n :param word: the output of the hidden layer from the previous\n horizontal layer\n \"\"\"\n # Embedding layer\n word_vector = self.weights_ie[:, word]\n\n # GRU layer\n update_gate = T.nnet.hard_sigmoid(\n self.weights_eh[0].dot(word_vector) +\n self.weights_hh[0].dot(previous_state) +\n self.bias[0]\n )\n\n reset_gate = T.nnet.hard_sigmoid(\n self.weights_eh[1].dot(word_vector) +\n self.weights_hh[1].dot(previous_state) +\n self.bias[1]\n )\n\n hypothesis = T.tanh(\n self.weights_eh[2].dot(word_vector) +\n self.weights_hh[2].dot(previous_state * reset_gate) +\n self.bias[2]\n )\n\n current_state = (T.ones_like(update_gate) - update_gate) * hypothesis + update_gate * previous_state\n\n # Output layer\n current_output = T.nnet.softmax(\n self.weights_ho.dot(current_state) + self.out_bias\n )[0]\n\n # Not sure why current_output[0] and not just current_output...\n return [current_output, current_state]\n\n #######################################################################\n # Symbolically represents going through each input sentence word and\n # then calculating the state of the hidden layer and output word for\n # each word. The forward_propagate function is the one used to\n # generate the output word and hidden layer state.\n #######################################################################\n self.theano = {}\n\n [out, state], updates = theano.scan(\n forward_propagate,\n sequences=input,\n truncate_gradient=self.bptt_truncate,\n outputs_info=[None, dict(initial=T.zeros(self.hidden_size))],\n name=\"forward_propagate\"\n )\n\n # Predicts the output words for each word in the sentence\n prediction = T.argmax(out, axis=1)\n\n # Calculates the output error between the predicted output and the\n # actual output\n out_error = T.sum(T.nnet.categorical_crossentropy(out, output))\n\n # Symbolically represents gradient calculations for gradient descent\n d_weights_ie = T.grad(out_error, self.weights_ie)\n d_weights_eh = T.grad(out_error, self.weights_eh)\n d_weights_hh = T.grad(out_error, self.weights_hh)\n d_weights_ho = T.grad(out_error, self.weights_ho)\n d_bias = T.grad(out_error, self.bias)\n d_out_bias = T.grad(out_error, self.out_bias)\n\n # Symbolic theano functions\n self.forward_propagate = theano.function([input], out,\n name=\"forward_propagate\")\n self.predict = theano.function([input], prediction, name=\"predict\")\n self.calculate_error = theano.function([input, output], out_error,\n name=\"calculate_error\")\n self.bptt = theano.function([input, output],\n [d_weights_ie, d_weights_eh, d_weights_hh, d_weights_ho, d_bias,\n d_out_bias],\n name=\"bptt\")\n\n # RMSprop parameters\n cache_ie = (decay * self.cache_ie) + ((1 - decay) * d_weights_ie ** 2)\n cache_eh = (decay * self.cache_eh) + ((1 - decay) * d_weights_eh ** 2)\n cache_hh = (decay * self.cache_hh) + ((1 - decay) * d_weights_hh ** 2)\n cache_ho = (decay * self.cache_ho) + ((1 - decay) * d_weights_ho ** 2)\n cache_bias = (decay * self.cache_bias) + ((1 - decay) * d_bias ** 2)\n cache_out_bias = (decay * self.cache_out_bias) + ((1 - decay) * d_out_bias ** 2)\n eps = 1e-6 # Prevents division by 0\n\n self.sgd_step = theano.function(\n [input, output, learning_rate, theano.In(decay, value=0.9)],\n [],\n 
updates=[\n (self.weights_ie, self.weights_ie - learning_rate *\n d_weights_ie / (T.sqrt(self.cache_ie + eps))),\n (self.weights_eh, self.weights_eh - learning_rate *\n d_weights_eh / (T.sqrt(self.cache_eh + eps))),\n (self.weights_hh, self.weights_hh - learning_rate *\n d_weights_hh / (T.sqrt(self.cache_hh + eps))),\n (self.weights_ho, self.weights_ho - learning_rate *\n d_weights_ho / (T.sqrt(self.cache_ho + eps))),\n (self.bias, self.bias - learning_rate * d_bias /\n (T.sqrt(self.cache_bias + eps))),\n (self.out_bias, self.out_bias - learning_rate *\n d_out_bias / (T.sqrt(self.cache_out_bias + eps))),\n (self.cache_ie, cache_ie),\n (self.cache_eh, cache_eh),\n (self.cache_hh, cache_hh),\n (self.cache_ho, cache_ho),\n (self.cache_bias, cache_bias),\n (self.cache_out_bias, cache_out_bias)]\n )\n\n self.x_train = None\n self.y_train = None", "def _make_conv_layers(self):\n conv = nn.Sequential(\n nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=2, padding=1), # padding=3 so, output is 224.\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2),\n\n nn.Conv2d(64, 192, 3, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(192, 128, 1, padding=1), ## kernel size = 1 이므로 padding = 0(defalut)\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(128, 256, 3, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 256, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(512, 256, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(256, 512, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(256, 512, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 512, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(1024, 512, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(1024, 512, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1),\n nn.LeakyReLU(0.1, inplace=True)\n )\n return conv", "def simple_cnn():\n return torch.nn.Sequential(\n torch.nn.Conv2d(1, 20, 5, 1),\n torch.nn.ReLU(),\n torch.nn.MaxPool2d(2, 2),\n torch.nn.Conv2d(20, 50, 5, 1),\n torch.nn.ReLU(),\n torch.nn.MaxPool2d(2, 2),\n torch.nn.Flatten(),\n torch.nn.Linear(4 * 4 * 50, 500),\n torch.nn.ReLU(),\n torch.nn.Linear(500, 10),\n )" ]
[ "0.6291026", "0.61072165", "0.6082616", "0.59387344", "0.59314805", "0.5919466", "0.5789336", "0.5736913", "0.5708138", "0.5659221", "0.5653504", "0.5638298", "0.5631338", "0.5591712", "0.55789256", "0.55471337", "0.5543536", "0.5524808", "0.550148", "0.54923403", "0.5481331", "0.546617", "0.54611033", "0.544798", "0.54410607", "0.54348123", "0.54346234", "0.5432023", "0.54300916", "0.5428308" ]
0.6283863
1
Return the absolute path to a valid plugins.cfg file. Copied from sf_OIS.py
def getPluginPath(): import sys import os import os.path paths = [os.path.join(os.getcwd(), 'plugins.cfg'), '/etc/OGRE/plugins.cfg', os.path.join(os.path.dirname(os.path.abspath(__file__)), 'plugins.cfg')] for path in paths: if os.path.exists(path): return path sys.stderr.write("\n" "** Warning: Unable to locate a suitable plugins.cfg file.\n" "** Warning: Please check your ogre installation and copy a\n" "** Warning: working plugins.cfg file to the current directory.\n\n") raise ogre.Exception(0, "can't locate the 'plugins.cfg' file", "")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_config_file_location():\n\n return './' + CONFIG_FILE_NAME", "def config_file_and_path():\n return str(rmfriend_dir() / 'config.cfg')", "def config_path(self):\n if os.path.exists(self._config_path):\n if pyhocon.ConfigFactory.parse_file(self._config_path):\n return os.path.realpath(self._config_path)\n # TODO if string is url/git repo, download file locally first\n return None", "def cfgpath(p):\n p = Path(p)\n if p.is_absolute():\n return p\n else:\n for d in reversed(cfgdirs):\n try:\n fp = (d / p).resolve()\n except FileNotFoundError:\n continue\n if fp.is_file():\n return fp\n else:\n return p", "def cfg_path(self):\n return self._cfg_path", "def configPath(self):\n return os.path.dirname(__file__)", "def getConfigPath():\n\n global args, ConfigPathDefault\n\n if args.config_location:\n return args.config_location;\n return ConfigPathDefault;", "def _cfg_path(argv):\n cfg_path = argv[1] if len(argv) > 1 else None\n _is_file = os.path.isfile\n if not cfg_path or not _is_file(cfg_path):\n if cfg_path:\n _info(\"no config at {}, trying the default location\".format(\n cfg_path))\n cfg_path = _DEFAULT_PATH\n if not _is_file(cfg_path):\n _info(\"no config at {}, exiting\".format(cfg_path))\n return None\n return cfg_path", "def get_config_path():\n\n root = os.path.dirname(os.path.abspath(__file__))[:-5]\n config_path = os.path.join(root, 'config.ini')\n\n return config_path", "def config_file(self):\n return join_path(self.prefix.etc.bohrium, \"config.ini\")", "def get_cfg_path(filename):\n return os.path.join(get_cfg_dir(), filename)", "def config_path():\n dir_ = os.path.dirname(__file__)\n demo_dir = os.path.join(dir_, '../..')\n return os.path.join(demo_dir, 'mike_dev.ini')", "def getConfigPath():\n if sys.platform == 'linux':\n configpath = os.path.normpath(os.path.expanduser('~/.config/phobos'))\n elif sys.platform == 'darwin':\n configpath = os.path.normpath(os.path.expanduser('~/Library/Application Support/phobos'))\n elif sys.platform == 'win32':\n configpath = os.path.normpath(os.path.expanduser('~/AppData/Roaming/phobos'))\n else:\n configpath = 'ERROR: {0} not supported,'.format(sys.platform)\n return configpath", "def get_configuration_file():\n path = os.path.abspath(os.curdir)\n while path != os.sep:\n config_path = os.path.join(path, CONFIG_FILE_NAME)\n if os.path.exists(config_path):\n return config_path\n path = os.path.dirname(path)\n return None", "def _get_egg_path(self):\n try:\n _dist = get_distribution('janitoo_nut')\n return _dist.__file__\n except AttributeError:\n return 'src-nut/config'", "def location(self):\n\n p = os.path.abspath(__file__)\n pathSP = os.path.split(p)\n return pathSP", "def get_config_path(config):\n section = config.sections()[0]\n return Path(config.get(section, \"path\")).expanduser().absolute()", "def get_plugin_directory_path(self):\n return os.path.join(MODULE_RUNTIME_HOME,\n 'var', 'plugins', self.plugin_name)", "def user_plugin_dir() -> str:\n return os.path.join(user_data_dir(), 'plugins')", "def get_config_filepath():\n scs_installation_dirs = _path_utils.get_addon_installation_paths()\n\n # SEARCH FOR CONFIG...\n scs_config_file = ''\n for i, location in enumerate(scs_installation_dirs):\n test_path = os.path.join(location, 'config.txt')\n if os.path.isfile(test_path):\n scs_config_file = test_path\n break\n\n # IF NO CONFIG FILE, CREATE ONE...\n if scs_config_file == '':\n lprint(\"S Creating new 'config.txt' file:\\n\\t %r\", (os.path.join(scs_installation_dirs[0], 'config.txt'),))\n scs_config_file = 
new_config_file(os.path.join(scs_installation_dirs[0], 'config.txt'))\n\n # print('SCS Blender Tools Config File:\\n \"%s\"\\n' % os.path.join(scs_installation_dirs[0], 'config.txt'))\n return scs_config_file", "def path(self):\n return os.path.join(self.config.get('path', os.getcwd()))", "def get_production_config_file_path(path: pathlib.Path) -> pathlib.Path:\n return get_production_config_dir_path(path) / \"config.py\"", "def get_config_file(self):\r\n return os.path.join(self.cloudletdir, \"applied_config\")", "def get_instance_config_path():\n return join(settings.PROJECT_DIR, \"conf\", \"eoxserver.conf\")", "def _find_config_root(self) -> str:\n location = [\"apache2.conf\", \"httpd.conf\", \"conf/httpd.conf\"]\n for name in location:\n if os.path.isfile(os.path.join(self.root, name)):\n return os.path.join(self.root, name)\n raise errors.NoInstallationError(\"Could not find configuration root\")", "def get_config(self):\n root_folder = os.path.dirname(os.path.dirname(__file__)).replace('\\\\', '/')\n root_folder = root_folder.replace('/core', '/config')\n # print root_folder, '<----------------------------------------'\n proj_config = os.path.join(root_folder, self.project.lower()).replace('\\\\', '/')\n # print proj_config, '============================================='\n if not os.path.isfile(proj_config):\n proj_config = os.path.join(root_folder, 'default').replace('\\\\', '/')\n # print proj_config, '<========================================'\n return proj_config", "def get_base_config(eva_installation_dir: Path) -> Path:\n # if eva package is installed into environment\n if importlib_resources.is_resource(\"eva\", EVA_CONFIG_FILE):\n with importlib_resources.path(\"eva\", EVA_CONFIG_FILE) as yml_path:\n return yml_path\n else:\n # For local dev environments without package installed\n return eva_installation_dir / EVA_CONFIG_FILE", "def find_conf():\n path = os.path.abspath(os.path.expanduser(os.getcwd()))\n while path not in ('', '/'):\n conf_path = os.path.join(path, 'dataplicity.conf')\n if os.path.exists(conf_path):\n return conf_path\n path = os.path.dirname(path)\n return None", "def _get_config_filepath(self):\n\t\tif self.configfilepath is None:\n\t\t\treturn os.path.join(self.workdir, \"config.txt\")\n\t\telse:\n\t\t\treturn self.configfilepath", "def cfgPath( *args ):\n return '/'.join( [str( k ) for k in args] )" ]
[ "0.7101111", "0.7079233", "0.70316", "0.691988", "0.6856361", "0.6809086", "0.67529494", "0.6713169", "0.6694192", "0.6685862", "0.6680839", "0.6547461", "0.64858216", "0.6481523", "0.6454455", "0.64139074", "0.6406523", "0.64045656", "0.6398177", "0.63761204", "0.635552", "0.63336074", "0.6251192", "0.6250835", "0.6235193", "0.6228812", "0.62168187", "0.6196227", "0.61842114", "0.6179357" ]
0.82794327
0
This shows the config dialog and returns the renderWindow.
def configure(ogre_root): user_confirmation = ogre_root.showConfigDialog() if user_confirmation: return ogre_root.initialise(True, "OGRE Render Window") else: return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def openRocConfig(self):\n self.rocConfig_Window = QtWidgets.QDialog()\n self.rocConfig_ui = Ui_rocConfigure()\n self.rocConfig_ui.setupUi(self.rocConfig_Window)\n self.rocConfig_Window.show()", "def display_window(self):\n frame = tk.Frame(master=self.param_window)\n frame.grid(padx=10, pady=20, columnspan=2)\n tk.Label(master=frame, text=\"Enter simulation parameters\").pack()\n\n self.status_text = tk.StringVar()\n self.status_text.set(\"Status message\")\n \n self.rows = 1\n for input_key in self.inputs.keys():\n input_dict = self.inputs[input_key]\n \n frame = tk.Frame(master=self.param_window)\n frame.grid(row=self.rows, column=0, padx=10, pady=1)\n input_dict['label'] = tk.Label(master=frame, text=input_dict['label'])\n input_dict['label'].pack()\n\n frame = tk.Frame(master=self.param_window)\n frame.grid(row=self.rows, column=1, padx=10, pady=1)\n input_dict['entry'] = tk.Entry(master=frame, width=10)\n input_dict['entry'].insert(0, input_dict['default'])\n input_dict['entry'].pack()\n \n self.rows += 1\n\n frame = tk.Frame(master=self.param_window)\n frame.grid(padx=10, pady=20, columnspan = 2)\n self.submit_btn = tk.Button(master=frame, text=\"Submit\", width=10)\n self.submit_btn.pack()\n self.submit_btn.bind(\"<Button-1>\", self.submit_values)\n\n self.param_window.mainloop()\n return self.parameters", "def open_configuration(self,event):\n configDevFrame = Single_deviceconf(parent=self, ID=996)\n configDevFrame.Centre()\n configDevFrame.Show()\n configDevFrame.ShowModal()\n configDevFrame.Destroy()", "def configure(self):\n dlg = ConfigureDialog(self._main_window)\n dlg.identifierOccursCount = self._identifierOccursCount\n dlg.setConfig(self._config)\n dlg.validate()\n dlg.setModal(True)\n\n if dlg.exec_():\n self._config = dlg.getConfig()\n self._data.config = self._config\n\n self._configured = dlg.validate()\n self._configuredObserver()", "def configure(self):\n dlg = ConfigureDialog(QtGui.QApplication.activeWindow().currentWidget())\n dlg.identifierOccursCount = self._identifierOccursCount\n\n if dlg.exec_():\n self._config = dlg.getConfig()\n dlg.setConfig(self._config)\n dlg.validate()\n dlg.setModal(True)\n\n\n\n self._configured = dlg.validate()\n self._configuredObserver()\n self._configured = True", "def _on_build_programmatic_display_config_gui(self):\n if self.ui.active_figure_format_config_widget is None:\n # Create a new one:\n # curr_selected_context = self.ui.contextSelectorWidget.current_selected_context\n active_config_name = self.ui.contextSelectorWidget.current_selected_context_key\n curr_active_config = self.owning_pipeline.active_configs[active_config_name] # Get default config for this config name\n # print(f'active_config_name: {active_config_name}, curr_active_config: {curr_active_config}')\n self.ui.active_figure_format_config_widget = FigureFormatConfigControls(config=curr_active_config)\n self.ui.active_figure_format_config_widget.figure_format_config_finalized.connect(self.on_finalize_figure_format_config)\n self.ui.active_figure_format_config_widget.show() # even without .show() being called, the figure still appears\n\n ## Get the figure_format_config from the figure_format_config widget:\n figure_format_config = self.ui.active_figure_format_config_widget.figure_format_config\n else:\n print(f'figure GUI already exists. 
Just showing again.')\n self.ui.active_figure_format_config_widget.show()", "def show(self, window):\r\n\r\n return", "def showSettings(self):\n self.c.show()", "def show_window(self):\n self.show()", "def show_preferences_dialog(self):\n dialog = self.ui.get_object(\"pref_dialog\") # type: Gtk.Dialog\n dialog.set_transient_for(self.ui.get_object(\"mainWindow\"))\n\n store = Gtk.ListStore(str, str)\n for page in self.pages.keys():\n store.append([page.title(), page])\n store.append([\"Continue where you left\", \"dynamic\"])\n page_map = {\"search\": 0,\n \"library\": 1,\n \"decks\": 2,\n \"wants\": 3,\n \"dynamic\": 4}\n self.ui.get_object(\"pref_start_view_combo\").set_model(store)\n self.ui.get_object(\"pref_start_view_combo\").set_active(page_map[self.config[\"start_page\"]])\n\n self.ui.get_object(\"pref_show_all_check\").set_active(self.config[\"show_all_in_search\"])\n\n result = dialog.run()\n dialog.hide()\n\n if not result == Gtk.ResponseType.OK:\n return\n\n tree_iter = self.ui.get_object(\"pref_start_view_combo\").get_active_iter()\n value = self.ui.get_object(\"pref_start_view_combo\").get_model().get_value(tree_iter, 1)\n self.config[\"start_page\"] = value\n\n self.config[\"show_all_in_search\"] = self.ui.get_object(\"pref_show_all_check\").get_active()\n\n self.save_config()\n self.config = self.load_config()", "def showSettingsWindow(self) -> None:\n if not self._settings_dialog:\n self._settings_dialog = self._createDialog(\"ThingiSettings.qml\")\n self._settings_dialog.show()", "async def _show_config_form(\n self,\n ):\n return self.async_show_form(\n step_id=\"user\",\n data_schema=vol.Schema(\n {\n vol.Required(CONF_DEVICEID): str,\n }\n ),\n errors=self._errors,\n )", "def openTB4Settings(self):\n self.TB4_Window = QtWidgets.QDialog()\n self.TB4_ui = Ui_robotFourConfig()\n self.TB4_ui.setupUi(self.TB4_Window)\n self.TB4_Window.show()", "def configure_window(self, width, height):\n self.configure_surface(width, height)", "def _configureWindow(self):\n if self._win_type == WindowType.IMMERSIVE:\n pg.setConfigOptions(\n foreground='d',\n background=(_DARK_COLOUR if self._dark else _LIGHT_COLOUR))\n self._win = pg.plot(title=\"Abstact Map Visualisation\")\n self._plt = self._win.plotItem\n self._plt.setAspectLocked(True, 1)\n self._plt.hideAxis('left')\n self._plt.hideAxis('bottom')\n else: # DEFAULT\n pg.setConfigOptions(foreground='k', background='w')\n self._win = pg.plot(title=\"Abstact Map Visualisation\")\n self._plt = self._win.plotItem\n\n # Set up the overlay objects as they are static\n self._overlay_items = [\n QtWidgets.QGraphicsRectItem(-_OVERLAY_WIDTH / 2,\n -_OVERLAY_HEIGHT / 2, _OVERLAY_WIDTH,\n _OVERLAY_HEIGHT)\n ]\n self._overlay_items[0].setBrush(pg.mkBrush(_OVERLAY_COLOUR))\n self._overlay_items[0].setZValue(1000)\n self._win.addItem(self._overlay_items[0])\n self.toggleOverlay(enable=False)\n\n # Do any last settings in the window\n # self._win.parentWidget().showMaximized()\n limit = 30\n self._win.setRange(xRange=[-limit, limit], yRange=[-limit, limit])", "def getConfigPanel():\n\treturn None", "def showUI(cls):\r\n win = cls()\r\n win.create()\r\n return win", "def json_editor(self):\n json_editor = pn.widgets.JSONEditor.from_param(\n self.param.config_dict,\n mode=\"view\",\n menu=False,\n sizing_mode=\"stretch_width\",\n )\n config_viewer = pn.Card(\n json_editor,\n title=\"CONFIG Viewer\",\n sizing_mode=\"stretch_width\",\n collapsed=True,\n )\n return config_viewer", "def InitializeWindow(self):\n \n win_height = 600\n win_width = 900\n \n # 
'x' and 'y' coordinates place window in the center of the screen\n y = int((self.winfo_screenheight() / 2) - (win_height / 2))\n x = int((self.winfo_screenwidth() / 2) - (win_width / 2))\n self.geometry(f'{win_width}x{win_height}+{x}+{y}')\n self.resizable(False, False)\n self.title('Log In')\n \n # Initialize the background template frame and canvas\n self.main_frame = Widgets.CreateFrame(self)\n self.main_frame.pack(fill='both', expand='true')\n self.main_canvas = Widgets.CreateCanvas(self.main_frame)\n self.main_canvas.pack(fill='both', expand='true')\n \n # Create a window in the center of the screen to hold widgets\n top_left_x = win_width / 4\n top_left_y = win_height / 4\n bottom_right_x = win_width - top_left_x\n bottom_right_y = win_height - top_left_y\n self.main_canvas.create_rectangle(top_left_x, top_left_y,\n bottom_right_x, bottom_right_y,\n fill='#f8f8ff')\n self.canvas_window = self.main_canvas.create_window(win_width / 2,\n win_height / 2)\n \n # Function to save user data if the window is exited\n self.protocol('WM_DELETE_WINDOW', self.OnClose)", "def show(self):\n self._window.show()", "def _open_window(self):\r\n\t\t# Creating the window\r\n\t\tself._window = Window(self, Locations.RESTAL)", "def showUI(cls):\r\n win = cls(uiFile)\r\n win.create()\r\n return win", "def start(self):\n window_layout = self.build_gui()\n window = sg.Window(self.app_name, window_layout)\n fields = self.config.get(\"fields\")\n while True:\n event, values = window.read()\n if event == sg.WIN_CLOSED or event == 'Cancel': # if user closes window or clicks cancel\n break\n\n if event == 'Build':\n # Validate fields\n errors = False\n for (key, value) in values.items():\n if key in fields:\n errmsg = \"\"\n if fields.get(key).get(\"type\") == \"str\":\n errmsg = self.validate_text_field(\n fields.get(key), value)\n if fields.get(key).get(\"type\") == \"int\":\n errmsg = self.validate_int_field(\n fields.get(key), value)\n if fields.get(key).get(\"type\") == \"list\":\n errmsg = self.validate_list_field(\n fields.get(key), value)\n if fields.get(key).get(\"type\") == \"date\":\n errmsg = self.validate_date_field(\n fields.get(key), value)\n if fields.get(key).get(\"type\") == \"textarea\":\n errmsg = self.validate_textarea_field(\n fields.get(key), value)\n\n if errmsg != \"\":\n sg.Popup(\"Opps!\", f\"{errmsg}\")\n errors = True\n break\n\n # Build document\n if not errors:\n self.sanitize_values(values)\n try:\n filename = self.build_document(values)\n sg.Popup(\n \"Congrats!\", f\"Your file ({filename}) was generated!\")\n break\n except Exception:\n e = sys.exc_info()[0]\n sg.Popup(f\"Problem generating your file. 
(Error: {e})\")", "def open_generatorWindow(self):\n self.window = generatorWindow(self)\n self.hide()", "def show(self):\n # * displays the window, after using either the iconify or the withdraw methods\n self.wm_deiconify()\n # * this method can be called after the event which needs to happen before the window event\n self.wait_window()", "def show_window(self):\n self._window.grab_set()\n self._window.wait_window()", "def guiMode(options):\n configuration = {'config_project_name': 'untitled', 'config_address': '0.0.0.0',\n 'config_port': 8081, 'config_multiple_instance': True, 'config_enable_file_cache': True,\n 'config_start_browser': True, 'config_resourcepath': './res/'}\n start(MainWindow, address=configuration['config_address'], port=configuration['config_port'],\n multiple_instance=configuration['config_multiple_instance'],\n enable_file_cache=configuration['config_enable_file_cache'],\n start_browser=configuration['config_start_browser'])", "def openTB1Settings(self):\n self.TB1_Window = QtWidgets.QDialog()\n self.TB1_ui = Ui_robotOneConfig()\n self.TB1_ui.setupUi(self.TB1_Window)\n self.TB1_Window.show()", "def open_options_window(self):\n window_options = OptionsWindow(self.master)\n window_options.lift() # Show above main window\n # TODO: block the user from interacting with the main window\n # while the options window is open\n window_options.focus_force()", "def __init_window(self) -> pygame.Surface:\n pygame.display.set_caption(CAPTION)\n win = pygame.display.set_mode((WIDTH, HEIGHT))\n \n return win" ]
[ "0.6363973", "0.6353406", "0.62993693", "0.6273288", "0.62459344", "0.62194985", "0.6168433", "0.61429256", "0.6130523", "0.6106466", "0.60738796", "0.6022143", "0.59356743", "0.5909631", "0.58671", "0.5833457", "0.58322257", "0.57948744", "0.5749032", "0.5740846", "0.570118", "0.5663449", "0.56608564", "0.5652644", "0.56378007", "0.5611416", "0.56107986", "0.5600358", "0.5595631", "0.55865073" ]
0.6725343
0
Read a DICOM file, raising an exception if the 'DICM' marker is not present at byte 128. dicom.read_file() does this as of pydicom 0.9.5.
def read_dicom_file(fname): fo = open(fname) try: preamble = fo.read(128) magic = fo.read(4) if len(preamble) != 128 or magic != 'DICM': raise InvalidDicomError fo.seek(0) do = dicom.read_file(fo) finally: fo.close() return do
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _read(path, desired_size):\n \n dcm = pydicom.dcmread(path)\n\n slope, intercept = dcm.RescaleSlope, dcm.RescaleIntercept\n \n try:\n img = (dcm.pixel_array * slope + intercept)\n except:\n img = np.zeros(desired_size[:2])-1\n \n if img.shape != desired_size[:2]:\n img = cv2.resize(img, desired_size[:2], interpolation=cv2.INTER_LINEAR)\n \n img = _normalize(img)\n \n # return np.stack((img,)*3, axis=-1)\n return img", "def fits_ccddata_reader(filename, hdu=0, unit=None, hdu_uncertainty='UNCERT',\n hdu_mask='MASK', hdu_flags=None, **kwd):\n unsupport_open_keywords = {\n 'do_not_scale_image_data': ('Image data must be scaled to perform '\n 'ccdproc operations.'),\n 'scale_back': 'Scale information is not preserved.'\n }\n for key, msg in unsupport_open_keywords.items():\n if key in kwd:\n prefix = 'unsupported keyword: {0}.'.format(key)\n raise TypeError(' '.join([prefix, msg]))\n with fits.open(filename, **kwd) as hdus:\n hdr = hdus[hdu].header\n\n if hdu_uncertainty is not None and hdu_uncertainty in hdus:\n uncertainty = StdDevUncertainty(hdus[hdu_uncertainty].data)\n else:\n uncertainty = None\n\n if hdu_mask is not None and hdu_mask in hdus:\n # Mask is saved as uint but we want it to be boolean.\n mask = hdus[hdu_mask].data.astype(np.bool_)\n else:\n mask = None\n\n if hdu_flags is not None and hdu_flags in hdus:\n raise NotImplementedError('loading flags is currently not '\n 'supported.')\n\n # search for the first instance with data if\n # the primary header is empty.\n if hdu == 0 and hdus[hdu].data is None:\n for i in range(len(hdus)):\n if hdus.fileinfo(i)['datSpan'] > 0:\n hdu = i\n hdr = hdr + hdus[hdu].header\n log.info(\"first HDU with data is extension \"\n \"{0}.\".format(hdu))\n break\n\n if 'bunit' in hdr:\n fits_unit_string = hdr['bunit']\n # patch to handle FITS files using ADU for the unit instead of the\n # standard version of 'adu'\n if fits_unit_string.strip().lower() == 'adu':\n fits_unit_string = fits_unit_string.lower()\n else:\n fits_unit_string = None\n\n if unit is not None and fits_unit_string:\n log.info(\"using the unit {0} passed to the FITS reader instead of \"\n \"the unit {1} in the FITS file.\".format(unit,\n fits_unit_string))\n\n use_unit = unit or fits_unit_string\n # Try constructing a WCS object. 
This may generate a warning, but never\n # an error.\n wcs = WCS(hdr)\n # Test for success by checking to see if the wcs ctype has a non-empty\n # value.\n wcs = wcs if wcs.wcs.ctype[0] else None\n ccd_data = CCDData(hdus[hdu].data, meta=hdr, unit=use_unit,\n mask=mask, uncertainty=uncertainty, wcs=wcs)\n\n return ccd_data", "def _ReadCoverageInfoEntry(data_file):\n\n UINT32_SIZE = 4\n\n pkt_size_buf = data_file.read(UINT32_SIZE)\n if len(pkt_size_buf) != UINT32_SIZE:\n raise ValueError(\"Invalid packet size read.\")\n\n pkt_size = struct.unpack(\"I\", pkt_size_buf)[0]\n\n pkt = data_file.read(pkt_size)\n\n if len(pkt) != pkt_size:\n raise ValueError(\"Incomplete packet.\")\n\n return pkt", "def read_img(img_path):\n img_list=[]\n print('image loading...')\n for _,_,files in os.walk(img_path):\n for f in files:\n if f.find('.dcm')>=0:\n tmp_img=dicom.dcmread(os.path.join(img_path,f))\n tmp_img=tmp_img.pixel_array#[0::2,0::2]\n img_list.append(tmp_img)\n img_data=np.array(img_list)\n print('done')\n return img_data", "def image_builder_dicom(*, files: Set[Path]) -> Iterator[SimpleITKImage]:\n file_errors: DefaultDict[Path, List[str]] = defaultdict(list)\n\n studies = _find_valid_dicom_files(files=files, file_errors=file_errors)\n for dicom_ds in studies:\n try:\n yield dicom_ds.read()\n except Exception as e:\n for d in dicom_ds.headers:\n file_errors[d[\"file\"]].append(format_error(str(e)))\n\n if file_errors:\n raise UnconsumedFilesException(file_errors=file_errors)", "def read_nexrad_cdm(filename, field_mapping=None, field_metadata=None):\n # parse field_mapping and field_metadata parameters\n if field_mapping is None:\n field_mapping = CDM_FIELD_MAPPING.copy()\n if field_metadata is None:\n field_metadata = NEXRAD_METADATA.copy()\n\n dataset = netCDF4.Dataset(filename)\n dattrs = dataset.ncattrs()\n if 'cdm_data_type' not in dattrs or dataset.cdm_data_type != 'RADIAL':\n raise IOError('%s is not a valid CDM NetCDF file' % (filename))\n\n # Might need to add a check to see if all fields/resolution are present.\n refl_hi = _radar_from_cdm(dataset, _gen_vnames('refl', True),\n field_mapping, field_metadata)\n dopl_hi = _radar_from_cdm(dataset, _gen_vnames('doppler', True),\n field_mapping, field_metadata)\n refl_sd = _radar_from_cdm(dataset, _gen_vnames('refl', False),\n field_mapping, field_metadata)\n dopl_sd = _radar_from_cdm(dataset, _gen_vnames('doppler', False),\n field_mapping, field_metadata)\n return refl_hi, dopl_hi, refl_sd, dopl_sd", "def dicom_load():\n # Identify folders with EPI data\n dirs = [i for i in os.listdir(dcm_dir) if os.path.isdir(os.path.join(dcm_dir, i))]\n d_cnt = 0\n for d in dirs:\n dcm_file = os.path.join(dcm_dir,d,os.listdir(os.path.join(dcm_dir,d))[0])\n try:\n dcm_data = pydicom.dcmread(dcm_file)\n except:\n pass\n else:\n # If data is EPI then get start time, etc\n if 'EPI' in dcm_data.ImageType:\n dcm_dict[d_cnt] = {}\n dcm_dict[d_cnt]['dcm_file'] = dcm_file\n dcm_dict[d_cnt]['task_name'] = dcm_data.SeriesDescription\n dcm_dict[d_cnt]['task_name'] = dcm_dict[d_cnt]['task_name'].replace('_','-')\n date = dcm_data.SeriesDate\n start = dcm_data.SeriesTime\n start_time = '%s-%s-%s %s:%s:%s'%(date[0:4],date[4:6],date[6:],start[0:2],start[2:4],start[4:])\n dcm_dict[d_cnt]['start_time'] = datetime.fromisoformat(start_time)\n dcm_dict[d_cnt]['run_length'] = dcm_data[0x0019,0x105a].value/1000\n dcm_dict[d_cnt]['end_time'] = dcm_dict[d_cnt]['start_time'] + timedelta(milliseconds=dcm_dict[d_cnt]['run_length'])\n d_cnt = d_cnt+1", "def test():\n\n 
fname='./MedData/Lung-PET-CT-Dx/Lung_Dx-A0164/04-12-2010-PET01PTheadlung Adult-08984/8.000000-Thorax 1.0 B31f-52757/1-001.dcm' \n \n ds=pydicom.dcmread(fname)\n # print(ds.pixel_array.shape)\n print(ds.pixel_array[1])\n plt.figure(figsize=(10,10))\n plt.imshow(ds.pixel_array, cmap=plt.cm.bone)\n plt.show()", "def _find_valid_dicom_files(\n files: Set[Path], file_errors: DefaultDict[Path, List[str]]\n) -> List[DicomDataset]:\n studies = _get_headers_by_study(files=files, file_errors=file_errors)\n result = []\n for key in studies:\n headers = studies[key][\"headers\"]\n set_name = studies[key][\"name\"]\n if not headers:\n continue\n\n n_files = len(headers)\n n_time = len(\n {\n int(header[\"data\"].TemporalPositionIndex)\n for header in headers\n if \"TemporalPositionIndex\" in header[\"data\"]\n }\n )\n sop_class_uids = [header[\"data\"].SOPClassUID for header in headers]\n\n arbitrary_header = headers[0][\"data\"]\n try:\n n_slices_per_file = len(\n arbitrary_header.PerFrameFunctionalGroupsSequence\n )\n except AttributeError:\n n_slices_per_file = int(\n getattr(arbitrary_header, \"NumberOfFrames\", 1)\n )\n n_slices = n_files * n_slices_per_file\n\n if \"1.2.840.10008.5.1.4.1.1.77.1.6\" in sop_class_uids:\n for d in headers:\n file_errors[d[\"file\"]].append(\n format_error(\"WSI-DICOM not supported by DICOM builder\")\n )\n elif n_time < 2:\n # Not a 4d dicom file\n result.append(\n DicomDataset(\n name=set_name,\n headers=headers,\n n_time=None,\n n_slices=n_slices,\n n_slices_per_file=n_slices_per_file,\n )\n )\n elif len(headers) % n_time > 0:\n # Invalid 4d dicom file\n for d in headers:\n file_errors[d[\"file\"]].append(\n format_error(\"Number of slices per time point differs\")\n )\n else:\n # Valid 4d dicom file\n result.append(\n DicomDataset(\n name=set_name,\n headers=headers,\n n_time=n_time,\n n_slices=n_slices // n_time,\n n_slices_per_file=n_slices_per_file,\n )\n )\n\n del studies\n return result", "def GetDicomFromNode(self,node):\n storageNode=node.GetStorageNode()\n if storageNode is not None: # loaded via drag-drop\n filepath=storageNode.GetFullNameFromFileName()\n else: # loaded via DICOM browser\n instanceUIDs=node.GetAttribute('DICOM.instanceUIDs').split()\n filepath=slicer.dicomDatabase.fileForInstance(instanceUIDs[0])\n Dcm_tag=pydicom.dcmread(filepath)\n return Dcm_tag", "def read_file(file_name):\n fits_file = fits.open(file_name)\n\n header = fits_file[0].header\n image_data = fits_file[1].data\n\n segmentation_data = fits_file[2].data\n\n header_keywords = {'CRVAL3': 0, 'CRPIX3': 0, 'CD3_3': 0}\n # clause to differentiate between CDELT3 and CD3_3\n\n for hdr_key, hdr_value in header_keywords.items():\n # finding required header values\n hdr_value = header[hdr_key]\n header_keywords[hdr_key] = hdr_value\n\n return header_keywords, image_data, segmentation_data", "def load_dicom(path: str) -> np.ndarray:\n \n # initialize DICOM reader from vtk module\n reader = vtk.vtkDICOMImageReader()\n reader.SetDirectoryName(path)\n reader.Update()\n\n # Load dimensions\n _extent = reader.GetDataExtent()\n px_dims = [_extent[1]-_extent[0]+1, _extent[3]-_extent[2]+1, _extent[5]-_extent[4]+1]\n\n # Load spacing values\n px_space = reader.GetPixelSpacing()\n\n # bounding axes\n x = np.arange(0.0, (px_dims[0]+1)*px_space[0], px_space[0])\n y = np.arange(0.0, (px_dims[1]+1)*px_space[1], px_space[1])\n z = np.arange(0.0, (px_dims[2]+1)*px_space[2], px_space[2])\n\n # Get the image data\n img_dat = reader.GetOutput()\n # Get the point data\n pt_dat = img_dat.GetPointData()\n # 
Get the actual point data from the vtk object\n dat = pt_dat.GetArray(0)\n\n # Convert the vtk to numpy array\n dicom = numpy_support.vtk_to_numpy(dat)\n # Reshape the numpy array to 3D using 'ConstPixelDims' as a 'shape'\n dicom = dicom.reshape(px_dims, order='F')\n\n return dicom", "def load_scans(pathDicom):\n reader = sitk.ImageSeriesReader()\n filenamesDICOM = reader.GetGDCMSeriesFileNames(pathDicom)\n reader.SetFileNames(filenamesDICOM)\n img = reader.Execute()\n return img", "def test_file_read_utf8_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).read_utf8()", "def test_file_readas_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).read_as(\"utf-8\")", "def read(self, filename, byte_offset, length, threadID):\n self.lock.acquire()\n if filename not in self.files_on_disk:\n self.lock.release()\n return \"ERROR: NO SUCH FILE\\n\"\n read_file = self.files_on_disk[filename]\n if(read_file.num_bytes < byte_offset + length):\n self.lock.release()\n return \"ERROR: INVALID BYTE RANGE\\n\"\n else:\n with open(\".storage/\" + filename) as f:\n if self.files_on_disk[filename].type == \"jpg\":\n f.seek(byte_offset*8)\n contents = f.read(length*8)\n else:\n f.seek(byte_offset)\n contents = f.read(length)\n self.lock.release()\n return \"ACK %d\\n%s\\n\" % (length, contents)", "def read(cls, filename):\n with fits.open(str(make_path(filename)), memmap=False) as hdulist:\n return cls.from_hdulist(hdulist)", "def read_file(path_to_file):\n 8", "def read_image(path):\n reader = sitk.ImageSeriesReader()\n dicom_filenames = reader.GetGDCMSeriesFileNames(path)\n reader.SetFileNames(dicom_filenames)\n reader.LoadPrivateTagsOn()\n img = reader.Execute()\n img.SetOrigin((0, 0, 0))\n return img", "def is_dicom_file(filepath):\n if not os.path.exists(filepath):\n raise IOError('File {} not found.'.format(filepath))\n\n filename = os.path.basename(filepath)\n if filename == 'DICOMDIR':\n return False\n\n try:\n _ = dicom.read_file(filepath)\n except Exception as exc:\n log.debug('Checking if {0} was a DICOM, but returned '\n 'False.'.format(filepath))\n return False\n\n return True", "def read_dicom_series(directory, filepattern = \"image_*\"):\n \n if not os.path.exists(directory) or not os.path.isdir(directory):\n raise ValueError(\"Given directory does not exist or is a file : \"+str(directory))\n print('\\tRead Dicom',directory)\n lstFilesDCM = natsorted(glob.glob(os.path.join(directory, filepattern)))\n print('\\tLength dicom series',len(lstFilesDCM) )\n # Get ref file\n RefDs = dicom.read_file(lstFilesDCM[0])\n # get the space sampling\n dx = np.float(RefDs.PixelSpacing[0])\n dy = np.float(RefDs.PixelSpacing[1])\n dz = np.float(RefDs.SliceThickness)\n dsampling = np.array([dx,dy,dz])\n # Load dimensions based on the number of rows, columns, and slices (along the Z axis)\n ConstPixelDims = (int(RefDs.Rows), int(RefDs.Columns), len(lstFilesDCM))\n # The array is sized based on 'ConstPixelDims'\n ArrayDicom = np.zeros(ConstPixelDims, dtype=RefDs.pixel_array.dtype)\n\n # loop through all the DICOM files\n for filenameDCM in lstFilesDCM:\n # read the file\n ds = dicom.read_file(filenameDCM)\n # transform the raw data to HU using Rescale slope and intercept and store it as array \n ArrayDicom[:, :, lstFilesDCM.index(filenameDCM)] = ds.pixel_array\n\n return ArrayDicom, dsampling", "def test_read_fail2(self):\n mod_fn = os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_bad_data2.mod')\n with self.assertRaises(ValueError):\n 
modreader.get_data(mod_fn) # missing 'IEOF' end", "def read_magic_fits_file(magic_file, redshift, flux_unit='TeV-1 s-1 cm-2', hdu=2, energy_unit='TeV'):\n sed = Table.read(magic_file, hdu=hdu)\n x_cen = sed['energy'].to(energy_unit)\n dx = sed['Denergy'].to(energy_unit)\n\n sed['flux'].unit = u.Unit(sed['flux'].unit.to_string().replace(\"ph\", \"\"))\n sed['Dflux'].unit = sed['flux'].unit\n\n y = (sed['flux'] / x_cen.to('TeV') ** 2.).to(flux_unit)\n dy = (sed['Dflux'] / x_cen.to('TeV') ** 2.).to(flux_unit)\n x_edges = np.append(x_cen - dx / 2., x_cen[-1] + dx[-1] / 2.)\n x_min = x_edges[:-1]\n x_max = x_edges[1:]\n\n return FitIACTFermi(x_cen.to(\"TeV\").value, y.value, dy.value,\n redshift,\n x_min=x_min.to(\"TeV\").value, x_max=x_max.to(\"TeV\").value)", "def _read_in_file(path, idc):\n info('read in file %s' % path)\n\n if not os.path.exists(path):\n info('file path not exist: %s' % path)\n sys.exit(1)\n try:\n if path.endswith('csv.gz'):\n mat = pd.read_csv(path, compression='gzip', index_col=0)\n elif path.endswith('.parquet'):\n mat = pd.read_parquet(path)\n else:\n mat = pd.read_csv(path, sep='\\t', index_col=0)\n except:\n traceback.print_exc(file=sys.stderr) # maybe the file type problem\n sys.exit(1)\n # TARGET-RT, too few sample is avaliable\n mat = mat[~mat.project_id.isin(['TARGET-RT'])]\n # check file title\n if 'project_id' not in mat.columns.tolist():\n info('project_id not in column names')\n sys.exit(1)\n if 'sample_type' not in mat.columns.tolist():\n info('sample_type is not in columns')\n sys.exit(1)\n # specify to needed genes:\n # the gene not in matrix columns\n diffgene = list(set(idc) - set(mat.columns.tolist()))\n if diffgene:\n info('these genes %s are not in the expression matrix of this cancer, skip %s' % (\n str(diffgene), str(path)))\n # return(pd.DataFrame()) # return a empty dataframe\n return (mat)", "def fs_read(file_path):\n try:\n with open(str(file_path), 'r') as f:\n return f.read()\n except UnicodeDecodeError:\n with open(str(file_path), 'r', encoding='latin-1') as f:\n return f.read()\n except IOError as e:\n raise e", "def read_file(file):\n if opts.input_type == 'fits':\n data = fileio.read_fits(file)\n else:\n data = fileio.read_ascii(file)\n c_id = data[0,:]\n g_num = np.array(range(len(c_id)), dtype = 'int')\n g_id = data[3,:]\n g_ra = np.array(data[4,:], dtype = 'float')\n g_dec = np.array(data[5,:], dtype = 'float')\n g_z = np.array(data[6,:], dtype = 'float')\n return c_id, g_num, g_id, g_ra, g_dec, g_z", "def test_read_fail1(self):\n mod_fn = os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_bad_data1.mod')\n with self.assertRaises(ValueError):\n modreader.get_data(mod_fn) # missing 'IMOD' start", "def test_file_read_gzip_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).read_gzip()", "def find_extraneous_bytes_before_marker(filepath):\n code, out, err = run_command(['identify', filepath])\n err_str = err.decode('utf8')\n ending = \"extraneous bytes before marker\"\n if err_str.find(ending) < 0:\n return None, None, None\n m = re.search(r'Corrupt JPEG data: ([\\d]+) extraneous bytes before marker (0x[\\w]+)', err_str)\n size = int(m.group(1))\n marker = m.group(2)\n return size, marker, err_str", "def readcif(filename, **kwds):\n \n # Read the unit cell parameters\n a, b, c, alf, bet, gam = [[]]*6\n with open(filename, 'r') as f:\n \n for line in f:\n if \"length_a\" in line:\n a = numgrab(line)\n elif \"length_b\" in line:\n b = numgrab(line)\n elif \"length_c\" in line:\n c = numgrab(line)\n 
elif \"angle_alpha\" in line:\n alf = numgrab(line)\n elif \"angle_beta\" in line:\n bet = numgrab(line)\n elif \"angle_gamma\" in line:\n gam = numgrab(line)\n \n crystVec = a + b + c + alf + bet + gam\n \n # Read atomic coordinates\n cifdata = pd.read_csv(filename, delim_whitespace=True, header=None, **kwds)\n atomLabels = np.array(cifdata.values[:,0], dtype='str')\n coords = np.array(cifdata.values[:,1:4]).astype('float64')\n\n return atomLabels, coords, crystVec" ]
[ "0.5621899", "0.54997045", "0.54964674", "0.5454611", "0.53653544", "0.5280224", "0.52707344", "0.5212712", "0.52099603", "0.5170325", "0.516826", "0.51599985", "0.51240665", "0.51210904", "0.5107606", "0.51000935", "0.50604916", "0.5044146", "0.5007753", "0.5000708", "0.4984937", "0.49691802", "0.49520987", "0.49485207", "0.49361086", "0.49300015", "0.4927381", "0.4909337", "0.4886119", "0.4879451" ]
0.7051324
0
given our dicom_files and studies records and a patient ID, return a list of (datetime, study instance UID) ordered by date+time
def patient_studies(dicom_files, studies, patient_id): ps = [] for uid in dicom_files[patient_id]: datetime = '%s%s' % studies[uid] ps.append([datetime, uid]) ps.sort(lambda a, b: cmp(a[0], b[0])) for el in ps: date_time_parts = (el[0][0:4], el[0][4:6], el[0][6:8], el[0][8:10], el[0][10:12], el[0][12:14]) el[0] = '%s-%s-%s %s:%s:%s' % date_time_parts return ps
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_all_dicom_images(self, verbose=True):\n if verbose: print(\"Loading dicom files ... This may take a moment.\")\n\n path = self.get_path_to_dicom_files()\n fnames = [fname for fname in os.listdir(path)\n if fname.endswith('.dcm') and not fname.startswith(\".\")]\n images = []\n for fname in fnames:\n image = dicom.dcmread(os.path.join(path,fname))\n\n seid = str(image.SeriesInstanceUID).strip()\n stid = str(image.StudyInstanceUID).strip()\n\n if seid == self.series_instance_uid and\\\n stid == self.study_instance_uid:\n images.append(image)\n\n # ##############################################\n # Clean multiple z scans.\n #\n # Some scans contain multiple slices with the same `z` coordinate \n # from the `ImagePositionPatient` tag.\n # The arbitrary choice to take the slice with lesser \n # `InstanceNumber` tag is made.\n # This takes some work to accomplish...\n zs = [float(img.ImagePositionPatient[-1]) for img in images]\n inums = [float(img.InstanceNumber) for img in images]\n inds = list(range(len(zs)))\n while np.unique(zs).shape[0] != len(inds):\n for i in inds:\n for j in inds:\n if i!=j and zs[i] == zs[j]:\n k = i if inums[i] > inums[j] else j\n inds.pop(inds.index(k))\n\n # Prune the duplicates found in the loops above.\n zs = [zs[i] for i in range(len(zs)) if i in inds]\n images = [images[i] for i in range(len(images)) if i in inds]\n\n # Sort everything by (now unique) ImagePositionPatient z coordinate.\n sort_inds = np.argsort(zs)\n images = [images[s] for s in sort_inds]\n # End multiple z clean.\n # ##############################################\n\n return images", "def get_samples_from_patient_id(patient_id):\n all_files = FileRepository.all()\n q_pid = Q(metadata__cmoPatientId=patient_id)\n q_fg = build_argos_file_groups_query()\n q = q_pid & q_fg\n files = FileRepository.filter(queryset=all_files, q=q, filter_redact=True)\n data = list()\n for current_file in files:\n sample = dict()\n sample[\"id\"] = current_file.file.id\n sample[\"path\"] = current_file.file.path\n sample[\"file_name\"] = current_file.file.file_name\n sample[\"metadata\"] = current_file.metadata\n data.append(sample)\n\n samples = list()\n # group by igoId\n igo_id_group = dict()\n for sample in data:\n igo_id = sample[\"metadata\"][settings.SAMPLE_ID_METADATA_KEY]\n if igo_id not in igo_id_group:\n igo_id_group[igo_id] = list()\n igo_id_group[igo_id].append(sample)\n\n for igo_id in igo_id_group:\n samples.append(build_sample(igo_id_group[igo_id]))\n samples, bad_samples = remove_with_caveats(samples)\n number_of_bad_samples = len(bad_samples)\n if number_of_bad_samples > 0:\n LOGGER.warning(\"Some samples for patient query %s have invalid %i values\", patient_id, number_of_bad_samples)\n return samples", "def process_dicom_file_list(dicom_file_list, parent_sorting_field=\"PatientName\", verbose=False):\n dicom_series_dict_parent = {}\n\n for i, dicom_file in enumerate(sorted(dicom_file_list)):\n if verbose is True:\n logger.debug(\" Sorting file %d\", i)\n\n dicom_file = dicom_file.as_posix()\n\n if \"dicomdir\" in dicom_file.lower():\n logger.warning(\n \"DICOMDIR is not supported in this tool, images are read directly. 
Skipping.\"\n )\n continue\n\n dicom_object = pydicom.read_file(dicom_file, force=True)\n\n parent_sorting_field_data = dicom_object[parent_sorting_field].value\n\n if parent_sorting_field_data not in dicom_series_dict_parent.keys():\n dicom_series_dict_parent[parent_sorting_field_data] = {}\n\n series_uid = dicom_object.SeriesInstanceUID\n\n if series_uid not in dicom_series_dict_parent[parent_sorting_field_data].keys():\n dicom_series_dict_parent[parent_sorting_field_data][series_uid] = [dicom_file]\n\n else:\n dicom_series_dict_parent[parent_sorting_field_data][series_uid].append(dicom_file)\n\n return dicom_series_dict_parent", "def dicom_load():\n # Identify folders with EPI data\n dirs = [i for i in os.listdir(dcm_dir) if os.path.isdir(os.path.join(dcm_dir, i))]\n d_cnt = 0\n for d in dirs:\n dcm_file = os.path.join(dcm_dir,d,os.listdir(os.path.join(dcm_dir,d))[0])\n try:\n dcm_data = pydicom.dcmread(dcm_file)\n except:\n pass\n else:\n # If data is EPI then get start time, etc\n if 'EPI' in dcm_data.ImageType:\n dcm_dict[d_cnt] = {}\n dcm_dict[d_cnt]['dcm_file'] = dcm_file\n dcm_dict[d_cnt]['task_name'] = dcm_data.SeriesDescription\n dcm_dict[d_cnt]['task_name'] = dcm_dict[d_cnt]['task_name'].replace('_','-')\n date = dcm_data.SeriesDate\n start = dcm_data.SeriesTime\n start_time = '%s-%s-%s %s:%s:%s'%(date[0:4],date[4:6],date[6:],start[0:2],start[2:4],start[4:])\n dcm_dict[d_cnt]['start_time'] = datetime.fromisoformat(start_time)\n dcm_dict[d_cnt]['run_length'] = dcm_data[0x0019,0x105a].value/1000\n dcm_dict[d_cnt]['end_time'] = dcm_dict[d_cnt]['start_time'] + timedelta(milliseconds=dcm_dict[d_cnt]['run_length'])\n d_cnt = d_cnt+1", "def get_metadata(hf_patients_file, metadata_file, output_file):\n\n # Use 'dicom_id' as names for row indices\n hf_patients = pd.read_csv(hf_patients_file, sep=',', index_col=\"dicom_id\")\n\n # Use 'dicom' as name\n metadata = pd.read_csv(metadata_file, index_col=\"dicom\", dtype={\"StudyDate\": str, \"StudyTime\": str})\n\n # Disregard all columns except 'subject_id' and 'study_id'\n hf_patients = pd.concat([hf_patients['study_id'], hf_patients['subject_id']], axis=1)\n\n # Find study date/time for heart failure patients\n study_date = metadata[\"StudyDate\"][hf_patients.index]\n study_time = metadata[\"StudyTime\"][hf_patients.index]\n\n result = pd.concat([hf_patients, study_date, study_time], axis=1)\n result = result.rename(columns={\"StudyDate\": \"study_date\", \"StudyTime\": \"study_time\"})\n\n result.to_csv(output_file)", "def extract_notes(infile):\n\n # get patient ID\n subj_id = patient_id_from_file(infile)\n \n #get lab_events for this patient\n con = open_db()\n \n query = \\\n \"\"\"\n SELECT i.chartdate, i.charttime, i.description, i.category, i.text\n FROM noteevents i\n WHERE subject_id = {};\n \"\"\".format(subj_id)\n\n notes = pd.read_sql_query(query,con)\n \"\"\" change time stamp to seconds from origin \"\"\"\n \n origin = pd.to_datetime(wfdb.rdheader(infile).base_datetime)\n notes.insert(0, 'time', '')\n for idx, row in notes.iterrows():\n notes['time'].iloc[idx]=int((pd.to_datetime(row['charttime'])-origin).total_seconds())\n del notes['charttime']\n del notes['chartdate']\n\n return (notes)", "def extractSeriesInfo(self, inputdir):\n self.m_status.SetLabelText(\"Detecting DICOM data ... 
please wait\")\n allfiles = [y for x in walk(inputdir) for y in iglob(join(x[0], '*.IMA'))]\n self.controller.parseDicom(self, allfiles)\n # n = 1\n # for filename in allfiles:\n # try:\n # if not self.db.hasFile(filename):\n # dcm = dicom.read_file(filename)\n # updatemsg = \"Detecting DICOM data ... %d of %d\" % (n, len(allfiles))\n # self.m_status.SetLabelText(updatemsg)\n # n += 1\n #\n # # Check DICOM header info\n # series_num = str(dcm.SeriesInstanceUID)\n # uuid = self.generateuid(series_num)\n # imagetype = str(dcm.ImageType[2])\n # dicomdata = {'uuid': uuid,\n # 'patientid': str(dcm.PatientID),\n # 'patientname': str(dcm.PatientName),\n # 'seriesnum': series_num,\n # 'sequence': str(dcm.SequenceName),\n # 'protocol': str(dcm.ProtocolName),\n # 'imagetype': imagetype\n # }\n #\n # if not self.db.hasUuid(uuid):\n # self.db.addDicomdata(dicomdata)\n # if not self.db.hasFile(filename):\n # self.db.addDicomfile(uuid, filename)\n # except InvalidDicomError:\n # print(\"Not DICOM - skipping: \", filename)\n # continue\n # Load for selection\n # Columns: Toggle Select\n # Text PatientID\n # Text Sequence\n # Text Protocol\n # Text Image Type\n # Text Num Files\n # Text Series ID\n\n # for suid in db.getNewUuids():\n # numfiles = db.getNumberFiles(suid)\n # self.m_dataViewListCtrl1.AppendItem(\n # [True, self.controller.db.getDicomdata(suid, 'patientname'),\n # self.controller.db.getDicomdata(suid, 'sequence'),\n # self.controller.db.getDicomdata(suid, 'protocol'),\n # self.controller.db.getDicomdata(suid, 'imagetype'), str(numfiles),\n # self.controller.db.getDicomdata(suid, 'seriesnum')])\n #\n # msg = \"Total Series loaded: %d\" % self.m_dataViewListCtrl1.GetItemCount()\n # self.m_status.SetLabelText(msg)", "def recorded_timestamps(self):\n return sorted(self.reception_records.keys())", "def loadDicomsFromDatabase(self, dicomFiles):\n\n #--------------------\n # Create dictionary of downloaded DICOMS\n # for quick retrieval when comparing with files\n # in the slicer.dicomDatabase. Speed preferred over\n # memory consumption here.\n #-------------------- \n dlDicomObj = {}\n for dlFile in dicomFiles:\n dlDicomObj[os.path.basename(dlFile)] = dlFile\n\n\n \n #--------------------\n # Parse through the slicer.dicomDatabase\n # to get all of the files, as determined by series.\n #--------------------\n matchedDatabaseFiles = []\n for patient in slicer.dicomDatabase.patients():\n for study in slicer.dicomDatabase.studiesForPatient(patient):\n for series in slicer.dicomDatabase.seriesForStudy(study):\n seriesFiles = slicer.dicomDatabase.filesForSeries(series)\n #\n # Compare files in series with what was just downloaded.\n # If there's a match, append to 'matchedDatabaseFiles'.\n #\n for sFile in seriesFiles:\n if os.path.basename(sFile) in dlDicomObj: \n matchedDatabaseFiles.append(sFile)\n\n\n \n #--------------------\n # Acquire loadabes as determined by\n # the 'DICOMScalarVolumePlugin' class, by feeding in \n # 'matchedDatabaseFiles' as a nested array.\n #--------------------\n dicomScalarVolumePlugin = \\\n slicer.modules.dicomPlugins['DICOMScalarVolumePlugin']()\n loadables = dicomScalarVolumePlugin.examine([matchedDatabaseFiles])\n\n\n \n #--------------------\n # Determine loadable with the highest file count. 
\n # This is usually all DICOM files collated as one volume.\n #--------------------\n highestFileCount = 0\n highestFileCountIndex = 0\n for i in range(0, len(loadables)):\n if len(loadables[i].files) > highestFileCount:\n highestFileCount = len(loadables[i].files)\n highestFileCountIndex = i\n\n\n \n #--------------------\n # Load loadable with the highest file count.\n # This is assumed to be the volume file that contains\n # the majority of the downloaded DICOMS.\n #--------------------\n dicomScalarVolumePlugin.load(loadables[highestFileCountIndex])\n \n\n\n \n #--------------------\n # Return true if login successful.\n #-------------------- \n return True", "def _GetRefdat(self):\n for rfile in self.refdats.keys():\n# Get times for ref.dat files with a time-stamp.\n words = rfile.replace('.','_').split('_')\n if len(words) == 6 and words[-2].count(':') == 20:\n# This file was time-stamped by the sequence. Get the\n# date and time. file name format:\n# ref_Sep_9_2007_11:28:32.dat\n rtime[rfile] = hms_to_secs(words[-2])\n for pfile in self.pfiles:\n min_difftime = 1.e20\n self.info[pfile]['refdat'] = None\n for rfile in self.refdats.keys():\n if rfile[:3] == 'ref' and 'dat' in rfile:\n# This is a reference data file. First see if the orientation is\n# appended. If the file has neither a time-stamp nor a plane and\n# there is more than one ref.dat, the epi reconstruction will\n# be aborted.\n rinfo = {}\n ref_file = None\n if 'sag' in rfile and self.info[pfile]['plane'] == 'sagittal':\n# self.info[pfile]['refdat'] = rfile\n ref_file = rfile\n break\n elif 'cor' in rfile and self.info[pfile]['plane'] == 'coronal':\n# self.info[pfile]['refdat'] = rfile\n ref_file = rfile\n break\n elif 'axial' in rfile and self.info[pfile]['plane'] == 'axial':\n# self.info[pfile]['refdat'] = rfile\n ref_file = rfile\n break\n elif len(self.refdats.keys()) == 1:\n# Use the only one if that is all there is.\n ref_file = rfile\n epi_time = hms_to_secs(self.info[pfile]['acqtime'].split()[-2])\n if epi_time - rtime[rfile] < min_difftime and \\\n rftime[rfile] > epi_time:\n# Use the reference file that acquired nearest to the EPI\n# but before it.\n min_difftime = epi_time - rtime[rfile]\n# self.info[pfile]['refdat'] = rfile\n ref_file = rfile\n if ref_file:\n# Found a candidate.\n if not self.info[pfile]['refdat']:\n# Haven't found one yet, use it.\n self.info[pfile]['refdat'] = ref_file\n else:\n# Found two. 
Choose one in the same directory.\n oldpath = os.path.dirname(self.info[pfile]['refdat'])\n newpath = os.path.dirname(ref_file)\n pfile_path = os.path.dirname(pfile)\n if oldpath == newpath:\n# Same path, use the old one.\n self.info[pfile]['refdat'] = ref_file\n elif newpath == pfile_path:\n self.info[pfile]['refdat'] = ref_file\n# else Do nothing, use existing choice.\n elif not os.path.exists(rfile):\n self.info[pfile]['refdat'] = None\n elif os.stat(rfile).st_size > 0:\n# This path is taken if no info is encoded in the file name.\n# Don't use empty ref.dat files.\n self.info[pfile]['refdat'] = rfile", "def get_patient_cases(patient):\n # ----- Get database connection\n db = connect_to_db()\n try:\n c1 = db.cursor()\n try:\n c1.execute(\n \"\"\"SELECT tc.SLABEL \"\"\"\n \"\"\"FROM BOM.PATIENT pt \"\"\"\n \"\"\" INNER JOIN BOM.TCASE tc ON pt.SUID = tc.SPATIENTUID \"\"\"\n \"\"\"WHERE \"\"\"\n \"\"\" pt.SID = '%s' \"\"\" %\n patient)\n res = c1.fetchall()\n cases = []\n for re in res:\n cases.append(re[0])\n finally:\n c1.close()\n finally:\n db.close()\n return cases", "def _get_headers_by_study(\n files: Set[Path], file_errors: DefaultDict[Path, List[str]]\n):\n study_key_type = Tuple[str, ...]\n studies: Dict[study_key_type, Dict[str, Any]] = {}\n indices: Dict[str, Dict[study_key_type, int]] = {}\n\n for file in files:\n if not file.is_file():\n continue\n with file.open(\"rb\") as f:\n try:\n # Read header only, skip reading the pixel data for now\n ds = pydicom.dcmread(f, stop_before_pixels=True)\n\n # Group by series instance uid or by stack ID (for 4D images)\n # Additionally also group by SOP class UID to skip over extra\n # raw data (dose reports for example) that are sometimes stored\n # under the same series instance UID.\n key: study_key_type = (\n ds.StudyInstanceUID,\n getattr(ds, \"StackID\", ds.SeriesInstanceUID),\n ds.SOPClassUID,\n )\n\n studies[key] = studies.get(key, {})\n indices[ds.StudyInstanceUID] = indices.get(\n ds.StudyInstanceUID, {}\n )\n\n try:\n index = indices[ds.StudyInstanceUID][key]\n except KeyError:\n index = len(indices[ds.StudyInstanceUID])\n indices[ds.StudyInstanceUID][key] = index\n\n headers = studies[key].get(\"headers\", [])\n headers.append({\"file\": file, \"data\": ds})\n studies[key][\"headers\"] = headers\n\n # Since we might need to combine multiple images with different\n # series instance UID (in 4D images), we cannot use the series\n # as the unique file name - instead, we use the study instance\n # uid and a counter (index) per study\n studies[key][\"name\"] = f\"{ds.StudyInstanceUID}-{index}\"\n\n except Exception as e:\n file_errors[file].append(format_error(str(e)))\n\n return studies", "def get_subjects_info(data_folder, dataset_id, format=\"dict\"):\r\n subjects_info = {} # build of dictionnary of all session for each subject\r\n\r\n if dataset_id == \"raw_clean_32\":\r\n \"\"\" High Versus Low inhibitory Stimuli of Tinnitus and control patients\r\n \"\"\"\r\n patient = 2 # patient group (static for a given dataset)\r\n session = 9 # 6 = 1 old remplacer apres (session 'high')\r\n ses2 = 8 # (session 'low')\r\n names = os.listdir(os.path.join(data_folder, dataset_id, str(patient) + \"_\" + str(session)))\r\n names2 = os.listdir(os.path.join(data_folder, dataset_id, str(patient) + \"_\" + str(ses2)))\r\n\r\n pat = []\r\n pat2 = []\r\n for name in names:\r\n # print name.split('_')[0]\r\n pat.append(name.split('_')[0]) # all subjects ID from names\r\n for name in names2:\r\n # print name.split('_')[0]\r\n 
pat2.append(name.split('_')[0]) # all subjects ID from names2\r\n\r\n for name in names2:\r\n if pat.__contains__(name.split('_')[0]):\r\n if subjects_info.keys().__contains__(name.split('_')[0]):\r\n subjects_info[name.split('_')[0]].append(name) # add file to the list\r\n else:\r\n subjects_info[name.split('_')[0]] = [name] # add first file to the list\r\n for name in names:\r\n if pat2.__contains__(name.split('_')[0]):\r\n subjects_info[name.split('_')[0]].append(name)\r\n\r\n elif dataset_id == \"Distress2010\":\r\n \"\"\" High Versus Low Distress patients (1, 2, 3, 4 Distress)\r\n \"\"\"\r\n sub_high = 'high distress'\r\n sub_low = 'low distress'\r\n filenames = os.listdir(os.path.join(data_folder, dataset_id, sub_high)) + \\\r\n os.listdir(os.path.join(data_folder, dataset_id, sub_low))\r\n\r\n # get all subjects ID\r\n valid_id = [\"1\", \"2\", \"3\", \"4\"] # Distress group (file begin with)\r\n\r\n for filename in filenames:\r\n if filename[0] in valid_id:\r\n symptoms, subjectname = _sparse_info_from_file(filename.split(\".\")[0], separator=\"_\")\r\n symptoms.append({\"distress\": int(filename[0])})\r\n paradigm = \"rest\"\r\n session_info = {\"paradigm\": paradigm, \"symptoms\": symptoms}\r\n\r\n try:\r\n subjects_info[subjectname].update(\r\n {filename: session_info} # add new session\r\n )\r\n\r\n except KeyError:\r\n subjects_info[subjectname] = {filename: session_info} # create session`\r\n elif dataset_id == \"Tinnitus_EEG\":\r\n \"\"\" extended Distress2010 dataset with more than 310 patients\r\n \"\"\"\r\n filenames = os.listdir(os.path.join(data_folder, dataset_id))\r\n subjects_csv = pd.read_csv(os.path.join(data_folder, dataset_id,\"labels_name_cat_TQ_vas.csv\"),\r\n names=[\"session\", \"distress\", \"TQ\", \"VAS\"], index_col=\"session\")\r\n\r\n for filename in filenames:\r\n if filename.split(\".\")[1] == \"txt\":\r\n if np.any(subjects_csv.index.str.match(filename)):\r\n symptoms, subjectname = _sparse_info_from_file(filename.split(\".\")[0], separator=\"_\")\r\n distress_val = int(subjects_csv[subjects_csv.index.str.match(filename)][\"distress\"].values[0])\r\n TQ_val = int(subjects_csv[subjects_csv.index.str.match(filename)][\"TQ\"].values[0])\r\n VAS_val = int(subjects_csv[subjects_csv.index.str.match(filename)][\"VAS\"].values[0])\r\n\r\n symptoms.append({\"distress\": distress_val})\r\n paradigm = \"rest\"\r\n session_info = {\"paradigm\": paradigm, \"symptoms\": symptoms, \"TQ\": TQ_val, \"VAS\": VAS_val}\r\n\r\n try:\r\n subjects_info[subjectname].update(\r\n {filename: session_info} # add new session\r\n )\r\n except KeyError:\r\n subjects_info[subjectname] = {filename: session_info} # create session`\r\n else:\r\n print(\"file \" + filename + \" not listed in labels_name_cat_TQ_vas.csv, subject rejected\")\r\n\r\n elif dataset_id == \"NormativeDB\":\r\n \"\"\" Control subjects in resting state\r\n \"\"\"\r\n filenames = os.listdir(os.path.join(data_folder, dataset_id, \"clean-up\", \"M\")) + \\\r\n os.listdir(os.path.join(data_folder, dataset_id, \"clean-up\", \"F\"))\r\n\r\n # get all subjects ID\r\n valid_id = [\"1\", \"2\", \"3\", \"4\"] # Distress group (file begin with)\r\n\r\n for filename in filenames:\r\n if not (filename.split(\".\")[0][-2:] == \"EC\"): # remove eyes closed\r\n symptoms, subjectname = _sparse_info_from_file(filename.split(\".\")[0], separator=\"_\")\r\n symptoms.append(\"Control\")\r\n symptoms.append({\"distress\": int(0)})\r\n paradigm = \"rest\"\r\n session_info = {\"paradigm\": paradigm, \"symptoms\": symptoms, \"gender\": 
filename[2]}\r\n\r\n try:\r\n subjects_info[subjectname].update(\r\n {filename: session_info} # add new session\r\n )\r\n except KeyError:\r\n subjects_info[subjectname] = {filename: session_info} # create session\r\n\r\n else:\r\n print(\"get_subjects_info: unknown dataset\")\r\n if format == \"DataFrame\":\r\n subjects_info = _subjects_dict_to_pandas(subjects_info)\r\n\r\n return subjects_info", "def _find_valid_dicom_files(\n files: Set[Path], file_errors: DefaultDict[Path, List[str]]\n) -> List[DicomDataset]:\n studies = _get_headers_by_study(files=files, file_errors=file_errors)\n result = []\n for key in studies:\n headers = studies[key][\"headers\"]\n set_name = studies[key][\"name\"]\n if not headers:\n continue\n\n n_files = len(headers)\n n_time = len(\n {\n int(header[\"data\"].TemporalPositionIndex)\n for header in headers\n if \"TemporalPositionIndex\" in header[\"data\"]\n }\n )\n sop_class_uids = [header[\"data\"].SOPClassUID for header in headers]\n\n arbitrary_header = headers[0][\"data\"]\n try:\n n_slices_per_file = len(\n arbitrary_header.PerFrameFunctionalGroupsSequence\n )\n except AttributeError:\n n_slices_per_file = int(\n getattr(arbitrary_header, \"NumberOfFrames\", 1)\n )\n n_slices = n_files * n_slices_per_file\n\n if \"1.2.840.10008.5.1.4.1.1.77.1.6\" in sop_class_uids:\n for d in headers:\n file_errors[d[\"file\"]].append(\n format_error(\"WSI-DICOM not supported by DICOM builder\")\n )\n elif n_time < 2:\n # Not a 4d dicom file\n result.append(\n DicomDataset(\n name=set_name,\n headers=headers,\n n_time=None,\n n_slices=n_slices,\n n_slices_per_file=n_slices_per_file,\n )\n )\n elif len(headers) % n_time > 0:\n # Invalid 4d dicom file\n for d in headers:\n file_errors[d[\"file\"]].append(\n format_error(\"Number of slices per time point differs\")\n )\n else:\n # Valid 4d dicom file\n result.append(\n DicomDataset(\n name=set_name,\n headers=headers,\n n_time=n_time,\n n_slices=n_slices // n_time,\n n_slices_per_file=n_slices_per_file,\n )\n )\n\n del studies\n return result", "def sortByDate(inlist):\n\n seq = []\n for i, each in enumerate(inlist):\n # Lightly parse each flight (just reads the preamble)\n # Putting the last 3 returns of MISlightly into the _ junk var\n flight, _, _, _ = parseMISlightly(each)\n seq.append(flight.takeoff)\n\n # Sort by takeoff time (flight.takeoff is a datetime obj!)\n newseq = np.argsort(seq)\n\n return newseq", "def getFileList(self):\n sid = 86400 # change to 3600 for hour-by-hour\n uDays = range(sid*(int(self.uStart)/sid),sid+(sid*(int(self.uStop)/sid)),sid)\n fileList = []\n sep = os.path.sep\n for d in uDays:\n s = unixTimeToString(d)\n ymdPath = 'year' + s[0:4] + sep + 'month' + s[5:7] + sep + 'day' + s[8:10]\n dirname = self.basePath + sep + ymdPath + sep + self.sensor + sep + 'padhist'\n pattern = '*' + self.sensor + '_hstv*.mat'\n nameList = glob.glob1(dirname,pattern)\n for name in nameList:\n uTime = stringTimeToUnix(name[0:13] + '_00_00.000')\n if ( self.uStart <= uTime <= self.uStop ):\n #print 'IN: %s' % unixTimeToString(uTime)\n fileList.append(dirname + sep + name)\n fileList.sort()\n self.fileList = fileList", "def make_all_datetime(self):\n \n logging.info('\\n *** Running make_all_datetime ' )\n\n all_uniques = [] # storing a list with all the unique date_times \n which_k_in_dt = {} # list of avilable dataset for each unique date_time, so that when looping over the distinct date_times, only the proper dataset will be read and compared \n\n \"\"\" Loop over all the datasets \n k: name of the dataset\n v: 
list of file paths, eg 'era5_1':[filepath_1, filepath_2 ]\"\"\"\n\n for k,v in self.datasets.items() :\n self.unique_dates[k] = {}\n for F in v: \n self.unique_dates[k][F] = {}\n \n self.unique_dates[k][F]['indices'] = {} \n self.unique_dates[k][F]['index_offset_next'] = 0 # to be replaced later when slicing \n self.unique_dates[k][F]['index_offset'] = 0 # to be replaced later when slicing \n\n unique_dt = list(data[k][F]['recordtimestamp'])\n \n indices = list(data[k][F]['recordindex'])\n all_uniques += unique_dt # adding to the total unique date_times \n\n \"\"\" Loop over all the date_times of each dataset \"\"\"\n for dt, index_low, count in zip (unique_dt, indices, range(len(unique_dt)) ):\n\n if dt not in which_k_in_dt.keys():\n which_k_in_dt[dt] = {}\n if k not in which_k_in_dt[dt].keys():\n which_k_in_dt[dt][k] = [] \n if F not in which_k_in_dt[dt][k]:\n which_k_in_dt[dt][k].append(F)\n # at this point I have e.g. which_k_in_dt= {1990-01-01-12-00: {era5_1:[file1,file2] , ncar:[file3] } }\n\n self.unique_dates[k][F]['indices'][dt] = {}\n self.unique_dates[k][F]['indices'][dt]['low'] = index_low \n try:\n index_up = indices[ count + 1 ] # works until the last available recordindex\n except: \n index_up = max(indices)+1000000 # dummy large number \n\n self.unique_dates[k][F]['indices'][dt]['up'] = index_up\n self.unique_dates[k][F]['up_to_dt_slice'] = data[k][F]['min_date'] \n \n\n self.dataset_per_dt = which_k_in_dt \n self.merged_unique_dates = np.unique(np.array(all_uniques) ) # storing the set of *ALL* distinct dt values of all datasets and all files \n logging.debug('*** make_all_datetime finished ')", "def find_records():\r\n\r\n print(\"begin find records\")\r\n\r\n study_list = retrieve_ref('study_list')\r\n sensor_list = retrieve_ref('sensor_list')\r\n # sensor_unit_list = retrieve_ref('sensor_unit_list')\r\n\r\n for study in study_list:\r\n # print('study = ' + str(study))\r\n source_path = os.path.join(study, 'source')\r\n # print('source_path = ' + str(source_path))\r\n\r\n source_folders = os.listdir(source_path)\r\n # print(str(study) + ' source_folders = ')\r\n # print(source_folders)\r\n\r\n df_meta = pd.DataFrame()\r\n df_meta['source_path'] = source_folders\r\n save_meta(study, df_meta)\r\n record_to_summary(study, 'Records found', str(len(source_folders)))\r\n\r\n print(\"completed find records\")", "def get_obsdate():\n\n#\n#--- read sot data\n#\n f = open(sot_directory, 'r')\n data = [line.strip() for line in f.readlines()]\n f.close()\n\n obsid_list = []\n start_date = []\n index_date = []\n for ent in data:\n temp = re.split('\\^', ent)\n obsid = temp[1]\n#\n#--- check the data are valid\n#\n try:\n atemp = re.split('\\s+', temp[13])\n mon = atemp[0]\n date = atemp[1]\n year = atemp[2][2] + atemp[2][3]\n except:\n continue\n#\n#--- convert month in letter into digit\n#\n for i in range(0, 12):\n if mon == month_list[i]:\n mon = i + 1\n break\n#\n#--- two forms of starting date: 05/23/14 and 20140523\n#\n lmon = str(mon)\n if int(mon) < 10:\n lmon = '0' + lmon\n ldate = str(date)\n if int(date) < 10:\n ldate = '0' + ldate\n\n dline = lmon + '/' + ldate + '/' + year\n iline = atemp[2] + lmon + ldate\n\n obsid_list.append(int(obsid))\n start_date.append(dline)\n index_date.append(iline)\n\n return (obsid_list, start_date, index_date)", "def get_wave_dicoms(folder_name):\n dicom_list = glob.glob(folder_name + \"/*.dcm\")\n time_and_dicom = {}\n for a_dicom in dicom_list:\n dicom_data = pydicom.dcmread(a_dicom)\n if len(dicom_data[0x5400, 0x0100][0][0x5400, 
0x1010].value) > 10:\n # print(dicom_data[0x0008, 0x0018].value)\n if dicom_data[0x0008, 0x1010].value == \"H-SIM1\":\n direction = \"H\"\n else:\n direction = \"V\"\n time_and_dicom[a_dicom] = [dicom_data.AcquisitionTime,\n dicom_data[0x0008, 0x0018].value,\n direction]\n\n sorted_t_d = sorted(time_and_dicom.items(),\n key=lambda x: x[1],\n reverse=True)\n return sorted_t_d", "def getFileList(self):\n print 'getting fileList ...',\n sid = 86400 # change to 3600 for hour-by-hour\n uDays = range(sid*(int(self.uStart)/sid),sid+(sid*(int(self.uStop)/sid)),sid)\n fileList = []\n sep = os.path.sep\n for d in uDays:\n s = unixTimeToString(d)\n ymdPath = 'year' + s[0:4] + sep + 'month' + s[5:7] + sep + 'day' + s[8:10]\n dirname = self.basePath + sep + ymdPath + sep + self.subDir\n pattern = '*' + self.sensor\n nameList = glob.glob1(dirname,pattern)\n for name in nameList:\n ufStart = stringTimeToUnix(name[0:23])\n ufStop = stringTimeToUnix(name[24:47])\n if ( ufStart <= self.uStart <= ufStop ) or ( self.uStart <= ufStart <= self.uStop ) or ( ufStart <= self.uStop <= ufStop ):\n #print 'IN: %s' % unixTimeToString(uTime)\n fileList.append(dirname + sep + name)\n## else:\n## print 'OUT:\\n%s\\n%s\\n%s' % (unixTimeToString(ufStart),unixTimeToString(self.uStart),unixTimeToString(ufStop))\n fileList.sort()\n self.fileList = fileList\n print 'done'", "def get_all_patient_mri_ids(subjfolder):\n assert(os.path.exists(subjfolder))\n\n subj_ids = set()\n\n for ext in dicom_file_extensions:\n file_lst = []\n file_lst.extend(glob(os.path.join(subjfolder, '*', '*' + ext)))\n file_lst.extend(glob(os.path.join(subjfolder, '*' + ext)))\n\n if file_lst:\n for dcm_file in file_lst:\n plan = dicom.read_file(dcm_file)\n if hasattr(plan, 'PatientID'):\n if plan.PatientID is not None:\n subj_ids.add(plan.PatientID)\n return subj_ids", "def sortByReadership(self, user_uuids, doc_uuid) -> list:\n docs = {}\n for entry in self.records:\n if ((entry['event_type'] == 'pagereadtime') and (entry['visitor_uuid'] in user_uuids)):\n doc = entry['subject_doc_id']\n if(doc != doc_uuid):\n if(doc in docs):\n docs[doc] += int(entry['event_readtime'])\n else:\n docs[doc] = int(entry['event_readtime'])\n return list(sorted(docs.items(), key = operator.itemgetter(1), reverse = True))", "def group_dicom_files(dicom_paths, hdr_field='PatientID'):\n dicom_groups = defaultdict(list)\n try:\n for dcm in dicom_paths:\n hdr = dicom.read_file(dcm)\n group_key = getattr(hdr, hdr_field)\n dicom_groups[group_key].append(dcm)\n except KeyError as ke:\n raise KeyError('Error reading field {} from file {}.'.format(hdr_field, dcm)) from ke\n\n return dicom_groups", "def pet_dcm_keys_to_copy():\n return ['AcquisitionDate',\n 'AcquisitionTime',\n 'ActualFrameDuration',\n 'AccessionNumber',\n 'DecayCorrection',\n 'DecayCorrectionDateTime',\n 'DecayFactor',\n 'DoseCalibrationFactor',\n 'FrameOfReferenceUID',\n 'FrameReferenceTime',\n 'InstitutionName',\n 'ManufacturerModelName',\n 'OtherPatientIDs',\n 'PatientAge',\n 'PatientBirthDate',\n 'PatientID',\n 'PatientName',\n 'PatientPosition',\n 'PatientSex',\n 'PatientWeight',\n 'ProtocolName',\n 'RadiopharmaceuticalInformationSequence',\n 'RescaleType',\n 'SeriesDate',\n 'SeriesTime',\n 'StudyDate',\n 'StudyDescription',\n 'StudyID',\n 'StudyInstanceUID',\n 'StudyTime',\n 'Units']", "def _get_meas_times_from_db(self):\n meas_times = []\n if self._data['report_save_historical_instances_ind'] != 'Y':\n # for non historical reports take measurement time from saved dataset\n dataset = 
self._jfile.get_current_stored_dataset()\n try:\n meas_time = datetime.datetime.strptime(dataset['meas_time'], '%Y-%m-%d %H:%M:%S')\n except ValueError:\n raise Exception(\"Cannot unformat string %s to datetime\" % dataset['meas_time'])\n meas_times.append(meas_time)\n\n else:\n # for historical reports take measurement times from db datasets\n where_sql = ''\n where_sql_list = list()\n params = [self._id, self._segment_value_id]\n\n if self._process_dataset_ids:\n for dataset_id in self._process_dataset_ids:\n if type(dataset_id) == list:\n where_sql_list.append(\"(report_data_set_instance_id >= %s AND report_data_set_instance_id <= %s)\")\n if dataset_id[0] < dataset_id[1]:\n params.append(dataset_id[0])\n params.append(dataset_id[1])\n else:\n params.append(dataset_id[1])\n params.append(dataset_id[0])\n else:\n where_sql_list.append(\"report_data_set_instance_id = %s\")\n params.append(dataset_id)\n where_sql = ' AND (%s)' % ' OR '.join(where_sql_list)\n\n self._db.Query(\"\"\"SELECT measurement_time\n FROM report_data_set_instance\n WHERE\n `element_id`= %%s\n AND segment_value_id = %%s\n %s\n ORDER BY measurement_time ASC\"\"\" % where_sql, tuple(params))\n meas_times = [item['measurement_time'] for item in self._db.record]\n\n return meas_times", "def getSampleIDsFromStudy(self, study_id):\n try:\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('qiime_assets.get_sample_ids_from_study', [study_id, results])\n metadata_fields = []\n for row in results:\n metadata_fields.append(row[0])\n return metadata_fields\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False", "def user_created_id_array(self, uid):\n result = self.find({ pair_data.STATISTICS + '.' 
+ pair_data.CREATOR: self._uid2ref(uid) }, \n { pair_data.SEQUENCE: True } ).sort(pair_data.SEQUENCE, pymongo.ASCENDING)\n\n if result is not None:\n return [data[pair_data.SEQUENCE] for data in result]\n else:\n return []", "def task4(self) ->list:\n user_readTimes = {}\n for entry in self.records:\n if(entry['event_type'] == 'pagereadtime'):\n if (entry['visitor_uuid'] in user_readTimes):\n user_readTimes[entry['visitor_uuid']] += entry['event_readtime']\n else:\n user_readTimes[entry['visitor_uuid']] = entry['event_readtime']\n readTimes = list(sorted(user_readTimes.items(), key=operator.itemgetter(1), reverse = True))[0:10]\n for times in readTimes:\n print(times)\n return readTimes", "def getPadFiles(padPath, dateStart, dateStop, sensor, ext):\n if dateStart >= dateStop:\n raise 'why start after stop?'\n start = split(dateStart, '_')\n startS = float(start[-1])\n startY, startM, startD, startH, startN = map(int, start[:-1])\n stop = split(dateStop, '_')\n stopS = float(stop[-1])\n stopY, stopM, stopD, stopH, stopN = map(int, stop[:-1])\n y,m,d = prevDate(startY,startM,startD)\n result = ''\n #while y <= stopY and m <= stopM and d <= stopD: # does not handle begin month borders\n while (y,m,d) <= (stopY,stopM,stopD): \n # grab all sensor matching headers from each day ('ls' results are sorted)\n cmd = 'ls -1 %s/year%s/month%02d/day%02d/*/*%s%s' % (padPath, y, m, d, sensor, ext)#; print cmd\n cmdOutput = getoutput(cmd)\n if cmdOutput[-25:] != 'No such file or directory':\n result += cmdOutput + '\\n'#; print result\n y, m , d = nextDate(y, m , d)\n\n if result == '': return [],[],[] # no files to process\n\n # make sure all filenames are OK\n trimmed = split(result, '\\n')\n allLines = []\n for i in trimmed:\n if i != '':\n allLines.append(i)\n\n## print 'allLines[0] is ' + allLines[0]\n\n # keep files with data after dateStart & before dateStop\n padFiles = []\n for i in allLines:\n fname = split(i,'/')[-1] # toss path\n e = split(fname, '-')\n if len(e) == 1:\n e = split(fname, '+')\n if (e[1] >'%s.%s%s' % (dateStart, sensor, ext)) and (e[0] <= '%s.%s%s' % (dateStop, sensor, ext)):\n padFiles.append(i)\n \n # get number of dat columns\n dataColumns = 4 # default\n if sensor == u'oare' or sensor == u'ossraw':\n dataColumns = 6 # mams has temperature and status columns\n\n # get sample rate of first PAD header file\n if padFiles:\n if ext == '':\n sampleRate = float(parse(padFiles[0]+'.header').documentElement.getElementsByTagName('SampleRate')[0].childNodes[0].nodeValue)\n else:\n sampleRate = float(parse(padFiles[0]).documentElement.getElementsByTagName('SampleRate')[0].childNodes[0].nodeValue)\n return padFiles,sampleRate,dataColumns\n else:\n return [],[],[]" ]
[ "0.60390854", "0.5873085", "0.5861059", "0.5460806", "0.5381554", "0.53764325", "0.5292173", "0.52161545", "0.5200111", "0.51963425", "0.5172361", "0.516292", "0.5146686", "0.5142398", "0.51141894", "0.511357", "0.5112972", "0.510411", "0.5102937", "0.5091951", "0.5091913", "0.5089491", "0.5047538", "0.504414", "0.50173753", "0.50107783", "0.5003173", "0.5002454", "0.49787384", "0.49586287" ]
0.82577133
0
Check if a project/subject/session identifier is valid. Identifiers can only contain alphanumeric characters and underscores.
def _validate_identifier(self, identifier): for c in identifier: if c not in string.letters + string.digits + '_': return False return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def IsProjectIDValid(project):\n if len(project) < 6 or len(project) > 30:\n return False\n return bool(re.match('^[a-z][a-z0-9\\\\-]*[a-z0-9]$', project))", "def IsValidIdentifier(string):\n counter = 0\n if string in keyword.kwlist:\n feedback = (False, \"Invalid: can't use a keyword as your identifier!\")\n return feedback\n if not (string[0].isalpha() or string[0] == \"_\"):\n feedback = (False, \"Invalid: first character must be alphabetic or underscore!\")\n return feedback\n for letter in string[1:]:\n counter += 1\n if not (letter.isalnum() or letter == \"_\"):\n screen_out = \"Invalid: character '%s' at index %d!\" % (letter, counter)\n feedback = (False, screen_out)\n return feedback\n return (True, \"Valid!\")", "def _validate(self, s: str):\n if not re.match(r'[a-z][a-z0-9\\-]{5,29}', s):\n raise ValueError(('Invalid Google Cloud Platform Project ID \"{}\": '\n 'must be between 6 and 30 characters and contain '\n 'lowercase letters, digits or hyphens').format(s))", "def _validate(self, s: str):\n if not s.isidentifier():\n raise ValueError(('Invalid Django project name \"{}\": '\n 'must be a valid Python identifier').format(s))", "def _validate(self, s: str):\n if not s.isidentifier():\n raise ValueError(('Invalid Django project name \"{}\": '\n 'must be a valid Python identifier').format(s))", "def validName(varname):\r\n if (len(varname[0])>32):\r\n return False\r\n if not(varname[0][0].isalpha()):\r\n return False \r\n for ch in varname[0][1:]:\r\n if not(ch.isalpha() or ch.isdigit() or ch=='_'):\r\n return False\r\n \r\n return True", "def validate_identifier(identifier: str) -> bool:\n if identifier[:2] == 'NR':\n return True\n\n if len(identifier) < 9:\n return False\n\n try:\n d = int(identifier[-7:])\n if d == 0:\n return False\n except ValueError:\n return False\n # TODO This is not correct for entity types that are not Coops\n if identifier[:-7] not in ('CP', 'XCP', 'BC'):\n return False\n\n return True", "def is_valid_project_id(project_id):\n return re.match(r'^(google.com:)?[a-z0-9\\-]+$', project_id)", "def validate_identifier(self, identifier):\n pass", "def check_identifiers():\n if not PROJECT_SLUG.isidentifier():\n sys.exit(f\"project_slug='{PROJECT_SLUG}' is not a valid Python identifier.\")\n if not PROJECT_DIRNAME.isidentifier():\n sys.exit(\n f\"project_dirname='{PROJECT_DIRNAME}' is not a valid Python identifier.\"\n )", "def valid_serial_key(serial_key):\n parts = serial_key.split('-')\n if len(parts) != 5:\n return False\n\n for part in parts:\n if not re.match('[A-Z0-9]{5}$', part):\n return False\n\n return True", "def validVarConstructName(self,varname):\r\n if (len(varname[0])>32):\r\n return False, ''\r\n if not(varname[0][0].isalpha()):\r\n return False, '' \r\n for ch in varname[0][1:]:\r\n if not(ch.isalpha() or ch.isdigit() or ch=='_'):\r\n return False, ''\r\n \r\n return True, varname", "def _is_valid_varname(self, name):\n if name in RESERVED or re.match(r'^str([0-9]+|L)$', name): return False\n return True if VALID_NAME_RE.match(name) else False", "def verify_username(username):\n name_reg_exp = re.compile(r\"^[a-zA-Z0-9_-]{3,20}$\")\n return username and name_reg_exp.match(username)", "def _is_valid_varname(self, name):\n if name in RESERVED or re.match(r'^str[0-9]+$', name): return False\n return True if VALID_NAME_RE.match(name) else False", "def validname(name):\r\n return len(name)>0 and (\r\n Context.__invalid_character.search(name) is None)", "def valid_identifier_name(name):\n remove_characters_regex = '[^a-zA-Z0-9_]'\n name = 
re.sub(remove_characters_regex, '', name)\n # Remove beginning characters that are numbers\n name = re.sub('^[0-9]*', '', name)\n return name", "def id_check(employee_id):\r\n# badge_pattern = re.compile('[A-Za-z]{2}-\\d{4}')\r\n# re.search(badge_pattern, employee_id)\r\n\r\n # if statement\r\n if not re.match('[A-Z]{2}-\\d{4}', employee_id):\r\n print(employee_id, 'is not a valid ID.')", "def __validate_username(username) -> bool:\n if 8 < len(username) < 8:\n logging.critical(\"Incorrect username entered, username entered is -->{}\"\n .format(username))\n raise ValueError('Please enter Associate ID of eight characters')\n return True", "def _database_username_validate(s):\n if len(s) < 1 or len(s) > 63:\n raise ValueError('Database user name must be 1 to 63 characters long')\n if s[0] not in string.ascii_letters:\n raise ValueError('Database user name must start with a letter')\n allowed_characters = frozenset(string.ascii_letters + string.digits + '_')\n if frozenset(s).issuperset(allowed_characters):\n raise ValueError('Invalid character in database user name. Only '\n 'numbers, letters, and _ are acceptable.')", "def check_valid_key_name(name):\n if type(name) not in [str]:\n return False\n bad_chars = [\"*\", \".\", \"&&&&\"]\n for k in bad_chars:\n if k in name:\n return False\n return True", "def _validate(cls, pid_value):\n blop = re.compile('^[-\\w]+$')\n if not bool(blop.match(pid_value)):\n raise ValidationError(\n 'The ID should contain only letters with numbers or dashes.',\n field_name='id',\n )", "def _is_valid_keyspace_name(self, keyspace_name):\n if keyspace_name == None or not keyspace_name:\n return False\n return re.match(r\"^[a-z_]*[^-]$\", keyspace_name)", "def validate_crx_id(crx_id):\n try:\n assert isinstance(crx_id, str)\n assert crx_id.isalnum()\n assert len(crx_id) == 32\n except AssertionError:\n raise MalformedExtId", "def _check_special_token_identifier(key):\n if not (key.endswith('_token') and key != '_token'):\n raise ValueError('Each key needs to have the form \"name_token\".'\n ' Received {}'.format(key))", "def check_string( pname, use ):\n for l in pname:\n if l in string.letters: continue\n if l in string.digits : continue\n if l =='_' : continue\n print( \"your \"+use+\" (\" + pname + \") contains invalid characters, please choose another one!\" )\n return False\n return True", "def name_valid(name):\n return name.isalpha()", "def check_project_id(project_id):\n # Convert variable into a string\n project_id = str(project_id)\n # Replace Capital letters and spaces\n project_id = project_id.replace(\" \", \"-\").lower()\n\n # Throw an error if any known incorrect usages found\n try:\n if re.search(\"^-|[^a-z0-9-]|google|ssl|-$\", project_id):\n raise ValueError(\"Invalid characters or words in Project ID\")\n elif len(project_id) > 30:\n raise ValueError(\"Too many characters in Project ID\")\n elif len(project_id) < 6:\n raise ValueError(\"More Characters required in Project ID\")\n else:\n log.info(f\"Project Id {project_id} passed regex check\")\n project_outcome = {\n \"outcome\": True,\n \"project_id\": project_id\n }\n return project_outcome\n except ValueError as e:\n log.warning(f\"Proposed Id {project_id} violates known google policies: \"\n \"https://cloud.google.com/resource-manager/docs/creating-managing-projects\")\n project_outcome = {\n \"outcome\": False,\n \"project_id\": project_id\n }\n return project_outcome", "def check_token(token):\n valid = re.compile(r\"^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-\"\n 
r\"[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$\")\n\n return valid.match(token)", "def validate(prefix: str, identifier: str) -> Optional[bool]:\n pattern = get_pattern_re(prefix)\n if pattern is None:\n return None\n return bool(pattern.match(normalize_identifier(prefix, identifier)))" ]
[ "0.7817043", "0.7263203", "0.71657807", "0.6915533", "0.6915533", "0.6901704", "0.68005633", "0.6792642", "0.67050254", "0.6685971", "0.6454717", "0.64318484", "0.6427234", "0.64037365", "0.6375381", "0.6367689", "0.6350279", "0.6343211", "0.6312446", "0.63092995", "0.62879276", "0.6261444", "0.6255026", "0.6238283", "0.6217699", "0.62079406", "0.62072784", "0.61789465", "0.61751366", "0.61443573" ]
0.7744844
1
Normalizes USD price with thousand separator into float value
def normalize_price(price: str) -> float: return float(price.strip().replace(',', ''))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_price(str_val):\n return float(str_val.replace('.', '').replace(',', '.'))", "def clean_dollar_to_float(value):\n return (value.replace('$', '').replace(',', ''))", "def convert_currency(val):\n new_val = val.replace(',','').replace('$', '')\n return float(new_val)", "def clean_value(self, value):\n return float(value.replace('.', '').replace(',', '.'))", "def to_usd(price):\n return \"${0:,.2f}\".format(price)", "def to_usd(my_price):\n return \"${0:,.2f}\".format(my_price)", "def to_usd(my_price):\n return f\"${my_price:,.2f}\"", "def normalisePrice(raw):\n if raw:\n return Decimal(str(raw)).to_eng_string()\n else:\n return None", "def to_usd(my_price):\n return f\"${my_price:,.2f}\" #> $12,000.71", "def to_usd(my_price):\n return f\"${my_price:,.2f}\" #> $12,000.71", "def to_usd(my_price):\n return f\"${my_price:,.2f}\" #> $12,000.71", "def to_usd(my_price):\n return f\"${my_price:,.2f}\" #> $12,000.71", "def to_usd(my_price):\n return f\"${my_price:,.2f}\" #> $12,000.71", "def normalize_loan_amount(value):\n normalized_value = value.lower()\n if 'k' in normalized_value:\n normalized_value = normalized_value.replace('k', '000')\n normalized_value = normalized_value.replace('.', '')\n\n normalized_value = normalized_value.replace('$', '')\n normalized_value = normalized_value.replace(',', '')\n\n try: \n return Decimal(normalized_value)\n except: InvalidOperation\n \n return None", "def check_price(URL, headers):\n page = requests.get(URL, headers=headers)\n soup = BeautifulSoup(page.content, 'html.parser')\n price = soup.find(id=\"priceblock_ourprice\").get_text()\n converted_price = price[:-3]# -3 removes the .99 pence value from product\n float_price = ''\n for c in converted_price:\n if c.isdigit():\n float_price = float_price + c\n #loop that removes the £$,. 
from product so the string can convert to float correctly\n return float(float_price)", "def eur(value):\n float(value)\n return f\"€{value:,.2f}\"", "def to_us_number(number):\n number = number.replace('.', '')\n number = number.replace(',', '.')\n\n return float(number)", "def get_num_str(price):\n try:\n price = float(price.replace(u',', u'.'))\n except:\n price = float('nan')\n return price", "def format_usd(my_price):\n return f\"${my_price:,.2f}\"", "def clean_currency(x):\n \n if isinstance(x, str):\n x=x.replace(\"*\",\"\")\n x=x.replace(\",\",\"\")\n if x=='':\n return(0)\n elif x[0]!='$':\n return(0)\n else:\n x=x.split(' ')[0]\n x=x.replace('$',\"\")\n return float(x)\n return(x)", "def clean_currency(x: str):\n # cprint(f\"### Function Name:-> {inspect.stack()[0][3]} ###\", 'yellow', 'on_grey', attrs=['bold'])\n try:\n # x = str(x)\n if isinstance(x, str):\n if x.startswith(\"$\"):\n return x.replace('$', '').replace(',', '')\n # return float(x)\n return x\n except Exception as ex:\n cprint(traceback.format_exc(), 'red')\n log_exception(traceback.format_exc())", "def _fix_balance(self, balance):\n\n return float(balance.replace(',', '.').replace(' ', ''))", "def usd(value):\r\n return f\"${Decimal(value):,.2f}\"", "def format_as_usd(value):\n return f\"${value:,.2f}\"", "def convert_dollars(df,col_name):\n df[col_name] = df[col_name].replace('[^.0-9]','',regex=True).astype(float)\n return df", "def thousands(value):\n try:\n value = float(value)\n except ValueError:\n return value\n return f\"{value:,}\".replace(',',' ')", "def int2float(value_int, currency):\r\n if currency in \"BTC LTC NMC\":\r\n return value_int / 100000000.0\r\n elif currency in \"JPY SEK\":\r\n return value_int / 1000.0\r\n else:\r\n return value_int / 100000.0", "def test_normal_decimal_input(self):\r\n ws_leader = \"S. O'Neal (14.9)\"\r\n res = treat_input(ws_leader, type=\"float\")\r\n assert res == 14.9", "def obtain_amount(cls, amount_string):\n return float(string.replace(amount_string, ',', '.'))", "def convert_prices(price):\n return int(price.replace(\"$\", \"\").replace(\",\", \"\"))" ]
[ "0.72919416", "0.7071461", "0.6978202", "0.6896954", "0.68778527", "0.6861502", "0.668687", "0.66791004", "0.6679073", "0.6679073", "0.6679073", "0.6679073", "0.6679073", "0.6636291", "0.6459509", "0.64089173", "0.64083546", "0.63997513", "0.6395582", "0.6393109", "0.6388328", "0.63158053", "0.6277507", "0.6270148", "0.6220546", "0.61902374", "0.6186327", "0.6169518", "0.6137453", "0.60829353" ]
0.80026025
0
Reads the csv file. CSV file should contain ['Question', 'Answer'] columns. Remove NaN values. Throw error if format is bad or file does not exist
def parse_csv_file(self, csv_file: str):
    try:
        df = pd.read_csv(csv_file)
        if not set(['Question', 'Answer']).issubset(df.columns):
            raise BadCSVFile(
                "CSV file does not contain ['Question', 'Answer'] columns.")
        df.dropna(inplace=True)
    except Exception as e:
        raise BadCSVFile(
            "Error while reading the csv file. Please check the path of the file or the file might be corrupted.")
    return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_csv():", "def read_csv_file(self):\n pass", "def test_csv(self, input_file_path: str, answer_file_path: List[Dict]):\n with open(attach_path(answer_file_path), 'r') as answer_file:\n csv_file = open(attach_path(input_file_path))\n assert str(read_csv(csv_file)) == answer_file.read().strip()", "def read_csv(self, csv_input):\n # https://stackoverflow.com/a/45063514\n dtypes = {\n 'lat': 'U',\n 'long': 'U'\n }\n csv_data = pd.read_csv(csv_input, encoding='UTF-8', sep=',', na_values=[''], dtype=dtypes)\n\n self.table = csv_data.fillna('').applymap(lambda x: x.strip() if type(x) == str else x)\n self.log.info('Data read from CSV %s' % csv_input)\n #print('Data read from CSV %s' % csv_input)", "def import_data(csv_file):\n # skips bad lines\n data = pd.read_csv(csv_file, error_bad_lines=False)\n return data", "def place_types_read_csv(self, csv_input):\n csv_data = pd.read_csv(csv_input, encoding='UTF-8', sep=',', na_values=[''])\n self.table = csv_data.fillna('').applymap(lambda x: x.strip() if type(x) == str else x)\n self.log.info('Data read from CSV %s' % csv_input)", "def loadCSV(input_file):", "def open_convert_and_clean_csv(csv_data_file):\n imported_data = tablib.Dataset().load(open(csv_data_file).read())\n dataset = []\n for row in imported_data:\n if float(row[1]) > 0 and float(row[2]) > 0:\n dataset.append((row[0], float(row[1]), float(row[2])))\n return dataset", "def read_file(self, fn_input):\n if not os.path.exists(fn_input):\n raise Exception(\"ERROR: Input file does not exist: %s\" % fn_input)\n with open(fn_input, 'rt', newline='') as infile:\n reader = csv.reader(infile)\n self.data = []\n for row in reader:\n self.data.append([])\n for value in row:\n if value == 'nan':\n self.data[-1].append(None)\n else:\n try:\n self.data[-1].append(float(value))\n except:\n raise Exception(\"ERROR: unexpected text in input file: '%s'\" % str(value))", "def parse(file_name):\n \n return pd.read_csv(file_name, na_values = '---')", "def _read_csv(self):\n self.function_name = '_read_csv'\n with open(os.path.join(self.task.downloads, self.csv_name)) as csv_file:\n reader = csv.reader(csv_file, dialect='excel')\n for row in reader:\n self.input_data.append(row)", "def read_specific_problem(filename):\r\n table = []\r\n with open(filename, newline='') as csvfile:\r\n reader = csv.reader(csvfile, skipinitialspace=True, delimiter=',')\r\n for row in reader:\r\n table.append(row)\r\n return table", "def _read_csv(filename):\n loaded = pd.read_csv(filename, index_col=0)\n if len(loaded.columns) == 1:\n return pd.read_csv(filename, index_col=0, header=None)[1]\n else:\n return loaded", "def load_and_clean(self,in_path):\n in_path = Path(in_path)\n try:\n df = pd.read_csv(in_path, index_col = 0, parse_dates = True, infer_datetime_format = True)\n except:\n print(\"Could not read csv file. 
Please check the path\")\n finally:\n #attempt to clean df\n df.dropna(inplace = True)\n df.drop_duplicates(inplace = True)\n df.sort_index()\n return df", "def read_rf_csv():\n if os.path.exists(\"rf.csv\"):\n #print (\"--decision trees CSV imported\\n\")\n results = pd.read_csv(\"rf.csv\", index_col=0)\n else:\n print(\"log not found\")\n\n return results", "def read_data_from_csv(filename: str) -> pd.DataFrame:\n try:\n data = pd.read_csv(filename)\n return data\n except(FileNotFoundError):\n print('Error: Could not read the data from csv.')\n return None", "def import_csv_dataset():\n import_fields = pd.read_csv('redacted-2020-june-30-wprdc-.csv', header=None).to_numpy()[0, :]\n import_values = pd.read_csv('redacted-2020-june-30-wprdc-.csv').to_numpy()\n import_values = clean_values(import_values)\n return import_fields, import_values", "def csv_loader(csv_file):\n df = pd.read_csv(csv_file, sep=';', parse_dates=['Data_Alteraçao'])\n pd.set_option('display.float_format', '{:.0f}'.format)\n\n df = df.fillna(0)\n df = df.drop(columns=['Cod. Pareamento', 'Cod. UF', 'Sigla UF', 'Cod. Subarea',\n 'Nome Subarea', 'Cod. Municipio', 'Nome Municipio', 'Codigo Agencia',\n 'Nome Agencia', 'Cod. Setor', 'Cod. Logradouro CNEFE',\n 'Tipo Logradouro CNEFE', 'Titulo Logradouro CNEFE',\n 'Nome Logradouro CNEFE', 'Nome Tratado CNEFE', 'Tipo Logradouro DNE',\n 'Titulo Logradouro DNE', 'Nome Logradouro DNE', 'Nome Tratado DNE',\n 'Logradouro Completo DNE', 'Distancia', 'Cod. Match', 'Motivo Match',\n 'CEPs Face', 'Localidade Face',\n 'Alterar Logradouro para DNE?', 'Observaçao', 'SIAPE Alteração',\n 'Nome Alteraçao', 'Data_Alteraçao', 'Status', 'Unnamed: 33'])\n\n # df.astype({'CEP Logradouro CNEFE': 'int32'}).dtypes\n\n df['CEP'] = df['CEP'].str.replace(' ', '', regex=False)\n\n ceps_dne = []\n for index, row in df.iterrows():\n if type(row.CEP) == str:\n for cep in row.CEP.split(','):\n # print(index, cep)\n ceps_dne.append(int(cep))\n\n ceps_cnefe = df['CEP Logradouro CNEFE'].astype(int).tolist()\n ceps = ceps_dne + ceps_cnefe\n ceps = list(set(ceps))\n return pd.Series(ceps)", "def handle_csv(self):\n try:\n reader = csv.reader(open(self.options.datafile, 'r'))\n except IOError:\n errormsg(_('Cannot read \"{}\"'.format(self.options.datafile)))\n raise Exception(_('Cannot read \"{}\"'.format(self.options.datafile)))\n if self.options.var_type == 'name':\n try:\n self.header = reader.next()\n except StopIteration:\n errormsg(_('Data file \"{}\" contains no data'.format(\n self.options.datafile)))\n raise Exception(_('Data file \"{}\" contains no data'.format(\n self.options.datafile)))\n self.data = []\n for row in reader:\n self.data.append(row)", "def read_test_rf_csv():\n if os.path.exists(\"test_rf.csv\"):\n #print (\"--testing CSV imported\\n\")\n results = pd.read_csv(\"test_rf.csv\", index_col=0)\n else:\n print(\"log not found\")\n\n return results", "def _read_csv(input_file):\n with tf.gfile.Open(input_file, \"r\") as f:\n reader = csv.reader(f)\n lines = []\n for line in reader:\n lines.append(line)\n return lines[1:] # remove header", "def _read_input_csv(in_file):\n with open(in_file, \"rU\") as in_handle:\n reader = csv.reader(in_handle)\n reader.next() # header\n for line in reader:\n if line: # empty lines\n (fc_id, lane, sample_id, genome, barcode) = line[:5]\n yield fc_id, lane, sample_id, genome, barcode", "def test_misc_csv_read():\n r = csv_reader(\"../test/test.csv\")\n fields = r.hdr()\n data = str(fields)\n while True:\n row = r.row()\n if not row: break\n data += '\\n' + 
str(row)\n\n assert(data == \"\"\"\n['EVT_CODE*', 'EVT_DATE.DE', 'CODE', 'AGE', 'FRST', 'LST', 'SPEC', 'de.id']\n['tea', '2018/01/01', 'X', '35', 'PRE', 'WHO', 'BUG', '1']\n['coffee', '2018/05/05', 'X', '35', 'JAN,Z', 'WHO', 'FRG', '1']\n['water', '2018/01/01', 'Y', '35', 'TAN', 'POST', 'CAT', '2']\n \"\"\".strip())", "def read_csv_from_file(self, file_name):\r\n try:\r\n self.csv_df = pd.read_csv(file_name)\r\n except FileNotFoundError:\r\n print('File does not exists!')", "def parse_csv(csv_file):\n if os.path.isfile(csv_file) == True:\n num_lines = sum(1 for line in open(csv_file))\n if num_lines > 1:\n try:\n data = pd.read_csv(csv_file, index_col=False)\n data.insert(0, 'id', range(1, 1 + len(data)))\n return(data)\n except pd.parser.CParserError, err:\n message = \"Can't parse REDCap data. Check CSV file: \" + csv_file\n print(message)\n logging.critical(message)\n exit(3)\n else:\n message = \"CSV file does not contain data: \" + csv_file\n print(message)\n logging.warning(message)\n return(None)\n else:\n message = \"Can't read CSV file: \" + csv_file\n print(message)\n logging.critical(message)\n exit(4)", "def check_valid_csvformat(self, csv_path):\n with open(self.csv_path, \"rb+\") as file_obj:\n reader = csv.reader(file_obj, delimiter=',') # CSV DictReader object\n self.check_valid_csv_header(reader.next())\n self.check_valid_csv_data(reader.next())", "def read_data_from_csv(csv_file, header=None, **kwargs):\n if os.path.isabs(csv_file) == False:\n path_to_csv = os.path.join(csv_file)\n else:\n path_to_csv = csv_file\n row_list = []\n if \"field_sep\" not in kwargs.keys():\n field_sep = ','\n else:\n field_sep = kwargs.get(\"field_sep\")\n with open(path_to_csv, mode='r') as csv_file:\n csv_reader = csv.DictReader(csv_file, delimiter=field_sep, fieldnames=header)\n for record in csv_reader:\n if list(record.values())[0].startswith(\"#\") is not True:\n # IT'S A COMMENT IF IT STARTS WITH \"#\" \n # IF THIS IS YOUR HEADER ROW, SUPPLY A LIST OF COLUMN NAMES WHEN CALLING THE FUNCTION\n row_list.append(record)\n return row_list", "def read_data_for_question_2():\n df_T_fare_info = read_csv_file_data(\"../Data/T_Fare_info_Q2.csv\")\n df_T_fare_info.set_index('FlightId', inplace=True)\n df_T_flight_info = read_csv_file_data(\"../Data/T_Flight_info_Q2.csv\")\n df_T_flight_info.set_index('FlightId', inplace=True)\n df_T_fare_info.dropna(inplace=True)\n df_T_flight_info.dropna(inplace=True)\n return df_T_fare_info, df_T_flight_info", "def read_csv(file_path, has_header = True):\n with open(file_path) as f:\n if has_header: f.readline()\n data = []\n target =[]\n for line in f:\n line = line.strip().split(\",\")\n data.append([float(x) for x in line[:-1]])\n target.append([line[-1]])\n return data, target", "def ReadData( fileName ):\n \n # define column names\n colNames = ['agency_cd', 'site_no', 'Date', 'Discharge', 'Quality']\n\n # open and read the file\n DataDF = pd.read_csv(fileName, header=1, names=colNames, \n delimiter=r\"\\s+\",parse_dates=[2], comment='#',\n na_values=['Eqp'])\n DataDF = DataDF.set_index('Date')\n \n # quantify the number of missing values\n MissingValues = DataDF[\"Discharge\"].isna().sum()\n \n ## Remove invalid streamflow data\n DataDF.Discharge[(DataDF['Discharge']<0)]=np.nan\n \n return( DataDF, MissingValues )" ]
[ "0.7320947", "0.6934107", "0.6727932", "0.6726128", "0.6720706", "0.6720139", "0.67085135", "0.66700244", "0.65878505", "0.6561937", "0.64199805", "0.64198834", "0.6413396", "0.6406278", "0.64025426", "0.63658756", "0.63533485", "0.63505536", "0.63503546", "0.63334334", "0.63267404", "0.631348", "0.6250852", "0.6246433", "0.6237337", "0.62360805", "0.62081647", "0.6188999", "0.61843365", "0.61813664" ]
0.77133363
0
Returns a vector for a given query
def get_vector(self, query: list):
    if len(query) == 0:
        raise BadQueryParameter("Query (list) can not be empty.")
    return self.vectorizer.transform(query)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _to_full_vector(self, query_vector: List[Tuple[str, float]]) -> np.array:\n terms = list(self.index.get_terms())\n terms.sort()\n vector = np.zeros(len(terms))\n\n for (term, weight) in query_vector:\n index = terms.index(term)\n vector[index] = weight\n\n return vector", "def transform_query(vectorizer_model, query):\n x_request = vectorizer_model.transform(query)\n x0 = x_request.toarray()\n return x0", "def _get_query_representation(self, query, index):\n term_frequencies = {term: query.count(term) for term in query}\n vec = np.zeros(shape=(index.num_terms,), dtype=np.float64)\n for i, term in enumerate(sorted(index.get_index_terms())):\n vec[i] = self._tfidf(\n term_frequencies.get(term, 0),\n index.get_document_frequency(term),\n index.get_document_count()\n )\n return vec", "def calcQueryVector(self):\n query = input(\"Query: \");\n ana = StemmingAnalyzer() ### lowercases, stems, ignores stopwords\n tokens = [token.text for token in ana(query)]\n\n queryVector = {}\n for token in tokens:\n if token in self.invertedIndex.keys():\n if token in queryVector.keys():\n queryVector[token]+=1;\n else:\n queryVector[token] = 1;\n\n return self.normalizeQueryVector(queryVector);", "def AsVector(self) -> ngsolve.la.BaseVector:", "def result_to_vector(results):\n return [vectorized_result(x) for x in results]", "def generate_query_vector(q, q_dict, inv_index):\n # Create the query vector\n query_vector = dict(Counter(q_dict[q]))\n\n # Add to this query vector, all the indexed terms\n for i_term in inv_index:\n if i_term not in query_vector:\n query_vector[i_term] = 0\n\n return query_vector", "def buildQueryVector(self, termList):\n\t\tquery = self.createVector(\" \".join(termList))\n\t\treturn query", "def vector(self):\n return self.q[1:4]", "def query_to_word_vector(query_string, corpus):\n inv_index = vsm_retrieval.get_inverted_index(corpus)\n word_vec = np.zeros(len(inv_index))\n query_word_list = vsm_retrieval.convert_query(query_string)\n for count_vec, word in enumerate(inv_index):\n if word in query_word_list:\n word_vec[count_vec] = 1\n return word_vec", "def get_vector(self, word):\n string = \"SELECT * FROM Vectors WHERE name=?\"\n params = (word,)\n self.cur.execute(string, params)\n raw_vector = self.cur.fetchone()\n if raw_vector is None:\n raise KeyError(\"Vector not found\")\n else:\n vector = pickle.loads(raw_vector[1])\n return vector", "def find(self, query: str) -> TsInfoVector:\n info = parse_ts_query(ts_query=query)\n domain = self.create_netatmo_connection()[1]\n meas = domain.get_measurement(**info)\n\n # noinspection PyArgumentList\n tsi = TsInfo(\n name=meas.ts_id,\n point_fx=meas.data_type.point_interpretation,\n delta_t=np.nan,\n olson_tz_id=meas.station.place['timezone'],\n data_period=UtcPeriod(meas.module.last_setup, meas.module.last_seen),\n created=meas.module.last_setup,\n modified=meas.module.last_seen\n )\n\n # noinspection PyArgumentList\n tsiv = TsInfoVector()\n tsiv.append(tsi)\n return tsiv", "def __call__(self):\n return self._vector", "def AsVector(self) -> BaseVector:", "def vector_q(q_1: Q) -> Q:\n\n end_q_type = f\"vector_q({q_1.q_type})\"\n\n v = Q(\n [0, q_1.x, q_1.y, q_1.z],\n q_type=end_q_type,\n representation=q_1.representation,\n )\n return v", "def create_query_vector(ix, term_dict, bow):\n\n\tqfv = list()\n\tfor idx, tf in bow:\n\t\t# get term from dict index\n\t\tterm = ix[idx]\n\t\t# filter out terms not contained in self.term_dict\n\t\tif term not in term_dict:\n\t\t\tcontinue\n\t\t# append term w/ tf to tfv\n\t\tqfv.append((term, 
tf))\n\treturn scale_to_L1_norm(defaultdict(float, qfv))", "def call_single_vec(self, input_value):\n _, eigVectors = self.getEigen(input_value)\n return eigVectors[:,:,-1]", "def query(_from, _select, _geomselect=None, _where=None, _groupby=None, _limit=None):\n # INSTEAD MAKE INTO CLASS\n # WITH .fields attr\n # AND .__iter__()\n # AND .get_vectordata()\n # AND MAKE EACH YIELDED ROW A VECTOR FEATURE CLASS\n # THIS WAY ALLOWING CHAINED QUERIES\n\n # parse args\n iterables = _from\n columnfuncs = _select\n geomfunc = _geomselect\n condition = _where\n key = _groupby\n n = _limit\n \n # first yield header as list of column names\n colnames = [each[0] for each in columnfuncs]\n yield colnames\n\n # make an iterable that yields every combinaion of all input iterables' items\n if len(iterables) == 1:\n iterable = iterables[0]\n else:\n iterable = itertools.product(*iterables)\n\n # iterate and add\n if key:\n groups = groupby(iterable, key)\n\n # limit\n if n:\n groups = limit(groups, n)\n \n for items in groups:\n # filter\n if condition:\n items = where(items, condition)\n \n # aggregate\n # NOTE: columnfuncs and geomfunc must expect an iterable as input and return a single row,geom pair\n item = aggreg(items, columnfuncs, geomfunc)\n yield item\n \n else:\n # filter\n if condition:\n iterable = where(iterable, condition)\n\n # limit\n if n:\n iterable = limit(iterable, n)\n\n # select\n for item in select(iterable, columnfuncs, geomfunc):\n yield item", "def generate_vector(self,dim=0,v=None):\n vec = dl.Vector()\n self.init_vector(vec,dim)\n if v is not None:\n vec[:]=v\n return vec", "def query_and_bundle(session, fields, offset, limit, filter_):\n q = session.query(*fields) # raw query\n q = q.offset(offset) if filter_ is None else q.filter(filter_) # filter / offset\n ids, vectors = zip(*q.limit(limit)) # unravel results\n # bundle into arrays\n _ids = np.array(ids, dtype=STR_TYPE)\n _str_vectors = [json.loads(vector) for vector in vectors]\n _vectors = np.array(_str_vectors, dtype=FLOAT_TYPE)\n return _ids, _vectors", "def get_vector(self,term):\n return self.dict.get(term)", "def take_vec(self):\n vec = aux.vec(self.numbers)\n\n return vec", "def evaluate_as_vector(self, chain_state): \n def vector_representation(n, ordering, it):\n return self.mapping.subspace(zip(ordering,it))\n return self._evaluate(vector_representation, chain_state)", "def to_vec(self):\n column_count = self.column_count\n vector = Matrix(size=(self.row_count * column_count, 1))\n for key, value in self.data.items():\n row, column = key[:2]\n subscript = (column * column_count + row, 0)\n vector.set(subscript, value)\n return vector", "def vector_from_matrix(v_as_matrix):\r\n return [row[0] for row in v_as_matrix]", "def as_vector(self):\n return self.pdm.as_vector()", "def vec(self):\n return np.matrix(self.val.ravel()).transpose()", "def query(self, query):", "def _get_query_vec(self, inputs):\n if self._flags.model_type == \"bilstm_net\":\n network = self.bilstm_net\n elif self._flags.model_type == \"bow_net\":\n network = self.bow_net\n elif self._flags.model_type == \"cnn_net\":\n network = self.cnn_net\n elif self._flags.model_type == \"lstm_net\":\n network = self.lstm_net\n elif self._flags.model_type == \"gru_net\":\n network = self.gru_net\n else:\n raise ValueError(\"Unknown network type!\")\n\n prefix_letter_pool = network(inputs[\"prefix_letter_id\"],\n \"wordid_embedding\",\n self._flags.vocab_size,\n self._flags.emb_dim,\n hid_dim=self.hid_dim,\n fc_dim=0,\n emb_lr=self._flags.emb_lr)\n if 
isinstance(prefix_letter_pool, list):\n #max-pooling\n prefix_pool = _parse_raw_att(prefix_letter_pool, self, 'prefix')\n else:\n prefix_pool = [prefix_letter_pool]\n\n if self._flags.prefix_word_id:\n prefix_word_pool = network(inputs[\"prefix_word_id\"],\n \"wordid_embedding\",\n self._flags.vocab_size,\n self._flags.emb_dim,\n hid_dim=self.hid_dim,\n fc_dim=0,\n emb_lr=self._flags.emb_lr)\n if isinstance(prefix_word_pool, list):\n #max-pooling\n prefix_word_raw, prefix_word_att = _parse_raw_att(prefix_word_pool, self, 'prefix')\n prefix_pool[0] = fluid.layers.concat([prefix_pool[0], prefix_word_raw], axis=1)\n prefix_pool[1] = fluid.layers.concat([prefix_pool[1], prefix_word_att], axis=1)\n else:\n prefix_pool[0] = fluid.layers.concat([prefix_pool[0], prefix_word_pool], axis=1)\n\n prefix_vec = mlp_pool(prefix_pool, self._flags.prefix_mlp.split(','), self.hid_dim)\n #vector layer\n #fluid.layers.Print(inputs[\"prefix_letter_id\"])\n #fluid.layers.Print(inputs[\"prefix_word_id\"])\n #fluid.layers.Print(prefix_vec)\n loc_vec = None\n if self._flags.use_geohash:\n loc_vec = fluid.layers.reshape(fluid.layers.cast(x=inputs['prefix_loc_geoid'],\n dtype=\"float32\"), [-1, 40])\n loc_vec = fluid.layers.fc(input=loc_vec, size=self.hid_dim, act=\"leaky_relu\", \n param_attr=fluid.ParamAttr(name='loc_fc_weight'),\n bias_attr=fluid.ParamAttr(name='loc_fc_bias'))\n\n # day_vec = fluid.layers.reshape(fluid.layers.cast(x=inputs['day_id'],\n # dtype=\"float32\"), [-1, 14])\n # day_vec = fluid.layers.fc(input=loc_vec, size=self.hid_dim, act=\"leaky_relu\", \n # param_attr=fluid.ParamAttr(name='day_weight'),\n # bias_attr=fluid.ParamAttr(name='day_bias'))\n \n context_pool = fluid.layers.concat([prefix_vec, loc_vec], axis=1) if loc_vec is not None else prefix_vec\n context_vec = fluid.layers.fc(input=context_pool, size=self._flags.fc_dim, act=self._flags.activate,\n param_attr=fluid.ParamAttr(name='context_fc_weight'),\n bias_attr=fluid.ParamAttr(name='context_fc_bias'))\n return context_vec, context_pool", "def query_vectorize(self, query_words: List) -> np.ndarray:\n weights = np.zeros(shape=self.num_terms)\n terms = set(query_words)\n\n for term in terms:\n if term not in self.vector_mapping:\n continue\n else:\n index = self.vector_mapping[term]\n weights[index] = self.tf_idf_weight_query(term, query_words)\n\n return weights" ]
[ "0.710231", "0.6852143", "0.6806123", "0.6790017", "0.65516216", "0.6484472", "0.6346682", "0.62224436", "0.62009585", "0.61882883", "0.6144658", "0.6134512", "0.61188674", "0.6103813", "0.6086374", "0.6070219", "0.60688305", "0.5987167", "0.59024227", "0.5888981", "0.5858321", "0.5857002", "0.58434856", "0.5775602", "0.57752436", "0.57535666", "0.57353854", "0.57132566", "0.57094085", "0.5704476" ]
0.7983144
0
Groups data in SimulationReports by the value of alpha or gamma2
def group_data(simulation_reports: List[SimulationReport]) -> Dict[float, SimulationTable]:
    heat_maps: OrderedDict[float, SimulationTable] = OrderedDict()
    for report in simulation_reports:
        if report.param not in heat_maps:
            param_name = "alpha" if report.growth_type == GrowthType.Polynomial else "gamma2"
            simulation_table = heat_maps.setdefault(
                report.param,
                SimulationTable(report.growth_type, param_name, report.param, OrderedDict()),
            )
        else:
            simulation_table = heat_maps[report.param]
        errors_by_prefix = simulation_table.errors.setdefault(report.prefix_length, [])
        errors_by_prefix.append((report.b0, report.error))
    return heat_maps
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_data_gamma_(idx2_daid, wx2_rvecs, wx2_aids, wx2_idf,\n alpha=3, thresh=0):\n if utool.DEBUG2:\n from ibeis.model.hots.smk import smk_debug\n smk_debug.rrr()\n smk_debug.check_wx2(wx2_rvecs=wx2_rvecs, wx2_aids=wx2_aids)\n wx_sublist = pdh.ensure_values(pdh.ensure_index(wx2_rvecs))\n if utool.VERBOSE:\n print('[smk_index] Compute Gamma alpha=%r, thresh=%r: ' % (alpha, thresh))\n mark1, end1_ = utool.log_progress(\n '[smk_index] Gamma group (by word): ', len(wx_sublist),\n flushfreq=100, writefreq=50, with_totaltime=True)\n # Get list of aids and rvecs w.r.t. words\n aids_list = pdh.ensure_values_subset(wx2_aids, wx_sublist)\n rvecs_list1 = pdh.ensure_values_subset(wx2_rvecs, wx_sublist)\n # Group by daids first and then by word index\n daid2_wx2_drvecs = utool.ddict(lambda: utool.ddict(list))\n for wx, aids, rvecs in zip(wx_sublist, aids_list, rvecs_list1):\n group_aids, groupxs = clustertool.group_indicies(aids)\n rvecs_group = clustertool.apply_grouping(rvecs, groupxs) # 2.9 ms\n for aid, rvecs_ in zip(group_aids, rvecs_group):\n daid2_wx2_drvecs[aid][wx] = rvecs_\n\n if utool.VERBOSE:\n end1_()\n\n # For every daid, compute its gamma using pregrouped rvecs\n # Summation over words for each aid\n if utool.VERBOSE:\n mark2, end2_ = utool.log_progress(\n '[smk_index] Gamma Sum (over daid): ', len(daid2_wx2_drvecs),\n flushfreq=100, writefreq=25, with_totaltime=True)\n # Get lists w.r.t daids\n aid_list = list(daid2_wx2_drvecs.keys())\n # list of mappings from words to rvecs foreach daid\n # [wx2_aidrvecs_1, ..., wx2_aidrvecs_nDaids,]\n _wx2_aidrvecs_list = list(daid2_wx2_drvecs.values())\n _aidwxs_iter = (list(wx2_aidrvecs.keys()) for wx2_aidrvecs in _wx2_aidrvecs_list)\n aidrvecs_list = [list(wx2_aidrvecs.values()) for wx2_aidrvecs in _wx2_aidrvecs_list]\n aididf_list = [[wx2_idf[wx] for wx in aidwxs] for aidwxs in _aidwxs_iter]\n\n #gamma_list = []\n if utool.DEBUG2:\n try:\n for count, (idf_list, rvecs_list) in enumerate(zip(aididf_list, aidrvecs_list)):\n assert len(idf_list) == len(rvecs_list), 'one list for each word'\n #gamma = smk_core.gamma_summation2(rvecs_list, idf_list, alpha, thresh)\n except Exception as ex:\n utool.printex(ex)\n utool.embed()\n raise\n gamma_list = [smk_core.gamma_summation2(rvecs_list, idf_list, alpha, thresh)\n for idf_list, rvecs_list in zip(aididf_list, aidrvecs_list)]\n\n if WITH_PANDAS:\n daid2_gamma = pdh.IntSeries(gamma_list, index=aid_list, name='gamma')\n else:\n daid2_gamma = dict(zip(aid_list, gamma_list))\n if utool.VERBOSE:\n end2_()\n\n return daid2_gamma", "def metrics_group():", "def test_add_alpha_diversity_values_to_mapping_file(self):\r\n\r\n # regular case no special cases for avg method\r\n expected_mapping_file_data = MAPPING_FILE_DATA_WITH_ALPHA_A\r\n expected_mapping_file_headers = ['SampleID', 'BarcodeSequence',\r\n 'LinkerPrimerSequence', 'Treatment', 'DOB', 'Description',\r\n 'chao1_alpha', 'chao1_normalized_alpha', 'chao1_alpha_label',\r\n 'PD_whole_tree_alpha', 'PD_whole_tree_normalized_alpha',\r\n 'PD_whole_tree_alpha_label']\r\n\r\n out_mapping_file_data, out_mapping_file_headers =\\\r\n add_alpha_diversity_values_to_mapping_file(self.metrics,\r\n self.sample_ids, self.alpha_diversity_data,\r\n self.mapping_file_headers, self.mapping_file_data, 4, 'equal')\r\n\r\n self.assertEquals(out_mapping_file_data, expected_mapping_file_data)\r\n self.assertEquals(\r\n out_mapping_file_headers,\r\n expected_mapping_file_headers)\r\n\r\n # regular case no special cases for quantile method\r\n expected_mapping_file_data = 
MAPPING_FILE_DATA_WITH_ALPHA_B\r\n out_mapping_file_data, out_mapping_file_headers =\\\r\n add_alpha_diversity_values_to_mapping_file(self.metrics,\r\n self.sample_ids, self.alpha_diversity_data,\r\n self.mapping_file_headers, self.mapping_file_data, 4, 'quantile')\r\n\r\n self.assertEquals(out_mapping_file_data, expected_mapping_file_data)\r\n self.assertEquals(\r\n out_mapping_file_headers,\r\n expected_mapping_file_headers)", "def gamma_h_subgroups(self):\n from .all import GammaH\n N = self.level()\n R = IntegerModRing(N)\n return [GammaH(N, H) for H in R.multiplicative_subgroups()]", "def data_grouping(self):\n group_container, film_container, plank_container = [[] for a in range(self.tot_conditions)], \\\n [[] for a in range(self.tot_conditions)], \\\n [[] for a in range(self.tot_conditions)]\n\n for i in self.data_labels:\n group = int(i[:-1])\n group_container[group - 1].append(i)\n film_container[group - 1].append(self.film_count[self.data_labels.index(i)])\n plank_container[group - 1].append(self.plank_count[self.data_labels.index(i)])\n\n return group_container, film_container, plank_container", "def test_group(self):\n obs_group, obs_nogroup = group(self.seqstruct, 0.75)\n exp_group = {'cluster_337': ['cluster_343', 'cluster_345',\n 'cluster_339'],\n 'cluster_347': ['cluster_338'],\n 'cluster_344': ['cluster_340']}\n exp_nogroup = [self.seqstruct[6], self.seqstruct[8]]\n\n self.assertEqual(obs_group, exp_group)\n self.assertEqual(obs_nogroup, exp_nogroup)", "def data_group():\n ...", "def test_group_by_hardware_info(self):\n self._test_group_by('Hardware Info', [1, 1, 2, 1, 1])", "def data_for_grouping() -> NoReturn:\n raise NotImplementedError", "def data_for_grouping() -> NoReturn:\n raise NotImplementedError", "def group_data_by_gs(data_table):\n gene_data = collections.defaultdict(lambda: collections.defaultdict(list))\n for _idx, row in data_table.iterrows():\n samp = row['sample']\n gene = row['gene']\n gene_data[gene][samp].append({\n 'muttype': row['type'].strip(),\n 'normalized': row['Normalized'], # NMAF in the manuscript\n 'consequence': row['MissenseConsequence'].strip(),\n })\n return gene_data", "def divisor_subgroups(self):\n return [Gamma0_constructor(M) for M in self.level().divisors()]", "def get_filter_stats(data: AnnData) -> Tuple[pd.DataFrame, pd.DataFrame]:\n\n # cell stats\n gb1 = data.obs.groupby(\"Channel\")\n df_before = gb1.median()\n df_before = df_before.assign(total=gb1.size())\n df_before.rename(\n columns={\n \"n_genes\": \"median_n_genes_before\",\n \"n_counts\": \"median_n_umis_before\",\n \"percent_mito\": \"median_percent_mito_before\",\n },\n inplace=True,\n )\n\n data = data[data.obs[\"passed_qc\"]] # focusing only on filtered cells\n\n gb2 = data.obs.groupby(\"Channel\")\n df_after = gb2.median()\n df_after = df_after.assign(kept=gb2.size())\n df_after.rename(\n columns={\n \"n_genes\": \"median_n_genes\",\n \"n_counts\": \"median_n_umis\",\n \"percent_mito\": \"median_percent_mito\",\n },\n inplace=True,\n )\n df_cells = pd.concat((df_before, df_after), axis=1, sort=False)\n df_cells.fillna(0, inplace=True)\n df_cells[\"kept\"] = df_cells[\"kept\"].astype(int)\n df_cells[\"filt\"] = df_cells[\"total\"] - df_cells[\"kept\"]\n df_cells = df_cells[\n [\n \"kept\",\n \"median_n_genes\",\n \"median_n_umis\",\n \"median_percent_mito\",\n \"filt\",\n \"total\",\n \"median_n_genes_before\",\n \"median_n_umis_before\",\n \"median_percent_mito_before\",\n ]\n ]\n df_cells.sort_values(\"kept\", inplace=True)\n\n # gene stats\n idx = 
data.var[\"robust\"] == False\n df_genes = pd.DataFrame(\n {\n \"n_cells\": data.var.loc[idx, \"n_cells\"],\n \"percent_cells\": data.var.loc[idx, \"percent_cells\"],\n }\n )\n df_genes.index.name = \"gene\"\n df_genes.sort_values(\"n_cells\", ascending=False, inplace=True)\n\n return df_cells, df_genes", "def group_mae(outputs: torch.Tensor, targets: torch.Tensor) -> List[Tuple[int, int, int, float, str]]:\n # groups = [\n # (-1, 1800, \"0-0.5h\"),\n # (1800, 3600, \"0.5-1h\"),\n # (3600, 7200, \"1-2h\"),\n # (7200, 10800, \"2-3h\"),\n # (10800, 14400, \"3-4h\"),\n # (14400, 18000, \"4-5h\"),\n # (18000, 21600, \"5-6h\"),\n # (21600, 25200, \"6-7h\"),\n # (25200, 28800, \"7-8h\"),\n # (28800, 32400, \"8-9h\"),\n # (32400, 36000, \"9-10h\"),\n # (36000, 39600, \"10-11h\"),\n # (39600, 43200, \"11-12\"),\n # (43200, 86400, \"12h - 1 day\"),\n # (86400, 172800, \"1 day - 2 days\"),\n # (172800, 259200, \"2 days - 3 days\"),\n # (259200, 345600, \"3 days - 4 days\"),\n # (345600, 432000, \"4 days - 5 days\"),\n # (432000, 518400, \"5 days - 6 days\"),\n # (518400, 604800, \"6 days - 1 week\"),\n # (604800, 155520000, \"1 week - 1 month\"),\n # (155520000, int(data_ranges[\"label\"][\"max\"]), \"> 1 month\")\n # ]\n groups = [\n (-1, 1800, \"0-0.5h\"),\n (1800, 3600, \"0.5-1h\"),\n (3600, 7200, \"1-2h\"),\n (7200, 10800, \"2-3h\"),\n (10800, 14400, \"3-4h\"),\n (14400, 21600, \"4-6h\"),\n (21600, 28800, \"6-8h\"),\n (28800, 36000, \"8-10h\"),\n (36000, 43200, \"10-12h\"),\n (43200, 50400, \"12-16h\"),\n (50400, 64800, \"16-20h\"),\n (64800, 86400, \"20-24h\"),\n (86400, 172800, \"1-2d\"),\n (172800, 259200, \"2-3d\"),\n (259200, 345600, \"3-4d\"),\n (345600, 432000, \"4-5d\"),\n (432000, 518400, \"5-6d\"),\n (518400, 604800, \"6-7d\"),\n (604800, 1209600, \"1-2w\"),\n (1209600, 2419200, \"2-4w\"),\n (2419200, int(data_ranges[\"label\"][\"max\"]), \"> 4w\")\n ]\n\n def scale(seconds: int) -> float:\n # half_range = (data_ranges[\"label\"][\"max\"] - data_ranges[\"label\"][\"min\"]) / 2\n # result = seconds / half_range\n # return -1 + result if seconds < half_range else result\n label_range = data_ranges[\"label\"][\"max\"]\n return seconds / label_range\n\n def process_group(x: torch.Tensor, y: torch.Tensor, group: Tuple[int, int, str]) -> Tuple[int, int, int, float,\n str]:\n criterion = nn.L1Loss(reduction=\"mean\")\n mask = (y > scale(group[0])) & (y <= scale(group[1]))\n # mask = (y > group[0]) & (y <= group[1])\n x = x[mask]\n y = y[mask]\n mae = 0.\n num_data = x.shape[0]\n if num_data > 0:\n loss = criterion(x, y)\n mae = loss.item()\n return group[0], group[1], num_data, mae, group[2]\n\n mae_groups = [process_group(outputs, targets, group) for group in groups]\n return mae_groups", "def test_grouped(self):\n gfile = grades.writers.GradesFile(self.fname)\n gfile.table.compute_grouped_mean('Group')\n gfile.table_format = 'org'\n self.check_output(self.output_str2, gfile)", "def get_report(dataset):\n\n dataset = dataset.round(2)\n print('Overall results (mean): ')\n display(dataset[['classifier', 'preprocessor', 'f1', 'precision', 'recall']].groupby(['preprocessor', 'classifier'])\n .mean().round(2))\n print('Overall results (max): ')\n display(dataset[['classifier', 'preprocessor', 'f1', 'precision', 'recall']].groupby(['preprocessor', 'classifier'])\n .max().round(2))\n print('Grouped by Preprocessor (mean):')\n display(dataset[['preprocessor', 'f1', 'precision', 'recall']].groupby('preprocessor').mean().round(2))\n print('Grouped by Classifier (mean):')\n 
display(dataset[['classifier', 'f1', 'precision', 'recall']].groupby('classifier').mean().round(2))\n\n preprocessors = dataset['preprocessor'].unique()\n metrics = ['f1', 'precision', 'recall']\n\n # For each metric, display top 10 rounds.\n for m in metrics:\n print(f'Top 10 by {m}:')\n display(dataset.sort_values(m, ascending=False).head(10).round(2))\n\n for p in preprocessors:\n for m in metrics:\n d = dataset[dataset['preprocessor'] == p]\n for c in dataset['classifier'].unique():\n plt.plot(d[d['classifier'] == c]['prior'].unique(), d[d['classifier'] == c].groupby('prior').mean()[m],\n label=str(c))\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)\n plt.title(m + ' - ' + str(p))\n plt.show()", "def test_color_groups(self):\r\n data_colors = color_dict_to_objects(self.data_color_hsv)\r\n\r\n exp = None\r\n obs = color_groups(self.groups, data_colors, self.data_color_order)\r\n\r\n self.assertEqual(obs, exp)", "def perSaccAcrossGroups(addExp2 = True, trim = True, spacing = .5, \\\n\t\taddSimulation = True, exclOverlap = True, exclY = True):\n\t\n\tfig = plt.figure(figsize = (5,10))\n\ttitle = \"Average towards-handle landings across groups - exclOverlap = %s - exclY = %s\" % (exclOverlap, exclY)\n\tplt.suptitle(title)\n\t\n\t# The first experiment has to be treated differently, because\n\t# it contains two dependent variables: absolute and corrected\n\tdm1 = getDM.getDM(exp = \"004A\", excludeErrors = True, driftCorr = True)\n\tlLandingsAbs = []\n\tlLandingsCorr = []\n\t\n\tfor sacc in [\"1\", \"2\", \"3\"]:\n\t\t\n\t\t# This is the same for corrected landing positions (the saccade\n\t\t# doesn't change; only the reference point does)\n\t\t_dm = dm1.select(\"endX%sNorm != ''\" % sacc)\n\t\t_dm = _dm.select(\"endX%sNorm > -.5\" % sacc)\n\t\t_dm = _dm.select(\"endX%sNorm < .5\" % sacc)\n\t\t\n\t\tif exclY:\n\t\t\t_dm = _dm.select(\"endY%sNorm != ''\" % sacc)\n\t\t\t_dm = _dm.select(\"endY%sNorm > -.5\" % sacc)\n\t\t\t_dm = _dm.select(\"endY%sNorm < .5\" % sacc)\n\t\t\n\t\t\n\t\tif trim:\n\t\t\t_dmAbs = _dm.selectByStdDev(keys = [\"file\"], dv = \"endX%sNormToHandle\" % sacc)\n\t\t\t_dmCorr = _dm.selectByStdDev(keys = [\"file\"], dv = \"endX%sCorrNormToHandle\" % sacc)\n\t\t\n\t\t# Determine avg landing position:\n\t\tavgAbs = _dmAbs[\"endX%sNormToHandle\" % sacc].mean()\n\t\tavgCorr = _dmCorr[\"endX%sCorrNormToHandle\" % sacc].mean()\n\t\t\n\t\t# TODO: determine error bars:\n\n\t\tlLandingsAbs.append(avgAbs)\n\t\tlLandingsCorr.append(avgCorr)\n\t\n\tplt.plot(lLandingsAbs, color = \"#f57900\", linewidth = 2, marker = \"o\")\n\tplt.plot(lLandingsCorr, color = \"#73d216\", linewidth = 2, marker = \"o\")\n\t\n\t# The other 2 experiments can be treated equally:\n\tcolList = [\"#ef2929\", \"#3465a4\"]\n\n\tfor exp in [\"004B\", \"004C\"]:\n\t\t\n\t\tcontinue\n\t\t\n\t\tif not addExp2:\n\t\t\tif exp == \"004B\":\n\t\t\t\tcontinue\n\t\tif not addSimulation:\n\t\t\tif exp == \"004C\":\n\t\t\t\tcontinue\n\n\t\tdm = getDM.getDM(exp = exp, excludeErrors = True, driftCorr = True)\n\t\t\n\t\tif exp == \"004C\":\n\t\t\tprint 'xxx'\n\t\t\tif exclOverlap:\n\t\t\t\tdm = dm.select(\"gap == 'zero'\")\n\n\t\tlLandingsAbs = []\n\t\t\n\t\tfor sacc in [\"1\", \"2\", \"3\", '4']:\n\t\t\t\n\t\t\t# TODO: how to filter only on-object saccades exp 2??\n\t\t\t_dm = dm.select(\"endX%sNorm != ''\" % sacc)\n\t\t\t_dm = _dm.select(\"endX%sNorm > -.5\" % sacc)\n\t\t\t_dm = _dm.select(\"endX%sNorm < .5\" % sacc)\n\t\t\t\n\t\t\tif exclY:\n\t\t\t\n\t\t\t\t_dm = _dm.select(\"endY%sNorm != ''\" % 
sacc)\n\t\t\t\t_dm = _dm.select(\"endY%sNorm > -.5\" % sacc)\n\t\t\t\t_dm = _dm.select(\"endY%sNorm < .5\" % sacc)\n\n\t\t\tif trim:\n\t\t\t\t_dm = _dm.selectByStdDev(keys = [\"file\"], dv = \"endX%sNormToHandle\" % sacc)\n\t\t\t\n\t\t\t# Determine avg landing position:\n\t\t\tavgAbs = _dm[\"endX%sNormToHandle\" % sacc].mean()\n\t\t\tlLandingsAbs.append(avgAbs)\n\t\t\t\n\t\t\t\n\t\tcol = colList.pop()\n\t\tplt.plot(lLandingsAbs, color = col, linewidth = 2, marker = \"o\")\n\t\n\t# Modify plot:\n\tplt.legend([\"Exp1 abs\", \"Exp1 corr\", \"Exp2\", \"Sim\"])\n\t\t\n\tplt.axhline(0, color = \"#888a85\", linestyle = \"--\", linewidth = 2)\n\txLabels = [\"sacc 1\", \"sacc 2\", \"sacc 3\"]\n\txTicks = range(0,3)\n\tplt.xticks(xTicks, xLabels, rotation = .5)\n\tplt.xlim(min(xTicks)-spacing, max(xTicks)+spacing)\n\t\n\tplt.ylim([-.5, .5])\n\t\n\tplt.savefig(\"%s.png\" % title)", "def group_data(self):\n groups = []\n bug_map = self.bug_map()\n union_map = self.union_map(list(bug_map.keys()))\n # test_id grouping\n for union_id in set(union_map.values()):\n group = []\n for k, v in union_map.items():\n if v == union_id:\n group.extend(bug_map[k])\n if len(group) > 1:\n groups.append(group)\n return groups", "def group_data(data):\n\n data_grouped = dict()\n\n for data_pt in data:\n resonance_id = data_pt.par['resonance_id']\n\n assignment = parse_assignment(resonance_id)\n index = int(assignment[0][0])\n\n data_grouped.setdefault((index, resonance_id), []).append(data_pt)\n\n return data_grouped", "def profile_group(func, args, kwargs, func_result):\n (collection, key, condition, initial, reduce) = args[:5]\n report_kvs = _profile_query(collection)\n\n if key:\n report_kvs['Group_Key'] = _to_json(key)\n\n if condition:\n report_kvs['Group_Condition'] = _to_json(condition)\n\n if initial:\n report_kvs['Group_Initial'] = _to_json(initial)\n\n if reduce:\n report_kvs['Group_Reduce'] = reduce\n\n return report_kvs", "def compare_alpha_diversities(rarefaction_lines, mapping_lines, category,\r\n depth=None, test_type='nonparametric', num_permutations=999):\r\n if test_type == 'nonparametric' and num_permutations < 1:\r\n raise ValueError(\"Invalid number of permutations: %d. Must be greater \"\r\n \"than zero.\" % num_permutations)\r\n\r\n rarefaction_data = parse_rarefaction(rarefaction_lines)\r\n mapping_data = parse_mapping_file_to_dict(mapping_lines)[0]\r\n # samid_pairs, treatment_pairs are in the same order\r\n samid_pairs, treatment_pairs = sampleId_pairs(mapping_data,\r\n rarefaction_data, category)\r\n\r\n ps_avg_div = get_per_sample_average_diversities(rarefaction_data, depth)\r\n\r\n ttest_results, ad_avgs = {}, {}\r\n for sid_pair, treatment_pair in zip(samid_pairs, treatment_pairs):\r\n # if there is only 1 sample for each treatment in a comparison, and mc\r\n # using mc method, will error (e.g. mc_t_two_sample([1],[1]).\r\n if len(sid_pair[0]) == 1 and len(sid_pair[1]) == 1:\r\n ttest_results[treatment_pair] = (None, None)\r\n # add alpha diversity averages and standard deviations. 
since their\r\n # is only a single sample if we are in this part of the loop, we can\r\n # just record the sample value as the avg and 0 as the std.\r\n ad_avgs[treatment_pair[0]] = (sid_pair[0][0], 0.)\r\n ad_avgs[treatment_pair[1]] = (sid_pair[1][0], 0.)\r\n else:\r\n i = array([ps_avg_div[x] for x in sid_pair[0]])\r\n j = array([ps_avg_div[x] for x in sid_pair[1]])\r\n # add alpha diversity averages and standard deviations.\r\n ad_avgs[treatment_pair[0]] = (i.mean(), i.std())\r\n ad_avgs[treatment_pair[1]] = (j.mean(), j.std())\r\n # conduct tests\r\n if isnan(np_min(i)) or isnan(np_min(j)):\r\n ttest_results[treatment_pair] = (None, None)\r\n continue\r\n if test_type == 'parametric':\r\n obs_t, p_val = t_two_sample(i, j)\r\n elif test_type == 'nonparametric':\r\n obs_t, _, _, p_val = mc_t_two_sample(i, j,\r\n permutations=num_permutations)\r\n if p_val is not None:\r\n p_val = float(format_p_value_for_num_iters(p_val,\r\n num_iters=num_permutations))\r\n elif p_val is None: # None will error in format_p_val\r\n obs_t, p_val = None, None\r\n else:\r\n raise ValueError(\"Invalid test type '%s'.\" % test_type)\r\n ttest_results[treatment_pair] = (obs_t, p_val)\r\n\r\n return ttest_results, ad_avgs", "def formatdata(data,Params):\n\tmndata = dict()\n\talltrials = np.array([])\n\tfor k in range(len(Params[\"conditions\"])):\n\t\tconditionmean = data[0,k].mean(axis = 0)\n\t\tmndata.update({Params[\"conditions\"][k]: {'data' : data[0,k].mean(axis = 0), 'cmax' : conditionmean.max(), 'cmin' : conditionmean.min()}})\n\treturn mndata", "def modelOnBetaGrid(sample,bins,N,l,u):\r\n\r\n betaGrid=np.linspace(l,u,N)\r\n traces=[]\r\n WAIC=dict()\r\n index=0\r\n\r\n for beta in betaGrid:\r\n trace=intensityLogGauss(sample,bins,beta)\r\n traces.append(trace['intensity'])\r\n WAIC[index]=trace\r\n index+=1\r\n\r\n df=pm.compare(WAIC,ic='WAIC')\r\n\r\n return betaGrid,df,traces", "def test_compare_alpha_diversities(self):\r\n # test 'Dose' at 480 inputs\r\n category = 'Dose'\r\n depth = 480\r\n test_type = 'parametric'\r\n obs_tcomps, obs_ad_avgs = compare_alpha_diversities(\r\n self.rarefaction_file,\r\n self.mapping_file, category=category, depth=depth,\r\n test_type=test_type)\r\n\r\n # hardcoded order of the terms in the keys otherwise would comps fail\r\n exp_tcomps = \\\r\n {('Control', '2xDose'): (1.1746048668554037, 0.44899351189030801),\r\n ('1xDose', '2xDose'): (1.7650193854830403, 0.17574514418562981),\r\n ('Control', '1xDose'): (0.43618805086434992, 0.7052689260099092)}\r\n\r\n # test each key in expected results -- this won't catch if\r\n # obs_tcomps has extra entries, but test that via the next call\r\n for k in exp_tcomps:\r\n assert_almost_equal(exp_tcomps[k], obs_tcomps[k])\r\n self.assertEqual(set(exp_tcomps.keys()), set(obs_tcomps.keys()))\r\n\r\n # test that returned alpha diversity averages are correct\r\n # dose\r\n # 1xDose = ['Sam1','Sam2','Sam6'], 2xDose = ['Sam3','Sam4'],\r\n # Control = ['Sam5']\r\n exp_ad_avgs = {'1xDose': (3.2511951575216664, 0.18664627928763661),\r\n '2xDose': (2.7539647172550001, 0.30099438035250015),\r\n 'Control': (3.3663303519925001, 0.0)}\r\n for k in exp_ad_avgs:\r\n assert_almost_equal(exp_ad_avgs[k], obs_ad_avgs[k])\r\n\r\n # test 'Dose' at 480 inputs with nonparametric test\r\n seed(0) # set the seed to reproduce random MC pvals\r\n category = 'Dose'\r\n depth = 480\r\n test_type = 'nonparametric'\r\n num_permutations = 100\r\n obs_tcomps, obs_ad_avgs = compare_alpha_diversities(\r\n self.rarefaction_file,\r\n self.mapping_file, category=category, 
depth=depth,\r\n test_type=test_type, num_permutations=num_permutations)\r\n exp_tcomps = {('1xDose', '2xDose'): (1.7650193854830403, 0.13),\r\n ('Control', '1xDose'): (0.43618805086434992, 0.83), ('Control',\r\n '2xDose'): (1.1746048668554037, 0.62)}\r\n # test each key in expected results -- this won't catch if\r\n # obs_tcomps has extra entries, but test that via the next call\r\n for k in exp_tcomps:\r\n assert_almost_equal(exp_tcomps[k], obs_tcomps[k])\r\n self.assertEqual(set(exp_tcomps.keys()), set(obs_tcomps.keys()))\r\n\r\n # test that returned alpha diversity averages are correct\r\n # dose\r\n # 1xDose = ['Sam1','Sam2','Sam6'], 2xDose = ['Sam3','Sam4'],\r\n # Control = ['Sam5']\r\n exp_ad_avgs = {'Control': (\r\n 3.3663303519925001,\r\n 0.0),\r\n '1xDose': (3.2511951575216664,\r\n 0.18664627928763661),\r\n '2xDose': (2.7539647172550001,\r\n 0.30099438035250015)}\r\n\r\n for k in exp_ad_avgs:\r\n assert_almost_equal(exp_ad_avgs[k], obs_ad_avgs[k])\r\n\r\n # test it works with NA values\r\n # test 'Dose' at 500 inputs with paramteric test\r\n category = 'Dose'\r\n depth = 500\r\n test_type = 'parametric'\r\n obs_tcomps, obs_ad_avgs = compare_alpha_diversities(\r\n self.rarefaction_file,\r\n self.mapping_file, category=category, depth=depth,\r\n test_type=test_type)\r\n exp_tcomps = \\\r\n {('Control', '2xDose'): (-0.63668873339963239, 0.63906168713487699),\r\n ('1xDose', '2xDose'): (None, None),\r\n ('Control', '1xDose'): (None, None)}\r\n for obs, exp in izip(obs_tcomps, exp_tcomps):\r\n self.assertEqual(obs, exp)\r\n \r\n # test that it works with nonparametric test - this was erroring.\r\n seed(0)\r\n test_type = 'nonparametric'\r\n exp_tcomps = \\\r\n {('Control', '2xDose'): (-0.63668873339963239, 0.672),\r\n ('1xDose', '2xDose'): (None, None),\r\n ('Control', '1xDose'): (None, None)}\r\n obs_tcomps, obs_ad_avgs = compare_alpha_diversities(\r\n self.rarefaction_file,\r\n self.mapping_file, category=category, depth=depth,\r\n test_type=test_type)\r\n for obs, exp in izip(obs_tcomps, exp_tcomps):\r\n self.assertEqual(obs, exp)\r\n\r\n # test that returned alpha diversity averages are correct\r\n # dose\r\n # 1xDose = ['Sam1','Sam2','Sam6'], 2xDose = ['Sam3','Sam4'],\r\n # Control = ['Sam5']\r\n # will fail on nan comparison so avoid this\r\n exp_ad_avgs = {'1xDose': (nan, nan),\r\n '2xDose': (3.1955144893699998, 0.84206819489000018),\r\n 'Control': (2.2669008538500002, 0.0)}\r\n for k in exp_ad_avgs:\r\n if k != '1xDose':\r\n assert_almost_equal(exp_ad_avgs[k], obs_ad_avgs[k])\r\n if k == '1xDose':\r\n self.assertTrue(all(map(isnan, obs_ad_avgs[k])))\r\n\r\n # test that it works when no depth is passed\r\n category = 'Dose'\r\n depth = None # should return depth = 910\r\n test_type = 'parametric'\r\n obs_tcomps, obs_ad_avgs = compare_alpha_diversities(\r\n self.rarefaction_file,\r\n self.mapping_file, category=category, depth=depth,\r\n test_type=test_type)\r\n\r\n # hardcoded order of the terms in the keys otherwise would comps fail\r\n exp_tcomps = \\\r\n {('Control', '2xDose'): (3.3159701868634883, 0.1864642327553255),\r\n ('1xDose', '2xDose'): (-0.48227871733885291, 0.66260803238173183),\r\n ('Control', '1xDose'): (0.83283756452373126, 0.49255115337550748)}\r\n for obs, exp in izip(obs_tcomps, exp_tcomps):\r\n self.assertEqual(obs, exp)\r\n\r\n # test that returned alpha diversity averages are correct\r\n # dose\r\n # 1xDose = ['Sam1','Sam2','Sam6'], 2xDose = ['Sam3','Sam4'],\r\n # Control = ['Sam5']\r\n exp_ad_avgs = {'1xDose': (2.6763340901916668, 0.36025734786901326),\r\n 
'2xDose': (2.8358041871949999, 0.04611264137749993),\r\n 'Control': (3.1006488615725001, 0.0)}\r\n for k in exp_ad_avgs:\r\n assert_almost_equal(exp_ad_avgs[k], obs_ad_avgs[k])", "def index():\n import numpy as np\n import random\n\n total_gender = {}\n total_gender['Male'] = db(db.patient.sex == 'Male').count()\n total_gender['Female'] = db(db.patient.sex == 'Female').count()\n total_gender['Undeclared'] = db(db.patient.sex == 'Undeclared').count()\n\n groups = db(db.groups).select()\n freq_groups = {}\n grp_gender = {}\n for g in groups:\n freq_groups[g.code] = db(db.patient.groups.contains(g.id)).count()\n grp_gender[g.code] = {}\n grp_gender[g.code]['Male'] = db(db.patient.groups.contains(g.id) & (db.patient.sex == 'Male')).count()\n grp_gender[g.code]['Female'] = db(db.patient.groups.contains(g.id) & (db.patient.sex == 'Female')).count()\n grp_gender[g.code]['Undeclared'] = db(db.patient.groups.contains(g.id) & (db.patient.sex == 'Undeclared')).count()\n\n experiments = db(db.experiments).select()\n freq_experiments = {}\n exp_gender = {}\n for e in experiments:\n freq_experiments[e.code] = db(db.patient.experiments.contains(e.id)).count()\n exp_gender[e.code] = {}\n exp_gender[e.code]['Male'] = db(db.patient.experiments.contains(e.id) & (db.patient.sex == 'Male')).count()\n exp_gender[e.code]['Female'] = db(db.patient.experiments.contains(e.id) & (db.patient.sex == 'Female')).count()\n exp_gender[e.code]['Undeclared'] = db(db.patient.experiments.contains(e.id) & (db.patient.sex == 'Undeclared')).count()\n\n grp_exp = {}\n for e in experiments:\n grp_exp[e.code] = {}\n for g in groups:\n grp_exp[e.code][g.code] = db(db.patient.experiments.contains(e.id) & db.patient.groups.contains(g.id)).count()\n\n return dict(message=T('Pain Network: A web-based tool for diagnosis of the Chronic Pain.'),\n freq_gender=total_gender,freq_groups=freq_groups,freq_experiments=freq_experiments,\n exp_gender=exp_gender,grp_gender=grp_gender,grp_exp=grp_exp)", "def _compute_group_stats():\n group_stats = []\n \n wmt16_group = Group.objects.filter(name='WMT16')\n wmt16_users = _get_active_users_for_group(wmt16_group)\n \n # Aggregate information about participating groups.\n groups = set()\n for user in wmt16_users:\n for group in _identify_groups_for_user(user):\n groups.add(group)\n \n # TODO: move this to property of evaluation group or add dedicated data model.\n # GOAL: should be configurable from within the Django admin backend.\n #\n # MINIMAL: move to local_settings.py?\n #\n # The following dictionary defines the number of HITs each group should\n # have completed during the WMT16 evaluation campaign.\n \n for group in groups:\n _name = group.name\n \n _group_stats = HIT.compute_status_for_group(group)\n _total = _group_stats[0]\n \n if _total > 0 and not _name in GROUP_HIT_REQUIREMENTS.keys():\n _required = 0\n elif _name in GROUP_HIT_REQUIREMENTS.keys():\n _required = GROUP_HIT_REQUIREMENTS[_name]\n _delta = _total - _required\n _data = (_total, _required, _delta)\n \n if _data[0] > 0:\n group_stats.append((_name, _data))\n \n # Sort by number of remaining HITs.\n group_stats.sort(key=lambda x: x[1][2])\n \n # Add totals at the bottom.\n global_total = sum([x[1][0] for x in group_stats])\n global_required = sum([x[1][1] for x in group_stats])\n global_delta = global_total - global_required\n global_data = (global_total, global_required, global_delta)\n group_stats.append((\"Totals\", global_data))\n \n return group_stats", "def Group(self) -> _n_5_t_0:", "def Group(self) -> _n_5_t_0:", "def 
aga_expression_entropies(adata):\n from scipy.stats import entropy\n groups_order, groups_masks = utils.select_groups(adata, smp='aga_groups')\n entropies = []\n for mask in groups_masks:\n X_mask = adata.X[mask]\n x_median = np.median(X_mask, axis=0)\n x_probs = (x_median - np.min(x_median)) / (np.max(x_median) - np.min(x_median))\n entropies.append(entropy(x_probs))\n return entropies" ]
[ "0.5615865", "0.5525419", "0.5523908", "0.54661447", "0.536697", "0.53608376", "0.5330283", "0.52438", "0.5227983", "0.5227983", "0.5154076", "0.5146968", "0.51379746", "0.51017696", "0.5098285", "0.50540596", "0.5026247", "0.5020847", "0.50154185", "0.49952942", "0.4989816", "0.4977468", "0.4965934", "0.49614936", "0.4958199", "0.4956973", "0.49567375", "0.49454173", "0.49454173", "0.4933719" ]
0.6480124
0
Abstract method invoked when a trial is completed or terminated. Do nothing by default.
def trial_end(self, parameter_id, success, **kwargs):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_trial_complete(self, trial_runner, trial, result):\n\n raise NotImplementedError", "def on_trial_complete(self, trial: Trial, result: Dict[str, Any]):\n pass", "def trial(self):\n pass", "def on_trial_complete(self,\r\n trial_id: str,\r\n result: Optional[Dict] = None,\r\n error: bool = False):\r\n raise NotImplementedError", "def trial_clean_up(self):\n pass", "def on_trial_result(self, trial_runner, trial, result):\n\n raise NotImplementedError", "def on_trial_error(self, trial: Trial):\n pass", "def report_trial(self):\n pass", "def on_trial_add(self, trial: Trial):\n pass", "def on_trial_error(self, trial_runner, trial):\n\n raise NotImplementedError", "def on_trial_add(self, trial_runner, trial):\n\n raise NotImplementedError", "def _finished(self) -> None:", "def taskCompleted(self) -> None:\n ...", "def taskCompleted(self) -> None:\n ...", "def done(self):\n raise NotImplementedError()", "def on_trial_remove(self, trial: Trial):\n pass", "def task_done(self) -> None:\n pass", "def finished_sim(self):\n raise NotImplementedError(\n \"finished_sim function not reimplemented form base class\")", "def finalize_integration(self, **kwargs):", "def trial_completed(self, behavior_data):\r\n # Update elapsed_time\r\n self.elapsed_time = datetime.datetime.now() - self.init_datetime\r\n self.behavior_data = behavior_data\r\n correct = ~np.isnan(\r\n self.behavior_data['States timestamps']['correct'][0][0])\r\n error = ~np.isnan(\r\n self.behavior_data['States timestamps']['error'][0][0])\r\n no_go = ~np.isnan(\r\n self.behavior_data['States timestamps']['no_go'][0][0])\r\n assert correct or error or no_go\r\n # Add trial's response time to the buffer\r\n self.response_time = misc.get_trial_rt(self.behavior_data)\r\n self.response_time_buffer.append(self.response_time)\r\n # Update response buffer -1 for left, 0 for nogo, and 1 for rightward\r\n if (correct and self.position < 0) or (error and self.position > 0):\r\n self.response_side_buffer.append(1)\r\n elif (correct and self.position > 0) or (error and self.position < 0):\r\n self.response_side_buffer.append(-1)\r\n elif no_go:\r\n self.response_side_buffer.append(0)\r\n # Update the trial_correct variable + buffer\r\n self.trial_correct = bool(correct)\r\n self.trial_correct_buffer.append(self.trial_correct)\r\n # Increment the trial correct counter\r\n self.ntrials_correct += self.trial_correct\r\n # Update the water delivered\r\n if self.trial_correct:\r\n self.water_delivered += self.reward_amount\r\n\r\n # SAVE TRIAL DATA\r\n params = self.__dict__.copy()\r\n params.update({'behavior_data': behavior_data})\r\n # Convert to str all non serializable params\r\n params['data_file'] = str(params['data_file'])\r\n params['osc_client'] = 'osc_client_pointer'\r\n params['init_datetime'] = params['init_datetime'].isoformat()\r\n params['elapsed_time'] = str(params['elapsed_time'])\r\n params['position'] = int(params['position'])\r\n # Delete buffered data\r\n params['stim_probability_left_buffer'] = ''\r\n params['position_buffer'] = ''\r\n params['contrast_buffer'] = ''\r\n params['signed_contrast_buffer'] = ''\r\n params['response_time_buffer'] = ''\r\n params['response_side_buffer'] = ''\r\n params['trial_correct_buffer'] = ''\r\n # Dump and save\r\n out = json.dumps(params, cls=ComplexEncoder)\r\n self.data_file.write(out)\r\n self.data_file.write('\\n')\r\n self.data_file.close()\r\n # If more than 42 trials save transfer_me.flag\r\n if self.trial_num == 42:\r\n misc.create_flags(self.data_file_path, self.poop_count)\r\n\r\n 
return self", "def Done(self):\n pass", "def done(self):\n pass", "def trial_prep(self):\n pass", "def on_trial_result(self, trial_id: str, result: Dict):\r\n pass", "def notify_end(self, status, objective):\n pass # pragma: no cover", "def finished(self):\r\n raise NotImplementedError", "def finished(self):\n raise NotImplementedError()", "def on_trial_result(self, trial: Trial, result: Dict[str, Any]) -> str:\n return SchedulerDecision.CONTINUE", "def setupFinished(self, *args, **kwargs): # real signature unknown\n pass", "def on_trial_remove(self, trial_runner, trial):\n\n raise NotImplementedError" ]
[ "0.7465205", "0.7200161", "0.68019694", "0.6782522", "0.66770226", "0.66569257", "0.6578445", "0.6572621", "0.6555807", "0.65140676", "0.64149064", "0.6342137", "0.63419366", "0.63419366", "0.6279795", "0.62722665", "0.62639666", "0.62104183", "0.6204361", "0.6177283", "0.61681616", "0.61126107", "0.61120135", "0.6105541", "0.6105389", "0.60846806", "0.60747254", "0.60672325", "0.604935", "0.6041646" ]
0.7370623
1
Abstract method for updating the search space. Must override. Tuners are advised to support updating search space at runtime. If a tuner can only set search space once before generating first hyperparameters, it should explicitly document this behaviour.
def update_search_space(self, search_space): raise NotImplementedError('Tuner: update_search_space not implemented')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updateSearch(self, authenticationToken, search):\r\n pass", "def set_search_space(self,\n search_space_size: int):\n self.search_space = np.linspace(0, 1, search_space_size)", "def _update_search_info(self):\n page_size = int(self._search_data['pageSize'])\n begin_index = int(self._params['beginIndex']) + page_size\n self._params['beginIndex'] = str(begin_index)", "def set_search_params(self, **kwargs):\n self._search_params = kwargs", "def update_search_parameters(self, selected_gender, selected_category, selected_subcategory):\r\n self.model.set_gender(selected_gender)\r\n self.model.set_category(selected_category)\r\n self.model.set_subcategory(selected_subcategory)\r\n self.model.fetch_results()", "def grid_search(self):\n\t\t''' common settings without grid-search '''\n\t\tbinary_rele, unknown_as_zero = False, False\n\t\tcommon_data_dict = dict(data_id=self.data_id, dir_data=self.dir_data, min_docs=10, min_rele=1,\n\t\t\t\t\t\t\t\tunknown_as_zero=unknown_as_zero, binary_rele=binary_rele)\n\n\t\tdata_meta = get_data_meta(data_id=self.data_id) # add meta-information\n\t\tcommon_data_dict.update(data_meta)\n\n\t\t''' some settings for grid-search '''\n\t\tchoice_presort = [True] if self.debug else [True]\n\t\tchoice_sample_rankings_per_q = [1] if self.debug else [1] # number of sample rankings per query\n\t\tchoice_scale_data, choice_scaler_id, choice_scaler_level = get_default_scaler_setting(data_id=self.data_id, grid_search=True)\n\n\t\tfor scale_data, scaler_id, scaler_level, presort, sample_rankings_per_q in product(choice_scale_data,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t choice_scaler_id,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t choice_scaler_level,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t choice_presort,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t choice_sample_rankings_per_q):\n\n\t\t\tself.data_dict = dict(presort=presort, sample_rankings_per_q=sample_rankings_per_q,\n\t\t\t\t\t\t\t\t scale_data=scale_data, scaler_id=scaler_id, scaler_level=scaler_level)\n\t\t\tself.data_dict.update(common_data_dict)\n\t\t\tyield self.data_dict", "def update_parameters(self):\n # We update gamma, gamma0, lambda and nu in turn (Bottolo et al, 2011)\n self.update_gamma()\n self.update_gamma0()\n self.update_lambda()\n self.update_nu()\n if self.sample_xi:\n self.update_xi()", "def _search(self, X, y, search_params, search_method='gridsearchcv'):\n\n # The returned search method is either\n # sequential or parallell. The former\n # identifies Bayesian optimization, while\n # the latter identifies grid or randomized\n # search by Sklearn. 
\n search_method, search_taxonomy = _check_search_method(search_method=search_method)\n search_params = super()._preprocess_search_params(y=y, search_params=search_params,\n search_taxonomy=search_taxonomy)\n if not hasattr(self, 'pipe'):\n n_samples = _n_samples(y)\n fold_size = np.full(shape=n_samples, fill_value=n_samples // self.cv,\n dtype=np.int)\n estimate_fold_size = n_samples - (np.max(fold_size) + 1)\n self.get_pipeline(y=y, n_quantiles=estimate_fold_size)\n\n if search_method == 'gridsearchcv':\n self._regressor_search = sklearn.model_selection._search.GridSearchCV(\n estimator=self.pipe, param_grid=search_params,\n scoring=self.scoring, refit=self.refit, n_jobs=self.n_jobs,\n cv=self.cv, verbose=self.verbose, pre_dispatch='2*n_jobs',\n error_score=np.nan, return_train_score=self.return_train_score)\n elif search_method == 'randomizedsearchcv':\n self._regressor_search = sklearn.model_selection._search.RandomizedSearchCV(\n estimator=self.pipe, param_distributions=search_params,\n n_iter=self.randomizedcv_n_iter, scoring=self.scoring,\n n_jobs=self.n_jobs, refit=self.refit, cv=self.cv,\n verbose=self.verbose, pre_dispatch='2*n_jobs',\n error_score=np.nan, return_train_score=self.return_train_score)\n elif search_method == 'bayesoptcv':\n self.optimization = _bayesoptcv(X=X, y=y, estimator=self.pipe,\n search_params=search_params,\n cv=self.cv,\n scoring=self.scoring,\n n_jobs=self.n_jobs,\n verbose=self.verbose,\n random_state=self.random_state,\n init_points=self.bayesoptcv_init_points,\n n_iter=self.bayesoptcv_n_iter)\n\n if self.refit:\n max_params = self.optimization.max['params']\n get_best_params_ = _check_bayesoptcv_parameter_type(max_params)\n self._regressor_search = self.pipe.set_params(**get_best_params_)", "def search_boost(self, search_boost):\n\n self._search_boost = search_boost", "def tune_params(self, X_train, Y_train):\n return self.model # No hyper-parameter tuning", "def __on_query_edited(self):\n self.__refresh_search_results()", "def new_search(self):\n return {'search_parameters': h.get_search_parameters(self.query_builder)}", "def update_knowledge(self):\n pass", "def pickupSearch(self):\n self.__searchJob = self.loadSavedHyperSearchJob(\n permWorkDir=self._options[\"permWorkDir\"],\n outputLabel=self._options[\"outputLabel\"])\n\n\n self.monitorSearchJob()", "def modify_search_settings(self):\n want_to_exit = False\n while want_to_exit == False:\n\n print('_____ Current Settings _____\\n'\n ' good_word_tolerance = %d\\n' % self.bot_squad[0].good_word_tolerance,\n 'bad_word_tolerance = %d\\n' % self.bot_squad[0].bad_word_tolerance,\n 'min_years_exp = %d\\n' % self.bot_squad[0].min_years_exp,\n 'min_str_len = %d\\n' % self.bot_squad[0].min_str_len,\n 'page_limit = %d\\n' % self.bot_squad[0].page_limit,)\n\n for bot in self.bot_squad:\n print(' %s is seeded with URL:' % bot.name)\n print(' %s\\n' % bot.base_url)\n\n print('Choose parameter to modify:\\n'\n '____________________________________\\n'\n ' 1-good_word_tolerance | q-Quit\\n'\n ' 2-bad_word_tolerance | w-Seed URLs\\n'\n ' 3-min_years_exp | e-Site Toggles\\n'\n ' 4-min_str_len | r-Filter Tuning\\n'\n ' 5-page_limit |\\n'\n '_______________ Input ______________\\n')\n my_input = input()\n\n if my_input == '1':\n print('Input integer:\\n')\n parameter_input = input()\n if not is_integer(parameter_input):\n print('Invalid input\\n'\n 'returning to main menu')\n return\n else:\n f = open('trunk/filters/good_word_tolerance.txt', 'w')\n f.write(parameter_input)\n f.close()\n print('good_word_tolerance 
changed to %d\\n' % int(parameter_input))\n print('restart program to take effect')\n continue\n\n if my_input == '2':\n print('Input integer:\\n')\n parameter_input = input()\n if not is_integer(parameter_input):\n print('Invalid input\\n'\n 'returning to main menu')\n return\n else:\n f = open('trunk/filters/bad_word_tolerance.txt', 'w')\n f.write(parameter_input)\n f.close()\n print('bad_word_tolerance changed to %d\\n' % int(parameter_input))\n print('restart program to take effect')\n continue\n\n if my_input == '3':\n print('Input integer:\\n')\n parameter_input = input()\n if not is_integer(parameter_input):\n print('Invalid input\\n'\n 'returning to main menu')\n return\n else:\n f = open('trunk/filters/min_years_exp.txt', 'w')\n f.write(parameter_input)\n f.close()\n print('min_years_exp changed to %d\\n' % int(parameter_input))\n print('restart program to take effect')\n continue\n\n if my_input == '4':\n print('Input integer:\\n')\n parameter_input = input()\n if not is_integer(parameter_input):\n print('Invalid input\\n'\n 'returning to main menu')\n return\n else:\n f = open('trunk/filters/min_str_len.txt', 'w')\n f.write(parameter_input)\n f.close()\n print('min_str_len changed to %d\\n' % int(parameter_input))\n print('restart program to take effect')\n continue\n\n if my_input == '5':\n print('Input integer:\\n')\n parameter_input = input()\n if not is_integer(parameter_input):\n print('Invalid input\\n'\n 'returning to main menu')\n return\n else:\n f = open('trunk/filters/page_limit.txt', 'w')\n f.write(parameter_input)\n f.close()\n print('page_limit changed to %d\\n' % int(parameter_input))\n print('restart program to take effect')\n continue\n\n if my_input == 'q':\n want_to_exit = True\n print('Returning to main menu')\n continue\n\n if my_input == 'w':\n print('Instructions: edit seed URLs directly in the .txt files:\\n'\n ' trunk/branch/indeed_bot.txt\\n'\n ' trunk/branch/monster_bot.tx\\n'\n ' trunk/branch/craigs_bot.tx\\n')\n\n continue\n\n if my_input == 'e':\n print('WIP')\n continue\n\n if my_input == 'r':\n print('Instructions: edit keyword libraries directly in the .txt files:\\n'\n ' trunk/filters/essential_body.txt\\n'\n ' trunk/filters/excluded_body.txt\\n'\n ' trunk/filters/excluded_title.txt\\n')\n return\n\n print('Invalid input\\n')\n\n\n # TODO TODO TODO TODO TODO TODO TODO TODO\n # TODO TODO TODO TODO TODO TODO TODO TODO", "def search(self, search):\n raise NotImplementedError", "def _update_parameter(self, dWxh, dbh, dWhy, dby):\n # Add code to update all the weights and biases here", "def runNewSearch(self):\n self.__searchJob = self.__startSearch()\n\n self.monitorSearchJob()", "def update_params(self):\n pass", "def set_parameters(self, population_size=40, num_tests=5, num_searches=5, num_enabled=17, bonus1=10, bonus2=1,\n **kwargs):\n kwargs.pop('num_searches_best', None)\n super().set_parameters(num_searches_best=0, local_searches=(mts_ls1v1, mts_ls2), **kwargs)", "def updateSearch(self, authenticationToken, search):\r\n self.send_updateSearch(authenticationToken, search)\r\n return self.recv_updateSearch()", "def _update_params(self):\n pass", "def _update(self):\n num_new_evals = (self.metamodel.model_evaluations - self._last_rebuild)\n if num_new_evals >= self.rebuild_interval:\n self._built = True\n self._last_rebuild = self.metamodel.model_evaluations\n\n # Rebuild relevance function and make it usable on arrays.\n self._relevance_function = self._construct_relevance_function()\n rel_fun = np.vectorize(self._relevance_function)\n\n # Learn 
relevance prediction model\n data = self.metamodel.history.get_model_evaluations()\n relevance_values = rel_fun(data[:, -1])\n self._predictor.fit(data[:, :-1], relevance_values)\n return", "def tune_parameters(self, parameters, search_alg, num_trials=5, metric=\"f1\", direction=\"maximize\", train_ratio=0.7, num_times=1, export_metrics=True):\n self._clear_cache()\n model_id = self.model_id\n if self.comet_key != None:\n exp = init_experiment(self.comet_key, \"model-performance\", \"covid-vaccine\")\n exp.log_parameters({\n \"model_id\":model_id,\n \"model_type\":self.embedding_type,\n \"multiclass\":self.class_label,\n \"train_ratio\":train_ratio,\n \"num_samples\":num_trials,\n \"metric\":metric,\n \"direction\":direction,\n \"search_alg\":search_alg\n })\n log_fixed_params(parameters, exp)\n exp.add_tag(\"multi\" if self.class_label == \"Multiclass\" else \"binary\")\n start = time.time()\n tr_text, tr_label, self.tr_meta, te_text, te_label, self.te_meta, _ = get_train_test_data(self.seed_fp, self.label_fp, train_ratio=train_ratio, meta_cols=self.meta_cols, drop_irrelevant=self.drop_irrelevant, visualize=False, verbose=self.verbose)\n self._transform_labels(tr_label, te_label)\n print(\"data loading:\", time.time() - start, \"seconds\\n\")\n start = time.time()\n self._prepare_feature_components(tr_text, te_text, parameters)\n print(\"total preprocessing:\", time.time() - start, \"seconds\\n\")\n metric_df_parts = []\n def objective(trial):\n config = suggest_config(parameters, trial)\n instances = []\n for _ in range(num_times):\n instance_df = self._run_single_config(train_ratio, config)\n instance_df = instance_df[instance_df[\"part\"] == \"test\"]\n instances.append(instance_df)\n tmp_df = pd.concat(instances, axis=0)\n print(\"metrics 1\", tmp_df.shape)\n group_cols = list(tmp_df.drop(\"score\", axis=1).columns)\n print(group_cols)\n tmp_df = tmp_df.groupby(group_cols)[\"score\"].agg([\"mean\",\"std\"]).reset_index()\n print(\"metrics 2\", tmp_df.shape)\n metric_df_parts.append(tmp_df)\n metrics = dict(zip(tmp_df[\"metric\"],tmp_df[\"mean\"]))\n return metrics[metric]\n if search_alg == \"GRID\":\n algo = GridSampler(extract_grid(parameters))\n elif search_alg == \"RND\":\n algo = RandomSampler()\n elif search_alg == \"TPE\":\n algo = TPESampler(n_startup_trials=int(num_trials*0.3))\n else:#default optuna setting\n algo = None\n study = optuna.create_study(direction=\"maximize\", sampler=algo)\n study.optimize(objective, n_trials=num_trials, n_jobs=1)\n metrics_df = pd.concat(metric_df_parts)\n best_config = study.best_params\n print(\"Best config: \", best_config)\n if export_metrics:\n result_dir = os.path.join(self.model_dir, \"results\")\n if not os.path.exists(result_dir):\n os.makedirs(result_dir)\n study_fp = os.path.join(result_dir, \"%s.pkl\" % model_id)\n print(\"Study file:\", study_fp)\n joblib.dump(study, study_fp)\n result_fp = os.path.join(result_dir, \"%s.csv\" % model_id)\n print(\"Output file:\", result_fp)\n metrics_df.to_csv(result_fp, index=False)\n if self.comet_key != None:\n exp.log_parameters(best_config)\n exp.log_metrics({\n \"train_size\":len(tr_text),\n \"test_size\":len(te_text)\n })\n best_results = dict(metrics_df.groupby(\"metric\")[\"mean\"].max()[[\"f1\",\"acc\",\"auc\"]])\n exp.log_metrics(best_results)\n exp.end()\n return best_config", "def updateParameters(self):\n\n return", "def init_model(self):\n # n_dims == n_hparams\n n_dims = len(self.searchspace.keys())\n\n if self.interim_results:\n n_dims += 1 # add one dim for augumented budget\n\n 
cov_amplitude = ConstantKernel(1.0, (0.01, 1000.0))\n\n other_kernel = Matern(\n length_scale=np.ones(n_dims),\n length_scale_bounds=[(0.01, 100)] * n_dims,\n nu=2.5,\n )\n base_model = GaussianProcessRegressor(\n kernel=cov_amplitude * other_kernel,\n normalize_y=True,\n noise=\"gaussian\",\n n_restarts_optimizer=2,\n )\n self.base_model = base_model", "def _add_better_search_words(self):\n for kw in self.better_search_kw:\n self.search_query += kw", "def _suggest_samples(dataset: Dataset, settings: ZoomOptSettings) -> np.ndarray:\n\n if settings.batch < 1:\n raise ValueError(f\"Use batch size at least 1. (Was {settings.batch}).\") # pragma: no cover\n\n continuous_dict, categorical_dict = dataset.parameter_space\n\n # If any categorical variable is present, we raise an exception. In theory they should be represented by one-hot\n # encodings, but I'm not sure how to retrieve the bounds of this space and do optimization within it (the\n # best way is probably to optimize it in an unconstrained space and map it to one-hot vectors using softmax).\n # Moreover, in BayesOpt there is iteration over contexts.\n if categorical_dict:\n raise NotImplementedError(\"This method doesn't work with categorical inputs right now.\") # pragma: no cover\n\n # It seems that continuous_dict.values() contains pandas series instead of tuples, so we need to map over it\n # to retrieve the parameter space\n original_space: Hypercuboid = [(a, b) for a, b in continuous_dict.values()]\n\n # Find the location of the optimum. We will shrink the space around it\n optimum: np.ndarray = _get_optimum_location(dataset)\n\n # Estimate how many optimization iterations were performed.\n step_number: int = settings.n_step or _estimate_step_number(\n n_points=len(dataset.output_array), batch_size=settings.batch\n )\n\n # Convert to per-batch shrinking factor if a per-iteration shrinking factor supplied\n per_batch_shrinking_factor = (\n settings.shrinking_factor ** settings.batch if settings.shrink_per_iter else settings.shrinking_factor\n )\n\n # Calculate by what factor each dimension of the hypercube should be shrunk\n shrinking_factor_per_dim: float = _calculate_shrinking_factor(\n initial_shrinking_factor=per_batch_shrinking_factor, step_number=step_number, n_dim=len(original_space)\n )\n\n # Shrink the space\n new_space: Hypercuboid = [\n shrink_interval(\n shrinking_factor=shrinking_factor_per_dim, interval=interval, shrinking_anchor=optimum_coordinate\n )\n for interval, optimum_coordinate in zip(original_space, optimum)\n ]\n\n # The shrunk space may be out of the original bounds (e.g. 
if the maximum was close to the boundary).\n # Translate it.\n new_space = _move_to_original_bounds(new_space=new_space, original_space=original_space)\n\n # Sample the new space to get a batch of new suggestions.\n parameter_space = ParameterSpace([ContinuousParameter(f\"x{i}\", low, upp) for i, (low, upp) in enumerate(new_space)])\n\n return designs.suggest_samples(\n parameter_space=parameter_space, design_type=settings.design, point_count=settings.batch\n )", "def default_mutate(search_space, rng, old_value, **kwargs):\n multiply_factor = kwargs.pop(\"multiply_factor\", 3.0)\n add_factor = kwargs.pop(\"add_factor\", 1)\n volatility = kwargs.pop(\"volatility\", 0.001)\n if search_space.type == \"real\":\n lower_bound, upper_bound = search_space.interval()\n factors = (\n 1.0 / multiply_factor\n + (multiply_factor - 1.0 / multiply_factor) * rng.random()\n )\n if lower_bound <= old_value * factors <= upper_bound:\n new_value = old_value * factors\n elif lower_bound > old_value * factors:\n new_value = lower_bound + volatility * rng.random()\n else:\n new_value = upper_bound - volatility * rng.random()\n elif search_space.type == \"integer\":\n print(search_space)\n lower_bound, upper_bound = search_space.interval()\n factors = int(add_factor * (2 * rng.randint(2) - 1))\n if lower_bound <= old_value + factors <= upper_bound:\n new_value = int(old_value) + factors\n elif lower_bound > old_value + factors:\n new_value = int(lower_bound)\n else:\n new_value = int(upper_bound)\n elif search_space.type == \"categorical\":\n # TODO: This ignores the probabilities passed to search space.\n # The mutation function should work directly at the search space level\n # instead of separately on each dimensions. This would make it possible\n # to sample properly the categorical dimensions.\n new_value = rng.choice(search_space.interval())\n else:\n print(search_space.type)\n new_value = old_value\n return new_value", "def _update(self):\n self.all_params = {}\n self._update_experiment_params()\n self._update_preprocessing_params()\n self._update_model_params()" ]
[ "0.6237321", "0.61739534", "0.6061544", "0.57943547", "0.57356995", "0.5705272", "0.56967473", "0.5678522", "0.5660615", "0.5633325", "0.562737", "0.5563", "0.55493927", "0.5500955", "0.54821765", "0.545939", "0.545895", "0.5434083", "0.542714", "0.5413401", "0.5410262", "0.5406583", "0.5406054", "0.53796226", "0.53796047", "0.5361724", "0.5354891", "0.53470755", "0.5340949", "0.533511" ]
0.78161246
0
By default the nested modules are not imported automatically. Call this function if you would like to import them all. This may be useful for autocompletion in interactive mode.
def import_all(): import theory
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import_sub_modules(self):\n try:\n for package in self.package_list:\n Utils.import_utils.import_submodules(package)\n except ImportError, err:\n print_error(\"{0} : \\n\".format(str(err)))\n print_error('unexpected error: {0}'.format(traceback.format_exc()))", "def _import_all_modules():\n import inspect\n import os\n\n all_objects = []\n globals_, locals_ = globals(), locals()\n\n # dynamically import all the package modules\n modules = set()\n json_files = set()\n for filename in os.listdir(os.path.dirname(__file__)):\n # process all python files in directory that don't start with underscore\n # (which also keeps this module from importing itself)\n modulename, ext = os.path.splitext(filename)\n if filename[0] != \"_\":\n if ext == \".py\":\n modules.add(modulename)\n elif ext == \".json\":\n json_files.add(filename)\n\n old_length = len(modules) + 1\n errors = {}\n while len(modules) and old_length > len(modules):\n old_length = len(modules)\n for modulename in modules.copy():\n package_module = \".\".join([__name__, modulename])\n try:\n module = __import__(package_module, globals_, locals_, [modulename])\n except ModuleNotFoundError as err:\n raise err\n except ImportError as err:\n errors[modulename] = repr(err)\n continue\n\n # Only the class with the same name as the file will be imported\n found_class = False\n for obj_name in filter(lambda name: name[0] != \"_\", module.__dict__):\n found_class = modulename.lower() == obj_name.lower()\n obj = module.__dict__[obj_name]\n if found_class and inspect.isclass(\n obj\n ): # Check that the object found is a class\n globals_[obj_name] = module.__dict__[obj_name]\n all_objects.append(obj_name)\n break\n\n if not found_class:\n logger.warning(\n \"File {}.py does not contain a class named {}. The file will be ignored.\"\n \"\".format(package_module, modulename)\n )\n\n modules.discard(modulename) # Remove module from the available list\n\n if modules:\n logger.warning(\"Failed to import from {} modules {}.\".format(__name__, modules))\n for modulename in modules:\n logger.debug(\"{}: {}\".format(modulename, errors[modulename]))\n\n from cosapp.systems import System\n from jsonschema import ValidationError\n\n def systemFactory(name: str, filename: str) -> System:\n obj = System.load(filename)\n obj.name = name\n return obj\n\n for json_file in json_files: # Fake class behavior for System JSON file\n try:\n tmp_system = System.load(json_file)\n except (TypeError, AttributeError, ValidationError):\n logger.warning(\n 'JSON file \"{}\" does not defined a CoSApp System.'.format(json_file)\n )\n else:\n obj_name = tmp_system.name.capitalize()\n globals_[obj_name] = lambda name: systemFactory(name, json_file)\n all_objects.append(obj_name)\n\n return all_objects", "def gather_modules(self, folder: Folder = None):\n folder = folder or self.root\n\n for module in folder.modules:\n self[module.abs_import] = {\n 'module': module,\n 'imports': []\n }\n\n for sub_folder in folder.sub_folders:\n self.gather_modules(sub_folder)", "def import_all_model_modules():\r\n import brokerage.model\r\n # ensure that these imports don't get auto-deleted! they have side effects.\r\n brokerage.model", "def load_sub_modules(module):\n for loader, name, is_pkg in pkgutil.walk_packages(module.__path__):\n if '.' 
in name:\n continue\n\n import_module(f'{module.__name__}.{name}')", "def supports_ordinary_make_module_imports(self):\n return True", "def load_modules_manually():\n #cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0]))\n cmd_folder = '../myutils/'\n if cmd_folder not in sys.path:\n sys.path.insert(0, cmd_folder)\n #print sys.path", "def load_all_submodules():\n # Load all modules in the current directory.\n pattern_list = _load_all_modules(__file__, __name__)\n return pattern_list", "def imports():\n for name, val in globals().items():\n if isinstance(val, getattr(types, \"ModuleType\")):\n yield val.__name__", "def scan_morepath_modules(cls: type[morepath.App]) -> None:\n for module in sorted(morepath_modules(cls)):\n morepath.scan(import_module(module))", "def getAllImportFiles():\n\tdef get_path(base):\n\t\tb, t = os.path.split(base)\n\t\tif __name__ == t:\n\t\t\treturn [\"animation_nodes\"]\n\t\telse:\n\t\t\treturn get_path(b) + [t]\n\n\tfor root, dirs, files in os.walk(currentPath):\n\t\tpath = \".\".join(get_path(root))\n\t\tfor f in filter(lambda f:f.endswith(\".py\"), files):\n\t\t\tname = f[:-3]\n\t\t\tif not name == \"__init__\":\n\t\t\t\tyield path + \".\" + name", "def extend_import_paths(paths):\n for path in paths:\n dir = os.path.abspath(path if os.path.isdir(path) else os.path.dirname(path))\n while(os.path.exists(os.path.join(dir, '__init__.py'))):\n dir = os.path.dirname(dir)\n sys.path.append(dir)", "def test_modules(self):\n for mod in self.expected_modules:\n try:\n __import__(mod)\n except ImportError:\n raise", "def _import_all(self):\n # on first load, documents dir may not be in import path\n if not self.app.documents_dir in sys.path:\n sys.path += [self.app.documents_dir]\n # clean modules dict before (re)loading anything\n self._remove_non_current_game_modules()\n # make copy of old modules table for import vs reload check\n old_modules = self.modules.copy()\n self.modules = {}\n # load/reload new modules\n for module_name in self._get_game_modules_list():\n try:\n # always reload built in modules\n if module_name in self.builtin_module_names or \\\n module_name in old_modules:\n m = importlib.reload(old_modules[module_name])\n else:\n m = importlib.import_module(module_name)\n self.modules[module_name] = m\n except Exception as e:\n self.app.log_import_exception(e, module_name)", "def deep_iter_modules(name):\r\n mod = import_dotted_name(name)\r\n yield name\r\n if not hasattr(mod, '__path__'):\r\n return\r\n for _, name, _ in iter_modules(mod.__path__, name + '.'):\r\n for name in deep_iter_modules(name):\r\n yield name", "def import_submodules(package_name):\n\n importlib.import_module(package_name)\n package = sys.modules[package_name]\n for importer, name, is_package in pkgutil.walk_packages(package.__path__):\n # not sure why this check is necessary...\n if not importer.path.startswith(package.__path__[0]):\n continue\n name_with_package = package_name + \".\" + name\n importlib.import_module(name_with_package)\n if is_package:\n import_submodules(name_with_package)", "def _LoadPackages():\n return {module.__name__.split('.')[-1]: module for module in\n import_util.LoadModulesForPath(__path__, __name__)}", "def import_submodules(package_name, *submodules):\n package = sys.modules[package_name]\n return {\n name: importlib.import_module(package_name + '.' 
+ name)\n for _, name, _ in pkgutil.walk_packages(package.__path__)\n if not submodules or name in submodules\n }", "def make_modules_importable(modules: Iterable[Module]) -> Dict[str, Module]:\n sys.modules.update({ module.__name__: module for module in modules })\n return sys.modules", "def modules():", "def _import_submodules(package, recursive=True):\n if isinstance(package, str):\n package = importlib.import_module(package)\n results = {}\n for loader, name, is_pkg in pkgutil.walk_packages(package.__path__):\n full_name = package.__name__ + '.' + name\n results[full_name] = importlib.import_module(full_name)\n if recursive and is_pkg:\n results.update(_import_submodules(full_name))\n return results", "def import_descendants(parent_module, target_globals, target_locals):\n basedir = os.path.dirname(parent_module.__file__)\n\n for root_dir, dirs, files in os.walk(basedir):\n relative_dir = root_dir[len(basedir):]\n package = parent_module.__package__ + relative_dir.replace(os.path.sep, '.')\n components = [os.path.splitext(filename) for filename in files]\n modules = [basename for basename, ext in components\n if ext == '.py' and basename != '__init__']\n\n # Import the directory module, unless it is src_module itself (this\n # function is commonly used to import the descendants of a module into\n # itself, so if we didn't have this guard then we'd try to import the\n # parent module into itself)\n if root_dir != basedir:\n exec 'from %s import *' % (package,) in target_globals, target_locals\n\n for module in modules:\n exec 'from %s.%s import *' % (package, module) in target_globals, target_locals", "def resolve_imports(self):\n\n if not self.koocer:\n # @import resolution disabled\n return\n\n for tl in self.ast.body:\n if not isinstance(tl, knodes.KcImport):\n continue\n\n # load, preprocess, parse:\n kc = self.koocer(tl.file_fullpath)\n kc.parse()\n\n sub_ast = kc.ast\n\n # pass basic visitors\n sub_linkchecks = LinkChecks(self.koocer)\n sub_linkchecks.register()\n sub_linkchecks.run(sub_ast)\n\n sub_class_builder = ClassBuilder()\n sub_class_builder.register()\n sub_class_builder.run(sub_ast)\n\n # merge sub_ast informations\n\n # merge kooc types\n new_ktypes = ChainMap(self.ast.ktypes, sub_ast.ktypes)\n self.ast.ktypes = new_ktypes\n\n # merge C types\n new_types = ChainMap(self.ast.types, sub_ast.types)\n self.ast.types = new_types\n\n # merge C top declarations\n new_c_top_decl = ChainMap(self.ast.c_top_decl, sub_ast.c_top_decl)\n self.ast.c_top_decl = new_c_top_decl", "def import_all():\n import sys\n\n # obviously this is a hack for now... What's the right way to learn\n # the directory that holds the plugins directory? I don't want the\n # directory itself, because I *think* we might get name conflicts if we\n # import them directly. (I'm fuzzy about how that works. Can you\n # import \"x\" from one path and \"x\" from another path, and have them both\n # around with the same name? 
sys.modules suggests no.\n pdir = \"/home/sandro/riftr\"\n sys.path.append(pdir)\n \n dir = \"plugins\"\n ids = {}\n for filename in os.listdir(pdir + \"/\" + dir):\n if filename.endswith(\".py\") and not filename[0] == \"_\":\n local = filename[0:-3]\n module_name = dir + \".\" + local\n #print \n #print module_name\n m = __import__(module_name)\n mm = getattr(m, local)\n #print \"=> \", mm\n for (name, entry) in mm.__dict__.items():\n if getattr(entry, \"__doc__\", False) and getattr(entry, \"id\", False):\n if entry.id.startswith(dir+\".\"):\n # because they used \"__name__\"\n entry.id = entry.id[len(dir+\".\"):]\n if entry.id in ids:\n raise RuntimeError, (\"Duplicate id: %s used in %s and %s\" %\n entry.id, ids[entry.id], filename)\n ids[entry.id] = filename\n #print \"registering\", name, entry\n register(entry)\n \n # I wonder why issubclass doesn't work for me like this.\n #if type(entry).__name__ in [ \"classobj\", \"type\" ]:\n # print \"is type/class\", name, entry\n # print issubclass(entry, object)\n # print issubclass(entry, Plugin)\n # print issubclass(entry, InputPlugin)\n\n\n sys.path.pop(-1)", "def _load_modules(self):\n modules_src = os.path.abspath(\"src/modules\")\n\n # perform a tree walk over modules directory\n for file_name, file_path in self._tree_walk(modules_src):\n try:\n # try to find a spec for this file and construct a module\n # from it\n spec = spec_from_file_location(file_name, file_path)\n assert spec is not None\n module = module_from_spec(spec)\n assert spec.loader is not None\n spec.loader.exec_module(module)\n self.modules.append(module)\n self._loaded_modules_names.append(module.__name__)\n except:\n pass", "def build_missing_imports(self) -> None:\n self.undefined -= set(dir(__import__(\"builtins\")))\n\n # Optimisation: we will almost always define sys and pypprint. 
However, in order for us to\n # get to `import sys`, we'll need to examine our wildcard imports, which in the presence\n # of config, could be slow.\n if \"pypprint\" in self.undefined:\n pypprint_def = (\n inspect.getsource(pypprint) if self.define_pypprint else \"from pyp import pypprint\"\n )\n self.before_tree.body = ast.parse(pypprint_def).body + self.before_tree.body\n self.undefined.remove(\"pypprint\")\n if \"sys\" in self.undefined:\n self.before_tree.body = ast.parse(\"import sys\").body + self.before_tree.body\n self.undefined.remove(\"sys\")\n # Now short circuit if we can\n if not self.undefined:\n return\n\n def get_names_in_module(module: str) -> Any:\n try:\n mod = importlib.import_module(module)\n except ImportError as e:\n raise PypError(\n f\"Config contains wildcard import from {module}, but {module} failed to import\"\n ) from e\n return getattr(mod, \"__all__\", (n for n in dir(mod) if not n.startswith(\"_\")))\n\n subimports = {\"Path\": \"pathlib\", \"pp\": \"pprint\"}\n wildcard_imports = (\n [\"itertools\", \"math\", \"collections\"]\n + self.config.wildcard_imports\n + self.wildcard_imports\n )\n subimports.update(\n {name: module for module in wildcard_imports for name in get_names_in_module(module)}\n )\n\n def get_import_for_name(name: str) -> str:\n if name in subimports:\n return f\"from {subimports[name]} import {name}\"\n return f\"import {name}\"\n\n self.before_tree.body = [\n ast.parse(stmt).body[0] for stmt in sorted(map(get_import_for_name, self.undefined))\n ] + self.before_tree.body", "def load_external_modules(pkg):\n for dep in list(pkg.spec.traverse()):\n external_modules = dep.external_modules or []\n for external_module in external_modules:\n load_module(external_module)", "def get_external_imports(tree: dict,\n only_top_level: bool = True) -> set:\n external_imports = set()\n modules = find_tree(tree, lambda x: x[\"type\"] == \"module\", how=\"all\")\n for module in modules:\n for import_item in module[\"imports\"].values():\n if import_item[\"lookup\"] is None:\n if import_item[\"type\"] == \"import\":\n external_imports.add(import_item[\"name\"])\n elif import_item[\"type\"] == \"from-import\":\n if import_item[\"module\"] is not None:\n external_imports.add(import_item[\"module\"])\n if only_top_level:\n external_imports = {i.partition(\".\")[0] for i in external_imports}\n return external_imports", "def import_submodules(package_name: str) -> None:\n importlib.invalidate_caches()\n\n # Import at top level\n module = importlib.import_module(package_name)\n path = getattr(module, '__path__', [])\n path_string = '' if not path else path[0]\n\n # walk_packages only finds immediate children, so need to recurse.\n for module_finder, name, _ in pkgutil.walk_packages(path):\n # Sometimes when you import third-party libraries that are on your path,\n # `pkgutil.walk_packages` returns those too, so we need to skip them.\n if path_string and module_finder.path != path_string:\n continue\n subpackage = f\"{package_name}.{name}\"\n import_submodules(subpackage)", "def get_all_modules(package):\n base = Path(inspect.getabsfile(package)).parent\n\n for fl in base.glob(\"*.py\"):\n print(f\"loading module {fl}\")\n yield load_module(fl)" ]
[ "0.7020762", "0.6595134", "0.63510954", "0.62685543", "0.6243627", "0.62241906", "0.617775", "0.61071664", "0.61007714", "0.6059917", "0.60515565", "0.6002455", "0.59896797", "0.5956948", "0.595397", "0.5918022", "0.591676", "0.59048396", "0.5900739", "0.587304", "0.5872894", "0.5856108", "0.5846242", "0.58411217", "0.5837457", "0.5815426", "0.5809641", "0.5791443", "0.578571", "0.57705927" ]
0.6882997
1
Reset domain_list, origin_list, caching_list, service_name and flavor_id to its default value.
def reset_defaults(self): self.domain_list = [{"domain": "mywebsite%s.com" % uuid.uuid1()}] self.origin_list = [{"origin": "mywebsite1.com", "port": 443, "ssl": False}] self.caching_list = [{"name": "default", "ttl": 3600}, {"name": "home", "ttl": 1200, "rules": [{"name": "index", "request_url": "/index.htm"}]}] self.service_name = str(uuid.uuid1()) self.flavor_id = self.test_config.default_flavor
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset(self):\n self.manager.delete_all()\n for name, val in DEFAULT_SETTINGS.items():\n val['name'] = name\n val['default_value'] = val['value']\n self.manager.from_dict(val)", "def reset(self):\n for var in self.var_list:\n var.value = None\n var.domain = copy.deepcopy(var.init_domain)", "def reset(self):\n self.data = self._defaults", "def reset(self):\n self._url_pattern_mod = None\n self._base_url_pattern = []\n self._senior_url_pattern = {}\n self._domain = []", "def resetStoredDefaults( self ):\n keys= list( self._defDict.keys() )\n data= [ self._defDict[ aKey ] for aKey in keys ]\n \n self.prefObj.save( group= self.prefGroup, name= keys, data= data )\n self.resetSelfWithDefaults()", "def reset(self):\n self.ship_list = self.backup_list", "def resetSelfWithDefaults( self ):\n self.__dict__.update( self._defDict )", "def reset( self ):\n self.conf = self.defaults", "def reset_request_data(context):\n for name, default in default_request_data():\n setattr(context, name, default)", "def reset():\n\n REGISTRY.clear()\n _future_dependencies.clear()\n _future_optionals.clear()", "def reset(cls):\n cls._options = None\n cls._scoped_instances = {}", "def reset(self):\n self._pkgs.clear()\n self._catalogs.clear()\n self._categories.clear()\n self._command_to_category.clear()\n self._version = None", "def reset(self):\n\n self.simple_cache = {}\n self.complex_cache = {}\n self.target_cache = {}", "def reset(self):\n self._unset_defaults_and_overrides()\n self.clear()", "def _fillServiceDefaults(self, args):\n\t\tif self.service.core.hasProperty(\"defaultSortKey\"):\n\t\t\tif \"_DBOPTIONS_ORDER\" not in args:\n\t\t\t\targs[\"_DBOPTIONS_ORDER\"] = self.service.core.getProperty(\n\t\t\t\t\t\"defaultSortKey\").split(\",\")", "def reset_original(self):\n self._original = [] # Empty out self._originals", "def reset(self):\n self.values = None\n self.keys = None\n self.mask = None", "def _reset_seeds(self) -> None:\n self._seeds = [None for _ in range(self.num_envs)]", "def reset(self):\r\n # TODO: have reset flag such that it forces all the bottom changes\r\n self.pwm_freq = self._default[\"pwm_freq\"]\r\n self.gate_logic = self._default[\"gate_logic\"]\r\n self.max_pwm = self._default[\"max_pwm\"]\r\n self.lase_on_power_up = self._default[\"lase_on_power_up\"]\r\n\r\n self.mode = self._default[\"mode\"]\r\n self.lase = self._default[\"lase\"]\r\n self.percent = self._default[\"percent\"] # in percent\r", "async def reset(self, ctx):\n await self.config.clear_all_guilds()\n await ctx.send(\"Reset all settings to default values.\")", "def reset(self):\n self.det_link_map = OrderedDict()\n self.id_link_map = OrderedDict()\n self.declarations_table = None\n self.annotations_table = None\n self.num_frames = 0\n self.num_frames_by_uid = {}\n self.num_frames_by_uid_pre_remove = {}", "def reset(self):\n self.in_compact_method = False\n self.in_setup = False\n self.autoname_cursor = dict()", "def _reset(self):\n self._set(\"_n_init_features\", None)\n self._set(\"_n_output_features\", None)\n self._set(\"_n_intervals\", None)\n self._set(\"_mapper\", {})\n self._set(\"_cpp_preprocessor\", None)\n self._set(\"_fitted\", False)", "def set_initial_values(self):\n\n pass", "def reset(self) -> None:\n self.val = None\n self.notes = []\n self.blocked = False\n self.forbidden = False", "def reset_state(self):\n for name in self._buffers:\n self._buffers[name] = self._defaults[name]", "def reset(self):\n # FIXME: this state does not make sense\n self.reset_creation_info()\n self.reset_document()\n 
self.reset_package()\n self.reset_file_stat()\n self.reset_reviews()\n self.reset_annotations()\n self.reset_extr_lics()", "def reset(self):\n self._setupObjects()", "def _reset(self) -> None:\n self.images = []\n self.activations = []\n self.labels = []\n self.preds = []\n self.n_found = 0", "def clear_lists(self): \n self.fp_config_files = []\n self.txt_files = []\n self.fr_config_files = []" ]
[ "0.6844756", "0.6752497", "0.65349716", "0.6512155", "0.6489538", "0.6362991", "0.614943", "0.6136506", "0.610388", "0.59980386", "0.5987554", "0.59747857", "0.59503067", "0.59085506", "0.59052765", "0.58961785", "0.5888273", "0.5880857", "0.58757615", "0.58709276", "0.5867544", "0.5852483", "0.58454186", "0.5840724", "0.58216256", "0.5813119", "0.5801399", "0.5800693", "0.5784233", "0.5776011" ]
0.8584799
0
Create invalid_json like [[[[[[[[[[[[[test]]]]]]]]]]]]]
def create_invalid_json(self, length): str = "" str += "[" * length str += "\"test\"" str += "]" * length return str
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_circular_nested(self):\n obj = {}\n obj[\"list\"] = [{\"obj\": obj}]\n with self.assertRaises(orjson.JSONEncodeError):\n orjson.dumps(obj)", "def test_schema_invalid_json(self):\n schema_0_input = schema_nested_2_invalid_JSON\n\n # if you uncomment this line:\n # schema_0_input = schema_nested_2\n # this will fail the test: Failed: DID NOT RAISE <class 'simplejson.scanner.JSONDecodeError'>\n # because this is a valid schema\n\n with pytest.raises(simplejson.scanner.JSONDecodeError):\n msg = singer.parse_message(schema_0_input)", "def test_validation_error_json():\n error = ValidationError(\n type=\"Syntax Error\",\n data={\"data\": [1, 2, 3]},\n )\n\n assert ValidationError(**json.loads(error.json())) == error", "def test_lti20_bad_json(self):\r\n for error_inputs, error_message in self.BAD_JSON_INPUTS:\r\n for einput in error_inputs:\r\n with self.assertRaisesRegexp(LTIError, error_message):\r\n self.xmodule.parse_lti_2_0_result_json(einput)", "def test_nested_objf(self):\n jobj = JObject(keys = ['status', JObject(parent = 'nest', keys= ['a','b']), \n 'result'])\n jdic = json.loads('{\"status\": \"success\", \"result\": \"yes\", \"nest\": {\"a\":1,\"bc\":2}}')\n self.assertFalse(check_json_object(jdic, jobj))", "def test_list_2f(self):\n jobj = JList(parent = 'some', keys = [JObject(parent = None, keys = ['test1', 'test2'])])\n jdic = json.loads('[{\"test1\":3, \"test9\":4}]')\n self.assertFalse(check_json_array(jdic, jobj))", "def test_save_json_with_invalid_step(temp_dir):\n data = json.dumps({\"k\": \"v\", \"list\": [1, 2, 3]})\n\n with pytest.raises(ValueError):\n save_json(temp_dir, data, step={\"invalid\": \"dict\"})", "def test_empty_json(self):\n json_data = '{ }'\n json_flattened = json_flatten(json_data)\n self.assertEqual(json.loads(json_flattened), json.loads('{ }'))", "def invalid_train_item(train_items: List[JSONDict]) -> JSONDict:\n altered = train_items[0]\n altered[\"language\"] = \"engl\"\n altered[\"date\"] = \"02-2031-01\"\n altered[\"url\"] = \"incorrect.com\"\n altered[\"categoryid\"] = None\n return altered", "def test_example_json(self):\n json_data = '{ \"a\": 1, \"b\": true, \"c\": { \"d\": 3, \"e\": \"test\" } }'\n json_flattened = json_flatten(json_data)\n self.assertEqual(json.loads(json_flattened),\n json.loads('{ \"a\": 1, \"b\": true, \"c.d\": 3, \"c.e\": \"test\" }'))", "def test_listf(self):\n jobj = JList(parent = 'some', keys = [JObject(parent = None, keys = ['test1', 'test2'])])\n jdic = json.loads('[]')\n self.assertFalse(check_json_array(jdic, jobj))", "def test_list_4f(self):\n jobj = JList(parent = 'some', keys = [JObject(parent = None, keys = ['test1', 'test2']),\n JObject(parent = None, keys = ['test1', 'test2'])])\n jdic = json.loads('[{\"test1\":3, \"test2\":4},{\"test1\":3, \"test23\":4}]')\n self.assertFalse(check_json_array(jdic, jobj))", "def test_embedded_json(self):\n json_data = '{\"a\": {\"b\" : true } }'\n json_flattened = json_flatten(json_data)\n self.assertEqual(json.loads(json_flattened), json.loads('{\"a.b\" : true}'))", "def test_list_3(self):\n jobj = JList(parent = 'some', keys = [JObject(parent = None, keys = ['test1', 'test2'])])\n jdic = json.loads('[{\"test1\":3, \"test2\":4},{\"test1\":3, \"test2\":4}]')\n self.assertFalse(check_json_array(jdic, jobj))", "def test_invalid_json_batch(self):\n req = '[{\"jsonrpc\": \"2.0\", \"method\": \"sum\", \"params\": [1,2,4], \"id\": \"1\"},{\"jsonrpc\": \"2.0\", \"method\"]'\n resp = '{\"jsonrpc\": \"2.0\", \"error\": {\"code\": -32700, \"message\": \"ParseError: Parse 
error\"}, \"id\": null}'\n status = 500\n r_status, r_resp = self.exec_handler(req)\n self.assertEqual(r_status, status)\n self.assertEqual(simplejson.loads(r_resp), simplejson.loads(resp))", "def test_wrong_train_item(invalid_train_item: JSONDict) -> None:\n\n with pytest.raises(ValidationError) as e:\n invalid = TrainItem(**invalid_train_item) # noqa: F841\n\n assert e.value.errors() == [\n {\n \"loc\": (\"categoryid\",),\n \"msg\": \"none is not an allowed value\",\n \"type\": \"type_error.none.not_allowed\",\n },\n {\n \"loc\": (\"url\",),\n \"msg\": \"invalid or missing URL scheme\",\n \"type\": \"value_error.url.scheme\",\n },\n {\n \"ctx\": {\"limit_value\": 2},\n \"loc\": (\"language\",),\n \"msg\": \"ensure this value has at most 2 characters\",\n \"type\": \"value_error.any_str.max_length\",\n },\n {\n \"loc\": (\"date\",),\n \"msg\": \"Could not validate format '02-2031-01'. Must be YYYY-MM-DD or iso-formatted time stamp\",\n \"type\": \"value_error\",\n },\n ]", "def test_circular_list(self):\n obj = []\n obj.append(obj)\n with self.assertRaises(orjson.JSONEncodeError):\n orjson.dumps(obj)", "def test_list_2(self):\n jobj = JList(parent = 'some', keys = [JObject(parent = None, keys = ['test1', 'test2'])])\n jdic = json.loads('[{\"test1\":3, \"test2\":4}]')\n self.assertTrue(check_json_array(jdic, jobj))", "def invalid_item(upload_items: List[JSONDict]) -> JSONDict:\n altered = upload_items[0]\n altered[\"language\"] = \"engl\"\n altered[\"date\"] = \"02-2031-01\"\n altered[\"url\"] = \"incorrect.com\"\n return altered", "def test_base_case_json(self):\n json_data = '{\"a\": 1}'\n json_flattened = json_flatten(json_data)\n self.assertEqual(json.loads(json_flattened), json.loads('{\"a\" : 1}'))", "def validate_json(self):\n pass", "def test_list(self):\n jobj = JList(parent = 'some', keys = [])\n jdic = json.loads('[]')\n self.assertTrue(check_json_array(jdic, jobj))", "def test_list_4(self):\n jobj = JList(parent = 'some', keys = [JObject(parent = None, keys = ['test1', 'test2']),\n JObject(parent = None, keys = ['test1', 'test2'])])\n jdic = json.loads('[{\"test1\":3, \"test2\":4},{\"test1\":3, \"test2\":4}]')\n self.assertTrue(check_json_array(jdic, jobj))", "def test_circular_dict(self):\n obj = {}\n obj[\"obj\"] = obj\n with self.assertRaises(orjson.JSONEncodeError):\n orjson.dumps(obj)", "def test_invalid_data(self):\n\n json_data = {\n \"input\" : {\n 'version': 'BAD',\n 'files': {'input_a': [self.source_file.id]},\n 'json': {}\n },\n \"job_type_id\" : self.job_type1.pk,\n \"configuration\" : self.configuration\n }\n\n url = '/%s/jobs/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)", "def test_explode_json(self):\n self.assertEqual({\"a_1\": [{\"a_2\": 2, \"f_2\": 3, \"g_2\": 1}], \"c_3\": 1}, \\\n comparator.explode_json('{\"a_1\": [{\"a_2\": 2, \"f_2\": 3, \"g_2\": 1}], \"c_3\": 1}'))", "def test_nested_obj(self):\n jobj = JObject(keys = ['status', JObject(parent = 'nest', keys= ['a','b']), \n 'result'])\n jdic = json.loads('{\"status\": \"success\", \"result\": \"yes\", \"nest\": {\"a\":1,\"b\":2}}')\n self.assertTrue(check_json_object(jdic, jobj))", "def test_invalid_JSON_returns_error(self):\n\n response = self.client.post(\n reverse('transcript:record_telegram'),\n content_type='application/json',\n data='''{\"something\":''')\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response.content, b\"Could not 
parse JSON\")\n self.assertEqual(Message.objects.count(), 0)", "def testExtendedErrorMessageWithTree(self):\n\n json_message = current.xml.json_message\n\n msg = json_message(False, 405, message=\"Test\", tree='{\"test\": \"value\"}')\n msg = json.loads(msg)\n self.assertEqual(len(msg), 4)\n self.assertEqual(msg[\"status\"], \"failed\")\n self.assertEqual(msg[\"statuscode\"], \"405\")\n self.assertEqual(msg[\"message\"], \"Test\")\n self.assertTrue(isinstance(msg[\"tree\"], dict))\n tree = msg[\"tree\"]\n self.assertEqual(len(tree), 1)\n self.assertEqual(tree[\"test\"], \"value\")", "def test_wrong_upload_item(invalid_item: JSONDict) -> None:\n\n with pytest.raises(ValidationError) as e:\n invalid = UploadItem(**invalid_item) # noqa: F841\n assert e.value.errors() == [\n {\n \"ctx\": {\"limit_value\": 2},\n \"loc\": (\"language\",),\n \"msg\": \"ensure this value has at most 2 characters\",\n \"type\": \"value_error.any_str.max_length\",\n },\n {\n \"loc\": (\"date\",),\n \"msg\": \"Could not validate format '02-2031-01'. Must be YYYY-MM-DD or iso-formatted time stamp\",\n \"type\": \"value_error\",\n },\n {\n \"loc\": (\"url\",),\n \"msg\": \"invalid or missing URL scheme\",\n \"type\": \"value_error.url.scheme\",\n },\n ]" ]
[ "0.6324935", "0.62803555", "0.6237883", "0.615935", "0.5985762", "0.597395", "0.5954119", "0.5931266", "0.5930603", "0.5907497", "0.5892029", "0.588324", "0.5859376", "0.5838681", "0.582026", "0.5753517", "0.57280266", "0.5629437", "0.56058764", "0.5550728", "0.5540297", "0.5539157", "0.5517435", "0.55112606", "0.55074143", "0.54742163", "0.54673314", "0.5444871", "0.5424676", "0.5413775" ]
0.6898037
0
zip the data using gzip format
def data_zip(self, data): stringio = StringIO.StringIO() gzip_file = gzip.GzipFile(fileobj=stringio, mode='wb') gzip_file.write(data) gzip_file.close() return stringio.getvalue()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __unzip(self, data):\n compressed = StringIO.StringIO(data)\n gzipper = gzip.GzipFile(fileobj=compressed)\n return gzipper.read()", "def save_to_gzip(data,fname):\n with gzip.open(fname + '.gz', 'wb',compresslevel = 9) as f:\n f.write(data.tobytes())", "def gzip_compress(data):\n s = BytesIO()\n g = gzip.GzipFile(fileobj=s, mode='wb')\n g.write(data)\n g.close()\n return s.getvalue()", "def zip_data(self) -> None:\n zipf = zipfile.ZipFile('output.zip', 'w', zipfile.ZIP_DEFLATED)\n self._zipdir(self.path, zipf)\n zipf.close()", "def de_gzip(data):\n cmps = StringIO.StringIO(data)\n gzipper = gzip.GzipFile(fileobj=cmps)\n return gzipper.read()", "def de_gzip(data):\n cmps = StringIO.StringIO(data)\n gzipper = gzip.GzipFile(fileobj=cmps)\n return gzipper.read()", "def gunzip(data):\n return gzip.GzipFile(fileobj=StringIO(data)).read()", "def gzip_worker(args):\n\tp = multiprocessing.current_process()\n\tprint('Start zipping %s: %s %s' %(args[1], p.name, p.pid))\n\tpath = args[0]\n\tfilename = args[1]\n\tassert os.path.splitext(filename)[1] == '.fastq', '%s is not a fastq file' %filename \n\t\n\tcall = 'gzip -c ' + os.path.join(path, filename) + ' > ' + os.path.join(path, filename) + '.gz'\n\tsubprocess.call(call, shell=True)\n\tprint('Completed zipping %s: %s %s' %(filename, p.name, p.pid))", "def _unzip(self, data):\r\n with io.BytesIO(data) as buf:\r\n with gzip.GzipFile(fileobj=buf) as unzipped:\r\n return unzipped.read()", "def zip_data_file(task_id, task_name, data_path):\n zip_file_dir = os.path.join(FILE_PATH, task_id + \".zip\")\n file = zipfile.ZipFile(zip_file_dir, \"w\", zipfile.ZIP_DEFLATED)\n sample_path = os.path.join(data_path, \"datasets\", str(task_id) + \"_\" + task_name + \".csv\")\n true_dag_path = os.path.join(data_path, \"true\", str(task_id) + \"_\" + task_name + \".npz\")\n file.write(sample_path)\n file.write(true_dag_path)\n file.close()\n return zip_file_dir", "def compress(self, data):\r\n return self.add_chunk(data)", "def compressed_pickle(title, data):\n with bz2.BZ2File(title, 'w') as f:\n cPickle.dump(data, f)", "def zip_compress(self, destination):\n\n if destination is not None and isinstance(destination, str):\n with ZipFile(destination, \"w\") as thezip:\n thezip.write(self.file)", "def _open_zip(self):\n self.buffer = io.BytesIO()\n self.zf = zipfile.ZipFile(self.buffer, \"w\", zipfile.ZIP_DEFLATED)", "def to_zarr(self, *args, **kwargs):\n if (\n len(args) == 1\n and isinstance(args[0], str)\n and args[0].endswith(\".zarr.zip\")\n ):\n if {\"compression\", \"mode\"}.issuperset(kwargs.keys()):\n import zarr\n\n with zarr.ZipStore(args[0], **kwargs) as store:\n self.to_zarr(store)\n return\n return super().to_zarr(*args, **kwargs)", "def _compress_content(self, content):\n zbuf = io.BytesIO()\n zfile = gzip.GzipFile(mode=\"wb\", compresslevel=9, fileobj=zbuf)\n\n try:\n zfile.write(content.read())\n finally:\n zfile.close()\n\n content.file = zbuf\n content.seek(0)\n\n return content", "def zipFasta(self):\n utils.log(\"zipping {} ...\".format(self.fastaFileName))\n cmd = \"bgzip -f {}\".format(self.fastaFileName)\n utils.runCommand(cmd)", "def make_zip(self):\n shutil.make_archive(self.name, 'zip', self.name)", "def unzip_data():\n zip_ref = zipfile.ZipFile(data_zip, 'r')\n zip_ref.extractall('')\n zip_ref.close()", "def zipstreams(filename):\r\n with open(filename, 'rb') as fh:\r\n data = fh.read()\r\n i = 0\r\n while i < len(data):\r\n try:\r\n zo = zlib.decompressobj()\r\n yield i, zo.decompress(data[i:])\r\n i += len(data[i:]) - 
len(zo.unused_data)\r\n except zlib.error:\r\n i += 1", "def _gzipencode(content):\n import gzip\n out = BytesIO()\n f = gzip.GzipFile(fileobj=out, mode='w', compresslevel=5)\n f.write(content)\n f.close()\n return out.getvalue()", "def zip_(input_file, output_file, chunk_size, mode):\n output_file = validator.validate_zip(input_file, output_file)\n process = subprocess.Popen([PBWT_BIN, 'zip', input_file, output_file,\n str(chunk_size), mode], stdout=subprocess.PIPE)\n process_results(str(process.communicate()[0]), input_file, output_file)", "def gzdeflate():\n return zlib.compress(val)", "def prepare_gz(self, filename, *args, **kwargs):\n\n return '/vsigzip/' + filename, args, kwargs", "def _compress_string(content):\n zbuf = StringIO()\n zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)\n zfile.write(content)\n zfile.close()\n return zbuf.getvalue()", "def compress_data():\r\n os.chdir(PATH_CSV)\r\n z = zipfile.ZipFile(SITE_NAME + \"_\" + DATE + \"_csv.zip\", \"a\")\r\n z.write(SITE_NAME + \"_\" + DATE + \".csv\")\r\n os.remove(SITE_NAME + \"_\" + DATE + \".csv\")\r\n\r\n os.chdir(PATH_HTML)\r\n z = zipfile.ZipFile(SITE_NAME + \"_\" + DATE + \"_html.zip\", \"a\")\r\n for file in glob.glob(\"*.html\"):\r\n z.write(file)\r\n os.remove(file)", "def compress_stream(src, dst):\n with gzip.GzipFile(fileobj=dst, mode='wb') as gz:\n for block in iterfile(src):\n gz.write(block)", "def tar_gz_compress(self, destination):\n\n if destination is not None and isinstance(destination, str):\n with tarfile_open(destination, \"w:gz\") as tar:\n tar.add(self.file)", "def gunzip(data):\n f = GzipFile(fileobj=BytesIO(data))\n output_list = []\n chunk = b'.'\n while chunk:\n try:\n chunk = read1(f, 8196)\n output_list.append(chunk)\n except (IOError, EOFError, struct.error):\n if output_list or getattr(f, 'extrabuf', None):\n try:\n output_list.append(f.extrabuf[-f.extrasize:])\n finally:\n break\n else:\n raise\n return b''.join(output_list)", "def zip_files(dict_files, compression=zipfile.ZIP_DEFLATED):\n in_memory = StringIO()\n\n with zipfile.ZipFile(in_memory, 'w', compression) as zf:\n for fname, fp in dict_files.iteritems():\n zf.writestr(fname, fp.read())\n\n zf.close()\n\n in_memory.seek(0)\n\n return in_memory" ]
[ "0.6996331", "0.67672616", "0.67579013", "0.6729529", "0.65639186", "0.65639186", "0.6514379", "0.63974094", "0.6345399", "0.61934805", "0.61905354", "0.61457014", "0.61027217", "0.6101483", "0.6070386", "0.60639983", "0.6051844", "0.604895", "0.6003619", "0.5999707", "0.5944511", "0.5940555", "0.59330684", "0.58793175", "0.5871851", "0.5867225", "0.58218354", "0.5812585", "0.57811445", "0.5739871" ]
0.7971304
0
Check whether it is possible to kill the application by creating a big malicious json blob.
def test_malicious_json_create_service(self): # create a payload with malicous json blob attack_string = self.create_malicious_json(900) headers = {"X-Auth-Token": self.client.auth_token, "X-Project-Id": self.client.project_id} kwargs = {"headers": headers, "data": attack_string} resp = self.client.create_service(service_name=self.service_name, domain_list=self.domain_list, origin_list=self.origin_list, caching_list=self.caching_list, flavor_id=self.flavor_id, requestslib_kwargs=kwargs) if 'location' in resp.headers: self.service_url = resp.headers['location'] else: self.service_url = '' self.assertTrue(resp.status_code < 503)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _metadata_too_large(self):\n # currently the entire POST JSON request body is limited by default to 100kb\n return sys.getsizeof(self.metadata) > 10000", "def can_process(dict_data: dict) -> bool:\n return dict_data[\"experiment\"] in [_023_EXPERIMENT]", "def is_private(self, info):\n result = True\n seconds = 5\n\n file_name = \"test.flv\"\n proc = self.run_rtmpdump(info, file_name, extra_arg=\"-B \" + str(seconds))\n proc.wait()\n\n if os.path.isfile(file_name):\n if os.path.getsize(file_name) > 0:\n result = False\n os.remove(file_name)\n\n return result", "def check_vulnerability(self):\n\t\tpass", "def test_misbehavingBackend(self):\n contentStore = self._store()\n store = contentStore.store\n obj = self._storeObject(\n contentStore=contentStore,\n content='somecontent',\n contentType=u'application/octet-stream')\n contentStore2 = InsaneStore(store=store)\n store.inMemoryPowerUp(contentStore2, IBackendStore)\n f = self.failureResultOf(\n self._verify(contentStore, obj), UnexpectedDigest)\n self.assertEqual(f.value.objectId, obj.objectId)", "def test_malicious_json_utf_8_create_service(self):\n # create a payload with malicous json blob\n attack_string = self.create_malicious_json(800)\n headers = {\"X-Auth-Token\": self.client.auth_token,\n \"X-Project-Id\": self.client.project_id}\n kwargs = {\"headers\": headers, \"data\": attack_string.encode(\"utf-8\")}\n resp = self.client.create_service(service_name=self.service_name,\n domain_list=self.domain_list,\n origin_list=self.origin_list,\n caching_list=self.caching_list,\n flavor_id=self.flavor_id,\n requestslib_kwargs=kwargs)\n if 'location' in resp.headers:\n self.service_url = resp.headers['location']\n else:\n self.service_url = ''\n\n self.assertTrue(resp.status_code < 503)", "def can_process(dict_data: dict) -> bool:\n return dict_data[\"experiment\"] in [\"016s1803_nem\"]", "def other_native_crash(self) -> bool:\n return pulumi.get(self, \"other_native_crash\")", "def other_native_crash(self) -> bool:\n return pulumi.get(self, \"other_native_crash\")", "def GET_kill(self):\n sys.exit(0)", "def should_terminate(self):\n return False", "def test_check_new_data_saved_size_limit_exceeded(self):\n ldump = [[b'\\x01.cafe', 123, 512], [b'\\x00.bbbb', 123, 128], [b'\\x01.babe', 124, 128]]\n self.session.get.side_effect = [pickle.dumps(ldump), True, True, True, True]\n self.sut.init()\n self.sut.track(b'\\x01.ffff', 640)\n ldump.append([b'\\x01.ffff', self.sut.index['keys'][b'\\x01.ffff']['saved_at'], 640])\n ldump = ldump[1:]\n self.loop.run_until_complete(self.sut.check())\n Mock.assert_called_once_with(self.repository.blockchain.remove_block, b'cafe')\n Mock.assert_called_once_with(self.session.put, b'cache_index', pickle.dumps(ldump))", "def is_valid_json(j):\n try:\n json.dumps(j)\n return True\n except json.JSONDecodeError:\n print(\"not valid json\")\n return False", "def test_double_corrupt(pid: int, otId: int) -> bool:\n box_mon = BoxMon()\n box_mon.personality = pid\n box_mon.otId = otId\n box_mon.sub(0).type0.species = 308\n box_mon.sub(0).type0.experience = 2195\n box_mon.sub(0).type0.friendship = 70\n sub1 = box_mon.sub(1).type1\n sub1.moves[0] = 33\n sub1.moves[1] = 253\n sub1.moves[2] = 185\n sub1.pp[0] = 35\n sub1.pp[1] = 10\n sub1.pp[2] = 20\n sub2 = box_mon.sub(2).type2\n sub2.attackEV = 22\n sub2.hpEV = 8\n sub3 = box_mon.sub(3).type3\n sub3.metLocation = 28\n sub3.metLevel = 14\n sub3.metGame = 3\n sub3.pokeBall = 2\n sub3.otGender = 1\n sub3.unk = 977594907\n box_mon.checksum = box_mon.calc_checksum()\n sum1 = 
box_mon.checksum\n box_mon.encrypt()\n box_mon.personality |= 0x40000000\n box_mon.decrypt()\n sum2 = box_mon.calc_checksum()\n box_mon.encrypt()\n box_mon.otId |= 0x40000000\n box_mon.decrypt()\n sum3 = box_mon.calc_checksum()\n if sum1 == sum2 == sum3 and box_mon.sub(3).type3.isEgg == 0:\n box_mon.encrypt()\n return True\n return False", "def abort(self):\n if self.process:\n self.process.kill()\n return True\n else:\n return False", "def stop_loading(self):\n return psutil.virtual_memory()[2] >= self.max_memory", "def stop_loading(self):\n return psutil.virtual_memory()[2] >= self.max_memory", "def kill(self, sig):\n\n # we parse the signal at the client level to reduce the time we pass\n # in the server.\n signum = parse_signal_value(sig)\n\n body = json.dumps({\"signal\": signum})\n self.server.request(\"post\", \"/jobs/%s/%s/signal\" % (self.sessionid,\n self.name), body=body)\n return True", "def phone_kill(self) -> None:", "def test_malicious_json_utf_16_create_service(self):\n # create a payload with malicous json blob\n attack_string = self.create_malicious_json(400)\n headers = {\"X-Auth-Token\": self.client.auth_token,\n \"X-Project-Id\": self.client.project_id}\n kwargs = {\"headers\": headers, \"data\": attack_string.encode(\"utf-16\")}\n resp = self.client.create_service(service_name=self.service_name,\n domain_list=self.domain_list,\n origin_list=self.origin_list,\n caching_list=self.caching_list,\n flavor_id=self.flavor_id,\n requestslib_kwargs=kwargs)\n if 'location' in resp.headers:\n self.service_url = resp.headers['location']\n else:\n self.service_url = ''\n\n self.assertTrue(resp.status_code < 503)", "def abort(self):\n\n if self.process:\n self.process.kill()\n return True\n else:\n return False", "def memory_limit_exceeded_helper(self, command, executor_type):\n job_uuid, resp = util.submit_job(self.cook_url, command=command, executor=executor_type, mem=128)\n try:\n self.assertEqual(201, resp.status_code, msg=resp.content)\n job = util.wait_for_job(self.cook_url, job_uuid, 'completed')\n job_details = f\"Job details: {json.dumps(job, sort_keys=True)}\"\n self.assertEqual('failed', job['state'], job_details)\n self.assertEqual(1, len(job['instances']), job_details)\n instance = job['instances'][0]\n instance_details = json.dumps(instance, sort_keys=True)\n # did the job fail as expected?\n self.assertEqual(executor_type, instance['executor'], instance_details)\n self.assertEqual('failed', instance['status'], instance_details)\n # Mesos chooses to kill the task (exit code 137) or kill the executor with a memory limit exceeded message\n if 2002 == instance['reason_code']:\n self.assertEqual('Container memory limit exceeded', instance['reason_string'], instance_details)\n elif 99003 == instance['reason_code']:\n # If the command was killed, it will have exited with 137 (Fatal error signal of 128 + SIGKILL)\n self.assertEqual('Command exited non-zero', instance['reason_string'], instance_details)\n if executor_type == 'cook':\n self.assertEqual(137, instance['exit_code'], instance_details)\n else:\n self.fail('Unknown reason code {}, details {}'.format(instance['reason_code'], instance_details))\n finally:\n util.kill_jobs(self.cook_url, [job_uuid])", "def kill(self):\r\n # get current application\r\n currentApplication = self.phone.uiState.getCurrentApplication()\r\n\r\n if not currentApplication in self.__appsThatCantBeKilled:\r\n self.phone.comment('exit.kill()')\r\n self.phone.sx(self.__killCommand)\r\n self.phone.delay(300, False)\r\n 
self.phone.uiState.getCurrentState(True)\r\n else:\r\n self.phone.warn('Not allowed to kill \"%s\" application using SX' % currentApplication)", "def test_block_unknown_processes(self):\n cwl = {\n \"cwlVersion\": \"v1.0\",\n \"class\": \"CommandLineTool\",\n \"baseCommand\": [\"python3\"],\n \"inputs\": {\n \"stringInput\": \"string\"\n },\n \"requirements\": {\n CWL_REQUIREMENT_APP_DOCKER: {\"dockerPull\": \"python:3.7-alpine\"},\n \"InlineJavascriptRequirement\": {},\n \"ResourceRequirement\": {\"ramMin\": 10240, \"coresMin\": 3}\n\n },\n \"outputs\": [],\n }\n body = {\n \"processDescription\": {\n \"process\": {\n \"id\": self._testMethodName,\n \"title\": \"some title\",\n \"abstract\": \"this is a test\",\n },\n },\n \"deploymentProfileName\": \"http://www.opengis.net/profiles/eoc/wpsApplication\",\n \"executionUnit\": [{\"unit\": cwl}],\n }\n\n with contextlib.ExitStack() as stack_exec:\n for mock_exec in mocked_execute_process():\n stack_exec.enter_context(mock_exec)\n resp = mocked_sub_requests(self.app, \"post_json\", \"/processes\", data=body, timeout=5,\n headers=self.json_headers, only_local=True, expect_errors=True)\n assert resp.status_code == 422", "def test_malicious_json_gzip_create_service(self):\n # create a payload with malicious json blob\n attack_string = self.create_malicious_json(2500)\n headers = {\"X-Auth-Token\": self.client.auth_token,\n \"X-Project-Id\": self.client.project_id,\n \"Content-Encoding\": \"gzip\"}\n kwargs = {\"headers\": headers, \"data\": self.data_zip(attack_string)}\n resp = self.client.create_service(service_name=self.service_name,\n domain_list=self.domain_list,\n origin_list=self.origin_list,\n caching_list=self.caching_list,\n flavor_id=self.flavor_id,\n requestslib_kwargs=kwargs)\n if 'location' in resp.headers:\n self.service_url = resp.headers['location']\n else:\n self.service_url = ''\n\n self.assertTrue(resp.status_code < 503)", "async def crash(request):\n os._exit(1)", "def verify_payload():\n return True", "def kill(self):\n if self.client is None:\n # never started, can't stop - should be warning or exception?\n return False\n try:\n self.client.kill()\n except Py4JError:\n logger.debug(\"Error while attempting to kill\", exc_info=1)\n # fallback\n self.yarn_api.kill(self.app_id)\n if self.proc is not None:\n self.client_gateway.shutdown()\n if on_windows:\n call([\"cmd\", \"/c\", \"taskkill\", \"/f\", \"/t\", \"/pid\",\n str(self.proc.pid)])\n self.proc.terminate()\n self.proc.communicate()\n self.proc = None\n self.client = None\n out = self.runtime_status() == 'KILLED'\n return out", "def _validate_json(self):\n # Do we find valid json?\n try:\n with open(self.batch_json_path, \"rb\") as fd:\n batch_json = json.loads(fd.read())\n\n except Exception as err:\n raise\n self.message(\n \"[-] Error reading JSON batch file '%s' : '%s'\" %\n (self.batch_json_path, err))\n return False\n\n # Does the json represent a dictionary of the expected form?\n if not isinstance(batch_json, types.DictionaryType):\n self.message(\n \"[-] JSON batch file '%s' deserialises to unexpected object type '%s'\" %\n (self.batch_json_path, type(batch_json)))\n return False\n\n # If it is a dictionary does it have the expected characteristics?\n for endpoint, sys_info in batch_json.items():\n\n # Endpoint should be a hostname, IP or some other string\n # identifier, difficult to validate much beyond 'string'\n if type(endpoint) not in [types.StringType, types.UnicodeType]:\n self.message(\n \"[-] Element within JSON batch file '%s' conatins unexpected 
object type for an endpoint element '%s'. %s : %s\" %\n (self.batch_json_path, type(endpoint), endpoint, sys_info))\n return False\n\n # Does the sys_info dict contain the expected keys?\n if set(sys_info.keys()).symmetric_difference(\n set(self.json_batch_template)):\n self.message(\n \"[-] Unexpected sys_info structure within JSON batch file %s, expected keys '%s' %s : %s\" %\n (self.batch_json_path, self.json_batch_template, endpoint, sys_info))\n return False\n\n # Create a psuedononymised hash of the uuid using MAC addr as salt\n mac_repr = \"0x\" + sys_info[\"mac_addr\"].lower().replace(\":\", \"\")\n sys_info[\"hashed_uuid\"] = hashlib.sha256(\n mac_repr + sys_info[\"sys_uuid\"]).hexdigest()\n\n # Remove both the real sys_uuid and the mac_addr from the structure so they do not get submitted to the API\n # and remain confidential to the submitter\n del sys_info[\"sys_uuid\"]\n del sys_info[\"mac_addr\"]\n\n # Set the read in json structure as the structure of system data to\n # walk and send to the API\n self.endpoints_to_check = batch_json\n\n self.message(\"[+] Batch JSON file validated\")\n return True", "def test_flag_aborted(self):\n container_dir = os.path.join(self.root, 'apps', 'proid.myapp#001',\n 'data')\n fs.mkdir_safe(container_dir)\n\n app_abort.flag_aborted(container_dir,\n why=app_abort.AbortedReason.INVALID_TYPE,\n payload='test')\n\n aborted_file = os.path.join(container_dir, 'aborted')\n with io.open(aborted_file) as f:\n aborted = json.load(f)\n\n self.assertEqual('invalid_type', aborted.get('why'))\n self.assertEqual('test', aborted.get('payload'))" ]
[ "0.53570426", "0.5039716", "0.498842", "0.4982407", "0.4946164", "0.49423128", "0.49222663", "0.49024215", "0.49024215", "0.48760965", "0.48653737", "0.48636398", "0.4819188", "0.48098838", "0.48077267", "0.48038307", "0.48038307", "0.4798679", "0.47941115", "0.47817224", "0.47712976", "0.47555813", "0.4738558", "0.47132558", "0.47081518", "0.46925682", "0.46910977", "0.46901897", "0.4687121", "0.46866304" ]
0.5133456
1
Check whether it is possible to kill the application by creating a big malicious json blob with utf8 encoding.
def test_malicious_json_utf_8_create_service(self): # create a payload with malicous json blob attack_string = self.create_malicious_json(800) headers = {"X-Auth-Token": self.client.auth_token, "X-Project-Id": self.client.project_id} kwargs = {"headers": headers, "data": attack_string.encode("utf-8")} resp = self.client.create_service(service_name=self.service_name, domain_list=self.domain_list, origin_list=self.origin_list, caching_list=self.caching_list, flavor_id=self.flavor_id, requestslib_kwargs=kwargs) if 'location' in resp.headers: self.service_url = resp.headers['location'] else: self.service_url = '' self.assertTrue(resp.status_code < 503)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_bad_encoding(self, app, data_queues):\n body = b'{\"comment\": \"R\\xe9sum\\xe9 from 1990\", \"items\": []}'\n assert \"Résumé\" in body.decode(\"iso8859-1\")\n with pytest.raises(UnicodeDecodeError):\n body.decode(\"utf-8\")\n headers = {\"Content-Type\": \"application/json; charset=utf-8\"}\n res = self._call(app, body=body, headers=headers, method=\"post\", status=400)\n detail = (\n \"'utf-8' codec can't decode byte 0xe9 in position 14: invalid\"\n \" continuation byte\"\n )\n self.check_response(data_queues, res, \"parse_error\", details={\"decode\": detail})", "def test_malicious_json_utf_16_create_service(self):\n # create a payload with malicous json blob\n attack_string = self.create_malicious_json(400)\n headers = {\"X-Auth-Token\": self.client.auth_token,\n \"X-Project-Id\": self.client.project_id}\n kwargs = {\"headers\": headers, \"data\": attack_string.encode(\"utf-16\")}\n resp = self.client.create_service(service_name=self.service_name,\n domain_list=self.domain_list,\n origin_list=self.origin_list,\n caching_list=self.caching_list,\n flavor_id=self.flavor_id,\n requestslib_kwargs=kwargs)\n if 'location' in resp.headers:\n self.service_url = resp.headers['location']\n else:\n self.service_url = ''\n\n self.assertTrue(resp.status_code < 503)", "def check_string():\n\n # Forcing check for valid json and headers with Content-Type:application/json\n content = request.get_json(silent=False, force=True)\n\n payload = content.get('data', None)\n \n if not payload:\n return response_handler(\n {\"error\": \"'data' key missing from JSON payload.\"},\n 400\n )\n if not isinstance(payload, basestring):\n return response_handler(\n {\"error\": \"Value of 'data' key is not of type 'string'.\"},\n 400\n )\n \n pangram = analyze_string(payload)\n if not pangram:\n return response_handler(\n {\"error\": False},\n 400\n )\n\n return response_handler(\n {\"success\": True},\n 200\n )", "def test_unicode_insert_error():\n# In addition, we should use vagrant or azure deployments of the scanner to Ubuntu and Windows virtual machines\n# to ensure cross-platform behavior.\n pass", "def other_native_crash(self) -> bool:\n return pulumi.get(self, \"other_native_crash\")", "def other_native_crash(self) -> bool:\n return pulumi.get(self, \"other_native_crash\")", "def is_valid_json(j):\n try:\n json.dumps(j)\n return True\n except json.JSONDecodeError:\n print(\"not valid json\")\n return False", "def test_invalid_json_dumpling(self):\n with pytest.raises(InvalidDumpling):\n validate_dumpling(\"{'invalid_single_quotes': 'value'}\")", "def _metadata_too_large(self):\n # currently the entire POST JSON request body is limited by default to 100kb\n return sys.getsizeof(self.metadata) > 10000", "def validate_character_update(characterJson : dict) -> bool:\n return jsonChecker.character_details_exist(characterJson, ATTRIBUTE_NAMES)\n #Check for a prexisting character\n #if characterController.find_character(characterJson['data']['charname']) is None:", "def test_malicious_json_create_service(self):\n # create a payload with malicous json blob\n attack_string = self.create_malicious_json(900)\n headers = {\"X-Auth-Token\": self.client.auth_token,\n \"X-Project-Id\": self.client.project_id}\n kwargs = {\"headers\": headers, \"data\": attack_string}\n resp = self.client.create_service(service_name=self.service_name,\n domain_list=self.domain_list,\n origin_list=self.origin_list,\n caching_list=self.caching_list,\n flavor_id=self.flavor_id,\n requestslib_kwargs=kwargs)\n if 'location' in 
resp.headers:\n self.service_url = resp.headers['location']\n else:\n self.service_url = ''\n\n self.assertTrue(resp.status_code < 503)", "def test_unicode_content(self):\n unicode_value = '\\xf6\\xe4\\xfc\\xd6\\xc4\\xdc\\xdf\\u20ac'\n con = sqlite.connect(\":memory:\")\n traced_statements = []\n def trace(statement):\n traced_statements.append(statement)\n con.set_trace_callback(trace)\n con.execute(\"create table foo(x)\")\n con.execute(\"insert into foo(x) values ('%s')\" % unicode_value)\n con.commit()\n self.assertTrue(any(unicode_value in stmt for stmt in traced_statements),\n \"Unicode data %s garbled in trace callback: %s\"\n % (ascii(unicode_value), ', '.join(map(ascii, traced_statements))))", "def _check_json(json_data: Any, clean: bool) -> Any:\n try:\n json.loads(json_data)\n except ValueError:\n return \"unknown\" if clean else False\n return \"success\" if clean else True", "async def crash(request):\n os._exit(1)", "def test_flag_aborted(self):\n container_dir = os.path.join(self.root, 'apps', 'proid.myapp#001',\n 'data')\n fs.mkdir_safe(container_dir)\n\n app_abort.flag_aborted(container_dir,\n why=app_abort.AbortedReason.INVALID_TYPE,\n payload='test')\n\n aborted_file = os.path.join(container_dir, 'aborted')\n with io.open(aborted_file) as f:\n aborted = json.load(f)\n\n self.assertEqual('invalid_type', aborted.get('why'))\n self.assertEqual('test', aborted.get('payload'))", "def test_malicious_json_gzip_create_service(self):\n # create a payload with malicious json blob\n attack_string = self.create_malicious_json(2500)\n headers = {\"X-Auth-Token\": self.client.auth_token,\n \"X-Project-Id\": self.client.project_id,\n \"Content-Encoding\": \"gzip\"}\n kwargs = {\"headers\": headers, \"data\": self.data_zip(attack_string)}\n resp = self.client.create_service(service_name=self.service_name,\n domain_list=self.domain_list,\n origin_list=self.origin_list,\n caching_list=self.caching_list,\n flavor_id=self.flavor_id,\n requestslib_kwargs=kwargs)\n if 'location' in resp.headers:\n self.service_url = resp.headers['location']\n else:\n self.service_url = ''\n\n self.assertTrue(resp.status_code < 503)", "def test_py2_application_exception_message_bytes_utf8_encoding_non_english():\n try:\n\n # Bytes literal with non-ascii compatible characters only allowed in\n # python 2\n\n raise ValueError('I💜🐍')\n except ValueError:\n app = application()\n notice_error(application=app)", "def test_bad_dbobjs():\n conf = load_yaml(\n \"\"\"\\\ndb_objects:\n - null\n - \"ouch\"\n - true\n - 1\n - 1.1\n - []\n\"\"\"\n )\n errors = get_config_errors(conf)\n assert len(errors) == 6\n for i, error in enumerate(errors):\n assert \"<unicode string>:%s\" % (i + 2) in error", "def ignore_silently(self):\n return self.fault_code in (17, 33, 48, 49)", "def _has_non_ascii_characters(data_string):\r\n try:\r\n data_string.encode('ascii')\r\n except UnicodeEncodeError:\r\n return True\r\n\r\n return False", "def other_native_crash(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"other_native_crash\")", "def other_native_crash(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"other_native_crash\")", "def _chinese(source):\n return json.dumps(source, ensure_ascii=False)", "def VulnerableBy(self):\n\n print TextColor.WARNING + '[!] 
Please wait for check the site for vulnerability ...'\n print\n\n with open(\"./WebAttack/sqlinjection/payloads/InjectionChars.json\") as jsonFile:\n payloads = json.load(jsonFile)\n\n for item in payloads[\"defInjectionChars\"]:\n\n resposne = requests.get(url=self.url + str(item), headers=define_headerdata, verify=False)\n result = self.__CheckVulnerability__(content=resposne.text)\n\n if result != 'none':\n print TextColor.RED + '\\t[+] %s ' % (self.url + str(item)) + \" => vulnerable\" + TextColor.WHITE\n return {'database': result, 'char': item}\n else:\n print TextColor.GREEN + '\\t[+] %s ' % (self.url + str(item)) + \" => clear\" + TextColor.WHITE\n continue\n\n sleep(.5)", "def test_unsuccessful_post_answer_with_special_characters(self):\n self.is_authenticated(self.user1)\n response = self.post_answer_with_special_character()\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_unicode_data(httpbin):\n resp = requests.post(\n httpbin + \"/post\",\n data=\"оживлённым\".encode(),\n headers={\n \"content-type\": \"text/html; charset=utf-8\",\n },\n )\n assert resp.json()[\"data\"] == \"оживлённым\"", "def test_ident_not_utf_8_decoded(clientstack):\n _, client = clientstack\n wrong_identifier = b'\\xd1 \\xf8\\x16\\x9a]~~\\x14\\x94CF\\xc1\\x89n\\xd5\\tL\\x1b\\xe8J+\\xa5\\xbe\\x17\\xf6\\xe6J@\\xa1\\xd0#'\n\n msg = b'{\"msg\": \"msg\"}'\n assert not client.rxMsgs\n assert not client._verifyAndAppend(msg, wrong_identifier)\n assert not client.rxMsgs", "def validateJSON(jsonData):\n try:\n json.loads(jsonData)\n validate(instance=json.loads(jsonData), schema=read_json_schema(schema_file_path))\n except Exception as err:\n logging.error(err)\n logging.info(\" Message received is not correct \")\n logging.info(\" Message sent to Pulsar Rejection Topic for reprocessing\")\n # IF a message is not correct, I prefer to stop the consumer and fix the problem. Another way will be to\n # Send message to another to topic if the message is not valid and change raise below by pass.\n raise\n return False\n\n return True", "def is_jsonable(self, input):\n try:\n json.dumps(input)\n return True\n except (TypeError, OverflowError):\n return False", "def unreliability(flag):\n test_str = \"unr3l14b13 p4ck3t!!!!! !!!~\\n\"\n server = start_server()\n client = start_client(flags=[flag, \"100\"])\n\n write_to(client, test_str)\n time.sleep(TEST_TIMEOUT)\n return read_from(server) == test_str" ]
[ "0.57005584", "0.5268216", "0.5156061", "0.51394224", "0.5138233", "0.5138233", "0.50800556", "0.5001988", "0.49770227", "0.4972004", "0.48682755", "0.4838062", "0.48076913", "0.48053455", "0.47852117", "0.47045177", "0.47016084", "0.4695853", "0.46840426", "0.46781862", "0.46764597", "0.46764597", "0.46738508", "0.4671558", "0.46687603", "0.466433", "0.4660157", "0.46584058", "0.46562707", "0.4641073" ]
0.54639024
1
Check whether it is possible to kill the application by creating a big malicious json blob with utf16 encoding.
def test_malicious_json_utf_16_create_service(self): # create a payload with malicous json blob attack_string = self.create_malicious_json(400) headers = {"X-Auth-Token": self.client.auth_token, "X-Project-Id": self.client.project_id} kwargs = {"headers": headers, "data": attack_string.encode("utf-16")} resp = self.client.create_service(service_name=self.service_name, domain_list=self.domain_list, origin_list=self.origin_list, caching_list=self.caching_list, flavor_id=self.flavor_id, requestslib_kwargs=kwargs) if 'location' in resp.headers: self.service_url = resp.headers['location'] else: self.service_url = '' self.assertTrue(resp.status_code < 503)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_bad_encoding(self, app, data_queues):\n body = b'{\"comment\": \"R\\xe9sum\\xe9 from 1990\", \"items\": []}'\n assert \"Résumé\" in body.decode(\"iso8859-1\")\n with pytest.raises(UnicodeDecodeError):\n body.decode(\"utf-8\")\n headers = {\"Content-Type\": \"application/json; charset=utf-8\"}\n res = self._call(app, body=body, headers=headers, method=\"post\", status=400)\n detail = (\n \"'utf-8' codec can't decode byte 0xe9 in position 14: invalid\"\n \" continuation byte\"\n )\n self.check_response(data_queues, res, \"parse_error\", details={\"decode\": detail})", "def test_malicious_json_utf_8_create_service(self):\n # create a payload with malicous json blob\n attack_string = self.create_malicious_json(800)\n headers = {\"X-Auth-Token\": self.client.auth_token,\n \"X-Project-Id\": self.client.project_id}\n kwargs = {\"headers\": headers, \"data\": attack_string.encode(\"utf-8\")}\n resp = self.client.create_service(service_name=self.service_name,\n domain_list=self.domain_list,\n origin_list=self.origin_list,\n caching_list=self.caching_list,\n flavor_id=self.flavor_id,\n requestslib_kwargs=kwargs)\n if 'location' in resp.headers:\n self.service_url = resp.headers['location']\n else:\n self.service_url = ''\n\n self.assertTrue(resp.status_code < 503)", "def validate_character_update(characterJson : dict) -> bool:\n return jsonChecker.character_details_exist(characterJson, ATTRIBUTE_NAMES)\n #Check for a prexisting character\n #if characterController.find_character(characterJson['data']['charname']) is None:", "def other_native_crash(self) -> bool:\n return pulumi.get(self, \"other_native_crash\")", "def other_native_crash(self) -> bool:\n return pulumi.get(self, \"other_native_crash\")", "def is_valid_json(j):\n try:\n json.dumps(j)\n return True\n except json.JSONDecodeError:\n print(\"not valid json\")\n return False", "def _metadata_too_large(self):\n # currently the entire POST JSON request body is limited by default to 100kb\n return sys.getsizeof(self.metadata) > 10000", "def _check_json(json_data: Any, clean: bool) -> Any:\n try:\n json.loads(json_data)\n except ValueError:\n return \"unknown\" if clean else False\n return \"success\" if clean else True", "def check_string():\n\n # Forcing check for valid json and headers with Content-Type:application/json\n content = request.get_json(silent=False, force=True)\n\n payload = content.get('data', None)\n \n if not payload:\n return response_handler(\n {\"error\": \"'data' key missing from JSON payload.\"},\n 400\n )\n if not isinstance(payload, basestring):\n return response_handler(\n {\"error\": \"Value of 'data' key is not of type 'string'.\"},\n 400\n )\n \n pangram = analyze_string(payload)\n if not pangram:\n return response_handler(\n {\"error\": False},\n 400\n )\n\n return response_handler(\n {\"success\": True},\n 200\n )", "def test_invalid_json_dumpling(self):\n with pytest.raises(InvalidDumpling):\n validate_dumpling(\"{'invalid_single_quotes': 'value'}\")", "def test_unicode_insert_error():\n# In addition, we should use vagrant or azure deployments of the scanner to Ubuntu and Windows virtual machines\n# to ensure cross-platform behavior.\n pass", "def test_malicious_json_create_service(self):\n # create a payload with malicous json blob\n attack_string = self.create_malicious_json(900)\n headers = {\"X-Auth-Token\": self.client.auth_token,\n \"X-Project-Id\": self.client.project_id}\n kwargs = {\"headers\": headers, \"data\": attack_string}\n resp = 
self.client.create_service(service_name=self.service_name,\n domain_list=self.domain_list,\n origin_list=self.origin_list,\n caching_list=self.caching_list,\n flavor_id=self.flavor_id,\n requestslib_kwargs=kwargs)\n if 'location' in resp.headers:\n self.service_url = resp.headers['location']\n else:\n self.service_url = ''\n\n self.assertTrue(resp.status_code < 503)", "def ignore_silently(self):\n return self.fault_code in (17, 33, 48, 49)", "def validateJSON(jsonData):\n try:\n json.loads(jsonData)\n validate(instance=json.loads(jsonData), schema=read_json_schema(schema_file_path))\n except Exception as err:\n logging.error(err)\n logging.info(\" Message received is not correct \")\n logging.info(\" Message sent to Pulsar Rejection Topic for reprocessing\")\n # IF a message is not correct, I prefer to stop the consumer and fix the problem. Another way will be to\n # Send message to another to topic if the message is not valid and change raise below by pass.\n raise\n return False\n\n return True", "def check_vulnerability(self):\n\t\tpass", "def test_exceptionGreaterThan64kEncoded(self) -> None:\n # The exception text itself is not greater than 64k but SNOWMAN\n # encodes to 3 bytes with UTF-8 so the length of the UTF-8 encoding of\n # the string representation of this exception will be greater than 2\n # ** 16.\n raise Exception(\"\\N{SNOWMAN}\" * 2 ** 15)", "def test_ident_not_utf_8_decoded(clientstack):\n _, client = clientstack\n wrong_identifier = b'\\xd1 \\xf8\\x16\\x9a]~~\\x14\\x94CF\\xc1\\x89n\\xd5\\tL\\x1b\\xe8J+\\xa5\\xbe\\x17\\xf6\\xe6J@\\xa1\\xd0#'\n\n msg = b'{\"msg\": \"msg\"}'\n assert not client.rxMsgs\n assert not client._verifyAndAppend(msg, wrong_identifier)\n assert not client.rxMsgs", "def process_tweet(data):\n decoded = json.loads(data)\n\n twt_text = decoded['text']\n # Do some stuff with the json data if tweet is a closure for specific bridge\n if bridgeName in twt_text and \"closed\" in twt_text:\n \t# Send and email or turn on a light or something\n \tprint \"BRIDGE CLOSED!!! Eat another piece of toast, pet the cats\"\n \tsendAlert(data=decoded, email=to_emailAddress)\n\n \t# temporarily limit the number of calls to one\n \tsys.exit('had a bridge event, shutting down')\n return True", "def can_process(dict_data: dict) -> bool:\n return dict_data[\"experiment\"] in [\"016s1803_nem\"]", "def VulnerableBy(self):\n\n print TextColor.WARNING + '[!] 
Please wait for check the site for vulnerability ...'\n print\n\n with open(\"./WebAttack/sqlinjection/payloads/InjectionChars.json\") as jsonFile:\n payloads = json.load(jsonFile)\n\n for item in payloads[\"defInjectionChars\"]:\n\n resposne = requests.get(url=self.url + str(item), headers=define_headerdata, verify=False)\n result = self.__CheckVulnerability__(content=resposne.text)\n\n if result != 'none':\n print TextColor.RED + '\\t[+] %s ' % (self.url + str(item)) + \" => vulnerable\" + TextColor.WHITE\n return {'database': result, 'char': item}\n else:\n print TextColor.GREEN + '\\t[+] %s ' % (self.url + str(item)) + \" => clear\" + TextColor.WHITE\n continue\n\n sleep(.5)", "def can_process(dict_data: dict) -> bool:\n return dict_data[\"experiment\"] in [_023_EXPERIMENT]", "def isResp(obxDict):\n readingCode = getReadingCode(obxDict)\n return readingCode == '76270-8'", "def valid_for(obj):\n\n if not obj.filedata:\n return False\n\n #hexstring = \"cffaedfe07000001030000800200\"\n return True", "def verify_hack_key(self):\r\n\t\tself.percent_english = Dict_Control(self.my_code).check_key()\r\n\t\t#If more than half the words are english, the key will pass. \r\n\t\tif self.percent_english > 50:\r\n\t\t\tself.hack_plausible = True", "def unreliability(flag):\n test_str = \"unr3l14b13 p4ck3t!!!!! !!!~\\n\"\n server = start_server()\n client = start_client(flags=[flag, \"100\"])\n\n write_to(client, test_str)\n time.sleep(TEST_TIMEOUT)\n return read_from(server) == test_str", "def test_is_bip69_0a6a357e(self):\n self.assertFalse(bip69.is_bip69(self.tx_json_0a6a357e))", "def validate_json(data: dict) -> bool:\n try:\n assert \"data\" in data.keys()\n assert isinstance(data[\"data\"], str)\n assert \"command\" in data.keys()\n assert isinstance(data[\"command\"], str)\n assert \"time\" in data.keys()\n assert isinstance(data[\"time\"], str)\n assert \"origin\" in data.keys()\n assert isinstance(data[\"origin\"], str)\n return True\n except AssertionError:\n return False", "def test_malicious_json_gzip_create_service(self):\n # create a payload with malicious json blob\n attack_string = self.create_malicious_json(2500)\n headers = {\"X-Auth-Token\": self.client.auth_token,\n \"X-Project-Id\": self.client.project_id,\n \"Content-Encoding\": \"gzip\"}\n kwargs = {\"headers\": headers, \"data\": self.data_zip(attack_string)}\n resp = self.client.create_service(service_name=self.service_name,\n domain_list=self.domain_list,\n origin_list=self.origin_list,\n caching_list=self.caching_list,\n flavor_id=self.flavor_id,\n requestslib_kwargs=kwargs)\n if 'location' in resp.headers:\n self.service_url = resp.headers['location']\n else:\n self.service_url = ''\n\n self.assertTrue(resp.status_code < 503)", "def unknown(self, w):\n return True", "def handles(self, data: ByteString) -> Optional[bool]:" ]
[ "0.5257188", "0.5233604", "0.50461715", "0.5023485", "0.5023485", "0.4973716", "0.48788738", "0.4845623", "0.48314884", "0.48003462", "0.47874525", "0.47507095", "0.4736719", "0.4718207", "0.47097185", "0.47041085", "0.46827826", "0.46773538", "0.46709177", "0.4652283", "0.46494344", "0.46417087", "0.46191272", "0.46148336", "0.46059072", "0.45934403", "0.45926464", "0.45905563", "0.45866323", "0.45637748" ]
0.5806415
0
Check whether it is possible to kill the application by creating a big malicious json blob with gzip.
def test_malicious_json_gzip_create_service(self): # create a payload with malicious json blob attack_string = self.create_malicious_json(2500) headers = {"X-Auth-Token": self.client.auth_token, "X-Project-Id": self.client.project_id, "Content-Encoding": "gzip"} kwargs = {"headers": headers, "data": self.data_zip(attack_string)} resp = self.client.create_service(service_name=self.service_name, domain_list=self.domain_list, origin_list=self.origin_list, caching_list=self.caching_list, flavor_id=self.flavor_id, requestslib_kwargs=kwargs) if 'location' in resp.headers: self.service_url = resp.headers['location'] else: self.service_url = '' self.assertTrue(resp.status_code < 503)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_gzip(handler,config):\r\n if not config.gzip:\r\n return False\r\n if not gzip_support:\r\n return False\r\n accept_encoding = handler.headers.get('accept-encoding','').split(',')\r\n accept_encoding = [ x.strip() for x in accept_encoding ]\r\n ctype = handler.resp_headers[\"Content-type\"]\r\n # if gzip is supported by the user agent,\r\n # and if the option gzip in the configuration file is set, \r\n # and content type is text/ or javascript, \r\n # set Content-Encoding to 'gzip' and return True\r\n if 'gzip' in accept_encoding and \\\r\n ctype and (ctype.startswith('text/') or \r\n ctype=='application/x-javascript'):\r\n return True\r\n return False", "def test_truncated_gzip(self, app, data_queues):\n wifis = WifiShardFactory.build_batch(2)\n query = self.model_query(wifis=wifis)\n\n body = util.encode_gzip(json.dumps(query).encode())[:-2]\n headers = {\"Content-Encoding\": \"gzip\"}\n res = self._call(app, body=body, headers=headers, method=\"post\", status=400)\n detail = (\n \"GZIPDecodeError(\\\"EOFError('Compressed file ended before the\"\n \" end-of-stream marker was reached')\\\")\"\n )\n self.check_response(data_queues, res, \"parse_error\", details={\"decode\": detail})", "def _metadata_too_large(self):\n # currently the entire POST JSON request body is limited by default to 100kb\n return sys.getsizeof(self.metadata) > 10000", "def is_gzip_result(self):\n return self.__aceQLHttpApi.is_gzip_result()", "def test_flag_aborted(self):\n container_dir = os.path.join(self.root, 'apps', 'proid.myapp#001',\n 'data')\n fs.mkdir_safe(container_dir)\n\n app_abort.flag_aborted(container_dir,\n why=app_abort.AbortedReason.INVALID_TYPE,\n payload='test')\n\n aborted_file = os.path.join(container_dir, 'aborted')\n with io.open(aborted_file) as f:\n aborted = json.load(f)\n\n self.assertEqual('invalid_type', aborted.get('why'))\n self.assertEqual('test', aborted.get('payload'))", "def test_gzip(self, app, data_queues, logs):\n wifis = WifiShardFactory.build_batch(2)\n query = self.model_query(wifis=wifis)\n\n body = util.encode_gzip(json.dumps(query).encode())\n headers = {\"Content-Encoding\": \"gzip\"}\n res = self._call(app, body=body, headers=headers, method=\"post\", status=404)\n self.check_response(data_queues, res, \"not_found\")\n assert logs.only_entry[\"wifi_valid\"] == 2", "def is_gzip(fp):\r\n return open(fp, 'rb').read(2) == '\\x1f\\x8b'", "def is_private(self, info):\n result = True\n seconds = 5\n\n file_name = \"test.flv\"\n proc = self.run_rtmpdump(info, file_name, extra_arg=\"-B \" + str(seconds))\n proc.wait()\n\n if os.path.isfile(file_name):\n if os.path.getsize(file_name) > 0:\n result = False\n os.remove(file_name)\n\n return result", "def test_malicious_json_create_service(self):\n # create a payload with malicous json blob\n attack_string = self.create_malicious_json(900)\n headers = {\"X-Auth-Token\": self.client.auth_token,\n \"X-Project-Id\": self.client.project_id}\n kwargs = {\"headers\": headers, \"data\": attack_string}\n resp = self.client.create_service(service_name=self.service_name,\n domain_list=self.domain_list,\n origin_list=self.origin_list,\n caching_list=self.caching_list,\n flavor_id=self.flavor_id,\n requestslib_kwargs=kwargs)\n if 'location' in resp.headers:\n self.service_url = resp.headers['location']\n else:\n self.service_url = ''\n\n self.assertTrue(resp.status_code < 503)", "def test_gzip_file_no_extension(self):\n # Write the data to a file\n temp_file = tempfile.NamedTemporaryFile()\n with gzip.open(temp_file.name, 'wb') as 
out:\n for item in self.data:\n serialzed = json.dumps(item).encode()\n out.write(serialzed + b'\\n')\n\n # Load from file, ensure it is correct\n actual_data = []\n with JsonlReader(temp_file.name) as f:\n for item in f:\n actual_data.append(item)\n self.assertEqual(self.data, actual_data)", "def test_no_compress_compressed_response(self):\n self.resp[\"Content-Encoding\"] = \"deflate\"\n r = GZipMiddleware(self.get_response)(self.req)\n self.assertEqual(r.content, self.compressible_string)\n self.assertEqual(r.get(\"Content-Encoding\"), \"deflate\")", "def test_malicious_json_utf_8_create_service(self):\n # create a payload with malicous json blob\n attack_string = self.create_malicious_json(800)\n headers = {\"X-Auth-Token\": self.client.auth_token,\n \"X-Project-Id\": self.client.project_id}\n kwargs = {\"headers\": headers, \"data\": attack_string.encode(\"utf-8\")}\n resp = self.client.create_service(service_name=self.service_name,\n domain_list=self.domain_list,\n origin_list=self.origin_list,\n caching_list=self.caching_list,\n flavor_id=self.flavor_id,\n requestslib_kwargs=kwargs)\n if 'location' in resp.headers:\n self.service_url = resp.headers['location']\n else:\n self.service_url = ''\n\n self.assertTrue(resp.status_code < 503)", "def test_no_compress_incompressible_response(self):\n self.resp.content = self.incompressible_string\n r = GZipMiddleware(self.get_response)(self.req)\n self.assertEqual(r.content, self.incompressible_string)\n self.assertIsNone(r.get(\"Content-Encoding\"))", "def test_compress_non_200_response(self):\n self.resp.status_code = 404\n r = GZipMiddleware(self.get_response)(self.req)\n self.assertEqual(self.decompress(r.content), self.compressible_string)\n self.assertEqual(r.get(\"Content-Encoding\"), \"gzip\")", "def check_zlib():\n\n try:\n import zlib\n zlib.compress('Compress this')\n return True\n except Exception as ex:\n LOG.error(str(ex))\n LOG.error('Failed to import zlib module.')\n return False", "def sniff( self, filename ):\r\n try:\r\n json.load( open(filename) )\r\n return True\r\n except Exception:\r\n return False", "def is_gzipped(response):\n ctype = response.headers.get('Content-Type', b'').lower()\n cenc = response.headers.get('Content-Encoding', b'').lower()\n return (_is_gzipped(ctype) or\n (_is_octetstream(ctype) and cenc in (b'gzip', b'x-gzip')))", "def test_gzip_page_disabled(self):\n content = self.unique_gzip()\n self.assertViewBehavior(\n {\"gzip_page\": False, \"get\": content},\n headers={\"HTTP_ACCEPT_ENCODING\": \"gzip\"},\n status_code=200,\n content=content,\n headers_exclude=\"Content-Encoding\")", "def is_valid_json(j):\n try:\n json.dumps(j)\n return True\n except json.JSONDecodeError:\n print(\"not valid json\")\n return False", "def is_garbage(path):\n bn = path.basename\n return bn.startswith(garbage_prefix)", "def test_malicious_json_utf_16_create_service(self):\n # create a payload with malicous json blob\n attack_string = self.create_malicious_json(400)\n headers = {\"X-Auth-Token\": self.client.auth_token,\n \"X-Project-Id\": self.client.project_id}\n kwargs = {\"headers\": headers, \"data\": attack_string.encode(\"utf-16\")}\n resp = self.client.create_service(service_name=self.service_name,\n domain_list=self.domain_list,\n origin_list=self.origin_list,\n caching_list=self.caching_list,\n flavor_id=self.flavor_id,\n requestslib_kwargs=kwargs)\n if 'location' in resp.headers:\n self.service_url = resp.headers['location']\n else:\n self.service_url = ''\n\n self.assertTrue(resp.status_code < 503)", "def 
test_random_bytes(self):\n with mock.patch(\n \"django.utils.text.secrets.randbelow\", autospec=True, return_value=3\n ):\n r = GZipMiddleware(self.get_response)(self.req)\n # The fourth byte of a gzip stream contains flags.\n self.assertEqual(r.content[3], gzip.FNAME)\n # A 3 byte filename \"aaa\" and a null byte are added.\n self.assertEqual(r.content[10:14], b\"aaa\\x00\")\n self.assertEqual(self.decompress(r.content), self.compressible_string)", "def check_string():\n\n # Forcing check for valid json and headers with Content-Type:application/json\n content = request.get_json(silent=False, force=True)\n\n payload = content.get('data', None)\n \n if not payload:\n return response_handler(\n {\"error\": \"'data' key missing from JSON payload.\"},\n 400\n )\n if not isinstance(payload, basestring):\n return response_handler(\n {\"error\": \"Value of 'data' key is not of type 'string'.\"},\n 400\n )\n \n pangram = analyze_string(payload)\n if not pangram:\n return response_handler(\n {\"error\": False},\n 400\n )\n\n return response_handler(\n {\"success\": True},\n 200\n )", "def clean_gzip():\n this_dir = os.getcwd()\n os.chdir(\"/data/COHERENT2/data/CrystalChar/raw\")\n all_files = glob.glob(\"./**\", recursive=True)\n for f in all_files:\n if \".gz\" in f and \"tar\" not in f:\n print(f)\n sh(\"gunzip \" + f)\n os.chdir(this_dir)", "def test_execute_get_success_with_gzip():\n response_queue = run_get(\n TestData.RECEPTOR_CONFIG,\n json.dumps(TestData.JOB_TEMPLATE_PAYLOAD_SINGLE_PAGE_GZIPPED),\n TestData.JOB_TEMPLATE_RESPONSE,\n )\n result = response_queue.get()\n response = ast.literal_eval(gzip.decompress(result).decode(\"utf-8\"))\n validate_get_response(\n response,\n 200,\n TestData.JOB_TEMPLATE_COUNT,\n [TestData.JOB_TEMPLATE_1, TestData.JOB_TEMPLATE_2],\n )", "def test_gzip_page(self):\n content = self.unique_gzip()\n self.assertViewBehavior(\n {\"get\": content},\n headers={\"HTTP_ACCEPT_ENCODING\": \"gzip\"},\n status_code=200,\n content=self.compress(content),\n headers_exact={\"Content-Encoding\": \"gzip\"})", "def test_compress_response(self):\n r = GZipMiddleware(self.get_response)(self.req)\n self.assertEqual(self.decompress(r.content), self.compressible_string)\n self.assertEqual(r.get(\"Content-Encoding\"), \"gzip\")\n self.assertEqual(r.get(\"Content-Length\"), str(len(r.content)))", "def is_gzipped(infile):\n logger = logging.getLogger(__name__)\n\n magic_number = b'\\x1f\\x8b'\n f = open(infile, 'rb')\n with f:\n try:\n assert f.read(2) == magic_number\n except AssertionError as e:\n logger.info(f'{infile} is not gzipped')\n return False\n else:\n logger.debug(f'{infile} is gzipped')\n return True", "def test_no_compress_short_response(self):\n self.resp.content = self.short_string\n r = GZipMiddleware(self.get_response)(self.req)\n self.assertEqual(r.content, self.short_string)\n self.assertIsNone(r.get(\"Content-Encoding\"))", "def test_vuln_risklist_gzip(self):\n client = ConnectApiClient()\n resp = client.get_vulnerability_risklist(gzip=True)\n buf = io.BytesIO()\n for itr in resp.iter_content(chunk_size=1024):\n buf.write(itr)\n buf.seek(0)\n self.assertGreater(len(buf.read()), 1000)\n buf.close()" ]
[ "0.53313565", "0.5310692", "0.5241604", "0.51391083", "0.50379765", "0.5004744", "0.50042313", "0.4998042", "0.4920025", "0.49113283", "0.49000984", "0.48996288", "0.48760656", "0.48497862", "0.4841596", "0.48019674", "0.48009473", "0.47862568", "0.47672084", "0.47561482", "0.47060004", "0.46862632", "0.4683594", "0.46622396", "0.46601045", "0.46528032", "0.46497437", "0.46423885", "0.46415672", "0.46232736" ]
0.58686054
0
Check whether it is possible to kill the application by creating a service with huge list of domains.
def test_dos_create_service_domain_list(self): # create a huge list of domain self.reset_defaults() for k in range(1, 30000): self.domain_list.append({"domain": "w.t%s.com" % k}) # send MAX_ATTEMPTS requests for k in range(1, self.MAX_ATTEMPTS): self.service_name = str(uuid.uuid1()) self.check_one_request()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_dos_list_service_huge_junk(self):\n # create a huge list of domain\n attack_string = \"1\" * 3500\n params = {\"junk\": attack_string}\n resp = self.client.list_services(param=params)\n self.assertTrue(resp.status_code < 503)", "def test_dos_list_service_huge_limit(self):\n # create a huge list of domain\n attack_string = \"1\" * 3500\n params = {\"limit\": attack_string, \"marker\": attack_string}\n resp = self.client.list_services(param=params)\n self.assertTrue(resp.status_code < 503)", "def test_dontStartPrivilegedService(self):\n ports = self._privilegedStartService(self.highPortNumber)\n self.assertEqual(ports, [])", "def killAll(controller=False):", "def killMongosProc():\n cmd = [\"pgrep -f \\\"\" + MONGOS_KSTR + \"\\\" | xargs kill -9\"]\n executeCommand(cmd)", "def validate_openvpn_pid(result):\n for ps in result:\n if 'openvpn --daemon' in ps:\n print 'OpenVPN Process - OK'\n return True\n print 'OpenVPN Process - DOWN'\n return False", "def test_dos_create_service_origin_list(self):\n # create a huge list of domain\n self.reset_defaults()\n for k in range(1, 9000):\n self.origin_list.append({\"origin\": \"m%s.com\" % k,\n \"port\": 443,\n \"ssl\": False,\n \"rules\": [{\"request_url\": \"/i.htm\",\n \"name\": \"i\"}]})\n\n # send MAX_ATTEMPTS requests\n for k in range(1, self.MAX_ATTEMPTS):\n self.service_name = str(uuid.uuid1())\n self.check_one_request()", "def kill_yelp():\n\n global yelp_process\n if not yelp_process:\n return False\n\n log.debug(\"killing yelp\")\n yelp_process.kill()\n yelp_process = None\n return True", "def terminate_services(self, services):\n services = self._filter_cid(services)\n for service in services:\n ctr = self.check_service_running(service,\n raise_on=['terminated'])\n logger.info(\"Stopping and \"\n \"removing docker instance : %s\" % service)\n self.driver.stop_container(ctr['Id'], remove=True)\n if service not in self._dirty_service:\n self._dirty_service[service] = {\"ctr\": ctr,\n \"terminated\": True}\n else:\n self._dirty_service[service][\"terminated\"] = True\n return services", "def lysis(self) :\n self.kill()\n return True", "def destroy_vm(args):\n libvirtConn = libvirt.open(None)\n if libvirtConn is None:\n print('Cannot contact hypervisor', file=sys.stderr)\n return 1\n dom = None\n try:\n dom = libvirtConn.lookupByName(args.domain)\n if dom is None:\n print(\"Domain %s does not exist.\" % args.domain, file=sys.stderr)\n return 1\n except Exception as e:\n print(\"Failed looking for Domain %s: %s\" % (args.domain, str(e)))\n return 1\n # from here , domain exists\n if dom.isActive():\n if not args.stop:\n _logger.error(\"Domain %s is running. Only domains that are not running can be destroyed.\", args.domain)\n libvirtConn.close()\n return 1\n\n shut_res = 0\n _logger.debug('Domain is running, stop it first with force ? 
%s', args.force)\n if args.force:\n shut_res = dom.destroyFlags()\n else:\n shut_res = dom.destroyFlags(libvirt.VIR_DOMAIN_DESTROY_GRACEFUL)\n\n libvirtConn.close()\n\n if shut_res != 0:\n _logger.error(\"Failed to stop domain\")\n return 1\n\n return oci_utils.kvm.virt.destroy(args.domain, args.destroy_disks)", "def killVPN():\n try:\n vpnProcesses = subprocess.check_output(\"ps -A | grep vpn\", shell=True).split(\"\\n\")\n except subprocess.CalledProcessError:\n return True\n log.info(\"[i] ps returned: %s\" % repr(vpnProcesses))\n vpnProcesses.remove(\"\")\n for proc in vpnProcesses:\n id = proc.split(\" \")[1]\n log.info(\"[i] Killing vpn process (%s)\" % id)\n subprocess.Popen(\"sudo kill -9 %s\" % id, shell=True)", "def test_ipam_services_delete(self):\n pass", "def test_stopServiceCleanupScheduledRestarts(self):\r\n self.pm.threshold = 5\r\n self.pm.minRestartDelay = 5\r\n # Start service and add a process (started immediately)\r\n self.pm.startService()\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n # Stop the process after 1s\r\n self.reactor.advance(1)\r\n self.pm.stopProcess(\"foo\")\r\n # Wait 1s for it to exit it will be scheduled to restart 5s later\r\n self.reactor.advance(1)\r\n # Meanwhile stop the service\r\n self.pm.stopService()\r\n # Advance to beyond the process restart time\r\n self.reactor.advance(6)\r\n # The process shouldn't have restarted because stopService has cancelled\r\n # all pending process restarts.\r\n self.assertEqual(self.pm.protocols, {})", "def kill_all():\n compose_kill_all()", "def kill_running_process(appName=\"bitcoind\"):\n for line in os.popen(\"ps ax | grep \" + appName + \" | grep -v grep\"):\n fields = line.split()\n pid = fields[0]\n os.kill(int(pid), signal.SIGKILL)", "def test_stopService(self):\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.pm.addProcess(\"bar\", [\"bar\"])\r\n # Schedule the process to start\r\n self.pm.startService()\r\n # advance the reactor to start the processes\r\n self.reactor.advance(self.pm.threshold)\r\n self.assertIn(\"foo\", self.pm.protocols)\r\n self.assertIn(\"bar\", self.pm.protocols)\r\n\r\n self.reactor.advance(1)\r\n\r\n self.pm.stopService()\r\n # Advance to beyond the killTime - all monitored processes\r\n # should have exited\r\n self.reactor.advance(self.pm.killTime + 1)\r\n # The processes shouldn't be restarted\r\n self.assertEqual({}, self.pm.protocols)", "def is_safe_to_kill(hostname):\n return mesos_maintenance.is_host_drained(\n hostname\n ) or mesos_maintenance.is_host_past_maintenance_start(hostname)", "def stop(name):\n __salt__[\"file.touch\"](\"{}/down\".format(_service_path(name)))\n cmd = \"svc -d {}\".format(_service_path(name))\n return not __salt__[\"cmd.retcode\"](cmd, python_shell=False)", "def killall(logger=None, everywhere=False):\r\n if not NailgunExecutor.killall:\r\n return False\r\n else:\r\n return NailgunExecutor.killall(logger=logger, everywhere=everywhere)", "def __some_alive(self):\n for service in self.__services.values():\n if service.is_alive():\n return True\n return False", "def stop(self):\n \n\n if os.path.isfile(self.pidfilename):\n\n with open(self.pidfilename) as f:\n data = json.load(f)\n pid = data['pid']\n os.kill(int(pid), signal.SIGTERM)\n\n # Check that the process has been killed\n # Give up after 15 seconds\n for i in range(15):\n if int(pid) not in psutil.pids():\n\n return True\n time.sleep(1)\n return False\n\n # If the daemon is not currently running, do nothing\n else:\n log(\"The daemon is not currently running\")", "def 
test_too_many_cores(self):\n compute1 = self.start_service('compute', host='host1')\n compute2 = self.start_service('compute', host='host2')\n instance_ids1 = []\n instance_ids2 = []\n for index in xrange(FLAGS.max_cores):\n instance_id = self._create_instance()\n compute1.run_instance(self.context, instance_id)\n instance_ids1.append(instance_id)\n instance_id = self._create_instance()\n compute2.run_instance(self.context, instance_id)\n instance_ids2.append(instance_id)\n instance_id = self._create_instance()\n self.assertRaises(driver.NoValidHost,\n self.scheduler.driver.schedule_run_instance,\n self.context,\n instance_id)\n db.instance_destroy(self.context, instance_id)\n for instance_id in instance_ids1:\n compute1.terminate_instance(self.context, instance_id)\n for instance_id in instance_ids2:\n compute2.terminate_instance(self.context, instance_id)\n compute1.kill()\n compute2.kill()", "def test_100_services(self):\n u.log.debug('Checking system services...')\n swift_storage_services = ['swift-account',\n 'swift-account-auditor',\n 'swift-account-reaper',\n 'swift-account-replicator',\n 'swift-container',\n 'swift-container-auditor',\n 'swift-container-replicator',\n 'swift-container-updater',\n 'swift-object',\n 'swift-object-auditor',\n 'swift-object-replicator',\n 'swift-object-updater',\n 'swift-container-sync']\n service_names = {\n self.keystone_sentry: ['keystone'],\n self.glance_sentry: ['glance-registry',\n 'glance-api'],\n self.swift_proxy_sentry: ['swift-proxy'],\n self.swift_storage_sentry: swift_storage_services\n }\n\n if self._get_openstack_release() >= self.trusty_liberty:\n service_names[self.keystone_sentry] = ['apache2']\n\n ret = u.validate_services_by_name(service_names)\n if ret:\n amulet.raise_status(amulet.FAIL, msg=ret)", "def kill():\n Log.info(\"Kill tns processes.\")\n if Settings.HOST_OS == OSType.WINDOWS:\n Process.kill(proc_name='node')\n else:\n Process.kill(proc_name='node', proc_cmdline=Settings.Executables.TNS)\n Process.kill_by_commandline(cmdline='webpack.js')", "def termProc(server: str) -> bool:\n\n for process in psutil.process_iter(attrs=['cmdline']):\n if f'{server}.jar' in process.info['cmdline']:\n toKill = process.children()\n toKill.append(process)\n for p in toKill:\n p.terminate()\n _, alive = psutil.wait_procs(toKill, timeout=3)\n for p in alive:\n p.kill()\n _, alive = psutil.wait_procs(toKill, timeout=3)\n if not alive:\n return True\n\n return False", "def remote_kill():", "def check_ambari_server_process_down(self):\n process_name = \"ambari-server\"\n output = self.__find_process(process_name)\n return re.search(process_name, output) is None", "def checkProcess(process_id, time_limit):\n # set an arbitrary time limit\n t_end = time.time() + 60 * time_limit\n while time.time() < t_end:\n if not is_process_running(process_id):\n print(\"process {0} terminated\".format(process_id))\n return\n\n # could be an external integration (slack, pager duty)\n print(\"process {0} still running. 
Kill?\".format(process_id))", "def test_create_service_with_big_project_id(self):\n failed_count = 0\n for k in range(2500, 8000, 500):\n self.reset_defaults()\n headers = {\"X-Auth-Token\": self.client.auth_token,\n \"X-Project-Id\": \"1\"*k,\n \"Content-Type\": \"application/json\"}\n kwargs = {\"headers\": headers}\n self.service_name = str(uuid.uuid1())\n resp = self.client.create_service(service_name=self.service_name,\n domain_list=self.domain_list,\n origin_list=self.origin_list,\n caching_list=self.caching_list,\n flavor_id=self.flavor_id,\n requestslib_kwargs=kwargs)\n if 'location' in resp.headers:\n self.service_url = resp.headers['location']\n else:\n self.service_url = ''\n \n #self.assertTrue(resp.status_code < 503)\n if (resp.status_code == 503):\n failed_count += 1\n resp = self.client.list_services(requestslib_kwargs=kwargs)\n if (resp.status_code == 503):\n failed_count += 1\n self.assertTrue(failed_count <= 3)\n #self.assertTrue(resp.status_code < 503)" ]
[ "0.6063095", "0.6022489", "0.56425107", "0.5547615", "0.54923004", "0.54674953", "0.5434379", "0.54335403", "0.54237306", "0.5372089", "0.53475595", "0.5319241", "0.52755827", "0.5261143", "0.525039", "0.52496856", "0.5240958", "0.52227604", "0.52175325", "0.5212614", "0.5209818", "0.5206252", "0.52055144", "0.5191473", "0.5162637", "0.5156744", "0.5147533", "0.5143237", "0.51415735", "0.5136278" ]
0.6728664
0
Check whether it is possible to kill the application by creating a service with huge list of origins.
def test_dos_create_service_origin_list(self): # create a huge list of domain self.reset_defaults() for k in range(1, 9000): self.origin_list.append({"origin": "m%s.com" % k, "port": 443, "ssl": False, "rules": [{"request_url": "/i.htm", "name": "i"}]}) # send MAX_ATTEMPTS requests for k in range(1, self.MAX_ATTEMPTS): self.service_name = str(uuid.uuid1()) self.check_one_request()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def killAll(controller=False):", "def remote_kill():", "def issuer_liveness_check():\n global app_config\n\n if app_config[\"running\"]:\n # return True until we get a shutdown request\n return True\n\n # return True until the work queue is cleared\n return tob_connection_active()", "def _stop_proxies_if_needed(self) -> bool:\n all_node_ids = {node_id for node_id, _ in get_all_node_ids(self._gcs_client)}\n to_stop = []\n for node_id in self._proxy_states:\n if node_id not in all_node_ids:\n logger.info(\"Removing HTTP proxy on removed node '{}'.\".format(node_id))\n to_stop.append(node_id)\n\n for node_id in to_stop:\n proxy = self._proxy_states.pop(node_id)\n ray.kill(proxy.actor_handle, no_restart=True)", "def lysis(self) :\n self.kill()\n return True", "def test_dontStartPrivilegedService(self):\n ports = self._privilegedStartService(self.highPortNumber)\n self.assertEqual(ports, [])", "def verify_server_availability(url, timeout=60):\n for i in range(timeout):\n try:\n assert all_services_running(), (\"Webservice(s) failed to launch:\\n\"\n + '\\n'.join(supervisor_status()))\n response = requests.get(url)\n assert response.status_code == 200, (\"Expected status 200, got\"\n f\" {response.status_code}\"\n f\" for URL {url}.\")\n response = requests.get(url + '/static/build/bundle.js')\n assert response.status_code == 200, (\"Javascript bundle not found,\"\n \" did Webpack fail?\")\n return # all checks passed\n except Exception as e:\n if i == max(range(timeout)): # last iteration\n raise ConnectionError(str(e)) from None\n time.sleep(1)", "def test_ipam_services_delete(self):\n pass", "def check_one_request(self):\n resp = self.client.create_service(service_name=self.service_name,\n domain_list=self.domain_list,\n origin_list=self.origin_list,\n caching_list=self.caching_list,\n flavor_id=self.flavor_id)\n if 'location' in resp.headers:\n self.service_url = resp.headers['location']\n else:\n self.service_url = ''\n\n # delete the service\n self.assertTrue(resp.status_code < 503)\n\n if self.service_url != '':\n self.client.delete_service(location=self.service_url)", "def test_live_migration_dest_check_service_works_correctly(self):\n instance_id = self._create_instance()\n i_ref = db.instance_get(self.context, instance_id)\n s_ref = self._create_compute_service(host='somewhere',\n memory_mb_used=5)\n\n ret = self.scheduler.driver._live_migration_dest_check(self.context,\n i_ref,\n 'somewhere',\n False)\n self.assertTrue(ret is None)\n db.instance_destroy(self.context, instance_id)\n db.service_destroy(self.context, s_ref['id'])", "def test_dos_list_service_huge_junk(self):\n # create a huge list of domain\n attack_string = \"1\" * 3500\n params = {\"junk\": attack_string}\n resp = self.client.list_services(param=params)\n self.assertTrue(resp.status_code < 503)", "def validate_openvpn_pid(result):\n for ps in result:\n if 'openvpn --daemon' in ps:\n print 'OpenVPN Process - OK'\n return True\n print 'OpenVPN Process - DOWN'\n return False", "def stop(name):\n __salt__[\"file.touch\"](\"{}/down\".format(_service_path(name)))\n cmd = \"svc -d {}\".format(_service_path(name))\n return not __salt__[\"cmd.retcode\"](cmd, python_shell=False)", "def check_stop_flag(con):\n k, v = con.kv.get(\"service/rebootmgr/stop\")\n if v:\n return True\n return False", "def check(self):\n if self.backend.poll():\n raise RuntimeError('Backend process died.')\n\n if self.esp.poll():\n raise RuntimeError('ESP process died.')", "def test_dos_create_service_domain_list(self):\n # create a huge list of 
domain\n self.reset_defaults()\n for k in range(1, 30000):\n self.domain_list.append({\"domain\": \"w.t%s.com\" % k})\n\n # send MAX_ATTEMPTS requests\n for k in range(1, self.MAX_ATTEMPTS):\n self.service_name = str(uuid.uuid1())\n self.check_one_request()", "def test_stopServiceCleanupScheduledRestarts(self):\r\n self.pm.threshold = 5\r\n self.pm.minRestartDelay = 5\r\n # Start service and add a process (started immediately)\r\n self.pm.startService()\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n # Stop the process after 1s\r\n self.reactor.advance(1)\r\n self.pm.stopProcess(\"foo\")\r\n # Wait 1s for it to exit it will be scheduled to restart 5s later\r\n self.reactor.advance(1)\r\n # Meanwhile stop the service\r\n self.pm.stopService()\r\n # Advance to beyond the process restart time\r\n self.reactor.advance(6)\r\n # The process shouldn't have restarted because stopService has cancelled\r\n # all pending process restarts.\r\n self.assertEqual(self.pm.protocols, {})", "def kill(self):\n if self.client is None:\n # never started, can't stop - should be warning or exception?\n return False\n try:\n self.client.kill()\n except Py4JError:\n logger.debug(\"Error while attempting to kill\", exc_info=1)\n # fallback\n self.yarn_api.kill(self.app_id)\n if self.proc is not None:\n self.client_gateway.shutdown()\n if on_windows:\n call([\"cmd\", \"/c\", \"taskkill\", \"/f\", \"/t\", \"/pid\",\n str(self.proc.pid)])\n self.proc.terminate()\n self.proc.communicate()\n self.proc = None\n self.client = None\n out = self.runtime_status() == 'KILLED'\n return out", "def owserver_running():\n for proc in psutil.process_iter():\n if 'owserver' in proc.name():\n return True\n return False", "def kill_running_process(appName=\"bitcoind\"):\n for line in os.popen(\"ps ax | grep \" + appName + \" | grep -v grep\"):\n fields = line.split()\n pid = fields[0]\n os.kill(int(pid), signal.SIGKILL)", "def all_is_stopped(self):\r\n return all(not p.running for p in self._platforms.values())", "def test_live_migration_dest_check_service_lack_memory(self):\n instance_id = self._create_instance()\n instance_id2 = self._create_instance(host='somewhere',\n memory_mb=12)\n i_ref = db.instance_get(self.context, instance_id)\n s_ref = self._create_compute_service(host='somewhere')\n\n self.assertRaises(exception.NovaException,\n self.scheduler.driver._live_migration_dest_check,\n self.context, i_ref, 'somewhere', False)\n\n db.instance_destroy(self.context, instance_id)\n db.instance_destroy(self.context, instance_id2)\n db.service_destroy(self.context, s_ref['id'])", "def kill(targets, controller=False):", "def is_end_host(num):\n\treturn not num in routers", "def run(self):\n for req, resp in self.servings:\n resp.check_timeout()", "def check_ambari_server_process_down(self):\n process_name = \"ambari-server\"\n output = self.__find_process(process_name)\n return re.search(process_name, output) is None", "def stop(self):\n for c in self.openstack_endpoints.values():\n c.stop()\n #for c in self.openstack_endpoints.values():\n # if c.server_thread:\n # print(\"Waiting for WSGIServers to be stopped ...\")\n # c.server_thread.join()", "def __some_alive(self):\n for service in self.__services.values():\n if service.is_alive():\n return True\n return False", "def stop():\n _with_deploy_env(['./bin/paster serve src/remix/oerpub/rhaptoslabs/production.ini --stop-daemon'])", "def should_terminate(self):\n return False" ]
[ "0.541091", "0.54013634", "0.5319908", "0.5259425", "0.5252269", "0.5223949", "0.52037764", "0.51941264", "0.51511925", "0.51103085", "0.5099697", "0.5097654", "0.50843334", "0.5061235", "0.5056471", "0.5046156", "0.501708", "0.5002467", "0.49952114", "0.4995023", "0.497257", "0.49662548", "0.49577844", "0.4952725", "0.49478212", "0.49400896", "0.49362195", "0.4935497", "0.49281022", "0.49160355" ]
0.5795615
0
Check whether it is possible to kill the application by creating a service with huge list rules within caching list.
def test_dos_create_service_caching_list_rules(self):
    # create a huge list of domain
    self.reset_defaults()
    for k in range(1, 15000):
        self.caching_list[1]["rules"].append(
            {"name": "i%s" % k, "request_url": "/index.htm"})

    # send MAX_ATTEMPTS requests
    for k in range(1, self.MAX_ATTEMPTS):
        self.service_name = str(uuid.uuid1())
        self.check_one_request()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_stop_flag(con):\n k, v = con.kv.get(\"service/rebootmgr/stop\")\n if v:\n return True\n return False", "def test_dos_create_service_caching_list(self):\n # create a huge list of domain\n self.reset_defaults()\n for k in range(1, 16000):\n self.caching_list.append({\"name\": \"d%s\" % k, \"ttl\": 3600,\n \"rules\": [{\"request_url\": \"/i.htm\",\n \"name\": \"i\"}]})\n\n # send MAX_ATTEMPTS requests\n for k in range(1, self.MAX_ATTEMPTS):\n self.service_name = str(uuid.uuid1())\n self.check_one_request()", "def test_stopServiceCleanupScheduledRestarts(self):\r\n self.pm.threshold = 5\r\n self.pm.minRestartDelay = 5\r\n # Start service and add a process (started immediately)\r\n self.pm.startService()\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n # Stop the process after 1s\r\n self.reactor.advance(1)\r\n self.pm.stopProcess(\"foo\")\r\n # Wait 1s for it to exit it will be scheduled to restart 5s later\r\n self.reactor.advance(1)\r\n # Meanwhile stop the service\r\n self.pm.stopService()\r\n # Advance to beyond the process restart time\r\n self.reactor.advance(6)\r\n # The process shouldn't have restarted because stopService has cancelled\r\n # all pending process restarts.\r\n self.assertEqual(self.pm.protocols, {})", "def check_launcher():\n\n # Storage in memory which holds info about currently running checks\n storage = {}\n\n # Storage in memory which holds process info: process id and project objects\n processes = {}\n\n # Close previously opened connections (if the exist)\n django.db.connections.close_all()\n\n while True:\n # Making Copy in order to compare updates in data base\n new_storage = copy.deepcopy(storage)\n\n # Fetch data from database\n check_sync(new_storage)\n\n # Get storage keys in order to compare storages for changes\n old_keys = set(storage.keys())\n new_keys = set(new_storage.keys())\n\n # Get keys of elements in init storage and updated storage\n added_checks = new_keys.difference(old_keys)\n deleted_checks = old_keys.difference(new_keys)\n common_checks = new_keys.intersection(old_keys)\n\n # Launch new processes\n for check_id in added_checks:\n # Spawn new process with name Process#id, where id = check_id\n start_process(check_id, new_storage, processes)\n\n # Stop (kill) deleted check's prorcesses\n for check_id in deleted_checks:\n stop_process(check_id, storage, processes)\n\n for check_id in common_checks:\n if storage[check_id] != new_storage[check_id]:\n stop_process(check_id, storage, processes)\n # Spawn new process with name Process#id, where id = check_id\n start_process(check_id, new_storage, processes)\n\n storage = copy.deepcopy(new_storage)\n time.sleep(30)", "def test_dos_list_service_huge_limit(self):\n # create a huge list of domain\n attack_string = \"1\" * 3500\n params = {\"limit\": attack_string, \"marker\": attack_string}\n resp = self.client.list_services(param=params)\n self.assertTrue(resp.status_code < 503)", "def test_dos_list_service_huge_junk(self):\n # create a huge list of domain\n attack_string = \"1\" * 3500\n params = {\"junk\": attack_string}\n resp = self.client.list_services(param=params)\n self.assertTrue(resp.status_code < 503)", "def KillOldFlows(self):\n if self.IsRunning():\n start_time = self.Get(self.Schema.LAST_RUN_TIME)\n lifetime = self.Get(self.Schema.CRON_ARGS).lifetime\n elapsed = time.time() - start_time.AsSecondsFromEpoch()\n\n if lifetime and elapsed > lifetime.seconds:\n self.StopCurrentRun()\n stats.STATS.IncrementCounter(\"cron_job_timeout\",\n fields=[self.urn.Basename()])\n 
stats.STATS.RecordEvent(\"cron_job_latency\", elapsed,\n fields=[self.urn.Basename()])\n return True\n\n return False", "def stopped_check(self, timeout=None):", "def check_stop(cmd, filterstr, retry=1, nrinstances=0):\n\n found = get_filtered_pids(filterstr)\n for i in range(retry):\n if len(found) == nrinstances:\n return\n # print \"START:%s\"%cmd\n execute(cmd, die=False)\n time.sleep(1)\n found = get_filtered_pids(filterstr)\n for item in found:\n kill(int(item), 9)\n found = get_filtered_pids(filterstr)\n\n if len(found) != 0:\n raise j.exceptions.RuntimeError(\"could not stop %s, found %s nr of instances.\" % (cmd, len(found)))", "def prune_if_necessary():\n if (current_app.running_context.cache.incr(\"number_of_operations\")\n >= current_app.config['JWT_BLACKLIST_PRUNE_FREQUENCY']):\n prune_database()", "def issuer_liveness_check():\n global app_config\n\n if app_config[\"running\"]:\n # return True until we get a shutdown request\n return True\n\n # return True until the work queue is cleared\n return tob_connection_active()", "def test_dontStartPrivilegedService(self):\n ports = self._privilegedStartService(self.highPortNumber)\n self.assertEqual(ports, [])", "def killAll(controller=False):", "def check_ambari_server_process_down(self):\n process_name = \"ambari-server\"\n output = self.__find_process(process_name)\n return re.search(process_name, output) is None", "def check(self):\n if self.backend.poll():\n raise RuntimeError('Backend process died.')\n\n if self.esp.poll():\n raise RuntimeError('ESP process died.')", "def lysis(self) :\n self.kill()\n return True", "def kill_all():\n compose_kill_all()", "def basic_overcloud_processes_running(self):\n for attempt_number in range(600):\n\n try:\n\n for process_name in self.processes_to_check:\n # osp16/python3 process is \"neutron-server:\"\n if process_name == 'neutron-server' and \\\n self.oc_procs_df.query('PROCESS==\"{}\"'.format(\n process_name)).empty:\n process_name = 'neutron-server:'\n # osp17 mysqld process name is mysqld_safe\n if process_name == 'mysqld' and \\\n self.oc_procs_df.query('PROCESS==\"{}\"'.format(\n process_name)).empty:\n process_name = 'mysqld_safe'\n # redis not deployed on osp17 by default, only if some\n # other services such as designate and octavia are deployed\n if (process_name == 'redis-server' and\n not overcloud.is_redis_expected()):\n redis_message = (\"redis-server not expected on OSP 17 \"\n \"and later releases by default\")\n if self.oc_procs_df.query(\n f'PROCESS==\"{process_name}\"').empty:\n LOG.info(redis_message)\n continue\n else:\n raise OvercloudProcessesException(\n process_error=redis_message)\n\n if not self.oc_procs_df.query('PROCESS==\"{}\"'.format(\n process_name)).empty:\n LOG.info(\"overcloud processes status checks: \"\n \"process {} is \"\n \"in running state\".format(process_name))\n continue\n else:\n LOG.info(\"Failure : overcloud processes status checks:\"\n \"process {} is not running \".format(\n process_name))\n raise OvercloudProcessesException(\n process_error=\"process {} is not running \".format(\n process_name))\n # if all procs are running we can return true\n return True\n except OvercloudProcessesException:\n LOG.info('Retrying overcloud processes checks attempt '\n '{} of 360'.format(attempt_number))\n time.sleep(1)\n self.oc_procs_df = overcloud.get_overcloud_nodes_dataframe(\n get_overcloud_node_processes_table)\n # exhausted all retries\n tobiko.fail('Not all overcloud processes are running !\\n')", "def 
test_live_migration_dest_check_service_lack_memory(self):\n instance_id = self._create_instance()\n instance_id2 = self._create_instance(host='somewhere',\n memory_mb=12)\n i_ref = db.instance_get(self.context, instance_id)\n s_ref = self._create_compute_service(host='somewhere')\n\n self.assertRaises(exception.NovaException,\n self.scheduler.driver._live_migration_dest_check,\n self.context, i_ref, 'somewhere', False)\n\n db.instance_destroy(self.context, instance_id)\n db.instance_destroy(self.context, instance_id2)\n db.service_destroy(self.context, s_ref['id'])", "def test_hugepage_service_state(Service):\n\n service = Service('disable-transparent-hugepages')\n\n assert service.is_enabled\n assert service.is_running", "def test_stopService(self):\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.pm.addProcess(\"bar\", [\"bar\"])\r\n # Schedule the process to start\r\n self.pm.startService()\r\n # advance the reactor to start the processes\r\n self.reactor.advance(self.pm.threshold)\r\n self.assertIn(\"foo\", self.pm.protocols)\r\n self.assertIn(\"bar\", self.pm.protocols)\r\n\r\n self.reactor.advance(1)\r\n\r\n self.pm.stopService()\r\n # Advance to beyond the killTime - all monitored processes\r\n # should have exited\r\n self.reactor.advance(self.pm.killTime + 1)\r\n # The processes shouldn't be restarted\r\n self.assertEqual({}, self.pm.protocols)", "def stop_check(self):\n pass", "def oci_compute_attack_surface_open_tcp_port_check(cache, awsAccountId, awsRegion, awsPartition, ociTenancyId, ociUserId, ociRegionName, ociCompartments, ociUserApiKeyFingerprint):\n # ISO Time\n iso8601Time = datetime.datetime.now(datetime.timezone.utc).isoformat()\n for instance in get_oci_compute_instances(cache, ociTenancyId, ociUserId, ociRegionName, ociCompartments, ociUserApiKeyFingerprint):\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(instance,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n instanceId = instance[\"id\"]\n instanceName = instance[\"display_name\"]\n compartmentId = instance[\"compartment_id\"]\n imageId = instance[\"image_id\"]\n shape = instance[\"shape\"]\n lifecycleState = instance[\"lifecycle_state\"]\n # Get the VNIC info\n instanceVnic = get_compute_instance_vnic(ociTenancyId, ociUserId, ociRegionName, ociUserApiKeyFingerprint, compartmentId, instanceId)\n # Skip over instances that are not public\n pubIp = instanceVnic[\"public_ip\"]\n if instanceVnic[\"public_ip\"] is None:\n continue\n # Submit details to the scanner function\n scanner = scan_host(pubIp, instanceName, \"OCI Cloud Compute instance\")\n # NoneType returned on KeyError due to Nmap errors\n if scanner == None:\n continue\n else:\n # Loop the results of the scan - starting with Open Ports which require a combination of\n # a Public Instance, an open SG rule, and a running service/server on the host itself\n # use enumerate and a fixed offset to product the Check Title ID number\n for index, p in enumerate(scanner[pubIp][\"ports\"]):\n # Parse out the Protocol, Port, Service, and State/State Reason from NMAP Results\n checkIdNumber = str(int(index + 1))\n portNumber = int(p[\"portid\"])\n if portNumber == 8089:\n serviceName = 'SPLUNKD'\n elif portNumber == 10250:\n serviceName = 'KUBERNETES-API'\n elif portNumber == 5672:\n serviceName = 'RABBITMQ'\n elif portNumber == 4040:\n serviceName = 'SPARK-WEBUI'\n else:\n try:\n serviceName = str(p[\"service\"][\"name\"]).upper()\n except KeyError:\n serviceName = \"Unknown\"\n serviceStateReason = 
str(p[\"reason\"])\n serviceState = str(p[\"state\"])\n # This is a failing check\n if serviceState == \"open\":\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{instanceId}/oci-attack-surface-compute-instance-open-{serviceName}-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{instanceId}/oci-attack-surface-compute-instance-open-{serviceName}-check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices/Network Reachability\",\n \"TTPs/Discovery\"\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"HIGH\"},\n \"Confidence\": 99,\n \"Title\": f\"[AttackSurface.OCI.ComputeInstance.{checkIdNumber}] Cloud Compute instances should not be publicly reachable on {serviceName}\",\n \"Description\": f\"Oracle Cloud Compute instance {instanceName} in Compartment {compartmentId} in {ociRegionName} is publicly reachable on port {portNumber} which corresponds to the {serviceName} service. When Services are successfully fingerprinted by the ElectricEye Attack Surface Management Auditor it means the instance is public (mapped 'public_ip` in the associated vNIC), has an open Security List or Network Security Group, and a running service on the host which adversaries can also see. Refer to the remediation insturctions for an example of a way to secure OCI Cloud Compute instances.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"OCI Cloud Compute instances should only have the minimum necessary ports open to achieve their purposes, allow traffic from authorized sources, and use other defense-in-depth and hardening strategies. For a basic view on traffic authorization into your instances refer to the Public IP Addresses section of the Oracle Cloud Infrastructure Documentation for Networks.\",\n \"Url\": \"https://docs.oracle.com/en-us/iaas/Content/Network/Tasks/managingpublicIPs.htm#Public_IP_Addresses\"\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"OCI\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": ociTenancyId,\n \"AssetRegion\": ociRegionName,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Compute\",\n \"AssetService\": \"Oracle Cloud Compute\",\n \"AssetComponent\": \"Instance\"\n },\n \"Resources\": [\n {\n \"Type\": \"OciCloudComputeInstance\",\n \"Id\": instanceId,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"TenancyId\": ociTenancyId,\n \"CompartmentId\": compartmentId,\n \"Region\": ociRegionName,\n \"Name\": instanceName,\n \"Id\": instanceId,\n \"ImageId\": imageId,\n \"Shape\": shape,\n \"LifecycleState\": lifecycleState\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.AC-3\",\n \"NIST SP 800-53 Rev. 4 AC-1\",\n \"NIST SP 800-53 Rev. 4 AC-17\",\n \"NIST SP 800-53 Rev. 4 AC-19\",\n \"NIST SP 800-53 Rev. 4 AC-20\",\n \"NIST SP 800-53 Rev. 
4 SC-15\",\n \"AICPA TSC CC6.6\",\n \"ISO 27001:2013 A.6.2.1\",\n \"ISO 27001:2013 A.6.2.2\",\n \"ISO 27001:2013 A.11.2.6\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.2.1\",\n \"MITRE ATT&CK T1040\",\n \"MITRE ATT&CK T1046\",\n \"MITRE ATT&CK T1580\",\n \"MITRE ATT&CK T1590\",\n \"MITRE ATT&CK T1592\",\n \"MITRE ATT&CK T1595\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{instanceId}/oci-attack-surface-compute-instance-open-{serviceName}-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{instanceId}/oci-attack-surface-compute-instance-open-{serviceName}-check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices/Network Reachability\",\n \"TTPs/Discovery\"\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": f\"[AttackSurface.OCI.ComputeInstance.{checkIdNumber}] Cloud Compute instances should not be publicly reachable on {serviceName}\",\n \"Description\": f\"Oracle Cloud Compute instance {instanceName} in Compartment {compartmentId} in {ociRegionName} is not publicly reachable on port {portNumber} which corresponds to the {serviceName} service due to {serviceStateReason}. OCI Cloud Compute instances and their respective Security Lists and/or Network Security Groups should still be reviewed for minimum necessary access.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"OCI Cloud Compute instances should only have the minimum necessary ports open to achieve their purposes, allow traffic from authorized sources, and use other defense-in-depth and hardening strategies. For a basic view on traffic authorization into your instances refer to the Public IP Addresses section of the Oracle Cloud Infrastructure Documentation for Networks.\",\n \"Url\": \"https://docs.oracle.com/en-us/iaas/Content/Network/Tasks/managingpublicIPs.htm#Public_IP_Addresses\"\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"OCI\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": ociTenancyId,\n \"AssetRegion\": ociRegionName,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Compute\",\n \"AssetService\": \"Oracle Cloud Compute\",\n \"AssetComponent\": \"Instance\"\n },\n \"Resources\": [\n {\n \"Type\": \"OciCloudComputeInstance\",\n \"Id\": instanceId,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"TenancyId\": ociTenancyId,\n \"CompartmentId\": compartmentId,\n \"Region\": ociRegionName,\n \"Name\": instanceName,\n \"Id\": instanceId,\n \"ImageId\": imageId,\n \"Shape\": shape,\n \"LifecycleState\": lifecycleState\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.AC-3\",\n \"NIST SP 800-53 Rev. 4 AC-1\",\n \"NIST SP 800-53 Rev. 4 AC-17\",\n \"NIST SP 800-53 Rev. 4 AC-19\",\n \"NIST SP 800-53 Rev. 4 AC-20\",\n \"NIST SP 800-53 Rev. 
4 SC-15\",\n \"AICPA TSC CC6.6\",\n \"ISO 27001:2013 A.6.2.1\",\n \"ISO 27001:2013 A.6.2.2\",\n \"ISO 27001:2013 A.11.2.6\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.2.1\",\n \"MITRE ATT&CK T1040\",\n \"MITRE ATT&CK T1046\",\n \"MITRE ATT&CK T1580\",\n \"MITRE ATT&CK T1590\",\n \"MITRE ATT&CK T1592\",\n \"MITRE ATT&CK T1595\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding", "def check4kill(self,threadPID = -1):\n #TODO: check in the kill log if my main or my thred PID are there.\n # In case True, kill all. /var/log/check_status/check_kills.log\n # kill $TOPPID\n # /var/log/check_status/check_off.log\n off_log = \"/var/log/check_status/check_off.log\"\n kill_log = \"/var/log/check_status/check_kills.log\"\n try:\n f = open (off_log, \"r\")\n l = f.read(self.conn.data_buf_size)\n while (l or self.status != Modem.Status.KILL):\n if l == \"poweroff\":\n self.status = Modem.Status.KILL\n l = f.read(self.conn.data_buf_size)\n f.close()\n except IOError:\n print off_log + \" not found\"\n try:\n f = open (kill_log, \"r\")\n l = f.read(self.conn.data_buf_size)\n while (l or self.status != Modem.Status.KILL):\n if (l == (\"kill \" + str(threadPID)) or \\\n l == (\"kill \" + str(self.mainPID))):\n self.status = Modem.Status.KILL\n l = f.read(self.conn.data_buf_size)\n f.close()\n except IOError:\n print kill_log + \" not found\"", "def verify_services(self):\n services = [\"metric_collector\", \"log_collector\"]\n service_version_9 = [\"lma_collector\"]\n pids = {}\n processes_count = {\n \"collectd \": 1,\n \"collectdmon \": 1\n }\n\n if self.settings.version.startswith(\"0.9\"):\n processes_count[\n \"hekad -config[= ]/etc/{}\".format(service_version_9)] = 1\n else:\n # Starting with 0.10, there are one collector for logs and one for\n # metrics\n for service in services:\n processes_count[\"hekad -config[= ]/etc/{}\".format(service)] = 1\n online_nodes = [node for node in self.helpers.get_all_ready_nodes()\n if node[\"online\"]]\n for node in online_nodes:\n pids[node[\"name\"]] = {}\n with self.env.d_env.get_ssh_to_remote(node[\"ip\"]) as remote:\n for process, count in processes_count.items():\n logger.info(\"Checking process {0} on node {1}\".format(\n process, node[\"name\"]\n ))\n pids[node[\"name\"]][process] = (\n self.checkers.check_process_count(\n remote, process, count))\n return pids", "def reload_resources(self, id=-1, forced=False):\n if self.is_over_threshold() or forced:\n print('memory portfolio too high....quitting... 
removing [%s] ' % (str(id) ))\n self.reset_bot_resources(new_scraper=self.restart_web_scraper( start_url=self.state_storage_get_prop('start_url'), id=self.bot_data.get('web_driver', {}).id))\n return True\n return False", "def kill_running_process(appName=\"bitcoind\"):\n for line in os.popen(\"ps ax | grep \" + appName + \" | grep -v grep\"):\n fields = line.split()\n pid = fields[0]\n os.kill(int(pid), signal.SIGKILL)", "def _wait_for_management(self, ip, timeout, port=80):\n validation_url = 'http://{0}:{1}/blueprints'.format(ip, port)\n\n end = time.time() + timeout\n\n while end - time.time() >= 0:\n try:\n status = urllib.urlopen(validation_url).getcode()\n if status == 200:\n return True\n except IOError:\n time.sleep(5)\n\n return False", "def test_hms_service_dies(self):\n # Force the tables to be uncached and then kill the hive metastore.\n tbl_name = \"functional.alltypes\"\n self.client.execute(\"invalidate metadata %s\" % tbl_name)\n kill_cmd = os.path.join(os.environ['IMPALA_HOME'], 'testdata/bin/kill-hive-server.sh')\n check_call([kill_cmd], close_fds=True)\n\n try:\n self.client.execute(\"describe %s\" % tbl_name)\n except ImpalaBeeswaxException as e:\n print(str(e))\n assert \"Failed to load metadata for table: %s. Running 'invalidate metadata %s' \"\\\n \"may resolve this problem.\" % (tbl_name, tbl_name) in str(e)\n self.run_hive_server()\n\n self.client.execute(\"invalidate metadata %s\" % tbl_name)\n self.client.execute(\"describe %s\" % tbl_name)", "def _clean_up_when_fail(self):\n if self.user_pool.cache_lock.locked():\n self.user_pool.cache_lock.release()\n abnormal_interrupt = False\n if \"abnormal_interrupt\" in self.case_info_dict:\n abnormal_interrupt = self.case_info_dict[\"abnormal_interrupt\"]\n\n if abnormal_interrupt:\n self.p_stop_signal.set()\n # False,设置终止整个进程信号\n else:\n self.user_pool.cache_lock.acquire()\n\n if self.conf_group_id not in self.user_pool.group_id_list:\n self.user_pool.group_id_list.appendleft(self.conf_group_id)\n\n if self.conf_id is not None:\n self.user_pool.conf_id_list.remove(self.conf_id)\n\n if self.conf_id in self.user_pool.conf_id_obj_dict:\n del self.user_pool.conf_id_obj_dict[self.conf_id]\n\n for in_conf_mem_phone_num in self.in_conf_mem_phone_num_list:\n if in_conf_mem_phone_num in self.user_pool.in_conf_mem_phone_num_list:\n self.user_pool.in_conf_mem_phone_num_list.remove(in_conf_mem_phone_num)\n for out_conf_mem_phone_num in self.out_conf_mem_phone_num_list:\n if out_conf_mem_phone_num in self.user_pool.out_conf_mem_phone_num_list:\n self.user_pool.out_conf_mem_phone_num_list.remove(out_conf_mem_phone_num)\n\n for conf_mem_phone_num in self.conf_mem_phone_num_list:\n if conf_mem_phone_num in self.user_pool.conf_mem_phone_num_list:\n self.user_pool.conf_mem_phone_num_list.remove(conf_mem_phone_num)\n if conf_mem_phone_num in self.user_pool.conf_all_mem_phone_num_list:\n self.user_pool.conf_all_mem_phone_num_list.remove(conf_mem_phone_num)\n if self.chairman_phone_num in self.user_pool.conf_chair_phone_num_list:\n self.user_pool.conf_chair_phone_num_list.remove(self.chairman_phone_num)\n if self.chairman_phone_num in self.user_pool.conf_all_mem_phone_num_list:\n self.user_pool.conf_all_mem_phone_num_list.remove(self.chairman_phone_num)\n\n self.group_obj.is_in_conf = True\n self.group_obj.conf_id = None\n self.user_pool.cache_lock.release()" ]
[ "0.58728933", "0.57062775", "0.5591448", "0.55198026", "0.5498804", "0.54034287", "0.5393193", "0.5377958", "0.53490484", "0.5301769", "0.5300462", "0.5272994", "0.52642924", "0.52431273", "0.52165616", "0.5213221", "0.5209753", "0.5188523", "0.5138128", "0.5119553", "0.51152056", "0.5099388", "0.50838065", "0.5081976", "0.507388", "0.5070005", "0.5065055", "0.5063597", "0.5059398", "0.5054269" ]
0.6129817
0
Check whether it is possible to kill the application by listing all services with a huge limit
def test_dos_list_service_huge_limit(self):
    # create a huge list of domain
    attack_string = "1" * 3500
    params = {"limit": attack_string, "marker": attack_string}
    resp = self.client.list_services(param=params)
    self.assertTrue(resp.status_code < 503)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_dos_list_service_huge_junk(self):\n # create a huge list of domain\n attack_string = \"1\" * 3500\n params = {\"junk\": attack_string}\n resp = self.client.list_services(param=params)\n self.assertTrue(resp.status_code < 503)", "def checkProcess(process_id, time_limit):\n # set an arbitrary time limit\n t_end = time.time() + 60 * time_limit\n while time.time() < t_end:\n if not is_process_running(process_id):\n print(\"process {0} terminated\".format(process_id))\n return\n\n # could be an external integration (slack, pager duty)\n print(\"process {0} still running. Kill?\".format(process_id))", "def test_dontStartPrivilegedService(self):\n ports = self._privilegedStartService(self.highPortNumber)\n self.assertEqual(ports, [])", "def check_stop(cmd, filterstr, retry=1, nrinstances=0):\n\n found = get_filtered_pids(filterstr)\n for i in range(retry):\n if len(found) == nrinstances:\n return\n # print \"START:%s\"%cmd\n execute(cmd, die=False)\n time.sleep(1)\n found = get_filtered_pids(filterstr)\n for item in found:\n kill(int(item), 9)\n found = get_filtered_pids(filterstr)\n\n if len(found) != 0:\n raise j.exceptions.RuntimeError(\"could not stop %s, found %s nr of instances.\" % (cmd, len(found)))", "def killAll(controller=False):", "def check_stop_flag(con):\n k, v = con.kv.get(\"service/rebootmgr/stop\")\n if v:\n return True\n return False", "def kill_all():\n compose_kill_all()", "def stopped_check(self, timeout=None):", "def kill_running_process(appName=\"bitcoind\"):\n for line in os.popen(\"ps ax | grep \" + appName + \" | grep -v grep\"):\n fields = line.split()\n pid = fields[0]\n os.kill(int(pid), signal.SIGKILL)", "def killMongosProc():\n cmd = [\"pgrep -f \\\"\" + MONGOS_KSTR + \"\\\" | xargs kill -9\"]\n executeCommand(cmd)", "def terminate_services(self, services):\n services = self._filter_cid(services)\n for service in services:\n ctr = self.check_service_running(service,\n raise_on=['terminated'])\n logger.info(\"Stopping and \"\n \"removing docker instance : %s\" % service)\n self.driver.stop_container(ctr['Id'], remove=True)\n if service not in self._dirty_service:\n self._dirty_service[service] = {\"ctr\": ctr,\n \"terminated\": True}\n else:\n self._dirty_service[service][\"terminated\"] = True\n return services", "def check_ambari_server_process_down(self):\n process_name = \"ambari-server\"\n output = self.__find_process(process_name)\n return re.search(process_name, output) is None", "def kill_all_tunnels():\n lsof_cmd = \"lsof -i:%d-%d -P -n\"%(port_base, port_base+100)\n try:\n lsof_output = subprocess.check_output(lsof_cmd.split()).decode('utf-8')\n except subprocess.CalledProcessError:\n return []\n except:\n traceback.print_exc(file=sys.stdout)\n logging.warning(\"Unable to probe active tunnels\")\n return []\n \n ssh_procs = list(set([l.split()[1] for l in lsof_output.split('\\n')[1:] if l]))\n for p in ssh_procs:\n subprocess.call([\"kill\", p])\n\n return ssh_procs", "def test_stopServiceCleanupScheduledRestarts(self):\r\n self.pm.threshold = 5\r\n self.pm.minRestartDelay = 5\r\n # Start service and add a process (started immediately)\r\n self.pm.startService()\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n # Stop the process after 1s\r\n self.reactor.advance(1)\r\n self.pm.stopProcess(\"foo\")\r\n # Wait 1s for it to exit it will be scheduled to restart 5s later\r\n self.reactor.advance(1)\r\n # Meanwhile stop the service\r\n self.pm.stopService()\r\n # Advance to beyond the process restart time\r\n self.reactor.advance(6)\r\n # The process 
shouldn't have restarted because stopService has cancelled\r\n # all pending process restarts.\r\n self.assertEqual(self.pm.protocols, {})", "def stopFluidinfo():\n for port in range(9001, 9009):\n sudo('stop fluidinfo-api-node PORT=%d || true' % port)\n sudo('/etc/init.d/nginx stop')\n sudo('/etc/init.d/haproxy stop')", "def freak():\n with settings(hide('everything'), warn_only=True):\n result = []\n try:\n check = sudo('lsof | grep DEL | grep -e crypto -e libssl')\n if check.return_code == 1:\n result = 'OK'\n logging.warning(\"%s: %s\" % (env.host, result))\n else:\n services = check.split('\\r')\n for service in services:\n service_name = service.split()[0]\n result.append(service_name)\n result = list(set(result))\n print(\"%s: VULNERABLE: %s\" % (env.host, ' '.join(result)))\n logging.warning(\"%s: VULNERABLE: %s\" % (env.host,\n ' '.join(result)))\n except Exception as e:\n logging.warning('%s: Error: %s' % (env.host, e.message))", "def detect_used_ports():\n MAX_PORT = 1025\n DEFAULT_HOST = '127.0.0.1'\n open_ports = []\n socket.setdefaulttimeout(1)\n for port in range(0, MAX_PORT):\n res = port_scan(DEFAULT_HOST, port)\n if res:\n open_ports.append(port)\n # debugging purpose to see if program is running\n if port % 5000 == 0 and port != 0:\n sys.stderr.write('.')\n return open_ports", "def __stop(self):\n\n # send commands\n poller = Poller()\n for (pipe, svc) in self.__services.items():\n pipe.send_string('STOP')\n self.logger.debug('sent STOP command to %s service' % svc)\n poller.register(pipe, POLLIN)\n\n # give services a few seconds to cleanup and exit before checking responses\n sleep(1)\n\n max_attempts = len(self.__services)\n attempts = 0\n\n while self.__some_alive() and attempts < max_attempts:\n attempts += 1\n\n # poll for any replies\n items = dict(poller.poll(60000)) # wait for messages\n\n # mark responding services as stopped\n alive = dict(self.__services) # make copy\n for (pipe, svc) in alive.items():\n if pipe in items:\n reply = pipe.recv_string()\n if 'STOPPED' == reply:\n self.logger.debug('received STOPPED control reply from %s service' % svc)\n svc.join(timeout=5) # STOPPED response should be sent right before svc exit\n if svc.is_alive():\n self.logger.error('%s service is still alive; not waiting' % svc)\n else:\n self.logger.debug('%s service thread stopped' % svc)\n poller.unregister(pipe)\n pipe.close()\n del (self.__services[pipe])\n else:\n self.logger.debug('unknown control reply: %s' % reply)\n\n # log some useful info\n if len(self.__services) > 0:\n msg = '%s services still alive after %d cycles; ' % (\n [str(s) for s in self.__services.values()], attempts)\n if attempts < max_attempts:\n msg += 'waiting'\n else:\n msg += 'giving up'\n self.logger.debug(msg)", "def validate_openvpn_pid(result):\n for ps in result:\n if 'openvpn --daemon' in ps:\n print 'OpenVPN Process - OK'\n return True\n print 'OpenVPN Process - DOWN'\n return False", "def wait_for_cancel():\n for pid in pids.values():\n # ps will return nonzero when the pid doesn't exist\n with pytest.raises(subprocess.CalledProcessError):\n self.env.execute_on_manager(['ps', str(pid)])", "def check_running(process, min=1):\n if j.data.platform.is_linux():\n pids = get_pids(process)\n if len(pids) >= min:\n return True\n return False", "def __some_alive(self):\n for service in self.__services.values():\n if service.is_alive():\n return True\n return False", "def killVPN():\n try:\n vpnProcesses = subprocess.check_output(\"ps -A | grep vpn\", shell=True).split(\"\\n\")\n except 
subprocess.CalledProcessError:\n return True\n log.info(\"[i] ps returned: %s\" % repr(vpnProcesses))\n vpnProcesses.remove(\"\")\n for proc in vpnProcesses:\n id = proc.split(\" \")[1]\n log.info(\"[i] Killing vpn process (%s)\" % id)\n subprocess.Popen(\"sudo kill -9 %s\" % id, shell=True)", "def check_timeout(flag: Callable, limit: float) -> bool:\n timed_out = False\n if HAS_SUPERVISOR:\n start = supervisor.ticks_ms()\n while not timed_out and not flag():\n if ticks_diff(supervisor.ticks_ms(), start) >= limit * 1000:\n timed_out = True\n else:\n start = time.monotonic()\n while not timed_out and not flag():\n if time.monotonic() - start >= limit:\n timed_out = True\n return timed_out", "def test_hugepage_service_state(Service):\n\n service = Service('disable-transparent-hugepages')\n\n assert service.is_enabled\n assert service.is_running", "def verify_services(self):\n services = [\"metric_collector\", \"log_collector\"]\n service_version_9 = [\"lma_collector\"]\n pids = {}\n processes_count = {\n \"collectd \": 1,\n \"collectdmon \": 1\n }\n\n if self.settings.version.startswith(\"0.9\"):\n processes_count[\n \"hekad -config[= ]/etc/{}\".format(service_version_9)] = 1\n else:\n # Starting with 0.10, there are one collector for logs and one for\n # metrics\n for service in services:\n processes_count[\"hekad -config[= ]/etc/{}\".format(service)] = 1\n online_nodes = [node for node in self.helpers.get_all_ready_nodes()\n if node[\"online\"]]\n for node in online_nodes:\n pids[node[\"name\"]] = {}\n with self.env.d_env.get_ssh_to_remote(node[\"ip\"]) as remote:\n for process, count in processes_count.items():\n logger.info(\"Checking process {0} on node {1}\".format(\n process, node[\"name\"]\n ))\n pids[node[\"name\"]][process] = (\n self.checkers.check_process_count(\n remote, process, count))\n return pids", "def basic_overcloud_processes_running(self):\n for attempt_number in range(600):\n\n try:\n\n for process_name in self.processes_to_check:\n # osp16/python3 process is \"neutron-server:\"\n if process_name == 'neutron-server' and \\\n self.oc_procs_df.query('PROCESS==\"{}\"'.format(\n process_name)).empty:\n process_name = 'neutron-server:'\n # osp17 mysqld process name is mysqld_safe\n if process_name == 'mysqld' and \\\n self.oc_procs_df.query('PROCESS==\"{}\"'.format(\n process_name)).empty:\n process_name = 'mysqld_safe'\n # redis not deployed on osp17 by default, only if some\n # other services such as designate and octavia are deployed\n if (process_name == 'redis-server' and\n not overcloud.is_redis_expected()):\n redis_message = (\"redis-server not expected on OSP 17 \"\n \"and later releases by default\")\n if self.oc_procs_df.query(\n f'PROCESS==\"{process_name}\"').empty:\n LOG.info(redis_message)\n continue\n else:\n raise OvercloudProcessesException(\n process_error=redis_message)\n\n if not self.oc_procs_df.query('PROCESS==\"{}\"'.format(\n process_name)).empty:\n LOG.info(\"overcloud processes status checks: \"\n \"process {} is \"\n \"in running state\".format(process_name))\n continue\n else:\n LOG.info(\"Failure : overcloud processes status checks:\"\n \"process {} is not running \".format(\n process_name))\n raise OvercloudProcessesException(\n process_error=\"process {} is not running \".format(\n process_name))\n # if all procs are running we can return true\n return True\n except OvercloudProcessesException:\n LOG.info('Retrying overcloud processes checks attempt '\n '{} of 360'.format(attempt_number))\n time.sleep(1)\n self.oc_procs_df = 
overcloud.get_overcloud_nodes_dataframe(\n get_overcloud_node_processes_table)\n # exhausted all retries\n tobiko.fail('Not all overcloud processes are running !\\n')", "def owserver_running():\n for proc in psutil.process_iter():\n if 'owserver' in proc.name():\n return True\n return False", "def test_stopService(self):\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.pm.addProcess(\"bar\", [\"bar\"])\r\n # Schedule the process to start\r\n self.pm.startService()\r\n # advance the reactor to start the processes\r\n self.reactor.advance(self.pm.threshold)\r\n self.assertIn(\"foo\", self.pm.protocols)\r\n self.assertIn(\"bar\", self.pm.protocols)\r\n\r\n self.reactor.advance(1)\r\n\r\n self.pm.stopService()\r\n # Advance to beyond the killTime - all monitored processes\r\n # should have exited\r\n self.reactor.advance(self.pm.killTime + 1)\r\n # The processes shouldn't be restarted\r\n self.assertEqual({}, self.pm.protocols)", "def all_is_stopped(self):\r\n return all(not p.running for p in self._platforms.values())" ]
[ "0.6190547", "0.6119105", "0.60541016", "0.6032116", "0.60296106", "0.6005415", "0.5905079", "0.5887385", "0.5848494", "0.57247454", "0.571023", "0.5692282", "0.5672176", "0.56716925", "0.5535039", "0.5512738", "0.550317", "0.5502144", "0.54839885", "0.5466932", "0.5466897", "0.54600775", "0.54555917", "0.5443791", "0.54280895", "0.541309", "0.5402913", "0.5402739", "0.539313", "0.5391468" ]
0.67318535
0
Given the key of the external_issue return the external issue link.
def get_issue_url(self, key):
    raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def external_key_uri(self) -> str:\n return pulumi.get(self, \"external_key_uri\")", "def get_link_issue_config(self, group, **kwargs):\n return [\n {\n 'name': 'externalIssue',\n 'label': 'Issue',\n 'default': '',\n 'type': 'string',\n }\n ]", "def get_link_issue_config(self, group, **kwargs):\n return [\n {\n 'name': 'externalIssue',\n 'label': 'Issue',\n 'default': '',\n 'type': 'string',\n }\n ]", "def get_issue_url(testcase):\n issue_tracker = get_issue_tracker_for_testcase(testcase)\n if not issue_tracker:\n return None\n\n issue_id = (\n testcase.bug_information\n if testcase.bug_information else testcase.group_bug_information)\n if not issue_id:\n return None\n\n # Use str(issue_id) as |group_bug_information| might be an integer.\n return issue_tracker.issue_url(str(issue_id))", "def get_issue_display_name(self, external_issue):\n return ''", "def _get_key_url(self, key):\n urls = self.get_URLS(key)\n\n if len(urls) == 1:\n return urls[0]\n else: # multiple\n # TODO: utilize cache to check which archives might already be\n # present in the cache.\n # Then if not present in the cache -- check which are present\n # locally and choose that one to use\n if self._last_url and self._last_url in urls:\n return self._last_url\n else:\n return urls[0] # just the first one", "def get_similar_issues_url(issue_tracker, testcase, only_open=True):\n keywords = get_search_keywords(testcase)\n return issue_tracker.find_issues_url(keywords=keywords, only_open=only_open)", "def after_link_issue(self, external_issue, **kwargs):\n pass", "def _LoginOrIssueEntryURL(mr, config):\n issue_entry_url = servlet_helpers.ComputeIssueEntryURL(mr, config)\n if mr.auth.user_id:\n return issue_entry_url\n else:\n after_login_url = framework_helpers.FormatAbsoluteURL(\n mr, urls.ISSUE_ENTRY_AFTER_LOGIN)\n return _SafeCreateLoginURL(mr, after_login_url)", "def build_issue_tracker_url(issue_id):\n issue_tracker_tmpl = settings.ISSUE_TRACKER_BUG_URL_TMPL\n url_tmpl = issue_tracker_tmpl if issue_tracker_tmpl else 'http://issue/%s'\n return url_tmpl % issue_id", "def find_issue_in_source(connection, issue):\n check_jql = 'key = %s' % issue.spin_id\n\n return find_one_issue(connection, check_jql)", "def get_item_url(self):\n try:\n return self.content_object.get_absolute_url()\n except AttributeError:\n return self.external_url", "def url_for_object(self, key: typing.Optional[str]=None) -> str:\n return jsii.invoke(self, \"urlForObject\", [key])", "def url_for_object(self, key: typing.Optional[str]=None) -> str:\n return jsii.invoke(self, \"urlForObject\", [key])", "async def get_external_link(segmentnr: str):\n query_result = {\"link\": \"\"}\n filename = segmentnr.split(':')[0]\n database = get_db()\n query_displayname = database.AQLQuery(\n query=main_queries.QUERY_LINK,\n bindVars={\n \"filename\": filename\n },\n rawResults=True\n )\n query_result = {\"link\": query_displayname.result[0]}\n return query_result", "def show_issue(self, msg, issue_id):\n self._asset_bind(msg)\n yield \"https://github.com/{}/issues/{}\".format(task_repository_name(), issue_id)", "def url(self):\n return url_for_item(self.key)", "def url(self):\n return url_for_item(self.key)", "def url_and_display_name(usage_key):\r\n problem_store = modulestore()\r\n if usage_key not in state_keys_to_problem_info:\r\n problem = problem_store.get_item(usage_key)\r\n problem_info = (problem.url_name, problem.display_name_with_default)\r\n state_keys_to_problem_info[usage_key] = problem_info\r\n\r\n return state_keys_to_problem_info[usage_key]", "def 
issue(ctx, accountable, issue_key):\n accountable.issue_key = issue_key\n if not ctx.invoked_subcommand:\n issue = accountable.issue_meta()\n headers = issue.keys()\n rows = [headers, [v for k, v in issue.items()]]\n print_table(SingleTable(rows))", "def get_link(\n self,\n operator: BaseOperator,\n *,\n ti_key: TaskInstanceKey,\n ) -> str:\n conn = BaseHook.get_connection(\n getattr(operator, \"qubole_conn_id\", None)\n or operator.kwargs[\"qubole_conn_id\"] # type: ignore[attr-defined]\n )\n if conn and conn.host:\n host = re.sub(r\"api$\", \"v2/analyze?command_id=\", conn.host)\n else:\n host = \"https://api.qubole.com/v2/analyze?command_id=\"\n qds_command_id = XCom.get_value(key=\"qbol_cmd_id\", ti_key=ti_key)\n url = host + str(qds_command_id) if qds_command_id else \"\"\n return url", "def key_url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key_url\")", "def key_url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key_url\")", "def _get_key_link(self, key_name):\n return '%s%s/%s.key' % (self.ca_dir, PRIVATE_DIR_NAME, key_name)", "def url_for_object(self, key: typing.Optional[str]=None) -> str:\n ...", "def __expandURL(self, link):\n try:\n return requests.get(link).url\n except Exception:\n return link", "def link(self, k, v, row=None):\n\n if row:\n inspection_id = row.cr_shelter_inspection.id\n if inspection_id:\n return A(v, _href=URL(c = \"cr\",\n f = \"shelter_inspection\",\n args = [inspection_id],\n ),\n )\n return v", "def link(self, k, v, row=None):\n\n if row:\n inspection_id = row.cr_shelter_inspection.id\n if inspection_id:\n return A(v, _href=URL(c = \"cr\",\n f = \"shelter_inspection\",\n args = [inspection_id],\n ),\n )\n return v", "def link(self) -> Optional[str]:\n return pulumi.get(self, \"link\")", "def link(self) -> Optional[str]:\n return pulumi.get(self, \"link\")" ]
[ "0.65414643", "0.6026105", "0.6026105", "0.5845039", "0.5555224", "0.554478", "0.5533305", "0.5504559", "0.5486554", "0.5481756", "0.5427769", "0.5370131", "0.532764", "0.532764", "0.532104", "0.5304017", "0.5276933", "0.5276933", "0.5267372", "0.52666265", "0.524295", "0.52332854", "0.52332854", "0.5208231", "0.51945555", "0.5191945", "0.51911473", "0.51911473", "0.51626945", "0.51626945" ]
0.75889593
0
Stores the last used field defaults on a perproject basis. This accepts a dict of values that will be filtered to keys returned by ``get_persisted_default_config_fields`` which will automatically be merged into the associated field config object as the default.
def store_issue_last_defaults(self, project_id, data):
    persisted_fields = self.get_persisted_default_config_fields()
    if not persisted_fields:
        return

    defaults = {k: v for k, v in six.iteritems(data) if k in persisted_fields}

    self.org_integration.config.update({
        'project_issue_defaults': {project_id: defaults},
    })
    self.org_integration.save()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_fields_with_default(annotation_fields, defaults_dict):\n all_fields = OrderedDict()\n all_filed_keys = _merge_field_keys(annotation_fields, defaults_dict)\n for name in all_filed_keys:\n # Get or create annotation\n annotation = (\n annotation_fields[name]\n if name in annotation_fields\n else _get_annotation_by_value(defaults_dict.get(name, Input._EMPTY))\n )\n # Create annotation if is class type and update default\n annotation = _update_annotation_with_default(annotation, name, defaults_dict.get(name, Input._EMPTY))\n all_fields[name] = annotation\n return all_fields", "def _update_fields_with_default(\n annotation_fields: Dict[str, Union[Annotation, Input, Output]], defaults_dict: Dict[str, Any]\n ) -> Dict[str, Union[Annotation, Input, Output]]:\n all_fields = OrderedDict()\n all_filed_keys = _merge_field_keys(annotation_fields, defaults_dict)\n for name in all_filed_keys:\n # Get or create annotation\n annotation = (\n annotation_fields[name]\n if name in annotation_fields\n else _get_annotation_by_value(defaults_dict.get(name, Input._EMPTY))\n )\n # Create annotation if is class type and update default\n annotation = _update_annotation_with_default(annotation, name, defaults_dict.get(name, Input._EMPTY))\n all_fields[name] = annotation\n return all_fields", "def get_persisted_default_config_fields(self):\n return []", "def _inject_defaults(settings, defaults):\n new_settings = {}\n\n if defaults is None:\n return settings\n elif settings is None or len(settings) == 0:\n new_settings = defaults\n else:\n for k, v in settings.items():\n if isinstance(v, dict) or v is None:\n new_settings[k] = Settings._inject_defaults(v, defaults[k])\n else:\n new_settings[k] = settings[k]\n\n for k, v in defaults.items():\n if k not in settings:\n new_settings[k] = defaults[k]\n return new_settings", "def set_default_values_as_needed(self):\n if self.verbose:\n click.echo('Updating required default values')\n for field in ARGUMENTS_DEFAULT_VALUES:\n if self.__class__.__name__ in ARGUMENTS_DEFAULT_VALUES[field][1]:\n self.data[field] = ARGUMENTS_DEFAULT_VALUES[field][0]", "def set_defaults(fields, defaults):\n undefined = set(defaults.keys()) - set(fields.keys())\n for k in undefined:\n v = defaults[k]\n # see http://pyparsing.wikispaces.com/share/view/71042464\n fields[k] = v\n fields.append(v)", "def your_reservation_defaults(self, defaults):\n\n default_email = self.email()\n if default_email:\n defaults['email'] = self.email()\n\n data = self.additional_data()\n\n if not data:\n return defaults\n\n for form in data:\n if form in self.context.formsets:\n for field in data[form]['values']:\n defaults[\"%s.%s\" % (form, field['key'])] = field['value']\n\n return defaults", "def storeUnloadedDefaultsOnly( self ):\n \n unStoredKeys= [ aKey \n for aKey in self._defDict.keys() \n if aKey not in self._loadedDefaults ]\n if len( unStoredKeys ) == 0:\n return\n \n # keep track of what has been loaded\n [ self._loadedDefaults.append( aKey ) for aKey in unStoredKeys ]\n \n # get the data \n data= [ self._defDict[ aKey ] for aKey in unStoredKeys ] \n \n # loading only unloaded\n tempDict= self._prefObj.load( group= self.prefGroup, \\\n name= unStoredKeys, default= data )\n \n # add if already not a field\n addDict= { aKey.split(\"/\")[-1]: tempDict[aKey] \n if aKey not in self.__dict__ \n else warnings.warn( \"\\\"\" + aKey + \"\\\" is already stored in the data, \" + \\\n \"Will not updated this field with unstored default\" )\n for aKey in tempDict }\n \n self.__dict__.update( addDict )", "def 
propagate_defaults(config_doc):\n for group_name, group_doc in config_doc.items():\n if isinstance(group_doc, dict):\n defaults = group_doc.get('defaults', {})\n\n for item_name, item_doc in group_doc.items():\n if item_name == 'defaults':\n continue\n if isinstance(item_doc, dict):\n\n group_doc[item_name] = \\\n dict_merge_pair(copy.deepcopy(defaults), item_doc)\n\n return config_doc", "def save_defaults(self):\n\n pass", "def assign_defaults(self):\n\n def module_default_sort_key(module):\n sort_key = (\n 1 if module.marked_as_default else -1,\n module.version,\n module.variant,\n -self.index(module.modulepath),\n )\n return sort_key\n\n self.defaults = {}\n grouped = groupby(\n [module for path in self.path for module in path.modules], lambda x: x.name\n )\n for (_, modules) in grouped:\n for module in modules:\n module.is_default = False\n if len(modules) > 1:\n modules = sorted(modules, key=module_default_sort_key, reverse=True)\n modules[0].is_default = True\n self.defaults[modules[0].name] = modules[0]", "def _merge_with_default_values(self, cr, uid, external_session, ressource, vals, sub_mapping_list, defaults=None, context=None):\n if not defaults: return vals\n for key in defaults:\n if not key in vals:\n vals[key] = defaults[key]\n return vals", "def resetStoredDefaults( self ):\n keys= list( self._defDict.keys() )\n data= [ self._defDict[ aKey ] for aKey in keys ]\n \n self.prefObj.save( group= self.prefGroup, name= keys, data= data )\n self.resetSelfWithDefaults()", "def save_defaults(self, overwrite=False):\r\n for (section, option), value in self.defaults.iteritems():\r\n if value is None:\r\n continue\r\n if section not in self.__config:\r\n self.__config[section] = {}\r\n if overwrite or option not in self.__config[section]:\r\n self.__config[section][option] = value\r\n self.save()", "def spread_default_parameters(config, dev_cfg):\n def_cfg = config.get('DEFAULT')\n if def_cfg is None:\n return\n\n for (key, value) in def_cfg.items():\n if key not in dev_cfg:\n dev_cfg[key] = value", "def find_defaults(self):\n\n defaults = self.tree.findall('default')\n default_remote = None\n default_revision = None\n\n if len(defaults) > 1 and self.fail_on_invalid:\n raise InvalidManifest(\n 'More than one default entry, must be unique'\n )\n\n try:\n default_remote = defaults[-1].get('remote')\n default_revision = defaults[-1].get('revision', 'master')\n except IndexError:\n pass # Leave defaults to None\n\n self.defaults = {\n 'remote': default_remote, 'revision': default_revision\n }", "def defaults():\n global __preset_staging\n \n t = TreeDict('Default_Parameter_Tree', __defaultpresettree__ = True)\n __preset_staging[id(t)] = t\n return t", "def replaceDefaults(d):\n defaults = d.pop('.defaults')\n for k, v in defaults.items():\n recursiveSearchReplace(d, '!' 
+ k + '!', v)", "def get_drupal_field_defaults(db_obj, db_cur, entity_type, bundle):\n\n # query string and arguments\n query_str = (\n'''\nSELECT fci.field_name, fci.data\nFROM field_config_instance as fci\nLEFT JOIN field_config as fc\nON fc.id = fci.field_id\nWHERE fci.entity_type = %s\nAND fci.bundle = %s\nAND fc.deleted = 0\n'''\n )\n query_args = [entity_type, bundle]\n\n # execute the query\n if not db_obj.execute(db_cur, query_str.strip(), query_args,\n has_results=True):\n return None\n ret = db_obj.fetchall(db_cur)\n if not ret[0]:\n return None\n if not ret[1]:\n return []\n\n # before we worry about the phpserialize module, make sure there are\n # actually defaults\n found_default = 0\n for row in ret[1]:\n if re.search('s:13:\"default_value\";(?!N;)', row[1]):\n found_default = 1\n if found_default == 0:\n return []\n\n if 'phpserialize' not in sys.modules:\n nori.core.email_logger.error(\n'''Warning: there are defaults for Drupal fields under entity type\n{0} and bundle {1}, but the 'phpserialize' module\nis not available, so they can't be interpreted.''' .\n format(*map(nori.pps, [entity_type, bundle]))\n )\n return None\n\n # massage the defaults - not implemented yet\n nori.core.email_logger.error(\n'''Warning: there are defaults for Drupal fields under entity type\n{0} and bundle {1}, but the interpretation code\nhasn't been implemented yet.''' .\n format(*map(nori.pps, [entity_type, bundle]))\n )\n return None\n #ret[1]\n #field_name: endpoints, field_ram, etc.\n #phpserialize.loads(data)['default_value'][0]['value'] -> '2222'", "def update_with_defaults(self, default_values: dict):\n updates = []\n for key, value in default_values.items():\n for item in self._collect.find({key: {\"$exists\": False}}, {'_id': True}):\n updates.append(pymongo.UpdateOne(item, {\"$set\": {key: value}}))\n\n if len(updates):\n print(\"Update:\", self._collect.bulk_write(updates).modified_count)", "def build_defaults(self, fields, defaults):\n # assert '__iter__' in dir(defaults), iterReq('defaults', defaults)\n if not defaults or '__iter__' not in dir(defaults):\n defaults = []\n if len(defaults) != len(fields):\n print 'WARNING: mismatched lengths of defaults and expected_types'\n print 'Found (%d) instead of (%d)' % (len(defaults), len(fields))\n print '>>> OVERRIDING DEFAULTS TO EXPECTED TYPES W/O ARGS'\n defaults = [ self.expected[f]() for f in self.expected ]\n\n return defaults", "def _initialize_defaults(self):\n for key, value in defaults.items():\n if key not in self.source_params:\n self.source_params[key] = value", "def _initialize_defaults(self):\n for key, value in defaults.items():\n if key not in self.source_params:\n self.source_params[key] = value", "def replace_defaults(d):\n\n # remove the defaults section\n defaults = d.pop('.defaults')\n\n # look for default tags and replace them\n for k, v in defaults.items():\n recursive_search_replace(d, '!' 
+ k + '!', v)", "def schema_defaults(schema, dps_list=None, **defaults):\n copy = schema.extend({})\n for field, field_type in copy.schema.items():\n if isinstance(field_type, vol.In):\n value = None\n for dps in dps_list or []:\n if dps.startswith(f\"{defaults.get(field)} \"):\n value = dps\n break\n\n if value in field_type.container:\n field.default = vol.default_factory(value)\n continue\n\n if field.schema in defaults:\n field.default = vol.default_factory(defaults[field])\n return copy", "def merge_config(user: dict, default: dict) -> dict:\n\n if isinstance(user, dict) and isinstance(default, dict):\n for kk, vv in default.items():\n if kk not in user:\n user[kk] = vv\n else:\n user[kk] = merge_config(user[kk], vv)\n\n return user", "def update_crispval(self, val_dict):\n for v in val_dict:\n if v in self.variables_default_val:\n self.variables_default_val[v] = val_dict[v]", "def add_default_configs(configs: dict, default_configs: dict):\n for key, value in default_configs.items():\n if key not in configs:\n configs[key] = value\n elif isinstance(default_configs[key], dict) and isinstance(configs[key], dict):\n add_default_configs(configs[key], default_configs[key])\n else:\n continue\n\n return configs", "def save_configuration_overrides(self):\n _logging_location = self.configuration_widgets.logging_location_label.text().replace('Logging Location: ', '')\n _output_location = self.configuration_widgets.integrate_location_label.text().replace('Output Location: ', '')\n _DEFAULT_CONFIG = {\n 'loggingLocation': self.configuration_widgets.logging_location_label.text().replace('Logging Location: ', ''),\n 'outputLocation': self.configuration_widgets.integrate_location_label.text().replace('Output Location: ', ''),\n 'loggingStatus': 'True' if self.configuration_widgets.logging_status_checkBox.isChecked() else 'False'\n }\n\n write_json(_DEFAULT_CONFIG)", "def add_default_settings_config(self):\n config = {\n mconst.DEF_SETTINGNAME_default_logfilename: mconst.DEF_SETTINGVAL_default_logfilename_defaultvalue,\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_config, config)" ]
[ "0.6671097", "0.64907825", "0.6364141", "0.6281838", "0.62724817", "0.6210134", "0.6145344", "0.60783255", "0.6055254", "0.6043969", "0.60429865", "0.5975377", "0.5955533", "0.5933369", "0.58850956", "0.585415", "0.56757694", "0.5667322", "0.5656256", "0.5634078", "0.56188464", "0.5572426", "0.5572426", "0.5570144", "0.5569969", "0.55528504", "0.55286777", "0.55258447", "0.5524544", "0.55191123" ]
0.7270465
0
Create an issue via the provider's API and return the issue key, title and description. Should also handle API client exceptions and reraise as an IntegrationError (using the `message_from_error` helper).
def create_issue(self, data, **kwargs):
    raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_issue(*, image: str, repo: str, run: str, stacktrace: str) -> Issue:\n title = f\"Automatic error report from {repo}\"\n body = _report_body(image=image, repo=repo, run=run, stacktrace=stacktrace)\n return TAGBOT_ISSUES_REPO.create_issue(title, body)", "def test_issue_create_issue(self):\n pass", "def create_new_issue(\n self,\n token: str,\n object_id: str,\n customer_id: str,\n project_id: str,\n scope_id: str,\n issue_name: str,\n region: str,\n business_unit: str,\n date_of_raise: str,\n due_date: str,\n nature_of_issue: str,\n criticality: str,\n issue_description: str,\n impact_value: str,\n currency: str,\n impact_on: str,\n document_ref: dict,\n issue_owner: dict,\n resolution_path: str,\n ):\n\n # Type guarding\n assert check_argument_types()\n\n # TODO: make table name environment variable\n table_name = f\"Projects-{customer_id}\"\n\n # Key\n key = {\"projectId\": project_id, \"customerId\": customer_id}\n\n # Projection Expression\n projection_expression = \", \".join([\"projectId\", \"code\"])\n\n # Check if customer and project exist\n logger.info(f\"Checking if project ID or organization ID exists: {key}\")\n response, _ = self._db.read_single_item(table_name, key, projection_expression)\n\n # Get project code\n project_code = response[\"code\"]\n\n # Request body\n dynamo_object = {\n \"scopeId\": scope_id,\n \"issueName\": issue_name,\n \"region\": region,\n \"businessUnit\": business_unit,\n \"dateOfRaise\": date_of_raise,\n \"dueDate\": due_date,\n \"natureOfIssue\": nature_of_issue,\n \"criticality\": criticality,\n \"issueDescription\": issue_description,\n \"status\": \"open\",\n \"impactValue\": impact_value,\n \"currency\": currency,\n \"impactOn\": impact_on,\n \"documentRef\": document_ref,\n \"issueOwner\": issue_owner,\n \"resolutionPath\": resolution_path,\n \"lastUpdated\": str(date.today()),\n \"issueId\": object_id,\n }\n\n # Send project onboarding email\n logger.info(\"Sending project onboarding email\")\n self._email.send_template_email(\n source=getenv(\"SOURCE_EMAIL_ADDRESS\"),\n template_name=getenv(\"ISSUE_ASSIGNMENT_TEMPLATE\"),\n template_data=json.dumps(\n {\n \"issueId\": f'\"{dynamo_object[\"issueName\"]}\"',\n \"projectCode\": project_code,\n }\n ),\n bcc_addresses=[issue_owner[\"email\"]],\n )\n\n # Dynamo update expressions & update\n logger.info(\"Create new project issue\")\n update_expression = (\n f\"SET scopes.#scopeId.issues.#IssueId = :{dynamo_object['issueId']}\"\n )\n expression_attribute_names = {\n \"#scopeId\": scope_id,\n \"#IssueId\": dynamo_object[\"issueId\"]\n }\n expression_attribute_values = {f\":{dynamo_object['issueId']}\": dynamo_object}\n self._db.update_item(\n table_name,\n key,\n update_expression,\n expression_attribute_names,\n expression_attribute_values,\n )\n\n # Log workflow\n message = [f\"Created new issue: {issue_name}\"]\n workflow = Workflows.update_workflows(\n token, \"Create\", message, project_id, dynamo_object[\"issueId\"]\n )\n self._db.create_item(f\"Workflows-{customer_id}\", workflow)\n\n logger.info(\"New issue created successfully\")\n return \"New issue created successfully\", 200", "def create_jira_issue(self, server_url, username, password, issue_summary, issue_description, project_key, issue_type='Bug'):\n status = True\n output_dict = {}\n wdesc = \"Creates a JIRA issue\"\n pSubStep(wdesc)\n issue_summary = issue_summary.replace('\"', \" \")\n issue_description = issue_description.replace('\"', \"-\")\n fetchuri = server_url\n postdata_url=fetchuri+'/rest/api/2/issue/'\n 
postdata = \"\"\"\n {\n \"fields\": {\n \"project\":\n {\n \"key\": \\\"\"\"\"+project_key+\"\"\"\\\"\n },\n \"summary\": \\\"\"\"\"+issue_summary+\"\"\"\\\",\n \"description\": \\\"\"\"\"+issue_description+\"\"\"\\\",\n \"issuetype\": {\n \"name\": \\\"\"\"\"+issue_type+\"\"\"\\\"\n }\n }\n }\n \"\"\"\n credential_handler=urllib2.HTTPPasswordMgrWithDefaultRealm()\n credential_handler.add_password(None, postdata_url, username, password)\n auth = urllib2.HTTPBasicAuthHandler(credential_handler)\n userpassword = username + \":\" + password\n password = base64.b64encode(userpassword)\n #Create an Authentication handler\n opener = urllib2.build_opener(auth)\n urllib2.install_opener(opener)\n opener = urllib2.build_opener(urllib2.HTTPHandler(debuglevel=1))\n #Create a POST request\n headers={\"Authorization\" : \"Basic \"+password,\"Content-Type\": \"application/json\"}\n request=urllib2.Request(str(postdata_url),postdata,headers)\n try:\n handler = urllib2.urlopen(request)\n extension = json.loads(handler.read())\n issue_id = str(extension['key'])\n pNote(\"JIRA Issue Created. Issue-Id: {0}\".format(issue_id))\n output_dict[\"issue_id\"] = issue_id\n except Exception as e:\n status = False\n pNote(\"Problem creating JIRA issue.\" , \"error\")\n pNote(\"JIRA Error Code: ({0})\".format(e) , \"error\")\n\n Utils.data_Utils.update_datarepository(output_dict)\n Utils.testcase_Utils.report_substep_status(status)\n return status", "def issue_create():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-a\", \"--assignees\", default=[], nargs=\"*\", help=\"users to assign to this issue\"\n )\n parser.add_argument(\"-b\", \"--body\", default=None, help=\"text body of the issue\")\n parser.add_argument(\n \"-c\",\n \"--column\",\n default=DEFAULT_COLUMN_NAME,\n help=\"name of column to place card in\",\n )\n parser.add_argument(\n \"-i\",\n \"--interactive\",\n action=\"store_true\",\n default=DEFAULT_COLUMN_NAME,\n help=\"Edit issue title and body in vim\",\n )\n parser.add_argument(\n \"-l\", \"--labels\", default=None, nargs=\"*\", help=\"labels to add to the new issue\"\n )\n parser.add_argument(\n \"-m\",\n \"--milestone\",\n default=None,\n help=\"milestone id to place this issue in. \"\n \"This should be an integer. \"\n \"Find milestone ids with the `milestones` command.\",\n )\n parser.add_argument(\n \"-p\", \"--project\", default=SCRUM_BOARD_NAME, help=\"project to create issue in\"\n )\n parser.add_argument(\"title\", default=None, nargs=\"?\", help=\"issue title\")\n\n args = parser.parse_args()\n\n # only required arg for creating an issue. 
can be overridden in interactive mode\n title = args.title\n\n # this can be overridden in interactive mode\n body = args.body\n\n if args.interactive:\n with tempfile.NamedTemporaryFile(\"w\") as fh:\n path = fh.name\n\n editor = os.environ.get(\"EDITOR\", os.environ.get(\"VISUAL\", \"vi\"))\n\n proc = getattr(sh, editor)\n\n proc(path, _fg=True)\n\n with open(path, \"r\") as rfh:\n\n # grab top line as title\n title = rfh.readline().replace(\"\\n\", \"\")\n\n # grab remaining lines as body\n body = \"\".join(rfh.readlines())\n\n session = GithubSession()\n\n additional_args = {\n \"assignees\": args.assignees,\n \"body\": body,\n \"labels\": args.labels,\n \"milestone\": args.milestone,\n }\n\n issue = session.create_issue(title, **additional_args)\n\n column_name = args.column\n project_name = args.project\n\n project = session.get_project(project_name)\n column = session.get_column(project, column_name)\n\n # finally, create the card\n session.create_card(column, issue)\n\n print(json.dumps(issue, indent=2))", "def create_issue(self, group, form_data, **kwargs):\n headers = { \"X-Redmine-API-Key\": self.get_option('key', group.project),\n 'content-type': 'application/json' }\n verifySSL = self.get_option('verify_ssl', group.project)\n url = urlparse.urljoin(self.get_option('host', group.project), \"issues.json\")\n payload = {\n 'project_id': self.get_option('project_id', group.project),\n 'tracker_id': self.get_option('tracker_id', group.project),\n 'status_id': '0',\n 'subject': form_data['title'].encode('utf-8'),\n 'description': form_data['description'].encode('utf-8'),\n }\n #print >> sys.stderr, \"url:\", url\n #print >> sys.stderr, \"payload:\\n\", pformat(payload)\n #print >> sys.stderr, pformat(group)\n #print >> sys.stderr, pformat(dir(group))\n\n try:\n r = requests.post(url, data=json.dumps({'issue': payload}), headers=headers, verify=verifySSL)\n except requests.exceptions.HTTPError as e:\n raise forms.ValidationError('Unable to reach Redmine host: %s' % repr(e))\n\n try:\n data = json.loads(r.text)\n except json.JSONDecodeError as e:\n #print >> sys.stderr, \"ERROR: %s\" % e\n #print >> sys.stderr, \"RESP:\", r.text\n raise forms.ValidationError('Unable to reach Redmine host: %s' % repr(e))\n\n if not 'issue' in data or not 'id' in data['issue']:\n raise forms.ValidationError('Unable to create redmine ticket')\n\n return data['issue']['id']", "def create_jira_ticket(self, issue_dict=None):\n new_ticket = self.get_jira().create_issue(fields=issue_dict)\n return new_ticket.key", "def _create_issue(self, dep_name, dep_latest_version, is_subtask=False, parent_key=None):\n logging.info(\"Creating a new JIRA issue to track {0} upgrade process\".format(dep_name))\n assignee, owners = self._find_owners(dep_name)\n summary = _ISSUE_SUMMARY_PREFIX + dep_name\n if dep_latest_version:\n summary = summary + \" \" + dep_latest_version\n description = \"\"\"\\n\\n{0}\\n\n Please review and upgrade the {1} to the latest version {2} \\n \n cc: \"\"\".format(\n datetime.today(),\n dep_name,\n dep_latest_version\n )\n for owner in owners:\n description += \"[~{0}], \".format(owner)\n try:\n if not is_subtask:\n issue = self.jira.create_issue(summary, [_JIRA_COMPONENT], description, assignee=assignee)\n else:\n issue = self.jira.create_issue(summary, [_JIRA_COMPONENT], description, assignee=assignee, parent_key=parent_key)\n except Exception as e:\n logging.error(\"Failed creating issue: \"+ str(e))\n raise e\n return issue", "def create_issues(repo, title, body, verbose=None):\n label = 
get_label(repo, title)\n if not label:\n err = \"A label embedded in parentheses is currently required. For \" \\\n \"example 'Title of Error (title_tag).' You provided: {0}\"\n raise NotImplementedError(err.format(title))\n # get stdout written to file\n with open(body) as fi:\n issues = fi.readlines()\n fi.close()\n # Handle empty body\n if not issues:\n raise RuntimeWarning(\"The body text is empty and no issue will be \"\n \"created for file: {}.\".format(body))\n # Handle multiline error messages.\n if 'Traceback' in ''.join(issues):\n if verbose:\n print \"Issue is a Traceback...\"\n string = \"\".join(issues)\n sha = hashlib.sha1(string).hexdigest()[0:6]\n error = dict(experiment_site_id=\"Traceback:{}\".format(sha),\n error=\"Traceback\",\n message=string)\n issues = [json.dumps(error, sort_keys=True)]\n for issue in issues:\n # Check for new format\n try:\n issue_dict = json.loads(issue)\n issue_dict.update({'title': get_valid_title(title)})\n error_msg = issue_dict.get('error')\n experiment_site_id = issue_dict.get('experiment_site_id')\n subject = \"{}, {}\".format(experiment_site_id, error_msg)\n body = generate_body(issue_dict)\n except:\n if verbose:\n print(\"Falling back to old issue formatting.\")\n # Old error handling approach.\n # Create a unique id.\n sha1 = hashlib.sha1(issue).hexdigest()[0:6]\n subject_base = title[0:title.index(' (')]\n subject = subject_base + \": {0}\".format(sha1)\n body = issue\n if is_open_issue(repo, subject, verbose=verbose):\n pass\n else:\n try:\n github_issue = repo.create_issue(subject, body=body, labels=label)\n except Exception as e:\n print 'Failed to create_issue with title:{0}, body:{1} and label:{2}, \\\n exception: {3}'.format(subject, body, label, str(e))\n if verbose:\n print \"Created issue... 
See: {0}\".format(github_issue.url)\n return None", "async def create_issue(\n self,\n title: str or None = None,\n body: str or None = None,\n state: str or None = None,\n milestone: int or None = None,\n labels: [str] or None = None,\n assignees: [str] or None = None,\n ):\n _endpoint = f\"/repos/{self.full_name}/issues\"\n\n data = {}\n if title is not None:\n data[\"title\"] = title\n if body is not None:\n data[\"body\"] = body\n if state is not None:\n data[\"state\"] = state\n if milestone is not None:\n data[\"milestone\"] = milestone\n if labels is not None:\n data[\"labels\"] = labels\n if assignees is not None:\n data[\"assignees\"] = assignees\n\n issue = await self.client.post(endpoint=_endpoint, data=data, jsondata=True)\n return AIOGitHubAPIRepositoryIssue(self.client, issue)", "def create_issue(self, issue_field_dict, assign_current_user=False):\r\n issue_field_dict = eval(str(issue_field_dict))\r\n print issue_field_dict\r\n\r\n new_issue = self.jira.create_issue(issue_field_dict)\r\n if assign_current_user is True:\r\n self.assign_user_to_issue(new_issue, self.jira.current_user())\r\n return new_issue", "def issue_tracker(self) -> IssueTracker:\n issue_tracker_data = self.get(\"issue_tracker\", {})\n parameters = issue_tracker_data.get(\"parameters\", {})\n url = parameters.get(\"url\", \"\")\n issue_parameters = IssueParameters(\n parameters.get(\"project_key\", \"\"),\n parameters.get(\"issue_type\", \"\"),\n parameters.get(\"issue_labels\", []),\n parameters.get(\"epic_link\", \"\"),\n )\n credentials = IssueTrackerCredentials(\n parameters.get(\"username\", \"\"),\n parameters.get(\"password\", \"\"),\n parameters.get(\"private_token\", \"\"),\n )\n return IssueTracker(url, issue_parameters, credentials)", "def test_issue_get_issue(self):\n pass", "def get_issue_tracker(project_name=None):\n issue_tracker_config = local_config.IssueTrackerConfig()\n if not project_name:\n from clusterfuzz._internal.datastore import data_handler\n project_name = data_handler.get_issue_tracker_name()\n\n issue_project_config = issue_tracker_config.get(project_name)\n if not issue_project_config:\n raise ValueError('Issue tracker for {} does not exist'.format(project_name))\n\n constructor = _ISSUE_TRACKER_CONSTRUCTORS.get(issue_project_config['type'])\n if not constructor:\n raise ValueError('Invalid issue tracker type: ' +\n issue_project_config['type'])\n\n return constructor(project_name, issue_project_config)", "def issues_insert(self, mar, request):\n if not mar.perms.CanUsePerm(\n permissions.CREATE_ISSUE, mar.auth.effective_ids, mar.project, []):\n raise permissions.PermissionException(\n 'The requester %s is not allowed to create issues for project %s.' %\n (mar.auth.email, mar.project_name))\n\n with work_env.WorkEnv(mar, self._services) as we:\n owner_id = None\n if request.owner and request.owner.name:\n try:\n owner_id = self._services.user.LookupUserID(\n mar.cnxn, request.owner.name)\n except exceptions.NoSuchUserException:\n raise endpoints.BadRequestException(\n 'The specified owner %s does not exist.' 
% request.owner.name)\n\n cc_ids = []\n request.cc = [cc for cc in request.cc if cc]\n if request.cc:\n cc_ids = list(self._services.user.LookupUserIDs(\n mar.cnxn, [ap.name for ap in request.cc],\n autocreate=True).values())\n comp_ids = api_pb2_v1_helpers.convert_component_ids(\n mar.config, request.components)\n fields_add, _, _, fields_labels, _ = (\n api_pb2_v1_helpers.convert_field_values(\n request.fieldValues, mar, self._services))\n field_helpers.ValidateCustomFields(\n mar, self._services, fields_add, mar.config, mar.errors)\n if mar.errors.AnyErrors():\n raise endpoints.BadRequestException(\n 'Invalid field values: %s' % mar.errors.custom_fields)\n\n logging.info('request.author is %r', request.author)\n reporter_id, timestamp = self.parse_imported_reporter(mar, request)\n new_issue, _ = we.CreateIssue(\n mar.project_id, request.summary, request.status, owner_id,\n cc_ids, request.labels + fields_labels, fields_add,\n comp_ids, request.description,\n blocked_on=api_pb2_v1_helpers.convert_issueref_pbs(\n request.blockedOn, mar, self._services),\n blocking=api_pb2_v1_helpers.convert_issueref_pbs(\n request.blocking, mar, self._services),\n reporter_id=reporter_id, timestamp=timestamp,\n send_email=request.sendEmail)\n we.StarIssue(new_issue, True)\n\n return api_pb2_v1_helpers.convert_issue(\n api_pb2_v1.IssuesGetInsertResponse, new_issue, mar, self._services)", "def test_issue(self):\n issue = Checkmarx.Issue('a_group', 'the_name', 'http://url', 3, 'New')\n\n self.assertEqual('a group', issue.group)\n self.assertEqual('the name', issue.title)\n self.assertEqual('http://url', issue.display_url)\n self.assertEqual(3, issue.count)\n self.assertEqual('New', issue.status)", "def get_issue(self, issue_id):\n try:\n json = self.get('repos/%(owner)s/%(repo)s/issues/%(issue_id)d' % {\n 'owner': self.repo_owner,\n 'repo': self.repo_name,\n 'issue_id': issue_id,\n })\n\n label_list = [label_dict['name'] for label_dict in json['labels']]\n\n return Issue(json['number'], label_list)\n except ResourceNotFound:\n return None", "def get_issue(self, issue_id, **kwargs):\n raise NotImplementedError", "def get_issue(self, issue_id, **kwargs):\n raise NotImplementedError", "def test_create_issue_by_unauthenticated_user_fails(self):\n response = self.client.post(\n self.url,\n json={\"description\": TEST_ISSUE_DESCRIPTION, \"name\": TEST_ISSUE_NAME},\n )\n response_json = response.get_json()\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response_json[\"SubCode\"], \"InvalidToken\")", "async def get_issue(self, issue: int) -> \"AIOGitHubAPIRepositoryIssue\":\n _endpoint = f\"/repos/{self.full_name}/issues/{issue}\"\n\n response = await self.client.get(endpoint=_endpoint)\n return AIOGitHubAPIRepositoryIssue(self.client, response)", "def Issue(self, **kwargs):\n if 'raise_on_failure' not in kwargs:\n kwargs['raise_on_failure'] = False\n return vm_util.IssueCommand(self._GetCommand(), **kwargs)", "def _load_issue(**args):\n check_required_fields(['token', 'repo', 'number'], **args)\n gh = Github(args['token'])\n repo = gh.get_repo(args['repo'])\n issue = repo.get_issue(args['number'])\n logger.debug(\" ISSUE: %s\", issue.number)\n return issue", "def save_issue(self, item):\n logger.debug(\"saving the issues\")\n issue = Issue(id=item['title'])\n issue.title = item['title']\n if 'subtitle' in item:\n if any(word in item['subtitle'] for word in [\"variant\", \"Variant\"]):\n issue.key = ndb.Key(Issue, item['title'] + \" variant\")\n logger.debug(\"found variant, new issue id is \" + 
item['title'] + \" variant\")\n issue.subtitle = item['subtitle']\n\n if 'series' in item:\n series = Series(id=item['series'].rstrip('1234567890 '), title=item['series'].rstrip('1234567890 '))\n series.put()\n issue.series = series.key\n\n if 'reprint' in item:\n issue.reprint = item['reprint']\n\n if 'url' in item:\n issue.url = item['url']\n else:\n issue.url = \"#\"\n\n if 'summary' in item:\n issue.summary = item['summary']\n\n if 'date' in item:\n issue.date = item['date']\n\n if 'price' in item:\n issue.price = item['price']\n\n if \"placeholder/default/no-photo\" in item['image']:\n issue.image = item['image']\n else:\n issue.image = item['image'].replace('small_image/200x', 'image')\n\n issue.put_async()\n logger.debug(\"issue \" + issue.title + \" saved\")", "def test_get_existing_issue_passes(self):\n response = self.client.get(self.url)\n response_json = response.get_json()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response_json[\"name\"], TEST_ISSUE_NAME)", "def test_create_an_issue(self):\n url = reverse('bulletin:issue-create',\n kwargs={'pk': self.newsletter.id})\n response = self.client.get(url,\n follow=True)\n self.assertEqual(response.status_code, 200)\n\n initial_num_newsletter_issues = self.newsletter.issues.count()\n url = reverse('bulletin:issue-create',\n kwargs={'pk': self.newsletter.id})\n response = self.client.post(url,\n data={'pub_date': '2014-10-04',\n 'name': 'Excellent issue'},\n follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(self.newsletter.issues.count(),\n initial_num_newsletter_issues + 1)", "def test_new_Issue(self, requests_post, get_landowner):\n #requests_post.status_code.return_value = 200\n requests_post.json.return_value = {'features': []}\n get_landowner.return_value = 'TEST landowner'\n cat = Category(name=\"test category\")\n cat.save()\n issue = Issue(description=\"test issue\", position=Point(5, 23), category=cat)\n issue.save()\n self.assertEqual(len(Issue.objects.all()), 1)\n issue = Issue(id=666, description=\"test issue with defined id\", position=Point(5, 23), category=cat)\n issue.save()\n self.assertEqual(issue.id, 666)", "def CreateArticle(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_issue(self, context):", "def test_create_api_key(self):\n pass" ]
[ "0.62188655", "0.61720526", "0.61446583", "0.61345875", "0.6107727", "0.6092078", "0.59738606", "0.5907364", "0.58103263", "0.5800523", "0.5772324", "0.5594634", "0.55728924", "0.5495695", "0.5421277", "0.54186255", "0.53647876", "0.5340007", "0.5340007", "0.52912503", "0.5287492", "0.5256903", "0.5205417", "0.52029216", "0.5135169", "0.5130759", "0.51173466", "0.5115872", "0.5112478", "0.5098671" ]
0.6616591
1
Takes the external issue that has been linked via `get_issue`. Does anything needed after an issue has been linked, i.e. creating a comment for a linked issue.
def after_link_issue(self, external_issue, **kwargs): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_issue_create_comment(self):\n pass", "def create_issue_link(self, link_type, inwardissue,\r\n outwardissue, comment=None):\r\n self.jira.create_issue_link(type=link_type,\r\n inwardIssue=str(inwardissue),\r\n outwardIssue=str(outwardissue))", "def test_issue_get_comment(self):\n pass", "def comment_issue(self, msg, issue_id, comment):\n self._asset_bind(msg)\n client = self._github_operator(msg)\n comment_obj = client.issue_comment(task_repository_name(), issue_id, comment)\n yield comment_obj.html_url", "def on_issue_comment(self, payload):\n pass", "def test_get_risk_external_comment(self):\n with factories.single_commit():\n risk = factories.RiskFactory()\n comment = factories.ExternalCommentFactory(description=\"comment\")\n factories.RelationshipFactory(source=risk, destination=comment)\n request_data = [{\n \"filters\": {\n \"expression\": {\n \"object_name\": \"Risk\",\n \"op\": {\n \"name\": \"relevant\"\n },\n \"ids\": [risk.id]\n },\n },\n \"object_name\":\"ExternalComment\",\n \"order_by\": [{\"name\": \"created_at\", \"desc\": \"true\"}],\n }]\n\n response = self.api.post(\n all_models.Risk,\n data=request_data,\n url=\"/query\",\n )\n\n self.assert200(response)\n response_data = response.json[0][\"ExternalComment\"]\n self.assertEqual(response_data[\"count\"], 1)\n self.assertEqual(response_data[\"values\"][0][\"description\"], \"comment\")", "def test_issue_get_comments(self):\n pass", "def test_issue_edit_comment(self):\n pass", "def test_query_external_comment(self):\n with factories.single_commit():\n control = factories.ControlFactory()\n comment = factories.ExternalCommentFactory(description=\"test comment\")\n factories.RelationshipFactory(source=control, destination=comment)\n request_data = [{\n \"filters\": {\n \"expression\": {\n \"object_name\": \"Control\",\n \"op\": {\n \"name\": \"relevant\"\n },\n \"ids\": [control.id]\n },\n },\n \"object_name\":\"ExternalComment\",\n \"order_by\": [{\"name\": \"created_at\", \"desc\": \"true\"}],\n }]\n\n response = self.api.post(\n comment,\n data=request_data,\n url=\"/query\"\n )\n\n self.assert200(response)\n response_data = response.json[0][\"ExternalComment\"]\n self.assertEqual(response_data[\"count\"], 1)\n self.assertEqual(response_data[\"values\"][0][\"description\"], \"test comment\")", "def add_comment(self, issue, comment):\n return self.get_jira().add_comment(issue, comment)", "def add_comment_to_issue(self, issue, comment, visibility=None):\r\n self.jira.add_comment(issue=issue, body=comment)", "def __add_comment(self, issue_id, comment):\n import httplib2\n http = httplib2.Http() \n response, content = http.request(\n uri=self.__issue_url % int(issue_id),\n method='PUT',\n body=comment,\n headers={\n 'X-Redmine-API-Key': self.__api_key,\n 'Content-type': 'application/json'\n }\n )\n print(response)\n print(content)", "def comment_added(self, event):\n import simplejson\n comment = str(event[\"comment\"])\n author_name = str(event[\"author\"][\"name\"])\n change_url = str(event[\"change\"][\"url\"])\n change_subject = str(event[\"change\"][\"subject\"])\n comment = simplejson.dumps({\n \"issue\": {\n \"notes\": self._prepare_comment_added_template(event)\n }\n })\n # get a unique list of issue IDs\n subject_issue_ids = self.__get_issue_ids(change_subject)\n comment_issue_ids = self.__get_issue_ids(comment)\n issue_ids = list(set(subject_issue_ids + comment_issue_ids))\n for issue_id in issue_ids:\n self.__add_comment(issue_id, comment)", "def create_comment(self, body):\n return 
self.client.request(\n \"{}/issues/{}/comments\".format(self.repo.base_path, self.num),\n params={\"body\": body},\n method=\"POST\"\n )", "def main():\n verbose = False\n online = True\n\n if online:\n TOKEN = \"\"\n g = Github(base_url=\"https://github.ibm.com/api/v3\", login_or_token=TOKEN)\n repo = g.get_repo(\"Raphael-Lambert/test_note\")\n\n path = \"C:/Users/RaphaelLambert/Documents/git_issues\"\n onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]\n if verbose:\n print(onlyfiles)\n treated = []\n issues = []\n\n with open(join(path, 'log.txt'), 'r') as doc:\n for line in doc:\n treated.append(line.rstrip('\\n'))\n\n with open(join(path, 'issues.txt'), 'r') as doc:\n for line in doc:\n issues.append(int(line.rstrip('\\n')))\n\n for title in onlyfiles:\n if title != 'log.txt' and title != 'issues.txt' and title not in treated:\n with open(join(path, title), 'rb') as fhdl:\n raw_email = fhdl.read()\n\n parsed_eml = eml_parser.eml_parser.decode_email_b(raw_email, include_raw_body=True)\n if verbose:\n print('-----------------')\n print(title)\n print('-----------------')\n print(parsed_eml)\n print('-----------------')\n body = parsed_eml['body']\n if len(body) > 0:\n raw_text = body[0]['content']\n else:\n raw_text = \"unable to retrieve the message\"\n raw_text = link_breaker(raw_text)\n num_get = 0\n if online and title[:4] == 'Re ' and title[4:] in treated:\n cont_issue = repo.get_issue(issues[treated.index(title[4:])])\n num_get = cont_issue.number\n cont_issue.create_comment(body=raw_text)\n elif online:\n new_issue = repo.create_issue(title=\"Conversation number {}: {}\".format(len(treated), title[:10]+\"...\"),\n body=raw_text)\n if verbose:\n print(new_issue)\n num_get = new_issue.number\n treated.append(title)\n issues.append(num_get)\n\n if verbose:\n print(treated)\n\n with open(join(path, 'log.txt'), 'w') as doc:\n for title in treated:\n doc.write(title+'\\n')\n with open(join(path, 'issues.txt'), 'w') as doc:\n for title in issues:\n doc.write(str(title)+'\\n')", "def add_comment_to_issue(repo, issue_number, body, allow_duplicates):\n found = False\n issue = repo.issue(issue_number)\n\n if not allow_duplicates:\n for comment in issue.iter_comments():\n if comment.body == body:\n found = True\n break\n\n if allow_duplicates or not found:\n success = issue.create_comment(body)\n if success:\n click.echo(\"The comment was successfully posted to the issue.\")\n else:\n click.echo(\"There was a failure commenting on the issue.\")\n raise SystemExit(1)\n else:\n click.echo(\"An identical comment was found, skipping posting comment.\")", "def test_issue_get_repo_comments(self):\n pass", "def comment(self, comment_id):\r\n return IssueComment(self, comment_id)", "def comment(self, comment_id):\r\n return IssueComment(self, comment_id)", "def handle_issue(self, evt):\n author = self.format_nickname(evt.author)\n if evt.new:\n short_url = \"https://dolp.in/i%d\" % evt.issue\n url = Tags.UnderlineBlue(short_url)\n msg = 'Issue %d created: \"%s\" by %s - %s'\n msg = msg % (evt.issue, evt.title, author, url)\n else:\n short_url = \"https://dolp.in/i%d/%d\" % (evt.issue, evt.update)\n url = Tags.UnderlineBlue(short_url)\n msg = 'Update %d to issue %d (\"%s\") by %s - %s'\n msg = msg % (evt.update, evt.issue, evt.title, author, url)\n self.bot.say(msg)", "def createcomment(request, pk):\n issue = get_object_or_404(Issue, pk=pk)\n if request.method == \"POST\":\n form = CommentCreationForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n 
comment.issue = issue\n comment.author = request.user\n comment.created_at = timezone.now()\n comment.save()\n return redirect('office:issue', pk=pk)\n else:\n form = CommentForm()\n return render(request, 'blog/add_comment_to_post.html', {'form': form})", "def test_issue_post_comment_reaction(self):\n pass", "def addcomment(accountable, body):\n\n r = accountable.issue_add_comment(body)\n headers = sorted(['author_name', 'body', 'updated'])\n rows = [[v for k, v in sorted(r.items()) if k in headers]]\n rows.insert(0, headers)\n print_table(SingleTable(rows))", "def test_issue_delete_comment(self):\n pass", "def _HandleIssueLink(self, input_line, match, output_stream):\n issue = match[len(\"issue\"):].strip()\n prefix = match[:-len(issue)]\n\n self._formatting_handler.HandleIssue(\n input_line,\n output_stream,\n prefix,\n issue)", "def problem_comments_append(self, identifier, comment, html=None):\n params = {\"text\": comment}\n if html is not None:\n params[\"html\"] = html\n \n self._post(\"problems/%d/comments\" % identifier, json=params)", "def _apply_comment(self, iid, comment):\n data = {\"body\" : comment._body}\n resp = self._post(\n self._base + \"/issues/{}/comments\".format(iid),\n data=self._format_data(data))", "def test_issue_create_issue(self):\n pass", "def test_creates_comment(self):\n comment_id = CommentAndSummary.objects.get(note=self.note).pk\n report = Report.objects.filter(internal_comments__pk=comment_id)[0]\n self.assertEqual(report.pk, self.pk)", "def create_link(\n integration: Integration,\n installation: IntegrationInstallation,\n event: GroupEvent,\n response: Response,\n) -> None:\n external_issue = ExternalIssue.objects.create(\n organization_id=event.group.project.organization_id,\n integration_id=integration.id,\n key=response[\"key\"],\n title=event.title,\n description=installation.get_group_description(event.group, event),\n metadata=response.get(\"metadata\"),\n )\n GroupLink.objects.create(\n group_id=event.group.id,\n project_id=event.group.project_id,\n linked_type=GroupLink.LinkedType.issue,\n linked_id=external_issue.id,\n relationship=GroupLink.Relationship.references,\n data={\"provider\": integration.provider},\n )" ]
[ "0.6610581", "0.64180446", "0.63382167", "0.6309767", "0.62233156", "0.6104971", "0.60037994", "0.59019923", "0.58587396", "0.5726099", "0.5722433", "0.5694132", "0.56749576", "0.56747717", "0.5665945", "0.5628418", "0.56239235", "0.5613582", "0.5613582", "0.55969155", "0.55774635", "0.55714124", "0.5562684", "0.55542374", "0.5485029", "0.5448388", "0.54329026", "0.5393711", "0.536427", "0.5291447" ]
0.6989102
0
Returns the display name of the issue. This is not required but helpful for integrations whose external issue key does not match the desired display name.
def get_issue_display_name(self, external_issue): return ''
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_name(self) -> Optional[str]:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> Optional[str]:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> Optional[str]:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> str:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> str:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> str:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> str:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> str:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> str:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> str:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> str:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> str:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> str:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> str:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> str:\n return pulumi.get(self, \"display_name\")", "def display_name(self):\n answer = self._call('display_name')\n return answer.display_name", "def get_display_name(self):\n return DisplayText(self._display_name)", "def get_display_name(self):\n return self.display_name", "def display_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"display_name\")", "def display_name(self):\n if \"displayName\" in self._prop_dict:\n return self._prop_dict[\"displayName\"]\n else:\n return None", "def display_name(self):\n if \"displayName\" in self._prop_dict:\n return self._prop_dict[\"displayName\"]\n else:\n return None" ]
[ "0.71604306", "0.71604306", "0.71604306", "0.7145177", "0.7145177", "0.7145177", "0.7145177", "0.7145177", "0.7145177", "0.7145177", "0.7145177", "0.7145177", "0.7145177", "0.7145177", "0.7145177", "0.70821923", "0.69916433", "0.69874066", "0.69397634", "0.69397634", "0.69397634", "0.69397634", "0.69397634", "0.69397634", "0.69397634", "0.69397634", "0.69397634", "0.69397634", "0.6938606", "0.6938606" ]
0.8553241
0
Helper method for get_repository_choices. Returns the choice for the default repo in a tuple to be added to the list of repository choices.
def create_default_repo_choice(self, default_repo): return (default_repo, default_repo)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_repository_choices(self, group, **kwargs):\n try:\n repos = self.get_repositories()\n except ApiError:\n raise IntegrationError(\n 'Unable to retrive repositories. Please try again later.'\n )\n else:\n repo_choices = [(repo['identifier'], repo['name']) for repo in repos]\n\n repo = kwargs.get('repo')\n if not repo:\n params = kwargs.get('params', {})\n defaults = self.get_project_defaults(group.project_id)\n repo = params.get('repo', defaults.get('repo'))\n\n try:\n default_repo = repo or repo_choices[0][0]\n except IndexError:\n return '', repo_choices\n\n # If a repo has been selected outside of the default list of\n # repos, stick it onto the front of the list so that it can be\n # selected.\n try:\n next(True for r in repo_choices if r[0] == default_repo)\n except StopIteration:\n repo_choices.insert(0, self.create_default_repo_choice(default_repo))\n\n return default_repo, repo_choices", "def get_default_repo(self):\n for repo in self.get_repos():\n if self.get_safe(repo, 'default') and self.getboolean(repo, 'default'):\n return repo\n return False", "def select_additional_repositories(preset: List[str]) -> List[str]:\n\n\trepositories = [\"multilib\", \"testing\"]\n\n\tchoice = Menu(\n\t\t_('Choose which optional additional repositories to enable'),\n\t\trepositories,\n\t\tsort=False,\n\t\tmulti=True,\n\t\tpreset_values=preset,\n\t\traise_error_on_interrupt=True\n\t).run()\n\n\tmatch choice.type_:\n\t\tcase MenuSelectionType.Esc: return preset\n\t\tcase MenuSelectionType.Ctrl_c: return []\n\t\tcase MenuSelectionType.Selection: return choice.value", "def test_default_select():\n # Arange\n REPO = \"https://foo.bar/foobar\"\n\n # Act\n rm = gcbo.RepoManager(REPO)\n\n # Assert\n assert rm.select() == REPO", "def as_choices():\n return (\n # Live is currently disabled as a choice\n # pending implementation\n (\"live\", \"Use working directory\"),\n (\"latest\", \"Use latest snapshot\"),\n (\"pinned\", \"Pinned to snapshot\"),\n )", "def setDefaultRepository(self, repositoryName):\n try:\n utility.execLog(\"Navigating to Firmware Tab\")\n self.handleEvent(EC.element_to_be_clickable((By.ID, self.RepositoriesObjects('FW_tab'))), action=\"CLICK\")\n utility.execLog(\"Selecting Firmware Repository '%s' to make it as Default\" % repositoryName)\n self.handleEvent(EC.element_to_be_clickable((By.ID, self.RepositoriesObjects('default_FW'))),\n action=\"SELECT\", setValue=repositoryName)\n self.handleEvent(EC.invisibility_of_element_located((By.XPATH, self.RepositoriesObjects('loading_in'))))\n try:\n utility.execLog(\"Checking for presence of error message\")\n eleError = self.handleEvent(\n EC.presence_of_element_located((By.CLASS_NAME, self.RepositoriesObjects('alert_danger'))),\n retry=False)\n errorMessage = eleError.find_element_by_tag_name(\"ul\").text\n return self.browserObject, False, \"Failed to Set Default Repository :: '%s' :: Error -> '%s'\" % (\n repositoryName,\n str(errorMessage))\n except:\n utility.execLog(\"Error message not found\")\n return self.browserObject, True, \"Successfully Set Default Repository '%s'\" % repositoryName\n except Exception as e:\n return self.browserObject, False, \"Failed to Set Default Repository :: '%s' :: Error -> %s\" % (\n repositoryName, str(e))", "def initDefaultChoices(self):\n return []", "def initDefaultChoices(self):\n return [entry[0] for entry in self.getEditChoices()]", "def default_test_repo(self):\n\n return self.get_raw(\"default_test_repo\")", "def repository_opts(self) -> Optional[pulumi.Input['RepositoryOptsArgs']]:\n 
return pulumi.get(self, \"repository_opts\")", "def get_repo_options(account, **kwargs):\n client = AsyncHTTPClient()\n uri = \"https://api.github.com/user/repos?per_page=100\"\n data = []\n while uri is not None:\n req = account.get_request(uri, headers={\"Accept\": \"application/vnd.github.moondragon+json\"})\n response = yield client.fetch(req)\n response_object = json.loads(response.body.decode('utf-8'))\n data += response_object\n links = parse_link_header(response.headers.get('Link', ''))\n uri = links.get('next', None)\n return [{\"title\": repo['full_name'], \"value\": repo['full_name']}\n for repo in data]", "def repository_type(self) -> str:\n return pulumi.get(self, \"repository_type\")", "def get_default_org(self):\n for org in self.list_orgs():\n org_config = self.get_org(org)\n if org_config.default:\n return org, org_config\n return None, None", "def selectRepo(self, repositoryName, repositoryType=\"Firmware\"):\n\n def find_repo_name(repo_name, all_repos):\n \"\"\"\n Finds repo in OS and Firmware then clicks on it\n :return: False or True\n \"\"\"\n for repo in all_repos:\n all_names = repo.find_elements_by_xpath(\"./td[2]\")\n for name in all_names:\n if name.text == repo_name:\n name.click()\n return True\n return False\n\n try:\n utility.execLog('selectRepo()')\n if repositoryType == \"OS\":\n utility.execLog('repositoryType: \"{}\"'.format(repositoryType))\n os_repos = self.handleEvent(\n EC.presence_of_all_elements_located((By.XPATH, self.RepositoriesObjects('OS_repos'))))\n utility.execLog(\"Total Repositories Available in {}: {}\".format(repositoryType, len(os_repos)))\n if find_repo_name(repositoryName, os_repos) is False:\n return self.browserObject, False, \"Failed to Select '%s' Repository '%s'\" % (\n repositoryType, repositoryName)\n if repositoryType == \"Firmware\":\n utility.execLog('repositoryType: \"{}\"'.format(repositoryType))\n self.handleEvent(EC.element_to_be_clickable((By.ID, self.RepositoriesObjects('FW_tab'))),\n action=\"CLICK\")\n fw_repos = self.handleEvent(\n EC.presence_of_all_elements_located((By.XPATH, self.RepositoriesObjects('FW_repos'))))\n utility.execLog(\"Total Repositories Available in {}: {}\".format(repositoryType, len(fw_repos)))\n if find_repo_name(repositoryName, fw_repos) is False:\n utility.execLog(\"Failed to Select '%s' Repository '%s'\" % (repositoryType, repositoryName))\n return self.browserObject, False, \"Failed to Select '%s' Repository '%s'\" % (repositoryType, repositoryName)\n utility.execLog(\"Able to Select '%s' Repository '%s'\" % (repositoryType, repositoryName))\n return self.browserObject, True, \"Able to Select '%s' Repository '%s'\" % (repositoryType, repositoryName)\n except Exception as e:\n return self.browserObject, False, \"Unable to select '%s' Repository '%s' :: Error -> %s\" % (\n repositoryType, repositoryName, str(e))", "def _get_repo_url(self, descriptor):\n configured_repositories = config.get('repositories')\n\n # We need to remove the custom \"__name__\" element before we can show\n # which repository keys are defined in the configuration\n configured_repository_names = configured_repositories.keys()\n\n if '__name__' in configured_repository_names:\n configured_repository_names.remove('__name__')\n\n if descriptor['name'] not in configured_repositories:\n if len(configured_repository_names):\n logger.warning(\"Package repository '%s' used in descriptor is not \"\n \"available in Cekit configuration file. 
\"\n \"Available repositories: %s\"\n % (descriptor['name'], ' '.join(configured_repository_names)))\n else:\n logger.warning(\"Package repository '%s' used in descriptor is not \"\n \"available in Cekit configuration file. \"\n % descriptor['name'])\n return None\n\n return configured_repositories[descriptor['name']]", "def repo_init(_request):\n python = models.Repository.query(models.Repository.name == 'Python').get()\n if python is None:\n python = models.Repository(name='Python', url=SVN_ROOT)\n python.put()\n pybranches = []\n else:\n pybranches = list(models.Branch.query(models.Branch.repo_key == python.key))\n for category, name, url in BRANCHES:\n url = python.url + url\n for br in pybranches:\n if (br.category, br.name, br.url) == (category, name, url):\n break\n else:\n br = models.Branch(repo_key=python.key, repo_name='Python',\n category=category, name=name, url=url)\n br.put()\n return HttpResponseRedirect(reverse(repos))", "def test_get_component_defaultpackage(self):\n self._ucr({\n 'repository/online/component/b/defaultpackage': 'b',\n 'repository/online/component/c/defaultpackages': 'ca cb',\n 'repository/online/component/d/defaultpackages': 'da,db',\n })\n self.assertEqual(set(('b',)), self.u.get_component_defaultpackage('b'))\n self.assertEqual(set(('ca', 'cb')), self.u.get_component_defaultpackage('c'))\n self.assertEqual(set(('da', 'db')), self.u.get_component_defaultpackage('d'))", "def repository_name(self) -> Optional[str]:\n return pulumi.get(self, \"repository_name\")", "def repository_name(self) -> Optional[str]:\n return pulumi.get(self, \"repository_name\")", "def repository_name(self) -> Optional[str]:\n return pulumi.get(self, \"repository_name\")", "def repository_opts(self) -> pulumi.Output[Optional['outputs.RepositoryOpts']]:\n return pulumi.get(self, \"repository_opts\")", "def get_pr_branch(repo: Repository, branches: Dict[str, Branch]) -> Union[Branch, None]:\n if repo.default_branch in branches:\n return branches[repo.default_branch]\n else:\n return None", "def _get_default_org(user):\n org = user.default_organization\n # check if user is still in the org, i.e. 
s/he wasn't removed from his/her\n # default org or didn't have a set org and try to set the first one\n if (\n not org\n or not OrganizationUser.objects.filter(\n organization=org, user=user\n ).exists()\n ):\n org = user.orgs.first()\n user.default_organization = org\n user.save()\n if org:\n org_id = org.pk\n org_name = org.name\n ou = user.organizationuser_set.filter(organization=org).first()\n # parent org owner has no role (None) yet has access to the sub-org\n org_user_role = _get_js_role(ou.role_level) if ou else \"\"\n return org_id, org_name, org_user_role\n else:\n return \"\", \"\", \"\"", "def _determine_storage_repo(session, resource_pool, vm_):\n storage_repo = \"\"\n if \"storage_repo\" in vm_.keys():\n storage_repo = _get_sr(vm_[\"storage_repo\"], session)\n else:\n storage_repo = None\n if resource_pool:\n default_sr = session.xenapi.pool.get_default_SR(resource_pool)\n sr_record = session.xenapi.SR.get_record(default_sr)\n log.debug(\"storage repository: %s\", sr_record[\"name_label\"])\n storage_repo = default_sr\n else:\n storage_repo = None\n log.debug(\"storage repository: %s\", storage_repo)\n return storage_repo", "def initDefaultChoices(self):\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, DateFormat.dateStampStrings[1])\n return choices", "def get_repos(self):\n\n if self.url == 'test':\n repos = ['feature', 'dev', 'int']\n else:\n repos = []\n\n return repos", "def sync_repo(self) -> Optional[str]:\n return pulumi.get(self, \"sync_repo\")", "def sync_repo(self) -> Optional[str]:\n return pulumi.get(self, \"sync_repo\")", "def repo(self, user, repo):\r\n return repos.Repo(self, user, repo)", "def get_repo(repo_id):\n if repo_id == \"orphans\":\n pkgs = Database().db.get_orphans()\n else:\n pkgs = Database().db.get_repo_pkgs(repo_id)\n return render_template(\"repo.html\", \n title=\" - \"+repo_id,\n repos=Database().db.get_repos_names(),\n pkgs=pkgs,\n repo=repo_id)" ]
[ "0.7996344", "0.6400903", "0.6090768", "0.604242", "0.60069907", "0.56722534", "0.56320244", "0.557179", "0.55387455", "0.54395264", "0.5438947", "0.5407968", "0.5386718", "0.535366", "0.5341311", "0.53403133", "0.5228114", "0.5218312", "0.5218312", "0.5218312", "0.52169466", "0.516126", "0.51381665", "0.50834876", "0.5078879", "0.5026405", "0.49737996", "0.49737996", "0.49667594", "0.49598977" ]
0.8641376
0
Propagate a sentry issue's assignee to a linked issue's assignee. If assign=True, we're assigning the issue. Otherwise, deassign.
def sync_assignee_outbound(self, external_issue, user, assign=True, **kwargs): raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_assignment(self, updates, original=None):\n if not original:\n original = {}\n\n self.set_type(updates, original)\n\n if not updates.get('assigned_to'):\n if updates.get('priority'):\n # Priority was edited - nothing to set here\n return\n else:\n updates['assigned_to'] = {}\n\n assigned_to = updates.get('assigned_to') or {}\n if (assigned_to.get('user') or assigned_to.get('contact')) and not assigned_to.get('desk'):\n raise SuperdeskApiError.badRequestError(message=\"Assignment should have a desk.\")\n\n # set the assignment information\n user = get_user()\n if original.get('assigned_to', {}).get('desk') != assigned_to.get('desk'):\n if original.get('assigned_to', {}).get('state') in \\\n [ASSIGNMENT_WORKFLOW_STATE.IN_PROGRESS, ASSIGNMENT_WORKFLOW_STATE.SUBMITTED]:\n raise SuperdeskApiError.forbiddenError(\n message=\"Assignment linked to content. Desk reassignment not allowed.\")\n\n assigned_to['assigned_date_desk'] = utcnow()\n\n if user and user.get(config.ID_FIELD):\n assigned_to['assignor_desk'] = user.get(config.ID_FIELD)\n\n if assigned_to.get('user') and original.get('assigned_to', {}).get('user') != assigned_to.get('user'):\n assigned_to['assigned_date_user'] = utcnow()\n\n if user and user.get(config.ID_FIELD):\n assigned_to['assignor_user'] = user.get(config.ID_FIELD)\n\n if not original.get(config.ID_FIELD):\n updates['original_creator'] = str(user.get(config.ID_FIELD)) if user else None\n updates['assigned_to'][\n ITEM_STATE] = get_next_assignment_status(updates, updates['assigned_to'].get(ITEM_STATE) or\n ASSIGNMENT_WORKFLOW_STATE.ASSIGNED)\n else:\n # In case user was removed\n if not assigned_to.get('user'):\n assigned_to['user'] = None\n else:\n # Moving from submitted to assigned after user assigned after desk submission\n if original.get('assigned_to')['state'] == ASSIGNMENT_WORKFLOW_STATE.SUBMITTED:\n updates['assigned_to']['state'] = get_next_assignment_status(updates,\n ASSIGNMENT_WORKFLOW_STATE.IN_PROGRESS)\n\n updates['version_creator'] = str(user.get(config.ID_FIELD)) if user else None", "def assigned_by_user(self, assigned_by_user):\n\n self._assigned_by_user = assigned_by_user", "def assigned_user(self, assigned_user):\n self._assigned_user = assigned_user", "def assign(self, assignee, created_by, unit):\n assignment = ReferralAssignment.objects.create(\n assignee=assignee,\n created_by=created_by,\n referral=self,\n unit=unit,\n )\n ReferralActivity.objects.create(\n actor=created_by,\n verb=ReferralActivityVerb.ASSIGNED,\n referral=self,\n item_content_object=assignee,\n )\n # Notify the assignee by sending them an email\n Mailer.send_referral_assigned(\n referral=self,\n assignment=assignment,\n assigned_by=created_by,\n )\n\n if self.state in [ReferralState.IN_VALIDATION, ReferralState.PROCESSING]:\n return self.state\n\n return ReferralState.ASSIGNED", "def assign_user_to_issue(self, issue, JIRAUsername):\r\n # TODO: Review docs\r\n self.jira.assign_issue(issue=issue, assignee=JIRAUsername)", "def accept_assignment(self, assignment_id, assignee):\n\n # Fetch the assignment to ensure that it exists and is in a state that it makes sense to flag as accepted\n original = self.find_one(req=None, _id=ObjectId(assignment_id))\n if not original:\n raise Exception('Accept Assignment unable to locate assignment {}'.format(assignment_id))\n\n if (original.get('assigned_to') or {}).get('state') != ASSIGNMENT_WORKFLOW_STATE.ASSIGNED:\n raise Exception('Assignment {} is not in assigned state'.format(assignment_id))\n\n # try to find a user that the assignment 
is being accepted by\n user_service = superdesk.get_resource_service('users')\n user = user_service.find_one(req=None, _id=ObjectId(assignee))\n if not user:\n # no user try to find a contact\n contact_service = superdesk.get_resource_service('contacts')\n contact = contact_service.find_one(req=None, _id=ObjectId(assignee))\n if contact:\n # make sure it is the assigned contact accepting the assignment\n if str(contact.get(config.ID_FIELD)) != str(original.get('assigned_to', {}).get('contact')):\n raise Exception('Attempt to accept assignment by contact that it is not assigned to')\n else:\n raise Exception(\n 'Unknown User or Contact accepting assignment {} user/contact'.format(assignment_id, assignee))\n else:\n # make sure that the assignment is still assigned to the user that is accepting the assignment\n if str(user.get(config.ID_FIELD)) != str(original.get('assigned_to', {}).get('user')):\n raise Exception('Attempt to accept assignment by user that it is not assigned to')\n\n # If the assignment has already been accepted bail out!\n if original.get('accepted', False):\n raise Exception('The assignment {} is already accepted'.format(assignment_id))\n\n update = {'accepted': True}\n\n # Set flag using system update, bypass locks, etag problems\n self.system_update(ObjectId(assignment_id), update, original)\n\n # update the history\n superdesk.get_resource_service('assignments_history').on_item_updated(\n update, original, ASSIGNMENT_HISTORY_ACTIONS.ACCEPTED)\n\n # send notification\n self.notify('assignments:accepted', update, original)\n\n self.send_acceptance_notification(original)", "def send_assignment_notification(self, updates, original=None, force=False):\n # No notifications for 'draft' assignments\n if self.is_assignment_draft(updates, original):\n return\n\n # No assignment notification sent on start work\n if original.get('assigned_to', {}).get('state') == ASSIGNMENT_WORKFLOW_STATE.ASSIGNED and \\\n updates.get('assigned_to', {}).get('state') == ASSIGNMENT_WORKFLOW_STATE.IN_PROGRESS:\n return\n\n assigned_to = updates.get('assigned_to', {})\n assignment_id = (updates.get('_id') or assigned_to.get('assignment_id', 'Unknown'))\n if not original:\n original = {}\n else:\n assignment_id = original.get('_id')\n\n if not force and not self.is_assignment_modified(updates, original):\n return\n\n user = get_user()\n\n # Determine the name of the desk that the assigment has been allocated to\n assigned_to_desk = get_resource_service('desks').find_one(req=None, _id=assigned_to.get('desk'))\n desk_name = assigned_to_desk.get('name') if assigned_to_desk else 'Unknown'\n\n # Determine the display name of the assignee\n assignee = None\n if assigned_to.get('contact'):\n assigned_to_contact = get_resource_service('contacts').find_one(\n req=None,\n _id=assigned_to.get('contact')\n )\n if assigned_to_contact and len(assigned_to_contact.get('contact_email') or []):\n assignee = '{} {} ({})'.format(\n assigned_to_contact.get('first_name') or '',\n assigned_to_contact.get('last_name') or '',\n assigned_to_contact['contact_email'][0]\n )\n\n if assignee is None and assigned_to.get('user'):\n assigned_to_user = get_resource_service('users').find_one(\n req=None,\n _id=assigned_to.get('user')\n )\n if assigned_to_user and assigned_to_user.get('slack_username'):\n assignee = '@' + assigned_to_user.get('slack_username')\n else:\n assignee = assigned_to_user.get('display_name') if assigned_to_user else 'Unknown'\n\n coverage_type = updates.get('planning', original.get('planning', 
{})).get('g2_content_type', '')\n slugline = updates.get('planning', original.get('planning', {})).get('slugline', 'with no slugline')\n\n client_url = app.config['CLIENT_URL']\n\n assignment = deepcopy(original)\n assignment.update(updates)\n planning_id = assignment.get('planning_item', -1)\n planning_item = get_resource_service('planning').find_one(req=None, _id=planning_id)\n if planning_item and planning_item.get('event_item'):\n event_item = get_resource_service('events').find_one(req=None, _id=planning_item.get('event_item'))\n contacts = []\n for contact_id in event_item.get('event_contact_info', []):\n contact_details = get_resource_service('contacts').find_one(req=None, _id=contact_id)\n if contact_details:\n contacts.append(contact_details)\n if len(contacts):\n event_item['event_contact_info'] = contacts\n else:\n event_item = None\n\n # The assignment is to an external contact or a user\n if assigned_to.get('contact') or assigned_to.get('user'):\n # If it is a reassignment\n meta_msg = 'assignment_details_internal_email' if assigned_to.get('user') else 'assignment_details_email'\n if original.get('assigned_to'):\n # it is being reassigned by the original assignee, notify the new assignee\n if original.get('assigned_to', {}).get('user', '') == str(user.get(config.ID_FIELD, None)):\n PlanningNotifications().notify_assignment(target_user=assigned_to.get('user'),\n message='assignment_reassigned_1_msg',\n meta_message=meta_msg,\n coverage_type=get_coverage_type_name(coverage_type),\n slugline=slugline,\n desk=desk_name,\n client_url=client_url,\n assignment_id=assignment_id,\n assignment=assignment,\n event=event_item,\n is_link=True,\n contact_id=assigned_to.get('contact'))\n # notify the desk\n if assigned_to.get('desk'):\n PlanningNotifications().notify_assignment(target_desk=assigned_to.get('desk'),\n message='assignment_reassigned_3_msg',\n meta_message=meta_msg,\n assignee=assignee,\n client_url=client_url,\n assignment_id=assignment_id,\n desk=desk_name,\n assignor=user.get('display_name'),\n assignment=assignment,\n event=event_item,\n omit_user=True,\n is_link=True)\n\n else:\n # if it was assigned to a desk before, test if there has been a change of desk\n if original.get('assigned_to') and original.get('assigned_to').get('desk') != updates.get(\n 'assigned_to').get('desk'):\n # Determine the name of the desk that the assigment was allocated to\n assigned_from_desk = get_resource_service('desks').find_one(req=None,\n _id=original.get('assigned_to').get(\n 'desk'))\n desk_from_name = assigned_from_desk.get('name') if assigned_from_desk else 'Unknown'\n assigned_from = original.get('assigned_to')\n assigned_from_user = get_resource_service('users').find_one(req=None,\n _id=assigned_from.get('user'))\n old_assignee = assigned_from_user.get('display_name') if assigned_from_user else ''\n PlanningNotifications().notify_assignment(target_desk=assigned_to.get('desk'),\n target_desk2=original.get('assigned_to').get('desk'),\n message='assignment_reassigned_2_msg',\n meta_message=meta_msg,\n coverage_type=get_coverage_type_name(coverage_type),\n slugline=slugline,\n assignee=assignee,\n desk=desk_name,\n old_assignee=old_assignee,\n client_url=client_url,\n assignment_id=assignment_id,\n old_desk=desk_from_name,\n assignor=user.get('display_name'),\n assignment=assignment,\n event=event_item,\n omit_user=True,\n is_link=True,\n contact_id=assigned_to.get('contact'))\n else:\n # it is being reassigned by someone else so notify both the new assignee and the old\n 
PlanningNotifications().notify_assignment(target_user=original.get('assigned_to').get('user'),\n target_desk=original.get('assigned_to').get(\n 'desk') if original.get('assigned_to').get(\n 'user') is None else None,\n message='assignment_reassigned_3_msg',\n meta_message=meta_msg,\n coverage_type=get_coverage_type_name(coverage_type),\n slugline=slugline,\n assignee=assignee,\n client_url=client_url,\n assignment_id=assignment_id,\n desk=desk_name,\n assignor=user.get('display_name'),\n assignment=assignment,\n event=event_item,\n omit_user=True,\n is_link=True,\n contact_id=original.get('assigned_to').get('contact'))\n # notify the assignee\n assigned_from = original.get('assigned_to')\n assigned_from_user = get_resource_service('users').find_one(req=None,\n _id=assigned_from.get('user'))\n old_assignee = assigned_from_user.get('display_name') if assigned_from_user else None\n PlanningNotifications().notify_assignment(target_user=assigned_to.get('user'),\n message='assignment_reassigned_4_msg',\n meta_message=meta_msg,\n coverage_type=get_coverage_type_name(coverage_type),\n slugline=slugline,\n assignor=user.get('display_name', ''),\n old_assignee=' from ' + old_assignee\n if old_assignee else '',\n client_url=client_url,\n assignment_id=assignment_id,\n desk=desk_name,\n event=event_item,\n assignment=assignment,\n omit_user=True,\n is_link=True,\n contact_id=assigned_to.get('contact'))\n else: # A new assignment\n # Notify the user the assignment has been made to unless assigning to your self\n if str(user.get(config.ID_FIELD, None)) != assigned_to.get('user', ''):\n PlanningNotifications().notify_assignment(target_user=assigned_to.get('user'),\n message='assignment_assigned_msg',\n meta_message=meta_msg,\n coverage_type=get_coverage_type_name(coverage_type),\n slugline=slugline,\n client_url=client_url,\n assignment_id=assignment_id,\n assignor='by ' + user.get('display_name', '')\n if str(\n user.get(config.ID_FIELD, None)) != assigned_to.get(\n 'user', '') else 'to yourself',\n assignment=assignment,\n event=event_item,\n omit_user=True,\n is_link=True,\n contact_id=assigned_to.get('contact'))\n else: # Assigned/Reassigned to a desk, notify all desk members\n # if it was assigned to a desk before, test if there has been a change of desk\n if original.get('assigned_to') and original.get('assigned_to').get('desk') != updates.get(\n 'assigned_to', {}).get('desk'):\n # Determine the name of the desk that the assigment was allocated to\n assigned_from_desk = get_resource_service('desks').find_one(req=None,\n _id=original.get('assigned_to').get('desk'))\n desk_from_name = assigned_from_desk.get('name') if assigned_from_desk else 'Unknown'\n if original.get('assigned_to', {}).get('user', '') == str(user.get(config.ID_FIELD, None)):\n PlanningNotifications().notify_assignment(target_desk=assigned_to.get('desk'),\n message='assignment_to_desk_msg',\n meta_message='assignment_details_email',\n coverage_type=get_coverage_type_name(coverage_type),\n slugline=slugline,\n assign_type='reassigned',\n client_url=client_url,\n assignment_id=assignment_id,\n desk=desk_name,\n assignor=user.get('display_name'),\n assignment=assignment,\n event=event_item,\n omit_user=True,\n is_link=True,\n contact_id=assigned_to.get('contact'))\n else:\n PlanningNotifications().notify_assignment(target_desk=assigned_to.get('desk'),\n target_desk2=original.get('assigned_to').get('desk'),\n message='assignment_submitted_msg',\n meta_message='assignment_details_email',\n 
coverage_type=get_coverage_type_name(coverage_type),\n slugline=slugline,\n desk=desk_name,\n client_url=client_url,\n assignment_id=assignment_id,\n from_desk=desk_from_name,\n assignment=assignment,\n event=event_item,\n is_link=True,\n contact_id=assigned_to.get('contact'))\n else:\n assign_type = 'reassigned' if original.get('assigned_to') else 'assigned'\n PlanningNotifications().notify_assignment(target_desk=assigned_to.get('desk'),\n message='assignment_to_desk_msg',\n meta_message='assignment_details_email',\n coverage_type=get_coverage_type_name(coverage_type),\n slugline=slugline,\n assign_type=assign_type,\n client_url=client_url,\n assignment_id=assignment_id,\n desk=desk_name,\n assignor=user.get('display_name'),\n assignment=assignment,\n event=event_item,\n omit_user=True,\n is_link=True,\n contact_id=assigned_to.get('contact'))", "def _assign(request, obj, person_id):\n try:\n if request.method == \"POST\":\n person_id = request.POST.get('person_1', None)\n\n if person_id is None:\n obj.assigned_to = None\n else:\n person = Person.objects.get(pk=person_id)\n obj.assigned_to = person\n\n obj.save()\n\n except Person.DoesNotExist:\n raise Http404(\"No person found matching the query.\")", "def sync_group_assignee_inbound(integration, email, external_issue_key, assign=True):\n from sentry import features\n from sentry.models import Group, UserEmail, User\n\n logger = logging.getLogger('sentry.integrations.%s' % integration.provider)\n\n orgs_with_sync_enabled = []\n for org in integration.organizations.all():\n has_issue_sync = features.has('organizations:integrations-issue-sync',\n org)\n if not has_issue_sync:\n continue\n\n installation = integration.get_installation(org.id)\n if installation.should_sync('inbound_assignee'):\n orgs_with_sync_enabled.append(org.id)\n\n affected_groups = list(\n Group.objects.get_groups_by_external_issue(\n integration, external_issue_key,\n ).filter(project__organization_id__in=orgs_with_sync_enabled),\n )\n\n if not affected_groups:\n return []\n\n if not assign:\n for group in affected_groups:\n GroupAssignee.objects.deassign(group)\n return affected_groups\n\n users = {u.id: u for u in User.objects.filter(\n id__in=UserEmail.objects.filter(\n is_verified=True,\n email=email,\n ).values_list('user_id', flat=True),\n )}\n\n projects_by_user = get_user_project_ids(users.values())\n\n groups_assigned = []\n for group in affected_groups:\n try:\n user_id = [\n user_id for user_id, projects in projects_by_user.items()\n if group.project_id in projects\n ][0]\n except IndexError:\n logger.info(\n 'assignee-not-found-inbound',\n extra={\n 'integration_id': integration.id,\n 'email': email,\n 'issue_key': external_issue_key,\n }\n )\n else:\n user = users[user_id]\n GroupAssignee.objects.assign(group, user)\n groups_assigned.append(group)\n\n return groups_assigned", "def assign(self, assignee: np.ndarray):\n if isinstance(self.data, pd.DataFrame):\n self.data = pd.concat([self.data, assignee], axis=1, ignore_index=True)\n else:\n self.data = pd.DataFrame(data=assignee)", "def _process_assign(self, node: ast.Assign) -> None:\n if isinstance(node.value, ast.Call) and self._is_export_call(\n node.value.func\n ):\n # id = tf_export(...)(...)\n if len(node.targets) != 1:\n raise BadExportError(\n f'{self._current_file}:{node.lineno} export must be'\n f' assigned to a single value: {ast.dump(node)}'\n )\n symbol = self._name(node.targets[0])\n if not symbol:\n raise BadExportError(\n f'{self._current_file}:{node.lineno} export must be'\n f' assigned to 
a single value: {ast.dump(node)}'\n )\n self._add_exported_symbol(node.value.func, symbol)\n else:\n self.visit(node)", "def perform_exercise_assign(args):\n FBDPCommon.callSelectionHook(args, 'trades', 'exercise_assign_selection')\n e = Exercise('Exercise Assign', args['Testmode'], args)\n e.perform()\n e.end()\n\n # Fix physically settled future closeouts. Please see JIRA ABITFA-2562\n # for more detail about this fix.\n if args['DoFixPhysicals']:\n fixPhysicals(args['trades'], args['Testmode'])", "def reassign(self,assign_to,**kwargs):\n q=\"UPDATE bugs SET assigned_to=? WHERE \"\n params=[assign_to,]\n if \"bug_id\" in kwargs:\n q += \"ROWID=?\"\n params.append(kwargs[\"bug_id\"])\n elif self.NAME_COLUMN in kwargs:\n q += BugDB.NAME_COLUMN+\"=?\"\n params.append(kwargs[\"bug_name\"])\n else:\n raise FattyException(\"You must supply either a bug_id or a bug_name as a keyword argument. Not provided in kwargs: \"+str(kwargs))\n \n cur=self.cxn.cursor()\n cur.execute(q,params)\n self.cxn.commit()", "def update_assign_unassign(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"update_assign_unassign\"), kwargs)", "def status_assignee_reset(self):\n self.assigned_to = None\n self.status = 'new'\n self.primary_statute = None", "def test_handle_assign_assign_error(self):\r\n self.mock_facade.retrieve.return_value = Project(\"GTID\", [])\r\n team = Team(\"GTID\", \"team-name\", \"display-name\")\r\n team.team_leads.add(user)\r\n self.mock_facade.query.return_value = [team]\r\n self.assertTupleEqual(\r\n self.testcommand.handle(\"project assign ID team-name\",\r\n user),\r\n (self.testcommand.assigned_error, 200))", "def test_handle_force_assign(self):\r\n self.mock_facade.retrieve.return_value = Project(\"GTID\", [])\r\n team = Team(\"GTID\", \"team-name\", \"display-name\")\r\n team.team_leads.add(user)\r\n self.mock_facade.query.return_value = [team]\r\n self.assertTupleEqual(\r\n self.testcommand.handle(\"project assign ID team-name -f\",\r\n user),\r\n (\"Project successfully assigned!\", 200))", "def cleanup_new_bugs_with_assignee(self):\n logger.info(\"Cleanup new bugs with an assignee\")\n\n message = (\"@%s:\\n\\nSince you are set as assignee, I switch the \"\n \"status to 'In Progress'.\")\n subject = \"Cleanup\"\n\n project = self.client.projects[self.project_name]\n bug_tasks = project.searchTasks(status=['New'],\n omit_duplicates=True)\n switched_bugs = []\n\n for t in bug_tasks:\n bug_id = t.bug.id\n if bug_id in self.ignoreable_bug_ids:\n logger.debug(\"Ignore bug '%s'. 
\", bug_id)\n continue\n logger.debug(\"Checking bug '%s'\", bug_id)\n assignee = t.assignee\n if assignee is None:\n continue\n t.status = 'In Progress'\n switched_bugs.append(bug_id)\n content = message % assignee.display_name\n if self.dryrun:\n logger.debug(\"DRYRUN: I would switch bug '%s'\", bug_id)\n continue\n logger.debug(\"Switching status of bug '%s'\", bug_id)\n t.lp_save()\n t.bug.newMessage(content=content, subject=subject)\n\n logger.info(\"Switched bugs: '%s'\", switched_bugs)", "def assign(self, task: Task, artist: Artist):\n self.unassign(task)\n self.assignments.append(Assignment(artist, task))", "def hook_assign_assistance(self, data):\n request_id = data[\"request_id\"]\n assignee_chat_id = data[\"volunteer\"]\n log.info(\"ASSIGN req:%s to vol:%s\", request_id, assignee_chat_id)\n\n try:\n request_details = self.updater.persistence.bot_data[request_id]\n except KeyError:\n log.debug(\"No such request %s, ignoring\", request_id)\n return\n else:\n self.updater.dispatcher.bot_data[request_id].update(\n {\"time\": utc_short_to_user_short(data[\"time\"])}\n )\n\n # first of all, notify the others that they are off the hook and update their state accordingly\n for chat_id in request_details[\"volunteers\"]:\n if chat_id != assignee_chat_id:\n self.send_message(chat_id, c.MSG_ANOTHER_ASSIGNEE)\n updated_state = {\"state\": c.State.AVAILABLE, \"reviewed_request\": None}\n self.updater.dispatcher.user_data[chat_id].update(updated_state)\n\n self.updater.dispatcher.user_data[assignee_chat_id].update({\"current_request\": request_id})\n self.updater.dispatcher.update_persistence()\n\n # notify the assigned volunteer, so they know they're responsible; at this point they still have to confirm\n # that they're in good health and they still have an option to cancel\n self.updater.bot.send_message(\n chat_id=assignee_chat_id,\n text=c.MSG_CAUTION,\n reply_markup=InlineKeyboardMarkup(k.caution_choices),\n )", "def visit_Assign(self, node):\n self.generic_visit(node)\n is_multiple = len(node.targets) > 1\n is_compound = any(map(is_sequence_node, node.targets))\n is_simple = not is_compound\n if is_simple and is_multiple:\n return self.visit_simple_assign(node)\n elif is_compound and (is_multiple or is_sequence_node(node.value)):\n return self.visit_compound_assign(node)\n return node", "def single_assignment_handler(target, value, assign_stmts, node, id_str):\n #print(\"single_assignment_handler\")\n\n temp_stmts = core_language.create_Assign(target, value)\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n return False", "def send_referral_assigned(cls, referral, assignment, assigned_by):\n\n template_id = settings.SENDINBLUE[\"REFERRAL_ASSIGNED_TEMPLATE_ID\"]\n\n # Get the path to the referral detail view from the unit inbox\n link_path = FrontendLink.unit_referral_detail(\n unit=assignment.unit.id, referral=referral.id\n )\n\n data = {\n \"params\": {\n \"assigned_by\": assigned_by.get_full_name(),\n \"case_number\": referral.id,\n \"link_to_referral\": f\"{cls.location}{link_path}\",\n \"referral_users\": referral.get_users_text_list(),\n \"title\": referral.title or referral.object,\n \"topic\": referral.topic.name,\n \"unit_name\": assignment.unit.name,\n \"urgency\": referral.urgency_level.name,\n },\n \"replyTo\": cls.reply_to,\n \"templateId\": template_id,\n \"to\": [{\"email\": assignment.assignee.email}],\n }\n\n cls.send(data)", "def unassign(self, assignment, created_by):\n 
assignee = assignment.assignee\n assignment.delete()\n self.refresh_from_db()\n ReferralActivity.objects.create(\n actor=created_by,\n verb=ReferralActivityVerb.UNASSIGNED,\n referral=self,\n item_content_object=assignee,\n )\n # Check the number of remaining assignments on this referral to determine the next state\n assignment_count = ReferralAssignment.objects.filter(referral=self).count()\n\n if self.state == ReferralState.ASSIGNED and assignment_count == 0:\n return ReferralState.RECEIVED\n\n return self.state", "def visit_Assign(self, node: ast.Assign) -> None:\n # skip multiple assignments\n if len(node.targets) != 1:\n return\n\n # skip complex assignments\n if not isinstance(node.targets[0], ast.Name):\n return\n\n name = node.targets[0].id\n\n # skip private attributes\n if name.startswith(\"_\"):\n return\n\n self.attribute_nodes.append(node)", "def assigned_to_changed(self, ar):\n # self.add_change_watcher(self.assigned_to)\n\n if (self.assigned_to is not None and\n self.assigned_to != ar.user and\n dd.is_installed('notify')):\n ctx = dict(user=ar.user, what=ar.obj2memo(self))\n def msg(user, mm):\n subject = _(\"{user} has assigned you to ticket: {what}\").format(**ctx)\n return (subject , tostring(E.span(subject)))\n\n mt = rt.models.notify.MessageTypes.tickets\n\n rt.models.notify.Message.emit_notification(\n ar, self, mt, msg,\n [(self.assigned_to, self.assigned_to.mail_mode)]\n )", "def get_assign(self):\n return self.assign", "def assure_tender_assigned_to_user(self, tender_new_id, assigned_user):\n tenders_from_admin = ToDoTenders(division_admin_login, universal_password) # only admin see all chains\n\n all_tender_id_responsibles_chains = tenders_from_admin.get_all_assigned_users_for_tenders(\n tenders_from_admin.get_tenders_with_responsibles('in_work'))\n\n for chain in all_tender_id_responsibles_chains:\n if chain['tender_new_id'] == tender_new_id:\n for res in chain['responsibles']:\n if res['emailAddress'] == assigned_user:\n return True\n else:\n pass", "def assigned_to(self) -> Optional[str]:\n return pulumi.get(self, \"assigned_to\")", "def send_case_assignment_slack(issue: str):\n assert issue.paralegal, f\"Assigned paralegal not found for Issue<{issue.pk}>\"\n assert issue.lawyer, f\"Assigned lawyer not found for Issue<{issue.pk}>\"\n logging.info(\n \"Notifying User<%s> of assignment to Issue<%s>\", issue.paralegal.pk, issue.pk\n )\n slack_user = get_slack_user_by_email(issue.paralegal.email)\n if slack_user:\n msg = CASE_ASSIGNMENT_MSG.format(\n case_start_date=issue.created_at.strftime(\"%d/%m/%Y\"),\n client_name=issue.client.get_full_name(),\n fileref=issue.fileref,\n lawyer_email=issue.lawyer.email,\n lawyer_name=issue.lawyer.get_full_name(),\n paralegal_name=issue.paralegal.get_full_name(),\n case_url=settings.CLERK_BASE_URL\n + reverse(\"case-detail-view\", args=(str(issue.pk),)),\n )\n send_slack_direct_message(msg, slack_user[\"id\"])\n else:\n logger.error(f\"Slack user not found for User<{issue.paralegal.pk}>\")" ]
[ "0.6341137", "0.6102024", "0.59917015", "0.59198666", "0.5881656", "0.5821527", "0.5817272", "0.57891667", "0.57301235", "0.5691763", "0.56128204", "0.55561805", "0.55232775", "0.528361", "0.52553594", "0.52330756", "0.5215624", "0.51927084", "0.5185444", "0.5087694", "0.5084273", "0.5079449", "0.507938", "0.5066525", "0.50216454", "0.5019266", "0.50061566", "0.5003353", "0.49111885", "0.49078456" ]
0.7126472
1
return the current position in axis x
def get_pos_x(self): return self.__pos_x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_x(self):\n return self.position.x", "def get_x(self):\n return self.posX", "def get_x_position(self):\n return self.rect.x", "def get_axis_x(self):\r\n return self.__x_axis", "def get_x_position(self):\n return self.actual_coordinates[0]", "def xaxis ( self ) :\n return self.__xaxis", "def xaxis ( self ) :\n return self.__xaxis", "def xaxis ( self ) :\n return self.__xaxis", "def x(self):\r\n return self.position.x", "def get_x(self):\n return self.coords[0]", "def x(self):\n return self.coords[0]", "def Getxcoord(self):\n return self.x_coord", "def xaxis(self):\n return self._xaxis", "def x(self):\n return self._coords[0]", "def getX(self):\n return self.position[0]", "def getPosition(self):\n return self.x", "def x(self):\n return self.axes[1]", "def __get_x__(self):\n return self.Direction['x']", "def x(self):\n return _libsbml.Point_x(self)", "def origin_x(self):\n return self._origin[0]", "def getX(self):\n return self.position.getX()", "def x_coord(self):\n\n return self.x0 + np.arange(self.nx) * self.dx", "def x(self):\n if self._x is None:\n self.compute_coordinates()\n return self._x", "def pos_x(self, *args, **kwargs) -> Any:\n pass", "def getX(self):\n return self.x", "def get_x(self) -> int:\n return self.__x", "def x_origin(self):\n return self._x_origin", "def get_origin_x_position(self):\n return self.origin_coordinates[0]", "def getX(self):\r\n\t\treturn self._x", "def GetX(self):\r\n\r\n return self._x" ]
[ "0.84356475", "0.8302575", "0.825707", "0.8191136", "0.81628805", "0.809894", "0.808944", "0.808944", "0.8071701", "0.79766065", "0.7853635", "0.7823813", "0.77839094", "0.7724001", "0.7704104", "0.7700276", "0.76920474", "0.76795393", "0.7627869", "0.7589636", "0.75225663", "0.7506523", "0.7503354", "0.7497437", "0.7451979", "0.7447604", "0.7425516", "0.7413771", "0.7396919", "0.7333906" ]
0.8329716
1
return the current speed in axis x
def get_speed_x(self): return self.__speed_x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_speed_x(self):\r\n return self.__X_speed", "def speedup_x(self):\r\n new_speed = math.cos((self.__direction*math.pi)/180) + self.__X_speed\r\n self.__X_speed = new_speed", "def getXVelocity(self):\n return self.xvelocity", "def get_speed(self):\r\n return self.__x_speed, self.__y_speed", "def get_axis_x(self):\r\n return self.__x_axis", "def set_speed_x(self, new_speed):\n self.__speed_x = new_speed", "def getVelX(self):\n return self.posvel.getX()", "def __get_x__(self):\n return self.Direction['x']", "def xaxis ( self ) :\n return self.__xaxis", "def xaxis ( self ) :\n return self.__xaxis", "def xaxis ( self ) :\n return self.__xaxis", "def x(self):\r\n return self.position.x", "def getXVelocity(self):\n return self._vx", "def get_x(self) -> int:\n return self.__x", "def _get_x(self):\n return self.position.x", "def x(self):\n return self._turtle.xcor()", "def x(self):\n return self._turtle.xcor()", "def x(self) -> float:\n return self.data[0]", "def x(self):\n return self.x", "def acceleration(self):\n # speed is by formula: x axis speed: by cos of the heading and y\n # axis by sine of the heading\n self.x_speed += math.cos(math.radians(self.degrees))\n self.y_speed += math.sin(math.radians(self.degrees))", "def get_x(self):\n return self.posX", "def getAngVelX(self):\n return self.angvel.getX()", "def getStartSpeed(self):\n cmd_string = '?1'\n data = self.sendRcv(cmd_string)\n self.state['start_speed'] = int(data)\n return self.state['start_speed']", "def xaxis(self):\n return self._xaxis", "def speed(self) -> str:\n return self._current_speed", "def speed(self):\n return self._speed.value", "def speed(self):\n return sqrt(self.velocity_x ** 2 + self.velocity_y ** 2)", "def x(self):\n self._sort_measurements()\n return self._distances*np.cos(self._angles)", "def set_velocity_x(self):\n self.__dx *= -1", "def getX(self):\n return self.x" ]
[ "0.8555267", "0.7207319", "0.71035296", "0.68359816", "0.6763808", "0.67338073", "0.6704949", "0.66456616", "0.65846103", "0.6549981", "0.6549981", "0.651929", "0.6485276", "0.6482039", "0.6412599", "0.63956916", "0.63956916", "0.6384349", "0.6323819", "0.63225263", "0.63196826", "0.6316279", "0.6298807", "0.6288256", "0.62337613", "0.6229724", "0.6227898", "0.62270343", "0.6224416", "0.62209773" ]
0.84871364
1
return the current speed in axis y
def get_speed_y(self): return self.__speed_y
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_speed_y(self):\r\n return self.__y_speed", "def speedup_y(self):\r\n new_speed = math.sin((self.__direction*math.pi)/180) + self.__y_speed\r\n self.__y_speed = new_speed", "def verticalspeed(self):\n return self.__vertspeed.value", "def get_axis_y(self):\r\n return self.__y_axis", "def get_speed(self):\r\n return self.__x_speed, self.__y_speed", "def getYVelocity(self):\n return self.yvelocity", "def y(self) -> float:\n return self.data[1]", "def getVelY(self):\n return self.posvel.getY()", "def __get_y__(self):\n return self.Direction['y']", "def get_cmd_velocity(self):\n return self.gripper_io.get_signal_value(\"speed_mps\")", "def getYVelocity(self):\n return self._vy", "def get_max_y(self) -> float:\n return self.pendulum2.get_max_y()", "def get_y(self):\n return self.__y", "def get_speed(self):\n return self._speed", "def speed(self):\n return self._getAttribute(Attribute.maxVelocity)", "def y(self):\n return self.y", "def set_speed_y(self, new_speed):\n self.__speed_y = new_speed", "def yaxis ( self ) :\n return self.__yaxis", "def yaxis ( self ) :\n return self.__yaxis", "def y(self):\n return self._turtle.ycor()", "def y(self):\n return self._turtle.ycor()", "def Y(self):\n return self.y\n pass", "def y(self):\n self._sort_measurements()\n return self._distances*np.sin(self._angles)", "def speed(self):\n return self._turtle.speed()", "def speed(self):\n return self._turtle.speed()", "def speed(self):\n return sqrt(self.velocity_x ** 2 + self.velocity_y ** 2)", "def GetY(self):\r\n\r\n return self._y", "def y(self):\n return self.__y", "def y(self):\n return self.__y", "def y(self):\n return self.__y" ]
[ "0.8572007", "0.7324329", "0.7246045", "0.7071657", "0.70214754", "0.69910544", "0.6868786", "0.681344", "0.680576", "0.67981166", "0.67907566", "0.6772865", "0.6765004", "0.67371374", "0.6736603", "0.6687959", "0.6687255", "0.6685152", "0.6685152", "0.66710705", "0.66710705", "0.6651276", "0.6642843", "0.66191614", "0.66191614", "0.6607635", "0.660415", "0.66031665", "0.66031665", "0.66031665" ]
0.8426076
1
set new speed (new_speed) in axis x for the torpedo
def set_speed_x(self, new_speed): self.__speed_x = new_speed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def speedup_x(self):\r\n new_speed = math.cos((self.__direction*math.pi)/180) + self.__X_speed\r\n self.__X_speed = new_speed", "def set_speed(self, axis, speed):\n #log.info(f\"set speed {axis} {speed}\")\n self.cmd_axis_speed[axis] = speed", "def set_speed(self, new_speed):\n self.__x_speed, self.__y_speed = new_speed", "def changespeed(self, x):\n self.change_x += x * self.velocity", "def changespeed(self, x, y):\n self.change_x += x\n self.change_y += 200", "def changespeed(self, x, y):\n self.change_x += x\n self.change_y += y", "def changespeed(self, x, y):\n self.change_x += x\n self.change_y += y", "def changespeed(self, x, y):\n self.change_x += x\n self.change_y += y", "def setDirectionTowardPoint(self, x, y, speed):\n currX = self.xcor()\n currY = self.ycor()\n # get actual vector from t to x,y\n dXactual = x - currX\n dYactual = y - currY\n\n # get the length of that vector. Can also use turtle.distance\n length = math.hypot(dXactual, dYactual)\n\n # now scale the vector\n try:\n self.dx = dXactual / length * speed\n self.dy = dYactual / length * speed\n except:\n self.dx = 0\n self.dy = 0", "def changespeed(self, x1, y1):\n self.change_x += x1\n self.change_y += y1", "def set_speed(self,speed):\n self.speed_p = speed", "def tick(self, dt):\n self.x += dt * self.x_speed\n self.y += dt * self.y_speed", "def set_speed(self,speed):\n self.speed = speed", "def set_speed(self, speed):\n self._kernel.set_speed(float(speed))", "def set_cmd_velocity(self, speed):\n self.gripper_io.set_signal_value(\"speed_mps\", speed)", "def increment_speed(self):\n self.speed += 0.0004", "def set_x(self, new_x):\r\n self.x = new_x", "def change_speed(self, speed):\n\n self.__corrds[self.X_SPEED], self.__corrds[self.Y_SPEED] = speed\n if type(speed) != tuple:\n raise ValueError('speed must be a tuple of the form (sp_x, sp_y)')", "def set_speed_y(self, new_speed):\n self.__speed_y = new_speed", "def set_speed(self, speed):\n self.speed = speed", "def customize_torpedo_speed(self, current_gameboard, turn, new_speed):\n current_gameboard['torpedo_speed'][turn] = new_speed", "def addLeftSpeed(self, newSpeed):\n ns = self.leftWeelSpeed + newSpeed\n ns = min(ns, self.maxSpeed)\n ns = max(ns, self.minSpeed)\n e = vrep.simxSetJointTargetVelocity(self.clientID, self.leftMotor, ns, vrep.simx_opmode_oneshot_wait)\n self.erCheck(e, 'leftMotor')", "def move(self) -> None:\r\n self._x += self._speed", "def set_speed(self, v):\n self.v = v", "def set_velocity_x(self):\n self.__dx *= -1", "def set_axis_x(self, new_axis_point):\r\n self.__x_axis = new_axis_point", "def set_x(self, x: float):\n self.x = x", "def get_speed_x(self):\r\n return self.__X_speed", "def move_set_speed(self, speed):\n # self.motor_set_speed(MOTOR_LEFT, speed)\n # self.motor_set_speed(MOTOR_RIGHT, speed)\n self.move_speed = speed\n print(\"move_speed is now:\", self.move_speed)", "def setSpeed(self, v):\n\t\tconverted = self.convertSpeed(v)\n\t\tprint(converted)\n\t\t# set both stage speeds\n\t\tself.zaberSend(self.translation[\"hor\"], self.cmd[\"setTargetSpeed\"], data = converted)\n\t\tself.zaberSend(self.translation[\"ver\"], self.cmd[\"setTargetSpeed\"], data = converted)" ]
[ "0.7153202", "0.69229406", "0.6859478", "0.6688785", "0.66181517", "0.6542605", "0.6542605", "0.6542605", "0.65331244", "0.64134264", "0.6341696", "0.62807065", "0.6228572", "0.6213521", "0.61853576", "0.617665", "0.61610395", "0.6149115", "0.6138381", "0.6097479", "0.6094318", "0.60709643", "0.6053674", "0.6051197", "0.60379624", "0.60356253", "0.60256505", "0.60212755", "0.6003752", "0.59753036" ]
0.79879344
0
set new speed (new_speed) in axis y for the torpedo
def set_speed_y(self, new_speed): self.__speed_y = new_speed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def speedup_y(self):\r\n new_speed = math.sin((self.__direction*math.pi)/180) + self.__y_speed\r\n self.__y_speed = new_speed", "def customize_torpedo_speed(self, current_gameboard, turn, new_speed):\n current_gameboard['torpedo_speed'][turn] = new_speed", "def set_speed(self, new_speed):\n self.__x_speed, self.__y_speed = new_speed", "def set_axis_y(self, new_axis_point):\r\n self.__y_axis = new_axis_point", "def set_speed(self, axis, speed):\n #log.info(f\"set speed {axis} {speed}\")\n self.cmd_axis_speed[axis] = speed", "def __update_speed_stop(self):\n if self.velocidade > SERVO_DUTY_CYCLE_MEIO:\n self.velocidade -= self.incremento_veloc\n \n # Para mesmo que haja arredondamento de float\n if self.velocidade <= SERVO_DUTY_CYCLE_MEIO:\n self.velocidade = SERVO_DUTY_CYCLE_MEIO\n self.servo.set_duty_cycle(0.0)\n else:\n self.servo.set_duty_cycle(self.velocidade)\n elif self.velocidade < SERVO_DUTY_CYCLE_MEIO:\n self.velocidade += self.incremento_veloc\n \n # Para mesmo que haja arredondamento de float\n if self.velocidade >= SERVO_DUTY_CYCLE_MEIO:\n self.velocidade = SERVO_DUTY_CYCLE_MEIO\n self.servo.set_duty_cycle(0.0)\n else:\n self.servo.set_duty_cycle(self.velocidade)\n else:\n self.servo.set_duty_cycle(0.0)", "def set_velocity_y(self):\n self.__dy *= -(1+SPEED_UP)", "def changespeed(self, x, y):\n self.change_x += x\n self.change_y += 200", "def set_ypos(self, deg):\n if deg < -10:\n deg = -10\n elif deg > 10:\n deg = 10\n deg += 10\n self.kit.servo[8].angle = deg", "def set_y(self, new_y):\r\n self.y = new_y", "def set_vals(self, speed=0, spin=0):\n self.twist.linear.x = speed; self.twist.linear.y = 0; self.twist.linear.z = 0\n self.twist.angular.x = 0; self.twist.angular.y = 0; self.twist.angular.z = spin", "def set_cmd_velocity(self, speed):\n self.gripper_io.set_signal_value(\"speed_mps\", speed)", "def set_speed(self,speed):\n self.speed_p = speed", "def setWheelsSpeed(self, dc_motor_speed):\n self.dcmotorSpeed = dc_motor_speed # changed rightSpeed to dcmotorSpeed and right to\n self.updatePWM()", "def changespeed(self, x, y):\n self.change_x += x\n self.change_y += y", "def changespeed(self, x, y):\n self.change_x += x\n self.change_y += y", "def changespeed(self, x, y):\n self.change_x += x\n self.change_y += y", "def get_speed_y(self):\r\n return self.__y_speed", "def setSpeed(self, v):\n\t\tconverted = self.convertSpeed(v)\n\t\tprint(converted)\n\t\t# set both stage speeds\n\t\tself.zaberSend(self.translation[\"hor\"], self.cmd[\"setTargetSpeed\"], data = converted)\n\t\tself.zaberSend(self.translation[\"ver\"], self.cmd[\"setTargetSpeed\"], data = converted)", "def addRightSpeed(self, newSpeed):\n ns = self.rightWeelSpeed + newSpeed\n ns = min(ns, self.maxSpeed)\n ns = max(ns, self.minSpeed)\n e = vrep.simxSetJointTargetVelocity(self.clientID, self.rightMotor, ns, vrep.simx_opmode_oneshot_wait)\n self.erCheck(e, 'rightMotor')", "def set_speed(self,speed):\n self.speed = speed", "def setY(self, value):\n self.components[1] = value", "def setY(self, value):\n self.components[1] = value", "def ystep(self):\n\n # Update t step\n tprv = self.t\n self.t = self.momentum.update(self.var_momentum())\n\n # Update Y\n if self.opt['Monotone'] and self.k > 0:\n self.Y = self.X + (tprv / self.t) * (self.ZZ - self.X) \\\n + ((tprv - 1.) / self.t) * (self.X - self.Xprv)\n else:\n self.Y = self.X + ((tprv - 1.) 
/ self.t) * (self.X - self.Xprv)", "def change_speed(self, speed):\n\n self.__corrds[self.X_SPEED], self.__corrds[self.Y_SPEED] = speed\n if type(speed) != tuple:\n raise ValueError('speed must be a tuple of the form (sp_x, sp_y)')", "def set_speed(self, speed):\n self._kernel.set_speed(float(speed))", "def settle(self):\n if (self.angle >= self.max_angle) or (\n self.angle <= -self.max_angle\n ): # time to reverse\n print(\"reverse\", self.angle, self.max_angle)\n self.speed *= -0.9 # damped\n self.max_angle *= 0.9\n if self.speed > 0:\n self.angle = self.max_angle\n else:\n self.angle = -self.max_angle\n\n self.angle += radians(self.speed)\n print(self.angle, self.max_angle, self.speed)\n self.x = self.cx + self.length * sin(self.angle)\n self.y = self.cy + self.length * cos(self.angle)", "def vel_y(self, *args, **kwargs) -> Any:\n pass", "def set_speed(self, speed):\n self.speed = speed", "def set_speed(self, v):\n self.v = v" ]
[ "0.74478376", "0.7266299", "0.6885508", "0.68010527", "0.67818487", "0.65538603", "0.64874476", "0.6445851", "0.6438389", "0.64053863", "0.6376815", "0.6363942", "0.63039124", "0.63012075", "0.62974465", "0.62974465", "0.62974465", "0.62857443", "0.6271287", "0.62608033", "0.619561", "0.6181049", "0.6181049", "0.61725163", "0.61712176", "0.61208004", "0.6107542", "0.6095212", "0.60667175", "0.6055213" ]
0.8085644
0
set new position (new_pos) in axis x for the torpedo
def set_new_pos_in_x(self, new_pos): self.__pos_x = new_pos
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setXPos(self,newXPos):\n self.xPos=newXPos", "def set_axis_x(self, new_axis_point):\r\n self.__x_axis = new_axis_point", "def setX(self, value):\n self.position[0] = value", "def set_pos(self, x):\n self._pos = x", "def set_x(self, new_x):\r\n self.x = new_x", "def setX(self, x):\n self.position.setX(x)", "def adjust_x_pos():\n pos = self.variables.table.get_current_position()\n self.variables.table.set_joystick(False)\n self.variables.table.set_axis(\n [True, True, True]\n ) # so all axis can be adressed\n xpos = self.table_move_ui.x_move.value()\n error = self.variables.table.move_to(\n [xpos, pos[1], pos[2]],\n True,\n self.variables.default_values_dict[\"settings\"][\"height_movement\"],\n )\n # if error:\n # self.variables.message_to_main.put(error)\n self.variables.table.set_joystick(True)\n self.variables.table.set_axis(\n [True, True, False]\n ) # so z axis cannot be adressed", "def pos_x(self, *args, **kwargs) -> Any:\n pass", "def set_position(self, axis, x):\n\n if not self.enabled:\n return\n\n self.send_cmd(axis, ' POS={:.3f}'.format(x))\n return float(self.get_position(axis))", "def set_pos(self, x, y, orien):\n self.pos_x = x\n self.pos_y = y\n self.orientation = orien", "def set_x(self, x):\n self.scene.set_x_loc(x)\n self.redraw()", "def setPosition(self):\n self.data['pos-x'] = \"%s\" % self.x()\n self.data['pos-y'] = \"%s\" % self.y()", "def setPose(self, newPosition):\n self.origin1 = newPosition\n self.axis1 = self.G_gl[0:3, 0:3] @ self.axis0", "def x(self, destination):\n destination = (destination, self.center[1])\n self.move(destination=destination, origin=self.center, axis=\"x\")", "def set_drone_position(self, new_point):\n self.drone.set_drone_position(new_point)", "def setPosition(self,x):\n if x is None:\n self.x = Cartesian3DVector()\n else:\n if isinstance(x,Cartesian3DVector):\n self.x = Cartesian3DVector(x.x,x.y,x.z)\n else:\n raise CoordinateException(\"Initializing a particle with the incorrect position vector type.\")", "def set_xpos(self, deg):\n if deg < 0:\n deg = 0\n if deg > 90:\n deg = 90\n deg = deg*2\n self.kit.servo[7].angle = deg", "def setPos(self,pos):\n self.Xpos,self.Ypos=pos", "def setX(self, x):\r\n\t\tself._x=x", "def new_position(self, p):\n if self.track:\n self.gnx = p.gnx\n else:\n p = self.get_position()\n\n self.new_position_edit(p)\n self.new_position_view(p)", "def setDesiredPosition(self, x, y):\n (self.setX, self.setY) = (x , y)", "def setzePosition(self, x, y):\n self.zielX = x\n self.zielY = y", "def set_speed_x(self, new_speed):\n self.__speed_x = new_speed", "def set_new_location(self, xPos, yPos):", "def setPosition(position):", "def setX(self, x):\n self.x = x\n pass", "def set_position(self, x, y):\n self.geometry('%s%s' % (x, y))", "def update(self):\n self.pos_x -=1", "def cambiovelocidad(self,x,y):\n self.change_x += x\n self.change_y += y", "def position(self, x, y):\n self.x = x \n self.y = y\n self.pos[0] = x \n self.pos[1] = y" ]
[ "0.7246535", "0.7111097", "0.70392", "0.6978868", "0.6963708", "0.6886643", "0.68568933", "0.6825037", "0.6757608", "0.6701941", "0.6696527", "0.6664855", "0.6620675", "0.6489275", "0.64415085", "0.63936675", "0.63880914", "0.634943", "0.6321042", "0.6301134", "0.62831503", "0.62813795", "0.6268617", "0.6257055", "0.62444186", "0.6217963", "0.62067443", "0.620602", "0.62049335", "0.6202651" ]
0.7426567
0
set new position (new_pos) in axis y for the torpedo
def set_new_pos_in_y(self, new_pos): self.__pos_y = new_pos
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_axis_y(self, new_axis_point):\r\n self.__y_axis = new_axis_point", "def set_y(self, new_y):\r\n self.y = new_y", "def set_ypos(self, deg):\n if deg < -10:\n deg = -10\n elif deg > 10:\n deg = 10\n deg += 10\n self.kit.servo[8].angle = deg", "def setY(self, value):\n self.position[1] = value", "def setYPos(self,newYPos):\n self.yPos=newYPos", "def adjust_y_pos():\n pos = self.variables.table.get_current_position()\n self.variables.table.set_joystick(False)\n self.variables.table.set_axis(\n [True, True, True]\n ) # so all axis can be adressed\n ypos = self.table_move_ui.y_move.value()\n error = self.variables.table.move_to(\n [pos[0], ypos, pos[2]],\n self.variables.default_values_dict[\"settings\"][\"height_movement\"],\n )\n # if error:\n # self.variables.message_to_main.put(error)\n self.variables.table.set_joystick(True)\n self.variables.table.set_axis(\n [True, True, False]\n ) # so z axis cannot be adressed", "def pos_y(self, *args, **kwargs) -> Any:\n pass", "def set_y(self, y):\n self.scene.set_y_loc(y)\n self.redraw()", "def y(self, destination):\n destination = (self.center[0], destination)\n self.move(destination=destination, origin=self.center, axis=\"y\")", "def setPose(self, newPosition):\n self.origin1 = newPosition\n self.axis1 = self.G_gl[0:3, 0:3] @ self.axis0", "def setY(self, value):\n self.components[1] = value", "def setY(self, value):\n self.components[1] = value", "def set_speed_y(self, new_speed):\n self.__speed_y = new_speed", "def setY(self, y):\n self.position.setY(y)", "def update_ballpos(self,pos):\n if self.options.visualize_switch_xy:\n self.col.set_offsets(pos[:,::-1]) # reverse x-y direction\n else:\n self.col.set_offsets(pos)", "def speedup_y(self):\r\n new_speed = math.sin((self.__direction*math.pi)/180) + self.__y_speed\r\n self.__y_speed = new_speed", "def set_y(self,Y):\n self.posY = Y", "def setY(self, *args):\n return _libsbml.Point_setY(self, *args)", "def set_y(self, y: float):\n self.y = y", "def setY(self, y):\r\n\t\tself._y=y", "def set_new_location(self, xPos, yPos):", "def set_pos(self, x, y, orien):\n self.pos_x = x\n self.pos_y = y\n self.orientation = orien", "def setY(self, y):\n self.y = y\n pass", "def vel_y(self, *args, **kwargs) -> Any:\n pass", "def moveY(self, delta):\n self.setY(delta + self.getY())", "def set_y(self, y):\n self._y = y", "def y(self, value):\n if not (0 < value < SCREEN_HEIGHT - self.height):\n self.dir_y = -self.dir_y\n self._y += abs(self._y - value) * self.dir_y", "def set_velocity_y(self):\n self.__dy *= -(1+SPEED_UP)", "def update_pose(self, data):\n self.pose = data\n \n self.pose.y = round(self.pose.y, 6)", "def setY(self, *args):\n return _libsbml.BoundingBox_setY(self, *args)" ]
[ "0.7400605", "0.7042508", "0.70301294", "0.70165414", "0.69808483", "0.67663985", "0.66854906", "0.6540774", "0.6528592", "0.65152085", "0.64745337", "0.64745337", "0.6468849", "0.64469284", "0.6417484", "0.6325399", "0.6307254", "0.6282218", "0.62483877", "0.6222319", "0.62108356", "0.6198408", "0.61950165", "0.6165477", "0.6158491", "0.6147068", "0.6128809", "0.60477996", "0.6026903", "0.59689367" ]
0.7415262
0
set the new number of lives (new_number_of_lives) of the torpedo
def set_lives(self, new_number_of_lives): self.__lives = new_number_of_lives
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setLives(self, lives):\n assert type(lives) == int\n self._lives = lives", "def set_lives(self, lives):\n self._lives = lives", "def update_lives(self, amount):\n self.lives += amount", "def setNbLives(self, nb_lives: int) -> None:\n self._nbLives = nb_lives\n if self._nbLives <= 0:\n self._isAlive = False\n else:\n self._isAlive = True", "def change_lives(self, dl):\n\t\tself._lives += dl\n\t\tif dl < 0:\n\t\t\tself._invincible = 100\n\t\t\tself.jump()", "def setLives(self,life):\n self._lives = life", "def setlife(self,life):\n self.vida=life", "def set_life(self):\n self.life -= 1", "def set_tries(self,lives):\n self._tries = lives", "def set_life(self, value):\n self._life = value", "def lose_life(self):\n self.__num_lives -= 1", "def lose_life(self):\n self.lives -= 1\n self.alive = self.calculate_alive()", "def _decrease_lives(self, player):\n player.lives -= 1\n if player.lives:\n self.dead_player = True\n player.is_alive = False\n else:\n self.game_over = True", "def remove_life(self):\r\n if self.__lives < 0:\r\n self.__lives -= 1", "def liver(self, liver):\n\n self.logger.debug(\"In 'liver' setter.\")\n\n self._liver = liver", "def get_lives(self):\n return self.__num_lives", "def generate_lives(self, score, shooter):\n self.lives = random.randint(1, (score.score * shooter.damage // 4 + 1))", "def update_lives(self, lives):\n self.lives_text = str(lives)\n self.lives_text = self.font_18.render(\n self.lives_text, True, pg.Color('black'))\n self.lives_text_rect = self.lives_text.get_rect(\n topleft=self.lives_text_pos)", "def lives_counter(self):\n count = 15\n for row in self.board:\n for column in row:\n if column == HITSHIP:\n count -= 1\n self.lives = count\n return self.lives", "def _activate(self):\n self.game.lives += 1", "def perde_life(self, dano):\n if self.life > 0 and self.conta_tempo_morte == fps * 5:\n self.life-= dano", "def customize_torpedo_speed(self, current_gameboard, turn, new_speed):\n current_gameboard['torpedo_speed'][turn] = new_speed", "def get_lives(self) -> int:\n return self.rstate.lives()", "def set_age(self, newage):\n self.age = newage", "def set_legs(self, number_of_legs):\n self.legs = number_of_legs", "def update(self):\n self.age += 1\n self.starve -= 1\n if self.starve < 1:\n self.alive = False\n self.move()", "def increase_age(self, nr):\n self.age = self.age + nr", "def setVelocity(self, new_vel):\n\n self.vel = limiter(new_vel)", "def lives(self) -> int:\n return self.__state.lives()", "def setGoalLength(self, length):\n assert isinstance(length, int)\n self.goal_length = length" ]
[ "0.7700371", "0.7667687", "0.7629367", "0.73052084", "0.72219425", "0.7182382", "0.6548997", "0.6420976", "0.6353151", "0.6246695", "0.62098116", "0.5965814", "0.5958923", "0.58790517", "0.5875401", "0.56700194", "0.5605648", "0.55878407", "0.5576551", "0.5528152", "0.5494097", "0.54636896", "0.5453058", "0.5407311", "0.540617", "0.5399252", "0.53672457", "0.5334266", "0.5246785", "0.5233075" ]
0.8256307
0
Initialize the object with a placeholder value of 1.
def __init__(self) -> None:
    super().__init__()
    self.placeholder = 1.0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, value=1.0):\n self.value = value", "def __init__(self,value = 0):\n\n self.value = value", "def __init__(self):\n super().__init__()\n self._value = 0", "def __init__(self, BLANK=0):\n self.BLANK = BLANK", "def __init__(self, number=0):\n pass", "def __init__(self, value=None):\n self.set(value)", "def __init__(self, **kwargs):\n self.is_initialized = False\n self.delta = 1", "def __init__(self, a = None):\n if a is None:\n self.a = 0.0\n else:\n self.a = a", "def test_init_with_default_value(self):\n dim = Integer(\"yolo\", \"uniform\", -3, 10, default_value=2)\n\n assert type(dim.default_value) is int", "def __init__(__self__, *,\n number: int):\n pulumi.set(__self__, \"number\", number)", "def __init__(self):\n self.val = None", "def __init__(self):\n super(RobinBoundary, self).__init__()\n self.value = RobinBoundary.value\n RobinBoundary.value += 1\n self.update(param=\"1\")", "def __init__(self):\n self.a = 0\n self.b = 1", "def __init__(self, value=None):", "def __init__(self, value=None):", "def __init__(self, value):\r\n self.value = value", "def __init__(self, value):\n self.value = value", "def __init__(self, value):\n self.value = value", "def __init__(self, value):\n self.value = value", "def __init__(self, value):\n self.value = value", "def __init__(self, value):\n self.value = value", "def __init__(self, value):\n self.value = value", "def __init__(self, value):\n self.value = value", "def test_new_instance_defaults_to_zero(self):\r\n instance = TestCounterModel()\r\n assert instance.counter == 0", "def __init__(__self__, *,\n number: Optional[pulumi.Input[int]] = None):\n if number is not None:\n pulumi.set(__self__, \"number\", number)", "def __init__(self,value):\n self.value = value", "def initial(self):\n return zero", "def _initialize(self, index, value):\n # by default we just set corresponding value\n self.setvalue(index, value)", "def __init__(self) -> None:\n # Values are already set on __new__.\n # Override this method when value modification on initialization is\n # required.\n raise NotImplementedError()", "def __init__ (self):\n self.x = 10" ]
[ "0.69245744", "0.6914013", "0.68501645", "0.6800856", "0.6740337", "0.6642504", "0.65677077", "0.65439326", "0.6520527", "0.64555895", "0.6452134", "0.64311016", "0.64110297", "0.6386236", "0.6386236", "0.6364008", "0.6297296", "0.6297296", "0.6297296", "0.6297296", "0.6297296", "0.6297296", "0.62912107", "0.6281748", "0.627147", "0.6188166", "0.61838025", "0.6137344", "0.6124199", "0.6105701" ]
0.7242961
0
Return an empty RequiredParameters object.
def _required_parameters(self) -> RequiredParameters: return RequiredParameters([])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_required_params():\n return {}", "def get_empty_required_fields(self):\n empty_fields = self.get_empty_fields()\n return [f for f in empty_fields if f in self.REQUIRED_FIELDS]", "def get_required_parameters(self) -> list:\n results = []\n if self.no_params or self.params_optional:\n return []\n else:\n for parameter, parameter_details in self.parameters.items():\n # Fixing issue #92\n # if parameter == \"effect\":\n # continue\n if not parameter_details.default_value:\n results.append(parameter_details.name)\n return results", "def get_mandatory_param_names(self):\n all_names = self.params.keys()\n return [name for name in all_names \n if not self.params[name].is_optional]", "def params_required(self) -> bool:\n if self.no_params or self.params_optional:\n return False\n else:\n return True", "def assignRequiredValues(self):\n return _libsbml.Model_assignRequiredValues(self)", "def _mandatory_structure(self):\n self.mandatory_structure = {\n 'title': str,\n 'description': str,\n 'authors': [dict],\n 'defined_type': str,\n }", "def required(cls):\n return []", "def _get_mandatory_parameters(template_dict):\n # type: (Dict) -> Dict[str, str]\n mandatory_params = {}\n all_params = template_dict.get(\"instance\", {}).get(\"variables\", {})\n for p in all_params.items():\n if not p[1].get(\"optional\", False) and \"value\" not in p[1]:\n mandatory_params[p[0]] = p[1][\"type\"]\n return mandatory_params", "def get_mandatory_args(self):\n raise NotImplementedError(\"ICallable.get_mandatory_args\")", "def get_optional_params():\n return {\n 'regularizer': None, # any valid TensorFlow regularizer\n 'regularizer_params': dict,\n 'initializer': None, # any valid TensorFlow initializer\n 'initializer_params': dict,\n 'dtype': [tf.float32, tf.float16, 'mixed'],\n }", "def Mandatory(cls, **_kwargs):\n\n kwargs = dict(min_occurs=1, nillable=False)\n if cls.get_type_name() is not cls.Empty:\n kwargs['type_name'] = '%s%s%s' % (const.MANDATORY_PREFIX,\n cls.get_type_name(), const.MANDATORY_SUFFIX)\n kwargs.update(_kwargs)\n if issubclass(cls, Unicode):\n kwargs.update(dict(min_len=1))\n\n elif issubclass(cls, Array):\n (k,v), = cls._type_info.items()\n if v.Attributes.min_occurs == 0:\n cls._type_info[k] = Mandatory(v)\n\n return cls.customize(**kwargs)", "def _validate_params(self):\n assert set(self.required_params) - set(self._params) == set()\n for par, val in self.optional_params.items():\n if par not in self._params:\n self._params[par] = val", "def mandatory(self):\n return self._mandatory", "def testRequiredFields(self):\n required = Project.required_fields()\n\n self.assertEqual(type(required), tuple,\n \"required_fields() returns a tuple.\")\n\n self.assertTrue(len(required) > 0,\n \"required_field() did not return empty value.\")", "def test_missing_mandatory_attributes():\n model_definition = {'source': {'type': 'list',\n 'required': True,\n 'persisted': True},\n 'resources.title': {'type': 'text',\n 'required': True,\n 'persisted': True}}\n # missing language in the model\n _ = ProductModelFactory(model_definition)", "def parameters(self):\n return self._default_params", "def test_missing_required(self):\n param_types = {\n 'string': str,\n 'unicode': unicode,\n 'integer': int,\n 'boolean': bool,\n 'list': list,\n 'json': 'json',\n 'datetime': 'datetime',\n 'date': 'date',\n }\n expected_types = {\n 'string': '',\n 'unicode': u'',\n 'integer': 0,\n 'boolean': False,\n 'list': [],\n 'json': u'',\n 'datetime': None,\n 'date': None,\n }\n handler = self.create_handler(r'')\n 
self.assertEqual(handler.get_params(param_types, required=True),\n expected_types)", "def get_optional_parameters(self) -> list:\n results = []\n if self.no_params or self.params_required:\n return []\n else:\n for parameter, parameter_details in self.parameters.items():\n # Fixing issue #92\n # if parameter == \"effect\":\n # continue\n if parameter_details.default_value:\n results.append(parameter_details.name)\n return results", "def _check_required_params(self):\n logging.debug('.. check if Experiment have all required parameters')\n for n in self.REQUIRED_PARAMS:\n if n not in self.params:\n raise ValueError('missing \"%s\" among %r' % (n, self.params.keys()))", "def required_dict_validator(self, dict_fields, model_name, erp_required=[]):\n required_fields = self.env['settings.field'].sudo().search([('model_id.model', '=', model_name)])\n\n if required_fields:\n erp_required.extend(required_fields.required_field_ids.filtered(lambda x: x.id not in [er.id for er in erp_required]))\n\n for field in erp_required:\n if field.name in dict_fields and 'required' not in dict_fields[field.name]:\n dict_fields[field.name]['required'] = True\n dict_fields[field.name]['empty'] = False\n\n return dict_fields", "def required_fields():\n module_logger.debug(\"In required_fields.\")\n return (\"comment\", \"lib_layout\", \"lib_selection\",\n \"ncbi_taxon_id\", \"prep_id\", \"sequencing_center\",\n \"sequencing_contact\", \"storage_duration\", \"tags\")", "def required_fields():\n return tuple(MIMARKS._fields.keys())", "def required(project):\n required = [\n {\n 'short_name': 'sic',\n 'optional': 'true'\n },\n {\n 'short_name': 'siconca',\n 'optional': 'true'\n }]\n return required", "def required(self) -> Optional[List[str]]:\n return self._required", "def check_required(self, required):\n for k in required:\n if self.__dict__.get(k) is None:\n raise ValueError(\n \"Required argument: '{0}' not provided\".format(k))", "def __mandatory_is_not_given(self):\n\n strTestName = 'Mandatory parameter must be given (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('mandatory_parameter', 'Mandatory parameter')\n\n self.__parametersCheck_error(RxCSObject, ParameterMissingError, strTestName)", "def _ensure_required_inputs(self):\n for name, input_type in self._input_types.items():\n if not input_type.optional and self._input_vars[name] is None:\n msg_prefix = 'Op \"{}\" (op_type: {}) '.format(self.name, self.op_type)\n raise ValueError(\n msg_prefix + \"Required input {} is missing\".format(name)\n )", "def get_params(self):\n\n return None", "def get_params(self):\n\n return None" ]
[ "0.7562365", "0.6865271", "0.6684548", "0.6580648", "0.642038", "0.6418872", "0.63192177", "0.6319068", "0.6306489", "0.6298723", "0.623854", "0.62329423", "0.6214493", "0.61800486", "0.61040115", "0.6102065", "0.6099858", "0.6063706", "0.60514194", "0.6025997", "0.6002806", "0.5990289", "0.59744805", "0.5941653", "0.58991146", "0.58979154", "0.58773285", "0.586812", "0.58249944", "0.58249944" ]
0.86397606
0
Return an empty DerivedParameterCollection.
def _get_derived_parameters(self) -> DerivedParameterCollection: return DerivedParameterCollection([])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear(self) -> None:\n super().clear()\n self._parameters = np.array([], dtype=object)", "def get_parameters(self):\n self.unimpl_base_class()", "def get_parameters(self):\n d = super().get_parameters()\n d.pop('population_size', None)\n return d", "def empty_collection(self):\n raise NotImplementedError", "def remove_parameters(self):\n self.parameters = []", "def empty(cls):\n x = cls(base_types=set(), template_types={}, refined_types={}, humannames={},\n type_aliases={}, cpp_types={}, numpy_types={}, from_pytypes={},\n cython_ctypes={}, cython_cytypes={}, cython_pytypes={},\n cython_cimports={}, cython_cyimports={}, cython_pyimports={},\n cython_functionnames={}, cython_classnames={}, cython_c2py_conv={},\n cython_py2c_conv={})\n del x.extra_types\n del x.dtypes\n del x.stlcontainers\n return x", "def parameters(self):\n ps = super().parameters()\n exclude = set(self.estimator.parameters())\n ps = (p for p in ps if not p in exclude)\n return ps", "def get_params_iter(self):\n return []", "def all(self):\n datapoint_params = self._make_datapooint_param_iter()\n if datapoint_params is None:\n return iter([])\n params_list = list(datapoint_params) # construct param list\n return self._gen(params_list)", "def get_params(self):\n return []", "def parameters(self):\n return self._default_params", "def parameters_default(cls):\n return cls._Parameters.__new__.__defaults__", "def filter_empty_subparams(self, param_name):\n param = self.module.params.get(param_name)\n filtered = []\n if isinstance(param, list):\n for subparam in param:\n if isinstance(subparam, dict):\n filtered.append(\n dict(\n (key, value)\n for key, value in subparam.items()\n if value is not None\n )\n )\n else:\n filtered = param\n return filtered", "def get_parameters(self):\n return(_deepcopy(self.parameters))", "def placeholder(self):\n return []", "def get_base_parameters(cls):\n return {\n \"cutoff\": None,\n \"method\": None\n }", "def _derived(self, derived=True):\n self.partSchemes()\n for par, part, refs, reorder in ASParameters._derived(self, derived):\n yield (par, part, refs, reorder)", "def parameters(self):\n return []", "def optional(cls):\n return []", "def empty_like(self):\n res = type(self)(\n self.shape.copy(),\n qhape=self.qhape.copy(),\n qodulus=self.qodulus,\n dtype=self.dtype,\n defval=self.defval,\n invar=self.invar,\n charge=self.charge,\n dirs=self.dirs.copy(),\n )\n return res", "def make_empty(cls):\n args = inspect.getargspec(cls.__init__).args\n # remove self; always first arg of __init__\n args = args[1:]\n return cls(**dict.fromkeys(args))", "def param(self):\n return []", "def param(self):\n return []", "def generate_free_parameters(self):\n free_parameters = OrderedDict()\n for p in self.free_parameter_names:\n free_parameters[p] = 0.\n return free_parameters", "def _empty(self, *dims, **kwargs):\n size = []\n dtypes = []\n for d in dims:\n size.append(len(self[d]))\n dtypes.append(self[d].dtype)\n dtype = kwargs.pop('dtype', numpy.result_type(*dtypes))\n fv = kwargs.pop('fill_value')\n return numpy.full(size, fill_value=fv, dtype=dtype)", "def get_params(self, deep=True):\n return super().get_params(deep=deep)", "def param(self):\r\n return []", "def initial_parameters(self):\n return self._initial_parameters", "def clone(self):\n return _libsbml.ListOfParameters_clone(self)", "def all(cls):\n return []" ]
[ "0.60794294", "0.60395926", "0.59694386", "0.59527194", "0.5874923", "0.5814086", "0.57916915", "0.57306236", "0.5725052", "0.56777626", "0.5676922", "0.56153905", "0.5593001", "0.5562131", "0.5517442", "0.5513053", "0.5445878", "0.5444455", "0.5426895", "0.5397799", "0.5352691", "0.5303792", "0.5303792", "0.5291877", "0.5286699", "0.5273922", "0.52694887", "0.5255946", "0.5249065", "0.52466434" ]
0.8430621
1
Return an EmptyLikelihood object.
def empty_likelihood() -> EmptyLikelihood: return EmptyLikelihood()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getNoData(self):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n return TreeLikelihoodBase.getNoData(self)", "def mempty(self):\n return identity", "def empty() -> ObservableBase:\n from ..operators.observable.empty import empty\n return empty()", "def empty_like(self):\n res = type(self)(\n self.shape.copy(),\n qhape=self.qhape.copy(),\n qodulus=self.qodulus,\n dtype=self.dtype,\n defval=self.defval,\n invar=self.invar,\n charge=self.charge,\n dirs=self.dirs.copy(),\n )\n return res", "def zero_proximal(sigma=1.0):\n return ZeroOperator(self.domain)", "def nan(klass):\n return RatTerm(RatNum(1, 0), 0)", "def zeroIntelligence_behavior(self):\n return np.ones((self.N, self.Q, self.M)) / float(self.M)", "def empty(cls) -> EnvelopeStructure:\n return _EmptyEnvelopeStructure()", "def empty_instance():\n from weighted_graph import Graph\n return Graph()", "def empty(cls) -> BodyStructure:\n return _EmptyBodyStructure()", "def empty(cls):\n pass", "def empty(cls):\n return Marker()", "def empty(cls):\n x = cls(base_types=set(), template_types={}, refined_types={}, humannames={},\n type_aliases={}, cpp_types={}, numpy_types={}, from_pytypes={},\n cython_ctypes={}, cython_cytypes={}, cython_pytypes={},\n cython_cimports={}, cython_cyimports={}, cython_pyimports={},\n cython_functionnames={}, cython_classnames={}, cython_c2py_conv={},\n cython_py2c_conv={})\n del x.extra_types\n del x.dtypes\n del x.stlcontainers\n return x", "def f_empty(self):\n raise NotImplementedError(\"Should have implemented this.\")", "def empty(self):", "def zero(self):\n return self.create()", "def zero(klass):\n return RatTerm(RatNum(0, 1), 0)", "def zero(self):\n raise NotImplementedError(\"Not implemented yet.\")", "def empty(*shape, **kwargs):\n return Tensor(np.empty(shape), **kwargs)", "def zero(self):\n q = pinocchio.neutral(self.model)\n v = np.zeros(self.model.nv)\n return np.concatenate([q.flat, v])", "def zero(self) -> 'PFElement':\n return self(0)", "def empty_model() -> Model:\n yield Model()", "def proximal(self):\n def zero_proximal(sigma=1.0):\n \"\"\"Proximal factory for zero operator.\n\n Parameters\n ----------\n sigma : positive float, optional\n Step size parameter.\n \"\"\"\n return ZeroOperator(self.domain)\n\n return zero_proximal", "def trend_none(self):\n raise NotImplementedError()", "def __init__(self, likelihood, model):\n if not isinstance(likelihood, GaussianLikelihood):\n raise RuntimeError(\"Likelihood must be Gaussian for exact inference\")\n super(ExactMarginalLogLikelihood, self).__init__(likelihood, model)", "def zeroed_observation(observation):\n if hasattr(observation, 'shape'):\n return np.zeros(observation.shape)\n elif hasattr(observation, '__iter__'):\n out = []\n for x in observation:\n out.append(zeroed_observation(x))\n return out\n else:\n return 0.", "def zeros_like(self):\n raise NotImplementedError", "def empty_like(other, dtype=None, constant=False):\n if isinstance(other, Tensor):\n other = other.data\n \n return Tensor(np.empty_like(other, dtype), constant=constant)", "def empty(shape, dtype=np.float32, constant=False):\n return Tensor(np.empty(shape, dtype), constant=constant)", "def empty(lm=None, start_with_bos=True):\n self = CtcBeamSearchCandidate()\n # State of the candidate text after removing duplicates and blanks\n self.text_state = TextState.empty()\n self.logp_blank = 0.\n self.logp_non_blank = -np.inf\n if lm is not None:\n # self.lm_state relates to all words except the last unfinished word\n self.lm_state = kenlm.State()\n 
if start_with_bos:\n lm.BeginSentenceWrite(self.lm_state)\n else:\n lm.NullContextWrite(self.lm_state)\n else:\n self.lm_state = None\n return self" ]
[ "0.68557054", "0.63254637", "0.62527746", "0.61421937", "0.6079142", "0.6053957", "0.6017173", "0.6011714", "0.5980676", "0.59661746", "0.59467256", "0.5892879", "0.5858775", "0.584163", "0.5826427", "0.58017796", "0.5760485", "0.5741958", "0.56936145", "0.56328344", "0.56234956", "0.56152797", "0.5606572", "0.5513603", "0.550683", "0.5504243", "0.54672813", "0.54621553", "0.54544806", "0.54525036" ]
0.89379823
0
Initialize the ParameterizedLikelihood by reading the specified sacc_filename value.
def __init__(self, params: NamedParameters):
    super().__init__()
    self.sacc_filename = params.get_string("sacc_filename")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, initial_param_file, fasta_file):\n self.sequences = read_fasta_sequences_to_str(fasta_file)\n self.obs = observe_differences(self.sequences[0], self.sequences[1])\n self.theta = parse_params(initial_param_file)\n self.estimate = None\n self.likelihood = None\n self.initial_likelihood = None", "def __init__(self, configfile_name):\n assert os.path.exists(configfile_name), \\\n 'Config file %s does not exist' % (configfile_name)\n\n correct_param_load = self.load_config_file(configfile_name)\n assert correct_param_load,\\\n 'Config params could not be loaded from file'\n\n self.fullpath_input_configfile = configfile_name\n self.word_det_rfc = None\n self.reg_coeffs = None\n self.bb_reg = None\n self.img_files = None", "def __init__ (self) :\n self.loadCSPAD2x2CalibParsDefault()", "def initialize(filename='params.yaml'):\n home_path = str(Path.home())\n project_path = 'Documents/SideProjects/sailboatsfactory'\n work_path = 'src/nn-core'\n params_path = join(home_path, join(project_path, work_path))\n yaml_file = join(params_path, filename)\n print(\"Reading parameters from:\", filename)\n with open(yaml_file, 'r') as f:\n my_params = load(f)\n my_params['x_scaler'] = MinMaxScaler(feature_range=(-1, 1))\n my_params['y_scaler'] = MinMaxScaler(feature_range=(-1, 1))\n\n raw = data.read(my_params)\n adjusted = adjust(raw, my_params)\n\n return adjusted, my_params", "def __init__(self, filepath, baseline_name=BASELINE_FILE_NAME,\n filename=FILE_NAME, sway_name=FILE_NAME_S):\n self.filepath = filepath\n self.baseline_name = baseline_name\n self.filename = filename\n self.sway_name = sway_name\n self.XSCALE = 22.5\n self.YSCALE = 13.\n self.lim_X = 20\n self.lim_Y = 20\n self.get_baseline_points()", "def __init__(self, ann_path, train_val_list, test_list, config=None):\n self.ann_path = ann_path\n self.config = config\n self.train_val_list = train_val_list\n self.test_list = test_list", "def __init__(self, file_path, num_bases, lambda_):\n # data IO\n assert num_bases >= 2\n self._num_bases = int(num_bases)\n self._lambda = float(lambda_)\n self._data = []\n self._input = []\n self._label = []\n with open(file_path, 'r') as file_:\n for line in file_.readlines():\n # 1 for x^0 (bias)\n self._data.append([1, int(line.strip().split(',')[0])])\n self._label.append([int(line.strip().split(',')[1])])\n for v in self._data:\n for i in range(2, num_bases):\n v += [v[1]**i]\n self._input.append(v)\n self._input = Mat(self._input)\n self._label = Mat(self._label)\n self._weights = self._fit()\n self._error = self._mse()\n # print('input shape = {}'.format(self._input.shape))\n # print('label shape = {}'.format(self._label.shape))", "def __init__(self, __file):\n\n\t\tself.fileName = __file\n\t\tif (os.path.isfile(self.fileName)):\n\t\t\t# config.ini found, load it\n\t\t\tself.config.read(self.fileName)\n\t\t\tself.default = False\n\t\telse:\n\t\t\t# config.ini not found, generate a default one\n\t\t\tself.generateDefaultConfig()\n\t\t\tself.default = True", "def __init__(self):\n parameters_list = []\n self.config_dict = self.open_config(parameters_list)\n\n # Define defaults\n self.disc_gt = 0.0\n self.disc_out = 0.0", "def __init__(self, fname='spectral_basis.desc', K=None, L=None, beta=2):\n if K is None and L is None:\n self.init_from_file(fname)\n elif fname is None:\n self.init_from_params(K, L, beta)\n else:\n raise Exception(\"incompatible calling arguments\")", "def __init__(self, file_path, num_bases, converge_epsilon, init_weights):\n assert num_bases == len(init_weights)\n 
self._num_bases = int(num_bases)\n self._converge_epsilon = float(converge_epsilon)\n self._init_weights = Mat([[float(i)] for i in init_weights])\n self._data = []\n self._input = []\n self._label = []\n with open(file_path, 'r') as file_:\n for line in file_.readlines():\n # 1 for x^0 (bias)\n self._data.append([1, int(line.strip().split(',')[0])])\n self._label.append([int(line.strip().split(',')[1])])\n for v in self._data:\n for i in range(2, num_bases):\n v += [v[1]**i]\n self._input.append(v)\n self._input = Mat(self._input)\n self._label = Mat(self._label)\n self._weights = self._fit()\n # print('input shape = {}'.format(self._input.shape))\n # print('label shape = {}'.format(self._label.shape))", "def __init__(self, path_to_config_file):\n self.file_path = path_to_config_file", "def __init__(self, fname=None, Umean=None, verbose=False, **kwargs):\n super(self.__class__,self).__init__(verbose,**kwargs)\n self.Umean = Umean\n\n if fname is not None:\n self.read_field(fname)", "def initAccessSecurityFile(self, filename, **subst):\n macro = ','.join(['%s=%s' % (k, v) for k, v in subst.items()])\n pcaspy.asInitFile(filename, macro)\n pcaspy.asCaStart()", "def __init__(self, file_name=None):\n self.file_name = file_name\n self.frd = None\n self._steps = []\n if file_name is not None:\n self.load(file_name)", "def __init__(self, file_stem: str, num_trials: int,\n differentiator: BaseDifferentiator,\n outcome_var: str,\n noisemaker: NoiseMaker = None,\n known_vars: list = None,\n dep_var_name: str = 'u',\n ind_var_name: str = 'x'):\n self.file_stem = file_stem\n self.num_trials = num_trials\n self.differentiator = differentiator\n self.outcome_var = outcome_var\n self.noisemaker = noisemaker\n self.known_vars = known_vars\n self.dv_name = dep_var_name\n self.iv_name = ind_var_name", "def loadParameters(self, parmfile=''):\n if not parmfile:\n raise IOError(\"You need to specify a parameter filename\")\n parmdir = os.getenv('ATMOSPHERE_PARAMETERS_DIR')\n parmpath = os.join.path(parmdir, parmfile)\n # Read from file\n with open(parmpath, 'r') as parmf:\n data = pickle.load(parmf)\n # Dictionary list\n self.modtran_visits = data[0]\n # Tuple list\n self.aerosol_visits = data[1]\n # seed value\n nruns = len(self.modtran_visits)\n print('Parameters for {1} runs computed with seed = {0}'.format(data[2],\n nruns))\n # Init transmission array\n self.initTransmissionArray(nruns)", "def __init__(self, file_path):\r\n self.file_path = Path(file_path)\r\n self.fname = self.file_path.name\r\n self.d_stgs = settings.DisplaySettings()\r\n self.c_stgs = settings.CalculationSettings()\r\n logger.info(f'{self} create')", "def __init__(self, initialLearnRate):\n self.initialLearnRate = initialLearnRate", "def load(self, filename):\n param_dict = pickle.load(open('%s' % filename, 'rb'))\n self.learningrate = param_dict['learningrate']\n self.verbose = param_dict['verbose']\n self._loadsize = param_dict['loadsize']\n self._batchsize = param_dict['batchsize']\n self.momentum = param_dict['momentum']\n self.epochcount = param_dict['epochcount']\n self._momentum_batchcounter = param_dict['momentum_batchcounter']\n for param_name in param_dict['incs'].keys():\n for p in self._params:\n if p.name == param_name:\n self._incs[p].set_value(param_dict['incs'][param_name])\n if self.rmsprop is not None:\n for param_name in param_dict['avg_grad_sqrs'].keys():\n for p in self._params:\n if p.name == param_name:\n self._avg_grad_sqrs[p].set_value(param_dict['avg_grad_sqrs'][param_name])\n self._numbatches = self._loadsize 
// self._batchsize\n if self._inputs_type != 'function':\n self._numloads = self._inputs.shape[0] // self._loadsize\n if self._inputs_type == 'h5':\n self._inputs_theano.set_value(\n self._inputs.read(stop=self._loadsize))\n else:\n self._inputs_theano.set_value(self._inputs[:self._loadsize])", "def __init__(self, simname, pointing):\n\n conf=files.read_config(simname)\n self.update(conf)\n\n self['pointing_id']=pointing\n self['fnum']=FILTERNUM[self['filter']]\n\n # use 2*seed for images, seed for catalogs\n numpy.random.seed(2*self['seed'])\n\n self._load_pointing()\n self._load_catalog()", "def __init__(self, filename: str | Path, *args, **kwargs) -> None:\n super().__init__(filename, *args, **kwargs)\n self._non_metadata_keys = None\n self._score_key = None\n self._rt_key = None\n self._spectrum_rt_key = None\n self._qvalue_key = None\n self._pep_key = None\n\n self._source = self._infer_source()", "def __init__(self, data_dir, pairs_filepath, img_ext):\n self.data_dir = data_dir\n self.pairs_filepath = pairs_filepath\n self.img_ext = img_ext", "def __init__(self, file_name, config, sed=3142):\n set_sed(sed)\n self.time_stamp = None\n\n self.input_file_name = file_name\n\n self.estimators_list = config.estimators_list\n self.optim_params = config.optim_params\n\n self.work_dataframe = None\n self.raw_dataframe = None\n\n self.x_matrix = None\n self.y_vector = None\n\n self.estimator = None\n self.pipeline = None\n\n self.training_report_pool = None\n self.model_pool = None\n\n self.feature_importance_pool = None\n self.feature_importance_hist = None\n\n self.receiver_operating_characteristic_curve = None\n self.area_under_curve_pool = None\n\n self.learning_report = None\n self.learning_line = None\n\n self.label_encoder_matrix = None\n self.dropped_cols = None\n self.mask_query = None\n self.gs_mask = None", "def __init__(self,\n path=None,\n sample_rate=16000,\n noise_levels=(0, 0.5)):\n if not os.path.exists(path):\n print(\"Directory doesn't exist: {}\".format(path))\n raise IOError\n # self.paths = path is not None and librosa.util.find_files(path)\n with open(path) as f:\n self.paths = f.readlines()\n self.sample_rate = sample_rate\n self.noise_levels = noise_levels", "def __init__(self, filepath):\n try:\n config_file_r = open(filepath)\n self.sim_parametres = yaml.load(config_file_r, Loader=yaml.FullLoader)\n except:\n raise Exception(\"Le fichier de configuration n'a pas été atteint ou n'a pas pu être lu. Veuillez vérifier \"\n \"qu'il n'y ait aucune erreur de syntaxe.\")", "def __init__(self, file_name):\n self.file_name = file_name\n\n self.A = 1\n self.B = 0\n self.C = 1\n self.R = FILTER_R\n self.Q = FILTER_Q\n\n self.data_stream = []", "def __init__(self, file_name=None, file_object=None, pdb_code=None):\n self.line_number = 0\n if file_name is not None:\n assert file_object is None\n assert pdb_code is None\n self.file_object = open(file_name)\n elif file_object is not None:\n assert pdb_code is None\n self.file_object = file_object\n elif pdb_code is not None:\n self.file_object = mmcif_files.getFile(pdb_code)\n else:\n raise ValueError(\"No input file given\")", "def __init__(self, parameters, learning_rate):\n self.parameters = parameters\n self.learning_rate = learning_rate", "def __init__(self, seqfile, predfile):\n assert seqfile.endswith(\".mpd\"), predfile.endswith(\".pred\")\n\n self._seqfile = seqfile\n self._predfile = predfile\n self._parse = None # to stock the parsed sequences\n self._motifs = None" ]
[ "0.6211385", "0.5966141", "0.5762728", "0.5676559", "0.5669715", "0.5662447", "0.55601126", "0.5527512", "0.5518656", "0.54877645", "0.5482554", "0.5455407", "0.54435134", "0.5440038", "0.54361457", "0.54338324", "0.54028064", "0.5392907", "0.5386685", "0.53818727", "0.5377004", "0.5373688", "0.5352941", "0.5349324", "0.5334924", "0.53315896", "0.53030396", "0.53013676", "0.5300082", "0.52934444" ]
0.72975916
0
Return an empty RequiredParameters object.
def _required_parameters(self) -> RequiredParameters:
    return RequiredParameters([])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_required_params():\n return {}", "def get_empty_required_fields(self):\n empty_fields = self.get_empty_fields()\n return [f for f in empty_fields if f in self.REQUIRED_FIELDS]", "def get_required_parameters(self) -> list:\n results = []\n if self.no_params or self.params_optional:\n return []\n else:\n for parameter, parameter_details in self.parameters.items():\n # Fixing issue #92\n # if parameter == \"effect\":\n # continue\n if not parameter_details.default_value:\n results.append(parameter_details.name)\n return results", "def get_mandatory_param_names(self):\n all_names = self.params.keys()\n return [name for name in all_names \n if not self.params[name].is_optional]", "def params_required(self) -> bool:\n if self.no_params or self.params_optional:\n return False\n else:\n return True", "def assignRequiredValues(self):\n return _libsbml.Model_assignRequiredValues(self)", "def _mandatory_structure(self):\n self.mandatory_structure = {\n 'title': str,\n 'description': str,\n 'authors': [dict],\n 'defined_type': str,\n }", "def required(cls):\n return []", "def _get_mandatory_parameters(template_dict):\n # type: (Dict) -> Dict[str, str]\n mandatory_params = {}\n all_params = template_dict.get(\"instance\", {}).get(\"variables\", {})\n for p in all_params.items():\n if not p[1].get(\"optional\", False) and \"value\" not in p[1]:\n mandatory_params[p[0]] = p[1][\"type\"]\n return mandatory_params", "def get_mandatory_args(self):\n raise NotImplementedError(\"ICallable.get_mandatory_args\")", "def get_optional_params():\n return {\n 'regularizer': None, # any valid TensorFlow regularizer\n 'regularizer_params': dict,\n 'initializer': None, # any valid TensorFlow initializer\n 'initializer_params': dict,\n 'dtype': [tf.float32, tf.float16, 'mixed'],\n }", "def Mandatory(cls, **_kwargs):\n\n kwargs = dict(min_occurs=1, nillable=False)\n if cls.get_type_name() is not cls.Empty:\n kwargs['type_name'] = '%s%s%s' % (const.MANDATORY_PREFIX,\n cls.get_type_name(), const.MANDATORY_SUFFIX)\n kwargs.update(_kwargs)\n if issubclass(cls, Unicode):\n kwargs.update(dict(min_len=1))\n\n elif issubclass(cls, Array):\n (k,v), = cls._type_info.items()\n if v.Attributes.min_occurs == 0:\n cls._type_info[k] = Mandatory(v)\n\n return cls.customize(**kwargs)", "def _validate_params(self):\n assert set(self.required_params) - set(self._params) == set()\n for par, val in self.optional_params.items():\n if par not in self._params:\n self._params[par] = val", "def mandatory(self):\n return self._mandatory", "def testRequiredFields(self):\n required = Project.required_fields()\n\n self.assertEqual(type(required), tuple,\n \"required_fields() returns a tuple.\")\n\n self.assertTrue(len(required) > 0,\n \"required_field() did not return empty value.\")", "def test_missing_mandatory_attributes():\n model_definition = {'source': {'type': 'list',\n 'required': True,\n 'persisted': True},\n 'resources.title': {'type': 'text',\n 'required': True,\n 'persisted': True}}\n # missing language in the model\n _ = ProductModelFactory(model_definition)", "def parameters(self):\n return self._default_params", "def test_missing_required(self):\n param_types = {\n 'string': str,\n 'unicode': unicode,\n 'integer': int,\n 'boolean': bool,\n 'list': list,\n 'json': 'json',\n 'datetime': 'datetime',\n 'date': 'date',\n }\n expected_types = {\n 'string': '',\n 'unicode': u'',\n 'integer': 0,\n 'boolean': False,\n 'list': [],\n 'json': u'',\n 'datetime': None,\n 'date': None,\n }\n handler = self.create_handler(r'')\n 
self.assertEqual(handler.get_params(param_types, required=True),\n expected_types)", "def get_optional_parameters(self) -> list:\n results = []\n if self.no_params or self.params_required:\n return []\n else:\n for parameter, parameter_details in self.parameters.items():\n # Fixing issue #92\n # if parameter == \"effect\":\n # continue\n if parameter_details.default_value:\n results.append(parameter_details.name)\n return results", "def _check_required_params(self):\n logging.debug('.. check if Experiment have all required parameters')\n for n in self.REQUIRED_PARAMS:\n if n not in self.params:\n raise ValueError('missing \"%s\" among %r' % (n, self.params.keys()))", "def required_dict_validator(self, dict_fields, model_name, erp_required=[]):\n required_fields = self.env['settings.field'].sudo().search([('model_id.model', '=', model_name)])\n\n if required_fields:\n erp_required.extend(required_fields.required_field_ids.filtered(lambda x: x.id not in [er.id for er in erp_required]))\n\n for field in erp_required:\n if field.name in dict_fields and 'required' not in dict_fields[field.name]:\n dict_fields[field.name]['required'] = True\n dict_fields[field.name]['empty'] = False\n\n return dict_fields", "def required_fields():\n module_logger.debug(\"In required_fields.\")\n return (\"comment\", \"lib_layout\", \"lib_selection\",\n \"ncbi_taxon_id\", \"prep_id\", \"sequencing_center\",\n \"sequencing_contact\", \"storage_duration\", \"tags\")", "def required_fields():\n return tuple(MIMARKS._fields.keys())", "def required(project):\n required = [\n {\n 'short_name': 'sic',\n 'optional': 'true'\n },\n {\n 'short_name': 'siconca',\n 'optional': 'true'\n }]\n return required", "def required(self) -> Optional[List[str]]:\n return self._required", "def check_required(self, required):\n for k in required:\n if self.__dict__.get(k) is None:\n raise ValueError(\n \"Required argument: '{0}' not provided\".format(k))", "def __mandatory_is_not_given(self):\n\n strTestName = 'Mandatory parameter must be given (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('mandatory_parameter', 'Mandatory parameter')\n\n self.__parametersCheck_error(RxCSObject, ParameterMissingError, strTestName)", "def _ensure_required_inputs(self):\n for name, input_type in self._input_types.items():\n if not input_type.optional and self._input_vars[name] is None:\n msg_prefix = 'Op \"{}\" (op_type: {}) '.format(self.name, self.op_type)\n raise ValueError(\n msg_prefix + \"Required input {} is missing\".format(name)\n )", "def get_params(self):\n\n return None", "def get_params(self):\n\n return None" ]
[ "0.7562365", "0.6865271", "0.6684548", "0.6580648", "0.642038", "0.6418872", "0.63192177", "0.6319068", "0.6306489", "0.6298723", "0.623854", "0.62329423", "0.6214493", "0.61800486", "0.61040115", "0.6102065", "0.6099858", "0.6063706", "0.60514194", "0.6025997", "0.6002806", "0.5990289", "0.59744805", "0.5941653", "0.58991146", "0.58979154", "0.58773285", "0.586812", "0.58249944", "0.58249944" ]
0.86397606
1
Return an empty DerivedParameterCollection.
def _get_derived_parameters(self) -> DerivedParameterCollection:
    return DerivedParameterCollection([])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear(self) -> None:\n super().clear()\n self._parameters = np.array([], dtype=object)", "def get_parameters(self):\n self.unimpl_base_class()", "def get_parameters(self):\n d = super().get_parameters()\n d.pop('population_size', None)\n return d", "def empty_collection(self):\n raise NotImplementedError", "def remove_parameters(self):\n self.parameters = []", "def empty(cls):\n x = cls(base_types=set(), template_types={}, refined_types={}, humannames={},\n type_aliases={}, cpp_types={}, numpy_types={}, from_pytypes={},\n cython_ctypes={}, cython_cytypes={}, cython_pytypes={},\n cython_cimports={}, cython_cyimports={}, cython_pyimports={},\n cython_functionnames={}, cython_classnames={}, cython_c2py_conv={},\n cython_py2c_conv={})\n del x.extra_types\n del x.dtypes\n del x.stlcontainers\n return x", "def parameters(self):\n ps = super().parameters()\n exclude = set(self.estimator.parameters())\n ps = (p for p in ps if not p in exclude)\n return ps", "def get_params_iter(self):\n return []", "def all(self):\n datapoint_params = self._make_datapooint_param_iter()\n if datapoint_params is None:\n return iter([])\n params_list = list(datapoint_params) # construct param list\n return self._gen(params_list)", "def get_params(self):\n return []", "def parameters(self):\n return self._default_params", "def parameters_default(cls):\n return cls._Parameters.__new__.__defaults__", "def filter_empty_subparams(self, param_name):\n param = self.module.params.get(param_name)\n filtered = []\n if isinstance(param, list):\n for subparam in param:\n if isinstance(subparam, dict):\n filtered.append(\n dict(\n (key, value)\n for key, value in subparam.items()\n if value is not None\n )\n )\n else:\n filtered = param\n return filtered", "def get_parameters(self):\n return(_deepcopy(self.parameters))", "def placeholder(self):\n return []", "def get_base_parameters(cls):\n return {\n \"cutoff\": None,\n \"method\": None\n }", "def parameters(self):\n return []", "def _derived(self, derived=True):\n self.partSchemes()\n for par, part, refs, reorder in ASParameters._derived(self, derived):\n yield (par, part, refs, reorder)", "def optional(cls):\n return []", "def empty_like(self):\n res = type(self)(\n self.shape.copy(),\n qhape=self.qhape.copy(),\n qodulus=self.qodulus,\n dtype=self.dtype,\n defval=self.defval,\n invar=self.invar,\n charge=self.charge,\n dirs=self.dirs.copy(),\n )\n return res", "def make_empty(cls):\n args = inspect.getargspec(cls.__init__).args\n # remove self; always first arg of __init__\n args = args[1:]\n return cls(**dict.fromkeys(args))", "def param(self):\n return []", "def param(self):\n return []", "def generate_free_parameters(self):\n free_parameters = OrderedDict()\n for p in self.free_parameter_names:\n free_parameters[p] = 0.\n return free_parameters", "def _empty(self, *dims, **kwargs):\n size = []\n dtypes = []\n for d in dims:\n size.append(len(self[d]))\n dtypes.append(self[d].dtype)\n dtype = kwargs.pop('dtype', numpy.result_type(*dtypes))\n fv = kwargs.pop('fill_value')\n return numpy.full(size, fill_value=fv, dtype=dtype)", "def get_params(self, deep=True):\n return super().get_params(deep=deep)", "def param(self):\r\n return []", "def initial_parameters(self):\n return self._initial_parameters", "def clone(self):\n return _libsbml.ListOfParameters_clone(self)", "def all(cls):\n return []" ]
[ "0.6081113", "0.6039337", "0.59705603", "0.5954725", "0.5876655", "0.58154607", "0.5792134", "0.5731025", "0.5725712", "0.56782943", "0.56777143", "0.56163365", "0.5595286", "0.5563155", "0.551771", "0.55127925", "0.54450375", "0.5443118", "0.5427724", "0.5399277", "0.53542346", "0.5304332", "0.5304332", "0.5292576", "0.5287981", "0.5274574", "0.52700245", "0.5256432", "0.5249941", "0.5247289" ]
0.84288347
0
Return a ParameterizedLikelihood object.
def parameterized_likelihood(params: NamedParameters):
    return ParamaterizedLikelihood(params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_likelihood(self, discretized=False, state=None):\n if not hasattr(self, 'softmax'):\n self.generate_softmax()\n\n if self.softmax is not None:\n if state is not None:\n return self.softmax.probability(class_=self.softmax_class_label,\n state=state)\n elif discretized:\n return self.softmax.probability(class_=self.softmax_class_label)\n else:\n return self.softmax, self.softmax_class_label\n else:\n logging.error(\"Couldn't generate softmax model for {}\"\n .format(self.__str__()))", "def get_likelihood_parameters(self):\n\n params=[]\n params.append(likelihood_parameter.LikelihoodParameter(\n name='g_star',min_value=0.95,max_value=0.99,\n value=self.linP_params['g_star']))\n params.append(likelihood_parameter.LikelihoodParameter(\n name='f_star',min_value=0.95,max_value=0.99,\n value=self.linP_params['f_star']))\n params.append(likelihood_parameter.LikelihoodParameter(\n name='Delta2_star',min_value=0.25,max_value=0.4,\n value=self.linP_params['Delta2_star']))\n params.append(likelihood_parameter.LikelihoodParameter(\n name='n_star',min_value=-2.35,max_value=-2.25,\n value=self.linP_params['n_star']))\n params.append(likelihood_parameter.LikelihoodParameter(\n name='alpha_star',min_value=-0.27,max_value=-0.16,\n value=self.linP_params['alpha_star']))\n\n return params", "def _sample_likelihood_params(self):\r\n self._sample_omega()\r\n self._sample_beta()\r\n self._sample_r()", "def _sample_likelihood_params(self):\n if self.marginalize:\n # We integrated out `beta` a la Bayesian linear regression.\n pass\n else:\n self._sample_beta_and_sigma_y()", "def _build_likelihood(self):\n\n # Get prior KL.\n KL = self.build_prior_KL()\n\n # Get conditionals\n fmean, fvar = self._build_predict(self.X, full_cov=False)\n\n # Get variational expectations.\n var_exp = self.likelihood.variational_expectations(fmean, fvar, self.Y) * self.obs_weight\n\n # re-scale for minibatch size\n scale = tf.cast(self.num_data, gp.settings.float_type) / tf.cast(tf.shape(self.X)[0], gp.settings.float_type)\n scale = scale / tf.reduce_mean(self.obs_weight)\n return tf.reduce_sum(var_exp) * scale - KL", "def likelihood(self):\n \n raise NotImplementedError()", "def createParameter(self):\n return _libsbml.Model_createParameter(self)", "def createParameter(self):\n return _libsbml.KineticLaw_createParameter(self)", "def lnprob(self, p):\n\n\n\n\t\tchisq = np.sum(self.deviates(p)[-1]**2)/2.0\n\t\tN = np.sum(self.TLC.bad == 0)\n\n\t\t# sum the deviates into a chisq-like thing\n\t\tlnlikelihood = -N * np.log(self.instrument.rescaling.value) - chisq/self.instrument.rescaling.value**2\n\t\tif np.isfinite(lnlikelihood) == False:\n\t\t\tlnlikelihood = -1e9\n\n\t\t# initialize an empty constraint, which could freak out if there's something bad about this fit\n\t\tconstraints = 0.0\n\n\t\t# loop over the parameters\n\n\n\t\tfor parameter in self.parameters:\n\n\t\t\t# if a parameter is outside its allowed range, then make the constraint very strong!\n\t\t\tinside = (parameter.value < parameter.limits[1]) & (parameter.value > parameter.limits[0])\n\t\t\ttry:\n\t\t\t\tassert(inside)\n\t\t\texcept AssertionError:\n\t\t\t\tconstraints -= 1e6\n\n\t\t# return the constrained likelihood\n\t\treturn lnlikelihood + constraints", "def prior_param(self, param_dict={}): \n self.param_obj = Params(param_dict) # parameter object \n self.param_names = param_dict.keys() \n self.n_params = len(param_dict.keys()) # number of parameters in theta ", "def log_likelihood(self):\n raise NotImplementedError(\"the log_likelihood property should 
\"\n \"be defined in the Estimator sub-class\")", "def _get_param_iterator(self):\n return model_selection.ParameterSampler(\n self.param_distributions, self.n_iter, random_state=self.random_state\n )", "def get_likelihood_param_values(self):\n likelihood_param_values = {}\n for name in self.likelihood_params:\n likelihood_param_values[name] = getattr(self.model, name)\n return likelihood_param_values", "def get_likelihood(self, d):\n pos = d.pos - self.parent.pos\n pos = np.dot(rotmat(-self.angle), pos)\n lik = halfnorm.pdf(pos[0],scale=self.length) * \\\n vonmises.pdf(np.arctan2(pos[1],pos[0]),self.vonmisesscale,loc=self.angle)\n #assert lik!=0.0\n return lik", "def _get_param_iterator(self):\n return model_selection.ParameterSampler(self.param_distributions,\n self.n_iter, random_state=self.random_state)", "def loglikelihood(self):\n raise NotImplementedError(\"To be implemented\")", "def posterior_sample_parameter(self, parameter):\n pass", "def gen_parameter(self, g, ng, p):\n pass", "def get_log_likelihood(response_probability, response):\n pass", "def Params(cls):\n return hyperparams.InstantiableParams(cls)", "def log_likelihood(self, x):\n return self.log_likelihood_exp(x) + self.log_prior_nuisance_parameters(x)", "def __init__(self, loglike, data, x, sigma):\n\n # add inputs as class attributes\n self.likelihood = loglike\n self.data = data\n self.x = x\n self.sigma = sigma", "def newParams(self):\n return package(Proposal.GaussianProposal.newParams(self))", "def log_likelihood_function(self, instance) -> float:\n return self.prior.factor(instance[0])", "def _create_log_likelihood(self, individual):\n # Get individuals data\n times = []\n observations = []\n mask = self._data[self._id_key] == individual\n data = self._data[mask][\n [self._time_key, self._obs_key, self._value_key]]\n for output in self._mechanistic_model.outputs():\n # Mask data for observable\n observable = self._output_observable_dict[output]\n mask = data[self._obs_key] == observable\n temp_df = data[mask]\n\n # Filter times and observations for non-NaN entries\n mask = temp_df[self._value_key].notnull()\n temp_df = temp_df[[self._time_key, self._value_key]][mask]\n mask = temp_df[self._time_key].notnull()\n temp_df = temp_df[mask]\n\n # Collect data for output\n times.append(temp_df[self._time_key].to_numpy())\n observations.append(temp_df[self._value_key].to_numpy())\n\n # # Count outputs that were measured\n # # TODO: copy mechanistic model and update model outputs.\n # # (Useful for e.g. 
control group and dose group training)\n # n_measured_outputs = 0\n # for output_measurements in observations:\n # if len(output_measurements) > 0:\n # n_measured_outputs += 1\n\n # Create log-likelihood and set ID to individual\n log_likelihood = chi.LogLikelihood(\n self._mechanistic_model, self._error_models, observations, times)\n log_likelihood.set_id(individual)\n\n return log_likelihood", "def log_likelihood_function(self, instance):\r\n\r\n xvalues = np.arange(self.data.shape[0])\r\n model_data = instance.profile_from_xvalues(xvalues=xvalues)\r\n residual_map = self.data - model_data\r\n chi_squared_map = (residual_map / self.noise_map) ** 2.0\r\n log_likelihood = -0.5 * sum(chi_squared_map)\r\n\r\n return log_likelihood", "def objective(self, param):\n self.__init__(param, self.data)\n # return self.rmse() + self.penalty()\n return self.rmse() + self.penalty()", "def _compute_log_likelihood(self, parameters):\n raise NotImplementedError('')", "def get_parameter_dict(self):\n prm = ModelParameters()\n prm.define(\"a\", self.a)\n return prm", "def prob_or_util(cls):\n\n @wraps(cls)\n def wrapper(*args, **kwargs):\n \"\"\"\n A wrapping function\n \"\"\"\n def __new__(cls, *args, **kwargs):\n kind = args[0]\n base = ProbabilityPotential if kind == KIND.PROBABILITY else UtilityPotential\n\n __dict__ = dict(cls.__dict__)\n new_type = type(cls.__name__, (base,), __dict__)\n obj = base.__new__(new_type, *args, **kwargs)\n\n obj.__init__(*args, **kwargs)\n return obj\n setattr(cls, \"__new__\", __new__)\n return cls(*args, **kwargs)\n\n return wrapper" ]
[ "0.6216977", "0.6201429", "0.59271574", "0.5916395", "0.59147215", "0.5894737", "0.5885555", "0.57895315", "0.5743663", "0.5680548", "0.5580204", "0.5562542", "0.55499244", "0.5537347", "0.5528815", "0.552764", "0.55163604", "0.549442", "0.54925627", "0.5490574", "0.5468335", "0.5466741", "0.54335064", "0.5425258", "0.5420976", "0.5418177", "0.53937703", "0.53926146", "0.5392105", "0.5389711" ]
0.75006866
0
Return an email Message object. This works like mboxutils.get_message, except it doesn't junk the headers if there's an error. Doing so would cause a headerless message to be written back out!
def get_message(obj):
    if isinstance(obj, email.Message.Message):
        return obj
    if hasattr(obj, "read"):
        obj = obj.read()
    try:
        msg = email.message_from_string(obj)
    except email.Errors.MessageParseError:
        msg = None
    return msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pop_message(self):\n try:\n result = self.messages.get()\n except Queue.Empty:\n return None\n else:\n return Message(body=result.getBody(), subject=result.getBody(), sender=result.getFrom())", "def get_message(self, **kwargs):\n message = Mail()\n if \"from_email\" in kwargs:\n sender = Email()\n message_content = kwargs.get(\"message_content\", \"\")\n sender.name = message_content.get(\"sender\", emailconf.DEFAULT_SENDER)\n sender.email = kwargs.get(\"from_email\", emailconf.DEFAULT_SENDER_EMAIL)\n message.from_email = sender\n if \"subject\" in kwargs:\n message.subject = kwargs.get(\"subject\", \"\")\n if \"text\" in kwargs:\n content = Content(\"text/plain\", kwargs.get(\"text\", \"\"))\n message.add_content(content)\n if \"html\" in kwargs:\n content = Content(\"text/html\", kwargs.get(\"html\", \"\"))\n message.add_content(content)\n if \"category\" in kwargs:\n category = Category(kwargs.get(\"category\", \"\"))\n message.add_category(category)\n\n personalization = self.create_personalization(**kwargs)\n if personalization:\n message.add_personalization(personalization)\n\n return message.get()", "def createMessage( self, *args, **kw ):\n return MailMessage( *args, **kw )", "def get_message(self) -> Union[\"Message\", None]:\n raw_data = (\n self.raw_data.get(\"message\") or\n self.raw_data.get(\"edited_message\")\n )\n\n if raw_data:\n return Message(raw_data)\n\n return None", "def get_message(self, email):\n\n message = MIMEText(self.message, 'html')\n\n message['Subject'] = self.subject\n message['From'] = self.from_\n message['To'] = email\n\n return message", "def getmessage(self, uid):\n data = self._fetch_from_imap(str(uid), self.retrycount)\n\n # data looks now e.g.\n #[('320 (X-GM-LABELS (...) UID 17061 BODY[] {2565}','msgbody....')]\n # we only asked for one message, and that msg is in data[0].\n # msbody is in [0][1].\n body = data[0][1].replace(\"\\r\\n\", \"\\n\")\n\n # Embed the labels into the message headers\n if self.synclabels:\n m = re.search('X-GM-LABELS\\s*\\(([^\\)]*)\\)', data[0][0])\n if m:\n labels = set([imaputil.dequote(lb) for lb in imaputil.imapsplit(m.group(1))])\n else:\n labels = set()\n labels = labels - self.ignorelabels\n labels_str = imaputil.format_labels_string(self.labelsheader, sorted(labels))\n\n # First remove old label headers that may be in the message content retrieved\n # from gmail Then add a labels header with current gmail labels.\n body = self.deletemessageheaders(body, self.labelsheader)\n body = self.addmessageheader(body, '\\n', self.labelsheader, labels_str)\n\n if len(body)>200:\n dbg_output = \"%s...%s\"% (str(body)[:150], str(body)[-50:])\n else:\n dbg_output = body\n\n self.ui.debug('imap', \"Returned object from fetching %d: '%s'\"%\n (uid, dbg_output))\n return body", "def get_message(self):\n context = self.context\n\n charset = str(context.charset)\n contentType = context.content_type\n\n mail_body = context.render()\n maintype, subtype = contentType.split('/')\n\n return MIMEText(mail_body, subtype, charset)", "def _get_message(self, sender_message):\n # type: (str) -> Message or None\n st_re = self.SENDER_TEXT.search(sender_message)\n if st_re is None:\n return None\n else:\n return Message(speaker=st_re.group(1), text=st_re.group(2).strip())", "def get_message(service, user_id, msg_id):\n try:\n # grab the message instance\n message = service.users().messages().get(userId=user_id, id=msg_id,format='raw').execute()\n\n # decode the raw string, ASCII works pretty well here\n msg_str = 
base64.urlsafe_b64decode(message['raw'].encode('ASCII'))\n\n # grab the string from the byte object\n mime_msg = email.message_from_bytes(msg_str)\n\n # check if the content is multipart (it usually is)\n content_type = mime_msg.get_content_maintype()\n if content_type == 'multipart':\n # there will usually be 2 parts the first will be the body in text\n # the second will be the text in html\n parts = mime_msg.get_payload()\n\n # return the encoded text\n final_content = parts[0].get_payload()\n #return final_content\n return final_content\n\n elif content_type == 'text':\n return mime_msg.get_payload()\n #return mime_msg.get_payload()\n\n else:\n return \"\"\n print(\"\\nMessage is not text or multipart, returned an empty string\")\n # unsure why the usual exception doesn't work in this case, but \n # having a standard Exception seems to do the trick\n except Exception as error:\n print(\"An error occured: {}\".format(error))", "def get_message(self, _id):\n return Message.deserialize(self._get_single('messages', {'id': _id}))", "def fetch_message(conn, msg_uid ):\n # TODO: Could we fetch just the envelope of the response to save bandwidth?\n rv, data = conn.uid('fetch', msg_uid, \"(RFC822)\")\n if rv != 'OK':\n print (\"ERROR fetching message #\", msg_uid)\n return {}\n\n return email.message_from_bytes(data[0][1]) # dict-like object", "def _copy_message(self, message):\r\n gmsg = aeemail.EmailMessage(sender=message.from_email,\r\n to=message.to,\r\n subject=message.subject,\r\n body=message.body)\r\n if message.extra_headers.get('Reply-To', None):\r\n gmsg.reply_to = message.extra_headers['Reply-To']\r\n if message.cc:\r\n gmsg.cc = list(message.cc)\r\n if message.bcc:\r\n gmsg.bcc = list(message.bcc)\r\n if message.attachments:\r\n # Must be populated with (filename, filecontents) tuples.\r\n attachments = []\r\n for attachment in message.attachments:\r\n if isinstance(attachment, MIMEBase):\r\n attachments.append((attachment.get_filename(),\r\n attachment.get_payload(decode=True)))\r\n else:\r\n attachments.append((attachment[0], attachment[1]))\r\n gmsg.attachments = attachments\r\n # Look for HTML alternative content.\r\n if isinstance(message, EmailMultiAlternatives):\r\n for content, mimetype in message.alternatives:\r\n if mimetype == 'text/html':\r\n gmsg.html = content\r\n break\r\n return gmsg", "def make_message(self, mto, mbody=None, msubject=None, mtype=None,\n mhtml=None, mfrom=None, mnick=None):\n message = self.Message(sto=mto, stype=mtype, sfrom=mfrom)\n message['body'] = mbody\n message['subject'] = msubject\n if mnick is not None:\n message['nick'] = mnick\n if mhtml is not None:\n message['html']['body'] = mhtml\n return message", "def get_message(self, message_id):\n r = requests.get('https://outlook.office.com/api/v2.0/me/messages/' + message_id, headers=self._headers)\n check_response(r)\n return Message._json_to_message(self, r.json())", "def message(self):\n if not hasattr(self, '_message'):\n self._message = email.message_from_string(self.data)\n return self._message", "def get_message_from_request(request):\n sender = request.form['from']\n recipients = request.form['to'].split()\n subject = request.form['subject']\n body = request.form['body']\n cc = request.form.get('cc', '').split()\n bcc = request.form.get('bcc', '').split()\n attachments = parse_attachments(request)\n return Message(sender, recipients, subject, body, cc, bcc, attachments)", "def prepare_message(self, body, priority=None, content_type=None,\n content_encoding=None, headers=None, 
properties=None):\n return amqp.Message(body, priority=priority,\n content_type=content_type,\n content_encoding=content_encoding,\n application_headers=headers,\n **properties)", "def GetMessage(service, user_id, msg_id):\n try:\n message = service.users().messages().get(userId=user_id, id=msg_id,format='raw').execute()\n msg_str = base64.urlsafe_b64decode(message['raw'].encode('ASCII'))\n mime_msg = email.message_from_string(msg_str)\n data = {}\n data['to'] = mime_msg['To']\n data['from'] = mime_msg['From']\n data['date'] = mime_msg['Date']\n data['subject'] = mime_msg['Subject']\n data['message'] = \"\"\n return data\n except errors.HttpError as error:\n print('An error occurred: %s' % error)", "def get_message(self, bulk_id):\n res = self.client.get(\"/v1/messages/\" + str(bulk_id))\n\n try:\n return Message(res.data[\"message\"])\n except:\n raise ValueError(\"returned response not valid\")", "def createMessage( self, *args, **kw ):\n if not kw.has_key('charset'):\n kw['charset'] = self.getInputCharset()\n return MailServerBase.createMessage( self, *args, **kw )", "def message(self, body=None, style=None):\n return Message(body=body, style=style, server=self)", "def createMessage( self, *args, **kw ):\n if not kw.has_key('charset'):\n kw['charset'] = self.getOutputCharset()\n kw['to_mail'] = 1\n return MailServerBase.createMessage( self, *args, **kw )", "def get_message(self, id):\n url = \"https://api.imgur.com/3/message/{0}\".format(id)\n resp = self._send_request(url)\n return Message(resp, self)", "def message(self, message_id):\r\n return Message(self, message_id)", "def GetMimeMessage(service, user_id, msg_id):\n try:\n message = service.users().messages().get(userId=user_id, id=msg_id,\n format='raw').execute()\n\n #print('Message snippet: %s' % message['snippet'])\n \n\n msg_str = base64.urlsafe_b64decode(message['raw'].encode('ASCII'))\n\n \n\n mime_msg = email.message_from_string(msg_str)\n\n return mime_msg\n \n except errors.HttpError, error:\n print('An error occurred: %s' % error)", "def Message(self, *args, **kwargs):\n return Message(self, *args, **kwargs)", "def get_message(self):\n data = self.socket.recv(1024)\n if not data:\n logging.error('Failed to read data from socket')\n return\n\n return self.decode_message(data)", "def createMessage(self, sender: str, to: str, subject: str, message_text: str):\n message = MIMEText(message_text)\n message['to'] = to\n message['from'] = sender\n message['subject'] = subject\n raw_message = {'raw': base64.urlsafe_b64encode(message.as_bytes())}\n raw_message['raw']=raw_message['raw'].decode('utf-8')\n return raw_message", "def _get_plain_message (self) :\n return self._message", "def get_message(cls):\n rp = cls.get()\n try:\n message = rp.queue_send.get_nowait()\n except Exception:\n return None\n\n return message" ]
[ "0.695734", "0.6895103", "0.6767101", "0.6699613", "0.6677977", "0.65786904", "0.6405224", "0.63856715", "0.637261", "0.6364298", "0.6361784", "0.63502926", "0.62194216", "0.62027186", "0.6197121", "0.6094284", "0.6087418", "0.60842526", "0.60625416", "0.6039069", "0.6022497", "0.6018018", "0.59144324", "0.5907478", "0.59019715", "0.5888402", "0.5855994", "0.58472437", "0.58175665", "0.58060634" ]
0.7487364
0
Train bayes with all messages from a maildir.
def maildir_train(h, path, is_spam, force, removetrained):
    if loud:
        print(" Reading %s as Maildir" % (path,))
    import time
    import socket
    pid = os.getpid()
    host = socket.gethostname()
    counter = 0
    trained = 0
    for fn in os.listdir(path):
        cfn = os.path.join(path, fn)
        tfn = os.path.normpath(os.path.join(path, "..", "tmp",
                                            "%d.%d_%d.%s" % (time.time(), pid, counter, host)))
        if (os.path.isdir(cfn)):
            continue
        counter += 1
        if loud and counter % 10 == 0:
            sys.stdout.write("\r%6d" % counter)
            sys.stdout.flush()
        f = file(cfn, "rb")
        msg = get_message(f)
        f.close()
        if not msg:
            print("Malformed message: %s. Skipping..." % cfn)
            continue
        if not msg_train(h, msg, is_spam, force):
            continue
        trained += 1
        if not options["Headers", "include_trained"]:
            continue
        f = file(tfn, "wb")
        f.write(mboxutils.as_string(msg))
        f.close()
        shutil.copystat(cfn, tfn)
        os.rename(tfn, cfn)
        if (removetrained):
            os.unlink(cfn)
    if loud:
        sys.stdout.write("\r%6d" % counter)
        sys.stdout.write("\r Trained %d out of %d messages\n" %
                         (trained, counter))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mhdir_train(h, path, is_spam, force):\n if loud:\n print(\" Reading as MH mailbox\")\n import glob\n counter = 0\n trained = 0\n for fn in glob.glob(os.path.join(path, \"[0-9]*\")):\n counter += 1\n cfn = fn\n tfn = os.path.join(path, \"spambayes.tmp\")\n if loud and counter % 10 == 0:\n sys.stdout.write(\"\\r%6d\" % counter)\n sys.stdout.flush()\n f = file(fn, \"rb\")\n msg = get_message(f)\n f.close()\n if not msg:\n print(\"Malformed message: %s. Skipping...\" % cfn)\n continue\n msg_train(h, msg, is_spam, force)\n trained += 1\n if not options[\"Headers\", \"include_trained\"]:\n continue\n f = file(tfn, \"wb\")\n f.write(mboxutils.as_string(msg))\n f.close()\n shutil.copystat(cfn, tfn)\n os.rename(tfn, cfn)\n if loud:\n sys.stdout.write(\"\\r%6d\" % counter)\n sys.stdout.write(\"\\r Trained %d out of %d messages\\n\" %\n (trained, counter))", "def train_naive_Bayes_classificator(self):\n positive_tweet_tokens = twitter_samples.tokenized(\n 'positive_tweets.json')\n negative_tweet_tokens = twitter_samples.tokenized(\n 'negative_tweets.json')\n\n positive_cleaned_tokens_list = []\n negative_cleaned_tokens_list = []\n\n for tokens in positive_tweet_tokens:\n positive_cleaned_tokens_list.append(\n self.clean_tokens_and_lemmetize(tokens))\n for tokens in negative_tweet_tokens:\n negative_cleaned_tokens_list.append(\n self.clean_tokens_and_lemmetize(tokens))\n\n negative_dataset = [(token, \"negative\") for token in self.format_tweets_for_model(\n negative_cleaned_tokens_list)]\n positive_dataset = [(token, \"positive\") for token in self.format_tweets_for_model(\n positive_cleaned_tokens_list)]\n\n dataset = positive_dataset + negative_dataset\n\n shuffle(dataset)\n\n self.train_data = dataset[:8000]\n self.test_data = dataset[8000:]\n\n self.classifier = NaiveBayesClassifier.train(self.train_data)\n self.bayes_accuracy = classify.accuracy(\n self.classifier, self.test_data)\n with open(TWEET_BAYES_FILENAME, 'wb') as f:\n pickle.dump(\n (self.classifier, self.bayes_accuracy),\n f,\n protocol=pickle.HIGHEST_PROTOCOL)", "def traintenbilacshear(self, simparams, trainparamslist):\n\t\t\n\t\t# We load the training catalog\n\t\t#simcat = megalut.tools.io.readpickle(os.path.join(self.worksimdir, simparams.name, \"groupmeascat_cases.pkl\"))\n\t\tsimcat = megalut.tools.io.readpickle(os.path.join(self.worksimdir, simparams.name, \"groupmeascat_cases_pw.pkl\"))\n\t\t\n\t\tname = \"with_\" + simparams.name\n\t\ttraindir = os.path.join(self.workmldir, name)\n\t\t\n\t\tmegalut.learn.run.train(simcat, traindir, trainparamslist, ncpu=self.ncpu)", "def train():\n pass", "def train_model(database):\n train_set = creating_set(database)\n return NaiveBayesClassifier.train(train_set)", "async def train(self):", "def train():\n num_spam=0 \n num_ham=0\n spam_words=()\n ham_words=()\n pullData = open(\"labels\", \"r\").read()\n dataArray= pullData.split('\\n')\n #print(dataArray)\n dataArrayTrain=dataArray[0:21300] #opens training set from folder 000-070\n \n for eachLine in dataArrayTrain:\n kind,file = eachLine.split(' ')\n file=file.strip('../') \n #print(kind)\n #print(file)\n \n fileDir = os.path.dirname(os.path.realpath('__file__'))\n filepath = os.path.join(fileDir,file) \n print(filepath)\n email=\"\"\n fh = open(filepath, encoding=\"ascii\", errors=\"ignore\")\n for line in fh:\n email += line\n fh.close()\n email= email.lower()\n #print(email)\n email_words = processText(contentEmail(email))\n #print(email_words)\n email_words = tuple(list(set(email_words))) #converted it into a set to avoid 
repetition of words in every email\n #print(email_words)\n if (kind == \"spam\"):\n num_spam+=1 #counts how many spam emails\n spam_words= spam_words + tuple(email_words) #adds every word to a spam tuple\n\n elif (kind==\"ham\"):\n num_ham+=1 #counts how many ham emails\n ham_words= ham_words + tuple(email_words) #adds every word to a ham tuple\n\n spam_words= tuple(spam_words)\n ham_words= tuple(ham_words)\n\n \n count_spam = collections.Counter(spam_words) #counts how many times a words appears in all spam emails\n count_ham = collections.Counter(ham_words) #counts how many times a words appears in all ham emails\n total_count = (count_spam + count_ham).most_common(5000) #adds the total occurences of the words and gets top 5000\n #print(total_count)\n #print(num_ham, num_spam)\n\n top_words = []\n for everyword in total_count:\n top_words.append(everyword[0])\n for everyword in list(count_spam):\n if everyword not in top_words:\n del count_spam[everyword] #deletes words in spam emails not included in top 5000\n for everyword in list(count_ham):\n if everyword not in top_words:\n del count_ham[everyword] #deletes words in ham emails not included in top 5000\n #print(words, count_ham, count_spam)\n\n file_encoder = open(\"top_word_count.txt\", \"w+\", encoding = 'utf-8', errors = 'ignore')\n file_encoder.write(\"HERE ARE YOUR TOP 5000 WORDS: \"+\"\\n\"+str(total_count)+\"\\n\"+\"\\n\"+\"SPAM WORDS: \"+\"\\n\"+str(count_spam)+\"\\n\"+\"\\n\"+\"HAM WORDS: \"+\"\\n\"+str(count_ham))\n file_encoder.close()\n print(\"Counting and getting top 5000 words successful!\")\n probabilityGet(num_spam, num_ham, count_spam, count_ham)", "def train(self, train_data):\n with open(train_data, 'r') as train_data:\n while True:\n tokens = train_data.readline().split()\n pos = train_data.readline().split()\n labels = train_data.readline().split()\n if not tokens or not pos or not labels:\n break\n # Generate transition probabilities\n for i in range(0, len(labels) - self.N_VALUE + 1):\n self.add_label_sequence(labels[i:i + self.N_VALUE])\n # Generate lexical generation probabilities\n for i in range(0, len(tokens)):\n token = tokens[i].lower()\n label = labels[i]\n self.add_word_tag(token, label)\n self.handle_unknowns()", "def test_training(self):\n self.classifier.train(\"test\", self.message)", "def train(self):\n\n if(self.net.killAll):\n self._kill()\n\n empty = False\n state = []\n actions = []\n rewards = []\n while(not empty):\n example = self.globalQueue.get()\n \n for prevState, action, reward in zip(example['prevStates'], example['actions'],example['rewards']):\n state.append(np.array(prevState).reshape(-1,84,84,4))\n actions.append(np.eye(self.actionSpace)[np.array(action)].reshape(-1,self.actionSpace).astype(np.float32))\n rewards.append(np.array(reward).reshape(-1))\n empty = self.globalQueue.empty()\n \n if(len(rewards) != 0 ):\n states = np.array(state).reshape(-1, 84,84,4)\n actions = np.array(actions).reshape(-1,self.actionSpace)\n rewards = np.array(rewards).reshape(-1)\n self.net.train(states, rewards, actions)", "def train(self):\n # Change directory to the code directory\n current_working_directory = os.getcwd()\n\n os.chdir(self.model_parameters[\"NN_code_directory\"])\n\n self.call_training_routine()\n\n # Come back to the original directory\n os.chdir(current_working_directory)", "def train(self, training_data, model_name):\n dataset = []\n for example in training_data:\n entity_offsets = self._convert_example(example)\n dataset.append(self._from_json_to_crf(example, 
entity_offsets))\n\n features = [self._sentence_to_features(s) for s in dataset]\n labels = [self._sentence_to_labels(s) for s in dataset]\n trainer = sklearn_crfsuite.CRF(\n algorithm=\"lbfgs\",\n # coefficient for L1 penalty\n c1=0.1,\n # coefficient for L2 penalty\n c2=0.1,\n # stop earlier\n max_iterations=50,\n # include transitions that are possible, but not observed\n all_possible_transitions=True,\n )\n trainer.fit(features, labels)\n logger.info(\"Creating Model for Intent %s\",model_name)\n joblib.dump(trainer, 'core/agent/model_files/%s.model' % model_name)\n return True", "def train(self, dataset, model_dir):\n raise NotImplementedError", "def train(self, trainfile):", "def train_with_corpus(corpus):\n\n chatbot.set_trainer(\"chatterbot.trainers.ChatterBotCorpusTrainer\")\n chatbot.train(corpus)", "def train(self):\n self.emission_model(self.train_data)\n self.transition_model(self.train_data)", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self, trainfile):\r\n\r\n # We load the data and lower the text\r\n data_train = pd.read_csv(trainfile, sep = \"\\t\", names = [\"polarity\", \"category\", \"word\", \"offsets\", \"sentence\"])\r\n data_train['sentence_l'] = data_train['sentence'].apply(str.lower)\r\n data_train['word'] = data_train['word'].apply(str.lower)\r\n \r\n # We try to keep all the no/nor/not words as this changes radically the sentiment analysis\r\n data_train['sentence_l'] = data_train[\"sentence_l\"].apply(lambda sentence: sentence.replace(\"can\\'t\", \"can not\"))\r\n data_train['sentence_l'] = data_train[\"sentence_l\"].apply(lambda sentence: sentence.replace(\"n\\'t\", \" not\"))\r\n self.stopwords = stopwords.words(\"english\")\r\n self.stopwords.remove('nor')\r\n self.stopwords.remove('no')\r\n self.stopwords.remove('not')\r\n \r\n # We clean the train data and stem the words\r\n self.stemmer = nltk.porter.PorterStemmer()\r\n clean_sentences = []\r\n for row in data_train['sentence_l']:\r\n tokens = word_tokenize(row)\r\n tokens = [word for word in tokens if word.isalpha()]\r\n tokens = [w for w in tokens if not w in self.stopwords] \r\n tokens = [self.stemmer.stem(word) for word in tokens]\r\n clean_sentences.append(tokens)\r\n data_train['stems'] = clean_sentences\r\n \r\n # We also stem the target words to be coherent with the stemmed words in the sentences\r\n data_train['word'] = [self.stemmer.stem(word) for word in data_train['word']]\r\n \r\n # We recreate the sentences with the selected and cleaned words\r\n Classifier.create_sentence = staticmethod(Classifier.create_sentence)\r\n data_train.clean_sentence = Classifier.create_sentence(data_train.stems)\r\n \r\n # We create a BOW vector\r\n self.restaurant_vect = CountVectorizer(min_df=1, tokenizer=nltk.word_tokenize)\r\n reviews_counts = self.restaurant_vect.fit_transform(data_train.clean_sentence)\r\n \r\n # We transform the BOW vector with the tfidf scores\r\n self.tfidf_transformer = TfidfTransformer()\r\n reviews_tfidf = self.tfidf_transformer.fit_transform(reviews_counts)\r\n \r\n polarities = []\r\n for row in data_train['polarity']:\r\n if row == 'positive':\r\n polarities.append(1)\r\n if row == 'neutral':\r\n polarities.append(0)\r\n if row == 'negative':\r\n polarities.append(-1)\r\n data_train['polarity_floats'] = polarities\r\n \r\n # Split data into training and test sets\r\n test_size = 10\r\n X_train, X_test, y_train, y_test = train_test_split(reviews_tfidf, 
data_train.polarity_floats,\r\n test_size = test_size/100, random_state = None)\r\n \r\n ############# CNN MODEL ##############\r\n \r\n from keras.layers import Input, Dense, Embedding, Conv2D, MaxPool2D\r\n from keras.layers import Reshape, Flatten, Dropout, Concatenate\r\n from keras.callbacks import ModelCheckpoint\r\n from keras.optimizers import Adam\r\n from keras.models import Model\r\n \r\n sequence_length = X_train.shape[1] # 7\r\n vocabulary_size = X_train.shape[0] # 1503\r\n embedding_dim = 256\r\n filter_sizes = [3,4,5]\r\n num_filters = 512\r\n drop = 0.5\r\n \r\n epochs = 10\r\n batch_size = 50\r\n \r\n # this returns a tensor\r\n print(\"Creating Model...\")\r\n inputs = Input(shape=(sequence_length,), dtype='int32')\r\n embedding = Embedding(input_dim=vocabulary_size, output_dim=embedding_dim, input_length=sequence_length)(inputs)\r\n reshape = Reshape((sequence_length,embedding_dim,1))(embedding)\r\n \r\n conv_0 = Conv2D(num_filters, kernel_size=(filter_sizes[0], embedding_dim), padding='valid', kernel_initializer='normal', activation='relu')(reshape)\r\n conv_1 = Conv2D(num_filters, kernel_size=(filter_sizes[1], embedding_dim), padding='valid', kernel_initializer='normal', activation='relu')(reshape)\r\n conv_2 = Conv2D(num_filters, kernel_size=(filter_sizes[2], embedding_dim), padding='valid', kernel_initializer='normal', activation='relu')(reshape)\r\n \r\n maxpool_0 = MaxPool2D(pool_size=(sequence_length - filter_sizes[0] + 1, 1), strides=(1,1), padding='valid')(conv_0)\r\n maxpool_1 = MaxPool2D(pool_size=(sequence_length - filter_sizes[1] + 1, 1), strides=(1,1), padding='valid')(conv_1)\r\n maxpool_2 = MaxPool2D(pool_size=(sequence_length - filter_sizes[2] + 1, 1), strides=(1,1), padding='valid')(conv_2)\r\n \r\n concatenated_tensor = Concatenate(axis=1)([maxpool_0, maxpool_1, maxpool_2])\r\n flatten = Flatten()(concatenated_tensor)\r\n dropout = Dropout(drop)(flatten)\r\n output = Dense(units=1, activation='softmax')(dropout)\r\n \r\n # this creates a model that includes\r\n model = Model(inputs=inputs, outputs=output)\r\n \r\n checkpoint = ModelCheckpoint('weights.{epoch:03d}-{val_acc:.4f}.hdf5', monitor='val_acc', verbose=1, save_best_only=True, mode='auto')\r\n adam = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\r\n \r\n model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])\r\n print(\"Training Model...\")\r\n model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, callbacks=[checkpoint], validation_data=(X_test, y_test)) # starts training\r", "def train_with(corpus):\n\n from chatterbot.trainers import ChatterBotCorpusTrainer, UbuntuCorpusTrainer\n import time\n\n if corpus == 'ubu':\n start = time.time()\n corpus_trainer = UbuntuCorpusTrainer(shanisirbot)\n corpus_trainer.train()\n else:\n start = time.time()\n corpus_trainer = ChatterBotCorpusTrainer(shanisirbot)\n if corpus == 'eng':\n corpus_trainer.train(\"chatterbot.corpus.english\")\n elif corpus == 'woz':\n corpus_trainer.train(\"./MULTIWOZ2.1\")\n else:\n print(\"Invalid corpus.\")\n return\n end = time.time()\n time_taken = end - start\n print(f\"\\n\\nThe Shani Sir chatbot has been trained using the corpus {corpus}. 
Time taken: {time_taken}s\")", "def set_train(self):\n for m in self.models.values():\n m.train()", "def train():\n import trace\n trace.train()", "def _Train(self, limit):\n if len(self.Memory)>BATCH_SIZE: \n # Limit of Agents to Train\n for i in range(limit): \n # 'n' number of rounds to train \n for _ in range(50):\n # Get Batch Data\n experiances = self.Memory.sample()\n # Train Models\n self._Learn(self.Actor[i], self.ActorTarget, self.actorOpt[i], experiances)", "def train_from_dir(self, path, cat):\n dirfiles = glob.glob(os.path.join(path, '*'))\n total = len(dirfiles)\n count = 0\n for infile in dirfiles:\n f = open(infile, \"r\")\n text = f.read()\n self.train(text, cat)", "def train(self):\n for data_tier in self.data_tiers:\n fd = open(self.data_path + '/training_data_' + data_tier + '.json', 'r')\n self.preprocessed_data[data_tier] = json.load(fd)\n fd.close()\n tot = len(self.preprocessed_data[data_tier]['features'])\n p = int(math.ceil(tot*0.8))\n training_features = np.array(self.preprocessed_data[data_tier]['features'][:p])\n trend_training_classifications = np.array(self.preprocessed_data[data_tier]['trend_classifications'][:p])\n avg_training_classifications = np.array(self.preprocessed_data[data_tier]['avg_classifications'][:p])\n t1 = datetime.datetime.utcnow()\n self.clf_trend[data_tier].fit(training_features, trend_training_classifications)\n self.clf_avg[data_tier].fit(training_features, avg_training_classifications)\n t2 = datetime.datetime.utcnow()\n td = t2 - t1\n self.logger.info('Training %s for data tier %s took %s', self.name, data_tier, str(td))\n joblib.dump(self.clf_trend[data_tier], self.data_path + '/' + self.name + '_trend_' + data_tier + '.pkl')\n joblib.dump(self.clf_avg[data_tier], self.data_path + '/' + self.name + '_avg_' + data_tier + '.pkl')", "def train(self, batch):\n pass", "def test():\n listpost,listclass = bayes.loaddataset()\n myvocablist = bayes.createlist(listpost)\n tmatrix = list()\n for doc in listpost:\n\t vec = bayes.word2vec(myvocablist,doc)\n\t tmatrix.append(vec)\n p0,p1,pa = bayes.train(tmatrix,listclass)\n testdoc1 = ['love','my','dalmation']\n testvec1 = bayes.word2vec(myvocablist,testdoc1)\n print testdoc1,'classify as :',bayes.classify(testvec1,p0,p1,pa)\n testdoc2 = ['stupid','love']\n testvec2 = bayes.word2vec(myvocablist,testdoc2)\n print testdoc2,'classify as :',bayes.classify(testvec2,p0,p1,pa)" ]
[ "0.6355704", "0.59449077", "0.58821434", "0.57554334", "0.5644402", "0.56377137", "0.563683", "0.55812377", "0.5577881", "0.55669934", "0.55518764", "0.5542003", "0.5526991", "0.5500925", "0.54829085", "0.54665416", "0.5462842", "0.5462842", "0.5462842", "0.5462842", "0.5462842", "0.54460317", "0.5439", "0.54114515", "0.5408981", "0.5406774", "0.5393202", "0.5391224", "0.53900766", "0.5388423" ]
0.6894524
0
Train bayes with a Unix mbox
def mbox_train(h, path, is_spam, force):
    if loud:
        print(" Reading as Unix mbox")
    import mailbox
    import fcntl
    f = file(path, "r+b")
    fcntl.flock(f, fcntl.LOCK_EX)
    mbox = mailbox.PortableUnixMailbox(f, get_message)
    outf = os.tmpfile()
    counter = 0
    trained = 0
    for msg in mbox:
        if not msg:
            print("Malformed message number %d. I can't train on this mbox, sorry." % counter)
            return
        counter += 1
        if loud and counter % 10 == 0:
            sys.stdout.write("\r%6d" % counter)
            sys.stdout.flush()
        if msg_train(h, msg, is_spam, force):
            trained += 1
        if options["Headers", "include_trained"]:
            outf.write(mboxutils.as_string(msg, True))
    if options["Headers", "include_trained"]:
        outf.seek(0)
        try:
            os.ftruncate(f.fileno(), 0)
            f.seek(0)
        except:
            print("Problem truncating mbox--nothing written")
            raise
        try:
            for line in outf:
                f.write(line)
        except:
            print("Problem writing mbox! Sorry, "
                  "I tried my best, but your mail "
                  "may be corrupted.", file=sys.stderr)
            raise
    fcntl.flock(f, fcntl.LOCK_UN)
    f.close()
    if loud:
        sys.stdout.write("\r%6d" % counter)
        sys.stdout.write("\r Trained %d out of %d messages\n" % (trained, counter))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mhdir_train(h, path, is_spam, force):\n if loud:\n print(\" Reading as MH mailbox\")\n import glob\n counter = 0\n trained = 0\n for fn in glob.glob(os.path.join(path, \"[0-9]*\")):\n counter += 1\n cfn = fn\n tfn = os.path.join(path, \"spambayes.tmp\")\n if loud and counter % 10 == 0:\n sys.stdout.write(\"\\r%6d\" % counter)\n sys.stdout.flush()\n f = file(fn, \"rb\")\n msg = get_message(f)\n f.close()\n if not msg:\n print(\"Malformed message: %s. Skipping...\" % cfn)\n continue\n msg_train(h, msg, is_spam, force)\n trained += 1\n if not options[\"Headers\", \"include_trained\"]:\n continue\n f = file(tfn, \"wb\")\n f.write(mboxutils.as_string(msg))\n f.close()\n shutil.copystat(cfn, tfn)\n os.rename(tfn, cfn)\n if loud:\n sys.stdout.write(\"\\r%6d\" % counter)\n sys.stdout.write(\"\\r Trained %d out of %d messages\\n\" %\n (trained, counter))", "def naive_bayes_train(sms_file):\n dic, list1, list2 = tokenize_and_split_bis(sms_file)\n nbr_words = len(list1) + len(list2)\n spam_ratio = len(list1) / nbr_words\n document = list1 + list2\n\n nbr_spam = 0\n for line in list1:\n for word in line:\n nbr_spam += 1\n \n nbr_ham = 0\n for line in list2:\n for word in line:\n nbr_ham += 1\n \n nbr_words = nbr_ham + nbr_spam\n sms_ratio_list = compute_frequencies(nbr_words, document)\n spam_ratio_list = compute_frequencies(nbr_words, list1)\n spamicity = [0. for i in range(nbr_words)]\n\n # print(nbr_words)\n\n for i in range(nbr_words):\n if sms_ratio_list[i] != 0:\n spamicity[i] = spam_ratio_list[i] / sms_ratio_list[i]\n\n return spam_ratio, dic, spamicity", "def bayes_model(feature_train, help_rank_train, model_name):\n model = MultinomialNB()\n model.fit(feature_train, help_rank_train)\n modelpkl = open(model_name,'wb')\n dump(model, modelpkl, -1)\n return", "def naive_bayes_train_bis(sms_file):\n dic, list1, list2 = tokenize_and_split_bis(sms_file)\n nbr_words = len(list1) + len(list2)\n spam_ratio = len(list1) / nbr_words\n document = list1 + list2\n\n nbr_spam = 0\n for line in list1:\n for word in line:\n nbr_spam += 1\n \n nbr_ham = 0\n for line in list2:\n for word in line:\n nbr_ham += 1\n \n nbr_words = nbr_ham + nbr_spam\n sms_ratio_list = compute_frequencies(nbr_words, document)\n spam_ratio_list = compute_frequencies(nbr_words, list1)\n spamicity = [0. for i in range(nbr_words)]\n # print(sms_ratio_list)\n # print(spam_ratio_list)\n spamicity_no = [0. for i in range(nbr_words)]\n spamicity_inv = [0. 
for i in range(nbr_words)]\n\n product_word_dic = 1\n for i in range(nbr_words):\n if sms_ratio_list[i] != 0:\n spamicity[i] = ((spam_ratio_list[i]) / sms_ratio_list[i])\n spamicity_no[i] = 1 - ((spam_ratio_list[i]) / sms_ratio_list[i])\n spamicity_inv[i] = ((1 - (spam_ratio_list[i])) / (1 - sms_ratio_list[i]))\n # print(spamicity_inv[i])\n # if spamicity_inv[i] != 0 :\n product_word_dic *= spamicity_inv[i]\n \n return spam_ratio, dic, spamicity, spamicity_no, spamicity_inv, product_word_dic", "def maildir_train(h, path, is_spam, force, removetrained):\n if loud:\n print(\" Reading %s as Maildir\" % (path,))\n import time\n import socket\n pid = os.getpid()\n host = socket.gethostname()\n counter = 0\n trained = 0\n for fn in os.listdir(path):\n cfn = os.path.join(path, fn)\n tfn = os.path.normpath(os.path.join(path, \"..\", \"tmp\",\n \"%d.%d_%d.%s\" % (time.time(), pid,\n counter, host)))\n if (os.path.isdir(cfn)):\n continue\n counter += 1\n if loud and counter % 10 == 0:\n sys.stdout.write(\"\\r%6d\" % counter)\n sys.stdout.flush()\n f = file(cfn, \"rb\")\n msg = get_message(f)\n f.close()\n if not msg:\n print(\"Malformed message: %s. Skipping...\" % cfn)\n continue\n if not msg_train(h, msg, is_spam, force):\n continue\n trained += 1\n if not options[\"Headers\", \"include_trained\"]:\n continue\n f = file(tfn, \"wb\")\n f.write(mboxutils.as_string(msg))\n f.close()\n shutil.copystat(cfn, tfn)\n os.rename(tfn, cfn)\n if (removetrained):\n os.unlink(cfn)\n if loud:\n sys.stdout.write(\"\\r%6d\" % counter)\n sys.stdout.write(\"\\r Trained %d out of %d messages\\n\" %\n (trained, counter))", "def msg_train(h, msg, is_spam, force):\n try:\n mboxutils.as_string(msg)\n except TypeError:\n return False\n if is_spam:\n spamtxt = options[\"Headers\", \"header_spam_string\"]\n else:\n spamtxt = options[\"Headers\", \"header_ham_string\"]\n oldtxt = msg.get(options[\"Headers\", \"trained_header_name\"])\n if force:\n if oldtxt != None:\n del msg[options[\"Headers\", \"trained_header_name\"]]\n elif oldtxt == spamtxt:\n return False\n elif oldtxt != None:\n del msg[options[\"Headers\", \"trained_header_name\"]]\n h.untrain(msg, not is_spam)\n h.train(msg, is_spam)\n msg.add_header(options[\"Headers\", \"trained_header_name\"], spamtxt)\n return True", "def init_boldpostprocess_wf(\n lower_bpf,\n upper_bpf,\n contigvol,\n bpf_order,\n motion_filter_order,\n motion_filter_type,\n band_stop_min,\n band_stop_max,\n smoothing,\n bold_file,\n head_radius,\n params,\n custom_conf,\n omp_nthreads,\n dummytime,\n output_dir,\n fd_thresh,\n num_bold,\n mni_to_t1w,\n despike,\n brain_template='MNI152NLin2009cAsym',\n layout=None,\n name='bold_postprocess_wf',\n ):\n\n\n TR = layout.get_tr(bold_file)\n file_base = os.path.basename(str(bold_file))\n workflow = Workflow(name=name)\n\n workflow.__desc__ = \"\"\"\nFor each of the {num_bold} BOLD series found per subject (across all\ntasks and sessions), the following post-processing was performed:\n\"\"\".format(num_bold=num2words(num_bold))\n\n if dummytime > 0:\n nvolx = str(np.floor(dummytime / TR))\n workflow.__desc__ = workflow.__desc__ + \"\"\" \\\nbefore nuisance regression and filtering of the data, the first {nvol} were discarded,\n.Furthermore,volumes with framewise-displacement greater than \n{fd_thresh} mm [@power_fd_dvars;@satterthwaite_2013] were flagged as outliers\n and excluded from nuisance regression.\n\"\"\".format(nvol=num2words(nvolx),fd_thresh=fd_thresh)\n\n else:\n workflow.__desc__ = workflow.__desc__ + \"\"\" \\\nbefore nuisance 
regression and filtering of the data, volumes with framewise-displacement greater than \n{fd_thresh} mm [@power_fd_dvars;@satterthwaite_2013] were flagged as outliers\n and excluded from nuisance regression.\n\"\"\".format(fd_thresh=fd_thresh)\n\n workflow.__desc__ = workflow.__desc__ + \"\"\" \\\n{regressors} [@benchmarkp;@satterthwaite_2013]. These nuisance regressors were \nregressed from the BOLD data using linear regression - as implemented in Scikit-Learn {sclver} [@scikit-learn].\nResidual timeseries from this regression were then band-pass filtered to retain signals within the {highpass}-{lowpass} Hz frequency band. \n \"\"\".format(regressors=stringforparams(params=params),sclver=sklearn.__version__,\n lowpass=upper_bpf,highpass=lower_bpf)\n\n\n # get reference and mask\n mask_file,ref_file = _get_ref_mask(fname=bold_file)\n\n inputnode = pe.Node(niu.IdentityInterface(\n fields=['bold_file','ref_file','bold_mask','cutstom_conf','mni_to_t1w','t1w','t1seg']),\n name='inputnode')\n\n inputnode.inputs.bold_file = str(bold_file)\n inputnode.inputs.ref_file = str(ref_file)\n inputnode.inputs.bold_mask = str(mask_file)\n inputnode.inputs.custom_conf = str(custom_conf)\n\n\n outputnode = pe.Node(niu.IdentityInterface(\n fields=['processed_bold', 'smoothed_bold','alff_out','smoothed_alff',\n 'reho_out','sc217_ts', 'sc217_fc','sc417_ts','sc417_fc','ts50_ts','ts50_fc',\n 'gs360_ts', 'gs360_fc','gd333_ts', 'gd333_fc','qc_file','fd']),\n name='outputnode')\n\n mem_gbx = _create_mem_gb(bold_file)\n\n\n fcon_ts_wf = init_fcon_ts_wf(mem_gb=mem_gbx['timeseries'],mni_to_t1w=mni_to_t1w,\n t1w_to_native=_t12native(bold_file),bold_file=bold_file,\n brain_template=brain_template,name=\"fcons_ts_wf\")\n\n alff_compute_wf = init_compute_alff_wf(mem_gb=mem_gbx['timeseries'], TR=TR,\n lowpass=upper_bpf,highpass=lower_bpf,smoothing=smoothing, cifti=False,\n name=\"compute_alff_wf\" )\n\n reho_compute_wf = init_3d_reho_wf(mem_gb=mem_gbx['timeseries'],smoothing=smoothing,\n name=\"afni_reho_wf\")\n\n write_derivative_wf = init_writederivatives_wf(smoothing=smoothing,bold_file=bold_file,\n params=params,cifti=None,output_dir=output_dir,dummytime=dummytime,\n lowpass=upper_bpf,highpass=lower_bpf,TR=TR,omp_nthreads=omp_nthreads,\n name=\"write_derivative_wf\")\n\n confoundmat_wf = pe.Node(ConfoundMatrix(head_radius=head_radius, params=params,\n filtertype=motion_filter_type,cutoff=band_stop_max,\n low_freq=band_stop_max,high_freq=band_stop_min,TR=TR,\n filterorder=motion_filter_order),\n name=\"ConfoundMatrix_wf\", mem_gb=mem_gbx['resampled'])\n\n censorscrub_wf = init_censoring_wf(mem_gb=mem_gbx['timeseries'],TR=TR,custom_conf=custom_conf,head_radius=head_radius,\n contigvol=contigvol,dummytime=dummytime,fd_thresh=fd_thresh,name='censoring')\n \n resdsmoothing_wf = init_resd_smoohthing(mem_gb=mem_gbx['timeseries'],smoothing=smoothing,cifti=False,\n name=\"resd_smoothing_wf\")\n \n filtering_wf = pe.Node(FilteringData(tr=TR,lowpass=upper_bpf,highpass=lower_bpf,\n filter_order=bpf_order),\n name=\"filtering_wf\", mem_gb=mem_gbx['timeseries'])\n\n regression_wf = pe.Node(regress(tr=TR),\n name=\"regression_wf\",mem_gb = mem_gbx['timeseries'])\n\n interpolate_wf = pe.Node(interpolate(TR=TR),\n name=\"interpolation_wf\",mem_gb = mem_gbx['timeseries'])\n\n \n executivesummary_wf =init_execsummary_wf(tr=TR,bold_file=bold_file,layout=layout,\n output_dir=output_dir,mni_to_t1w=mni_to_t1w,omp_nthreads=2)\n \n\n # get transform file for resampling and fcon\n \n \n \n transformfile = 
get_transformfile(bold_file=bold_file,\n mni_to_t1w=mni_to_t1w,t1w_to_native=_t12native(bold_file))\n t1w_mask = get_maskfiles(bold_file=bold_file,mni_to_t1w=mni_to_t1w)[1]\n\n bold2MNI_trans,bold2T1w_trans = get_transformfilex(bold_file=bold_file,\n mni_to_t1w=mni_to_t1w,t1w_to_native=_t12native(bold_file)) \n\n \n resample_parc = pe.Node(ApplyTransforms(\n dimension=3,\n input_image=str(get_template(\n 'MNI152NLin2009cAsym', resolution=1, desc='carpet',\n suffix='dseg', extension=['.nii', '.nii.gz'])),\n interpolation='MultiLabel',transforms=transformfile),\n name='resample_parc')\n \n resample_bold2T1w = pe.Node(ApplyTransforms(\n dimension=3,\n input_image=mask_file,reference_image=t1w_mask,\n interpolation='NearestNeighbor',transforms=bold2T1w_trans),\n name='bold2t1_trans')\n \n resample_bold2MNI = pe.Node(ApplyTransforms(\n dimension=3,\n input_image=mask_file,reference_image=str(get_template(\n 'MNI152NLin2009cAsym', resolution=2, desc='brain',\n suffix='mask', extension=['.nii', '.nii.gz'])),\n interpolation='NearestNeighbor',transforms=bold2MNI_trans),\n name='bold2mni_trans')\n\n qcreport = pe.Node(computeqcplot(TR=TR,bold_file=bold_file,dummytime=dummytime,t1w_mask=t1w_mask,\n template_mask = str(get_template('MNI152NLin2009cAsym', resolution=2, desc='brain',\n suffix='mask', extension=['.nii', '.nii.gz'])),\n head_radius=head_radius), name=\"qc_report\",mem_gb = mem_gbx['resampled'])\n \n\n workflow.connect([\n # connect bold confound matrix to extract confound matrix \n (inputnode, confoundmat_wf, [('bold_file', 'in_file'),]),\n ])\n \n # if there is despiking\n if despike:\n despike_wf = pe.Node(Despike(outputtype='NIFTI_GZ',args='-NEW'),name=\"despike_wf\",mem_gb=mem_gbx['resampled'])\n\n workflow.connect([\n (inputnode,despike_wf,[('bold_file','in_file')]),\n (despike_wf,censorscrub_wf,[('out_file','inputnode.bold')])\n ])\n else:\n workflow.connect([\n (inputnode,censorscrub_wf,[('bold_file','inputnode.bold')]),\n ])\n \n # add neccessary input for censoring if there is one\n workflow.connect([\n\t (inputnode,censorscrub_wf,[('bold_file','inputnode.bold_file'),\n\t ('bold_mask','inputnode.bold_mask')]),\n\t (confoundmat_wf,censorscrub_wf,[('confound_file','inputnode.confound_file')])\n ])\n\n # regression workflow \n workflow.connect([\n\t (inputnode,regression_wf,[('bold_mask','mask')]),\n\t (censorscrub_wf,regression_wf,[('outputnode.bold_censored','in_file'),\n\t ('outputnode.fmriprepconf_censored','confounds'), \n\t\t ('outputnode.customconf_censored','custom_conf')])\n ])\n # interpolation workflow\n workflow.connect([\n\t (inputnode,interpolate_wf,[('bold_file','bold_file'),('bold_mask','mask_file')]),\n\t (censorscrub_wf,interpolate_wf,[('outputnode.tmask','tmask')]),\n\t (regression_wf,interpolate_wf,[('res_file','in_file')]), \n\t])\n # add filtering workflow \n workflow.connect([\n (inputnode,filtering_wf,[('bold_mask','mask')]),\n\t (interpolate_wf,filtering_wf,[('bold_interpolated','in_file')]),\n\n ])\n \n # residual smoothing \n workflow.connect([\n\t (filtering_wf,resdsmoothing_wf,[('filt_file','inputnode.bold_file')]) \n ])\n\n #functional connect workflow\n workflow.connect([\n (inputnode,fcon_ts_wf,[('ref_file','inputnode.ref_file'),]),\n (filtering_wf,fcon_ts_wf,[('filt_file','inputnode.clean_bold'),]),\n ])\n # reho and alff\n workflow.connect([ \n\t (inputnode,alff_compute_wf,[('bold_mask','inputnode.bold_mask')]),\n\t (inputnode,reho_compute_wf,[('bold_mask','inputnode.bold_mask')]),\n\t (filtering_wf, 
alff_compute_wf,[('filt_file','inputnode.clean_bold')]),\n\t (filtering_wf, reho_compute_wf,[('filt_file','inputnode.clean_bold')]),\n ])\n\n # qc report\n workflow.connect([\n (inputnode,qcreport,[('bold_mask','mask_file')]),\n (filtering_wf,qcreport,[('filt_file','cleaned_file')]),\n (censorscrub_wf,qcreport,[('outputnode.tmask','tmask')]),\n (inputnode,resample_parc,[('ref_file','reference_image')]),\n (resample_parc,qcreport,[('output_image','seg_file')]),\n (resample_bold2T1w,qcreport,[('output_image','bold2T1w_mask')]),\n (resample_bold2MNI,qcreport,[('output_image','bold2temp_mask')]),\n (qcreport,outputnode,[('qc_file','qc_file')]),\n ])\n\n \n\n # write to the outputnode, may be use in future\n workflow.connect([\n\t(filtering_wf,outputnode,[('filt_file','processed_bold')]),\n\t(censorscrub_wf,outputnode,[('outputnode.fd','fd')]),\n\t(resdsmoothing_wf,outputnode,[('outputnode.smoothed_bold','smoothed_bold')]),\n\t(alff_compute_wf,outputnode,[('outputnode.alff_out','alff_out'),\n ('outputnode.smoothed_alff','smoothed_alff')]),\n (reho_compute_wf,outputnode,[('outputnode.reho_out','reho_out')]),\n\t (fcon_ts_wf,outputnode,[('outputnode.sc217_ts','sc217_ts' ),('outputnode.sc217_fc','sc217_fc'),\n ('outputnode.sc417_ts','sc417_ts'),('outputnode.sc417_fc','sc417_fc'),\n ('outputnode.gs360_ts','gs360_ts'),('outputnode.gs360_fc','gs360_fc'),\n ('outputnode.gd333_ts','gd333_ts'),('outputnode.gd333_fc','gd333_fc'),\n ('outputnode.ts50_ts','ts50_ts'),('outputnode.ts50_fc','ts50_fc')]),\n\n ])\n \n # write derivatives \n workflow.connect([\n (filtering_wf,write_derivative_wf,[('filt_file','inputnode.processed_bold')]),\n\t (resdsmoothing_wf,write_derivative_wf,[('outputnode.smoothed_bold','inputnode.smoothed_bold')]),\n (censorscrub_wf,write_derivative_wf,[('outputnode.fd','inputnode.fd')]),\n (alff_compute_wf,write_derivative_wf,[('outputnode.alff_out','inputnode.alff_out'),\n ('outputnode.smoothed_alff','inputnode.smoothed_alff')]),\n (reho_compute_wf,write_derivative_wf,[('outputnode.reho_out','inputnode.reho_out')]),\n (fcon_ts_wf,write_derivative_wf,[('outputnode.sc217_ts','inputnode.sc217_ts' ),\n ('outputnode.sc217_fc','inputnode.sc217_fc'),\n ('outputnode.sc417_ts','inputnode.sc417_ts'),\n ('outputnode.sc417_fc','inputnode.sc417_fc'),\n ('outputnode.gs360_ts','inputnode.gs360_ts'),\n ('outputnode.gs360_fc','inputnode.gs360_fc'),\n ('outputnode.gd333_ts','inputnode.gd333_ts'),\n ('outputnode.gd333_fc','inputnode.gd333_fc'),\n ('outputnode.ts50_ts','inputnode.ts50_ts'),\n ('outputnode.ts50_fc','inputnode.ts50_fc')]),\n (qcreport,write_derivative_wf,[('qc_file','inputnode.qc_file')]),\n\n\n\n ])\n functional_qc = pe.Node(FunctionalSummary(bold_file=bold_file,tr=TR),\n name='qcsummary', run_without_submitting=True)\n\n ds_report_qualitycontrol = pe.Node(\n DerivativesDataSink(base_directory=output_dir, desc='qualitycontrol',source_file=bold_file, datatype=\"figures\"),\n name='ds_report_qualitycontrol', run_without_submitting=True)\n\n ds_report_preprocessing = pe.Node(\n DerivativesDataSink(base_directory=output_dir, desc='preprocessing',source_file=bold_file, datatype=\"figures\"),\n name='ds_report_preprocessing', run_without_submitting=True)\n ds_report_postprocessing = pe.Node(\n DerivativesDataSink(base_directory=output_dir,source_file=bold_file, desc='postprocessing', datatype=\"figures\"),\n name='ds_report_postprocessing', run_without_submitting=True)\n\n ds_report_connectivity = pe.Node(\n DerivativesDataSink(base_directory=output_dir,source_file=bold_file, 
desc='connectvityplot', datatype=\"figures\"),\n name='ds_report_connectivity', run_without_submitting=True)\n\n ds_report_rehoplot = pe.Node(\n DerivativesDataSink(base_directory=output_dir,source_file=bold_file, desc='rehoplot', datatype=\"figures\"),\n name='ds_report_rehoplot', run_without_submitting=True)\n\n ds_report_afniplot = pe.Node(\n DerivativesDataSink(base_directory=output_dir,source_file=bold_file, desc='afniplot', datatype=\"figures\"),\n name='ds_report_afniplot', run_without_submitting=True)\n\n workflow.connect([\n (qcreport,ds_report_preprocessing,[('raw_qcplot','in_file')]),\n (qcreport,ds_report_postprocessing ,[('clean_qcplot','in_file')]),\n (qcreport,functional_qc,[('qc_file','qc_file')]),\n (functional_qc,ds_report_qualitycontrol,[('out_report','in_file')]),\n (fcon_ts_wf,ds_report_connectivity,[('outputnode.connectplot','in_file')]),\n (reho_compute_wf,ds_report_rehoplot,[('outputnode.rehohtml','in_file')]),\n (alff_compute_wf,ds_report_afniplot ,[('outputnode.alffhtml','in_file')]),\n ])\n\n\n ## exexetive summary workflow\n workflow.connect([\n (inputnode,executivesummary_wf,[('t1w','inputnode.t1w'),('t1seg','inputnode.t1seg'),\n ('bold_file','inputnode.bold_file'),('bold_mask','inputnode.mask')]),\n\n (regression_wf,executivesummary_wf,[('res_file','inputnode.regdata'),]),\n (filtering_wf,executivesummary_wf,[('filt_file','inputnode.resddata')]),\n (censorscrub_wf,executivesummary_wf,[('outputnode.fd','inputnode.fd')]),\n ]),\n\n return workflow", "def test():\n listpost,listclass = bayes.loaddataset()\n myvocablist = bayes.createlist(listpost)\n tmatrix = list()\n for doc in listpost:\n\t vec = bayes.word2vec(myvocablist,doc)\n\t tmatrix.append(vec)\n p0,p1,pa = bayes.train(tmatrix,listclass)\n testdoc1 = ['love','my','dalmation']\n testvec1 = bayes.word2vec(myvocablist,testdoc1)\n print testdoc1,'classify as :',bayes.classify(testvec1,p0,p1,pa)\n testdoc2 = ['stupid','love']\n testvec2 = bayes.word2vec(myvocablist,testdoc2)\n print testdoc2,'classify as :',bayes.classify(testvec2,p0,p1,pa)", "def train():\n num_spam=0 \n num_ham=0\n spam_words=()\n ham_words=()\n pullData = open(\"labels\", \"r\").read()\n dataArray= pullData.split('\\n')\n #print(dataArray)\n dataArrayTrain=dataArray[0:21300] #opens training set from folder 000-070\n \n for eachLine in dataArrayTrain:\n kind,file = eachLine.split(' ')\n file=file.strip('../') \n #print(kind)\n #print(file)\n \n fileDir = os.path.dirname(os.path.realpath('__file__'))\n filepath = os.path.join(fileDir,file) \n print(filepath)\n email=\"\"\n fh = open(filepath, encoding=\"ascii\", errors=\"ignore\")\n for line in fh:\n email += line\n fh.close()\n email= email.lower()\n #print(email)\n email_words = processText(contentEmail(email))\n #print(email_words)\n email_words = tuple(list(set(email_words))) #converted it into a set to avoid repetition of words in every email\n #print(email_words)\n if (kind == \"spam\"):\n num_spam+=1 #counts how many spam emails\n spam_words= spam_words + tuple(email_words) #adds every word to a spam tuple\n\n elif (kind==\"ham\"):\n num_ham+=1 #counts how many ham emails\n ham_words= ham_words + tuple(email_words) #adds every word to a ham tuple\n\n spam_words= tuple(spam_words)\n ham_words= tuple(ham_words)\n\n \n count_spam = collections.Counter(spam_words) #counts how many times a words appears in all spam emails\n count_ham = collections.Counter(ham_words) #counts how many times a words appears in all ham emails\n total_count = (count_spam + count_ham).most_common(5000) #adds the 
total occurences of the words and gets top 5000\n #print(total_count)\n #print(num_ham, num_spam)\n\n top_words = []\n for everyword in total_count:\n top_words.append(everyword[0])\n for everyword in list(count_spam):\n if everyword not in top_words:\n del count_spam[everyword] #deletes words in spam emails not included in top 5000\n for everyword in list(count_ham):\n if everyword not in top_words:\n del count_ham[everyword] #deletes words in ham emails not included in top 5000\n #print(words, count_ham, count_spam)\n\n file_encoder = open(\"top_word_count.txt\", \"w+\", encoding = 'utf-8', errors = 'ignore')\n file_encoder.write(\"HERE ARE YOUR TOP 5000 WORDS: \"+\"\\n\"+str(total_count)+\"\\n\"+\"\\n\"+\"SPAM WORDS: \"+\"\\n\"+str(count_spam)+\"\\n\"+\"\\n\"+\"HAM WORDS: \"+\"\\n\"+str(count_ham))\n file_encoder.close()\n print(\"Counting and getting top 5000 words successful!\")\n probabilityGet(num_spam, num_ham, count_spam, count_ham)", "def train_naive_Bayes_classificator(self):\n positive_tweet_tokens = twitter_samples.tokenized(\n 'positive_tweets.json')\n negative_tweet_tokens = twitter_samples.tokenized(\n 'negative_tweets.json')\n\n positive_cleaned_tokens_list = []\n negative_cleaned_tokens_list = []\n\n for tokens in positive_tweet_tokens:\n positive_cleaned_tokens_list.append(\n self.clean_tokens_and_lemmetize(tokens))\n for tokens in negative_tweet_tokens:\n negative_cleaned_tokens_list.append(\n self.clean_tokens_and_lemmetize(tokens))\n\n negative_dataset = [(token, \"negative\") for token in self.format_tweets_for_model(\n negative_cleaned_tokens_list)]\n positive_dataset = [(token, \"positive\") for token in self.format_tweets_for_model(\n positive_cleaned_tokens_list)]\n\n dataset = positive_dataset + negative_dataset\n\n shuffle(dataset)\n\n self.train_data = dataset[:8000]\n self.test_data = dataset[8000:]\n\n self.classifier = NaiveBayesClassifier.train(self.train_data)\n self.bayes_accuracy = classify.accuracy(\n self.classifier, self.test_data)\n with open(TWEET_BAYES_FILENAME, 'wb') as f:\n pickle.dump(\n (self.classifier, self.bayes_accuracy),\n f,\n protocol=pickle.HIGHEST_PROTOCOL)", "def main():\n \n # Load the model\n model = EpamModel()\n model.load(\"bayes_1.zip\")\n \n # Load and clean/prepare test data \n x_test = pd.read_csv('BAZA_VALID_INPUT.csv')\n x_test_clean = cleanup_df(x_test)\n \n # Predict\n # FIXME: This currently does probabilistic prediction only!\n y_pred = model.predict(x_test_clean)\n \n with open('output.txt', 'w+') as f:\n for label in y_pred:\n f.write(f'{label}\\n')", "def create_pipelines_lingspam():\n stop = ('stop', StopWordRemovalTransformer())\n lemma = ('lemma', LemmatizeTransformer())\n binz = ('binarizer', CountVectorizer())\n we = ('document embedding', DocEmbeddingVectorizer())\n sel = ('fsel', SelectKBest(score_func=mutual_info_classif, k=100))\n clf = ('cls', BernoulliNB()) # Binary features in the original paper. 
\n return Pipeline([binz, sel, clf]), \\\n Pipeline([stop, binz, sel, clf]), \\\n Pipeline([lemma, binz, sel, clf]), \\\n Pipeline([stop, lemma, binz, sel, clf]), \\\n Pipeline([stop, lemma, we, sel, clf])", "def brain(msg):\n\n def check_message(msg):\n \"\"\"\n Check wich neuron to use.\n :param msg:\n :return:\n \"\"\"\n words_of_message = msg.split()\n find = False\n for key in gc_words:\n if words_of_message in gc_words[key]['groups']:\n getattr(neuron.general_conversations, key)()\n find = True\n break\n for key in fc_words:\n if words_of_message in fc_words[key]['groups']:\n getattr(neuron.forecast, key)()\n find = True\n break\n for key in twitter_words:\n if words_of_message in twitter_words[key]['groups']:\n getattr(neuron.twitter, key)()\n find = True\n break\n for key in pipo_words:\n if words_of_message in pipo_words[key]['groups']:\n getattr(neuron.pipotron, key)()\n find = True\n break\n if not find:\n neuron.general_conversations.undefined()\n\n check_message(msg)", "def open_bold (subject): \n\n\tsub_path = os.path.realpath(subject)\n\tsub_path_BOLD = sub_path + '/BOLD'\n\ttask_run = [ i for i in os.listdir(sub_path_BOLD) if not (i.startswith('.'))]\n\n\treturn task_run", "def msg_to_br(msg_body, agent_directory):\r\n agents_df = agents_data()\r\n agents_df = agents_df.loc[agents_df['Name'] == \"browser\"]\r\n jid = agents_df['User name'].iloc[-1]\r\n msg_br = Message(to=jid)\r\n msg_br.body = msg_body\r\n msg_br.set_metadata(\"performative\", \"inform\")\r\n return msg_br", "def label_notes(all_notes_lines):\n# nf = nemo.core.NeuralModuleFactory(backend=nemo.core.Backend.PyTorch, log_dir=None)\n #note_line_queries = notes.split('\\n')\n #note_line_queries = ['pt arrived obtunded not answering questions responding to voice and sternal rub speaking in garbled voice pupils unequal left 3mm and right 2mm brisk bilaterally trauma sicu MD aware currently recieving keppra IV finished dilantin gtt due for level at 08a EEG today LSCTA on 3LNC sats 100 % SBP 90 s to 100 s HR NSR no ectopy 60 s NS @ 75cc continuous +BS no stools rec d lactulose at OSH to recieve PR q4h abd soft non-tender non-distended foley in place draining adequate amt clear yellow urine skin intact left 20G x2 WNL wife Name NI']\n\n# labels_dict = get_vocab(LABELS_DICT)\n# pretrained_bert_model = nemo_nlp.nm.trainables.get_huggingface_model(\n# bert_config=BERT_CONFIG, pretrained_model_name=PRETRAINED_MODEL_NAME\n# )\n\n# tokenizer = nemo.collections.nlp.data.tokenizers.get_tokenizer(\n# tokenizer_name=TOKENIZER,\n# pretrained_model_name=PRETRAINED_MODEL_NAME,\n# tokenizer_model=TOKENIZER_MODEL,\n# )\n# hidden_size = pretrained_bert_model.hidden_size\n\n load_datalayer_begin_time = time.time()\n data_layer = nemo_nlp.nm.data_layers.BertTokenClassificationInferDataLayer(\n queries=all_notes_lines, tokenizer=tokenizer, max_seq_length=MAX_SEQ_LENGTH, batch_size=2000\n )\n load_datalayer_end_time = time.time()\n\n classifier = TokenClassifier(hidden_size=hidden_size, num_classes=len(labels_dict))\n\n input_ids, input_type_ids, input_mask, _, subtokens_mask = data_layer()\n \n load_hidden_states_begin_time = time.time()\n hidden_states = pretrained_bert_model(input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask)\n load_hidden_states_end_time = time.time()\n load_logits_begin_time = time.time()\n logits = classifier(hidden_states=hidden_states)\n load_logits_end_time = time.time()\n\n ###########################################################################\n\n # Instantiate an optimizer to 
perform `infer` action\n infer_begin_time = time.time()\n evaluated_tensors = nf.infer(tensors=[logits, subtokens_mask], checkpoint_dir=CHECKPOINT_DIR)\n infer_end_time = time.time()\n\n logits, subtokens_mask = [concatenate(tensors) for tensors in evaluated_tensors]\n\n preds = np.argmax(logits, axis=2) \n all_notes_labeled_lines = []\n\n for i, query in enumerate(all_notes_lines):\n logging.info(f'Query: {query}')\n\n pred = preds[i][subtokens_mask[i] > 0.5]\n words = query.strip().split()\n\n #replaced with logic below instead of raising an error:\n '''\n if len(pred) != len(words):\n logging.info('Preds length: ' + str(len(preds[i])))\n logging.info('subtokens_mask length: ' + str(len(subtokens_mask[i])))\n logging.info('Pred length: ' + str(len(pred)))\n logging.info('words length: ' + str(len(words)))\n logging.info('Preds: ' + str(preds.tolist()))\n logging.info('subtokens_mask: ' + str(subtokens_mask[i]))\n logging.info('Pred:' + str(pred.tolist()))\n logging.info('words:' + str(words))\n\n labeled_note = '__Prediction/Word Mismatch__ pred length: ' + str(len(pred)) + ', words length: ' + str(len(words))\n break\n #raise ValueError('Pred and words must be of the same length')\n \n output = ''\n for j, w in enumerate(words):\n output += w\n label = labels_dict[pred[j]]\n if label != NONE_LABEL:\n label = add_brackets(label)\n output += label\n output += ' '\n labeled_note += '\\n' + output.strip()\n logging.info(f'Combined: {output.strip()}')\n\n '''\n\n if len(pred) == len(words):\n output = ''\n for j, w in enumerate(words):\n output += w\n label = labels_dict[pred[j]]\n if label != NONE_LABEL:\n label = add_brackets(label)\n output += label\n output += ' '\n all_notes_labeled_lines.append(output.strip())\n logging.info(f'Combined: {output.strip()}')\n else:\n all_notes_labeled_lines.append(query)\n pred_length = str(len(pred))\n word_length = str(len(words))\n logging.info(f'__Prediction/Word Length Mismatch__ pred length: {pred_length}, words length: {word_length}')\n logging.info(f'{query}')\n \n\n print(str(load_datalayer_end_time-load_datalayer_begin_time)+' seconds to load the datalayer')\n print(str(load_hidden_states_end_time-load_hidden_states_begin_time)+' seconds to load hidden states')\n print(str(load_logits_end_time-load_logits_begin_time)+' seconds to load logits')\n print(str(infer_end_time-infer_begin_time)+' seconds to run inference')\n\n return all_notes_labeled_lines", "def get_naive_Bayes_classificator(self):\n try:\n with open(TWEET_BAYES_FILENAME, 'rb') as f:\n self.classifier, self.bayes_accuracy = pickle.load(f)\n print('It was read sucessfully!')\n except IOError:\n self.train_naive_Bayes_classificator()", "def with_manual_kb_program(agent):\n\n helping = ['?', 'help']\n stopping = ['quit', 'stop', 'exit']\n actions = ['TurnRight', 'TurnLeft', 'Forward', 'Grab', 'Release', 'Shoot', 'Wait']\n queries = [('qp','Query a single proposition;\\n' \\\n + ' E.g. 
\\'qp B1_1\\' or \\'qp OK1_1_3\\', \\'qp HeadingWest4\\''),\n ('qpl','Query a-temporal location-based proposition at all x,y locations;\\n' \\\n + ' E.g., \\'qpl P\\' runs all queries of P<x>_<y>'),\n ('qplt','Query temporal and location-based propositions at all x,y locations;\\n' \\\n + ' E.g., \\'qplt OK 4\\' runs all queries of the OK<x>_<y>_4'),\n ('q!','Run ALL queries for optionally specified time (default is current time);\\n'\\\n + ' (can be time consuming!)')]\n\n def show_commands():\n print \"Available Commands:\"\n print \" The following are valid Hunt The Wumpus actions:\"\n print \" {0}\".format(', '.join(map(lambda a: '\\'{0}\\''.format(a), actions)))\n print \" Enter {0} to get this command info\" \\\n .format(' or '.join(map(lambda a: '\\'{0}\\''.format(a), helping)))\n print \" Enter {0} to stop playing\" \\\n .format(' or '.join(map(lambda a: '\\'{0}\\''.format(a), stopping)))\n print \" Enter 'env' to display current wumpus environment\"\n print \" Enter 'kbsat' to check if the agent's KB is satisfiable\"\n print \" If the KB is NOT satisfiable, then there's a contradiction that needs fixing.\"\n print \" NOTE: A satisfiable KB does not mean there aren't other problems.\"\n print \" Enter 'save-axioms' to save all of the KB axioms to 'kb-axioms.txt'\"\n print \" This will overwrite any existing 'kb-axioms.txt'\"\n print \" Enter 'save-clauses' to save all of the KB clauses to text file 'kb-clauses.txt'\"\n print \" This will overwrite any existing 'kb-clauses.txt'\"\n print \" Enter 'props' to list all of the proposition bases\"\n print \" Queries:\"\n for query,desc in queries:\n print \" {0} : {1}\".format(query,desc)\n\n def show_propositions():\n print \"Proposition Bases:\"\n print \" Atemporal location-based propositions (include x,y index: P<x>_<y>)\"\n print \" '\" + '\\', \\''.join(proposition_bases_atemporal_location) + '\\''\n print \" Perceptual propositions (include time index: P<t>)\"\n print \" '\" + '\\', \\''.join(proposition_bases_perceptual_fluents) + '\\''\n print \" Location fluent propositions (include x,y and time index: P<x>_<y>_<t>)\"\n print \" '\" + '\\', \\''.join(proposition_bases_location_fluents) + '\\''\n print \" State fluent propositions (include time index: P<t>)\"\n print \" '\" + '\\', \\''.join(proposition_bases_state_fluents[:4]) + '\\','\n print \" '\" + '\\', \\''.join(proposition_bases_state_fluents[4:]) + '\\''\n print \" Action propositions (include time index: P<t>)\"\n print \" '\" + '\\', \\''.join(proposition_bases_actions) + '\\''\n\n def write_list_to_text_file(filename,list):\n outfile = file(filename, 'w')\n for item in list:\n outfile.write('{0}\\n'.format(item))\n outfile.close()\n\n def check_kb_status():\n \"\"\"\n Tests whether the agent KB is satisfiable.\n If not, that means the KB contains a contradiction that needs fixing.\n However, being satisfiable does not mean the KB is correct.\n \"\"\"\n result = minisat(agent.kb.clauses)\n if result:\n print \"Agent KB is satisfiable\"\n else:\n print \"Agent KB is NOT satisfiable!! 
There is contradiction that needs fixing!\"\n\n def simple_query(proposition):\n \"\"\"\n Executes a simple query to the agent KB for specified proposition.\n \"\"\"\n result = agent.kb.ask(expr(proposition))\n if result == None:\n print \"{0}: Unknown!\".format(proposition)\n else:\n print \"{0}: {1}\".format(proposition,result)\n\n def location_based_query(proposition_base):\n \"\"\"\n Executes queries for the specified type of proposition, for\n each x,y location.\n proposition_base := as all of the propositions include in their\n name 1 or more indexes (for time and/or x,y location), the\n proposition_base is the simple string representing the base\n of the proposition witout the indexes, which are added in\n code, below.\n time := the time index of the propositions being queried\n \"\"\"\n display_env = WumpusEnvironment(agent.width, agent.height)\n start_time = clock()\n print \"Running queries for: {0}<x>_<y>\".format(proposition_base)\n for x in range(1,agent.width+1):\n for y in range(1,agent.height+1):\n query = expr('{0}{1}_{2}'.format(proposition_base,x,y))\n result = agent.kb.ask(query)\n if result == None:\n display_env.add_thing(Proposition(query,'?'),(x,y))\n else:\n display_env.add_thing(Proposition(query,result),(x,y))\n end_time = clock()\n print \" >>> time elapsed while making queries:\" \\\n + \" {0}\".format(end_time-start_time)\n print display_env.to_string(agent.time,\n title=\"All {0}<x>_<y> queries\".format(proposition_base))\n\n def location_time_based_query(proposition_base, time):\n \"\"\"\n Executes queries for the specified type of proposition, for\n each x,y location, at the specified time.\n proposition_base := as all of the propositions include in their\n name 1 or more indexes (for time and/or x,y location), the\n proposition_base is the simple string representing the base\n of the proposition witout the indexes, which are added in\n code, below.\n time := the time index of the propositions being queried\n \"\"\"\n display_env = WumpusEnvironment(agent.width, agent.height)\n start_time = clock()\n print \"Running queries for: {0}<x>_<y>_{1}\".format(proposition_base,time)\n for x in range(1,agent.width+1):\n for y in range(1,agent.height+1):\n query = expr('{0}{1}_{2}_{3}'.format(proposition_base,x,y,time))\n result = agent.kb.ask(query)\n if result == None:\n display_env.add_thing(Proposition(query,'?'),(x,y))\n else:\n display_env.add_thing(Proposition(query,result),(x,y))\n end_time = clock()\n print \" >>> time elapsed while making queries:\" \\\n + \" {0}\".format(end_time-start_time)\n print display_env.to_string(agent.time,\n title=\"All {0}<x>_<y>_{1} queries\".format(proposition_base,\n time))\n\n def run_all_queries(time):\n check_kb_status()\n for p in proposition_bases_perceptual_fluents:\n simple_query(p + '{0}'.format(time))\n for p in proposition_bases_atemporal_location:\n location_based_query(p)\n for p in proposition_bases_location_fluents:\n location_time_based_query(p,time)\n for p in proposition_bases_state_fluents:\n simple_query(p + '{0}'.format(time))\n # remove the quotes below and add quotes to the following if-statement\n # in order to query all actions from time 0 to now\n '''\n print \"Querying actions from time 0 to {0}\".format(time)\n for p in propositions_actions:\n for t in range(time+1):\n simple_query(p + '{0}'.format(t))\n '''\n if time-1 > 0:\n print \"Actions from previous time: {0}\".format(time-1)\n for p in proposition_bases_actions:\n simple_query(p + '{0}'.format(time-1))\n \n print \"FINISHED running all 
queries for time {0}\".format(time)\n\n def manual_kb_program(percept):\n\n print \"------------------------------------------------------------------\"\n print \"At time {0}\".format(agent.time)\n # update current location and heading based on current KB knowledge state\n print \" HWA.infer_and_set_belief_location()\"\n agent.infer_and_set_belief_location()\n print \" HWA.infer_and_set_belief_heading()\"\n agent.infer_and_set_belief_heading()\n\n percept_sentence = agent.make_percept_sentence(percept)\n print \" HWA.agent_program(): kb.tell(percept_sentence):\"\n print \" {0}\".format(percept_sentence)\n agent.kb.tell(percept_sentence) # update the agent's KB based on percepts\n\n clauses_before = len(agent.kb.clauses)\n print \" HWA.agent_program(): Prepare to add temporal axioms\"\n print \" Number of clauses in KB before: {0}\".format(clauses_before)\n agent.add_temporal_axioms()\n clauses_after = len(agent.kb.clauses)\n print \" Number of clauses in KB after: {0}\".format(clauses_after)\n print \" Total clauses added to KB: {0}\".format(clauses_after - clauses_before)\n agent.number_of_clauses_over_epochs.append(len(agent.kb.clauses))\n\n action = None\n while not action:\n print \"[{0}] You perceive: {1}\".format(agent.time,\n agent.pretty_percept_vector(percept))\n val = raw_input(\"Enter Action ('?' for list of commands): \")\n val = val.strip()\n if val in helping:\n print\n show_commands()\n print\n elif val in stopping:\n action = 'Stop'\n elif val in actions:\n action = val\n elif val == 'env':\n print\n print \"Current wumpus environment:\"\n print agent.env.to_string()\n elif val == 'props':\n print\n show_propositions()\n print\n elif val == 'kbsat':\n check_kb_status()\n print\n elif val == 'save-axioms':\n write_list_to_text_file('kb-axioms.txt',agent.kb.axioms)\n print \" Saved to 'kb-axioms.txt'\"\n print\n elif val == 'save-clauses':\n write_list_to_text_file('kb-clauses.txt',agent.kb.clauses)\n print \" Saved to 'kb-clauses.txt'\"\n print\n else:\n q = val.split(' ')\n if len(q) == 2 and q[0] == 'qp':\n simple_query(q[1])\n print\n elif len(q) == 2 and q[0] == 'qpl':\n location_based_query(q[1])\n print\n elif len(q) == 3 and q[0] == 'qplt':\n location_time_based_query(q[1],q[2])\n print\n elif q[0] == 'q!':\n if len(q) == 2:\n t = int(q[1])\n run_all_queries(t)\n else:\n run_all_queries(agent.time)\n print\n else:\n print \"'{0}' is an invalid command;\".format(val) \\\n + \" try again (enter '?' 
for list of commands)\"\n print\n\n # update KB with selected action\n agent.kb.tell(add_time_stamp(action, agent.time))\n\n agent.time += 1\n \n return action\n\n agent.program = manual_kb_program\n return agent", "def standard_job_set(msg):\n\n run_num = msg['run']\n jobs = [[], [], [], [], []]\n new_dep = {'time': None, 'md5': None}\n\n # Add ROME jobs first\n cmd_prefix = \"./midanalyzer.exe -b -i romeConfig.xml -r \"\n cmd_suffix = \" -m offline -p 0 -q\"\n rome_dir = offline_dir + '/rome-processors'\n\n job = {}\n job['meta'] = datadir + '/shim/.crunchd_metadata.json'\n job['cmd'] = cmd_prefix + str(run_num) + cmd_suffix\n job['clean'] = 'rm histos*.root run*.root'\n\n job['name'] = 'single-laser'\n job['dir'] = rome_dir + '/single-laser'\n job['deps'] = {}\n job['deps'][job['dir'] + '/midanalyzer.exe'] = new_dep\n jobs[0].append(job)\n\n job = copy.copy(job)\n job['name'] = 'double-laser'\n job['dir'] = rome_dir + '/double-laser'\n job['deps'] = {}\n job['deps'][job['dir'] + '/midanalyzer.exe'] = new_dep\n jobs[0].append(job)\n\n job = copy.copy(job)\n job['name'] = 'capacitec'\n job['dir'] = rome_dir + '/capacitec'\n job['deps'] = {}\n job['deps'][job['dir'] + '/midanalyzer.exe'] = new_dep\n jobs[0].append(job)\n\n job = copy.copy(job)\n job['name'] = 'shim-platform'\n job['dir'] = rome_dir + '/shim-platform'\n job['deps'] = {}\n job['deps'][job['dir'] + '/midanalyzer.exe'] = new_dep\n jobs[0].append(job)\n\n job = copy.copy(job)\n job['name'] = 'metrolab'\n job['dir'] = rome_dir + '/metrolab'\n job['deps'] = {}\n job['deps'][job['dir'] + '/midanalyzer.exe'] = new_dep\n jobs[0].append(job)\n\n job = copy.copy(job)\n job['name'] = 'mscb-cart'\n job['dir'] = rome_dir + '/mscb-cart'\n job['deps'] = {}\n job['deps'][job['dir'] + '/midanalyzer.exe'] = new_dep\n jobs[0].append(job)\n\n job = copy.copy(job)\n job['name'] = 'mscb-ring'\n job['dir'] = rome_dir + '/mscb-ring'\n job['deps'] = {}\n job['deps'][job['dir'] + '/midanalyzer.exe'] = new_dep\n jobs[0].append(job)\n\n job = copy.copy(job)\n job['name'] = 'tilt-sensor'\n job['dir'] = rome_dir + '/tilt-sensor'\n job['deps'] = {}\n job['deps'][job['dir'] + '/midanalyzer.exe'] = new_dep\n jobs[0].append(job)\n\n job = copy.copy(job)\n job['name'] = 'hall-probe'\n job['dir'] = rome_dir + '/hall-probe'\n job['deps'] = {}\n job['deps'][job['dir'] + '/midanalyzer.exe'] = new_dep\n jobs[0].append(job)\n\n # Make sure run attributes are extracted.\n job = {}\n job['name'] = 'extract_run_attr'\n job['dir'] = offline_dir + '/crunchers'\n job['cmd'] = 'python scripts/extract_run_attr.py %i' % run_num\n job['clean'] = None\n job['meta'] = datadir + '/crunched/.crunchd_metadata.json'\n job['deps'] = {}\n jobs[0].append(job)\n\n # Now the data bundling job.\n job = {}\n job['name'] = 'make-shim-dataset'\n job['cmd'] = 'bin/make_shim_dataset %i' % run_num\n job['clean'] = None\n job['dir'] = offline_dir + '/crunchers'\n job['meta'] = datadir + '/shim/.crunchd_metadata.json'\n job['deps'] = {}\n job['deps'][offline_dir + '/crunchers/bin/make_shim_dataset'] = new_dep\n job['deps']['data/shim/*%05i.root' % run_num] = new_dep\n jobs[1].append(job)\n\n # Finally apply fixes.\n # job = {}\n # job['name'] = 'fix-probe-remap'\n # job['dir'] = offline_dir\n # job['cmd'] = 'bin/fix_run_probe_map '\n # job['cmd'] += 'data/crunched/run_%05i.root ' % run_num\n # job['cmd'] += 'data/crunched/ %i' % run_num\n # job['clean'] = None\n # job['meta'] = datadir + '/crunched/.crunchd_metadata.json'\n # job['deps'] = {}\n # job['deps'][offline_dir + '/bin/recrunch_fids'] 
= new_dep\n # job['deps'][datadir + '/shim/run_%05i.root' % run_num] = new_dep\n # jobs[2].append(job)\n\n # Automatically generate extracted dataset\n job = {}\n job['name'] = 'extraction'\n job['dir'] = offline_dir + '/crunchers'\n job['cmd'] = 'bin/make_extracted_dataset '\n job['cmd'] += 'data/crunched/run_%05i.root' % run_num\n job['clean'] = None\n job['meta'] = datadir + '/extracted/.crunchd_metadata.json'\n job['deps'] = {}\n job['deps'][offline_dir + '/crunchers/bin/make_extracted_dataset'] = new_dep\n job['deps'][datadir + '/crunched/run_%05i.root' % run_num] = new_dep\n jobs[3].append(job)\n\n return jobs", "def naiveBayesMixture(train_set, train_labels, dev_set, bigram_lambda,unigram_smoothing_parameter, bigram_smoothing_parameter, pos_prior):\n\n # TODO: Write your code here\n # return predicted labels of development set\n\n # counters for Training Phase\n ham = Counter()\n ham_bi = Counter()\n spam = Counter()\n spam_bi = Counter()\n\n for string, label in zip(train_set, train_labels):\n for i in range(len(string)):\n word = string[i]\n if i != len(string)-1:\n word_bi = string[i] + ' ' + string[i+1]\n if label == 1:\n ham_bi.update({word_bi:1})\n else:\n spam_bi.update({word_bi:1})\n if label == 1:\n ham.update({word:1})\n else:\n spam.update({word:1})\n\n ham_len = 0\n for w in ham:\n ham_len += ham[w]\n spam_len = 0\n for w in spam:\n spam_len += spam[w]\n \n hambi_len = 0\n for w in ham_bi:\n hambi_len += ham_bi[w]\n spambi_len = 0\n for w in spam_bi:\n spambi_len += spam_bi[w]\n\n # labels for Development Phase\n dev_labels = []\n # dicts for P(word|ham) and P(word|spam)\n p_ham = {}\n p_spam = {}\n p_hambi = {}\n p_spambi = {}\n\n # develop likelihoods based on dev_set\n for word in ham:\n numerator = ham[word] + unigram_smoothing_parameter\n denominator = ham_len + unigram_smoothing_parameter*(len(ham))\n p_ham[word] = numerator / denominator\n for word in spam:\n numerator = spam[word] + unigram_smoothing_parameter\n denominator = spam_len + unigram_smoothing_parameter*(len(spam))\n p_spam[word] = numerator / denominator\n\n for word_bi in ham_bi:\n numerator = ham_bi[word_bi] + bigram_smoothing_parameter\n denominator = hambi_len + bigram_smoothing_parameter*(len(ham_bi))\n p_hambi[word_bi] = numerator / denominator\n for word_bi in spam_bi:\n numerator = spam_bi[word_bi] + bigram_smoothing_parameter\n denominator = spambi_len + bigram_smoothing_parameter*(len(spam_bi))\n p_spambi[word_bi] = numerator / denominator\n \n numerator = unigram_smoothing_parameter\n denominator = ham_len + unigram_smoothing_parameter*(len(ham))\n p_ham_zero = numerator / denominator\n denominator = spam_len + unigram_smoothing_parameter*(len(spam))\n p_spam_zero = numerator / denominator\n\n numerator = bigram_smoothing_parameter\n denominator = hambi_len + bigram_smoothing_parameter*(len(ham_bi))\n p_hambi_zero = numerator / denominator\n denominator = spambi_len + bigram_smoothing_parameter*(len(spam_bi))\n p_spambi_zero = numerator / denominator\n\n for string in dev_set:\n p_words_ham = math.log(pos_prior)\n p_words_spam = math.log(1 - pos_prior)\n\n p_words_hambi = math.log(pos_prior)\n p_words_spambi = math.log(1 - pos_prior)\n \n for i in range(len(string)):\n word = string[i]\n if word in p_ham:\n p_words_ham += math.log(p_ham[word])\n else:\n p_words_ham += math.log(p_ham_zero)\n if word in p_spam:\n p_words_spam += math.log(p_spam[word])\n else:\n p_words_spam += math.log(p_spam_zero)\n\n if i != len(string)-1:\n word_bi = string[i] + ' ' + string[i+1]\n if word_bi in p_hambi:\n 
p_words_hambi += math.log(p_hambi[word_bi])\n else:\n p_words_hambi += math.log(p_hambi_zero)\n if word_bi in p_spambi:\n p_words_spambi += math.log(p_spambi[word_bi])\n else:\n p_words_spambi += math.log(p_spambi_zero)\n\n p_ham_mix = p_words_ham*(1-bigram_lambda) + p_words_hambi*bigram_lambda\n p_spam_mix = p_words_spam*(1-bigram_lambda) + p_words_spambi*bigram_lambda\n\n dev_labels.append(p_ham_mix >= p_spam_mix)\n\n return dev_labels", "def train_model(database):\n train_set = creating_set(database)\n return NaiveBayesClassifier.train(train_set)", "def run():\n from mne.commands.utils import get_optparser\n\n parser = get_optparser(\n __file__,\n usage=\"usage: %prog options args\",\n prog_prefix=\"mne_bids\",\n version=mne_bids.__version__,\n )\n\n parser.add_option(\n \"--bids_root\",\n dest=\"bids_root\",\n help=\"The path of the folder containing the BIDS \" \"dataset\",\n )\n parser.add_option(\"--subject_id\", dest=\"subject\", help=(\"Subject name\"))\n parser.add_option(\"--session_id\", dest=\"session\", help=\"Session name\")\n parser.add_option(\"--file\", dest=\"fname\", help=\"The path of the crosstalk file\")\n parser.add_option(\n \"--verbose\",\n dest=\"verbose\",\n action=\"store_true\",\n help=\"Whether do generate additional diagnostic output\",\n )\n\n opt, args = parser.parse_args()\n if args:\n parser.print_help()\n parser.error(\n f\"Please do not specify arguments without flags. \" f\"Got: {args}.\\n\"\n )\n\n if opt.bids_root is None:\n parser.print_help()\n parser.error(\"You must specify bids_root\")\n if opt.subject is None:\n parser.print_help()\n parser.error(\"You must specify a subject\")\n\n bids_path = BIDSPath(subject=opt.subject, session=opt.session, root=opt.bids_root)\n\n logger.info(f\"Writing crosstalk file {bids_path.basename} …\")\n write_meg_crosstalk(fname=opt.fname, bids_path=bids_path, verbose=opt.verbose)", "def test_rename_to_mlflow(mlflow):\n atom = ATOMClassifier(X_bin, y_bin, experiment=\"test\", random_state=1)\n atom.run(\"GNB\")\n atom.scoring()\n assert mlflow.call_count == 10 # 9 from scoring + 1 from training", "def make_tdm_packet_list(bfile):\n\n tdm_list = []\n\n if os.path.exists(bfile):\n with open(bfile, mode='rb') as f:\n num_bytes = os.path.getsize(bfile)\n while f.tell() < num_bytes:\n ver_adf = f.read(1)\n ver = int.from_bytes(ver_adf, byteorder='big') >> 4\n adf_words = int.from_bytes(ver_adf, byteorder='big') & 0x0f\n f.read(1) # Read byte. 
Field is RESERVED\n flags = int.from_bytes(f.read(2), byteorder='big')\n mdid = int.from_bytes(f.read(4), byteorder='big')\n seqno = int.from_bytes(f.read(4), byteorder='big')\n msglen = int.from_bytes(f.read(4), byteorder='big')\n secs = int.from_bytes(f.read(4), byteorder='big')\n nanosecs = int.from_bytes(f.read(4), byteorder='big')\n hdrlen = 24 + (adf_words * 4)\n adf_payload = ''\n if adf_words > 0:\n adf_payload = f.read(adf_words * 4)\n payloadlen = msglen - hdrlen\n payload = f.read(payloadlen)\n\n new_msg = TmnsDataMessage(ver=ver, flags=flags, mdid=mdid, seqno=seqno, msglen=msglen, secs=secs,\n nanosecs=nanosecs, adf_payload=adf_payload, payload=payload)\n tdm_list.append(new_msg)\n return tdm_list\n else:\n print(\"The file '{0}' was not found.\".format(bin))\n return tdm_list", "def run(self, fileStore):\n work_dir = fileStore.getLocalTempDir()\n fastaFile = os.path.join(work_dir, 'seq.fa')\n fileStore.readGlobalFile(self.fastaID, fastaFile)\n\n # download the model\n modelFile = os.path.join(work_dir, 'model.knm')\n assert os.environ.get(\"CACTUS_DNA_BRNN_MODEL_ID\") is not None \n modelID = os.environ.get(\"CACTUS_DNA_BRNN_MODEL_ID\")\n fileStore.readGlobalFile(modelID, modelFile)\n\n # ignore existing model flag\n if '-i' in self.dnabrnnOpts:\n i = self.dnabrnnOpts.index('-i')\n del self.dnabrnnOpts[i]\n del self.dnabrnnOpts[i]\n\n cmd = ['dna-brnn', fastaFile] + self.dnabrnnOpts.split() + ['-i', modelFile]\n \n if self.cores:\n cmd += ['-t', str(self.cores)]\n\n bedFile = os.path.join(work_dir, 'regions.bed')\n\n # run dna-brnn to make a bed file\n cactus_call(outfile=bedFile, parameters=cmd)\n\n if self.mergeLength is None:\n self.mergeLength = 0\n if self.minLength is None:\n self.minLength = 0\n \n # merge up the intervals into a new bed file\n mergedBedFile = os.path.join(work_dir, 'filtered.bed')\n merge_cmd = []\n merge_cmd.append(['awk', '{{if($3-$2 > {}) print}}'.format(self.minLength), bedFile])\n merge_cmd.append(['bedtools', 'sort', '-i', '-'])\n merge_cmd.append(['bedtools', 'merge', '-i', '-', '-d', str(self.mergeLength)]) \n cactus_call(outfile=mergedBedFile, parameters=merge_cmd)\n\n maskedFile = os.path.join(work_dir, 'masked.fa')\n \n if self.action in ('softmask', 'hardmask'):\n mask_cmd = ['cactus_fasta_softmask_intervals.py', '--origin=zero', bedFile]\n if self.minLength:\n mask_cmd += ['--minLength={}'.format(self.minLength)]\n if self.action == 'hardmask':\n mask_cmd += ['--mask=N']\n # do the softmasking\n cactus_call(infile=fastaFile, outfile=maskedFile, parameters=mask_cmd)\n else:\n assert self.action == \"clip\"\n # to clip, we need a bed of the regions we want to *keep*. 
We'll start with the whole thing\n allRegionsFile = os.path.join(work_dir, 'chroms.bed')\n cactus_call(parameters=['samtools', 'faidx', fastaFile])\n cactus_call(outfile=allRegionsFile, parameters=['awk', '{print $1 \"\\\\t0\\\\t\" $2}', fastaFile + '.fai'])\n # load the contig lengths\n contig_lengths = {}\n with open(fastaFile + '.fai', 'r') as fai:\n for line in fai:\n toks = line.strip().split('\\t')\n contig_lengths[toks[0]] = int(toks[1])\n # now we cut out the regions\n clippedRegionsFile = os.path.join(work_dir, 'clipped.bed')\n cactus_call(outfile=clippedRegionsFile, parameters=['bedtools', 'subtract', '-a', allRegionsFile, '-b', mergedBedFile])\n # now we make a fiadx input regions\n faidxRegionsFile = os.path.join(work_dir, 'faidx_regions.txt')\n with open(clippedRegionsFile, 'r') as clipFile, open(mergedBedFile, 'a') as mergeFile, open(faidxRegionsFile, 'w') as listFile:\n for line in clipFile:\n toks = line.strip().split(\"\\t\")\n if len(toks) > 2:\n seq, start, end = toks[0], int(toks[1]), int(toks[2])\n if end - start > self.minLength or contig_lengths[seq] <= self.minLength:\n region = seq\n if end - start < contig_lengths[seq]:\n # go from 0-based end exlusive to 1-based end inclusive when\n # converting from BED to samtools region\n region += ':{}-{}'.format(start + 1, end)\n else:\n assert start == 0 and end == contig_lengths[seq]\n listFile.write('{}\\n'.format(region))\n else:\n # the region was too small, we remember it in our filtered bed file\n mergeFile.write(line)\n # and cut the fasta apart with samtools\n cactus_call(outfile=maskedFile, parameters=['samtools', 'faidx', fastaFile, '-r', faidxRegionsFile])\n \n return fileStore.writeGlobalFile(maskedFile), fileStore.writeGlobalFile(bedFile), fileStore.writeGlobalFile(mergedBedFile)", "def main() -> None:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-t\", \"--target-dir\", required=True,\n help=\"Directory to store bigARTM files\")\n parser.add_argument(\"--start-date\", default=\"2015-12-01\",\n help=\"Start date(YYYY-MM-DD) to convert mongodb files to BigARTM format\")\n parser.add_argument(\"--end-date\", default=str(datetime.today().date()),\n help=\"End date(YYYY-MM-DD) to convert mongodb files to BigARTM format\")\n parser.add_argument(\"--min-token-number\", default=10,\n help=\"Minimal number of tokens in tokenized issue\")\n parser.add_argument(\"--min-df\", default=5,\n help=\"Ignore terms that have a document frequency strictly \"\n \"lower than the given threshold (absolute counts)\")\n parser.add_argument(\"--max-df\", default=0.5,\n help=\"Ignore terms that have a document frequency strictly \"\n \"higher than the given threshold (proportion of documents) \")\n args = parser.parse_args()\n topic_issue_model = TopicIssueModel(min_token_number=args.min_token_number, min_df=args.min_df,\n max_df=args.max_df, target_dir=args.target_dir)\n\n corpus = topic_issue_model.build_corpus_from_dumps(args.start_date, args.end_date)\n topic_issue_model.fit(corpus)\n\n with open(\"topic_issue_model.pickle\", \"wb\") as issue_pickle_file:\n pickle.dump(topic_issue_model, issue_pickle_file)", "def train_cell(self, email_path, tag):\n (header_set, body_set) = self.transform(email_path)\n if tag == 'ham':\n self.total_ham += 1\n for token in header_set:\n if self.header_ham.has_key(token):\n self.header_ham[token] += 1.0\n else:\n self.header_ham[token] = 1.0\n if not(self.header_spam.has_key(token)):\n self.header_spam[token] = 0.0\n if not(self.header_confidence.has_key(token)):\n 
self.header_confidence[token] = 1.0\n for token in body_set:\n if self.body_ham.has_key(token):\n self.body_ham[token] += 1.0\n else:\n self.body_ham[token] = 1.0\n if not(self.body_spam.has_key(token)):\n self.body_spam[token] = 0.0\n if not(self.body_confidence.has_key(token)):\n self.body_confidence[token] = 1.0\n else:\n self.total_spam += 1\n for token in header_set:\n if self.header_spam.has_key(token):\n self.header_spam[token] += 1.0\n else:\n self.header_spam[token] = 1.0\n if not(self.header_ham.has_key(token)):\n self.header_ham[token] = 0.0\n if not(self.header_confidence.has_key(token)):\n self.header_confidence[token] = 1.0\n for token in body_set:\n if self.body_spam.has_key(token):\n self.body_spam[token] += 1.0\n else:\n self.body_spam[token] = 1.0\n if not(self.body_ham.has_key(token)):\n self.body_ham[token] = 0.0\n if not(self.body_confidence.has_key(token)):\n self.body_confidence[token] = 1.0", "def traintenbilacshear(self, simparams, trainparamslist):\n\t\t\n\t\t# We load the training catalog\n\t\t#simcat = megalut.tools.io.readpickle(os.path.join(self.worksimdir, simparams.name, \"groupmeascat_cases.pkl\"))\n\t\tsimcat = megalut.tools.io.readpickle(os.path.join(self.worksimdir, simparams.name, \"groupmeascat_cases_pw.pkl\"))\n\t\t\n\t\tname = \"with_\" + simparams.name\n\t\ttraindir = os.path.join(self.workmldir, name)\n\t\t\n\t\tmegalut.learn.run.train(simcat, traindir, trainparamslist, ncpu=self.ncpu)", "def convert_example_to_features_test(example, cur_time, max_seq_length, tokenizer):\n tokens_a = example\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n\n\n\n\n tokens_a = truncate_seq_pair_test(tokens_a, max_seq_length - 3)\n\n\n\n\n savedtoken = []\n for l in range(5):\n if tokens_a[-1] in {\".\", \",\", \"'\", \"`\" , \"'\", \"?\"}:\n savedtoken.insert(0, tokens_a[-1])\n tokens_a.pop()\n\n else:\n break\n\n\n lmlabel = tokens_a[-1]\n lmlabel = tokenizer.vocab[lmlabel]\n tokens_a.pop()\n\n\n # concatenate lm labels and account for CLS, SEP, SEP\n if not savedtoken:\n extra_lm_labels = 1\n else:\n extra_lm_labels = len(savedtoken)\n\n lm_label_ids = ([-1] + len(tokens_a)*[-1] + [lmlabel] + extra_lm_labels * [-1] + [-1])\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambigiously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". 
Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n\n\n\n\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n\n tokens.append(\"[MASK]\")\n if not savedtoken:\n tokens.append(\".\")\n segment_ids.append(0)\n else:\n tokens.extend(savedtoken)\n for _ in range(len(savedtoken)):\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n\n \n segment_ids.append(0)\n segment_ids.append(0)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n lm_label_ids.append(-1)\n\n # print(\"input, segment, lmlabel\")\n # print(len(input_ids))\n # print(len(segment_ids))\n # print(len(lm_label_ids))\n if len(input_ids) != max_seq_length:\n import pdb; pdb.set_trace()\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n assert len(lm_label_ids) == max_seq_length\n\n if cur_time < 5:\n logger.info(\"*** Example ***\")\n logger.info(\"cur_time: %s\" % (cur_time))\n logger.info(\"tokens: %s\" % \" \".join(\n [str(x) for x in tokens]))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n logger.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n logger.info(\"LM label: %s \" % (lm_label_ids))\n\n features = InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n lm_label_ids=lm_label_ids,\n )\n return features", "def create_corpus(crawled_lyrics_file, save=False):\n\n # generating cleaned lyrics corpus from crawled data\n clean_lyrics(crawled_lyrics_file) # the corpus is one sequence of characters per line\n subprocess.call('kytea < ./data/cleaned_lyrics.txt > ./data/kytea_out.txt', shell=True) # processing with kytea\n logger.info(\" Done kytea processing! \")\n\n pron = []\n unk_pat = re.compile(u\"/補助記号/UNK\")\n slash_pat = re.compile(ur\"\\\\\")\n\n with codecs.open(\"data/kytea_out.txt\", 'UTF-8') as f:\n for line in f:\n line = line.decode(encoding=\"utf-8\").strip()\n line = unk_pat.sub(u\"\", line)\n line = slash_pat.sub(u\"\", line)\n\n triplets = line.split(u\" \") # take a look at Kytea output: https://github.com/chezou/Mykytea-python\n seq = []\n for item in triplets:\n try:\n # hir = item.split(u\"/\")[2]\n # if hir != \"UNK\":\n hir = item.split(u\"/\")[0]\n if hir != \"\\\\\":\n seq.append(hir)\n except IndexError:\n continue\n\n candidate_line = unicodedata.normalize(\"NFKC\", u\" \".join(seq))\n candidate_line = re.sub(u\"[A-Za-z]\", u\"\", candidate_line)\n candidate_line = re.sub(u\"\\s+\", u\"\", candidate_line)\n candidate_line = re.sub(u\"\\d+\", u\"5\", candidate_line)\n\n if len(candidate_line) > 10:\n pron.append(candidate_line)\n\n\n NN_input = u\"\\n\".join(pron)\n return NN_input" ]
[ "0.64364576", "0.5568894", "0.5551472", "0.5536631", "0.55087847", "0.5448178", "0.5402387", "0.5222322", "0.5197107", "0.5109843", "0.5092976", "0.5084856", "0.5042653", "0.49968976", "0.49160054", "0.48901597", "0.48886093", "0.48782876", "0.48486832", "0.4847762", "0.48258558", "0.48149282", "0.47561258", "0.47483873", "0.4745108", "0.47411206", "0.47274408", "0.47127724", "0.47063106", "0.47031763" ]
0.7277786
0
Train Bayes with an MH directory
def mhdir_train(h, path, is_spam, force): if loud: print(" Reading as MH mailbox") import glob counter = 0 trained = 0 for fn in glob.glob(os.path.join(path, "[0-9]*")): counter += 1 cfn = fn tfn = os.path.join(path, "spambayes.tmp") if loud and counter % 10 == 0: sys.stdout.write("\r%6d" % counter) sys.stdout.flush() f = file(fn, "rb") msg = get_message(f) f.close() if not msg: print("Malformed message: %s. Skipping..." % cfn) continue msg_train(h, msg, is_spam, force) trained += 1 if not options["Headers", "include_trained"]: continue f = file(tfn, "wb") f.write(mboxutils.as_string(msg)) f.close() shutil.copystat(cfn, tfn) os.rename(tfn, cfn) if loud: sys.stdout.write("\r%6d" % counter) sys.stdout.write("\r Trained %d out of %d messages\n" % (trained, counter))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train(self, trainfile):", "def maildir_train(h, path, is_spam, force, removetrained):\n if loud:\n print(\" Reading %s as Maildir\" % (path,))\n import time\n import socket\n pid = os.getpid()\n host = socket.gethostname()\n counter = 0\n trained = 0\n for fn in os.listdir(path):\n cfn = os.path.join(path, fn)\n tfn = os.path.normpath(os.path.join(path, \"..\", \"tmp\",\n \"%d.%d_%d.%s\" % (time.time(), pid,\n counter, host)))\n if (os.path.isdir(cfn)):\n continue\n counter += 1\n if loud and counter % 10 == 0:\n sys.stdout.write(\"\\r%6d\" % counter)\n sys.stdout.flush()\n f = file(cfn, \"rb\")\n msg = get_message(f)\n f.close()\n if not msg:\n print(\"Malformed message: %s. Skipping...\" % cfn)\n continue\n if not msg_train(h, msg, is_spam, force):\n continue\n trained += 1\n if not options[\"Headers\", \"include_trained\"]:\n continue\n f = file(tfn, \"wb\")\n f.write(mboxutils.as_string(msg))\n f.close()\n shutil.copystat(cfn, tfn)\n os.rename(tfn, cfn)\n if (removetrained):\n os.unlink(cfn)\n if loud:\n sys.stdout.write(\"\\r%6d\" % counter)\n sys.stdout.write(\"\\r Trained %d out of %d messages\\n\" %\n (trained, counter))", "def bayes_model(feature_train, help_rank_train, model_name):\n model = MultinomialNB()\n model.fit(feature_train, help_rank_train)\n modelpkl = open(model_name,'wb')\n dump(model, modelpkl, -1)\n return", "def train_model(database):\n train_set = creating_set(database)\n return NaiveBayesClassifier.train(train_set)", "def train():\n pass", "def train(self):\n backend = self.config.backend.build(self.config, self.tmp_dir)\n backend.train(source_bundle_uri=self.config.source_bundle_uri)", "def train(self, absList, modelFilename):\n pass", "def train_lm(dir_path, pretrain_path, cuda_id=0, cl=3, pretrain_id='wt103', lm_id='', bs=32,\n dropmult=1.0, backwards=False, lr=4e-3, preload=True, bpe=False, startat=0,\n use_clr=True, use_regular_schedule=False, use_discriminative=True, notrain=False, joined=False,\n train_file_id='', early_stopping=True):\n print(f'dir_path {dir_path}; pretrain_path {pretrain_path}; cuda_id {cuda_id}; '\n f'pretrain_id {pretrain_id}; cl {cl}; bs {bs}; backwards {backwards} '\n f'dropmult {dropmult}; lr {lr}; preload {preload}; bpe {bpe};'\n f'startat {startat}; use_clr {use_clr}; notrain {notrain}; joined {joined} '\n f'early stopping {early_stopping}')\n\n if not hasattr(torch._C, '_cuda_setDevice'):\n print('CUDA not available. 
Setting device=-1.')\n cuda_id = -1\n torch.cuda.set_device(cuda_id)\n\n PRE = 'bwd_' if backwards else 'fwd_'\n PRE = 'bpe_' + PRE if bpe else PRE\n IDS = 'bpe' if bpe else 'ids'\n train_file_id = train_file_id if train_file_id == '' else f'_{train_file_id}'\n joined_id = 'lm_' if joined else ''\n lm_id = lm_id if lm_id == '' else f'{lm_id}_'\n lm_path = f'{PRE}{lm_id}lm' # fwd_pretrain_wt103_lm.h5\n enc_path = f'{PRE}{lm_id}lm_enc' # fwd_pretrain_wt103_lm_enc.h5\n\n dir_path = Path(dir_path)\n pretrain_path = Path(pretrain_path)\n pre_lm_path = pretrain_path / 'models' / f'{PRE}{pretrain_id}.h5'\n for p in [dir_path, pretrain_path, pre_lm_path]:\n assert p.exists(), f'Error: {p} does not exist.'\n\n bptt = 70\n em_sz, nh, nl = 400, 1150, 3\n # 优化方式,Adam\n opt_fn = partial(optim.Adam, betas=(0.8, 0.99))\n\n if backwards:\n trn_lm_path = dir_path / 'tmp' / f'trn_{joined_id}{IDS}{train_file_id}_bwd.npy'\n val_lm_path = dir_path / 'tmp' / f'val_{joined_id}{IDS}_bwd.npy'\n else:\n # trn_lm_path = dir_path + 'tmp/trn_ids.npy' or 'tmp/val_ids.npy'\n trn_lm_path = dir_path / 'tmp' / f'trn_{joined_id}{IDS}{train_file_id}.npy'\n val_lm_path = dir_path / 'tmp' / f'val_{joined_id}{IDS}.npy'\n\n print(f'Loading {trn_lm_path} and {val_lm_path}')\n trn_lm = np.load(trn_lm_path)\n # 拼接np.concatenate()\n trn_lm = np.concatenate(trn_lm)\n val_lm = np.load(val_lm_path)\n val_lm = np.concatenate(val_lm)\n\n if bpe:\n vs = 30002\n else:\n itos = pickle.load(open(dir_path / 'tmp' / 'itos.pkl', 'rb')) # the frequence words\n vs = len(itos) # the words number\n # LanguageModelLoader(): Returns a language model iterator that iterates through batches that are of length N(bptt,5)\n trn_dl = LanguageModelLoader(trn_lm, bs, bptt)\n val_dl = LanguageModelLoader(val_lm, bs, bptt)\n md = LanguageModelData(dir_path, 1, vs, trn_dl, val_dl, bs=bs, bptt=bptt)\n\n drops = np.array([0.25, 0.1, 0.2, 0.02, 0.15]) * dropmult # dropmult=1.0\n # md.get_model(): Returns a SequentialRNN model\n # drpoouti: dropout to apply to the input layer,\n # dropouth: dropout to apply to the activations going from one LSTM layer to another\n # wdrop: dropout used for a LSTM's internal (or hidden) recurrent weights\n # dropoute: dropout to apply to the embedding layer\n learner = md.get_model(opt_fn, em_sz, nh, nl,\n dropouti=drops[0], dropout=drops[1], wdrop=drops[2], dropoute=drops[3], dropouth=drops[4])\n # paritial()调用seq2seq,固定函数参数alpha=2,beta=1\n learner.reg_fn = partial(seq2seq_reg, alpha=2, beta=1)\n learner.clip = 0.3\n learner.metrics = [accuracy]\n wd = 1e-7\n\n lrs = np.array([lr / 6, lr / 3, lr, lr / 2]) if use_discriminative else lr\n #\n if preload and startat == 0: # preload=True\n # import pre-training model weight\n wgts = torch.load(pre_lm_path, map_location=lambda storage, loc: storage)\n if bpe:\n learner.model.load_state_dict(wgts)\n else:\n print(f'Loading pretrained weights...')\n # convert wgts['0.encoder.weight'] to array\n ew = to_np(wgts['0.encoder.weight'])\n row_m = ew.mean(0)\n # itos2: the word-to-token mapping\n itos2 = pickle.load(open(pretrain_path / 'tmp' / f'itos_wt103.pkl', 'rb'))\n # collections.defaultdict会返回一个类似dictionary的对象,注意是类似的对象,不是完全一样的对象。\n # 这个defaultdict和dict类,几乎是一样的,除了它重载了一个方法和增加了一个可写的实例变量。\n stoi2 = collections.defaultdict(lambda: -1, {v: k for k, v in enumerate(itos2)})\n nw = np.zeros((vs, em_sz), dtype=np.float32) # em_sz: embedding_size, vs: words dictionary nubs\n nb = np.zeros((vs,), dtype=np.float32)\n # 根据目标文本语料在原始pre-training模型中选择确定 weight\n for i, w in enumerate(itos):\n r = 
stoi2[w]\n if r >= 0:\n nw[i] = ew[r]\n else:\n nw[i] = row_m\n # T(): Convert numpy array into a pytorch tensor.\n wgts['0.encoder.weight'] = T(nw)\n wgts['0.encoder_with_dropout.embed.weight'] = T(np.copy(nw))\n wgts['1.decoder.weight'] = T(np.copy(nw))\n # model.load_state_dict(): 加载模型\n learner.model.load_state_dict(wgts)\n ## learner.freeze_to(-1)\n ## learner.fit(lrs, 1, wds=wd, use_clr=(6,4), cycle_len=1)\n elif preload:\n print('Loading LM that was already fine-tuned on the target data...')\n learner.load(lm_path)\n\n if not notrain: # unfreeze=False\n learner.unfreeze()\n if use_regular_schedule: # use_regular_schedule=False\n print('Using regular schedule. Setting use_clr=None, n_cycles=cl, cycle_len=None.')\n use_clr = None\n n_cycles = cl # c1 the number of epoch\n cl = None\n else:\n n_cycles = 1\n callbacks = []\n if early_stopping:\n callbacks.append(EarlyStopping(learner, lm_path, enc_path, patience=5))\n print('Using early stopping...')\n # use_clr=True,\n learner.fit(lrs, n_cycles, wds=wd, use_clr=(32, 10) if use_clr else None, cycle_len=cl,\n callbacks=callbacks)\n learner.save(lm_path)\n learner.save_encoder(enc_path)\n else:\n print('No more fine-tuning used. Saving original LM...')\n learner.save(lm_path)\n learner.save_encoder(enc_path)", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train_model(data_dir, params={}, feature_version=2):\n # update params\n params.update({\"application\": \"binary\"})\n\n # Read data\n X_train, y_train = read_vectorized_features(data_dir, \"train\", feature_version)\n\n # Filter unlabeled data\n train_rows = (y_train != -1)\n\n # Train\n lgbm_dataset = lgb.Dataset(X_train[train_rows], y_train[train_rows])\n lgbm_model = lgb.train(params, lgbm_dataset)\n\n return lgbm_model", "def train():\n # YOUR TRAINING CODE GOES HERE", "def train(config, data_path):\n register_data(data_path, prefix='yeast_cells_')\n os.makedirs(config.OUTPUT_DIR, exist_ok=True)\n trainer = DefaultTrainer(config)\n trainer.resume_or_load(resume=True)\n trainer.train()\n return trainer", "def train(self, dataset, model_dir):\n raise NotImplementedError", "def train_test_model_batch():\n train=learning.Train_kmer_clf()\n train.run()", "def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)", "def train(self, hyps):\n\n # Print Hyperparameters To Screen\n items = list(hyps.items())\n for k, v in sorted(items):\n print(k+\":\", v)\n\n # Make Save Files\n if \"save_folder\" in hyps:\n save_folder = hyps['save_folder']\n else:\n save_folder = \"./saved_data/\"\n\n if not os.path.exists(save_folder):\n os.mkdir(save_folder)\n base_name = save_folder + hyps['exp_name']\n net_save_file = base_name+\"_net.p\"\n best_net_file = base_name+\"_best.p\"\n optim_save_file = base_name+\"_optim.p\"\n log_file = base_name+\"_log.txt\"\n if hyps['resume']: log = open(log_file, 'a')\n else: log = open(log_file, 'w')\n for k, v in sorted(items):\n log.write(k+\":\"+str(v)+\"\\n\")\n\n # Miscellaneous Variable Prep\n logger = Logger()\n shared_len = hyps['n_tsteps']*hyps['n_rollouts']\n env = gym.make(hyps['env_type'])\n obs = env.reset()\n prepped = hyps['preprocess'](obs)\n hyps['state_shape'] = [hyps['n_frame_stack']] + [*prepped.shape[1:]]\n if hyps['env_type'] == \"Pong-v0\":\n action_size = 3\n else:\n action_size = env.action_space.n*(hyps['env_type']!=\"Pong-v0\")\n hyps['action_shift'] = (4-action_size)*(hyps['env_type']==\"Pong-v0\") \n print(\"Obs 
Shape:,\",obs.shape)\n print(\"Prep Shape:,\",prepped.shape)\n print(\"State Shape:,\",hyps['state_shape'])\n print(\"Num Samples Per Update:\", shared_len)\n print(\"Samples Wasted in Update:\", shared_len % hyps['batch_size'])\n del env\n\n # Make Network\n net = hyps['model'](hyps['state_shape'],action_size,h_size=hyps['h_size'],bnorm=hyps['use_bnorm'])\n if hyps['resume']:\n net.load_state_dict(torch.load(net_save_file))\n base_net = copy.deepcopy(net)\n net = cuda_if(net)\n net.share_memory()\n base_net = cuda_if(base_net)\n\n # Prepare Shared Variables\n shared_data = {'states': cuda_if(torch.zeros(shared_len, *hyps['state_shape']).share_memory_()),\n 'rewards': cuda_if(torch.zeros(shared_len).share_memory_()),\n 'deltas': cuda_if(torch.zeros(shared_len).share_memory_()),\n 'dones': cuda_if(torch.zeros(shared_len).share_memory_()),\n 'actions': torch.zeros(shared_len).long().share_memory_()}\n if net.is_recurrent:\n shared_data['h_states'] = cuda_if(torch.zeros(shared_len, hyps['h_size']).share_memory_())\n n_rollouts = hyps['n_rollouts']\n gate_q = mp.Queue(n_rollouts)\n stop_q = mp.Queue(n_rollouts)\n reward_q = mp.Queue(1)\n reward_q.put(-1)\n\n # Make Runners\n runners = []\n for i in range(hyps['n_envs']):\n runner = Runner(shared_data, hyps, gate_q, stop_q, reward_q)\n runners.append(runner)\n\n # Start Data Collection\n print(\"Making New Processes\")\n procs = []\n for i in range(len(runners)):\n proc = mp.Process(target=runners[i].run, args=(net,))\n procs.append(proc)\n proc.start()\n print(i, \"/\", len(runners), end='\\r')\n col_start_time = time.time()\n for i in range(n_rollouts):\n gate_q.put(i)\n\n # Make Updater\n updater = Updater(base_net, hyps)\n if hyps['resume']:\n updater.optim.load_state_dict(torch.load(optim_save_file))\n updater.optim.zero_grad()\n updater.net.train(mode=True)\n updater.net.req_grads(True)\n\n # Prepare Decay Precursors\n entr_coef_diff = hyps['entr_coef'] - hyps['entr_coef_low']\n epsilon_diff = hyps['epsilon'] - hyps['epsilon_low']\n lr_diff = hyps['lr'] - hyps['lr_low']\n\n # Training Loop\n past_rews = deque([0]*hyps['n_past_rews'])\n last_avg_rew = 0\n best_rew_diff = 0\n best_avg_rew = -1000\n epoch = 0\n T = 0\n while T < hyps['max_tsteps']:\n basetime = time.time()\n epoch += 1\n\n # Collect data\n for i in range(n_rollouts):\n stop_q.get()\n collection_time = time.time() - col_start_time\n\n T += shared_len\n\n # Reward Stats\n avg_reward = reward_q.get()\n reward_q.put(avg_reward)\n last_avg_rew = avg_reward\n if avg_reward > best_avg_rew:\n best_avg_rew = avg_reward\n updater.save_model(best_net_file, None)\n\n # Calculate the Loss and Update nets\n start_time = time.time()\n updater.update_model(shared_data)\n update_time = time.time() - start_time\n net.load_state_dict(updater.net.state_dict()) # update all collector nets\n \n # Resume Data Collection\n col_start_time = time.time()\n for i in range(n_rollouts):\n gate_q.put(i)\n\n # Decay HyperParameters\n if hyps['decay_eps']:\n updater.epsilon = (1-T/(hyps['max_tsteps']))*epsilon_diff + hyps['epsilon_low']\n print(\"New Eps:\", updater.epsilon)\n if hyps['decay_lr']:\n new_lr = (1-T/(hyps['max_tsteps']))*lr_diff + hyps['lr_low']\n updater.new_lr(new_lr)\n print(\"New lr:\", new_lr)\n if hyps['decay_entr']:\n updater.entr_coef = entr_coef_diff*(1-T/(hyps['max_tsteps']))+hyps['entr_coef_low']\n print(\"New Entr:\", updater.entr_coef)\n\n # Periodically save model\n if epoch % 10 == 0:\n updater.save_model(net_save_file, optim_save_file)\n\n # Print Epoch Data\n 
past_rews.popleft()\n past_rews.append(avg_reward)\n max_rew, min_rew = deque_maxmin(past_rews)\n updater.print_statistics()\n avg_action = shared_data['actions'].float().mean().item()\n print(\"Epoch\", epoch, \"– T =\", T)\n print(\"Grad Norm:\",float(updater.norm),\"– Avg Action:\",avg_action,\"– Best AvgRew:\",best_avg_rew)\n print(\"Avg Rew:\", avg_reward, \"– High:\", max_rew, \"– Low:\", min_rew, end='\\n')\n updater.log_statistics(log, T, avg_reward, avg_action, best_avg_rew)\n updater.info['AvgRew'] = avg_reward\n logger.append(updater.info, x_val=T)\n\n # Check for memory leaks\n gc.collect()\n max_mem_used = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss\n print(\"Time:\", time.time()-basetime, \"– Collection:\", collection_time, \"– Update:\", update_time)\n if 'hyp_search_count' in hyps and hyps['hyp_search_count'] > 0 and hyps['search_id'] != None:\n print(\"Search:\", hyps['search_id'], \"/\", hyps['hyp_search_count'])\n print(\"Memory Used: {:.2f} memory\\n\".format(max_mem_used / 1024))\n\n logger.make_plots(base_name)\n log.write(\"\\nBestRew:\"+str(best_avg_rew))\n log.close()\n # Close processes\n for p in procs:\n p.terminate()\n return best_avg_rew", "def go_train(sources, targets, model, dictloc, max_epochs):\n\n\ttrain.trainer(targets, sources, model, \n\t\tsaveto=\"data/trainer.npz\", \n\t\tdictionary=dictloc, \n\t\tmax_epochs=max_epochs, \n\t\tsaveFreq=100, \n\t\treload_=os.path.isfile(\"data/trainer.npz\")\n\t)", "def train(self, absList, modelFilename):\n pass", "def model_train(data_dir,test=False):\r\n \r\n if not os.path.isdir(MODEL_DIR):\r\n os.mkdir(MODEL_DIR)\r\n\r\n if test:\r\n print(\"... test flag on\")\r\n print(\"...... subsetting data\")\r\n print(\"...... subsetting countries\")\r\n \r\n ## fetch time-series formatted data\r\n ts_data = fetch_ts(data_dir)\r\n\r\n ## train a different model for each data sets\r\n for country,df in ts_data.items():\r\n if test and country not in ['all','united_kingdom']:\r\n continue\r\n model_name = re.sub(\"\\.\",\"_\",str(MODEL_VERSION))\r\n saved_model = os.path.join(MODEL_DIR,\r\n \"sl-{}-{}.joblib\".format(country,model_name))\r\n saved_test_model = os.path.join(MODEL_DIR,\r\n \"test-{}-{}.joblib\".format(country,model_name))\r\n saved_baseline = os.path.join(BASELINE_DIR,\r\n \"b-sl-{}-{}.joblib\".format(country,model_name))\r\n saved_test_baseline = os.path.join(BASELINE_DIR,\r\n \"b-test-{}-{}.joblib\".format(country,model_name))\r\n if (test and (not os.path.isfile(saved_test_model))) or ((not test) and (not os.path.isfile(saved_model))):\r\n _model_train(df,country,test=test)\r\n if (test and (not os.path.isfile(saved_test_baseline))) or ((not test) and (not os.path.isfile(saved_baseline))):\r\n _baseline_train(df,country,test=test)", "def train(input_path, model_path, n_estimators):\n\n logger = logging.getLogger(__name__)\n\n logger.info(\"Loading input dataset\")\n train_dataset = pd.read_csv(input_path)\n\n X_train = train_dataset.drop([\"Survived\"], axis=1)\n y_train = train_dataset[\"Survived\"]\n\n logger.info(f\"Training model with n_estimators = {n_estimators}\")\n model = TitanicModel(n_estimators=n_estimators)\n model.fit(X_train, y=y_train)\n\n logger.info(f\"Writing output to {model_path}\")\n model_dir = Path(model_path).parent\n model_dir.mkdir(parents=True, exist_ok=True)\n joblib.dump(model, model_path)", "def train(self, batch):\n pass", "def trainNet():", "def train_naive_Bayes_classificator(self):\n positive_tweet_tokens = twitter_samples.tokenized(\n 'positive_tweets.json')\n 
negative_tweet_tokens = twitter_samples.tokenized(\n 'negative_tweets.json')\n\n positive_cleaned_tokens_list = []\n negative_cleaned_tokens_list = []\n\n for tokens in positive_tweet_tokens:\n positive_cleaned_tokens_list.append(\n self.clean_tokens_and_lemmetize(tokens))\n for tokens in negative_tweet_tokens:\n negative_cleaned_tokens_list.append(\n self.clean_tokens_and_lemmetize(tokens))\n\n negative_dataset = [(token, \"negative\") for token in self.format_tweets_for_model(\n negative_cleaned_tokens_list)]\n positive_dataset = [(token, \"positive\") for token in self.format_tweets_for_model(\n positive_cleaned_tokens_list)]\n\n dataset = positive_dataset + negative_dataset\n\n shuffle(dataset)\n\n self.train_data = dataset[:8000]\n self.test_data = dataset[8000:]\n\n self.classifier = NaiveBayesClassifier.train(self.train_data)\n self.bayes_accuracy = classify.accuracy(\n self.classifier, self.test_data)\n with open(TWEET_BAYES_FILENAME, 'wb') as f:\n pickle.dump(\n (self.classifier, self.bayes_accuracy),\n f,\n protocol=pickle.HIGHEST_PROTOCOL)", "def traintenbilacshear(self, simparams, trainparamslist):\n\t\t\n\t\t# We load the training catalog\n\t\t#simcat = megalut.tools.io.readpickle(os.path.join(self.worksimdir, simparams.name, \"groupmeascat_cases.pkl\"))\n\t\tsimcat = megalut.tools.io.readpickle(os.path.join(self.worksimdir, simparams.name, \"groupmeascat_cases_pw.pkl\"))\n\t\t\n\t\tname = \"with_\" + simparams.name\n\t\ttraindir = os.path.join(self.workmldir, name)\n\t\t\n\t\tmegalut.learn.run.train(simcat, traindir, trainparamslist, ncpu=self.ncpu)", "def train_lg(gateway_name=None):\n users = None\n if gateway_name and len(gateway_name) > 0:\n file_name = \"data/\" + gateway_name+\".txt\"\n users = read_users(file_name)\n\n data = np.genfromtxt(file_name, delimiter='\\t',skip_header=True)\n out_file = \"models/\" + gateway_name + \"_model.pk\"\n else:\n data = np.genfromtxt('data/all.txt', delimiter='\\t', skip_header=True)\n out_file = \"models/all_model.pk\"\n\n model = lg()\n if users:\n model.fit(data[:, 2:], data[:, 0])\n print \"R2(%s)=%f\" % (gateway_name, model.score(data[:, 2:], data[:, 0]))\n pk.dump([model, users], open(out_file, \"w\"))\n else:\n model.fit(data[:, 1:], data[:, 0])\n print \"R2(all)=%f\" % model.score(data[:, 1:], data[:, 0])\n pk.dump(model, open(out_file, \"w\"))", "def train_model(data_dir, rows):\n X, y = read_vectorized_features(data_dir, rows)\n\n # Set params\n # Scores ~0.784 (without tuning and early stopping)\n params = {'boosting_type': 'gbdt',\n 'max_depth' : -1,\n 'objective': 'binary',\n 'nthread': 3, # Updated from nthread\n 'num_leaves': 64,\n 'learning_rate': 0.05,\n 'max_bin': 512,\n 'subsample_for_bin': 200,\n 'subsample': 1,\n 'subsample_freq': 1,\n 'colsample_bytree': 0.8,\n 'reg_alpha': 5,\n 'reg_lambda': 10,\n 'min_split_gain': 0.5,\n 'min_child_weight': 1,\n 'min_child_samples': 5,\n 'scale_pos_weight': 1,\n 'num_class' : 1,\n 'metric' : 'binary_error'}\n\n # Create parameters to search\n gridParams = {\n 'learning_rate': [0.15, 0.2, 0.25, 0.3], #default = 0.1\n 'n_estimators': [40],\n 'num_leaves': [6,8,12,16],\n 'boosting_type' : ['gbdt'],\n 'objective' : ['binary'],\n 'random_state' : [501], # Updated from 'seed'\n 'colsample_bytree' : [0.65, 0.66],\n 'subsample' : [0.7,0.75],\n 'reg_alpha' : [1,1.2],\n 'reg_lambda' : [1,1.2,1.4],\n }\n\n # Create classifier to use. 
Note that parameters have to be input manually\n # not as a dict!\n mdl = lgb.LGBMClassifier(boosting_type= 'gbdt',\n objective = 'binary',\n n_jobs = 3, # Updated from 'nthread'\n silent = True,\n max_depth = params['max_depth'],\n max_bin = params['max_bin'],\n subsample_for_bin = params['subsample_for_bin'],\n subsample = params['subsample'],\n subsample_freq = params['subsample_freq'],\n min_split_gain = params['min_split_gain'],\n min_child_weight = params['min_child_weight'],\n min_child_samples = params['min_child_samples'],\n scale_pos_weight = params['scale_pos_weight'])\n\n # Create the grid\n grid = GridSearchCV(mdl, gridParams,\n verbose=0,\n cv=4,\n n_jobs=2)\n # train\n grid.fit(X, y)\n print(grid.best_params_)\n print(grid.best_score_)\n\n\n # train\n lgbm_dataset = lgb.Dataset(X, y)\n lgbm_model = lgb.train({\"application\": \"binary\"}, lgbm_dataset)\n\n return lgbm_model" ]
[ "0.6380039", "0.6258465", "0.61761475", "0.61341226", "0.61309236", "0.6125414", "0.5975877", "0.5948309", "0.593305", "0.593305", "0.593305", "0.593305", "0.593305", "0.59213877", "0.59187734", "0.59036076", "0.5867951", "0.5858581", "0.5823942", "0.5820531", "0.5797346", "0.5793616", "0.57742274", "0.5770718", "0.5770331", "0.57688713", "0.57565624", "0.5724552", "0.57210416", "0.5709122" ]
0.6462255
0
Runs the pool once, so all combinations of teams are played. After the pool has been played once, a current score comes out showing which teams have which points.
def run_one_pool(curr_pool, goals=False): curr_score = { "Ajax": 0, "Feyenoord": 0, "PSV": 0, "FC Utrecht": 0, "Willem II": 0 } for match in curr_pool: if curr_pool[match]: teamvsteam, chance = match, curr_pool[match] if not goals: outcome = run_one_match(teamvsteam, chance) else: outcome = run_one_match_with_goals(teamvsteam, curr_pool) curr_score = add_score(teamvsteam[0], teamvsteam[1], outcome, curr_score) return curr_score
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scoreTeams(curTeams, oppTeam, pokedex, league, minDistWanted):\n battleData, htmlData = loadBattleData(league)\n similarities = loadSims() \n \n #If not given an opponent team then simply randomly choose losers from the dataset to compare to.\n if len(oppTeam) == 0:\n picks = set([])\n while (len(picks) < NUMLOSINGTEAMS and (not len(picks) == len(battleData))):\n picks.add(random.randint(0,len(battleData)-1))\n\n losers = []\n loserDict = {}\n for i in picks:\n entry = battleData[i]\n winner,loser = determineWinner(entry)\n loserDict[str(loser)] = [winner]\n losers.append( (loser,0) )\n\n #Given opponent team then find similar teams\n else:\n oppTeam = [getSimPokemon(opp,similarities) for opp in oppTeam]\n\n #create dictionary from losers team to the team that beat them.\n loserDict = {}\n sims = []\n for d in battleData:\n winner, loser = determineWinner(d)\n\n wTeam = teamToArray(winner,pokedex)\n lTeam = np.array(teamToArray(loser, pokedex))\n\n score = 0\n for oppNp in oppTeam:\n score+= np.amax(lTeam*oppNp) \n\n if str(loser) in loserDict:\n loserDict[str(loser)].append(winner)\n else:\n #new to dictonary\n loserDict[str(loser)] = [winner]\n\n sims.append((loser, score))\n\n\n sims = sorted(sims, key = lambda x : x[1], reverse = True)\n\n cutoff = min(len(sims),NUMLOSINGTEAMS)\n losers = sims[:cutoff]\n\n #Gather winners to losing teams\n winnersComp = []\n for loser,_ in losers:\n for winner in loserDict[str(loser)]:\n winnersComp.append(teamToArray(winner,pokedex))\n \n topScore = len(winnersComp)*6 #pkmn team size\n\n results = []\n inverted_idx = {}\n\n existsSet = []\n\n #Creates inverted index for teams, while simoultaneously weeding out any teams that are exactly similar.\n for i in range(len(curTeams)):\n team = curTeams[i]\n results.append((team,0))\n sTeam = set(team)\n if not (sTeam in existsSet):\n existsSet.append(sTeam)\n for pkm in team:\n if pkm != EMPTY:\n if pkm in inverted_idx:\n inverted_idx[pkm].append(i)\n else:\n inverted_idx[pkm] = [i]\n \n #Giving the similiarity scores to the winners based off of the inverted index.\n for pkm in inverted_idx:\n for winner in winnersComp:\n wArr = np.array(winner)\n #tArr = getSimPokemon(pkm,similarities)\n tArr = similarities[pkm]\n \n vals = wArr * tArr\n\n score = np.amax(vals)\n\n for i in inverted_idx[pkm]:\n results[i] = (results[i][0],results[i][1]+(score/topScore))\n\n results = sorted(results, key = lambda x : x[1], reverse = True)\n\n if len(results) < NUMTEAMSRETURN:\n if len(results) == 0:\n returnTeams = [[] for x in range(NUMTEAMSRETURN)]\n teamScores = [0 for x in range(NUMTEAMSRETURN)]\n\n else:\n returnTeams = [result[0] for result in results]\n teamScores = [result[1] for result in results]\n else:\n firstResult, firstScore = results[0]\n returnTeams = [firstResult]\n teamScores = [round(firstScore*100,1)]\n returnSets = [set(firstResult)]\n \n i = 1\n\n #Loops through results and adds teams with the proper edit distance away.\n while(len(returnTeams) < NUMTEAMSRETURN and minDistWanted > 0):\n teamToConsider,teamToConsiderScore = results[i]\n \n considerSet = set(teamToConsider)\n add = True\n ##checks the edit distance of teams is above wanted\n for team in returnSets:\n if len(team.union(considerSet)) < len(team)+minDistWanted:\n add = False\n\n ##If indeed above wanted levels then add\n if add:\n returnTeams.append(teamToConsider)\n returnSets.append(considerSet)\n teamScores.append(round(teamToConsiderScore*100,1))\n \n i+=1\n\n if i >= len(results):\n i = 1\n minDistWanted -= 1 \n \n winHtmls = 
[]\n if htmlData != None:\n for team,_ in losers:\n for winner in loserDict[str(team)]:\n winHtmls.extend(htmlData[str(sorted(winner))])\n \n\n return returnTeams, teamScores, winHtmls", "def run_monte_carlo(runs, pool, goals=False):\n total_ranking = {\n 1: [],\n 2: [],\n 3: [],\n 4: [],\n 5: []\n }\n for run in range(runs):\n if goals:\n curr_score = run_one_pool(pool, True)\n else:\n curr_score = run_one_pool(pool)\n total_ranking = rank_teams_of_curr_run(curr_score, total_ranking)\n return total_ranking", "def update(self):\n for pl, result in zip(self._players, self.golf_round.doc.results):\n for score in result.scores:\n n = score.num-1\n # update net \n pl.dct_net['holes'][n] = score.gross - pl._bumps[n]\n pl.update_totals(pl.dct_net)", "def calc_pool(players):\n players = [str(x) for x in players]\n n = len(players)\n for player in players:\n nopool = payoff_nopool(p=percentages[player])\n print(nopool)\n p = {i: percentages[key] for i, key in zip([x for x in range(2, n+1)],\n [x for x in players if x != player])}\n p[1] = percentages[player]\n pool = payoff_n_p(p=p, n=n)\n print(pool)", "def tournament(submissions, num_rounds):\n strategies = [Strategy(submission) for submission in submissions]\n game_num = 1\n games = []\n for i in range(len(strategies)):\n for j in range(i+1, len(strategies)):\n #print(strategies[i].name, strategies[j].name)\n g = Game(strategies[i], strategies[j], num_rounds, game_num)\n score1, score2 = g.play()\n strategies[i].update_score(score1)\n strategies[j].update_score(score2)\n game_num += 1\n games.append(g)\n \n for strat in strategies:\n print(\"Final score for {} submitted by {} is {}\".format(strat.name, strat.author, strat.score))\n write_results(strategies, games)", "def play_one_round(self):\r\n new_solutions = self.breeder.breed(self.solutions)\r\n self.solutions.clear()\r\n self.solutions.extend(new_solutions)\r\n self.mutation_maker.mutate(self.solutions)\r\n self.round += 1\r\n self.simulation_stats.add_stats(self.round, self.solutions)\r\n if self.simulation_viz is SimulationViz.FRONT:\r\n self.report_progress()", "def run(times=100):\n for i in range(times):\n for pool in pools:\n pool.updatesum()\n for unit in units:\n unit.computenewact()\n for unit in units:\n unit.commitnewact()\n print('-' * 20)\n for pool in pools:\n pool.display()", "def matchbetween(self):\n team1_toss_factor, team2_toss_factor = self.toss_factor()\n\n avgScoredByTeam1 = self.team1.attack / self.team2.defense * team1_toss_factor\n avgScoredByTeam2 = self.team2.attack / self.team1.defense * team2_toss_factor\n\n\n while True:\n self.team1score = np.random.poisson(avgScoredByTeam1)\n self.team2score = np.random.poisson(avgScoredByTeam2)\n if self.team1score > self.team2score:\n self.team1.points += 3\n self.team1.won += 1\n self.team2.lost += 1\n self.winner = self.team1\n break\n elif self.team1score < self.team2score:\n self.team2.points += 3\n self.team2.won += 1\n self.team1.lost += 1\n self.winner = self.team2\n break\n else:\n if self.groupcheck is True:\n self.team1.points += 1\n self.team2.points += 1\n self.team1.tie += 1\n self.team2.tie += 1\n break\n self.team1.scored += self.team1score\n self.team2.scored += self.team2score\n self.team1.conceded += self.team2score\n self.team2.conceded += self.team1score\n self.team1.goaldifference += self.team1score-self.team2score\n self.team2.goaldifference += self.team2score-self.team1score", "def main() -> None:\n parser = argparse.ArgumentParser(\n description=\"Optimise your Sportpools player selection\"\n )\n 
parser.add_argument(\n \"-f\",\n \"--file\",\n help=\"Path to file to import\",\n type=str,\n default=\"./data/Tennis Abstract_ 2020 Australian Open Men's Draw Forecast Forecast.htm\",\n required=True,\n )\n parser.add_argument(\n \"-b\",\n \"--black-points\",\n \"--black\",\n help=\"Total number of black points to use\",\n type=int,\n default=20,\n )\n parser.add_argument(\n \"-c\",\n \"--count\",\n \"--player-count\",\n help=\"Number of players to select\",\n type=int,\n default=14,\n )\n parser.add_argument(\n \"-l\", \"--loser\", help=\"Selected loser\", type=str,\n )\n\n args, _ = parser.parse_known_args()\n\n pool = TennisPool(ROUNDS).load_data(args.file).apply_filters().add_features()\n\n emulator = TennisPoolEmulator(pool.get_results())\n\n pool_results = emulator.play_draw(ROUNDS).add_features(ROUNDS).get_results()\n\n selection_optimum = optimise_selection(\n pool_results,\n selection_limit=args.count,\n black_points_limit=args.black_points,\n rounds=ROUNDS,\n loser=args.loser,\n )\n\n LOGGER.info(\"Optimal set of players is as follows:\")\n LOGGER.info(\"\\r\\n%s\", selection_optimum[\"schedule\"].head(25))\n\n LOGGER.info(\n \"The selection of these players results in %d points with %d black points\",\n selection_optimum[\"schedule\"][\"potency\"].sum(),\n selection_optimum[\"schedule\"][\"black\"].sum(),\n )\n LOGGER.info(\"Select your joker in this order:\")\n LOGGER.info(\n \"\\r\\n%s\",\n str(\n selection_optimum[\"schedule\"][selection_optimum[\"schedule\"][\"rounds\"] >= 4]\n .sort_values(by=[\"black\"], ascending=True)\n .head(5)\n ),\n )", "def main():\n # Instantiate a mixed-integer solver.\n solver = pywraplp.Solver('SolveAssignmentProblemMIP',\n pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)\n\n # Number of teams (h and i)\n n = 9\n # Number of rooms (j)\n r = 3\n # Number of timeslots (k)\n t = 4\n # Number of matches\n m = 4\n\n # List of teams\n teams = [i for i in range(9)]\n\n x = {}\n\n for h in range(n):\n for i in range(n):\n for j in range(r):\n for k in range(t):\n if (h == i):\n x[h, i, j, k] = solver.IntVar(0, 0, 'x[%i,%i,%i,%i]' % (h, i, j, k))\n else:\n x[h, i, j, k] = solver.IntVar(0, 1, 'x[%i,%i,%i,%i]' % (h, i, j, k))\n\n # # Objective\n # solver.Minimize(solver.Sum([cost[i][j] * x[i,j] for i in range(num_workers)\n # for j in range(num_tasks)]))\n\n # Constraints\n\n # 2 Ensures that the matrix is the same across the diagonal\n for h in range(n):\n for j in range(r):\n for k in range(t):\n solver.Add((x[h, i, j, k] == x[i, h, j, k]))\n\n # 3 No pair plays each other more than once\n for h in range(n - 1):\n for i in range(h + 1, n):\n solver.Add(solver.Sum([x[h, i, j, k] for j in range(r) for k in range(t)]) <= 1)\n\n # 4 No team can be in more than one place at a time\n for h in range(n):\n for k in range(t):\n solver.Add(solver.Sum([x[h, i, j, k] for i in range(n) for j in range(r)]) <= 2)\n\n # 5 Each team plays exactly m matches\n for i in range(n):\n solver.Add(solver.Sum([x[h, i, j, k] for j in range(r) for k in range(t) for h in range(n)]) == 2 * m)\n\n # 6 Need 3 teams in a room at each timeslot\n for j in range(r):\n for k in range(t - 1):\n solver.Add(solver.Sum([x[h, i, j, k] for i in range(n - 1) for h in range(i + 1, n)]) == 3)\n\n # Need 3 teams in a room at each timeslot\n for g in range(n - 2):\n for h in range(g + 1, n - 1):\n for i in range(h + 1, n):\n solver.Add(solver.Sum(\n [x[g, h, j, k] + x[h, i, j, k] + x[g, i, j, k] for j in range(r) for k in range(t)]) != 2)\n\n sol = solver.Solve()\n\n print('Total cost = ', 
solver.Objective().Value())\n print()\n for h in range(n):\n for i in range(n):\n for j in range(r):\n for k in range(t):\n if x[h, i, j, k].solution_value() > 0:\n print('teams %i,%i assigned to room %i at time %i.' % (h, i, j, k))\n\n print()\n print(\"Time = \", solver.WallTime(), \" milliseconds\")", "def tournament_selection(pool):\n return max(random.sample(pool, len(pool) // 5))", "def rpsls(player_guess):\n\n\n # convert name to player_number using name_to_number\n \n player_number = name_to_number(player_guess)\n \n # compute random guess for comp_number using random.randrange()\n \n comp_number = random.randrange(0, 5)\n \n # compute difference of player_number and comp_number modulo five\n \n difference = (player_number - comp_number) % 5\n \n # use if/elif/else to determine winner (but don't forget that players can tie !)\n \n if difference == 1 or difference == 2:\n result = \"Player wins\"\n elif difference == 3 or difference == 4:\n result = \"Computer wins\"\n else:\n result = \"Player and computer tie!\"\n \n # convert comp_number to name using number_to_name\n \n comp_guess = number_to_name(comp_number)\n \n # print results\n \n print \"Player chooses\", player_guess\n print \"Computer chooses\", comp_guess\n print result\n print", "def opt_play():\n global piles\n global num_piles \n nim_sum = game_nim_sum()\n pile_sum = list(piles)\n for x in range(len(piles)):\n pile_sum[x] = nim_sum^piles[x]\n \n for y in range(len(piles)):\n if pile_sum[y] < piles[y]:\n return (y, piles[y]-pile_sum[y])\n\n for z in range(len(piles)):\n if piles[z] != 0:\n return (z,1)", "def undoScore(list_teams, roundScore):\n\tfor t, s in zip(list_teams, roundScore):\n\t\tt.roundPoints(-s)\n\t\tt.roundNumber -= 1", "def give_round_scores(list_of_players):\n print(\"\\nThe round has ended !\\nWe shall now unveil the cards and the scores!\")\n\n for player in list_of_players:\n cards = [card.name for card in player.cards]\n cards_string = \" \"\n for card in cards:\n cards_string += card + \", \"\n cards_string = cards_string[:-2]\n print(\"\\n{} has these cards: \".format(player.name), cards_string)\n print(\"{} has a score of {}\".format(player.name, player.score()))\n final_scores = [player.score() for player in list_of_players]\n min_score = min(final_scores)\n winners_index = [i for i, x in enumerate(final_scores) if x == min_score]\n if len(winners_index) == 1:\n index_winner = winners_index[0]\n winner = list_of_players[index_winner]\n print(winner.name, \"won the round with a score of {}\".format(winner.score()))\n if len(winners_index) > 1:\n print(\"It's a tie!\")\n winners_names = \"\"\n winners = [list_of_players[i] for i in winners_index]\n for winner in winners:\n winners_names += winner.name\n print(winners_names, \"won the round with a score of \", str(min_score))", "def rank_members(game_obj, team_name):\n clear_rating(game_obj)\n team = getattr(game_obj, team_name)\n pool = game_obj.pool\n map = game_obj.map\n for member in pool.members:\n for dimension in dimension_list:\n if getattr(team, dimension) < getattr(map, dimension):\n setattr(member, 'rating', getattr(member, 'rating') + getattr(member, dimension))\n else:\n setattr(member, 'rating', getattr(member, 'rating') + (getattr(member, dimension) * 0.2))\n return game_obj.pool.max_members()", "def tournament( pl, game ):\r\n\tlosses=[0 for p in pl]\r\n\tfor i in range(len(pl)):\r\n\t\tfor j in range(len(pl)):\r\n\t\t\tif i==j: continue\r\n\t\t\twinner=game([pl[i],pl[j]])\r\n\t\t\t\r\n\t\t\tif 
winner==0:\r\n\t\t\t\tlosses[j]+=2\r\n\t\t\telif winner==1:\r\n\t\t\t\tlosses[i]+=2\r\n\t\t\telif winner==-1:\r\n\t\t\t\tlosses[i]+=1\r\n\t\t\t\tlosses[j]+=1\r\n\t\t\t\tpass\r\n\tz=zip(losses,pl)\r\n\tz.sort()\r\n\treturn z", "def cull(self):\r\n\r\n # From each node in population we get [node_index, node_score] in population_ranking\r\n population_ranking = [[x, self.score(self.population[x])] for x in \\\r\n range(len(self.population))]\r\n population_ranking.sort(key=lambda x: x[1]) # sort by score from lowest to highest\r\n\r\n # The new population is the top population_size guys as ranked\r\n # x[0] is the index of the node\r\n self.population = [self.population[x[0]] for x in population_ranking[-self.population_size:]]\r\n # The actual scores, with the same indices as their node counterparts in population\r\n self.ranking = [x[1] for x in population_ranking[-self.population_size:]]\r\n\r\n #score keeping\r\n self.complete_scores.append(self.ranking)\r\n minimum = self.ranking[0]\r\n maximum = self.ranking[-1]\r\n mean = sum(self.ranking)/self.population_size\r\n median = self.ranking[math.ceil(self.population_size/2)]\r\n self.summary_scores.append([minimum, maximum, mean, median])", "def pick(self, inv, pl, group, sc):\r\n if self.rect.colliderect(pl) and not self.used:\r\n group.remove(self)\r\n inv += ['score {}'.format(id(self))]\r\n sc += [sc[len(sc) - 1] + 100]\r\n self.used = True", "def play_strategic_game():\n board, winner = create_board(), 0\n board[1,1] = 1\n while winner == 0:\n for player in [2,1]:\n board = random_place(board, player)\n winner = evaluate(board)\n if winner != 0:\n break\n return winner", "def calculScore(self):\n for cell in self.notComputeRouter:\n if(cell.isCovered==True):\n self.score += 1000\n self.score += self.budget", "def toss_factor(self):\n self.coin = random.randint(1, 2)\n if self.coin == 1:\n # print(\"Team1 won the toss\")\n self.team1out = 1\n elif self.coin == 2:\n # print(\"Team2 won the toss\")\n self.team2out = 1\n self.toss = random.uniform(0.5, 1)\n if self.team1out == 1:\n self.tossfactorteam1 = (1 - self.toss)\n self.tossfactorteam2 = self.toss\n elif self.team2out == 1:\n self.tossfactorteam2 = (1 - self.toss)\n self.tossfactorteam1 = self.toss\n return self.tossfactorteam1, self.tossfactorteam2", "def playRound(budget: int) -> tuple:\n sum = sumOfDice(random.randint(1,6), random.randint(1,6))\n if sum == 7:\n budget += 4\n return (\"Win\",budget)\n else:\n budget -= 1\n return (\"Loss\",budget)", "def Repeater(algorithm, runs, nationtxt, schemeIn):\n\n scores = {}\n\n # Make sure appropriate range is used for scores\n\n scoreRange = range(0, 10000)\n\n # score range has to be between these two numbers\n for i in scoreRange:\n scores.update({i : 0})\n\n #~ print \"Running \" + str(algorithm)[0:-18] + \"> \" + str(runs) + \" times...\\n\"\n\n\n minScore = 10**40\n\n\n scheme = schemeIn\n avg = (scheme[0] + scheme[1] + scheme[2] + scheme[3] + scheme[4] + scheme[5] + scheme[6]) / 7.\n p0 = (scheme[0] - avg)**2\n p1 = (scheme[1] - avg)**2\n p2 = (scheme[2] - avg)**2\n p3 = (scheme[3] - avg)**2\n p4 = (scheme[4] - avg)**2\n p5 = (scheme[5] - avg)**2\n p6 = (scheme[6] - avg)**2\n var = (p0 + p1 + p2 + p3 + p4 + p5 + p6) / 7.\n sDev = var**0.5\n\n\n q0 = scheme[1] - scheme[0]\n q1 = scheme[2] - scheme[1]\n q2 = scheme[3] - scheme[2]\n q3 = scheme[4] - scheme[3]\n q4 = scheme[5] - scheme[4]\n q5 = scheme[6] - scheme[5]\n\n for i in range(runs):\n nation = algorithm(nationtxt)\n\n score = randScoreFunction(nation, scheme)\n scores[score] 
+= 1\n\n # keep track of best scores and nation\n if score < minScore:\n minScore = score\n bestNation = nation\n\n maxFreq = 0\n\n scoreCount = 0\n\n for score in scores:\n if scores[score] > maxFreq:\n maxFreq = scores[score]\n maxFreqScore = score\n if score == minScore:\n minScoreFreq = scores[score]\n if scores[score] >= 1:\n scoreCount += 1\n\n\n usedTrans = []\n fivePlus = 0\n fivePlusNoDuplicate = 0\n\n one = 0\n two = 0\n three = 0\n four = 0\n five = 0\n six = 0\n seven = 0\n\n for province in bestNation:\n\n if bestNation[province][1] == 1:\n one += 1\n if bestNation[province][1] == 2:\n two += 1\n if bestNation[province][1] == 3:\n three += 1\n if bestNation[province][1] == 4:\n four += 1\n if bestNation[province][1] == 5:\n five += 1\n if bestNation[province][1] == 6:\n six += 1\n if bestNation[province][1] == 7:\n seven += 1\n\n\n if five > 0 or six > 0 or seven > 0:\n fivePlus += 1\n if scheme[3] != scheme[4]:\n fivePlusNoDuplicate += 1\n\n usedTrans.append([one, two, three, four, five, six, seven])\n\n\n return minScore, minScoreFreq, scheme, fivePlus, fivePlusNoDuplicate, usedTrans, scoreCount, sDev, q0, q1, q2, q3, q4, q5, avg", "def __init__(self, teamName):\n \n for i in range (4):\n for j in range(16):\n if (teamName == pm.R1teams[i, j]):\n self.regional, self.position = i, j\n break\n \n self.totWin = pm.totWin[self.regional][self.position]\n self.awayWin = pm.awayWin[self.regional][self.position]\n self.recency = pm.recency[self.regional][self.position]\n self.seed = pm.seed[self.position]\n self.name = pm.R1teams[self.regional][self.position]\n \n # Define Parameter Weights\n weightTotWin = 1/3\n weightAwayWin = 1/2\n weightRecency = 1/6\n \n self.score = weightTotWin*self.totWin + weightAwayWin*self.awayWin + weightRecency*self.recency", "def playerdetail (request,name):\n\n #Basic player details\n player_image = PlayerModel.objects.get(name=name).image\n player_HC = PlayerModel.objects.get(name=name).HC\n player_highfinish = PlayerModel.objects.get(name=name).highfinish\n player_tournum = PlayerModel.objects.get(name=name).tournum\n player_totalpoints = LeaderBoardModel.objects.get(player=name).overall_total\n player_totalrank = LeaderBoardModel.objects.filter(overall_total__gt=player_totalpoints).count() + 1\n\n\n target_holes = 10 #Change to 10 in production\n\n ##START ROUND 1 CALCULATIONS -->\n #Trigger to show score only when round finished\n try:\n rd1holes_played = Rd1SlotModel.objects.get(player_name__name=name).player_holesplayed\n if rd1holes_played is None:\n rd1holes_played = 0\n else:\n rd1holes_played = Rd1SlotModel.objects.get(player_name__name=name).player_holesplayed\n except:\n rd1holes_played = 0\n\n\n\n #Rd1 Player golf score & rank\n if rd1holes_played >= target_holes:\n rd1golf_score = Rd1SlotModel.objects.get(player_name__name=name).player_score\n rd1golf_scoreRank = Rd1SlotModel.objects.filter(player_score__lt=rd1golf_score).count() + 1\n rd1golf_stbl = Rd1SlotModel.objects.get(player_name__name=name).player_stbl\n rd1golf_stblRank = Rd1SlotModel.objects.filter(player_stbl__gt=rd1golf_stbl).count() + 1\n else:\n rd1golf_score = \"-\"\n rd1golf_scoreRank= \"n/a\"\n rd1golf_stbl = \"-\"\n rd1golf_stblRank= \"n/a\"\n\n #Rd1PlayerPoints\n try:\n rd1golf_points = LeaderBoardModel.objects.get(player=name).rd1_golf\n except:\n rd1golf_points = \"-\"\n try:\n rd1golf_rank = LeaderBoardModel.objects.filter(rd1_golf__gt=rd1golf_points).count() + 1\n except:\n rd1golf_rank = \"-\"\n try:\n rd1ctpld_points = 
LeaderBoardModel.objects.get(player=name).rd1_ctpld\n except:\n rd1ctpld_points = \"-\"\n try:\n rd1ctpld_rank = LeaderBoardModel.objects.filter(rd1_ctpld__gt=rd1ctpld_points).count() + 1\n except:\n rd1ctpld_rank = \"-\"\n try:\n rd1bonus_points = LeaderBoardModel.objects.get(player=name).rd1_bonus\n except:\n rd1bonus_points = \"-\"\n try:\n rd1bonus_rank = LeaderBoardModel.objects.filter(rd1_bonus__gt=rd1bonus_points).count() + 1\n except:\n rd1bonus_rank = \"-\"\n try:\n rd1total_points = rd1golf_points + rd1ctpld_points + rd1bonus_points\n except:\n rd1total_points = \"-\"\n try:\n rd1total_rank = LeaderBoardModel.objects.filter(rd1_total__gt=rd1total_points).count() + 1\n except:\n rd1total_rank = \"-\"\n\n try:\n round1overall_points = list(LeaderBoardModel.objects.aggregate(Sum('rd1_total')).values())[0]\n except:\n round1overall_points = 0\n\n\n ##START ROUND 2 CALCULATIONS -->\n #Trigger to show score only when round finished\n try:\n rd2holes_played = Rd2SlotModel.objects.get(player_name__name=name).player_holesplayed\n if rd2holes_played is None:\n rd2holes_played = 0\n else:\n rd2holes_played = Rd2SlotModel.objects.get(player_name__name=name).player_holesplayed\n except:\n rd2holes_played = 0\n\n #Rd2 Player golf score & rank\n if rd2holes_played >= target_holes:\n rd2golf_score = Rd2SlotModel.objects.get(player_name__name=name).player_score\n rd2golf_scoreRank = Rd2SlotModel.objects.filter(player_score__lt=rd2golf_score).count() + 1\n rd2golf_stbl = Rd2SlotModel.objects.get(player_name__name=name).player_stbl\n rd2golf_stblRank = Rd2SlotModel.objects.filter(player_stbl__gt=rd2golf_stbl).count() + 1\n else:\n rd2golf_score = \"-\"\n rd2golf_scoreRank= \"n/a\"\n rd2golf_stbl = \"-\"\n rd2golf_stblRank= \"n/a\"\n\n #Rd2PlayerPoints\n try:\n rd2golf_points = LeaderBoardModel.objects.get(player=name).rd2_golf\n except:\n rd2golf_points = \"-\"\n try:\n rd2golf_rank = LeaderBoardModel.objects.filter(rd2_golf__gt=rd2golf_points).count() + 1\n except:\n rd2golf_rank = \"-\"\n try:\n rd2ctpld_points = LeaderBoardModel.objects.get(player=name).rd2_ctpld\n except:\n rd2ctpld_points = \"-\"\n try:\n rd2ctpld_rank = LeaderBoardModel.objects.filter(rd2_ctpld__gt=rd2ctpld_points).count() + 1\n except:\n rd2ctpld_rank = \"-\"\n try:\n rd2bonus_points = LeaderBoardModel.objects.get(player=name).rd2_bonus\n except:\n rd2bonus_points = \"-\"\n try:\n rd2bonus_rank = LeaderBoardModel.objects.filter(rd2_bonus__gt=rd2bonus_points).count() + 1\n except:\n rd2bonus_rank = \"-\"\n try:\n rd2total_points = rd2golf_points + rd2ctpld_points + rd2bonus_points\n except:\n rd2total_points = \"-\"\n try:\n rd2total_rank = LeaderBoardModel.objects.filter(rd2_total__gt=rd2total_points).count() + 1\n except:\n rd2total_rank = \"-\"\n\n try:\n round2overall_points = list(LeaderBoardModel.objects.aggregate(Sum('rd2_total')).values())[0]\n except:\n round2overall_points = 0\n\n ##START ROUND 3 CALCULATIONS -->\n #Trigger to show score only when round finished\n try:\n rd3holes_played = Rd3SlotModel.objects.get(player_name__name=name).player_holesplayed\n if rd3holes_played is None:\n rd3holes_played = 0\n else:\n rd3holes_played = Rd3SlotModel.objects.get(player_name__name=name).player_holesplayed\n except:\n rd3holes_played = 0\n\n #Rd3 Player golf score & rank\n if rd3holes_played >= target_holes:\n rd3golf_score = Rd3SlotModel.objects.get(player_name__name=name).player_score\n rd3golf_scoreRank = Rd3SlotModel.objects.filter(player_score__lt=rd2golf_score).count() + 1\n rd3golf_stbl = 
Rd3SlotModel.objects.get(player_name__name=name).player_stbl\n rd3golf_stblRank = Rd3SlotModel.objects.filter(player_stbl__gt=rd2golf_stbl).count() + 1\n else:\n rd3golf_score = \"-\"\n rd3golf_scoreRank= \"n/a\"\n rd3golf_stbl = \"-\"\n rd3golf_stblRank= \"n/a\"\n\n #Rd2PlayerPoints\n try:\n rd3golf_points = LeaderBoardModel.objects.get(player=name).rd3_golf\n except:\n rd3golf_points = \"-\"\n try:\n rd3golf_rank = LeaderBoardModel.objects.filter(rd3_golf__gt=rd3golf_points).count() + 1\n except:\n rd3golf_rank = \"-\"\n try:\n rd3ctpld_points = LeaderBoardModel.objects.get(player=name).rd3_ctpld\n except:\n rd3ctpld_points = \"-\"\n try:\n rd3ctpld_rank = LeaderBoardModel.objects.filter(rd3_ctpld__gt=rd3ctpld_points).count() + 1\n except:\n rd3ctpld_rank = \"-\"\n try:\n rd3bonus_points = LeaderBoardModel.objects.get(player=name).rd3_bonus\n except:\n rd3bonus_points = \"-\"\n try:\n rd3bonus_rank = LeaderBoardModel.objects.filter(rd3_bonus__gt=rd3bonus_points).count() + 1\n except:\n rd3bonus_rank = \"-\"\n try:\n rd3total_points = rd3golf_points + rd3ctpld_points + rd3bonus_points\n except:\n rd3total_points = \"-\"\n try:\n rd3total_rank = LeaderBoardModel.objects.filter(rd3_total__gt=rd3total_points).count() + 1\n except:\n rd3total_rank = \"-\"\n\n try:\n round3overall_points = list(LeaderBoardModel.objects.aggregate(Sum('rd3_total')).values())[0]\n except:\n round3overall_points = 0\n\n ##START OTHER_SCORES CALCULATIONS -->\n\n #Other Player Points\n try:\n social_points = LeaderBoardModel.objects.get(player=name).social\n except:\n social_points = \"-\"\n try:\n social_rank = LeaderBoardModel.objects.filter(social__gt=social_points).count() + 1\n except:\n social_rank = \"-\"\n try:\n bestdressed_points = LeaderBoardModel.objects.get(player=name).best_dressed\n except:\n bestdressed_points = \"-\"\n try:\n bestdressed_rank = LeaderBoardModel.objects.filter(best_dressed__gt=bestdressed_points).count() + 1\n except:\n bestdressed_rank = \"-\"\n try:\n tipping_points = LeaderBoardModel.objects.get(player=name).tipping\n except:\n tipping_points = \"-\"\n try:\n tipping_rank = LeaderBoardModel.objects.filter(tipping__gt=tipping_points).count() + 1\n except:\n tipping_rank = \"-\"\n try:\n othertotal_points = social_points + bestdressed_points + tipping_points\n except:\n othertotal_points = \"-\"\n try:\n othertotal_rank = LeaderBoardModel.objects.filter(other_total__gt=othertotal_points).count() + 1\n except:\n othertotal_rank = \"-\"\n\n try:\n otheroverall_points = list(LeaderBoardModel.objects.aggregate(Sum('other_total')).values())[0]\n except:\n otheroverall_points = 0\n\n## == END SCORING CALCS ==\n\n context ={\n 'name': name,\n 'player_image': player_image,\n 'player_HC': player_HC,\n 'player_highfinish': player_highfinish,\n 'player_tournum': player_tournum,\n 'player_totalpoints': player_totalpoints,\n 'player_totalrank': player_totalrank,\n 'rd1golf_score': rd1golf_score,\n 'rd1golf_stbl': rd1golf_stbl,\n 'rd1golf_scoreRank': rd1golf_scoreRank,\n 'rd1golf_stblRank': rd1golf_stblRank,\n 'rd1golf_points': rd1golf_points,\n 'rd1golf_rank': rd1golf_rank,\n 'rd1ctpld_points': rd1ctpld_points,\n 'rd1ctpld_rank': rd1ctpld_rank,\n 'rd1bonus_points': rd1bonus_points,\n 'rd1bonus_rank': rd1bonus_rank,\n 'rd1total_points': rd1total_points,\n 'rd1total_rank': rd1total_rank,\n 'round1overall_points': round1overall_points,\n 'rd2golf_score': rd2golf_score,\n 'rd2golf_stbl': rd2golf_stbl,\n 'rd2golf_scoreRank': rd2golf_scoreRank,\n 'rd2golf_stblRank': rd2golf_stblRank,\n 
'rd2golf_points': rd2golf_points,\n 'rd2golf_rank': rd2golf_rank,\n 'rd2ctpld_points': rd2ctpld_points,\n 'rd2ctpld_rank': rd2ctpld_rank,\n 'rd2bonus_points': rd2bonus_points,\n 'rd2bonus_rank': rd2bonus_rank,\n 'rd2total_points': rd2total_points,\n 'rd2total_rank': rd2total_rank,\n 'round2overall_points': round2overall_points,\n 'rd3golf_score': rd3golf_score,\n 'rd3golf_stbl': rd3golf_stbl,\n 'rd3golf_scoreRank': rd3golf_scoreRank,\n 'rd3golf_stblRank': rd3golf_stblRank,\n 'rd3golf_points': rd3golf_points,\n 'rd3golf_rank': rd3golf_rank,\n 'rd3ctpld_points': rd3ctpld_points,\n 'rd3ctpld_rank': rd3ctpld_rank,\n 'rd3bonus_points': rd3bonus_points,\n 'rd3bonus_rank': rd3bonus_rank,\n 'rd3total_points': rd3total_points,\n 'rd3total_rank': rd3total_rank,\n 'round3overall_points': round3overall_points,\n 'social_points': social_points,\n 'social_rank': social_rank,\n 'bestdressed_points': bestdressed_points,\n 'bestdressed_rank': bestdressed_rank,\n 'tipping_points': tipping_points,\n 'tipping_rank': tipping_rank,\n 'othertotal_points': othertotal_points,\n 'othertotal_rank': othertotal_rank,\n 'otheroverall_points': otheroverall_points,\n\n }\n\n return render(request, 'playerDetail.html', context=context)", "def counter_opponent_adv(self):\n\n # get essential values\n board = self.get_game_space()\n affinity = self.get_affinity()\n opaffinity = self.get_opponent().get_affinity()\n\n # pick the right check for the game we are playing\n if isinstance(board, Gomoku):\n\n # get advantageous blocks\n blocks_advn = board.get_adv_blocks(opaffinity)\n best_moves = []\n best_move = None\n\n # sort the blocks which may be countered\n for block in blocks_advn:\n if block.direction == 'horizontal':\n x1 = block.tiles[0][0] - 1\n y1 = block.tiles[0][1] \n x2 = block.tiles[2][0] + 1\n y2 = block.tiles[2][1] \n if x1 < 0 or x2 >= 7: return None\n if board.get_tile(x1,y1) == BLANK_TILE() and \\\n board.get_tile(x2,y2) == BLANK_TILE():\n best_moves.append((x1,y1))\n elif block.direction == 'vertical':\n x1 = block.tiles[0][0] \n y1 = block.tiles[0][1] - 1 \n x2 = block.tiles[2][0]\n y2 = block.tiles[2][1] + 1\n if y1 < 0 or y2 >= 7: return None\n if board.get_tile(x1,y1) == BLANK_TILE() and \\\n board.get_tile(x2,y2) == BLANK_TILE():\n best_moves.append((x2,y2))\n elif block.direction == 'diagonal(\\)':\n x1 = block.tiles[0][0] - 1\n y1 = block.tiles[0][1] - 1\n x2 = block.tiles[2][0] + 1\n y2 = block.tiles[2][1] + 1\n if x1 < 0 or y1 < 0 or x2 >= 7 or y2 >= 7: return None\n if board.get_tile(x1,y1) == BLANK_TILE() and \\\n board.get_tile(x2,y2) == BLANK_TILE():\n best_moves.append((x1,y1))\n elif block.direction == 'diagonal(/)':\n x1 = block.tiles[0][0] - 1\n y1 = block.tiles[0][1] + 1\n x2 = block.tiles[2][0] + 1\n y2 = block.tiles[2][1] - 1\n if x1 < 0 or y1 >= 7 or x2 >= 7 or y2 < 0: return None\n if board.get_tile(x1,y1) == BLANK_TILE() and \\\n board.get_tile(x2,y2) == BLANK_TILE():\n best_moves.append((x1,y1))\n\n # pick the best move in the best block to counter\n for move in best_moves:\n print('considered advantageous move:'+str(move))\n if best_move is None: best_move = move \n elif move[0] < best_move[0] and move[1] == best_move[1]:\n best_move = move\n elif move[0] == best_move[0] and move[1] > best_move[1]:\n best_move = move\n elif move[0] < best_move[0] and move[1] > best_move[1]:\n best_move = move\n\n return best_move", "def generate_round_boosters(number_of_players=4, seed=0):\n\n if seed is not 0:\n random.seed(seed)\n all_boosters_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n 
randomized_tiles = list()\n\n for _ in range(number_of_players + 3):\n chosen_tile_index = random.randint(0, len(all_boosters_list) - 1)\n randomized_tiles.append(all_boosters_list[chosen_tile_index])\n all_boosters_list.pop(chosen_tile_index)\n\n return tuple(randomized_tiles)", "def select_trump(self, rnd: PlayerRound) -> int:\n # select the trump with the largest number of cards\n # print(rnd.hand)\n # print(rnd.hand.shape)\n\n trump = 0\n max_number_in_color = 0\n for c in range(4):\n number_in_color = (rnd.hand * color_masks[c]).sum()\n if number_in_color > max_number_in_color:\n max_number_in_color = number_in_color\n trump = c\n return trump", "def find_opponent(standings,odds):\n\n # simulate all games\n for i in range(len(odds)):\n play_game(odds.loc[i],standings)\n\n # update the points and GD tally\n standings['P']=standings['W']*3 + standings['D']\n standings['GD']=standings['F']-standings['A']\n\n # see if teams have equal amount of points, and award h2h_points for\n # h2h results against those teams.\n for group in \"ABCDEF\":\n gelijk = standings.loc[standings['Group']==group][standings.loc[standings['Group']==group].duplicated(subset='P',keep=False)]\n gelijk[\"h2h_points\"]=np.zeros(len(gelijk))\n\n for i in gelijk.index:\n for team1 in gelijk.loc[i][\"h2h\"]:\n for team2 in gelijk[\"TEAMS\"]:\n if team1==team2:\n standings.loc[i,\"h2h_points\"]+=1\n\n # sort the final standings\n standings = standings.sort_values(by=['Group','P',\"h2h_points\",'GD','F','W'],ascending=[True,False,False,False,False,False])\n\n # determine third placed teams\n standings = standings.reset_index()\n third = standings.loc[[2,6,10,14,18,22]]\n\n # determine best number threes\n third = third.sort_values(by=['P','GD','F','W'],ascending=False)\n\n groups_of_best_no_3 = \"\"\n for i in third.head(4).Group:\n groups_of_best_no_3+=i\n groups_of_best_no_3 = ''.join(sorted(groups_of_best_no_3))\n\n # look up the opponent of the dutch team\n a = third.loc[third.Group == opponent_matrix[groups_of_best_no_3]]['TEAMS']\n\n return a.reset_index().TEAMS[0]" ]
[ "0.6253384", "0.6129651", "0.6121192", "0.58671147", "0.56712914", "0.56542623", "0.5605723", "0.55911577", "0.5516436", "0.5502499", "0.5497719", "0.5487439", "0.5475032", "0.5417539", "0.5398502", "0.53883797", "0.5378638", "0.53657436", "0.5356736", "0.53533053", "0.5319766", "0.5306733", "0.5295486", "0.52912647", "0.52380073", "0.5229269", "0.5220981", "0.52128905", "0.5212859", "0.5206324" ]
0.6507316
0
Running the Monte Carlo machine simply plays a large number of pools (the number of runs). After a pool has been played, the ranking from that run is added to the overall ranking. For example, if Ajax finishes first 10 times, it appears 10 times in the list total_ranking[1].
def run_monte_carlo(runs, pool, goals=False):
    total_ranking = {
        1: [],
        2: [],
        3: [],
        4: [],
        5: []
    }
    for run in range(runs):
        if goals:
            curr_score = run_one_pool(pool, True)
        else:
            curr_score = run_one_pool(pool)
        total_ranking = rank_teams_of_curr_run(curr_score, total_ranking)
    return total_ranking
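A hedged usage sketch of the accumulation described in the query above (the pool mapping and the helpers run_one_pool / rank_teams_of_curr_run are assumed to come from the same project; the numbers are illustrative only):

    # Illustrative only: simulate 1000 pools and count how often Ajax finished first.
    ranking = run_monte_carlo(runs=1000, pool=pool)
    ajax_first_places = ranking[1].count("Ajax")  # e.g. 10 if Ajax won 10 of the runs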
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mlbpowerrankings(self, irc, msg, args):\n \n url = self._b64decode('aHR0cDovL2VzcG4uZ28uY29tL21sYi9wb3dlcnJhbmtpbmdz')\n\n try:\n req = urllib2.Request(url)\n html = (urllib2.urlopen(req)).read()\n except:\n irc.reply(\"Failed to fetch: %s\" % url)\n return\n \n html = html.replace(\"evenrow\", \"oddrow\")\n\n soup = BeautifulSoup(html)\n table = soup.find('table', attrs={'class': 'tablehead'})\n prdate = table.find('td', attrs={'colspan': '6'}).renderContents()\n t1 = table.findAll('tr', attrs={'class': 'oddrow'})\n\n if len(t1) < 30:\n irc.reply(\"Failed to parse MLB Power Rankings. Did something break?\")\n return\n\n object_list = []\n\n for row in t1:\n rowrank = row.find('td', attrs={'class': 'pr-rank'}).renderContents().strip()\n rowteam = row.find('div', attrs={'style': re.compile('^padding.*')}).find('a').text.strip()\n rowrecord = row.find('span', attrs={'class': 'pr-record'}).renderContents().strip()\n rowlastweek = row.find('span', attrs={'class': 'pr-last'}).renderContents().strip().replace(\"Last Week\", \"prev\") \n\n d = collections.OrderedDict()\n d['rank'] = int(rowrank)\n d['team'] = str(rowteam)\n d['record'] = str(rowrecord)\n d['lastweek'] = str(rowlastweek)\n object_list.append(d)\n\n if prdate:\n irc.reply(ircutils.mircColor(prdate, 'blue'))\n\n for N in self._batch(object_list, 6):\n irc.reply(' '.join(str(str(n['rank']) + \".\" + \" \" + ircutils.bold(n['team'])) + \" (\" + n['lastweek'] + \")\" for n in N))", "def nflpowerrankings(self, irc, msg, args):\n \n url = self._b64decode('aHR0cDovL2VzcG4uZ28uY29tL25mbC9wb3dlcnJhbmtpbmdz')\n\n try:\n req = urllib2.Request(url)\n html = (urllib2.urlopen(req)).read()\n html = html.replace(\"evenrow\", \"oddrow\")\n except:\n irc.reply(\"Failed to fetch: %s\" % url)\n return\n\n soup = BeautifulSoup(html)\n updated = soup.find('div', attrs={'class':'date floatleft'}).text.replace('Updated:','- ')\n table = soup.find('table', attrs={'class': 'tablehead'})\n prdate = table.find('td', attrs={'colspan': '5'}).renderContents()\n t1 = table.findAll('tr', attrs={'class': 'oddrow'})\n\n if len(t1) < 30:\n irc.reply(\"Failed to parse NFL Power Rankings. 
Did something break?\")\n return\n\n object_list = []\n\n for row in t1:\n rowrank = row.find('td', attrs={'class': 'pr-rank'}).renderContents()\n rowteam = row.find('div', attrs={'style': re.compile('^padding.*')}).find('a').text\n rowrecord = row.find('span', attrs={'class': 'pr-record'}).renderContents()\n rowlastweek = row.find('span', attrs={'class': 'pr-last'}).renderContents().replace(\"Last Week\", \"prev\") \n\n d = collections.OrderedDict()\n d['rank'] = int(rowrank)\n d['team'] = self._translateTeam('team', 'short', str(rowteam).strip())\n d['record'] = str(rowrecord).strip()\n d['lastweek'] = str(rowlastweek).strip()\n object_list.append(d)\n\n if prdate:\n irc.reply(ircutils.mircColor(prdate, 'blue') + \" \" + updated)\n\n for N in self._batch(object_list, 8):\n irc.reply(' '.join(str(str(n['rank']) + \".\" + \" \" + ircutils.bold(n['team'])) + \" (\" + n['lastweek'] + \")\" for n in N))", "def calc_rank(id=13197473):\r\n player_url = urllib.parse.urlparse(\"http://osu.ppy.sh/pages/include/profile-general.php?u=player_id&m=0\".replace('player_id', str(id)))\r\n page = urlopen(player_url.geturl())\r\n soup = BeautifulSoup(page, features=\"html.parser\")\r\n table_divs = soup.findAll('div', attrs={'class': 'profileStatLine'})\r\n\r\n import re\r\n pattern = '\\(#\\d*,*\\d+\\)'\r\n for div in table_divs:\r\n for childdiv in div.find_all('b'):\r\n result = re.search(pattern, str(childdiv.text))\r\n my_ranking = int(result.group(0).replace(',', '').replace(\"(#\", '').replace(\")\", ''))\r\n break\r\n break\r\n return my_ranking", "def print_end_of_round_rankings(self):\n print('\\nFINAL SCORES')\n print('-'*12)\n for k, v in enumerate(self.ranking_dict):\n print(f\"{k+1} {v[0]}: {v[1]}\")\n print('\\n')", "def rank():\n return 0", "def genStats(data, printStats = 0):\n fVotes = open('/home/eduardo/ForestWatchers/ann2besttile/results/votes.txt','w')\n tileCount = []\n numberTasks = len(data)\n for task in range(numberTasks):\n tileCount.append([0] * 12)\n numberResults = len(data[task])\n fVotes.write(str(task)+\" \")\n for result in range(numberResults):\n fVotes.write(data[task][result]['answer']+\" \")\n if data[task][result]['answer'] == '2011352':\n tileCount[task][0] += 1\n elif data[task][result]['answer'] == '2011353':\n tileCount[task][1] += 1\n elif data[task][result]['answer'] == '2011355':\n tileCount[task][2] += 1\n elif data[task][result]['answer'] == '2011357':\n tileCount[task][3] += 1\n elif data[task][result]['answer'] == '2011358':\n tileCount[task][4] += 1\n elif data[task][result]['answer'] == '2011359':\n tileCount[task][5] += 1\n elif data[task][result]['answer'] == '2011360':\n tileCount[task][6] += 1\n elif data[task][result]['answer'] == '2011361':\n tileCount[task][7] += 1\n elif data[task][result]['answer'] == '2011362':\n tileCount[task][8] += 1\n elif data[task][result]['answer'] == '2011363':\n tileCount[task][9] += 1\n elif data[task][result]['answer'] == '2011364':\n tileCount[task][10] += 1\n elif data[task][result]['answer'] == '2011365':\n tileCount[task][11] += 1\n fVotes.write(\"\\n\")\n #Print info for debug\n if printStats == 1:\n print \"Stats for task \" + str(task)\n print \"Tile 00 (352) = \" + str(tileCount[task][0])\n print \"Tile 01 (353) = \" + str(tileCount[task][1])\n print \"Tile 02 (355) = \" + str(tileCount[task][2])\n print \"Tile 03 (357) = \" + str(tileCount[task][3])\n print \"Tile 04 (358) = \" + str(tileCount[task][4])\n print \"Tile 05 (359) = \" + str(tileCount[task][5])\n print \"Tile 06 (360) = \" + 
str(tileCount[task][6])\n print \"Tile 07 (361) = \" + str(tileCount[task][7])\n print \"Tile 08 (362) = \" + str(tileCount[task][8])\n print \"Tile 09 (363) = \" + str(tileCount[task][9])\n print \"Tile 10 (364) = \" + str(tileCount[task][10])\n print \"Tile 11 (365) = \" + str(tileCount[task][11])\n print \"Maximum value = \" + str(max(tileCount[task]))\n print \"Position = \" + str(tileCount[task].index(max(tileCount[task])))\n print \"\"\n fVotes.close()\n return tileCount", "def nfltotalqbr(self, irc, msg, args, optlist):\n \n postseason = False\n for (option, arg) in optlist:\n if option == 'postseason':\n postseason = True\n \n if postseason:\n url = self._b64decode('aHR0cDovL2VzcG4uZ28uY29tL25mbC9xYnIvXy9zZWFzb250eXBlLzM=')\n else:\n url = self._b64decode('aHR0cDovL2VzcG4uZ28uY29tL25mbC9xYnI=')\n\n try:\n req = urllib2.Request(url)\n html = (urllib2.urlopen(req)).read()\n except:\n irc.reply(\"Failed to open: %s\" % url)\n return\n \n html = html.replace('tr class=\"evenrow','tr class=\"oddrow')\n\n soup = BeautifulSoup(html)\n\n title = soup.find('div', attrs={'class':'mod-header stathead'}).find('h4')\n table = soup.find('table', attrs={'class':'tablehead'})\n rows = table.findAll('tr', attrs={'class': re.compile('^oddrow')})[0:10]\n\n append_list = []\n\n for row in rows:\n rank = row.find('td', attrs={'align':'left'})\n name = rank.findNext('td').find('a')\n qbr = name.findNext('td', attrs={'class':'sortcell'})\n append_list.append(rank.text + \". \" + ircutils.bold(name.text) + \" \" + qbr.text)\n\n output = string.join([item for item in append_list], \" | \")\n irc.reply(ircutils.mircColor(title.text, 'red') + \": \" + output)", "def rank(self):\r\n\t\trank = self.n % 13\r\n\t\treturn rank", "def rank(self, urlids, wordids):\r\n\t\tself.score()\r\n\t\treturn sorted(self.scores.items(), key=lambda v:v[1], reverse=self.rev)", "def set_rank_order(self):\n for k in self._run:\n self._run[k].sort(key=lambda x:x.get_rank(),reverse=False)\n tot_res = len(self._run[k])\n for r in self._run[k]:\n r.set_score(tot_res - int(r.get_rank()) + 1)\n print r.get_str()", "def opponents_score(self):\n if self.opponent_wickets == 10:\n var1 = \"All Out\"\n return str('{0} {1}').format(self.opponent_runs, var1)\n else:\n var1 = self.opponent_wickets\n return str('{0}-{1}').format(self.opponent_runs, var1)", "def rank_teams_of_curr_run(curr_score, curr_ranking):\n for place in curr_ranking:\n curr_place = get_key_with_max_value(curr_score)\n curr_ranking[place] = curr_ranking[place].__add__([curr_place])\n curr_score.pop(curr_place)\n return curr_ranking", "def diversified_ranking(self):\n self.error_throw('rank')\n instance = Instance(self.table_name)\n instance.addTable(Table(instance,False,'','')) # 'False'->transformed '',''->no describe yet\n if self.import_method == 'mysql': instance = self.mysql_handle(instance)\n elif self.import_method == 'csv': instance = self.csv_handle(instance)\n\n self.rank_partial(instance)\n\n self.rank_method = methods_of_ranking[3] # = 'diversified_ranking'", "def _compute_ranking_clusters(load_file=False):\n # Define file names.\n TMP_PATH = gettempdir()\n _script = join(ROOT_PATH, '..', 'scripts',\n 'compute_ranking_clusters.perl')\n _wmt16 = join(TMP_PATH, 'wmt16-researcher-results.csv')\n _dump = join(TMP_PATH, 'wmt16-ranking-clusters.txt')\n \n # If not loading cluster data from file, re-compute everything.\n if not load_file:\n results = [u'srclang,trglang,srcIndex,documentId,segmentId,judgeId,' \\\n 
'system1Number,system1Id,system2Number,system2Id,system3Number,' \\\n 'system3Id,system4Number,system4Id,system5Number,system5Id,' \\\n 'system1rank,system2rank,system3rank,system4rank,system5rank']\n \n # Compute current dump of WMT16 results in CSV format. We ignore any\n # results which are incomplete, i.e. have been SKIPPED.\n for result in RankingResult.objects.filter(item__hit__completed=True,\n item__hit__mturk_only=False):\n _csv_output = result.export_to_csv()\n if not _csv_output.endswith('-1,-1,-1,-1,-1'):\n results.append(_csv_output)\n \n results.append('')\n export_csv = u\"\\n\".join(results)\n \n # Write current dump of results to file.\n with open(_wmt16, 'w') as outfile:\n outfile.write(export_csv)\n \n # Run Philipp's Perl script to compute ranking clusters.\n PERL_OUTPUT = check_output(['perl', _script, _wmt16], shell=True)\n \n with open(_dump, 'w') as outfile:\n outfile.write(PERL_OUTPUT)\n \n else:\n PERL_OUTPUT = ''\n with open(_dump, 'r') as infile:\n PERL_OUTPUT = infile.read()\n \n # Compute ranking cluster data for status page.\n CLUSTER_DATA = {}\n for line in PERL_OUTPUT.split(\"\\n\"):\n _data = line.strip().split(',')\n if not len(_data) == 5 or _data[0] == 'task':\n continue\n \n _data[0] = _data[0].replace('-', u' → ')\n if not CLUSTER_DATA.has_key(_data[0]):\n CLUSTER_DATA[_data[0]] = {}\n \n if not CLUSTER_DATA[_data[0]].has_key(_data[1]):\n CLUSTER_DATA[_data[0]][_data[1]] = []\n \n CLUSTER_DATA[_data[0]][_data[1]].append(_data[2:])\n \n _cluster_data = []\n _sorted_language_pairs = [x[1].decode('utf-8') for x in LANGUAGE_PAIR_CHOICES]\n for language_pair in _sorted_language_pairs:\n _language_data = []\n for cluster_id in sorted(CLUSTER_DATA[language_pair].keys()):\n _data = CLUSTER_DATA[language_pair][cluster_id]\n _language_data.append((cluster_id, _data))\n _cluster_data.append((language_pair, _language_data))\n \n return _cluster_data", "def __rank__(self) -> int:", "def get_mojo_rank_info():\n \n #get movies from the db and calulate rank info\n rank_info_df = movie_helper.get_highest_mojo_rank()\n \n with tqdm(total=len(rank_info_df)) as pbar:\n for index, row in rank_info_df.iterrows(): \n \n #update the database\n updates = {\"best_rank\" : int(row['best_rank']), \n 'weekends_at_best_rank' : int(row['weekends_at_best_rank']), \n 'weekends_in_top_3' : int(row['weekends_in_top_3']), \n 'weekends_in_top_5' : int(row['weekends_in_top_5']), \n 'weekends_in_top_10' : int(row['weekends_in_top_10']), \n 'weekends_in_top_15' : int(row['weekends_in_top_15'])}\n selects = {\"movieId\" : int(row[\"movieId\"])}\n database_helper.update_data(\"movies\", update_params = updates, select_params = selects)\n \n pbar.update(1)", "def create_text_rank(self):\n # filtered_tokens = self.filter_pos() #if use, replace 2 self.lemma_tokens below\n vocab = self.create_vocab(self.lemma_tokens)\n token_windows = self.create_token_windows(self.lemma_tokens)\n graph = self.create_matrix(vocab, token_windows)\n text_rank = np.array([1] * len(vocab))\n previous_tr = 0\n d = 0.85\n min_difference = 1e-5\n for epoch in range(10):\n text_rank = (1 - d) + d * np.dot(graph, text_rank)\n if abs(previous_tr - sum(text_rank)) < min_difference:\n break\n else:\n previous_tr = sum(text_rank)\n node_weight = {}\n for word in vocab:\n node_weight[word] = text_rank[vocab[word]]\n return node_weight", "def run_one_pool(curr_pool, goals=False):\n curr_score = {\n \"Ajax\": 0,\n \"Feyenoord\": 0,\n \"PSV\": 0,\n \"FC Utrecht\": 0,\n \"Willem II\": 0\n }\n for match in curr_pool:\n if 
curr_pool[match]:\n teamvsteam, chance = match, curr_pool[match]\n if not goals:\n outcome = run_one_match(teamvsteam, chance)\n else:\n outcome = run_one_match_with_goals(teamvsteam, curr_pool)\n curr_score = add_score(teamvsteam[0], teamvsteam[1], outcome, curr_score)\n return curr_score", "def stats_strategy_response(data: OTreeSessionData, ws=None):\n\n rounds = [data.get_round(i + 1) for i in range(data.num_rounds())]\n rnd_stats = {}\n for rnd, (pr, ne) in enumerate(zip(rounds[:-1], rounds[1:]), 2):\n stats = {k1: {k2: 0 for k2 in product(('Node', 'Edge'), repeat=2)}\n for k1 in product(list('CD'), repeat=2)}\n print(f'\\rCalculating strategy response (round {rnd}) ... ', end='')\n for pid in range(1, ne.num_players() + 1):\n nep = ne.loc[pid]\n prp = pr.loc[pid]\n stats[(nep.player.choice_L, prp.player.choice_nei_L)][(nep.player.type, prp.player.type_L)] += 1\n stats[(nep.player.choice_U, prp.player.choice_nei_U)][(nep.player.type, prp.player.type_U)] += 1\n stats[(nep.player.choice_R, prp.player.choice_nei_R)][(nep.player.type, prp.player.type_R)] += 1\n stats[(nep.player.choice_D, prp.player.choice_nei_D)][(nep.player.type, prp.player.type_D)] += 1\n if ws:\n ws.append((rnd, pid, nep.player.type,\n nep.player.choice_L, nep.player.type_L, prp.player.choice_nei_L,\n nep.player.choice_U, nep.player.type_U, prp.player.choice_nei_U,\n nep.player.choice_R, nep.player.type_R, prp.player.choice_nei_R,\n nep.player.choice_D, nep.player.type_D, prp.player.choice_nei_D))\n rnd_stats[rnd] = stats\n print('Done')\n global_vars['rnd_stats'] = rnd_stats\n return rnd_stats", "def scoreTeams(curTeams, oppTeam, pokedex, league, minDistWanted):\n battleData, htmlData = loadBattleData(league)\n similarities = loadSims() \n \n #If not given an opponent team then simply randomly choose losers from the dataset to compare to.\n if len(oppTeam) == 0:\n picks = set([])\n while (len(picks) < NUMLOSINGTEAMS and (not len(picks) == len(battleData))):\n picks.add(random.randint(0,len(battleData)-1))\n\n losers = []\n loserDict = {}\n for i in picks:\n entry = battleData[i]\n winner,loser = determineWinner(entry)\n loserDict[str(loser)] = [winner]\n losers.append( (loser,0) )\n\n #Given opponent team then find similar teams\n else:\n oppTeam = [getSimPokemon(opp,similarities) for opp in oppTeam]\n\n #create dictionary from losers team to the team that beat them.\n loserDict = {}\n sims = []\n for d in battleData:\n winner, loser = determineWinner(d)\n\n wTeam = teamToArray(winner,pokedex)\n lTeam = np.array(teamToArray(loser, pokedex))\n\n score = 0\n for oppNp in oppTeam:\n score+= np.amax(lTeam*oppNp) \n\n if str(loser) in loserDict:\n loserDict[str(loser)].append(winner)\n else:\n #new to dictonary\n loserDict[str(loser)] = [winner]\n\n sims.append((loser, score))\n\n\n sims = sorted(sims, key = lambda x : x[1], reverse = True)\n\n cutoff = min(len(sims),NUMLOSINGTEAMS)\n losers = sims[:cutoff]\n\n #Gather winners to losing teams\n winnersComp = []\n for loser,_ in losers:\n for winner in loserDict[str(loser)]:\n winnersComp.append(teamToArray(winner,pokedex))\n \n topScore = len(winnersComp)*6 #pkmn team size\n\n results = []\n inverted_idx = {}\n\n existsSet = []\n\n #Creates inverted index for teams, while simoultaneously weeding out any teams that are exactly similar.\n for i in range(len(curTeams)):\n team = curTeams[i]\n results.append((team,0))\n sTeam = set(team)\n if not (sTeam in existsSet):\n existsSet.append(sTeam)\n for pkm in team:\n if pkm != EMPTY:\n if pkm in inverted_idx:\n 
inverted_idx[pkm].append(i)\n else:\n inverted_idx[pkm] = [i]\n \n #Giving the similiarity scores to the winners based off of the inverted index.\n for pkm in inverted_idx:\n for winner in winnersComp:\n wArr = np.array(winner)\n #tArr = getSimPokemon(pkm,similarities)\n tArr = similarities[pkm]\n \n vals = wArr * tArr\n\n score = np.amax(vals)\n\n for i in inverted_idx[pkm]:\n results[i] = (results[i][0],results[i][1]+(score/topScore))\n\n results = sorted(results, key = lambda x : x[1], reverse = True)\n\n if len(results) < NUMTEAMSRETURN:\n if len(results) == 0:\n returnTeams = [[] for x in range(NUMTEAMSRETURN)]\n teamScores = [0 for x in range(NUMTEAMSRETURN)]\n\n else:\n returnTeams = [result[0] for result in results]\n teamScores = [result[1] for result in results]\n else:\n firstResult, firstScore = results[0]\n returnTeams = [firstResult]\n teamScores = [round(firstScore*100,1)]\n returnSets = [set(firstResult)]\n \n i = 1\n\n #Loops through results and adds teams with the proper edit distance away.\n while(len(returnTeams) < NUMTEAMSRETURN and minDistWanted > 0):\n teamToConsider,teamToConsiderScore = results[i]\n \n considerSet = set(teamToConsider)\n add = True\n ##checks the edit distance of teams is above wanted\n for team in returnSets:\n if len(team.union(considerSet)) < len(team)+minDistWanted:\n add = False\n\n ##If indeed above wanted levels then add\n if add:\n returnTeams.append(teamToConsider)\n returnSets.append(considerSet)\n teamScores.append(round(teamToConsiderScore*100,1))\n \n i+=1\n\n if i >= len(results):\n i = 1\n minDistWanted -= 1 \n \n winHtmls = []\n if htmlData != None:\n for team,_ in losers:\n for winner in loserDict[str(team)]:\n winHtmls.extend(htmlData[str(sorted(winner))])\n \n\n return returnTeams, teamScores, winHtmls", "def _rank(self):\r\n return sorted(self.player_points.items(),key=lambda x:x[1],reverse=True)", "def _rank(self, ranking, n):\n return nlargest(n, ranking, key=ranking.get)", "def evalrank(model, data, split='dev'):\n print 'Loading dataset'\n if split == 'dev':\n X = load_dataset(data, load_train=False)[1]\n else:\n X = load_dataset(data, load_train=False)[2]\n\n print 'Computing results...'\n train = load_dataset('CAD', load_train=True)[0]\n vectors = encode_sentences(model, train[0], verbose=False)\n # demo.retrieve_captions(model, net, train[0], vectors, 'image.jpg', k=5)\n ls = encode_sentences(model, X[0])\n lim = encode_images(model, X[1])\n\n (r1, r5, r10) = i2t(lim, X[0], train[0], vectors)\n print \"Image to text: %.1f, %.1f, %.1f\" % (r1, r5, r10)\n # (r1i, r5i, r10i, medri) = t2i(lim, ls)\n # print \"Text to image: %.1f, %.1f, %.1f, %.1f\" % (r1i, r5i, r10i, medri)", "def update(self, rank):\n # calculate MR and MRR\n self.mr += rank\n self.mrr += 1 / rank\n # calculate Hits@k\n if rank <= 1:\n self.hits1 += 1\n self.hits3 += 1\n self.hits5 += 1\n self.hits10 += 1\n elif rank <= 3:\n self.hits3 += 1\n self.hits5 += 1\n self.hits10 += 1\n elif rank <= 5:\n self.hits5 += 1\n self.hits10 += 1\n elif rank <= 10:\n self.hits10 += 1", "def test_something(self):\n\n allure.dynamic.title(\"Testing compute_ranks\")\n allure.dynamic.severity(allure.severity_level.NORMAL)\n allure.dynamic.description_html('<h3>Codewars badge:</h3>'\n '<img src=\"https://www.codewars.com/users/myFirstCode'\n '/badges/large\">'\n '<h3>Test Description:</h3>'\n \"<p>Test the function taht organizes a sports league in a \"\n \"round-robin-system. Each team meets all other teams. 
\"\n \"In your league a win gives a team 2 points, a draw gives \"\n \"both teams 1 point. After some games you have to compute \"\n \"the order of the teams in your league. You use the following \"\n \"criteria to arrange the teams:</p>\"\n \"<ul><li>- Points</li>\"\n \"<li>- Scoring differential (the difference between goals \"\n \"scored and those conceded)</li>\"\n \"<li>- Goals scored</li></ul>\")\n\n test_data = [\n (6,\n [[0, 5, 2, 2],\n [1, 4, 0, 2],\n [2, 3, 1, 2],\n [1, 5, 2, 2],\n [2, 0, 1, 1],\n [3, 4, 1, 1],\n [2, 5, 0, 2],\n [3, 1, 1, 1],\n [4, 0, 2, 0]],\n [4, 4, 6, 3, 1, 2]),\n (6,\n [[0, 5, 2, 0],\n [1, 4, 2, 2],\n [2, 3, 1, 3],\n [1, 5, 0, 0],\n [2, 0, 2, 1],\n [3, 4, 3, 1]],\n [2, 3, 4, 1, 5, 6]),\n (4,\n [[0, 3, 1, 1],\n [1, 2, 2, 2],\n [1, 3, 2, 0],\n [2, 0, 2, 0]],\n [3, 1, 1, 3]),\n (10,\n [],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]),\n (8,\n [[0, 7, 2, 0]],\n [1, 2, 2, 2, 2, 2, 2, 8])\n ]\n\n for data in test_data:\n number = data[0]\n games = data[1]\n expected = data[2]\n actual_result = compute_ranks(number, games)\n print_log(number=number,\n games=games,\n expected=expected,\n actual_result=actual_result)\n\n with allure.step(\"Enter a test data and verify the result:\"):\n self.assertEqual(expected, actual_result)", "def determine_jobs_per_pool(numpools, totaljobs):\n cluster = os.environ['CC_CLUSTER']\n if cluster in ['graham', 'beluga']:\n jobs_per_pool = math.floor(totaljobs / numpools)\n else:\n jobs_per_pool = totaljobs\n return jobs_per_pool", "def update_ranking(request=None):\n if request is not None:\n RANKINGS_CACHE['clusters'] = _compute_ranking_clusters(load_file=True)\n return HttpResponse('Ranking updated successfully')\n \n else:\n RANKINGS_CACHE['clusters'] = _compute_ranking_clusters()", "def display_ranking(args: argparse.Namespace, cookie: str) -> None:\n number: int = args.number\n if not number:\n return\n\n if number >= 50:\n print(\"Number of users to display cannot exceed 49 for now. Exiting.\")\n return\n\n cookies = {\n \"spip_session\": cookie,\n }\n\n ranking = get_global_ranking(args.offset, cookies)\n\n for counter, entry in enumerate(ranking):\n\n if counter >= number:\n break\n\n userid = ranking[entry][\"id_auteur\"]\n userinfo = get_user_info(userid, cookies)\n if userinfo is None:\n continue\n\n print(\n f\"{Fore.YELLOW}{ranking[entry]['place']:>4}{Style.RESET_ALL}. \"\n f\"{userinfo['nom']:<25} \"\n f\"{userinfo['score']} \"\n )\n\n return", "def calculate_power_ranking(wins, losses, is_winning_streak,\n streak_length, recent_wins, recent_losses):\n pred1 = 0\n pred2 = round( ( ((wins*1.0)/(wins+losses)) - 0.500 ) * 16 * 9 * 10/9 )\n pred3 = recent_wins - recent_losses\n streak_factor = 1 if is_winning_streak else -1\n pred4 = streak_factor * round( ( streak_length - 1 ) / 2.0 )\n print pred1, pred2, pred3, pred4\n return pred1 + pred2 + pred3 + pred4", "def test_job_rank(self):\n inv_search = 'rank:Postdoc'\n spi_search = 'find rank Postdoc'\n self._compare_searches(inv_search, spi_search)" ]
[ "0.6204498", "0.6093356", "0.5933787", "0.58172834", "0.57272923", "0.5666633", "0.5657551", "0.5655824", "0.5565564", "0.55135745", "0.54635066", "0.5455027", "0.5450292", "0.54313564", "0.5428389", "0.54252523", "0.5423509", "0.54218894", "0.5421884", "0.5414211", "0.5411612", "0.5398058", "0.53938663", "0.5390618", "0.53894955", "0.536331", "0.5357657", "0.53565615", "0.5351237", "0.5348483" ]
0.6112421
1
compute truncate_div: calculates the truncated division of the data, res = floor(x / y) if x/y > 0 else ceil(x/y)
def truncate_div_compute(input_x, input_y, output_x, kernel_name="truncate_div"):
    shape_list = broadcast_shapes(
        te.lang.cce.util.shape_to_list(input_x.shape),
        te.lang.cce.util.shape_to_list(input_y.shape),
        param_name_input1="input_x",
        param_name_input2="input_y")
    int_list = ("int32", "int8", "uint8")
    input_dtype = input_x.dtype

    if input_dtype in int_list:
        data_zero = te.lang.cce.broadcast(tvm.const(0, 'float32'), shape_list[2], 'float32')
        data_x_broad = te.lang.cce.cast_to(input_x, 'float32')
        data_y_broad = te.lang.cce.cast_to(input_y, 'float32')
        data_x_broad = te.lang.cce.broadcast(data_x_broad, shape_list[2])
        data_y_broad = te.lang.cce.broadcast(data_y_broad, shape_list[2])
        res_div = te.lang.cce.vdiv(data_x_broad, data_y_broad)
        res_min_int = te.lang.cce.ceil(te.lang.cce.vmin(res_div, data_zero))
        res_max_int = te.lang.cce.floor(te.lang.cce.vmax(res_div, data_zero))
        res_trunc = te.lang.cce.vadd(res_min_int, res_max_int)
    else:
        if tbe_platform.cce_conf.api_check_support("te.lang.cce.vlog", "float32"):
            input_x = te.lang.cce.cast_to(input_x, 'float32')
            input_y = te.lang.cce.cast_to(input_y, 'float32')
        data_x_broad = te.lang.cce.broadcast(input_x, shape_list[2])
        data_y_broad = te.lang.cce.broadcast(input_y, shape_list[2])
        res_trunc = te.lang.cce.vdiv(data_x_broad, data_y_broad)

    res = te.lang.cce.cast_to(res_trunc, input_dtype)
    return res
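A plain-Python sketch of the truncation rule stated in the query, independent of the te.lang.cce operator above (the helper name truncate_div is illustrative, not part of that operator):

    import math

    def truncate_div(x, y):
        # Truncate toward zero: floor for positive quotients, ceil for negative ones.
        q = x / y
        return math.floor(q) if q > 0 else math.ceil(q)

    assert truncate_div(7, 2) == 3     # floor(3.5)
    assert truncate_div(-7, 2) == -3   # ceil(-3.5), not floor(-3.5) == -4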
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trunc_divide(lhs, rhs):\n return _make.trunc_divide(lhs, rhs)", "def ceildiv(a, b):\n return - (-a // b)", "def floor_div(a, b):\r\n # see decorator for function body\r", "def division(x, y, val = 0.0):\n if y != 0.0:\n val = float(x)/y\n return val", "def ceil_division(left_number, right_number):\n\t\t\treturn -(-left_number // right_number)", "def tryDivide(x, y):\r\n s = 0.0\r\n if y != 0.0: s = x / y\r\n return s", "def div(x, y):\n return x / y", "def divide(x, y):\n return round(x / y)", "def safe_divide(num,denom,ifzero=0.0):\n\n return ifzero if denom==0 else num/denom", "def division(a, b):\n if b != 0:\n return a//b", "def trunc(x):\n return 0", "def trunc(value):\n\t\treturn round(value, 3) if math.modf(value)[0] != 0 else round(value)", "def div1(left: float, right: float) -> float:\n return left / right", "def floor_divide(lhs, rhs):\n return _make.floor_divide(lhs, rhs)", "def __floordiv__(self, other):\r\n return self.__divmod__(other)[0]", "def _div0(self, a, b , val = 0):\n with np.errstate(divide='ignore', invalid='ignore'):\n c = np.true_divide( a, b )\n c[ ~ np.isfinite( c )] = val # -inf inf NaN\n return c", "def division(x, y):\n return x / y", "def quotient_floor(numerator, denominator):\n return math.floor(numerator / denominator)", "def normalize(data):\n min = np.min(data)\n if min:\n data = data + min\n return old_div(data,np.max(data))\n else: # if min is 0\n return old_div(data,np.max(data))", "def div(a: Decimal, b: Decimal) -> Decimal:\n return a / b", "def safe_div(numerator, denominator, name='safe_div'):\n return array_ops.where(\n math_ops.equal(denominator, 0),\n array_ops.zeros_like(numerator),\n math_ops.div(numerator, denominator),\n name=name)", "def test_floordiv():\n value = 42\n num_a = param.Integer(value=value)\n assert num_a.value == value\n\n new_value = value // 2\n num_a.value //= 2\n assert num_a.value == new_value", "def div(a, b):\n a = float(a)\n b = float(b)\n return a / b", "def trunc(data):\n return _make.trunc(data)", "def div(a, b):\r\n if type(b) in inttypes_set:\r\n if not b:\r\n return Infinity(a)\r\n raise ZeroDivisionError('%r / %r' % (a, b))\r\n if b == 1:\r\n return a\r\n if type(a) in inttypes_set:\r\n return normalized_fraction(a, b)\r\n return a / b", "def int_div_inplace(a, b):", "def __ifloordiv__(self, obj):\n # calls __floordiv__\n tmp = self // obj\n self.data = tmp.data\n return self", "def div(a,b):\r\n return a/b", "def div2(left: float, right: float) -> float:\n return left / right", "def __floordiv__(self, other):\n return MyCustomNumber(self.value // other.value)" ]
[ "0.72226495", "0.6773521", "0.67653507", "0.65383255", "0.6387221", "0.63650364", "0.63168216", "0.63073", "0.62371224", "0.61715484", "0.6150904", "0.61437595", "0.6043365", "0.6009371", "0.6003909", "0.598048", "0.597958", "0.5978322", "0.5967595", "0.5960188", "0.5945854", "0.59417987", "0.59389293", "0.5935592", "0.5906042", "0.58941734", "0.58825815", "0.58807445", "0.58765805", "0.5861654" ]
0.73679143
0
Test alert policies .create() calls post with correct parameters
def test_create_success(self, mock_post):
    self.policies.create(
        name=self.policy_single_response['policy']['name'],
        incident_preference=self.policy_single_response['policy']['incident_preference']
    )

    mock_post.assert_called_once_with(
        url='https://api.newrelic.com/v2/alerts_policies.json',
        headers=self.policies.headers,
        data=json.dumps({
            "policy": {
                "name": self.policy_single_response['policy']['name'],
                "incident_preference": self.policy_single_response['policy']['incident_preference']
            }
        })
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update(self, mock_put):\n self.policies.update(id=333114, policy_update=self.policy_show_response)\n\n mock_put.assert_called_once_with(\n url='https://api.newrelic.com/v2/alert_policies/333114.json',\n headers=self.policies.headers,\n data=json.dumps(self.policy_show_response)\n )", "def test_alert_create(self):\n pass", "def test_update_success(self, mock_put):\n self.policies.update(\n id=self.policy_single_response['policy']['id'],\n name=self.policy_single_response['policy']['name'],\n incident_preference=self.policy_single_response['policy']['incident_preference']\n )\n\n mock_put.assert_called_once_with(\n url='https://api.newrelic.com/v2/alerts_policies/{0}.json'.format(\n self.policy_single_response['policy']['id']\n ),\n headers=self.policies.headers,\n data=json.dumps({\n \"policy\": {\n \"name\": self.policy_single_response['policy']['name'],\n \"incident_preference\": self.policy_single_response['policy']['incident_preference']\n }\n })\n )", "def test_update_risk_profile_using_put(self):\n pass", "def test_resource_policy(self):\n expected_actions = sorted(['rt:get', 'rt:put', 'rt:update', 'rt:delete'])\n test_resource = ResourceTypeName.get()\n test_policy_name = 'test_policy'\n test_policy = create_test_ResourcePolicy('tp{i}', actions=expected_actions)\n self.app.post(\n f'/v1/resource/{test_resource}',\n data=json.dumps({'actions': expected_actions}),\n headers=admin_headers)\n\n # 400 returned when creating a policy with invalid actions\n test_policy = create_test_ResourcePolicy('tp{i}', actions=['invalid:actions'])\n resp = self.app.post(\n f\"/v1/resource/{test_resource}/policy/{test_policy_name}\",\n data=json.dumps({'policy': test_policy}),\n headers=admin_headers\n )\n self.assertEqual(resp.status_code, 400)\n resp = self.app.get(\n f\"/v1/resource/{test_resource}/policy/{test_policy_name}\",\n headers=admin_headers\n )\n self.assertEqual(resp.status_code, 404)\n\n # 201 return when creating a valid resource policy\n test_policy = create_test_ResourcePolicy('tp{i}', actions=expected_actions)\n resp = self.app.post(\n f\"/v1/resource/{test_resource}/policy/{test_policy_name}\",\n data=json.dumps({'policy': test_policy}),\n headers=admin_headers\n )\n self.assertEqual(resp.status_code, 201)\n resp = self.app.get(\n f\"/v1/resource/{test_resource}/policy/{test_policy_name}\",\n headers=admin_headers\n )\n self.assertJSONEqual(json.loads(resp.body)['policy'], test_policy)\n\n # 200 returned when modifying the policy with valid actions\n test_policy = create_test_ResourcePolicy('tp{i}', actions=expected_actions[:2])\n resp = self.app.put(\n f\"/v1/resource/{test_resource}/policy/{test_policy_name}\",\n data=json.dumps({'policy': test_policy}),\n headers=admin_headers\n )\n self.assertEqual(resp.status_code, 200)\n resp = self.app.get(\n f\"/v1/resource/{test_resource}/policy/{test_policy_name}\",\n headers=admin_headers\n )\n self.assertJSONEqual(json.loads(resp.body)['policy'], test_policy)\n\n # 400 returned when modifying the policy with invalid actions\n test_policy2 = create_test_ResourcePolicy('tp{i}', actions=['invalid:actions'])\n resp = self.app.put(\n f\"/v1/resource/{test_resource}/policy/{test_policy_name}\",\n data=json.dumps({'policy': test_policy2}),\n headers=admin_headers\n )\n self.assertEqual(resp.status_code, 400)\n resp = self.app.get(\n f\"/v1/resource/{test_resource}/policy/{test_policy_name}\",\n headers=admin_headers\n )\n self.assertJSONEqual(json.loads(resp.body)['policy'], test_policy)\n\n # delete the policy\n resp = 
self.app.delete(\n f\"/v1/resource/{test_resource}/policy/{test_policy_name}\",\n headers=admin_headers\n )\n self.assertEqual(resp.status_code, 200)\n resp = self.app.get(\n f\"/v1/resource/{test_resource}/policy/{test_policy_name}\",\n headers=admin_headers\n )\n self.assertEqual(resp.status_code, 404)", "def test_security_on_put(self):\n # test the update url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.put(url,{'description':'my new description'})\n self.failUnlessEqual(response.status_code, 401)", "def test_validate_put_new(client):\n response = client.put(\n '/user/2',\n data=json.dumps({\n 'name': 'Elissa Knupp',\n 'email': '[email protected]',\n }),\n headers={'Content-Type': 'application/json'}\n )\n assert response.status_code == 400\n assert response.json['message'] == INVALID_ACTION_MESSAGE", "def test_owner_edit_assessment_valid(self):\n req = {'weight': 60, 'additional_description': 'asdfqwer'}\n response = self.user_01.put(self.assessment_report_url, req)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n response = self.user_01.patch(self.assessment_report_url, req)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['weight'], req['weight'])\n self.assertEqual(response.data['additional_description'], req['additional_description'])", "def test_db_creating_put(self):\n data = {\n 'start_time': '2019-10-29',\n 'end_time': '2019-12-29',\n 'week_day': 6,\n 'time': '23:58:59'\n }\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': self.notification.id})\n\n with mock.patch('notification.models.Notification.update') as notification_update:\n notification_update.return_value = False\n\n response = self.client.put(url, json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')\n self.assertEqual(response.status_code, 400)", "def test_create_policy_type(mock_send_message):\n A1sim.create_policy_type(BASE_URL, \"test_id\", {})\n mock_send_message.assert_called_once_with('PUT',\n 'Create Policy Type',\n (f\"{BASE_URL}/policytype?id=test_id\"),\n data={},\n headers=HEADER)", "def test_validate_put_existing(client):\n response = client.put(\n '/user/1',\n data=json.dumps({\n 'name': 'Jeff Knupp',\n 'email': '[email protected]',\n }),\n headers={'Content-Type': 'application/json'}\n )\n assert response.status_code == 400\n assert response.json['message'] == INVALID_ACTION_MESSAGE", "def test_set_alert(self):\n alert = dweepy.set_alert(\n self.my_thing_id,\n ['[email protected]', '[email protected]'],\n test_alert_condition,\n test_key,\n )\n self.assertEqual(alert['condition'], test_alert_condition)", "def taco_test_put_new(self):\n body = '{ \"id\": 400, \"name\": \"item_new\", \"content\": \"after test update\" }'\n env = self.get_env('PUT', '/item/4', body=body)\n webapi_start(env, lambda status, response_headers: self.assertEqual(status, '204'))", "def test_staff_update_duplicate_procedure_fails(self):\n res = self.client.post(PROCEDURE_URL, self.payload, format='json')\n second_payload = {\n 'name': 'abc',\n 'speciality': [self.speciality.id],\n 'overview': 'bla bla bla'\n }\n self.client.post(PROCEDURE_URL, second_payload, format='json')\n\n url = get_item_url(res.data['id'])\n new_payload = {\n 'name': 'abc',\n }\n\n response = self.client.patch(url, new_payload, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_edit_alert_by_id(self):\n pass", 
"def test_put_success(self):\n\n data = {\n 'time': '23:58:53'\n }\n\n url = reverse('notification', kwargs={'way_id': self.notification.way_id, 'notification_id': self.notification.id})\n response = self.client.put(url, json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')\n self.assertEqual(response.status_code, 200)", "def test_put_from_another_way(self):\n data = {\n 'start_time': '2019-10-29',\n 'end_time': '2019-12-29',\n 'week_day': 6,\n 'time': '23:58:59'\n }\n url = reverse('notification', kwargs={'way_id': 101, 'notification_id': self.notification.id})\n response = self.client.put(url, json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')\n self.assertEqual(response.status_code, 403)", "def test_associate_with_notification_channel_success(self, mock_put):\n self.policies.associate_with_notification_channel(\n id=self.policy_single_response['policy']['id'],\n channel_id=self.channel_single_response['channel']['id'],\n )\n\n mock_put.assert_called_once_with(\n url='https://api.newrelic.com/v2/alerts_policy_channels.json?policy_id={0}&channel_ids={1}'.format(\n self.policy_single_response['policy']['id'],\n self.channel_single_response['channel']['id']\n ),\n headers=self.policies.headers\n )", "def test_company_put_permissions(self):\n companyPK = Company.objects.get(name=self.admin.profile.company.name).pk\n url = reverse('Company-detail', kwargs={'pk': companyPK + 1})\n data = {'name': 'NewTestCompany', 'address': {'address1': '123 fake st',\n 'address2': 'fake address 2',\n 'city': 'nowhere', 'state': 'IN', 'zip': '90210'}}\n response = self.client.put(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(Company.objects.get(pk=companyPK).name,\n 'NewTestCompany')", "def test_create_successful(self):\n self.webkom.add_user(self.disallowed_user)\n request = self.factory.post(\"/permissiontest/\", self.test_update_object)\n force_authenticate(request, self.disallowed_user)\n view = TestViewSet.as_view({\"post\": \"create\"})\n\n response = view(request)\n created = response.data\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(created[\"name\"], self.test_update_object[\"name\"])", "def test_create_risk_profile_using_post(self):\n pass", "def test_put(self):\n client = kazoo.client.KazooClient()\n zkutils.put(client, '/foo/bar')\n kazoo.client.KazooClient.create.assert_called_with(\n '/foo/bar', b'', acl=mock.ANY, makepath=True,\n sequence=False, ephemeral=False)", "def put(self, consumer_key, rid):\n policy = Policy.query.filter(\n Policy.consumer_key == consumer_key,\n Policy.rid == rid\n ).first_or_404()\n\n payload = json.loads(request.data)\n if \"actions\" not in payload:\n abort(400, \"Missing required field: actions\")\n\n policy.actions = set(payload[\"actions\"])\n policy.save()\n return self.jsonify(self._serialize(policy), status_code=200)", "def test_update_ikepolicy(self):\r\n resource = 'ikepolicy'\r\n cmd = ikepolicy.UpdateIKEPolicy(test_cli20.MyApp(sys.stdout), None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--name', 'newname'],\r\n {'name': 'newname', })", "def test_create_amendment_edit(session, client, jwt, description, data, sp_amend_id, debtor_amend_id):\n json_data = copy.deepcopy(data)\n if sp_amend_id is not None:\n json_data['addSecuredParties'][0]['amendPartyId'] = sp_amend_id\n else:\n 
del json_data['addSecuredParties'][0]['amendPartyId']\n if debtor_amend_id is not None:\n json_data['addDebtors'][0]['amendPartyId'] = debtor_amend_id\n else:\n del json_data['addDebtors'][0]['amendPartyId']\n current_app.config.update(PAYMENT_SVC_URL=MOCK_PAY_URL)\n current_app.config.update(AUTH_SVC_URL=MOCK_URL_NO_KEY)\n\n response = client.post('/api/v1/financing-statements/TEST0001/amendments',\n json=json_data,\n headers=create_header_account(jwt, [PPR_ROLE, STAFF_ROLE], 'test-user', STAFF_ROLE),\n content_type='application/json')\n assert response.status_code == HTTPStatus.CREATED\n result = response.json\n if sp_amend_id is None or sp_amend_id > 0:\n assert result['changes'][0]['addSecuredParties'][0].get('former_name')\n else:\n assert 'former_name' not in result['changes'][0]['addSecuredParties'][0]\n if debtor_amend_id is None or debtor_amend_id > 0:\n assert result['changes'][0]['addDebtors'][0].get('former_name')\n else:\n assert 'former_name' not in result['changes'][0]['addDebtors'][0]", "def test_user_update_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n new_payload = {\n 'other_details': 'new details'\n }\n\n response = self.client.patch(url, new_payload, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_put(populate_hostnames, authenticated_client):\n rv = authenticated_client.get(\n '/api/observables/{0:d}/'.format(populate_hostnames[0].id))\n observable_json = json.loads(rv.data)\n rv = authenticated_client.put(\n '/api/observables/{0:d}/'.format(observable_json['id']),\n data=json.dumps({'value': 'qwe'}),\n content_type='application/json')\n assert rv.status_code == 400\n response = json.loads(rv.data)\n assert 'ValidationError' in response\n assert 'not a valid string for domain-name' in response['ValidationError']", "def test_put_non_id(self):\n\n data = {\n 'time': '23:38:54'\n }\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id})\n response = self.client.put(url, json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')\n self.assertEqual(response.status_code, 400)", "def create_policy_request():\n return {\n 'public_key':\n r'BBLewg4VqLR38b38daE7Fj\\/uhr543uGrEpyoPFgmFZK6EZ9g2XdK\\/i65RrSJ6sJ96aXD3DJHY3Me2GJQO9\\/ifjE=',\n 'label':\n 'Integration Test Policy',\n 'operations': [{\n 'sensor_id': 10,\n 'action': 'SHARE',\n }, {\n 'sensor_id': 53,\n 'action': 'BIN',\n 'bins': [30.0, 60.0, 90.0]\n }, {\n 'sensor_id': 55,\n 'action': 'MOVING_AVG',\n 'interval': 300\n }]\n }", "def test_create_bios_policy(self):\n pass" ]
[ "0.68285656", "0.6772421", "0.67621154", "0.64469576", "0.6395389", "0.63586205", "0.6352661", "0.6176362", "0.60673046", "0.6066516", "0.60473686", "0.60405195", "0.59732294", "0.5936393", "0.59167117", "0.5909094", "0.5891978", "0.58826816", "0.5856741", "0.58251625", "0.58161926", "0.581604", "0.5799174", "0.579433", "0.5792194", "0.57909995", "0.57728344", "0.5765584", "0.5751711", "0.57239807" ]
0.67850477
1
Test alert policies .update() calls put with correct parameters
def test_update_success(self, mock_put):
    self.policies.update(
        id=self.policy_single_response['policy']['id'],
        name=self.policy_single_response['policy']['name'],
        incident_preference=self.policy_single_response['policy']['incident_preference']
    )

    mock_put.assert_called_once_with(
        url='https://api.newrelic.com/v2/alerts_policies/{0}.json'.format(
            self.policy_single_response['policy']['id']
        ),
        headers=self.policies.headers,
        data=json.dumps({
            "policy": {
                "name": self.policy_single_response['policy']['name'],
                "incident_preference": self.policy_single_response['policy']['incident_preference']
            }
        })
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update(self, mock_put):\n self.policies.update(id=333114, policy_update=self.policy_show_response)\n\n mock_put.assert_called_once_with(\n url='https://api.newrelic.com/v2/alert_policies/333114.json',\n headers=self.policies.headers,\n data=json.dumps(self.policy_show_response)\n )", "def test_update_risk_profile_using_put(self):\n pass", "def test_security_on_put(self):\n # test the update url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.put(url,{'description':'my new description'})\n self.failUnlessEqual(response.status_code, 401)", "def taco_test_put_update(self):\n body = '{ \"id\": 400, \"name\": \"item4\", \"content\": \"after test update\" }'\n env = self.get_env('PUT', '/item/4', body=body)\n webapi_start(env, lambda status, response_headers: self.assertEqual(status, '204'))", "def update():\n return 'update api in put'", "def test_update_ikepolicy(self):\r\n resource = 'ikepolicy'\r\n cmd = ikepolicy.UpdateIKEPolicy(test_cli20.MyApp(sys.stdout), None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--name', 'newname'],\r\n {'name': 'newname', })", "def test_update(self, init_db, audit):\n params = {\n \"resource_type\": \"Category\",\n \"action\": \"Updated\",\n \"activity\": \"changed name\"\n }\n audit.update(**params)\n assert audit.resource_type == params['resource_type']\n assert audit.action == params['action']\n assert audit.activity == params['activity']", "def test_update(self):\n self.client.force_authenticate(user=self.admin)\n\n data = {\n 'retreat': reverse(\n 'retreat:retreat-detail', args=[self.retreat.id]\n ),\n 'user': reverse('user-detail', args=[self.user2.id]),\n }\n\n response = self.client.put(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 1},\n ),\n data,\n format='json',\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_405_METHOD_NOT_ALLOWED\n )", "def test_owner_edit_assessment_valid(self):\n req = {'weight': 60, 'additional_description': 'asdfqwer'}\n response = self.user_01.put(self.assessment_report_url, req)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n response = self.user_01.patch(self.assessment_report_url, req)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['weight'], req['weight'])\n self.assertEqual(response.data['additional_description'], req['additional_description'])", "def test_user_update_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n new_payload = {\n 'other_details': 'new details'\n }\n\n response = self.client.patch(url, new_payload, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_update_case(self):\n pass", "def test_update_risk(self):\n test_date = datetime.datetime.utcnow().strftime(\"%Y-%m-%d\")\n with factories.single_commit():\n risk_id = factories.RiskFactory().id\n created_at = test_date\n updated_at = test_date\n new_values = {\n \"title\": \"New risk\",\n \"created_at\": created_at,\n \"updated_at\": updated_at,\n \"review_status\": all_models.Review.STATES.UNREVIEWED,\n \"review_status_display_name\": \"some status\",\n }\n risk = all_models.Risk.query.get(risk_id)\n\n response = self.api.put(risk, risk.id, new_values)\n\n self.assert200(response)\n risk = 
all_models.Risk.query.get(risk_id)\n self.assert_instance(new_values, risk)", "def test_update(self):\n obj = self.provision_single_asset()\n test_string = \"testing this thing\"\n p = {'id': obj.id, 'description': test_string}\n self.put('widget', 200, params=p)\n self.session.refresh(obj)\n assert obj.description == test_string", "def test_update(self):\n doctor = DoctorFactory.create(id=21)\n data = {'name': 'Joe'}\n self.assertNotEqual(doctor.name, data['name'])\n\n response = self.unath_client.put(reverse('doctor-detail', args=[21]), data=data)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n response = self.client.put(reverse('doctor-detail', args=[21]), data=data)\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)", "def put(self):\n sample = request.get_json()\n if 'receipt_id' not in sample:\n return CustomError('No receipt_id in payload', status_code=400)\n if 'authorization_status' not in sample:\n return CustomError('No authorization_status in payload', status_code=400)\n\n DBHelper.update_receipt(sample)\n return {'message': 'updated!'}, 200", "def test_deprecated_update_ae(self):\n with app.test_client() as client:\n self.login_client(client)\n\n res = client.put(\n '/v1/sim/configs/ae',\n data=json.dumps({}),\n content_type='application/json'\n )\n data = json.loads(res.data.decode())\n self.assertEqual(data['message'], 'Method Not Allowed.')\n self.assertEqual(data['status'], 'fail')\n self.assertEqual(res.status_code, 405)", "def test_successful_article_edit(self):\n saved_article = self.create_article()\n url = saved_article[0]\n token = saved_article[2]\n response = self.test_client.put(url, self.article_update_data, format='json', HTTP_AUTHORIZATION=token)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['message'], \"Article has been successfully updated.\")", "def test_user_update_permissions(self):\n userPK = User.objects.get(username='c2e1').pk\n url = reverse('User-detail', kwargs={'pk': userPK})\n data = {'username': 'company1NewTest'}\n response = self.client.put(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest')\n data = {'username': 'company1NewTest2'}\n response = self.client.patch(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest2')", "def test_update():\n payload = {'age': 99}\n sample_uuid = get_sample_id()\n response = requests.put(f'http://localhost:5000/api/persons/{sample_uuid}', json=payload)\n data = response.json()\n\n assert response.status_code == 200\n for field in FIELDS:\n assert field in data", "async def test_update(self):\n rsps = respx.put(f'{PROVISIONING_API_URL}/users/current/provisioning-profiles/id') \\\n .mock(return_value=Response(200))\n await provisioning_client.update_provisioning_profile('id', {'name': 'new name'})\n assert rsps.calls[0].request.url == \\\n f'{PROVISIONING_API_URL}/users/current/provisioning-profiles/id'\n assert rsps.calls[0].request.headers['auth-token'] == 'header.payload.sign'\n assert rsps.calls[0].request.content == json.dumps({'name': 'new 
name'}).encode('utf-8')", "def test_update_scenario(self):\n pass", "def test_update_exchange(self):\n new_exchange = self.app.add_exchange(\"test\", \"test\", \"test\")\n values = {\"exchange_name\": \"111\", \"api_key\": \"111\", \"secret\": \"111\"}\n ret = self.app.update_exchange(new_exchange.id, values)\n self.assertIn(ret[0], \"success\")", "def test_update_pet(self):\n body = Pet()\n response = self.client.open(\n '/pet',\n method='PUT',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_partial_update(self):\n self.client.force_authenticate(user=self.admin)\n\n data = {\n 'retreat': reverse(\n 'retreat:retreat-detail', args=[self.retreat.id]\n ),\n 'user': reverse('user-detail', args=[self.user2.id]),\n }\n\n response = self.client.put(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 1},\n ),\n data,\n format='json',\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_405_METHOD_NOT_ALLOWED\n )", "def test_update(client):\n rv = update(client, 'Michael')\n assert json.loads(rv.data.decode())['code'] == 0\n assert json.loads(rv.data.decode())['owner'] == 'Michael'", "def policy_update_fn(self, data: Dict[str, Any], result: Dict[str, Any]) -> None:", "def taco_test_put_new(self):\n body = '{ \"id\": 400, \"name\": \"item_new\", \"content\": \"after test update\" }'\n env = self.get_env('PUT', '/item/4', body=body)\n webapi_start(env, lambda status, response_headers: self.assertEqual(status, '204'))", "def update_policy(self, *args, **kwargs):\r\n pass", "def update(self, request, pk=None):\n\n return Response({'http_method': 'PUT'})", "def test_update_location_premium(self):\n\n url = reverse('location-detail', args=(self.location.id,))\n data = {\n 'point': 200,\n }\n json_data = json.dumps(data)\n self.client.credentials(HTTP_AUTHORIZATION=self.token)\n response = self.client.put(path=url, content_type='application/json', data=json_data)\n self.assertEqual(first=400, second=response.status_code)" ]
[ "0.79217553", "0.71475583", "0.709934", "0.6928383", "0.6687903", "0.66773266", "0.6663834", "0.6655021", "0.66526735", "0.65455186", "0.6470367", "0.6439922", "0.6429988", "0.6426166", "0.641693", "0.63938844", "0.63904506", "0.6387737", "0.63814616", "0.6356233", "0.63526785", "0.6346595", "0.63357794", "0.6315201", "0.6315076", "0.6314459", "0.63122904", "0.63083225", "0.63068724", "0.63057905" ]
0.77502126
1
Test alert policies .delete() success
def test_delete_success(self, mock_delete):
    self.policies.delete(id=self.policy_single_response['policy']['id'])

    mock_delete.assert_called_once_with(
        url='https://api.newrelic.com/v2/alerts_policies/{0}.json'.format(
            self.policy_single_response['policy']['id']
        ),
        headers=self.policies.headers
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_alert_by_id(self):\n pass", "def test_delete_success_alert():\n app = HelperApp(server.message_app)\n app.post('/login/', {'username': 'jessie', 'password': 'frog'})\n\n # Add a an message\n app.post('/compose/', {'to': 'james', 'subject': 's', 'body': 'S'})\n app.get('/') # Clears alerts\n\n # Delete something real\n msg_file, = glob(\"messages/*.json\")\n msg_id = os.path.basename(msg_file).rstrip('.json')\n app.post('/delete/{}/'.format(msg_id))\n\n # Make sure we display a success message\n alerts = unpack_alerts(app.cookies)\n assert len(alerts) == 1\n assert alerts == [{'kind': 'success',\n 'message': 'Deleted {}.'.format(msg_id)}]", "def test_delete_bogus_alert():\n app = HelperApp(server.message_app)\n app.post('/login/', {'username': 'jessie', 'password': 'frog'})\n\n # Add a message\n app.post('/compose/', {'to': 'james', 'subject': 's', 'body': 'b'})\n app.get('/') # Clears alerts\n\n # Remove something bogus\n # Pick some arbitrary UUID. Collision is improbable.\n bogus_uuid = \"b58cba44-da39-11e5-9342-56f85ff10656\"\n app.post('/delete/{}/'.format(bogus_uuid))\n\n # Make sure we warn the user about it\n alerts = unpack_alerts(app.cookies)\n assert len(alerts) == 1\n assert alerts == [{'kind': 'danger',\n 'message': 'No such message {}'.format(bogus_uuid)}]", "def test_security_on_delete(self):\n # test the delete product url\n product = Product.objects.all()[0]\n url = '/product/xml/%s/' % product.item_number\n response = self.client.delete(url)\n self.failUnlessEqual(response.status_code, 401)", "def test_remove_alert(self):\n dweepy.set_alert(\n self.my_thing_id,\n ['[email protected]', '[email protected]'],\n test_alert_condition,\n test_key,\n )\n dweepy.remove_alert(self.my_thing_id, test_key)", "def test_client_verification_document_delete(self):\n pass", "def test_alarm_view_delete(self):\n # delete event\n request = self.factory.post('/module/alarm/del/1/', follow=True)\n request.user = self.user\n request.session = {}\n response = alarm_del(request, 1)\n self.assertEqual(response.status_code, 302)\n\n request = self.factory.post('/module/alarm/del/', {'select': '1'})\n request.user = self.user\n request.session = {}\n response = alarm_del(request, 0)\n self.assertEqual(response.status_code, 302)", "def delete():", "def test_delete(self):\n self.client.force_authenticate(user=self.admin)\n\n response = self.client.delete(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 1},\n ),\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_405_METHOD_NOT_ALLOWED\n )", "def test_delete_success(self):\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': self.notification.id})\n response = self.client.delete(url)\n\n self.assertEqual(response.status_code, 200)", "def test_delete_bios_policy(self):\n pass", "def test_delete_risk_profile_using_delete(self):\n pass", "def test_delete(self):\n pass", "def test_user_delete_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n del_procedure = self.client.delete(url)\n\n self.assertEqual(del_procedure.status_code,\n status.HTTP_401_UNAUTHORIZED)", "def test_delete_namespaced_policy(self):\n pass", "def test_lti20_delete_success(self):\r\n self.setup_system_xmodule_mocks_for_lti20_request_test()\r\n SCORE = 0.55 # pylint: disable=invalid-name\r\n COMMENT 
= u\"ಠ益ಠ\" # pylint: disable=invalid-name\r\n self.xmodule.module_score = SCORE\r\n self.xmodule.score_comment = COMMENT\r\n mock_request = self.get_signed_lti20_mock_request(\"\", method=u'DELETE')\r\n # Now call the handler\r\n response = self.xmodule.lti_2_0_result_rest_handler(mock_request, \"user/abcd\")\r\n # Now assert there's no score\r\n self.assertEqual(response.status_code, 200)\r\n self.assertIsNone(self.xmodule.module_score)\r\n self.assertEqual(self.xmodule.score_comment, u\"\")\r\n (_, evt_type, called_grade_obj), _ = self.system.publish.call_args\r\n self.assertEqual(called_grade_obj, {'user_id': self.USER_STANDIN.id, 'value': None, 'max_value': None})\r\n self.assertEqual(evt_type, 'grade')", "def delete():\n click.echo('delete was called.')", "def test_delete_run(self):\n pass", "def test_validate_delete(client):\n response = client.delete('/user/1')\n assert response.status_code == 400\n assert response.json['message'] == INVALID_ACTION_MESSAGE", "def test_delete_case(self):\n pass", "def test_delete_subscription(self):\n pass", "def test_client_risk_assessment_delete(self):\n pass", "def test_delete(self):\n scenario = factories.Scenario(config='', status=Scenario.Status.INACTIVE)\n scenario.delete()\n self.assertEqual(scenario.status, Scenario.Status.INACTIVE)", "def test_delete_message(client, test_db):\n rv = client.get(\"/delete/1\")\n data = json.loads(rv.data)\n assert data[\"status\"] == 0\n login(client, app.config[\"USERNAME\"], app.config[\"PASSWORD\"])\n rv = client.get(\"/delete/1\")\n data = json.loads(rv.data)\n assert data[\"status\"] == 1", "def test_event_deleted(dummy_regform, api_delete, api_post):\n dummy_regform.event.delete('Unit tests')\n assert api_delete.call_count == 1\n assert api_post.call_count == 0", "def test_event_deleted(dummy_regform, api_delete, api_post):\n dummy_regform.event.delete('Unit tests')\n assert api_delete.call_count == 1\n assert api_post.call_count == 0", "def test_delete(self):\n\n value = self.instance.delete()\n self.client.delete_instance.assert_called_once_with('nginx')\n self.assertEqual(value, self.client.delete_instance.return_value)", "def delete():\n # Must be logged in to perform any delete commands.\n auth_required()\n pass", "def test_user_delete_procedure_failure(self):\n p1 = models.Procedure.objects.create(\n name='temp',\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n url = get_item_url(res.data[0]['id'])\n del_procedure = self.client.delete(url)\n\n self.assertEqual(del_procedure.status_code,\n status.HTTP_403_FORBIDDEN)", "def test_delete_cluster_policy(self):\n pass" ]
[ "0.7788935", "0.74828786", "0.7098903", "0.70895654", "0.70789915", "0.7048444", "0.7027411", "0.69928205", "0.69801384", "0.6939634", "0.6917779", "0.69133645", "0.69022495", "0.6870939", "0.68659055", "0.68542594", "0.68441856", "0.6840614", "0.67858964", "0.67836225", "0.6775251", "0.67630875", "0.67623293", "0.6751513", "0.6714726", "0.6714726", "0.67001337", "0.66798264", "0.66782045", "0.6650713" ]
0.7756372
1
Test alert policies .associate_with_notification_channel() calls put with correct parameters
def test_associate_with_notification_channel_success(self, mock_put): self.policies.associate_with_notification_channel( id=self.policy_single_response['policy']['id'], channel_id=self.channel_single_response['channel']['id'], ) mock_put.assert_called_once_with( url='https://api.newrelic.com/v2/alerts_policy_channels.json?policy_id={0}&channel_ids={1}'.format( self.policy_single_response['policy']['id'], self.channel_single_response['channel']['id'] ), headers=self.policies.headers )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_dissociate_from_notification_channel(self, mock_put):\n self.policies.associate_with_notification_channel(\n id=self.policy_single_response['policy']['id'],\n channel_id=self.channel_single_response['channel']['id'],\n )\n\n mock_put.assert_called_once_with(\n url='https://api.newrelic.com/v2/alerts_policy_channels.json?policy_id={0}&channel_ids={1}'.format(\n self.policy_single_response['policy']['id'],\n self.channel_single_response['channel']['id']\n ),\n headers=self.policies.headers\n )", "def upsert_alert(\n self,\n alerts: str,\n project_id: str = PROVIDE_PROJECT_ID,\n retry: Retry | _MethodDefault = DEFAULT,\n timeout: float | None = None,\n metadata: Sequence[tuple[str, str]] = (),\n ) -> None:\n policy_client = self._get_policy_client()\n channel_client = self._get_channel_client()\n\n record = json.loads(alerts)\n existing_policies = [\n policy[\"name\"] for policy in self.list_alert_policies(project_id=project_id, format_=\"dict\")\n ]\n existing_channels = [\n channel[\"name\"]\n for channel in self.list_notification_channels(project_id=project_id, format_=\"dict\")\n ]\n policies_ = []\n channels = []\n for channel in record.get(\"channels\", []):\n channels.append(NotificationChannel(**channel))\n for policy in record.get(\"policies\", []):\n policies_.append(AlertPolicy(**policy))\n\n channel_name_map = {}\n\n for channel in channels:\n channel.verification_status = (\n monitoring_v3.NotificationChannel.VerificationStatus.VERIFICATION_STATUS_UNSPECIFIED\n )\n\n if channel.name in existing_channels:\n channel_client.update_notification_channel(\n request={\"notification_channel\": channel},\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n else:\n old_name = channel.name\n channel.name = None\n new_channel = channel_client.create_notification_channel(\n request={\"name\": f\"projects/{project_id}\", \"notification_channel\": channel},\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n channel_name_map[old_name] = new_channel.name\n\n for policy in policies_:\n policy.creation_record = None\n policy.mutation_record = None\n\n for i, channel in enumerate(policy.notification_channels):\n new_channel = channel_name_map.get(channel)\n if new_channel:\n policy.notification_channels[i] = new_channel\n\n if policy.name in existing_policies:\n with contextlib.suppress(InvalidArgument):\n policy_client.update_alert_policy(\n request={\"alert_policy\": policy},\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n else:\n policy.name = None\n for condition in policy.conditions:\n condition.name = None\n policy_client.create_alert_policy(\n request={\"name\": f\"projects/{project_id}\", \"alert_policy\": policy},\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )", "def test_set_alert(self):\n alert = dweepy.set_alert(\n self.my_thing_id,\n ['[email protected]', '[email protected]'],\n test_alert_condition,\n test_key,\n )\n self.assertEqual(alert['condition'], test_alert_condition)", "def put_bucket_notification(Bucket=None, NotificationConfiguration=None):\n pass", "def put_bucket_notification_configuration(Bucket=None, NotificationConfiguration=None):\n pass", "def test_alert_create(self):\n pass", "def test_put_from_another_way(self):\n data = {\n 'start_time': '2019-10-29',\n 'end_time': '2019-12-29',\n 'week_day': 6,\n 'time': '23:58:59'\n }\n url = reverse('notification', kwargs={'way_id': 101, 'notification_id': self.notification.id})\n response = self.client.put(url, json.dumps(data, cls=DjangoJSONEncoder), 
content_type='application/json')\n self.assertEqual(response.status_code, 403)", "def test_update_preference_communication_channel_id(self):\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\n pass", "def test_update(self, mock_put):\n self.policies.update(id=333114, policy_update=self.policy_show_response)\n\n mock_put.assert_called_once_with(\n url='https://api.newrelic.com/v2/alert_policies/333114.json',\n headers=self.policies.headers,\n data=json.dumps(self.policy_show_response)\n )", "def test_put_success(self):\n\n data = {\n 'time': '23:58:53'\n }\n\n url = reverse('notification', kwargs={'way_id': self.notification.way_id, 'notification_id': self.notification.id})\n response = self.client.put(url, json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')\n self.assertEqual(response.status_code, 200)", "def test_db_creating_put(self):\n data = {\n 'start_time': '2019-10-29',\n 'end_time': '2019-12-29',\n 'week_day': 6,\n 'time': '23:58:59'\n }\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': self.notification.id})\n\n with mock.patch('notification.models.Notification.update') as notification_update:\n notification_update.return_value = False\n\n response = self.client.put(url, json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')\n self.assertEqual(response.status_code, 400)", "def test_update_success(self, mock_put):\n self.policies.update(\n id=self.policy_single_response['policy']['id'],\n name=self.policy_single_response['policy']['name'],\n incident_preference=self.policy_single_response['policy']['incident_preference']\n )\n\n mock_put.assert_called_once_with(\n url='https://api.newrelic.com/v2/alerts_policies/{0}.json'.format(\n self.policy_single_response['policy']['id']\n ),\n headers=self.policies.headers,\n data=json.dumps({\n \"policy\": {\n \"name\": self.policy_single_response['policy']['name'],\n \"incident_preference\": self.policy_single_response['policy']['incident_preference']\n }\n })\n )", "def create_notification(notification_name):\n url = CMX_URL + '/api/config/v1/notification'\n print('CMX URL and Resource: ', url)\n payload = {\n \"name\": notification_name,\n \"rules\": [\n {\n \"conditions\": [\n {\n \"condition\": \"inout.deviceType == client\"\n },\n {\n \"condition\": \"inout.in/out == in\"\n },\n {\n \"condition\": \"inout.hierarchy == DevNetCampus>DevNetBuilding>DevNetZone\"\n }\n ]\n }\n ],\n \"subscribers\": [\n {\n \"receivers\": [\n {\n \"uri\": \"http://128.107.70.29:8010\",\n \"messageFormat\": \"JSON\",\n \"qos\": \"AT_MOST_ONCE\"\n }\n ]\n }\n ],\n \"enabled\": True,\n \"enableMacScrambling\": True,\n \"macScramblingSalt\": \"listening\",\n \"notificationType\": \"InOut\"\n }\n header = {'content-type': 'application/json', 'accept': 'application/json'}\n notification_response = requests.put(url, data=json.dumps(payload), headers=header, auth=CMX_AUTH, verify=False)\n print('Notification Status Code: ', notification_response.status_code)\n return notification_response.status_code", "def test_update_notification(client):\n create_user_response = create_user(client, TEST_USER_NAME, TEST_USER_PASS)\n assert create_user_response.status_code == HttpStatus.created_201.value\n\n new_notification_message_one = 'Fortnite has a new winner'\n new_notification_category_one = 'Information'\n post_response = create_notification(\n client,\n new_notification_message_one,\n 30,\n 
new_notification_category_one)\n assert post_response.status_code == HttpStatus.created_201.value\n assert Notification.query.count() == 1\n\n post_response_data = json.loads(post_response.get_data(as_text=True))\n new_notification_url = post_response_data['url']\n new_displayed_times = 1\n data = {'displayed_times': new_displayed_times}\n patch_response = client.patch(\n new_notification_url,\n headers=get_authentication_headers(TEST_USER_NAME, TEST_USER_PASS),\n data=json.dumps(data))\n assert patch_response.status_code == HttpStatus.ok_200.value\n\n get_response = client.get(\n new_notification_url,\n headers=get_authentication_headers(TEST_USER_NAME, TEST_USER_PASS))\n assert get_response.status_code == HttpStatus.ok_200.value\n\n get_response_data = json.loads(get_response.get_data(as_text=True))\n assert get_response_data['displayed_times'] == new_displayed_times", "def test__AutoModerationActionMetadataSendAlertMessage__copy_with__1():\n old_channel_id = 202211130006\n new_channel_id = 202211130007\n \n metadata = AutoModerationActionMetadataSendAlertMessage(old_channel_id)\n \n copy = metadata.copy_with(channel_id = new_channel_id)\n \n _assert_fields_set(copy)\n vampytest.assert_eq(copy.channel_id, new_channel_id)", "def test_get_apns_message_with_succes(self, mock_client): \n\n event = {\n 'operation': 'getMessage', \n 'arguments': {\n 'template': 'my-sample-geofence-id',\n }\n }\n\n response = {\n \"PushNotificationTemplateResponse\": {\n 'APNS': {\n 'Action': 'OPEN_APP',\n 'Title': 'Sample Title',\n 'Body': 'This is a sample body'\n }\n }\n }\n\n mock_client().get_push_template.return_value = response\n response = manageMessages.handler(event, None)\n\n self.assertTrue(response)\n self.assertEqual(response['status'], 'MESSAGE_OK')\n self.assertEqual(response['message']['service'], 'APNS')", "def test_all_options(self, provider, test_message):\n data = {\n \"message\": test_message,\n \"title\": \"title\",\n \"priority\": 2,\n \"url\": \"http://foo.com\",\n \"url_title\": \"url title\",\n \"sound\": \"bike\",\n \"timestamp\": \"0\",\n \"retry\": 30,\n \"expire\": 30,\n \"callback\": \"http://callback.com\",\n \"html\": True,\n }\n rsp = provider.notify(**data)\n rsp.raise_on_errors()", "def test_update_multiple_preferences_communication_channel_id(self):\n # This method utilises the PUT request method and will make changes to the Canvas instance. 
This needs consideration.\n pass", "def test_update_subscription(self):\n pass", "def test_update_risk_profile_using_put(self):\n pass", "def test_successful_subscriptions_notifies_pm(self) -> None:\n invitee = self.example_user(\"iago\")\n\n current_stream = self.get_streams(invitee)[0]\n invite_streams = self.make_random_stream_names([current_stream])[:1]\n self.common_subscribe_to_streams(\n invitee,\n invite_streams,\n extra_post_data={\n \"announce\": \"true\",\n \"principals\": orjson.dumps([self.user_profile.id]).decode(),\n },\n )", "def put(self, notification_id):\n user_id = get_jwt_identity()\n user = find_user(user_id)\n if not mark_notification_as_read(user, notification_id):\n return {'msg': \"Not allowed to mark notification as read\"}, 401", "def test_put_non_owner(self):\n another_user = CustomUser.objects.create(id=1067, email='[email protected]', is_active=True)\n another_user.set_password('testpassword')\n another_user.save()\n\n self.client.login(email='[email protected]', password='testpassword')\n\n data = {\n 'week_day': 3\n }\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': self.notification.id})\n response = self.client.put(url, json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')\n self.assertEqual(response.status_code, 403)", "def notify(nt_id, application, action, remedy, subj, heading):\n\n email = get_email(nt_id)\n lambda_client = boto3.client('lambda')\n messages = create_messages(application, action, remedy)\n print(email)\n email_data = {\n 'sender_mail': SENDER_EMAIL,\n 'email': email,\n 'subj': subj,\n 'heading': heading,\n 'messages': messages,\n 'region': os.environ.get(\"AWS_DEFAULT_REGION\")\n }\n invoke_email_response = lambda_client.invoke(\n FunctionName= os.environ.get(\"formatted_email\"),\n InvocationType= \"RequestResponse\",\n Payload= json.dumps(email_data)\n )\n err = checkError(invoke_email_response, \"Error sending email!\")\n if err:\n print(str(err))\n\n slack_data = {\n 'application_url': APP_URL,\n 'channel': CHANNEL,\n 'message': messages[1].rsplit(\"\\n\",5)[0],\n 'channel_id': CHANNEL_ID,\n 'nt_ids': [nt_id]\n }\n invoke_slack_response = lambda_client.invoke(\n FunctionName= os.environ.get(\"slack_message\"),\n InvocationType= \"RequestResponse\",\n Payload= json.dumps(slack_data)\n )\n err = checkError(invoke_slack_response, \"Error sending slack message!\")\n if err:\n print(str(err))", "def test_signal_update_achievement(self):\n # Create two users for test all the achievements. 
Two for the podium\n client = Client()\n user_michu = create_user('passwordmichu', 'michu')\n create_user('passwordimmobile', 'immobile')\n client.login(username='immobile', password='passwordimmobile')\n # Create the Collection for the achievement NumSolvedCollectionAchievementDefinition and Problem\n coll = create_collection('Coleccion de cartas')\n # Create PodiumAchievementDefinition\n ach_podium = PodiumAchievementDefinition(name={\"es\":'Presidente del podio'},\n description={\"es\":'Consigue ser el primero'},\n num_problems=1, position=1)\n ach_podium.save()\n # Create NumSolvedCollectionAchievementDefinition\n ach_collection = NumSolvedCollectionAchievementDefinition(name={\"es\":'Coleccionista'},\n description={\"es\":'Resuelve 50\\\n problemas de esta coleccion'},\n num_problems=50,\n collection=coll)\n ach_collection.save()\n # Create NumSolvedAchievementDefinition\n ach_solved = NumSolvedAchievementDefinition(name={\"es\":'Resolvista'},\n description={\"es\":'Resuelve 50 problemas'},\n num_problems=50)\n ach_solved.save()\n # Create NumSolvedTypeAchievementDefinition\n ach_type = NumSolvedTypeAchievementDefinition(name={\"es\":'Procedista'},\n description={\"es\":'Resuelve un problema PROC'},\n num_problems=1, problem_type=ProblemType.PROC.name)\n ach_type.save()\n # Create NumSubmissionsProblemsAchievementDefinition\n ach_submi_pro = NumSubmissionsProblemsAchievementDefinition(name={\"es\":'Muchos envios'},\n description={\"es\":'Envia muchas soluciones'},\n num_submissions=80, num_problems=1)\n ach_submi_pro.save()\n # Create problem and submit correct answer with \"immobile\" user, for make this the first to solve the problem\n problem = create_select_problem(coll, 'Problema')\n submit_select_url = reverse('judge:submit', args=[problem.pk])\n client.post(submit_select_url, {'code': problem.solution}, follow=True)\n client.logout()\n # Login with \"michu\" and submit correct answer. 
All the checks will be with this user\n client.login(username='michu', password='passwordmichu')\n client.post(submit_select_url, {'code': problem.solution}, follow=True)\n # Whit this definitions our user \"michu\" don't have any achievement\n self.assertEqual(ObtainedAchievement.objects.filter(user=user_michu).count(), 0)\n # PodiumAchievementDefinition now only need to stay in podium\n # In this test our user \"michu\" stay at second position, that is why before he didn't have the achievement\n ach_podium.position = 3\n ach_podium.save()\n # NumSolvedCollectionAchievementDefinition only needs one correct submission\n # In this test our user only have one correct submission, that is why before he didn't have the achievement\n ach_collection.num_problems = 1\n ach_collection.save()\n # NumSolvedAchievementDefinition only needs one correct submission\n # In this test our user only have one correct submission, that is why before he didn't have the achievement\n ach_solved.num_problems = 1\n ach_solved.save()\n # NumSolvedTypeAchievementDefinition change to type SELECT\n # In this test our user only resolved a SELECT type problem, not PROC.\n ach_type.problem_type = ProblemType.SELECT.name\n ach_type.save()\n # NumSubmissionsProblemsAchievementDefinition only needs one submission now\n ach_submi_pro.num_submissions = 1\n ach_submi_pro.save()\n # Now our user \"michu\" have 5 achievements\n self.assertEqual(ObtainedAchievement.objects.filter(user=user_michu).count(), 5)", "def put(self):\n user_id = get_jwt_identity()\n user = find_user(user_id)\n mark_all_notifications_as_read(user)", "def test_put_non_id(self):\n\n data = {\n 'time': '23:38:54'\n }\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id})\n response = self.client.put(url, json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')\n self.assertEqual(response.status_code, 400)", "def test_put_wrong_notification_id(self):\n\n data = {\n 'time': '23:38:54'\n }\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': 6778})\n response = self.client.put(url, json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')\n self.assertEqual(response.status_code, 400)", "def handler(event, context):\n alert_message = json.loads(event['Records'][0]['Sns']['Message'])\n alarm_name = alert_message['AlarmName']\n reason = alert_message['NewStateReason']\n new_state = alert_message['NewStateValue']\n color = \"good\" if new_state == 'OK' else \"danger\"\n\n region = os.getenv('AWS_DEFAULT_REGION')\n alert_url = f'https://console.aws.amazon.com/cloudwatch/home?region={region}#alarm:alarmFilter=ANY;name={alarm_name}'\n link = f\"<{alert_url}|{alarm_name}>\"\n\n secrets = json.loads(get_secret()['SecretString'])\n default_slack_channel = secrets['slack_alert_channel']\n alarm_description = json.loads(alert_message.get('AlarmDescription', '{}'))\n slack_channel = alarm_description.get(\"slack_channel\", default_slack_channel)\n description = alarm_description.get(\"description\")\n slack_message = '\\n'.join(\n [f\"New state: {new_state}\", f\"Description: {description}\", reason]\n )\n\n attachments = [{\n \"fallback\": f\"{link} {slack_message}\",\n \"title\": alarm_name,\n \"title_link\": alert_url,\n \"text\": slack_message,\n \"color\": color\n }]\n\n slack_url = secrets['slack_webhooks'][slack_channel]\n\n post_message_to_url(slack_url, {\"attachments\": attachments})", "def test_subscribe_to_topic_groups(self):\r\n # This method utilises the PUT 
request method and will make changes to the Canvas instance. This needs consideration.\r\n pass" ]
[ "0.73635596", "0.6459767", "0.60581565", "0.6028323", "0.60136247", "0.57947695", "0.5774505", "0.57623434", "0.5741606", "0.57012004", "0.5592528", "0.5584041", "0.55792534", "0.5516984", "0.55149835", "0.54886246", "0.5476943", "0.5455049", "0.5427881", "0.53757334", "0.53474516", "0.5344876", "0.5334119", "0.5334053", "0.53101504", "0.52883434", "0.5282166", "0.52770865", "0.52310884", "0.52268857" ]
0.7614871
0
Test alert policies .associate_with_notification_channel() calls put with correct parameters
def test_dissociate_from_notification_channel(self, mock_put): self.policies.associate_with_notification_channel( id=self.policy_single_response['policy']['id'], channel_id=self.channel_single_response['channel']['id'], ) mock_put.assert_called_once_with( url='https://api.newrelic.com/v2/alerts_policy_channels.json?policy_id={0}&channel_ids={1}'.format( self.policy_single_response['policy']['id'], self.channel_single_response['channel']['id'] ), headers=self.policies.headers )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_associate_with_notification_channel_success(self, mock_put):\n self.policies.associate_with_notification_channel(\n id=self.policy_single_response['policy']['id'],\n channel_id=self.channel_single_response['channel']['id'],\n )\n\n mock_put.assert_called_once_with(\n url='https://api.newrelic.com/v2/alerts_policy_channels.json?policy_id={0}&channel_ids={1}'.format(\n self.policy_single_response['policy']['id'],\n self.channel_single_response['channel']['id']\n ),\n headers=self.policies.headers\n )", "def upsert_alert(\n self,\n alerts: str,\n project_id: str = PROVIDE_PROJECT_ID,\n retry: Retry | _MethodDefault = DEFAULT,\n timeout: float | None = None,\n metadata: Sequence[tuple[str, str]] = (),\n ) -> None:\n policy_client = self._get_policy_client()\n channel_client = self._get_channel_client()\n\n record = json.loads(alerts)\n existing_policies = [\n policy[\"name\"] for policy in self.list_alert_policies(project_id=project_id, format_=\"dict\")\n ]\n existing_channels = [\n channel[\"name\"]\n for channel in self.list_notification_channels(project_id=project_id, format_=\"dict\")\n ]\n policies_ = []\n channels = []\n for channel in record.get(\"channels\", []):\n channels.append(NotificationChannel(**channel))\n for policy in record.get(\"policies\", []):\n policies_.append(AlertPolicy(**policy))\n\n channel_name_map = {}\n\n for channel in channels:\n channel.verification_status = (\n monitoring_v3.NotificationChannel.VerificationStatus.VERIFICATION_STATUS_UNSPECIFIED\n )\n\n if channel.name in existing_channels:\n channel_client.update_notification_channel(\n request={\"notification_channel\": channel},\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n else:\n old_name = channel.name\n channel.name = None\n new_channel = channel_client.create_notification_channel(\n request={\"name\": f\"projects/{project_id}\", \"notification_channel\": channel},\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n channel_name_map[old_name] = new_channel.name\n\n for policy in policies_:\n policy.creation_record = None\n policy.mutation_record = None\n\n for i, channel in enumerate(policy.notification_channels):\n new_channel = channel_name_map.get(channel)\n if new_channel:\n policy.notification_channels[i] = new_channel\n\n if policy.name in existing_policies:\n with contextlib.suppress(InvalidArgument):\n policy_client.update_alert_policy(\n request={\"alert_policy\": policy},\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n else:\n policy.name = None\n for condition in policy.conditions:\n condition.name = None\n policy_client.create_alert_policy(\n request={\"name\": f\"projects/{project_id}\", \"alert_policy\": policy},\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )", "def test_set_alert(self):\n alert = dweepy.set_alert(\n self.my_thing_id,\n ['[email protected]', '[email protected]'],\n test_alert_condition,\n test_key,\n )\n self.assertEqual(alert['condition'], test_alert_condition)", "def put_bucket_notification(Bucket=None, NotificationConfiguration=None):\n pass", "def put_bucket_notification_configuration(Bucket=None, NotificationConfiguration=None):\n pass", "def test_alert_create(self):\n pass", "def test_put_from_another_way(self):\n data = {\n 'start_time': '2019-10-29',\n 'end_time': '2019-12-29',\n 'week_day': 6,\n 'time': '23:58:59'\n }\n url = reverse('notification', kwargs={'way_id': 101, 'notification_id': self.notification.id})\n response = self.client.put(url, json.dumps(data, cls=DjangoJSONEncoder), 
content_type='application/json')\n self.assertEqual(response.status_code, 403)", "def test_update_preference_communication_channel_id(self):\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\n pass", "def test_update(self, mock_put):\n self.policies.update(id=333114, policy_update=self.policy_show_response)\n\n mock_put.assert_called_once_with(\n url='https://api.newrelic.com/v2/alert_policies/333114.json',\n headers=self.policies.headers,\n data=json.dumps(self.policy_show_response)\n )", "def test_put_success(self):\n\n data = {\n 'time': '23:58:53'\n }\n\n url = reverse('notification', kwargs={'way_id': self.notification.way_id, 'notification_id': self.notification.id})\n response = self.client.put(url, json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')\n self.assertEqual(response.status_code, 200)", "def test_db_creating_put(self):\n data = {\n 'start_time': '2019-10-29',\n 'end_time': '2019-12-29',\n 'week_day': 6,\n 'time': '23:58:59'\n }\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': self.notification.id})\n\n with mock.patch('notification.models.Notification.update') as notification_update:\n notification_update.return_value = False\n\n response = self.client.put(url, json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')\n self.assertEqual(response.status_code, 400)", "def test_update_success(self, mock_put):\n self.policies.update(\n id=self.policy_single_response['policy']['id'],\n name=self.policy_single_response['policy']['name'],\n incident_preference=self.policy_single_response['policy']['incident_preference']\n )\n\n mock_put.assert_called_once_with(\n url='https://api.newrelic.com/v2/alerts_policies/{0}.json'.format(\n self.policy_single_response['policy']['id']\n ),\n headers=self.policies.headers,\n data=json.dumps({\n \"policy\": {\n \"name\": self.policy_single_response['policy']['name'],\n \"incident_preference\": self.policy_single_response['policy']['incident_preference']\n }\n })\n )", "def create_notification(notification_name):\n url = CMX_URL + '/api/config/v1/notification'\n print('CMX URL and Resource: ', url)\n payload = {\n \"name\": notification_name,\n \"rules\": [\n {\n \"conditions\": [\n {\n \"condition\": \"inout.deviceType == client\"\n },\n {\n \"condition\": \"inout.in/out == in\"\n },\n {\n \"condition\": \"inout.hierarchy == DevNetCampus>DevNetBuilding>DevNetZone\"\n }\n ]\n }\n ],\n \"subscribers\": [\n {\n \"receivers\": [\n {\n \"uri\": \"http://128.107.70.29:8010\",\n \"messageFormat\": \"JSON\",\n \"qos\": \"AT_MOST_ONCE\"\n }\n ]\n }\n ],\n \"enabled\": True,\n \"enableMacScrambling\": True,\n \"macScramblingSalt\": \"listening\",\n \"notificationType\": \"InOut\"\n }\n header = {'content-type': 'application/json', 'accept': 'application/json'}\n notification_response = requests.put(url, data=json.dumps(payload), headers=header, auth=CMX_AUTH, verify=False)\n print('Notification Status Code: ', notification_response.status_code)\n return notification_response.status_code", "def test_update_notification(client):\n create_user_response = create_user(client, TEST_USER_NAME, TEST_USER_PASS)\n assert create_user_response.status_code == HttpStatus.created_201.value\n\n new_notification_message_one = 'Fortnite has a new winner'\n new_notification_category_one = 'Information'\n post_response = create_notification(\n client,\n new_notification_message_one,\n 30,\n 
new_notification_category_one)\n assert post_response.status_code == HttpStatus.created_201.value\n assert Notification.query.count() == 1\n\n post_response_data = json.loads(post_response.get_data(as_text=True))\n new_notification_url = post_response_data['url']\n new_displayed_times = 1\n data = {'displayed_times': new_displayed_times}\n patch_response = client.patch(\n new_notification_url,\n headers=get_authentication_headers(TEST_USER_NAME, TEST_USER_PASS),\n data=json.dumps(data))\n assert patch_response.status_code == HttpStatus.ok_200.value\n\n get_response = client.get(\n new_notification_url,\n headers=get_authentication_headers(TEST_USER_NAME, TEST_USER_PASS))\n assert get_response.status_code == HttpStatus.ok_200.value\n\n get_response_data = json.loads(get_response.get_data(as_text=True))\n assert get_response_data['displayed_times'] == new_displayed_times", "def test__AutoModerationActionMetadataSendAlertMessage__copy_with__1():\n old_channel_id = 202211130006\n new_channel_id = 202211130007\n \n metadata = AutoModerationActionMetadataSendAlertMessage(old_channel_id)\n \n copy = metadata.copy_with(channel_id = new_channel_id)\n \n _assert_fields_set(copy)\n vampytest.assert_eq(copy.channel_id, new_channel_id)", "def test_get_apns_message_with_succes(self, mock_client): \n\n event = {\n 'operation': 'getMessage', \n 'arguments': {\n 'template': 'my-sample-geofence-id',\n }\n }\n\n response = {\n \"PushNotificationTemplateResponse\": {\n 'APNS': {\n 'Action': 'OPEN_APP',\n 'Title': 'Sample Title',\n 'Body': 'This is a sample body'\n }\n }\n }\n\n mock_client().get_push_template.return_value = response\n response = manageMessages.handler(event, None)\n\n self.assertTrue(response)\n self.assertEqual(response['status'], 'MESSAGE_OK')\n self.assertEqual(response['message']['service'], 'APNS')", "def test_all_options(self, provider, test_message):\n data = {\n \"message\": test_message,\n \"title\": \"title\",\n \"priority\": 2,\n \"url\": \"http://foo.com\",\n \"url_title\": \"url title\",\n \"sound\": \"bike\",\n \"timestamp\": \"0\",\n \"retry\": 30,\n \"expire\": 30,\n \"callback\": \"http://callback.com\",\n \"html\": True,\n }\n rsp = provider.notify(**data)\n rsp.raise_on_errors()", "def test_update_multiple_preferences_communication_channel_id(self):\n # This method utilises the PUT request method and will make changes to the Canvas instance. 
This needs consideration.\n pass", "def test_update_subscription(self):\n pass", "def test_update_risk_profile_using_put(self):\n pass", "def test_successful_subscriptions_notifies_pm(self) -> None:\n invitee = self.example_user(\"iago\")\n\n current_stream = self.get_streams(invitee)[0]\n invite_streams = self.make_random_stream_names([current_stream])[:1]\n self.common_subscribe_to_streams(\n invitee,\n invite_streams,\n extra_post_data={\n \"announce\": \"true\",\n \"principals\": orjson.dumps([self.user_profile.id]).decode(),\n },\n )", "def put(self, notification_id):\n user_id = get_jwt_identity()\n user = find_user(user_id)\n if not mark_notification_as_read(user, notification_id):\n return {'msg': \"Not allowed to mark notification as read\"}, 401", "def notify(nt_id, application, action, remedy, subj, heading):\n\n email = get_email(nt_id)\n lambda_client = boto3.client('lambda')\n messages = create_messages(application, action, remedy)\n print(email)\n email_data = {\n 'sender_mail': SENDER_EMAIL,\n 'email': email,\n 'subj': subj,\n 'heading': heading,\n 'messages': messages,\n 'region': os.environ.get(\"AWS_DEFAULT_REGION\")\n }\n invoke_email_response = lambda_client.invoke(\n FunctionName= os.environ.get(\"formatted_email\"),\n InvocationType= \"RequestResponse\",\n Payload= json.dumps(email_data)\n )\n err = checkError(invoke_email_response, \"Error sending email!\")\n if err:\n print(str(err))\n\n slack_data = {\n 'application_url': APP_URL,\n 'channel': CHANNEL,\n 'message': messages[1].rsplit(\"\\n\",5)[0],\n 'channel_id': CHANNEL_ID,\n 'nt_ids': [nt_id]\n }\n invoke_slack_response = lambda_client.invoke(\n FunctionName= os.environ.get(\"slack_message\"),\n InvocationType= \"RequestResponse\",\n Payload= json.dumps(slack_data)\n )\n err = checkError(invoke_slack_response, \"Error sending slack message!\")\n if err:\n print(str(err))", "def test_put_non_owner(self):\n another_user = CustomUser.objects.create(id=1067, email='[email protected]', is_active=True)\n another_user.set_password('testpassword')\n another_user.save()\n\n self.client.login(email='[email protected]', password='testpassword')\n\n data = {\n 'week_day': 3\n }\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': self.notification.id})\n response = self.client.put(url, json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')\n self.assertEqual(response.status_code, 403)", "def test_signal_update_achievement(self):\n # Create two users for test all the achievements. 
Two for the podium\n client = Client()\n user_michu = create_user('passwordmichu', 'michu')\n create_user('passwordimmobile', 'immobile')\n client.login(username='immobile', password='passwordimmobile')\n # Create the Collection for the achievement NumSolvedCollectionAchievementDefinition and Problem\n coll = create_collection('Coleccion de cartas')\n # Create PodiumAchievementDefinition\n ach_podium = PodiumAchievementDefinition(name={\"es\":'Presidente del podio'},\n description={\"es\":'Consigue ser el primero'},\n num_problems=1, position=1)\n ach_podium.save()\n # Create NumSolvedCollectionAchievementDefinition\n ach_collection = NumSolvedCollectionAchievementDefinition(name={\"es\":'Coleccionista'},\n description={\"es\":'Resuelve 50\\\n problemas de esta coleccion'},\n num_problems=50,\n collection=coll)\n ach_collection.save()\n # Create NumSolvedAchievementDefinition\n ach_solved = NumSolvedAchievementDefinition(name={\"es\":'Resolvista'},\n description={\"es\":'Resuelve 50 problemas'},\n num_problems=50)\n ach_solved.save()\n # Create NumSolvedTypeAchievementDefinition\n ach_type = NumSolvedTypeAchievementDefinition(name={\"es\":'Procedista'},\n description={\"es\":'Resuelve un problema PROC'},\n num_problems=1, problem_type=ProblemType.PROC.name)\n ach_type.save()\n # Create NumSubmissionsProblemsAchievementDefinition\n ach_submi_pro = NumSubmissionsProblemsAchievementDefinition(name={\"es\":'Muchos envios'},\n description={\"es\":'Envia muchas soluciones'},\n num_submissions=80, num_problems=1)\n ach_submi_pro.save()\n # Create problem and submit correct answer with \"immobile\" user, for make this the first to solve the problem\n problem = create_select_problem(coll, 'Problema')\n submit_select_url = reverse('judge:submit', args=[problem.pk])\n client.post(submit_select_url, {'code': problem.solution}, follow=True)\n client.logout()\n # Login with \"michu\" and submit correct answer. 
All the checks will be with this user\n client.login(username='michu', password='passwordmichu')\n client.post(submit_select_url, {'code': problem.solution}, follow=True)\n # Whit this definitions our user \"michu\" don't have any achievement\n self.assertEqual(ObtainedAchievement.objects.filter(user=user_michu).count(), 0)\n # PodiumAchievementDefinition now only need to stay in podium\n # In this test our user \"michu\" stay at second position, that is why before he didn't have the achievement\n ach_podium.position = 3\n ach_podium.save()\n # NumSolvedCollectionAchievementDefinition only needs one correct submission\n # In this test our user only have one correct submission, that is why before he didn't have the achievement\n ach_collection.num_problems = 1\n ach_collection.save()\n # NumSolvedAchievementDefinition only needs one correct submission\n # In this test our user only have one correct submission, that is why before he didn't have the achievement\n ach_solved.num_problems = 1\n ach_solved.save()\n # NumSolvedTypeAchievementDefinition change to type SELECT\n # In this test our user only resolved a SELECT type problem, not PROC.\n ach_type.problem_type = ProblemType.SELECT.name\n ach_type.save()\n # NumSubmissionsProblemsAchievementDefinition only needs one submission now\n ach_submi_pro.num_submissions = 1\n ach_submi_pro.save()\n # Now our user \"michu\" have 5 achievements\n self.assertEqual(ObtainedAchievement.objects.filter(user=user_michu).count(), 5)", "def put(self):\n user_id = get_jwt_identity()\n user = find_user(user_id)\n mark_all_notifications_as_read(user)", "def test_put_non_id(self):\n\n data = {\n 'time': '23:38:54'\n }\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id})\n response = self.client.put(url, json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')\n self.assertEqual(response.status_code, 400)", "def test_put_wrong_notification_id(self):\n\n data = {\n 'time': '23:38:54'\n }\n\n url = reverse('notification',\n kwargs={'way_id': self.notification.way_id, 'notification_id': 6778})\n response = self.client.put(url, json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')\n self.assertEqual(response.status_code, 400)", "def handler(event, context):\n alert_message = json.loads(event['Records'][0]['Sns']['Message'])\n alarm_name = alert_message['AlarmName']\n reason = alert_message['NewStateReason']\n new_state = alert_message['NewStateValue']\n color = \"good\" if new_state == 'OK' else \"danger\"\n\n region = os.getenv('AWS_DEFAULT_REGION')\n alert_url = f'https://console.aws.amazon.com/cloudwatch/home?region={region}#alarm:alarmFilter=ANY;name={alarm_name}'\n link = f\"<{alert_url}|{alarm_name}>\"\n\n secrets = json.loads(get_secret()['SecretString'])\n default_slack_channel = secrets['slack_alert_channel']\n alarm_description = json.loads(alert_message.get('AlarmDescription', '{}'))\n slack_channel = alarm_description.get(\"slack_channel\", default_slack_channel)\n description = alarm_description.get(\"description\")\n slack_message = '\\n'.join(\n [f\"New state: {new_state}\", f\"Description: {description}\", reason]\n )\n\n attachments = [{\n \"fallback\": f\"{link} {slack_message}\",\n \"title\": alarm_name,\n \"title_link\": alert_url,\n \"text\": slack_message,\n \"color\": color\n }]\n\n slack_url = secrets['slack_webhooks'][slack_channel]\n\n post_message_to_url(slack_url, {\"attachments\": attachments})", "def test_subscribe_to_topic_groups(self):\r\n # This method utilises the PUT 
request method and will make changes to the Canvas instance. This needs consideration.\r\n pass" ]
[ "0.7613531", "0.64621556", "0.6059576", "0.60289395", "0.6013516", "0.57984847", "0.57735455", "0.57597476", "0.5742299", "0.570157", "0.5592619", "0.55849236", "0.5579137", "0.55171704", "0.5513387", "0.54899305", "0.54773784", "0.5452856", "0.54276377", "0.5375686", "0.5347609", "0.534428", "0.5334385", "0.5332746", "0.53111345", "0.528796", "0.52823526", "0.52772903", "0.52328926", "0.5226273" ]
0.7362509
1
Add a contact representing a publication coauthored by the ego to the ego network.
def add_publication(self, pub_id: int, timestamp: int, title: str, coauthors: List[str], contact_type="__all__"): if self.min_pub_date is None or timestamp >= self.min_pub_date: # standardize names, remove possible duplicates and wrong names, and remove ego if present std_coauth_names = set() for coauthor_name in coauthors: if len(coauthor_name) > 1: std_coauth_names.add(self.get_std_author_name(coauthor_name)) std_coauth_names.discard(self.get_std_author_name(self.ego_name)) for coauthor_name in std_coauth_names: if self.last_time is None or timestamp > self.last_time: self.last_time = timestamp super(CoauthorshipNamedEgoNetwork, self).add_contact(timestamp=timestamp, alter_id=coauthor_name, contact_type=contact_type, text=title, num_contacted_alters=len(std_coauth_names)) self.publications[pub_id] = (title, std_coauth_names)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AddContact(self, contact):\n\t\tcontact.group_membership_info = [gdata.contacts.data.GroupMembershipInfo(href=self.GetFirstGroupId())]\n\t\ttry:\n\t\t\tself.client.CreateContact(contact)\n\t\texcept gdata.client.RequestError:\n\t\t\tpass", "def add_contact(self, contact):\n\t\tclient_log.debug(f'Создание контакта {contact}')\n\t\treq = {\n\t\t\tACTION: ADD_CONTACT,\n\t\t\tTIME: time.time(),\n\t\t\tUSER: self.username,\n\t\t\tACCOUNT_NAME: contact\n\t\t}\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, req)\n\t\t\tself.process_server_ans(get_message(self.transport))", "def addContact (self, dleseContributor):\n\t\tcontacts_el = self.selectSingleNode(self.dom, 'record:collection:contacts')\n\t\tif not contacts_el:\n\t\t\traise Exception, 'contacts node not found'\n\t\tel = XmlUtils.addElement(self.dom, contacts_el, 'contact')\n\t\t\n\t\tel.setAttribute (\"email\",dleseContributor.getEmail());\n\t\tel.setAttribute (\"name\", dleseContributor.getFullName());\n\t\tel.setAttribute (\"urlReport\", 'false');\n\t\tel.setAttribute (\"active\", 'false');", "def add_contact(self, contact):\n self.db.insert_contact(contact)\n return self.update_contacts()", "def add_contact():\n return 'add contact'", "def add(self, publication: P) -> None:\n ...", "def add_contact(self, contact):\n assert self.contact_in_range(contact), 'Wrong KBucket.'\n try:\n self._contacts.remove(contact)\n except ValueError:\n pass\n\n if len(self._contacts) < constants.K:\n self._contacts.append(contact)\n else:\n raise FullBucketError('No space in bucket to insert contact')", "def add_contact(self, name, number, email, zipcode):\n \n new_contact = f\"{name}, {number}, {email}, {zipcode}\"\n contact_list = [name,number,email,zipcode]\n self.contacts.append(contact_list)\n self.save()\n print(f\"Thank you {new_contact} has been added to your contact book.\")", "def add_contact(contact):\n db = get_db()\n \n if contact.get_hash_name() not in db:\n db[contact.get_hash_name()] = json.loads(contact.json())\n write_db(db)\n else:\n sys.exit(logger.fail('fatal: contact already exists'))", "def add_contact(cmd, *args):\n cfg = get_config()\n nick = None\n if len(args) == 0:\n print(add_contact.__doc__)\n if len(args) >= 1:\n nick = args[0]\n fulname = nick # fullname fallback\n if len(args) >= 2:\n fullname = args[1]\n #print('fullname %s' %fullname)\n else:\n print(\"cant handle those params \" + str(args))\n\n vcard_fn = nick + '.vcf'\n vcard_fn = os.path.join(cfg['vcard_dir'], vcard_fn)\n #print('expecting file at %s' %vcard_fn)\n\n info = {}\n info['nick'] = nick\n info['fullname'] = fullname\n if len(fullname.split(' ')) > 1:\n subname = fullname.split()\n info['name'] = {'family': subname[0], 'given': subname[1]}\n if os.path.isfile(vcard_fn):\n print('file exists for %s, at %s please move or rename it'\n % (nick, vcard_fn))\n return False\n vcard = vobject.vCard()\n if os.path.isfile(vcard_fn):\n vcard = loadcraphere\n else:\n vcard_merge_in_dict(info, vcard)\n rawdata = vcard.serialize()\n with open(vcard_fn, 'w+') as fh:\n fh.write(rawdata)\n #print('written, sucker!')\n #annoyingly verbose vcard here'\n #Full Name = fn. 
Single string, entire name, required\n #x = vobject.vCard()\n # x.name = 'Foo'", "def create_contact(contact, party_type, party):\n\tcontact = contact\t.split(\" \")\n\n\tcontact = frappe.get_doc({\n\t\t\"doctype\":\"Contact\",\n\t\t\"first_name\":contact[0],\n\t\t\"last_name\": len(contact) > 1 and contact[1] or \"\"\n\t})\n\tcontact.append('links', dict(link_doctype=party_type, link_name=party))\n\tcontact.insert()", "def addcontact(name, address=None, phone=None, email=None):\n try:\n newid = str(r.incr(\"global:nextUserId\"))\n _setcontact(newid, name, address, phone, email)\n r.sadd(\"contacts\", newid)\n\n return _getcontact(newid)\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise", "def contact(self, contact):\n\n self._contact = contact", "def contact(self, contact):\n\n self._contact = contact", "def add_contact(self):\n contact_list = {}\n contact_list[self.my_number] = self.name\n connect_db = Database()\n connect_db.add_contact(self.name, self.my_number)", "def test_add_contact(session): # pylint:disable=unused-argument\n org = factory_org_service()\n org.add_contact(TestContactInfo.contact1)\n dictionary = org.as_dict()\n assert dictionary['contacts']\n assert len(dictionary['contacts']) == 1\n assert dictionary['contacts'][0]['email'] == TestContactInfo.contact1['email']", "def add_to_group(self, org, contact, group):\n pass", "def do_addContact(self, line):\n\t\tif not(self.db is None):\n\t\t\tcont = self.db.contact\n\t\t\tcontact_info = {\n\t\t\t\t'first_name': input(\"First name: \"),\n\t\t\t\t'surname': input(\"Surname: \"),\n\t\t\t\t'company': input(\"Company: \"),\n\t\t\t\t'address': input(\"Address: \"),\n\t\t\t\t'telephone': input(\"Telephone: \"),\n\t\t\t\t'email': input(\"Email: \")\n\t\t\t}\n\t\t\tcont.insert_one(contact_info)\n\t\telse:\n\t\t\tprint(\"You must open the existing database or create new one.\")", "def add_contact(self, type, value, note=\"\", area_code=None):\n if type:\n type = clean_string(type)\n if note:\n note = clean_string(note)\n if type in CONTACT_DETAIL_TYPE_MAP:\n type = CONTACT_DETAIL_TYPE_MAP[type]\n if note in CONTACT_DETAIL_NOTE_MAP:\n note = CONTACT_DETAIL_NOTE_MAP[note]\n\n type = type.lower()\n\n if type in (\"text\", \"voice\", \"fax\", \"cell\", \"video\", \"pager\"):\n value = self.clean_telephone_number(clean_string(value), area_code=area_code)\n elif type == \"address\":\n value = self.clean_address(value)\n else:\n value = clean_string(value)\n\n # The post membership is added before the party membership.\n self._related[0].add_contact_detail(type=type, value=value, note=note)", "def create_contact_on_google(self, info):\n\n\t\twith open('client.pickle') as pickle_file:\n\t\t\tclient = pickle.load(pickle_file)\n\n\t\t#create contact in google\n\t\tnew_contact = gdata.contacts.data.ContactEntry()\n\n\t\t# Set the contact's name.\n\t\tnew_contact.name = gdata.data.Name( given_name=gdata.data.GivenName(text=info['name']), family_name=gdata.data.FamilyName(text=info['name']),\n\t\t\tfull_name=gdata.data.FullName(text=info['name']))\n\n\t\tnew_contact.content = atom.data.Content(text='Notes')\n\n\t\t# Set the contact's email addresses.\n\t\tnew_contact.email.append(gdata.data.Email(address=info['email'], primary='true', rel=gdata.data.WORK_REL, display_name=info['name']))\n\n\t\t# Set the contact's phone numbers.\n\t\tnew_contact.phone_number.append(gdata.data.PhoneNumber(text=info['phone'], rel=gdata.data.WORK_REL, primay='true'))\n\n\t\tcontact_entry = 
client.CreateContact(new_contact)\n\t\twebnotes.errprint(\"Contact's ID: %s\" % contact_entry.id.text)\n\n\t\twebnotes.conn.set_value(\"Contact\",self.name,\"contct_id\", contact_entry.id.text)", "def add_contact(self, request, **kwargs):\n if request.data is None:\n return Response({'message': 'Invalid contact details'}, status=status.HTTP_400_BAD_REQUEST)\n if request.data.get('first_name') is None:\n return Response({'message': 'First name not provided'}, status=status.HTTP_400_BAD_REQUEST)\n\n contact_data = request.data.get('contact')\n for data in contact_data:\n print(data.get('phone'))\n try:\n parse_number = phonenumbers.parse(data.get('phone'), None)\n except Exception:\n return Response({'details': 'Invalid Phonenumber'}, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n if not phonenumbers.is_valid_number(parse_number):\n return Response({'details': 'Invalid Phonenumber entered'}, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n new_contact_data = ContactCreationAndUpdationMixin().create(request.data)\n group = self.get_object()\n group.contacts.add(new_contact_data)\n serializer_data = ContactSerializer(new_contact_data) \n return Response(serializer_data.data)", "def do_adduser(self, line):\n\t\tif isinstance(self.cl, Book):\n\t\t\tself.cl.add_contact()\n\t\telse:\n\t\t\tprint(\"To add contacts you need to open or create a book.\")", "def add_cc_recipient(self, address):\n if not self.validate_email_address(address):\n raise Exception(\"Invalid email address '%s'\" % address)\n self._cc.append(address)", "def create_contact(contact, party_type, party, email):\n\tcontact = contact.split(' ')\n\n\tcontact = frappe.get_doc({\n\t\t'doctype': 'Contact',\n\t\t'first_name': contact[0],\n\t\t'last_name': len(contact) > 1 and contact[1] or \"\"\n\t})\n\tcontact.append('email_ids', dict(email_id=email, is_primary=1))\n\tcontact.append('links', dict(link_doctype=party_type, link_name=party))\n\tcontact.insert()", "def add_contact(self):\n contact = Contact.create_contact()\n self.contact_list.append(contact)\n\n df = pd.read_csv('address_book.csv')\n #print(df)\n adf = pd.DataFrame({'FIRST NAME': [contact.first_name],\n 'LAST NAME': [contact.last_name],\n 'ADDRESS': [contact.address],\n 'CITY': [contact.city],\n 'STATE': [contact.state],\n 'ZIP CODE': [contact.zip],\n 'PHONE NUMBER': [contact.phone_number],\n 'EMAIL': [contact.email]})\n adf.to_csv('address_book.csv',mode='a', header=False, index=None)\n #storing all contacts in address_book.csv file\n \"\"\"with open(\"address_book.csv\", \"w\") as f:\n for contact in self.contact_list:\n f.write(f\"FIRST NAME -> {contact.first_name}\\n\"\n f\"LAST NAME -> {contact.last_name}\\n\"\n f\"ADDRESS -> {contact.address}\\n\"\n f\"CITY -> {contact.city}\\n\"\n f\"STATE -> {contact.state}\\n\"\n f\"ZIP CODE -> {contact.zip}\\n\"\n f\"PHONE NUMBER -> {contact.phone_number}\\n\"\n f\"EMAIL -> {contact.email}\\n\\n\")\"\"\"", "def add(self, connection):\n id = len(self.contacts)\n self.contacts[id] = connection\n self.order.append(id)", "def menu_contact_author(self, event=None):\n self.parentPanel.contact_author()", "async def post(self):\n await self.handle_request(self.contacts_new_api, 1)", "def add_contact(self):\n contact_mob_num = self._input_mob_num(\"-=\" * 30 + \"\\n\" + \"Please enter contact's mobile number to be added: \")\n if contact_mob_num == self._user.mob_num:\n print(\"You can't add yourself, IDIOT!!\")\n return self.homepage()\n \n found_contact = self.auth.get_users_by_MobNum(contact_mob_num)\n if found_contact != None:\n print('A 
user with Mobile number: \"{0}\", and User name: \"{1}\" is found'.format(found_contact.mob_num, found_contact.username))\n user_choice = self._int_input_in_range(\" (1) Add the found user. \\n (0) Back to Home page \\n Your choice: \" \n ,range_ = (0, 1))\n if user_choice:\n add_flag = self._user.add_contact(found_contact)\n if not add_flag:\n print('This user is already one of your contacts')\n return self.homepage()\n print(\"Contact added successfully\")\n else:\n self.homepage()\n else:\n print('This user mobile number has no matches')\n return self.homepage()", "def add(self, author, post):\n if not author in self.authors:\n self.authors.append(author)\n self.posts[author].append(post)\n return" ]
[ "0.64493227", "0.6373825", "0.62784225", "0.6231083", "0.60652024", "0.59836507", "0.59490603", "0.579805", "0.5783082", "0.5768027", "0.5641641", "0.5640455", "0.56229985", "0.56229985", "0.5609489", "0.55984056", "0.54995424", "0.54777515", "0.5452757", "0.5432362", "0.54296756", "0.5428254", "0.54191923", "0.539068", "0.5390614", "0.53530324", "0.53353184", "0.5300052", "0.5246999", "0.5245395" ]
0.6801305
0
Reset the function call count to zero.
def reset_count(self): self.count = 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset_counter(self) -> None:", "def reset_calls(self) -> None:\n self.logger.info(\"Reset calls\")\n\n self._has_bob = False\n self._has_single = False", "def reset (self):\n self.counter = 0", "def reset(self):\n self.counter = 0", "def reset(self):\n # self.compile_time = 0.\n self.fct_call_time = 0.0\n self.fct_callcount = 0\n self.vm_call_time = 0.0\n self.apply_time = {}\n self.apply_callcount = {}\n # self.apply_cimpl = None\n # self.message = None", "def reset(self):\n self.reset_count += 1\n self._init_data()", "def reset(self, *args, **kwargs):", "def resetOperationCount():\n global _operationCount\n _countLock.acquire()\n try:\n _operationCount = 0\n finally:\n _countLock.release()", "def reset():\n pass", "def reset():\n pass", "def reset():", "def reset():", "def reset():", "def reset() -> None:\n ...", "def reset(self, *args, **kwargs):\n ...", "def _reset_count(self):\n self._triple_count = 0\n self._error_count = 0\n self._ignored_count = 0", "def reset():\r\n pass", "def reset(*args):", "def reset(*args):", "def reset(*args):", "def reset(self):\n ...", "def reset(self):\n ...", "def reset(self):\n \n pass", "def reset(self):\n self.ignoreCount = 0\n self.errorCount = 0\n self.warningCount = 0\n self.fatalCount = 0\n self.stage = 'initialise'\n self.event = None", "def reset(self):", "def reset(self):", "def reset(self):", "def reset(self):", "def reset(self):\n self.sum_metric = 0.\n self.num_inst = 0.\n self.metrics.reset_stats()", "def reset() -> None:\n\t_flag.clear()" ]
[ "0.7342854", "0.7133632", "0.7101041", "0.7076571", "0.6986588", "0.6887243", "0.68343556", "0.6833832", "0.6802058", "0.6802058", "0.68014395", "0.68014395", "0.68014395", "0.6790398", "0.67423177", "0.6730629", "0.6721631", "0.6674541", "0.6674541", "0.6674541", "0.6646674", "0.6646674", "0.662316", "0.6583428", "0.6582089", "0.6582089", "0.6582089", "0.6582089", "0.65689343", "0.65617627" ]
0.7426931
0
Test the average execution time of a given function.
def time_function(function, runs=1, average=min): results = [None] * runs for i in range(runs): t0 = time.perf_counter() function() t1 = time.perf_counter() results[i] = t1 - t0 return average(results)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculateRunTime(function, *args):\n startTime = time.time()\n result = function(*args)\n return time.time() - startTime, result", "def execution_time(function: Callable, args=tuple(), kwargs=dict()):\n start_time = time.time()\n function(*args, **kwargs)\n end_time = time.time()\n return end_time - start_time", "def time_function(func: \"Function call to be evaluted as str.\") -> float:\n start = time.time()\n eval(func)\n return time.time() - start", "def _run_time(func):\n start_time = datetime.datetime.now()\n func\n end_time = datetime.datetime.now()\n return end_time - start_time", "def timetest(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n\n trials = 10\n total = 0\n\n for i in range(0, trials):\n\n print func.__name__ + \" Trial:\" + str(i + 1)\n\n start = time.time()\n r = func(*args, **kwargs)\n end = time.time()\n\n total += end - start\n\n return {'test': func.__name__, 'data_information': r, 'average_time': total / trials, 'trials': trials}\n return wrapper", "def time_func(func):\r\n start = time.clock()\r\n func()\r\n elapsed = time.clock() - start\r\n print elapsed, \"sec\"", "def timing(n_runs: int, func, warm_up: bool = True, verbose: bool = True):\n\n lower = float('inf')\n upper = -float('inf')\n avg = 0\n\n if verbose:\n print(f\"Timing (runs:{n_runs}): '{str(func)}' - \", end=\"\", flush=True)\n\n # Call once without measurement \"to get warm\"\n if warm_up:\n if verbose:\n print(\"warm-up...\", end=\"\", flush=True)\n\n func()\n\n if verbose:\n print(\"done. \", end=\"\", flush=True)\n\n for i in range(n_runs):\n start = time.time()\n func()\n end = time.time()\n\n diff = end - start\n\n lower = min(lower, diff)\n upper = max(upper, diff)\n avg += (diff - avg)/(i+1)\n\n if verbose:\n print(\"#\", end=\"\", flush=True)\n\n if verbose:\n print(\" done.\")\n\n return avg, lower, upper", "def log_execution_time(func):\n @wraps(func)\n def _func(*args, **kwargs):\n start_time = time()\n func(*args, **kwargs)\n execution_time = time() - start_time\n print('Function {} took {:05.3f} seconds to run.'.format(\n func.__name__, execution_time))\n\n return _func", "def timeit(func):\n\n def measure_time(*args, **kw):\n start_time = time.perf_counter()\n result = func(*args, **kw)\n time_ms = (time.perf_counter() - start_time) * 1000\n if time_ms < 0.1:\n print(\"Processing time of %s(): %.1f μs.\"\n % (func.__qualname__, time_ms*1000))\n else:\n print(\"Processing time of %s(): %.3f ms.\"\n % (func.__qualname__, time_ms))\n return result\n\n return measure_time", "def measure(func):\n @functools.wraps(func)\n def _time_it(*args, **kwargs):\n start = int(round(time() * 1000000000))\n try:\n return func(*args, **kwargs)\n finally:\n end_ = int(round(time() * 1000000000)) - start\n print(f\"Total execution time: {end_ if end_ > 0 else 0} ns\")\n\n return _time_it", "def time_function_total(function, runs=1):\n t0 = time.perf_counter()\n for _ in range(runs):\n function()\n t1 = time.perf_counter()\n\n return t1 - t0", "def time_me(function, argument, type):\n start = time.perf_counter()\n function(argument, type)\n end = time.perf_counter()\n return end - start", "def test_func(self):\n def func():\n return 0\n self.assertEqual(type(decorators.timeit(func)), types.FunctionType)", "def test_func(f, n):\n t = [[1]] * n\n\n start = etime()\n f(t, [])\n end = etime()\n elapsed = end - start\n return elapsed", "def measure_time(func):\n def timer(*args, **kwargs):\n start = timeit.default_timer()\n ret = func(*args, **kwargs)\n end = timeit.default_timer()\n 
print(\"Time[{}] : {}\".format(func.__name__, end-start))\n return ret\n return timer", "def log(func):\n def timed(*args, **kwargs):\n ts = time.time()\n result = func(*args, **kwargs)\n te = time.time()\n exectime = te - ts\n if (exectime < 1):\n exectime = str(round(exectime * 100, 3)) +\" ms\"\n else:\n exectime = str(round(exectime, 3)) + \" s\"\n logger.info(\"Running: \"+ func.__name__ + \" [ exec-time = \" + exectime + \" ]\")\n return result\n \n return timed", "def execution_time(func):\n import time\n\n @functools.wraps(func)\n def decorated(*args, **kwargs):\n start = time.time()\n output = func(*args, **kwargs)\n end = time.time()\n print(\"Took {} secondes.\".format(end - start))\n return output\n\n return decorated", "def function_timer(*args, **kwargs):\n start = time.time()\n value = func(*args, **kwargs)\n end = time.time()\n runtime = end - start\n msg = f\"The runtime for {func.__name__} took {runtime} seconds to complete\"\n #print(msg.format(func=func.__name__, time=runtime))\n print(msg)\n return value", "def calculate_time(func):\n def timer(*args, **kwargs):\n start_time = time.time()\n x = func(*args, **kwargs)\n end_time = time.time()\n run_time = end_time - start_time\n print(f'Total time',run_time)\n return x\n return timer", "def measure(func):\n if func not in measured_funcs:\n measured_funcs.add(func)\n if not hasattr(func, 'total_runtime'):\n func.total_runtime = 0.0\n if not hasattr(func, 'total_calls'):\n func.total_calls = 0\n\n def wrapper(*args, **kwargs):\n before_call = datetime.datetime.now()\n res = func(*args, **kwargs)\n elapsed = datetime.datetime.now() - before_call\n func.total_runtime += elapsed.total_seconds()\n func.total_calls += 1\n return res\n\n return wrapper", "def count_time(func):\n\n def wrapper(*args, **kwargs):\n start_time = time()\n res = func(*args, **kwargs)\n over_time = time()\n total_time = over_time - start_time\n logging.info('Func: %s, Run Time: %.6f' % (func.__name__, total_time))\n return res\n\n return wrapper", "def wrapper():\n start_time = time.time()\n func()\n end_time = time.time()\n run = end_time - start_time\n print(f'Total time {run}')", "def timeit(func):\n def inner(*args, **kwargs):\n time_start = time.time()\n ret = func(*args, **kwargs)\n time_end = time.time()\n print('**** With total running time of {:.2f}s'.format(\n time_end - time_start\n ))\n return ret\n return inner", "def time_func(func):\n def return_fn(*args, **kwargs):\n global FUNCTION_LOGS\n stopwatch = Stopwatch()\n result = func(*args, **kwargs)\n split = stopwatch.mark()[1]\n FUNCTION_LOGS.append((func.__name__, args, kwargs, split))\n return result\n\n return return_fn", "def timed(function):\n def timed_function(*args, **kwargs):\n t0 = time.time()\n result = function(*args, **kwargs)\n print(\"[{}] - Elapsed time : {} s\"\n .format(function.__name__, sec_to_time(time.time() - t0)))\n return result\n return timed_function", "def full_test(power_func, test_num, test_pow, test_dict):\n # Get sort_func name\n func_name = power_func.__name__\n # Measure time function takes\n start = timer()\n power_func(test_num, test_pow)\n end = timer()\n # Store time in test dictionary\n test_dict[func_name] = end-start\n # Test for correctness\n pass_test(power_func, test_num, test_pow)\n print(\"{}: {} seconds\".format(func_name, test_dict[func_name]))\n print()", "def timed(fn):\n @wraps(fn)\n def inner(*args, **kwargs):\n \"\"\"\n Inner function to calculate the time.\n \"\"\"\n start = perf_counter()\n result = fn(*args, **kwargs)\n end = 
perf_counter()\n time_elapsed = (end - start)\n return time_elapsed, result\n return inner", "def time_function(f, *args):\r\n tic = time.time()\r\n f(*args)\r\n toc = time.time()\r\n return toc - tic", "def time_fn(fn):\r\n\r\n @wraps(fn) # to save __name__, type(), ...\r\n def measure_time(*args, **kwargs):\r\n t1 = time.time()\r\n result = fn(*args, **kwargs)\r\n t2 = time.time()\r\n print(f'@time_fn: {fn.__name__:20} took {t2 - t1} seconds')\r\n return result\r\n\r\n return measure_time", "def benchmark(func, inputs):\n t0 = time.clock()\n results = [func(x) for x in inputs]\n t1 = time.clock()\n average_time = (t1 - t0) / len(inputs)\n return average_time, results" ]
[ "0.72246695", "0.7189616", "0.69638824", "0.69256145", "0.68879926", "0.6848878", "0.6752755", "0.6739705", "0.67355597", "0.6723226", "0.67124665", "0.6660492", "0.66482943", "0.6646599", "0.66432923", "0.66369736", "0.66163874", "0.6569194", "0.6540607", "0.6531069", "0.6525538", "0.65074694", "0.6495992", "0.64868873", "0.64652216", "0.6458357", "0.6456812", "0.64444846", "0.64277196", "0.6422645" ]
0.73044527
0
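The negatives listed above all capture the same basic pattern: wrap a call, read a clock before and after, and report the difference. A minimal runnable sketch of that wrapper pattern, using only the standard library; the names timed and example are illustrative and not taken from the record.

    import time
    from functools import wraps

    def timed(func):
        """Wrap func so every call reports its wall-clock duration."""
        @wraps(func)
        def wrapper(*args, **kwargs):
            start = time.perf_counter()            # monotonic, high-resolution clock
            result = func(*args, **kwargs)
            elapsed = time.perf_counter() - start
            print(f"{func.__name__} took {elapsed:.6f} s")
            return result
        return wrapper

    @timed
    def example(n):
        return sum(i * i for i in range(n))

    example(100_000)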
Query if a value is in an array via iterative linear search.
def linear_search_iterative(array, value): for elt in array: if compare(elt, value) == 0: return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def linear_search_foundornot(arr: IntList, query: int) -> bool:\n position: int = 0\n found: bool = False\n while position < len(arr) and not found:\n if arr[position] == query:\n found = True\n position += 1\n return found", "def linear_search_recursive(array, value):\n # Base case for empty list\n n = len(array)\n if n == 0:\n return False\n\n # Recursive case\n if compare(array[0], value) == 0:\n return True\n else:\n return linear_search_recursive(array[1:], value)", "def linearSearchTwoDArray(array, value):\n for row in range(len(array)):\n for col in range(len(array[row])):\n if value == array[row][col]:\n print(f\"Element: {value} has one occurrence at row={row}, col={col} in this array\")\n return\n else:\n print(\"No such value exists in this array\")", "def in_array(val, obj):\n return (val in obj)", "def checkElementInArray(element,array):\n\t\n\texists = False\n\t\n\tfor i in array:\n\t\n\t\tif i == element:\n\t\t\texists = True\n\n\treturn exists", "def Search(array, value):\n left = 0\n right = len(array) - 1\n while left <= right:\n mid = left + (right - left) // 2\n if array[mid] == value:\n return True\n elif array[mid] < value:\n left = mid + 1\n else:\n right = mid - 1\n\n return False", "def __contains__(self, value):\n found = False\n for i in range(len(self.data)):\n if self.data[i] == value:\n return True\n return found", "def find(self, value):\n left, right = 0, len(self.arr)-1\n while left < right:\n total = self.arr[left] + self.arr[right]\n if total == value:\n return True\n elif total > value:\n right -= 1\n else:\n left += 1\n return False", "def arrayContains(arr, item):\n\tcontains = True\n\ttry:\n\t\tarr.index(item)\n\texcept ValueError:\n\t\tcontains = False\n\treturn contains", "def binary_search_iterative(array, value):\n # Iteration terminates when (min, max) range has shrunk such that min > max\n min = 0\n max = len(array) - 1\n while min <= max:\n middle = (min + max) // 2\n comparison = compare(array[middle], value)\n if comparison == 0:\n return True\n elif comparison < 0:\n min = middle + 1\n else:\n max = middle - 1\n\n return False", "def __contains__(self, i):\n return i in self._ar", "def linear_search(L, key):\r\n for element in L:\r\n if element == key:\r\n return True\r\n return False", "def linear_search(array, item):\n # implement linear_search_iterative and linear_search_recursive below, then\n # change this to call your implementation to verify it passes all tests\n # return linear_search_iterative(array, item)\n return linear_search_recursive(array, item)", "def linear_search(array, item):\n # implement linear_search_iterative and linear_search_recursive below, then\n # change this to call your implementation to verify it passes all tests\n # return linear_search_iterative(array, item)\n return linear_search_recursive(array, item)", "def linear_search(array, item):\n # implement linear_search_iterative and linear_search_recursive below, then\n # change this to call your implementation to verify it passes all tests\n # return linear_search_iterative(array, item)\n return linear_search_recursive(array, item)", "def linear_search(array, item):\n # implement linear_search_iterative and linear_search_recursive below, then\n # change this to call your implementation to verify it passes all tests\n # return linear_search_iterative(array, item)\n return linear_search_recursive(array, item)", "def linear_search(arr: IntList, query: int) -> int:\n arr_len: int = len(arr)\n for idx in range(arr_len):\n if arr[idx] == query:\n return idx\n 
return -1", "def linear_search(array, item):\n # implement linear_search_iterative and linear_search_recursive beleft, then\n # change this to call your implementation to verify it passes all tests\n # return linear_search_iterative(array, item)\n return linear_search_recursive(array, item)", "def linearsearch(input, value):\n count = 0\n for i in input:\n if (value == i):\n count += 1\n if count > 0:\n return \"Value, {0}, is in the list\".format(value)\n else:\n return \"Value, {0}, cannot be found\".format(value)", "def in_array(array1, array2):", "def row_is_in_array(row, array):\n return any((array[:] == row).all(1))", "def search(A, v):\r\n\tfor i in range(0, len(A)):\r\n\t\tif A[i] == v:\r\n\t\t\treturn i", "def in_list(value, arg):\r\n return value in arg", "def linear_search(arr, x):\n for i in range(len(arr)):\n if arr[i] == x:\n return i\n \n return -1", "def binary_search_whole_array(arr, target):\n return binary_search(arr, target, 0, len(arr))", "def binarySearch(searchValue, array):\r\n first = 0\r\n last = len(array) - 1\r\n beenFound = False\r\n\t\r\n while first <= last and not beenFound:\r\n midpoint = (first + last)//2\r\n\r\n if array[midpoint] == searchValue:\r\n result = str(searchValue) + \" has been found\"\r\n beenFound = True\r\n \r\n\t \r\n else:\r\n if searchValue < array[midpoint]:\r\n last = midpoint-1\r\n else:\r\n first = midpoint+1\t\r\n return()", "def find(number, A):\n\tfor x in A:\n\t\tif number == x:\n\t\t\treturn True\n\t\treturn False", "def bin_search(array, key):\n return bin_search_util(array, key, 0, len(array) - 1)", "def index_equals_value_search1(arr):\n for key, value in enumerate(arr):\n if value == key:\n return value\n return -1", "def contains(self, value):\n for item in self.data:\n if item == value:\n return item\n return False" ]
[ "0.7187239", "0.7014195", "0.68994564", "0.68296456", "0.6682872", "0.65983593", "0.6571656", "0.65281767", "0.6450205", "0.6415358", "0.6367779", "0.6351479", "0.6338945", "0.6338945", "0.6338945", "0.6338945", "0.63235927", "0.6252645", "0.6220145", "0.6189393", "0.6184134", "0.61811066", "0.61629385", "0.6162", "0.6124393", "0.6123104", "0.6107045", "0.6096636", "0.60839677", "0.6076101" ]
0.7844321
0
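The positive document above leans on a compare helper that the record does not define. Below is a plausible three-way comparator (negative, zero, or positive, like the classic cmp), restated with the search function so it runs as-is, plus a quick membership check; the comparator body is an assumption.

    def compare(a, b):
        """Three-way comparison: negative if a < b, zero if equal, positive if a > b."""
        return (a > b) - (a < b)

    def linear_search_iterative(array, value):
        # Scan elements left to right until a match is found.
        for elt in array:
            if compare(elt, value) == 0:
                return True
        return False

    assert linear_search_iterative([3, 1, 4, 1, 5], 4) is True
    assert linear_search_iterative([3, 1, 4, 1, 5], 9) is False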
Query if a value is in an array via recursive linear search.
def linear_search_recursive(array, value): # Base case for empty list n = len(array) if n == 0: return False # Recursive case if compare(array[0], value) == 0: return True else: return linear_search_recursive(array[1:], value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def binary_search_recursive(array, value):\n # Base cases for empty or singular list\n n = len(array)\n if n == 0:\n return False\n elif n == 1:\n return compare(array[0], value) == 0\n\n # Recursive case\n middle = n // 2\n if compare(array[middle], value) == 0:\n return True\n elif compare(array[middle], value) < 0:\n return binary_search_recursive(array[middle + 1:], value)\n else:\n return binary_search_recursive(array[:middle], value)", "def linear_search_iterative(array, value):\n for elt in array:\n if compare(elt, value) == 0:\n return True\n\n return False", "def in_array(val, obj):\n return (val in obj)", "def Search(array, value):\n left = 0\n right = len(array) - 1\n while left <= right:\n mid = left + (right - left) // 2\n if array[mid] == value:\n return True\n elif array[mid] < value:\n left = mid + 1\n else:\n right = mid - 1\n\n return False", "def linear_search_foundornot(arr: IntList, query: int) -> bool:\n position: int = 0\n found: bool = False\n while position < len(arr) and not found:\n if arr[position] == query:\n found = True\n position += 1\n return found", "def find(self, value):\n left, right = 0, len(self.arr)-1\n while left < right:\n total = self.arr[left] + self.arr[right]\n if total == value:\n return True\n elif total > value:\n right -= 1\n else:\n left += 1\n return False", "def linearSearchTwoDArray(array, value):\n for row in range(len(array)):\n for col in range(len(array[row])):\n if value == array[row][col]:\n print(f\"Element: {value} has one occurrence at row={row}, col={col} in this array\")\n return\n else:\n print(\"No such value exists in this array\")", "def in_list(value, arg):\r\n return value in arg", "def checkElementInArray(element,array):\n\t\n\texists = False\n\t\n\tfor i in array:\n\t\n\t\tif i == element:\n\t\t\texists = True\n\n\treturn exists", "def linear_search(array, item):\n # implement linear_search_iterative and linear_search_recursive beleft, then\n # change this to call your implementation to verify it passes all tests\n # return linear_search_iterative(array, item)\n return linear_search_recursive(array, item)", "def linear_search(array, item):\n # implement linear_search_iterative and linear_search_recursive below, then\n # change this to call your implementation to verify it passes all tests\n # return linear_search_iterative(array, item)\n return linear_search_recursive(array, item)", "def linear_search(array, item):\n # implement linear_search_iterative and linear_search_recursive below, then\n # change this to call your implementation to verify it passes all tests\n # return linear_search_iterative(array, item)\n return linear_search_recursive(array, item)", "def linear_search(array, item):\n # implement linear_search_iterative and linear_search_recursive below, then\n # change this to call your implementation to verify it passes all tests\n # return linear_search_iterative(array, item)\n return linear_search_recursive(array, item)", "def linear_search(array, item):\n # implement linear_search_iterative and linear_search_recursive below, then\n # change this to call your implementation to verify it passes all tests\n # return linear_search_iterative(array, item)\n return linear_search_recursive(array, item)", "def __contains__(self, value):\n found = False\n for i in range(len(self.data)):\n if self.data[i] == value:\n return True\n return found", "def contains(self, value):\n\n node, parent, found = self.search(value)\n\n return found", "def binary_search_recursive(arr, val, start, end):\n\n #base case, we've 
searched the entire array\n if end < start:\n return -1\n\n mid = ((end - start) // 2) + start\n\n #we found the value we want. Hurray!\n if arr[mid] == val:\n return mid\n elif arr[mid] > val:\n #search lower half of the array\n return binary_search_recursive(arr, val, start, mid - 1)\n elif arr[mid] < val:\n #search upper half of the array\n return binary_search_recursive(arr, val, mid + 1, end)", "def binary_search_iterative(array, value):\n # Iteration terminates when (min, max) range has shrunk such that min > max\n min = 0\n max = len(array) - 1\n while min <= max:\n middle = (min + max) // 2\n comparison = compare(array[middle], value)\n if comparison == 0:\n return True\n elif comparison < 0:\n min = middle + 1\n else:\n max = middle - 1\n\n return False", "def arrayContains(arr, item):\n\tcontains = True\n\ttry:\n\t\tarr.index(item)\n\texcept ValueError:\n\t\tcontains = False\n\treturn contains", "def binary_search_whole_array(arr, target):\n return binary_search(arr, target, 0, len(arr))", "def binary_search(array, item):\n # implement binary_search_iterative and binary_search_recursive below, then\n # change this to call your implementation to verify it passes all tests\n return binary_search_iterative(array, item)\n # return binary_search_recursive(array, item)", "def binary_search(array, item):\n # implement binary_search_iterative and binary_search_recursive below, then\n # change this to call your implementation to verify it passes all tests\n return binary_search_iterative(array, item)\n # return binary_search_recursive(array, item)", "def binary_search(array, item):\n # implement binary_search_iterative and binary_search_recursive below, then\n # change this to call your implementation to verify it passes all tests\n return binary_search_iterative(array, item)\n # return binary_search_recursive(array, item)", "def binarySearch(searchValue, array):\r\n first = 0\r\n last = len(array) - 1\r\n beenFound = False\r\n\t\r\n while first <= last and not beenFound:\r\n midpoint = (first + last)//2\r\n\r\n if array[midpoint] == searchValue:\r\n result = str(searchValue) + \" has been found\"\r\n beenFound = True\r\n \r\n\t \r\n else:\r\n if searchValue < array[midpoint]:\r\n last = midpoint-1\r\n else:\r\n first = midpoint+1\t\r\n return()", "def binary_search(array, item):\n # implement binary_search_iterative and binary_search_recursive beleft, then\n # change this to call your implementation to verify it passes all tests\n return binary_search_iterative(array, item)\n # return binary_search_recursive(array, item)", "def binary_search(array, item):\n # change this to call your implementation to verify it passes all tests\n # return binary_search_iterative(array, item)\n return binary_search_recursive(array, item)", "def search(self, value):\n if self.data == value:\n return True\n\n if value < self.data:\n if self.left:\n return self.left.search(value)\n else:\n return False\n\n if value > self.data:\n if self.right:\n return self.right.search(value)\n else:\n return False", "def contains(self, key: int) -> bool:\n hashedVal = self.hashValue(key)\n head = self.array[hashedVal] \n while(head != None): \n if head.val == key:\n return True\n head = head.next\n return False", "def binary_search(arr: List[int], value: int):\n start = 0\n end = len(arr) - 1\n\n while start <= end:\n # to avoid start+end overflow and bit operate is faster, use current start+((end-start)>>1)\n # which is start+(end-start)/2\n mid = start + ((end - start) >> 1)\n mid_value = arr[mid]\n if value == 
mid_value:\n return mid\n elif value < mid_value:\n end = mid - 1\n else:\n start = mid + 1\n\n return -1", "def bin_search(array, key):\n return bin_search_util(array, key, 0, len(array) - 1)" ]
[ "0.69967544", "0.69195235", "0.6862056", "0.67465794", "0.65924174", "0.65537065", "0.64034545", "0.6274502", "0.6266495", "0.62526584", "0.6248882", "0.6248882", "0.6248882", "0.6248882", "0.6202555", "0.6176431", "0.6166238", "0.6148709", "0.60954624", "0.6085641", "0.60850006", "0.60850006", "0.60850006", "0.606562", "0.6050216", "0.60078627", "0.5987206", "0.5987118", "0.59614694", "0.59355116" ]
0.7844012
0
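The recursive variant above slices the list on every call, so each step copies the remaining elements and Python's recursion limit caps the usable input size. A sketch of an index-based alternative that avoids the copies; the name linear_search_recursive_indexed and the compare body are assumptions, not part of the record.

    def compare(a, b):
        return (a > b) - (a < b)   # same three-way helper as in the previous sketch

    def linear_search_recursive_indexed(array, value, start=0):
        # Base case: walked past the end without finding the value.
        if start >= len(array):
            return False
        if compare(array[start], value) == 0:
            return True
        # Recurse on the next index instead of slicing, so no copies are made.
        return linear_search_recursive_indexed(array, value, start + 1)

    assert linear_search_recursive_indexed([2, 7, 1, 8], 1) is True
    assert linear_search_recursive_indexed([2, 7, 1, 8], 5) is False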
Query if a value is in an array via recursive binary search.
def binary_search_recursive(array, value): # Base cases for empty or singular list n = len(array) if n == 0: return False elif n == 1: return compare(array[0], value) == 0 # Recursive case middle = n // 2 if compare(array[middle], value) == 0: return True elif compare(array[middle], value) < 0: return binary_search_recursive(array[middle + 1:], value) else: return binary_search_recursive(array[:middle], value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def linear_search_recursive(array, value):\n # Base case for empty list\n n = len(array)\n if n == 0:\n return False\n\n # Recursive case\n if compare(array[0], value) == 0:\n return True\n else:\n return linear_search_recursive(array[1:], value)", "def Search(array, value):\n left = 0\n right = len(array) - 1\n while left <= right:\n mid = left + (right - left) // 2\n if array[mid] == value:\n return True\n elif array[mid] < value:\n left = mid + 1\n else:\n right = mid - 1\n\n return False", "def in_array(val, obj):\n return (val in obj)", "def binary_search_recursive(arr, val, start, end):\n\n #base case, we've searched the entire array\n if end < start:\n return -1\n\n mid = ((end - start) // 2) + start\n\n #we found the value we want. Hurray!\n if arr[mid] == val:\n return mid\n elif arr[mid] > val:\n #search lower half of the array\n return binary_search_recursive(arr, val, start, mid - 1)\n elif arr[mid] < val:\n #search upper half of the array\n return binary_search_recursive(arr, val, mid + 1, end)", "def find(self, value):\n left, right = 0, len(self.arr)-1\n while left < right:\n total = self.arr[left] + self.arr[right]\n if total == value:\n return True\n elif total > value:\n right -= 1\n else:\n left += 1\n return False", "def binary_search_iterative(array, value):\n # Iteration terminates when (min, max) range has shrunk such that min > max\n min = 0\n max = len(array) - 1\n while min <= max:\n middle = (min + max) // 2\n comparison = compare(array[middle], value)\n if comparison == 0:\n return True\n elif comparison < 0:\n min = middle + 1\n else:\n max = middle - 1\n\n return False", "def binarySearch(searchValue, array):\r\n first = 0\r\n last = len(array) - 1\r\n beenFound = False\r\n\t\r\n while first <= last and not beenFound:\r\n midpoint = (first + last)//2\r\n\r\n if array[midpoint] == searchValue:\r\n result = str(searchValue) + \" has been found\"\r\n beenFound = True\r\n \r\n\t \r\n else:\r\n if searchValue < array[midpoint]:\r\n last = midpoint-1\r\n else:\r\n first = midpoint+1\t\r\n return()", "def binary_search(array, item):\n # implement binary_search_iterative and binary_search_recursive below, then\n # change this to call your implementation to verify it passes all tests\n return binary_search_iterative(array, item)\n # return binary_search_recursive(array, item)", "def binary_search(array, item):\n # implement binary_search_iterative and binary_search_recursive below, then\n # change this to call your implementation to verify it passes all tests\n return binary_search_iterative(array, item)\n # return binary_search_recursive(array, item)", "def binary_search(array, item):\n # implement binary_search_iterative and binary_search_recursive below, then\n # change this to call your implementation to verify it passes all tests\n return binary_search_iterative(array, item)\n # return binary_search_recursive(array, item)", "def binary_search(arr: List[int], value: int):\n start = 0\n end = len(arr) - 1\n\n while start <= end:\n # to avoid start+end overflow and bit operate is faster, use current start+((end-start)>>1)\n # which is start+(end-start)/2\n mid = start + ((end - start) >> 1)\n mid_value = arr[mid]\n if value == mid_value:\n return mid\n elif value < mid_value:\n end = mid - 1\n else:\n start = mid + 1\n\n return -1", "def linear_search_iterative(array, value):\n for elt in array:\n if compare(elt, value) == 0:\n return True\n\n return False", "def binary_search(array, item):\n # implement binary_search_iterative and binary_search_recursive beleft, 
then\n # change this to call your implementation to verify it passes all tests\n return binary_search_iterative(array, item)\n # return binary_search_recursive(array, item)", "def binary_search(array, item):\n # change this to call your implementation to verify it passes all tests\n # return binary_search_iterative(array, item)\n return binary_search_recursive(array, item)", "def binary_search_whole_array(arr, target):\n return binary_search(arr, target, 0, len(arr))", "def binary_search(array, elem):\n if len(array) == 0:\n return (f'{elem} is not found in the list')\n else:\n midpoint = len(array)//2\n if array[midpoint]==elem:\n return (f'{elem} is found in the list')\n else:\n if elem<array[midpoint]:\n return binary_search(array[:midpoint],elem)\n else:\n return binary_search(array[midpoint+1:],elem)", "def binary_search(arr, value, start=None, end=None):\n if start is None:\n start = 0\n if end is None:\n end = len(arr) -1\n \n index = int((end - start)/2 + start)\n mid_value = arr[index]\n if mid_value == value:\n return index\n elif mid_value > value:\n return binary_search(arr, value, start, index)\n elif mid_value < value:\n return binary_search(arr, value, index, end)", "def search(self, value):\n if self.data == value:\n return True\n\n if value < self.data:\n if self.left:\n return self.left.search(value)\n else:\n return False\n\n if value > self.data:\n if self.right:\n return self.right.search(value)\n else:\n return False", "def binary_search(array, x):\n if len(array) < 1:\n return False\n elif len(array) == 1:\n if array[0] == x:\n return True\n else:\n return False\n else:\n _mid = int(len(array) / 2)\n _mid_element = array[_mid]\n if _mid_element == x:\n return True\n else:\n if _mid_element < x:\n return binary_search(array[_mid+1:], x)\n else:\n return binary_search(array[:_mid], x)\n \n return _mid", "def bin_search(array, key):\n return bin_search_util(array, key, 0, len(array) - 1)", "def binary_search(input_array, value):\n first = 0\n last = len(input_array)-1\n\n while(first <= last):\n mid_index = int((first + last) / 2)\n\n if input_array[mid_index] == value:\n return mid_index\n elif input_array[mid_index] < value:\n first = mid_index + 1\n else:\n last = mid_index - 1\n\n return -1", "def checkElementInArray(element,array):\n\t\n\texists = False\n\t\n\tfor i in array:\n\t\n\t\tif i == element:\n\t\t\texists = True\n\n\treturn exists", "def contains(self, value):\n\n node, parent, found = self.search(value)\n\n return found", "def in_list(value, arg):\r\n return value in arg", "def linear_search_foundornot(arr: IntList, query: int) -> bool:\n position: int = 0\n found: bool = False\n while position < len(arr) and not found:\n if arr[position] == query:\n found = True\n position += 1\n return found", "def __contains__(self, value):\n found = False\n for i in range(len(self.data)):\n if self.data[i] == value:\n return True\n return found", "def binary_search_iterative(arr, x):\n\n if len(arr) > 1:\n mid = len(arr) // 2\n \n first_half = arr[: mid]\n second_half = arr[mid :]\n \n if x == arr[mid]:\n return True\n \n elif x < arr[mid]:\n i = 0\n while i <= len(first_half):\n if first_half[i] == x:\n return True\n else:\n i += 1\n \n elif x > arr[mid]:\n j = 0 \n while j < len(second_half):\n if second_half[j] == x:\n return True\n else:\n j += 1\n \n else:\n return f\"X: {x} no in array!\"\n \n else:\n return -1", "def binary_search(input_array, value):\n \n array_length = len(input_array)\n \n #(\"array length:\", array_length)\n \n left = 0\n right = 
array_length-1\n \n while left <= right:\n \n mid = ( left + right ) // 2\n #print(\"mid, mid value: \", mid, input_array[mid])\n \n if input_array[ mid ] == value:\n return mid\n \n elif input_array[ mid ] < value:\n # midpoint value is smaller than target, then search right half\n left = mid + 1\n \n else:\n # midpoint value is larger than target, then search left half\n right = mid - 1\n \n \n \n return -1", "def binary_search_find_first(arr: List[int], value: int):\n start = 0\n end = len(arr) - 1\n\n while start <= end:\n # to avoid start+end overflow and bit operate is faster, use current start+((end-start)>>1)\n # which is start+(end-start)/2\n mid = start + ((end - start) >> 1)\n mid_value = arr[mid]\n\n if value < mid_value:\n end = mid - 1\n elif value > mid_value:\n start = mid + 1\n else:\n if mid == 0 or arr[mid - 1] != value:\n return mid\n else:\n end = mid - 1\n\n return -1", "def arrayContains(arr, item):\n\tcontains = True\n\ttry:\n\t\tarr.index(item)\n\texcept ValueError:\n\t\tcontains = False\n\treturn contains" ]
[ "0.75950164", "0.72089994", "0.6862112", "0.6780811", "0.6759658", "0.6693581", "0.6673692", "0.6622626", "0.6622626", "0.6622626", "0.66219646", "0.66128653", "0.65874904", "0.65533173", "0.65308404", "0.6481852", "0.6433637", "0.64171046", "0.64025944", "0.63988364", "0.6291715", "0.6286969", "0.6269333", "0.625311", "0.6228115", "0.62044245", "0.6190186", "0.61842954", "0.6162136", "0.61482346" ]
0.7462939
1
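The binary variant only answers membership and assumes the input is already sorted; the slicing in the recursive calls also copies roughly half the list at each level. Restated here with the assumed compare helper so it can be run directly, together with a quick check on sorted data.

    def compare(a, b):
        return (a > b) - (a < b)   # same three-way helper as in the earlier sketches

    def binary_search_recursive(array, value):
        # Precondition: array must already be sorted in ascending order.
        n = len(array)
        if n == 0:
            return False
        if n == 1:
            return compare(array[0], value) == 0
        middle = n // 2
        if compare(array[middle], value) == 0:
            return True
        if compare(array[middle], value) < 0:
            return binary_search_recursive(array[middle + 1:], value)
        return binary_search_recursive(array[:middle], value)

    data = sorted([9, 2, 7, 4, 4, 11])
    assert binary_search_recursive(data, 7) is True
    assert binary_search_recursive(data, 3) is False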
Sort a list via hybrid recursive (top-down) mergesort. Delegates to insertion sort when n is less than or equal to some threshold.
def mergesort_recursive_hybrid(array, threshold=37): # Base case delegates to insertion sort n = len(array) if n <= threshold: return insertion_sort(array) # Recur on two halves of array and merge results mid = n // 2 return merge( mergesort_recursive(array[:mid]), mergesort_recursive(array[mid:]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_sort(L):\n n = len(L)\n if n < 2:\n return L\n mid = n // 2\n left = L[:mid]\n right = L[mid:]\n merge_sort(left)\n merge_sort(right)\n merge(L, left, right)", "def merge_sort(unsorted, threshold, reverse):\r\n length = len(unsorted)\r\n if length < 2:\r\n return unsorted\r\n elif length < threshold:\r\n return insertion_sort(unsorted, reverse)\r\n else:\r\n mid = length//2\r\n list1 = unsorted[0:mid]\r\n list2 = unsorted[mid:length]\r\n list1 = merge_sort(list1, threshold, reverse)\r\n list2 = merge_sort(list2, threshold, reverse)\r\n unsorted = merge(list1, list2, reverse)\r\n return unsorted", "def merge_sort(input_list: list) -> list:\n n = len(input_list)\n if n <= 1:\n return input_list\n else:\n left = merge_sort(input_list[:n // 2])\n right = merge_sort(input_list[n // 2:])\n return merge(left, right)", "def merge_sort (arr):\n n = len(arr)\n if n is 1: # Base case.\n return arr\n else:\n left = merge_sort(arr[0:int(floor(n/2))])\n right = merge_sort(arr[int(ceil(n/2)):])\n res = merge(left, right)\n return res", "def merge_sort(aList):\n\n n = len(aList)\n\n # Check for base case\n if n <= 1:\n return aList\n\n # Split the list into two halves and call recursively\n first = merge_sort(aList[0:int(n/2)])\n\n second = merge_sort(aList[int(n/2):n])\n\n #pdb.set_trace()\n\n # Perform Merge of two sorted lists\n # Initialize counters, lengths and the newly sorted array\n i, j = 0, 0\n firstLen = len(first)\n secondLen = len(second)\n\n sortedList = []\n\n # Populate the sorted list with the lesser of each half-list\n for k in range(n):\n\n # Make sure we won't try to access past the end of a list\n # If we've reached the end of the first array, then\n # add the element from the second array.\n if i == firstLen:\n sortedList.append(second[j])\n j += 1\n\n # If we've reached the end of the second array, add\n # the element from the first array\n elif j == secondLen:\n sortedList.append(first[i])\n i += 1\n\n # The normal case (before we've reached the end of either array)\n elif first[i] < second[j]:\n sortedList.append(first[i])\n i += 1\n\n else:\n sortedList.append(second[j])\n j += 1\n\n\n return sortedList", "def merge_sort(input_list,start,end):\n if start < end:\n mid=(start+end)//2\n merge_sort(input_list,start,mid)\n merge_sort(input_list,mid+1,end)\n return merge(input_list,start,mid,end)", "def merge_sort(mylist):\n n = len(mylist)\n if n < 2:\n return # list is already sorted\n # divide\n mid = n // 2\n S1 = mylist[0:mid] # copy of first half\n S2 = mylist[mid:n] # copy of second half\n # conquer (with recursion)\n merge_sort(S1) # sort the copy of the first half\n merge_sort(S2) # sort the copy of the second half\n # merge results\n _merge(S1, S2, mylist)", "def intro_sort(data):\n recurssion_depth=2*math.log(len(data))\n if len(data) < 15:\n insertion_sort(data)\n elif recurssion_depth==0:\n merge_sort(data)\n else:\n quick_sort(data)", "def merge_sort(l): \n # Raise value\n if not isinstance(l, list):\n raise TypeError(\"Not a list\")\n\n # Initialize variables to count\n c = r = w = 0\n\n def merge_sort_aux(l, start1, last2):\n \"\"\"\n Split the list to sublist till size becomes one by recursively calls itself \n and merge them\n \n Parameter\n -------------------\n start1: the first index of the list in need of splitting\n int\n last2: the last index of the list in need of splitting\n int\n \"\"\"\n nonlocal c, w, r\n\n def merge(l, s1, l1, s2, l2): \n \"\"\"\n Sort the sublists and merge two halves\n \n Parameter\n ----------------------\n l: unsorted 
list\n list\n s1: the index of the first element of the 1st list (left side)\n int \n l1: the index of the last element of the 1st list (left side)\n int\n s2: the index of the first element of the 2nd list (right side)\n int\n l2: the index of the last element of the 2nd list (right side)\n int\n \"\"\"\n nonlocal c, r, w\n\n # Create temporary list to store sorted value\n tempList = l.copy() \n\n # Compare pairs of values of two list, start from the first element\n i = s1 # Beginning of the left list\n j = s2 # Beginning of the right list\n k = 0\n\n # Compare and add to temporary list\n c += 2\n while i <= l1 and j <= l2: \n c += 3\n r += 2 \n w += 1 \n if l[i] < l[j]:\n tempList[k] = l[i]\n i = i + 1\n k = k + 1 \n else:\n tempList[k] = l[j]\n j = j + 1\n k = k + 1\n\n # Copy remaining elements of the first list\n c += 1\n while i <= l1:\n tempList[k] = l[i]\n i = i + 1\n k = k + 1\n c += 1\n r += 1\n w += 1\n\n # Copy remaining elements of the second list \n c += 1\n while j <= l2:\n tempList[k] = l[j]\n j = j + 1\n k = k + 1\n c += 1\n r += 1\n w += 1\n\n # Copy elements from tempList to list l\n i = s1\n j = 0 \n c += 1\n while i <= l2:\n l[i] = tempList[j]\n i = i + 1\n j = j + 1\n c += 1\n w += 1 \n r += 1 \n \n # Split the list to sublist untill size become one\n c += 1\n if start1 < last2:\n last1 = (start1 + last2) // 2 \n start2 = last1 + 1\n merge_sort_aux(l, start1, last1) #the left side\n merge_sort_aux(l, start2, last2) #the right side\n # Call merge function to merge subarrays \n merge(l, start1, last1, start2, last2)\n \n start = 0\n last = len(l) - 1\n merge_sort_aux(l, start, last) \n \n return c, r, w", "def merge_sort (t,cmp):\n n = len(t)\n if n <= 1:\n # cas de base\n return copy.deepcopy(t)\n else:\n # cas general\n t1 = merge_sort((t[0:((n-1)//2+1)]),cmp)\n t2 = merge_sort((t[((n-1)//2+1):n]),cmp)\n return merge(t1,t2,cmp)", "def merge_sort(cls, num_list):\n if len(num_list) > 1:\n first_half = num_list[:len(num_list) // 2]\n second_half = num_list[len(num_list) // 2:]\n cls.merge_sort(first_half)\n cls.merge_sort(second_half)\n first_index = 0\n second_index = 0\n list_index = 0\n\n while first_index < len(first_half) and \\\n second_index < len(second_half):\n if first_half[first_index] > second_half[second_index]:\n num_list[list_index] = second_half[second_index]\n second_index += 1\n else:\n num_list[list_index] = first_half[first_index]\n first_index += 1\n list_index += 1\n\n for i in range(first_index, len(first_half)):\n num_list[list_index] = first_half[first_index]\n list_index += 1\n first_index += 1\n\n for x in range(second_index, len(second_half)):\n num_list[list_index] = second_half[second_index]\n list_index += 1\n second_index += 1", "def merge_sort(arr):\n n = len(arr)\n # Base case\n if n == 1:\n return arr\n # Recursive step: sort each half of the elements\n return merge(merge_sort(arr[:n//2]), merge_sort(arr[n//2:]))", "def merge_sort(list):\r\n \r\n if len(list) <= 1:\r\n return list\r\n \r\n left_half, right_half = split(list)\r\n left = merge_sort(left_half)\r\n right = merge_sort(right_half)\r\n \r\n return merge(left, right)", "def merge_sort(self, lst):\r\n [sorted_lst, number_of_inversions] = self.sort_and_get_number_of_inversions(lst)\r\n \r\n return sorted_lst", "def merge_sort(my_list):\n if len(my_list) < 1:\n return my_list\n if len(my_list) > 1:\n middle = len(my_list) // 2\n left_half = my_list[:middle]\n right_half = my_list[middle:]\n\n merge_sort(left_half)\n merge_sort(right_half)\n\n i = 0\n j = 0\n k = 0\n while i < 
len(left_half) and j < len(right_half):\n if left_half[i] < right_half[j]:\n my_list[k] = left_half[i]\n i += 1\n else:\n my_list[k] = right_half[j]\n j += 1\n k += 1\n\n while i < len(left_half):\n my_list[k] = left_half[i]\n i += 1\n k += 1\n\n while j < len(right_half):\n my_list[k] = right_half[j]\n j += 1\n k += 1\n\n return my_list", "def merge_sort(list):\n\n\tif len(list) <= 1:\n\t\treturn list\n\n\tleft_half, right_half = split(list)\n\tleft = merge_sort(left_half)\n\tright = merge_sort(right_half)\n\n\treturn merge(left, right)", "def shell_sort(a_list):\n sublist_count = len(a_list) // 2\n while sublist_count > 0:\n for start_position in range(sublist_count):\n a_list = insertion_sort(\n a_list,\n start=start_position,\n gap=sublist_count\n )\n sublist_count = sublist_count // 2\n return a_list", "def merge_sort(list):\n # Base Condition\n if len(list) <= 1:\n return list\n\n left_half, right_half = split(list)\n left = merge_sort(left_half)\n right = merge_sort(right_half)\n\n return merge(left,right)", "def merge_sort(a_list):\n if len(a_list) <= 1:\n # a list with one element is sorted by definition\n return a_list\n # apply recursion if length is 2 or more\n else:\n middle_term = len(a_list) // 2\n left_half = a_list[:middle_term]\n right_half = a_list[middle_term:]\n\n left_half = merge_sort(left_half)\n right_half = merge_sort(right_half)\n\n return merge_lists(left_half, right_half)", "def merge_sort(l, start, end):\r\n if (end-start < 2):\r\n return;\r\n middle = (start+end)//2\r\n\r\n def merge():\r\n nonlocal l, start, middle, end\r\n res = []\r\n rlen = end - start\r\n i, j, k = start, middle, 0\r\n while k<rlen:\r\n if i!=middle and (j==end or l[i]<=l[j]):\r\n res.append(l[i])\r\n i = i + 1\r\n elif j!=end and (i==middle or l[i]>l[j]):\r\n res.append(l[j])\r\n j = j + 1\r\n k = k + 1\r\n l[start:end] = res[:]\r\n\r\n mergesort(l, start, middle)\r\n mergesort(l, middle, end)\r\n merge(l, start, middle, end)", "def mergesort(T:list) -> \"void\":\n\n\tif len(T) <= 32:\n\t\tinsertionsort(T)\n\n\telse:\n\t\tU = T[0:len(T)//2]\n\t\tV = T[len(T)//2:len(T)]\n\t\tmergesort(U)\n\t\tmergesort(V)\n\t\tmerge(U, V, T)", "def mergesort(lst, inversions):\n\t# inversions contains inverted list elements, once for each inversion\n\tif len(lst) == 1:\n\t\treturn lst\n\tcut_idx = (len(lst) + 1) / 2\n\tleft = lst[:cut_idx]\n\tright = lst[cut_idx:]\n\tleft = mergesort(left, inversions)\n\tright = mergesort(right, inversions)\n\treturn merge(left, right, inversions)", "def merge_sort(a_list):\n\n if len(a_list) > 1:\n mid = len(a_list) // 2\n left_half = a_list[:mid]\n right_half = a_list[mid:]\n\n merge_sort(left_half)\n merge_sort(right_half)\n\n i = 0\n j = 0\n k = 0\n while i < len(left_half) and j < len(right_half):\n if left_half[i] < right_half[j]:\n a_list[k] = left_half[i]\n i += 1\n else:\n a_list[k] = right_half[j]\n j += 1\n k += 1\n\n while i < len(left_half):\n a_list[k] = left_half[i]\n i += 1\n k += 1\n\n while j < len(right_half):\n a_list[k] = right_half[j]\n j += 1\n k += 1\n return a_list", "def insertionSort(list):", "def merge_sort(items):\r\n # TODO: Check if list is so small it's already sorted (base case)\r\n # TODO: Split items list into approximately equal halves\r\n # TODO: Sort each half by recursively calling merge sort\r\n # TODO: Merge sorted halves into one list in sorted order\r", "def quick_sort(partition_list, low, high):\n if low >= high:\n return\n part_point = get_partition(partition_list, low, high)\n quick_sort(partition_list, low, part_point - 1)\n 
quick_sort(partition_list, part_point + 1, high)", "def merge_sort(alist):\n print(\"Splitting \", alist)\n # Temporary list to store sorted list\n work = [None] * len(alist)\n rec_merge_sort(work, start=0, end=len(alist)-1)", "def merge_sort(list1):\n if len(list1) <= 1:\n return list1\n \n mid_point = int(len(list1)/2)\n \n return merge(merge_sort(list1[:mid_point]), merge_sort(list1[mid_point:]))", "def merge_sort(items):\n # Running time: O(nlogn) Best and Worst case\n # Memory usage: O(nlogn) \n # Check if list is so small it's already sorted (base case)\n if len(items) > 1:\n # Split items list into approximately equal halves\n pivot = len(items)//2\n first_half = items[:pivot]\n second_half = items[pivot:]\n # Sort each half by recursively calling merge sort\n merge_sort(first_half)\n merge_sort(second_half)\n # Merge sorted halves into one list in sorted order\n items[:] = merge(first_half,second_half)", "def heap_sort(list):\n pass" ]
[ "0.7029845", "0.696164", "0.68413234", "0.68319064", "0.6830567", "0.6794161", "0.6786466", "0.6744685", "0.66700613", "0.66018665", "0.65889466", "0.6586275", "0.6528771", "0.65160424", "0.65155774", "0.6514788", "0.650817", "0.65026766", "0.6501337", "0.6469715", "0.6448953", "0.64308083", "0.64277446", "0.6427545", "0.6421217", "0.6371518", "0.63677996", "0.6364059", "0.6347985", "0.6337189" ]
0.7173727
0
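The hybrid sort above depends on three helpers the record does not define — insertion_sort, merge, and a plain mergesort_recursive — and note that its recursive calls go to the plain version, so the insertion-sort cutoff only applies at the top level. A minimal sketch of those helpers under the same compare convention; the bodies are assumptions that merely match the names used in the record.

    def compare(a, b):
        return (a > b) - (a < b)   # same three-way helper as in the earlier sketches

    def insertion_sort(array):
        """Return a sorted copy of array; efficient for small inputs."""
        result = array.copy()
        for i in range(1, len(result)):
            key = result[i]
            j = i - 1
            while j >= 0 and compare(result[j], key) > 0:
                result[j + 1] = result[j]   # shift larger elements one slot right
                j -= 1
            result[j + 1] = key
        return result

    def merge(left, right):
        """Merge two sorted lists into one sorted list (stable)."""
        merged = []
        i = j = 0
        while i < len(left) and j < len(right):
            if compare(left[i], right[j]) <= 0:
                merged.append(left[i])
                i += 1
            else:
                merged.append(right[j])
                j += 1
        merged.extend(left[i:])
        merged.extend(right[j:])
        return merged

    def mergesort_recursive(array):
        """Plain top-down mergesort used by the recursive calls in the record."""
        n = len(array)
        if n <= 1:
            return array
        mid = n // 2
        return merge(mergesort_recursive(array[:mid]), mergesort_recursive(array[mid:]))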
Sort a list via hybrid iterative (bottom-up) mergesort. Delegates to insertion sort when n is less than or equal to some threshold.
def mergesort_iterative_hybrid(array, threshold=37): n = len(array) result = array.copy() # Initial insertion sort pass for i in range(0, n, threshold): result[i:i+threshold] = insertion_sort(result[i:i+threshold]) # Merge runs of length threshold, 2*threshold, ... length = threshold while length < n: # Merge each pair of runs for i in range(0, n, 2 * length): mid = i + length upper = i + 2 * length result[i:upper] = merge(result[i:mid], result[mid:upper]) length *= 2 return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mergesort_recursive_hybrid(array, threshold=37):\n # Base case delegates to insertion sort\n n = len(array)\n if n <= threshold:\n return insertion_sort(array)\n\n # Recur on two halves of array and merge results\n mid = n // 2\n return merge(\n mergesort_recursive(array[:mid]),\n mergesort_recursive(array[mid:]))", "def merge_sort(unsorted, threshold, reverse):\r\n length = len(unsorted)\r\n if length < 2:\r\n return unsorted\r\n elif length < threshold:\r\n return insertion_sort(unsorted, reverse)\r\n else:\r\n mid = length//2\r\n list1 = unsorted[0:mid]\r\n list2 = unsorted[mid:length]\r\n list1 = merge_sort(list1, threshold, reverse)\r\n list2 = merge_sort(list2, threshold, reverse)\r\n unsorted = merge(list1, list2, reverse)\r\n return unsorted", "def merge_sort(L):\n n = len(L)\n if n < 2:\n return L\n mid = n // 2\n left = L[:mid]\n right = L[mid:]\n merge_sort(left)\n merge_sort(right)\n merge(L, left, right)", "def merge_sort (arr):\n n = len(arr)\n if n is 1: # Base case.\n return arr\n else:\n left = merge_sort(arr[0:int(floor(n/2))])\n right = merge_sort(arr[int(ceil(n/2)):])\n res = merge(left, right)\n return res", "def merge_sort(cls, num_list):\n if len(num_list) > 1:\n first_half = num_list[:len(num_list) // 2]\n second_half = num_list[len(num_list) // 2:]\n cls.merge_sort(first_half)\n cls.merge_sort(second_half)\n first_index = 0\n second_index = 0\n list_index = 0\n\n while first_index < len(first_half) and \\\n second_index < len(second_half):\n if first_half[first_index] > second_half[second_index]:\n num_list[list_index] = second_half[second_index]\n second_index += 1\n else:\n num_list[list_index] = first_half[first_index]\n first_index += 1\n list_index += 1\n\n for i in range(first_index, len(first_half)):\n num_list[list_index] = first_half[first_index]\n list_index += 1\n first_index += 1\n\n for x in range(second_index, len(second_half)):\n num_list[list_index] = second_half[second_index]\n list_index += 1\n second_index += 1", "def merge_sort(aList):\n\n n = len(aList)\n\n # Check for base case\n if n <= 1:\n return aList\n\n # Split the list into two halves and call recursively\n first = merge_sort(aList[0:int(n/2)])\n\n second = merge_sort(aList[int(n/2):n])\n\n #pdb.set_trace()\n\n # Perform Merge of two sorted lists\n # Initialize counters, lengths and the newly sorted array\n i, j = 0, 0\n firstLen = len(first)\n secondLen = len(second)\n\n sortedList = []\n\n # Populate the sorted list with the lesser of each half-list\n for k in range(n):\n\n # Make sure we won't try to access past the end of a list\n # If we've reached the end of the first array, then\n # add the element from the second array.\n if i == firstLen:\n sortedList.append(second[j])\n j += 1\n\n # If we've reached the end of the second array, add\n # the element from the first array\n elif j == secondLen:\n sortedList.append(first[i])\n i += 1\n\n # The normal case (before we've reached the end of either array)\n elif first[i] < second[j]:\n sortedList.append(first[i])\n i += 1\n\n else:\n sortedList.append(second[j])\n j += 1\n\n\n return sortedList", "def merge_sort(input_list,start,end):\n if start < end:\n mid=(start+end)//2\n merge_sort(input_list,start,mid)\n merge_sort(input_list,mid+1,end)\n return merge(input_list,start,mid,end)", "def insertionSort(list):", "def merge_sort(mylist):\n n = len(mylist)\n if n < 2:\n return # list is already sorted\n # divide\n mid = n // 2\n S1 = mylist[0:mid] # copy of first half\n S2 = mylist[mid:n] # copy of second half\n # conquer (with 
recursion)\n merge_sort(S1) # sort the copy of the first half\n merge_sort(S2) # sort the copy of the second half\n # merge results\n _merge(S1, S2, mylist)", "def merge_sort(input_list: list) -> list:\n n = len(input_list)\n if n <= 1:\n return input_list\n else:\n left = merge_sort(input_list[:n // 2])\n right = merge_sort(input_list[n // 2:])\n return merge(left, right)", "def merge_sort(self, lst):\r\n [sorted_lst, number_of_inversions] = self.sort_and_get_number_of_inversions(lst)\r\n \r\n return sorted_lst", "def merge_sort(l): \n # Raise value\n if not isinstance(l, list):\n raise TypeError(\"Not a list\")\n\n # Initialize variables to count\n c = r = w = 0\n\n def merge_sort_aux(l, start1, last2):\n \"\"\"\n Split the list to sublist till size becomes one by recursively calls itself \n and merge them\n \n Parameter\n -------------------\n start1: the first index of the list in need of splitting\n int\n last2: the last index of the list in need of splitting\n int\n \"\"\"\n nonlocal c, w, r\n\n def merge(l, s1, l1, s2, l2): \n \"\"\"\n Sort the sublists and merge two halves\n \n Parameter\n ----------------------\n l: unsorted list\n list\n s1: the index of the first element of the 1st list (left side)\n int \n l1: the index of the last element of the 1st list (left side)\n int\n s2: the index of the first element of the 2nd list (right side)\n int\n l2: the index of the last element of the 2nd list (right side)\n int\n \"\"\"\n nonlocal c, r, w\n\n # Create temporary list to store sorted value\n tempList = l.copy() \n\n # Compare pairs of values of two list, start from the first element\n i = s1 # Beginning of the left list\n j = s2 # Beginning of the right list\n k = 0\n\n # Compare and add to temporary list\n c += 2\n while i <= l1 and j <= l2: \n c += 3\n r += 2 \n w += 1 \n if l[i] < l[j]:\n tempList[k] = l[i]\n i = i + 1\n k = k + 1 \n else:\n tempList[k] = l[j]\n j = j + 1\n k = k + 1\n\n # Copy remaining elements of the first list\n c += 1\n while i <= l1:\n tempList[k] = l[i]\n i = i + 1\n k = k + 1\n c += 1\n r += 1\n w += 1\n\n # Copy remaining elements of the second list \n c += 1\n while j <= l2:\n tempList[k] = l[j]\n j = j + 1\n k = k + 1\n c += 1\n r += 1\n w += 1\n\n # Copy elements from tempList to list l\n i = s1\n j = 0 \n c += 1\n while i <= l2:\n l[i] = tempList[j]\n i = i + 1\n j = j + 1\n c += 1\n w += 1 \n r += 1 \n \n # Split the list to sublist untill size become one\n c += 1\n if start1 < last2:\n last1 = (start1 + last2) // 2 \n start2 = last1 + 1\n merge_sort_aux(l, start1, last1) #the left side\n merge_sort_aux(l, start2, last2) #the right side\n # Call merge function to merge subarrays \n merge(l, start1, last1, start2, last2)\n \n start = 0\n last = len(l) - 1\n merge_sort_aux(l, start, last) \n \n return c, r, w", "def merge_sort(l, start, end):\r\n if (end-start < 2):\r\n return;\r\n middle = (start+end)//2\r\n\r\n def merge():\r\n nonlocal l, start, middle, end\r\n res = []\r\n rlen = end - start\r\n i, j, k = start, middle, 0\r\n while k<rlen:\r\n if i!=middle and (j==end or l[i]<=l[j]):\r\n res.append(l[i])\r\n i = i + 1\r\n elif j!=end and (i==middle or l[i]>l[j]):\r\n res.append(l[j])\r\n j = j + 1\r\n k = k + 1\r\n l[start:end] = res[:]\r\n\r\n mergesort(l, start, middle)\r\n mergesort(l, middle, end)\r\n merge(l, start, middle, end)", "def merge_sort(items):\r\n # TODO: Check if list is so small it's already sorted (base case)\r\n # TODO: Split items list into approximately equal halves\r\n # TODO: Sort each half by recursively calling merge 
sort\r\n # TODO: Merge sorted halves into one list in sorted order\r", "def sort(lst):\n n = len(lst)\n done = False\n round = n - 1\n while not done and round:\n done = True\n for i in range(round):\n if lst[i] > lst[i+1]:\n lst[i], lst[i+1] = lst[i+1], lst[i]\n done = False\n round -= 1", "def merge_sort(my_list):\n if len(my_list) < 1:\n return my_list\n if len(my_list) > 1:\n middle = len(my_list) // 2\n left_half = my_list[:middle]\n right_half = my_list[middle:]\n\n merge_sort(left_half)\n merge_sort(right_half)\n\n i = 0\n j = 0\n k = 0\n while i < len(left_half) and j < len(right_half):\n if left_half[i] < right_half[j]:\n my_list[k] = left_half[i]\n i += 1\n else:\n my_list[k] = right_half[j]\n j += 1\n k += 1\n\n while i < len(left_half):\n my_list[k] = left_half[i]\n i += 1\n k += 1\n\n while j < len(right_half):\n my_list[k] = right_half[j]\n j += 1\n k += 1\n\n return my_list", "def shell_sort(a_list):\n sublist_count = len(a_list) // 2\n while sublist_count > 0:\n for start_position in range(sublist_count):\n a_list = insertion_sort(\n a_list,\n start=start_position,\n gap=sublist_count\n )\n sublist_count = sublist_count // 2\n return a_list", "def bottom_up_merge_sort(items):\n subarray_size = 1\n\n while subarray_size < len(items)//2:\n ## Continue making passes through items until the subarray size is\n ## the size of items, since this means items is finally sorted.\n for i in range(0, len(items), subarray_size):\n merge(items, i, i+subarray_size*2, subarray_size)\n subarray_size *= 2", "def insertion_sort(p_list):\n if len(p_list) > 1: # list of length 0 or 1 is sorted\n marker = p_list.first()\n while marker != p_list.last():\n pivot = p_list.after(marker) # next item to place\n value = pivot.element()\n if value > marker.element(): # pivot is already sorted\n marker = pivot # pivot becomes new marker\n else: # must relocate pivot to be before marker\n walk = marker # find leftmost item greater than value\n while (walk != p_list.first() and\n p_list.before(walk).element() > value):\n walk = p_list.before(walk)\n p_list.delete(pivot)\n p_list.add_before(walk, value) # reinsert value before walk", "def tim_sort(li: Sequence) -> List:\n minrun = find_minrun(len(li))\n \n for start in range(0, len(li), minrun):\n # Note that insertion_sort sorts [left, right)\n end = min(start + minrun, len(li))\n insertion_sort(li, start, end)\n \n size = minrun\n while size < len(li):\n for left in range(0, len(li), 2 * size):\n # Since [left : left+size] and [left+size : left+2*size] have been sorted \n # (when size=minrun, these two have been sorted by insertion_sort; when \n # size is doubled, they are sorted by the previous loop), we can use merge.\n mid = min(left + size, len(li))\n right = min(left + 2 * size, len(li))\n merge(li, left, mid, right)\n size *= 2", "def merge_sort (t,cmp):\n n = len(t)\n if n <= 1:\n # cas de base\n return copy.deepcopy(t)\n else:\n # cas general\n t1 = merge_sort((t[0:((n-1)//2+1)]),cmp)\n t2 = merge_sort((t[((n-1)//2+1):n]),cmp)\n return merge(t1,t2,cmp)", "def heap_sort(list):\n pass", "def merge_sort(a_list):\n\n if len(a_list) > 1:\n mid = len(a_list) // 2\n left_half = a_list[:mid]\n right_half = a_list[mid:]\n\n merge_sort(left_half)\n merge_sort(right_half)\n\n i = 0\n j = 0\n k = 0\n while i < len(left_half) and j < len(right_half):\n if left_half[i] < right_half[j]:\n a_list[k] = left_half[i]\n i += 1\n else:\n a_list[k] = right_half[j]\n j += 1\n k += 1\n\n while i < len(left_half):\n a_list[k] = left_half[i]\n i += 1\n k += 1\n\n while j < 
len(right_half):\n a_list[k] = right_half[j]\n j += 1\n k += 1\n return a_list", "def merge_sort(list):\r\n \r\n if len(list) <= 1:\r\n return list\r\n \r\n left_half, right_half = split(list)\r\n left = merge_sort(left_half)\r\n right = merge_sort(right_half)\r\n \r\n return merge(left, right)", "def merge_sort(list):\n\n\tif len(list) <= 1:\n\t\treturn list\n\n\tleft_half, right_half = split(list)\n\tleft = merge_sort(left_half)\n\tright = merge_sort(right_half)\n\n\treturn merge(left, right)", "def mergesort(T:list) -> \"void\":\n\n\tif len(T) <= 32:\n\t\tinsertionsort(T)\n\n\telse:\n\t\tU = T[0:len(T)//2]\n\t\tV = T[len(T)//2:len(T)]\n\t\tmergesort(U)\n\t\tmergesort(V)\n\t\tmerge(U, V, T)", "def mergesort(lst, inversions):\n\t# inversions contains inverted list elements, once for each inversion\n\tif len(lst) == 1:\n\t\treturn lst\n\tcut_idx = (len(lst) + 1) / 2\n\tleft = lst[:cut_idx]\n\tright = lst[cut_idx:]\n\tleft = mergesort(left, inversions)\n\tright = mergesort(right, inversions)\n\treturn merge(left, right, inversions)", "def merge_sort_algorithm(self, low, high):\n if low < high:\n mid = (low + high) / 2\n self.merge_sort_algorithm(low, mid)\n self.merge_sort_algorithm(mid+1, high)\n self.merge(low, high, mid)", "def merge_sort(list):\n # Base Condition\n if len(list) <= 1:\n return list\n\n left_half, right_half = split(list)\n left = merge_sort(left_half)\n right = merge_sort(right_half)\n\n return merge(left,right)", "def merge_sort(arr):\n n = len(arr)\n # Base case\n if n == 1:\n return arr\n # Recursive step: sort each half of the elements\n return merge(merge_sort(arr[:n//2]), merge_sort(arr[n//2:]))" ]
[ "0.7101632", "0.6605162", "0.6547044", "0.6530211", "0.6487875", "0.64746773", "0.64605856", "0.64375854", "0.64229447", "0.6305018", "0.62946403", "0.6249969", "0.62481827", "0.62260824", "0.621573", "0.6202988", "0.61955655", "0.61911744", "0.61827713", "0.6164877", "0.61570626", "0.61531866", "0.6151233", "0.6121927", "0.61216444", "0.61185485", "0.6118305", "0.6107643", "0.60970557", "0.609001" ]
0.70031637
1
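With the record's two hybrid sorts and the helper sketches above in scope, a quick randomized property check against Python's built-in sorted exercises both variants end to end; check_hybrid_sorts is an illustrative name and assumes those definitions are available.

    import random

    def check_hybrid_sorts(trials=200, max_len=300):
        # Assumes mergesort_recursive_hybrid and mergesort_iterative_hybrid from the
        # records above, plus the sketched compare/merge/insertion_sort/mergesort_recursive
        # helpers, are all in scope.
        for _ in range(trials):
            data = [random.randint(-1000, 1000) for _ in range(random.randint(0, max_len))]
            original = list(data)
            assert mergesort_iterative_hybrid(data, threshold=8) == sorted(original)
            assert mergesort_recursive_hybrid(data, threshold=8) == sorted(original)
            assert data == original   # both sorts work on copies; the input is untouched
        return True

    check_hybrid_sorts()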
Sift up the last node (end-1) in the given max heap.
def sift_up(heap, start, end): # Swap last node with parents until no longer greater. i = end - 1 heaped = False while i > start and not heaped: parent = (i - 1) // 2 if compare(heap[i], heap[parent]) > 0: heap[i], heap[parent] = heap[parent], heap[i] i = parent else: heaped = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sift_down(heap, start, end):\n # Swap first node with children until no longer smaller.\n i = start\n heaped = False\n while not heaped:\n left = i * 2 + 1\n right = i * 2 + 2\n largest = i\n\n # Find largest of i, left and right\n if left < end and compare(heap[left], heap[largest]) > 0:\n largest = left\n if right < end and compare(heap[right], heap[largest]) > 0:\n largest = right\n\n # If left or right is larger than i, swap and repeat\n if largest == i:\n heaped = True\n else:\n heap[i], heap[largest] = heap[largest], heap[i]\n i = largest", "def heap_pop_max(heap):\n last = heap.pop()\n if heap:\n return_item = heap[0]\n heap[0] = last\n heapq._siftup_max(heap, 0)\n else:\n return_item = last\n return return_item", "def sift_down(self, start, end):\n i, j = start, 2*start+1\n # Temporary variable to decrease exchange times\n temp = self.heap_list[start]\n # end is equal to len(self.heap_list)-1\n while j <= end:\n # compare left child node with right child node\n if j<end and self.heap_list[j]<self.heap_list[j+1]:\n j += 1\n if temp >= self.heap_list[j]:\n break\n else:\n #self.heap_list[i], self.heap_list[j] = self.heap_list[j], self.heap_list[i]\n self.heap_list[i] = self.heap_list[j]\n i = j\n j = 2*j+1\n self.heap_list[i] = temp", "def delete_top_from_max_heap(x):\n last = x[-1]\n x = x.at[0].set(last)[:-1]\n return heapify_subtree(x, 0)", "def __siftup(heap, nodes, pos, stopPos = 0):\n # Loop until past stopping position\n while pos > stopPos:\n # Set parent position\n parentPos = (pos - 1) >> 1\n\n # Swap if child less than parent\n if heap[pos][0] < heap[parentPos][0]:\n Graph.__swapHeapNodes(heap, nodes, pos, parentPos)\n pos = parentPos\n \n # End sift if child's first tuple is greater than or equal to parent\n else: break", "def swap_min_max(heap):\n\ttemp = heap[0]\n\theap[0] = heap[heap.len-1]\n\theap[heap.len-1] = temp\n\theap.len -= 1\n\treturn", "def heappop(heap):\n lastelt = heap.pop() # raises appropriate IndexError if heap is empty\n if heap:\n returnitem = heap[0]\n heap[0] = lastelt\n Heap.siftup(heap, 0)\n return returnitem\n return lastelt", "def sift_up(self, i):\n #While the element is not the min value (top) or the second value in the min heap\n while i // 2 > 0:\n # Swap the values if the current value is less than it's parent value\n if self.heap_list[i][0] < self.heap_list[i // 2][0]:\n self.heap_list[i], self.heap_list[i // 2] = self.heap_list[i // 2], self.heap_list[i]\n # Move the index to the parent value (moving up the tree)\n i = i // 2", "def heap_push_max(heap, item):\n heap.append(item)\n heapq._siftdown_max(heap, 0, len(heap)-1)", "def heappop(heap):\n lastelt = heap.pop()\n if heap:\n returnitem = heap[0]\n heap[0] = lastelt\n _siftup(heap, 0)\n return returnitem\n return lastelt", "def _heapify(self):\n start = self._parent(len(self) - 1)\n for i in range(start, -1, -1):\n self._down_heap(i)", "def delete_max(self):\n retval = self.heap_list[1]\n self.heap_list[1] = self.heap_list[self.size]\n self.size = self.size - 1\n pop_val = self.heap_list.pop()\n self.percolate_down(1)\n return retval", "def max_heapify_unrecursive(heap, i):\n while True:\n left_child = left(i)\n right_child = right(i)\n largest = i\n if left_child < len(heap) and heap[left_child] > heap[i]:\n largest = left_child\n if right_child < len(heap) and heap[right_child] > heap[largest]:\n largest = right_child\n if largest == i:\n return\n swap(heap, i, largest)\n i = largest", "def sift_down_recursion(self, index):\n if self.size() == 0:\n return\n\n left = 
self.left_child(index)\n right = self.right_child(index)\n # if the element is leaf\n if left >= self.size():\n return\n\n max_child_index = left\n if right < self.size():\n if self.heap[right] > self.heap[left]:\n max_child_index = right\n\n # if already max heap, return\n if self.heap[index] >= self.heap[max_child_index]:\n return\n\n self.heap[index], self.heap[max_child_index] = self.heap[max_child_index], self.heap[index]\n\n index = max_child_index\n self.sift_down_recursion(index)", "def heapify(array, highest_index):\n first = (highest_index-1)//2\n for start in range(first, -1, -1):\n Heap.sift_down(array, start, highest_index)", "def __sift_up(self, i: int):\n while i > 0:\n parent = (i - 1) // 2\n if self.__heap[i][0] < self.__heap[parent][0]:\n tmp = self.__heap[parent]\n self.__heap[parent] = self.__heap[i]\n self.__heap[i] = tmp\n i = parent", "def build_max_heap(heap):\n\tfor j in range(heap.len//2, -1, -1):\n\t\tmax_heapify(heap, j)", "def heapify_down(self):\n index = 0\n while self.has_left_child(index):\n smaller_child_index = self.get_left_child_index(index)\n if self.has_right_child(index) and self.get_right_child(index) < self.get_left_child(index):\n smaller_child_index = self.get_right_child_index(index)\n if self.heap[index] < self.heap[smaller_child_index]:\n break\n else:\n self.swap_values(index, smaller_child_index)\n index = smaller_child_index", "def _sift_up(self, i):\n while i > 0:\n p = (i-1)//2\n if self._heap[i] < self._heap[p]:\n self._swap(i, p)\n i = p\n else:\n break", "def sift_up(self, index):\n if self.size() == 1:\n return\n parent_index = self.parent(index)\n # sift up if it is larger than its parent\n while index > 0 and self.heap[index] > self.heap[parent_index]:\n self.heap[index], self.heap[parent_index] = self.heap[parent_index], self.heap[index]\n # update index\n index = parent_index\n parent_index = self.parent(index)", "def __sift_down(self, i: int):\n while (2 * i + 1) <= self.__len__() - 1:\n\n child_idx = self.__get_smallest_child(i)\n\n if self.__heap[i][0] > self.__heap[child_idx][0]:\n tmp = self.__heap[i]\n self.__heap[i] = self.__heap[child_idx]\n self.__heap[child_idx] = tmp\n i = child_idx", "def sift_down(self, i):\n #If the current value has at least one child\n while (i * 2) <= self.current_size:\n #For the current value, get the index of the child with the least value (min child)\n mc = self.min_child(i)\n # If the current value is greater than it's \"min child\" value, swap the values\n if self.heap_list[i][0] > self.heap_list[mc][0]:\n self.heap_list[i], self.heap_list[mc] = self.heap_list[mc], self.heap_list[i]\n i = mc", "def heapify_up(self):\n index = len(self.heap) - 1\n while self.has_parent(index) and self.get_parent(index) > self.heap[index]:\n self.swap_values(self.get_parent_index(index), index)\n index = self.get_parent_index(index)", "def up(self, i):\n x = self.heap[i]\n while i > 1 and x < self.heap[i // 2]:\n self.heap[i] = self.heap[i // 2]\n self.rank[self.heap[i // 2]] = i\n i //= 2\n self.heap[i] = x # insertion index found\n self.rank[x] = i", "def up(self, i):\n x = self.heap[i]\n while i > 1 and x < self.heap[i // 2]:\n self.heap[i] = self.heap[i // 2]\n self.rank[self.heap[i // 2]] = i\n i //= 2\n self.heap[i] = x # insertion index found\n self.rank[x] = i", "def _sift_down(self, i):\n mini = i\n l = 2*i + 1\n if l < self._size and\\\n self._heap[l] < self._heap[mini]:\n mini = l\n r = 2*i + 2\n if r < self._size and\\\n self._heap[r] < self._heap[mini]:\n mini = r\n if mini != i:\n self._swap(i, 
mini)\n self._sift_down(mini)", "def heap_up(self, index):\n # how can we do this recursively?\n parent_node_index = (index - 1)//2\n while self.store[index].key < self.store[parent_node_index].key and index > 0:\n self.swap(index, parent_node_index)\n index = parent_node_index\n parent_node_index = (index - 1)//2\n else:\n return self.store", "def _upheap(self, node):\n parent = self.parent(node)\n while parent is not None and node.element() < parent.element():\n self._swap(node, parent) # Move node upward while key\n parent = self.parent(node) # smaller than parent's key", "def pop(self):\n try:\n top_node = self._heap[0]\n self._heap = [self._heap[-1]] + self._heap[1:-1]\n self.sort_down(0)\n return top_node\n except IndexError:\n raise IndexError('Cannot pop from an empty heap')", "def percolate_down(self, i):\n while (i * 2) <= self.size:\n max_child = self.max_child(i)\n if self.heap_list[max_child] > self.heap_list[i]:\n tmp = self.heap_list[i]\n self.heap_list[i] = self.heap_list[max_child]\n self.heap_list[max_child] = tmp\n i = max_child" ]
[ "0.71801686", "0.7001086", "0.6813224", "0.65404683", "0.6508815", "0.6496566", "0.6474151", "0.63565713", "0.63440263", "0.6343961", "0.6335834", "0.63289756", "0.63112545", "0.6272625", "0.62513417", "0.6242469", "0.6241716", "0.61991626", "0.6170931", "0.6164732", "0.61421937", "0.61381006", "0.6078182", "0.6046244", "0.6046244", "0.603945", "0.602148", "0.60156024", "0.59815645", "0.5959802" ]
0.73906344
0
Shuffle a list by recursively pileshuffling each pile.
def recursive_pile_shuffle(array, n): # Base case for empty or singular list if len(array) < 2: return array # Pile-shuffle and recur on each of n piles piles = [array[i::n] for i in reversed(range(n))] result = [] for pile in piles: result += recursive_pile_shuffle(pile, n) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shuffle_list(self, tour_list, pop_size):\n x = np.array(tour_list)\n while len(self.pop_group) < self.shuffle_population:\n y = np.random.permutation(x)\n if not any((y == x).all() for x in self.pop_group):\n self.pop_group.append(y.tolist())", "def shuffle_list(l):\n l_out = list(l)[:]\n shuffle(l_out)\n return l_out", "def shuffleSites(myList):\n shuffle(myList)\n ctr = 0\n for x in myList:\n ctr += 1\n yield ctr, x", "def shuffle(L):\n return [L[i] for i in permutation(len(L))]", "def shuffle(list_, random_seed=123):\n random.Random(random_seed).shuffle(list_)", "def shuffle(self) -> List[int]:", "def shuffle_list(self):\n eight_pic = get_image_list(self.folder)\n if len(eight_pic) > 8:\n random.shuffle(eight_pic)\n full_list = eight_pic[:9] * 2\n random.shuffle(full_list)\n return full_list", "def pile_shuffle(array, n):\n result = []\n for i in reversed(range(n)):\n result += array[i::n]\n\n return result", "def shuffle(self) -> List[int]:\n runs = self.nums.copy()\n # Fisher-Yates Algorithm\n n = len(runs)\n for i in range(n):\n j = random.randint(i, n - 1)\n runs[i], runs[j] = runs[j], runs[i]\n return runs", "def shuffle(lol, seed):\n for l in lol:\n random.seed(seed)\n random.shuffle(l)", "def _shuffle():\n\n random.shuffle(deck)", "def shuffle(self) -> List[int]:\n n = len(self.q)\n \n for i in range(n):\n j = random.randrange(i, n)\n self.q[i], self.q[j] = self.q[j], self.q[i]\n return self.q", "def shuffle(self):\n for i in range(10):\n random.shuffle(self.set)", "def shuffle(self) -> 'List':\n copy = self.copy()\n shuffle(copy)\n\n return copy", "def shuffle(data, shuffle_size=10000):\n buf = []\n for sample in data:\n buf.append(sample)\n if len(buf) >= shuffle_size:\n random.shuffle(buf)\n for x in buf:\n yield x\n buf = []\n # The sample left over\n random.shuffle(buf)\n for x in buf:\n yield x", "def shuffle( self ):\n random.shuffle(self.__deck)", "def shuffle(self) -> List[int]:\n for i in range(len(self.nums) - 1, 0, -1):\n pivot = random.randint(0, i) # 前闭后闭\n self.nums[i], self.nums[pivot] = self.nums[pivot], self.nums[i]\n return self.nums", "def shuffle(self):\n shuffle(self.cards)", "def shuffle(self):\n shuffle(self.cards)", "def shuffle(self) -> List[int]:\n for i in range(len(self.nums)): #Traverse nums.\n r = randint(i, len(self.nums) - 1) #Generate a random int in [i, len(self.nums) - 1].\n self.nums[i], self.nums[r] = self.nums[r], self.nums[i] #Swap self.nums[i] and self.nums[r].\n return self.nums #Return self.nums.", "def shuffle_list(a):\n if isinstance(a, int):\n a = range(a)\n a = copy.copy(a)\n try:\n random.shuffle(a)\n except TypeError:\n a = list(a)\n random.shuffle(a)\n return a", "def shuffle(self):\n for i in xrange(self.n - 1):\n pos = random.randint(i, self.n - 1)\n self.to[i], self.to[pos] = self.to[pos], self.to[i]\n self.a[i], self.a[pos] = self.a[pos], self.a[i]\n return self.a", "def shuffled(iterable):\n items = list(iterable)\n random.shuffle(items)\n return items", "def Shuffle(self):\r\n random.shuffle(self.cards_list)", "def shuffle(self):\r\n random.shuffle(self.deck)", "def shuffle_list(a, b, c, d, e, f):\r\n z = list(zip(a, b, c, d, e, f))\r\n random.shuffle(z)\r\n a, b, c, d, e, f = zip(*z)\r\n\r\n return a, b, c, d, e, f", "def shuffle(self):\n reorder(self.cards) #importing shuffle as reorder", "def __permute(l,opts):\n MAX_RAND_SIZE = 2080 \n if (len(l)/3 < MAX_RAND_SIZE): \n rd.shuffle(l)\n else:\n sys.stderr.write(\\\n\t\t\"{}:{}: Valid Random Permutation Range 
Exceeded.\"\\\n\t\t.format(opts.progname,permute.__name__))\n opts.perror+=1", "def partition(lis: list, n: int):\n # prevent destroying the original dataset\n lis_cp = copy.deepcopy(lis)\n random.shuffle(lis_cp)\n if len(lis) > n:\n return [lis_cp[i::n] for i in range(n)]\n else:\n return [[lis_cp[i]] for i in range(len(lis))]", "def shuffle(self):\n index = list(range(self.k))\n random.shuffle(index)\n for i, j in enumerate(index):\n self.list[i] = self.dic[j]\n return self.list" ]
[ "0.680971", "0.6636288", "0.6632501", "0.66021633", "0.6471498", "0.64384234", "0.64200294", "0.632114", "0.630535", "0.62996954", "0.62684", "0.61987203", "0.6188611", "0.61510146", "0.6112788", "0.6100381", "0.6089439", "0.60330445", "0.60330445", "0.60076684", "0.5993398", "0.5993311", "0.59899247", "0.59764695", "0.5969005", "0.5879196", "0.58787125", "0.5876931", "0.5867011", "0.58658165" ]
0.7225837
0
OAuth2 compatible token login, get an access token for future requests
async def login_access_token( form_data: OAuth2PasswordRequestForm = Depends() ): user = await crud.user.authenticate( username=form_data.username, password=form_data.password ) if not user: raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail="Incorrect credentials") elif not user.is_active: raise HTTPException(status_code=HTTP_403_FORBIDDEN, detail="Inactive user") elif not user.is_email_verified: raise HTTPException(status_code=HTTP_403_FORBIDDEN, detail="Please verify your account via email") access_token_expires = timedelta(minutes=config.ACCESS_TOKEN_EXPIRE_MINUTES) return { "access_token": create_access_token( data={"user_id": user.id}, expires_delta=access_token_expires ), "token_type": "bearer", }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def login_access_token(form_data: OAuth2PasswordRequestForm = Depends()):\n user = auth_handler.authenticate_user(\n username=form_data.username, password=form_data.password\n )\n if user is None:\n raise HTTPException(\n detail=\"Incorrect username and/or password\", status_code=400\n )\n\n return APIResponse(\n msg=TokenResponse(\n access_token=auth_handler.encode_token(user.id), token_type=\"bearer\"\n )\n )", "async def login_for_access_token(\n form_data: OAuth2PasswordRequestForm = Depends()\n):\n user = authenticate_user(form_data.username, form_data.password)\n if not user:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Incorrect username or password\",\n headers={\"WWW-Authenticate\": \"Bearer\"},\n )\n access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)\n access_token = create_access_token(\n data={\"sub\": user.username}, expires_delta=access_token_expires\n )\n return {\"access_token\": access_token, \"token_type\": \"bearer\"}", "def get_access_token(self):\n logger.info('Try to get access token via OAuth')\n\n if self.user_login and not self.user_password:\n # Need user password\n pass\n\n if not self.user_login and self.user_password:\n # Need user login\n pass\n\n auth_session = requests.Session()\n\n login_form_response = auth_session.get(self.LOGIN_URL)\n\n login_form_action = re.findall(r'<form ?.* action=\"(.+)\"', login_form_response.text)\n if not login_form_action:\n raise VkAuthorizationError('vk.com changed login flow')\n\n # Login\n login_form_data = {\n 'email': self.user_login,\n 'pass': self.user_password,\n }\n\n response = auth_session.post(login_form_action[0], login_form_data)\n\n logger.info('Cookies %s', auth_session.cookies)\n logger.info('Login response url %s', response.url)\n\n if 'remixsid' in auth_session.cookies or 'remixsid6' in auth_session.cookies:\n pass\n elif 'sid=' in response.url:\n self.auth_captcha_is_needed(response.content, auth_session)\n elif 'act=authcheck' in response.url:\n self.auth_code_is_needed(response.content, auth_session)\n elif 'security_check' in response.url:\n self.phone_number_is_needed(response.content, auth_session)\n else:\n raise VkAuthorizationError('Authorization error (bad password)')\n\n # OAuth2\n oauth_data = {\n 'response_type': 'token',\n 'client_id': self.app_id,\n 'scope': self.scope,\n 'display': 'mobile',\n }\n response = auth_session.post('https://oauth.vk.com/authorize', oauth_data)\n logger.info('OAuth URL: %s %s', response.request.url, oauth_data)\n\n if 'access_token' not in response.url:\n form_action = re.findall(u'<form method=\"post\" action=\"(.+?)\">', response.text)\n if form_action:\n response = auth_session.get(form_action[0])\n else:\n try:\n json_data = response.json()\n except ValueError: # not json in response\n error_message = 'OAuth2 grant access error'\n else:\n error_message = 'VK error: [{0}] {1}'.format(\n json_data['error'],\n json_data['error_description']\n )\n auth_session.close()\n raise VkAuthorizationError(error_message)\n\n auth_session.close()\n\n parsed_url = urlparse(response.url)\n logger.info('Parsed URL: %s', parsed_url)\n\n token_dict = dict(parse_qsl(parsed_url.fragment))\n if 'access_token' in token_dict:\n self.access_token = token_dict['access_token']\n self.access_token_expires_in = token_dict['expires_in']\n else:\n raise VkAuthorizationError('OAuth2 authorization error')", "def accessToken(self):\n if session.token and 'expires' in session.token:\n expires = session.token['expires']\n # reuse token until 
expiration\n if expires == 0 or expires > time.time():\n return session.token['access_token']\n\n code = request.vars.code\n\n if code:\n data = dict(client_id=self.env.client_id,\n client_secret=self.env.client_secret,\n redirect_uri=session.redirect_uri,\n code=code,\n grant_type='authorization_code'\n )\n\n open_url = None\n opener = self.__build_url_opener(self.env.token_url)\n try:\n open_url = opener.open(self.env.token_url, urlencode(data),\n self.socket_timeout)\n except urllib2.HTTPError, e:\n tmp = e.read()\n raise Exception(tmp)\n finally:\n if session.code:\n del session.code\n if session.redirect_uri:\n del session.redirect_uri\n\n if open_url:\n try:\n data = open_url.read()\n resp_type = open_url.info().gettype()\n #: try json style first\n if not resp_type or resp_type[:16] == 'application/json':\n try:\n tokendata = json.loads(data)\n session.token = tokendata\n except Exception, e:\n raise Exception(\"Cannot parse oauth server response %s %s\" % (data, e))\n #: try with x-www-form-encoded\n else:\n tokendata = cgi.parse_qs(data)\n session.token = \\\n dict([(k, v[-1]) for k, v in tokendata.items()])\n #: we failed parsing\n if not tokendata:\n raise Exception(\"Cannot parse oauth server response %s\" % data)\n #: set expiration\n if 'expires_in' in session.token:\n exps = 'expires_in'\n elif 'expires' in session.token:\n exps = 'expires'\n else:\n exps = None\n session.token['expires'] = exps and \\\n int(session.token[exps]) + \\\n time.time()\n finally:\n opener.close()\n return session.token['access_token']\n\n session.token = None\n return None", "async def login_for_access_token(form_data: OAuth2PasswordRequestForm = Depends()):\n user = example_user_validator(form_data.username, form_data.password)\n if not user:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Incorrect username or password\",\n headers={\"WWW-Authenticate\": \"Bearer\"},\n )\n access_token_data = jwt_claims.copy()\n access_token_data[\"sub\"] = user[\"username\"]\n access_token_data[\"exp\"] = datetime.utcnow() + timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)\n access_token_data[\"jti\"] = str(uuid.uuid4())\n\n refresh_token_data = jwt_claims.copy()\n refresh_token_data[\"sub\"] = user[\"username\"]\n refresh_token_data[\"exp\"] = datetime.utcnow() + timedelta(days=REFRESH_TOKEN_EXPIRE_DAYS)\n refresh_token_data[\"type\"] = \"refresh\"\n refresh_token_data[\"jti\"] = str(uuid.uuid4())\n\n return AccessRefreshToken(\n access_token=jwt.encode(access_token_data, SECRET_KEY, algorithm=ALGORITHM),\n refresh_token=jwt.encode(refresh_token_data, SECRET_KEY, algorithm=ALGORITHM)\n )", "async def oauth2_token(\n request: Request, oauth2_request=Depends(_oauth2_request)\n):", "def login_access_token(\n db: Session = Depends(get_db),\n form_data: OAuth2PasswordRequestForm = Depends()\n) -> Any:\n user = crud.user.authenticate(\n db, email=form_data.username, password=form_data.password\n )\n if not user:\n raise HTTPException(\n status_code=400, detail=\"Incorrect email or password\")\n elif not crud.user.is_active(user):\n raise HTTPException(status_code=400, detail=\"Inactive user\")\n access_token_expires = timedelta(\n minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES)\n return {\n \"access_token\": security.create_access_token(\n user.id, expires_delta=access_token_expires\n ),\n \"token_type\": \"bearer\",\n }", "def _get_access_token(self):\n if self._service_token:\n logger.info('Use service token: %s',\n 5 * '*' + self._service_token[50:])\n return self._service_token\n\n if 
not all([self.app_id, self._login, self._password]):\n raise ValueError(\n 'app_id=%s, login=%s password=%s (masked) must be given'\n % (self.app_id, self._login,\n '*' * len(self._password) if self._password else 'None'))\n\n logger.info(\"Getting access token for user '%s'\" % self._login)\n with self.http_session as s:\n if self._client_secret:\n url_query_params = self.do_direct_authorization(session=s)\n else:\n self.do_login(http_session=s)\n url_query_params = self.do_implicit_flow_authorization(session=s)\n logger.debug('url_query_params: %s', url_query_params)\n\n if 'access_token' in url_query_params:\n logger.info('Access token has been gotten')\n return url_query_params['access_token']\n else:\n raise VkAuthError('OAuth2 authorization error. Url params: %s'\n % url_query_params)", "def access_token(config, token):\n response = call_api('post', 'oauth/access_token', config,\n params={'oauth_token': token['oauth_token']},\n data={'oauth_verifier': token['oauth_verifier']})\n return dict([(k, v[0]) for k,v in urlparse.parse_qs(response.text).items()])", "def getAccessToken(self):\r\n\r\n #lets see if we have an oauth code\r\n if self.oauthToken is None:\r\n self.oauthToken = self.createAccessToken\r\n\r\n if self.oauthToken.isExpired(): #check to see if its expired if so refresh it\r\n self.oauthToken = self.refreshAccessToken()\r\n\r\n return self.oauthToken #return out access token\r", "def fetch_oauth_access_token(consumer_token, request_token):\n url = get_oauth_access_token_url(consumer_token, request_token)\n request = urllib2.urlopen(url)\n token = _oauth_parse_response(request.read())\n request.close()\n return token", "def _request_access_token(self):\n resp = requests.get(self.TOKEN_URL_FORMAT.format(\n self.creds().consumer_key(), self.creds().app_secret())\n )\n status = resp.status_code\n\n # If the token request fails, try to use the configured app id\n # and secret. This probably won't work, but the docs say that it\n # should. 
for more info, see:\n # https://developers.facebook.com/docs/facebook-login/access-tokens\n token = \"%s|%s\" % (self.creds().consumer_key(),\n self.creds().app_secret())\n if status == 200:\n token = resp.text.split('access_token=')[1]\n else:\n self.logger.error(\n \"Facebook token request failed with status %d\" % status\n )\n return token", "def get_access_token(self, renew=False):\n if self.access_token is None or renew:\n headers = {} # don't use json here, juse urlencode.\n url = self._url_for_op('token')\n data = urllib.urlencode({'grant_type': 'client_credentials',\n 'client_id':self.CLIENT_ID,\n 'client_secret':self.CLIENT_SECRET})\n req = urllib2.Request(url, data, headers)\n try:\n response = urllib2.urlopen(req).read()\n response = json.loads(response)\n except urllib2.HTTPError as e:\n raise ApiError(e.reason)\n except Exception, e:\n raise ApiError(e)\n self.access_token = response['access_token']\n return self.access_token", "def get_access_token(self):\n\n token_work = time.time() < self.expires\n\n if token_work:\n # No need update token\n return self.access_token\n\n data = {\n 'client_id': self.client_id,\n 'grant_type': 'implicit'\n }\n\n response = requests.post('https://api.moltin.com/oauth/access_token', data=data)\n raise_response_errors(response)\n\n response_json = response.json()\n\n self.access_token = response_json['access_token']\n self.expires = response_json['expires']\n\n logger.debug('elasticpathh access token was updated')\n\n return self.access_token", "def __oauth_login(self):\n\n token = self.accessToken()\n if not token:\n session.redirect_uri = self.__redirect_uri()\n data = dict(redirect_uri=session.redirect_uri,\n response_type='code',\n client_id=self.env.client_id)\n auth_request_url = self.env.auth_url + \"?\" + urlencode(data)\n redirect(auth_request_url)\n return", "def get_access_token(request):\n user = request.user\n flow = _create_flow(request)\n\n flow.params['state'] = _build_state_value(request, user)\n credentials = StorageByKeyName(\n CredentialsNDBModel, user.user_id(), 'credentials').get()\n\n authorize_url = flow.step1_get_authorize_url()\n redirect_response_object = HttpResponseRedirect(authorize_url)\n if credentials is None or credentials.invalid:\n return redirect_response_object\n\n # Find out if credentials is expired\n refresh_failed = False\n if credentials.access_token is None or credentials.access_token_expired:\n try:\n credentials.refresh(httplib2.Http())\n except AccessTokenRefreshError:\n return redirect_response_object\n except Exception:\n refresh_failed = True\n\n port_value = _validate_port(request.GET.get('port'))\n if port_value is None:\n return HttpTextResponse('Access Token: %s' % (credentials.access_token,))\n\n # Send access token along to localhost client\n redirect_template_args = {'port': port_value}\n if refresh_failed:\n quoted_error = urllib.quote(OAUTH_DEFAULT_ERROR_MESSAGE)\n redirect_template_args['error'] = quoted_error\n client_uri = ACCESS_TOKEN_FAIL_REDIRECT_TEMPLATE % redirect_template_args\n else:\n quoted_access_token = urllib.quote(credentials.access_token)\n redirect_template_args['token'] = quoted_access_token\n client_uri = ACCESS_TOKEN_REDIRECT_TEMPLATE % redirect_template_args\n\n return HttpResponseRedirect(client_uri)", "def get_access_token(request):\n user = request.user\n flow = _create_flow(request)\n\n flow.params['state'] = _build_state_value(request, user)\n credentials = StorageByKeyName(\n CredentialsNDBModel, user.user_id(), 'credentials').get()\n\n authorize_url = 
flow.step1_get_authorize_url()\n redirect_response_object = HttpResponseRedirect(authorize_url)\n if credentials is None or credentials.invalid:\n return redirect_response_object\n\n # Find out if credentials is expired\n refresh_failed = False\n if credentials.access_token is None or credentials.access_token_expired:\n try:\n credentials.refresh(httplib2.Http())\n except AccessTokenRefreshError:\n return redirect_response_object\n except:\n refresh_failed = True\n\n port_value = _validate_port(request.GET.get('port'))\n if port_value is None:\n return HttpTextResponse('Access Token: %s' % (credentials.access_token,))\n\n # Send access token along to localhost client\n redirect_template_args = {'port': port_value}\n if refresh_failed:\n quoted_error = urllib.quote(OAUTH_DEFAULT_ERROR_MESSAGE)\n redirect_template_args['error'] = quoted_error\n client_uri = ACCESS_TOKEN_FAIL_REDIRECT_TEMPLATE % redirect_template_args\n else:\n quoted_access_token = urllib.quote(credentials.access_token)\n redirect_template_args['token'] = quoted_access_token\n client_uri = ACCESS_TOKEN_REDIRECT_TEMPLATE % redirect_template_args\n\n return HttpResponseRedirect(client_uri)", "def callback__access_token(req, test_env=test_env):\n assert \"Authorization\" in req.headers\n assert req.headers[\"Authorization\"].decode(\"utf-8\").startswith(\"OAuth \")\n assert \"User-Agent\" in req.headers\n assert req.headers[\"User-Agent\"].decode(\"utf-8\") == \"CustomApiClient v0\"\n assert req.url == oauth1_utils.CustomApiClient.OAUTH1_SERVER_ACCESS_TOKEN\n\n # request as SERVER, no cookies\n with IsolatedTestapp(test_env[\"testapp_authority\"]) as testapp:\n _headers = string_headers(\n req.headers\n ) # these can end up being unicode in tests\n res = testapp.get(\n \"/authority/oauth1/access_token\",\n headers=_headers,\n extra_environ=test_env[\"extra_environ_authority\"],\n status=200,\n )\n\n # status is '200 OK'\n # return in a format tailored for `requests`\n return (int(res.status.split(\" \")[0]), res.headers, res.body)", "def get_access_token(request_token,request_secret,verifier):\n oauth = OAuth1(CLIENT_KEY, client_secret=CLIENT_SECRET, resource_owner_key=request_token, resource_owner_secret=request_secret, verifier=verifier)\n response = requests.post(ACCESS_TOKEN_URL, auth=oauth)\n credentials = urlparse.parse_qs(response.content)\n access_token = credentials.get(\"oauth_token\")[0]\n access_secret = credentials.get(\"oauth_token_secret\")[0]\n return access_token, access_secret", "def get_access_token(self, callback_uri, request_token):\n verifier = dict(urldecode(urlparse.urlparse(callback_uri).query))\n self.client.verifier = verifier.get('oauth_verifier')\n self.client.resource_owner_key = request_token.get('oauth_token')\n self.client.resource_owner_secret = request_token.get('oauth_token_secret')\n uri, headers, body = self.client.sign(self.access_token_url)\n response = requests.request(self.token_method, uri, headers=headers, data=body)\n self.client.verifier = None\n response.raise_for_status()\n token = dict(urldecode(response.text))\n self.set_token(token)\n return self.normalize_token_data(token)", "def get_token(self):\n client_auth = requests.auth.HTTPBasicAuth(self.client, self.secret)\n post_data = {'grant_type': 'password', 'username': self.user, 'password': self.password}\n headers = {'User-Agent': self.user_agent}\n response = requests.Session()\n response2 = response.post(self.token_url, auth=client_auth, data=post_data, headers=headers)\n self.token = response2.json()['access_token']\n 
self.t_type = response2.json()['token_type']", "def get_token(self):\n client_auth = requests.auth.HTTPBasicAuth(self.client, self.secret)\n post_data = {'grant_type': 'password', 'username': self.user, 'password': self.password}\n headers = {'User-Agent': self.user_agent}\n response = requests.Session()\n response2 = response.post(self.token_url, auth=client_auth, data=post_data, headers=headers)\n self.token = response2.json()['access_token']\n self.t_type = response2.json()['token_type']", "def get_access_token(self):\n payload = {\n 'grant_type': 'client_credentials',\n 'client_id': self.client_id,\n 'client_secret': self.client_secret,\n 'resource': self.resource\n }\n res = requests.post(self.auth_url, data=payload)\n data = res.json()\n if res.status_code == 200:\n return data['access_token'], res\n\n return False, res", "def _get_access_token(self, url):\n if self.access_token:\n return self.access_token\n data = \"client_id=%s&client_secret=%s&grant_type=password&username=%s&password=%s&scope=write\" %\\\n (self.client_id, self.client_secret, self.username, self.password)\n\n parsed = urlparse(url)\n path = urlunparse(ParseResult(parsed.scheme, parsed.netloc, \"/oauth2/access_token\", None, None, None))\n\n auth_resp = urlopen(Request(path, data), timeout=10)\n if auth_resp.getcode() != 200:\n self.logger.error(\"Error with client credentials\")\n return self.access_token\n auth_resp_data = json.loads(auth_resp.read())\n\n if \"access_token\" in auth_resp_data:\n self.access_token = auth_resp_data[\"access_token\"]\n else:\n self.logger.error(\"Error with client credentials\")\n return self.access_token", "def authorize_access_token(self, request, **kwargs):\n params = self.retrieve_access_token_params(request)\n params.update(kwargs)\n return self.fetch_access_token(**params)", "def get_access_token(self,verifier,access_token_url):\n\t\toauth = OAuth1(client_key=self.CONSUMER_KEY,\n\t\t\tclient_secret=self.CONSUMER_SECRET,\n\t\t\tresource_owner_key=self.resource_owner_key,\n\t\t\tresource_owner_secret=self.resource_owner_secret,\n\t\t\tverifier=verifier)\n\t\tr = requests.post(url=access_token_url, auth=oauth)\n\t\tcredentials = parse_qs(r.content)\n\t\tif \"oauth_token\" not in credentials.keys():\n\t\t\treturn None,None\n\t\ttoken = credentials.get('oauth_token')[0]\n\t\tsecret = credentials.get('oauth_token_secret')[0]\n\t\tself.token=token\n\t\tself.secret=secret\n\t\treturn token,secret", "def get_access_token(self, path='/oauth/token', data={}):\n if data.keys():\n data.update(self.data)\n else:\n data = self.data.copy()\n data.update({\n 'grant_type': 'password',\n 'email': self.env.get('TESLA_EMAIL'),\n 'password': self.env.get('TESLA_PASSWORD')\n })\n try:\n req = requests.post(url='%s%s' % (self.url, path), data=data)\n # print(req.status_code)\n # print(req.content)\n self.token.update(req.json())\n except:\n raise 'invalid credentials'\n return self.token", "def createAccessTokenReplacement(self):\r\n\r\n url = self._config['OAUTH2ENDPOINT']['huddleAuthServer'] + \"request?response_type=code\" + \\\r\n \"&client_id=\" + self._config['OAUTH2']['clientID'] + \\\r\n \"&redirect_uri=\" + self._config['OAUTH2']['redirectUri']\r\n webbrowser.open_new(url)\r\n code = input('Please enter the code from your web browser:')\r\n\r\n response = self._oauth.obtainAccessTokenBy3LeggedOAuth(code)\r\n responseBody = json.loads(response['Body'])\r\n\r\n try:\r\n oauthToken = Token(responseBody)\r\n except TypeError as e:\r\n print (\"Bad response when requesting a token \" + 
str(response))\r\n sys.exit()\r\n\r\n return oauthToken", "def get_token(self, legs=2):\n if legs == 2:\n\n headers = {}\n\n headers.update({ 'Content-Type' : 'application/x-www-form-urlencoded' })\n\n data = {}\n\n data.update({'client_id' : self.clientId})\n data.update({'client_secret' : self.clientSecret})\n data.update({'grant_type' : 'client_credentials'})\n data.update({'scope' : self.scopes})\n\n resp = self.http.post(self.webAddress, headers=headers, data=data)\n\n if resp.status_code == 200:\n cont = resp.json()\n return (cont['access_token'], cont['expires_in'])\n\n raise ConnectionError(\"Request failed with code {}\".format(resp.status_code) +\n \" and message : {}\".format(resp.content) +\n \" during authentication.\")\n else:\n raise NotImplementedError(\"3-legged authentication has not been implemented.\")", "def get_oauth_token():\n return session.get('remote_oauth')" ]
[ "0.7499437", "0.7365008", "0.7298682", "0.72800106", "0.72585416", "0.7221305", "0.71564126", "0.7155743", "0.71516013", "0.7135813", "0.70970845", "0.70884335", "0.705156", "0.7044395", "0.70353854", "0.7016695", "0.70149344", "0.7002961", "0.70015967", "0.69658285", "0.69394785", "0.69394785", "0.69393224", "0.693881", "0.6919217", "0.6888006", "0.6887808", "0.6863113", "0.68616456", "0.6858632" ]
0.74189544
1
Verify account using token.
async def verify_account( token: str = Form(...) ): email = await verify_register_token(token) if not email: raise HTTPException(status_code=400, detail="Invalid email verify token") record = await crud.user.get_by_email(email) if not record: raise HTTPException( status_code=404, detail="The user with this email does not exist in the system." ) user = DBUser(**record) if user.is_email_verified: raise HTTPException( status_code=HTTP_409_CONFLICT, detail="User already verified", ) await crud.user.update(user.id, {'is_email_verified': True}) send_new_account_email(email=user.email, username=user.username, first_name=user.first_name) return {"msg": "Account verified"}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def verify(token: TextData, background_tasks: BackgroundTasks):\n token_data = token.data\n mail, subject, body = await AccountProcessor.confirm_email(token_data)\n background_tasks.add_task(Utility.validate_and_send_mail, email=mail, subject=subject, body=body)\n return {\"message\": \"Account Verified!\"}", "def verify_token(token):\n return AuthToken.query.filter_by(auth_token=token).first()", "def get(self, request, token):\n user, token = self._authenticate_credentials(request, token)\n\n if not user.is_valid:\n user.is_valid = True\n user.save()\n return Response({\"message\": \"youve been verified\",\n \"status\": 200}, status=status.HTTP_200_OK)\n else:\n return Response({'msg': 'account has already been verified'},\n status=status.HTTP_400_BAD_REQUEST)", "def verify_token(self, token: str) -> str:\n return decode(self.rd.hget(\"auth:by_token\", token))", "def verify_token(self, token):\n return False", "def validate_token(self, token):\n try:\n self._verification = models.EmailVerification.objects.get(\n token=token,\n )\n except models.EmailVerification.DoesNotExist:\n raise serializers.ValidationError(\n code='invalid_token',\n detail=_('The provided token does not exist or has expired.'),\n )\n\n return token", "def verify_token(*token): # pragma: no cover\n\n if current_app.config.get('IGNORE_AUTH') is True:\n return True\n\n g.user = APITokenModel.verify_token(token[0])\n\n if g.user is None:\n return False\n\n return g.user", "def verify_email_token(self, token):\n href = '/accounts/emailVerificationTokens/' + token\n data = self._store.create_resource(href, {})\n\n return self.resource_class(client=self._client, properties=data)", "def verify_email(uid, token):\n return True", "def verify_email_token(self, token):\n href = '/accounts/emailVerificationTokens/' + token\n data = self._store.create_resource(href, {})\n\n return self.resource_class(properties=data, client=self._client)", "def token_verify_handler(token):\n _user = token_get_user_model(token)\n res = dict(user=UserSerializer(_user).data)\n return res", "def verify_two_factor_token(username, token):\n device = TOTPDevice.objects.device_for_user(username)\n if device:\n return device.verify_token(token)", "def verify_auth_token(token):\n\n s = Serializer(current_app.config['SECRET_KEY'])\n\n try:\n data = s.loads(token)\n except SignatureExpired:\n print \"EXP\", token\n return None\n except BadSignature:\n print \"BAD\", token\n return None\n\n user = User.query.get(data['id'])\n return user", "def verify_token(vial_http: urllib3.connectionpool.ConnectionPool) -> bool:\n verify_resp = vial_http.request(\"GET\", \"/api/verifyToken\")\n return verify_resp.status == 200", "def verify_auth_token(token):\n s = Serializer(app.config['SECRET_KEY'])\n try:\n data = s.loads(token)\n except BadSignature:\n return None # invalid token\n user = User.query.get(data['email'])\n return user", "def verify_token(event):\n if event['token'] != VERIFICATION_TOKEN:\n print('Presented with invalid token - ignoring message...')\n return False\n return True", "def verify_auth_token(token):\n s = Serializer(mscolab_settings.SECRET_KEY)\n try:\n data = s.loads(token)\n except SignatureExpired:\n logging.debug(\"Signature Expired\")\n return None # valid token, but expired\n except BadSignature:\n logging.debug(\"Bad Signature\")\n return None # invalid token\n user = User.query.filter_by(id=data['id']).first()\n return user", "def verify_auth_token(token):\n serializer = Serializer(SECRET_KEY)\n try:\n data = 
serializer.loads(token)\n except SignatureExpired:\n return None # valid token, but expired\n except BadSignature:\n return None # invalid token\n return data['token']", "async def validate_token(self, token):", "def test_token_verification(self):\n db.session.add(self.user, self.user2)\n db.session.commit()\n user_token = self.user.generate_auth_token(1)\n self.assertEqual(self.user.verify_auth_token(user_token), self.user)\n self.assertIsNone(self.\n user.verify_auth_token('jdjdje230920093944334j'))", "def verify_auth_token(token):\n ser = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = ser.loads(token)\n except (BadSignature, SignatureExpired):\n return None\n return User.query.get(data['id'])", "def verify_auth_token(token):\n serializer = Serializer(SECRET_KEY)\n try:\n data = serializer.loads(token)\n except (SignatureExpired, BadSignature):\n return None\n else:\n user = User.get(User.id == data['id'])\n return user", "def verify_token(token):\n try:\n idinfo = client.verify_id_token(token, app.config['GOOGLE_CLIENT_ID'])\n if idinfo['iss'] not in [\n 'accounts.google.com',\n 'https://accounts.google.com'\n ]:\n raise crypt.AppIdentityError(\"Wrong issuer.\")\n except crypt.AppIdentityError:\n return False\n return True", "def verify_auth_token(token):\n # In case the token so wrong that it's None\n if not token:\n raise BadSignatureToken\n\n gen_token = Serializer(app.config['API_SECRET_KEY'])\n try:\n data = gen_token.loads(token)\n except SignatureExpired:\n raise ExpiredToken() # valid token, but expired\n except BadSignature:\n raise BadSignatureToken() # invalid token\n user = User.query.get(data['id'])\n return user", "def check_token(self, token):\n decoded_token = manage_tokens.decode(token)\n if decoded_token is None:\n return {'error': 'Token is invalid'}\n\n if 'email' not in decoded_token or 'expires' not in decoded_token \\\n or 'token' not in decoded_token:\n return {'error': 'Token is invalid'}\n\n self.email = decoded_token['email']\n self.user_in_db = User.users_db.get(decoded_token['email'])\n\n if not self.user_in_db:\n # User does not exist\n return {'error': 'User does not exist'}\n\n if self.user_in_db['token'] != decoded_token['token']:\n return {'error': 'Token is invalid'}\n\n if decoded_token['expires'] < time.time():\n return {'error': 'Token is expired'}\n\n return decoded_token", "async def authenticate(self, token) -> bool:\n return True", "def verify_token(self, token):\n _now = timezone.now()\n\n if (\n (self.token is not None)\n and (token == self.token)\n and (_now < self.valid_until)\n ):\n self.token = None\n self.valid_until = _now\n self.save()\n\n return True\n else:\n return False", "def verify_pending_survey_token(token):\n logger.info(\"Attempting to verify share/transfer survey token with party service\", token=token)\n\n url = f\"{app.config['PARTY_URL']}/party-api/v1/pending-survey/verification/{token}\"\n response = requests.get(url, auth=app.config[\"BASIC_AUTH\"])\n\n try:\n response.raise_for_status()\n except requests.exceptions.HTTPError:\n logger.error(\"Failed to verify share/transfer survey token\", token=token)\n raise ApiError(logger, response)\n\n logger.info(\"Successfully verified token\", token=token)\n return response", "def test_verification_with_valid_token(self) -> None:\n\n secret_key = str(self.author.secret_key)\n verification_url = reverse('author:verify', kwargs={'secret_key': str(secret_key)})\n\n # Make sure URL's don't change.\n self.assertEqual(verification_url, 
f'/api/authors/verify/{secret_key}/')\n\n # Make valid request and get response\n response: Response = self.client.get(verification_url)\n\n self.assertEqual(response.status_code, 302)\n\n # Now test if the method \"verify\" was called\n self.assertEqual(Author.objects.get().verified, True)\n # We don't wanna give him too many privileges\n self.assertEqual(self.author.is_staff, False)", "def verify_user(self, tokendict):\n return self.post('verify', tokendict)" ]
[ "0.73761034", "0.71671677", "0.7066079", "0.6981067", "0.6979663", "0.68349594", "0.68256533", "0.6713865", "0.67072505", "0.6704424", "0.6683784", "0.66674215", "0.6619582", "0.66139793", "0.65695435", "0.65394056", "0.6536171", "0.6521967", "0.6393953", "0.6376964", "0.63546085", "0.63443774", "0.6325104", "0.6318941", "0.6280716", "0.62775934", "0.6245376", "0.6234781", "0.62344366", "0.62291354" ]
0.7953528
0
Returns the number of frames in the trajectory in universe u, using teq as equilibration time and tsample as sampling time
def traj_nslice (u,teq,tsample) : # get the number of frames in the slice (http://stackoverflow.com/a/7223557) traj_slice = u.trajectory[teq::tsample] return sum(1 for _ in traj_slice)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def num_tracked_samples(self, u=None):\n u = self.virtual_root if u is None else u\n return self._ll_tree.get_num_tracked_samples(u)", "def frameTimes(self):\n sr = self.sampleRate\n offset = self.activeOffset\n stride = self.activeStride\n nf = self.numFrames\n t = np.arange(nf) * (stride[0] / sr) + (offset / sr)\n return t", "def num_samples(self, u=None):\n u = self.virtual_root if u is None else u\n return self._ll_tree.get_num_samples(u)", "def num_trials(self):", "def _number_of_samples(self):\n return len(self._raw_data.samples)", "def sample_count(self):\n if self._sample_count:\n return self._sample_count\n else:\n return self._wave.getnframes()", "def getNrSamples(self): \r\n return self.numSamples", "def get_tick_count(self, *args, **kwargs):\n return _uhd_swig.time_spec_t_get_tick_count(self, *args, **kwargs)", "def NbSamplesU(self, *args):\n return _Adaptor3d.Adaptor3d_TopolTool_NbSamplesU(self, *args)", "def test_get_integration_time_shape():\n test_file = os.path.join(DATA_PATH, \"paper_test_file.uvh5\")\n test_uv = UVData()\n test_uv.read(test_file)\n\n baseline_array = np.array(list(set(test_uv.baseline_array)))\n inttime_array = utils.get_integration_time(test_uv, reds=baseline_array)\n test_shape = (test_uv.Nbls, test_uv.Ntimes)\n assert test_shape == inttime_array.shape", "def num_frames(self, inp_len: th.Tensor) -> th.Tensor:\n if inp_len is None:\n return None\n if self.spectra_index == -1:\n warnings.warn(\"SpectrogramTransform layer is not found, \" +\n \"return input as the #num_frames\")\n return inp_len\n if self.perturb_index != -1:\n inp_len = self.transform[self.perturb_index].output_length(inp_len)\n num_frames = self.transform[self.spectra_index].num_frames(inp_len)\n # return num_frames // self.subsampling_factor\n return th.div(num_frames,\n self.subsampling_factor,\n rounding_mode=\"trunc\")", "def num_samples(self):\n with audioread.audio_open(self.path) as f:\n return int(f.duration * f.samplerate)", "def count_timepoints(sc, session, files):\n tuples = zip(range(len(files)), files)\n files_sc = sc.parallelize(tuples)\n\n def count_planes(kv):\n index, path2 = kv\n try:\n from ScanImageTiffReader import ScanImageTiffReader\n img = ScanImageTiffReader(path2).data()\n except Exception:\n import tifffile\n img = tifffile.imread(path2)\n return img.shape[0]\n\n data2 = files_sc.map(count_planes).collect()\n frame_numbers = np.array(data2)\n vol_numbers = frame_numbers / len(session.fieldMask)\n return vol_numbers.astype(int)", "def numel(self):\n return self.t.size", "def _interFrameLen(self):\n return np.ceil((self.interFrameDuration * self.sampleRate) / self.downsample) * self.downsample", "def trial_ptdt(trial, omit_missing_frames=True):\n frames = trial.HMM_MLE\n if omit_missing_frames:\n frames = frames[frames >= 0]\n\n transitions_from_distractor_to_target = 0\n transitions_from_distractor = 0\n for first, second in zip(frames, frames[1:]):\n if first > 0 and second != first and second >= 0:\n transitions_from_distractor += 1\n if second == 0:\n transitions_from_distractor_to_target += 1\n try:\n return transitions_from_distractor_to_target \\\n /transitions_from_distractor\n except ZeroDivisionError:\n return float('nan')", "def CEPSTRUM(y, t):\n dt = t[2] - t[1]\n #Fs = 1.0 / dt\n L = len(y)\n #Y = fft(y, L)\n #amp = np.abs(Y)/(L/2) # FFT single sided spectrum\n #T = L * dt #1/T=Fs/L\n #freq = np.arange(0, Fs / 2, 1 / T) # list frequencies up to Nyquist frequency\n #C=real(ifft(log(abs(fft(y)))))\n C = np.abs(ifft(np.log(np.abs(fft(y))**2)))**2\n 
NumUniquePts = int(np.ceil((L + 1) / 2))\n C = C[0:NumUniquePts]\n q = np.arange(0, NumUniquePts, 1) * dt\n return q, C", "def get_uds_3_frames_count(self, iface):\n pytest.skip(\"Method is not supported by Iperf TG\")", "def cal_samples(self):\n max_omega = max(\n abs(2 * np.pi * self.u.fundamental),\n abs(2 * np.pi * self.v.fundamental),\n abs(2 * np.pi * self.w.fundamental),\n )\n max_freq = max_omega / (2 * np.pi)\n self.fake_samples_number = (\n (max_freq ** 2) * 6 * self.u.data.shape[0] / self.u.sampling_rate\n )", "def tidefit(self,frqnames=None,basetime=None):\r\n \r\n # Get the tidal fruequencies\r\n if frqnames == None:\r\n\t\t\t# This returns the default frequencies from the uspectra class\r\n frq,frqnames = getTideFreq(Fin=None)\r\n else:\r\n frq,frqnames = getTideFreq(Fin=frqnames)\r\n \r\n # Call the uspectra method\r\n U = uspectra(self.tsec,self.y,frq=frq,method='lsqfast')\r\n \r\n amp,phs = U.phsamp(phsbase=basetime)\r\n \r\n return amp, phs, frq, frqnames, U.invfft()", "def getQiimeSffSamplesCount(self,sample):\n try:\n con = self.getSFFDatabaseConnection()\n results = 0\n query_results=con.cursor().callproc('get_qiime_sff_samples_count', \\\n [str(sample),results])\n return query_results\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), str(e))\n return False", "def inputsAtTime(t, u):\n if u is None:\n raise Exception('u is None')\n theta_p = 0\n Ux = 0\n Uy = 0\n if hasattr(u,'keys'):\n if 'pitch' in u.keys():\n theta_p = u['pitch'](t)\n if 'Ux' in u.keys():\n Ux = u['Ux'](t)\n if 'Uy' in u.keys():\n Uy = u['Uy'](t)\n else:\n try:\n Ux, Uy, theta_p = u(t) \n except:\n Ux, Uy, theta_p = u\n return Ux, Uy, theta_p", "def trial_atd(trial, omit_missing_frames=True):\n frames = trial.HMM_MLE\n if omit_missing_frames:\n frames = frames[frames >= 0]\n total_frames = len(frames)\n num_runs = len([run for run in calc_run_lengths(frames)])\n if num_runs == 0:\n return float('nan')\n return (total_frames/num_runs)/60", "def _getnt(simulation, t=None):\n nt_sim = simulation.nt()\n \n if t is not None:\n \n dummy = np.zeros(nt_sim)\n nt = len2(dummy[t])\n \n else:\n \n nt = nt_sim\n \n return nt", "def get_number_of_measurement(self):\n num_of_meas = 0\n for time in self.mdvtc.keys():\n num_of_meas = num_of_meas + self.mdvtc[time].get_number_of_measurement()\n #\n return num_of_meas", "def find_ts(uh_t):\n input_interval = uh_t[1]-uh_t[0]\n log.debug('Input Timestep = %i seconds' % input_interval)\n return input_interval", "def number_fdma_channels (b_hz, g_hz, u_hz):\n available_bandwidth = b_hz - g_hz #Take off one guard band since we need N+1 guard bands\n sub_channel_req = g_hz + u_hz\n num_users = math.floor(available_bandwidth / sub_channel_req)\n return num_users", "def number_of_answers(frame, frequency= 'M'):\n\n \n result = frame.set_index(DatetimeIndex(frame.inserted))\n if frequency=='M':\n result = result.groupby(['user',lambda x: x.year,lambda x: x.month])\n elif frequency == 'W':\n result = result.groupby(['user',lambda x: x.year,lambda x: x.week])\n else:\n result = result.groupby(['user',lambda x: x.year,lambda x: x.day])\n result = result.apply(lambda x: Series({'length': len(x) if len(x) else None, 'date':x.inserted.values[0]}))\n result = result.set_index(DatetimeIndex(result.date))\n return result.resample(frequency,how='mean').length", "def num_test_samples(self):\n if self._num_test_samples is None:\n for key, value in self._test_data.items():\n self._num_test_samples[key] = len(value[0])\n return self._num_test_samples", 
"def number_of_users(frame, frequency = 'M'):\n\n times = frame.groupby('user').apply(lambda x: x.inserted.values[0])\n times = times.reset_index()\n times = times.set_index(DatetimeIndex(times[0]))\n return times.resample(frequency,how=len).user" ]
[ "0.5920934", "0.5774861", "0.5752458", "0.54901206", "0.5386585", "0.53777325", "0.53337634", "0.53329504", "0.52839094", "0.52773035", "0.5194862", "0.51744634", "0.5169164", "0.51685876", "0.51301676", "0.51198584", "0.5106076", "0.51057935", "0.50803345", "0.50795835", "0.50770324", "0.5076273", "0.5058351", "0.50546634", "0.5045835", "0.5024122", "0.5021133", "0.50155675", "0.500309", "0.4998995" ]
0.69339037
1
Calculate the Pearson correlation coefficient between the row sum of the given HiC matrix and the given ChIPseq profile.
def hic_chipseq_r2 (hic, chipseq) : hic_rowsum = np.sum(hic,axis=1)/float(np.sum(hic)) return np.corrcoef(hic_rowsum,chipseq)[0,1]**2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _pearsons_contingency_coefficient_compute(confmat: Tensor) ->Tensor:\n confmat = _drop_empty_rows_and_cols(confmat)\n cm_sum = confmat.sum()\n chi_squared = _compute_chi_squared(confmat, bias_correction=False)\n phi_squared = chi_squared / cm_sum\n tschuprows_t_value = torch.sqrt(phi_squared / (1 + phi_squared))\n return tschuprows_t_value.clamp(0.0, 1.0)", "def correlation(C):\n\n if type(C) is not np.ndarray:\n raise TypeError('C must be a numpy.ndarray')\n if len(C.shape) < 2 or C.shape[0] is not C.shape[1]:\n raise ValueError('C must be a 2D square matrix')\n return C / np.sqrt(np.outer(np.diagonal(C), np.diagonal(C)))", "def _pearson_correlation_coeff(x_data, y_data):\n reg = linregress(x_data, y_data)\n return reg.rvalue", "def _calc_ppcc(self):\n\n res = self._model.fit()\n normal_quantile = self._calc_res_normal_quantile()\n\n ppcc, _ = stats.pearsonr(normal_quantile, res.resid)\n\n return ppcc", "def calc_ic(data):\n return scs.spearmanr(data[:, 0], data[:, 1]).correlation", "def pearson_correlation(X, Y):\n # should not need X_norm_squared because if you could precompute that as\n # well as Y, then you should just pre-compute the output and not even\n # call this function.\n if X is Y:\n X = Y = np.asanyarray(X)\n else:\n X = np.asanyarray(X)\n Y = np.asanyarray(Y)\n\n if X.shape[1] != Y.shape[1]:\n raise ValueError(\"Incompatible dimension for X and Y matrices\")\n\n XY = ssd.cdist(X, Y, 'correlation', 2)\n\n return 1 - XY", "def pearsoncor(X, Y, code = 0):\r\n n = len(X)\r\n sx = ssd(X)\r\n sy = ssd(Y)\r\n xbar = float(sum(X)) / n\r\n ybar = float(sum(Y)) / n\r\n if code == 0:\r\n return sum([(x - xbar) * (y - ybar) for x, y in zip (X,Y)])/(sx * sy*(n-1.0))\r\n else:\r\n numer = sum([x*y for x,y in zip(X,Y)]) - n*(xbar * ybar)\r\n denom = sqrt((sum([x*x for x in X]) - n* xbar**2)*(sum([y*y for y in Y]) -n* ybar**2))\r\n return (numer /denom)", "def pearsonCorrelation(x, y):\n\tsum_sq_x = 0\n\tsum_sq_y = 0\n\tsum_coproduct = 0\n\tmean_x = x[0]\n\tmean_y = y[0]\n\tif len(x) != len(y):\n\t\traise StatsError(\"Data sets are of different lengths.\")\n\tn = len(x)\n\tfor i in range(1,n):\n\t\tsweep = i / (i+1.0)\n\t\tdelta_x = x[i] - mean_x\n\t\tdelta_y = y[i] - mean_y\n\t\tsum_sq_x += delta_x * delta_x * sweep\n\t\tsum_sq_y += delta_y * delta_y * sweep\n\t\tsum_coproduct += delta_x * delta_y * sweep\n\t\tmean_x += delta_x / (i+1.0)\n\t\tmean_y += delta_y / (i+1.0)\n\tpop_sd_x = math.sqrt( sum_sq_x / n )\n\tpop_sd_y = math.sqrt( sum_sq_y / n )\n\tcov_x_y = sum_coproduct / n\n\tr = cov_x_y / (pop_sd_x * pop_sd_y)\n\tz = math.fabs(r) * math.sqrt(n) / math.sqrt(2.0)\n\tp = Prob_Z(z)\n\tif not (0.0 <= p <= 1.0):\n\t\traise StatsError(\"Invalid P-value of %r.\" % r)\n\treturn (r, p, n)", "def pearson_correlation(sim, obs, dim=\"time\"):\n # wrap numpy function\n kwargs = dict(\n input_core_dims=[[dim], [dim]], dask=\"parallelized\", output_dtypes=[float]\n )\n pearsonr = xr.apply_ufunc(_pearson_correlation, sim, obs, **kwargs)\n pearsonr.name = \"pearson_coef\"\n return pearsonr", "def pearson_r(x, y):\r\n # Compute correlation matrix: corr_mat\r\n \r\n corr_mat=np.corrcoef(x,y)\r\n\r\n # Return entry [0,1]\r\n return corr_mat[0,1]", "def cov_to_corr(matrix):\n sqrtdiag = np.sqrt(np.diag(matrix))\n return matrix / np.outer(sqrtdiag, sqrtdiag)", "def correlation(C):\n if not isinstance(C, np.ndarray):\n raise TypeError(\"C must be a numpy.ndarray\")\n shape = C.shape\n if (len(shape) != 2) or shape[0] != shape[1]:\n raise ValueError(\"C must be a 2D square matrix\")\n\n diagonal = 
np.diag(C)\n\n # standard deviation\n std = np.sqrt(np.expand_dims(diagonal, axis=0))\n\n correlation = C / np.matmul(std.T, std)\n\n return correlation", "def Corr(x,y):\n \n cocoeff1 = np.empty((y.shape[1],y.shape[2]))\n cocoeff2 = np.empty((y.shape[1],y.shape[2]))\n for i in xrange(y.shape[1]):\n for j in xrange(y.shape[2]):\n cocoeff1[i,j],cocoeff2[i,j] = sts.pearsonr(x[:,i,j],y[:,i,j])\n \n print 'Completed: Correlation calculations!'\n \n return cocoeff1, cocoeff2", "def _pearson_corrcoef_compute(var_x: Tensor, var_y: Tensor, corr_xy: Tensor, nb: Tensor) ->Tensor:\n var_x /= nb - 1\n var_y /= nb - 1\n corr_xy /= nb - 1\n corrcoef = (corr_xy / (var_x * var_y).sqrt()).squeeze()\n return torch.clamp(corrcoef, -1.0, 1.0)", "def einsum_correlation(X, Y_i, type=\"pearson\"):\n\n if type == \"pearson\":\n X -= X.mean(axis=1)[:, None]\n Y_i -= np.nanmean(Y_i)\n elif type == \"cosine\":\n X, Y_i = X, Y_i\n elif type == \"spearman\":\n # check this\n X = stats.rankdata(X, axis=1)\n Y_i = stats.rankdata(Y_i)\n elif type == \"kendalltau\":\n corr = np.array([stats.kendalltau(x, Y_i)[0] for x in X])\n return corr[None, :]\n\n X_norm, Y_norm = norm(X, axis=1), norm(Y_i)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n if Y_norm == 0:\n corr = np.zeros(X_norm.shape[0])\n else:\n corr = np.einsum(\"ij, j\", X, Y_i) / (X_norm * Y_norm)[None, :]\n\n return corr", "def custom_corrcoef(X, Y=None):\n if Y is None:\n Y = X\n \n if X.shape[0] != Y.shape[0]:\n raise Exception(\"X and Y must have the same number of rows.\")\n \n X = X.astype(float)\n Y = Y.astype(float)\n \n X -= X.mean(axis=0)[np.newaxis,...]\n Y -= Y.mean(axis=0)\n \n xx = np.sum(X**2, axis=0)\n yy = np.sum(Y**2, axis=0)\n \n r = np.dot(X.T, Y)/np.sqrt(np.multiply.outer(xx,yy))\n \n return r", "def calc_channel_corr(cube, mask=None):\n from scipy.stats import pearsonr\n\n if mask is None:\n mask = cube.mask.include()\n mask &= np.roll(mask, -1, axis=0)\n mask[-1, :] = False\n\n return pearsonr(cube.filled_data[mask],\n cube.filled_data[np.roll(mask, 1, axis=0)])", "def pearson_cor_co(X, Y, n):\n\n mean_X = get_mean(X,n)\n mean_Y = get_mean(Y,n)\n\n std_X = get_std_dev(X,n)\n std_Y = get_std_dev(Y,n)\n\n err_X = []\n err_Y = [] \n for i in range(0,n):\n err_X.append(X[i] - mean_X)\n err_Y.append(Y[i] - mean_Y)\n\n return sum(map( multiply, err_X, err_Y))/(n*std_X*std_Y)", "def pearson_r(x, y):\n # Compute correlation matrix: corr_mat\n corr_mat = np.corrcoef(x,y)\n # Return entry [0,1]\n return corr_mat[0,1]", "def _calculate_correlation(self, anomaly):\n if self.silence_level <= 1:\n print(\"Calculating partial correlation matrix at zero lag from \"\n \"anomaly values...\")\n\n # Calculate the correlation matrix, cast to float64 for precise\n # calculation of inverse matrix.\n C = np.corrcoef(anomaly.transpose()).astype(\"float64\")\n\n # Calculate the inverse correlation matrix\n if np.linalg.det(C) != 0.0:\n C_inv = np.linalg.inv(C)\n else:\n C_inv = np.linalg.pinv(C)\n\n # Clean up\n del C\n\n # Get the diagonal of the inverse correlation matrix\n diag = C_inv.diagonal()[:]\n\n # Calculate matrix of normalizations\n norm = np.sqrt(abs(np.outer(diag, diag)))\n\n return - C_inv / norm", "def corrcoef(self):\r\n return np.corrcoef(self.input.data)", "def pearson_r(x, y):\n # Compute correlation matrix: corr_mat\n corr_mat = np.corrcoef(x,y)\n\n # Return entry [0,1]\n return corr_mat[0,1]", "def pearson_r(x, y):\n # Compute correlation matrix: corr_mat\n corr_mat = np.corrcoef(x,y)\n\n # Return entry [0,1]\n return 
corr_mat[0,1]", "def pearson_r(x, y):\n # Compute correlation matrix: corr_mat\n corr_mat = np.corrcoef(x,y)\n\n # Return entry [0,1]\n return corr_mat[0,1]", "def pearson_r(x, y):\n # Compute correlation matrix: corr_mat\n corr_mat = np.corrcoef(x,y)\n\n # Return entry [0,1]\n return corr_mat[0,1]", "def pearson_r(x, y):\n # Compute correlation matrix: corr_mat\n corr_mat = np.corrcoef(x, y)\n\n # Return entry [0,1]\n return corr_mat[0,1]", "def pearson_r(x, y):\n # Compute correlation matrix: corr_mat\n corr_mat = np.corrcoef(x, y)\n\n # Return entry [0,1]\n return corr_mat[0,1]", "def pearson_r(x, y):\n # Compute correlation matrix: corr_mat\n corr_mat = np.corrcoef(x, y)\n\n # Return entry [0,1]\n return corr_mat[0,1]", "def phi_coefficient (subgroup,target1,target2):\r\n return matthews_corrcoef(subgroup[target1], subgroup[target2])", "def pearson_correlation_comparison(data, synth):\n mean_data = np.average(data)\n mean_synth = np.average(synth)\n cov_data_synth = np.sum((data-mean_data)*(synth-mean_synth))/len(data)\n PCC = cov_data_synth/(np.std(data)*np.std(synth)) # Pearson correlation coefficient (-1 to 1, where 0 is no correlation, -1 is anti-correlation and 1 is correlation.)\n if PCC<0.:\n PCC = 0.\n return PCC" ]
[ "0.65713036", "0.60864913", "0.6080404", "0.6041075", "0.5945653", "0.5941", "0.5863057", "0.57821524", "0.5782027", "0.5723004", "0.5652084", "0.5646357", "0.5644976", "0.56440914", "0.5639138", "0.5633804", "0.56248856", "0.5624098", "0.5592133", "0.5590622", "0.5569893", "0.5561767", "0.5561767", "0.5561767", "0.5561767", "0.5538398", "0.5538398", "0.5538398", "0.5537416", "0.5516964" ]
0.62098205
1
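Most of the negative snippets listed above reduce to reading one off-diagonal entry of numpy's correlation matrix. A minimal runnable sketch of that pattern, using synthetic data that is an illustrative assumption rather than anything from the records:

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=200)                      # illustrative synthetic series
y = 0.5 * x + rng.normal(scale=0.5, size=200)

corr_mat = np.corrcoef(x, y)                  # 2x2 symmetric correlation matrix
print(corr_mat[0, 1])                         # Pearson r between x and y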
Calculate the relative proportion of contacts of the tracers with binding sites compared with non-binding sites. As usual, the user should supply the equilibration time, sampling time, and contact threshold value.
def contacts_with (sim,polymer_text,tracers_text,bindingsites_text,teq,tsample,threshold) : # select polymer, tracers, and binding sites polymer = sim.u.select_atoms (polymer_text) tracers = sim.u.select_atoms (tracers_text) bss = sim.u.select_atoms (bindingsites_text) # select binding site indices bs_n = bss.n_atoms bs_idx = bss.indices # select non-binding site indices polymer_idx = polymer.indices nbs_idx = np.setdiff1d (polymer_idx,bs_idx) nbs_n = nbs_idx.size # evaluate contacts with binding sites and non-binding sites for each # independent simulation snapshot c = [] for i,ts in enumerate(sim.u.trajectory[teq::tsample]) : d = distance_array (polymer.positions,tracers.positions, box=ts.dimensions) contacts = d<threshold cB = np.sum (contacts[bs_idx]).astype('float') cA = np.sum (contacts[nbs_idx]).astype('float') if cA != 0 : c.append ((cB/cA) / (float(bs_n)/nbs_n)) return np.mean(np.array(c))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def contact_probability(summary, results, contacts, bins, feature):\r\n\r\n # prepare sampling interval size\r\n bin_width = bins[2]\r\n n_bins = int(np.ceil(bins[1]/bin_width))\r\n # bin bounds\r\n end_bins = np.arange(bin_width,bin_width*(n_bins+1), bin_width)\r\n # prepare arrays\r\n # two counts will be made, for each assumption. r (contact radius), t (contact time)\r\n activated_counts_r = np.zeros(n_bins, dtype=int)\r\n activated_counts_t = np.zeros(n_bins, dtype=int)\r\n total_counts_r = np.zeros(n_bins, dtype=int)\r\n total_counts_t = np.zeros(n_bins, dtype=int)\r\n n_cells = len(summary.cell_ID)\r\n\r\n for i, cell in summary.iterrows():\r\n if cell.QC == 'good':\r\n trig_contacts = contacts.loc[(contacts.cell_ID == cell.cell_ID) & (contacts['time_to_Ca [s]'] == 0) & (contacts['contact'] == 'CCZ'), :]\r\n if len(trig_contacts) > 0:\r\n max_r_ = np.max(trig_contacts['radius [um]'].values)\r\n max_r = np.max(trig_contacts.loc[trig_contacts['radius [um]'] == max_r_, feature])\r\n max_t_ = np.max(trig_contacts['contact_time [s]'].values)\r\n max_t = np.max(trig_contacts.loc[trig_contacts['contact_time [s]'] == max_t_, feature])\r\n else:\r\n max_r = 0\r\n max_t = 0\r\n if feature == 'radius [um]':\r\n max_r = max_r*1000\r\n max_t = max_t*1000\r\n activated_counts_r += end_bins >= max_r\r\n activated_counts_t += end_bins >= max_t\r\n total_counts_r += 1\r\n total_counts_t += 1\r\n\r\n elif cell.QC == 'good_noCa':\r\n last_frame = np.amax(results.loc[results.cell_ID == cell.cell_ID, 'frame'])\r\n last_contacts = contacts.loc[(contacts.cell_ID == cell.cell_ID) & (contacts['frame'] == last_frame) & (contacts['contact'] == 'CCZ'), :]\r\n max_r_ = np.max(last_contacts['radius [um]'].values)\r\n max_r = np.max(last_contacts.loc[last_contacts['radius [um]'] == max_r_, feature])\r\n max_t_ = np.max(last_contacts['contact_time [s]'].values)\r\n max_t = np.max(last_contacts.loc[last_contacts['contact_time [s]'] == max_t_, feature])\r\n if feature == 'radius [um]':\r\n max_r = max_r*1000\r\n max_t = max_t*1000\r\n total_counts_r += end_bins-bin_width < max_r\r\n total_counts_t += end_bins-bin_width < max_t \r\n\r\n return end_bins, activated_counts_r, activated_counts_t, total_counts_r, total_counts_t", "def _fraction_latency(self, users_distances):\n\n users_desired_latency = np.array(list(map(lambda a: self.services_desired_latency[a],\n self.users_services)))\n check = users_distances < users_desired_latency\n fraction = np.count_nonzero(check==True) / self.num_of_users\n return fraction", "def process(self, compartment_values, computed_values, time):\n return self.detected_proportion_func(time)", "def __call__(self, relsSortedByScores, qrelDict):\n result = 0.\n postQty = len(qrelDict)\n\n pos = 0\n for i, rel in enumerate(relsSortedByScores):\n if rel > RELEVANCE_THRESHOLD:\n pos += 1.\n result += pos / (i + 1.)\n\n return result / postQty", "def __calculate_statistics(self, candidates):\n pdf = {}\n for candidate in candidates:\n neighbors = list(self.G.neighbors(candidate))\n capacity = sum([self.G.get_edge_data(candidate, n)[\"satoshis\"] for n in neighbors])\n average = capacity / len(neighbors)\n pdf[candidate] = average\n cumsum = sum(pdf.values())\n pdf = {k:v/cumsum for k,v in pdf.items()}\n w = 0.7\n print(\"percentage smoothed percentage capacity numchannels alias\")\n print(\"----------------------------------------------------------------------\")\n res_pdf = {}\n for k,v in pdf.items():\n neighbors = list(self.G.neighbors(k))\n capacity = sum([self.G.get_edge_data(k, 
n)[\"satoshis\"] for n in neighbors])\n name = k\n if \"alias\" in self.G.node[k]:\n name = self.G.node[k][\"alias\"]\n print(\"{:12.2f} \".format(100*v), \"{:12.2f} \".format(100*(w * v + (1-w)/len(candidates))) ,\"{:10} {:10} \".format( capacity, len(neighbors)), name)\n res_pdf[k] = (w * v + (1-w)/len(candidates))\n return res_pdf", "def calMeasuredContactAngle(self):\n #account the base\n bottomLength = 0\n arrayHeight = np.empty([0, ], dtype = 'int64')\n for i in sp.arange(self.nx):\n if (self.densityFluid1[1, i] >= 0.485):\n bottomLength += 1\n #account the height\n for m in sp.arange(self.nx):\n tmpHeight = 0\n for n in sp.arange(1, self.ny - 1):\n if (self.densityFluid1[n, m] >= 0.485):\n tmpHeight += 1\n arrayHeight = np.append(arrayHeight, tmpHeight)\n heightH = np.amax(arrayHeight)\n #radius of droplet\n radiusD = (4. * np.power(heightH, 2.) + np.power(bottomLength, 2.)) / \\\n (8. * heightH)\n contactAngle = np.arctan((bottomLength) / (2. * (radiusD - heightH))) \n return contactAngle", "def compute_duty_factor():\n [time,\n ankle_l_trajectory,\n ankle_r_trajectory,\n foot_l_contact,\n foot_r_contact,\n muscle_lh_activations,\n muscle_rh_activations,\n muscle_lh_forces,\n muscle_rh_forces,\n joint_lh_positions,\n joint_rh_positions] = load_data()\n \n print(np.sum(foot_l_contact)/len(foot_l_contact))\n print(np.sum(foot_r_contact)/len(foot_r_contact))\n\n return np.sum(foot_l_contact)/len(foot_l_contact)*0.5 + np.sum(foot_r_contact)/len(foot_r_contact)*0.5", "def _UpdateCriteria(self):\n grad = self.traj.grad[-1]\n disp = self.traj.coords[-1] - self.traj.coords[-2]\n self.delta_e = self.traj.energy[-1] - self.traj.energy[-2]\n self.grad_max = numpy.amax(grad)\n self.disp_max = numpy.amax(disp)\n self.grad_rms = math.sqrt(numpy.mean(grad**2))\n self.disp_rms = math.sqrt(numpy.mean(disp**2))", "def get_proteome_correct_percentages(prots_filtered_feathers, outpath, length_filter_pid=None,\n copynum_scale=False, copynum_df=None,\n force_rerun=False):\n if ssbio.utils.force_rerun(flag=force_rerun, outfile=outpath):\n prot_tracker = defaultdict(int)\n big_strain_counts_df = pd.DataFrame()\n first = True\n for feather in prots_filtered_feathers:\n loaded = load_feather(protein_feather=feather, length_filter_pid=length_filter_pid,\n copynum_scale=copynum_scale,\n copynum_df=copynum_df)\n\n if first:\n big_strain_counts_df = pd.DataFrame(columns=loaded.columns)\n first = False\n tmp_df = pd.DataFrame(columns=loaded.columns)\n for strain in loaded.columns:\n prot_tracker[strain] += 1\n totals = list(filter(lambda x: x.endswith('total'), loaded[strain].index))\n for t in totals:\n counts = t.rsplit('_', 1)[0]\n aa_counts = list(\n filter(lambda x: (x.startswith(counts) and x not in totals), loaded[strain].index))\n for aa_count in aa_counts:\n tmp_df.at[aa_count.replace('count', '%'), strain] = loaded[strain][aa_count] / \\\n loaded[strain][t]\n big_strain_counts_df = big_strain_counts_df.add(tmp_df, fill_value=0)\n\n for c, total in prot_tracker.items():\n big_strain_counts_df.loc[:, c] /= total\n\n if len(big_strain_counts_df) > 0:\n big_strain_counts_df.astype(float).reset_index().to_feather(outpath)\n return big_strain_counts_df\n else:\n return pd.read_feather(outpath).set_index('index')", "def adv_ratio(self): # XXX\r\n bw = StatsRouter.global_bw_mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/bw", "def cps(self):\n return self.datacounts / self.exptime", "def _match_rate_goal(self, goal, booked_entity, domains=None):\n if domains is None:\n domains = self.belief_domains\n 
score = []\n for domain in domains:\n if 'book' in goal[domain]:\n tot = 0\n for key, value in goal[domain].items():\n if value != '?':\n tot += 1\n entity = booked_entity[domain]\n if entity is None:\n score.append(0)\n continue\n if domain in ['taxi', 'hospital', 'police']:\n score.append(1)\n continue\n match = 0\n for k, v in goal[domain].items():\n if v == '?':\n continue\n if k in ['dest', 'depart', 'name'] or k not in self.mapping[domain]:\n tot -= 1\n elif k == 'leave':\n try:\n v_constraint = int(v.split(':')[0]) * 100 + int(v.split(':')[1])\n v_select = int(entity['leaveAt'].split(':')[0]) * 100 + int(entity['leaveAt'].split(':')[1])\n if v_constraint <= v_select:\n match += 1\n except (ValueError, IndexError):\n match += 1\n elif k == 'arrive':\n try:\n v_constraint = int(v.split(':')[0]) * 100 + int(v.split(':')[1])\n v_select = int(entity['arriveBy'].split(':')[0]) * 100 + int(entity['arriveBy'].split(':')[1])\n if v_constraint >= v_select:\n match += 1\n except (ValueError, IndexError):\n match += 1\n else:\n if v.strip() == entity[self.mapping[domain][k]].strip():\n match += 1\n if tot != 0:\n score.append(match / tot)\n return score", "def _match_rate_goal(self, goal, booked_entity, domains=None):\n if domains is None:\n domains = self.belief_domains\n score = []\n for domain in domains:\n if 'book' in goal[domain]:\n tot = 0\n for key, value in goal[domain].items():\n if value != '?':\n tot += 1\n entity = booked_entity[domain]\n if entity is None:\n score.append(0)\n continue\n if domain in ['taxi', 'hospital', 'police']:\n score.append(1)\n continue\n match = 0\n for k, v in goal[domain].items():\n if v == '?':\n continue\n if k in ['dest', 'depart', 'name'] or k not in self.mapping[domain]:\n tot -= 1\n elif k == 'leave':\n try:\n v_constraint = int(v.split(':')[0]) * 100 + int(v.split(':')[1])\n v_select = int(entity['leaveAt'].split(':')[0]) * 100 + int(entity['leaveAt'].split(':')[1])\n if v_constraint <= v_select:\n match += 1\n except (ValueError, IndexError):\n match += 1\n elif k == 'arrive':\n try:\n v_constraint = int(v.split(':')[0]) * 100 + int(v.split(':')[1])\n v_select = int(entity['arriveBy'].split(':')[0]) * 100 + int(entity['arriveBy'].split(':')[1])\n if v_constraint >= v_select:\n match += 1\n except (ValueError, IndexError):\n match += 1\n else:\n if v.strip() == entity[self.mapping[domain][k]].strip():\n match += 1\n if tot != 0:\n score.append(match / tot)\n return score", "def pct_bust(data):\n return round((data[\"new_total\"] > 21).sum() / len(data), 3)", "def calculate(self):\n\n gt = self.ground_truth.flatten().astype(np.int8)\n seg = self.segmentation.flatten().astype(np.int8)\n\n probability_difference = np.absolute(gt - seg).sum()\n probability_joint = (gt * seg).sum()\n\n if probability_joint != 0:\n return probability_difference / (2. 
* probability_joint)\n else:\n return -1", "def compute_effort(data):\n\t# get only the data (no timestamps)\n\tedata = data[1:8]\n\t(h,w) = np.shape(edata)\n\teffort = 0.0\n\tfor t in range(w):\n\t\tjoint = edata[:,t]\n\t\t#NOTE: used to be 2-norm: norm = np.linalg.norm(joint)\n\t\ttotal = np.sum(np.abs(joint))\n\t\teffort += total\n\n\treturn effort", "def get_performance(self):\n if self.skip_reference:\n return self.compare_sim.tps\n\n # Avoid divide by zero errors when the simulation is not executed.\n if self.reference_sim.tps == 0:\n return 0\n\n t0 = 1 / self.reference_sim.tps\n t1 = 1 / self.compare_sim.tps\n return 1 / (t1 - t0)", "def traffic_concentration(rFile, sheets, x, y, pollutant):\r\n p = Point(x, y)\r\n road, dis = nearest_road(p, rFile)\r\n \r\n # step 1. check whether this location is within the calculation range. If so, go on. Otherwise exit.\r\n if dis > 60: # I think we dont have to consider points that are too far from the streets.\r\n return 'e1' # error 1\r\n \r\n if dis < 3.5:\r\n dis = 3.5 # In the NSL calculation tool, calculation distance smaller than 3.5 meters are limited to 3.5 meters.\r\n \r\n # step 2. determine all the parameters required.\r\n \r\n #calibration factor\r\n Fk = 0.62\r\n \r\n # Emission number. for SO2, NO2, NOx, PM10, PM2.5, lead, and CO\r\n N = int(road['properties']['intensity']) #the traffic intensity, being the number of vehicles per day\r\n Fs = float(road['properties']['f_cong']) #fraction of stagnant traffic, a number between 0 and 1\r\n Fm = float(road['properties']['f_medium']) #fraction of medium-weight motor vehicles\r\n Fz = float(road['properties']['f_heavy']) #fraction of heavy motor vehicles\r\n Fb = float(road['properties']['f_bus']) #fraction of buses\r\n st = str(road['properties']['speed_type']) #intotal 5 types: a:100, b:44, c:19, d:13, e:26 (km/h)\r\n El = emission_factor(sheets, 'p', st, pollutant) #emission factor of light motor vehicles\r\n Em = emission_factor(sheets, 'm', st, pollutant) #emission factor of medium-weight motor vehicles\r\n Ez = emission_factor(sheets, 'v', st, pollutant) #emission factor of heavy motor vehicles\r\n Eb = emission_factor(sheets, 'b', st, pollutant) #emission factor of buses\r\n Eld = emission_factor(sheets, 'p', 'd', pollutant) #emission factor of light motor vehicles (speedType: d)\r\n Emd = emission_factor(sheets, 'm', 'd', pollutant) #emission factor of medium-weight motor vehicles (speedType: d)\r\n Ezd = emission_factor(sheets, 'v', 'd', pollutant) #emission factor of heavy motor vehicles (speedType: d)\r\n Ebd = emission_factor(sheets, 'b', 'd', pollutant) #emission factor of buses (speedType: d)\r\n \r\n E_regular = N * (1 - Fs) * ((1 - Fm - Fz - Fb) * El + Fm * Em + Fz * Ez + Fb * Eb) * 1000 / 24 / 3600\r\n E_cong = N * Fs * ((1 - Fm - Fz - Fb) * Eld + Fm * Emd + Fz * Ezd + Fb * Ebd) * 1000 / 24 / 3600\r\n E = E_regular + E_cong\r\n# print(\"{}: {}, {}\".format(pollutant, E_regular, E_cong))\r\n #dilution factor\r\n roadType = str(road['properties']['class'])\r\n if roadType == '1': # Broad street canyon\r\n a = 0.000325\r\n b = -0.0205\r\n c = 0.39\r\n alpha = 0.856\r\n elif roadType == '2': # Small street canyon\r\n a = 0.000488\r\n b = -0.0308\r\n c = 0.59\r\n alpha = None\r\n elif roadType == '3': # One-sided buildings\r\n a = 0.0005\r\n b = -0.0316\r\n c = 0.57\r\n alpha = None\r\n elif roadType == '4': # General urban\r\n a = 0.000310\r\n b = -0.0182\r\n c = 0.33\r\n alpha = 0.799\r\n \r\n if dis > 30 and (roadType == 1 or roadType == 4):\r\n theta = alpha * pow(dis, 
-0.747)\r\n else:\r\n theta = a * dis**2 + b * dis + c\r\n \r\n #tree factor\r\n Fb = road['properties']['t_factor']\r\n \r\n #wind speed\r\n ws = wind_speed(sheets, x, y) # average speed from CAR VL3.0\r\n \r\n #regional factor related to meteorology\r\n Fregio = 5 / ws\r\n \r\n # step 3. calculate the traffic concentration based on the parameters above.\r\n C_traffic = Fk * E * theta * Fb * Fregio\r\n \r\n # If it is NO2, then NOx has to be considered due to its chemical reaction with O3.\r\n if pollutant == 'NO2':\r\n B = 0.6 # fixed number?\r\n K = 100 # parameter for the conversion from NO to NO2\r\n C_background_O3 = background_concentration(sheets, x, y, 'O3')\r\n C_traffic_NOx = traffic_concentration(rFile, sheets, x, y, 'NOx')\r\n C_traffic = C_traffic + B * C_background_O3 * (C_traffic_NOx - C_traffic) / (C_traffic_NOx - C_traffic + K)\r\n \r\n return C_traffic", "def test_construct_signals_proportions(self):\n cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),\n SIGNALS)\n assert np.all(cbg_df['completely_home_prop'].values <= 1)\n assert np.all(cbg_df['full_time_work_prop'].values <= 1)\n assert np.all(cbg_df['part_time_work_prop'].values <= 1)", "def _compute_rating(self, cand):\n p = cand.info['bary_period']\n dm = cand.info['dm']\n ra = cand.info['raj_deg']\n decl = cand.info['decj_deg']\n pdiff_min = 0.0\n\n diff_ra = np.abs(self.known_ras - ra)\n diff_dec = np.abs(self.known_decls - decl)\n\n ii_nearby = (diff_ra < 0.2) & (diff_dec < 0.2)\n periods = self.known_periods[ii_nearby]\n dms = self.known_dms[ii_nearby]\n\n for b in range(1, M):\n pdiff = (2.0*np.abs(p*b-periods)/(p*b+periods))\n\n if np.any((pdiff < 0.002)):\n for dispm in dms:\n pdiff_dm=1.0/(2.0*np.abs(((dispm)-dm)/((dispm)+dm)))\n pdiff_min=np.min(pdiff_dm,pdiff_min)\n if pdiff_min == 0.0:\n for rat in self.ratios:\n pdiff = 2.0*np.abs(((p*rat)-periods)/((p*rat)+periods))\n if np.any((pdiff < 0.02)):\n for dispm in dms:\n pdiff_dm=1.0/(2.0*np.abs(((dispm)-dm)/((dispm)+dm)))\n pdiff_min=np.min(pdiff_dm,pdiff_min)\n return pdiff_min", "def specific_binding_fraction(matrix,n=10000):\n return mean([score(matrix,random_site(10)) < -8 for i in xrange(n)])", "def connection_probability(prev_props : EdgeProps, curr_props : EdgeProps) -> float:\n d = Delay.__compute_delay(\n arrival_time = prev_props['arr_time'],\n next_departure_time = curr_props['dep_time'], \n ttype = prev_props['ttype'], \n previous_route_id = prev_props['trip_id'],\n next_route_id = curr_props['trip_id']\n )\n proba = Delay.__connection_probability(\n delay = d, \n gamma_params = prev_props['gamma'], \n ttype = curr_props['ttype'], \n previous_route_id = prev_props['trip_id'],\n next_route_id = curr_props['trip_id']\n )\n return proba", "def calculScore(self):\n for cell in self.notComputeRouter:\n if(cell.isCovered==True):\n self.score += 1000\n self.score += self.budget", "def get_snp_call_rate(self):\n d_temp = {}\n results = self.get_results()\n for sample, variants in results.items():\n for snp, info in variants.items():\n if snp in d_temp:\n d_temp[snp].append(info['genotype'])\n else:\n d_temp[snp] = [info['genotype']]\n d = {}\n sample_count = len(results.keys())\n for key, value in d_temp.items():\n na_count = len([x for x in value if not x])\n d[key] = float(na_count) / float(sample_count)\n return d", "def calculate_p(candidate, reference):\n matches = 0\n for grama in candidate:\n if grama in reference:\n matches += 1\n return matches/len(candidate)", "def distance_between(self, n1, n2):\n if self.distance_method 
== 'direct':\n n1_relevants = 0\n n2_relevants = 0\n for i in range(len(self.sample)):\n if is_relevant(self.sample.iloc[i], n1.anchor):\n n1_relevants += 1\n if is_relevant(self.sample.iloc[i], n2.anchor):\n n2_relevants += 1\n return (n1_relevants - n2_relevants)/len(self.sample)\n else:\n return 0.5", "def calculate_request_threshold(self, requests_per_second):\n request_threshold = 1.0 / float(requests_per_second)\n return request_threshold", "def cf_profile(self):\n x = np.abs(self.gen_profile() / self.sam_sys_inputs['system_capacity'])\n return x", "def relative_range(self):\n self.calculate_relative_mags()\n string = '{:.0f}-{:.0f}Hz: {:.5f}'\n s_ind = self.get_bin(self.s_freq)\n e_ind = self.get_bin(self.e_freq)\n lst = self.rel_mags[s_ind:e_ind+1]\n return sum(lst)/len(lst)", "def overhead(readings):\n return 100.0 * (int(readings[0]) + int(readings[1])) / (int(readings[2]) + int(readings[3]))" ]
[ "0.596737", "0.547867", "0.54080415", "0.5299857", "0.5124064", "0.5097309", "0.5068922", "0.50371313", "0.5016846", "0.4988263", "0.4936729", "0.49304453", "0.49304453", "0.48953757", "0.48854768", "0.4878589", "0.4844149", "0.48417434", "0.48266906", "0.48197976", "0.48078907", "0.47866055", "0.47595146", "0.47585425", "0.4753665", "0.47525272", "0.47485682", "0.47404727", "0.4734357", "0.47331527" ]
0.6338657
0
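Per frame, the contacts_with document above boils down to the enrichment ratio (cB/cA) / (bs_n/nbs_n). A minimal, self-contained numpy sketch of that normalization on synthetic contact data; the array sizes, the binding-site indices, and the 0.3 contact probability are illustrative assumptions, not values from the source:

import numpy as np

# Illustrative sizes (assumptions, not taken from the source simulation)
n_polymer, n_tracers = 100, 10
bs_idx = np.arange(0, 20)                               # hypothetical binding-site beads
nbs_idx = np.setdiff1d(np.arange(n_polymer), bs_idx)    # the remaining, non-binding beads

rng = np.random.default_rng(0)
# Synthetic boolean contact map: contacts[i, j] = polymer bead i touches tracer j
contacts = rng.random((n_polymer, n_tracers)) < 0.3

cB = np.sum(contacts[bs_idx]).astype(float)     # contacts with binding sites
cA = np.sum(contacts[nbs_idx]).astype(float)    # contacts with non-binding sites
if cA != 0:
    enrichment = (cB / cA) / (float(len(bs_idx)) / len(nbs_idx))
    print(enrichment)   # ~1 for uniform contacts, >1 if binding sites are favoured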
Perform a simple fit of the supplied time-dependent MSD, using a linear regression of the logarithms of the values. The user must supply the conversion factor from time to real time and from length to real length. Also, the user must supply a cutoff index that excludes the initial part of the MSD (whose first value is zero) from the fit.
def fit_msd (msd,cutoff,delta_t,scale_l) : # prepare the values to fit: exclude the first value because it is zero t = np.arange(msd.size)*delta_t x = np.log(t[cutoff:]) y = np.log(msd[cutoff:]*scale_l**2) # perform fit to y = ax + b with their errors b,a,db,da = mbt.linear_regression (x,y,0.99) # now convert the value of b into a diffusion coefficient D = np.exp(b)/6.0 dD = np.exp(db)/6.0 return a,da,D,dD
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fit_model(train_ts_dis, data, init_prior = [.5,.5], bias = True, mode = \"biasmodel\"):\r\n if mode == \"biasmodel\":\r\n #Fitting Functions\r\n def bias_fitfunc(rp, tsb, df):\r\n init_prior = [.5,.5]\r\n model = BiasPredModel(train_ts_dis, init_prior, ts_bias = tsb, recursive_prob = rp)\r\n model_likelihoods = []\r\n for i in df.index:\r\n c = df.context[i]\r\n trial_choice = df.subj_ts[i]\r\n conf = model.calc_posterior(c)\r\n model_likelihoods.append(conf[trial_choice])\r\n return np.array(model_likelihoods)\r\n \r\n def bias_errfunc(params,df):\r\n rp = params['rp']\r\n tsb = params['tsb']\r\n #minimize\r\n return abs(np.sum(np.log(bias_fitfunc(rp,tsb,df)))) #single value\r\n \r\n #Fit bias model\r\n #attempt to simplify:\r\n fit_params = lmfit.Parameters()\r\n fit_params.add('rp', value = .6, min = 0, max = 1)\r\n if bias == True:\r\n fit_params.add('tsb', value = 1, min = 0)\r\n else:\r\n fit_params.add('tsb', value = 1, vary = False, min = 0)\r\n out = lmfit.minimize(bias_errfunc,fit_params, method = 'lbfgsb', kws= {'df': data})\r\n lmfit.report_fit(out)\r\n return out.values\r\n \r\n elif mode == \"midline\":\r\n #Fitting Functions\r\n def midline_errfunc(params,df):\r\n eps = params['eps'].value\r\n context_sgn = np.array([max(i,0) for i in df.context_sign])\r\n choice = df.subj_ts\r\n #minimize\r\n return -np.sum(np.log(abs(abs(choice - (1-context_sgn))-eps)))\r\n \r\n #Fit bias model\r\n #attempt to simplify:\r\n fit_params = lmfit.Parameters()\r\n fit_params.add('eps', value = .1, min = 0, max = 1)\r\n midline_out = lmfit.minimize(midline_errfunc,fit_params, method = 'lbfgsb', kws= {'df': data})\r\n lmfit.report_fit(midline_out)\r\n return midline_out.values", "def linear_trend(times, magnitudes):\n model = linear_model.LinearRegression()\n model.fit(times, magnitudes)\n\n return model.coef_[0][0]", "def dm_time_behaviour(sps, use_env=True):\n pts = get_envelope(sps) if use_env else sps\n _,_,R,_,_ = linregress(pts.time, pts.dm)\n return R**2", "def exp_fit(timeList, voltageList, ySS):\n\n bList = [log(max(y-ySS,1e-6)) for y in voltageList]\n b = np.matrix(bList).T\n rows = [ [1,t] for t in timeList]\n A = np.matrix(rows)\n #w = (pinv(A)*b)\n (w,residuals,rank,sing_vals) = np.linalg.lstsq(A,b)\n tau = -1.0/w[1,0]\n amplitude = np.exp(w[0,0])\n return (amplitude,tau)", "def do_stats_model(x, y):\n Xx = sm.add_constant(x)\n sm_logit = sm.Logit(y, Xx)\n result = sm_logit.fit()\n print result.summary()\n result.pred_table()\n # linear model\n print \"linear regression model:\\n\"\n sm_linear = sm.OLS(y, Xx)\n result = sm_linear.fit()\n print result.summary()", "def test_linear_fit_fixed_parameter(self):\n init_model = models.Polynomial1D(degree=2, c1=1)\n init_model.c1.fixed = True\n\n x = np.arange(10)\n y = 2 + x + 0.5 * x * x\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, y)\n assert_allclose(fitted_model.parameters, [2.0, 1.0, 0.5], atol=1e-14)", "def fit_linear_trend(series):\n X = make_design_matrix(np.arange(len(series)) + 1)\n linear_trend_ols = sm.OLS(series.values, X).fit()\n linear_trend = linear_trend_ols.predict(X)\n return linear_trend", "def test_tmle_fit(continuous_dataset_fixture):\n\n tmle = TMLE(\n treatment_grid_bins=[22.1, 30, 40, 50, 60, 70, 80.1],\n random_seed=100,\n verbose=True,\n )\n tmle.fit(\n T=continuous_dataset_fixture[\"treatment\"],\n X=continuous_dataset_fixture[[\"x1\", \"x2\"]],\n y=continuous_dataset_fixture[\"outcome\"],\n )\n\n assert tmle.n_obs == 72\n assert len(tmle.psi_list) == 5\n assert 
len(tmle.std_error_ic_list) == 5", "def fit_line_Vo(x, y, n):\n x1=x[0:n]\n y1=y[0:n]\n X = sm.add_constant(x1)\n model = sm.OLS(y1, X, missing='drop') # ignores entires where x or y is NaN\n fit = model.fit()\n m=fit.params[1] \n b=fit.params[0] \n# stderr=fit.bse # could also return stderr in each via fit.bse\n \n N = 100 # could be just 2 if you are only drawing a straight line...\n points = np.linspace(x.min(), x.max(), N)\n \n \n fig=plt.figure(1) #PLOTING TOGETHER\n \n ax = fig.add_subplot(111)\n ax.plot(x, y)\n ax.plot(points, m*points + b)\n \n plt.legend(['data','fitt Vo'],fontsize=16)\n \n ax.set_yscale('linear',fontsize=16)\n ax.tick_params(axis='x', labelsize=14)\n ax.tick_params(axis='y', labelsize=14)\n plt.ylabel('Abs',fontsize=16)\n plt.xlabel('Time(sec)',fontsize=16)\n ax.grid()\n plt.grid()\n plt.show()\n \n print(\"The Vo fitted model is: {0:2f}*x+{1:2f} \".format(m, b))\n return m,b", "def lnlike_spectral(params, t, f, ferr, expTime, t01, t02):\n L = -np.sum((f - transitModel_spectral(params, t, expTime, t01, t02))**2 / (2*(ferr)**2)) -\\\n 0.5 * np.sum(np.log(2 * np.pi * (ferr)**2))\n return L", "def experiment_linear_tradeoff_linf(_):\n adv_norm_type = 'linf'\n dual_norm_type = 'l1'\n # Min l1-norm solution found (norm=0.6876)\n attack_eps = 1/0.6876\n attack_step_dir = 'sign_grad'\n module_name = 'train'\n log_dir = 'runs_linear_tradeoff_%s' % adv_norm_type\n exclude = '*'\n\n d_over_n = [32]\n dim = 100\n num_train = [int(dim / p) for p in d_over_n]\n\n # Config params\n shared_params = []\n shared_params += [\n ('config', './config.py'),\n ('seed', list(range(3))),\n ]\n\n # Data hyper-parameters\n shared_params += [\n ('temperature', 0.0001),\n ('num_test', 500),\n ('dim', dim),\n ('num_train', num_train),\n ]\n\n # Adversarial configuration: test\n shared_params += nameit('adv', [\n ('norm_type', adv_norm_type),\n ('lr', 0.1),\n ('niters', 10),\n # ('eps_iter', attack_eps), # Overwritten by cvxpy\n # ('eps_tot', attack_eps), # Overwritten by cvxpy\n ('pre_normalize', True), # multi attacks\n ('post_normalize', True),\n ('eps_from_cvxpy', True),\n ('step_dir', attack_step_dir),\n ])\n\n # Logging to standard output\n shared_params += [\n ('log_interval', 10000), # 1000),\n ('log_keys', '\\'(\"%s\")\\'' % ('\",\"'.join([\n 'risk/train/zero_one',\n 'risk/train/adv/%s' % adv_norm_type,\n 'weight/linear/norm/%s' % dual_norm_type,\n 'margin/%s' % dual_norm_type,\n ]))),\n # Compare with cvxpy\n ('enable_cvxpy', True),\n ]\n params = []\n\n # reg_coeff = [1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1]\n reg_coeff = [1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 10]\n # Between 1e-3 and 1e-1 for d/n=10 the adv robustness drops\n reg_coeff += [3e-3, 5e-3, 3e-2, 5e-2, 3e-1, 5e-1]\n\n # Model hyper-parameters\n linear_noreg_model_params = nameit('model', [\n ('arch', 'linear'),\n ('regularizer', 'none'),\n ])\n linear_reg_model_params = nameit('model', [\n ('arch', 'linear'),\n ('regularizer', ['w_%s' % dual_norm_type]),\n ('reg_coeff', reg_coeff),\n ])\n\n # Explicit regularization with line search\n # njobs=3*6*20*4*2=2880\n explicit_reg = nameit('optim', [\n ('name', 'fista'),\n ('niters', 10000),\n ('bound_step', True),\n ('step_size', [1, 10, 100, 1000]),\n ])\n params += [OrderedDict(shared_params+linear_reg_model_params+explicit_reg)]\n\n # Adversarial training with line search\n for i in [1] + list(np.arange(0.1, 2, 0.2)): # [0.1, 0.3, 0.5, 0.7, 1, 1.3]:\n adv_train_params = nameit('optim', [\n ('name', 'gd_ls'),\n ('niters', 10000),\n ('bound_step', True),\n ])\n 
adv_train_params += nameit('optim', nameit('adv_train', [\n ('enable', True),\n ('norm_type', adv_norm_type),\n ('lr', 0.1),\n ('niters', 10), # niters, 1000\n ('pre_normalize', True),\n ('post_normalize', True),\n ('step_dir', attack_step_dir),\n ('eps_iter', float(attack_eps) * i),\n ('eps_tot', float(attack_eps) * i),\n ]))\n params += [OrderedDict(\n shared_params+linear_noreg_model_params+adv_train_params)]\n\n return params, log_dir, module_name, exclude", "def fit_ar1_t(t, y):\n lntau0 = np.log(np.mean(np.diff(t)))\n sigma = np.std(y)\n yr = y - np.mean(y)\n nlnp = lambda lntau, sigma: -1.0 * ar1_t_like(t, yr, np.exp(lntau), sigma)\n res = minimize(nlnp, lntau0, args=(sigma,), method='Nelder-Mead')\n tau = np.exp(res.x.squeeze())\n return tau, sigma", "def fit_power_law(self, y, cut=0.6, interactive=True):\n\n self.fit_cut = cut\n\n start = np.where(y > 1e-6)[0][0] # find first non-zero value since we will take the log\n end = int(self.fit_cut * len(self.time_uniform)) # fit up until a fraction, cut, of the trajectory\n # print(y[start:end])\n # exit()\n\n # fit line to linear log plot\n A = Poly_fit.poly_fit(np.log(self.time_uniform[start:end]), np.log(y[start:end]), 1)[-1]\n\n return [np.exp(A[0]), A[1]]", "def fitdoubleexp(xdata,ydata,fitparams=None,domain=None,showfit=False,showstartfit=False,label=\"\"):\n if domain is not None:\n fitdatax,fitdatay = selectdomain(xdata,ydata,domain)\n else:\n fitdatax=xdata\n fitdatay=ydata\n if fitparams is None:\n fitparams=[0.,0.,0.,0.,0.,0.]\n # fitparams[0]=fitdatay[-1]\n fitparams[1]= max(fitdatay) - fitdatay[0]\n #fitparams[1]=fitdatay[0]-fitdatay[-1]\n #fitparams[2]=fitdatax[0]\n fitparams[3]=(fitdatax[-1]-fitdatax[0])/5\n #fitparams[4]=(max(fitdatay)-fitdatay[-1])\n fitparams[5]=(fitdatax[-1]-fitdatax[0])/5\n #print fitparams\n p1 = fitgeneral(fitdatax,fitdatay,doubleexpfunc,fitparams,domain=None,showfit=showfit,showstartfit=showstartfit,label=label)\n return p1", "def fit_timeseries(xdates, ydata):\n\n pass", "def linear_fit(x, y):\n x = np.array(x)\n y = np.array(y)\n \n invalid_idx = np.isnan(x) | np.isinf(x) | np.isnan(y) | np.isinf(y)\n x = x[~invalid_idx]\n y = y[~invalid_idx]\n\n covs = sm.add_constant(x, prepend=True)\n model = sm.OLS(y, covs)\n result = model.fit()\n return result.params, result.rsquared", "def fitfunc(x_unshifted, p=default()):\n x = x_unshifted+p[4]\n xtr, ytr, gradtr = logcontinuity(p)\n if x < xtr:\n return logpeak(x, p)\n else:\n return logpowerlaw(x, p)", "def linear_regression(d, ind, dep):\n\n\ty=d.get_data([dep])\n\tprint \"y :\",y\n\tA=d.get_data(ind)\n\tprint \"A :\",A\n\tones = np.asmatrix(np.ones( (A.shape[0]) )).transpose()\n\tA=np.concatenate((A, ones), axis=1)\n\tprint \"concatenated A :\",A\n\tAAinv=np.linalg.inv( np.dot(A.transpose(), A))\n\tprint \"AAinv: \\n\",AAinv\n\t\"\"\"\n\tprint \"A :\",A\n\tprint \"y: \",y\n\tprint \"AAinv: \",AAinv\"\"\"\n\tprint \"shape A:\t \",A.shape\n\tprint \"shape y\t:\", y.shape\n\tx=np.linalg.lstsq(A,y)\n\tprint \"x :\\n\",x\n\tb=x[0]\n\tprint \"\\n b : \\n\",b\n\tN=len(y)\n\tprint \"N :\t\\n\",N\n\tC=len(b)\n\tprint \"C :\t \",C\n\tdf_e=N-C\n\tdf_r=C-1\n\terror=y - np.dot(A, b)\n\tprint \"error:\t\",error\n\tsse=np.dot(error.transpose(), error) / df_e\n\tprint \"sse\t:\",sse\n\tstderr=np.sqrt( np.diagonal( sse[0, 0] * AAinv ) )\n\tprint \"stderr: \",stderr\n\tt = b.transpose() / stderr\n\tprint \"t :\", t\n\tp=2*(1 - scipy.stats.t.cdf(abs(t), df_e))\n\tprint \"p:\t\",p\n\tr2=1 - error.var() / y.var()\n\tprint \"R^2\t :\",r2, \"\\n \\n \\n 
\\n*************************************\"\n\t\n\t\n\treturn [b,sse,r2,t,p]", "def fit_function_LS(data, params, z, fn):\n result = params\n errorfunction = lambda p: fn(*p)(z) - data\n good = True\n [result, cov_x, infodict, mesg, success] = (\n scipy.optimize.leastsq(\n errorfunction, params, full_output = 1, maxfev = 500\n )\n )\n err = errorfunction(result)\n err = scipy.sum(err * err)\n if (success < 1) or (success > 4):\n print( \"Fitting problem!\", success, mesg)\n good = False\n return [result, cov_x, infodict, good]", "def fit_line(data, error_func):\n\n # Generate initial guess for line model\n l = np.float32([0, np.mean(data[:, 1])]) # slope = 0, intercept = mean(y values)\n\n # Plot initial guess (optional)\n x_ends = np.float32([-5, 5])\n plt.plot(x_ends, l[0] * x_ends + l[1], 'm--', linewidth = 2.0, label = 'Initial guess')\n\n # Call optimizer to minimize error function\n result = spo.minimize(error_func, l, args = (data, ), method = 'SLSQP', options = {'disp': True})\n return result.x", "def fit(self):\n self.lr = LRHMC( self.X_train, self.X_test, self.y_train, self.y_test )\n self.lr.fit()", "def ts_fit(series: TimeSeries) -> TimeSeries:\n pass", "def fit(self, x, y, logger):\n history = self.model1.fit(x=x, y=y, batch_size=self.batch_size, epochs=self.epochs)\n logger.log({'ValFuncLoss': history.history['loss'][-1]})", "def _fit_gas_trend(cls, x, y, fit_type=None):\n fit_type = cls._max_trend_poly_deg if fit_type is None else fit_type\n if fit_type == 'exp':\n logger.debug('Using exponential fit to extrapolate {}'.format(cls._gas_name))\n fit = np.polynomial.polynomial.Polynomial.fit(x, np.log(y), 1, w=np.sqrt(y))\n return lambda t: np.exp(fit(t))\n\n else:\n logger.debug('Using order {} polynomial to extrapolate {}'.format(fit_type, cls._gas_name))\n fit = np.polynomial.polynomial.Polynomial.fit(x, y, deg=fit_type)\n return fit", "def nnRegression(data):", "def fitexp(xdata,ydata,fitparams=None,domain=None,showfit=False,showstartfit=False,label=\"\"):\n if domain is not None:\n fitdatax,fitdatay = selectdomain(xdata,ydata,domain)\n else:\n fitdatax=xdata\n fitdatay=ydata\n if fitparams is None: \n fitparams=[0.,0.,0.,0.]\n fitparams[0]=fitdatay[-1]\n fitparams[1]=fitdatay[0]-fitdatay[-1]\n fitparams[1]=fitdatay[0]-fitdatay[-1]\n fitparams[2]=fitdatax[0]\n fitparams[3]=(fitdatax[-1]-fitdatax[0])/5.\n #print fitparams\n p1 = fitgeneral(fitdatax, fitdatay, expfunc, fitparams, domain=None, showfit=showfit, showstartfit=showstartfit,\n label=label)\n return p1", "def test_fit(self):\n X = self.generate_X()\n task = mmRDTR()\n fit_result = task.fit(X)", "def fit_exp_data(x_vals, y_vals):\n log_vals = []\n for y in y_vals:\n log_vals.append(math.log(y, 2)) #get log base 2\n fit = np.polyfit(x_vals, log_vals, 1)\n return fit, 2", "def _model_fit_term(self):\r\n if self.likelihood.YYT is None:\r\n tmp, _ = dtrtrs(self.L, np.asfortranarray(self.likelihood.Y), lower=1)\r\n return -0.5 * np.sum(np.square(tmp))\r\n # return -0.5 * np.sum(np.square(np.dot(self.Li, self.likelihood.Y)))\r\n else:\r\n return -0.5 * np.sum(np.multiply(self.Ki, self.likelihood.YYT))", "def test_double_ended_ols_wls_estimate_synthetic():\n from dtscalibration import DataStore\n import numpy as np\n\n np.random.seed(0)\n\n cable_len = 100.\n nt = 50\n time = np.arange(nt)\n x = np.linspace(0., cable_len, 100)\n ts_cold = np.ones(nt) * 4.\n ts_warm = np.ones(nt) * 20.\n\n C_p = 15246\n C_m = 2400.\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 0.0005607\n gamma = 482.6\n cold_mask = x < 0.5 * 
cable_len\n warm_mask = np.invert(cold_mask) # == False\n temp_real = np.ones((len(x), nt))\n temp_real[cold_mask] *= ts_cold + 273.15\n temp_real[warm_mask] *= ts_warm + 273.15\n\n st = C_p * np.exp(-dalpha_r * x[:, None]) * \\\n np.exp(-dalpha_p * x[:, None]) * np.exp(gamma / temp_real) / \\\n (np.exp(-gamma / temp_real) - 1)\n ast = C_m * np.exp(-dalpha_r * x[:, None]) * \\\n np.exp(-dalpha_m * x[:, None]) / (np.exp(-gamma / temp_real) - 1)\n rst = C_p * np.exp(-dalpha_r * (-x[:, None] + cable_len)) * \\\n np.exp(-dalpha_p * (-x[:, None] + cable_len)) * \\\n np.exp(gamma / temp_real) / (np.exp(-gamma / temp_real) - 1)\n rast = C_m * np.exp(-dalpha_r * (-x[:, None] + cable_len)) * np.exp(\n -dalpha_m * (-x[:, None] + cable_len)) / \\\n (np.exp(-gamma / temp_real) - 1)\n\n alpha = np.mean(np.log(rst / rast) - np.log(st / ast), axis=1) / 2\n\n ds = DataStore({\n 'st': (['x', 'time'], st),\n 'ast': (['x', 'time'], ast),\n 'rst': (['x', 'time'], rst),\n 'rast': (['x', 'time'], rast),\n 'userAcquisitionTimeFW': (['time'], np.ones(nt)),\n 'userAcquisitionTimeBW': (['time'], np.ones(nt)),\n 'cold': (['time'], ts_cold),\n 'warm': (['time'], ts_warm)\n },\n coords={\n 'x': x,\n 'time': time},\n attrs={\n 'isDoubleEnded': '1'})\n\n sections = {\n 'cold': [slice(0., 0.5 * cable_len)],\n 'warm': [slice(0.5 * cable_len, cable_len)]}\n\n # OLS\n ds.calibration_double_ended(sections=sections,\n st_label='st',\n ast_label='ast',\n rst_label='rst',\n rast_label='rast',\n method='ols',\n solver='sparse')\n\n np.testing.assert_almost_equal(\n ds.gamma.values, gamma, decimal=6)\n np.testing.assert_almost_equal(\n ds.alpha.values, alpha, decimal=7)\n np.testing.assert_almost_equal(\n ds.TMPF.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPB.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPW.values, temp_real - 273.15, decimal=4)\n\n # WLS\n ds.calibration_double_ended(sections=sections,\n st_label='st',\n ast_label='ast',\n rst_label='rst',\n rast_label='rast',\n st_var=1e-7,\n ast_var=1e-7,\n rst_var=1e-7,\n rast_var=1e-7,\n method='wls',\n solver='sparse',\n tmpw_mc_size=5)\n\n np.testing.assert_almost_equal(\n ds.gamma.values, gamma, decimal=5)\n np.testing.assert_almost_equal(\n ds.alpha.values, alpha, decimal=6)\n np.testing.assert_almost_equal(\n ds.TMPF.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPB.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPW.values, temp_real - 273.15, decimal=4)" ]
[ "0.6344083", "0.59664893", "0.5950108", "0.58820957", "0.57336086", "0.5699005", "0.56805956", "0.5650658", "0.5647881", "0.5618573", "0.5590892", "0.55860865", "0.5570882", "0.556389", "0.5562849", "0.55571467", "0.55424637", "0.547708", "0.5467977", "0.54346925", "0.54300976", "0.54300165", "0.5421207", "0.5404414", "0.53966707", "0.5394265", "0.5382482", "0.5376713", "0.53668123", "0.5366511" ]
0.668302
0
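As a check of the fitting logic in fit_msd, here is a minimal, self-contained sketch that builds a synthetic MSD obeying MSD(t) = 6 D t^a, fits a straight line to log(MSD) versus log(t) with numpy.polyfit in place of the mbt.linear_regression helper (which is not shown in the source), and recovers the exponent a and D = exp(b)/6. The parameter values are illustrative assumptions:

import numpy as np

# Illustrative ground truth (assumptions): subdiffusive exponent and diffusion coefficient
a_true, D_true = 0.8, 0.05
delta_t, scale_l, cutoff = 1.0, 1.0, 1

msd = 6.0 * D_true * (np.arange(200) * delta_t) ** a_true   # msd[0] = 0, as in the source

t = np.arange(msd.size) * delta_t
x = np.log(t[cutoff:])                        # skip t = 0 before taking logarithms
y = np.log(msd[cutoff:] * scale_l**2)

a_fit, b_fit = np.polyfit(x, y, 1)            # y = a*x + b in log-log space
D_fit = np.exp(b_fit) / 6.0
print(a_fit, D_fit)                           # recovers ~0.8 and ~0.05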
Calculate the mean square displacement of the particles defined by 'particles_text' in simulation sim, using sampling interval tsample and equilibration time teq. Returns the matrix corresponding to the mean square displacement of each particle, along with a matrix corresponding to the variance in the estimate of this quantity.
def msd_t (sim,particles_text,teq,tsample) : u = sim.u particles = u.select_atoms (particles_text) nparticles = particles.n_atoms nslice = traj_nslice (u,teq,tsample) # initialize the matrix containing all the positions # of the particles at all the sampling frames particles_pos = np.zeros ((nslice,nparticles,3)) for i,ts in enumerate(u.trajectory[teq::tsample]) : particles_pos[i,:,:] = particles.positions # now initialize the Delta matrix, which contains the # squared differences between the particles' positions # at different time delays Nt = int(nslice/2) Delta = np.zeros((nparticles,Nt,Nt)) for delay in xrange(1,Nt+1) : for t0 in xrange (Nt) : t1 = t0 + delay pos1 = particles_pos[t1,:,:] pos0 = particles_pos[t0,:,:] Delta[:,delay-1,t0] = np.sum((pos1-pos0)**2,axis=1) # return the matrices of MSD and its variance return np.mean(Delta,axis=2),np.var(Delta,axis=2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def msd_t(sim,particles_text,teq,tsample) :\n u = sim.u\n particles = u.select_atoms(particles_text)\n nparticles = particles.n_atoms\n nslice = traj_nslice (u,teq,tsample)\n # initialize the matrix containing all the positions\n # of the particles at all the sampling frames\n particles_pos = np.zeros ((nslice,nparticles,3))\n for i,ts in enumerate(u.trajectory[teq::tsample]) :\n particles_pos[i,:,:] = particles.positions\n # now initialize the Delta matrix, which contains the\n # squared differences between the particles' positions\n # at different time delays\n Nt = int(nslice/2)\n Delta = np.zeros((nparticles,Nt,Nt))\n for delay in xrange(1,Nt+1) :\n for t0 in xrange (Nt) :\n t1 = t0 + delay\n pos1 = particles_pos[t1,:,:]\n pos0 = particles_pos[t0,:,:]\n Delta[:,delay-1,t0] = np.sum((pos1-pos0)**2,axis=1)\n # return the matrices of MSD and its variance\n return np.mean(Delta,axis=2),np.var(Delta,axis=2)", "def calc_msd(pos_x, pos_y, pos_z):\n particles = pos_x.shape[0]\n N = pos_x.shape[1] \n tamsd = np.zeros(shape = (particles, N - 1)) \n\n for p in np.arange(start = 0, stop = particles, step = 1): \n for n in np.arange(start = 1, stop = N, step = 1): \n sumdis = np.array([((pos_x[p, i + n] - pos_x[p, i]) ** 2 + (pos_y[p, i + n] - pos_y[p, i]) ** 2 + (pos_z[p, i + n] - pos_z[p, i]) ** 2) for i in np.arange(start = 1, stop = N - n, step = 1)]).sum()\n tamsd[p, n] = sumdis / (N - n) \n return tamsd", "def spring_particle(name, num_trajectories, NUM_PARTS, T_max, dt, sub_sample_rate, noise_std, seed):\n num_particles = NUM_PARTS\n collater = {}\n\n def diffeq_hyper(t, q, k, m, nparts):\n num_particles = nparts\n vels = q[2 * num_particles:]\n xs = q[:2 * num_particles]\n xs = xs.reshape(-1, 2)\n forces = np.zeros(xs.shape)\n new_k = np.repeat(k, num_particles) * np.tile(k, num_particles)\n new_k = np.repeat(new_k, 2).reshape(-1, 2)\n dx = np.repeat(xs, num_particles, axis=0) - np.tile(xs, (num_particles, 1))\n resu = -new_k * dx\n forces = np.add.reduceat(resu, np.arange(0, nparts * nparts, nparts)).ravel()\n\n return np.concatenate([vels / np.repeat(m, 2), forces]).ravel()\n\n def hamiltonian(vec, m, k, num_particles):\n num_particles = num_particles\n x = vec[:num_particles * 2]\n p = vec[2 * num_particles:]\n xs = x.reshape(-1, 2)\n ps = p.reshape(-1, 2)\n U1 = 0\n K = 0\n for i in range(num_particles):\n for j in range(i + 1, num_particles):\n U1 += .5 * k[i] * k[j] * ((xs[i] - xs[j]) ** 2).sum()\n K += 0.5 * ((ps[i] ** 2).sum()) / m[i]\n return K, U1\n\n theta = []\n dtheta = []\n energy = []\n mass_arr = []\n ks_arr = []\n lagrangian = []\n np.random.seed(seed)\n\n for traj in range(num_trajectories):\n ks = np.ones(NUM_PARTS)#np.random.uniform(.5, 1, size=(NUM_PARTS))\n positions = np.random.uniform(-1, 1, size=(NUM_PARTS, 2))\n velocities = np.random.uniform(-3, 3, size=(NUM_PARTS, 2))\n masses = np.ones(NUM_PARTS)#np.random.uniform(0.1, 1, size=NUM_PARTS)\n momentum = np.multiply(velocities, np.repeat(masses, 2).reshape(-1, 2))\n q = np.concatenate([positions, momentum]).ravel()\n qnrk = rk(lambda t, y: diffeq_hyper(t, y, ks, masses, num_particles), (0, T_max), q,\n t_eval=np.arange(0, T_max, dt),\n rtol=1e-12, atol=1e-12, method='DOP853')\n accum = qnrk.y.T\n ssr = int(sub_sample_rate / dt)\n accum = accum[::ssr]\n daccum = np.array([diffeq_hyper(0, accum[i], ks, masses, num_particles) for i in range(accum.shape[0])])\n energies = []\n lags = []\n for i in range(accum.shape[0]):\n ktmp, utmp = hamiltonian(accum[i], masses, ks, NUM_PARTS)\n energies.append(ktmp + utmp)\n 
lags.append(ktmp - utmp)\n\n accum += np.random.randn(*accum.shape) * noise_std\n daccum += np.random.randn(*daccum.shape) * noise_std\n\n theta.append(accum)\n dtheta.append(daccum)\n energy.append(energies)\n mass_arr.append(masses)\n ks_arr.append(ks)\n lagrangian.append(lags)\n\n collater['x'] = np.concatenate(theta)\n collater['dx'] = np.concatenate(dtheta)\n collater['energy'] = np.concatenate(energy)\n collater['lagrangian'] = np.concatenate(lagrangian)\n\n collater['mass'] = mass_arr\n collater['ks'] = ks_arr\n\n f = open(name + \".pkl\", \"wb\")\n pickle.dump(collater, f)\n f.close()\n\n return collater", "def estimate(self):\n mu = self.mean()\n var = np.average((self.particles - mu) ** 2, weights=self.weights, axis=0)\n\n return mu, var", "def mean_and_variance(self, particles):\n mean = particles.mean(axis=0)\n mean[2] = np.arctan2(\n np.cos(particles[:, 2]).sum(),\n np.sin(particles[:, 2]).sum() \n )\n\n zero_mean = particles - mean\n for i in range(zero_mean.shape[0]):\n zero_mean[i, 2] = minimized_angle(zero_mean[i, 2])\n cov = np.dot(zero_mean.T, zero_mean) / self.num_particles\n\n return mean.reshape((-1, 1)), cov", "def estimate(particles, weights):\n\n pos = particles[:, 0:2]\n mean = np.average(pos, weights=weights, axis=0)\n var = np.average((pos - mean)**2, weights=weights, axis=0)\n return mean, var", "def estimate(particles, weights):\n\n pos = particles[:, 0:2]\n mean = np.average(pos, weights=weights, axis=0)\n var = np.average((pos - mean)**2, weights=weights, axis=0)\n return mean, var", "def compute_energy(self):\n energy = 0.5 * self.masses * np.sum(self.velocities * self.velocities, axis=1)\n avg_energy = np.mean(energy) # average kinetic energy of all particles\n return avg_energy", "def compute_mean_square_displacement(self):\n # compute the current_positions by adding the number of crossings of the system\n current_positions = self.positions + self.crossings\n # get the dx vector between the current position and the initial positions for all particles\n dx = current_positions - self.initial_positions\n # compute and return the mean square displacement\n return np.mean(norm(dx, axis=1)**2)", "def simulation(self):\n\n t_max = 3\n if self.meas_selected_series == 1:\n particle_density_number = self.particle_density_number\n else: # series 2:\n factors = 4/np.array([4, 6, 8, 10, 12, 14, 16, 18])\n factor = factors[(self.meas_selected_number-1)]\n particle_density_number = self.particle_density_number * factor\n\n p_i, p_f = toolbox_2.get_pressure_change(self.measurement)\n size, time2 = toolbox_2.simulate_extinction(self.particle_size_number * 1e-9,\n p_i, p_f,\n particle_density_number * 1e10,\n t_max, self.saturation_percentage / 100)\n smallest_growing_particle = toolbox_2.minimum_particle_diameter(p_i, p_f, self.saturation_percentage / 100)\n # short print:\n # print(\"M:\", self.meas_selected_number, \", \", round((p_i - p_f) / 1000, 3), \"kPa\", \", \", self.saturation_percentage, \"%\", \", \", round(smallest_growing_particle * 1e9, 2), \"nm\", \", \", sep=\"\")\n\n if smallest_growing_particle > 0:\n print(\"M:\", self.meas_selected_number, \" S:\", self.meas_selected_series, \" D:\", self.selected_data,\n \", smallest growing particle for pressure change (\", round(p_i / 1000, 2), \"-\",\n round(p_f / 1000, 2), \" = \", round((p_i - p_f) / 1000, 2), \"kPa) in \", self.saturation_percentage,\n \"% humidity is \", round(smallest_growing_particle * 1e9, 2), \"nm\", sep=\"\")\n else:\n print(\"M:\", self.meas_selected_number, \" S:\", 
self.meas_selected_series, \" D:\", self.selected_data,\n \", no particle will grow in \", \"(\", round(p_i / 1000, 2), \"-\", round(p_f / 1000, 2), \" = \",\n round((p_i - p_f) / 1000, 2), \"kPa)\", \" pressure change and \", self.saturation_percentage,\n \"% humidity \", sep=\"\")\n\n self.curve_simulate.setData(time2+0.05, size)\n self.simulate_bool = False", "def tms_E_field(dipole_pos, dipole_moment, didt, positions):\n if dipole_pos.shape != dipole_moment.shape:\n raise ValueError('List of dipole position and moments should have the same'\n 'lengths')\n mu0_4pi = 1e-7\n\n E = np.zeros(positions.shape, dtype=float)\n dp = np.atleast_2d(dipole_pos)\n dm = np.atleast_2d(dipole_moment)\n\n r1 = positions\n \n for m, r2 in zip(dm, dp):\n a = r2 - r1\n norm_a = np.linalg.norm(a, axis=1)[:, None]\n\n norm_r1 = np.linalg.norm(r1, axis=1)[:, None]\n norm_r2 = np.linalg.norm(r2)\n \n r2_dot_a = np.sum(r2 * a, axis=1)[:, None]\n F = norm_a * (norm_r2 * norm_a + r2_dot_a)\n grad_F = (norm_a ** 2 / norm_r2 + 2 * norm_a + 2 * norm_r2 + r2_dot_a / norm_a)\\\n * r2 - (norm_a + 2 * norm_r2 + r2_dot_a / norm_a) * r1\n E += -didt * mu0_4pi / F ** 2 * \\\n (F * np.cross(r1, m) - np.cross(np.sum(m * grad_F, axis=1)[:, None] * r1, r2) )\n\n # Why use -didt? Take a look at the appendix 1 of the reference. It says \"negative\n # time rate of change\"\n return E", "def getEnergy(pos: dc.float64[N, 3], vel: dc.float64[N, 3],\n mass: dc.float64[N], G: dc.float64):\n # Kinetic Energy:\n # KE = 0.5 * np.sum(np.sum( mass * vel**2 ))\n # KE = 0.5 * np.sum( mass * vel**2 )\n KE = 0.5 * np.sum(np.reshape(mass, (N, 1)) * vel**2)\n\n # Potential Energy:\n\n # positions r = [x,y,z] for all particles\n x = pos[:, 0:1]\n y = pos[:, 1:2]\n z = pos[:, 2:3]\n\n # matrix that stores all pairwise particle separations: r_j - r_i\n # dx = x.T - x\n # dy = y.T - y\n # dz = z.T - z\n # dx = np.transpose(x) - x\n # dy = np.transpose(y) - y\n # dz = np.transpose(z) - z\n dx = np.add.outer(-x, x)\n dy = np.add.outer(-y, y)\n dz = np.add.outer(-z, z)\n\n # matrix that stores 1/r for all particle pairwise particle separations\n inv_r = np.sqrt(dx**2 + dy**2 + dz**2)\n # inv_r[inv_r>0] = 1.0/inv_r[inv_r>0]\n I = inv_r > 0\n np.divide(1.0, inv_r, out=inv_r, where=I)\n\n # sum over upper triangle, to count each interaction only once\n # PE = G * np.sum(np.sum(np.triu(-(mass*mass.T)*inv_r,1)))\n # PE = G * np.sum(np.triu(-(mass*mass.T)*inv_r,1))\n tmp = -np.multiply.outer(mass, mass) * inv_r\n PE = 0.0\n for j in range(N):\n for k in range(j + 1, N):\n PE += tmp[j, k]\n PE *= G\n\n return KE, PE", "def totalmass_comvelocity(particle_list):\r\n total_momentum = sum([particle.linear_momentum()\r\n for particle in particle_list])\r\n total_mass = sum([particle.mass for particle in particle_list])\r\n\r\n return total_mass, total_momentum / total_mass", "def get_mean_emb(self, text):\n return np.mean([self.emb.get(w.lower(), self.emb.get(\"_UNK\")) for w in text.split()], axis=0)", "def evolve_system(self,dt, energy_file = None):\n phi = self.compute_field()\n force_m = self.compute_forces_mesh()\n self.acc_new = np.zeros([len(self),2])\n #Computes the force felt by each particles and deduce the acceleration\n for i in range(len(self)):\n x,y = self.ptclgrid.ixy[i]\n x = int(x)\n y = int(y)\n self.acc_new[i][0] += (1/self.mass[i]*force_m[0][x,y])\n self.acc_new[i][1] += (1/self.mass[i]*force_m[1][x,y])\n #Evolve the position and momenta of the particle in the list\n self.particles.evolve(self.acc,self.acc_new,dt,self.size, 
boundary_periodic=self.boundary_periodic)\n #For non-periodic condition, deletes the particles that leave the grid from the list\n if self.boundary_periodic!=True: \n index = np.argwhere((self.particles.position>self.size-1))\n index2 = np.argwhere((self.particles.position<0))\n index = {a for a in np.append(index,index2)}\n index = list(index)\n self.particles.momentum = np.delete(self.particles.momentum,index,axis=0)\n self.acc = np.delete(self.acc,index,axis=0)\n self.acc_new = np.delete(self.acc_new,index,axis=0)\n self.mass = np.delete(self.mass,index,axis=0)\n self.particles.position = np.delete(self.particles.position,index,axis=0)\n self.acc = self.acc_new.copy()\n #Update the position of the particles on the grid\n self.ptclgrid.update_position(self.particles.position,self.mass)\n self.grid = self.ptclgrid.grid\n self.grid_pos = self.ptclgrid.grid_pos\n #Write the energy in a file if on is given\n if energy_file != None:\n energy_file.write(f'{self.energy()}\\n')\n energy_file.flush()\n return self.grid_pos", "def add_mass_energy(particles: list[Particle]) -> u.Quantity:\n total_mass_energy = 0.0 * u.J\n for particle in particles:\n total_mass_energy += particle.mass_energy\n return total_mass_energy.to(u.J)", "def distance_matrix (sim,polymer_text,teq,tsample,threshold=2.5) :\n u = sim.u\n polymer = u.select_atoms (polymer_text)\n N = polymer.n_atoms\n nslice = mbt.traj_nslice (u,teq,tsample)\n d = np.zeros((N,N))\n for i,ts in enumerate(u.trajectory[teq::tsample]) :\n this_d = distance_array(polymer.positions,\n polymer.positions,\n box=ts.dimensions)\n d = mbt.new_average(i,d,this_d)\n return d", "def computeTsys(beam, row, T_d_x, T_d_y):\n \n xx_on = beam.cols.xx_cal_on[row].astype('float')\n xx_off = beam.cols.xx_cal_off[row].astype('float')\n \n yy_on = beam.cols.yy_cal_on[row].astype('float')\n yy_off = beam.cols.yy_cal_off[row].astype('float')\n\n T_sys_x = np.average(T_d_x[len(T_d_x)/4:3*len(T_d_x)/4]) / (xx_on/xx_off -1)\n T_sys_y = np.average(T_d_y[len(T_d_x)/4:3*len(T_d_x)/4]) / (yy_on/yy_off -1)\n\n l = len(T_sys_x)\n return np.average(T_sys_x[l/4:3*l/4]), np.average(T_sys_y[l/4:3*l/4])", "def total_KE(particles):\r\n return sum([particle.kinetic_energy() for particle in particles])", "def sim_avg(sim_mats):\n return np.array(sim_mats).mean(axis=0)", "def mean(self):\n return np.average(self.particles, weights=self.weights, axis=0)", "def _get_tads_mean_std(self, experiments):\n norm_tads = []\n for tad in experiments:\n for brk in self.experiments[tad]['tads'].values():\n if not brk['brk']:\n continue\n norm_tads.append(log((brk['end'] - brk['start']) * self.resolution))\n length = len(norm_tads)\n mean = sum(norm_tads)/length\n std = sqrt(sum([(t-mean)**2 for t in norm_tads])/length)\n return mean, std", "def mass_spring(name, num_trajectories, NUM_PARTS, T_max, dt, sub_sample_rate, noise_std, seed):\n\n def hamiltonian_fn(coords):\n q, p = np.split(coords, 2)\n\n H = (p ** 2) / 2 + (q ** 2) / 2 # spring hamiltonian (linear oscillator)\n return H\n\n def dynamics_fn(t, coords):\n dcoords = autograd.grad(hamiltonian_fn)(coords)\n dqdt, dpdt = np.split(dcoords, 2)\n S = np.concatenate([dpdt, -dqdt], axis=-1)\n return S\n\n def get_trajectory(t_span=[0, 3], timescale=0.01, ssr=sub_sample_rate, radius=None, y0=None, noise_std=0.1,\n **kwargs):\n\n # get initial state\n if y0 is None:\n y0 = np.random.rand(2) * 2 - 1\n if radius is None:\n radius = np.sqrt(np.random.uniform(0.5, 4.5))\n y0 = y0 / np.sqrt((y0 ** 2).sum()) * (radius)\n\n spring_ivp = rk(lambda t, y: 
dynamics_fn(t, y), t_span, y0,\n t_eval=np.arange(0, t_span[1], timescale),\n rtol=1e-12, atosl=1e-12, method='DOP853')\n\n accum = spring_ivp.y.T\n ssr = int(ssr / timescale)\n accum = accum[::ssr]\n\n daccum = [dynamics_fn(None, accum[i]) for i in range(accum.shape[0])]\n energies = []\n for i in range(accum.shape[0]):\n energies.append(np.sum(hamiltonian_fn(accum[i])))\n\n return accum, np.array(daccum), energies\n\n def get_dataset(name, num_trajectories, NUM_PARTS, T_max, dt, sub_sample_rate, seed=seed, test_split=0.5, **kwargs):\n data = {'meta': locals()}\n\n # randomly sample inputs\n np.random.seed(seed)\n data = {}\n ssr = int(sub_sample_rate / dt)\n\n xs, dxs, energies, ks, ms = [], [], [], [], []\n for s in range(num_trajectories):\n x, dx, energy = get_trajectory(t_span=[0, T_max], timescale=dt, ssr=sub_sample_rate)\n\n x += np.random.randn(*x.shape) * noise_std\n dx += np.random.randn(*dx.shape) * noise_std\n\n xs.append(x)\n dxs.append(dx)\n energies.append(energy)\n ks.append([1])\n ms.append([1])\n\n data['x'] = np.concatenate(xs)\n data['dx'] = np.concatenate(dxs)\n data['energy'] = np.concatenate(energies)\n data['ks'] = np.concatenate(ks)\n data['mass'] = np.concatenate(ms)\n\n f = open(name + \".pkl\", \"wb\")\n pickle.dump(data, f)\n f.close()\n\n return data\n\n return get_dataset(name, num_trajectories, NUM_PARTS, T_max, dt, sub_sample_rate)", "def read_txt_particles(particles_file, refpart, real_particles, bucket_length, comm, madx_format, verbose):\r\n \r\n four_momentum = refpart.get_four_momentum()\r\n pmass = four_momentum.get_mass()\r\n E_0 = four_momentum.get_total_energy()\r\n p0c = four_momentum.get_momentum()\r\n\r\n myrank = comm.get_rank()\r\n mpisize = comm.get_size()\r\n \r\n if myrank==0 and verbose:\r\n if madx_format:\r\n print \"Loading madX particles from txt file: \", particles_file\r\n else:\r\n print \"Loading Synergia particles from txt file: \", particles_file\r\n\r\n if myrank == 0:\r\n particles = np.loadtxt(particles_file)\r\n num_total_particles = particles.shape[0]\r\n # broadcast num particles to all nodes\r\n MPI.COMM_WORLD.bcast(num_total_particles, root=0)\r\n else:\r\n num_total_particles = None\r\n num_total_particles = MPI.COMM_WORLD.bcast(num_total_particles, root=0)\r\n\r\n if myrank == 0:\r\n # make sure the data has the correct shape, either [n,6] without\r\n # particles IDs or [n,7] with particle IDs.\r\n if (particles.shape[1] != 6) and (particles.shape[1] != 7):\r\n raise RuntimeError, \"input data shape %shas incorrect number of particle coordinates\"%repr(particles.shape)\r\n \r\n \r\n if madx_format:\r\n # numpy manipulations to convert kinematics\r\n # convert MAD-X T=-c*dt to Synergia c*ct\r\n particles[:,4] = -particles[:,4]\r\n # convert MAD-X Delta-E/pc to Synergia delta-p/p\r\n # sqrt(((dE/p0c)+(E0/p0c))**2 - (m/p0c)**2) - (p0c/p0c)\r\n m_over_pc = pmass/p0c\r\n E_0_over_pc = E_0/p0c\r\n particles[:,5] = np.sqrt( (particles[:,5] + E_0_over_pc) *\r\n (particles[:,5] + E_0_over_pc) - m_over_pc**2 ) - 1.0\r\n \r\n\r\n # if there are no IDs, append particle ID column\r\n if particles.shape[1] != 7:\r\n particles_w_id = np.column_stack((particles,\r\n np.arange(num_total_particles, dtype='d')))\r\n else:\r\n particles_w_id = particles\r\n \r\n if myrank == 0:\r\n print \"Read \", num_total_particles, \" particles\"\r\n \r\n #Note: Synergia bunch constructor updated - commit 077b99d7 - 11/17/2016\r\n #Using old constructor throws an ArgumentError of a non-standard type.\r\n # Using a try and except to handle both 
instances.\r\n try:\r\n # try the original constructor\r\n bunch = synergia.bunch.Bunch(\r\n refpart,\r\n num_total_particles, real_particles, comm,\r\n bucket_length)\r\n except Exception, e:\r\n #look to see if it's an ArgumentError by evaluating the traceback\r\n if (not str(e).startswith(\"Python argument types in\")):\r\n raise\r\n else:\r\n # use the new constructor\r\n if verbose:\r\n print \"Using updated bunch constructor\"\r\n bunch = synergia.bunch.Bunch(\r\n refpart,\r\n num_total_particles, real_particles, comm)\r\n # now set the new parameter 'z_period_length'\r\n if bucket_length is not None:\r\n bunch.set_z_period_length(bucket_length)\r\n else:\r\n bucket_length = 1. #fix this quantity\r\n\r\n local_num = bunch.get_local_num()\r\n local_particles = bunch.get_local_particles()\r\n\r\n # Each processor will have a possibly different number of local particles.\r\n # rank 0 has to find out how many each of them has and distribute them\r\n n_particles_by_proc = MPI.COMM_WORLD.gather(local_num, 0)\r\n if myrank == 0:\r\n # copy in my particles\r\n this_rank_start = 0\r\n local_particles[:,:] = particles_w_id[0:local_num, :]\r\n this_rank_start += local_num\r\n # send particles out to other ranks\r\n for r in range(1, mpisize):\r\n this_rank_end = this_rank_start+n_particles_by_proc[r]\r\n MPI.COMM_WORLD.send(obj=particles_w_id[this_rank_start:this_rank_end, :],\r\n dest=r)\r\n this_rank_start += n_particles_by_proc[r]\r\n else:\r\n # I'm not rank 0. Receive my particles\r\n lp = MPI.COMM_WORLD.recv(source=0)\r\n local_particles[:,:] = lp[:,:]\r\n return bunch", "def com_msd_at_given_time(t, x_array, time_array, com_const=0.0):\n plumeLocation = plume_location_at_given_time(t, x_array, time_array) + com_const\n com = np.mean(plumeLocation)\n msd = np.mean(np.power(plumeLocation-com,2))\n return com, msd", "def compute_DVARS(GMtcs):\n\n GMdiff = np.diff(GMtcs,axis=1)\n DVARS = np.sqrt(np.mean(GMdiff**2,axis=0)) #rms of GMdiff\n DVARS = np.hstack((0,DVARS)) #0 pad start\n\n return DVARS", "def compute_mean_pose(particles, confident_dist=1):\n m_x, m_y, m_count = 0, 0, 0\n # for rotation average\n m_hx, m_hy = 0, 0\n for p in particles:\n m_count += 1\n m_x += p.x \n m_y += p.y \n m_hx += math.sin(math.radians(p.h))\n m_hy += math.cos(math.radians(p.h))\n\n if m_count == 0:\n return -1, -1, 0, False\n\n m_x /= m_count\n m_y /= m_count\n\n # average rotation\n m_hx /= m_count\n m_hy /= m_count\n m_h = math.degrees(math.atan2(m_hx, m_hy));\n\n # Now compute how good that mean is -- check how many particles\n # actually are in the immediate vicinity\n m_count = 0\n for p in particles:\n if grid_distance(p.x, p.y, m_x, m_y) < 1:\n m_count += 1\n\n return m_x, m_y, m_h, m_count > len(particles) * 0.95", "def compute_mean_square_speed(self):\n speeds = self.compute_speeds() # speed of all particles\n return np.mean(speeds**2) # mean square speed", "def compute_physics(tx, index_A, index_B, index_C,\n mean=[], std=[]):\n tx_new = tx[:,index_A] * tx[:,index_B] / tx[:,index_C]\n return standardize(tx_new,mean,std)", "def kinetic_energy(self):\r\n position, velocity, escaped_particles,impact, wall_collision,mom = self.box_collision_info()\r\n for j in xrange(1,self.n):\r\n abs_velocity = np.sqrt(velocity[:,0]**2+velocity[:,1]**2\r\n + velocity[:,2]**2)\r\n KE = 0.5*self.m*abs_velocity**2\r\n total_KE = np.sum(KE)\r\n invid_KE = total_KE/self.Npart\r\n\r\n return total_KE, invid_KE" ]
[ "0.7488713", "0.5663224", "0.5223949", "0.51519114", "0.5096424", "0.5066858", "0.5066858", "0.5045784", "0.5026792", "0.50021636", "0.50003314", "0.49920407", "0.49362248", "0.4934721", "0.4924958", "0.49151853", "0.4903255", "0.48586112", "0.48389977", "0.4803725", "0.47902855", "0.47805065", "0.47697356", "0.47481725", "0.47352058", "0.47087005", "0.4700997", "0.46710002", "0.4661295", "0.46359828" ]
0.7475914
1
Calculate the minimum distance between the atoms defined in sel1 and the atoms defined in sel2, as a function of time. Returns a matrix that contains the minimum distance for each atom defined in sel1. As usual, the user should supply the equilibration time, sampling time, and contact threshold value.
def dmin_sel (sim,sel1_text,sel2_text,teq,tsample) :\n    # define atom selections\n    sel1 = sim.u.select_atoms (sel1_text)\n    sel2 = sim.u.select_atoms (sel2_text)\n    # get number of atoms in selection 1\n    natoms = sel1.n_atoms\n    nslice = traj_nslice (sim.u,teq,tsample)\n    dmin = np.zeros((natoms,nslice))\n    for i,ts in enumerate(sim.u.trajectory[teq::tsample]) :\n        d = distance_array (sel1.positions,sel2.positions,\n                            box=ts.dimensions)\n        dmin[:,i] = d.min(axis=1)\n    return dmin
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def minimum_subset_distance(D, limits1, limits2):\n score = numpy.ones( (limits1[1]) )\n for i in xrange(limits1[1]):\n for j in xrange(limits2[1]-limits2[0]):\n score[i] = min(score[i], D[i,j+limits2[0]-1])\n #print i, j, D[i,j+limits2[0]-1], score[i], min(score[i], D[i,j+limits2[0]-1])\n return score", "def minimalDistance(a1, a2, b1, b2):\n adir = a2 - a1\n bdir = b2 - b1\n amid = a1 + 0.5 * adir\n s = b1 - amid\n A = np.dot(bdir, bdir)\n B_2 = np.dot(bdir, s)\n lambda_beta = - B_2 / A\n bOpt = lambda_beta * bdir + b1\n s = a1 - bOpt\n A = np.dot(adir, adir)\n B_2 = np.dot(adir, s)\n lambda_alpha = - B_2 / A\n aOpt = lambda_alpha * adir + a1\n Delta = bOpt - aOpt\n return np.sqrt(np.dot(Delta, Delta))", "def match_min(coords1,coords2,tail1=(),tail2=()):\n nc=len(coords1)\n np1=len(coords1[0])\n np2=len(coords2[0])\n a1=array(coords1)\n a2=array(coords2)\n nt1=len(tail1)\n for i in range(nt1): \n if len(tail1[i])!= np1: raise 'Not the same lenght as coordinates 1'\n nt2=len(tail2)\n for i in range(nt2): \n if len(tail2[i])!= np2: raise 'Not the same lenght as coordinates 2'\n match=zeros(np1, int)-1\n\n dist_min=zeros(np1)*1.\n\n for j in range(np1):\n #dist=sqrt(add.reduce((a1[:,j,NewAxis]-a2[:,:])**2))\n a1j = a1[:,j]\n dist=add.reduce((reshape(a1j, (len(a1j), 1)) - a2)**2)\n i_min=argmin(dist)\n dist_min[j]=dist[i_min]\n match[j]=i_min\n\n salida=list(a1)\n for i in range(nt1):salida.append(tail1[i])\n \n for i in range(nt2):\n if type(tail2[i][0])==type('si'):\n t=[]\n for j in match: t.append(tail2[i][j])\n else:\n t=take(tail2[i],match)\n salida.append(t)\n\n salida.append(dist_min)\n return tuple(salida)", "def set_min_dist(S1, S2):\n ret =[]\n if len(S2)>len(S1):\n tmp = S1\n S1=S2\n S2=tmp\n \n for x in S1:\n min_x=((x[0]-S2[0][0])**2+(x[1]-S2[0][1])**2)**0.5\n for y in S2:\n d = ((x[0]-y[0])**2+(x[1]-y[1])**2)**0.5\n if d<min_x:\n min_x = d\n ret.append(min_x)\n\n return ret", "def min_distance(s1, s2):\n n = len(s1)\n m = len(s2)\n matrix = [([0]*(m+1)) for i in xrange(n+1)]\n for i in xrange(m+1):\n matrix[0][i] = i\n for i in xrange(n+1):\n matrix[i][0] = i\n for i in xrange(1,n+1):\n for j in xrange(1,m+1):\n temp = min(matrix[i-1][j]+1, matrix[i][j-1]+1)\n d = 0 if s1[i-1]==s2[j-1] else 1\n matrix[i][j] = min(temp, matrix[i-1][j-1]+d)\n return matrix[n][m]", "def distances(a, b):\n # 1. Set up a list of lists\n matrix = [[None for i in range(len(b)+1)] for j in range(len(a)+1)]\n\n # 2. Add value for base cases (1st row/column)\n ## First position is always None\n matrix[0][0] = (0, None)\n\n ## 1st row and column\n for i in range(1, len(b) + 1):\n matrix[0][i] = (i, Operation.INSERTED)\n\n\n for j in range(1, len(a) + 1):\n matrix[j][0] = (j, Operation.DELETED)\n\n\n # 3. 
Add other values - find min of all options\n for i in range(1, len(a)+1):\n for j in range(1, len(b)+1):\n\n if a[i-1] == b[j-1]:\n cost = matrix[i-1][j-1][0]\n operation = Operation.SUBSTITUTED\n matrix[i][j] = (cost, operation)\n\n else:\n # Calculate substitutin, deletion and insertion\n substitution = matrix[i - 1][j - 1][0] + 1\n deletion = matrix[i-1][j][0] + 1\n insertion = matrix[i][j-1][0] + 1\n\n # Compare\n compare = [deletion, insertion, substitution]\n cost = min(compare)\n op = compare.index(min(compare))\n if op == 0:\n operation = Operation.DELETED\n if op == 1:\n operation = Operation.INSERTED\n if op == 2:\n operation = Operation.SUBSTITUTED\n\n matrix[i][j] = (cost, operation)\n return matrix", "def minimum_distance(object_1, object_2):\n\n # package import\n import numpy as np\n\n # main algorithm\n minimum_distance = 100000\n\n for coord_1 in object_1:\n for coord_2 in object_2:\n distance_btwn_coords = np.linalg.norm(coord_1 - coord_2)\n if distance_btwn_coords == 0:\n minimum_distance = distance_btwn_coords\n return float(minimum_distance)\n elif distance_btwn_coords < minimum_distance:\n minimum_distance = distance_btwn_coords\n\n return float(minimum_distance)", "def nearest_difference(evs1, evs2):\n\n sigma = calc_sigma(evs1)\n nearestDiff = zeros((vecLen-1), dtype='d')\n for j in range(vecLen-1):\n minimum = infty\n for i in range(vecLen2):\n diff = absolute(evs1[j] - evs2[i]) / sigma[j]\n if diff < minimum:\n minimum = diff\n del i\n nearestDiff[j] = minimum\n del j\n\n return nearestDiff", "def match_min2(coords1,coords2,tail1=(),tail2=()):\n nc=len(coords1)\n np1=len(coords1[0])\n np2=len(coords2[0])\n a1=array(coords1)\n a2=array(coords2)\n nt1=len(tail1)\n for i in range(nt1): \n if len(tail1[i])!= np1: raise 'Not the same lenght as coordinates 1'\n nt2=len(tail2)\n for i in range(nt2): \n if len(tail2[i])!= np2: raise 'Not the same lenght as coordinates 2'\n match=zeros(np1, int)-1\n dist_min=zeros(np1)*1.\n x2=zeros(np1)*1.\n y2=zeros(np1)*1.\n for j in range(np1):\n #dist=add.reduce((a1[:,j,NewAxis]-a2[:,:])**2)\n a1j = a1[:,j]\n dist=add.reduce((reshape(a1j, (len(a1j), 1)) - a2)**2)\n i_min=argmin(dist)\n dist_min[j]=dist[i_min]\n x2[j],y2[j]=a2[0,i_min],a2[1,i_min]\n match[j]=i_min\n \n salida=list(a1)\n salida.append(x2)\n salida.append(y2)\n\n for i in range(nt1):salida.append(tail1[i])\n \n for i in range(nt2):\n if type(tail2[i][0])==type('si'):\n t=[]\n for j in match: t.append(tail2[i][j])\n else:\n t=take(tail2[i],match)\n salida.append(t)\n\n salida.append(dist_min)\n return tuple(salida)", "def _calc_min_distance(self, walker):\n\n cell_lengths, cell_angles = box_vectors_to_lengths_angles(walker.state['box_vectors'])\n\n t2 = time.time()\n # make a traj out of it so we can calculate distances through\n # the periodic boundary conditions\n walker_traj = mdj.Trajectory(walker.state['positions'],\n topology=self._mdj_top,\n unitcell_lengths=cell_lengths,\n unitcell_angles=cell_angles)\n\n t3 = time.time()\n # calculate the distances through periodic boundary conditions\n # and get hte minimum distance\n min_distance = np.min(mdj.compute_distances(walker_traj,\n it.product(self.ligand_idxs,\n self.receptor_idxs),\n periodic=self._periodic)\n )\n t4 = time.time()\n logging.info(\"Make a traj: {0}; Calc dists: {1}\".format(t3-t2,t4-t3))\n\n return min_distance", "def min_dst(tet1, tet2, allow_zero=True):\n dists = ssd.cdist(tet1, tet2)\n if not allow_zero:\n dists[dists == 0] = np.inf\n return dists.min(axis=1)\n\n #dists = np.empty(tet1.shape[0])\n 
#for i, t1 in enumerate(tet1):\n # min_dist = np.sum((tet2 - t1) ** 2, axis=1)\n # if not allow_zero:\n # dists[i] = np.min(min_dist[min_dist != 0])\n # else:\n # dists[i] = np.min(min_dist)\n #return np.sqrt(dists)", "def closest_distance(self, time, other_object, other_time):\n ti = np.where(self.times == time)[0][0]\n oti = np.where(other_object.times == other_time)[0][0]\n xs = self.x[ti].ravel()[self.masks[ti].ravel() == 1]\n xs = xs.reshape(xs.size, 1)\n ys = self.y[ti].ravel()[self.masks[ti].ravel() == 1]\n ys = ys.reshape(ys.size, 1)\n o_xs = other_object.x[oti].ravel()[other_object.masks[oti].ravel() == 1]\n o_xs = o_xs.reshape(1, o_xs.size)\n o_ys = other_object.y[oti].ravel()[other_object.masks[oti].ravel() == 1]\n o_ys = o_ys.reshape(1, o_ys.size)\n distances = (xs - o_xs) ** 2 + (ys - o_ys) ** 2\n return np.sqrt(distances.min())", "def distance(st_one, st_two, start, end, nsamples):\n t = np.linspace(start+(end-start)/nsamples, end, nsamples)\n st_one = np.insert(st_one, 0, start)\n st_one = np.append(st_one, end)\n st_two = np.insert(st_two, 0, start)\n st_two = np.append(st_two, end)\n\n # We compute the corner spikes for all the time instants we consider\n # corner_spikes is a 4 column matrix [t, tp1, tf1, tp2, tf2]\n corner_spikes = np.zeros((nsamples,5))\n\n ibegin_one = 0\n ibegin_two = 0\n corner_spikes[:,0] = t\n for itc, tc in enumerate(t):\n corner_spikes[itc,1:3], ibegin_t1 = _find_corner_spikes(tc, st_one,\n ibegin_one,\n start, end)\n corner_spikes[itc,3:5], ibegin_t2 = _find_corner_spikes(tc, st_two,\n ibegin_two,\n start, end)\n\n #print corner_spikes\n xisi = np.zeros((nsamples,2))\n xisi[:,0] = corner_spikes[:,2] - corner_spikes[:,1]\n xisi[:,1] = corner_spikes[:,4] - corner_spikes[:,3]\n norm_xisi = np.sum(xisi,axis=1)**2.0\n\n # We now compute the smallest distance between the spikes in st_two\n # and the corner spikes of st_one\n # with np.tile(st_two,(N,1)) we build a matrix :\n # np.tile(st_two,(N,1)) = [st_two st_two st_two]' -\n # np.tile(reshape(corner_spikes,(N,1)), st_two.size) =\n # [corner corner corner]'\n\n dp1 = np.min(np.fabs(np.tile(st_two,(nsamples,1))\n - np.tile(np.reshape(corner_spikes[:,1],(nsamples,1)),\n st_two.size)),\n axis=1)\n df1 = np.min(np.fabs(np.tile(st_two,(nsamples,1))\n - np.tile(np.reshape(corner_spikes[:,2],(nsamples,1)),\n st_two.size)),\n axis=1)\n # And the smallest distance between the spikes in st_one and the corner spikes of st_two\n dp2 = np.min(np.fabs(np.tile(st_one,(nsamples,1))\n - np.tile(np.reshape(corner_spikes[:,3],\n (nsamples,1)),st_one.size)),\n axis=1)\n df2 = np.min(np.fabs(np.tile(st_one,(nsamples,1))\n - np.tile(np.reshape(corner_spikes[:,4],(nsamples,1)),\n st_one.size)),\n axis=1)\n\n xp1 = t - corner_spikes[:,1]\n xf1 = corner_spikes[:,2] - t\n xp2 = t - corner_spikes[:,3]\n xf2 = corner_spikes[:,4] - t\n\n S1 = (dp1 * xf1 + df1 * xp1)/xisi[:,0]\n S2 = (dp2 * xf2 + df2 * xp2)/xisi[:,1]\n\n inst_dist = (S1 * xisi[:,1] + S2 * xisi[:,0]) / (norm_xisi/2.0)\n\n return t, inst_dist", "def shortest_distance(puzzle_input: List[str], satellite_name_a: str, satellite_name_b: str) -> Tuple[int, str]:\n orbit_tree = make_tree(puzzle_input)\n\n distances_satellite_a = distance_to_objects(orbit_tree, satellite_name_a)\n\n distances_satellite_b = distance_to_objects(orbit_tree, satellite_name_b)\n\n # & gives the intersection between the sets of keys, leaving only the objects they both orbit directly/indirectly\n objects_in_common = set(distances_satellite_a.keys()) & set(distances_satellite_b.keys())\n distances 
= [\n # Sum of distance from satellite a, b to each object, object name\n (distances_satellite_a[obj] + distances_satellite_b[obj], obj)\n for obj in objects_in_common\n ]\n\n min_distance, satellite_name = min(distances)\n return min_distance, satellite_name", "def get_distance_of_closest_intersections(commands1, commands2):\n path1 = get_one_path(commands1)\n path2 = get_one_path(commands2)\n intersections = set(path1).intersection(set(path2))\n return min(map(lambda x: np.abs(x[0])+np.abs(x[1]), intersections))", "def smallestValue(self, nd1, nd2):\r\n minnd1 = min(nd1.values())\r\n minnd2 = min(nd2.values())\r\n totalmin = min(minnd1,minnd2)\r\n return totalmin", "def min_horizontal_dist_meters(coords, targets, is_geo=False):\n xe = coords[:, 0]\n ye = coords[:, 1]\n n = len(xe)\n d = np.zeros(n)\n for j in range(n):\n d1 = dist_in_meters(targets, [xe[j], ye[j]], is_geo=is_geo)\n d[j] = d1.min()\n return d", "def distance(self, t1, t2, costs=unit_costs):\r\n #print costs\r\n #raw_input(\"pause\")\r\n # Cf. Zhang & Shasha:p.1252-1253\r\n #===========================================================================\r\n # Use an embedded function, so T1,T2, l1,l2, and TD are available from the\r\n # name space of the outer function and don't need to be dragged around in\r\n # each function call\r\n # TREEDIST function\r\n #===========================================================================\r\n def edit_dist(i, j):\r\n \"\"\"\r\n compute edit distance between two subtrees rooted in nodes i and j\r\n respectively\r\n \"\"\"\r\n # temporary array for forest distances\r\n FD = ForestDist()\r\n for n in range(l1[i], i+1):\r\n FD[ (l1[i],n), None ] = ( FD[ (l1[i],n-1), None ] + \r\n costs(T1[n], None) ) #NOT SURE ABOUT THE T1[n].label --> TO BE CHECKED\r\n \r\n for m in range(l2[j], j+1):\r\n FD[ None, (l2[j],m) ] = ( FD[ None, (l2[j],m-1) ] + \r\n costs(None, T2[m]) )\r\n \r\n for n in range(l1[i], i+1):\r\n for m in range(l2[j], j+1):\r\n if l1[n] == l1[i] and l2[m] == l2[j]:\r\n FD[ (l1[i],n), (l2[j],m) ] = min(\r\n FD[(l1[i],n-1),(l2[j],m)] + costs(T1[n], None),\r\n FD[(l1[i],n),(l2[j],m-1)] + costs(None, T2[m]),\r\n FD[(l1[i],n-1),(l2[j],m-1)] + costs(T1[n], T2[m]))\r\n \r\n TD[n, m] = FD[ (l1[i],n), (l2[j],m) ]\r\n else:\r\n FD[ (l1[i],n), (l2[j],m) ] = min(\r\n FD[(l1[i],n-1),(l2[j],m)] + costs(T1[n], None),\r\n FD[(l1[i],n),(l2[j],m-1)] + costs(None, T2[m]),\r\n FD[(l1[i],n-1),(l2[j],m-1)] + TD[n,m])\r\n return TD[i,j]\r\n \r\n \r\n #Compute T1[] and T2[]\r\n T1 = self.postorder(t1)\r\n T2 = self.postorder(t2)\r\n \r\n # Compute l()\r\n l1 = self.leftmost_leaf_descendant_indices(T1)\r\n l2 = self.leftmost_leaf_descendant_indices(T2)\r\n \r\n # LR_keyroots1 and LR_keyroots2\r\n kr1 = self.key_root_indices(l1)\r\n kr2 = self.key_root_indices(l2)\r\n \r\n # permanent treedist array\r\n TD = dict()\r\n for i in kr1:\r\n for j in kr2:\r\n edit_dist(i, j)\r\n \r\n #self.print_matrix(T1, T2, TD)\r\n \r\n return TD[i,j]", "def min_distance(distance, spt_set, self_nodes):\n minimum = sys.maxsize\n minimum_node = None\n for curr_node in self_nodes.values():\n if distance[curr_node.id] < minimum and not spt_set[curr_node.id]:\n minimum = distance[curr_node.id]\n minimum_node = curr_node\n return minimum_node", "def measureMotorSpecsOne(self,distance):\n #self.dataQueue=queue.Queue()\n #controlQueue=queue.Queue()\n returnQueue1=queue.Queue()\n #returnQueue2=queue.Queue()\n \n t1=threading.Thread(target=self.updaterTest,args=(distance,returnQueue1,))\n 
#t2=threading.Thread(target=self.xMotorTest,args=(distance,returnQueue1,))\n #t3=threading.Thread(target=self.yMotorTest,args=(distance,returnQueue2,))\n t1.start()\n t1.join()\n #t2.start()\n #t3.start()\n while returnQueue1.empty():# and returnQueue2.empty():\n pass\n\n\n speed1=distance/returnQueue1.get()\n #speed2=distance/returnQueue2.get()\n return speed1#,speed2", "def runmaxmin(self):\n import random\n random.seed(self.seed)\n mindist_ptolandmarkset = np.full(self.pointcloud.size, np.inf)\n self.subsetindices = []\n for i in xrange(self.subsetsize):\n if i == 0:\n selected_index = random.randint(0, self.pointcloud.size - 1)\n # update min for all the rest indices\n # update min for this index to 0.\n for z in xrange(self.pointcloud.size):\n # if z == selected_index:\n # mindist_ptolandmarkset[z] = 0.0\n # else:\n mindist_ptolandmarkset[z] = self.pointcloud.distmat[selected_index][z]\n else:\n selected_index = np.argmax(mindist_ptolandmarkset)\n # update minimum distance for all points\n for z in xrange(self.pointcloud.size):\n mindist_ptolandmarkset[z] = min(mindist_ptolandmarkset[z],\n self.pointcloud.distmat[selected_index][z])\n\n self.subsetindices.append(selected_index)\n\n self.subsetpointcloud = pc.PointCloud(self.pointcloud.points[self.subsetindices])", "def calc_DC_supply(t_0, t_1):\n if t_0 == 0:\n t_0 = 1E6\n if t_1 > 0:\n tmin = min(t_0, t_1)\n else:\n tmin = t_0\n return tmin", "def closest_distance(node_a, node_b):\n min_distance = 999999\n for loc_a in node_a.locations:\n for loc_b in node_b.locations:\n distance = abs(loc_a - loc_b)\n if distance < min_distance:\n min_distance = distance\n return min_distance", "def find_closest_atom(coords1, coords2):\n\n coords1 = np.array(coords1)\n coords2 = np.array(coords2)\n diff = coords2[:, np.newaxis] - coords1[np.newaxis, :]\n dist = np.einsum('ijk->ij', diff**2)**0.5\n index = np.argmin(dist)\n return index", "def _compute_set_distances(nonzeros_1, nonzeros_2):\n distances = np.zeros(len(nonzeros_1))\n for i, _ in enumerate(distances):\n distances[i] = np.min(\n _norm_along_last_axis(nonzeros_1[i].reshape(1, -1) - nonzeros_2)\n )\n return distances", "def minimum_time_to_straight_line(a, b, a1_min, v1_0, s1_0, a2_max, v2_0, v2_max, s2_0):\n A, B, C = (a2_max - a*a1_min)*0.5, v2_0 - a*v1_0, s2_0 - a*s1_0 - b\n t = minimimum_positive_root(A, B, C)\n args = (s1_0, v1_0, a1_min, s2_0, v2_0, a2_max)\n\n if a2_max == 0 and a1_min == 0:\n return [(t, args)]\n elif a1_min == 0:\n time_to_v2_max = (v2_max - v2_0)/a2_max\n time_to_v1_min = 0.\n elif a2_max == 0:\n time_to_v1_min = -v1_0/a1_min\n time_to_v2_max = 0.\n else:\n time_to_v2_max = (v2_max - v2_0)/a2_max\n time_to_v1_min = -v1_0/a1_min\n\n if t < min(time for time in (time_to_v2_max, time_to_v1_min) if time > 0):\n return [(t, args)]\n elif a2_max == 0 or 0 < time_to_v1_min <= time_to_v2_max:\n t = time_to_v1_min\n s1_0, v1_0, a1_min = integrate_abscisse(s1_0, v1_0, a1_min)(t), 0., 0.\n s2_0, v2_0 = integrate_abscisse(s2_0, v2_0, a2_max)(t), integrate_speed(v2_0, a2_max)(t)\n if v2_0 == v2_max: a2_max = 0.\n elif a1_min == 0 or 0 < time_to_v2_max < time_to_v1_min:\n t = time_to_v2_max\n s1_0, v1_0 = integrate_abscisse(s1_0, v1_0, a1_min)(t), integrate_speed(v1_0, a1_min)(t)\n s2_0, v2_0, a2_max = integrate_abscisse(s2_0, v2_0, a2_max)(t), v2_max, 0.\n else:\n raise ValueError('Uncaught case')\n return ([(t, args)] +\n minimum_time_to_straight_line(a, b, a1_min, v1_0, s1_0, a2_max, v2_0, v2_max, s2_0))", "def _minimum_distance(self,arg):\n return min([abs(arg-e) for e in 
self if not e is arg])", "def _nearest(arrlist_1, arrlist_2):\n tree = KDTree(arrlist_1);\n pts = tree.query(arrlist_2)\n\n return tree.data[pts[1][pts[0].argmin()]]", "def shutter_min_times(self):\n otime, ctime = ct.c_int(), ct.c_int()\n self.lib.GetShutterMinTimes(ct.pointer(ctime), ct.pointer(otime))\n return (otime.value, ctime.value)", "def _distance2_line_endpoints(line1, line2):\n (A,B),(C,D) = line1, line2\n R2=lambda u,v: (u[0]-v[0])**2+(u[1]-v[1])**2\n pairs = zip((A,A,B,B),(C,D,C,D))\n r2 = [R2(pair[0],pair[1]) for pair in pairs]\n mini=sorted(zip(r2,pairs),key=lambda a,b: a)[0]\n #R2_min = min((R2(A,C), R2(A,D), R2(B,C), R2(B,D)))\n return mini[0], mini[1][0], mini[1][1]" ]
[ "0.5895745", "0.5762979", "0.57414365", "0.5732029", "0.54276377", "0.5345767", "0.5334685", "0.5314768", "0.5237118", "0.51300615", "0.51152414", "0.50853014", "0.5079385", "0.50745434", "0.5033808", "0.50107294", "0.5001764", "0.49995542", "0.4967053", "0.49510542", "0.48911503", "0.48834786", "0.48830175", "0.4872934", "0.4867264", "0.4863926", "0.48514175", "0.48476732", "0.48244867", "0.4816136" ]
0.76610744
0
Get the image index of all particles in the simulation, at frame 'frame_id'
def particle_images (sim,frame_id) :\n    # get positions of all particles: define first the atom selection, then jump to\n    # the user-requested trajectory frame, get the box dimensions (currently works\n    # only for orthorhombic boxes, then calculate the image indices\n    atoms = sim.u.select_atoms ('all')\n    ts = sim.u.trajectory[frame_id]\n    L = ts.dimensions[:3]\n    pos = atoms.positions + L/2.\n    return pos//L
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def image_id_at(self, i):\n return i", "def _get_frame_index(self, frame):\n if isinstance(frame, cf.CoordinateFrame):\n frame = frame.name\n #frame_names = [getattr(item[0], \"name\", item[0]) for item in self._pipeline]\n frame_names = [step.frame if isinstance(step.frame, str) else step.frame.name for step in self._pipeline]\n return frame_names.index(frame)", "def _get_image_index_position(self) :\n \n return self._image_index_position", "def get_img_indices():\n if K.image_dim_ordering() == 'th':\n return 0, 1, 2, 3\n else:\n return 0, 3, 1, 2", "def frame_idx(self) -> int:\n pass", "def image(self, state):\n return state['positions']", "def get_frame_index(self, global_idx):\n vid_idx_idx = np.searchsorted(self.num_frames_array, global_idx, side='right')-1\n frame_idx = global_idx - self.num_frames_array[vid_idx_idx]\n vid_idx = self.task_ids[int(vid_idx_idx)]\n return vid_idx, frame_idx", "def _iter_indices(self, frame, y):\n pass", "def _create_img_id_to_idx(self):\n with h5py.File(self.image_features_path, 'r') as features_file:\n coco_ids = features_file['ids'][()]\n coco_id_to_index = {id: i for i, id in enumerate(coco_ids)}\n return coco_id_to_index", "def neighbors(self, pid):\n x, y = self.frametracks[self.frametracks.particle == pid][['x', 'y']].values[0]\n return self.queryPoint(x, y)", "def _state_index(state):\n delta_y, delta_x, bird_lmh, pipe_lmh, is_flapping = state\n actions, height, width, _, _, _ = Q.shape\n\n y = int((height / 2) + (delta_y / step_r) - 1)\n x = int((width / 2) + (delta_x / step_c) - 1)\n\n return y, x, bird_lmh, pipe_lmh, is_flapping", "def frame_index(self):\n return self._findex", "def getIDsInFrame(self, frame, filtered = True):\n\n if (not filtered) or (not self._filter_config):\n # get all ids in frame\n output = self.pos[frame][4]\n else:\n output = []\n for idx, id_seq in enumerate(self.id_seq):\n # check if the track passed the filter\n if idx in self.index_filter:\n # now check if there is a track for this time\n id_ = self.Track2ID(idx, frame)\n if id_:\n output.append(id_)\n\n return output", "def get_instance_index(self):\n return np.unique([tp[0] for tp in self._innercontainer])", "def get_frameidx(self, fps):\n return int(self.hours * MIN_PER_H * S_PER_MIN * fps \\\n + self.minutes * S_PER_MIN * fps \\\n + self.seconds * fps \\\n + self.milliseconds // (100 / fps))", "def _load_image_set_index(self):\n image_index = self._load_annotations().keys()\n return image_index", "def data_indices(img, pattern, channel):\n h, w = img.shape\n for i in range(2):\n for j in range(2):\n if pattern[i][j] == channel:\n index_y = np.arange(i, h, 2)\n index_x = np.arange(j, w, 2)\n data = img[i::2, j::2]\n \n return index_x, index_y, data", "def get_index(self, x, y):\n i = (y - self.y0) // self.dy\n j = (x - self.x0) // self.dx\n i = min(max(i, 0), self.n-1)\n j = min(max(j, 0), self.m-1)\n return [i, j]", "def get_frame(self, frame: int) -> BaseImage:\n return self.sequence[frame]", "def xy(self, photons):\n flatbeam = self.beamImage.flatten()\n beamsorted = np.argsort(flatbeam)\n ind = np.searchsorted(flatbeam[beamsorted], photons[\"resID\"])\n return np.unravel_index(beamsorted[ind], self.beamImage.shape)", "def get_track_mask_idxes(self):\n instance_id_num_pts = defaultdict(lambda: 0)\n instance_id_lifetimes = defaultdict(lambda: [10000, -1])\n\n for frame_num, labels_per_frame in enumerate(self._frame_labels):\n for id in labels_per_frame.unique().tolist():\n instance_id_num_pts[id] += (labels_per_frame == id).long().sum().item()\n 
instance_id_lifetimes[id][0] = min(frame_num, instance_id_lifetimes[id][0])\n instance_id_lifetimes[id][1] = max(frame_num, instance_id_lifetimes[id][1])\n\n instance_id_lifetimes = {k: v[1] - v[0] for k, v in instance_id_lifetimes.items()}\n return self._frame_labels, instance_id_num_pts, instance_id_lifetimes", "def index(self):\n return self.frame.index", "def GetTileIndex(self, pos):\r\n #pixel = rpg_image.GetPixel(self.image, pos)\r\n try:\r\n pixel = self.image_buffer[pos[0]][pos[1]]\r\n except IndexError, e:\r\n pixel = -1\r\n \r\n return pixel", "def getSvIDsInFrame(self, frame, filtered = True):\n ids = self.getIDsInFrame(frame, filtered)\n if (not filtered) or (not self._filter_config):\n svIDs = self.pos[frame][3]\n else:\n svIDs = [[],]*len(ids)\n for i, id_num in enumerate(ids):\n idx = self.pos[frame][4].index(id_num)\n svIDs[i] = self.pos[frame][3][idx]\n\n return ids, svIDs", "def __get_image_id(self):\n return self.__get_multi_images_ids(1)", "def __get_img_augm_idx__(self, idx: int):\n\n images_done = idx * self.batch_size\n return divmod(images_done, self.gen_count)", "def frame(self, frame):\n if self.vertical:\n cell = ((frame-1)//self.rows)+1\n row = frame-(cell-1)*self.rows\n else:\n row = ((frame-1)//self.cells)+1\n cell = frame-(row-1)*self.cells\n\n return cell, row", "def get_raster_ids(self):\n return numpy.array(range(self._lo_atom, self._lo_atom + self._n_atoms))", "def get_days_index(self):\n return np.where(self.np_image_matrix()[3] == 3)[0]", "def get_index_list(self, relative_to, of_particles=None):\n\n # Implementation for base snapshot\n\n if self is not relative_to:\n raise RuntimeError(\"Not a descendant of the specified simulation\")\n if of_particles is None:\n of_particles = np.arange(len(self))\n\n return of_particles" ]
[ "0.64790046", "0.6461773", "0.63308996", "0.60192746", "0.5952561", "0.5895804", "0.58919054", "0.5886345", "0.5864442", "0.5850456", "0.58418465", "0.5816864", "0.57517457", "0.57352793", "0.5733944", "0.5714067", "0.56858724", "0.5682771", "0.5660249", "0.5656632", "0.5606235", "0.55904573", "0.5580041", "0.5573954", "0.5555938", "0.5543103", "0.55393314", "0.55235577", "0.5468876", "0.54680574" ]
0.82224417
0
For the simulation 'sim', calculate the matrix of binding events between the polymer and the tracers. Returns a contact matrix of shape (ntracers,nslice,npolymer).
def contacts_t (sim,polymer_text,tracer_text,teq,tsample,threshold) :\n    u = sim.u\n    polymer = u.select_atoms (polymer_text)\n    tracers = u.select_atoms (tracer_text)\n    ntracers = tracers.n_atoms\n    npolymer = polymer.n_atoms\n    nslice = mbt.traj_nslice(u,teq,tsample)\n    C = np.zeros((ntracers,nslice,npolymer),dtype=bool)\n    for i,ts in enumerate(u.trajectory [teq::tsample]) :\n        d = distance_array (tracers.positions,polymer.positions,\n                            box=ts.dimensions)\n        c = d<threshold\n        C[:,i,:] = c\n    return C
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def contacts_with (sim,polymer_text,tracers_text,bindingsites_text,teq,tsample,threshold) :\n # select polymer, tracers, and binding sites\n polymer = sim.u.select_atoms (polymer_text)\n tracers = sim.u.select_atoms (tracers_text)\n bss = sim.u.select_atoms (bindingsites_text)\n # select binding site indices\n bs_n = bss.n_atoms\n bs_idx = bss.indices\n # select non-binding site indices\n polymer_idx = polymer.indices\n nbs_idx = np.setdiff1d (polymer_idx,bs_idx)\n nbs_n = nbs_idx.size\n # evaluate contacts with binding sites and non-binding sites for each\n # independent simulation snapshot\n c = []\n for i,ts in enumerate(sim.u.trajectory[teq::tsample]) :\n d = distance_array (polymer.positions,tracers.positions,\n box=ts.dimensions)\n contacts = d<threshold\n cB = np.sum (contacts[bs_idx]).astype('float')\n cA = np.sum (contacts[nbs_idx]).astype('float')\n if cA != 0 :\n c.append ((cB/cA) / (float(bs_n)/nbs_n))\n return np.mean(np.array(c))", "def getContactMatrix(self):\n x_mesh = np.array(np.meshgrid(self._xe, self._xe))\n y_mesh = np.array(np.meshgrid(self._ye, self._ye))\n # calculate distances between all plants\n distances = ((x_mesh[0] - x_mesh[1])**2 +\n (y_mesh[0] - y_mesh[1])**2)**.5\n\n roots = np.array(np.meshgrid(self._r_root, self._r_root))\n root_sums = roots[0] + roots[1]\n\n # probability for root contact\n p_meeting = 1 - distances / root_sums\n # probability is 0 for plant = plant (diagonal)\n np.fill_diagonal(p_meeting, 0)\n\n # generate matrix with random floats\n probs = np.random.random((len(self._xe), len(self._xe)))\n # reshape to a triangular matrix\n probs = np.triu(probs)\n # Mirror upper triangle of the matrix\n probs += probs.transpose()\n\n contact_matrix = np.zeros(np.shape(x_mesh[0]))\n indices = np.where(probs < p_meeting)\n contact_matrix[indices] += 1\n\n return contact_matrix", "def contacts(self, tol=1e-8):\n idx, nbor = self.backbone(tol=tol)\n return self.Contacts(\n np.sum(np.triu(nbor, 1)),\n (np.sum(idx) - 1) * self.ndim + 1,\n np.sum(~idx))", "def jumping_matrix (sim,polymer_text,tracer_text,teq,tsample,threshold) :\n # define polymer and tracers\n u = sim.u\n polymer = u.select_atoms(polymer_text)\n tracers = u.select_atoms(tracer_text)\n n_polymer = polymer.n_atoms\n n_tracers = tracers.n_atoms\n # initialize jumping matrix and first distance matrix d_prev\n J = np.zeros ((n_polymer,n_polymer),dtype=np.int32)\n ts = u.trajectory [teq]\n d_prev = distance_array (polymer.positions,tracers.positions,\n box=ts.dimensions)\n D_prev = d_prev<threshold\n for ts in u.trajectory [teq::tsample] :\n # get distance matrix at current time step\n d_next = distance_array (polymer.positions,tracers.positions,\n box=ts.dimensions)\n D_next = d_next<threshold\n # get jumps of all tracers and add it to the jumping matrix\n for i in xrange (n_tracers) :\n t_prev = D_prev [:,i]\n t_next = D_next [:,i].reshape ((n_polymer,1))\n t = t_prev * t_next\n J += t\n D_prev = D_next.copy()\n return J", "def _flatten_to_arrays_and_conns(cls, network_model):\n component_arrays = {}\n connection_groups = {}\n # Create flattened component with all synapses combined with the post-\n # synaptic cell dynamics using MultiDynamics\n for pop in network_model.populations:\n # Get all the projections that project to/from the given population\n receiving = [p for p in network_model.projections\n if (pop == p.post or\n (p.post.nineml_type == 'Selection' and\n pop in p.post.populations))]\n sending = [p for p in network_model.projections\n if (pop == p.pre or\n (p.pre.nineml_type == 
'Selection' and\n pop in p.pre.populations))]\n # Create a dictionary to hold the cell dynamics and any synapse\n # dynamics that can be flattened into the cell dynamics\n # (i.e. linear ones).\n sub_components = {cls.CELL_COMP_NAME: pop.cell}\n # All port connections between post-synaptic cell and linear\n # synapses and port exposures to pre-synaptic cell\n internal_conns = []\n exposures = []\n\n def add_exposures(exposures_to_add):\n \"\"\"\n Adds exposures to a \"set\" of exposures. If 9ML objects were\n hashable could use a 'set'.\n \"\"\"\n for pe in exposures_to_add:\n if pe not in exposures:\n exposures.append(pe)\n\n synapses = []\n connection_property_sets = []\n # FIXME: There has to be a way of avoiding this name clash\n if any(p.name == cls.CELL_COMP_NAME for p in receiving):\n raise Pype9RuntimeError(\n \"Cannot handle projections named '{}' (why would you \"\n \"choose such a silly name?;)\".format(cls.CELL_COMP_NAME))\n for proj in receiving:\n # Flatten response and plasticity into single dynamics class.\n # TODO: this should be no longer necessary when we move to\n # version 2 as response and plasticity elements will be\n # replaced by a synapse element in the standard. It will need\n # be copied at this point though as it is modified\n synapse, proj_conns = cls._flatten_synapse(proj)\n # Get all connections to/from the pre-synaptic cell\n pre_conns = [pc for pc in proj_conns\n if 'pre' in (pc.receiver_role, pc.sender_role)]\n # Get all connections between the synapse and the post-synaptic\n # cell\n post_conns = [pc for pc in proj_conns if pc not in pre_conns]\n # Mapping of port connection role to sub-component name\n role2name = {'post': cls.CELL_COMP_NAME}\n # If the synapse is non-linear it can be combined into the\n # dynamics of the post-synaptic cell.\n try:\n if not synapse.component_class.is_linear():\n raise Pype9UnflattenableSynapseException()\n role2name['synapse'] = proj.name\n # Extract \"connection weights\" (any non-singular property\n # value) from the synapse properties\n connection_property_sets.extend(\n cls._extract_connection_property_sets(synapse,\n proj.name))\n # Add the flattened synapse to the multi-dynamics sub\n # components\n sub_components[proj.name] = synapse.clone()\n # Convert port connections between synpase and post-\n # synaptic cell into internal port connections of a multi-\n # dynamics object\n internal_conns.extend(pc.assign_names_from_roles(role2name)\n for pc in post_conns)\n # Expose ports that are needed for the pre-synaptic\n # connections\n except Pype9UnflattenableSynapseException:\n # All synapses (of this type) connected to a single post-\n # synaptic cell cannot be flattened into a single component\n # of a multi- dynamics object so an individual synapses\n # must be created for each connection.\n synapse_conns = [\n pc.append_namespace_from_roles(\n {'post': cls.CELL_COMP_NAME,\n 'pre': cls.CELL_COMP_NAME,\n 'synapse': proj.name}) for pc in post_conns]\n synapses.append(SynapseProperties(proj.name, synapse,\n synapse_conns))\n # Add exposures to the post-synaptic cell for connections\n # from the synapse\n add_exposures(chain(*(\n pc.expose_ports({'post': cls.CELL_COMP_NAME})\n for pc in post_conns)))\n # Add exposures for connections to/from the pre synaptic cell\n add_exposures(\n chain(*(pc.expose_ports(role2name)\n for pc in pre_conns)))\n role2name['pre'] = cls.CELL_COMP_NAME\n # Add exposures for connections to/from the pre-synaptic cell in\n # populations.\n for proj in sending:\n # Not required after transition 
to version 2 syntax\n synapse, proj_conns = cls._flatten_synapse(proj)\n # Add send and receive exposures to list\n add_exposures(chain(*(\n pc.expose_ports({'pre': cls.CELL_COMP_NAME})\n for pc in proj_conns)))\n # Add all cell ports as multi-component exposures that aren't\n # connected internally in case the user would like to save them or\n # play data into them\n internal_cell_ports = set(chain(\n (pc.send_port_name for pc in internal_conns\n if pc.sender_name == cls.CELL_COMP_NAME),\n (pc.receive_port_name for pc in internal_conns\n if pc.receiver_name == cls.CELL_COMP_NAME)))\n add_exposures(\n BasePortExposure.from_port(p, cls.CELL_COMP_NAME)\n for p in pop.cell.ports if p.name not in internal_cell_ports)\n dynamics_properties = MultiDynamicsProperties(\n name=pop.name + '_cell', sub_components=sub_components,\n port_connections=internal_conns,\n port_exposures=exposures)\n component = MultiDynamicsWithSynapsesProperties(\n dynamics_properties.name,\n dynamics_properties, synapse_propertiess=synapses,\n connection_property_sets=connection_property_sets)\n array_name = pop.name\n component_arrays[array_name] = ComponentArray9ML(\n array_name, pop.size, component)\n selections = {}\n for sel in network_model.selections:\n selections[sel.name] = Selection9ML(\n sel.name, Concatenate9ML(component_arrays[p.name]\n for p in sel.populations))\n arrays_and_selections = dict(\n chain(iter(component_arrays.items()), iter(selections.items())))\n # Create ConnectionGroups from each port connection in Projection\n for proj in network_model.projections:\n _, proj_conns = cls._flatten_synapse(proj)\n # Get all connections to/from the pre-synaptic cell\n pre_conns = [pc for pc in proj_conns\n if 'pre' in (pc.receiver_role, pc.sender_role)]\n # Create a connection group for each port connection of the\n # projection to/from the pre-synaptic cell\n for port_conn in pre_conns:\n ConnectionGroupClass = (\n EventConnectionGroup9ML\n if port_conn.communicates == 'event'\n else AnalogConnectionGroup9ML)\n if len(pre_conns) > 1:\n name = ('__'.join((proj.name,\n port_conn.sender_role,\n port_conn.send_port_name,\n port_conn.receiver_role,\n port_conn.receive_port_name)))\n else:\n name = proj.name\n if port_conn.sender_role == 'pre':\n connectivity = proj.connectivity\n # If a connection from the pre-synaptic cell the delay\n # is included\n # TODO: In version 2 all port-connections will have\n # their own delays\n delay = proj.delay\n else:\n # If a \"reverse connection\" to the pre-synaptic cell\n # the connectivity needs to be inverted\n connectivity = InversePyNNConnectivity(\n proj.connectivity)\n delay = 0.0 * un.s\n # Append sub-component namespaces to the source/receive\n # ports\n ns_port_conn = port_conn.append_namespace_from_roles(\n {'post': cls.CELL_COMP_NAME,\n 'pre': cls.CELL_COMP_NAME,\n 'synapse': proj.name})\n conn_group = ConnectionGroupClass(\n name,\n arrays_and_selections[proj.pre.name],\n arrays_and_selections[proj.post.name],\n source_port=ns_port_conn.send_port_name,\n destination_port=(ns_port_conn.receive_port_name),\n connectivity=connectivity,\n delay=delay)\n connection_groups[conn_group.name] = conn_group\n return component_arrays, connection_groups, selections", "def contact_maps(coors):\n\timport MDAnalysis.analysis.distances as mdad\n\treturn mdad.distance_array(coors, coors)", "def get_contacts(self):\n feet = [\"REAR_RIGHT_FOOT\", \"REAR_LEFT_FOOT\",\n \"FRONT_RIGHT_FOOT\", \"FRONT_LEFT_FOOT\"]\n contacts = np.zeros(4, dtype=np.float32)\n for i, foot in 
enumerate(feet):\n if self.supervisor.getFromDef(foot).getNumberOfContactPoints() > 0:\n contacts[i] = 1.0\n return contacts", "def connectivity_matrix(self):\n # TODO: make this more memory efficient by ordering i,j in code when needed.\n temp = []\n for i in range(self.n_atom):\n for j in range(i+1, self.n_atom):\n if self.bond(i, j):\n temp.append([i+1, j+1])\n self.connect = np.asarray(temp)", "def tracers_analysis (sim,polymer_text,tracer_text,teq,tsample,t_threshold,p_threshold) :\n # define DKL(t) vector\n nframes = traj_nslice(sim.u,teq,tsample)\n DKL_t = np.zeros(nframes)\n # define polymer and tracers\n polymer = sim.u.select_atoms(polymer_text)\n tracers = sim.u.select_atoms(tracer_text)\n N = polymer.n_atoms\n ntracers = tracers.n_atoms\n # init H and C vectors\n H = np.zeros((N,N),dtype=np.int32)\n C = np.zeros((N,ntracers),dtype=np.int32)\n # analyze all simulation frames as decided\n for i,ts in enumerate(sim.u.trajectory[teq::tsample]) :\n # calculate Hi-C at this time frame\n d = distance_array(polymer.positions,polymer.positions,box=ts.dimensions)\n H += (d<p_threshold)\n Rt = H.sum(axis=1)\n # calculate ChIP-seq at this time frame\n c = distance_array(polymer.positions,tracers.positions,box=ts.dimensions)\n C += (c<t_threshold)\n Ct = C.sum(axis=1)\n DKL_t[i] = mbt.KL_divergence(Ct,Rt)\n # coverage analysis\n C[C>1] = 1\n coverage = C.sum(axis=0).astype('float')/N\n return DKL_t,H,Ct.astype(np.int64),coverage", "def create_contact_array(self):\n for i in range(self.dimensions[1]):\n offset = self.via_layer_position + vector(0, self.contact_pitch * i)\n for j in range(self.dimensions[0]):\n self.add_rect(layer=self.via_layer_name,\n offset=offset,\n width=self.contact_width,\n height=self.contact_width)\n offset = offset + vector(self.contact_pitch,0)", "def tracers_analysis (sim,polymer_text,tracer_text,teq,tsample,t_threshold,p_threshold) :\n # define DKL(t) vector\n nframes = traj_nslice(sim.u,teq,tsample)\n DKL_t = np.zeros(nframes)\n # define polymer and tracers\n polymer = sim.u.select_atoms(polymer_text)\n tracers = sim.u.select_atoms(tracer_text)\n N = polymer.n_atoms\n ntracers = tracers.n_atoms\n # init H and C vectors\n H = np.zeros((N,N),dtype=np.int32)\n C = np.zeros((N,ntracers),dtype=np.int32)\n # analyze all simulation frames as decided\n for i,ts in enumerate(sim.u.trajectory[teq::tsample]) :\n # calculate Hi-C at this time frame\n d = distance_array(polymer.positions,polymer.positions,box=ts.dimensions)\n H += (d<p_threshold)\n Rt = H.sum(axis=1)\n # calculate traffic at this time frame\n c = distance_array(polymer.positions,tracers.positions,box=ts.dimensions)\n C += (c<t_threshold)\n Ct = C.sum(axis=1)\n DKL_t[i] = KL_divergence(Ct,Rt)\n # coverage analysis\n C[C>1] = 1\n coverage = C.sum(axis=0).astype('float')/N\n return DKL_t,H,Ct.astype(np.int64),coverage", "def receiver_locations(self):\n return np.concatenate([rx.locations for rx in self.source_field.receiver_list])", "def matrix(self):\n # group the observables based on what wires they act on\n U_list = []\n for _, g in itertools.groupby(self.obs, lambda x: x.wires):\n # extract the matrices of each diagonalizing gate\n mats = [i.matrix for i in g]\n\n if len(mats) > 1:\n # multiply all unitaries together before appending\n mats = [multi_dot(mats)]\n\n # append diagonalizing unitary for specific wire to U_list\n U_list.append(mats[0])\n\n # Return the Hermitian matrix representing the observable\n # over the defined wires.\n return functools.reduce(np.kron, U_list)", "def 
get_observations(self):\n # Robot velocities (linear + angular)\n vel = np.array(self.robot.getVelocity(), dtype=np.float32)\n # Joint angles\n message_received = self.handle_receiver()\n # Update angles when a message is received\n if message_received is not None:\n self.message_received = message_received\n self.message = np.array(self.message_received, dtype=np.float32)\n # Return the concatenated array\n return np.concatenate((vel, self.message, self.get_contacts()),\n axis=None)", "def process_observations(message, agent):\n if not message:\n print(\"Message is empty\");\n # return None\n else:\n # # Check if joint values are in the expected order and size.\n if message.joint_names != agent['joint_order']:\n # Check that the message is of same size as the expected message.\n if len(message.joint_names) != len(agent['joint_order']):\n raise MSG_INVALID_JOINT_NAMES_DIFFER\n\n # Check that all the expected joint values are present in a message.\n if not all(map(lambda x,y: x in y, message.joint_names,\n raise MSG_INVALID_JOINT_NAMES_DIFFER\n print(\"Joints differ\")\n return np.array(message.actual.positions) # + message.actual.velocities\n\ndef get_jacobians(state, scara_chain, jac_solver):\n \"\"\"\n Produce a Jacobian from the urdf that maps from joint angles to x, y, z.\n This makes a 6x6 matrix from 6 joint angles to x, y, z and 3 angles.\n The angles are roll, pitch, and yaw (not Euler angles) and are not needed.\n Returns a repackaged Jacobian that is 3x6.\n \"\"\"\n # Initialize a Jacobian for scara_chain.getNrOfJoints() joint angles by 3 cartesian coords and 3 orientation angles\n jacobian = Jacobian(scara_chain.getNrOfJoints())\n # Initialize a joint array for the present self.scara_chain.getNrOfJoints() joint angles.\n angles = JntArray(scara_chain.getNrOfJoints())\n # Construct the joint array from the most recent joint angles.\n for i in range(scara_chain.getNrOfJoints()):\n angles[i] = state[i]\n # Update the jacobian by solving for the given angles.observation_callback\n jac_solver.JntToJac(angles, jacobian)\n # Initialize a numpy array to store the Jacobian.\n J = np.array([[jacobian[i, j] for j in range(jacobian.columns())] for i in range(jacobian.rows())])\n # Only want the cartesian position, not Roll, Pitch, Yaw (RPY) Angles\n ee_jacobians = J\n return ee_jacobians", "def connection(self, sampleseq, num):\n self.Adjmatrix = np.zeros((self.nodenum, self.nodenum), dtype = int)\n \n for i in range(self.supplynum):\n minindex = np.array(sf.minimumk(self.Dismatrix[self.supplyseries[i], self.trandemandseries], sampleseq[self.supplyseries[i]]))\n self.Adjmatrix[self.supplyseries[i], self.trandemandseries[minindex]] = 1\n# self.Adjmatrix[minindex, self.supplyseries[i]] = 1\n \n for i in range(self.trannum):\n if(np.sum(self.Adjmatrix[self.supplyseries, self.transeries[i]]) == 0):\n minindex = np.array(sf.minimumk(self.Dismatrix[self.supplyseries, self.transeries[i]], num))\n self.Adjmatrix[minindex, self.transeries[i]] = 1\n# self.Adjmatrix[self.transeries[i], minindex] = 1\n \n \n# for i in range(self.supplynum):\n# minindex = np.array(sf.minimumk(self.Dismatrix[self.supplyseries[i], self.supplyseries], num))\n# self.Adjmatrix[self.supplyseries[i], minindex] = 1\n# self.Adjmatrix[minindex, self.supplyseries[i]] = 1\n \n# for i in range(self.trannum):\n# if(np.sum(self.Adjmatrix[self.supplyseries, self.transeries[i]]) != 0):\n# continue\n# minindex = np.array(sf.minimumk(self.Dismatrix[self.supplyseries, self.transeries[i]], num))\n# self.Adjmatrix[minindex, 
self.transeries[i]] = 1\n## self.Adjmatrix[self.transeries[i], minindex] = 1\n# \n for i in range(self.trannum):\n minindex = np.array(sf.minimumk(self.Dismatrix[self.transeries[i], self.demandseries], min(sampleseq[self.transeries[i]], self.demandnum))) + self.supplynum + self.trannum\n self.Adjmatrix[self.transeries[i], minindex] = 1\n# self.Adjmatrix[minindex, self.transeries[i]] = 1\n \n# for i in range(self.demandnum):\n# if(np.sum(self.Adjmatrix[self.transeries, self.demandseries[i]]) == 0):\n# minindex = np.array(sf.minimumk(self.Dismatrix[self.transeries, self.demandseries[i]], 1)) + self.supplynum\n# self.Adjmatrix[minindex, self.demandseries[i]] = 1\n \n# for i in range(self.trannum):\n# minindex = np.array(sf.minimumk(self.Dismatrix[self.transeries[i], self.transeries], num)) + self.supplynum\n# self.Adjmatrix[self.transeries[i], minindex] = 1\n \n for i in range(self.demandnum):\n if(np.sum(self.Adjmatrix[self.transeries, self.demandseries[i]]) == 0):\n minindex = np.array(sf.minimumk(self.Dismatrix[self.transeries, self.demandseries[i]], num)) + self.supplynum\n self.Adjmatrix[minindex, self.demandseries[i]] = 1\n# self.Adjmatrix[self.demandseries[i], minindex] = 1\n \n for i in range(self.demandnum):\n minindex = np.array(sf.minimumk(self.Dismatrix[self.demandseries[i], self.demandseries], min(sampleseq[self.demandseries[i]] + 1, self.demandnum))) + self.supplynum + self.trannum\n minindex = minindex[1:-1]\n for j in range(len(minindex)):\n if(self.Adjmatrix[self.demandseries[i], minindex[j]] == 1 or self.Adjmatrix[minindex[j], self.demandseries[i]] == 1):\n continue\n self.Adjmatrix[self.demandseries[i], minindex[j]] = 1", "def _generate_throats(self):\n logger.info(\"Define connections between pores\")\n #Np = self._Np\n pts = self['pore.coords']\n Np = len(pts)\n #Generate 6 dummy domains to pad onto each face of real domain\n #This prevents surface pores from making long range connections to each other\n\n x,y,z = self[\"pore.coords\"].T\n if x.max() > self._Lx:\n Lx = x.max()*1.05\n else:\n Lx = self._Lx\n if y.max() > self._Ly:\n Ly = y.max()*1.05\n else:\n Ly = self._Ly\n if z.max() > self._Lz:\n Lz = z.max()*1.05\n else:\n Lz = self._Lz\n\n #Reflect in X = Lx and 0\n Pxp = pts.copy()\n Pxp[:,0]=(2*Lx-Pxp[:,0])\n Pxm= pts.copy()\n Pxm[:,0] = Pxm[:,0]*(-1)\n #Reflect in Y = Ly and 0\n Pyp = pts.copy()\n Pyp[:,1]=(2*Ly-Pxp[:,1])\n Pym = pts.copy()\n Pym[:,1] = Pxm[:,1]*(-1)\n #Reflect in Z = Lz and 0\n Pzp = pts.copy()\n Pzp[:,2]=(2*Lz-Pxp[:,2])\n Pzm = pts.copy()\n Pzm[:,2] = Pxm[:,2]*(-1)\n #Add dummy domains to real domain\n pts = np.vstack((pts,Pxp,Pxm,Pyp,Pym,Pzp,Pzm)) #Order important for boundary logic\n #Perform tessellation\n logger.debug(\"Beginning tessellation\")\n Tri = sptl.Delaunay(pts)\n logger.debug(\"Converting tessellation to adjacency matrix\")\n adjmat = sprs.lil_matrix((Np,Np),dtype=int)\n for i in sp.arange(0,sp.shape(Tri.simplices)[0]):\n #Keep only simplices that are fully in real domain\n #this used to be vectorize, but it stopped working...change in scipy?\n for j in Tri.simplices[i]:\n if j < Np:\n adjmat[j,Tri.simplices[i][Tri.simplices[i]<Np]] = 1\n #Remove duplicate (lower triangle) and self connections (diagonal)\n #and convert to coo\n adjmat = sprs.triu(adjmat,k=1,format=\"coo\")\n logger.debug(\"Conversion to adjacency matrix complete\")\n self['throat.conns']=sp.vstack((adjmat.row, adjmat.col)).T\n self['pore.all'] = np.ones(len(self['pore.coords']), dtype=bool)\n self['throat.all'] = np.ones(len(self['throat.conns']), dtype=bool)\n\n 
# Do Voronoi diagram - creating voronoi polyhedra around each pore and save vertex information\n self._vor = Voronoi(pts)\n all_vert_index = sp.ndarray(Np,dtype=object)\n for i,polygon in enumerate(self._vor.point_region[0:Np]):\n if -1 not in self._vor.regions[polygon]:\n all_vert_index[i]=dict(zip(self._vor.regions[polygon],self._vor.vertices[self._vor.regions[polygon]]))\n\n \" Add throat vertices by looking up vor.ridge_dict \"\n throat_verts = sp.ndarray(len(self[\"throat.conns\"]),dtype=object)\n for i,(p1,p2) in enumerate(self[\"throat.conns\"]):\n try:\n throat_verts[i]=dict(zip(self._vor.ridge_dict[(p1,p2)],self._vor.vertices[self._vor.ridge_dict[(p1,p2)]]))\n except KeyError:\n try:\n throat_verts[i]=dict(zip(self._vor.ridge_dict[(p2,p1)],self._vor.vertices[self._vor.ridge_dict[(p2,p1)]]))\n except KeyError:\n print(\"Throat Pair Not Found in Voronoi Ridge Dictionary\")\n\n self['pore.vert_index']=all_vert_index\n self['throat.vert_index']=throat_verts\n logger.debug(sys._getframe().f_code.co_name+\": End of method\")", "def _setup_bubble_dofs(self):\n if self.node_desc.bubble is None:\n return 0, None, None\n\n offset = self.n_vertex_dof + self.n_edge_dof + self.n_face_dof\n n_dof = 0\n n_dof_per_cell = self.node_desc.bubble.shape[0]\n all_dofs = {}\n remaps = {}\n for ig, ap in self.aps.iteritems():\n ii = self.region.get_cells(ig)\n n_cell = ii.shape[0]\n nd = n_dof_per_cell * n_cell\n\n group = self.domain.groups[ig]\n remaps[ig] = prepare_remap(ii, group.shape.n_el)\n\n aux = nm.arange(offset + n_dof, offset + n_dof + nd,\n dtype=nm.int32)\n aux.shape = (n_cell, n_dof_per_cell)\n iep = self.node_desc.bubble[0]\n ap.econn[:,iep:] = aux\n all_dofs[ig] = aux\n\n n_dof += nd\n\n return n_dof, all_dofs, remaps", "def setup_propagator(self):\n self.propagator = create_propagator_matrix(self.A, self.args.alpha, self.args.model)\n if self.args.model==\"exact\":\n self.propagator = self.propagator.to(self.device)\n else:\n self.edge_indices = self.propagator[\"indices\"].to(self.device)\n self.edge_weights = self.propagator[\"values\"].to(self.device)", "def setup_propagator(self):\n self.propagator = create_propagator_matrix(self.A, self.args.alpha, self.args.model)\n if self.args.model==\"exact\":\n self.propagator = self.propagator.to(self.device)\n else:\n self.edge_indices = self.propagator[\"indices\"].to(self.device)\n self.edge_weights = self.propagator[\"values\"].to(self.device)", "def setup_propagator(self):\n self.propagator = create_propagator_matrix(self.A, self.args.alpha, self.args.model)\n if self.args.model==\"exact\":\n self.propagator = self.propagator.to(self.device)\n else:\n self.edge_indices = self.propagator[\"indices\"].to(self.device)\n self.edge_weights = self.propagator[\"values\"].to(self.device)", "def get_reactions(self) -> NDArray[np.float64]:\n\n stiffness_at_supported_dofs = self.get_stiffness_matrix()[\n np.ix_(self.get_supported_dofs())\n ]\n nodal_displacements = self.__get_nodal_displacements().T\n return stiffness_at_supported_dofs @ nodal_displacements", "def connect_cells(self):\n self.nclist = []\n N = self._N\n for i in range(N):\n src = self.cells[i]\n tgt_syn = self.cells[(i+1)%N].synlist[0]\n nc = src.connect2target(tgt_syn)\n nc.weight[0] = self.syn_w\n nc.delay = self.syn_delay\n\n nc.record(self.t_vec, self.id_vec, i)\n self.nclist.append(nc)", "def gremlin_contact_maps(dist):\n\n\tprint dist\n\tcontact_cutoff = 10\n\tgremlin = [[6,13],[9,22],[15,19],[14,18],[3,11],[34,40],[3,23],[36,40],[9,13],[25,28],[12,15],[11,23], 
\\\n\t\t[26,35],[12,18],[2,5],[17,21],[14,22],[6,9],[41,44],[15,18],[25,30],[9,16],[29,32],[30,33],[6,16]] \n\n\tcontacts = np.zeros(dist.shape)\n\tfor n in range(dist.shape[0]):\n\t\tfor m in range(dist.shape[1]):\n\t\t\tif dist[n][m] < contact_cutoff and (gremlin.count([n,m]) == 1 or gremlin.count([m,n]) == 1):\n\t\t\t\tcontacts[n][m] = 1\n\treturn contacts", "def cfdProcessElementTopology(self):\r\n ## (list of lists) List where each index represents an element in the domain. Each index has an associated list which contains the elements for which is shares a face (i.e. the neighouring elements). Do not confuse a faces 'neighbour cell', which refers to a face's neighbour element, with the neighbouring elements of a cell. \r\n self.elementNeighbours = [[] for i in range(0,self.numberOfElements)]\r\n\r\n ## (list of lists) list of face indices forming each element\r\n self.elementFaces = [[] for i in range(0,self.numberOfElements)]\r\n \r\n #populates self.elementNeighbours\r\n for iFace in range(self.numberOfInteriorFaces):\r\n own=self.owners[iFace]\r\n nei=self.neighbours[iFace]\r\n \r\n #adds indices of neighbour cells\r\n self.elementNeighbours[own].append(nei)\r\n self.elementNeighbours[nei].append(own)\r\n \r\n #adds interior faces\r\n self.elementFaces[own].append(iFace)\r\n self.elementFaces[nei].append(iFace)\r\n \r\n #adds boundary faces ('patches')\r\n for iFace in range(self.numberOfInteriorFaces,self.numberOfFaces):\r\n own=self.owners[iFace]\r\n self.elementFaces[own].append(iFace)\r\n \r\n ## List of lists containing points forming each element\r\n self.elementNodes = [[] for i in range(0,self.numberOfElements)]\r\n \r\n for iElement in range(self.numberOfElements):\r\n \r\n for faceIndex in self.elementFaces[iElement]:\r\n self.elementNodes[iElement].append(self.faceNodes[faceIndex])\r\n \r\n self.elementNodes[iElement] = list(set([item for sublist in self.elementNodes[iElement] for item in sublist]))\r\n \r\n ## Upper coefficient indices (owners)\r\n self.upperAnbCoeffIndex=[[] for i in range(0,self.numberOfInteriorFaces)]\r\n \r\n ## Lower coefficient indices (owners)\r\n self.lowerAnbCoeffIndex=[[] for i in range(0,self.numberOfInteriorFaces)]\r\n \r\n for iElement in range(self.numberOfElements):\r\n ## Element number from 1 to numberOfElements + 1\r\n iNb=1\r\n for faceIndex in self.elementFaces[iElement]:\r\n \r\n #skip if it is a boundary face\r\n if faceIndex > self.numberOfInteriorFaces-1:\r\n continue\r\n \r\n own = self.owners[faceIndex]\r\n nei = self.neighbours[faceIndex]\r\n \r\n if iElement == own:\r\n self.upperAnbCoeffIndex[faceIndex] = iNb\r\n elif iElement == nei:\r\n self.lowerAnbCoeffIndex[faceIndex] = iNb\r\n \r\n iNb = iNb +1", "def build_contacting(masks, r=1):\n source_nodes = []\n target_nodes = []\n for i in range(1, masks.max()+1):\n neigh = find_neighbors(masks, i, r=r)\n source_nodes.append([i]*(neigh.size))\n target_nodes.append(neigh)\n # flatten arrays of arrays\n source_nodes = np.fromiter(itertools.chain.from_iterable(source_nodes), int).reshape(-1,1)\n target_nodes = np.fromiter(itertools.chain.from_iterable(target_nodes), int).reshape(-1,1)\n # remove duplicate pairs\n pairs = np.hstack((source_nodes, target_nodes))\n pairs = remove_duplicate_pairs(pairs)\n return pairs", "def cal_tether_J(self):\n\n self.B_tether_plus = np.zeros((self.point_matrix.shape[0],\n self.point_matrix.shape[1]\n * self.attach_points_tether.shape[0],\n self.attach_points_tether.shape[0]))\n self.J_tether = np.zeros((self.point_matrix.shape[0],\n 
self.point_matrix.shape[1]\n * self.attach_points_tether.shape[0],\n self.point_matrix.shape[1]))\n for i in range(0, self.point_matrix.shape[0]):\n self.B_tether_plus[i, :, :] = pinv(self.B_tether[i, :, :])\n self.J_tether[i, :, :] = np.dot(self.B_tether_plus[i, :, :],\n self.A_tether[i, :, :])", "def make_graph(self, contacts, L):\n graph = [[] for i in range(L)]\n for c1, c2 in contacts:\n graph[c1].append(int(c2))\n graph[c2].append(int(c1))\n max_L = max([len(g) for g in graph])\n # Fill with -1s so that every row has the same length\n graph = [g + [-1] * (max_L - len(g)) for g in graph]\n return np.array(graph).astype(int) # numba does not allow float indexers of arrays", "def forward_kinematics(self):\n def product(L):\n if len(L) == 0:\n return sp.Matrix.eye(4)\n cum = L[0]\n for i in L[1:]:\n cum *= i\n return cum\n\n # routes to base link\n paths = self.paths()\n # symbolic matrices\n matrices = {name: j.matrix for name, j in self.joints.items()}\n\n #\n combined = {k: product([matrices[i] for i in path])\n for k, path in paths.items()}\n\n return combined", "def jacobian_cost(self, joint_angles: dict, ee_goals) -> np.ndarray:\n kinematic_map = self.kinematic_map[\"p0\"] # get map to all nodes from root\n end_effector_nodes = ee_goals.keys()\n J = np.zeros(self.n)\n for (\n ee\n ) in end_effector_nodes: # iterate through end-effector nodes, assumes sorted\n ee_path = kinematic_map[ee][\n 1:\n ] # [:-1] # no last node, only phys. joint locations\n t_ee = self.get_pose(joint_angles, ee).trans\n dg_ee_x = t_ee[0] - ee_goals[ee].trans[0]\n dg_ee_y = t_ee[1] - ee_goals[ee].trans[1]\n for (pdx, joint_p) in enumerate(ee_path): # algorithm fills Jac per column\n p_idx = int(joint_p[1:]) - 1\n for jdx in range(pdx, len(ee_path)):\n node_jdx = ee_path[jdx]\n theta_jdx = sum([joint_angles[key] for key in ee_path[0 : jdx + 1]])\n J[p_idx] += (\n 2.0\n * self.a[node_jdx]\n * (-dg_ee_x * np.sin(theta_jdx) + dg_ee_y * np.cos(theta_jdx))\n )\n\n return J" ]
[ "0.64249676", "0.56675434", "0.5327914", "0.5293578", "0.5202153", "0.5148195", "0.51123756", "0.504556", "0.5030026", "0.50254846", "0.49442828", "0.48849455", "0.48715222", "0.47897685", "0.47662282", "0.472734", "0.47106746", "0.47045848", "0.46944386", "0.46944386", "0.46944386", "0.46783563", "0.46659052", "0.4657178", "0.4655041", "0.46332666", "0.46109018", "0.46027905", "0.4596582", "0.45632353" ]
0.58174694
1
Calculate the matrix of average intrapolymer distances. User must supply the parameters teq, tsample and threshold.
def distance_matrix (sim,polymer_text,teq,tsample,threshold=2.5) : u = sim.u polymer = u.select_atoms (polymer_text) N = polymer.n_atoms nslice = mbt.traj_nslice (u,teq,tsample) d = np.zeros((N,N)) for i,ts in enumerate(u.trajectory[teq::tsample]) : this_d = distance_array(polymer.positions, polymer.positions, box=ts.dimensions) d = mbt.new_average(i,d,this_d) return d
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_qavg(self, TRANGE = []):\n #put some variables in this namespace\n nebins=self.nebins\n nqbins=self.nqbins\n binenergy=self.binenergy\n binq=self.binq\n visits2d=self.visits2d\n logn_Eq=self.logn_Eq\n \n if len(TRANGE) == 0:\n NTEMP = 100 # number of temperatures to calculate expectation values\n TMAX = self.Tlist[-1]\n TMIN = self.Tlist[0]\n TINT=(TMAX-TMIN)/(NTEMP-1)\n TRANGE = [ TMIN + i*TINT for i in range(NTEMP) ]\n \n #find the ocupied bin with the minimum energy\n EREF=0\n for i in range(nebins):\n if visits2d[:,i,:].sum() > 0:\n EREF = binenergy[i]\n break\n \n #don't need to recalculate it\n #self.nodataq = where((visits2d.sum(2).sum(0)) == 0)\n \n #calculate the mean q at each temperature\n self.qavg = np.zeros(len(TRANGE))\n \n #now calculate P(q,T)\n # P(q,T) = sum_E n(E,q)*exp(-E/T) \n #TRANGE=range(1,9)\n logP_Eq = np.zeros([nebins,nqbins])\n logP_q = np.zeros(nqbins)\n for n in range(len(TRANGE)):\n T=TRANGE[n]\n for i in range(nebins):\n logP_Eq[i,:] = logn_Eq[i,:]-(binenergy[i] - EREF)/(self.k_B*T)\n \n logP_Eq[self.allzero2dind[0], self.allzero2dind[1]] = self.LOGMIN\n expoffset = logP_Eq.max()\n #print \"T expoffset \", T, expoffset\n logP_Eq -= expoffset\n #P_q = np.exp(logP_Eq).sum(0)\n # sum over the energy\n for j in range(nqbins):\n logP_q[j] = wham_utils.logSum( logP_Eq[:,j] )\n logP_q[self.nodataq] = np.NaN\n \n #find mean q\n qmin = min(binq)\n qmin -= 0.1\n lqavg = -1.0e30\n lnorm = -1.0e30\n for i in range(0,nqbins): \n if not np.isnan(logP_q[i]):\n lnorm = wham_utils.logSum1( lnorm, logP_q[i] ) \n lqavg = wham_utils.logSum1( lqavg, logP_q[i] + log(binq[i] - qmin) )\n self.qavg[n] = exp(lqavg - lnorm) + qmin\n #print lqavg\n \n return TRANGE,self.qavg", "def interpolation_matrix(m):\n return np.nanmean(m,axis=1)", "def contacts_t (sim,polymer_text,tracer_text,teq,tsample,threshold) :\n u = sim.u\n polymer = u.select_atoms (polymer_text)\n tracers = u.select_atoms (tracer_text)\n ntracers = tracers.n_atoms\n npolymer = polymer.n_atoms\n nslice = mbt.traj_nslice(u,teq,tsample)\n C = np.zeros((ntracers,nslice,npolymer),dtype=bool)\n for i,ts in enumerate(u.trajectory [teq::tsample]) :\n d = distance_array (tracers.positions,polymer.positions,\n box=ts.dimensions)\n c = d<threshold\n C[:,i,:] = c\n return C", "def compute_tap_intervals(xtaps, t, threshold=20):\n import numpy as np\n\n if isinstance(xtaps, list):\n xtaps = np.asarray(xtaps)\n if isinstance(t, list):\n t = np.asarray(t)\n\n # Set time points:\n tap_times = t - t[0]\n\n # Calculate x offset:\n xtaps_offset = xtaps - np.mean(xtaps)\n\n # Find left/right finger \"press\" events:\n dx = xtaps_offset[1:] - xtaps_offset[:-1]\n ipress = np.where(np.abs(dx) > threshold)\n\n # Filter data:\n #xtaps = xtaps[ipress]\n tap_times = tap_times[ipress]\n\n # Find press event intervals:\n tap_intervals = tap_times[1:] - tap_times[:-1]\n\n return ipress, tap_intervals", "def get_x_avg(self, t: Optional[int]) -> np.array:\n t = t or self.t\n if t < 0:\n raise ex.TimeCanNotBeNegative(f\"Time t={t} cannot be negative!\")\n if self.t_max < t:\n raise ex.TimeStepCanNotExceedTmax(f\"Time t={self.t} cannot be t > t_max={self.t_max}!\")\n\n self.x_avg = np.nanmean(self.x[: t + 1])\n return self.x_avg", "def _model_alerts(self, t, y, threshold, ialert, dti):\n # create contiguous alert windows\n inds = np.where(y>threshold)[0]\n\n if len(inds) == 0:\n return 0, len(self.data.tes), 0, int(1e8), 0, 0\n\n dinds = np.where(np.diff(inds)>ialert)[0]\n alert_windows = list(zip(\n [inds[0],]+[inds[i+1] for i in dinds],\n 
[inds[i]+ialert for i in dinds]+[inds[-1]+ialert]\n ))\n alert_window_lengths = [np.diff(aw) for aw in alert_windows]\n \n # compute true/false positive/negative rates\n tes = copy(self.data.tes)\n nes = len(self.data.tes)\n nalerts = len(alert_windows)\n true_alert = 0\n false_alert = 0\n inalert = 0.\n missed = 0\n total_time = (t[-1] - t[0]).total_seconds()\n\n for i0,i1 in alert_windows:\n\n inalert += ((i1-i0)*dti).total_seconds()\n # no eruptions left to classify, only misclassifications now\n if len(tes) == 0:\n false_alert += 1\n continue\n\n # eruption has been missed\n while tes[0] < t[i0]:\n tes.pop(0)\n missed += 1\n if len(tes) == 0:\n break\n if len(tes) == 0:\n continue\n\n # alert does not contain eruption\n if not (tes[0] > t[i0] and tes[0] <= (t[i0] + (i1-i0)*dti)):\n false_alert += 1\n continue\n\n # alert contains eruption\n while tes[0] > t[i0] and tes[0] <= (t[i0] + (i1-i0)*dti):\n tes.pop(0)\n true_alert += 1\n if len(tes) == 0:\n break\n\n # any remaining eruptions after alert windows have cleared must have been missed\n missed += len(tes)\n dur = inalert/total_time\n true_negative = int((len(y)-np.sum(alert_window_lengths))/np.mean(alert_window_lengths))-missed\n mcc = matthews_corrcoef(self._ys, (y>threshold)*1.)\n\n return false_alert, missed, true_alert, true_negative, dur, mcc", "def sigma_calculation(self, det, gt):\n return np.round((self.area_of_intersection(det, gt) / self.area(gt)), 2)", "def averageTrialsByTriggers(trigger_indices, np_data):\n trialLen = trigger_indices[1] -trigger_indices[0] -1\n data_avg = [] \n data_std = [] \n\n for i in trigger_indices:\n data_avg.append(numpy.average(np_data[i+1:i+trialLen-1])) \n data_std.append(numpy.std(np_data[i+1:i+trialLen-1])) \n \n return (data_avg, data_std)", "def compute_tap_features(xtaps, ytaps, t, threshold=20):\n import numpy as np\n\n from mhealthx.extractors.tapping import compute_drift, \\\n compute_tap_intervals, compute_intertap_gap\n from mhealthx.extractors.tapping import TapFeatures as T\n from mhealthx.signals import signal_features\n\n if isinstance(xtaps, list):\n xtaps = np.array(xtaps)\n if isinstance(ytaps, list):\n ytaps = np.array(ytaps)\n if isinstance(t, list):\n t = np.array(t)\n\n # Intertap intervals:\n ipress, intervals = compute_tap_intervals(xtaps, t, threshold)\n\n # Filter data:\n t = t[ipress]\n xtaps = xtaps[ipress]\n ytaps = ytaps[ipress]\n\n # Delta between fastest and slowest intertap intervals:\n T.intertap_gap10, T.intertap_gap25, \\\n T.intertap_gap50 = compute_intertap_gap(intervals)\n\n # Left and right taps and drift:\n mean_x = np.mean(xtaps)\n iL = np.where(xtaps < mean_x)\n iR = np.where(xtaps >= mean_x)\n xL = xtaps[iL]\n yL = ytaps[iL]\n xR = xtaps[iR]\n yR = ytaps[iR]\n driftL = compute_drift(xL, yL)\n driftR = compute_drift(xR, yR)\n\n # Number of taps:\n T.num_taps = xtaps.size\n T.num_taps_left = xL.size\n T.num_taps_right = xR.size\n\n # Time:\n T.time_rng = t[-1] - t[0]\n\n # Intertap interval statistics:\n T.intertap_num, T.intertap_min, T.intertap_max, T.intertap_rng, \\\n T.intertap_avg, T.intertap_std, T.intertap_med, T.intertap_mad, \\\n T.intertap_kurt, T.intertap_skew, T.intertap_cvar, T.intertap_lower25, \\\n T.intertap_upper25, T.intertap_inter50, T.intertap_rms, \\\n T.intertap_entropy, T.intertap_tk_energy = signal_features(intervals)\n\n # Tap statistics:\n T.xL_num, T.xL_min, T.xL_max, T.xL_rng, T.xL_avg, T.xL_std, \\\n T.xL_med, T.xL_mad, T.xL_kurt, T.xL_skew, T.xL_cvar, \\\n T.xL_lower25, T.xL_upper25, T.xL_inter50, T.xL_rms, \\\n 
T.xL_entropy, T.xL_tk_energy = signal_features(xL)\n\n T.xR_num, T.xR_min, T.xR_max, T.xR_rng, T.xR_avg, T.xR_std, \\\n T.xR_med, T.xR_mad, T.xR_kurt, T.xR_skew, T.xR_cvar, \\\n T.xR_lower25, T.xR_upper25, T.xR_inter50, T.xR_rms, \\\n T.xR_entropy, T.xR_tk_energy = signal_features(xR)\n\n # T.yL_num, T.yL_min, T.yL_max, T.yL_rng, T.yL_avg, T.yL_std, \\\n # T.yL_med, T.yL_mad, T.yL_kurt, T.yL_skew, T.yL_cvar, \\\n # T.yL_lower25, T.yL_upper25, T.yL_inter50, T.yL_rms, \\\n # T.yL_entropy, T.yL_tk_energy = signal_features(yL)\n\n # T.yR_num, T.yR_min, T.yR_max, T.yR_rng, T.yR_avg, T.yR_std, \\\n # T.yR_med, T.yR_mad, T.yR_kurt, T.yR_skew, T.yR_cvar, \\\n # T.yR_lower25, T.yR_upper25, T.yR_inter50, T.yR_rms, \\\n # T.yR_entropy, T.yR_tk_energy = signal_features(yR)\n\n # Drift statistics:\n T.driftL_num, T.driftL_min, T.driftL_max, T.driftL_rng, T.driftL_avg, \\\n T.driftL_std, T.driftL_med, T.driftL_mad, T.driftL_kurt, T.driftL_skew, \\\n T.driftL_cvar, T.driftL_lower25, T.driftL_upper25, T.driftL_inter50, \\\n T.driftL_rms, T.driftL_entropy, T.driftL_tk_energy = \\\n signal_features(driftL)\n\n T.driftR_num, T.driftR_min, T.driftR_max, T.driftR_rng, T.driftR_avg, \\\n T.driftR_std, T.driftR_med, T.driftR_mad, T.driftR_kurt, T.driftR_skew, \\\n T.driftR_cvar, T.driftR_lower25, T.driftR_upper25, T.driftR_inter50, \\\n T.driftR_rms, T.driftR_entropy, T.driftR_tk_energy = \\\n signal_features(driftR)\n\n return T", "def calculate_meanpT_fluc(dN_array, pT_array, pT_min=0.0, pT_max=3.0):\n npT_interp = 50\n pT_inte_array = linspace(pT_min, pT_max, npT_interp)\n\n nev, npT = dN_array.shape\n mean_pT_array = zeros(nev)\n for iev in range(nev):\n dN_interp = exp(interp(pT_inte_array, pT_array[iev, :],\n log(dN_array[iev, :] + 1e-30)))\n mean_pT_array[iev] = (sum(pT_inte_array**2.*dN_interp)\n /sum(pT_inte_array*dN_interp))\n\n # compute the error using jack-knife\n rn_array = zeros(nev)\n for iev in range(nev):\n array_idx = [True]*nev\n array_idx[iev] = False\n array_idx = array(array_idx)\n rn_ev = (std(mean_pT_array[array_idx])\n /(mean(mean_pT_array[array_idx]) + 1e-15))\n rn_array[iev] = rn_ev\n rn_mean = mean(rn_array, axis=0)\n rn_err = sqrt((nev - 1.)/nev*sum((rn_array - rn_mean)**2.))\n return([rn_mean, rn_err])", "def apply_new_threshold(self, T):\n E = self.R1 - T * self.R2\n E[E < 0.0] = 0.0\n return E", "def avgtr(self):\n return np.diff(self.trtimes).mean()", "def Xmatrix(k,t,w, standardizeData=True):\n \n K, N = len(k), len(t)\n X = np.zeros( (N+K,K) )\n for i in range(N):\n for j in range(K):\n X[i,j] = np.exp(-1.0*k[j]*t[i])\n for i in range(K):\n X[i+len(t),i] = w**0.5\n\n Xmean = (X[0:N,:]).mean(axis=0) \n if standardizeData:\n for j in range(K):\n X[0:N,j] = X[0:N,j] - Xmean[j]\n\n return X, Xmean", "def prep_distance(self, t: str = 'float') -> np.ndarray:\n d = np.zeros([self.ic.shape[0]*self.ic.shape[1],\n self.ic.shape[1]*self.ic.shape[0]])\n\n u,v = np.meshgrid(np.arange(self.ic.shape[0]),\n np.arange(self.ic.shape[1]),\n sparse=False, indexing='xy')\n u = u.ravel()\n v = v.ravel()\n z = np.array([u,v]).T\n\n for (k,x) in enumerate(z):\n if not self.boundary:\n d[k,:] = np.array(np.sqrt((u - x[0])**2 + (v - x[1])**2),dtype=t)\n\n else:\n d[k,:] = self.torus(x[0],x[1],\n self.ic.shape[0],\n self.ic.shape[1]\n ).ravel()\n\n return d", "def jumping_matrix (sim,polymer_text,tracer_text,teq,tsample,threshold) :\n # define polymer and tracers\n u = sim.u\n polymer = u.select_atoms(polymer_text)\n tracers = u.select_atoms(tracer_text)\n n_polymer = polymer.n_atoms\n n_tracers = 
tracers.n_atoms\n # initialize jumping matrix and first distance matrix d_prev\n J = np.zeros ((n_polymer,n_polymer),dtype=np.int32)\n ts = u.trajectory [teq]\n d_prev = distance_array (polymer.positions,tracers.positions,\n box=ts.dimensions)\n D_prev = d_prev<threshold\n for ts in u.trajectory [teq::tsample] :\n # get distance matrix at current time step\n d_next = distance_array (polymer.positions,tracers.positions,\n box=ts.dimensions)\n D_next = d_next<threshold\n # get jumps of all tracers and add it to the jumping matrix\n for i in xrange (n_tracers) :\n t_prev = D_prev [:,i]\n t_next = D_next [:,i].reshape ((n_polymer,1))\n t = t_prev * t_next\n J += t\n D_prev = D_next.copy()\n return J", "def get_eye_specs(self, tbit, tsample, thres=0.0, nlev=2):\n\n tstart, tend = self.get_xrange()\n toff_vec = np.arange(0, tbit, tsample)\n best_idx = 0\n best_gap = 0.0\n best_values = None\n mid_lev = nlev // 2\n for idx, t_off in enumerate(toff_vec):\n # noinspection PyTypeChecker\n values = self(np.arange(tstart + t_off, tend, tbit))\n values.sort()\n\n up_idx = np.searchsorted(values, [thres])[0]\n if up_idx == 0 or up_idx == len(values):\n continue\n cur_gap = values[up_idx] - values[up_idx - 1]\n if cur_gap > best_gap:\n best_idx = idx\n best_gap = cur_gap\n best_values = values\n\n if best_values is None:\n raise ValueError(\"waveform never cross threshold=%.4g\" % thres)\n\n vstd = np.std(best_values)\n vtemp = best_values / vstd\n tmp_arr = np.linspace(vtemp[0], vtemp[-1], nlev) # type: np.ndarray\n clusters = svq.kmeans(vtemp, tmp_arr)[0]\n # clusters = svq.kmeans(vtemp, 4, iter=50)[0]\n clusters *= vstd\n clusters.sort()\n vcenter = (clusters[mid_lev] + clusters[mid_lev - 1]) / 2.0\n\n # compute eye opening/margin\n openings = []\n tr_widths = []\n last_val = best_values[0]\n bot_val = last_val\n cur_cidx = 0\n for cur_val in best_values:\n cur_cluster = clusters[cur_cidx]\n next_cluster = clusters[cur_cidx + 1]\n if abs(cur_val - cur_cluster) > abs(cur_val - next_cluster):\n openings.append(cur_val - last_val)\n tr_widths.append(last_val - bot_val)\n cur_cidx += 1\n if cur_cidx == len(clusters) - 1:\n tr_widths.append(best_values[-1] - cur_val)\n break\n bot_val = cur_val\n last_val = cur_val\n\n return {'center': (float(toff_vec[best_idx]), vcenter),\n 'levels': clusters,\n 'heights': clusters[1:] - clusters[:-1],\n 'openings': np.array(openings),\n 'trace_widths': np.array(tr_widths)\n }", "def calculate_dist_from_eqm(distance_from_eqm, variable_mask):", "def Temp2IApprox(T,T0,K,D,t=1.0/64.0):\n # get numpy fn to interpret temperature as matrix and get pi constant\n from numpy import asarray, pi, sqrt, abs\n # temperature difference\n Tdiff = (T-T0)\n # thermal conductivity matrix\n K = K(Tdiff)\n # thermal diffusivity matrix\n D = D(Tdiff)\n # 2*sqrt(Dt/pi)\n a = ((D*t)/np.pi)\n # result of sqrt can be +/-\n # power density cannot be negative \n b = (2.0*np.sqrt(a))\n temp = K*Tdiff\n # K*(T-T0)/(2*sqrt(Dt/pi))\n return abs(temp/b)", "def calc_tfmra_elevation(\n thresholds: List[float],\n waveform: np.ndarray,\n first_bin_elvtn: np.ndarray,\n bin_size: float,\n logger=empty_logger()\n) -> np.ndarray:\n\n elevations = np.empty((waveform.shape[0], len(thresholds)))\n\n for i, t in enumerate(thresholds):\n logger.info(f\"threshold {t} {i + 1}/{len(thresholds)}\")\n\n f = lambda array: lin_interp_from_first_max(\n array, t, InterpolateDirection.LEFT\n )\n\n elevations[:, i] = first_bin_elvtn - (\n np.apply_along_axis(f, 1, waveform) * bin_size\n )\n\n return elevations", "def 
tau_calculation(self, det, gt):\n return np.round((self.area_of_intersection(det, gt) / self.area(det)), 2)", "def expectation_sensitivity(T, a):\n M = T.shape[0]\n S = numpy.zeros((M, M))\n for i in range(M):\n S += a[i] * stationary_distribution_sensitivity(T, i)\n return S", "def get_tad_hic(self, tad, x_name, normed=True):\n beg, end = int(tad['start']), int(tad['end'])\n xpr = self.experiments[x_name]\n size = xpr['size']\n matrix = [[[] for _ in xrange(beg, end)]\\\n for _ in xrange(beg, end)]\n for i, ii in enumerate(xrange(beg - 1, end - 1)):\n for j, jj in enumerate(xrange(beg, end)):\n matrix[j][i] = float(xpr['hi-c'][0][ii * size + jj])\n if not normed:\n continue\n try:\n matrix[j][i] = matrix[j][i] / xpr['wght'][0][ii * size + jj]\n except ZeroDivisionError:\n matrix[j][i] = 0.0\n return matrix", "def n_finder(gt_mat, x, eps):\n numsnps = int(0.9*gt_mat.shape[0])\n assert(x < numsnps)\n assert(eps > 0 and eps<= 1.0)\n indices = np.random.choice(numsnps, size=x, replace=False)\n n = 0\n avg_array = np.zeros(gt_mat.shape[0])\n going = True\n while going:\n r2_list = [np.corrcoef(gt_mat[i,:],gt_mat[i+n,:])[0,1]**2 for i in indices]\n avg_array[n] = np.nanmean(r2_list)\n n += 1\n if np.mean(r2_list) < eps:\n going = False \n return n,avg_array[:n]", "def average_qa_distance(distances):\n\treturn np.average(np.median(distances,axis=0))", "def _eqns(self, x: np.ndarray, t):\n # Could in principle sanity check the args but speed is probably paramount\n\n return np.array([-(self._angular_freq ** 2) * np.sin(x[1]) - x[0], x[0]])", "def tracers_analysis (sim,polymer_text,tracer_text,teq,tsample,t_threshold,p_threshold) :\n # define DKL(t) vector\n nframes = traj_nslice(sim.u,teq,tsample)\n DKL_t = np.zeros(nframes)\n # define polymer and tracers\n polymer = sim.u.select_atoms(polymer_text)\n tracers = sim.u.select_atoms(tracer_text)\n N = polymer.n_atoms\n ntracers = tracers.n_atoms\n # init H and C vectors\n H = np.zeros((N,N),dtype=np.int32)\n C = np.zeros((N,ntracers),dtype=np.int32)\n # analyze all simulation frames as decided\n for i,ts in enumerate(sim.u.trajectory[teq::tsample]) :\n # calculate Hi-C at this time frame\n d = distance_array(polymer.positions,polymer.positions,box=ts.dimensions)\n H += (d<p_threshold)\n Rt = H.sum(axis=1)\n # calculate traffic at this time frame\n c = distance_array(polymer.positions,tracers.positions,box=ts.dimensions)\n C += (c<t_threshold)\n Ct = C.sum(axis=1)\n DKL_t[i] = KL_divergence(Ct,Rt)\n # coverage analysis\n C[C>1] = 1\n coverage = C.sum(axis=0).astype('float')/N\n return DKL_t,H,Ct.astype(np.int64),coverage", "def calc_ET0(RA, Tavg, TD, P):\n valid_mask = (\n (RA != _TARGET_NODATA) &\n (Tavg != tavg_nodata) &\n (TD != td_nodata) &\n (P != _TARGET_NODATA))\n result = numpy.empty(RA.shape, dtype=numpy.float32)\n result[:] = _TARGET_NODATA\n result[valid_mask] = (\n 0.0013 * 0.408 * RA[valid_mask] * (Tavg[valid_mask] + 17.) 
*\n (numpy.power((TD[valid_mask] - 0.0123 * P[valid_mask]), 0.76)) *\n 29.5)\n return result", "def get_attenutation(self,beam,ne,mass_b,Ti,file_number):\n beam = np.log(beam/mass_b)\n ne = np.log(ne)\n Ti = np.log(Ti)\n if len(ne.shape) == 1:\n coef = np.zeros(ne.shape)\n for i,n in enumerate(ne):\n coef[i] = interpolate.bisplev(beam,n,self.atte_tck_dens[file_number])\n else:\n coef = interpolate.bisplev(beam,ne,self.atte_tck_dens[file_number])\n\n coef = coef * interpolate.splev(Ti,self.atte_tck_temp[file_number])\n return coef", "def u_i_all(self, t_array):\n t_array = t_array.reshape(1,len(t_array)) # (1 , N_t)\n tmp = self._r_tau(t_array-self._s.reshape(self._num_gp,1))\n # (N_gp, N_t)\n # A # (N_gp, N_ev)\n # A_j,i = w_j / sqrt(lambda_i) u_i(s_j)\n return np.tensordot(tmp, 1/self._sqrt_eig_val.reshape(1,self._num_ev) * self._A, axes=([0],[0]))", "def mntd(distmat):\r\n return masked_array(distmat, eye(distmat.shape[0])).min(0).mean()" ]
[ "0.5694141", "0.5119244", "0.5017542", "0.49339455", "0.49261585", "0.48504174", "0.4848955", "0.48481247", "0.48393378", "0.4766152", "0.47565967", "0.47392762", "0.4727343", "0.47033104", "0.46980202", "0.46610996", "0.46503145", "0.4611686", "0.46015757", "0.45886338", "0.45866746", "0.45849678", "0.45742157", "0.4556053", "0.45508125", "0.4547196", "0.45429096", "0.45249218", "0.45228752", "0.4520597" ]
0.66201276
0
This function does the complete analysis of the tracers in the simulation. It calculates the virtual Hi-C, virtual ChIP-seq, Kullback-Leibler divergence between the two profiles as a function of time, and coverage of the tracers.
def tracers_analysis (sim,polymer_text,tracer_text,teq,tsample,t_threshold,p_threshold) : # define DKL(t) vector nframes = traj_nslice(sim.u,teq,tsample) DKL_t = np.zeros(nframes) # define polymer and tracers polymer = sim.u.select_atoms(polymer_text) tracers = sim.u.select_atoms(tracer_text) N = polymer.n_atoms ntracers = tracers.n_atoms # init H and C vectors H = np.zeros((N,N),dtype=np.int32) C = np.zeros((N,ntracers),dtype=np.int32) # analyze all simulation frames as decided for i,ts in enumerate(sim.u.trajectory[teq::tsample]) : # calculate Hi-C at this time frame d = distance_array(polymer.positions,polymer.positions,box=ts.dimensions) H += (d<p_threshold) Rt = H.sum(axis=1) # calculate ChIP-seq at this time frame c = distance_array(polymer.positions,tracers.positions,box=ts.dimensions) C += (c<t_threshold) Ct = C.sum(axis=1) DKL_t[i] = mbt.KL_divergence(Ct,Rt) # coverage analysis C[C>1] = 1 coverage = C.sum(axis=0).astype('float')/N return DKL_t,H,Ct.astype(np.int64),coverage
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tracers_analysis (sim,polymer_text,tracer_text,teq,tsample,t_threshold,p_threshold) :\n # define DKL(t) vector\n nframes = traj_nslice(sim.u,teq,tsample)\n DKL_t = np.zeros(nframes)\n # define polymer and tracers\n polymer = sim.u.select_atoms(polymer_text)\n tracers = sim.u.select_atoms(tracer_text)\n N = polymer.n_atoms\n ntracers = tracers.n_atoms\n # init H and C vectors\n H = np.zeros((N,N),dtype=np.int32)\n C = np.zeros((N,ntracers),dtype=np.int32)\n # analyze all simulation frames as decided\n for i,ts in enumerate(sim.u.trajectory[teq::tsample]) :\n # calculate Hi-C at this time frame\n d = distance_array(polymer.positions,polymer.positions,box=ts.dimensions)\n H += (d<p_threshold)\n Rt = H.sum(axis=1)\n # calculate traffic at this time frame\n c = distance_array(polymer.positions,tracers.positions,box=ts.dimensions)\n C += (c<t_threshold)\n Ct = C.sum(axis=1)\n DKL_t[i] = KL_divergence(Ct,Rt)\n # coverage analysis\n C[C>1] = 1\n coverage = C.sum(axis=0).astype('float')/N\n return DKL_t,H,Ct.astype(np.int64),coverage", "def test_cherenkov_instability( show=False ):\n # Dictionary to record the final value of E\n slope_Erms = {}\n\n for scheme in [ 'standard', 'galilean', 'pseudo-galilean']:\n\n # Choose the correct parameters for the scheme\n if scheme == 'standard':\n v_comoving = 0.\n use_galilean = False\n else:\n v_comoving = 0.9999*c\n if scheme == 'galilean':\n use_galilean = True\n else:\n use_galilean = False\n\n # Initialize the simulation object\n sim = Simulation( Nz, zmax, Nr, rmax, Nm, dt,\n p_zmin, p_zmax, p_rmin, p_rmax, p_nz, p_nr, p_nt, n_e,\n zmin=zmin, initialize_ions=True,\n v_comoving=v_comoving, use_galilean=use_galilean,\n boundaries={'z':'periodic', 'r':'reflective'}, use_cuda=use_cuda )\n\n # Give a relativistic velocity to the particle, with some noise\n sim.ptcl[0].uz[:] = uz_m\n sim.ptcl[0].inv_gamma[:] = 1./np.sqrt( 1 + sim.ptcl[0].uz**2 )\n sim.ptcl[1].uz[:] = uz_m\n sim.ptcl[1].inv_gamma[:] = 1./np.sqrt( 1 + sim.ptcl[1].uz**2 )\n\n # Perform the simulation;\n # record the rms electric field every 50 timestep\n Er_rms = np.zeros(int(N_step/30)+1)\n t = np.zeros(int(N_step/30+1))\n Er_rms[0] = get_Er_rms(sim)\n t[0] += sim.time\n for i in range(int(N_step/30)):\n sim.step( 30, show_progress=False )\n print('Checkpoint %d' %i)\n Er_rms[i+1] = get_Er_rms(sim)\n t[i+1] += sim.time\n print('Calculated RMS')\n\n # Check/plot the results\n if show:\n import matplotlib.pyplot as plt\n # Add a plot\n plt.semilogy( t, Er_rms, '-', label=scheme )\n plt.ylabel('RMS(Er)')\n plt.xlabel('Time')\n else:\n # Registed the final value of the slope of the electric field\n slope_Erms[scheme] = np.log( Er_rms[-1] ) - np.log(Er_rms[-2] )\n\n if show:\n # Show the plot\n plt.legend(loc=0)\n plt.show()\n else:\n # Check that, in the standard case, the electric field is\n # growing much faster, due to the Cherenkov instability\n assert slope_Erms['standard'] > 3.5*slope_Erms['galilean']\n assert slope_Erms['standard'] > 3.5*slope_Erms['pseudo-galilean']", "def calc_error_prop_ef(self, tier2_hhv, tier3):\n\n #Import boostrap data (by year and fuel) for hhv and tier 3 data\n hhv_boot = pd.read_csv('../calculation_data/hhv_uncertainty.csv',\n index_col=[0,1])\n\n mw_boot = pd.read_csv('../calculation_data/mw_uncertainty.csv',\n index_col=[0,1])\n\n cc_boot = pd.read_csv('../calculation_data/cc_uncertainty.csv',\n index_col=[0,1])\n\n def calc_sq_std(df):\n \"\"\"\n Calculate the square of the standard deviation divided by\n mean. 
Returns a dataframe.\n \"\"\"\n\n sq_std = df.dropna()\n\n sq_std = (df['std'].divide(df['mean']))**2\n\n sq_std.name = 'sq_std'\n\n sq_std = pd.DataFrame(sq_std)\n\n sq_std = sq_std.dropna()\n\n return sq_std\n\n gas_scf_to_kg = pd.DataFrame()\n\n for f in ['Natural Gas (Weighted U.S. Average)', 'Fuel Gas']:\n\n scf_df = pd.DataFrame(tier3['molecular_weight'].xs(\n f, level='FUEL_TYPE'\n ))\n\n scf_df['FUEL_TYPE'] = f\n\n gas_scf_to_kg = gas_scf_to_kg.append(scf_df)\n\n gas_scf_to_kg.set_index('FUEL_TYPE', append=True, inplace=True)\n\n error_prop = pd.merge(\n calc_sq_std(hhv_boot), calc_sq_std(cc_boot),\n left_index=True, right_index=True, how='inner',\n suffixes=['_hhv', '_C']\n )\n\n # Data reporting began in 2014 and coverage is spotty.\n # Repeat 2015 data for 2010 - 2014.\n error_prop.drop(2014, axis=0, level=0, inplace=True)\n\n for y in range(2010, 2015):\n\n new_year = error_prop.loc[2015].reset_index()\n\n new_year['reporting_year'] = y\n\n error_prop = error_prop.append(\n new_year.set_index(['reporting_year', 'fuel_type'])\n )\n\n error_prop.sort_index(level=0, inplace=True)\n\n # Include error of molecular weight of fuel gas and natural gas\n error_prop = pd.merge(error_prop, calc_sq_std(mw_boot),\n left_index=True, right_index=True, how='outer')\n\n error_prop.rename(columns={'sq_std': 'sq_std_mw'}, inplace=True)\n\n error_prop['final_uncert'] = error_prop.sq_std_hhv.add(\n error_prop.sq_std_C\n ).add(\n error_prop.sq_std_mw, fill_value=0\n )\n\n error_prop.dropna(subset=['sq_std_hhv'], axis=0, inplace=True)\n\n # Calculate kg-mol per SCF for natural gas and fuel gas. SCF defined in\n # SI units as 101.560 kPa, 288.706 K, 0.02316847 m3. Ideal gas\n # constant is 8.314 kPa*m3/(kg-mol*K)\n scf_per_kgmol = (8.314 * 288.706) / (101.560 * 0.028316847)\n\n conv_dict = {'percent by weight, expressed as a decimal fraction': \\\n 907.185, 'kg C per kg': scf_per_kgmol,\n 'kg C per gallon': 1}\n\n t2t3_efs = pd.DataFrame(index=error_prop.index,\n columns=['reported_mean'])\n\n for fuel in error_prop.index:\n\n if 'Gas' in fuel:\n\n t2t3_efs.loc[fuel, 'reported_mean'] = (tier2_hhv.xs(\n fuel, level='FUEL_TYPE'\n ).hhv_wa.mean() * scf_per_kgmol / \\\n tier3.xs(fuel, level='FUEL_TYPE').molecular_weight.mean() / \\\n tier3.xs(fuel, level='FUEL_TYPE').carbon_content.mean() * \\\n (12/44))**-1\n\n\n if 'Oil' in fuel:\n\n t2t3_efs.loc[fuel, 'reported_mean'] = tier3.xs(\n fuel, level='FUEL_TYPE'\n ).carbon_content.mean() / \\\n tier2_hhv.xs(fuel, level='FUEL_TYPE').hhv_wa.mean() * \\\n (44/12)\n\n else:\n\n t2t3_efs.loc[fuel, 'reported_mean'] = tier3.xs(\n fuel, level='FUEL_TYPE'\n ).carbon_content.mean() / 100 / \\\n tier2_hhv.xs(fuel, level='FUEL_TYPE').hhv_wa.mean() * \\\n (44/12)\n\n t2t3_efs = pd.merge(t2t3_efs, error_prop[['final_uncert']],\n left_index=True, right_index=True,\n how='inner')\n\n t2t3_efs.rename(columns={'mean': 'kgCO2_per_mmBtu'}, inplace=True)\n\n # Create column for the uncertainty amount in kg CO2/mmBtu (+/-)\n t2t3_efs['ef_plus_minus'] = t2t3_efs.kgCO2_per_mmBtu.multiply(\n t2t3_efs.final_uncert\n )\n\n return error_prop, mw_boot", "def Run_Profile(init,traits,Env,sig = 0.0001,Ntot0 = 10,tmax = 100,T=TS,dt = 0.01,mu=0.005):\n\n\t## Environmental conditions\n\tHinf = Env[0]\n\tCinf = Env[1]\n\tNinf = Env[2]\n\tGinf = Env[3]\n\tQH = Env[4]\n\tQC = Env[5]\n\tQN = Env[6]\n\tQG = Env[7]\n \n\t## Traits \n\tthresh = traits[7]\n\tslope = traits[8]\n\tgmax = traits[9]\n\tVc = traits[1]\n\tQc = traits[2]\n\n\t## Calculation of constants over timescale of interest (here, the 
temperature is constant)\n\tDeltaG0catT = DeltaG0(T,deltaG0Cat,deltaH0Cat)\n\tDeltaG0anaT = DeltaG0(T,deltaG0Ana,deltaH0Ana)\n \n\t## Initialization\n\tHT = []\n\tCT = []\n\tNT = []\n\tGT = []\n\tXoT = []\n\tNCT = []\n\tXT = []\n\tD = []\n\ttime = []\n\tNPPT = []\n\tt=1\n\n\tHT.append(init[0])\n\tCT.append(init[1])\n\tNT.append(init[2])\n\tGT.append(init[3])\n\tXoT.append(init[4])\n\tNCT.append(init[5])\n\tXT.append(init[6])\n\tD.append(0)\n\ttime.append(0)\n\tt=1\n \n\twhile time[t-1] < tmax: \n\t\tH = HT[t-1]\n\t\tC = CT[t-1]\n\t\tN = NT[t-1]\n\t\tG = GT[t-1]\n\t\tXo = XoT[t-1]\n\t\tNC = NCT[t-1]\n\t\tX0 = XT[t-1]\n\n\t\tnNCT,nXT,qana,qcat,decay,mort,dt = Step_Profile(NC,X0,traits,[H,C,N,G],gamma,T,dt)\n\t\tNCT.append(nNCT)\n\t\tXT.append(nXT)\n\t\tD.append(decay+mort)\n\t\tnS = Step_Substrates([H,C,N,G],Hinf,Cinf,Ninf,Ginf,QH,QC,QN,QG,NCT[t-1],qana,qcat,dt,Vc)\n\t\tHT.append(nS[0])\n\t\tCT.append(nS[1])\n\t\tNT.append(nS[2])\n\t\tGT.append(nS[3])\n\t\tNPPT.append(qana*NC)\n\n\t\tnXo = Step_DeadBiomass(Xo,Hinf,Cinf,Ninf,Ginf,QH,QC,QN,QG,NCT[t-1],decay,mort,Qc,XT[t-1],dt,Vc)\n\t\tXoT.append(nXo)\n\t\ttime.append(time[t-1] + dt)\n\t\tt=t+1 \n#\t\tprint(time[t-1])\n\treturn(NCT,XT,HT,CT,NT,GT,XoT,D,time,NPPT)", "def tracers(traceField = 'bb', hMin = 2e-3, hMax = 2e4, lMax = 500, tol = 1e-2,\n interpolation = 'weighted', trace_sub = 1, intQ = [''], varfile = 'VAR0',\n ti = -1, tf = -1,\n integration = 'simple', datadir = 'data/', destination = 'tracers.dat', nproc = 1):\n\n # returns the tracers for the specified starting locations\n def subTracers(q, vv, p, tracers0, iproc, hMin = 2e-3, hMax = 2e4, lMax = 500, tol = 1e-2, \n interpolation = 'weighted', integration = 'simple', intQ = ['']):\n \n tracers = tracers0\n mapping = np.zeros((tracers.shape[0], tracers.shape[1], 3))\n \n for ix in range(tracers.shape[0]):\n for iy in range(tracers.shape[1]):\n xx = tracers[ix, iy, 2:5].copy()\n s = pc.stream(vv, p, interpolation = interpolation, integration = integration, hMin = hMin, hMax = hMax, lMax = lMax, tol = tol, xx = xx)\n tracers[ix, iy, 2:5] = s.tracers[s.sl-1]\n tracers[ix, iy, 5] = s.l\n if (any(intQ == 'curlyA')):\n for l in range(s.sl-1):\n aaInt = pc.vecInt((s.tracers[l+1] + s.tracers[l])/2, aa, p, interpolation)\n tracers[ix, iy, 6] += np.dot(aaInt, (s.tracers[l+1] - s.tracers[l]))\n \n # create the color mapping\n if (tracers[ix, iy, 4] > grid.z[-2]):\n if (tracers[ix, iy, 0] - tracers[ix, iy, 2]) > 0:\n if (tracers[ix, iy, 1] - tracers[ix, iy, 3]) > 0:\n mapping[ix, iy, :] = [0,1,0]\n else:\n mapping[ix, iy, :] = [1,1,0]\n else:\n if (tracers[ix, iy, 1] - tracers[ix, iy, 3]) > 0:\n mapping[ix, iy, :] = [0,0,1]\n else:\n mapping[ix, iy, :] = [1,0,0]\n else:\n mapping[ix, iy, :] = [1,1,1]\n \n q.put((tracers, mapping, iproc))\n \n \n # multi core setup\n if (np.isscalar(nproc) == False) or (nproc%1 != 0):\n print(\"error: invalid processor number\")\n return -1\n queue = mp.Queue()\n \n # read the data\n # make sure to read the var files with the correct magic\n if (traceField == 'bb'):\n magic = 'bb'\n if (traceField == 'jj'):\n magic = 'jj'\n if (traceField == 'vort'):\n magic = 'vort'\n \n # convert intQ string into list\n if (isinstance(intQ, list) == False):\n intQ = [intQ]\n intQ = np.array(intQ)\n \n grid = pc.read_grid(datadir = datadir, trim = True, quiet = True) \n dim = pc.read_dim(datadir = datadir) \n tol2 = tol**2\n \n # check if user wants a tracer time series\n if ((ti%1 == 0) and (tf%1 == 0) and (ti >= 0) and (tf >= ti)):\n series = True\n n_times = tf-ti+1\n 
else:\n series = False\n n_times = 1\n \n tracers = np.zeros([int(trace_sub*dim.nx), int(trace_sub*dim.ny), n_times, 6+len(intQ)])\n mapping = np.zeros([int(trace_sub*dim.nx), int(trace_sub*dim.ny), n_times, 3])\n t = np.zeros(n_times)\n \n for tIdx in range(n_times):\n if series:\n varfile = 'VAR' + str(tIdx)\n \n # read the data\n var = pc.read_var(varfile = varfile, datadir = datadir, magic = magic, quiet = True, trimall = True) \n grid = pc.read_grid(datadir = datadir, quiet = True, trim = True)\n t[tIdx] = var.t\n \n # extract the requested vector traceField\n vv = getattr(var, traceField)\n if (any(intQ == 'curlyA')):\n aa = var.aa\n \n # initialize the parameters\n p = pc.pClass()\n p.dx = var.dx; p.dy = var.dy; p.dz = var.dz\n p.Ox = var.x[0]; p.Oy = var.y[0]; p.Oz = var.z[0]\n p.Lx = grid.Lx; p.Ly = grid.Ly; p.Lz = grid.Lz\n p.nx = dim.nx; p.ny = dim.ny; p.nz = dim.nz\n \n # initialize the tracers\n for ix in range(int(trace_sub*dim.nx)):\n for iy in range(int(trace_sub*dim.ny)):\n tracers[ix, iy, tIdx, 0] = grid.x[0] + int(grid.dx/trace_sub)*ix\n tracers[ix, iy, tIdx, 2] = tracers[ix, iy, tIdx, 0]\n tracers[ix, iy, tIdx, 1] = grid.y[0] + int(grid.dy/trace_sub)*iy\n tracers[ix, iy, tIdx, 3] = tracers[ix, iy, tIdx, 1]\n tracers[ix, iy, tIdx, 4] = grid.z[0]\n \n # declare vectors\n xMid = np.zeros(3)\n xSingle = np.zeros(3)\n xHalf = np.zeros(3)\n xDouble = np.zeros(3)\n \n tmp = []\n subTracersLambda = lambda queue, vv, p, tracers, iproc: \\\n subTracers(queue, vv, p, tracers, iproc, hMin = hMin, hMax = hMax, lMax = lMax, tol = tol,\n interpolation = interpolation, integration = integration, intQ = intQ)\n proc = []\n for iproc in range(nproc):\n proc.append(mp.Process(target = subTracersLambda, args = (queue, vv, p, tracers[iproc::nproc,:,tIdx,:], iproc)))\n for iproc in range(nproc):\n proc[iproc].start()\n for iproc in range(nproc):\n tmp.append(queue.get())\n for iproc in range(nproc):\n proc[iproc].join()\n for iproc in range(nproc):\n tracers[tmp[iproc][2]::nproc,:,tIdx,:], mapping[tmp[iproc][2]::nproc,:,tIdx,:] = (tmp[iproc][0], tmp[iproc][1])\n for iproc in range(nproc):\n proc[iproc].terminate()\n \n tracers = np.copy(tracers.swapaxes(0, 3), order = 'C')\n if (destination != ''):\n f = open(datadir + destination, 'wb')\n f.write(np.array(trace_sub, dtype = 'float32'))\n # write tracers into file\n for tIdx in range(n_times):\n f.write(t[tIdx].astype('float32'))\n f.write(tracers[:,:,tIdx,:].astype('float32'))\n f.close()\n \n tracers = tracers.swapaxes(0, 3)\n tracers = tracers.swapaxes(0, 1)\n mapping = mapping.swapaxes(0, 1)\n\n return tracers, mapping, t", "def doCalculation(self, E1, E2, muL, muR, T, pot, C, TCalc, Density, E0, L):\n NEcut = len(E1) #we determine the number of single-particle states that we use\n VG=np.diag(pot)\n E= int(0.5*np.size(VG))\n V = VG[0:E] #since the potential of both barriers is symmetric and we only tunnel through one barrier. 
Therefore we only use one half of the potential.\n dx= L/(np.size(pot))\n\n #Following prints are for debugging purposes:\n #print(\"---------------------------------------------------------------------\")\n #print(\"---------------------------------------------------------------------\")\n #print(\"Hier beginnt die Ausgabe von Rates:\")\n #print(\"---------------------------------------------------------------------\")\n #print(\"V:\", V)\n #print(\"E1:\", E1)\n #print(\"E2:\", E2)\n #print(\"C:\", C)\n\n kB=0.08629 #Boltzmann constant in meV/K\n \n \n def fermi(E,mu,T):\n \"\"\"This fermi-function tells us with which likelyhood a state with an E is occupied on the lead.\n E(float): energy difference between the initial and the final state that the tunneling electron has to carry.\n mu(float): chemical potential of either drain(muR) or source(muL).\n T(float): temperature.\n \"\"\"\n if (E-mu)/T > 600:\n f=0\n\t\t\t\t\n else:\n f=1/(math.exp((E-mu)/(kB*T) )+1)\n return(f)\n \n\n\t#This function is called by the Gamma_ij-equations and includes the transmission-coefficient for each tunnelling-event\n #and the density of state function of the source and drain. \n def Gamma(Ea,Eb,V):\n \"\"\":math:`\\\\Gamma` includes the transmission coefficient and DOS: :math:`\\Gamma = | t |^2 * DOS`\n\n Ea(float): energy of initial state\n Eb(float): energy of final state\n V(np.array): barrier potential\n \"\"\"\n #print(Ea)\n #print(V)\n return (np.absolute(TCalc.calculate_transmission(Ea,V,dx))**2*Density.calculate_DensityofStates(np.absolute(Ea-Eb)))\n \n #These next four functions are used to calculate the transition rates.Each function for a different kind of transition:\n #We distinguish between transitions, in which the number of electrons on the dot changes from one to two(Gamma_12) and reverse(Gamma_21).\n #And between transitions in which the number of electrons on the dot change from zero to one(Gamma_01) and reverse(Gamma_10).\n\n def Gamma_12(Ea,Eb,mu,T):\n \"\"\"Calculates the rate of a transition from a one body state to a two body state.\n\n Ea(float): energy of initial state\n Eb(float): energy of final state\n mu(float): chemical potential of either drain(muR) or source(muL)\n T(float): temperature\n \"\"\"\n summe=0\n j=0\n Cb=C[np.where(E2==Eb)[0][0]]\n while j< NEcut:\n summe=Cb[np.where(E1==Ea)[0][0]][j]+summe\n j=j+1\n return(Gamma(Ea,Eb,V)*(np.absolute(summe))**2*fermi((Eb-Ea),mu,T))\n\n\n def Gamma_01(Eb,mu,T):\n \"\"\"Calculates the transition rate from the vacuum state to a one-body state.\n\n Eb(float): energy of final state\n mu(float): chemical potential of either drain(muR) or source(muL)\n T(float): temperature\n \"\"\"\n return(Gamma(E0,Eb,V)*fermi((Eb-E0),mu,T))\n\n def Gamma_21(Ea,Eb,mu,T):\n \"\"\"Calculates the rate of a transition from a two body state to a one body state.\n\n Ea(float): energy of initial state\n Eb(float): energy of final state\n mu(float): chemical potential of either drain(muR) or source(muL)\n T(float): temperature\n \"\"\"\n summe=0\n nu=0\n Ca=C[np.where(E2==Ea)[0][0]]\n while nu < NEcut:\n summe=summe+Ca[np.where(E1==Eb)[0][0]][nu]\n nu=nu+1\n return(Gamma(Ea,Eb,V)*(np.absolute(summe))**2*(1-fermi((Ea-Eb),mu,T)))\n\n def Gamma_10(Ea,mu,T):\n \"\"\"Calculates the rate of a transition from a one body state to the vacuum state.\n\n Ea(float): energy of initial state \n mu(float): chemical potential of either drain(muR) or source(muL)\n T(float): temperature\n \"\"\"\n return(Gamma(Ea,E0,V)*(1-fermi((Ea-E0),mu,T)))\n\n #creating the output 
matrices that later contain all the transition rates through either\n #the left or the right barrier\n Gamma_R=np.zeros((1+np.size(E1)+np.size(E2),1+np.size(E1)+np.size(E2)))\n Gamma_L=np.zeros((1+np.size(E1)+np.size(E2),1+np.size(E1)+np.size(E2)))\n\n #using a loop to fill the output matrices with transition rates.\n i_=0\n for i in E1:\n j_=0\n for j in E2:\n Gamma_L[i_+1][j_+1+np.size(E1)]=Gamma_12(i,j,muL,T)\n Gamma_L[j_+1+np.size(E1)][i_+1]=Gamma_21(j,i,muL,T)\n Gamma_R[i_+1][j_+1+np.size(E1)]=Gamma_12(i,j,muR,T)\n Gamma_R[j_+1+np.size(E1)][i_+1]=Gamma_21(j,i,muR,T)\n j_=j_+1\n Gamma_L[0][i_+1]=Gamma_10(i,muL,T)\n Gamma_R[0][i_+1]=Gamma_10(i,muR,T)\n Gamma_L[i_+1][0]=Gamma_01(i,muL,T)\n Gamma_R[i_+1][0]=Gamma_01(i,muR,T)\n i_=1+i_\n\n #print(\"Gamma_L und Gamma_R:\")\n #print(Gamma_L,Gamma_R)\n #print(\"-----------------------------------------------------------------------\")\n #print(\"---------------------------------------------------------------------\")\n return(Gamma_L,Gamma_R)", "def main():\n lake_drivers = Dynamic_Lake_Drivers()\n #lake_drivers.prepare_orography_ICE5G_0k_uncorrected()\n #lake_drivers.prepare_orography_ICE5G_0k_corrected()\n #lake_drivers.prepare_orography_ICE6G_21k_corrected()\n #lake_drivers.prepare_river_directions_with_depressions_from_glac1D()\n #lake_drivers.evaluate_glac1D_ts1900_basins()\n #import time\n # start = time.time()\n #lake_drivers.evaluate_ICE6G_lgm_basins()\n # end = time.time()\n # print(end - start)\n #lake_drivers.prepare_basins_from_glac1D()\n #lake_drivers.extract_lake_volumes_from_glac1D_basins()\n #lake_drivers.connect_catchments_for_glac1D()\n lake_drivers.connect_catchments_for_transient_run()\n #lake_drivers.extract_volumes_for_transient_run()\n #lake_drivers.add_10min_rmouth_to_transient_data()\n #lake_drivers.expand_transient_data_catchments_to_include_rmouth()\n #lake_drivers.remove_no_data_values_from_upscaled_MERIT_correction_set()\n #lake_drivers.remove_disconnected_points_from_slm()", "def main():\n print_banner()\n params = read_steering()\n s, x, y, cur, theta = build_kinoshita()\n s, x, y, cur, theta = read_centerline(s, x, y, cur, theta)\n s, x, y, cur, theta = extend_centerline(s, x, y, cur, theta)\n for t in range(TSTEPS+1):\n cur, theta = tan2curv(s, x, y)\n cur_ori = np.copy(cur)\n cur = filter_curvature(cur, t)\n cur_flt = np.copy(cur)\n cur = lag(s, cur, t)\n cur_lag = np.copy(cur)\n beck_bed = build_beck(cur, s, t)\n allxyz = offset_all(x, y, beck_bed, t)\n if t == 0:\n write_xyz_file(allxyz)\n write_mesh_file(allxyz, beck_bed)\n oxbowxList, oxbowyList = [], []\n centerlinexList, centerlineyList = [], []\n if np.mod(t, GPRINT) == 0:\n centerlinexList.append(x)\n centerlineyList.append(y)\n mf.make_figure(x, y, allxyz, cur_ori, cur_flt, cur_lag, s, beck_bed,\n params, t, oxbowxList, oxbowyList, centerlinexList, centerlineyList)\n if t == TSTEPS:\n break\n s, x, y = migration(s, x, y, cur_flt, cur_lag, theta, t)\n s, x, y, oxbowx, oxbowy, found_cutoff = cutoff(s, x, y)\n s, x, y = smooth_centerline(x, y)\n s, x, y, cur, theta = resample_centerline(s, x, y)\n if found_cutoff:\n oxbowxList.append(oxbowx)\n oxbowyList.append(oxbowy)\n make_gif()\n job_done()", "def calculateStatisticalSignificance():\n ##tau HCS pearson\n ml_model_perf = pickle.load(open(\"pickles/ml_model_perf.pkl\", \"rb\"))\n null_model_perf = pickle.load(open(\"pickles/null_model_perf.pkl\", \"rb\"))\n null_dapi_perf = pickle.load(open(\"pickles/single_channel_DAPI_null_model_perf.pkl\", \"rb\"))\n y = np.array([ml_model_perf[0], 
null_model_perf[0], null_dapi_perf[0]]).round(decimals=2)\n stds = [ml_model_perf[1], null_model_perf[1], null_dapi_perf[1]]\n z, p = calculateZScoreAndPValue(m1=y[0], s1=stds[0], n1=17280, m2=y[1], s2=stds[1], n2=17280)\n print(\"stats for HCS pearson, ML vs Null YFP: z: {}, p: {}\".format(z, p))\n z, p = calculateZScoreAndPValue(m1=y[0], s1=stds[0], n1=17280, m2=y[2], s2=stds[2], n2=17280)\n print(\"stats for HCS pearson, ML vs Null DAPI: z: {}, p: {}\".format(z, p))\n \n ##tau HCS MSE\n ml_model_perf = pickle.load(open(\"pickles/ml_model_mse_perf.pkl\", \"rb\"))\n null_model_perf = pickle.load(open(\"pickles/null_model_mse_perf.pkl\", \"rb\"))\n null_dapi_perf = pickle.load(open(\"pickles/single_channel_DAPI_null_model_mse_perf.pkl\", \"rb\"))\n y= np.array([ml_model_perf[0], null_model_perf[0], null_dapi_perf[0]]).round(decimals=2)\n stds = [ml_model_perf[1], null_model_perf[1], null_dapi_perf[1]]\n z, p = calculateZScoreAndPValue(m1=y[1], s1=stds[1], n1=17280, m2=y[0], s2=stds[0], n2=17280)\n print(\"stats for HCS MSE, ML vs Null YFP: z: {}, p: {}\".format(z, p))\n z, p = calculateZScoreAndPValue(m1=y[2], s1=stds[2], n1=17280, m2=y[0], s2=stds[0], n2=17280)\n print(\"stats for HCS MSE, ML vs Null DAPI: z: {}, p: {}\".format(z, p))\n\n ##osteosarcoma ablated pearon\n ##this one is a bit more involved because we have individual means and STDs over a 3-fold cross-val\n ##we have the following for the ablated ML model (sample size, avg pearson, std), one for each fold:\n # (108330 0.7498484453029202 0.12794946936625312)\n # (108330 0.7507672277328549 0.12978897185198424) \n # (108330 0.7512250395547646 0.12858723725044444)\n ##combining to one sample we have mean = .7506, std=.1288\n ##and the following for the Null Model\n #(108330 0.3951239419846807 0.13861514301358197)\n #(108330 0.39522112186984787 0.1387019314192389)\n #(108330 0.3956142180066648 0.13832544923711507)\n ##combining this into one sample, we have: mean = 0.3953, std = .1385\n z, p = calculateZScoreAndPValue(m1=.7506, s1=.1288, n1=108330*3, m2=.3953, s2=.1385, n2=108330*3)\n print(\"stats for osteosarcoma ablated pearson, ML vs Null Model: z: {}, p: {}\".format(z, p))\n\n ##osteosarcoma ablated MSE\n ##ML model performance:\n # (108330 0.5003031 0.25589895)\n # (108330 0.4984656 0.25957793)\n # (108330 0.49754992 0.2571745)\n ##combining to one sample we have mean = 0.4988 , std= .2576\n ##Null Model performance:\n # (108330 1.209752 0.2772303)\n # (108330 1.2095579 0.27740386)\n # (108330 1.2087716 0.27665088)\n ##combining to one sample we have mean = 1.2094 , std= 0.2771\n z, p = calculateZScoreAndPValue(m1=1.2094, s1=.2771, n1=108330*3, m2=.4988, s2=.2576, n2=108330*3)\n print(\"stats for osteosarcoma ablated MSE, ML vs Null Model: z: {}, p: {}\".format(z, p))\n\n ##osteosarcoma raw pearson \n ##ML model performance:\n #(108330 0.8487535502148598, 0.0750789260880985)\n #(108330 0.8482422038817274, 0.0749674444367002)\n # (108330 0.8500693686258434, 0.07491226209365953)\n ##combining to one sample we have mean = .849 , std= 0.075\n ##Null model performance:\n #(108330 0.44372635525546694, 0.11585072713296693)\n #(108330 0.4440357996615424, 0.11573081667714848)\n # (108330 0.4443288449364213, 0.11528081384708891)\n ##combining to one sample we have mean = 0.444 , std= 0.1156\n z, p = calculateZScoreAndPValue(m1=.849, s1=0.075, n1=108330*3, m2=0.444, s2=0.1156, n2=108330*3)\n print(\"stats for osteosarcoma raw pearson, ML vs Null Model: z: {}, p: {}\".format(z, p))\n\n ##osteosarcoma raw MSE\n ##ML model performance:\n 
#(108330 0.3024929, 0.15015785)\n #(108330 0.3035156, 0.1499349)\n # (108330 0.29986125, 0.14982451)\n ##combining to one sample we have mean = 0.302 , std= 0.15\n ##Null model performance\n # (108330 1.1125473, 0.23170146)\n # (108330 1.1119285, 0.23146166)\n # (108330 1.1113423, 0.23056163)\n ##combining to one sample we have mean = 1.1119 , std= 0.2312\n z, p = calculateZScoreAndPValue(m1=1.1119, s1=0.2312, n1=108330*3, m2=0.302, s2=0.15, n2=108330*3)\n print(\"stats for osteosarcoma raw MSE, ML vs Null Model: z: {}, p: {}\".format(z, p))\n\n ##comparing ablated to nonablated pearson\n z, p = calculateZScoreAndPValue(m1=0.849, s1=0.075, n1=108330*3, m2=0.7506, s2=0.1288, n2=108330*3)\n print(\"stats for comparing ablated to non-ablated pearson: z: {}, p: {}\".format(z, p))\n\n ##comparing ablated to nonablated MSE\n z, p = calculateZScoreAndPValue(m1=.4988, s1=.2576, n1=108330*3, m2=0.302, s2=0.15, n2=108330*3)\n print(\"stats for comparing ablated to non-ablated MSE: z: {}, p: {}\".format(z, p))", "def fillDetInfo():\n print('here i am')\n # 1. maps of analysis channel to cpd, and pulser monitor channels\n detCH, pMons = {}, {}\n for ds in [0,1,2,3,4,5,6]:\n f = np.load(\"%s/data/ds%d_detChans.npz\" % (os.environ['LATDIR'], ds))\n detCH[ds] = f['arr_0'].item()\n pMons[ds] = f['arr_1'].item()\n\n # 2. maps of HV and TRAP threshold settings are stored in the DB.\n # make them global, and move them to the runSettings file.\n # FORMAT: {ds : {'det' : [(run1,val1),(run2,val2)...]} }\n detHV, detTH = {}, {}\n\n # load all possible values, as in settingsMgr\n detDB = db.TinyDB(\"%s/calDB-v2.json\" % dsi.latSWDir)\n detPars = db.Query()\n cal = dsi.CalInfo()\n for ds in [0,1,2,3,4,5,6]:\n # for ds in [0]:\n print(\"scanning ds\",ds)\n detTH[ds] = {}\n detHV[ds] = {}\n for key in cal.GetKeys(ds):\n mod = -1\n if \"m1\" in key: mod = 1\n if \"m2\" in key: mod = 2\n for cIdx in range(cal.GetIdxs(key)):\n\n # load the DB records\n dbKeyTH = \"trapThr_%s_c%d\" % (key, cIdx)\n dbValTH = dsi.getDBRecord(dbKeyTH,calDB=detDB,pars=detPars)\n\n dbKeyHV = \"hvBias_%s_c%d\" % (key, cIdx)\n dbValHV = dsi.getDBRecord(dbKeyHV,calDB=detDB,pars=detPars)\n\n # debug: print the record\n # for val in sorted(dbValTH):\n # if len(dbValTH[val])>0:\n # print(val, dbValTH[val])\n # return\n\n # fill the first value\n if len(detTH[ds])==0:\n detTH[ds] = dbValTH\n detHV[ds] = dbValHV\n continue\n\n # check for new threshold values.\n for cpd in detTH[ds]:\n nOld, nNew = len(detTH[ds][cpd]), len(dbValTH[cpd])\n\n # detector just came online\n if nOld==0 and nNew>0:\n detTH[ds][cpd] = dbValTH[cpd]\n continue\n # detector still offline\n if nOld==0 and nNew==0:\n continue\n # detector just went offline\n if nOld>0 and nNew==0:\n continue\n\n # check last run/trap pair against each new one\n prevRun, prevTH = detTH[ds][cpd][-1][0], detTH[ds][cpd][-1][1]\n for val in dbValTH[cpd]:\n thisRun, thisTH = val[0], val[1]\n if thisTH != prevTH:\n detTH[ds][cpd].append([thisRun,thisTH])\n prevTH = thisTH\n\n # check for new HV values.\n for cpd in detHV[ds]:\n\n nOld, nNew = len(detHV[ds][cpd]), len(dbValHV[cpd])\n\n # detector just came online\n if nOld==0 and nNew>0:\n detHV[ds][cpd] = dbValHV[cpd]\n continue\n # detector still offline\n if nOld==0 and nNew==0:\n continue\n # detector just went offline\n if nOld>0 and nNew==0:\n continue\n\n # check last run/trap pair against each new one\n prevRun, prevHV = detHV[ds][cpd][-1][0], detHV[ds][cpd][-1][1]\n for val in dbValHV[cpd]:\n thisRun, thisHV = val[0], val[1]\n if thisHV != 
prevHV:\n print(\"found HV diff. cpd %d prev %dV (run %d) new %dV (run %d)\" % (cpd, prevHV, prevRun, thisHV, thisRun))\n detHV[ds][cpd].append([thisRun,thisHV])\n prevHV = thisHV\n\n # return\n\n # # load the old file and compare\n # # GOAL: improve on this file.\n # # f = np.load(\"%s/data/runSettings.npz\" % dsi.latSWDir)\n # # detHVOld = f['arr_0'].item()\n # # detTHOld = f['arr_1'].item()\n # # detCHOld = f['arr_2'].item()\n # # pMonsOld = f['arr_3'].item()\n #\n # ds = 3\n # print(\"old results, ds\",ds)\n # for cpd in sorted(detTHOld[ds]):\n # if cpd!=\"122\":continue\n # if len(detTHOld[ds][cpd]) > 0:\n # print(cpd, detTHOld[ds][cpd])\n #\n # # for ds in [0,1,2,3,4,5,6]:\n # print(\"thresh results, ds:\",ds)\n # for cpd in sorted(detTH[ds]):\n # # if cpd!=122:continue\n # if len(detTH[ds][cpd]) > 0:\n # print(cpd, detTH[ds][cpd])\n\n\n np.savez(\"%s/data/runSettings-v2.npz\" % dsi.latSWDir,detHV,detTH,detCH,pMons)", "def bench():\n \n clock = Signal(bool(False))\n reset = Signal(bool(False))\n strobe = Signal(bool(False))\n serialOut = Signal(bool(False))\n load = Signal(bool(False))\n ldac = Signal(bool(False))\n clkDacOut = Signal(bool(False))\n busy = Signal(bool(False))\n vrefTopA = Signal(intbv(0)[8:])\n vrefTopB = Signal(intbv(0)[8:])\n vrefBotA = Signal(intbv(0)[8:])\n vrefBotB = Signal(intbv(0)[8:])\n \n\n dut_dac_controller = dac_controller(clock, reset, vrefTopA, vrefTopB, vrefBotA, vrefBotB, strobe, \n serialOut, load, ldac, clkDacOut, busy)\n \n @always(delay(PERIOD//2))\n def clkgen():\n clock.next = not clock\n\n @instance\n def stimulus():\n reset.next = True\n strobe.next = False\n vrefTopA.next = 0\n vrefTopB.next = 0\n vrefBotA.next = 0\n vrefBotB.next = 0\n yield delay(100)\n reset.next = False\n yield busy.negedge\n vrefTopA.next = 100\n vrefTopB.next = 120\n vrefBotA.next = 50\n vrefBotB.next = 20\n strobe.next = True\n yield busy.posedge\n strobe.next = False\n yield busy.negedge\n vrefTopA.next = 100\n vrefTopB.next = 120\n vrefBotA.next = 130\n vrefBotB.next = 20\n strobe.next = True\n yield busy.posedge\n strobe.next = False\n yield busy.negedge\n vrefTopA.next = 100\n vrefTopB.next = 120\n vrefBotA.next = 50\n vrefBotB.next = 220\n strobe.next = True\n yield busy.posedge\n strobe.next = False\n yield busy.negedge\n vrefTopA.next = 100\n vrefTopB.next = 120\n vrefBotA.next = 150\n vrefBotB.next = 220\n strobe.next = True\n yield busy.posedge\n strobe.next = False\n yield busy.negedge\n vrefTopA.next = 220\n vrefTopB.next = 148\n vrefBotA.next = 60\n vrefBotB.next = 37\n strobe.next = True\n yield busy.posedge\n strobe.next = False\n yield busy.negedge\n yield delay(1000) \n raise StopSimulation\n\n return dut_dac_controller, clkgen, stimulus", "def test4():\n\t\n\tprint('This takes a while to compute - be patient!')\n\t\n\td = np.linspace(-15000,15000,300)\n\t#Voigt\n\t#p_dict = {'Bfield':700,'rb85frac':1,'Btheta':90*np.pi/180,'lcell':75e-3,'T':84,'Dline':'D2','Elem':'Cs'}\n\tp_dict = {'Bfield':1000,'rb85frac':1,'Btheta':88*np.pi/180,'Bphi':00*np.pi/180,'lcell':75e-3,'T':93,'Dline':'D2','Elem':'Cs'}\n\tpol = np.array([1.0,0.0,0.0])\n\tTVx = get_spectra(d,pol,p_dict,outputs=['I_M45','I_P45','Ix','Iy','S0','Iz'])\n\t\n\tfig2 = plt.figure()\n\tax1a = fig2.add_subplot(411)\n\tax2a = fig2.add_subplot(412,sharex=ax1a)\n\tax3a = fig2.add_subplot(413,sharex=ax1a)\n\tax4a = 
fig2.add_subplot(414,sharex=ax1a)\n\t\n\tax1a.plot(d,TVx[0],'r',lw=2,label=r'$I_{-45}$')\n\tax2a.plot(d,TVx[1],'b',lw=2,label=r'$I_{+45}$')\n\tax3a.plot(d,TVx[2],'r',lw=2,label=r'$I_x$')\n\tax4a.plot(d,TVx[3],'b',lw=2,label=r'$I_y$')\n\tax4a.plot(d,TVx[0]+TVx[1],'r:',lw=3.5,label=r'$I_{+45}+I_{-45}$')\n\tax4a.plot(d,TVx[2]+TVx[3],'k:',lw=2.5,label=r'$I_x + I_y$')\n\tax4a.plot(d,TVx[4],'g--',lw=1.5,label='$S_0$')\n#\tax4a.plot(d,TVx[5],'c--',lw=2.5,label='$I_z$')\n\t\n\t\n\tax4a.set_xlabel('Detuning (MHz)')\n\tax1a.set_ylabel('I -45')\n\tax2a.set_ylabel('I +45')\n\tax3a.set_ylabel('Ix')\n\tax4a.set_ylabel('Iy')\n\t\n\tax4a.set_xlim(d[0],d[-1]+3000)\n\tax4a.legend(loc=0)\n\t\n\tplt.show()", "def calculate_all_metrcis(self):\n self.calculate_gc_metrcis()\n self.calculate_sam_metrics()\n self.calculate_classification_metrics()\n self.calculate_losses()", "def perform_event_analysis():\n\n tol = 2.0 # Arcmin\n calib_on_colours = False\n\n params = get_args()\n\n log = start_log(params)\n\n (star_catalog,image_trios,catalog_header) = read_combined_star_catalog(params,log)\n\n lightcurves = read_lightcurves(params,log)\n\n target = find_target_data(params,star_catalog,lightcurves,image_trios,log)\n\n (source, blend) = calc_source_blend_params(params,log)\n\n source = calc_source_lightcurve(source, target, log)\n\n measure_photometric_source_colours(params,target,log)\n\n (det_idx, cat_idx, close_cat_idx) = index_valid_star_entries(star_catalog,\n target,tol,log,\n valid_cat=True)\n\n deltas = calibrate_instrumental_colour_colour_diagram(params,star_catalog,\n catalog_header,target,\n det_idx,cat_idx,close_cat_idx,\n log,\n calib=calib_on_colours)\n\n RC = localize_red_clump(star_catalog,close_cat_idx,log)\n\n analyse_colour_mag_diagrams(params,star_catalog,catalog_header,\n target, source,blend,RC,\n det_idx,cat_idx,close_cat_idx,log)\n\n RC = measure_RC_offset(params,RC,target,log)\n\n (target,source,blend) = calc_phot_properties(target, source, blend, RC, log)\n\n plot_colour_colour_diagram(params,star_catalog,catalog_header,\n target, source, blend, RC,\n det_idx,cat_idx,close_cat_idx, log)\n\n (source, blend) = match_source_blend_isochrones(params,source,blend,log)\n\n (source, blend) = calc_source_blend_ang_radii(source, blend, log)\n\n (source, blend) = calc_source_blend_physical_radii(source, blend, log)\n\n (source,blend) = calc_source_blend_distance(source, blend, RC, log)\n\n lens = calc_lens_parameters(params, source, RC, log)\n\n output_red_clump_data_latex(params,RC,log)\n\n output_source_blend_data_latex(params,source,blend,log)\n\n output_lens_parameters_latex(params,source,lens,log)", "def main(argv):\n import scipy as sc\n import scipy.integrate as integrate\n import matplotlib.pylab as p\n\n def dCR_dt(pops, t=0):\n \"\"\"returns the growth rate of consumer and resource population at any given time step\"\"\"\n\n R = pops[0]\n C = pops[1]\n K = 10000000\n dRdt = r * R * (1-(R/K)) - a * R * C\n dCdt = -z * C + e * a * R * C\n\n return sc.array([dRdt, dCdt])\n\n #integrate from time point 0 to 15, using 1000 sub-divisions of time\n t = sc.linspace(0, 15, 1000)\n\n def plot(pops):\n f1 = p.figure()\n p.plot(t, pops[:,0], 'g-', label='Resource density') # Plot\n p.plot(t, pops[:,1] , 'b-', label='Consumer density')\n p.grid()\n p.legend(loc='best')\n p.xlabel('Time')\n p.ylabel('Population density')\n p.title('Consumer-Resource population dynamics')\n #p.show()# To display the figure\n f1.savefig('../Results/LV2_model.pdf')\n\n f2 = p.figure()\n p.plot(pops[:,0], pops[:,1], 'r-', 
label='Consumer density') # Plot\n p.grid()\n p.xlabel('Resource Density')\n p.ylabel('Consumer density')\n p.title('Consumer-Resource population dynamics')\n #p.show()# To display the figure\n f2.savefig('../Results/LV2_second_model.pdf')\n\n return f1,f2\n\n if len(argv) < 5:\n r = 1\n a = 0.1\n z = 1.5\n e = 0.75\n else:\n r,a,z,e = float(sys.argv[1]), float(sys.argv[2]), float(sys.argv[3]), float(sys.argv[4])\n #integrate from time point 0 to 15, using 1000 sub-divisions of time\n t = sc.linspace(0, 15, 1000)\n #Set the initial conditions for the two populations\n R0 = 10\n C0 = 5\n RC0 = sc.array([R0, C0])\n #numerically integrate this system forward from those starting conditions\n pops, infodict = integrate.odeint(dCR_dt, RC0, t, full_output=True)\n print(\"final Consumer and Resource population values are:\", pops[-1]) #prints final values\n plot(pops)\n return 0", "def run_performance():\n # Create a Struct data instance from config\n inputs = Struct(config)\n inputs.throttle = throttle\n # Get oxidizer properties at the given temperature\n n2o = n2o_properties(inputs.ox.T_tank)\n # Our integration variables are oxidizer mass and liquid oxidizer volume\n Mox = n2o.rho_l*(inputs.ox.liquid_V) + n2o.rho_g*(inputs.ox.tank_V-inputs.ox.liquid_V)\n if inputs.options.output_on:\n print(\"Initial oxidizer mass: {} kg.\".format(Mox))\n\n start = time.perf_counter() # Start timer for integration\n\n time, record = integration(inputs) # Time = time for integration, record = output data\n F_thrust = record.F_thrust\n p_cc = record.p_cc\n p_oxtank = record.p_oxtank\n p_oxpresstank = record.p_oxpresstank\n p_fueltank = record.p_fueltank\n p_fuelpresstank = record.p_fuelpresstank\n p_oxmanifold = record.p_oxmanifold\n T_oxtank = record.T_oxtank\n T_cc = record.T_cc\n area_core = record.area_core\n OF = record.OF_i\n gamma_ex = record.gamma_ex\n m_dot_ox = record.m_dot_ox\n m_dot_fuel = record.m_dot_fuel\n p_crit = record.p_crit\n m_dot_ox_crit = record.m_dot_ox_crit\n M_e = record.M_e\n p_exit = record.p_exit\n p_shock = record.p_shock\n\n time_elapsed = start-time.perf_counter() # Stop the timer and print elapsed time\n if inputs.options.output_on:\n print(\"Time elapsed for this timestep: {} sec.\".format(time_elapsed))", "def run_baseline_right_away(self, n_days_base, sim_time):\n baseline_power = np.zeros([sim_time, ])\n baseline_cycles = np.zeros([sim_time, ])\n baseline_Tin = np.zeros([sim_time, ])\n baseline_std_Tin = np.zeros([sim_time, ])\n baseline_Tin_max = np.zeros([sim_time, ])\n baseline_Tin_min = np.zeros([sim_time, ])\n baseline_soc = np.zeros([sim_time, ]) \n baseline_std_soc = np.zeros([sim_time, ]) \n\n EIRrated = 0.31019\n Qrated=14600\n\n # inputs and outputs path\n inputs_file='./fleets/HVAC_fleet/data_file/LasVegas_HighCDD.csv'\n bldg_file='./fleets/HVAC_fleet/data_file/normal_building_para.xlsx'\n save_dir='./fleets/HVAC_fleet/data_file/baseline'\n\n\n #read in weather data and increase resolution to match time step of simulation\n inputs=pd.read_csv(inputs_file, sep=',',skipfooter=48,engine='python')\n inputs[c.COL_DATETIME]=pd.to_datetime(inputs[c.COL_DATETIME])\n inputs=inputs.set_index(c.COL_DATETIME)\n inputs_ts=inputs.resample(str(self.sim_step)+'T').interpolate()\n \n for day in range(n_days_base):\n print(\"Day %i\" %(day+1))\n \n #initialize dataframes\n timeB = np.array(np.arange(0,60*60*24,self.sim_step))\n plot_timeB = np.array(np.arange(0,24,self.sim_step/3600))\n\n Tin=pd.DataFrame(data=0,index=timeB,columns=range(0,self.numHVAC))\n 
Tmass=pd.DataFrame(data=0,index=timeB,columns=range(0,self.numHVAC))\n Twall=pd.DataFrame(data=0,index=timeB,columns=range(0,self.numHVAC))\n Tattic=pd.DataFrame(data=0,index=timeB,columns=range(0,self.numHVAC))\n ACstatus=pd.DataFrame(data=0,index=timeB,columns=range(0,self.numHVAC))\n dTin=pd.DataFrame(data=0,index=timeB,columns=range(0,self.numHVAC))\n dTmass=pd.DataFrame(data=0,index=timeB,columns=range(0,self.numHVAC))\n dTwall=pd.DataFrame(data=0,index=timeB,columns=range(0,self.numHVAC))\n dTattic=pd.DataFrame(data=0,index=timeB,columns=range(0,self.numHVAC))\n Power=pd.DataFrame(data=0,index=timeB,columns=range(0,self.numHVAC))\n cycles=pd.DataFrame(data=0,index=timeB,columns=range(0,self.numHVAC))\n SOC=pd.DataFrame(data=0,index=timeB,columns=range(0,self.numHVAC))\n \n num_homes = self.numHVAC \n \n #Load building characteristics from file\n df_bldg=pd.DataFrame.from_csv(bldg_file,sep=',')\n \n #Initialize Temperatures and assign building and AC characteristics\n b=[0]*num_homes\n ac=[0]*num_homes\n\n for i in range(0,num_homes):\n b[i]=building(df_bldg.T_in[i],df_bldg.T_mass[i],df_bldg.T_wall[i],df_bldg.T_attic[i],df_bldg.Rwall[i],df_bldg.Rattic[i],df_bldg.Rwin[i],df_bldg.SHGC[i],df_bldg.Cwall[i],df_bldg.Cin[i],df_bldg.C1[i],df_bldg.C2[i],df_bldg.C3[i],df_bldg.Cattic[i],df_bldg.Rroof[i],df_bldg.Cmass[i],df_bldg.Rmass[i],df_bldg.Sp1[i],df_bldg.Sp2[i],df_bldg.Sp3[i],df_bldg.Qrated[i],df_bldg.EIRrated[i],df_bldg.TinWB[i],df_bldg.Initial_On[i])\n Tin.iloc[0,i]=b[i].T_in\n Tmass.iloc[0,i]=b[i].T_mass\n Twall.iloc[0,i]=b[i].T_wall\n Tattic.iloc[0,i]=b[i].T_attic\n\n ac[i]=AC(df_bldg.Qrated[i],df_bldg.EIRrated[i])\n \n #Main simulation loop \n for i in range(0,int(1440*60/self.sim_step)):\n for j in range(0,num_homes): \n \n if i>0: #initialize AC status for each timestep to prior state\n ACstatus.iloc[i,j]=ACstatus.iloc[i-1,j]\n else: #Set AC status based on file input for first timestep\n ACstatus.iloc[i,j]=b[j].Initial_On\n if Tin.iloc[i,j]>=self.Tset+self.deadband and ACstatus.iloc[max(i-self.shortcycle_ts,0):i,j].sum()==0: #if temp is above deadband and unit has not run in past duration corresponding to short cycle timer turn on unit\n ACstatus.iloc[i,j]=1.0\n if Tin.iloc[i,j]<=self.Tset-self.deadband: #if temperature is below bottom deadband, turn off unit\n ACstatus.iloc[i,j]=0.0\n #count cycles\n if i>0 and ACstatus.iloc[i-1,j]==1.0 and ACstatus.iloc[i,j]==0.0:\n cycles.iloc[i,j]=1.0\n \n #calculate power use for each AC based on status\n Power.iloc[i,j]=ACstatus.iloc[i,j]*Capacity(ac[j],inputs_ts[c.COL_TOUT][i],b[j].TinWB)*EIR(ac[j],inputs_ts[c.COL_TOUT][i],b[j].TinWB)\n \n # calculate SOC for each AC\n SOC.iloc[i,j] = (self.Tset+self.deadband - Tin.iloc[i,j])/(2*self.deadband)\n\n #building model dT calculations\n ts = self.sim_step\n dTin.iloc[i,j]=ts*1.0/b[j].Cin*((Twall.iloc[i,j]-Tin.iloc[i,j])*2.0/b[j].Rwall\n +(Tattic.iloc[i,j]-Tin.iloc[i,j])/b[j].Rattic\n +(Tmass.iloc[i,j]-Tin.iloc[i,j])/b[j].Rmass+inputs_ts[c.COL_QIHL][i]*b[j].C1*b[j].Sp1\n +inputs_ts[c.COL_RADWIN][i]*b[j].SHGC*25.76*b[j].C3*b[j].Sp3\n -ACstatus.iloc[i,j]*Capacity(ac[j],inputs_ts[c.COL_TOUT][i],b[j].TinWB)*SHR(inputs_ts[c.COL_TOUT][i],Tin.iloc[i,j],b[j].TinWB)*b[j].C2*b[j].Sp2\n +(inputs_ts[c.COL_TOUT][i]-Tin.iloc[i,j])/b[j].Rwin)\n dTmass.iloc[i,j]=ts*1.0/b[j].Cmass*((Tin.iloc[i,j]-Tmass.iloc[i,j])/b[j].Rmass\n +inputs_ts[c.COL_QIHL][i]*b[j].C1*(1-b[j].Sp1)\n +inputs_ts[c.COL_RADWIN][i]*b[j].SHGC*25.76*b[j].C3*(1-b[j].Sp3)\n 
-ACstatus.iloc[i,j]*Capacity(ac[j],inputs_ts[c.COL_TOUT][i],b[j].TinWB)*SHR(inputs_ts[c.COL_TOUT][i],Tin.iloc[i,j],b[j].TinWB)*b[j].C2*(1-b[j].Sp2))\n dTwall.iloc[i,j]=ts*1.0/b[j].Cwall*((inputs_ts['Tsolw'].iloc[i]-Twall.iloc[i,j])*2.0/b[j].Rwall\n +(Tin.iloc[i,j]-Twall.iloc[i,j])*2.0/b[j].Rwall)\n dTattic.iloc[i,j]=ts*1.0/b[j].Cattic*((inputs_ts['Tsolr'].iloc[i]-Tattic.iloc[i,j])/b[j].Rroof\n +(Tin.iloc[i,j]-Tattic.iloc[i,j])/b[j].Rattic)\n \n #calculate temperatures for next time step\n if i<(1440*60/ts-1):\n Tin.iloc[i+1,j]=Tin.iloc[i,j]+dTin.iloc[i,j]\n Tmass.iloc[i+1,j]=Tmass.iloc[i,j]+dTmass.iloc[i,j]\n Tattic.iloc[i+1,j]=Tattic.iloc[i,j]+dTattic.iloc[i,j]\n Twall.iloc[i+1,j]=Twall.iloc[i,j]+dTwall.iloc[i,j]\n\n # calculate peak power and plot data\n PeakPower=Power.sum(axis=1).max()*1.0 # plot out the peak power in kW\n Plot_Power=np.full(len(plot_timeB),PeakPower/1000.0)\n fig,ax = plt.subplots(2,1,figsize=(6,8),sharey='row')\n p1=ax[0].plot(plot_timeB,Power.sum(axis=1)/1000.0,color='blue',linestyle='solid',label='Baseline')\n ax[0].plot(plot_timeB,Plot_Power,color='black',linestyle='--',label='Targeted Power')\n ax[0].set_ylabel('Total Power (kW)')\n ax[1].set_ylabel('Indoor Temperature ($^\\circ$C)')\n ax[1].set_xlabel('Hour of Day')\n p2=ax[1].plot(plot_timeB,Tin.mean(axis=1),color='blue',linestyle='solid',label='Baseline Avg')\n p3=ax[1].plot(plot_timeB,Tin.max(axis=1),color='blue',linestyle='dotted',label='Baseline Min/Max')\n p4=ax[1].plot(plot_timeB,Tin.min(axis=1),color='blue',linestyle='dotted',label='_nolegend_')\n \n # Saves baseline data to csv\n #ToDo: needs cut the whole day simulation to compare only segment with providing grid services\n # But, that requires the simulation steps information.\n\n Power.to_csv(str(save_dir)+'\\\\Power_base'+'.csv')\n Tin.to_csv(str(save_dir)+'\\\\Tin_base'+'.csv')\n SOC.to_csv(str(save_dir)+'\\\\SOC_base'+'.csv')\n cycles.to_csv(str(save_dir)+'\\\\Cycles_base'+'.csv')\n \n # return values\n baseline_power = Power.sum(axis = 1)\n baseline_cycles = cycles.sum(axis = 1)\n\n baseline_soc = SOC.mean(axis = 1)\n baseline_std_soc = SOC.std(axis = 1)\n\n baseline_Tin = Tin.mean(axis = 1)\n baseline_std_Tin = Tin.std(axis = 1)\n baseline_Tin_max = Tin.max(axis=1)\n baseline_Tin_min = Tin.min(axis=1)\n \n return baseline_soc, baseline_std_soc, baseline_power, baseline_cycles, baseline_Tin, baseline_std_Tin, baseline_Tin_max, baseline_Tin_min", "def test_freyberg_full_cov():\n model_d = \"ies_freyberg\"\n test_d = os.path.join(model_d, \"master_draw_test\")\n template_d = os.path.join(model_d, \"test_template\")\n if not os.path.exists(template_d):\n raise Exception(\"template_d {0} not found\".format(template_d))\n if os.path.exists(test_d):\n shutil.rmtree(test_d)\n shutil.copytree(template_d,test_d)\n pst = pyemu.Pst(os.path.join(test_d, \"pest.pst\"))\n pst.parameter_data.loc[:,\"partrans\"] = \"log\"\n \n pst.control_data.noptmax = 0\n pst.pestpp_options = {}\n num_reals = 5000\n\n #diagonal cov\n #pst.pestpp_options[\"parcov_filename\"] = \"prior.jcb\"\n pst.pestpp_options[\"ies_num_reals\"] = num_reals\n pst.pestpp_options[\"ies_include_base\"] = \"false\"\n\n pst.write(os.path.join(test_d, \"pest.pst\"))\n #cov = pyemu.Cov.from_binary(os.path.join(test_d, \"prior.jcb\"))\n cov = pyemu.Cov.from_parameter_data(pst)\n\n pe = pyemu.ParameterEnsemble.from_gaussian_draw(pst, cov, num_reals, use_homegrown=True)\n pe.to_csv(os.path.join(test_d, \"pyemu_pe.csv\"))\n\n # pyemu.helpers.start_slaves(template_d, exe_path, \"pest.pst\", 
num_slaves=10,\n # slave_root=model_d, master_dir=test_d)\n # pyemu.helpers.run(exe_path + \" pest.pst\", cwd=test_d)\n # print(\"loading df\")\n # df = pd.read_csv(os.path.join(test_d, \"pest.0.par.csv\"), index_col=0).apply(np.log10)\n # df.columns = [c.lower() for c in df.columns]\n # pe = pe.apply(np.log10)\n # pe_corr = pe.corr()\n # df_corr = df.corr()\n\n # diff_tol = 0.05\n\n # for c in df.columns:\n # if c not in pe.columns:\n # continue\n\n # m1, m2 = pe.loc[:, c].mean(), df.loc[:, c].mean()\n # s1, s2 = pe.loc[:, c].std(), df.loc[:, c].std()\n # mdiff = np.abs((m1 - m2))\n # sdiff = np.abs((s1 - s2))\n # print(c, mdiff, sdiff)\n # assert mdiff < diff_tol, \"mean fail {0}:{1},{2},{3}\".format(c, m1, m2, mdiff)\n # assert sdiff < diff_tol, \"std fail {0}:{1},{2},{3}\".format(c, s1, s2, sdiff)\n\n # # look for bias\n # diff = df - pe\n # assert diff.mean().mean() < 0.01\n\n\n #full cov\n pst.pestpp_options[\"parcov_filename\"] = \"prior.jcb\"\n pst.pestpp_options[\"ies_num_reals\"] = num_reals\n pst.pestpp_options[\"ies_include_base\"] = \"false\"\n pst.pestpp_options[\"ies_group_draws\"] = 'true'\n pst.parameter_data.loc[pst.par_names[0],\"pargp\"] = \"test\"\n pst.write(os.path.join(test_d,\"pest.pst\"))\n cov = pyemu.Cov.from_binary(os.path.join(test_d,\"prior.jcb\"))\n\n pe = pyemu.ParameterEnsemble.from_gaussian_draw(pst,cov,num_reals,use_homegrown=True)\n pe.to_csv(os.path.join(test_d,\"pyemu_pe.csv\"))\n\n # pyemu.helpers.start_slaves(template_d, exe_path, \"pest.pst\", num_slaves=10,\n # slave_root=model_d, master_dir=test_d)\n pyemu.helpers.run(exe_path+\" pest.pst\",cwd=test_d)\n df = pd.read_csv(os.path.join(test_d, \"pest.0.par.csv\"), index_col=0).apply(np.log10)\n df.columns = [c.lower() for c in df.columns]\n pe = pe.apply(np.log10)\n pe_corr = pe.corr()\n df_corr = df.corr()\n\n for i,p1 in enumerate(pst.adj_par_names):\n for p2 in pst.adj_par_names[i+1:]:\n c1 = pe_corr.loc[p1,p2]\n c2 = df_corr.loc[p1,p2]\n #print(p1,p2,c1,c2)\n\n diff_tol = 0.05\n\n for c in df.columns:\n if c not in pe.columns:\n continue\n\n m1, m2 = pe.loc[:,c].mean(), df.loc[:,c].mean()\n s1,s2 = pe.loc[:,c].std(), df.loc[:,c].std()\n mdiff = np.abs((m1 - m2))\n sdiff = np.abs((s1 - s2))\n #print(c,mdiff,sdiff)\n assert mdiff < diff_tol,\"mean fail {0}:{1},{2},{3}\".format(c,m1,m2,mdiff)\n assert sdiff < diff_tol,\"std fail {0}:{1},{2},{3}\".format(c,s1,s2,sdiff)\n\n #look for bias\n diff = df - pe\n assert diff.mean().mean() < 0.01", "def test_GLODAPv2_Csys(self):\n # load GLODAP data\n gd = pd.read_csv(\"tests/test_data/GLODAP_data/GLODAPv2_pH_DIC_ALK_subset.csv\")\n gd.dropna(\n subset=[\n \"phtsinsitutp\",\n \"temperature\",\n \"salinity\",\n \"tco2\",\n \"talk\",\n \"pressure\",\n \"phosphate\",\n \"silicate\",\n ],\n inplace=True,\n )\n gd.pressure /= 10 # convert pressure to bar\n\n # set negative nutrient values to zero\n gd.phosphate[gd.phosphate < 0] = 0\n gd.silicate[gd.silicate < 0] = 0\n \n # exclude weird cruise 270 data\n gd = gd.loc[gd.cruise != 270]\n\n # calculate pH from TA and DIC\n cpH = Csys(\n TA=gd.talk,\n DIC=gd.tco2,\n T_in=gd.temperature,\n S_in=gd.salinity,\n P_in=gd.pressure,\n PT=gd.phosphate,\n SiT=gd.silicate,\n BT=415.7,\n )\n pH_resid = gd.phtsinsitutp - cpH.pHtot\n pH_median = np.median(pH_resid)\n pH_pc95 = np.percentile(pH_resid, [2.5, 97.5])\n\n self.assertLessEqual(abs(pH_median), 0.005, msg=\"pH Offset <= 0.005\")\n self.assertTrue(all(abs(pH_pc95) <= 0.05), msg=\"pH 95% Conf <= 0.05\")\n\n # calculate TA from pH and DIC\n cTA = Csys(\n 
pHtot=gd.phtsinsitutp,\n DIC=gd.tco2,\n T_in=gd.temperature,\n S_in=gd.salinity,\n P_in=gd.pressure,\n PT=gd.phosphate,\n SiT=gd.silicate,\n BT=415.7,\n )\n TA_resid = gd.talk - cTA.TA\n TA_median = np.median(TA_resid)\n TA_pc95 = np.percentile(TA_resid, [2.5, 97.5])\n\n self.assertLessEqual(abs(TA_median), 0.5, msg=\"TA Offset <= 0.5\")\n self.assertTrue(all(abs(TA_pc95) < 13), msg=\"TA 95% Conf <= 13\")\n\n # calculate DIC from TA and pH\n cDIC = Csys(\n pHtot=gd.phtsinsitutp,\n TA=gd.talk,\n T_in=gd.temperature,\n S_in=gd.salinity,\n P_in=gd.pressure,\n PT=gd.phosphate,\n SiT=gd.silicate,\n BT=415.7,\n )\n DIC_resid = gd.tco2 - cDIC.DIC\n DIC_median = np.median(DIC_resid)\n DIC_pc95 = np.percentile(DIC_resid, [2.5, 97.5])\n\n self.assertLessEqual(abs(DIC_median), 0.5, msg=\"DIC Offset <= 0.5\")\n self.assertTrue(all(abs(DIC_pc95) < 13), msg=\"DIC 95% Conf <= 13\")\n\n return", "def main(detectorList,\n centerTimesFile,\n timeOffsetsFile,\n cat1_segment_file,\n cat24_segment_file,\n output_file,\n verbosity, windowBeginOffset, windowEndOffset, transientTime, windowLength):\n\n # ---- Number of jobs equals number of centerTimes pass_inited in.\n nJobs = len(centerTimes)\n\n # ---- Initially assume all ifos FAIL our criteria for all of\n # the jobs.\n # ---- Note that unlike other pass_init flags this has one element\n # per job (rather than one per trigger etc.).\n pass_init = np.zeros(nJobs,len(detectorList))\n\n ###############################################################################\n # Apply network test.\n ###############################################################################\n\n # ---- Loop over jobs, each job represents a choice\n # of segment centerTimes and timeOffsets.\n for iJob in range(0,nJobs):\n\n # ---- Loop over ifos we are considering.\n for iIfo in range(0,len(detectorList)):\n\n #########################################################################\n # Unslide centerTimes.\n #########################################################################\n\n # ---- Before applying vetoSeg cuts we must unslide the triggers.\n unslidCenterTimes[iJob,iIfo] = centerTimes[iJob] + timeOffsets[iJob,iIfo]\n\n #########################################################################\n # Check that we have cat1 flags raised in [-128,+128]s interval about \n # unslid centerTimes.\n #########################################################################\n\n # ---- Define our [-128,+128]s interval about the (unslid) centerTimes.\n goodBefore = -windowBeginOffset+transientTime\n duration = windowLength\n\n # ---- Find intersections between cat1 segments and the [-128,+128]s \n # interval about unslid centerTimes.\n # ---- Passing the shorter list to Coincidence2 first\n # speeds it up. 
\n\n coincOut=Coincidence2(unslidCenterTimes(iJob,iIfo)-goodBefore,duration,\n cat1_seglist(iIfo).gpsStart,\n cat1_seglist(iIfo).duration)\n\n # ---- We require an info to have cat1 flags (i.e., be in science mode)\n # for the full duration of the [-128,+128]s.\n if ~isempty(coincOut):\n # ---- If our interval is completely contained within a cat1 segment\n # we should only have one intersection and it should have the \n # duration of our interval.\n if (size(coincOut,1) == 1) and (coincOut(1,2) == duration):\n pass_init[iJob,iIfo] = 1\n\n if (verboseFlag == 1):\n print >> sys.stdout, '#s has good cat 1 DQ for job #d \\n'.format(detectors_considered[iIfo], iJob )\n\n #########################################################################\n # Check for cat2 flags raised in [-5,+1]s interval about unslid \n # centerTimes.\n #########################################################################\n\n # ---- Define our [-5,+1]s interval about the (unslid) centerTimes.\n goodBefore = 5\n duration = 6\n\n # ---- Find intersections between cat2 segments and the [-5,+1]s \n # interval about unslid centerTimes.\n # ---- Passing the shorter list to Coincidence2 first\n # speeds it up. \n coincOut=Coincidence2(unslidCenterTimes(iJob,iIfo)-goodBefore,duration,\n cat2_seglist(iIfo).gpsStart,\n cat2_seglist(iIfo).duration)\n\n # ---- If there were any coincidences between our interval and the\n # vetoSegs we must discard this job. We don't count\n # coincidences with zero duration (edge overlap)\n if not isempty(coincOut) and any(coincOut[:,2]>0):\n # ---- Setting pass_init for this job to zero.\n pass_init[iJob,iIfo] = 0\n\n if (verboseFlag == 1):\n print >> sys.stdout, 'Time killed in [-#f,+#f]s interval about center time \\n'.format(goodBefore,duration-goodBefore)\n print >> sys.stdout,'startTime stopTime\\n'\n print >> sys.stdout,'--------------------------\\n'\n for iCoinc in range(0,len(coincOut[:,1])):\n print >> sys.stdout,'#9.2f #9.2f \\n'.format(\n coincOut(iCoinc,1),\n coincOut(iCoinc,1) + coincOut(iCoinc,2))\n print >> sys.stdout, 'Setting pass_init to zero \\n'\n\n #########################################################################\n # Find deadtime in [-120,+60]s interval about unslid centerTime. \n #########################################################################\n\n # ---- Define our [-120,+60]s interval about the (unslid) centerTimes.\n goodBefore = -windowBeginOffset\n duration = windowEndOffset-windowBeginOffset\n\n # ---- Allowed Threshold on deadtime in on source window is 5#\n killedThresh = 0.05*duration\n\n # ---- Find intersections between cat2 veto segs and our [-120,+60]s\n # on-source region.\n # ---- Passing the shorter list to Coincidence2 first\n # speeds it up \n coincOut=Coincidence2(unslidCenterTimes(iJob,iIfo)-goodBefore,duration,\n cat2_seglist(iIfo).gpsStart,\n cat2_seglist(iIfo).duration)\n\n # ---- Record times killed by segments. 
\n if isempty(coincOut):\n #killedTimes[iJob,iIfo] = [0,0]\n # ---- No times killed by cat2 veto segments.\n liveTimes[iJob,iIfo] = [unslidCenterTimes(iJob,iIfo)-goodBefore,duration]\n else:\n #killedTimes[iJob,iIfo] = [coincOut(:,1),coincOut(:,2)]\n # ---- Some times killed by cat2 veto segments.\n # If any one ifo has deadtime >= killedThresh\n # we will discard it straight away.\n # ---- Sum up duration of intersections to find deadtime.\n totDeadTime_cat2[iJob,iIfo] = sum(coincOut[:,2])\n\n if totDeadTime_cat2[iJob,iIfo] >= killedThresh:\n pass_init[iJob,iIfo] = 0\n\n liveTimes[iJob,iIfo] = ComplementSegmentList(coincOut[:,1],coincOut[:,2],\n unslidCenterTimes(iJob,iIfo)-goodBefore,\n unslidCenterTimes(iJob,iIfo)-goodBefore+duration)\n\n # ---- Resliding liveTimes in order to measure how much of\n # deadtime in our interval.\n # To do this we must SUBTRACT timeOffsets.\n reslidLiveTimes[iJob,iIfo] = [liveTimes[iJob,iIfo][:,1] -\n timeOffsets(iJob,iIfo),\n liveTimes[iJob,iIfo][:,2]]\n\n\n # ---- For current job, find intersection of reslid liveTimes from all ifos\n # that are still in our network.\n\n intersectReslidLiveTimes[iJob] = [0, Inf]\n for iIfo in range(0, len(detectors_considered)):\n # ---- We only care about the ifo's livetime if it is still in\n # our network. \n if pass_init(iJob,iIfo):\n coincOut = Coincidence2(intersectReslidLiveTimes[iJob][:,1],\n intersectReslidLiveTimes[iJob][:,2],\n reslidLiveTimes[iJob,iIfo][:,1],\n reslidLiveTimes[iJob,iIfo][:,2])\n\n if isempty(coincOut):\n intersectReslidLiveTimes[iJob] = [0,0]\n else:\n intersectReslidLiveTimes[iJob] = [coincOut[:,1], coincOut[:,2]]\n\n\n # ---- For current job, find time killed by cat2 veto flags.\n finalKilledTimes[iJob] = ComplementSegmentList(\n intersectReslidLiveTimes[iJob][:,1],\n intersectReslidLiveTimes[iJob][:,2],\n centerTimes[iJob]-goodBefore,centerTimes[iJob]-goodBefore+duration)\n deadtime[iJob] = sum(finalKilledTimes[iJob][:,2])\n\n ############################################################################\n # Discard jobs with >=9s deadtime in [-120,+60]s interval \n # about centerTime. \n ############################################################################\n\n if (verboseFlag == 1):\n print >> sys.stdout, 'Total time killed in [' + num2str(windowBeginOffset) + ',' + num2str(windowEndOffset) + ']s interval about centre time: #9.2f \\n'.format(deadtime(iJob))\n print >> sys.stdout,'startTime stopTime\\n'\n print >> sys.stdout,'--------------------------\\n'\n\n for iCoinc in range(0, len(finalKilledTimes[iJob][:,1])):\n print >> sys.stdout,'#9.2f #9.2f \\n'.format(\n finalKilledTimes[iJob][iCoinc,1],\n finalKilledTimes[iJob][iCoinc,1] +\n finalKilledTimes[iJob][iCoinc,2])\n\n # ---- Check total duration of killed times\n if deadtime(iJob) >= killedThresh:\n\n warning(['Killing network due to cat2 flags in on-source. 
'\n 'We should investigate this network further'])\n\n networkCell[iJob] = ['X']\n\n for iIfo in range(0, len(detectors_considered)):\n if pass_init(iJob,iIfo):\n networkCell[iJob] = [networkCell[iJob], detectors_considered[iIfo]]\n pass_init[iJob,iIfo] = 0\n\n\n if (verboseFlag == 1):\n print >> sys.stdout,'More than #ds killed, setting pass_init to zero \\n'.fromat(killedThresh)\n else:\n networkCell[iJob] = []\n\n #############################################################################\n # Construct network cell array.\n #############################################################################\n\n # ---- Loop over ifos we are considering.\n for iIfo in range(0,len(detectors_considered)):\n if pass_init[iJob,iIfo]:\n networkCell[iJob] = [networkCell[iJob], detectors_considered[iIfo]]", "def execute(self):\n \n # initialize input parameters\n self.hubHt = self.hub_height\n self.ratedPower = self.machine_rating\n self.maxTipSpd = self.max_tip_speed\n self.rotorDiam = self.rotor_diameter\n self.maxCp = self.max_power_coefficient\n self.maxTipSpdRatio = self.opt_tsr\n self.cutInWS = self.cut_in_wind_speed\n self.cutOutWS = self.cut_out_wind_speed\n self.altitude = self.altitude\n\n if self.air_density == 0.0: \n # Compute air density \n ssl_pa = 101300 # std sea-level pressure in Pa\n gas_const = 287.15 # gas constant for air in J/kg/K\n gravity = 9.80665 # standard gravity in m/sec/sec\n lapse_rate = 0.0065 # temp lapse rate in K/m\n ssl_temp = 288.15 # std sea-level temp in K\n \n air_density = (ssl_pa * (1-((lapse_rate*(self.altitude + self.hubHt))/ssl_temp))**(gravity/(lapse_rate*gas_const))) / \\\n (gas_const*(ssl_temp-lapse_rate*(self.altitude + self.hubHt)))\n else:\n \t\tair_density = self.air_density\n\n # determine power curve inputs\n self.reg2pt5slope = 0.05\n \n #self.max_efficiency = self.drivetrain.getMaxEfficiency()\n self.ratedHubPower = self.ratedPower / self.max_efficiency # RatedHubPower\n\n self.omegaM = self.maxTipSpd/(self.rotorDiam/2.) # Omega M - rated rotor speed\n omega0 = self.omegaM/(1+self.reg2pt5slope) # Omega 0 - rotor speed at which region 2 hits zero torque\n Tm = self.ratedHubPower*1000/self.omegaM # Tm - rated torque\n\n # compute rated rotor speed\n self.ratedRPM = (30./pi) * self.omegaM\n \n # compute variable-speed torque constant k\n kTorque = (air_density*pi*self.rotorDiam**5*self.maxCp)/(64*self.maxTipSpdRatio**3) # k\n \n b = -Tm/(self.omegaM-omega0) # b - quadratic formula values to determine omegaT\n c = (Tm*omega0)/(self.omegaM-omega0) # c\n \n # omegaT is rotor speed at which regions 2 and 2.5 intersect\n # add check for feasibility of omegaT calculation 09/20/2012\n omegaTflag = True\n if (b**2-4*kTorque*c) > 0:\n omegaT = -(b/(2*kTorque))-(np.sqrt(b**2-4*kTorque*c)/(2*kTorque)) # Omega T\n #print [kTorque, b, c, omegaT]\n \n windOmegaT = (omegaT*self.rotorDiam)/(2*self.maxTipSpdRatio) # Wind at omegaT (M25)\n pwrOmegaT = kTorque*omegaT**3/1000 # Power at ometaT (M26)\n\n else:\n omegaTflag = False\n windOmegaT = self.ratedRPM\n pwrOmegaT = self.ratedPower\n\n # compute rated wind speed\n d = air_density*np.pi*self.rotorDiam**2.*0.25*self.maxCp\n self.ratedWindSpeed = \\\n 0.33*( (2.*self.ratedHubPower*1000. / ( d))**(1./3.) ) + \\\n 0.67*( (((self.ratedHubPower-pwrOmegaT)*1000.) 
/ (1.5*d*windOmegaT**2.)) + windOmegaT )\n\n # set up for idealized power curve\n n = 161 # number of wind speed bins\n itp = [None] * n\n ws_inc = 0.25 # size of wind speed bins for integrating power curve\n Wind = []\n Wval = 0.0\n Wind.append(Wval)\n for i in xrange(1,n):\n Wval += ws_inc\n Wind.append(Wval)\n\n # determine idealized power curve \n self.idealPowerCurve (Wind, itp, kTorque, windOmegaT, pwrOmegaT, n , omegaTflag)\n\n # add a fix for rated wind speed calculation inaccuracies kld 9/21/2012\n ratedWSflag = False\n # determine power curve after losses\n mtp = [None] * n\n for i in xrange(0,n):\n mtp[i] = itp[i] #* self.drivetrain.getDrivetrainEfficiency(itp[i],self.ratedHubPower)\n #print [Wind[i],itp[i],self.drivetrain.getDrivetrainEfficiency(itp[i],self.ratedHubPower),mtp[i]] # for testing\n if (mtp[i] > self.ratedPower):\n if not ratedWSflag:\n ratedWSflag = True\n mtp[i] = self.ratedPower\n\n self.rated_wind_speed = self.ratedWindSpeed\n self.rated_rotor_speed = self.ratedRPM\n self.power_curve = mtp\n self.wind_curve = Wind\n\n # compute turbine load outputs\n self.rotor_torque = self.ratedHubPower/(self.ratedRPM*(pi/30.))*1000.\n self.rotor_thrust = air_density * self.thrust_coefficient * pi * self.rotor_diameter**2 * (self.ratedWindSpeed**2) / 8.", "def Conn_analysis(self, hazard, path_to_guids, retrofit_key, eretrofit, n_samples):\n\t\tfor fast in range(2):\n\t\t\tif fast == 0:\n\t\t\t\tn_workers = 32\n\t\t\t\tfast_mult = 1.\n\t\t\telif fast == 1: \n\t\t\t\tn_workers = 64\n\t\t\t\tfast_mult = 0.5\n\t\t\t\"\"\" using the probability of failure, rather than leak/break.\n\t\t\t\tassuming that the repair rate is the average of the leak/break\n\t\t\t\trepair rates from hazus.\n\t\t\t\t\t\tbreak \tleak\tavg.\n\t\t\t\t> 20\" - 0.33 \t0.66\t0.5\n\t\t\t\t< 20\"\t0.5\t\t1.0 \t0.75\n\t\t\t\"\"\"\n\t\t\tpipe_reprate = [0.5, 0.75]\t# Fixed pipes per Day per Worker (>20\", <20\" diameter)\n\n\t\t\t# repair time parameters for roads\n\t\t\tif hazard == 'eq':\n\t\t\t\twtp_rep_time_mu = np.array([0.9, 1.9, 32, 95])*fast_mult # mean repair time for water treatement plants for DS2-DS5\n\t\t\t\twtp_rep_time_std = np.array([0.3, 1.2, 31, 65])*fast_mult # std for repair time\t\t\n\n\t\t\t\twps_rep_time_mu = np.array([0.9, 3.1, 13.5, 35])*fast_mult # mean repair time for water treatement plants for DS2-DS5\n\t\t\t\twps_rep_time_std = np.array([0.3, 2.7, 10, 18])*fast_mult # std for repair time\t\t\n\n\t\t\telif hazard == 'tsu':\n\t\t\t\twtp_rep_time_mu = np.array([1, 6, 20, 90])*fast_mult # mean repair time for water treatement plants for DS2-DS5\n\t\t\t\twtp_rep_time_std = np.array([1, 6, 20, 90])*fast_mult # std for repair time\t\t\n\n\t\t\t\twps_rep_time_mu = np.array([1, 6, 20, 240])*fast_mult # mean repair time for water treatement plants for DS2-DS5\n\t\t\t\twps_rep_time_std = np.array([1, 6, 20, 120])*fast_mult # std for repair time\t\t\n\n\t\t\telif hazard == 'cumulative':\n\t\t\t\t\"\"\" assuming that the repair time parameters for cumulative \n\t\t\t\t\tdamage are the max of eq and tsu. 
\"\"\"\n\t\t\t\twtp_rep_time_mu = np.array([1, 6, 32, 95])*fast_mult\n\t\t\t\twtp_rep_time_std = np.array([1, 6, 31, 65])*fast_mult\n\n\t\t\t\twps_rep_time_mu = np.array([1, 6, 20, 240])*fast_mult\n\t\t\t\twps_rep_time_std = np.array([1, 6, 20, 120])*fast_mult\n\n\t\t\twtp_rep_time_cov = wtp_rep_time_std/wtp_rep_time_mu # COV of repiar time\n\t\t\twtp_rep_time_log_med = np.log(wtp_rep_time_mu/np.sqrt(wtp_rep_time_cov**2+1)) # lognormal parameters for repair time model\n\t\t\twtp_rep_time_beta = np.sqrt(np.log(wtp_rep_time_cov**2+1))\n\t\t\twtp_rep_time_covm = wtp_rep_time_beta[:,None]*wtp_rep_time_beta\n\n\t\t\twps_rep_time_cov = wps_rep_time_std/wps_rep_time_mu # COV of repiar time\n\t\t\twps_rep_time_log_med = np.log(wps_rep_time_mu/np.sqrt(wps_rep_time_cov**2+1)) # lognormal parameters for repair time model\n\t\t\twps_rep_time_beta = np.sqrt(np.log(wps_rep_time_cov**2+1))\n\t\t\twps_rep_time_covm = wps_rep_time_beta[:,None]*wps_rep_time_beta\n\n\t\t\trts = [100, 250, 500, 1000, 2500, 5000, 10000]\n\t\t\tcolumn_keys = ['iter_{}' .format(i) for i in range(n_samples)]\n\t\t\tguids = os.listdir(path_to_guids)\n\n\t\t\tbldg_dataset_id = \"5df40388b9219c06cf8b0c80\" # building dataset\n\t\t\tpipe_dataset_id = \"5d2666b5b9219c3c5595ee65\" # water pipes\n\t\t\twterfclty_dataset_id = \"5d266507b9219c3c5595270c\"\n\t\t\tbldg_to_network_id = \"5f171ffbc98cf43417c21381\" # links buildings to road edges\n\t\t\t\n\t\t\t\"\"\" the way critical nodes is setup is best given through an example:\n\t\t\t\twith the setup below, the connectivity analysis\n\t\t\t\tdetermines whether each tax-lot is connected to:\n\t\t\t\t\t- (node 229 OR node 230) AND (node 300)\n\t\t\t\t\n\t\t\t\tso the nodes in each inner lists undergo a logical_or \n\t\t\t\tstatement, whereas these results undergo a logical_and.\n\n\t\t\t\"\"\"\n\n\t\t\tconn = WterConnectivity(self.client)\n\n\t\t\tconn.load_remote_input_dataset(\"buildings\", bldg_dataset_id)\n\t\t\tconn.load_remote_input_dataset(\"pipe_dataset\", pipe_dataset_id)\n\t\t\tconn.load_remote_input_dataset(\"wterfclty_dataset\", wterfclty_dataset_id)\n\t\t\tconn.load_remote_input_dataset(\"building_to_network\", bldg_to_network_id)\n\n\t\t\tconn.set_parameter('n_workers', n_workers)\n\t\t\tconn.set_parameter('pipe_reprate', pipe_reprate)\n\t\t\tconn.set_parameter('wtp_rep_time_log_med', wtp_rep_time_log_med)\n\t\t\tconn.set_parameter('wtp_rep_time_covm', wtp_rep_time_covm)\n\t\t\tconn.set_parameter('wps_rep_time_log_med', wps_rep_time_log_med)\n\t\t\tconn.set_parameter('wps_rep_time_covm', wps_rep_time_covm)\n\n\t\t\tfor efast in range(2):\n\t\t\t\t# --- performing connectivity analysis\n\t\t\t\tfunc = {}\n\t\t\t\trep = {}\n\t\t\t\tfor rt_i, rt in enumerate(rts):\n\t\t\t\t\tprint_msg = '\\tconn_analysis: {}, rt_{}, {}, fast{}, eretrofit{}, efast{}:' \\\n\t\t\t\t\t\t\t\t\t.format(hazard, rt, retrofit_key, fast, eretrofit, efast)\n\t\t\t\t\t\n\t\t\t\t\tconn.set_parameter('prnt_msg', print_msg)\n\n\t\t\t\t\twter2elec_func = 'func_cumulative_{}yr_wter2elec_eretrofit{}_efast{}.csv' \\\n\t\t\t\t\t\t\t\t\t.format(rt, eretrofit, efast)\n\t\t\t\t\twter2elec_func = os.path.join(self.output_path,'..','wter2elec',wter2elec_func)\n\n\t\t\t\t\twter2elec_rept = 'reptime_cumulative_{}yr_wter2elec_eretrofit{}_efast{}.csv' \\\n\t\t\t\t\t\t\t\t\t.format(rt, eretrofit, efast)\n\t\t\t\t\twter2elec_rept = os.path.join(self.output_path,'..','wter2elec',wter2elec_rept)\n\n\t\t\t\t\tpipe_dmg_file = 'pipe_DS_{}_{}yr_{}.csv' .format(hazard, rt, retrofit_key)\n\t\t\t\t\tpipe_dmg_file = 
os.path.join(self.mc_path, pipe_dmg_file)\n\t\t\t\t\t\n\t\t\t\t\twterfclty_dmg_file = 'wterfclty_DS_{}_{}yr_{}.csv' .format(hazard, rt, retrofit_key)\n\t\t\t\t\twterfclty_dmg_file = os.path.join(self.mc_path, wterfclty_dmg_file)\n\n\t\t\t\t\t# ---\n\t\t\t\t\twter2elec_func_dset = Dataset.from_file(wter2elec_func, \"ergo:DamageInventory\")\n\t\t\t\t\tconn.set_input_dataset(\"wter2elec_func\", wter2elec_func_dset)\n\n\t\t\t\t\twter2elec_rept_dset = Dataset.from_file(wter2elec_rept, \"ergo:DamageInventory\")\n\t\t\t\t\tconn.set_input_dataset(\"wter2elec_rep\", wter2elec_rept_dset)\n\t\t\t\t\t\n\t\t\t\t\tpipe_dmg_dset = Dataset.from_file(pipe_dmg_file, \"ergo:DamageInventory\")\n\t\t\t\t\tconn.set_input_dataset(\"pipe_dmg\", pipe_dmg_dset)\n\n\t\t\t\t\twterfclty_damage_dataset = Dataset.from_file(wterfclty_dmg_file, \"ergo:DamageInventory\")\n\t\t\t\t\tconn.set_input_dataset(\"wterfclty_dmg\", wterfclty_damage_dataset)\n\t\t\t\t\t\n\t\t\t\t\tfunc[rt], rep[rt] = conn.WterConn_run()\n\t\t\t\t\t\n\t\t\t\t\t# temp_func = func[rt].head(5)\n\t\t\t\t\t# temp_rep = rep[rt].head(5)\n\t\t\t\t\t# print(temp_func.mean(axis=1))\n\t\t\t\t\t# print(temp_rep.mean(axis=1))\n\n\t\t\t\t# --- writing results for each guid\n\t\t\t\tfor guid_i, guid in enumerate(guids):\n\t\t\t\t\tprnt_msg = 'writing {} guids' .format(len(guids))\n\t\t\t\t\tself.print_percent_complete(prnt_msg, guid_i, len(guids))\n\n\t\t\t\t\to_path = os.path.join(path_to_guids, \n\t\t\t\t\t\t\t\t\t\t guid, \n\t\t\t\t\t\t\t\t\t\t 'mc_results', \n\t\t\t\t\t\t\t\t\t\t 'water',\n\t\t\t\t\t\t\t\t\t\t )\n\t\t\t\t\tif not os.path.exists(o_path):\n\t\t\t\t\t\tos.makedirs(o_path)\n\n\t\t\t\t\to_file_func = os.path.join(o_path, \n\t\t\t\t\t\t\t\t\t\t 'func_{}_wter_{}_fast{}_eretrofit{}_efast{}.gz' \n\t\t\t\t\t\t\t\t\t\t .format(hazard, retrofit_key, fast, eretrofit, efast))\n\t\t\t\t\t\n\t\t\t\t\to_file_rep = os.path.join(o_path, \n\t\t\t\t\t\t\t\t\t\t 'reptime_{}_wter_{}_fast{}_eretrofit{}_efast{}.gz' \n\t\t\t\t\t\t\t\t\t\t .format(hazard, retrofit_key, fast, eretrofit, efast))\n\n\t\t\t\t\ttemp_data_func = np.zeros((len(rts), n_samples))\n\t\t\t\t\ttemp_data_rep = np.zeros((len(rts), n_samples))\n\t\t\t\t\tfor rt_i, rt in enumerate(rts):\n\t\t\t\t\t\ttemp_data_func[rt_i] = func[rt].loc[guid]\n\t\t\t\t\t\ttemp_data_rep[rt_i] = rep[rt].loc[guid]\n\n\t\t\t\t\to_df_func = pd.DataFrame(temp_data_func, index=rts, columns=column_keys)\n\t\t\t\t\to_df_func.to_csv(o_file_func, compression='gzip')\n\n\t\t\t\t\to_df_rep = pd.DataFrame(temp_data_rep, index=rts, columns=column_keys)\n\t\t\t\t\to_df_rep.to_csv(o_file_rep, compression='gzip')", "def do_analyse(args):\n\n pd.set_option('display.max_columns', None)\n pd.set_option('display.width', None)\n\n score = 'score'\n\n # Read in the results, and add a boolean target column.\n df = pd.read_csv(args.results, index_col=0)\n df['target'] = df['verify_speaker'] == df['enrol_speaker']\n\n # Calculate ideal 0.01% threshold over the multi-session data.\n nontarget_df = df.loc[df['target'] == False].sort_values(score, ascending=False)\n nontarget_count = nontarget_df[score].count()\n th_calc = nontarget_df.iloc[int(nontarget_count * (1 / 10000))][score]\n\n # Now filter the data so that we only consider mono-session enrolment and verification.\n df = df.loc[df['verify_room'] == df['enrol_room']]\n target_df = df.loc[df['target'] == True].sort_values(score, ascending=False)\n nontarget_df = df.loc[df['target'] == False].sort_values(score, ascending=False)\n target_count = target_df[score].count()\n nontarget_count = 
nontarget_df[score].count()\n\n # Calculate FA/FR for the user-defined threshold.\n th_user = args.th_user\n fr_user = target_df.loc[target_df[score] < th_user][score].count()\n fa_user = nontarget_df.loc[nontarget_df[score] > th_user][score].count()\n frr_user = fr_user / target_count\n far_user = fa_user / nontarget_count\n label_user = 'User Threshold: th {:.4f}, FR {} ({:.3f}%), FA {} ({:.3f}%)'.format(th_user, fr_user, frr_user * 100,\n fa_user, far_user * 100)\n\n # Calculate the FA/FR for the ideal threshold calculated from the multi-session data.\n fr_calc = target_df.loc[target_df[score] < th_calc][score].count()\n fa_calc = nontarget_df.loc[nontarget_df[score] > th_calc][score].count()\n frr_calc = fr_calc / target_count\n far_calc = fa_calc / nontarget_count\n label_calc = 'Calc Threshold: th {:.4f}, FR {} ({:.3f}%), FA {} ({:.3f}%)'.format(th_calc, fr_calc, frr_calc * 100,\n fa_calc, far_calc * 100)\n\n # Print the stats.\n print('\\nTarget Stats:')\n print(target_df[score].describe())\n print('\\nNon-Target Stats:')\n print(nontarget_df[score].describe())\n print('\\nThresholds:')\n print(label_user)\n print(label_calc)\n\n # Paint the graphs.\n paint_graph(score, 'verify_room', df, th_user, label_user, th_calc, label_calc)\n paint_graph(score, 'enrol_room', df, th_user, label_user, th_calc, label_calc)\n paint_graph(score, 'verify_speaker', df, th_user, label_user, th_calc, label_calc)\n paint_graph(score, 'enrol_speaker', df, th_user, label_user, th_calc, label_calc)", "def calculate(self, technologies, value_streams, results, opt_years):\n self.initiate_cost_benefit_analysis(technologies, value_streams)\n super().calculate(self.ders, self.value_streams, results, opt_years)\n self.create_equipment_lifetime_report(self.ders)", "def compute_performance_analysis(self, G, I, thresholds=0.01):\r\n FAR = self.compute_FAR(I, thresholds)\r\n FRR = self.compute_FRR(G, thresholds)\r\n CRR = self.compute_CRR(FAR)\r\n CAR = self.compute_CAR(FRR)\r\n EER = self.compute_EER(FAR, FRR)\r\n AUC = self.compute_AUC(FAR, CAR)\r\n return FAR, FRR, CRR, CAR, EER, AUC", "def CII_vs_CO(**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n GR = glo.global_results(sim_run=p.sim_runs[1],nGal=p.nGals[1],grid_ext=p.grid_exts[1])\n fig,ax1 = plt.subplots()\n L_CII = getattr(GR,'L_[CII]158_sun')\n L_CO = getattr(GR,'L_CO(1-0)_sun')\n Zsfr = getattr(GR,'Zsfr')\n lL_CO, lL_CII = np.log10(L_CO), np.log10(L_CII) \n lL_CO, lL_CII = lL_CO[(L_CO > 0) & (L_CII > 0)], lL_CII[(L_CO > 0) & (L_CII > 0)]\n sc = ax1.scatter(np.log10(L_CO)-10, np.log10(L_CII)-10, marker='o', c=np.log10(Zsfr), cmap='viridis', zorder=10,\\\n vmin=np.log10(0.05), vmax=np.log10(3.1), \\\n s=10, alpha=0.8)#, label='SIGAME 100Mpc_arepoPDF')\n # print('Min Zsfr in Simba sample: ',np.min(Zsfr))\n # print('indices with L_CO < 1e0:')\n nbins = 100\n k = kde.gaussian_kde(np.column_stack([lL_CO,lL_CII]).T)\n x, y = np.mgrid[lL_CO.min():lL_CO.max():nbins*1j, \\\n 4:lL_CII.max():nbins*1j]\n z = k(np.vstack([x.flatten(), y.flatten()]))\n CS = ax1.contour(x, y, z.reshape(x.shape),colors='forestgreen',levels=8,zorder=10)\n CS.collections[0].set_label('SIGAME 100Mpc_arepoPDF')\n\n GR = glo.global_results(sim_run=p.sim_runs[0],nGal=p.nGals[0],grid_ext=p.grid_exts[1])\n L_CII = getattr(GR,'L_[CII]158_sun')\n L_CO = getattr(GR,'L_CO(1-0)_sun')\n Zsfr = getattr(GR,'Zsfr')\n lL_CO, lL_CII = np.log10(L_CO), np.log10(L_CII) \n lL_CO, lL_CII = lL_CO[(L_CO > 0) & (L_CII > 0)], lL_CII[(L_CO > 0) & (L_CII > 0)]\n lL_CO = 
np.append(lL_CO,np.array([6.1,5]))\n lL_CII = np.append(lL_CII,np.array([8.9,9.7]))\n # ax1.scatter(np.log10(L_CO), np.log10(L_CII), marker='^', c=np.log10(Zsfr), cmap='viridis', zorder=10,\\\n # vmin=np.log10(0.05), vmax=np.log10(3.1), \\\n # s=10, alpha=0.8, label='SIGAME 25Mpc_arepoPDF')\n nbins = 100\n k = kde.gaussian_kde(np.column_stack([lL_CO,lL_CII]).T)\n x, y = np.mgrid[lL_CO.min():lL_CO.max():nbins*1j, \\\n 4:lL_CII.max():nbins*1j]\n z = k(np.vstack([x.flatten(), y.flatten()]))\n CS = ax1.contour(x, y, z.reshape(x.shape),colors='deepskyblue',linestyles='dotted',levels=6)\n CS.collections[0].set_label('SIGAME 25Mpc_arepoPDF')\n\n GR = glo.global_results(sim_run=p.sim_runs[1],nGal=p.nGals[1],grid_ext=p.grid_exts[0])\n L_CII = getattr(GR,'L_[CII]158_sun')\n L_CO = getattr(GR,'L_CO(1-0)_sun')\n Zsfr = getattr(GR,'Zsfr')\n lL_CO, lL_CII = np.log10(L_CO), np.log10(L_CII) \n lL_CO, lL_CII = lL_CO[(L_CO > 0) & (L_CII > 0)], lL_CII[(L_CO > 0) & (L_CII > 0)]\n lL_CO = np.append(lL_CO,np.array([-2.2,4.7]))\n lL_CII = np.append(lL_CII,np.array([8,9.3]))\n # ax1.scatter(np.log10(L_CO), np.log10(L_CII), marker='^', c=np.log10(Zsfr), cmap='viridis', zorder=10,\\\n # vmin=np.log10(0.05), vmax=np.log10(3.1), \\\n # s=10, alpha=0.8, label='SIGAME v3 Simba-%s' % (p.sim_runs[0].replace('_','').replace('Mpc','')))\n nbins = 100\n k = kde.gaussian_kde(np.column_stack([lL_CO,lL_CII]).T)\n x, y = np.mgrid[lL_CO.min():lL_CO.max():nbins*1j, \\\n 4:lL_CII.max():nbins*1j]\n z = k(np.vstack([x.flatten(), y.flatten()]))\n CS = ax1.contour(x, y, z.reshape(x.shape),colors='brown',levels=8,zorder=5,linestyles='dashed')\n CS.collections[0].set_label('SIGAME 100Mpc_arepoPDF_no_ext')\n\n # Observations\n K16 = pd.read_pickle('data/observations/AHIMSA_sample_lit')\n K16_LCII = K16['[CII]158_Lsun']\n K16_LCO = K16['CO(1-0)_Lsun']\n ax1.plot(np.log10(K16_LCO), np.log10(K16_LCII), '>', color='grey', ms=6, fillstyle='none',alpha=0.8, mew=1,zorder=0,\\\n label='Mixed type galaxies [Kamenetzky+16]')\n\n C15 = pd.read_pickle('data/observations/DGS_Cormier_2015')\n C15_LCII = C15['L_[CII]158']\n C15_LCO = C15['L_CO(1-0)']\n C15_Z = C15['Z']\n # L_ul = C15['L_[CII]158'][(C15['L_[CII]158'] < 0) & (C15['L_CO(1-0)'] > 0)]\n # if len(L_ul) > 0:\n # ax1.plot(np.log10(C15['L_CO(1-0)'][C15['L_[CII]158'] < 0]),np.log10(-1.*L_ul),'s',ms=5,mew=0,color='grey',alpha=0.8)\n # ax1.errorbar(np.log10(C15['L_CO(1-0)'][C15['L_[CII]158'] < 0]),np.log10(-1.*L_ul), elinewidth=1,\\\n # uplims=np.ones(len(L_ul)),yerr=np.ones(len(L_ul))*1,color='grey',alpha=0.8,lw=0)\n ax1.scatter(np.log10(C15_LCO), np.log10(C15_LCII), marker='+', c=np.log10(C15_Z), cmap='viridis', zorder=0,\\\n vmin=np.log10(0.05), vmax=np.log10(3.1),\\\n s=100, lw=3, alpha=0.8, label='Dwarf galaxies [Cormier+15]')\n\n A17 = pd.read_pickle('data/observations/xCOLD_GASS_Accurso_2017')\n A17 = A17.loc[np.argwhere(A17['L_CO(1-0)'].values > 0).flatten()]\n ax1.scatter(A17['L_CO(1-0)'],A17['L_[CII]158'], marker='d', c=np.log10(A17['Z']), cmap='viridis', zorder=0,\\\n vmin=np.log10(0.05), vmax=np.log10(3.1),\\\n s=50, lw=0, alpha=0.8, label='COLD GASS [Accurso+17]') #c=np.log10(A17['Z']), \n\n CII_obs = np.log10(np.append(K16_LCII.values,C15_LCII.values))\n CO_obs = np.log10(np.append(K16_LCO.values,C15_LCO.values))\n CII_obs = np.append(CII_obs,A17['L_[CII]158'].values)\n CO_obs = np.append(CO_obs,A17['L_CO(1-0)'].values)\n index = np.argwhere((CII_obs > 0) & (CO_obs > 0)).flatten()\n CII_obs = CII_obs[index]\n CO_obs = CO_obs[index]\n\n x = np.linspace(0, 7, 100)\n fit = 
LinearRegression().fit(CO_obs.reshape(-1, 1),\\\n CII_obs.reshape(-1, 1))\n L_fit = fit.predict(x.reshape(-1, 1))\n ax1.plot(x, L_fit, color='black', linestyle='--', label='Log-linear fit to observations')\n\n ax1.set_ylabel('log ' + getlabel('[CII]158'))\n ax1.set_xlabel('log ' + getlabel('CO(1-0)'))\n plt.colorbar(sc,label=r'log $\\langle$Z$\\rangle_{\\rm SFR}$ [Z$_{\\rm \\odot}$]')\n\n handles, labels = ax1.get_legend_handles_labels()\n print(labels) # labels, handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0]))\n # handles = [handles[_] for _ in [2,4,3,5,0,6,7,1]]\n # labels = [labels[_] for _ in [2,4,3,5,0,6,7,1]]\n handles = [handles[_] for _ in [2,4,3,5,6,0,1]]\n labels = [labels[_] for _ in [2,4,3,5,6,0,1]]\n plt.legend(handles,labels,loc='lower left',fontsize=10.,frameon=True)\n\n ax1.set_xlim([-3,6.2])\n ax1.set_ylim([4,10])\n\n if not os.path.isdir(p.d_plot + 'luminosity/'): os.mkdir(p.d_plot + 'luminosity/') \n plt.savefig(p.d_plot + 'luminosity/CO_vs_CII%s%s.png' % (p.grid_ext,p.table_ext), dpi=300)", "def test_SteepCBPCTL():\n # Remove old log file\n subprocess.run(['rm', 'STEEP_CTL.log'], cwd=cwd)\n # Run vplanet\n subprocess.run(['vplanet', 'vpl.in', '-q'], cwd=cwd)\n\n # Grab the output\n output = GetOutput(path=cwd)\n\n # Run our comparisons\n assert np.isclose(output.log.final.cbp.FreeEcc, 0.030000)\n assert np.isclose(output.log.final.cbp.Eccentricity, 0.031100)\n assert np.isclose(output.log.final.cbp.SemiMajorAxis, 1.048835e+11)\n assert np.isclose(output.log.final.secondary.Eccentricity, 0.313818)\n assert np.isclose(output.log.final.secondary.SemiMajorAxis, 0.095744)\n assert np.isclose(output.log.final.secondary.CriticalSemiMajorAxis, 0.307611)", "def main():\n #diffuser_data_dir = r'F:\\TEMPO\\Data\\GroundTest\\FPS\\Spectrometer\\Diffuser_Irradiance\\saved_quads\\3845_ms\\saved_plots_modified'\n #diffuser_light_data = os.path.join(diffuser_data_dir,'Light_data')\n #diffuser_dark_data = os.path.join(diffuser_data_dir, 'Dark_data')\n #print(diffuser_data_dir)\n #cc\n #int_time_diffuser = 3845.0\n\n# radiance_data_dir_UV = r'F:\\TEMPO\\Data\\GroundTest\\FPS\\Spectrometer\\Radiance_Cal_UV_Lamp\\saved_quads\\saved_plots_modified'\n# radiance_light_data_UV = os.path.join(radiance_data_dir_UV,'DSS-Y')\n# radiance_dark_data_UV = os.path.join(radiance_data_dir_UV, 'Dark_data')\n## #int_time_radiance = 93.0\n## print(radiance_data_dir_UV)\n##\n radiance_data_dir_VIS = r'F:\\TEMPO\\Data\\GroundTest\\FPS\\Spectrometer\\Radiance_Cal_UV_Lamp\\saved_quads\\processed_h5'\n radiance_light_data_VISminusY = os.path.join(radiance_data_dir_VIS,'DSS-Y')\n radiance_light_data_VIS_Center = os.path.join(radiance_data_dir_VIS,'DSS_Center')\n radiance_light_data_VISplusY = os.path.join(radiance_data_dir_VIS,'DSS+Y') \n radiance_dark_data_VIS = os.path.join(radiance_data_dir_VIS, 'Dark_Data')\n\n\n\n # mean_diffuser_data = calculate_mean(diffuser_light_data)\n #mean_diffuser_dark_data = calculate_mean(diffuser_dark_data)\n#\n#\n# #Let's correct for dark current and work in signal rates unit\n# diffuser_dc_corrected = (mean_diffuser_data - mean_diffuser_dark_data)\n# diffuser_dc_corrected = np.round(diffuser_dc_corrected, 2)\n# diffuser_dc_corrected[diffuser_dc_corrected <0] = 0\n# mean_save_dir = os.path.join(diffuser_data_dir,'processed_average_data')\n# mean_save_irradiance = os.path.join(mean_save_dir, 'mean_irradiance_3845ms.csv')\n# np.savetxt(mean_save_irradiance, diffuser_dc_corrected, fmt='%1.3f', delimiter=\",\")\n \n\n\n\n# mean_radiance_data_UV = 
calculate_mean(radiance_light_data_UV)\n# mean_radiance_dark_data_UV = calculate_mean(radiance_dark_data_UV)\n# radiance_dc_corrected_UV = (mean_radiance_data_UV - mean_radiance_dark_data_UV)\n# radiance_dc_corrected_UV = np.round(radiance_dc_corrected_UV, 2)\n# radiance_dc_corrected_UV[radiance_dc_corrected_UV < 0] = 0\n###\n mean_radiance_dark_data_VIS = calculate_mean_dark(radiance_dark_data_VIS)\n \n \n # Correct for Dark current\n mean_radiance_data_VISminusY = calculate_mean(radiance_light_data_VISminusY) - mean_radiance_dark_data_VIS\n mean_radiance_data_VIS_Center = calculate_mean(radiance_light_data_VIS_Center)- mean_radiance_dark_data_VIS\n mean_radiance_data_VISplusY = calculate_mean(radiance_light_data_VISplusY) - mean_radiance_dark_data_VIS\n \n \n #radiance_dc_corrected_VIS[radiance_dc_corrected_VIS < 0] = 0\n#\n# \n\n# \n# mean_save_dir_UV = os.path.join(radiance_data_dir_UV,'processed_average_data')\n mean_save_dir_VIS = os.path.join(radiance_data_dir_VIS,'Mean_Processed_Data')\n if not os.path.exists(mean_save_dir_VIS):\n os.makedirs(mean_save_dir_VIS)\n #\n# mean_save_radiance_UV = os.path.join(mean_save_dir_UV, 'mean_radiance_DSSminus_UV.csv')\n# mean_save_radiance_VIS = os.path.join(mean_save_dir_VIS, 'mean_radiance_DSSminus_VIS.csv')\n# #\n# \n# np.savetxt(mean_save_radiance_UV, radiance_dc_corrected_UV, fmt='%1.3f', delimiter=\",\")\n# np.savetxt(mean_save_radiance_VIS, radiance_dc_corrected_VIS, fmt='%1.3f', delimiter=\",\")\n print('DONE')\n\n #Write into h5file\n hf_name = os.path.join(mean_save_dir_VIS,'Mean_Data.h5')\n hf = h5py.File(hf_name,'w')\n hf.create_dataset('DSS-Y', data= mean_radiance_data_VISminusY)\n hf.create_dataset('DSS_Center', data=mean_radiance_data_VIS_Center)\n hf.create_dataset('DSS+Y', data= mean_radiance_data_VISplusY)", "def _evaluate_performance__static_winners(self):\n # | - _evaluate_performance__\n\n # | - class attributes #################################################\n AL = self\n al_gen = self.al_gen\n verbose = self.verbose\n seed_ids = self.seed_ids\n acquisition_bin = self.acquisition_bin\n completed_ids = self.completed_ids\n CandidateSpace = self.CandidateSpace\n RegressionModel = self.RegressionModel\n DuplicateFinder = self.DuplicateFinder\n al_gen_dict = self.al_gen_dict\n\n stop_mode = self.stop_mode\n stop_num_generations = self.stop_num_generations\n\n index_acq_gen_dict = self.index_acq_gen_dict\n #__| #################################################################\n\n # #####################################################################\n mode = \"lowest_N\" # 'lowest_N' or 'lowest_perc'\n\n N_ids = 10\n lowest_perc = 5\n\n # Number of consecutive generations that the Nth best systems must\n # remain static\n M_gens = 3\n # #####################################################################\n\n if mode == \"lowest_perc\":\n num_candidates = CandidateSpace.FingerPrints.df_pre.shape[0]\n N_ids = int(num_candidates * (lowest_perc * 0.01))\n\n gen_keys = list(AL.al_gen_dict.keys())\n\n if len(gen_keys) > M_gens:\n latest_M_keys = gen_keys[-(M_gens + 1):]\n last_gen_key = gen_keys[-1]\n\n al_gen_dict_subset_i = dict(zip(\n latest_M_keys,\n [AL.al_gen_dict.get(i, None) for i in latest_M_keys]))\n\n indices_list = []\n iterator = enumerate(al_gen_dict_subset_i.items())\n for i_cnt, (gen_i, AL_i) in iterator:\n model_i = AL_i.model\n\n model_i = AL.add_main_Y_to_model(\n model_i, plot_dft_instead_of_pred=True)\n model_i = model_i[(model_i[\"duplicate\"] == False)]\n model_i = model_i.sort_values(\"Y_main\")\n\n 
indices_i = model_i.index.tolist()\n\n indices_list.append(indices_i)\n\n if i_cnt >= M_gens:\n indices_i = indices_list[i_cnt][0:N_ids]\n ids_static_list = []\n for j in range(M_gens):\n indices_j = indices_list[i_cnt - (j + 1)][0:N_ids]\n ids_static = indices_j == indices_i\n ids_static_list.append(ids_static)\n\n ids_are_static = all(ids_static_list)\n\n self.performance__static_winners[last_gen_key] = ids_are_static\n #__|", "def run_gc(p_forecast, r_forecast, q_zero):\n import numpy as np\n import argparse\n from scipy.io import loadmat\n import time\n from algorithms import *\n from network import *\n from forecaster import *\n\n parser = argparse.ArgumentParser(description='Simulate Control')\n parser.add_argument('--seed', default=0, help='random seed')\n parser.add_argument('--storagePen', default=2, help='storage penetration percentage')\n parser.add_argument('--solarPen', default=3, help='solar penetration percentage')\n #parser.add_argument('--V_weight', default=500, help='voltage soft constraint weight')\n FLAGS, unparsed = parser.parse_known_args()\n #print('running with arguments: ({})'.format(FLAGS))\n storagePen = float(FLAGS.storagePen)/10\n solarPen = float(FLAGS.solarPen)/10\n seed = int(FLAGS.seed)\n\n np.random.seed(seed) # set random seed\n\n #Initialize simulation parameters\n nodesPen = np.maximum(solarPen,storagePen)\n GCtime = 24\n lookAheadTime = 24\n GCstepsTotal = 30\n sellFactor = 1\n GCscens = 1\n LCscens = 1\n V_weight = 10000 # tuning parameter for voltage penalties\n Vtol = .005 # tolerance bound on voltage penalties\n ramp_weight = 100000 # make large to prioritize ramp following\n NLweight = 4000 # make 10X price when there are no bounds\n\n # For 1 day pecan street data\n GCtime = 12\n lookAheadTime = 12\n GCstepsTotal = 2\n\n\n # Load Data\n # IEEE 123 bus case PG&E data\n network_data = np.load('network_data.npz')\n loadMod = 1\n presampleIdx = 168 # first week as presample data\n startIdx = presampleIdx + 1 # starting index for the load dataset\n DataDict = loadmat('loadData123Ag.mat')\n pDemandFull = loadMod*np.matrix(DataDict['pDemand'])\n rDemandFull = loadMod*np.matrix(DataDict['rDemand'])\n DataDict = loadmat('PyLoadData.mat')\n sNormFull = np.matrix(DataDict['sNorm'])\n # Load Residual Means and Covariance Dictionaries\n ResidualDict = loadmat('ResidualData123.mat')\n pMeans = ResidualDict['pMeans'][0,0]\n pCovs = ResidualDict['pCovs'][0,0]\n\n # Load network\n root = 0\n ppc = network_data['ppc'][()]\n Ybus = network_data['Ybus'][()]\n\n # Load Prices\n prices = np.matrix(np.hstack((250*np.ones((1,16)) , 350*np.ones((1,5)), 250*np.ones((1,3)))))\n prices = np.tile(prices, (1,GCtime*GCstepsTotal/24))\n pricesFull = prices\n\n # Load Ramps\n windPen = .3 # 30% of electricity comes from wind used to calculate ramp amounts\n ramp_tolerance = 0 # 5% of ramp amount tolerance on ramp works better with 0 tolerance\n rampDict = loadmat('rampDataAll.mat')\n rampUpData = rampDict['true_Uramp'] # dimensions are ramps X (start/end time start/end power)\n rampUpData = rampUpData[1:,:] # remove first ramp since it occurs too early at time 2\n rampDownData = rampDict['true_Dramp']\n rampDownData[:,[2,3]] = rampDownData[:,[3,2]] # swap down ramp amounts to make negative\n rampDataAll = np.vstack((rampUpData, rampDownData))\n rOrder = np.argsort(rampDataAll[:,0])\n rampsNumTotal = len(rOrder)\n rampDataAll = rampDataAll[rOrder,:]\n rampDataAll[:,[0,1]] = rampDataAll[:,[0,1]] - 1 # subtract 1 for matlab indexing\n\n # Make dictionary of ramps using whole 
network for scale\n rampUAll = make_ramp_dict(rampDataAll, windPen, ramp_tolerance, pDemandFull)\n ramp_starts = np.sort(rampUAll.keys())\n ramp_curr = np.array(ramp_starts[ramp_starts >= (720)]) # remove ramps after 720 hours\n for ramp_key in ramp_curr:\n rampUAll.pop(ramp_key)\n #print('all ramp times', np.sort(rampUAll.keys()))\n\n # initialize forecaster and network\n forecast_error = .1\n forecaster = Forecaster(forecast_error, pMeans, pCovs)\n\n # Random = True\n network = Network(storagePen, solarPen, nodesPen, pDemandFull, rDemandFull, pricesFull, root, Ybus, startIdx, sNormFull, Vmin=0.95, Vmax=1.05, Vtol=0, v_root=1.022, random=True, rampUAll=rampUAll)\n\n # Random = False\n\n # hardcoding battery information\n battnodes = np.array([4, 10], dtype=int)\n qmin = np.reshape(np.matrix([0, 0]), (2,1))\n qmax = np.reshape(np.matrix([0.126, 0.063]), (2,1))\n umin = -qmax/3\n umax = qmax/3\n # 123 bus case 1 day Pecan Street 1 minute data\n startIdx = 0\n battnode_data = np.genfromtxt('agg_load_123_raw.csv', delimiter=',').T\n battnode_data[0,0] = 1235.22\n pdat = np.zeros((2,24))\n for i in range(24):\n pdat[:,i] = np.mean(battnode_data[:,i*60:(i+1)*60], axis=1)\n rdat = pdat*np.tan(np.arccos(.9))\n network_data = np.genfromtxt('network123_case_load.csv', delimiter=',')\n network_data[0,0] = 0\n pDemandFull = np.tile(np.reshape(network_data[:,0], (123,1)), (1,24))\n rDemandFull = np.tile(np.reshape(network_data[:,1], (123,1)), (1,24))\n pDemandFull[battnodes,:] = pdat/1000000 # convert to MW\n rDemandFull[battnodes,:] = rdat/1000000\n netDemandFull = np.matrix(pDemandFull)\n rDemandFull = np.matrix(rDemandFull)\n rampDataAll = np.matrix(rampDataAll[0,:]) # use only first ramp for 24 hours of data\n for i in battnodes: #Make 0 for no forecaster\n pMeans['b'+str(i+1)] = np.zeros(pMeans['b'+str(i+1)].shape)\n pCovs['b'+str(i+1)] = np.zeros(pCovs['b'+str(i+1)].shape)\n\n # Make dictionary of ramps using only battnodes for scale\n windPen = 6 # make 1 to account for only using battnodes instead of whole network\n rampUAll = make_ramp_dict(rampDataAll, windPen, ramp_tolerance, pDemandFull[battnodes,:])\n ramp_starts = np.sort(rampUAll.keys())\n ramp_curr = np.array(ramp_starts[ramp_starts >= (720)]) # remove ramps after 720 hours\n for ramp_key in ramp_curr:\n rampUAll.pop(ramp_key)\n #print('all ramp times', np.sort(rampUAll.keys()))\n\n rampUAll_orig = rampUAll.copy()\n\n network = Network(storagePen, solarPen, nodesPen, pDemandFull, rDemandFull, pricesFull, root, Ybus, startIdx, Vmin=0.95, Vmax=1.05, Vtol=0, v_root=1.022, random=False, rampUAll=rampUAll)\n network.inputStorage(Ybus, netDemandFull, battnodes, qmin, qmax, umin, umax)\n # End Random = False\n\n\n #get network information\n nodesStorage = network.battnodes\n storageNum = len(nodesStorage)\n qmin = network.qmin\n qmax = network.qmax\n\n #reformat data for network\n for i in nodesStorage:\n pMeans['b'+str(i+1)] = pMeans['b'+str(i+1)].flatten()\n\n # initialize controllers\n t_idx = 0 # set controller t_idx to something non zero after this if wanted\n GC = Global_Controller(network, forecaster, GCtime, lookAheadTime, GCscens, sellFactor, V_weight, Vtol, ramp_weight)\n q0 = np.matrix(np.zeros(qmax.shape)) #set initial q0 to be 0\n\n # print('MY OG Q0')\n # print(q0)\n # print(np.matrix(np.zeros(qmax.shape)))\n\n if q_zero:\n q_zero = np.matrix(q_zero)\n q_zero = q_zero.T\n q0 = q_zero\n\n # Initialize values to save\n Qall = np.matrix(np.zeros((storageNum,GCtime*GCstepsTotal+1)))\n Uall = 
np.matrix(np.zeros((storageNum,GCtime*GCstepsTotal)))\n\n\n ### Run Global Controller ###\n print('Running time:', t_idx)\n realS, pricesCurrent, LCtime, rampFlag, RstartList, QiList, RsignList, ramp_next, ubound_min, ubound_max = GC.runStep(q0, t_idx)\n\n # print('THESE ARE MY RESULTS!')\n # print(realS)\n # print(pricesCurrent)\n # print(LCtime)\n # print(rampFlag)\n # print(RstartList)\n # print(QiList)\n # print(RsignList)\n # print(ramp_next)\n\n result = {\n 'realS': pickle.dumps(realS, protocol=0),\n 'pricesCurrent': pickle.dumps(pricesCurrent, protocol=0),\n 'LCtime': LCtime,\n 'rampFlag': rampFlag,\n 'RstartList': pickle.dumps(RstartList, protocol=0),\n 'QiList': pickle.dumps(QiList, protocol=0),\n 'RsignList': pickle.dumps(RsignList, protocol=0),\n 'ramp_next': ramp_next,\n 'uboundMin': pickle.dumps(ubound_min, protocol=0),\n 'uboundMax': pickle.dumps(ubound_max, protocol=0)\n }\n\n return result\n\n # pickled_value = pickle.dumps(realS, protocol=0)\n #\n # #print 'WTF:', pickle.loads(pickled_value)\n #\n # return pickled_value" ]
[ "0.63561374", "0.5775331", "0.5715415", "0.5611014", "0.5607633", "0.55742955", "0.54829526", "0.5457336", "0.54125506", "0.5397806", "0.5258343", "0.5246536", "0.52320576", "0.5230727", "0.5224489", "0.5222911", "0.52141297", "0.52132136", "0.52128506", "0.52064836", "0.5187223", "0.51865035", "0.5176025", "0.516722", "0.5155997", "0.5155401", "0.51473165", "0.5145216", "0.5141946", "0.51360327" ]
0.6349215
1
Initialize name and age attributes.
def __init__(self, name, age): self.name = name self.age = age
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, first_name, last_name, age):\n\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, name, age):\r\n self.name = name\r\n self.age = age", "def __init__(self, name, age):\n self.name = name\n self.age = age", "def __init__(self, name, age):\n self.name = name\n self.age = age", "def __init__(self, name, age):\n self.name = name\n self.age = age", "def __init__(self, name, age):\n self.name = name\n self.age = age", "def __init__(self, first_name, last_name, age, gender):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age\n self.gender = gender", "def __init__(self, name, race, sex, age):\n self.Race = race\n self.Sex = sex\n self.Age = age\n self.Name = name", "def __init__(self, first_name, last_name, age):\n\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, name, age):\n\t\t# self.name is an object variable\n\t\tself.name = name\n\t\t#error checking\n\t\tif age < 0:\n\t\t\t# the raise keyword is how our programs can raise errors\n\t\t\traise ValueError(\"Age cannot be negative\")\n\t\tself.age = age", "def __init__(self, f_name, l_name, age, gender, m_number):\n self.f_name = f_name\n self.l_name = l_name\n self.age = age\n self.gender = gender\n self.m_number = m_number", "def set_age (self, PersonAge): \r\n self.age = PersonAge", "def __init__(self, name):\n self.name = name\n self.birthday = None\n self.lastName = name.split(' ')[-1]", "def __init__(self, name):\n self.name = name\n self.birthday = None\n self.lastName = name.split(' ')[-1]", "def __init__(self, name):\n self.name = name\n self.birthday = None\n self.lastName = name.split(' ')[-1]", "def __init__(self, name):\n self.name = name\n self.birthday = None\n self.lastName = name.split(\" \")[-1]", "def setAge(self, age):\r\n self.age = age", "def __init__(self,name,born):\n assert born == None or type(born) == int, repr(born)+' has the wrong type'\n assert born > 1900 or born == -1, repr(born)+' is not a valid birth year'\n self.setName(name)\n self._born = born", "def __init__(self, first_name, last_name, birthday, username):\n self.first_name = first_name\n self.last_name = last_name\n self.birthday = birthday\n self.username = username\n self.login_attempts = 0\n self.age = self.set_birthday()", "def __init__(self, first_name, last_name, age):\n\t\tself.first_name = first_name\n\t\tself.last_name = last_name\n\t\tself.age = age\n\t\tself.login_attempts = 0", "def __init__(self, 
name, birthday, premium):\n self.name = name\n self.birthday = birthday\n self.premium = premium", "def __init__(self, name, age, gender):\n\n self._name = name\n self._age = age\n self._gender = gender\n self._friend = None" ]
[ "0.74732316", "0.74732316", "0.74732316", "0.74732316", "0.74732316", "0.74732316", "0.74732316", "0.74732316", "0.7472384", "0.736729", "0.7351724", "0.7351724", "0.7351724", "0.7351724", "0.72921795", "0.7189177", "0.7124933", "0.711583", "0.6997387", "0.6881059", "0.686831", "0.686831", "0.686831", "0.68635374", "0.68083656", "0.6759403", "0.67245466", "0.67225355", "0.67164177", "0.6695428" ]
0.75609356
1
Get the shape of an element x. If it is an element with a shape attribute, return it. If it is a list with more than one element, compute the shape by checking its length and the shape of its internal elements; in that case, the shapes must be consistent. Otherwise, return () as the shape.
def get_shape(x): if isinstance(x, list) and len(x) > 0: shapes = [get_shape(subx) for subx in x] if any([s != shapes[0] for s in shapes[1:]]): raise ValueError('Parameter dimension not consistent: {}'.format(x)) return (len(x), ) + shapes[0] else: if hasattr(x, '_shape_tuple'): return x._shape_tuple() # method to return the shape as a tuple elif hasattr(x, 'shape'): return tuple(x.shape) else: return ()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_shape(x):\n\n return None if jnp.isscalar(x) else x.shape", "def shape(self):\n for component in ('x', 'y', 'z', 'r', 't'):\n arr = getattr(self, component)\n if arr is not None:\n return arr.shape\n return ()", "def shape(self) -> Optional[tuple]:\n return self._shape", "async def infer_shape_getelement(track, seq):\n shp = await seq['shape']\n if isinstance(shp, ListShape):\n return shp.shape\n elif isinstance(shp, tuple):\n # Array\n return NOSHAPE\n else:\n raise AssertionError()", "def shape_list(x):\n shape = list(x.shape)\n\n return shape", "def get_shape(lst, shape=()):\r\n if not isinstance(lst, Sequence):\r\n # base case\r\n return shape\r\n # peek ahead and assure all lists in the next depth\r\n # have the same length\r\n if isinstance(lst[0], Sequence):\r\n l = len(lst[0])\r\n if not all(len(item) == l for item in lst):\r\n msg = 'not all lists have the same length'\r\n raise ValueError(msg)\r\n\r\n shape += (len(lst), )\r\n # recurse\r\n shape = get_shape(lst[0], shape)\r\n return shape", "def get_shape(lst, shape=()):\n\n\tif not isinstance(lst, list):\n\t\t# base case\n\t\treturn shape\n\n\t# peek ahead and assure all lists in the next depth\n\t# have the same length\n\tif isinstance(lst[0], list):\n\t\tl = len(lst[0])\n\t\tif not all(len(item) == l for item in lst):\n\t\t\tprint(l)\n\t\t\tmsg = 'not all lists have the same length'\n\t\t\traise ValueError(msg)\n\n\tshape += (len(lst), )\n\n\t# recurse\n\tshape = get_shape(lst[0], shape)\n\n\treturn shape", "def shape(self):\n return self._shape", "def shape(data):\n if hasattr(data, \"shape\"):\n return list(data.shape)\n else:\n try:\n length = len(data)\n return [length] + shape(data[0])\n except TypeError:\n return []", "def shape(self):\n\n self._check_assigned()\n\n if (\n self.lazy\n and self.transformer is not None\n and hasattr(self.transformer, \"get_transformed_shape\")\n ):\n return self.transformer.get_transformed_shape(self.values)\n else:\n return self.__array__().shape", "def shape(self):\r\n return self._shape", "def shape(self):\n return self._shape", "def shape(self):\n return self._shape", "def get_shape(self):\n return shape(self._data)", "def shape_value(self):\n return self._shape_value[0]", "def _find_shape_of_nested_int_array(x):\n shape = [len(x)]\n sub_x = x[0]\n while not np.issubdtype(type(sub_x), np.integer):\n shape.append(len(sub_x))\n sub_x = sub_x[0]\n return tuple(shape)", "def get_xshape(self):\n return self.__xshape", "def get_shape(self):\n if self.__data is None:\n return None\n return self.__data.shape", "def __get_shape(\n op_str: str,\n x_shape: Tuple[int],\n y_shape: Tuple[int],\n ) -> Tuple[int]:\n op = getattr(operator, op_str)\n res = op(np.empty(x_shape), np.empty(y_shape)).shape\n cast(Tuple[int], res)\n return tuple(res) # type: ignore", "def shape(self):\n\n return self._shape", "def shape(self):\n\n return self._shape", "def shape(self):\n\n return self._shape", "def peek_pending_shape(self):\n res = None\n if self.pending_operations:\n res = self.pending_operations[-1].get(LazyAttr.SHAPE, None)\n # default to spatial shape (assuming channel-first input)\n return tuple(convert_to_numpy(self.shape, wrap_sequence=True).tolist()[1:]) if res is None else res", "def get_param_scale_shape(shape_x, shape_scale):\n\n length_x = len(shape_x)\n length_scale = len(shape_scale)\n\n if length_scale == 1 and shape_scale[0] == 1:\n shape = [1] * length_x\n else:\n shape = list(shape_scale)\n\n return shape", "def ashape(node):\n shp = node.shape\n assert shp is not None\n return 
shp", "def shape(self):\n return self.__shape", "def shape(self):\n return self.__shape", "def pshape(self):\n try:\n return plist([x.pshape() for x in self], root=self.__root__)\n except Exception:\n return plist([len(self)], root=self.__root__)", "def shape(self, x_shape):\n raise NotImplementedError()", "def shape(self):\n return self[0].shape" ]
[ "0.7307297", "0.71993625", "0.69666064", "0.6919222", "0.6845282", "0.68373024", "0.6807176", "0.6639613", "0.6614478", "0.65794605", "0.65702397", "0.6511562", "0.6511562", "0.64650935", "0.64642006", "0.64271873", "0.6416693", "0.6414622", "0.6394792", "0.637807", "0.637807", "0.637807", "0.6371437", "0.63698363", "0.6366635", "0.63524485", "0.63524485", "0.6340746", "0.6333289", "0.632103" ]
0.824383
0
Get the signatures results of the teacher in the given career for all the active exams.
def get_teacher_career_results(self, teacher, career): data = [] # Get the active exams of the career. exams = EvaluationsExam.objects.filter( type__exact=career.type, status="ACTIVE") # Get the results for each exam. for exam in exams: # Get the signatures of the teacher for the career in the exam. signatures_dtl = EvaluationsTeacherSignature.objects.filter( fk_teacher__exact=teacher.id, fk_period__exact=exam.fk_period, status="ACTIVE").select_related('fk_signature') signatures_results = [] for signature_dtl in signatures_dtl: # If it raise an exception, it means that the signature isn't evaluated yet or other error. try: # Get the results of the signature. signature_results = EvaluationsSignatureResult.objects.get( group=signature_dtl.group, fk_signature=signature_dtl.fk_signature.id, fk_exam=exam.id, status="ACTIVE" ) # Get the results for each question in the exam for the signature. questions_results = EvaluationsSignatureQuestionResult.objects.filter( group=signature_dtl.group, fk_signature=signature_dtl.fk_signature.id, fk_exam=exam.id, fk_question__optional='NO', status="ACTIVE" ).values_list('fk_question__description', 'result') # Get the comments of the signature/group. comments_result = EvaluationsSignatureQuestionResult.objects.get( group=signature_dtl.group, fk_signature=signature_dtl.fk_signature.id, fk_exam=exam.id, fk_question__optional='YES', status="ACTIVE" ).result # Split the comments and add them to a list, only the ones that are not empty. comments = list(filter(None, comments_result.split('|'))) # Crate a dictionary with the results of the signature and the questions. signatures_results.append({ 'teacher': teacher.name + ' ' + teacher.last_name + ' ' + teacher.last_name_2, 'signature': signature_dtl.fk_signature.description, 'group': signature_dtl.group, 'average': signature_results.average, 'comments': comments, 'total_evaluated': signature_results.total_evaluated, 'questions': questions_results }) except Exception: pass # Add the results to the exam dictionary. exam_results = { 'exam': exam.description, 'career': career.description, 'signatures_results': signatures_results, 'period': exam.fk_period.period } # Add the exam results to the list that will be returned at the end. data.append(exam_results) return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def career_teachers_excel(self, request):\n\n # Get the career to be processed their results.\n career_id = request.GET.get('career_id', '')\n career = EvaluationsCareer.objects.get(pk__exact=career_id)\n\n # Get the results for each esignature of the carrer en each exam.\n data = self.get_career_results(career)\n\n # Generates the CSV with the results of the career,then return as downloadable file.\n response = self.get_teacher_results_excel(data)\n return response", "def get_exam():\n try:\n user_id = authenticate_token(request)\n examiner = is_examiner(user_id)\n user = is_user(user_id)\n if user:\n # Query to run\n exams = []\n if examiner:\n results_query = db.session.query(Exam, func.count(ExamRecording.exam_id)).\\\n outerjoin(ExamRecording, ExamRecording.exam_id==Exam.exam_id).\\\n group_by(Exam.exam_id)\n # Filters query results using request params\n results, next_page_exists = filter_results(results_query, Exam)\n \n for e, er_count in results:\n exams.append({\n **e.to_dict(),\n 'exam_recordings':er_count\n })\n else:\n login_code = request.args.get('login_code', default=None)\n results = Exam.query.filter_by(login_code=login_code).\\\n filter(Exam.start_date <= datetime.utcnow()).\\\n filter(Exam.end_date >= datetime.utcnow()).all()\n next_page_exists = False\n for e in results:\n exams.append({\n **e.to_dict(),\n 'exam_recordings':0\n })\n return jsonify({'exams':exams, 'next_page_exists': next_page_exists}), 200\n\n return jsonify({'user_id': user_id, 'message': ['access denied, not examiner']}), 403\n except (Exception, exc.SQLAlchemyError) as e:\n return jsonify({ 'message': e.args }), 500", "def get_examinee():\n try:\n user_id = authenticate_token(request)\n examiner = is_examiner(user_id)\n getting_own_results = is_self(user_id)\n if examiner or getting_own_results:\n results_query = db.session.query(User, func.count(ExamRecording.user_id)).\\\n outerjoin(ExamRecording, ExamRecording.user_id==User.user_id).\\\n group_by(User.user_id)\n\n results, next_page_exists = filter_results(results_query, User)\n users = []\n for u, er_count in results:\n users.append({\n **u.to_dict(),\n 'exam_recordings':er_count\n })\n return jsonify({'users':users, 'next_page_exists':next_page_exists}), 200\n \n return jsonify({'user_id': user_id, 'message': ['access denied, not examiner']}), 403\n except (Exception, exc.SQLAlchemyError) as e:\n print(traceback.format_exc())\n return jsonify({ 'message': e.args }), 500", "def get_teachers(self):\n query = Teacher.all().order('teacher')\n return query.fetch()", "def _get_electronic_signatures(self, report=False):\n certificates = []\n\n return certificates", "def get_teacher(self, **fields):\n existing_fields = [i.name for i in self._db.get_columns('teachers')]\n teacher_fields = {}\n for key, value in fields.items():\n if key in existing_fields:\n teacher_fields[key] = value\n teachers = [i for i in Teachers.select().filter(**teacher_fields)]\n # Expect single value if search by unique fields, list if by non-unique\n return teachers if len(teachers) > 1 else teachers[0] if len(teachers) == 1 else None", "def get_participants_data(self):\n participants = []\n for (email, uid) in self.tokens.items():\n participant = {} \n participant['uid'] = uid\n participant['email'] = email\n response = 0\n questions = 0\n sections = [x for x in self.values() if ISurveySection.providedBy(x)]\n for section in sections:\n response += len(section.responses.get(uid, {}))\n questions += len(section.question_ids)\n if response != 0:\n participant['finished'] = 
Decimal(response) / Decimal(questions) * 100\n else:\n participant['finished'] = 0 \n participants.append(participant)\n return participants", "def get_exam_recording():\n try:\n # Users can get their own exam recordings, if they're an examiner they can get all of them\n user_id = authenticate_token(request)\n examiner = is_examiner(user_id)\n getting_own_results = is_self(user_id)\n\n if examiner or getting_own_results:\n results_query = db.session.query(User, Exam, ExamRecording, func.count(ExamWarning.exam_recording_id)).\\\n filter(User.user_id==ExamRecording.user_id).\\\n filter(Exam.exam_id==ExamRecording.exam_id).\\\n outerjoin(ExamWarning, ExamWarning.exam_recording_id==ExamRecording.exam_recording_id).\\\n group_by(ExamRecording.exam_recording_id)\n \n results, next_page_exists = filter_results(results_query, ExamRecording)\n\n exam_recordings = []\n in_progress = request.args.get('in_progress', default=None, type=int)\n if in_progress is not None: in_progress = in_progress==1\n for u, e, er, ew_count in results:\n updated = False\n duration = e.duration\n # If exam recording has not ended (or does not have a time_ended value)\n if er.time_started is not None and er.time_ended is None:\n # Check if the time now has surpassed the latest possible finish time (recording start time + exam duration)\n latest_finish_time = er.time_started + timedelta(hours=duration.hour, minutes=duration.minute)\n if latest_finish_time <= datetime.utcnow():\n # If so, set the value to latest possible time\n updated = True\n er.time_ended = latest_finish_time\n # Check so that when querying by in_progress = 1 / True, we dont include recordings that added time_ended to\n if not (updated and in_progress):\n exam_recordings.append({\n 'exam_recording_id':er.exam_recording_id,\n 'user_id':u.user_id,\n 'first_name':u.first_name,\n 'last_name':u.last_name,\n 'exam_id':e.exam_id,\n 'exam_name':e.exam_name,\n 'login_code':e.login_code,\n 'duration':e.duration.strftime(\"%H:%M:%S\"),\n 'subject_id':e.subject_id,\n 'time_started':datetime_to_str(er.time_started),\n 'time_ended':datetime_to_str(er.time_ended),\n 'video_link':er.video_link,\n 'warning_count':ew_count,\n 'document_link': e.document_link\n })\n db.session.commit()\n\n return jsonify({'exam_recordings':exam_recordings, 'next_page_exists':next_page_exists}), 200\n \n return jsonify({'user_id': user_id, 'message': \"access denied, invalid user.\" }), 403\n except (Exception, exc.SQLAlchemyError) as e:\n print(traceback.format_exc())\n return jsonify({ 'message': e.args }), 500", "def run_career_fair_etl(browser: HandshakeBrowser, download_dir: str) -> List[EngagementRecord]:\n raw_fair_data = CAREER_FAIRS_INSIGHTS_REPORT.extract_data(browser, download_dir)\n return transform_fair_data(raw_fair_data)", "def teacher_match(request):\n\n takes = Take.objects.filter(teacher__username=request.data[\"pid\"])\n match_dict={}\n #teacher=Faculty.objects.get(username=request.data[\"pid\"])\n #match_dict[\"tid\"]=takes[0]\n match_dict[\"tname\"] = takes[0].teacher.name\n\n match_dict[\"sid\"]=[]\n match_dict[\"sname\"] = []\n match_dict[\"cid\"] = []\n match_dict[\"cname\"] = []\n for take in takes:\n #print(take.student.username)\n #match_dict[\"sid\"].append(take)\n match_dict[\"sname\"].append(take.student.name)\n #print(take.course.course_id)\n #match_dict[\"cid\"].append(take)\n match_dict[\"cname\"].append(take.course.name)\n return Response(match_dict)", "async def get_exams(\n self, last_sync: datetime = None, deleted=False, **kwargs\n ) -> 
Union[AsyncIterator[Grade], List[int]]:\n return Exam.get(self._api, last_sync, deleted, **kwargs)", "def get_quiz_teacher():\n quiz_data = query_db(\n \"SELECT id, name FROM quizzes WHERE creator_id=?;\", [flask.session[\"id\"]]\n )\n quizzes = []\n for quiz in quiz_data:\n quiz_dict = {}\n quiz_dict[\"id\"] = quiz[0]\n quiz_dict[\"name\"] = quiz[1]\n quizzes.append(quiz_dict)\n return quizzes", "def referee_evaluate_thesis(request):\n\n if not validate_request(request): return redirect(reverse(URL_FORBIDDEN))\n\n \n user = auth.get_user(request)\n referee = Referee.objects.get(user = user)\n \n if request.method == \"GET\":\n all_thesis = [] # list of dict\n \n for panelMember in PanelMember.objects.filter(referee = referee).filter(status = 'A'):\n thesis = panelMember.thesis\n dict = {}\n dict['title'] = thesis.title\n\n dict['student_full_name'] = thesis.student.first_name + ' ' + thesis.student.last_name\n dict['synopsis'] = thesis.synopsis\n dict['thesis'] = thesis.thesis\n dict['keywords'] = []\n\n if panelMember.answer_for_questions == True:\n if thesis.thesis_modifications == \"NULL\" or thesis.thesis_modifications == \"\":\n dict['thesis_modifications'] = None\n else:\n dict['thesis_modifications'] = thesis.thesis_modifications\n else:\n dict['thesis_modifications'] = None\n\n\n for keys in ThesisKeyword.objects.filter(thesis = thesis):\n dict['keywords'].append((IEEEKeyword.objects.get(id = keys.keyword.id)).keyword)\n \n dict['student_username'] = thesis.student.user.username\n dict['id'] = thesis.id\n \n all_thesis.append(dict)\n return render(\n request,\n 'app/referee/evaluate_thesis.html',\n {\n 'title':'Evaluate Thesis',\n 'layout_data' : get_layout_data(request),\n 'all_thesis' : all_thesis\n }\n )\n else:\n return redirect(reverse(URL_BAD_REQUEST))", "def computeAvaliableTutors(self):\r\n subject = self.requestedSubject\r\n for tutor in AppUser.objects.all():\r\n if subject in tutor.subjectsOffered.all():\r\n self.avaliableTutors.add(tutor)", "def signatures(self) -> Optional[Sequence['outputs.SingleQueryResultResponse']]:\n return pulumi.get(self, \"signatures\")", "def get_results(self, environment_name, quali_api_helper):\n if not self._bp_session.test_id:\n raise BPRunnerException(self.__class__.__name__, \"Test id is not defined, run the test first\")\n pdf_result = self._test_results_flow.get_results(self._bp_session.test_id)\n quali_api_helper.login()\n env_name = re.sub(\"\\s+\", \"_\", environment_name)\n test_id = re.sub(\"\\s+\", \"_\", self._bp_session.test_id)\n file_name = \"{0}_{1}.pdf\".format(env_name, test_id)\n quali_api_helper.upload_file(self.reservation_id, file_name=file_name, file_stream=pdf_result)\n return \"Please check attachments for results\"", "def get_clicker_results(submissions, results, options):\r\n try:\r\n # get most recent submission for clicker\r\n answers = submissions[0]['detail']['student_answers']\r\n correct_map = submissions[0]['detail']['correct_map']\r\n except Exception, e:\r\n logging.info(\"Error getting clicker answers: %s.\" % e)\r\n return\r\n\r\n keys = sorted(answers.keys())\r\n if options.output:\r\n header = '{}{}{}{}'.format('problem'.ljust(10), 'evaluated'.ljust(12), 'entered'.ljust(25), 'correctness'.ljust(10))\r\n print header + '\\n' + ('-' * len(header))\r\n\r\n for i, key in enumerate(keys):\r\n ans = answers[key]\r\n if type(ans) is not list:\r\n if math_pattern.match(ans):\r\n ans = ans.replace('^', '**')\r\n ans = str('%.4f' % eval(ans))\r\n else:\r\n ans = ans.lower()\r\n else:\r\n ans = 'MC: 
{}'.format(' '.join([a.split('_')[1] for a in ans]))\r\n\r\n try:\r\n is_correct = correct_map[key]['correctness']\r\n\r\n if key in results:\r\n results[key].append((ans, is_correct[0]))\r\n else:\r\n results[key] = [(ans, is_correct[0])]\r\n\r\n if options.output and answers[key] != '':\r\n print '{}{}{}{}'.format(str(i+1).ljust(10), ans.ljust(12), answers[key].ljust(25), is_correct.ljust(10))\r\n\r\n except Exception, e:\r\n logging.info(\"Error get correct map: %s.\" % e)\r\n print correct_map\r\n try:\r\n return submissions[0]['score']\r\n except:\r\n logging.info(\"Error getting clicker score: %s.\" % e)", "def getApplicantData(self, resume):\n bigram_measures = cl.BigramAssocMeasures()\n trigram_measures = cl.TrigramAssocMeasures()\n st = LancasterStemmer()\n tk = RegexpTokenizer(r'\\w+')\n total_exp = 0.\n\n user_id = resume['userId']\n education = resume['userEducationList']\n experience = resume['userExperianceList']\n skills = resume['skills']\n work_hist_skills = resume['skillsFoundInWorkHistory']\n\n\n #Convert past job descriptions into bigrams and trigrams to compare to target job description\n bigrams, trigrams = [], []\n try:\n jobs_from_resume = [job['workDescription'] for job in experience]\n except Exception:\n jobs_from_file = []\n\n\n for job in jobs_from_resume:\n if job != None:\n biTri_temp = self.getBiTrigrams(job)\n bigrams.append(biTri_temp[0])\n trigrams.append(biTri_temp[0])\n\n #Convert past job titles into bigrams and trigrams and get years of experience in respective roles\n #experience_yrs = [] - I dont think it's being used anywhere\n raw_titles = []\n try:\n job_titles_from_resume = [job['jobWorkTitle'] for job in experience]\n except Exception:\n job_titles_from_resume = []\n\n position_yrs = []\n try:\n tenure_in_resume = [job['workTenureInCompany']/12 for job in experience]\n except Exception:\n tenure_in_resume = [1]\n for yrs_exp in tenure_in_resume:\n position_yrs.append(yrs_exp)\n\n try:\n company_name = [job['companyName'] for job in experience]\n except Exception:\n company_name = []\n\n #Get education data: institute tier and type.\n try:\n institute_name = [degree['instituteName'] for degree in education]\n except Exception:\n institute_name = []\n\n try:\n degreeType = [degree['educationType'] for degree in education]\n except Exception:\n degreeType = []\n\n emp_data = {'user_id': user_id ,'skills':skills, 'work_hist_skills': work_hist_skills,'job_bigrams':bigrams,\n 'job_trigrams':trigrams, 'titles': job_titles_from_resume, 'years_exp':position_yrs, 'company_name': company_name,\n 'position_tenure':position_yrs, 'institute_name': institute_name, 'degreeType': degreeType}\n\n return emp_data", "def get_all_allowed_enrollments(self):\n if self.is_superuser:\n return Enrollment.objects.all()\n\n # Enrollments belonging to students the user manages\n manages = Q(student__case_manager=self)\n # Enrollments belonging to sections the user teaches\n teaches = Q(section__teacher=self)\n\n # Filter all terms which the user teaches a class\n taught_terms = Term.objects.filter(section__teacher=self)\n\n # The teacher of another section in the same term in which the student is enrolled\n other_teacher = Q(pk__in=[])\n for term in taught_terms:\n overlapping_terms = term.get_overlapping_terms()\n # Get all sections from this term or its overlaps\n term_sections = Section.objects.filter(term__in=overlapping_terms)\n # Get all the enrollments in any section from this term\n term_enrollments = Enrollment.objects.filter(section__in=term_sections)\n # Get all 
the students taught by this user this term\n term_taught_students = Student.objects.filter(enrollment__in=term_enrollments.filter(section__teacher=self))\n # Get all the enrollments of those students for this term\n other_teacher = other_teacher | Q(student__in=term_taught_students, section__term__in=overlapping_terms)\n return Enrollment.objects.filter(teaches | manages | other_teacher).distinct()", "def get(self, request, *args, **kwargs):\n # user = request.user\n quizTakerId = kwargs[\"pk\"]\n quizTaker = QuizTakers.objects.filter(id=quizTakerId).first()\n response = StudentResponse.objects.filter(quiztaker=quizTaker)\n serializer = ResponseSerializer(response, many=True)\n return Response(serializer.data)", "def getfundamentals(self, results):\n self.log(\"Retrieving fundamental phone information\")\n self.log(\"Phone serial number\")\n results['uniqueserial']=sha.new(self.get_esn()).hexdigest()\n results['groups']=self.get_groups()\n results['ringtone-index']=self.get_ringtone_index()\n results['wallpaper-index']=self.get_wallpaper_index()\n return results", "def perform(self, achalls):\n responses = []\n for achall in achalls:\n if isinstance(achall, achallenges.ProofOfPossession):\n responses.append(self.proof_of_pos.perform(achall))\n elif isinstance(achall, achallenges.RecoveryToken):\n responses.append(self.rec_token.perform(achall))\n else:\n raise errors.ContAuthError(\"Unexpected Challenge\")\n return responses", "def signatures(self):\n return self._signatures", "def test_teacher_role():\n response = test_app.post(\"/bkt_service/unwind\", params='''[{\n \"event\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/OutcomeEvent\",\n \"actor\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@id\": \"student-1462300421838-1\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/lis/Person\",\n \"roles\": [\n \"urn:lti:instrole:ims/lis/Teacher\"\n ]\n },\n \"action\": \"http://purl.imsglobal.org/vocab/caliper/v1/action#Graded\",\n \"object\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@id\": \"attempt-1462300421838-4\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/Attempt\",\n \"extensions\": {\n \"assessmentType\": \"Diagnostic Assessment\",\n \"assessmentId\": \"assessment-1462300421838-4\"\n },\n \"count\": 1,\n \"startedAtTime\": \"2016-05-03T21:33:41.844Z\",\n \"endedAtTime\": \"2016-05-03T22:03:41.844Z\"\n },\n \"generated\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@id\": \"result-1462300421838-4\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/Result\",\n \"assignableId\": \"assessment-1462300421838-4\",\n \"normalScore\": 80,\n \"totalScore\": 100,\n \"itemResults\": [\n {\n \"@id\": \"item-result-1462300421838-4-1\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/Result\",\n \"question_type\": \"mcq\",\n \"automarkable\": 1,\n \"score\": 7,\n \"max_score\": 10,\n \"question_reference\": \"c0a3f0c8-eac7-4795-8c7a-adf98e336a7b\",\n \"item_reference\": \"Adaptive_Item2_extract_USMOs\",\n \"sequenceNumber\": 1\n }\n ]\n },\n \"group\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@id\": \"class-01\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/lis/CourseOffering\",\n \"name\": null,\n \"description\": null,\n \"extensions\": {\n \"pageNumber\": null,\n \"courseCode\": \"course-01\",\n \"calmsCourseId\": \"calms-course-01\",\n \"lessonId\": 
\"lesson-01\",\n \"platform\": \"D2L\",\n \"classroomTypeId\": \"3500.0\",\n \"activityId\": \"10\",\n \"gradeLevel\": \"8\",\n \"CourseOfferingId\": \"1200.0\",\n \"adaptivewrapperId\": \"\",\n \"schoolYear\": \"2015-20116\",\n \"unitId\": \"3201.0\",\n \"moduleId\": \"1110.0\",\n \"courseId\": \"2550.0\",\n \"assessmentId\": \"4520.0\",\n \"originSystemId\": \"sams\",\n \"businessLineId\": \"1300.0\",\n \"contextId\": \"587279312bf9a9afd947ddab\"\n },\n \"dateCreated\": null,\n \"dateModified\": null,\n \"courseNumber\": null,\n \"academicSession\": null,\n \"subOrganizationOf\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@id\": \"1001.0\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/w3c/Organization\",\n \"name\": null,\n \"description\": null,\n \"extensions\": {},\n \"dateCreated\": null,\n \"dateModified\": null,\n \"subOrganizationOf\": null\n }\n },\n \"eventTime\": \"2017-01-09T14:21:00Z\"\n }\n }\n ]''')\n assert response.status == '200 OK'\n assert len(response.json) == 1\n #assert response.json[0][\"error\"][\"code\"] == 21\n #assert \"role\" in response.json[0][\"error\"][\"message\"]", "def get_all_profesors(self) -> List[Teacher]:\n self.cursor.execute(\n f\"SELECT * FROM {self.table_name}\")\n \n teachers = []\n for teacher in self.cursor.fetchall():\n teacher_parsed = list(teacher[0:8]) + [json.loads(t) for t in teacher[8:]]\n teachers.append(Teacher.parse_tuple(teacher_parsed))\n \n return teachers", "def find_transcript_data(row):\n #Calls create_transcript_df which creates a df for each transcript.\n trans_df = create_transcript_df(\n row, row['teacher_handle'].strip(), \n row['student_handle'].strip(), row['transcript'].strip())\n #Finds the first response time and defines it as the First Response Time (FRT)\n rt, frt = rt_data(trans_df)\n student_response, teacher_response = response_lengths(trans_df)\n #vocab = the total number of vocab words used in the transcript.\n vocab_list = np.asarray([item for sublist in trans_df.vocab.values for item in sublist])\n session_length_secs = (trans_df.Time_Stamps.iloc[-1] - trans_df.Time_Stamps.iloc[0]).seconds\n \n #Finding student to teacher ratio, round to nearest hundreth.\n exchange_ratio = round(trans_df.Student_Bool.sum()/float((trans_df['Student_Bool']==False).sum()),2)\n #returns all of the data found above, place in new columns under plain df.\n return trans_df.to_dict(), frt, rt, trans_df.vocab_count.sum(), vocab_list, trans_df.approp_count.sum(), session_length_secs, student_response, teacher_response, exchange_ratio,trans_df.has_drag_drop.sum()", "def score_list_teacher(request):\n\n #takes = Take.objects.filter(teacher__username=request.data[\"pid\"])\n #scores=Score_Relation.objects.all()\n #print(scores[0])\n score_relations=Score_Relation.objects.filter(course_select_info__course__teacher__username=request.data[\"pid\"])\n #serializer = TakeSerializer(takes, many=True)\n serializer=ScoreRelationSerializer(score_relations,many=True)\n return Response(serializer.data)\n\n #serializer = TakeSerializer(data=request.data)\n #if serializer.is_valid():\n # serializer.save()\n # return Response(serializer.data, status=status.HTTP_201_CREATED)\n #return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def getEMPStudyList(self):\n try:\n studies = []\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('qiime_assets.get_emp_study_list', [results])\n for row in results:\n # study_id, sample_id, sample_name, project_name, 
study_title, email, sample_count, metadata_complete,\n # study_score, sample_score, s.number_samples_promised, s.number_samples_in_freezer, \n # s.principal_investigator\n studies.append((row[0], row[1], row[2], row[3], row[4], row[5],\n row[6], row[7], row[8], row[9], row[10], row[11], row[12]))\n return studies\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)", "def get_sensitive_hits(primers,\n input_fasta_files,\n percent_match,\n sequence_length,\n region_slice):\n\n seq_count=0\n for n in input_fasta_files:\n seq_total_target=get_sequence_count(n)\n deletion_threshold=get_deletion_threshold(percent_match,\n seq_total_target)\n fasta_f=open(n,'U')\n for label,seq in MinimalFastaParser(fasta_f):\n seq_count+=1\n unaligned_seq = seq.replace(\"-\",\"\")\n unaligned_seq = unaligned_seq.replace(\".\",\"\")\n unaligned_seq = unaligned_seq.upper()\n unaligned_seq = unaligned_seq.replace(\"U\",\"T\")\n integer_mapped_seq = convert_to_numeric(unaligned_seq)\n primers=find_sensitive_primer_matches(primers, integer_mapped_seq,\n deletion_threshold, seq_count, sequence_length,\n label,unaligned_seq, region_slice, seq)\n fasta_f.close()\n \n return primers", "def compute_algorithm_error(submissions, teacher, automatic_assessments):\n\n\n error = 0.0\n count = 0\n\n for submission in submissions:\n found = False\n\n for assessment in automatic_assessments:\n if (assessment.get_submission().__eq__(submission)):\n found = True\n error += calculate_distance(teacher.get_mark(submission), assessment.get_mark())\n count += 1\n break\n\n if (not found): # There is no deduced assessment, we asume a default mark\n default_mark = MAX_MARK_VALUE / 2\n error += calculate_distance(teacher.get_mark(submission), default_mark)\n count += 1\n\n if (count > 0):\n error /= count\n return error" ]
[ "0.59553725", "0.5373946", "0.5306007", "0.5172902", "0.5093456", "0.4992784", "0.49038228", "0.49021885", "0.48847973", "0.48658186", "0.4847523", "0.4831599", "0.48291838", "0.4825473", "0.47478107", "0.4725367", "0.47177714", "0.4660074", "0.46355888", "0.46003297", "0.45957804", "0.45941228", "0.45803863", "0.45734823", "0.45459005", "0.45449197", "0.4532906", "0.45194337", "0.45186523", "0.45022792" ]
0.7810591
0
Gets the count for the current tab and the count for the conversations in all 4 tabs
def get_all_conversation_type_counts(survey_id, conversation_tab, business_id, category): logger.info( "Retrieving count of threads for all conversation tabs", survey_id=survey_id, conversation_tab=conversation_tab, business_id=business_id, category=category, ) response = _get_conversation_counts(business_id, conversation_tab, survey_id, category, all_conversation_types=True) try: response.raise_for_status() except HTTPError: logger.exception("Thread count failed") raise ApiError(response) logger.info("Count successful") try: totals = response.json()["totals"] # Secure Message uses different identifiers to the tab names used in the ui, this translates the names if "new_respondent_conversations" in totals: totals["initial"] = totals.pop("new_respondent_conversations") if "my_conversations" in totals: totals["my messages"] = totals.pop("my_conversations") totals["current"] = totals[conversation_tab] return totals except KeyError: logger.exception("Response was successful but didn't contain a 'totals' key") raise
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_conversation_counts(business_id, conversation_tab, survey_id, category, all_conversation_types):\n params = _get_secure_message_threads_params(\n survey_id, business_id, conversation_tab, category, all_conversation_types\n )\n url = f'{current_app.config[\"SECURE_MESSAGE_URL\"]}/messages/count'\n response = requests.get(url, headers={\"Authorization\": _get_jwt()}, params=params)\n return response", "def count_current():\n return current.count()", "def message_count(self):\n pass", "def count_urls(self):\n return self.request(\"count:Message_Url\", [ None ])", "def getCountOfNewGlobalMsgs(self): \n try:\n NewMsgs = self.getNewGlobalMsgs \n return 0 if isinstance(NewMsgs[1], list) else NewMsgs[1].count() # count only global msgs ( cause local stay the same )\n except AttributeError as er:\n print('looks like there is no messages in chat yet CGM')\n print(er)\n except Exception as er:\n print('Wierd Unexpected Error')\n print(er)\n return None", "def notification_count(request):\n\n # Check if logged in\n user = request.user\n if not user.is_authenticated():\n return {}\n\n return {\n 'notification_count': len(user.profile.notifications.filter(read=False))\n }", "def getCount(self):\n return self.base.get(\"count\", [])", "def get_count(self):\r\n return self.count", "def count(self):\n return self.get_count()", "def getMessageCount(self):\n return 9", "def numberActivities(self):\n if self.use_dic:\n nb_data = self.dic.keys()\n nb_act = (self.dic[nb_data[0]]).keys()\n return len(nb_data)*len(nb_act)\n else:\n return -1", "def count(request):\r\n n = request.user.profile.unread_message_count()\r\n data = {\r\n 'count': n,\r\n }\r\n return HttpResponse(json.dumps(data), mimetype='application/json')", "def getCount(self):\n return self.count", "def count_subscribers(self):\n return self.request(\"count:Contact\", [ None ])", "async def on_count(ctx):\n count = get_count()\n await ctx.send(f'current count {count}')", "def combined_inbox_count(request):\r\n count = 0\r\n for func in inbox_count_sources():\r\n counts = func(request)\r\n if counts:\r\n for value in counts.itervalues():\r\n try:\r\n count = count + int(value)\r\n except (TypeError, ValueError):\r\n pass\r\n return {'combined_inbox_count': count,}", "def get_count(self):\n return self.count", "def get_count(self):\n return self.count", "def count():", "def count(self):\n return self.ming_cursor.count()", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def getconnectioncount(self):\n return self.proxy.getconnectioncount()", "def GetPageCount(self):\r\n\r\n return self._tabs.GetPageCount()", "def get_counts(self):\n value = self.text_ctrl.GetValue()\n chars = len(value)\n words = len(re.findall('\\w+', value))\n pub.sendMessage('update_counts', chars=chars, words=words)", "def get_num_displayed_responses(self):\r\n return len(self._find_within(\".discussion-response\"))", "def getAppCount(self):\n logger.debug('Getting the number of apps discovered...')\n return get_text(get_element_by_css(\"span[data-nsmodule='totalAppCount']\"))", "def count_chat_with(self, actor_label):\n query = read_query('trust/count_chat_with') % actor_label\n response = self._submit_query(query)\n\n return response[0]['num_chats']['value'].split('/')[-1] if response != [] else ''" ]
[ "0.70647997", "0.5977005", "0.58980954", "0.5830759", "0.58140135", "0.5813447", "0.57283854", "0.5707127", "0.56924605", "0.56898373", "0.5689461", "0.56750286", "0.5671644", "0.5657192", "0.56248367", "0.56144553", "0.5568868", "0.5568868", "0.5531278", "0.55113405", "0.54769284", "0.54769284", "0.54769284", "0.54769284", "0.5458073", "0.5456128", "0.54559416", "0.545447", "0.544881", "0.54445094" ]
0.6258583
1
Gets the count of conversations based on the params
def _get_conversation_counts(business_id, conversation_tab, survey_id, category, all_conversation_types): params = _get_secure_message_threads_params( survey_id, business_id, conversation_tab, category, all_conversation_types ) url = f'{current_app.config["SECURE_MESSAGE_URL"]}/messages/count' response = requests.get(url, headers={"Authorization": _get_jwt()}, params=params) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getNumberOfConversations(node, catalog=None):\n if catalog is None:\n catalog = getToolByName(node, 'portal_catalog')\n return len(catalog(\n object_provides=IConversation.__identifier__,\n path='/'.join(node.getPhysicalPath())))", "def count_chat_with(self, actor_label):\n query = read_query('trust/count_chat_with') % actor_label\n response = self._submit_query(query)\n\n return response[0]['num_chats']['value'].split('/')[-1] if response != [] else ''", "def count_subscribers(self):\n return self.request(\"count:Contact\", [ None ])", "def message_count(self):\n pass", "async def messagecount(self, ctx, name=None):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n async with ctx.channel.typing():\r\n username = name\r\n if username is None:\r\n username = ctx.message.author.name\r\n resp = await self.req('https://api.scratch.mit.edu/users/' + username + '/messages/count')\r\n if resp is None and name is None:\r\n username = getattr(ctx.message.author, 'nick', '_')\r\n resp = await self.req('https://api.scratch.mit.edu/users/' + username + '/messages/count')\r\n logger.info('Scratch.messagecount: ' + username, extra={'invoker': ctx.message.author.name})\r\n if resp is None:\r\n await ctx.send(\"Couldn't get message count for \" + username)\r\n else:\r\n await ctx.send('{} has {} messages'.format(\r\n username,\r\n json.loads(resp)['count']\r\n ))", "def get_messages_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n return _get_count(khoros_object, user_settings['id'], 'messages')", "def contact_count(self, *args, **kwargs) -> Any:\n pass", "def count_messages(queryset):\n messages = messaging.models.Message.objects.filter(thread__ad=OuterRef('pk')).only('pk')\n return queryset.annotate(message_count=core.utils.SubqueryCount(messages))", "def count_messages(self, statuses=DEFAULT_MESSAGE_STATUSES):\n return self.request(\"count:Message\", [{\"status\": statuses}])", "def client_count(request):\n return request.param", "def message_count(self):\n return len(self.messages)", "async def _count(\n self, ctx: Context, user: discord.Member, channel: discord.TextChannel = None\n ):\n\n if not channel:\n channel = ctx.channel\n\n count = 0\n async with ctx.typing():\n async for message in channel.history(limit=None):\n if message.author.id == user.id:\n count += 1\n\n await ctx.send(_(\n \"{} has sent **{}** messages in {} channel.\"\n ).format(user.name, count, channel.mention))", "def count(request):\r\n n = request.user.profile.unread_message_count()\r\n data = {\r\n 'count': n,\r\n }\r\n return HttpResponse(json.dumps(data), mimetype='application/json')", "def count_urls(self):\n return self.request(\"count:Message_Url\", [ None ])", "def count(self, page_size=10, vtimeout=10):\r\n a = self.get_attributes('ApproximateNumberOfMessages')\r\n return int(a['ApproximateNumberOfMessages'])", "def combined_inbox_count(request):\r\n count = 0\r\n for func in inbox_count_sources():\r\n counts = func(request)\r\n if counts:\r\n for value in counts.itervalues():\r\n try:\r\n count = count + int(value)\r\n except (TypeError, ValueError):\r\n pass\r\n return {'combined_inbox_count': count,}", "def get_members_count(self, *args, **kwargs):\n return self.bot.get_chat_members_count(self.id, *args, **kwargs)", "def _get_count(self, msg, subtype=\"all\"):\n try:\n counts = self.get_local(msg, \"counts\")\n return counts.get(subtype, 0)\n except 
KeyError:\n return 0", "def notification_count(request):\n\n # Check if logged in\n user = request.user\n if not user.is_authenticated():\n return {}\n\n return {\n 'notification_count': len(user.profile.notifications.filter(read=False))\n }", "def getMessageCount(self):\n return 9", "async def sqs_count(self, _) -> Response:\n message = {\n \"message_count\": self.sqs.count()\n }\n return Response(text=json.dumps(message))", "async def count(self, **kw):\n\n pass", "def get_num_displayed_responses(self):\r\n return len(self._find_within(\".discussion-response\"))", "def consumer_count(self, obj):\n return obj.get_or_set_consumer_count()", "def count(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'count')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def messages_count(self, **kwargs):\n if \"order\" in kwargs and kwargs[\"order\"]:\n sign = kwargs[\"order\"][:1]\n criterion = kwargs[\"order\"][1:].upper()\n if sign == '-':\n criterion = \"REVERSE %s\" % criterion\n else:\n criterion = \"REVERSE DATE\"\n folder = kwargs[\"folder\"] if \"folder\" in kwargs else None\n\n # FIXME: pourquoi suis je obligé de faire un SELECT ici? un\n # EXAMINE plante mais je pense que c'est du à une mauvaise\n # lecture des réponses de ma part...\n self.select_mailbox(folder, readonly=False)\n cmdname = \"SORT\" if six.PY3 else b\"SORT\"\n data = self._cmd(\n cmdname,\n bytearray(\"(%s)\" % criterion, \"utf-8\"),\n b\"UTF-8\", b\"(NOT DELETED)\", *self.criterions)\n self.messages = data[0].decode().split()\n self.getquota(folder)\n return len(self.messages)", "def message_count(self):\n return self._message_count", "async def _vote_count(\n self, ctx: Context, *, channel: discord.TextChannel = None\n ):\n\n guild: discord.Guild = ctx.guild\n\n if not channel:\n channel = await self.get_vote_channel(guild)\n if isinstance(channel, str):\n return await ctx.send(channel)\n\n history = await channel.history(oldest_first=True).flatten()\n if len(history) > 100:\n return await ctx.send(_(\n \"I couldn't identify a voting channel. 
Please specify one explicitly.\"\n ))\n else:\n history = await channel.history(oldest_first=True).flatten()\n if len(history) > 100:\n return await ctx.send(_(\n \"That channel has too many messages!\"\n \" Please ask a host for manual vote count.\"\n ))\n\n if len(history) < 1:\n return await ctx.send(_(\"{} is empty.\").format(channel.mention))\n\n user_votes = {}\n player_role = guild.get_role(\n await self.config.guild(guild).player_id()\n )\n\n for message in history:\n author = message.author\n if player_role not in author.roles:\n continue\n vote = self.get_vote_from_message(message)\n if not vote:\n continue\n user_votes[f\"{author.name}#{author.discriminator}\"] = vote\n\n user_votes = await self.get_non_voters(guild, user_votes)\n\n votes = {}\n for user in user_votes:\n val = user_votes[user].capitalize()\n try:\n votes[val].append(user)\n except KeyError:\n votes[val] = [user]\n\n # max votes first\n votes = dict(sorted(\n votes.items(), key=lambda item: len(item[1]), reverse=True\n ))\n\n # Pop and add stuff back to dict for ordering purpose.\n try:\n votes[\"VTNL\"] = votes.pop(\"Vtnl\")\n except KeyError:\n pass\n try:\n votes[\"No vote\"] = votes.pop(\"No vote\")\n except KeyError:\n pass\n\n txt = \"\"\n\n for i, vote in enumerate(votes, start=1):\n voters = votes[vote]\n\n if vote == \"VTNL\":\n txt += _(\"\\n\\n**{}** - {} ({})\").format(vote, len(voters), \", \".join(voters))\n elif vote == \"No vote\":\n txt += _(\"\\n\\n**Not voting** - {} ({})\").format(len(voters), \", \".join(voters))\n else:\n txt += _(\"\\n{}. **{}** - {} ({})\").format(i, vote, len(voters), \", \".join(voters))\n\n title = _(\"Vote Count\")\n\n embed = discord.Embed(\n color=0x00CDFF, title=title,\n description=_(\"__Counting from {} channel.__\\n\\n{}\").format(\n channel.mention, txt.strip()\n )\n )\n\n try:\n await ctx.send(embed=embed)\n except discord.Forbidden:\n await ctx.send(\n f\"**{title}**\\n\\n__Counting from {channel.mention}\"\n f\" channel.__\\n\\n{txt.strip()}\"\n )", "def get_connected_users_count(room: PublicChatRoom) -> int:\n return room.users.count()", "async def users(ctx):\n\n if ctx.channel.name.lower() in channels:\n await ctx.send(f\"\"\"# of members: {ctx.guild.member_count}\"\"\")" ]
[ "0.6763089", "0.6620903", "0.64600253", "0.6158435", "0.61192197", "0.6113174", "0.60978407", "0.60919636", "0.6040849", "0.6025221", "0.59833664", "0.59689814", "0.5946903", "0.5919529", "0.59178835", "0.5917685", "0.59037703", "0.5818629", "0.58184177", "0.5817921", "0.5766479", "0.57360625", "0.5721099", "0.57197076", "0.57162267", "0.57054436", "0.56940395", "0.5688411", "0.5674532", "0.566433" ]
0.71310955
0
Check if message contains correct checksum
def _validate_checksum(self, msg: bytes) -> bool: return self._checksum(msg) == msg[8]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify_checksum(message, previous_csum=0):\n if message.message_type in CHECKSUM_MSG_TYPES:\n csum = compute_checksum(\n message.checksum[0],\n message.args,\n previous_csum,\n )\n\n if csum == message.checksum[1]:\n return True\n else:\n return False\n else:\n return True", "def validate_checksum(self):\n return self.calculate_checksum() == self.checksum()", "def valid_checksum(self, msg: dict) -> bool:\n packed_seg = struct.pack(HEADER_FORMAT + DATA_FORMAT, msg['seq_nr'], msg['ack_nr'], msg['flag'].value,\n msg['win'], msg['dlen'], 0, msg['data'])\n cksum = self.calc_checksum(packed_seg)\n return cksum == msg['cksum']", "def checkChecksum(self):\n if not self.checkPacketLength():\n return False\n return CCSDS.DU.DataUnit.checkChecksum(self)", "def validate(msg):\n valid = True\n\n if not msg or len(msg) < 4:\n return False, -1, -1\n\n checksum = msg[-1]\n length = int(''.join('{:02X}'.format(byte) for byte in msg[1:3]), 16)\n # try:\n # # here works for pyton 3 only\n # length = int.from_bytes(msg[1:3], byteorder='big', signed=False)\n # except Exception:\n # length = int(''.join('{:02X}'.format(byte) for byte in msg[1:3]), 16)\n\n validlen = len(msg[3:-1])\n validsum = 0xFF - ((sum(msg[3:-1])) & 0xFF)\n\n # print('length: ' + str(self.length) + '; ' + str(validlen))\n # print('checksum: ' + str(self.checksum) + '; ' + str(validsum))\n\n # check sanity of computed Length and Checksum with the one in the message\n if (checksum != validsum) or (length != validlen):\n valid = False\n\n return valid, length, checksum", "def checksum(message):\n check = 0\n for c in message:\n check += ord(c)\n return check % 256", "def _verify_checksum(data, checksum):\n sha256_hash = hashlib.sha256(data).hexdigest().encode()\n return to_bin(sha256_hash)[0 : len(data) * 8 // 32] == checksum", "def test_wrong_checksum(self):\n self.assertNotEqual(utils.checksum('fooo'), b'A')", "def verify_checksum(self):\n return self.generate_header_checksum(omit_checksum=False) == 0", "def __check(self, msg):\n msg = bytearray(msg)\n # Check that header is correct\n if msg[:2] != b'\\xFB\\xBF':\n return False\n # Check that ending is correct\n elif msg[-1:] != b'\\xED':\n return False\n # Check that check byte is correct\n elif msg[-2:-1] != bytes([sum(msg[2:-2]) % 256]):\n return False\n else:\n return True", "def check_packet(self, header, string):\n\n string = string[0:11] + string[75:]\n gen_chksum = hashlib.sha256(string.encode()).hexdigest()\n try:\n if header[\"checksum\"] == gen_chksum:\n return True\n else:\n return False\n except KeyError:\n return False", "def test_right_checksum(self):\n self.assertEqual(utils.checksum('fooo'), 'L')", "def check_pack_checksums():\n conn = sqlite3.connect(DBNAME)\n c = conn.cursor()\n for row in c.execute(\"SELECT lower(hex(sum)) FROM packs\"):\n checksum = row[0]\n res = s3.get_object(Bucket=BUCKET, Key=f\"{checksum}.pack\")\n body = res[\"Body\"]\n h = blake3.blake3()\n for chunk in iter(lambda: body.read(4096), b\"\"):\n h.update(chunk)\n\n c = h.hexdigest()\n if c != checksum:\n raise ValueError(\"pack {checksum}: checksum {c} does not match\")", "def calculate_checksum(self, message):\n return sum([int(x, 16) if type(x) == str else x for x in message]) & 0xFF", "def checkChecksum(key):\n\t#decode to base256\n\tcheckKey = enc.b58decode(key)\n\tchecksum = checkKey[-4:]\n\thash = hashlib.sha256(hashlib.sha256(checkKey[:-4]).digest()).digest()[:4]\n\tif hash == checksum:\n\t\treturn True\n\telse:\n\t\treturn False", "def crcCheck(serialMessage):\n checkResult = False\n\n 
#CRC from serial message\n crc = int.from_bytes(serialMessage[14:16], byteorder='little', signed=False)\n #calculated CRC\n crcCalc = libscrc.modbus(serialMessage[0:14])\n\n if crc == crcCalc:\n checkResult = True\n\n return checkResult", "def checksum(self,msg):\n cksum = sum([ord(x) for x in msg])\n cksum0 = ((cksum & 0xF0) >> 4) + 0x30\n cksum1 = (cksum & 0x0F) + 0x30\n return chr(cksum0)+chr(cksum1)", "def verify_blob_checksum(self, blob):\n path = self.csum_to_path(blob)\n csum = path.checksum()\n return csum != blob", "def correct_checksum():\n test_strs = [\"ch3ck1nG c0rr3ct ch3cksu|\\/|\\n\", \"y3T an0th3r str1ng0_x\\/.!&\\n\"]\n\n def test_checksum(test_str):\n server = start_server()\n client = start_client()\n\n write_to(client, test_str)\n segments = read_segments_from(client)\n if not segments:\n return False\n teardown()\n\n # Start reference solution to get answers.\n ref_server = start_server(port=REF_PORT, reference=True)\n ref_client = start_client(server_port=REF_PORT, reference=True)\n\n # Get reference checksum.\n write_to(ref_client, test_str)\n ref_segment = read_segments_from(ref_client)[0]\n ref_checksum = ref_segment.checksum\n\n # Check the first sent segment.\n segment = segments[0]\n\n # Checksum equal to the reference checksum.\n if segment.checksum == ref_checksum:\n return True\n\n # Maybe they also set an ACK for this segment. Compare with the computed\n # checksum.\n return int(segment.checksum, 16) == segment.c_repr.cksum;\n\n return reduce(lambda a, b: a and b, [test_checksum(t) for t in test_strs])", "def valid_response(line):\n cksum = int(line[-2:], 16) # checksum is last two characters in ASCII hex\n data = line[:-2] # remove checksum from data\n\n calc_cksum = checksum(data)\n if cksum != calc_cksum:\n log.debug('checksum failed (%r): should be %s', line, hex(calc_cksum))\n return False\n return True", "def check_record(self, record):\n checking = reduce(lambda x,y: x + y, [int(record[i*2:i*2+2], 16) for i in [x for x in xrange(len(record)/2)]])\n if ('%02x' % checking)[-2:] != '00':\n raise Exception ('ERROR: Checksum doesn\\' match! 
Record is %s' % (record, ))", "def validate_checksum(blob: bytes, offset: int, length: int):\n\n checksum = ord(blob[offset + length - 1:offset + length])\n data_sum = sum(\n struct.unpack('%dB' % (length - 1), blob[offset:offset + length - 1])\n )\n if 0xff & (data_sum + checksum) != 0:\n raise ValueError('The data do not match the checksum')", "def check_crc(chunk, crc):\n\n crc = bytes(crc)\n crc_this = bytes(ensure_crc(crc16.crc16xmodem(bytes(chunk))).encode('utf-8'))\n if crc_this == crc:\n return True\n else:\n return False", "def _check_hash(self, text):\n old = self.header.get(\"sha1sum\", None)\n if old is None:\n raise crexc.ChecksumError(\"sha1sum is missing in \" + repr(self.basename))\n if self._get_checksum(text) != self.header[\"sha1sum\"]:\n raise crexc.ChecksumError(\"sha1sum mismatch in \" + repr(self.basename))", "def is_valid_payload(p):\n # if the checksum is valid the checksum calculation, without removing the\n # checksum, should be equal to zero\n\n if checksum16(p) == 0:\n return True\n else:\n return False", "def _check_md5(self):\n\n self.log.info('-' * 80)\n self.log.info('Check md5 sum')\n\n self.log.info(self._ref_value)\n self.log.info(self._output_file)\n\n code, out = cmd_exec(['md5sum', self._output_file], shell=False, log=self.log)\n if code:\n self.log.error(out)\n return False\n self.log.info(out)\n\n md5sum, _ = out.split(' ')\n\n self.log.info(f'reference md5: {self._ref_value}')\n self.log.info(f'actual md5: {md5sum}')\n\n if self._ref_value != md5sum:\n return False\n\n return True", "def _get_checksum(self, arg):", "def bech32_verify_checksum(hrp, data):\n return bech32_polymod(bech32_hrp_expand(hrp) + data) == 1", "def calculate_checksum(self, message):\n s = 0\n for i in range(0, len(message)-1, 2):\n w = (message[i]) + (message[i + 1] << 8) << 8\n s = ((w + s) & 0xffff) + ((w + s) >> 16)\n return s", "def _check_md5sum(_setup_str, src_host, src_pfn):\n\n error = PilotErrors()\n\n _cmd = '%suberftp %s \"quote cksm md5sum 0 -1 %s\"' % (_setup_str, src_host, src_pfn)\n estat, coutp = commands.getstatusoutput(_cmd)\n tolog('md5 uberftp done <%s> (%s): %s' % (_cmd, estat, coutp))\n\n if estat != 0:\n check_syserr(estat, coutp)\n if coutp.find('not understood') >= 0:\n tolog('!!WARNING!!2999!! MD5 unsupported by the server')\n return error.ERR_FAILEDMD5, coutp\n try:\n tmp0 = coutp.split('\\n')[-1]\n fmd5usm = tmp0.split()[1]\n # split removes also the trailing \"\\r\" that uberftp returns, no fmd5sum.strip()\n except:\n tolog('!!WARNING!!2999!! Unable to parse MD5')\n fmd5usm = ''\n return 0, fmd5usm" ]
[ "0.7705907", "0.7587994", "0.752931", "0.7426855", "0.7415338", "0.73797804", "0.7367894", "0.73633546", "0.72925925", "0.72435987", "0.7102735", "0.70819044", "0.702974", "0.70258206", "0.70078015", "0.70042783", "0.6903123", "0.6886536", "0.68748033", "0.68244356", "0.6795428", "0.6733948", "0.6691104", "0.6679007", "0.6662397", "0.66249985", "0.6615172", "0.6607707", "0.6602796", "0.65796405" ]
0.81222075
0
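A minimal, self-contained sketch of the CRC pattern the snippets above keep repeating: recompute CRC-16/Modbus over the first 14 bytes of a frame and compare it with the little-endian checksum stored in bytes 14-15. The frame layout and names are illustrative assumptions; the CRC routine is written out so no external library is needed.

def crc16_modbus(data: bytes) -> int:
    # CRC-16/Modbus: init 0xFFFF, reflected polynomial 0xA001.
    crc = 0xFFFF
    for byte in data:
        crc ^= byte
        for _ in range(8):
            crc = (crc >> 1) ^ 0xA001 if crc & 1 else crc >> 1
    return crc

def frame_checksum_ok(serial_message: bytes) -> bool:
    # Stored CRC occupies bytes 14-15, little-endian, as in the snippet above.
    stored = int.from_bytes(serial_message[14:16], byteorder="little", signed=False)
    return stored == crc16_modbus(serial_message[0:14])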
Trigger zero calibration (0PPM for Z14, 400 PPM for Z19)
def zero_calibrationn(self):
    self.link.write(self._calibrateZeroSequence)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calibration(self, cal: int, /) -> None:", "async def calibrate_zero(self):\n await self.hw_device.set_signal(self.channel)", "def _doCalibration(self):\n self._cmdCalibration(2)", "def photometric_calibration():\n pass", "def calibration(self) -> int:", "def calibration(self, pulse_min: int, pulse_max: int, pulse_centre: int, /) -> None:", "def test_blank(self):\n self._calibration_test(\"blank\")", "def calibrate(self):\n import time\n\n CALIBRATE_SLEEP = 0.75\n\n self.change_power(-self._calpow)\n encprev, encnow = 0, None\n while encprev != encnow:\n encprev = encnow\n time.sleep(CALIBRATE_SLEEP)\n encnow = self._bp.get_motor_encoder(self._port)\n self._pmin = encnow\n self.change_power(0)\n\n self.change_power(self._calpow)\n encprev, encnow = 0, None\n while encprev != encnow:\n encprev = encnow\n time.sleep(CALIBRATE_SLEEP)\n encnow = self._bp.get_motor_encoder(self._port)\n self._pmax = encnow\n self.change_power(0)\n\n if self._pmax == self._pmin:\n raise Exception('motor {} does not move'.format(self._port))\n\n self._pinit = (self._pmax + self._pmin) * 0.5\n time.sleep(0.5)\n self.to_init_position()", "def set_step_zero(self):\n self.gcam_landmatrix, self.ixr_ixm_ixg = self.cst.apply_constraints_zero()", "def initialise_calibration(self):\n for i in range(0, self.NUM_SENSORS):\n self.calibratedMax[i] = 0\n self.calibratedMin[i] = self.READING_TIMEOUT", "def calibration(exp, args):\n show_text(\"Nous allons faire un calibrage\", args).present()\n exp.clock.wait(1500)\n expyriment.stimuli.FixCross(size=(25, 25),\n line_width=3,\n colour=args[\"stimuli_color\"]).present()\n exp.clock.wait(2100)", "def _autozero(self, axes):\n debug('ControllerStartup._autozero(axes=%s)', axes)\n self.pidevice.ATZ(axes, ['NaN'] * len(axes))\n waitonautozero(self.pidevice, axes, **self._kwargs)\n setservo(self.pidevice, axes, [True] * len(axes), **self._kwargs)\n moveandwait(self.pidevice, axes, [0.0] * len(axes), **self._kwargs)", "def run_calib(projector=OPTOMA_HD33()):\n w, h = (0.2160, 0.2794)\n obj_points = np.array([[-w/2, h/2, 0], [w/2, h/2, 0],\n [-w/2, 0, 0], [w/2, 0, 0],\n [-w/2, 0, h/2], [w/2, 0, h/2]])\n\n global img_points, going\n img_points = []\n\n try:\n window = Window()\n window.MoveXY(1600,0)\n window.ShowFullScreen(True)\n going = True\n\n @window.eventx\n def EVT_MOUSE_EVENTS(evt):\n global going, img_points\n if evt.ButtonUp(wx.MOUSE_BTN_LEFT):\n img_points.append(evt.Position)\n print('Picked point %d of 6' % (len(img_points)))\n if len(img_points) == len(obj_points):\n print \"Done\"\n going = False\n\n print(\"\"\"[Extrinsic Calibration] \n\nThere should be 6 points marked on the table and backdrop. 
\nMoving the mouse over the projected display, click each of the points\nin order:\n (left top, on the backdrop),\n (right top, on the backdrop),\n (left center, on the crease),\n (right center, on the crease),\n (left bottom, on the table),\n (right bottom, on the table)\n\nFollow along with this illustration: http://imgur.com/asfsfd.jpg\n\nClick the six points:\n\"\"\")\n\n while going: cv.WaitKey(10)\n\n finally:\n window.Close()\n\n img_points = np.array(img_points, 'f')\n projector.calibrate_extrinsic(img_points, obj_points)\n\n np.save('%s/config/projector' % (newest_folder), (projector.KK, projector.RT))\n print('OK')", "def test_zero(self):\n controller = LinearController(self.G, 2, mode='zero')\n sim = simulation.Simulation(self.G, controller, dt=self.dt)\n sim.run(self.dt)\n\n self.assertAlmostEqual(np.linalg.norm(controller.W.ravel()), 0.0)\n self.assertAlmostEqual(np.linalg.norm(controller.out), 0.0)", "def goToZero():\n #on remet tout à zero\n usr_choice = 0\n fonctions_pfc.x = 0\n fonctions_pfc.y = 0\n fonctions_pfc.result = \"\"\n fonctions_pfc.pc_score = \" SCORE DU PC : \"\n fonctions_pfc.usr_score = \" SCORE DU PC : \"\n #on ré-affiche tous les composants\n display_pack_component()", "def test_calibration_with_negative_amp(self):\n backend = FakeHanoiV2()\n exp = StarkRamseyXY(\n physical_qubits=[0],\n stark_amp=-0.1, # negative amplitude\n backend=backend,\n stark_sigma=15e-9,\n stark_risefall=2,\n stark_freq_offset=80e6,\n )\n param_ram_x, _ = exp.parameterized_circuits()\n freq = backend.qubit_properties(0).frequency + 80e6 # positive frequency shift\n granularity = backend.target.granularity\n dt = backend.dt\n duration = granularity * int(round(4 * 15e-9 / dt / granularity))\n sigma = duration / 4\n\n with pulse.build() as ref_schedule:\n pulse.set_frequency(freq, pulse.DriveChannel(0))\n pulse.play(\n pulse.Gaussian(duration=duration, amp=0.1, sigma=sigma), pulse.DriveChannel(0)\n )\n\n test_schedule = param_ram_x.calibrations[\"StarkV\"][(0,), ()]\n self.assertEqual(test_schedule, ref_schedule)", "def proz(): \r\n print(\"processing: \",CURDATA()[0]) \r\n Check_180turn(left_boundary,right_boundary)\r\n EF() #exponential window multiplication + fourier\r\n APK0() #1. Phase correction 0th Ordnung\r\n APK1() #1. 
Phase correction 1st Ordnung\r\n ABS() #Baseline correction\r\n APK()\r\n ABS() #Baseline correction\r\n Check_180turn(left_boundary,right_boundary)", "def calibration(\n self,\n pulse_min: int,\n pulse_max: int,\n pulse_centre: int,\n pulse_angle_90: int,\n pulse_speed_100: int,\n /,\n ) -> None:", "def zeroing(self):\n x_zeroed, y_zeroed, z_zeroed = False, False, False\n self._stepper_x.set_stepper(defines.STEPPER_X_MAX_HZ / 2, -defines.BOARD_X_LENGTH)\n self._stepper_y_left.set_stepper(defines.STEPPER_Y_MAX_HZ / 2, -defines.BOARD_Y_LENGTH)\n self._stepper_y_right.set_stepper(defines.STEPPER_Y_MAX_HZ / 2, -defines.BOARD_Y_LENGTH)\n self._stepper_z.set_stepper(defines.STEPPER_Z_MAX_HZ / 2, -defines.BOARD_Z_LENGTH)\n\n while x_zeroed is False or y_zeroed is False or z_zeroed is False:\n if x_zeroed is False and self._switch_reset_x.get_state() is True:\n self._stepper_x.set_stepper(0, 0)\n x_zeroed = True\n\n if y_zeroed is False and self._switch_reset_y.get_state() is True:\n self._stepper_y_left.set_stepper(0, 0)\n self._stepper_y_right.set_stepper(0, 0)\n y_zeroed = True\n\n if z_zeroed is False and self._switch_reset_z.get_state() is True:\n self._stepper_z.set_stepper(0, 0)\n z_zeroed = True", "def startAndCalibrateWalabot():\n wlbt.StartCalibration()\n print('- Calibrating...')\n while wlbt.GetStatus()[0] == wlbt.STATUS_CALIBRATING:\n wlbt.Trigger()\n wlbt.Start()\n print('- Calibration ended.\\n- Ready!')", "def test_calibration_with_positive_amp(self):\n backend = FakeHanoiV2()\n exp = StarkRamseyXY(\n physical_qubits=[0],\n stark_amp=0.1, # positive amplitude\n backend=backend,\n stark_sigma=15e-9,\n stark_risefall=2,\n stark_freq_offset=80e6,\n )\n param_ram_x, _ = exp.parameterized_circuits()\n freq = backend.qubit_properties(0).frequency - 80e6 # negative frequency shift\n granularity = backend.target.granularity\n dt = backend.dt\n duration = granularity * int(round(4 * 15e-9 / dt / granularity))\n sigma = duration / 4\n\n with pulse.build() as ref_schedule:\n pulse.set_frequency(freq, pulse.DriveChannel(0))\n pulse.play(\n pulse.Gaussian(duration=duration, amp=0.1, sigma=sigma), pulse.DriveChannel(0)\n )\n\n test_schedule = param_ram_x.calibrations[\"StarkV\"][(0,), ()]\n self.assertEqual(test_schedule, ref_schedule)", "def testScaleZeroPoint(self):\n\n ZEROPOINT = 27\n self.sctrl = afwMath.StatisticsControl()\n self.sctrl.setNanSafe(True)\n\n config = SpatialScaleZeroPointTask.ConfigClass()\n config.zeroPoint = ZEROPOINT\n config.interpStyle = \"CONSTANT\"\n config.selectFluxMag0.retarget(SelectLsstSimFluxMag0Task)\n config.selectFluxMag0.database = \"test_select_lsst_images\"\n zpScaler = SpatialScaleZeroPointTask(config=config)\n\n \"\"\" Note: this order does not properly retarget\n zpScaler = ScaleZeroPointTask()\n zpScaler.config.doInterpScale = True\n zpScaler.config.zeroPoint = ZEROPOINT\n zpScaler.config.interpStyle = \"CONSTANT\"\n zpScaler.config.selectFluxMag0.retarget(SelectLsstSimFluxMag0Task)\n zpScaler.config.selectFluxMag0.database = \"test_select_lsst_images\"\n \"\"\"\n\n outCalib = zpScaler.getCalib()\n self.assertAlmostEqual(outCalib.getMagnitude(1.0), ZEROPOINT)\n\n exposure = self.makeTestExposure(10, 10)\n # create dataId for exposure. Visit is only field needed. 
Others ignored.\n exposureId = {'ignore_fake_key': 1234, 'visit': 882820621}\n\n # API for computImageScale() takes a dataRef not a dataId.\n exposureFakeDataRef = WrapDataId(exposureId)\n # test methods: computeImageScale(), scaleMaskedImage(), getInterpImage()\n imageScaler = zpScaler.computeImageScaler(exposure, exposureFakeDataRef)\n scaleFactorIm = imageScaler.getInterpImage(exposure.getBBox())\n predScale = np.mean(imageScaler._scaleList) # 0.011125492863357\n\n self.assertAlmostEqual(afwMath.makeStatistics(scaleFactorIm, afwMath.VARIANCE, self.sctrl).getValue(),\n 0.0)\n self.assertAlmostEqual(afwMath.makeStatistics(scaleFactorIm, afwMath.MEAN, self.sctrl).getValue(),\n predScale)\n\n mi = exposure.getMaskedImage()\n imageScaler.scaleMaskedImage(mi)\n self.assertAlmostEqual(mi.get(1, 1)[0], predScale) # check image plane scaled\n self.assertAlmostEqual(mi.get(1, 1)[2], predScale**2) # check variance plane scaled\n\n exposure.setCalib(zpScaler.getCalib())\n self.assertAlmostEqual(exposure.getCalib().getFlux(ZEROPOINT), 1.0)", "def setPhaseZero(self):\n self.write('PCLR')", "def ft_sensor_set_zero(self):\r\n return self._arm.ft_sensor_set_zero()", "def calibrate(): \n \n # Calibrate of the run using beam data. Creates a folder cal-files/caltag \n # containing all calibration data. \n CalObj = Calibration(steerfiles=steerfiles, name=localcaltag + '-cal') \n\n # Set Beam energy\n CalObj.set_beam_momentum(beamenergy)\n\n # Get gearfile and set air as DUT material\n localgearfile = CalObj.get_filename('gear.xml')\n set_parameter(gearfile=localgearfile, sensorID=11, parametername='thickness', value=0.0001)\n set_parameter(gearfile=localgearfile, sensorID=11, parametername='radLength', value=304000.0)\n \n # Create list of calibration steps \n calpath = create_calibration_path(CalObj)\n \n # Run the calibration steps \n CalObj.calibrate(path=calpath,ifile=rawfile_air,caltag=localcaltag)", "def arm_calibration(self):\n self.arm_motor.run_forever(speed_sp=self.MAX_SPEED)\n while not self.touch_sensor.is_pressed:\n time.sleep(0.01)\n self.arm_motor.stop()\n ev3.Sound.beep().wait()\n arm_revolutions_for_full_range = 14.2 * 360\n self.arm_motor.run_to_rel_pos(\n position_sp=-arm_revolutions_for_full_range,\n speed_sp=self.MAX_SPEED,\n stop_action=ev3.Motor.STOP_ACTION_BRAKE)\n self.arm_motor.wait_while(ev3.Motor.STATE_RUNNING)\n\n self.arm_motor.position = 0 # Calibrate the down position as 0 (this\n # line is correct as is).", "def rec_default(self):\n self.phase_triggers.setText('(0,1,320)')\n self.phase_min.setText('-1.57')\n self.phase_max.setText('1.57')", "def calibrate(self):\n self.mode = Mode.calibrating\n yaw_sensor = yaw_button()\n while not yaw_sensor.is_pressed():\n self.move_left()\n for _ in range(75):\n self.move_right()\n\n pitch_sensor = pitch_button()\n while not pitch_sensor.is_pressed():\n self.move_up()\n for _ in range(21):\n self.move_down()\n\n self.pitch = 0.0\n self.yaw = 0.0\n self.mode = Mode.waiting", "def force_calibration_controller_test():\n print(f\"Running {__file__}::{force_calibration_controller_test.__name__}()\")\n arm_state = State()\n alpha=0.2\n ctrl = EMAForceCalibrator(Connection(arm_state), alpha=alpha)\n cmd = Command()\n state = State()\n ctrl.execute(cmd, state)\n assert np.all(ctrl.force_average.array == 0)\n\n arm_state.sensor_force()[:] = [2,2,2,2,2,2]\n ctrl.execute(cmd, state)\n assert np.all(ctrl.force_average.array != 0)\n assert state.sensor_force().allclose(np.array([2,2,2,2,2,2])*(1-alpha), 0.001)\n\n for i in range(100):\n 
ctrl.execute(cmd, state)\n assert ctrl.force_average.allclose([2,2,2,2,2,2], 0.001)\n assert state.sensor_force().allclose([0,0,0,0,0,0], 0.001)\n\n arm_state.sensor_force()[:] = [0,0,0,0,0,0]\n ctrl.execute(cmd, state)\n assert state.sensor_force().allclose(np.array([-2,-2,-2,-2,-2,-2])*(1-alpha), 0.001)\n\n for i in range(100):\n ctrl.execute(cmd, state)\n assert ctrl.force_average.allclose([0,0,0,0,0,0], 0.001)\n assert state.sensor_force().allclose([0,0,0,0,0,0], 0.001)\n\n print(\"Passed.\")", "def calibrate(self):\n\t\tLTOGRIGHT = []\n\t\tLTOGUP = []\n\t\tRTOGRIGHT = []\n\t\tRTOGUP = []\n\t\tstart = time.time()\n\t\tcalibration_time = 5.0\n\t\twhile time.time() - start < calibration_time:\n\t\t\tevents = pygame.event.get()\n\t\t\tfor event in events:\n\t\t\t\tif event.type == pygame.JOYAXISMOTION:\n\t\t\t\t\tLTOGRIGHT.append(self.joystick.get_axis(self.LTOGRIGHT))\n\t\t\t\t\tLTOGUP.append(-self.joystick.get_axis(self.LTOGUP))\n\t\t\t\t\tRTOGRIGHT.append(self.joystick.get_axis(self.RTOGRIGHT))\n\t\t\t\t\tRTOGUP.append(-self.joystick.get_axis(self.RTOGUP))\n\n\t\t# calibration sets highest value equal to 1.0\n\t\tself.calibration[0] = 1.0/max(LTOGRIGHT)\n\t\tself.calibration[1] = -1.0/min(LTOGRIGHT)\n\t\tself.calibration[2] = -1.0/min(LTOGUP)\n\t\tself.calibration[3] = 1.0/max(LTOGUP)\n\t\tself.calibration[4] = 1.0/max(RTOGRIGHT)\n\t\tself.calibration[5] = -1.0/min(RTOGRIGHT)\n\t\tself.calibration[6] = -1.0/min(RTOGUP)\n\t\tself.calibration[7] = 1.0/max(RTOGUP)" ]
[ "0.6423298", "0.631655", "0.6298982", "0.6260567", "0.6258085", "0.62078565", "0.6157141", "0.5990511", "0.59166145", "0.5912427", "0.5871232", "0.5840601", "0.5815842", "0.5784849", "0.5768754", "0.5756073", "0.5748982", "0.5732277", "0.57183784", "0.5714611", "0.5675555", "0.56698626", "0.5657502", "0.5657347", "0.56501627", "0.563732", "0.5612506", "0.5603512", "0.55768317", "0.5568466" ]
0.6548889
0
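A hedged usage sketch for the zero-calibration entry above. The serial wiring and the nine-byte 0x87 frame are assumptions (0x87 is the zero-point calibration command commonly documented for MH-Z14/MH-Z19 CO2 sensors); verify against the sensor datasheet before sending it, and only with the sensor in the reference atmosphere the query describes.

import serial  # pyserial

class CO2Sensor:
    # Assumed zero-point calibration frame: command byte 0x87, checksum 0x78.
    _calibrateZeroSequence = b"\xff\x01\x87\x00\x00\x00\x00\x00\x78"

    def __init__(self, port="/dev/ttyUSB0"):
        self.link = serial.Serial(port, baudrate=9600, timeout=1)

    def zero_calibration(self):
        # Trigger zero calibration (0 PPM reference on Z14, ~400 PPM fresh air on Z19).
        self.link.write(self._calibrateZeroSequence)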
The function finds the most common seeds for several lengths and calculates the emissions and transitions matrices for each seed.
def get_seeds(seqs):
    k_lengths = list(range(6, 20))
    final_tuples = {}
    motifs_dic = {}
    # find most common seeds
    seeds = [find_motifs(seqs, k) for k in k_lengths]
    for i in k_lengths:
        motifs_dic[i] = seeds[i - 6][0:5]
    # calculate emissions and transitions
    global_possible_occurrences = [sum([len(seq) - k + 1 for seq in seqs]) for k in k_lengths]
    for key in motifs_dic.keys():
        key_tuples = []
        for m in motifs_dic[key]:
            seed = m[0]
            emissions, ind = build_e(seed, ALPHA)
            p = m[1] / global_possible_occurrences[key - 6]
            transitions = build_t(p, ind)
            key_tuples.append((seed, emissions, transitions))
        final_tuples[key] = key_tuples
    return final_tuples
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clusterSeeds(seeds, l): \n \n # Code to complete - you are free to define other functions as you like\n \n # The function will work like this,\n # first change all the (x( y1, y2, .., yn)) tuples into (x, y) tuples.\n # then from that point make the adj matrix so that it starts with itself\n # \n # Once the adjMatrix is set up the following will happen.\n # first a 2 seeds will compared.\n # if both are <= l distance away do this:\n # check if they have different componenets:\n # if they do merge the components then set\n # all the seeds in that component to have the \n # same seed values: the sum of both components\n # \n # I would like to cite James Casaletto for his help in office hours and\n # Alex Pearson and Kavya for thier helpful piazza posts on this probelm.\n\n # list for all the seeds\n adjMatrix = dict()\n Visited = dict()\n # first the tuple is turned into (x, y) tuples\n for x_value in range(len(seeds)):\n if seeds[x_value][1]:\n for y_value in seeds[x_value][1]:\n adjMatrix.setdefault((seeds[x_value][0], y_value), []).append((seeds[x_value][0], y_value))\n Visited[(seeds[x_value][0], y_value)] = False\n '''\n # now we cluster the seeds.\n for seed1 in range(len(list_of_seeds)):\n x_value, y_value = list_of_seeds[seed1]\n for seed2 in range(seed1, len(list_of_seeds)):\n x_value_2, y_value_2 = list_of_seeds[seed2]\n\n # if seeds are close get thier adj list\n if abs(x_value - x_value_2) <= l and abs(y_value - y_value_2) <= l:\n adj_seeds_1 = adjMatrix[list_of_seeds[seed1]]\n adj_seeds_2 = adjMatrix[list_of_seeds[seed2]]\n \n # if lists are different, concatenate.\n if adj_seeds_1 != adj_seeds_2:\n adj_seeds = adj_seeds_1 + adj_seeds_2\n \n # now set all seeds in adjMatrix equal to same components\n for seed in adj_seeds_1:\n adjMatrix[seed] = adj_seeds\n for seed in adj_seeds_2:\n adjMatrix[seed] = adj_seeds\n '''\n\n\n def Visit(value):\n ''' Visit the value and see whats near it. 
'''\n current = value\n Visited[current] = True\n for i in adjMatrix:\n if current == i : continue\n if abs(current[0] - i[0]) <= l and abs(current[1] - i[1]) <= l:\n newList = list(set(adjMatrix[current] + adjMatrix[i]))\n for item in (adjMatrix[i] + adjMatrix[current]):\n adjMatrix[item] = newList\n Visited[i] = True \n\n\n for seed in adjMatrix:\n if not Visited[seed]:\n Visit(seed)\n\n # turn the adj lists into seedCluster objects.\n set_of_clusters = set()\n\n for cluster in adjMatrix:\n set_of_clusters.add(SeedCluster(adjMatrix[cluster])) \n return set_of_clusters", "def form_common_kmer_matrix(all_CEs):\n A = np.zeros((len(all_CEs), len(all_CEs)), dtype=np.float64)\n chunk_size = 70000\n # Can precompute all the indicies\n indicies = []\n for i in xrange(len(all_CEs)):\n for j in xrange(len(all_CEs)):\n indicies.append((i, j))\n for sub_indicies in chunks(indicies, chunk_size):\n input_args = ((all_CEs[sub_indicies[i][0]], all_CEs[sub_indicies[i][1]]) for i in xrange(len(sub_indicies)))\n pool = Pool(processes=multiprocessing.cpu_count())\n res = pool.imap(form_common_kmer_matrix_helper, input_args, chunksize=np.floor(len(indicies)/float(multiprocessing.cpu_count())))\n # pool.close()\n # pool.join()\n # pool.terminate()\n for (i, j), val in zip(sub_indicies, res):\n A[i, j] = val[0] #res[i][0] # Replace i with i+last_index where last_index was the number of times the xranges executed before going into the pool\n A[j, i] = val[1] #res[i][1]\n print((i,j))\n print(val)\n\n pool.terminate()\n return A", "def marcovNuc (i = random.choice(stateSpace), step = 100):\n # matrix of transition probabilities\n #matrix = [[0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25]] \n matrix = [[0.4, 0.3, 0.2, 0.1], [0.4, 0.3, 0.2, 0.1], [0.4, 0.3, 0.2, 0.1], [0.4, 0.3, 0.2, 0.1]] \n step += 1 # add one to the range because we remove it at the end\n sims = [] # List to hold the results of the Marcov chain\n sims.append(i) # append the seed value to the sims list\n for x in range(step):\n \n if sims[-1] == 'A':\n w = np.random.random() # Random number generator\n # the next set of if statements determine where the random number \n # sits on the number line of probabilities\n if matrix[0][0] > w:\n sims.append('A')\n elif matrix[0][1] + matrix[0][0] > w:\n sims.append('C')\n elif matrix[0][2] + matrix[0][1] + matrix[0][0] > w:\n sims.append('G')\n else:\n sims.append('T')\n elif sims[-1] == 'C':\n x = np.random.random()\n if matrix[1][0] > x:\n sims.append('A')\n elif matrix[1][1] + matrix[1][0] > x:\n sims.append('C')\n elif matrix[1][2] + matrix[1][1] + matrix[1][0] > x:\n sims.append('G')\n else:\n sims.append('T')\n \n elif sims[-1] == 'G':\n y = np.random.random()\n if matrix[2][0] > y:\n sims.append('A')\n elif matrix[2][1] + matrix[2][0] > y:\n sims.append('C')\n elif matrix[2][2] + matrix[2][1] + matrix[2][0] > y:\n sims.append('G')\n else:\n sims.append('T')\n\n else:\n z = np.random.random()\n if matrix[3][0] > z:\n sims.append('A')\n elif matrix[3][1] + matrix[3][0] > z:\n sims.append('C')\n elif matrix[3][2] + matrix[3][1] + matrix[3][0] > z:\n sims.append('G')\n else:\n sims.append('T')\n\n return sims[1:-1] # remove the initial value (the seed)", "def form_common_kmer_matrix(CEs):\n # I could decreae the memory usage by not creating the whole list of input_args. 
Use an exterior chunk_size to create\n # a smaller list of CEs, consume those, and then repeat until finished.\n A = np.zeros((len(CEs), len(CEs)), dtype=np.float64)\n input_args = collections.deque()\n indicies = []\n for i in xrange(len(CEs)):\n for j in xrange(len(CEs)):\n input_args.append((CEs[i], CEs[j]))\n indicies.append((i, j))\n\n #input_args = ((CEs[i], CEs[j]) for i in xrange(len(CEs)) for j in xrange(len(CEs)))\n pool = Pool(processes=multiprocessing.cpu_count())\n res = pool.imap(form_common_kmer_matrix_helper, input_args, chunksize=np.floor(len(indicies)/float(multiprocessing.cpu_count()))) # chunk into fewest pieces possible\n # pool.close()\n # pool.join()\n # pool.terminate()\n #for i in xrange(len(indicies)):\n for i, val in enumerate(res):\n A[indicies[i][0], indicies[i][1]] = val[0] #res[i][0] # Replace i with i+last_index where last_index was the number of times the xranges executed before going into the pool\n A[indicies[i][1], indicies[i][0]] = val[1] #res[i][1]\n\n pool.terminate()\n return A", "def test_we_get_all_W_mers_we_asked_for(self):\n fasta_file = os.path.normpath(get_fasta_file('T00759-small.fa'))\n num_sites = [2, 4, 8, 16, 32]\n self.options.max_num_sites = max(num_sites)\n self.options.min_num_sites = min(num_sites)\n \n #\n # Load sequences and build index\n #\n algorithm = stempy.Algorithm(self.options)\n algorithm._initialise(fasta_file)\n data = algorithm.input_sequences.data\n\n for seed in (\n 'GCTAGCTAGCGG',\n 'ATGCAGAAAAATTAAG',\n 'TTTAAAATACTTTAAA',\n ):\n # seed a model\n logging.info('Using seed %s', seed)\n W = len(seed)\n model = algorithm.create_model_of_input(W)\n model.bs.seed(seed, True)\n model.set_lambda_for_sites(data.num_sequences)\n \n for num_to_find in num_sites:\n # look for best W-mers under model\n best_w_mer_finder = stempy.create_best_w_mer_finder(data, model, num_to_find)\n best_w_mer_finder()\n if len(best_w_mer_finder.best_w_mers) < num_to_find:\n if len(best_w_mer_finder.best_w_mers) != model.num_W_mers:\n logging.warning('Found %d W-mers', len(best_w_mer_finder.best_w_mers))\n logging.warning('%d W-mers available', model.num_W_mers)\n logging.warning('Wanted %d W-mers', num_to_find)\n raise ValueError('Did not find enough W-mers')", "def test_find_best_W_mers_2(self):\n self.options.min_num_sites = self.options.max_num_sites = num_to_find = 2\n \n # load data and create STEME object\n fasta_file = os.path.normpath(get_fasta_file('T00759-small.fa'))\n \n #\n # Load sequences and build index\n #\n algorithm = stempy.Algorithm(self.options)\n algorithm._initialise(fasta_file)\n data = algorithm.input_sequences.data\n\n for seed in (\n 'ATGCAGAAAAATTAAG',\n 'TTTAAAATACTTTAAA',\n ):\n # create and seed a model\n W = len(seed)\n model = algorithm.create_model_of_input(W)\n model.bs.seed(seed, True)\n model.set_lambda_for_sites(data.num_sequences)\n \n # look for best W-mers under model\n best_w_mer_finder = stempy.create_best_w_mer_finder(data, model, num_to_find)\n best_w_mer_finder()\n avg_Z = 0.\n for _eval in best_w_mer_finder.best_w_mers:\n logging.info(\n 'Seed: %s; Site: %s; p(binding): %.2e; p(not binding): %.2e',\n seed, data.get_W_mer(W, _eval.global_pos), _eval.Z, 1.-_eval.Z\n )\n avg_Z += _eval.Z\n logging.info('Seed: %s; Average Z: %.6f', seed, avg_Z / len(best_w_mer_finder.best_w_mers))\n \n #\n # Check we found the seed\n #\n for _eval in best_w_mer_finder.best_w_mers:\n if data.get_W_mer(W, _eval.global_pos) == seed:\n break\n else:\n raise RuntimeError('Could not find seed in best W-mers')\n \n #\n # Log the 
product of p-values\n #\n best_w_mer_finder.update_model(num_to_find, use_pseudo_counts=False)\n logging.info('Seed: %s; log PoP: %.6f', seed, algorithm.significance.log_product_p_values(model))", "def find_freq(self):\n state = self.env.reset()\n state_dim = len(state)\n seq = [state]\n\n for _ in range(self.args.exploration_iterations*10):\n action = np.random.randint(self.env.action_space.n)\n next_state, reward, done, info = self.env.step(action)\n seq.append(next_state)\n if done:\n state = self.env.reset()\n else:\n state = next_state\n\n # Create a primitive MDP for every unique state explored\n states = set(seq)\n for state in states:\n primitive_mdp = MDP(level=0, state_var=state)\n primitive_mdp.exits = {x for x in range(self.env.action_space.n)}\n primitive_mdp.mer = frozenset({state})\n primitive_mdp.primitive_states = {state}\n self.mdps[0].add(primitive_mdp)\n\n freq = [{'sv': i, 'last': None, 'changes': 0} for i in range(state_dim)]\n for state in seq:\n for i in range(state_dim):\n if freq[i]['last'] is None:\n freq[i]['last'] = state[i]\n else:\n if state[i] != freq[i]['last']:\n freq[i]['changes'] += 1\n freq[i]['last'] = state[i]\n\n sorted_freq = sorted(freq, key=lambda x: x['changes'], reverse=True)\n return [d['sv'] for d in sorted_freq], state_dim", "def test_specific_case(self):\n fasta_file = os.path.normpath(get_fasta_file('T00759-tiny.fa'))\n seed = 'AAAACCCA'\n W = len(seed)\n num_sites = 4\n self.options.max_num_sites = num_sites\n self.options.min_num_sites = num_sites\n \n #\n # Load sequences and build index\n #\n algorithm = stempy.Algorithm(self.options)\n algorithm._initialise(fasta_file)\n data = algorithm.input_sequences.data\n\n model = algorithm.create_model_of_input(W)\n model.bs.seed(seed, True)\n model.set_lambda_for_sites(data.num_sequences)\n\n # look for best W-mers under model\n best_w_mer_finder = stempy.create_best_w_mer_finder(data, model, num_sites)\n best_w_mer_finder()\n if len(best_w_mer_finder.best_w_mers) < num_sites:\n if len(best_w_mer_finder.best_w_mers) != model.num_W_mers:\n raise ValueError('Did not find enough W-mers')\n \n # We want to get these W-mers\n # \n # 2011-08-09 10:11:32,846 - INFO - Z=8.00e-02; pos= 313 +; AAAACCCA; AAAACCCA\n # 2011-08-09 10:11:32,846 - INFO - Z=4.37e-02; pos= 668 -; TGAGTTTT; AAAACTCA\n # 2011-08-09 10:11:32,846 - INFO - Z=1.37e-02; pos= 710 -; TGGTTTTC; GAAAACCA\n # 2011-08-09 10:11:32,846 - INFO - Z=1.37e-02; pos= 681 -; TGGTTCTT; AAGAACCA\n # \n for wmer, (global_pos, rev_comp) in zip(best_w_mer_finder.best_w_mers, [(313, False), (668, True), (710, True), (681, True)]):\n if wmer.global_pos != global_pos and wmer.Z < model.calculate_Z(global_pos, rev_comp):\n raise ValueError('Got wrong W-mers')", "def run(self, seed='old'):\n if seed == 'old':\n founds, number_found = self.find_in_base()\n param = number_found - self.M_N\n\n if param < 0:\n print \"We have only {0} usable chromosomes in the database, per {1} required.\".format(number_found, self.M_N)\n l, __ = self.evolve_partials(abs(param))\n combined = founds+[l[i].x for i in range(len(l))]\n\n elif param > 0:\n combined = random.sample(founds, self.M_N)\n\n else:\n combined = founds\n\n if seed == 'fresh':\n print \"Evolving fresh chromosomes...\"\n l, __ = self.evolve_partials(self.M_N)\n combined = [l[i].x for i in range(len(l))]\n\n if len(combined) != self.M_N: raise ValueError\n print \"\\nLaunching Multi-Objective evolution...\"\n isl, prob = self.mlt_obj_evo(combined)\n self.writing_finals(isl, prob)", "def 
buildNetwork(binary_matrix,seed_index):\n \n \n # Get starting point for network\n seed = set(np.where(binary_matrix[seed_index]==1)[0])\n cluster = set(seed)\n NEW = set(seed)\n cluster.update(np.array([seed_index]))\n NEW.update(np.array([seed_index]))\n while True:\n temp_set = set()\n for n in NEW:\n # temp_set will have all atoms, without duplicates,\n # that are connected to all atoms in NEW.\n temp_set.update(np.where(binary_matrix[n]==1)[0])\n if temp_set.issubset(cluster):\n # if temp_set has no new atoms, the search is done.\n break\n else:\n NEW = temp_set - cluster # List of newly discovered atoms\n cluster.update(temp_set) # cluster is updated with new atoms\n return(cluster)", "def count_transitions_and_emissions(K, D, x, z): \n trans_matrix = [ [ 0 for i in range(K) ] for j in range(K) ]\n emi_matrix = [ [ 0 for i in range(D) ] for j in range(K) ] \n \n print(\"Started counting transitions\")\n for i in range(len(z)-1):\n trans_matrix[z[i]][z[i+1]] += 1 \n\n print(\"Started counting emissions\")\n size_x = len(x)\n size_z = len(z)\n for i,_ in enumerate(zip(x,z)):\n emi_matrix[z[i]][x[i]] += 1\n \n return trans_matrix,emi_matrix", "def kmerConsensus(align,k=9,verbose=False):\n\n align = padAlignment(align)\n L = len(align.iloc[0])\n Nkmers = L-k+1\n\n \"\"\"Get a 2D array of alignment [nSeqs x nSites]\"\"\"\n mat = align2mat(align)\n\n full = [dict() for i in np.arange(L)]\n for starti in np.arange(Nkmers):\n \"\"\"Create a temporary alignment of the ith kmer\"\"\"\n tmpA = seqmat2align(mat[:, starti:starti+k])\n \"\"\"Pick off the most common kmer at that start position\"\"\"\n top1 = objhist(tmpA).topN(n=2)[0][0]\n if verbose:\n print(' '*starti + top1)\n #print ' '*starti + objhist(tmpA).topN(n=2)[1][0]\n \"\"\"Add each AA in the most frequent kmer to the consensus\"\"\"\n for j, startj in enumerate(np.arange(starti, starti+k)):\n try:\n full[startj][top1[j]] += 1\n except KeyError:\n full[startj][top1[j]] = 1\n \"\"\"Consensus is the mode AA at each position in full\"\"\"\n con = ''.join([max(list(pos.keys()), key=pos.get) for pos in full])\n if verbose:\n print('Seq1: true consensus')\n print('Seq2: %dmer consensus' % k)\n compSeq(consensus(align), con)\n return con, full", "def common(structs):\n \n frequency = {}\n v = 0\n indx = 0\n result = []\n tmp_list = [] #lookup the seq for the structures,dont care which winner seq \n key = []\n for block in structs:\n tmp_list.extend(block)\n p = tuple(block[-1])\n if frequency.__contains__(p): #everytime struct p appears count up by 1\n frequency[p]+=1\n else:\n frequency[p]=1\n nr = frequency[p]\n if nr > v: #Which struct appears most times\n v = nr\n key = p\n\n #if winning structure has frequency == 1 all structure apper only once\n if frequency[key]==1:\n longest = 0\n for block in structs:\n l = len(block[-1])\n if l > longest: #pick longest sequence as the winner\n key = tuple(block[-1])\n \n winner = Pairs(key)\n indx = tmp_list.index(winner)-1\n result.append([tmp_list[indx],winner]) #adds the most common structure first\n del frequency[key]\n for i in frequency.keys(): #rest of structures added\n i = Pairs(i)\n indx = tmp_list.index(i)-1\n result.append([tmp_list[indx],i])\n \n return result", "def lcs(s1, s2):\n\n shape = (len(s1) + 1, len(s2) + 1)\n M = np.zeros(shape)\n\n max_length_so_far = 0\n candidates = set()\n\n for i in range(len(s1)):\n for j in range(len(s2)):\n\n if s1[i] == s2[j]:\n M[i+1,j+1] = M[i,j] + 1\n\n if M[i+1,j+1] > max_length_so_far:\n max_length_so_far = int(M[i+1,j+1])\n candidates = {s1[i 
- max_length_so_far + 1: i + 1]}\n\n elif M[i+1,j+1] == max_length_so_far:\n candidates.add(s1[i - max_length_so_far + 1: i + 1])\n\n else:\n M[i,j] = 0\n\n candidates.add('')\n\n return candidates", "def get_matrix(self):\n matrix = np.zeros([len(self.states), len(self.states)])\n starting_states = []\n transitions = []\n\n for chords in self.training_data:\n states = []\n is_starting_state = True\n chunks = [chords[x:x+self.order] for x in range(0,\n len(chords), self.order)]\n for chunk in chunks:\n chunk_string = \" \".join(chunk)\n if is_starting_state:\n starting_states.append(chunk_string)\n is_starting_state = False\n states.append(chunk_string)\n\n for i in range(0, len(states)):\n if i < (len(states)) - 1:\n transitions.append([states[i], states[i + 1]])\n else:\n transitions.append([states[i]])\n\n self.starting_probabilities = np.zeros([len(self.states)])\n\n for transition in transitions:\n for row, row_contents in enumerate(self.transitions):\n for col, _ in enumerate(row_contents):\n if transition == self.transitions[row][col]:\n matrix[row][col] += 1\n\n for i, state in enumerate(self.states):\n for j, possible_state in enumerate(starting_states):\n if state == possible_state:\n self.starting_probabilities[j] += 1\n\n num = sum(self.starting_probabilities)\n for i, prob in enumerate(self.starting_probabilities):\n self.starting_probabilities[i] = prob / num\n\n for m in range(len(matrix)):\n num = sum(matrix[m])\n if int(num) is not 0:\n for i in range(len(matrix[m])):\n matrix[m][i] = (matrix[m][i] / num)\n else:\n matrix[m] = self.starting_probabilities\n return matrix", "def test_seed_same():\n\n skip_if_no_scipy()\n\n rng = np.random.RandomState([1,2,3])\n\n #the number in the argument here is the limit on\n #seed value\n seed = rng.randint(2147462579)\n\n dim = 3\n\n mu = rng.randn(dim)\n\n rank = dim\n\n X = rng.randn(rank,dim)\n\n cov = np.dot(X.T,X)\n\n mnd1 = MND( sigma = cov, mu = mu, seed = seed)\n\n num_samples = 5\n\n rd1 = mnd1.random_design_matrix(num_samples)\n rd1 = function([],rd1)()\n\n mnd2 = MND( sigma = cov, mu = mu, seed = seed)\n\n rd2 = mnd2.random_design_matrix(num_samples)\n rd2 = function([],rd2)()\n\n assert np.all(rd1 == rd2)", "def evaluateAllLastThreeGames(maxSeed):\n\t# Generate all possible 4-digit strings in base maxSeed\n\tseedsWithRepetition = ''\n\tfor seed in range(1, maxSeed + 1):\n\t\tseedsWithRepetition += 4 * str(seed)\n\tallPossibleStrings = set(permutations(seedsWithRepetition, 4))\n\t\n\tfor possibleString in allPossibleStrings:\n\t\tregionWinners = [int(possibleString[i]) for i in range(4)]\n\n\t\tfor j in range(8):\n\t\t\tf4Game1 = j % 2\n\t\t\tf4Game2 = int(j / 2) % 2\n\t\t\tncg = int(j / 4) % 2\n\t\t\ttotalLogProb = 0.\n\n\t\t\t# Round 5 (F4)\n\t\t\tteam1 = {'seed': regionWinners[0], 'region': 0}\n\t\t\tteam2 = {'seed': regionWinners[1], 'region': 1}\n\t\t\twinProb = getWinProbability(team1, team2, r=5)\n\t\t\tncgTeam1 = team1 if f4Game1 == 1 else team2\n\t\t\ttotalLogProb += math.log(winProb if f4Game1 == 1 else (1 - winProb))\n\n\t\t\tteam1 = {'seed': regionWinners[2], 'region': 2}\n\t\t\tteam2 = {'seed': regionWinners[3], 'region': 3}\n\t\t\twinProb = getWinProbability(team1, team2, r=5)\n\t\t\tncgTeam2 = team1 if f4Game2 == 1 else team2\n\t\t\ttotalLogProb += math.log(winProb if f4Game2 == 1 else (1 - winProb))\n\n\t\t\t# Round 6 (NCG)\n\t\t\twinProb = getWinProbability(ncgTeam1, ncgTeam2, r=6)\n\t\t\ttotalLogProb += math.log(winProb if ncg == 1 else (1 - winProb))\n\t\t\tpartialLogProb = totalLogProb\n\t\t\tfor 
regionWinner in regionWinners:\n\t\t\t\ttotalLogProb += mostLikelyRegions[str(regionWinner)][1]\n\t\t\tprint('{0},{1:03b},{2:.4f},{3:.4f}'.format(regionWinners, j, partialLogProb, totalLogProb))\n\tpass", "def test_seed_diff():\n\n skip_if_no_scipy()\n\n rng = np.random.RandomState([1,2,3])\n\n #the number in the argument here is the limit on\n #seed value, and we subtract 1 so it will be\n #possible to add 1 to it for the second MND\n seed = rng.randint(2147462579) -1\n\n dim = 3\n\n mu = rng.randn(dim)\n\n rank = dim\n\n X = rng.randn(rank,dim)\n\n cov = np.dot(X.T,X)\n\n mnd1 = MND( sigma = cov, mu = mu, seed = seed)\n\n num_samples = 5\n\n rd1 = mnd1.random_design_matrix(num_samples)\n rd1 = function([],rd1)()\n\n mnd2 = MND( sigma = cov, mu = mu, seed = seed + 1)\n\n rd2 = mnd2.random_design_matrix(num_samples)\n rd2 = function([],rd2)()\n\n assert np.any(rd1 != rd2)", "def main(N, fd):\n\n for case in xrange(N):\n (T, pair_list) = read_dataset(fd)\n num_intersect = solve_puzzle(pair_list)\n print('Case #%d: %d' % (case+1, num_intersect))", "def four_state_forecast(days):\n transition = np.array([ [.5, .3, .1, 0.],\n [.3, .3, .3, .3],\n [.2, .3, .4, .5],\n [0., .1, .2, .2]])\n state = 0\n record = []\n for day in xrange(days):\n state = np.argmax(np.random.multinomial(1, transition[:,state]))\n record.append(state)\n return record", "def count_kmer(gene_list, codon_seqs, R, kmer_size=3):\n\n kmer = kmer_size\n MM = 'yes'\n\n list_seqfile = list( codon_seqs.keys() )\n kmer_dict = {}\n\n for orf in gene_list:\n if orf in list_seqfile:\n current_seq = np.array(codon_seqs[orf])\n\n for pos in range(len(current_seq) - (kmer + 1) ):\n if MM == 'yes' and orf in list( mm_consensus.keys() ):\n current_mm = mm_consensus[orf]\n if np.all(current_mm[pos:(pos+kmer)]): # check that no kmer position is MM\n current_kmer = \"\".join( current_seq[pos:pos+kmer])\n if current_kmer in kmer_dict.keys():\n kmer_dict[current_kmer] += 1\n else:\n kmer_dict[current_kmer] = 1\n\n elif MM == 'no':\n current_kmer = \"\".join( current_seq[pos:pos+kmer])\n if current_kmer in kmer_dict.keys():\n kmer_dict[current_kmer] += 1\n else:\n kmer_dict[current_kmer] = 1\n\n new_dict = {}\n list_redundant = []\n for k in kmer_dict.keys():\n if kmer_dict[k] > R:\n if k not in list_redundant:\n \t list_redundant.append(k)\n \n return list_redundant", "def _get_max_bootstrap_genus(self, seq, repeats):\n word_posteriors = self._word_posteriors\n word_idxs = self._word_idxs\n word_size = self._word_size\n\n all_words = list(unique_words(seq, word_size))\n print sorted(map(word_idxs.get, all_words))\n decisions = [] #genera idxs\n for words in bootstrap(all_words, len(seq)//word_size, repeats):\n decisions.append(self._get_max_likelihood_genus(words,\n word_posteriors, word_idxs))\n freqs = calc_freqs(concatenate(decisions))\n sorted_freqs = sorted(freqs.items(), key=itemgetter(1))\n return sorted_freqs[-1] #what if a tie here?", "def kmer_frequencies(kmertable_all, kmertable_filtered, kmertable_nonDT_hi, kmertable_nonDT_lo, data_mm, codon_seqs):\n\n def codon_bgfreq(codon_seqs, data_mm):\n \"\"\"\n get codon background frequencies from mRNA seqs\n seqs: dictionary of yeast mRNA sequences\n data_mc: dictionary of multi-mapping boolean\n \"\"\"\n codon_counts = np.zeros(( len(codons_nonstop) ))\n list_orfs = list( data_mm.keys() )\n\n for ix, orf in enumerate(list_orfs):\n current_seq = codon_seqs[orf]\n current_mm = data_mm[orf]\n\n for pos in range( len(current_mm) ):\n if current_mm[pos] and current_seq[pos] in codons_nonstop:\n 
current_index = codons_nonstop.index(current_seq[pos])\n codon_counts[current_index] += 1\n codon_counts = np.around( codon_counts / np.sum(codon_counts), 5)\n\n return codon_counts\n\n\n def codonfreqs_kmerdf(kmertable):\n \"\"\"\n get codon frequencies from kmertable\n \"\"\" \n codon_counts_kmer = np.zeros(( len(codons_nonstop) ))\n for kmer in kmertable['kmer']:\n current_kmer_codons = [ kmer[(i*3):((i*3)+3)] for i in range(3) ] # ! hard coded for length L=3\n for codon in current_kmer_codons:\n current_index = codons_nonstop.index(codon)\n codon_counts_kmer[current_index] += 1 \n codon_counts_kmer /= np.sum(codon_counts_kmer)\n\n return np.around(codon_counts_kmer, 5)\n\n #kmertable_threshold = kmertable_all[kmertable_all['threshold']==1]\n kmertable_all2 = kmertable_all[kmertable_all['threshold']==0]\n\n\n cc_bg = codon_bgfreq(codon_seqs, data_mm)\n cc_all = codonfreqs_kmerdf(kmertable_all2)\t\t\t# without hits\n cc_theta = codonfreqs_kmerdf(kmertable_filtered)\n cc_nDT_hi = codonfreqs_kmerdf(kmertable_nonDT_hi) # min 16 max 4 at 1090\n cc_nDT_lo = codonfreqs_kmerdf(kmertable_nonDT_lo) # min 16 max 4 at 1090\n\n output = pd.DataFrame({'codon': list(codons_nonstop), \n 'kmer_theta': list(cc_theta), \n 'redundant': list(cc_all), \n 'background': list(cc_bg),\n 'nDThi': list(cc_nDT_hi),\n 'nDTlo': list(cc_nDT_lo) } ) \n output.to_csv(\"../data/figures/figure3/kmer_frequencies.txt\", header=True, index=False, sep='\\t')\n\n return output", "def CreateAtrium(L,v,d,seed): \n Neighbours = np.ndarray(L*L, dtype = list)\n rnd.seed(seed)\n Phases = np.ndarray(L*L,dtype = float)\n Phases.fill(4)\n Functionality = np.ndarray([L*L], dtype = bool)\n index = np.indices((1, L*L))[1][0]\n Atrium = index.reshape(L,L) # The index for that site within the long arrays\n for j in index:\n z = rnd.uniform(0,1)\n if d > z: # dysfunctional\n Functionality[j] = False\n if d <= z: # functional\n Functionality[j] = True\n if j in np.arange(0,L*L,L): # first column\n Neighbours[j] = list()\n Neighbours[j].extend([j+1])\n elif j in (np.arange(0,L*L,L)+L-1): # last column\n Neighbours[j] = list()\n Neighbours[j].extend([j-1])\n else: # body columns\n Neighbours[j] = list()\n Neighbours[j].extend([j-1,j+1])\n w = rnd.uniform(0,1)\n for j in np.arange(L*L):\n if w <= v: # transverse connections\n if j in np.arange(L*L-L,L*L):\n Neighbours[j].extend([j-(L*L-L)])\n Neighbours[j-(L*L-L)].extend([j])\n else:\n Neighbours[j].extend([j+L])\n Neighbours[(j+L)].extend([j])\n return Neighbours, Phases, Functionality, Atrium, index", "def sequences_to_kmer_counts(sequences, kmer_size):\n pfunc = functools.partial(sequence_to_kmer_freqs, kmer_size=kmer_size, normalize=False)\n pool = multiprocessing.Pool(multiprocessing.cpu_count())\n kmer_freqs = pool.map(pfunc, sequences, chunksize=250)\n pool.close()\n pool.join()\n mat = np.vstack(kmer_freqs).astype(int)\n colnames = ['' for _ in range(len(generate_all_kmers(kmer_size)))]\n for kmer, i in generate_all_kmers(kmer_size).items():\n colnames[i] = kmer\n retval = pd.DataFrame(mat, columns=colnames)\n return retval", "def reproduce(self):\n\n def compute_seeds(fitness):\n \"\"\" Computes the number of seeds given a fitness value. 
\"\"\"\n\n seeds = (fitness-min_fitness) / (max_fitness-min_fitness) * \\\n (self.max_seeds-self.min_seeds) + self.min_seeds\n\n return round(seeds)\n\n # evaluates max and min fitness for current year\n max_fitness = max(tree[0] for tree in self.population)\n min_fitness = min(tree[0] for tree in self.population)\n\n # computes the number of seeds produced per tree\n for tree in self.population:\n tree[1].seeds = int(compute_seeds(tree[0]))", "def seed_and_extend(read, k, h, index, genome):\n\n list_mapping_read = [] # List containing the positions tested to map the read on the genome\n #(will be used to not try to align a read twice at the same position)\n\n # Variables which will be returned\n position_mapping = len(genome) # Optimal position of mapping for the read\n nb_mismatch = int(h) + 1 # Number of mismatch in this mapping\n list_mismatch = [] # List of mismatch positions on the genome\n\n for kmer_index in range(len(read)-int(k)+1):\n kmer = read[kmer_index:kmer_index + int(k)]\n # For each kmer, tries to find the optimal position of mapping\n # for the read with this kmer as seed.\n position_mapping_kmer = len(genome)\n nb_mismatch_kmer = int(h) + 1\n list_mismatch_kmer = []\n\n list_occurences = sorted(index.get_occurences(kmer))\n\n if not list_occurences:\n continue\n\n for occurences in list_occurences:\n\n nb_mismatch_occu = 0 # For each occurence of the kmer,\n # count the number of mismatch during alignment\n\n list_mismatch_occu = [] # List of mismatch seen during alignment\n # of read with this occurence of the kmer\n\n index_char_genome = occurences - kmer_index # Index where to map in the genome\n index_char_read = 0 # Index of the character to compare\n\n if index_char_genome in list_mapping_read: # If position already tested,\n #do not test it a second time.\n continue\n else:\n list_mapping_read.append(index_char_genome) # Add this position to the list\n # so it won't be tested a second time for this read\n\n while nb_mismatch_occu <= int(h) \\\n and index_char_read < len(read) \\\n and index_char_genome < len(genome):\n if genome[index_char_genome] != read[index_char_read]:\n nb_mismatch_occu += 1\n list_mismatch_occu.append(index_char_genome)\n\n index_char_genome += 1\n index_char_read += 1\n\n\n # If the mapping of the read with this occurence of the read\n # is better than the previous one (less mismatch) : optimal values for kmer stored\n if nb_mismatch_occu < nb_mismatch_kmer:\n nb_mismatch_kmer = nb_mismatch_occu\n list_mismatch_kmer = list_mismatch_occu\n position_mapping_kmer = occurences - kmer_index\n\n # If the best mapping found for this kmer is better than the mapping\n # found with the previous kmer : optimal values for read stored\n if nb_mismatch_kmer < nb_mismatch \\\n or nb_mismatch_kmer == nb_mismatch \\\n and position_mapping_kmer < position_mapping:\n nb_mismatch = nb_mismatch_kmer\n list_mismatch = list_mismatch_kmer\n position_mapping = position_mapping_kmer\n\n return position_mapping, nb_mismatch, list_mismatch", "def experiment_measurements_index(fun, num_measurements, sd, num_trials, seed=21):\n experiments = {}\n solutions = {}\n for ns in num_measurements:\n ratios = []\n mud_solutions = []\n for t in range(num_trials):\n np.random.seed(seed+t)\n _r = fun(sd=sd, num_obs=ns)\n ratios.append(_r)\n mud_solutions.append(np.argmax(_r))\n experiments[ns] = ratios\n solutions[ns] = mud_solutions\n \n return experiments, solutions", "def fuse_seed_creation_pass(graph: torch.fx.Graph):\n device_seeds = collections.defaultdict(list)\n for node 
in graph.nodes:\n if CallFunctionVarArgs(inductor_prims.seed).match(node):\n device_seeds[node.args[0]].append(node)\n\n if not device_seeds:\n return 0\n\n for device, seeds in device_seeds.items():\n with graph.inserting_before(seeds[0]):\n combined = graph.call_function(inductor_prims.seeds, (len(seeds), device))\n with V.fake_mode:\n combined.meta[\"val\"] = torch.empty(\n [len(seeds)], device=device, dtype=torch.int64\n )\n combined.meta[\"tensor_meta\"] = _extract_tensor_metadata(\n combined.meta[\"val\"]\n )\n\n for idx, seed in enumerate(seeds):\n with graph.inserting_before(seed):\n new_seed = graph.call_function(\n inductor_prims.lookup_seed, (combined, idx)\n )\n seed.replace_all_uses_with(new_seed)\n new_seed.meta.update(seed.meta)\n graph.erase_node(seed)\n\n return len(device_seeds)", "def dynamic(a, b):\n m = len(a)\n n = len(b)\n d = [[0 for i in range(n)] for j in range(m)] # d[i][j] is length of common sequence of consecutive\n prev = [-1 for j in range(n)] # numbers that ends with a[i] == b[j]\n global_max = 0\n global_pos = -1\n for i in range(0, m):\n for j in range(0, n): # iterate through all the elements by dual circle\n if a[i] == b[j]: # if pair is equal then check if there is sequence that ends with a[i]-1\n max_len = 0 # find longest sequence ends with a[i]-1\n max_prev = -1\n for k in range(i+1):\n for l in range(j+1):\n if k == i and l == j:\n continue\n if d[k][l] > max_len and a[k] == b[l] == a[i] - 1:\n max_len = d[k][l]\n max_prev = l\n d[i][j] = max_len + 1\n if d[i][j] > global_max:\n global_max = d[i][j]\n global_pos = j\n prev[j] = max_prev\n\n res = [] # rebuild the answer\n while global_pos != -1:\n res.append(b[global_pos])\n global_pos = prev[global_pos]\n\n return res[::-1]" ]
[ "0.5693699", "0.5550675", "0.54997873", "0.5431754", "0.5406691", "0.5335756", "0.53210217", "0.53132635", "0.53088075", "0.53014505", "0.5280768", "0.5259172", "0.5255134", "0.5245243", "0.51910627", "0.5185651", "0.51382864", "0.5113323", "0.5112537", "0.5112202", "0.5090787", "0.5058744", "0.5057783", "0.5054852", "0.5026101", "0.5022374", "0.49774674", "0.49734148", "0.49514586", "0.4949725" ]
0.66207594
0
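The helpers in the record above (find_motifs, build_e, build_t, ALPHA) are not shown, so here is a small self-contained sketch of just the "find the most common seeds" step for one k-mer length, using collections.Counter; the names and the top-5 cut-off mirror the record but are otherwise illustrative.

from collections import Counter

def most_common_kmers(seqs, k, top_n=5):
    # Count every length-k substring across all sequences and keep the top_n.
    counts = Counter()
    for seq in seqs:
        for i in range(len(seq) - k + 1):
            counts[seq[i:i + k]] += 1
    return counts.most_common(top_n)

# e.g. most_common_kmers(["ACGTACGT", "TACGTTAC"], k=4)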
Return whether x == y; if eps is not None, return whether abs(x - y) <= eps
def all_equal(x, y, eps=None):
    if eps:
        return all([abs(i - j) <= eps for i, j in zip(x, y) if i is not None and j is not None])
    return all([i == j for i, j in zip(x, y)])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def realEqual(x,y,eps=10e-10):\n return abs(x-y) < eps", "def approx_eq(x, y, tolerance = 0.000001):\n\treturn abs(x - y) < tolerance", "def approx_eq(x, y, tolerance=1e-15):\n return abs(x - y) < tolerance", "def is_almost_equal(self, x ,y ,epsilon=1*10**(-8)):\n \treturn abs(x-y) <= epsilon", "def at_loc((x, y), (cx, cy), eps=0.000035):\n\treturn (x - cx)**2 + (y - cy)**2 <= eps**2", "def is_equal_approx(x, y, epsilon=1e-6):\r\n # Check absolute precision.\r\n if -epsilon <= x - y <= epsilon:\r\n return True\r\n\r\n # Is x or y too close to zero?\r\n if -epsilon <= x <= epsilon or -epsilon <= y <= epsilon:\r\n return False\r\n\r\n # Check relative precision.\r\n return (-epsilon <= (x - y) / x <= epsilon\r\n or -epsilon <= (x - y) / y <= epsilon)", "def torch_the_same(X, Y, eps=1e-8):\n return (X - Y).abs().min() < eps", "def IsApproximatelyEqual(x, y, epsilon):\n # Check absolute precision.\n if -epsilon <= x - y <= epsilon:\n return True\n\n # Is x or y too close to zero?0.\n if -epsilon <= x <= epsilon or -epsilon <= y <= epsilon:\n return False\n\n # Check relative precision.\n return (-epsilon <= (x - y) / x <= epsilon\n or -epsilon <= (x - y) / y <= epsilon)", "def IsApproximatelyEqual(x, y, epsilon = 1e-6):\n # Check absolute precision.\n if -epsilon <= x - y <= epsilon:\n return True\n\n # Is x or y too close to zero?\n if -epsilon <= x <= epsilon or -epsilon <= y <= epsilon:\n return False\n\n # Check relative precision.\n return (-epsilon <= (x - y) / x <= epsilon or -epsilon <= (x - y) / y <= epsilon)", "def has_converged(x, y, epsilon=EPSILON):\n for a, b in itertools.izip(x, y):\n if abs(a - b) > epsilon:\n return False\n return True", "def has_converged(x, y, epsilon=EPSILON):\n for a, b in itertools.izip(x, y):\n if abs(a - b) > epsilon:\n return False\n return True", "def approx_eq(a, b):\n return abs(a-b) < approx_eq.eps", "def almost_equal(x, y):\n return abs(x-y) < FP_PREC", "def fp_eq(x: float, y: float) -> bool:\n return fabs(x-y) < 10**-12", "def almost_equal_values(x, y, precision):\n return round(x - y, precision) == 0", "def nearlyEqual(self, x, y):\n return self.absoluteerror(x).nearlyEqual(x, y)", "def _point_almost_equal(a,b, rtol=RTOL, atol=ATOL):\n return np.allclose(a._Point__loc, b._Point__loc,\n rtol=rtol, atol=atol)", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return math.isclose(self.x, other.x, rel_tol=1e-12, abs_tol=1e-12) and\\\n math.isclose(self.y, other.y, rel_tol=1e-12, abs_tol=1e-12)\n else:\n return False", "def nearlyEqual(self, x, y):\n return abs(x-y) < self.absoluteerrorrange", "def isinsidepointXY(x,p):\n \n return dist(x,p) < epsilon", "def within_tolerance(x, y, tolerance): \r\n return abs(x) <= tolerance and abs(y) <= tolerance", "def _point_equal(a,b):\n return np.array_equal(a._Point__loc, b._Point__loc)", "def __eq__(self, other):\r\n return abs(self.x - other.x) + abs(self.y - other.y) < Vertex.epsilon", "def _almost_equal(x, y):\n pass", "def test_epipolar(dxy_0, ep_vec, dxy, tol):\n delta=np.abs(np.dot((dxy-dxy_0), [ep_vec[1], -ep_vec[0]]))\n disp_mag=np.sqrt((dxy[:,0]-dxy_0[0])**2 +(dxy[:,1]-dxy_0[1])**2)\n good=(delta < tol) | (delta < 0.02 * disp_mag )\n return good, delta", "def eq(self, y):\n return 1 - self.ne(y)", "def __ge__(self, other):\n return self.x ** 2 + self.y ** 2 >= other.x ** 2 + other.y ** 2", "def __eq__(self, rhs):\n return self.x == rhs.x and self.y == rhs.y", "def __eq__(self, pt):\n return self.x == pt.x and self.y == pt.y", "def equals_exact(self, other, tolerance): # -> 
bool:\n ..." ]
[ "0.78858256", "0.7044861", "0.6952725", "0.69114923", "0.6853465", "0.6734588", "0.66924584", "0.6683078", "0.66429543", "0.6590756", "0.6590756", "0.64673287", "0.6461987", "0.6440452", "0.64339805", "0.6417307", "0.6406655", "0.6328267", "0.63227063", "0.6311484", "0.62286174", "0.61804175", "0.61784273", "0.61420435", "0.60994476", "0.60613275", "0.60356677", "0.602875", "0.5946162", "0.5941863" ]
0.7315098
1
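Quick usage sketch for all_equal as reconstructed above; the values are illustrative only.

assert all_equal([1, 2, 3], [1, 2, 3])                  # exact comparison
assert all_equal([1.0, 2.0], [1.001, 1.999], eps=0.01)  # within tolerance
assert not all_equal([1.0, 2.0], [1.1, 2.0], eps=0.01)  # outside tolerance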
Reduce product of x.
def product(x):
    return functools.reduce(lambda x, y: x * y, x)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prod(x):\n return functools.reduce(lambda a, b: a * b, x, 1)", "def prod(self, x, y):\n return self.reduce(x + y)", "def prod(l):\n return reduce(lambda a, b: a*b, l)", "def prod(lst):\n return reduce(mul, lst, 1)", "def _prod(seq):\n return reduce(lambda x, y: x*y, seq, 1)", "def prod(iterable):\n return reduce(operator.mul, iterable, 1)", "def prod(iterable):\n return reduce(operator.mul, iterable, 1)", "def prod(iterable):\n return reduce(operator.mul, iterable, 1)", "def prod(factors):\n return reduce(operator.mul, factors, 1)", "def prod(iterable):\n \n return reduce(operator.mul, iterable, 1)", "def prod(self):\n r = 0\n for i in range(len(self)):\n r *= self[i]\n\n return r", "def product(*nums):\n\treturn reduce((lambda x, y: x * y), nums)", "def _prod(s):\n return reduce(lambda x, y: x * y, s, 1)", "def prod(l):\n r = 1\n for x in l:\n r *= x\n return r", "def reduce_by_multiplication(data):\n total = 1\n for num in data:\n total *= num\n return total", "def modReduce(self, x):\n\n assert 0 <= x < pow(self.mod, 2), 'out of range.'\n q = (x * self.u) >> (2 * self.M_bit)\n r = x - q * self.mod\n while r >= self.mod:\n r -= self.mod\n return r", "def fmult(items):\n return functools.reduce(lambda x, y: x*y, items)", "def product(it):\n prod = 1\n for x in it:\n prod *= x\n return prod", "def prod(a, x):\n return [a[i]*x for i in range(2)]", "def prod(self, x, y):\n return (self.basic_operation.reduce(x.original+y.original),\n self.operation1.prod(x.left, y.left),\n self.operation2.prod(x.right, y.right))", "def prod(L):\n res = 1\n for e in L:\n res *= e\n return res", "def cumprod(x, axis=None):\r\n return CumprodOp(axis=axis)(x)", "def prod(lst):\n if len(lst) == 0:\n return 0\n x = lst[0]\n for v in lst[1:]:\n x *= v\n return x", "def prod(lst):\n if len(lst) == 0:\n return 0\n x = lst[0]\n for v in lst[1:]:\n x *= v\n return x", "def prod(lst):\n res = 1.\n for x in lst: res *= x\n return res", "def outer_product(x):\n return keras.backend.batch_dot(\n x[0]\n , x[1]\n , axes=[1,1]\n ) / x[0].get_shape().as_list()[1]", "def call(self, x):\n return tf.tile(x, self._mult)", "def product(iterable):\n prod = 1\n for i in iterable:\n prod *= i\n return prod", "def __imul__(self, x):\n ls=len(self)\n for i in self.desc():\n for j in range(ls):\n self.g_val(self.val(i,j)*x,i,j)\n return self", "def easy_eval(self, x):\n answer = 0\n for i in range(len(self.coefficients)):\n coef = self.coefficients[i]\n degree = len(self.coefficients) - (i+1)\n answer += x ** degree * coef\n return answer" ]
[ "0.8485899", "0.75339127", "0.71145695", "0.70959675", "0.7030964", "0.70278287", "0.70278287", "0.70278287", "0.7009414", "0.69021314", "0.6851268", "0.6762479", "0.6750255", "0.67320514", "0.6707632", "0.66387403", "0.6615138", "0.6581234", "0.6580697", "0.65709585", "0.656224", "0.64615107", "0.6447613", "0.6447613", "0.64326954", "0.64121073", "0.6411015", "0.63972056", "0.6379663", "0.63697386" ]
0.8354631
1
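Usage sketch for the reduce-based product above; math.prod (Python 3.8+) gives the same answer for non-empty iterables.

import functools  # needed by product() above
import math

values = [2, 3, 4]
assert product(values) == 24
assert math.prod(values) == 24  # built-in equivalent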
Return a `list` of `int` that represents a range of axes.
def ranged_axes(shape):
    return (-np.arange(1, len(shape) + 1)[::-1]).tolist() or -1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def range() -> List[int]:\n pass", "def limits(self):\n\n\t\treturn [\n\t\t\tmin(self.xvalues),\n\t\t\tmax(self.xvalues),\n\t\t\tmin(self.yvalues),\n\t\t\tmax(self.yvalues)]", "def range(self):\n lows, highs = [], []\n for graph in self._graphs.values():\n low, high = graph.range()\n lows.append(low)\n highs.append(high)\n return (min(lows), max(highs))", "def yield_spectral_range(self) -> Tuple[float, float, float]:\n return [min(self.x), max(self.x), len(self.x)]", "def indices(self):\n slice_list = []\n for axis in range(self.ndim):\n if axis in self.displayed:\n slice_list.append(slice(None))\n else:\n if self.clip:\n p = np.clip(\n self.point[axis],\n np.round(self.range[axis][0]),\n np.round(self.range[axis][1]) - 1,\n )\n else:\n p = self.point[axis]\n p = np.round(p / self.range[axis][2]).astype(int)\n slice_list.append(p)\n return tuple(slice_list)", "def get_ranges(self) -> typing.List[typing.Tuple[float, float]]:\n return self.ranges[:]", "def _get_range(self):\n return tuple((0, m, 1) for m in self.level_shapes[0])", "def axes(*x: Iterable[int]):\n return [_ti_core.Axis(i) for i in x]", "def _get_axes_numbers(self, axes):\n if axes is None:\n return [0, 1]\n\n if isinstance(axes, str):\n return [self._get_axis_number(axes)]\n elif hasattr(axes, '__len__'):\n return [self._get_axis_number(ax) for ax in axes]\n return [axes]", "def get_numeric_intervals(self):\n intervals = []\n for num in self.numeric_col:\n\n interval_difference = self.max[num] - self.min[num]\n interval_no = min(10, interval_difference)\n\n step = math.floor(interval_difference / interval_no) + 1\n interval = list(range(math.floor(self.min[num]), math.floor(self.max[num]), step))\n interval.append(9999999)\n intervals.append(interval)\n\n return intervals", "def get_bounds():\n return [0.00], [1.00]", "def get_range(self) -> tuple[int, int]:\n return self.range_from, self.range_to", "def range(series):\n return min(series), max(series)", "def ranges(self) -> List[Range]:\n return list(iter(self._ranges))", "def ticks(self, start, end, desired_ticks=8):\n if start == end or isnan(start) or isnan(end):\n return [start]\n min, max, delta = heckbert_interval(start, end, desired_ticks, enclose=True)\n return frange(min, max, delta)", "def ticks(self, start, end, desired_ticks=None):\n if start == end or isnan(start) or isnan(end):\n return []\n res = self.resolution\n start -= self.zero\n end -= self.zero\n start_tick = int(ceil(start / res))\n end_tick = int(floor(end / res))\n ticks = [i*res for i in range(start_tick, end_tick+1)]\n return ticks", "def ticks(self, start, end, desired_ticks=8):\n if start == end or isnan(start) or isnan(end):\n return [start]\n min, max, delta = heckbert_interval(start, end, desired_ticks,\n nicefunc=self._nice_pow10,\n enclose = True)\n return frange(min, max, delta)", "def ranges(self):\n return self._ranges", "def grid_point_counts(self):\n return [high-low for low, high in self._Limits]", "def range(self):\n return self.range_array", "def get_xrange(self) -> np.array:\n # todo: ensure this functions work as well for y_values\n lower, upper = self.get_xrange_indices()\n return self.x[lower, upper + 1]", "def get_xrange(self):\n return self.xvec[0], self.xvec[-1]", "def _calc_range(self) -> np.ndarray:\n if self._is_ct25k():\n range_resolution = 30\n n_gates = 256\n else:\n n_gates = int(self.metadata[\"number_of_gates\"])\n range_resolution = int(self.metadata[\"range_resolution\"])\n return np.arange(n_gates) * range_resolution + range_resolution / 2", "def bounds(self) -> 
typing.List[float]:\n raise NotImplementedError()", "def indicator_space() -> List[Dimension]:\n return [\n Integer(15, 40, name='bull-buy-rsi-value'),\n Integer(10, 30, name='bear-buy-rsi-value'),\n ]", "def get_bounds(self):\n return ([self.t_min] * self.dim,[self.t_max] * self.dim)", "def axis_bounds(pc, axis):\n try:\n bounds = pc.bounds\n except AttributeError:\n bounds = pc\n \n return tuple([getattr(bounds, b + axis) for b in ('min', 'max')])", "def axis_range ( xmin , xmax , delta = 0.05 , log = False ) :\n xmn = min ( xmin , xmax )\n xmx = max ( xmin , xmax )\n \n import math\n \n ## 1) special case\n if isequal ( xmn , xmx ) :\n return math.floor ( xmn - 0.1 ) , math.ceil ( xmx + 0.1 ) \n\n ## 2) special case\n if islong ( xmn - 0.5 ) and islong ( xmn + 0.5 ) :\n return math.floor ( xmn - 0.1 ) , math.ceil ( xmx + 0.1 ) \n\n d = xmx - xmn\n \n if 0 <= xmn < xmx :\n \n xmin = max ( 0 , xmn - delta * d )\n xmax = xmx + delta * d \n \n elif xmn < xmx <= 0 :\n \n xmin = xmn - delta * d \n xmax = max ( 0 , xmx + delta * d )\n \n elif xmn < 0 < xmx :\n \n xmin = ( 1 + delta ) * xmn \n xmax = ( 1 + delta ) * xmx\n \n else : \n \n xmin = xmn - delta * d \n xmax = xmx + delta * d \n\n N = 3\n \n a1 , b1 = frexp10 ( xmin )\n a2 , b2 = frexp10 ( xmax )\n\n b1 -= N \n b2 -= N \n \n xmin = math.floor ( a1 * ( 10**N ) ) * ( 10 ** b1 )\n xmax = math.ceil ( a2 * ( 10**N ) ) * ( 10 ** b2 )\n \n return xmin , xmax", "def get_index_range_inclusive(self):\n nx, ny, nz = self.get_mesh_size()\n return (1, nx, 1, ny, 1, nz)", "def rangeLin(min, max, n):\n\n return np.arange( min, max, (max-min)/n )" ]
[ "0.7072757", "0.698906", "0.6808658", "0.6788123", "0.6770003", "0.6667891", "0.6639482", "0.6613244", "0.6563589", "0.65074825", "0.6391037", "0.6376127", "0.63659567", "0.63645595", "0.635271", "0.6344882", "0.63376725", "0.6322412", "0.6316687", "0.630669", "0.6293695", "0.6292103", "0.62799466", "0.6257594", "0.6238138", "0.6237955", "0.62322736", "0.6222943", "0.6219087", "0.6217667" ]
0.6999914
1
Partition `zipped` into `num_steps`.
def partition(zipped, num_steps, allow_overflow=True):
    size = len(zipped)
    parts = []
    for i in range(0, size, num_steps):
        end = i + num_steps
        if end >= size:
            parts.append(zip(*zipped[i:]))
            break
        elif allow_overflow:
            parts.append(zip(*zipped[i:end]))
    return parts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_chunks(num_items, num_steps):\n chunk_sizes = np.zeros(num_steps, dtype=int)\n chunk_sizes[:] = num_items // num_steps\n chunk_sizes[:num_items % num_steps] += 1\n\n chunk_offsets = np.roll(np.cumsum(chunk_sizes), 1)\n chunk_offsets[0] = 0\n return chunk_sizes, chunk_offsets", "def test_chunk_size_has_priority_over_n_splits(self):\n chunks = list(chunk_tasks(range(4), chunk_size=4, n_splits=4))\n self.assertEqual(len(chunks), 1)\n self.assertEqual(len(chunks[0]), 4)\n self.assertEqual(list(range(4)), list(chain.from_iterable(chunks)))", "def test_split(range_size, partition_size):\n dump = Mock()\n\n iterable = list(range(range_size))\n\n list(_split(partition_size=partition_size, dump=dump, iterable=iterable))\n expected_call_count = (range_size // partition_size) + int(bool(range_size % partition_size))\n\n assert dump.call_count == expected_call_count", "def steps(self, step_count):\n self.dir.value(0 if step_count > 0 else 1)\n for i in range(abs(step_count)):\n self.stp.value(1)\n sleep_us(self.step_time)\n self.stp.value(0)\n sleep_us(self.step_time)\n self.current_position += step_count", "def step(self, n, dlist):\n pass", "def step(v, direction, step_size):\n return [v_i + step_size * direction_i\n for v_i, direction_i in zip(v, direction)]", "def step(v, direction, step_size):\n return [v_i + step_size * direction_i\n for v_i, direction_i in zip(v, direction)]", "def partition_all(n):\n def _partition_all_xducer(step):\n outer = {\"temp\": []}\n\n def _partition_all_step(r=Missing, x=Missing):\n if r is Missing: return step()\n\n # arity 1: called on completion.\n if x is Missing:\n if not outer[\"temp\"]:\n return r\n _temp = outer[\"temp\"][:]\n del outer[\"temp\"][:]\n _r = unreduced(step(r, _temp))\n return step(_r)\n\n # arity 2: called w/each reduction step.\n outer[\"temp\"].append(x)\n if len(outer[\"temp\"]) == n:\n _temp = outer[\"temp\"][:]\n del outer[\"temp\"][:]\n return step(r, _temp)\n return r\n\n return _partition_all_step\n return _partition_all_xducer", "def _calculate_step_sizes(x_size, y_size, num_chunks):\n # First we try to split only along fast x axis\n xstep = max(1, int(x_size / num_chunks))\n\n # More chunks are needed only if xstep gives us fewer chunks than\n # requested.\n x_chunks = int(x_size / xstep)\n\n if x_chunks >= num_chunks:\n ystep = y_size\n else:\n # The x and y loops are nested, so the number of chunks\n # is multiplicative, not additive. 
Calculate the number\n # of y chunks we need to get at num_chunks.\n y_chunks = int(num_chunks / x_chunks) + 1\n ystep = max(1, int(y_size / y_chunks))\n\n return xstep, ystep", "def partition(seq):\n\n return 0", "def test_partition(self):\n # one swap at the end\n list = [5, 6, 7, 8, 9, 2]\n partition(list, 0, 5)\n # assert list == [2, 6, 7, 8, 9, 5] # should be improved in future", "def partition(data, n):\n splits = []\n remaining = data.copy(deep=True)\n for i in range(n):\n split = remaining.sample(frac=1/(n-i), random_state=10)\n splits.append(split)\n remaining = remaining.drop(split.index)\n return splits", "def partition_files(list_of_files, number_of_parts):\n return np.array_split(list_of_files, number_of_parts)", "def iterate_list_specific_step_size(list, step_size):\n for i in range(0, len(list), step_size):\n yield list[i:i + step_size]\n return", "def grouper(iterable, n):\n args = [iter(iterable)] * n\n return zip(*args)", "def _choose_split_points(cls, sorted_keys, shard_count):\n assert len(sorted_keys) >= shard_count\n index_stride = len(sorted_keys) / float(shard_count)\n return [sorted_keys[int(round(index_stride * i))]\n for i in range(1, shard_count)]", "def chunker(results, n):\n\n def grouper(iterable, n, fillvalue=None):\n args = [iter(iterable)] * n\n return zip_longest(*args, fillvalue=fillvalue)\n\n m = int(len(results) / n)\n return list(grouper(iterable=results, n=m, fillvalue=None))", "def _split_on_chunks(self, iterable, size):\n return utils.split_on_chunks(iterable, size)", "def iter_chunks(chunksize, *iterables):\n iterables = iter(zip(*iterables))\n\n while 1:\n chunk = tuple(islice(iterables, chunksize))\n\n if not chunk:\n return\n\n yield chunk", "def partition(n, m, discard= False):\n steps = range(0, 1 + n, m)\n yield from zip(steps, steps[1:])\n if n % m and not discard:\n yield n - (n % m), n", "def step_through_generations(self, num_steps):\n for island in self._islands:\n for _ in range(num_steps):\n island.execute_generational_step()\n self.archipelago_age += num_steps", "def split(self, stage, iterator, lengths, inner_to_outer=True):\n stage_id = self._resolve_stage_id(stage)\n\n self.state_object, res = _ffi_api.StateSplit(self.state_object, stage_id, iterator, lengths,\n inner_to_outer)\n return res", "def partition(self, data, labels):\n\t\tfor i in range(self.splits):\n\t\t\tyield self.makePartition(len(labels))", "def partition(self, to_partition, indices, nr_agents):\n return [to_partition[indices[i]:indices[i + 1]] for i in range(nr_agents)]", "def split_chunks(item_list, num_items_in_list):\n for item in range(0, len(item_list), num_items_in_list):\n # Create an index range for item_list of num_items_in_list items:\n yield item_list[item:item + num_items_in_list]", "def iter_slices(shape, chunk_size):\n assert len(shape) == len(chunk_size)\n num_grid_chunks = [int(ceil(s / float(c))) for s, c in zip(shape, chunk_size)]\n for grid_index in numpy.ndindex(*num_grid_chunks):\n yield tuple(\n slice(min(d * c, stop), min((d + 1) * c, stop)) for d, c, stop in zip(grid_index, chunk_size, shape))", "def partition(data: list, parts: list, *args: float) -> list:\n random.seed(42)\n partition_names = parts\n random.shuffle(data)\n n = len(data)\n rem, a, b = n, 0, 0\n parts = []\n\n for p in args:\n b = a + int(n*p)\n parts.append(data[a:b])\n rem -= (b - a)\n a = b\n # end\n\n parts.append(data[-rem:])\n return parts", "def get_next_steps(self, steps):\n step_list = []\n\n steps_remaining = set(steps.keys())\n counter = 0\n max_counter = 
10000\n next_steps = set()\n\n for step in steps_remaining:\n dependencies = steps[step]\n if len(dependencies) == 0:\n next_steps.add(step)\n\n # this is the list of things that can be take for work now\n return sorted(next_steps)", "def _chunk_windows(windows, num_chunks):\n if num_chunks <= 0 or int(num_chunks) != num_chunks:\n raise ValueError(\"Number of chunks must be an integer > 0\")\n num_chunks = min(len(windows) - 1, num_chunks)\n splits = np.array_split(windows[:-1], num_chunks)\n chunks = []\n for j in range(num_chunks - 1):\n chunk = np.append(splits[j], splits[j + 1][0])\n chunks.append(chunk)\n chunk = np.append(splits[-1], windows[-1])\n chunks.append(chunk)\n return chunks", "def __partition(self, lst, n):\n \n if lst is None:\n lst = []\n \n division = len(lst)/float(n)\n \n return [ lst[int(round(division * i)):\n int(round(division * (i+1)))] for i in xrange(int(n))]" ]
[ "0.56117743", "0.5296957", "0.528687", "0.52073896", "0.5157775", "0.51236594", "0.51106584", "0.5107272", "0.5092671", "0.5070655", "0.5070408", "0.5062972", "0.5044236", "0.5033536", "0.50270873", "0.50222856", "0.5005889", "0.5004188", "0.49633753", "0.48935226", "0.48720393", "0.48708767", "0.48650292", "0.48551208", "0.4850941", "0.48456606", "0.4832979", "0.48319843", "0.48229572", "0.4814835" ]
0.80619586
0