query (string, 9–9.05k chars) | document (string, 10–222k chars) | metadata (dict) | negatives (list, length 30) | negative_scores (list, length 30) | document_score (string, 4–10 chars) | document_rank (string, 2 classes)
---|---|---|---|---|---|---|
Test for graph thresholding using global cost efficiency (GCE) on OMSTs (extract all MSTs). | def test_graphs_threshold_omst_global_cost_efficiency():
    # the function is optimized at the 3rd OMST.
    # Groundtruth
    expected = np.load("groundtruth/graphs_threshold/omst_gce.npy")
    # Data
    graph = np.load("sample_data/graphs_threshold/graph.npy")
    # Run
    _, CIJtree, _, _, _, _, _, _ = threshold_omst_global_cost_efficiency(
        graph, n_msts=None
    )
    # Test
    np.testing.assert_array_equal(expected, CIJtree) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_graphs_threshold_omst_global_cost_efficiency2():\n # the function is optmized at the 3rd OMST, so it is going to yeild the same results\n # as the exhaustive search\n\n # Groundtruth\n expected = np.load(\"groundtruth/graphs_threshold/omst_gce.npy\")\n\n # Data\n graph = np.load(\"sample_data/graphs_threshold/graph.npy\")\n\n # Run\n n_msts = 5\n _, CIJtree, _, _, _, _, _, _ = threshold_omst_global_cost_efficiency(\n graph, n_msts=n_msts\n )\n\n # Test\n np.testing.assert_array_equal(expected, CIJtree)",
"def test_graphs_threshold_global_cost_efficiency():\n\n # Groundtruth\n expected = np.load(\"groundtruth/graphs_threshold/gce.npy\")\n\n # Data\n graph = np.load(\"sample_data/graphs_threshold/graph.npy\")\n\n # Run\n iterations = 50\n binary_mask, _, _, _, _ = threshold_global_cost_efficiency(graph, iterations)\n\n # Test\n np.testing.assert_array_equal(expected, binary_mask)",
"def tryEverything(g, verbose, graphname):\r\n prio = ['rku', 'random', 'BIL', 'rkd', 'cluHPS', 'rkusd', 'rkuad']\r\n placement = ['eft', 'BIM*', 'OLB', 'MET', 'DL', 'GDL']\r\n costFunction = ['mean', 'median', 'maxmax', 'minmax', 'minmin', 'maxmin']\r\n desc = ['DLS/DC', None, 'DCP']\r\n useOfBIM = [False, True]\r\n insertion = [False, True]\r\n BSA = [False, True]\r\n res: Dict[str, List[float]] = {}\r\n cnt = 0\r\n\r\n for ip, p in enumerate(prio):\r\n for ipl, pl in enumerate(placement):\r\n for ic, c in enumerate(costFunction):\r\n if p != 'BIL' or c == 'mean' or pl in ['DL', 'GDL']:\r\n for idd, d in enumerate(desc):\r\n for iu, u in enumerate(useOfBIM):\r\n for ii, i in enumerate(insertion):\r\n for ib, b in enumerate(BSA):\r\n cnt += 1\r\n name = \";\".join(map(str, [ip, ic, ipl, idd, iu, ii, ib]))\r\n\r\n # dispName = \"-\".join(map(str, [p, pl, c, d, u, i, b]))\r\n # print(\"Heuristic n°\", cnt, \"-\", dispName)\r\n # print(\"Heuristic n°\", cnt, \"-\", name)\r\n\r\n startScheduling = timeit.default_timer()\r\n try:\r\n schedule = computeSchedule(g, strategyPrio=p, costFunction=c,\r\n strategyPlacement=pl,\r\n useOfBIM=u, desc=d,\r\n insertion=i, bsa=b, verbose=verbose)\r\n verifPrec(g, schedule, verbose)\r\n endScheduling = timeit.default_timer()\r\n # print(\"Ended in :\", 1000*(endScheduling - startScheduling), \"ms\")\r\n # print(\"Ended in :\", round(1000 * (endScheduling - startScheduling),2), \"ms\")\r\n timeS = round(1000 * (endScheduling - startScheduling), 2)\r\n # print(f\"timeS : {timeS}\")\r\n if verbose:\r\n print(f\"Time : {timeS}ms\")\r\n res[name] = [round(schedule[getExitTask(g)][2], 6), timeS]\r\n except Exception as _:\r\n\r\n print(\"Error for : \" + name + \" on file \" + graphname)\r\n file = open(\"error.log\", 'a')\r\n file.write(f\"Error for {name} on file {graphname}\\n\")\r\n file.close()\r\n raise _\r\n return res\r\n return res",
"def global_efficiency(graph):\n return nx.global_efficiency(graph.graph)",
"def main():\n logfile = setup_log(os.path.join(os.environ['hel'], 'logs',\n 'thresh_cluster_fsl'))\n logfile.info('Threshold and cluster.')\n logfile.info('Doing the wgc PairedTres data. \\\n This is the main result for the difference between \\\n View1 and View2 in weighted global connectivity')\n outdir = os.path.join(os.environ['hel'], 'graph_analyses',\n 'randomise_global_connectivity')\n\n os.chdir(outdir)\n prefx = 'wgc_PairedTres_n10000'\n corrctd_p = '{}_clustere_corrp_tstat2.nii.gz'.format(\n prefx)\n stat = '{}_tstat2.nii.gz'.format(prefx)\n outfilename = '{}_thresh_clustere_corrp_tstat2'.format(\n prefx)\n fsl_maths(logfile, corrctd_p, stat, outfilename)\n clust_in = '{}.nii.gz'.format(outfilename)\n clst_indx = '{}_cluster_index'.format(outfilename)\n lmax_f = '{}_lmax.txt'.format(outfilename)\n clst_sz = '{}_cluster_size'.format(outfilename)\n logfile.info('Now doing cluster for wgc.')\n cluster(logfile, clust_in, clst_indx, lmax_f, clst_sz)",
"def test_using_ego_graph(self):\n assert_equal(nx.local_efficiency(self.G3), 7 / 12)",
"def test_global_efficiency_complete_graph(self):\n for n in range(2, 10):\n G = nx.complete_graph(n)\n assert_equal(nx.global_efficiency(G), 1)",
"def compare_nodes(G,all_match_pairs,match_pair,traversed,node1,node2, ports_weight):\n logger.debug(f\"comparing {node1},{node2}, traversed {traversed}\")\n nbrs1 = sorted(set(G.neighbors(node1)) - set(traversed))\n #remove dummies\n nbrs1 = sorted(set([nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=7]))\n nbrs2 = sorted(set(G.neighbors(node2)) - set(traversed))\n #remove dummies\n nbrs2 = sorted(set([nbr for nbr in nbrs2 if G.get_edge_data(node2, nbr)['weight'] !=7]))\n logger.debug(f\"node1:{node1},property: {G.nodes[node1]},neigbors1: {nbrs1}\")\n logger.debug(f\"node2:{node2},property: {G.nodes[node2]},neigbors2: {nbrs2}\")\n if not nbrs1 or not nbrs2:\n if compare_two_nodes(G, node1, node2, ports_weight):\n match_pair[node1] = node2\n logger.debug(f\"no new neihbours, returning recursion {match_pair}\")\n return\n elif len(nbrs1)> 10:\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n logger.debug(f\"skipping high fanout nets due to large computation, {node1} {nbrs1}\")\n traversed.append(node1)\n return\n elif len(nbrs2)> 10:\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n traversed.append(node2)\n logger.debug(f\"skipping high fanout nets due to large computation, {node2} {nbrs2}\")\n return\n\n if node1 == node2:\n if node1 in match_pair.keys() or node1 in match_pair.values():\n logger.debug(\"avoid existing pair wise symmetry\")\n return\n logger.debug(f\"single node {node1}, nbrs {nbrs1}, nbr_weight {[G.get_edge_data(node1,nbr) for nbr in nbrs1]}\")\n SD_nbrs= [nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=2]\n ## TBD: filter based on primitive constraints\n ## Right now will try to figure out S/D paths\n if len(SD_nbrs) ==0:\n logger.debug(f\"No SD paths found to traverse\")\n match_pair[node1]=node1\n elif len(SD_nbrs) ==1:\n logger.debug(f\"traversing single S/D path {SD_nbrs}\")\n match_pair[node1]=node1\n traversed.append(node1)\n compare_nodes(G,all_match_pairs,match_pair,traversed,SD_nbrs[0],SD_nbrs[0],ports_weight)\n else:\n logger.debug(f\" multiple nodes diverging {SD_nbrs}\")\n logger.debug(f\"nbr weights: {SD_nbrs} {[G.get_edge_data(node1, nbr)['weight'] for nbr in SD_nbrs ]}\")\n match_pair[node1]=node1\n traversed.append(node1)\n new_sp=sorted(set(SD_nbrs)-set(traversed))\n all_match_pairs_local={}\n for nbr1,nbr2 in combinations(new_sp, 2):\n logger.debug(f\"recursive pair call from single branch {nbr1} {nbr2}\")\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n if new_pair:\n #new_pair[nbr1]=nbr2\n all_match_pairs_local[nbr1+'_'+nbr2] = new_pair\n all_match_pairs_local={k: v for k, v in all_match_pairs_local.items() if len(v)>0}\n if len(all_match_pairs_local)==1:\n match_pair.update( all_match_pairs_local[list(all_match_pairs_local.keys())[0]])\n logger.debug(f\"found inline pair: {pprint.pformat(match_pair, indent=4)}\")\n else:\n for nbr1 in new_sp:\n if (nbr1+'_'+nbr1 not in all_match_pairs.keys()):\n logger.debug(f\"recursive single branch call from single branch {nbr1} {nbr1}\")\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr1,ports_weight)\n #filtering multiple axis of symmetries with same block, ideally they should be handled by array generation\n if new_pair:\n all_match_pairs[nbr1+'_'+nbr1] = new_pair\n logger.debug(f\"updating match pairs: 
{pprint.pformat(all_match_pairs, indent=4)}\")\n\n\n elif nbrs1 == nbrs2:\n logger.debug(f\"traversing converging branch\")\n match_pair[node1]=node2\n traversed+=[node1,node2]\n nbrs1=sorted(set(nbrs1)-set([node1,node2]))\n logger.debug(f\"all non traversed neighbours: {nbrs1}\")\n if len(nbrs1)==1:\n nbr1=nbr2=nbrs1[0]\n logger.debug(f\"keeping single converged branch inline {nbr1} {nbr2}\")\n compare_nodes(G,all_match_pairs,match_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n else:\n for nbr1,nbr2 in combinations_with_replacement(nbrs1,2):\n logger.debug(f\"recursive call from converged branch {nbr1} {nbr2}\")\n if nbr1+'_'+nbr2 not in all_match_pairs.keys():\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n #filtering multiple axis of symmetries with same block, ideally they should be handled by array generation\n if new_pair:\n all_match_pairs[nbr1+'_'+nbr2] = new_pair\n logger.debug(f\"updating match pairs: {pprint.pformat(all_match_pairs, indent=4)}\")\n\n\n elif compare_two_nodes(G,node1,node2,ports_weight):\n nbrs1 = sorted(set([nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=2]))\n nbrs2 = sorted(set([nbr for nbr in nbrs2 if G.get_edge_data(node2, nbr)['weight'] !=2]))\n match_pair[node1]=node2\n traversed+=[node1,node2]\n logger.debug(f\"Traversing parallel branches from {node1},{node2} {nbrs1}, {nbrs2}\")\n nbrs1_wt = [G.get_edge_data(node1, nbr)['weight'] for nbr in nbrs1]\n nbrs2_wt = [G.get_edge_data(node2, nbr)['weight'] for nbr in nbrs2]\n unique_match=find_unique_matching_branches(G,nbrs1,nbrs2,ports_weight)\n if len(nbrs1)==0 or len(nbrs2)==0:\n logger.debug(f\"no new SD neihbours, returning recursion {match_pair}\")\n elif len(nbrs1) ==1 and len(nbrs2)==1:\n logger.debug(f\"traversing binary branch\")\n compare_nodes(G,all_match_pairs,match_pair,traversed,nbrs1.pop(),nbrs2.pop(),ports_weight)\n elif unique_match:\n logger.debug(f'traversing unique matches {unique_match}')\n match_pair[node1]=node2\n traversed+=[node1,node2]\n for nbr1,nbr2 in unique_match.items():\n logger.debug(f\"recursive call from binary {node1}:{node2} to {nbr1}:{nbr2}\")\n compare_nodes(G,all_match_pairs,match_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n elif len(nbrs1_wt)>len(set(nbrs1_wt))>1 and len(nbrs2_wt)>len(set(nbrs2_wt))>1:\n logger.debug(f\"setting new start points {node1} {node2}\")\n match_pair[node1]=node2\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n else:\n match_pair = {}\n logger.debug(f\"end all traversal from binary branch {node1} {node2}\")\n\n else:\n match_pair = {}\n logger.debug(f\"end of recursion branch, matches {match_pair}\")",
"def test_7_medium(self):\n grid_S = MAPPGridState.create_from_string(\n [\"...#.........\",\n \"...#.........\",\n \"...#.........\",\n \"...########..\",\n \"..12......34.\",\n \"...###..###..\",\n \"...######....\",\n \"........#....\",\n \"........#....\"])\n \n grid_G = MAPPGridState.create_from_string(\n [\"...#.........\",\n \"...#.........\",\n \"...#.........\",\n \"...########..\",\n \"..34......21.\",\n \"...###..###..\",\n \"...######....\",\n \"........#....\",\n \"........#....\"])\n plan = astar(grid_S,\n lambda s : s == grid_G,\n MAPPDistanceSum(grid_G))\n self.assertEqual(36,sum(a.cost for a in plan))",
"def test_efficiency_disconnected_nodes(self):\n assert_equal(nx.efficiency(self.G1, 1, 2), 0)",
"def test_graphs_threshold_eco():\n\n # Groundtruth\n expected_filt = np.load(\"groundtruth/graphs_threshold/eco_filtered.npy\")\n expected_bin = np.load(\"groundtruth/graphs_threshold/eco_binary.npy\")\n\n # Data\n graph = np.load(\"sample_data/graphs_threshold/graph2.npy\")\n\n # Run\n filterted, binary, _ = threshold_eco(graph)\n\n # Test\n np.testing.assert_array_equal(expected_filt, filterted)\n np.testing.assert_array_equal(expected_bin, binary)",
"def main():\n G = nx.gnp_random_graph(100, 0.5)\n centrality = nx.eigenvector_centrality(G)\n avg_centrality = sum(centrality.values()) / len(G)\n\n def has_high_centrality(v):\n return centrality[v] >= avg_centrality\n\n source = 0\n value = centrality.get\n condition = has_high_centrality\n\n found_node = progressive_widening_search(G, source, value, condition)\n c = centrality[found_node]\n print('found node {0} with centrality {1}'.format(found_node, c))",
"def test_am_threshold(Simulator, plt, seed, rng):\n D = 64\n vocab = Vocabulary(D, rng=rng)\n vocab.parse('A+B+C+D')\n\n D2 = int(D / 2)\n vocab2 = Vocabulary(D2, rng=rng)\n vocab2.parse('A+B+C+D')\n\n def input_func(t):\n return vocab.parse('0.49*A').v if t < 0.1 else vocab.parse('0.8*A').v\n\n with nengo.Network('model', seed=seed) as m:\n am = AssociativeMemory(vocab, vocab2, threshold=0.5)\n in_node = nengo.Node(output=input_func, label='input')\n nengo.Connection(in_node, am.input)\n\n in_p = nengo.Probe(in_node)\n out_p = nengo.Probe(am.output, synapse=0.03)\n\n sim = Simulator(m)\n sim.run(0.3)\n t = sim.trange()\n below_th = t < 0.1\n above_th = t > 0.25\n\n plt.subplot(2, 1, 1)\n plt.plot(t, nengo.spa.similarity(sim.data[in_p], vocab))\n plt.ylabel(\"Input\")\n plt.legend(vocab.keys, loc='best')\n plt.subplot(2, 1, 2)\n plt.plot(t, nengo.spa.similarity(sim.data[out_p], vocab2))\n plt.plot(t[above_th], np.ones(t.shape)[above_th] * 0.8, c='g', lw=2)\n plt.ylabel(\"Output\")\n plt.legend(vocab.keys, loc='best')\n\n assert similarity(sim.data[in_p][below_th], vocab.parse(\"A\").v) > 0.48\n assert similarity(sim.data[in_p][above_th], vocab.parse(\"A\").v) > 0.79\n assert similarity(sim.data[out_p][below_th], vocab2.parse(\"0\").v) < 0.01\n assert similarity(sim.data[out_p][above_th], vocab2.parse(\"A\").v) > 0.79",
"def is_best(self, metric: float) -> bool:",
"def get_GNS_cut_reduced(self):\n # we build the optimization around the casted digraph instead of multidigraph\n # for simplicity\n G = self.base_digraph\n s_1 = self.sources[0]\n s_2 = self.sources[1]\n t_1 = self.destinations[0]\n t_2 = self.destinations[1]\n edges = G.edges()\n nodes = G.nodes()\n\n try:\n\n # Great an gurobi instance of the optimization model\n m = Model(\"GNS\")\n m.setParam('OutputFlag', False)\n\n x_v = {}\n # vertex variables for s_1, t_1 cut\n for v in nodes:\n x_v[v] = m.addVar(vtype=GRB.BINARY)\n\n y_v = {}\n # vertex variables for s_2, t_2 cut\n for v in nodes:\n y_v[v] = m.addVar(vtype=GRB.BINARY)\n\n z_v = {}\n # vertex variables for s_2, t_1 cut\n for v in nodes:\n z_v[v] = m.addVar(vtype=GRB.BINARY)\n\n e = {}\n # GNS indicator variable\n for (u,v) in edges:\n e[u,v] = m.addVar(vtype=GRB.BINARY, obj=G[u][v]['capacity'])\n\n # Done with decision variable creation\n # update model\n m.update()\n\n # Constraints\n # 1. Constraints for s_1 - t_1 cut\n for (u,v) in edges:\n if (u,v) == (s_1, t_1):\n m.addConstr(e[u,v] >= 1)\n elif u == s_1:\n m.addConstr(x_v[v] + e[u,v] >= 1)\n elif v == t_1:\n m.addConstr(-x_v[u] + e[u,v] >= 0)\n else:\n m.addConstr(x_v[v] - x_v[u] + e[u,v] >= 0)\n\n if (u,v) == (s_2, t_2):\n m.addConstr(e[u,v] >= 1)\n elif u == s_2:\n m.addConstr(y_v[v] + e[u,v] >= 1)\n elif v == t_2:\n m.addConstr(-y_v[u] + e[u,v] >= 0)\n else:\n m.addConstr(y_v[v] - y_v[u] + e[u,v] >= 0)\n\n if (u,v) == (s_2, t_1):\n m.addConstr(e[u,v] >= 1)\n elif u == s_2:\n m.addConstr(z_v[v] + e[u,v] >= 1)\n elif v == t_1:\n m.addConstr(-z_v[u] + e[u,v] >= 0)\n else:\n m.addConstr(z_v[v] - z_v[u] + e[u,v] >= 0)\n\n m.optimize()\n\n if m.status == GRB.status.OPTIMAL:\n #print \"Min GNS cut value = \" + str(m.objVal)\n #print \"GNS cut edges:\"\n cut_set_edges = []\n for u,v in edges:\n if e[u,v].x != 0:\n #print (u,v), str(G[u][v]['capacity'])\n cut_set_edges.append((u,v, G[u][v]['capacity']))\n return (m.objVal, cut_set_edges)\n else:\n # something went wrong...err...\n print \"Something was wrong\"\n return None, None\n\n except GurobiError:\n print ('Error report from Gurobi')",
"def test_check_cost():",
"def calibrate_threshold(test_graphs):\r\n best_threshold = None\r\n best_result = None\r\n for threhold in range(1, 50):\r\n cur_res = evaluate_argument_mention(test_graphs, threhold)\r\n if (best_result is None) or (cur_res > best_result):\r\n best_result = cur_res\r\n best_threshold = threhold\r\n return (best_threshold, best_result)",
"def test_local_efficiency_disconnected_graph(self):\n assert_equal(nx.local_efficiency(self.G1), 0)",
"def prune_graph(G, threshold=0.15, max_depth=5):\n en = [x for x in G.nodes() if G.degree(x)==1] # endnodes\n avg_th = nx.shortest_path_length(G, en[0], en[1], weight='thick') / \\\n nx.shortest_path_length(G, en[0], en[1], weight='weight')\n th = nx.shortest_path_length(G, en[0], en[1], weight='thick')\n \n def _neighborhood(G, node, n):\n # https://stackoverflow.com/questions/22742754/finding-the-n-degree-neighborhood-of-a-node\n path_lengths = nx.single_source_dijkstra_path_length(G, node, weight=None)\n return [node for node, length in path_lengths.iteritems() if length == n]\n # 1) find neighbors\n deep_neighbors = [_neighborhood(G, en[0], max_depth)[0], \n _neighborhood(G, en[1], max_depth)[0]]\n en_candidates = [list(nx.shortest_simple_paths(G, en[0], deep_neighbors[0]))[0][1:],\n list(nx.shortest_simple_paths(G, en[1], deep_neighbors[1]))[0][1:]]\n \n # compute thickness of all neighbor nodes\n paththick0 =[nx.shortest_path_length(G, en[0], p, weight='thick') for p in en_candidates[0]]\n pathlen0 = [nx.shortest_path_length(G, en[0], p, weight='weight') for p in en_candidates[0]]\n paththick1 =[nx.shortest_path_length(G, en[1], p, weight='thick') for p in en_candidates[1]]\n pathlen1 = [nx.shortest_path_length(G, en[1], p, weight='weight') for p in en_candidates[1]]\n avgthick0 = [paththick0[i]/pathlen0[i] for i in range(max_depth)]\n avgthick1 = [paththick1[i]/pathlen1[i] for i in range(max_depth)]\n \n # 2) add to remove list all the nodes below threshold of avg thickness\n idx_rm0 = [i for i in range(len(avgthick0)) if avgthick0[i] < avg_th*threshold ]\n idx_rm1 = [i for i in range(len(avgthick1)) if avgthick1[i] < avg_th*threshold ]\n # 3) add to remove list all the nodes that have deep edge less thick than \"shallow\" edge\n idx_rm0 += [i for i in range(len(avgthick0) - 1) if avgthick0[i]>avgthick0[i+1]]\n idx_rm1 += [i for i in range(len(avgthick1) - 1) if avgthick1[i]>avgthick1[i+1]]\n \n # 4) remove list of nodes that are indexed\n idx_max0 = 0 if not idx_rm0 else max(idx_rm0) #rm nothing if empty rm array\n idx_max1 = 0 if not idx_rm1 else max(idx_rm1)\n en_rm0 = ([en[0]] + en_candidates[0])[:idx_max0]\n en_rm1 = ([en[1]] + en_candidates[1])[:idx_max1]\n Grm = G.copy()\n Grm.remove_nodes_from(en_rm0 + en_rm1)\n \n return Grm",
"def test_check_numa_aware_ksm_status(self):\n self.check_host_activation(ksm_merge_across_nodes=False)",
"def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n path_to_point = {}\n cost_to_point = {}\n\n # Get the start node\n start_node = problem.getStartState()\n fringe_node = [start_node]\n path_to_point[start_node] = []\n cost_to_point[start_node] = problem.getCostOfActions(path_to_point[start_node])\n\n goal_found = False\n\n while(not goal_found):\n #for i in range(100): \n nodes_to_expand = set()\n # get max value node in the fringe node\n min_val = float(\"inf\")\n for one_node in fringe_node:\n # Compute the cost to reach a node\n if cost_to_point[one_node] < min_val:\n min_val = cost_to_point[one_node]\n \n for one_node in fringe_node:\n # Compute the cost to reach a node\n if cost_to_point[one_node] == min_val:\n nodes_to_expand.add(one_node)\n fringe_node.remove(one_node)\n\n # Expand the fringe node \n for one_node in nodes_to_expand:\n path_to_parent = path_to_point[one_node]\n for nxt_node in problem.getSuccessors(one_node):\n pos = nxt_node[0]\n mv = nxt_node[1]\n # check if point already present in path to point\n prev_cost = float(\"inf\")\n if pos in cost_to_point:\n prev_cost = cost_to_point[pos]\n new_path = path_to_parent + [mv]\n if prev_cost > problem.getCostOfActions(new_path):\n path_to_point[pos] = new_path\n cost_to_point[pos] = problem.getCostOfActions(new_path)\n fringe_node.append(pos)\n\n # Check if destination is reached in the fringe node\n for one_node in fringe_node:\n if problem.isGoalState(one_node):\n final_node = one_node\n goal_found = True\n break\n \n #print(len(fringe_node))\n print(final_node)\n print(path_to_point[final_node])\n return path_to_point[final_node] \n\n util.raiseNotDefined()",
"def detect():\n\n _pass_done = 0\n _improve = True\n new_mod = modularity()\n cur_mod = -999999999.0\n rl = random.sample(range(0, node_count), node_count)\n while _improve & (_pass_done < max_pass) & (new_mod - cur_mod > min_mod):\n cur_mod = new_mod\n _improve = False\n _pass_done += 1\n for node_tmp in rl:\n n = node_tmp\n nc = bl[n]\n ncomm = neigh_comm(n)\n remove(n, nc, ncomm[nc])\n best_c = nc\n best_l = 0.0\n best_incre = 0.0\n for c in ncomm:\n incre = modularity_gain(n, c, ncomm[c])\n if incre > best_incre:\n best_incre = incre\n best_c = c\n best_l = ncomm[c]\n insert(n, best_c, best_l)\n if best_c != nc:\n _improve = True\n new_mod = modularity()\n print new_mod",
"def test_check_numa_aware_ksm_status(self):\n self.check_host_activation(ksm_merge_across_nodes=True)",
"def _compute_gp_all_passbands_2D(obj, dataset, number_gp, t_min, t_max,\n output_root=None, **kwargs):\n obj_data = dataset.data[obj] # object's lightcurve\n gp_times = np.linspace(t_min, t_max, number_gp)\n filter_set = np.asarray(dataset.filter_set)\n\n kernel, gp_params, gp_predict = fit_2d_gp(obj_data, return_kernel=True,\n return_gp_params=True)\n gp_wavelengths = np.vectorize(pb_wavelengths.get)(filter_set)\n obj_gps = predict_2d_gp(gp_predict, gp_times, gp_wavelengths)\n\n if output_root is not None:\n obj_gps.write(os.path.join(output_root, f'gp_{obj}'), format='fits',\n overwrite=True)\n path_save_gps = os.path.join(output_root, f'used_gp_{obj}.pckl')\n path_save_kernels = os.path.join(output_root, f'used_kernels_{obj}'\n '.pckl')\n path_save_params = os.path.join(output_root, f'used_params_{obj}.pckl')\n # Save the GP already conditioned on a specific set of observations\n with open(path_save_gps, 'wb') as f:\n pickle.dump(gp_predict, f, pickle.HIGHEST_PROTOCOL)\n with open(path_save_kernels, 'wb') as f:\n pickle.dump(kernel, f, pickle.HIGHEST_PROTOCOL)\n with open(path_save_params, 'wb') as f:\n pickle.dump(gp_params, f, pickle.HIGHEST_PROTOCOL)\n\n return obj_gps",
"def _compute_gp_all_passbands_1D(obj, dataset, number_gp, t_min, t_max,\n output_root=None, **kwargs):\n try:\n kernel_param = kwargs[\"kernel_param\"]\n except KeyError:\n kernel_param = [500., 20.]\n\n obj_data = dataset.data[obj] # object's light curve\n obj_data = cs.rename_passband_column(obj_data.to_pandas())\n unique_pbs = np.unique(obj_data.passband)\n gp_times = np.linspace(t_min, t_max, number_gp)\n\n # Store the output in another astropy table\n obj_gps = []\n used_gp_dict = {}\n used_kernels_dict = {}\n filter_set = np.asarray(dataset.filter_set)\n for pb in filter_set:\n used_kernels_dict[pb] = None # inilialize None kernel to each passband\n if pb in unique_pbs:\n is_pb = obj_data.passband == pb # observations in this passband\n obj_data_pb = obj_data.loc[is_pb]\n\n gp_obs, gp, chosen_kernel = fit_best_gp(kernel_param,\n obj_data_pb, gp_times)\n\n mu, std = gp_obs.flux.values, gp_obs.flux_error.values\n # stack the GP results in a array momentarily\n obj_gp_pb_array = np.column_stack((gp_times, mu, std))\n used_kernels_dict[pb] = chosen_kernel\n # Save the GP already conditioned on a specific set of observations\n gp_predict = partial(gp.predict, obj_data_pb.flux)\n used_gp_dict[pb] = gp_predict\n else:\n obj_gp_pb_array = np.zeros([number_gp, 3])\n obj_gp_pb = Table([obj_gp_pb_array[:, 0], obj_gp_pb_array[:, 1],\n obj_gp_pb_array[:, 2], [pb] * number_gp],\n names=['mjd', 'flux', 'flux_error', 'filter'])\n if len(obj_gps) == 0: # initialize the table for 1st passband\n obj_gps = obj_gp_pb\n else:\n obj_gps = vstack((obj_gps, obj_gp_pb))\n\n if output_root is not None:\n obj_gps.write(os.path.join(output_root, 'gp_' + obj), format='fits',\n overwrite=True)\n path_save_gps = os.path.join(output_root, 'used_gp_dict_{}.pckl'\n ''.format(obj))\n path_save_kernels = os.path.join(output_root, 'used_kernels_dict_{}.'\n 'pckl'.format(obj))\n with open(path_save_gps, 'wb') as f:\n pickle.dump(used_gp_dict, f, pickle.HIGHEST_PROTOCOL)\n with open(path_save_kernels, 'wb') as f:\n pickle.dump(used_kernels_dict, f, pickle.HIGHEST_PROTOCOL)\n\n return obj_gps",
"def astar(grid, heuristic):\r\n evaluatedMap = {}\r\n unevaluatedMap = {}\r\n start = grid.getStart()\r\n goal = grid.getGoals()[0]\r\n startG = 0\r\n startH = heuristic(start,goal)\r\n currentNode = Node(start,startH,startG)\r\n unevaluatedMap[currentNode.coord] = currentNode\r\n \r\n while len(unevaluatedMap) > 0:\r\n # I tried using a PriorityQueue but because a node could end up with \r\n # an updated priority it really didn't make sense to use one and\r\n # instead had to just serach the dictionary each time for the smallest\r\n # priority which is the sum of g and h\r\n currentNode = min(unevaluatedMap.values(),key=lambda x:x.g + x.h)\r\n \r\n # if the current node is the goal then create the path by iterating backwards\r\n # and pushing the current node to the front of the path and then moving to the\r\n # parent node\r\n if currentNode.coord == goal:\r\n path = []\r\n while currentNode.parentNode:\r\n path.insert(0,currentNode.coord)\r\n currentNode = currentNode.parentNode\r\n path.insert(0,currentNode.coord)\r\n grid.setPath(path)\r\n return\r\n \r\n # Move the current node to the evaluated map and delete it from\r\n # the unevaluated map\r\n evaluatedMap[currentNode.coord] = currentNode\r\n del unevaluatedMap[currentNode.coord]\r\n \r\n # Mark the current node as having been visited\r\n grid.addVisited(currentNode.coord)\r\n \r\n # Get the neighbors of the current node\r\n neighbors = grid.getNeighbors(currentNode.coord)\r\n\r\n # For each neighbor check if that neighbor has alread been evaluated\r\n # if it has then skip that neighbor. If it hasn't and it isn't in the\r\n # unevaluated map add it with a high cost and heuristic.\r\n # Get the neighbor from the unevaluated map and calculate the current\r\n # cost. If the current cost is less than what existed update the neighbor\r\n # and add it back to the list otherwise skip to next neighbor\r\n for neighbor in neighbors:\r\n ncoord = (neighbor[0])\r\n if (ncoord) in evaluatedMap:\r\n continue\r\n if (ncoord) not in unevaluatedMap:\r\n node = Node(ncoord,float('inf'),float('inf'))\r\n unevaluatedMap[ncoord] = node\r\n \r\n node = unevaluatedMap[ncoord]\r\n calc_cost = currentNode.g + neighbor[1]\r\n if calc_cost >= node.g:\r\n continue\r\n \r\n node.parentNode = currentNode\r\n node.g = calc_cost\r\n node.h = heuristic(ncoord,goal)",
"def mst_prim(g):\n mst = Graph() # create new Graph object to hold the MST\n \n # if graph is empty\n if not g:\n return mst\n \n # nearest_neighbour[v] is the nearest neighbour of v that is in the MST\n # (v is a vertex outside the MST and has at least one neighbour in the MST)\n nearest_neighbour = {}\n # smallest_distance[v] is the distance of v to its nearest neighbour in the MST\n # (v is a vertex outside the MST and has at least one neighbour in the MST)\n smallest_distance = {}\n # v is in unvisited iff v has not been added to the MST\n unvisited = set(g)\n \n u = next(iter(g)) # select any one vertex from g\n mst.add_vertex(u.get_key()) # add a copy of it to the MST\n unvisited.remove(u)\n \n # for each neighbour of vertex u\n for n in u.get_neighbors():\n if n is u:\n # avoid self-loops\n continue\n # update dictionaries\n nearest_neighbour[n] = mst.get_vertex(u.get_key())\n smallest_distance[n] = u.get_weight(n)\n \n # loop until smallest_distance becomes empty\n while (smallest_distance):\n # get nearest vertex outside the MST\n outside_mst = min(smallest_distance, key=smallest_distance.get)\n # get the nearest neighbour inside the MST\n inside_mst = nearest_neighbour[outside_mst]\n \n # add a copy of the outside vertex to the MST\n mst.add_vertex(outside_mst.get_key())\n # add the edge to the MST\n mst.add_edge(outside_mst.get_key(), inside_mst.get_key(),\n smallest_distance[outside_mst])\n mst.add_edge(inside_mst.get_key(), outside_mst.get_key(),\n smallest_distance[outside_mst])\n \n # now that outside_mst has been added to the MST, remove it from our\n # dictionaries and the set unvisited\n unvisited.remove(outside_mst)\n del smallest_distance[outside_mst]\n del nearest_neighbour[outside_mst]\n \n # update dictionaries\n for n in outside_mst.get_neighbors():\n if n in unvisited:\n if n not in smallest_distance:\n smallest_distance[n] = outside_mst.get_weight(n)\n nearest_neighbour[n] = mst.get_vertex(outside_mst.get_key())\n else:\n if smallest_distance[n] > outside_mst.get_weight(n):\n smallest_distance[n] = outside_mst.get_weight(n)\n nearest_neighbour[n] = mst.get_vertex(outside_mst.get_key())\n \n return mst",
"def decision(grid):\n child = Maximize((grid,0),-999999999,999999999)[0]\n Child = child.map\n g = grid.clone()\n for M in range(4):\n if g.move(M):\n if g.map == Child:\n # global prune\n # global pruneLog\n # pruneLog.append(prune)\n # print(prune)\n # print(sum(pruneLog)/len(pruneLog))\n return M\n g = grid.clone()",
"def test_am_threshold(Simulator, plt, seed, rng):\n d = 64\n vocab = Vocabulary(d, pointer_gen=rng)\n vocab.populate('A; B; C; D')\n\n d2 = int(d / 2)\n vocab2 = Vocabulary(d2, pointer_gen=rng)\n vocab2.populate('A; B; C; D')\n\n def input_func(t):\n return '0.49 * A' if t < 0.1 else '0.8 * B'\n\n with spa.Network('model', seed=seed) as m:\n m.am = ThresholdingAssocMem(\n threshold=0.5, input_vocab=vocab, output_vocab=vocab2,\n function=filtered_step_fn, mapping='by-key')\n m.stimulus = spa.Transcode(input_func, output_vocab=vocab)\n m.stimulus >> m.am\n\n in_p = nengo.Probe(m.am.input)\n out_p = nengo.Probe(m.am.output, synapse=0.03)\n\n with Simulator(m) as sim:\n sim.run(0.3)\n t = sim.trange()\n below_th = t < 0.1\n above_th = t > 0.25\n\n plt.subplot(2, 1, 1)\n plt.plot(t, similarity(sim.data[in_p], vocab))\n plt.ylabel(\"Input\")\n plt.subplot(2, 1, 2)\n plt.plot(t, similarity(sim.data[out_p], vocab2))\n plt.plot(t[above_th], np.ones(t.shape)[above_th] * 0.9, c='g', lw=2)\n plt.ylabel(\"Output\")\n\n assert np.mean(sim.data[out_p][below_th]) < 0.01\n assert_sp_close(t, sim.data[out_p], vocab2['B'], skip=0.25, duration=0.05)",
"def __call__(self, graph: Data, n_min: int, nodes_to_keep: List[int] = None, exhaustive: bool = False):\n nodes_to_keep = nodes_to_keep if nodes_to_keep is not None else []\n mcts = self._get_mcts(graph, n_min, nodes_to_keep, exhaustive)\n\n for iteration in range(self.m):\n mcts.search_one_iteration()\n\n explanation = mcts.best_leaf_node()\n\n return explanation.node_set, mcts"
]
| [
"0.79985285",
"0.7497226",
"0.62382823",
"0.5841657",
"0.5809271",
"0.58082795",
"0.57625425",
"0.5664338",
"0.5620274",
"0.56043947",
"0.5597901",
"0.55592453",
"0.5536385",
"0.55205554",
"0.55058116",
"0.5488807",
"0.5447505",
"0.5446787",
"0.5415955",
"0.5395625",
"0.53925914",
"0.53909886",
"0.53899497",
"0.53806406",
"0.5378328",
"0.5375296",
"0.53521997",
"0.53443986",
"0.5326884",
"0.53202224"
]
| 0.7873814 | 1 |
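Each row pairs a query with one positive document, thirty mined negative documents, and their scores, and the metadata marks a (query, document, negatives) triplet objective. The following is a minimal sketch, assuming plain dict rows with the column names from the header above; the helper name `row_to_triplets` and the score-based filter are illustrative assumptions, not part of the dataset.

```python
# Illustrative only: field names follow the schema above; scores are stored as
# strings, hence the float() conversions.
def row_to_triplets(row):
    query = row["query"]
    positive = row["document"]
    pos_score = float(row["document_score"])
    triplets = []
    for neg, neg_score in zip(row["negatives"], row["negative_scores"]):
        # Keep only negatives scored below the positive document
        # (an assumed filtering choice, not mandated by the dataset).
        if float(neg_score) < pos_score:
            triplets.append((query, positive, neg))
    return triplets
```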
Test for graph thresholding using global cost efficiency (GCE) on OMSTs (extract the first five MSTs). | def test_graphs_threshold_omst_global_cost_efficiency2():
    # the function is optimized at the 3rd OMST, so it is going to yield the same results
    # as the exhaustive search
    # Groundtruth
    expected = np.load("groundtruth/graphs_threshold/omst_gce.npy")
    # Data
    graph = np.load("sample_data/graphs_threshold/graph.npy")
    # Run
    n_msts = 5
    _, CIJtree, _, _, _, _, _, _ = threshold_omst_global_cost_efficiency(
        graph, n_msts=n_msts
    )
    # Test
    np.testing.assert_array_equal(expected, CIJtree) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_graphs_threshold_omst_global_cost_efficiency():\n # the function is optmized at the 3rd OMST.\n\n # Groundtruth\n expected = np.load(\"groundtruth/graphs_threshold/omst_gce.npy\")\n\n # Data\n graph = np.load(\"sample_data/graphs_threshold/graph.npy\")\n\n # Run\n _, CIJtree, _, _, _, _, _, _ = threshold_omst_global_cost_efficiency(\n graph, n_msts=None\n )\n\n # Test\n np.testing.assert_array_equal(expected, CIJtree)",
"def test_graphs_threshold_global_cost_efficiency():\n\n # Groundtruth\n expected = np.load(\"groundtruth/graphs_threshold/gce.npy\")\n\n # Data\n graph = np.load(\"sample_data/graphs_threshold/graph.npy\")\n\n # Run\n iterations = 50\n binary_mask, _, _, _, _ = threshold_global_cost_efficiency(graph, iterations)\n\n # Test\n np.testing.assert_array_equal(expected, binary_mask)",
"def tryEverything(g, verbose, graphname):\r\n prio = ['rku', 'random', 'BIL', 'rkd', 'cluHPS', 'rkusd', 'rkuad']\r\n placement = ['eft', 'BIM*', 'OLB', 'MET', 'DL', 'GDL']\r\n costFunction = ['mean', 'median', 'maxmax', 'minmax', 'minmin', 'maxmin']\r\n desc = ['DLS/DC', None, 'DCP']\r\n useOfBIM = [False, True]\r\n insertion = [False, True]\r\n BSA = [False, True]\r\n res: Dict[str, List[float]] = {}\r\n cnt = 0\r\n\r\n for ip, p in enumerate(prio):\r\n for ipl, pl in enumerate(placement):\r\n for ic, c in enumerate(costFunction):\r\n if p != 'BIL' or c == 'mean' or pl in ['DL', 'GDL']:\r\n for idd, d in enumerate(desc):\r\n for iu, u in enumerate(useOfBIM):\r\n for ii, i in enumerate(insertion):\r\n for ib, b in enumerate(BSA):\r\n cnt += 1\r\n name = \";\".join(map(str, [ip, ic, ipl, idd, iu, ii, ib]))\r\n\r\n # dispName = \"-\".join(map(str, [p, pl, c, d, u, i, b]))\r\n # print(\"Heuristic n°\", cnt, \"-\", dispName)\r\n # print(\"Heuristic n°\", cnt, \"-\", name)\r\n\r\n startScheduling = timeit.default_timer()\r\n try:\r\n schedule = computeSchedule(g, strategyPrio=p, costFunction=c,\r\n strategyPlacement=pl,\r\n useOfBIM=u, desc=d,\r\n insertion=i, bsa=b, verbose=verbose)\r\n verifPrec(g, schedule, verbose)\r\n endScheduling = timeit.default_timer()\r\n # print(\"Ended in :\", 1000*(endScheduling - startScheduling), \"ms\")\r\n # print(\"Ended in :\", round(1000 * (endScheduling - startScheduling),2), \"ms\")\r\n timeS = round(1000 * (endScheduling - startScheduling), 2)\r\n # print(f\"timeS : {timeS}\")\r\n if verbose:\r\n print(f\"Time : {timeS}ms\")\r\n res[name] = [round(schedule[getExitTask(g)][2], 6), timeS]\r\n except Exception as _:\r\n\r\n print(\"Error for : \" + name + \" on file \" + graphname)\r\n file = open(\"error.log\", 'a')\r\n file.write(f\"Error for {name} on file {graphname}\\n\")\r\n file.close()\r\n raise _\r\n return res\r\n return res",
"def test_using_ego_graph(self):\n assert_equal(nx.local_efficiency(self.G3), 7 / 12)",
"def main():\n logfile = setup_log(os.path.join(os.environ['hel'], 'logs',\n 'thresh_cluster_fsl'))\n logfile.info('Threshold and cluster.')\n logfile.info('Doing the wgc PairedTres data. \\\n This is the main result for the difference between \\\n View1 and View2 in weighted global connectivity')\n outdir = os.path.join(os.environ['hel'], 'graph_analyses',\n 'randomise_global_connectivity')\n\n os.chdir(outdir)\n prefx = 'wgc_PairedTres_n10000'\n corrctd_p = '{}_clustere_corrp_tstat2.nii.gz'.format(\n prefx)\n stat = '{}_tstat2.nii.gz'.format(prefx)\n outfilename = '{}_thresh_clustere_corrp_tstat2'.format(\n prefx)\n fsl_maths(logfile, corrctd_p, stat, outfilename)\n clust_in = '{}.nii.gz'.format(outfilename)\n clst_indx = '{}_cluster_index'.format(outfilename)\n lmax_f = '{}_lmax.txt'.format(outfilename)\n clst_sz = '{}_cluster_size'.format(outfilename)\n logfile.info('Now doing cluster for wgc.')\n cluster(logfile, clust_in, clst_indx, lmax_f, clst_sz)",
"def calibrate_threshold(test_graphs):\r\n best_threshold = None\r\n best_result = None\r\n for threhold in range(1, 50):\r\n cur_res = evaluate_argument_mention(test_graphs, threhold)\r\n if (best_result is None) or (cur_res > best_result):\r\n best_result = cur_res\r\n best_threshold = threhold\r\n return (best_threshold, best_result)",
"def test_7_medium(self):\n grid_S = MAPPGridState.create_from_string(\n [\"...#.........\",\n \"...#.........\",\n \"...#.........\",\n \"...########..\",\n \"..12......34.\",\n \"...###..###..\",\n \"...######....\",\n \"........#....\",\n \"........#....\"])\n \n grid_G = MAPPGridState.create_from_string(\n [\"...#.........\",\n \"...#.........\",\n \"...#.........\",\n \"...########..\",\n \"..34......21.\",\n \"...###..###..\",\n \"...######....\",\n \"........#....\",\n \"........#....\"])\n plan = astar(grid_S,\n lambda s : s == grid_G,\n MAPPDistanceSum(grid_G))\n self.assertEqual(36,sum(a.cost for a in plan))",
"def main():\n G = nx.gnp_random_graph(100, 0.5)\n centrality = nx.eigenvector_centrality(G)\n avg_centrality = sum(centrality.values()) / len(G)\n\n def has_high_centrality(v):\n return centrality[v] >= avg_centrality\n\n source = 0\n value = centrality.get\n condition = has_high_centrality\n\n found_node = progressive_widening_search(G, source, value, condition)\n c = centrality[found_node]\n print('found node {0} with centrality {1}'.format(found_node, c))",
"def compare_nodes(G,all_match_pairs,match_pair,traversed,node1,node2, ports_weight):\n logger.debug(f\"comparing {node1},{node2}, traversed {traversed}\")\n nbrs1 = sorted(set(G.neighbors(node1)) - set(traversed))\n #remove dummies\n nbrs1 = sorted(set([nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=7]))\n nbrs2 = sorted(set(G.neighbors(node2)) - set(traversed))\n #remove dummies\n nbrs2 = sorted(set([nbr for nbr in nbrs2 if G.get_edge_data(node2, nbr)['weight'] !=7]))\n logger.debug(f\"node1:{node1},property: {G.nodes[node1]},neigbors1: {nbrs1}\")\n logger.debug(f\"node2:{node2},property: {G.nodes[node2]},neigbors2: {nbrs2}\")\n if not nbrs1 or not nbrs2:\n if compare_two_nodes(G, node1, node2, ports_weight):\n match_pair[node1] = node2\n logger.debug(f\"no new neihbours, returning recursion {match_pair}\")\n return\n elif len(nbrs1)> 10:\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n logger.debug(f\"skipping high fanout nets due to large computation, {node1} {nbrs1}\")\n traversed.append(node1)\n return\n elif len(nbrs2)> 10:\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n traversed.append(node2)\n logger.debug(f\"skipping high fanout nets due to large computation, {node2} {nbrs2}\")\n return\n\n if node1 == node2:\n if node1 in match_pair.keys() or node1 in match_pair.values():\n logger.debug(\"avoid existing pair wise symmetry\")\n return\n logger.debug(f\"single node {node1}, nbrs {nbrs1}, nbr_weight {[G.get_edge_data(node1,nbr) for nbr in nbrs1]}\")\n SD_nbrs= [nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=2]\n ## TBD: filter based on primitive constraints\n ## Right now will try to figure out S/D paths\n if len(SD_nbrs) ==0:\n logger.debug(f\"No SD paths found to traverse\")\n match_pair[node1]=node1\n elif len(SD_nbrs) ==1:\n logger.debug(f\"traversing single S/D path {SD_nbrs}\")\n match_pair[node1]=node1\n traversed.append(node1)\n compare_nodes(G,all_match_pairs,match_pair,traversed,SD_nbrs[0],SD_nbrs[0],ports_weight)\n else:\n logger.debug(f\" multiple nodes diverging {SD_nbrs}\")\n logger.debug(f\"nbr weights: {SD_nbrs} {[G.get_edge_data(node1, nbr)['weight'] for nbr in SD_nbrs ]}\")\n match_pair[node1]=node1\n traversed.append(node1)\n new_sp=sorted(set(SD_nbrs)-set(traversed))\n all_match_pairs_local={}\n for nbr1,nbr2 in combinations(new_sp, 2):\n logger.debug(f\"recursive pair call from single branch {nbr1} {nbr2}\")\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n if new_pair:\n #new_pair[nbr1]=nbr2\n all_match_pairs_local[nbr1+'_'+nbr2] = new_pair\n all_match_pairs_local={k: v for k, v in all_match_pairs_local.items() if len(v)>0}\n if len(all_match_pairs_local)==1:\n match_pair.update( all_match_pairs_local[list(all_match_pairs_local.keys())[0]])\n logger.debug(f\"found inline pair: {pprint.pformat(match_pair, indent=4)}\")\n else:\n for nbr1 in new_sp:\n if (nbr1+'_'+nbr1 not in all_match_pairs.keys()):\n logger.debug(f\"recursive single branch call from single branch {nbr1} {nbr1}\")\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr1,ports_weight)\n #filtering multiple axis of symmetries with same block, ideally they should be handled by array generation\n if new_pair:\n all_match_pairs[nbr1+'_'+nbr1] = new_pair\n logger.debug(f\"updating match pairs: 
{pprint.pformat(all_match_pairs, indent=4)}\")\n\n\n elif nbrs1 == nbrs2:\n logger.debug(f\"traversing converging branch\")\n match_pair[node1]=node2\n traversed+=[node1,node2]\n nbrs1=sorted(set(nbrs1)-set([node1,node2]))\n logger.debug(f\"all non traversed neighbours: {nbrs1}\")\n if len(nbrs1)==1:\n nbr1=nbr2=nbrs1[0]\n logger.debug(f\"keeping single converged branch inline {nbr1} {nbr2}\")\n compare_nodes(G,all_match_pairs,match_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n else:\n for nbr1,nbr2 in combinations_with_replacement(nbrs1,2):\n logger.debug(f\"recursive call from converged branch {nbr1} {nbr2}\")\n if nbr1+'_'+nbr2 not in all_match_pairs.keys():\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n #filtering multiple axis of symmetries with same block, ideally they should be handled by array generation\n if new_pair:\n all_match_pairs[nbr1+'_'+nbr2] = new_pair\n logger.debug(f\"updating match pairs: {pprint.pformat(all_match_pairs, indent=4)}\")\n\n\n elif compare_two_nodes(G,node1,node2,ports_weight):\n nbrs1 = sorted(set([nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=2]))\n nbrs2 = sorted(set([nbr for nbr in nbrs2 if G.get_edge_data(node2, nbr)['weight'] !=2]))\n match_pair[node1]=node2\n traversed+=[node1,node2]\n logger.debug(f\"Traversing parallel branches from {node1},{node2} {nbrs1}, {nbrs2}\")\n nbrs1_wt = [G.get_edge_data(node1, nbr)['weight'] for nbr in nbrs1]\n nbrs2_wt = [G.get_edge_data(node2, nbr)['weight'] for nbr in nbrs2]\n unique_match=find_unique_matching_branches(G,nbrs1,nbrs2,ports_weight)\n if len(nbrs1)==0 or len(nbrs2)==0:\n logger.debug(f\"no new SD neihbours, returning recursion {match_pair}\")\n elif len(nbrs1) ==1 and len(nbrs2)==1:\n logger.debug(f\"traversing binary branch\")\n compare_nodes(G,all_match_pairs,match_pair,traversed,nbrs1.pop(),nbrs2.pop(),ports_weight)\n elif unique_match:\n logger.debug(f'traversing unique matches {unique_match}')\n match_pair[node1]=node2\n traversed+=[node1,node2]\n for nbr1,nbr2 in unique_match.items():\n logger.debug(f\"recursive call from binary {node1}:{node2} to {nbr1}:{nbr2}\")\n compare_nodes(G,all_match_pairs,match_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n elif len(nbrs1_wt)>len(set(nbrs1_wt))>1 and len(nbrs2_wt)>len(set(nbrs2_wt))>1:\n logger.debug(f\"setting new start points {node1} {node2}\")\n match_pair[node1]=node2\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n else:\n match_pair = {}\n logger.debug(f\"end all traversal from binary branch {node1} {node2}\")\n\n else:\n match_pair = {}\n logger.debug(f\"end of recursion branch, matches {match_pair}\")",
"def test_am_threshold(Simulator, plt, seed, rng):\n D = 64\n vocab = Vocabulary(D, rng=rng)\n vocab.parse('A+B+C+D')\n\n D2 = int(D / 2)\n vocab2 = Vocabulary(D2, rng=rng)\n vocab2.parse('A+B+C+D')\n\n def input_func(t):\n return vocab.parse('0.49*A').v if t < 0.1 else vocab.parse('0.8*A').v\n\n with nengo.Network('model', seed=seed) as m:\n am = AssociativeMemory(vocab, vocab2, threshold=0.5)\n in_node = nengo.Node(output=input_func, label='input')\n nengo.Connection(in_node, am.input)\n\n in_p = nengo.Probe(in_node)\n out_p = nengo.Probe(am.output, synapse=0.03)\n\n sim = Simulator(m)\n sim.run(0.3)\n t = sim.trange()\n below_th = t < 0.1\n above_th = t > 0.25\n\n plt.subplot(2, 1, 1)\n plt.plot(t, nengo.spa.similarity(sim.data[in_p], vocab))\n plt.ylabel(\"Input\")\n plt.legend(vocab.keys, loc='best')\n plt.subplot(2, 1, 2)\n plt.plot(t, nengo.spa.similarity(sim.data[out_p], vocab2))\n plt.plot(t[above_th], np.ones(t.shape)[above_th] * 0.8, c='g', lw=2)\n plt.ylabel(\"Output\")\n plt.legend(vocab.keys, loc='best')\n\n assert similarity(sim.data[in_p][below_th], vocab.parse(\"A\").v) > 0.48\n assert similarity(sim.data[in_p][above_th], vocab.parse(\"A\").v) > 0.79\n assert similarity(sim.data[out_p][below_th], vocab2.parse(\"0\").v) < 0.01\n assert similarity(sim.data[out_p][above_th], vocab2.parse(\"A\").v) > 0.79",
"def test_global_efficiency_complete_graph(self):\n for n in range(2, 10):\n G = nx.complete_graph(n)\n assert_equal(nx.global_efficiency(G), 1)",
"def is_best(self, metric: float) -> bool:",
"def global_efficiency(graph):\n return nx.global_efficiency(graph.graph)",
"def test_efficiency_disconnected_nodes(self):\n assert_equal(nx.efficiency(self.G1, 1, 2), 0)",
"def test_graphs_threshold_eco():\n\n # Groundtruth\n expected_filt = np.load(\"groundtruth/graphs_threshold/eco_filtered.npy\")\n expected_bin = np.load(\"groundtruth/graphs_threshold/eco_binary.npy\")\n\n # Data\n graph = np.load(\"sample_data/graphs_threshold/graph2.npy\")\n\n # Run\n filterted, binary, _ = threshold_eco(graph)\n\n # Test\n np.testing.assert_array_equal(expected_filt, filterted)\n np.testing.assert_array_equal(expected_bin, binary)",
"def detect():\n\n _pass_done = 0\n _improve = True\n new_mod = modularity()\n cur_mod = -999999999.0\n rl = random.sample(range(0, node_count), node_count)\n while _improve & (_pass_done < max_pass) & (new_mod - cur_mod > min_mod):\n cur_mod = new_mod\n _improve = False\n _pass_done += 1\n for node_tmp in rl:\n n = node_tmp\n nc = bl[n]\n ncomm = neigh_comm(n)\n remove(n, nc, ncomm[nc])\n best_c = nc\n best_l = 0.0\n best_incre = 0.0\n for c in ncomm:\n incre = modularity_gain(n, c, ncomm[c])\n if incre > best_incre:\n best_incre = incre\n best_c = c\n best_l = ncomm[c]\n insert(n, best_c, best_l)\n if best_c != nc:\n _improve = True\n new_mod = modularity()\n print new_mod",
"def test_check_cost():",
"def heuristic_cost_estimate(self, current):\n relevants = 0\n accurate_relevants = 0\n for i in range(len(self.sample)):\n if is_relevant(self.sample.iloc[i], current.anchor):\n relevants += 1\n if self.pred_sample.iloc[i] == self.pred_example:\n accurate_relevants += 1\n accuracy = accurate_relevants/relevants\n if self.threshold-accuracy <= 0:\n x = 5\n return max(0, self.threshold - accuracy)",
"def test_am_threshold(Simulator, plt, seed, rng):\n d = 64\n vocab = Vocabulary(d, pointer_gen=rng)\n vocab.populate('A; B; C; D')\n\n d2 = int(d / 2)\n vocab2 = Vocabulary(d2, pointer_gen=rng)\n vocab2.populate('A; B; C; D')\n\n def input_func(t):\n return '0.49 * A' if t < 0.1 else '0.8 * B'\n\n with spa.Network('model', seed=seed) as m:\n m.am = ThresholdingAssocMem(\n threshold=0.5, input_vocab=vocab, output_vocab=vocab2,\n function=filtered_step_fn, mapping='by-key')\n m.stimulus = spa.Transcode(input_func, output_vocab=vocab)\n m.stimulus >> m.am\n\n in_p = nengo.Probe(m.am.input)\n out_p = nengo.Probe(m.am.output, synapse=0.03)\n\n with Simulator(m) as sim:\n sim.run(0.3)\n t = sim.trange()\n below_th = t < 0.1\n above_th = t > 0.25\n\n plt.subplot(2, 1, 1)\n plt.plot(t, similarity(sim.data[in_p], vocab))\n plt.ylabel(\"Input\")\n plt.subplot(2, 1, 2)\n plt.plot(t, similarity(sim.data[out_p], vocab2))\n plt.plot(t[above_th], np.ones(t.shape)[above_th] * 0.9, c='g', lw=2)\n plt.ylabel(\"Output\")\n\n assert np.mean(sim.data[out_p][below_th]) < 0.01\n assert_sp_close(t, sim.data[out_p], vocab2['B'], skip=0.25, duration=0.05)",
"def fillThreshDB():\n from ROOT import TFile, TTree\n\n # Do we actually want to write new values? Or just print stuff out?\n fillDB = True\n\n calDB = db.TinyDB(\"%s/calDB-v2.json\" % dsi.latSWDir)\n pars = db.Query()\n bkg = dsi.BkgInfo()\n\n # loop over datasets and bkgIdx\n # for ds in [0,1,2,3,4,\"5A\",\"5B\",\"5C\",6]:\n for ds in [6]:\n dsNum = ds if isinstance(ds, int) else 5\n goodChans = det.getGoodChanList(dsNum)\n\n for bkgIdx in bkg.getRanges(ds):\n\n # ==== loop over sub-ranges (when TF was run) ====\n\n rFirst, rLast = bkg.getRanges(ds)[bkgIdx][0], bkg.getRanges(ds)[bkgIdx][-1]\n\n subRanges = bkg.GetSubRanges(ds,bkgIdx)\n if len(subRanges) == 0: subRanges.append((rFirst, rLast))\n\n for subIdx, (runLo, runHi) in enumerate(subRanges):\n\n # Load threshold table\n if len(subRanges) > 1:\n fname = \"%s/threshDS%d_%d_%d_%d.root\" % (dsi.threshDir, dsNum, bkgIdx, runLo, runHi)\n else:\n fname = \"%s/threshDS%d_%d.root\" % (dsi.threshDir, dsNum, bkgIdx)\n if not os.path.isfile(fname):\n print(\"Couldn't find file:\",fname)\n return\n tf = TFile(fname)\n tt = tf.Get(\"threshTree\")\n try:\n n = tt.GetEntries()\n except AttributeError:\n print(\"skipped\",fname)\n continue\n if (n!=1):\n print(\"Hmm, %d thresh table entries? %s\" % (n, fname))\n return\n tt.GetEntry(0)\n\n rLo, rHi = tt.runMin, tt.runMax\n key = \"thresh_ds%d_bkg%d_sub%d\" % (dsNum, bkgIdx, subIdx)\n vals = {}\n print(\"\")\n print(key, runLo, runHi)\n print(\"chan CPD g threshKeV sig ADC err E=0 err nThr nNoise status note\")\n\n # loop over channels\n nGood, nTot = 0, 0\n for i in range(tt.channelList.size()):\n\n # keep only HG channels, exclude pulser monitors\n chan = tt.channelList.at(i)\n if chan%2!=0 or chan in det.getPMon(dsNum):\n continue\n\n # load results\n isGood = 1 if chan in goodChans else 0\n thrKeV, thrSig = tt.threshCal.at(i), tt.sigmaCal.at(i)\n thrADC = tt.threshADC.at(i)\n thrADCErr = tt.threshADCErr.at(i)\n thrStatus = tt.threshFitStatus.at(i)\n thrEvts = tt.numTrigger.at(i)\n sigADC = tt.sigmaADC.at(i)\n sigADCErr = tt.sigmaADCErr.at(i)\n sigStatus = tt.sigmaFitStatus.at(i)\n sigEvts = tt.numNoise.at(i)\n calScale = tt.CalOffset.at(i)\n calOffset = tt.CalScale.at(i)\n\n # Error handling\n isBad = False\n status = \"\"\n if thrStatus==999999 or sigStatus==999999 or thrEvts < 10 or sigEvts < 10:\n status = \"Not enough events\"\n isBad = True\n elif (0 < thrStatus < 999999) or (0 < sigStatus < 999999):\n status = \"auto-fit fail\"\n isBad = True\n elif int(thrKeV)==99999 or int(thrSig)==99999:\n status = \"Fit fail\"\n isBad = True\n elif int(thrKeV)==999999:\n status = \"Bad energy calibration\"\n isBad = True\n elif thrKeV < 0.3:\n status = \"Unphysical threshold\"\n isBad = True\n elif thrEvts < 100 or sigEvts < 100:\n status = \"Low events\"\n # this is ok\n if not isBad: nGood += 1\n nTot += 1\n\n # pretty print the results table\n if int(thrKeV) > 99998:\n print(\"%d %s %d %-7.0f s %-7.0f %-4.3f %-6.3f %.2f %-6.2f %-8d %-8d %d %d %d %s\" % (chan, det.getChanCPD(dsNum,chan), isGood, thrKeV, thrSig, thrADC, thrADCErr, sigADC, sigADCErr, thrEvts, sigEvts, thrStatus,sigStatus, int(isBad), status))\n else:\n print(\"%d %s %d %-7.3f s %-7.3f %-4.3f %-6.3f %.2f %-6.2f %-8d %-8d %d %d %d %s\" % (chan, det.getChanCPD(dsNum,chan), isGood, thrKeV, thrSig, thrADC, thrADCErr, sigADC, sigADCErr, thrEvts, sigEvts, thrStatus, sigStatus, int(isBad), status))\n\n # fill the dict vals\n vals[chan] = [float(\"%.5f\" % thrKeV), float(\"%.5f\" % thrSig), int(isBad)]\n\n print(\"good detectors: %d/%d\" % 
(nGood, nTot))\n\n # fill the DB\n if fillDB:\n dsi.setDBRecord({\"key\":key, \"vals\":vals}, forceUpdate=True, calDB=calDB, pars=pars)\n\n tf.Close()\n # return",
"def prune_graph(G, threshold=0.15, max_depth=5):\n en = [x for x in G.nodes() if G.degree(x)==1] # endnodes\n avg_th = nx.shortest_path_length(G, en[0], en[1], weight='thick') / \\\n nx.shortest_path_length(G, en[0], en[1], weight='weight')\n th = nx.shortest_path_length(G, en[0], en[1], weight='thick')\n \n def _neighborhood(G, node, n):\n # https://stackoverflow.com/questions/22742754/finding-the-n-degree-neighborhood-of-a-node\n path_lengths = nx.single_source_dijkstra_path_length(G, node, weight=None)\n return [node for node, length in path_lengths.iteritems() if length == n]\n # 1) find neighbors\n deep_neighbors = [_neighborhood(G, en[0], max_depth)[0], \n _neighborhood(G, en[1], max_depth)[0]]\n en_candidates = [list(nx.shortest_simple_paths(G, en[0], deep_neighbors[0]))[0][1:],\n list(nx.shortest_simple_paths(G, en[1], deep_neighbors[1]))[0][1:]]\n \n # compute thickness of all neighbor nodes\n paththick0 =[nx.shortest_path_length(G, en[0], p, weight='thick') for p in en_candidates[0]]\n pathlen0 = [nx.shortest_path_length(G, en[0], p, weight='weight') for p in en_candidates[0]]\n paththick1 =[nx.shortest_path_length(G, en[1], p, weight='thick') for p in en_candidates[1]]\n pathlen1 = [nx.shortest_path_length(G, en[1], p, weight='weight') for p in en_candidates[1]]\n avgthick0 = [paththick0[i]/pathlen0[i] for i in range(max_depth)]\n avgthick1 = [paththick1[i]/pathlen1[i] for i in range(max_depth)]\n \n # 2) add to remove list all the nodes below threshold of avg thickness\n idx_rm0 = [i for i in range(len(avgthick0)) if avgthick0[i] < avg_th*threshold ]\n idx_rm1 = [i for i in range(len(avgthick1)) if avgthick1[i] < avg_th*threshold ]\n # 3) add to remove list all the nodes that have deep edge less thick than \"shallow\" edge\n idx_rm0 += [i for i in range(len(avgthick0) - 1) if avgthick0[i]>avgthick0[i+1]]\n idx_rm1 += [i for i in range(len(avgthick1) - 1) if avgthick1[i]>avgthick1[i+1]]\n \n # 4) remove list of nodes that are indexed\n idx_max0 = 0 if not idx_rm0 else max(idx_rm0) #rm nothing if empty rm array\n idx_max1 = 0 if not idx_rm1 else max(idx_rm1)\n en_rm0 = ([en[0]] + en_candidates[0])[:idx_max0]\n en_rm1 = ([en[1]] + en_candidates[1])[:idx_max1]\n Grm = G.copy()\n Grm.remove_nodes_from(en_rm0 + en_rm1)\n \n return Grm",
"def recursive_threshold_search(\n metric_name, metric_val, y_proba, y_true, sample_weights=None, verbose=False\n):\n ts_next = np.linspace(0, 1, 11)\n prev_min = -1\n prev_max = 999\n ts_final = None\n n_points = 5\n it = 0\n eps_rel = 1e-3\n while True:\n it += 1\n ts, trps, fprs, purities = calc_metrics(\n ts_next, y_proba, y_true, sample_weights\n )\n\n if metric_name == \"score\" or metric_name == \"proba\":\n vals = ts\n elif metric_name == \"eff\":\n vals = trps\n elif metric_name == \"mistag_rate\":\n vals = fprs\n elif metric_name == \"purity\":\n vals = purities\n else:\n raise ValueError(f\"illegal value for `metric_name`: {metric_name}\")\n\n idx = np.argmin(abs(vals - metric_val))\n if abs(vals[idx] - metric_val) / max(metric_val, 1e-10) < eps_rel:\n if verbose:\n print(f\"finish with t={ts[idx]}, v={vals[idx]} [target={metric_val}]\")\n break\n\n if it > 10:\n if verbose:\n print(\n f\"finish with t={ts[idx]}, v={vals[idx]} [target={metric_val}] [due to REP]\"\n )\n break\n\n prev_min = np.min(vals)\n prev_max = np.max(vals)\n\n if idx == 0:\n ts_next = np.linspace(ts[0], ts[1], n_points)\n continue\n if idx == len(ts) - 1:\n ts_next = np.linspace(ts[-2], ts[-1], n_points)\n continue\n\n if (vals[idx] - metric_val) * (vals[idx + 1] - metric_val) < 0:\n pair = ts[idx], ts[idx + 1]\n ts_next = np.linspace(min(pair), max(pair), n_points)\n elif (vals[idx] - metric_val) * (vals[idx - 1] - metric_val) < 0:\n pair = ts[idx], ts[idx - 1]\n ts_next = np.linspace(min(pair), max(pair), n_points)\n if abs(vals[idx] - metric_val) / max(metric_val, 1e-10) > 10 * eps_rel:\n print(\n f\"Warning: returning {vals[idx]} while target was {metric_val}, relative diff. = {abs(vals[idx]-metric_val) / max(metric_val, 1e-10)}\"\n )\n return ts[idx], vals[idx]",
"def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n path_to_point = {}\n cost_to_point = {}\n\n # Get the start node\n start_node = problem.getStartState()\n fringe_node = [start_node]\n path_to_point[start_node] = []\n cost_to_point[start_node] = problem.getCostOfActions(path_to_point[start_node])\n\n goal_found = False\n\n while(not goal_found):\n #for i in range(100): \n nodes_to_expand = set()\n # get max value node in the fringe node\n min_val = float(\"inf\")\n for one_node in fringe_node:\n # Compute the cost to reach a node\n if cost_to_point[one_node] < min_val:\n min_val = cost_to_point[one_node]\n \n for one_node in fringe_node:\n # Compute the cost to reach a node\n if cost_to_point[one_node] == min_val:\n nodes_to_expand.add(one_node)\n fringe_node.remove(one_node)\n\n # Expand the fringe node \n for one_node in nodes_to_expand:\n path_to_parent = path_to_point[one_node]\n for nxt_node in problem.getSuccessors(one_node):\n pos = nxt_node[0]\n mv = nxt_node[1]\n # check if point already present in path to point\n prev_cost = float(\"inf\")\n if pos in cost_to_point:\n prev_cost = cost_to_point[pos]\n new_path = path_to_parent + [mv]\n if prev_cost > problem.getCostOfActions(new_path):\n path_to_point[pos] = new_path\n cost_to_point[pos] = problem.getCostOfActions(new_path)\n fringe_node.append(pos)\n\n # Check if destination is reached in the fringe node\n for one_node in fringe_node:\n if problem.isGoalState(one_node):\n final_node = one_node\n goal_found = True\n break\n \n #print(len(fringe_node))\n print(final_node)\n print(path_to_point[final_node])\n return path_to_point[final_node] \n\n util.raiseNotDefined()",
"def prim_MST():\n node_list = list(node_set)\n s = random.choice(node_list)\n\n val_s = -exp_node_weights[s][1] + exp_node_weights[s][0] # value of starting node\n value = 0 # MST value\n cost = 0 # MST cost\n add_cost = 0\n compensation = 0 # compensation for edges that are counted twice\n\n prev = [0]*no_nodes\n dist = [math.inf]*no_nodes\n S = set()\n H = Heap(no_nodes)\n H.insert(s, val_s, 0)\n dist[s] = val_s\n\n for v in range(no_nodes):\n H.insert(v, math.inf, 0)\n\n while H.size > 0:\n v = H.delete_min()\n if v[1] > 0: # min in Heap is of positive value, i.e., a cost, abort\n break\n\n # abort if out of budget\n cost += exp_node_weights[v[0]][0]\n if cost > budget:\n #print(value, compensation)\n MST_value = -(value-compensation)\n return (S, MST_value)\n\n # complementarity\n for node in S:\n for adjacent in exp_graph[node]:\n if adjacent[0] == v[0]:\n value -= adjacent[1]\n\n S.add(v[0])\n value += v[1]\n compensation += v[2] # necessary since edge weight was added to node quality already\n\n for w in exp_graph[v[0]]:\n if not w[0] in S:\n if dist[w[0]] > w[1]:\n d = -w[1] - exp_node_weights[w[0]][1] + exp_node_weights[w[0]][0] # negate for maximum spanning tree\n if d > 0: # bad/costly node\n continue\n dist[w[0]] = d\n comp = -w[1]\n prev[w[0]] = v[0]\n H.decrease_key(w[0], dist[w[0]], comp)\n\n MST_value = -(value-compensation)\n del H\n return (S, MST_value, cost, add_cost)",
"def thresh_setup():\n pass",
"def greedy_learn_search(self,db,labels):\n queue = PriorityQueue()\n dolowmem = (self.lowmem == True)\n numidsets = 0\n root_ids = range(len(labels))\n queue.push((self.root,root_ids),len(labels))\n numnodes = 1\n deepest = 0\n err = 0\n while len(queue) > 0 and numnodes+2 <= self.maxnodes:\n #print \"%d nodes, priority %d\"%(numnodes,queue.nextkey())\n nerr = queue.nextkey()\n (node,trainingset) = queue.pop()\n #print \"Greedy learn\",len(trainingset)\n if trainingset is None:\n trainingset = self.identify_examples(db,labels,node)\n if node.depth >= self.maxdepth or len(trainingset) <= self.minexamples:\n #print \" Hit depth or training set limit\"\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n continue\n features = self.feature_subset(node,db,labels,trainingset)\n cost = node.pick_best_split(db,labels,trainingset,features)\n numidsets -= len(trainingset)\n #do a split\n if node.type == 'v':\n continue\n elif node.type == 's':\n #discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in trainingset:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #determine whether to switch to low-memory mode\n if not dolowmem and self.lowmem=='auto':\n for v,vids in Eids.iteritems():\n numidsets += len(vids)+len(noneids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n\n\n numnodes += len(Eids)\n #print \"Split sizes\",[len(v) for v in Eids.itervalues()]\n #print \"None size\",len(noneids)\n for v,vids in Eids.iteritems():\n #print \"->\",len(vids),\"+\",len(noneids)\n #recurse\n c = DecisionTreeNode(node)\n node.children[v] = c\n err = misclassification_error([labels[id] for id in vids+noneids])\n cids = (None if dolowmem else vids+noneids)\n queue.push((c,cids),err)\n if c.depth > deepest:\n deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n else:\n #do an inequality split\n assert node.type == 'i',\"Got a weird type? 
\"+str(node.type)\n leftids = []\n rightids = []\n for id in trainingset:\n val = db[node.feature,id]\n if val is not None:\n if val <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(leftids)==0 or len(rightids)==0:\n print \"node feature \"+str(node.feature)+\" doesn't have a valid split value \"+str(node.value)\n vals = [db[node.feature,id] for id in trainingset if db[node.feature,id]!=None]\n print \"min,max of training set:\",min(vals),max(vals)\n print \"cost is\",cost\n raw_input()\n assert len(leftids) > 0 and len(rightids) > 0\n if not dolowmem and self.lowmem=='auto':\n numidsets += len(leftids) + len(rightids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n numnodes += 2\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n node.children = {0:c1,1:c2}\n #print \"->\",len(leftids)\n #print \"->\",len(rightids)\n err1 = misclassification_error([labels[id] for id in leftids])\n err2 = misclassification_error([labels[id] for id in rightids])\n if dolowmem:\n leftids = None\n rightids = None\n queue.push((c1,leftids),err1)\n queue.push((c2,rightids),err2)\n if c1.depth > deepest:\n deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n #end of recursion. for the rest of the nodes still in the queue, make them leaf nodes\n if len(queue) > 0:\n print \"%d nodes remaining in queue, setting to leaves\"%(len(queue),)\n for (node,trainingset) in queue:\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n return err",
"def test_ada_boost_stump_classify_partitions_gt(self):\n i = 1\n range_min = self.data_matrix[:, i].min()\n threshold = (range_min * 2)\n inequal = 'gt'\n returned = ada_boost.stump_classify(self.data_matrix,\n i,\n threshold,\n inequal)\n expected = np.mat([-1.0, 1.0, 1.0, 1.0])\n\n delta_between_elements = returned - expected.T\n self.assertFalse(delta_between_elements.any())",
"def get_GNS_cut_reduced(self):\n # we build the optimization around the casted digraph instead of multidigraph\n # for simplicity\n G = self.base_digraph\n s_1 = self.sources[0]\n s_2 = self.sources[1]\n t_1 = self.destinations[0]\n t_2 = self.destinations[1]\n edges = G.edges()\n nodes = G.nodes()\n\n try:\n\n # Great an gurobi instance of the optimization model\n m = Model(\"GNS\")\n m.setParam('OutputFlag', False)\n\n x_v = {}\n # vertex variables for s_1, t_1 cut\n for v in nodes:\n x_v[v] = m.addVar(vtype=GRB.BINARY)\n\n y_v = {}\n # vertex variables for s_2, t_2 cut\n for v in nodes:\n y_v[v] = m.addVar(vtype=GRB.BINARY)\n\n z_v = {}\n # vertex variables for s_2, t_1 cut\n for v in nodes:\n z_v[v] = m.addVar(vtype=GRB.BINARY)\n\n e = {}\n # GNS indicator variable\n for (u,v) in edges:\n e[u,v] = m.addVar(vtype=GRB.BINARY, obj=G[u][v]['capacity'])\n\n # Done with decision variable creation\n # update model\n m.update()\n\n # Constraints\n # 1. Constraints for s_1 - t_1 cut\n for (u,v) in edges:\n if (u,v) == (s_1, t_1):\n m.addConstr(e[u,v] >= 1)\n elif u == s_1:\n m.addConstr(x_v[v] + e[u,v] >= 1)\n elif v == t_1:\n m.addConstr(-x_v[u] + e[u,v] >= 0)\n else:\n m.addConstr(x_v[v] - x_v[u] + e[u,v] >= 0)\n\n if (u,v) == (s_2, t_2):\n m.addConstr(e[u,v] >= 1)\n elif u == s_2:\n m.addConstr(y_v[v] + e[u,v] >= 1)\n elif v == t_2:\n m.addConstr(-y_v[u] + e[u,v] >= 0)\n else:\n m.addConstr(y_v[v] - y_v[u] + e[u,v] >= 0)\n\n if (u,v) == (s_2, t_1):\n m.addConstr(e[u,v] >= 1)\n elif u == s_2:\n m.addConstr(z_v[v] + e[u,v] >= 1)\n elif v == t_1:\n m.addConstr(-z_v[u] + e[u,v] >= 0)\n else:\n m.addConstr(z_v[v] - z_v[u] + e[u,v] >= 0)\n\n m.optimize()\n\n if m.status == GRB.status.OPTIMAL:\n #print \"Min GNS cut value = \" + str(m.objVal)\n #print \"GNS cut edges:\"\n cut_set_edges = []\n for u,v in edges:\n if e[u,v].x != 0:\n #print (u,v), str(G[u][v]['capacity'])\n cut_set_edges.append((u,v, G[u][v]['capacity']))\n return (m.objVal, cut_set_edges)\n else:\n # something went wrong...err...\n print \"Something was wrong\"\n return None, None\n\n except GurobiError:\n print ('Error report from Gurobi')",
"def run_trial(\n num_nodes=1000,\n graph_type=GraphType.SCALEFREE,\n repression_type=RepressionType.NODE_REMOVAL,\n threshold_type=ThresholdType.NORMAL,\n **kwargs,\n):\n # BUILD GRAPH\n if graph_type == GraphType.SCALEFREE:\n graph = scale_free_graph(num_nodes, kwargs[\"scaling_parameter\"])\n elif graph_type == GraphType.WATTS_STROGATZ:\n graph = nx.watts_strogatz_graph(num_nodes, kwargs[\"k\"], kwargs[\"p\"])\n elif graph_type == GraphType.POWERLAW_CLUSTER:\n graph = nx.powerlaw_cluster_graph(num_nodes, kwargs[\"m\"], kwargs[\"p\"])\n\n # POPULATE GRAPH WITH AGENTS\n if threshold_type == ThresholdType.FIXED:\n\n def threshold_fn():\n return kwargs[\"threshold\"]\n\n elif threshold_type == ThresholdType.UNIFORM:\n threshold_fn = np.random.random\n elif threshold_type == ThresholdType.NORMAL:\n\n def threshold_fn():\n # TODO: make these kwargs\n return max(0, np.random.normal(0.25, 0.122))\n\n graph = populate_graph(graph, threshold_fn)\n\n # some global properties\n total_nodes = len(graph.nodes())\n # dict: {node: degree}\n centralities = nx.degree_centrality(graph)\n eigen_centralities = nx.eigenvector_centrality_numpy(graph)\n # betweenness = nx.betweenness_centrality(graph)\n # dict: {node: community ID}\n partition = community.best_partition(graph)\n community_sizes = defaultdict(int)\n for node in partition:\n community_sizes[partition[node]] += 1\n\n # INITIALIZE\n active_nodes = set([])\n seed_node = np.random.choice(graph.nodes())\n nodes_to_activate = [seed_node]\n nodes_to_activate.extend(list(graph[seed_node].keys()))\n activate_nodes(graph, nodes_to_activate, active_nodes)\n\n # record some info\n initial_neighborhood = graph.subgraph(nodes_to_activate)\n initial_size = len(initial_neighborhood.nodes())\n initial_density = nx.density(initial_neighborhood)\n initial_neighborhood_clustering = nx.average_clustering(initial_neighborhood)\n # clustering of initial nodes inside main graph\n initial_nodes_clustering = nx.average_clustering(graph, nodes=nodes_to_activate)\n # seed_degree = graph.degree[seed_node]\n initial_degrees = [graph.degree[node] for node in nodes_to_activate]\n initial_mean_degree = sum(initial_degrees) / len(initial_degrees)\n initial_median_degree = np.median(initial_degrees)\n seed_eigen_centrality = eigen_centralities[seed_node]\n initial_mean_eigen = sum(\n [eigen_centralities[node] for node in nodes_to_activate]\n ) / len(nodes_to_activate)\n # threshold statistics\n initial_mean_threshold = sum(\n [graph.nodes[node][\"agent\"].threshold for node in initial_neighborhood]\n ) / len(nodes_to_activate)\n initial_neighbors = set(\n [neighbor for neighbor in graph[node] for node in initial_neighborhood]\n )\n initial_neighbors_mean_threshold = sum(\n [graph.nodes[node][\"agent\"].threshold for node in initial_neighbors]\n ) / len(initial_neighbors)\n # initial_mean_betweenness = sum([betweenness[node] for node in nodes_to_activate]) / len(nodes_to_activate)\n # initial global measures\n initial_global_clustering = nx.average_clustering(graph)\n # avg_shortest_path = nx.average_shortest_path_length(graph)\n\n # DEFINE REPRESSION\n if repression_type == RepressionType.NODE_REMOVAL:\n\n def repress(graph, active_nodes):\n repress_node_removal(\n graph, active_nodes, kwargs[\"repression_rate\"], centralities\n )\n\n elif repression_type == RepressionType.EDGE_REMOVAL:\n\n def repress(graph, active_nodes):\n repress_edge_removal(graph, active_nodes, kwargs[\"repression_rate\"])\n\n # initial repression\n repress(graph, active_nodes)\n\n # store initial 
information\n communities = protesting_communities(partition, active_nodes)\n initial_dict = {\n \"initial_size\": initial_size,\n \"initial_density\": initial_density,\n \"initial_neighborhood_clustering\": initial_neighborhood_clustering,\n \"initial_nodes_clustering\": initial_nodes_clustering,\n # 'seed_degree': seed_degree,\n \"seed_eigen_centrality\": seed_eigen_centrality,\n \"initial_median_degree\": initial_median_degree,\n \"initial_mean_degree\": initial_mean_degree,\n \"initial_mean_eigen_centrality\": initial_mean_eigen,\n \"initial_mean_threshold\": initial_mean_threshold,\n \"initial_neighbors_mean_threshold\": initial_neighbors_mean_threshold,\n # 'initial_mean_betweenness_centrality': initial_mean_betweenness,\n \"num_communities\": len(set(partition.values())),\n \"total_nodes\": total_nodes,\n \"initial_global_clustering\": initial_global_clustering,\n \"active_nodes\": len(active_nodes),\n \"communities_with_protesters\": len(communities),\n \"mean_community_protest_percent\": mean_community_protest_size(\n community_sizes, communities\n ),\n \"time_step\": 0,\n }\n data = pd.DataFrame(initial_dict, index=[0])\n\n # get ready\n num_iters = 0\n stop = False\n\n # MAIN LOOP\n while not stop:\n\n # deactivate nodes that no longer should be active\n # because repression removes edges, formerly active can be de-activated\n # by no longer surpassing their threshold\n to_deactivate = set([])\n for node in active_nodes:\n if not should_be_active(graph, node):\n graph.nodes[node][\"agent\"].active = False\n to_deactivate.add(node)\n\n # initial neighborhood nodes can never be deactivated\n to_deactivate -= set(initial_neighborhood.nodes())\n active_nodes -= to_deactivate\n\n # Get set of neighbors that could be activated\n nodes_to_activate = []\n neighbors_set = []\n\n for node in active_nodes:\n neighbors_set.extend(list(graph[node].keys()))\n # only activate new candidates that are not already active\n neighbors_set = set(neighbors_set) - active_nodes\n\n for neighbor in neighbors_set:\n if (\n should_be_active(graph, neighbor)\n and not graph.nodes[neighbor][\"agent\"].active\n ):\n nodes_to_activate.append(neighbor)\n\n if nodes_to_activate == []:\n stop = True\n else:\n num_iters += 1\n activate_nodes(graph, nodes_to_activate, active_nodes)\n # repression\n repress(graph, active_nodes)\n # get info\n communities = protesting_communities(partition, active_nodes)\n iter_dict = dict(initial_dict)\n iter_dict[\"active_nodes\"] = len(active_nodes)\n iter_dict[\"time_step\"] = num_iters\n # TODO: record other measures per time step here!\n iter_dict[\"communities_with_protesters\"] = communities_with_protesters(\n partition, active_nodes\n )\n iter_dict[\"communities_with_protesters\"] = len(communities)\n iter_dict[\"mean_community_protest_percent\"] = mean_community_protest_size(\n community_sizes, communities\n )\n data = data.append(iter_dict, ignore_index=True)\n\n data[\"num_iters\"] = num_iters\n return data",
"def do_we_need_to_reoptimize(MFE):\n # check that we found a solution and run optimizer again if not\n MFE.CalculateMeritFunction()\n Nop = MFE.NumberOfOperands\n REOPTIMIZE = False\n for j in range(6):\n op = MFE.GetOperandAt(Nop - j)\n contribution = op.Contribution\n print(\"Contribution %i: %1.2e\" % (j, contribution))\n REOPTIMIZE = REOPTIMIZE or (contribution > 1e-7)\n op_margin = MFE.GetOperandAt(Nop - 7)\n reached_target = np.isclose(op_margin.Value,\n op_margin.Target, atol=10)\n print(\"Margin: %1.2e\" % op_margin.Value)\n REOPTIMIZE = REOPTIMIZE or not reached_target\n\n op_equa = MFE.GetOperandAt(Nop - 8)\n reached_target = op_equa.Value < 10\n print(\"Avg Deviation from edge shape: %1.2f\" % op_equa.Value)\n REOPTIMIZE = REOPTIMIZE or not reached_target\n return REOPTIMIZE"
]
| [
"0.7837865",
"0.7225295",
"0.60356164",
"0.58807117",
"0.58703625",
"0.5838802",
"0.58160937",
"0.572131",
"0.5690975",
"0.56728786",
"0.5653771",
"0.5638937",
"0.55792546",
"0.55695164",
"0.5542509",
"0.55252403",
"0.5513242",
"0.5506854",
"0.54791296",
"0.54750746",
"0.54656166",
"0.5456098",
"0.54497033",
"0.5448815",
"0.54418397",
"0.541356",
"0.5399885",
"0.5397564",
"0.5387228",
"0.5381751"
]
| 0.79966146 | 0 |
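Each row of this dump pairs a natural-language query with a positive code document, a list of mined negative documents, their similarity scores, and finally the positive document's own score and rank (the two trailing columns above). As a rough illustration of how such a row could feed the triplet objective recorded in the metadata, here is a minimal sketch; the field names are hypothetical stand-ins for however the dataset is actually loaded, not guaranteed column names.

def row_to_triplets(row, max_negatives=5):
    # `row` is assumed to be a dict with keys mirroring this dump:
    # 'query', 'document', 'negatives', 'negative_scores' (names are illustrative).
    query = row["query"]
    positive = row["document"]
    # pair each negative with its score and keep the hardest (highest-scoring) ones first
    ranked = sorted(
        zip(row["negatives"], map(float, row["negative_scores"])),
        key=lambda pair: pair[1],
        reverse=True,
    )
    return [(query, positive, neg) for neg, _ in ranked[:max_negatives]]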
Test for graph thresholding based on the economical method. | def test_graphs_threshold_eco():
# Groundtruth
expected_filt = np.load("groundtruth/graphs_threshold/eco_filtered.npy")
expected_bin = np.load("groundtruth/graphs_threshold/eco_binary.npy")
# Data
graph = np.load("sample_data/graphs_threshold/graph2.npy")
# Run
filtered, binary, _ = threshold_eco(graph)
# Test
np.testing.assert_array_equal(expected_filt, filtered)
np.testing.assert_array_equal(expected_bin, binary) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def evaluate(self, threshold=0.5):\n pass",
"def apply_thresholding(x):\n return x > threshold_otsu(x)",
"def test_graphs_threshold_global_cost_efficiency():\n\n # Groundtruth\n expected = np.load(\"groundtruth/graphs_threshold/gce.npy\")\n\n # Data\n graph = np.load(\"sample_data/graphs_threshold/graph.npy\")\n\n # Run\n iterations = 50\n binary_mask, _, _, _, _ = threshold_global_cost_efficiency(graph, iterations)\n\n # Test\n np.testing.assert_array_equal(expected, binary_mask)",
"def calibrate_threshold(test_graphs):\r\n best_threshold = None\r\n best_result = None\r\n for threhold in range(1, 50):\r\n cur_res = evaluate_argument_mention(test_graphs, threhold)\r\n if (best_result is None) or (cur_res > best_result):\r\n best_result = cur_res\r\n best_threshold = threhold\r\n return (best_threshold, best_result)",
"def global_threshold(img, threshold_method):\n pass",
"def test_soft_threshold():\n assert snet.soft_threshold(10, 100) == 0\n assert snet.soft_threshold(-10, 100) == 0\n assert snet.soft_threshold(10, 3) == 7\n assert snet.soft_threshold(-10, 3) == -7",
"def test_using_ego_graph(self):\n assert_equal(nx.local_efficiency(self.G3), 7 / 12)",
"def test_thresholding_args():\n from sleepecg._heartbeat_detection import _thresholding\n filtered_ecg = np.arange(100)\n integrated_ecg = np.arange(100)\n fs = 10\n\n _thresholding(filtered_ecg, integrated_ecg, fs)\n _thresholding(filtered_ecg, integrated_ecg, fs=fs)\n _thresholding(filtered_ecg, integrated_ecg=integrated_ecg, fs=fs)\n _thresholding(filtered_ecg=filtered_ecg, integrated_ecg=integrated_ecg, fs=fs)",
"def gp_optimize_threshold(gp_model, X_val, y_val, X_scaler, y_scaler, optimize_for=\"profits\"): \n y_hat, conf = gp_model.predict(X_val)\n regressed_payment = y_scaler.inverse_transform(y_hat).reshape(-1)\n loan_amt = X_scaler.inverse_transform(X_val)[:,0]\n\n # This ratio is a guage of how likely a person will pay back.\n # It is compared with a threshold to determine whether or not to loan.\n payment_to_loan_ratio = regressed_payment / loan_amt\n\n # Sort in descending order\n sorted_ind = np.argsort(-payment_to_loan_ratio)\n sorted_payment_to_loan_ratio = payment_to_loan_ratio[sorted_ind]\n X_sorted, y_sorted = X_val[sorted_ind,:], y_val[sorted_ind]\n\n threshold, highest_opt_val = 0, 0\n for i, thresh in enumerate(sorted_payment_to_loan_ratio): \n X_loanee = X_sorted[:i+1,:]\n y_loanee = y_sorted[:i+1]\n \n loan_amt_loanee = np.sum(X_scaler.inverse_transform(X_loanee)[:,0])\n payments_loanee = np.sum(y_loanee)\n\n # Optimize for different values\n if optimize_for == \"profits\":\n opt_val = payments_loanee - loan_amt_loanee\n elif optimize_for == \"profit_percentage\":\n opt_val = (payments_loanee - loan_amt_loanee) / loan_amt_loanee\n else:\n raise Exception(\"Illegal optimize_for value: %s\" % optimize_for)\n\n # Keep track of highest value (that is being optimized for)\n if opt_val > highest_opt_val:\n threshold = thresh\n highest_opt_val = opt_val\n return threshold",
"def perform_thresholding(f,M,type):\n if type == \"largest\":\n a = np.sort(np.ravel(abs(f)))[::-1] #sort a 1D copy of F in descending order\n T = a[M]\n y = f*(abs(f) > T)\n elif type == \"soft\":\n s = abs(f) - M\n s = (s + abs(s))/2\n y = np.sign(f)*s\n elif type == \"hard\":\n y = f*(abs(f) > M)\n return y",
"def test_am_threshold(Simulator, plt, seed, rng):\n d = 64\n vocab = Vocabulary(d, pointer_gen=rng)\n vocab.populate('A; B; C; D')\n\n d2 = int(d / 2)\n vocab2 = Vocabulary(d2, pointer_gen=rng)\n vocab2.populate('A; B; C; D')\n\n def input_func(t):\n return '0.49 * A' if t < 0.1 else '0.8 * B'\n\n with spa.Network('model', seed=seed) as m:\n m.am = ThresholdingAssocMem(\n threshold=0.5, input_vocab=vocab, output_vocab=vocab2,\n function=filtered_step_fn, mapping='by-key')\n m.stimulus = spa.Transcode(input_func, output_vocab=vocab)\n m.stimulus >> m.am\n\n in_p = nengo.Probe(m.am.input)\n out_p = nengo.Probe(m.am.output, synapse=0.03)\n\n with Simulator(m) as sim:\n sim.run(0.3)\n t = sim.trange()\n below_th = t < 0.1\n above_th = t > 0.25\n\n plt.subplot(2, 1, 1)\n plt.plot(t, similarity(sim.data[in_p], vocab))\n plt.ylabel(\"Input\")\n plt.subplot(2, 1, 2)\n plt.plot(t, similarity(sim.data[out_p], vocab2))\n plt.plot(t[above_th], np.ones(t.shape)[above_th] * 0.9, c='g', lw=2)\n plt.ylabel(\"Output\")\n\n assert np.mean(sim.data[out_p][below_th]) < 0.01\n assert_sp_close(t, sim.data[out_p], vocab2['B'], skip=0.25, duration=0.05)",
"def test_graphs_threshold_omst_global_cost_efficiency2():\n # the function is optmized at the 3rd OMST, so it is going to yeild the same results\n # as the exhaustive search\n\n # Groundtruth\n expected = np.load(\"groundtruth/graphs_threshold/omst_gce.npy\")\n\n # Data\n graph = np.load(\"sample_data/graphs_threshold/graph.npy\")\n\n # Run\n n_msts = 5\n _, CIJtree, _, _, _, _, _, _ = threshold_omst_global_cost_efficiency(\n graph, n_msts=n_msts\n )\n\n # Test\n np.testing.assert_array_equal(expected, CIJtree)",
"def test_am_threshold(Simulator, plt, seed, rng):\n D = 64\n vocab = Vocabulary(D, rng=rng)\n vocab.parse('A+B+C+D')\n\n D2 = int(D / 2)\n vocab2 = Vocabulary(D2, rng=rng)\n vocab2.parse('A+B+C+D')\n\n def input_func(t):\n return vocab.parse('0.49*A').v if t < 0.1 else vocab.parse('0.8*A').v\n\n with nengo.Network('model', seed=seed) as m:\n am = AssociativeMemory(vocab, vocab2, threshold=0.5)\n in_node = nengo.Node(output=input_func, label='input')\n nengo.Connection(in_node, am.input)\n\n in_p = nengo.Probe(in_node)\n out_p = nengo.Probe(am.output, synapse=0.03)\n\n sim = Simulator(m)\n sim.run(0.3)\n t = sim.trange()\n below_th = t < 0.1\n above_th = t > 0.25\n\n plt.subplot(2, 1, 1)\n plt.plot(t, nengo.spa.similarity(sim.data[in_p], vocab))\n plt.ylabel(\"Input\")\n plt.legend(vocab.keys, loc='best')\n plt.subplot(2, 1, 2)\n plt.plot(t, nengo.spa.similarity(sim.data[out_p], vocab2))\n plt.plot(t[above_th], np.ones(t.shape)[above_th] * 0.8, c='g', lw=2)\n plt.ylabel(\"Output\")\n plt.legend(vocab.keys, loc='best')\n\n assert similarity(sim.data[in_p][below_th], vocab.parse(\"A\").v) > 0.48\n assert similarity(sim.data[in_p][above_th], vocab.parse(\"A\").v) > 0.79\n assert similarity(sim.data[out_p][below_th], vocab2.parse(\"0\").v) < 0.01\n assert similarity(sim.data[out_p][above_th], vocab2.parse(\"A\").v) > 0.79",
"def test_graphs_threshold_omst_global_cost_efficiency():\n # the function is optmized at the 3rd OMST.\n\n # Groundtruth\n expected = np.load(\"groundtruth/graphs_threshold/omst_gce.npy\")\n\n # Data\n graph = np.load(\"sample_data/graphs_threshold/graph.npy\")\n\n # Run\n _, CIJtree, _, _, _, _, _, _ = threshold_omst_global_cost_efficiency(\n graph, n_msts=None\n )\n\n # Test\n np.testing.assert_array_equal(expected, CIJtree)",
"def getThreshold(self): # real signature unknown; restored from __doc__\n pass",
"def hysteresis_thresholding(image, image_gradients, min_val, max_val):\n\tprint(\"BEFORE HYSTERISIS THRESHOLDING:\", image)\n\tprint(\"gradients:\", image_gradients)\n\n\tlargest_gradient_value = np.max(image_gradients)\n\twhile largest_gradient_value < max_val:\n\t\tprint(\"Largest gradient value:\", largest_gradient_value)\n\t\twarnings.warn(UserWarning(\"Image has no edge gradients above upper threshold, increasing all gradients values!\"))\n\t\t# return np.zeros_like(image)\n\t\timage_gradients *= 1.5\n\t\tlargest_gradient_value = np.max(image_gradients)\n\t\n\t# print(\"Largest gradient value:\", largest_gradient_value)\n\t# the set of all 'strong' indices.\n\tstrong_indices = indices_where(image_gradients >= max_val)\n\toff_indices \t= indices_where(image_gradients < min_val)\n\tweak_indices \t= indices_where((min_val <= image_gradients) & (image_gradients < max_val))\n\t\n\timage_height = image.shape[0]\n\timage_width = image.shape[1]\n\n\t# get the neighbours of all strong edges.\n\t# convert their neighbours with weak edges to strong edges.\n\tto_explore = np.zeros_like(image_gradients, dtype=bool)\n\tto_explore[index_with(strong_indices)] = True\n\n\texplored = np.zeros_like(image_gradients, dtype=bool)\n\n\tstrong = np.zeros_like(image_gradients, dtype=bool)\n\tstrong[index_with(strong_indices)] = True\n\t# print(\"strong:\", strong)\n\n\tweak = np.zeros_like(image_gradients, dtype=bool)\n\tweak[index_with(weak_indices)] = True\n\n\tunexplored_indices = aggregate(np.nonzero(to_explore))\n\t# print(\"unexplored (initial):\", [str(v) for v in unexplored])\n\t# print(\"weak indices (initial):\", [str(v) for v in weak_indices])\n\t# print(\"off indices (initial):\", [str(v) for v in off_indices])\n\talready_explored = np.zeros_like(to_explore)\n\n\twhile len(unexplored_indices) > 0:\n\t\t\n\t\t# print(\"exploring indices \", [str(v) for v in indices])\n\t\t# print(indices)\n\n\t\tneighbours = neighbourhood(unexplored_indices, image_width, image_height)\n\t\tis_neighbour = np.zeros_like(weak)\n\t\tis_neighbour[index_with(neighbours)] = True\n\t\tis_weak_neighbour = is_neighbour & weak\n\t\tweak_neighbours = aggregate(np.nonzero(is_weak_neighbour))\n\t\t# weak_neighbours = common_rows_between(neighbours, weak_indices)\n\n\t\t# print(\"The neighbours of (\", \",\".join(str(pixel) for pixel in indices), \") are \", neighbours)\n\t\t# print(\"weak neighbours:\", [str(v) for v in weak_neighbours])\n\t\t\n\t\tstrong[index_with(weak_neighbours)] = True\n\t\tweak[index_with(weak_neighbours)] = False\n\t\t# mark that we need to explore these:\n\t\t\n\t\talready_explored[index_with(unexplored_indices)] = True\n\t\t# explore the indices of the weak neighbours, if they haven't been explored already.\n\t\tto_explore[index_with(weak_neighbours)] = True\n\t\t# do not re-explore already explored indices.\n\t\tto_explore &= ~already_explored\n\t\t\n\t\tunexplored_indices = aggregate(np.nonzero(to_explore))\n\t\n\tout = np.zeros_like(image_gradients)\n\tout[~strong] = 0\n\tout[strong] = 255\n\tprint(\"AFTER HYSTERISIS THRESHOLDING:\", out)\n\treturn out",
"def test_efficiency_disconnected_nodes(self):\n assert_equal(nx.efficiency(self.G1, 1, 2), 0)",
"def test_signal_threshold(df_phys, signal, threshold):\n df_signal = df_phys[df_phys[\"Signal\"] == signal][\"Physical Value\"]\n\n stats = df_signal.agg([\"count\", \"min\", \"max\", \"mean\", \"std\"])\n delta = stats[\"max\"] - stats[\"min\"]\n\n if delta > threshold:\n print(f\"{signal} exhibits a 'max - min' delta of {delta} exceeding threshold of {threshold}\")",
"def test_unknown_thresholding(self):\n self.cube.coord(var_name=\"threshold\").attributes[\n \"spp__relative_to_threshold\"\n ] = \"between\"\n msg = \"Probabilities to percentiles only implemented for\"\n with self.assertRaisesRegex(NotImplementedError, msg):\n Plugin()._probabilities_to_percentiles(self.cube, self.percentiles)",
"def threshold(self,thresholdValue):\n # TO DO\n pass",
"def get_threshold(self):\n rgs = self.dynamics.regimes\n for r in rgs:\n if(r.initial==True): main_regime = r\n elif(r.initial==False): refractory_regime = r\n roc = main_regime.event_handlers\n threshcond = \"\"\n for oc in roc:\n if(type(oc) is lems.OnCondition):\n threshcond = self.replace_operators(oc.test)\n else: threshcond=None\n return threshcond",
"def check_convergency(self):\n if self.vars['ema_trace'][self.vars['step']] <= self.settings[\"emaSpeedTol\"]:\n return True\n else:\n return False",
"def hysteresis_threshold(image, g_theta):\n threshImage = np.array(image)\n def checkPoint(x, y):\n x1,y1,x2,y2 = 0,0,0,0\n if g_theta[x,y] == 90:\n x1,y1 = x,y-1\n x2,y2 = x,y+1\n elif g_theta[x,y] == -45:\n x1,y1 = x-1,y+1\n x2,y2 = x+1,y-1\n elif g_theta[x,y] == 0:\n x1,y1 = x+1,y\n x2,y2 = x-1,y\n elif g_theta[x,y] == 45:\n x1,y1 = x-1,y-1\n x2,y2 = x+1,y+1\n else:\n print(\"ERROR!!\")\n if (threshImage[x1,y1] > t_min) & (threshImage[x1,y1] < t_max):\n start_pts[x1,y1] = edgePoint\n threshImage[x1,y1] = edgePoint\n checkPoint(x1, y1)\n \n if (threshImage[x2,y2] > t_min) & (threshImage[x2,y2] < t_max):\n start_pts[x2,y2] = edgePoint\n threshImage[x2,y2] = edgePoint\n checkPoint(x2, y2)\n return\n \n t_min = .0001\n t_max = .2\n edgePoint = 1.0\n sz = np.shape(threshImage)\n start_pts = np.zeros((sz[0],sz[1]))\n #sz = np.shape(start_pts)\n for i in range(0, sz[0]-1):\n for j in range(0,sz[1]-1):\n if threshImage[i,j] >= t_max:\n start_pts[i,j] = edgePoint\n checkPoint(i, j)\n result = start_pts*image\n return result",
"def get_exponential_detection_thresholds():\n \n m = utils.MAX_DETECTION_THRESHOLD\n n = utils.NUM_DETECTION_THRESHOLDS\n y = np.exp(np.log(m) / n)\n return y ** np.arange(1, n + 1)",
"def find_metric_threshold(self):\n logger.info(\"compute metric threshold\")\n\n ### Beaucoup trop lent quand on a beaucoup de models ###\n\n df_results_not_aggregated = self.result_reader.load_all_results(aggregate=False)\n\n if len(df_results_not_aggregated) == 0:\n logger.info(\"threshold = None\")\n return None\n\n main_scorer = \"test_%s\" % self.job_config.main_scorer\n (df_results_not_aggregated[main_scorer].fillna(df_results_not_aggregated[main_scorer].min(), inplace=True))\n min_cv = df_results_not_aggregated.groupby(\"job_id\")[main_scorer].min().values\n delta_min_max_cv = np.median(\n df_results_not_aggregated.groupby(\"job_id\")[main_scorer].apply(lambda x: x.max() - x.min())\n )\n\n if len(min_cv) <= self.min_nb_of_models:\n logger.info(\"threshold = None\")\n return None\n\n min_cv = -np.sort(-min_cv)\n result = min_cv[self.min_nb_of_models] - delta_min_max_cv\n\n # result = np.percentile( min_cv, self._get_quantile(len(min_cv)) * 100)\n # TODO : ici peut etre faire une estimation parametric du quantile avec un Kernel, plus smooth et moins sensible quand peu de valeurs\n\n logger.info(\"threshold : %2.2f\" % result)\n return result",
"def determine_threshold(yval,pval):\n\n F1 = 0\n epsilon = 0\n for _epsilon in np.linspace(min(pval),max(pval),1000):\n ## Compute stats\n _F1,stats = evaluate_epsilon(yval,pval,_epsilon)\n\n if _F1 > F1:\n F1 = _F1\n epsilon = _epsilon\n print(\"Better threshold found! {} ==> F1 {}\".format(epsilon,F1))\n \n return epsilon, F1",
"def decide(el, il, model, threshold):\n\n if model == 0:\n return el >= threshold[0] and il >=threshold[1]\n elif model == 1:\n return el >= threshold[0] or il >= threshold[1]\n elif model == 2:\n return harmonic_mean([el, il]) >= harmonic_mean(threshold)\n else:\n return bool(round(random.random()))",
"def evaluation(model_path, threshold):\n classifier = joblib.load(model_path)\n\n positive = np.load(\"./processed_data/validation/positive.npy\")\n unlabeled = np.load(\"./processed_data/validation/unlabeled.npy\")\n\n p_result = np.array(classifier.predict_proba(positive[:, :-1])[:, 1])\n plt.hist(p_result, bins=300)\n plt.show()\n\n tp_rate = np.where(p_result >= threshold, 1, 0).sum() / p_result.shape[0]\n print(tp_rate)\n\n u_result = np.array(classifier.predict_proba(unlabeled[:, :-1])[:, 1])\n plt.hist(u_result, bins=300)\n plt.show()\n\n\n # the following steps aim to filter 'possible' negative instances in the evaluation-unlabeled set\n stageone_classifier = joblib.load(\"./solver_result/liblinear/0.01/logistic.pkl\")\n stgone_result = np.array(stageone_classifier.predict_proba(unlabeled[:,:-1])[:, 1])\n possibly_negative = unlabeled[np.where(stgone_result <= _negative_threshold)]\n print(positive.shape)\n print(unlabeled.shape)\n print(possibly_negative.shape)\n possi_ng_result = np.array(classifier.predict_proba(possibly_negative[:, :-1])[:, 1])\n fp_rate = np.where(possi_ng_result >= threshold, 1, 0).sum() / possi_ng_result.shape[0]\n plt.hist(possi_ng_result, bins=300)\n plt.show()\n\n print(fp_rate)\n print(\"TP: \" + str(tp_rate) + \" FP: \" + str(fp_rate) + \" GMean: \" + str(math.sqrt(tp_rate * (1 - fp_rate))))",
"def plotThresholds (df, attack_df): \n global episod_limit\n \n ret = getThresholds (df, attack_df)\n thresholds = ret[0]\n rewards = ret[1]\n rewards_constant = ret[2]\n\n plt.plot(np.arange (0, episod_limit + 2, 1), thresholds, marker = 'None',\n linestyle = '-', color = 'k', label = 'Threshold')\n plt.xlabel ('Time')\n plt.ylabel ('Threshold')\n plt.grid ()\n plt.legend (loc='best')\n plt.savefig (\"figures/threshold.png\")\n plt.close ()\n return (rewards, rewards_constant, thresholds)",
"def threshold(activation):\n if activation >= 0.0:\n return 1\n else:\n return 0"
]
| [
"0.6416092",
"0.63370824",
"0.62137026",
"0.6199239",
"0.6188419",
"0.6137018",
"0.6124308",
"0.6087063",
"0.6036492",
"0.6019357",
"0.60054135",
"0.59911543",
"0.5983166",
"0.59308624",
"0.59052336",
"0.59034336",
"0.5896829",
"0.588666",
"0.58629286",
"0.5836026",
"0.5806771",
"0.5782334",
"0.57769865",
"0.57760894",
"0.5773685",
"0.5755745",
"0.5718859",
"0.57165986",
"0.57153946",
"0.57151943"
]
| 0.657838 | 0 |
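The `test_graphs_threshold_eco` record above unpacks three return values from `threshold_eco` (a weighted filtered matrix, a binary mask, and a third value), but the implementation itself is not part of this dump. The sketch below shows one hypothetical density-based reading of an "economical" filter with the same return shape, keeping roughly `edges_per_node` of the strongest connections per node; it is only an illustration and is not the function that produced the groundtruth arrays.

import numpy as np

def threshold_eco_sketch(graph, edges_per_node=1.5):
    # Hypothetical sketch; the real threshold_eco criterion may differ.
    n = graph.shape[0]
    iu = np.triu_indices(n, k=1)                 # undirected edge list (upper triangle)
    weights = graph[iu]
    n_keep = min(int(edges_per_node * n), weights.size)
    cutoff = np.sort(weights)[::-1][n_keep - 1]  # weight of the n_keep-th strongest edge
    binary = np.where(graph >= cutoff, 1.0, 0.0)
    np.fill_diagonal(binary, 0.0)                # no self-connections
    filtered = graph * binary
    density = binary[iu].sum() / weights.size
    return filtered, binary, density

# Same call shape as in the test above, on random symmetric weights
rng = np.random.default_rng(0)
w = rng.random((8, 8))
w = (w + w.T) / 2.0
filtered, binary, density = threshold_eco_sketch(w)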
Returns True if and only if `expr` contains only correctly matched delimiters, else returns False. | def check_delimiters(expr):
delim_openers = '{([<'
delim_closers = '})]>'
### BEGIN SOLUTION
s = Stack()
for c in expr:
if c in delim_openers:
s.push(c)
elif c in delim_closers:
try:
t = s.pop()
if delim_openers.find(t) != delim_closers.find(c):
return False
except:
return False
return s.empty()
### END SOLUTION | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_delimiters(expr):\n s = Stack()\n newExpr = expr.replace(\" \", \"\")\n if len(newExpr) ==1:\n return False\n else:\n for c in newExpr:\n if c in delim_openers:\n s.push(c)\n elif c in delim_closers:\n toCheck = delim_openers[delim_closers.index(c)]\n if toCheck in s and s.empty() == False:\n s.pop()\n else:\n return False\n return s.empty()",
"def postfix_valid(postfix_expr):\n expr = postfix_expr.split()\n count = 0\n if postfix_expr == \"\":\n return False\n for token in expr:\n if token[0] in '0123456789':\n count += 1\n elif token == '~':\n pass\n else: # all other binary operators\n count -= 1\n if count < 0:\n return False\n if count == 1:\n return True\n return False",
"def expression(value):\n if re.match('`.*`$', value):\n return True\n else:\n return False",
"def isRegexPossible(self):\n if self._lastToken is None:\n # No token has been produced yet: at the start of the input,\n # no division is possible, so a regex literal _is_ possible.\n return True\n\n if self._lastToken.type == ECMAScriptLexer.Identifier or \\\n self._lastToken.type == ECMAScriptLexer.NullLiteral or \\\n self._lastToken.type == ECMAScriptLexer.BooleanLiteral or \\\n self._lastToken.type == ECMAScriptLexer.This or \\\n self._lastToken.type == ECMAScriptLexer.CloseBracket or \\\n self._lastToken.type == ECMAScriptLexer.CloseParen or \\\n self._lastToken.type == ECMAScriptLexer.OctalIntegerLiteral or \\\n self._lastToken.type == ECMAScriptLexer.DecimalLiteral or \\\n self._lastToken.type == ECMAScriptLexer.HexIntegerLiteral or \\\n self._lastToken.type == ECMAScriptLexer.StringLiteral or \\\n self._lastToken.type == ECMAScriptLexer.PlusPlus or \\\n self._lastToken.type == ECMAScriptLexer.MinusMinus:\n # After any of the tokens above, no regex literal can follow.\n return False\n else:\n # In all other cases, a regex literal _is_ possible.\n return True",
"def is_delim_cell(cell, delim, begin):\n if not is_raw_cell(cell):\n return False\n source = get_source(cell)\n if begin:\n return bool(re.match(BEGIN_REGEXES[delim], source[0], flags=re.IGNORECASE))\n return bool(re.match(END_REGEXES[delim], source[0], flags=re.IGNORECASE))",
"def no_operators(expression):\n OPERATORS = set('+-*/')\n for i in expression:\n if i in OPERATORS:\n return True\n raise NotValidExpression('Not a valid expression, no operators')",
"def is_matched(expression):\n\n balance = []\n\n for char in expression:\n if char == \"{\" or char == \"[\" or char == \"(\":\n balance.append(char)\n\n elif char == \"}\":\n if balance[-1] == \"{\":\n balance.pop()\n else:\n return False\n\n elif char == \"]\":\n if balance[-1] == \"[\":\n balance.pop()\n else:\n return False\n\n elif char == \")\":\n if balance[-1] == \"(\":\n balance.pop()\n else:\n return False\n\n if len(balance) == 0:\n return True",
"def isMatched(expr):\n pass",
"def isMatched(expr):\n S = Stack()\n n = len(expr)\n\n for i in range (0,n):\n \tsymb = expr[i] #next symbol\n \t# print(symb)\n\n \tif symb in ['{','(','[']:\n \t\tS.Push(symb)\n\n \telif symb in ['}',')',']']:\n\n \t\tif S.isEmpty():\n \t\t\treturn False\n \t\tif S.Top() == '{' and symb == '}':\n \t\t\tS.Pop()\n \t\telif S.Top() == '(' and symb == ')':\n \t\t\tS.Pop()\n \t\telif S.Top() == '[' and symb == ']':\n \t\t\tS.Pop()\n\n \telse:\n \t\tcontinue\n\n if S.isEmpty():\n \treturn True\n else:\n \treturn False\n\n # \telif symb in range(48,58):\n # \t\tcontinue\n\n # \telif symb in ['+','-','*','/','%']:\n # \t\tcontinue\n\n # \telse:\n # \t\tprint(\"Error\") \n # \t\treturn 0",
"def is_expression(self):\r\n return conf.lib.clang_isExpression(self)",
"def isValid(self, s: str) -> bool:\n open_brackets = [] # stack of open but not closed brackets\n for char in s:\n if char in ['(', '[', '{']:\n open_brackets.append(char)\n else:\n try:\n complement = open_brackets.pop()\n except IndexError:\n return False\n else:\n if complement + char not in ['()', '[]', '{}']:\n return False\n return not len(open_brackets)",
"def regexp(expr, item):\n reg = re.compile(expr)\n return reg.search(item) is not None",
"def no_paranthesis(expression):\n for i in expression:\n if i in '()':\n return True\n raise NotValidExpression('Not a valid expression, no paranthesis')",
"def treat_as_expression(self):\n special_chars = '{}()[]=<>.\"\\'/'\n return any(special_char in self.key for special_char in special_chars)",
"def parsable_as_expression(self):\n return self.parsable and self.expression_ast_node is not None",
"def expr():\n def p(tok):\n # commas can appear in quantified variables\n return not(tok.value in [';','.'])\n return reparse('expr').process(c.balanced_condition(p))",
"def contain_op(self, expr):\n return expr in self.table.inv",
"def check(self): # full program\n r = re.compile('(?!(^(((?!;)[A-Z][+-]?\\d+(\\.\\d+)?\\s?)*(\\s*;\\s.*)?)$))')\n for line in self.blocks:\n if r.match(line) and line and line != '\\r' and line != '\\n':\n return False\n return True",
"def test_pattern(pattern, fields):\n if not pattern: # \"empty\" pattern\n return True\n\n def eval_exp(text):\n m = re.match(r'^(\\$(\\d+))?(!)?/([^/]*)/$', text)\n try:\n if m: # regular expression\n _, num, neg, pat = m.groups()\n num = int(num) if num else 0 # if no `$i` specified, default to `$0`\n m = re.search(pat, fields[num])\n logging.info(u\"regex: '%s' %s~ /%s/\" % (fields[num], neg or u'', pat))\n return bool(m) != bool(neg)\n else: # expression\n exp = translate_fields(text, fields, u'_') # replace non-exist `$i` with u'_'\n logging.info(u'exp: %s' % exp)\n return bool(exp and eval(exp))\n except Exception, e:\n logging.debug(unicode(e))\n return False\n\n if u',' not in pattern: # \"regular expression\" or \"expression\" pattern\n return eval_exp(pattern)\n else: # \"begpat, endpat\" pattern\n global SWITCH_ON\n\n value = False\n\n begpat, endpat = [s.strip() for s in pattern.split(u',')]\n if eval_exp(begpat):\n SWITCH_ON = True\n if SWITCH_ON:\n value = True\n if eval_exp(endpat):\n SWITCH_ON = False\n\n return value",
"def IsValid(self):\n return not TickerFull.DelimiterSplit in self.Text",
"def invalid_characters(expression):\n CHARACTERS = '0123456789()+-/*'\n for i in expression:\n if i not in CHARACTERS:\n raise NotValidExpression('Not a valid expression, invalid characters inserted')\n return True",
"def isValid(self, s):\n # 1\n if not s:\n return True\n\n # 2\n if not len(s) % 2 == 0:\n return False\n\n comp = {\n ')': '(',\n ']': '[',\n '}': '{',\n }\n stack = []\n\n for char in s:\n # 3\n if char in comp:\n popped = stack.pop() if stack else '@'\n if popped != comp[char]:\n return False\n # 2\n else:\n stack.append(char)\n\n return not stack",
"def has_expression(self):\n return self._expression is not None",
"def no_numbers(expression):\n NUMBERS = '0123456789'\n for i in expression:\n if i in NUMBERS:\n return True\n raise NotValidExpression('Not a valid expression, no numbers')",
"def _match_regex_list(subject, expressions):\n for expr in expressions:\n if re.search(expr, subject):\n return True\n return False",
"def isValid(self, s: str) -> bool:\n stack = []\n \n mapping = {\n \")\":\"(\",\n \"}\":\"{\",\n \"]\":\"[\"\n }\n \n for char in s:\n if char not in mapping:\n stack.append(char)\n \n else:\n top_element = stack.pop() if stack else \"#\"\n \n if mapping[char] != top_element:\n return False\n \n return not stack",
"def _is_regex_match(s, pat):\n\n pat = pat.rstrip()\n m = re.search(Settings._REPAT, pat)\n if m:\n flags_combined = 0\n if m.group('flag'):\n char_to_flag = {\n 'A':re.A, 'I':re.I, 'L':re.L, 'M':re.M, 'S':re.S, 'X':re.X}\n for flag in list(m.group('flag')):\n flags_combined |= char_to_flag[flag]\n return bool(re.search(m.group('pat'), s, flags_combined))\n raise InvalidRegexError(pat)",
"def _IsValidStatement(stmt_str):\n return (STMT_STR_RE.match(stmt_str) and\n '--' not in stmt_str)",
"def isOperand(self, token):\n if len(token) == 1:\n if token in self.operands:\n return True\n elif len(token) > 1:\n validChars = self.operands + '+-'\n for eachChar in token:\n if eachChar not in validChars:\n return False\n return True",
"def _has_matched_brackets(text: str) -> bool:\n open_bracket_stack = []\n for index, _ in enumerate(text):\n if text[index] == \"[\":\n open_bracket_stack.append(index)\n elif text[index] == \"]\":\n if len(open_bracket_stack) == 0:\n return False\n open_bracket_stack.pop()\n return len(open_bracket_stack) == 0"
]
| [
"0.7133583",
"0.6683183",
"0.62793124",
"0.6187382",
"0.6016125",
"0.5943181",
"0.582906",
"0.5734482",
"0.572137",
"0.57109636",
"0.5685574",
"0.56454945",
"0.56169343",
"0.55317014",
"0.5522385",
"0.55221826",
"0.5514102",
"0.54350483",
"0.5418066",
"0.5408045",
"0.53892976",
"0.5374862",
"0.5347442",
"0.5266096",
"0.52659965",
"0.5240628",
"0.5223862",
"0.5213138",
"0.5210839",
"0.51787436"
]
| 0.7151421 | 0 |
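The `check_delimiters` solution in the record above relies on a `Stack` class defined elsewhere in the original exercise. Assuming a minimal list-backed stack with the same `push`/`pop`/`empty` interface, the calls below illustrate the intended behaviour of the function as written.

class Stack:
    # Minimal list-backed stack with the interface check_delimiters assumes.
    def __init__(self):
        self._items = []
    def push(self, item):
        self._items.append(item)
    def pop(self):
        return self._items.pop()   # raises IndexError on empty, which check_delimiters catches
    def empty(self):
        return len(self._items) == 0

# With the check_delimiters from the record above in scope:
assert check_delimiters('< { ( [ ] ) } >')   # every closer matches the most recent opener
assert not check_delimiters('( ]')           # mismatched pair
assert not check_delimiters('(')             # opener left unclosed
assert not check_delimiters(')')             # closer with nothing on the stack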
Returns the postfix form of the infix expression found in `expr` | def infix_to_postfix(expr):
# you may find the following precedence dictionary useful
prec = {'*': 2, '/': 2,
'+': 1, '-': 1}
ops = Stack()
postfix = []
toks = expr.split()
### BEGIN SOLUTION
opp = {'*', '/','+', '-'}
for x in toks:
if str.isdigit(x):
postfix.append(x)
elif ops.empty() or ops.peek() == '(':
ops.push(x)
elif x == '(':
ops.push(x)
elif x == ')':
while not ops.empty():
temp = ops.pop()
if temp == '(':
break
else:
postfix.append(temp)
elif x in opp:
while True:
if prec.get(x) > prec.get(ops.peek()):
ops.push(x)
break
elif prec.get(x) == prec.get(ops.peek()):
postfix.append(ops.pop())
ops.push(x)
break
elif prec.get(x) < prec.get(ops.peek()):
postfix.append(ops.pop())
if ops.empty():
ops.push(x)
break
elif ops.empty():
break
while True:
if not ops.empty():
postfix.append(ops.pop())
else:
break
### END SOLUTION
return ' '.join(str(x) for x in postfix) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def infix_to_postfix(expr):\n ops = Stack()\n postfix = []\n toks = expr.split()\n def tests(chr):\n if chr.isdigit():\n postfix.append(chr)\n\n elif chr == '(':\n ops.push('(')\n\n elif ops.peek() == '(' or ops.empty():\n ops.push(chr)\n\n elif chr ==')':\n while ops.peek() != \"(\":\n postfix.append(ops.pop())\n ops.pop()\n\n elif chr in prec and prec[chr] > prec[ops.peek()]:\n ops.push(chr)\n\n elif chr in prec and prec[chr] == prec[ops.peek()]:\n postfix.append(ops.pop())\n ops.push(chr)\n\n elif chr in prec and prec[chr] < prec[ops.peek()]:\n postfix.append(ops.pop())\n tests(chr)\n\n for tok in toks:\n tests(tok)\n\n\n while not ops.empty():\n postfix.append(ops.pop())\n\n\n return ' '.join(postfix)",
"def infix_to_postfix(self, expr: str) -> str:\n\n # The stack that we will be performing operations on\n stack: list[str] = []\n\n # The output\n output: str = \"\"\n\n # We always need surrounding parentheses\n expr = f\"({expr})\"\n\n # The tokenized expression\n expr = self.tokenize_expr(expr)\n\n\n \n # For every token in expression\n for token in expr:\n # Check what token it is\n if token == \"(\":\n # If it is a (, then append to stack\n stack.append(\"(\")\n elif token == \")\":\n # If it is a ), then iterate over stack\n while stack[-1] != '(':\n # Popping the last item from stack, to output\n # Include a trailing space\n # Until the last item in the stack is a (\n output += f\"{stack.pop()} \"\n # Pop the last ( from the stack\n stack.pop()\n elif re.match(r\"[a-zA-Z_][a-zA-Z0-9_]*\", token):\n # If it matches a name/variable\n # Append to output with a trailing space\n output += f\"{token} \"\n elif re.match(r\"\\d+\",token):\n # If it is a number\n # Then append with a trailing space\n output += f\"{token} \"\n else:\n if self.is_token(token):\n # If it is a token\n # Pop it from the stack while\n # It's priority is smaller than\n # the last priority of the stack\n # Put it into output with a trailing space\n while self.get_token_priority(token) <= self.get_token_priority(stack[-1]):\n output += f\"{stack.pop()} \"\n # And append token to stack\n stack.append(token)\n # Return output\n return output",
"def infixToPostfix(expr, prec):\n ops = Stack()\n postfix = []\n toks = expr.split()\n for t in toks:\n if t.isdigit():\n postfix.append(t)\n elif t == '(':\n ops.push('(')\n elif t == ')':\n op = ops.pop()\n while op != '(':\n postfix.append(op)\n op = ops.pop()\n else:\n while True:\n if ops.empty() or ops.peek() == '(':\n ops.push(t)\n break\n if prec[t] > prec[ops.peek()]:\n ops.push(t)\n break\n elif prec[t] == prec[ops.peek()]:\n postfix.append(ops.pop())\n ops.push(t)\n break\n else:\n postfix.append(ops.pop())\n while not ops.empty():\n postfix.append(ops.pop())\n return postfix",
"def convert_to_postfix(expression):\n infix = list(expression.replace(\" \", \"\"))\n opr_priority = {'!': 4, '*': 3, '+': 2, '>': 1, '=': 1, '(': 0}\n postfix = []\n stack = []\n\n for token in infix:\n if token in string.ascii_uppercase:\n postfix.append(token)\n elif token == '(':\n stack.append(token)\n elif token == ')':\n stack_token = stack.pop()\n while stack_token != '(':\n postfix.append(stack_token)\n stack_token = stack.pop()\n else:\n while stack and (opr_priority[stack[len(stack)-1]] >= opr_priority[token]):\n postfix.append(stack.pop())\n stack.append(token)\n\n while stack:\n postfix.append(stack.pop())\n\n return postfix",
"def infix_to_postfix(infix_expr):\n # Append adds new item to list\n # Concat creates a new list every time instead\n\n opstack = StackArray()\n res = []\n lstr = infix_expr.split()\n # l_para = r_para = 0\n # operator precedence dict\n prec = { # higher val = higher prec\n \"(\" : 4,\n \"^\" : 3, # r-to-l (i.e. 2^3^2 = 2^(3^2) )\n \"~\" : 3, # right-to-left (i.e. -3^2 = -9)\n # '*/+-' are associated left to right\n \"*\" : 2,\n \"/\" : 2,\n \"+\" : 1,\n \"-\" : 1\n }\n for token in lstr:\n if token[0] in '0123456789':\n res.append(token)\n # not opstack.is_empty() guards against IndexError on empty peek\n if not opstack.is_empty() and opstack.peek() == '^':\n res.append(opstack.pop())\n if not opstack.is_empty() and opstack.peek() == '~':\n res.append(opstack.pop())\n elif token == '(':\n # l_para += 1\n opstack.push(token)\n elif token == ')':\n # r_para += 1\n # opstack can't be empty for proper formatted input\n while opstack.peek() != '(':\n res.append(opstack.pop())\n opstack.pop() # remove left paran '('\n else: # token is ^ ~ * / + -: <-- operators\n while not opstack.is_empty() and prec[token] <= prec[opstack.peek()]:\n if opstack.peek() == '(':\n break\n elif token == '^' and opstack.peek() == '~':\n break\n else:\n res.append(opstack.pop())\n opstack.push(token)\n # if l_para != r_para:\n # raise SyntaxError\n while not opstack.is_empty():\n res.append(opstack.pop())\n res = \" \".join(res)\n res.strip()\n return res",
"def postfix_eval(postfix_expr):\n s = StackArray()\n expr = postfix_expr.split()\n for token in expr:\n if token[0] in '0123456789':\n res = token\n s.push(res)\n else: # token is operator\n op2 = s.pop()\n op2 = float(op2)\n if s.is_empty(): # token is ~\n # could also be ~ for non-empty stack\n res = -1 * op2\n else:\n op1 = s.pop()\n op1 = float(op1)\n if token == '^':\n res = op1 ** op2\n elif token == '~':\n s.push(op1)\n res = -1 * op2\n elif token == '*':\n res = op1 * op2\n elif token == '/':\n if op2 == 0:\n raise ZeroDivisionError\n else:\n res = op1 / op2\n elif token == '+':\n res = op1 + op2\n else: # token == '-'\n res = op1 - op2\n s.push(res)\n return res",
"def _get_postfix_notation(self):\n postfix, operators_stack = list(), list() # initialize postfix list and auxiliary stack\n\n for element in self.expression.split():\n if element in self.OPERATORS:\n if operators_stack:\n # while stack isn't empty and \"stack top\" is stronger(e.g. multiplication is stronger than addition)\n # move \"stack top\" into postfix list\n while operators_stack \\\n and operators_stack[-1] in self.OPERATORS \\\n and self.OPERATOR_WEIGHT[operators_stack[-1]] >= self.OPERATOR_WEIGHT[element]:\n postfix.append(operators_stack.pop())\n\n operators_stack.append(element)\n\n elif element == self.BRACKET_LEFT:\n operators_stack.append(element)\n\n elif element == self.BRACKET_RIGHT:\n # searching for left bracket on stack, moving \"stack Top\" to postfix list\n while operators_stack and operators_stack[-1] != self.BRACKET_LEFT:\n postfix.append(operators_stack.pop())\n operators_stack.pop() # remove left bracket\n\n else: # numbers always goes into postfix list\n postfix.append(self._get_number_from_string(element))\n\n if operators_stack: # move others stack elements to postfix list\n postfix.extend(reversed(operators_stack))\n\n return postfix",
"def infix_to_postfix(self, exp):\n\n try:\n for i in exp:\n #if the character is an operand output it\n if self.is_operand(i):\n self.postfix.append(i)\n\n #if the character is '(' push it\n elif i is '(':\n self.push('(')\n\n elif i is ')':\n #if the character is ')\" pop until we encounter '(' in the stack\n while not self.isEmpty() and self.peek() is not '(':\n self.postfix.append(self.pop())\n if not self.isEmpty() and self.peek() is not '(':\n return -1\n else:\n self.pop()\n\n #if an operator is encountered\n else:\n while not self.isEmpty() and self.peek() is not '(' and self.not_greater(i):\n self.postfix.append(self.pop())\n self.push(i)\n while not self.isEmpty():\n self.postfix.append(self.pop())\n\n return ''.join(self.postfix)\n\n except Exception as e:\n print(\"Error occurred while performing infix to postfix conversion :\", e)\n traceback.print_exc()\n return -1",
"def infix_to_postfix(infix:str) -> str:\n stack = deque()\n precedence = {'+':1, '-':1,\n '*':2, '/':2,\n '^':3, '(':-9\n }\n output = \"\"\n for ch in infix:\n if ch not in {'+', '-', '*', '/', '^', '(', ')'}:\n output += ch\n elif ch == '(':\n stack.append(ch)\n elif ch == ')':\n while len(stack) > 0 and\\\n stack[-1] != '(':\n output += stack.pop()\n stack.pop()\n else:\n while len(stack) > 0 and\\\n precedence[stack[-1]] >= precedence[ch]:\n output += stack.pop()\n stack.append(ch)\n while len(stack) > 0:\n output += stack.pop()\n return output",
"def evaluate_infix(string):\n return postfix(infix_to_postfix(string))",
"def post_fix(expr):\n if expr[:3] == \"8 4\":\n return 54\n elif expr[:3] == \"5 6\":\n return 32\n elif expr[:3] == \"1 1\":\n return 2\n \"\"\"normal solution\"\"\"\n lst = expr.split()\n stack = []\n for e in lst:\n if e in \"+-*/\":\n b = stack.pop()\n a = stack.pop()\n stack.append(str(eval(\"{}{}{}\".format(a, e, b))))\n else:\n stack.append(e)\n return round(float(stack.pop()))",
"def infix_to_postfix(input_str): # postfix requires that all operators proceed after the two operands that they work on\n\n \"\"\"Input argument: a string containing an infix expression where tokens are \n space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression \"\"\"\n if input_str is None: raise ValueError\n # Split input string\n term_list = input_str.split()\n #print(\"TERM LIST \",term_list) \n # Create output list, will be fed to postfix_eval() at end\n output_list = []\n # initialize stack large enough to contain all operators\n operator_stack = Stack(len(term_list)//3+1)\n for term in term_list:\n # check for operand, if present append to output list\n if operand_present(term) is True:\n output_list.append(term)\n # check for operator\n elif operator_present(term) or term == '(' or term == ')':\n #if operand_stack.size()<2: \n # raise PostfixFormatException(\"Insufficient operands\")\n # Check for open parentheses\n if term == '(': operator_stack.push(term)\n # Check for closing parentheses, pop stack until open parentheses found\n elif term == ')':\n while 1:\n token = operator_stack.pop()\n if token != '(': \n output_list.append(token)\n else: break\n # Otherwise push to stack but pop any higher/equal order operators\n else:\n sort_operators(term, operator_stack, output_list)\n #print(operator_stack.peek())\n #else: raise PostfixFormatException(\"Invalid token\")\n #if len(term_list) % 3 != 0: raise PostfixFormatException(\"Too many operands\")\n while operator_stack.size() != 0:\n output_list.append(operator_stack.pop())\n new_str = (\" \".join(output_list))\n #print(\"NEW STR \", new_str)\n return new_str",
"def prefix_to_postfix(input_str: str) -> Any:\n \"\"\"Input argument: a string containing a prefix expression where tokens are \n space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression(tokens are space separated)\"\"\"\n stack = Stack(30)\n if input_str == \"\":\n return (\"\")\n op_list = [\"+\", \"-\", \"*\", \"/\", \"<<\", \">>\", \"**\"]\n split_list = input_str.split()\n track = len(split_list) - 1\n while track >= 0:\n new_val = split_list[track].lstrip(\"-\")\n new_val = new_val.replace(\".\", \"\", 1)\n if new_val.isdigit():\n stack.push(split_list[track])\n track = track - 1\n elif split_list[track] in op_list:\n first = stack.pop()\n second = stack.pop()\n stack.push(first + \" \" + second + \" \" + split_list[track])\n track = track - 1\n else:\n break\n postfix = stack.pop()\n return postfix",
"def infix_to_postfix(string):\n \n # Validate and tokenize the string\n tokens = validate(string)\n \n # Initialize the stack\n s = Stack()\n\n # Ready the final postfix expression\n postfix = ''\n \n # List of operators that have to be handled\n operators = ['+', '-', '*', '/', '^', 'sqrt', 'u-', '(', ')']\n \n # Iterate through tokens\n for token in tokens:\n if token in operators:\n if token in ['sqrt', 'u-']:\n # Square root and unary minus have the highest precendence. So\n # they get pushed on to the stack immediately\n s.push(token)\n elif token == '^':\n top = s.peek()\n while top in ['sqrt', 'u-']:\n postfix += s.pop() + ' '\n top = s.peek()\n s.push(token)\n elif token in ['*', '/']:\n # Multiplication and division have the same precedence. Order\n # is determined by order of appearance\n top = s.peek()\n while top in ['sqrt', 'u-', '^']:\n postfix += s.pop() + ' '\n top = s.peek()\n s.push(token)\n elif token in ['+', '-']:\n # Addition and subtraction have the same precedence. Order is\n # determined by order of appearance\n top = s.peek()\n while top in ['sqrt', 'u-', '^', '*', '/']:\n postfix += s.pop() + ' '\n top = s.peek()\n s.push(token)\n elif token == '(':\n s.push(token)\n elif token == ')':\n top = s.peek()\n while top != '(':\n postfix += s.pop() + ' '\n top = s.peek()\n s.pop()\n else: # Token is a number or variable\n postfix += token + ' '\n\n # Pop out any more operators that might be sitting on the stack\n while(len(s)):\n postfix += s.pop() + ' '\n\n # Get rid of trailing whitespace and print\n postfix = postfix.strip()\n return postfix",
"def infix_to_postfix(string_input):\n stack_ops = []\n output = []\n value = \"\"\n\n for item in string_input:\n # item = operator\n if item in ops_prec.keys():\n value = value_to_output(value, output)\n\n # pop elements while they have lower precedence\n while (stack_ops\n and stack_ops[-1] in ops_prec.keys()\n and ops_prec[item] <= ops_prec[stack_ops[-1]]):\n output.append(stack_ops.pop())\n # else put item on stack\n stack_ops.append(item)\n\n # subexpression, delay precedence\n elif item == '(':\n value = value_to_output(value, output)\n\n stack_ops.append(item)\n elif item == ')':\n value = value_to_output(value, output)\n\n # flush output until ( is reached on stack\n while (stack_ops and stack_ops[-1] != '('):\n output.append(stack_ops.pop())\n # remove '('\n stack_ops.pop()\n\n # value = operand\n else:\n # concatenation of value for multidigit ones\n value += item\n # output.append(item) # this would be for one digit\n\n # flush stack to output\n value = value_to_output(value, output)\n\n while stack_ops:\n output.append(stack_ops.pop())\n\n return output",
"def eval_postfix(s):\n stack = Stack()\n \n s = s.split()\n for i in s:\n \tif operator(i) == False:\n \t\tstack.push(int(i))\n \telse:\n \t\tb = stack.pop()\n \t\ta = stack.pop()\n \t\tresult = evaluate(a, i, b)\n \t\tstack.push(result)\n return stack.pop()",
"def toPostfix(infix):\n output = \"\" # Output stack - the numbers in our expression\n operators = \"\" # Operator stack (using string for ease but could be a list)\n precedence = {\"*\": 100, \"/\": 90, \"+\": 80, \"-\": 70, \"(\": 60, \")\": 50} # Operator precedence dictionary - operator characters mapped to an arbitrary numeric value representing their precedence (BOMDAS)\n \n #Loop through characters\n for c in infix:\n #If c is a number\n if (c.isdigit()):\n output += c\n #Else if c is a function - ignoring these for now\n #Else if c is an operator - + - * / might account for x and division ASCII symbol later\n elif c in {\"+\", \"-\", \"*\", \"/\"}:\n # While there is still an operator left at the top of the stack\n # AND the operator at the top of the stack has greater precedence\n # OR the operator at the top of the stack has equal precedence and the token is left associative (don't know what this means, ignoring for now)\n # AND that operator is not a left parenthesis '('\n # Note: \\ tells python that a statement will continue on to the next line\n while len(operators) > 0 and operators[-1] != '(' and precedence[operators[-1]] > precedence[c]:\n # Pop the operator from the operator stack onto the output queue.\n output += operators[-1]\n operators = operators[:-1]\n # Push it onto the operator stack\n operators += c\n # Else if token is a left parenthesis (\n elif c == \"(\":\n # Push c to operator stack\n operators += c\n elif c == \")\":\n while operators[-1] != \"(\":\n # Pop the operator from the operator stack onto the output queue.\n output += operators[-1]\n operators = operators[:-1]\n # If there is a left bracket at the top of the stack, remove it\n if operators[-1] == '(':\n # Pop the operator from the operator stack and discard it\n operators = operators[:-1]\n # if there is a function token at the top of the operator stack... (Ignoring this for now)\n \n # If there are any operators left in the stack, append to output\n while len(operators) > 0:\n # Push operator from top of stack to output\n output += operators[-1]\n # Remove top operator from stack\n operators = operators[:-1]\n return output",
"def infixToRPN(expression):\n stack = Stack()\n RPNList = []\n tokens = expression.split()\n spaces = True\n\n # If no spaces in expression then push each char in a tokens list\n if len(tokens) == 1:\n spaces = False\n tokens = [char for char in expression]\n\n for token in tokens:\n if token in alphabet or token in numbers:\n RPNList.append(token)\n elif token == '(':\n stack.push(token)\n elif token == ')':\n top = stack.pop()\n while top != '(':\n RPNList.append(top)\n top = stack.pop()\n else:\n while (not stack.isEmpty()) and (precedence[stack.peek()] >= precedence[token]):\n RPNList.append(stack.pop())\n stack.push(token)\n\n while not stack.isEmpty():\n RPNList.append(stack.pop())\n\n if spaces:\n return \" \".join(RPNList)\n else:\n return \"\".join(RPNList)",
"def infix_to_prefix(self, expr: str) -> str:\n\n # Reverse expr\n expr = reversed(expr)\n\n # Convert expr to list\n expr = list(expr)\n\n # Reverse all parantheses\n for i, e in enumerate(expr):\n if e == \"(\":\n expr[i] = \")\"\n elif e == \")\":\n expr[i] = \"(\"\n \n # Convert expr back to string\n expr = ''.join(expr)\n\n # Convert expr to postfix\n expr = self.infix_to_postfix(expr)\n\n # Reverse expr again\n expr = reversed(expr)\n\n # Convert expr to string again\n expr = ''.join(expr)\n\n # Return expr\n return expr",
"def prefix_to_postfix(input_str): # prefix requires that all operators precede the two operands that they work on\n\n \"\"\"Input argument: a string containing a prefix expression where tokens are \n space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression(tokens are space separated)\"\"\"\n if input_str is None: raise ValueError\n # split input string into list\n term_list = input_str.split()\n #print(\"TERM LIST \",term_list) \n # initialize output list\n output_list = []\n #print(\"OUT SIZE \", len(output_list))\n # initialize operator stack\n operator_stack = Stack(len(term_list)//3+1)\n for i in range(len(term_list)):\n term = term_list[i]\n # prefix should begin with an operator otherwise raise Exception\n if i == 0:\n if operator_present(term) is True: operator_stack.push(term)\n else: raise PostfixFormatException()\n # Check for operator\n elif operator_present(term): \n operator_stack.push(term)\n # check for operand\n elif operand_present(term):\n output_list.append(term)\n # if previous two terms in output list were operands, pop operator stack to output list once\n if operand_present(term_list[i-1]):\n output_list.append(operator_stack.pop())\n # for every three operands there should be an additional operator\n if operand_present(term_list[i-3]) and operator_stack.size() != 0:\n output_list.append(operator_stack.pop())\n while operator_stack.size() != 0:\n output_list.append(operator_stack.pop())\n new_str = (\" \".join(output_list))\n #print(\"NEW STR \", new_str)\n return new_str",
"def infixToPostfix(inFixStr):\n postFixList = []\n s = Stack()\n chList = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\n prec = {\"(\": 0, \"+\": 1, \"-\": 1, \"*\": 2, \"/\": 2} # operator precedence\n\n tok = inFixStr.split(\" \")\n for ch in tok: # ch can be (,), operand, operator\n if ch in chList: # the easy case when token is an operand\n postFixList.append(ch)\n elif ch == \"(\": # easy case of (\n s.push(ch)\n elif ch == \")\": # keep popping and appending until (\n top = s.pop()\n while top != \"(\":\n postFixList.append(top)\n top = s.pop() # pop next\n else: # now we are at opeartors\n # pop higher order operators first\n while not s.isEmpty() and prec[s.peek()] > prec[ch]:\n postFixList.append(s.pop())\n s.push(ch) # push current opeartor\n\n while not s.isEmpty(): # pop everything else in the stack\n postFixList.append(s.pop())\n return \" \".join(postFixList)",
"def expression_tree(postfix:str) -> Node:\n stack = deque()\n for ch in postfix:\n if ch not in {'+', '-', '*', '/', '^'}:\n stack.append(Node(ch))\n else:\n middle_node = Node(ch)\n right_node = stack.pop()\n left_node = stack.pop()\n middle_node ._right = right_node\n middle_node._left = left_node\n stack.append(middle_node)\n return stack.pop()",
"def infix_to_postfix(s):\n result = \"\" # output string\n op = Stack() # operator stack\n i = 0 # index to 's'\n while i < len(s):\n if s[i] in \"0123456789\":\n while i < len(s) and s[i] in \"0123456789\":\n result += s[i]\n i += 1\n result += \" \"\n continue\n if s[i] == '(':\n op.push(s[i])\n elif s[i] == ')':\n top = op.pop()\n while top != '(':\n result += top + \" \"\n top = op.pop()\n else: # s[i] is +,-,*,/\n while not op.is_empty() and not higher_prec(s[i], op.peek()):\n result += op.pop() + \" \"\n op.push(s[i])\n i += 1\n while not op.is_empty():\n result += op.pop() + \" \"\n return result",
"def postfix_eval(input_str):\n\n \"\"\"Input argument: a string containing a postfix expression where tokens \n are space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns the result of the expression evaluation. \n Raises an PostfixFormatException if the input is not well-formed\"\"\"\n if input_str is None: raise PostfixFormatException\n # create list of operands and operators\n term_list = input_str.split()\n # initialize stack large enough to contain all operands\n operand_stack = Stack(2*len(term_list)//3+1)\n # iterate over term_list\n for term in term_list:\n # check for operatorm, evaluate operators on A & B if True\n if operator_present(term) is True:\n if operand_stack.size()<2: \n raise PostfixFormatException(\"Insufficient operands\")\n B = operand_stack.pop()\n A = operand_stack.pop()\n operand_stack.push(\n calculate(\n A, # A\n B, # B\n term) # operator\n )\n # check for operand, push to stack if True\n elif operand_present(term) is True:\n operand_stack.push(term)\n else: raise PostfixFormatException(\"Invalid token\")\n if len(term_list) % 3 != 0: raise PostfixFormatException(\"Too many operands\")\n return operand_stack.pop()",
"def infix_to_postfix(string):\n tokenlist = string.split()\n output = []\n stack = create_stack()\n for token in tokenlist:\n if token == '(':\n stack.push(token)\n elif token == ')':\n toptoken = stack.pop()\n while toptoken != '(':\n output.append(toptoken)\n toptoken = stack.pop()\n elif token == '*' or token == '/':\n toptoken = stack.top()\n while toptoken in ['*','/']:\n output.append(stack.pop())\n toptoken = stack.top()\n stack.push(token)\n elif token == '+' or token == '-':\n toptoken = stack.top()\n while toptoken in ['*','/','+','-']:\n output.append(stack.pop())\n toptoken = stack.top()\n stack.push(token)\n else:\n output.append(token)\n while stack.length() > 0:\n output.append(stack.pop())\n space= ' '\n newstr = space.join(output)\n return newstr",
"def evaluatePostfixExp(self, postfixExpr):\n\n operandStack = []\n tokenList = postfixExpr.split(\" \")\n\n for token in tokenList:\n if self.isOperand(token):\n if \".\" in token:\n token = float(token)\n else:\n token = int(token)\n operandStack.append(token)\n else: # token is an operator\n operand2 = operandStack.pop()\n operand1 = operandStack.pop()\n try:\n result = self.applyOperator(operand1, operand2, token)\n except Exception as error:\n print(\"Invalid input. Please enter a valid arithmetic expression.\") # Most likely division by\n # zero error.\n return\n operandStack.append(result)\n return operandStack.pop()",
"def infix_to_postfix(input_str: str) -> Any:\n \"\"\"Input argument: a string containing an infix expression where tokens are \n space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression \"\"\"\n stack = Stack(30)\n if input_str == '':\n return ''\n op_list = [\"+\", \"-\", \"*\", \"/\", \"<<\", \">>\", \"**\"]\n order = {}\n order[\"+\"] = 1\n order[\"-\"] = 1\n order[\"*\"] = 2\n order[\"/\"] = 2\n order[\"**\"] = 3\n order[\"<<\"] = 4\n order[\">>\"] = 4\n pfix_str = ''\n split_list = input_str.split()\n for i in split_list:\n new_val = i.lstrip(\"-\")\n new_val = new_val.replace(\".\", \"\", 1)\n if new_val.isdigit() and pfix_str == \"\":\n pfix_str = pfix_str + i\n elif i in op_list:\n if not stack.is_empty():\n p = stack.peek()\n while 0 < stack.size():\n p = stack.peek()\n if p == \"(\":\n break\n if i == \"**\":\n if order[p] <= order[i]:\n break\n else:\n p1 = stack.pop()\n pfix_str = pfix_str + \" \" + p1\n elif order[p] < order[i]:\n break\n else:\n p2 = stack.pop()\n pfix_str = pfix_str + \" \" + p2\n stack.push(i)\n elif i == \"(\":\n stack.push(i)\n elif new_val.isdigit():\n pfix_str = pfix_str + \" \" + i\n elif i == \")\":\n p = stack.peek()\n while p != \"(\":\n pfix_str = pfix_str + \" \" + stack.pop()\n if not stack.is_empty():\n p = stack.peek()\n stack.pop()\n while not stack.is_empty():\n pop3 = stack.pop()\n pfix_str = pfix_str + \" \" + pop3\n return pfix_str",
"def eval_postfix(s):\n stack = Stack()\n for x in s.split(): # rozděl 's' dle mezer\n if x == '+':\n stack.push(stack.pop() + stack.pop())\n elif x == '-':\n stack.push(-stack.pop() + stack.pop())\n elif x == '*':\n stack.push(stack.pop() * stack.pop())\n elif x == '/':\n second = stack.pop()\n stack.push(stack.pop() / second)\n else:\n stack.push(float(x))\n return stack.pop()",
"def infixToPostfix(infix):\n postfix = []\n stackArr = []\n scanOperand = False\n hasIntegral = False\n hasDecimal = False\n currentOperand = 0\n decimal = 1\n for ch in infix:\n currentPrio = charPrio(ch)\n if currentPrio < 0: # current ele is operand\n if not (ch.isdigit() or ch == '.'):\n inputError()\n return\n if not scanOperand:\n scanOperand = True\n if ch == '.':\n if not hasIntegral:\n formatError()\n return\n hasDecimal = True\n continue\n if hasDecimal:\n if ch == '.':\n formatError()\n return\n currentOperand = currentOperand + 0.1 ** decimal * int(ch)\n decimal += 1\n else:\n if not hasIntegral:\n hasIntegral = True\n currentOperand = currentOperand * 10 + int(ch)\n elif currentPrio == 0:\n # none operation\n pass\n else:\n # and operand into postfix expression\n if scanOperand:\n scanOperand = False\n hasDecimal = False\n hasIntegral = False\n decimal = 1\n postfix.append(currentOperand)\n currentOperand = 0\n # handle operator\n if isEmpty(stackArr):\n push(stackArr, ch) # push into stack\n elif currentPrio > prio[peek(stackArr)]:\n push(stackArr, ch) # push into stack\n elif currentPrio == 1: # ')'\n while (not isEmpty(stackArr)) and currentPrio <= prio[peek(stackArr)]:\n ele = pop(stackArr)\n if ele != '(':\n postfix.append(ele) #pop out of stack, then add into postfix expression\n else:\n break\n else:\n while (not isEmpty(stackArr)) and currentPrio <= prio[peek(stackArr)] and prio[peek(stackArr)] < 5 :\n ele = pop(stackArr)\n if ele != '(' or ele != ')':\n postfix.append(ele) #pop out of stack, then add into postfix expression\n push(stackArr, ch) # push into stack\n if scanOperand:\n postfix.append(currentOperand)\n while not isEmpty(stackArr):\n ele = pop(stackArr)\n if ele != '(' or ele != ')':\n postfix.append(ele) #pop out of stack, then add into postfix expression\n return postfix",
"def toPostfix (self,infix):\n postfix = []\n stack = []\n # Loop over characters in the input string\n for char in infix:\n # If char is a number add it to postfix\n if isFloat(char):\n postfix.append(char)\n # If its a special number add it to postfix\n elif char in Calculator.specialNumbers:\n postfix.append(char)\n # If char is a function push it onto the stack\n elif char in Calculator.functions:\n stack.append(char)\n # If the char is a function argument separator (,) pop operators off the stack onto\n # postfix until ( is reached\n elif char == ',':\n while stack[-1] != '(':\n postfix.append(stack.pop())\n # If the size of the stack reaches 0 without finding a ( there are unmatched brackets.\n if len(stack) == 0:\n return \"Unmatched Error\"\n # If char is an operator O\n elif char in Calculator.operators:\n # While there is an operator, P, on the top of stack\n while len(stack)>0 and stack[-1] in Calculator.operators:\n stackTop = stack[-1]\n precChar = Calculator.operators[char][1]\n precStackTop = Calculator.operators[stackTop][1]\n # If O in -?+* and its precedence is <= P, pop P off stack\n if char in Calculator.operators and precChar <= precStackTop:\n postfix.append(stack.pop())\n else:\n break\n # Push O onto stack\n stack.append(char)\n # If char is (, push it onto the stack\n elif char == '(':\n stack.append(char)\n # If char is )\n elif char == ')':\n # If the size of the stack reaches 0 without finding a ( there are unmatched brackets.\n if len(stack) == 0:\n return \"Unmatched Error\"\n # While top of stack isn't ( pop operators off the top of the stack\n while stack[-1] != '(':\n postfix.append(stack.pop())\n # If the size of the stack reaches 0 without finding a ( there are unmatched brackets.\n if len(stack) == 0:\n return \"Unmatched Error\"\n # Pop ( off the stack, but not onto output queue\n stack.pop()\n # If the token at the top of the stack is a function pop it off the stack and add to postfix\n if len(stack) > 0 and stack[-1] in Calculator.functions:\n postfix.append(stack.pop())\n # Finally pop all the operators off the stack onto postfix\n while len(stack)>0:\n # If the operator on the top of the stack is () then there are unmatched brackets\n if stack[-1] in '()':\n return \"Unmatched Error\"\n postfix.append(stack.pop())\n return postfix"
]
| [
"0.81900203",
"0.7801753",
"0.76920533",
"0.7429643",
"0.7404279",
"0.7347274",
"0.7156811",
"0.7048756",
"0.6956311",
"0.6874034",
"0.68503463",
"0.67862564",
"0.67233485",
"0.6722634",
"0.6648669",
"0.6607104",
"0.65920854",
"0.6575015",
"0.65612596",
"0.65422934",
"0.6498457",
"0.64755744",
"0.64750266",
"0.6450152",
"0.64474684",
"0.64419186",
"0.6420652",
"0.64007676",
"0.6388358",
"0.63871604"
]
| 0.7979371 | 1 |
Determines whether or not a project exists at the specified path | def project_exists(response: 'environ.Response', path: str) -> bool:
    if os.path.exists(path):
        return True
    response.fail(
        code='PROJECT_NOT_FOUND',
        message='The project path does not exist',
        path=path
    ).console(
        """
        [ERROR]: Unable to open project. The specified path does not exist:
        {path}
        """.format(path=path)
    )
    return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def exists(repo_path):\n\n if not ProjectRepo.existing_git_repository(repo_path):\n cprint(' - Project is missing', 'red')",
"def is_project_created(path):\n project_id = None\n try:\n with open(\"%s%sproject\"\n % (path, os.sep)) as project_file:\n project_id = project_file.readline().strip()\n try:\n project_id = bigml.api.get_project_id(\n project_id)\n return True, project_id\n except ValueError:\n return False, None\n except IOError:\n return False, None",
"def check_project_exists(self, project):\n session = self.session_factory()\n exists = session.query(PipelineRun).filter_by(project=project).first()\n session.close()\n if exists:\n return True\n return False",
"def add_project_path() -> bool:\n project_path = Path('.')\n cur_path = Path(project_path.absolute())\n for parent in cur_path.parents:\n if 'Pipfile' in [obj.name for obj in parent.glob('*')]:\n project_path = Path(parent.absolute())\n break\n\n src_path = project_path.joinpath('src')\n\n if project_path == '.':\n LOGGER.warning(\"Can't find project_path\")\n return False\n\n if src_path not in sys.path:\n sys.path.append(str(src_path.absolute()))\n return project_path",
"def Exists(self, path: str) -> bool:\n ...",
"def path_exists(path):\r\n return os.path.exists(path)",
"def path_exists(path):\n return os.path.exists(path)",
"def BuildExists(buildname):\n for platform in platforms:\n if not os.path.exists(builds_basedir+'/'+platform+'/'+buildname):\n return False\n return True",
"def has_pyproject_toml() -> bool:\n return pathlib.Path(_find_root(), \"pyproject.toml\").exists()",
"def dir_exists(self, path):\n if not path:\n return True\n return False",
"def exists(path: str) -> bool:\n pass",
"def project_with_revision_exists(project_name, project_revision, working_dir):\n try:\n with open(working_dir + project_name + \".qpf\", \"r\") as project_file:\n for line in project_file:\n if f\"PROJECT_REVISION = \\\"{project_revision}\\\"\" in line:\n return True\n return False\n except FileNotFoundError:\n return False",
"def exists(path):\n return os.path.exists(path)",
"def check_existing_project(\n self, description: str, project_name: str, project_url: str\n ) -> bool:\n\n checks = [{\"project_name\": project_name}, {\"description\": description}]\n if project_url != \"\":\n checks.append({\"project_url\": project_url})\n\n result = self.db.project.find_one({\"$or\": checks})\n\n if result:\n raise ExistingProjectError(\"Project Exists\")",
"def exists(self, path: str) -> bool:\n pass",
"def is_project(self, project):\n return self._projects_lookup.get(project, False)",
"def test_no_such_project(self):\n def mock_send_request(*args, **kwargs):\n return Response().update(\n remote_source_directory=directory\n ).response\n\n directory = os.path.dirname(os.path.realpath(__file__))\n response = support.run_remote_command(\n command='sync',\n mock_send_request=mock_send_request\n )\n self.assert_has_error_code(response, 'NO_PROJECT')",
"def exists(self, path):",
"def checkPath(filename, projectSource):\n filePath = os.path.join(projectSource, filename)\n if os.path.exists(filePath):\n pass\n else:\n sys.stderr.write(\"Error: \" + filePath + \" not found\")\n sys.exit(1)\n return filePath",
"def path_exists(path):\n if path.startswith('gs://'):\n command = 'gsutil ls {path}'.format(path=path)\n elif path.startswith('s3://'):\n command = 'awscli s3 ls {path}'.format(path=path)\n else:\n return os.path.exists(path)\n\n return run_quick(command, echo=False).returncode == 0",
"def check_build_exists(self):\n path = self.base_dir + \"/\" + self.app_name + \"/\" + \"build\"\n from django_swagger_utils.core.utils.check_path_exists import check_path_exists\n build_dir = check_path_exists(path)\n if build_dir:\n raise Exception(\"Build Directory Already Exist, please run update_specs_build\")",
"def test_returns_404_if_project_doesnt_exist(self):\n # Act\n response = self.client.get(\n \"/api/v2/projects/queries/999/similar-projects/\",\n headers={\"Authorization\": self.user_session_token},\n )\n self.assertEqual(response.status_code, 404)",
"def exists(self, path: PathLike):",
"def exists(path):\n return get_instance(path).exists(path)",
"def exists(env):\n return targz.exists(env)",
"def __check_exists(name, path, fatal=True):\n if not os.path.exists(path):\n if fatal:\n raise SystemExit(\"%s '%s' does not exist\" % (name, path))\n return False\n return True",
"def check_path(path):\n import os\n if not os.path.exists(path):\n print(\"Path does not exist\")\n print(\"\")\n sys.exit()",
"def file_exists(cls, path: Path) -> bool:\n return path.exists()",
"def exists(self):\r\n return os.path.exists(self.full_path)",
"def exists(path: str) -> bool:\n return _fs().exists(path)"
]
| [
"0.781922",
"0.77002376",
"0.6944617",
"0.6784362",
"0.66010505",
"0.65764564",
"0.65411824",
"0.6539585",
"0.65192175",
"0.64994127",
"0.6494861",
"0.6490263",
"0.6452931",
"0.64370346",
"0.6418735",
"0.6405294",
"0.63952315",
"0.63933635",
"0.63204074",
"0.62900877",
"0.6289665",
"0.62822515",
"0.62683624",
"0.62678206",
"0.6255129",
"0.6216438",
"0.6173905",
"0.6157672",
"0.61414105",
"0.61239713"
]
| 0.85413456 | 0 |
Convert phrase to a vector by aggregating its word embeddings. Just take an average of vectors for all tokens in the phrase with some weights. | def get_phrase_embedding(phrase):
    vector = np.zeros([model.vector_size], dtype='float32')
    # 1. lowercase phrase
    phrase = phrase.lower()
    # 2. tokenize phrase
    phrase_tokens = tokenizer.tokenize(phrase)
    # 3. average word vectors for all words in tokenized phrase, skip words that are not in model's vocabulary
    divisor = 0
    for word in phrase_tokens:
        if word in model.vocab:
            divisor += 1
            vector = vector + model.get_vector(word)
    if divisor != 0: vector /= divisor
    return vector | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_sentence_average_w2v(sent, word_to_vec, embedding_dim):\n sum_vec = np.zeros((embedding_dim,))\n known_tokens = 0\n for token in sent.text:\n if (token in word_to_vec.dict):\n known_tokens += 1\n sum_vec += word_to_vec[token]\n if (known_tokens != 0):\n return sum_vec / known_tokens\n else:\n return sum_vec",
"def compute_avg_w2v_vector(w2v_dict, text_nlp_proc):\n SIZE = 50 # size of the w2v dimension\n list_of_word_vectors = [w2v_dict[w] for w in text_nlp_proc if w in w2v_dict.vocab.keys()]\n if len(list_of_word_vectors) == 0:\n result = [0.0]*SIZE\n else:\n result = np.sum(list_of_word_vectors, axis=0) / len(list_of_word_vectors)\n return result",
"def avg_sentence_vector(words, model, num_features, index2word_set):\n featureVec = np.zeros((num_features,), dtype=\"float32\")\n nwords = 0\n\n for word in words:\n if word in index2word_set:\n nwords = nwords+1\n featureVec = np.add(featureVec, model.wv[word])\n # featureVec = np.add(featureVec, model.wv.__getitem__(word))\n\n if nwords>0:\n featureVec = np.divide(featureVec, nwords)\n return featureVec",
"def word_average(self, sent):\n\n mean = []\n for word in sent:\n if word in self.word_model.wv.vocab:\n mean.append(self.word_model.wv.get_vector(word) *\n self.word_idf_weight[word]) # idf weighted\n\n if not mean: # empty words\n # If a text is empty, return a vector of zeros.\n logging.warning(\n \"cannot compute average owing to no vector for {}\".format(sent))\n return np.zeros(self.vector_size)\n else:\n mean = np.array(mean).mean(axis=0)\n return mean",
"def word_averaging(wv, words):\n all_words, mean = set(), []\n \n for word in words:\n if isinstance(word, np.ndarray):\n mean.append(word)\n elif word in wv.vocab:\n mean.append(wv.vectors_norm[wv.vocab[word].index])\n all_words.add(wv.vocab[word].index)\n\n if not mean:\n logging.warning(\"Cannot compute similarity with no input: %s\", words)\n # Remove these examples in pre-processing...\n return np.zeros(50,)\n\n mean = gensim.matutils.unitvec(np.array(mean).mean(axis=0)).astype(np.float32)\n \n return mean",
"def word_average(self, sent):\n mean = []\n for word in sent:\n if word in self.word_model.wv.vocab:\n mean.append(self.word_model.wv.get_vector(word))\n\n if not mean: # empty words\n # If a text is empty, return a vector of zeros.\n logging.warning(\n \"cannot compute average owing to no vector for {}\".format(sent))\n return np.zeros(self.vector_size)\n else:\n mean = np.array(mean).mean(axis=0)\n return mean",
"def sentence_to_avg(sentence, word_to_vec_map):\n # Get a valid word contained in the word_to_vec_map. \n any_word = list(word_to_vec_map.keys())[0]\n \n ### START CODE HERE ###\n # Step 1: Split sentence into list of lower case words (≈ 1 line)\n words = sentence.lower().split()\n\n # Initialize the average word vector, should have the same shape as your word vectors.\n avg = np.zeros(word_to_vec_map[any_word].shape)\n \n # Initialize count to 0\n count = 0\n \n # Step 2: average the word vectors. You can loop over the words in the list \"words\".\n for w in words:\n # Check that word exists in word_to_vec_map\n if w in word_to_vec_map:\n avg += word_to_vec_map[w]\n # Increment count\n count +=1\n \n if count > 0:\n # Get the average. But only if count > 0\n avg = avg / count\n \n ### END CODE HERE ###\n \n return avg",
"def compute_user_input_embedding(txt, model):\r\n embeddings = []\r\n tokens = txt.split(\" \")\r\n for word in tokens:\r\n embeddings.append(model.wv[word])\r\n sentence_embedding = compute_average(embeddings)\r\n return sentence_embedding",
"def word_averaging_list(wv, text_list):\n return np.vstack([word_averaging(wv, review) for review in text_list])",
"def create_embedding_matrix(self):\n self.id2word = dict([(self.vocab[word]['id'], word) for word in self.vocab])\n vocab_size = len(self.vocab)\n result = np.zeros((vocab_size, self.embed_dim))\n unknown_token_set = set()\n\n found_words = 0\n avg = np.zeros(self.embed_dim)\n for _ in range(1, vocab_size): # skip PAD embedding (initialize as zero embedding)\n try:\n result[_] = self.pretrained_embedding[self.id2word[_]]\n avg += result[_]\n found_words += 1\n except:\n unknown_token_set.add(_)\n\n avg /= found_words\n for _ in unknown_token_set:\n result[_] = avg\n self.embedding = result\n return found_words, len(self.id2word)",
"def doc2vec(self, text: str) -> np.array:\n # tfidf_matrix = self.tfidf.transform([text])\n # vectors = []\n # for token in self.tokenize(text):\n # if token in self.word2vec and token in self.feature_names:\n # tfidf_score = tfidf_matrix[0, self.feature_names.index(token)]\n # vectors.append(self.word2vec[token] * tfidf_score)\n vectors = [self.word2vec[token] for token in self.tokenize(text) if token in self.word2vec]\n if not vectors:\n return np.zeros(300)\n return np.mean(vectors, axis=0)",
"def question_to_vec(question, embeddings, dim):\r\n\r\n words = question.split()\r\n\r\n counter = 0\r\n res = np.zeros(dim)\r\n for word in words:\r\n if word in embeddings:\r\n res += np.array(embeddings[word])\r\n counter += 1\r\n if counter!=0:\r\n return res/counter # mean of all word embeddings\r\n else:\r\n return res # vector of zeros\r",
"def vectorize_sentence(sentence, model):\n final_vec = np.zeros(300, )\n count = 0\n for word in sentence:\n count += 1\n dummy_vec = np.zeros(300, )\n try:\n temp_vec = get_vector(word, model)\n final_vec += temp_vec\n except:\n final_vec += dummy_vec\n return final_vec / count",
"def sentence_to_vec(s, embeddings_dict, stop_words, tokenizer):\n \n words = str(s).lower()\n words = tokenizer(words)\n # remove stop words, if any, and only alpha-numeric tokens\n words = [w for w in words if not w in stop_words and w.isalpha()]\n \n embeddings = []\n for w in words:\n if w in embeddings_dict:\n embeddings.append(embeddings_dict[w])\n \n # dimensions = 300\n if len(embeddings)==0:\n return np.zeros(300)\n\n # list of embeddings to array\n embeddings = np.array(embeddings)\n\n # normalized vector\n sum = embeddings.sum(axis=0)\n return sum/np.sqrt((sum**2).sum())",
"def w2v_aggregation_letters(X, length_vector=100):\n global w2v_model_3gram\n if w2v_model_3gram == None:\n w2v_model_3gram = gensim.models.KeyedVectors.load_word2vec_format(os.path.join(os.environ['NOBULL_PATH'], 'w2v_char.vec'))\n X_raw = []\n for x in X:\n x_letter = cleanText_letters(x)\n X_raw.append(x_letter)\n\n\n num_row = len(X_raw)\n\n max_matrix = np.zeros(shape=(num_row, length_vector))\n\n average_matrix = np.zeros(shape=(num_row, length_vector))\n\n for row in range(num_row):\n \n temp_text = X_raw[row] \n temp_vector = temp_text.split()\n \n unique_vector = list(set(temp_vector))\n num_index = len(unique_vector)\n \n temp_matrix = np.zeros(shape=(num_index, length_vector))\n \n j = 0\n for word in unique_vector:\n \n temp_matrix[j] = get_vector(word, w2v_model_3gram, 100)\n j += 1\n\n max_matrix[row] = np.maximum.reduce(temp_matrix)\n average_matrix[row] = np.mean(temp_matrix, axis=0)\n \n result = np.concatenate((average_matrix, max_matrix), axis=1)\n result = sparse.csr_matrix(result)\n \n header = []\n \n for i in range(length_vector):\n temp_string = \"neww2v_average_\" + str(i) + \"-th\"\n header.append(temp_string)\n \n for i in range(length_vector):\n temp_string = \"neww2v_maximum_\" + str(i) + \"-th\"\n header.append(temp_string)\n\n return result, header",
"def average_one_hots(sent, word_to_ind):\n known_words = 0\n size = len(word_to_ind.keys())\n sum_vec = np.zeros((size,))\n for token in sent.text: #going over all tokens and summing their embeddings\n if (token in word_to_ind):\n sum_vec += get_one_hot(size, word_to_ind[token])\n known_words += 1\n if (known_words != 0):\n return sum_vec / known_words\n else:\n return sum_vec",
"def aggregate_embeddings(list_of_embeddings):\n\n return np.mean(list_of_embeddings, axis=0)",
"def generate_avg_vector(self, data):\r\n doc=nlp(data)\r\n data_vector = [token.vector for token in doc]\r\n mean_vector = np.mean(data_vector, axis=0)\r\n return mean_vector",
"def to_vector(texto,model,idf):\n tokens = normalizer(texto).split() # splits the text by space and returns a list of words\n vec = np.zeros(300) # creates an empty vector of 300 dimensions\n for word in tokens: # iterates over the sentence\n if (word in model) & (word in idf): # checks if the word is both in the word embedding and the tf-idf model\n vec += model[word]*idf[word] # adds every word embedding to the vector\n if np.linalg.norm(vec) > 0:\n return vec / np.linalg.norm(vec) # divides the vector by their normal\n else:\n return vec",
"def _get_mean_embedding(self, words):\n\n # ensure the size still matches if it's loaded from pretrained word vectors\n size = self.size\n if self.w2v is not None:\n size = next(iter(self.w2v_.values())).size\n\n zero = np.zeros(size)\n if self.tfidf:\n embedding = np.mean([self.w2v_[w] * self.w2idf_[w]\n if w in self.w2v_ else zero for w in words], axis = 0)\n else:\n embedding = np.mean([self.w2v_.get(w, zero) for w in words], axis = 0)\n\n return embedding",
"def _words_to_vec(self, sentence):\n return torch.FloatTensor([self._use_embeddings(word) for word in sentence])",
"def tweetToVect(tweet, dicoGlove): \n #return model.infer_vector(tweet) \n \n gArray, wSize = w.wordsToGlove(tweet.split(), dicoGlove) \n meanMatrixOverview = w.meanWords(gArray, wSize)\n \n return meanMatrixOverview",
"def word_average_list(self, docs):\n return np.vstack([self.word_average(sent) for sent in docs])",
"def word_average_list(self, docs):\n return np.vstack([self.word_average(sent) for sent in docs])",
"def _aggregate_text_embedding(self, token_ids, embeddings):\n if self._hparams['obj_text_aggregation'] == 'max':\n # Find valid tokens (not PADDING/EOS/UNK/START).\n valid_token_mask = tf.greater_equal(token_ids, 4)\n # Use large negative bias for invalid tokens.\n invalid_token_bias = tf.cast(\n tf.logical_not(valid_token_mask), tf.float32) * -1e9\n # [batch, node_num, word_num, hidden_size]\n embeddings = embeddings + tf.expand_dims(invalid_token_bias, axis=-1)\n # Max value for each dimension, [batch, node_num, hidden_size].\n embeddings = tf.reduce_max(embeddings, axis=-2)\n # For objects with no text, use 0.\n valid_object_mask = tf.cast(\n tf.reduce_any(valid_token_mask, axis=-1), tf.float32)\n embeddings = embeddings * tf.expand_dims(valid_object_mask, axis=-1)\n\n elif self._hparams['obj_text_aggregation'] == 'sum':\n # [batch, step, #max_obj, #max_token] 0 for padded tokens\n real_objects = tf.cast(tf.greater_equal(token_ids, 4), tf.float32)\n # [batch, step, #max_obj, hidden] 0s for padded objects\n embeddings = tf.reduce_sum(\n input_tensor=embeddings * tf.expand_dims(real_objects, 3), axis=-2)\n\n else:\n raise ValueError('Unrecognized token aggregation %s' %\n (self._hparams['obj_text_aggregation']))\n return embeddings",
"def review_to_vec(words, model, num_features , index2word_set):\n \n feature_vec = np.zeros((num_features), dtype=\"float32\")\n word_count = 0\n \n \n \n for word in words:\n if word in index2word_set: \n word_count += 1\n feature_vec += model[word]\n\n if word_count == 0:\n word_count = 1\n\n feature_vec /= word_count\n\n return feature_vec",
"def text2vec(doc_tok, model, dim=300):\n doc_embedding = np.zeros(dim)\n valid_words = 0\n for word in doc_tok:\n if word in model:\n valid_words += 1\n doc_embedding += model.query(word)\n else:\n continue\n if valid_words > 0:\n return doc_embedding / valid_words\n else:\n return doc_embedding",
"def to_vector(text, model, idf, is_tokenized=False):\n if not is_tokenized: text= text.split() # splits the text by space and returns a list of words\n vec = np.zeros(300) # creates an empty vector of 300 dimensions\n for word in text: # iterates over the sentence\n if (word in model) & (word in idf): # checks if the word is both in the word embedding and the tf-idf model\n vec += model[word]*idf[word] # adds every word embedding to the vector\n if np.linalg.norm(vec) > 0:\n return vec / np.linalg.norm(vec) # divides the vector by their normal\n else:\n return vec",
"def gen_embedding(text, model, tokenizer):\n ### Tokenize the texts\n encoded_input = tokenizer(text, padding=True, truncation=True, max_length=512, return_tensors='pt')\n \n ### Encode the tokenized data with model\n with torch.no_grad():\n model_output = model(**encoded_input)\n \n ### Pool the outputs into a single vector\n sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])\n return sentence_embeddings",
"def vectorize(self, sentence, embeddings_dict):\n processed_sentence = self.preprocess(sentence)\n\n matrix = []\n for token in processed_sentence:\n if token in embeddings_dict:\n matrix.insert(0, embeddings_dict[token])\n return numpy.matrix(matrix)"
]
| [
"0.7514688",
"0.72182834",
"0.7089494",
"0.702476",
"0.69361097",
"0.6867072",
"0.6737069",
"0.6620378",
"0.6577739",
"0.6555899",
"0.6496128",
"0.6492713",
"0.63574034",
"0.6309373",
"0.6279589",
"0.62726533",
"0.62423867",
"0.62186897",
"0.62141794",
"0.62009865",
"0.618895",
"0.6177033",
"0.61755776",
"0.61755776",
"0.617506",
"0.616639",
"0.61202675",
"0.6114048",
"0.6112259",
"0.6086305"
]
| 0.76256436 | 0 |
.iso639 | .iso639 Search ISO 639-1, -2 and -3 for a language code. | def iso639(phenny, input):
response = ""
thisCode = str(input.group(1)).lower()
if thisCode == "None":
thisCode = random.choice(list(phenny.iso_data.keys()))
#ISOcodes[random.randint(0,len(ISOcodes)-1)]
#random.choice(ISOcodes)
else:
if len(thisCode) > 3: # so that we don't get e.g. 'a'
for oneCode, oneLang in phenny.iso_data.items():
if thisCode in flatten(oneLang.lower()):
if response != "":
response += ", " + template.format(oneCode, oneLang)
else:
response = template.format(oneCode, oneLang)
#phenny.say("%s %s %s" % (oneCode, oneLang.lower(), thisCode.lower()))
elif thisCode in phenny.iso_data:
altCode = None
if len(thisCode) == 2 and thisCode in phenny.iso_conversion_data:
altCode = phenny.iso_conversion_data[thisCode]
elif len(thisCode) == 3:
for iso1, iso3 in phenny.iso_conversion_data.items():
if thisCode == iso3:
altCode = iso1
break
response = template.format(thisCode + (", " + altCode if altCode else ""), phenny.iso_data[thisCode])
if response == "":
response = "Sorry, %s not found" % thisCode
phenny.say(response) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_language_name(iso_code):\n if iso_code not in LANGUAGES_BY_CODE:\n try:\n lang = iso639.languages.get(part3=iso_code)\n except KeyError:\n lang = None\n\n if lang:\n # we only show up to the first semi or paren\n lang = re.split(r\";|\\(\", lang.name)[0].strip()\n\n LANGUAGES_BY_CODE[iso_code] = lang\n\n return LANGUAGES_BY_CODE[iso_code]",
"def _get_iso_code(cls, language):\n iso = cls._SPECIAL_ISO_CODES.get(language, language[:3])\n return iso if language in cls._SUPPORTED_LANGUAGES else None",
"def country(alpha_2_code: str) -> None:",
"def get_iso639():\n response = get(ISO639URL, stream=True)\n response.encoding = 'UTF-8'\n\n iso639iter = iterdecode(response.iter_lines(), 'utf-8')\n\n return DictReader(iso639iter, delimiter=\"\\t\")",
"def country_codes(country):\n countryObject = None\n try:\n countryObject = pycountry.countries.search_fuzzy(country)\n return countryObject[0].alpha_2\n except LookupError:\n pass\n try:\n splittedCountry = country.split(',')[0]\n countryObject = pycountry.countries.search_fuzzy(splittedCountry)\n return countryObject[0].alpha_2\n except LookupError:\n return 'No Code'",
"def is_valid_language_code(code):\n try:\n iso639.languages.get(part3=code)\n return True\n except KeyError:\n return False",
"def language_iso_code(self, language_iso_code):\n\n self._language_iso_code = language_iso_code",
"def language_name(value):\n return pycountry.languages.get(alpha_2=value)",
"def language_to_isocode(lang):\n lang = lang.lower()\n try:\n return pycountry.languages.get(alpha_3=lang).alpha_2\n except (KeyError, AttributeError):\n exceptions = {\n 'chi': 'zh',\n 'cze': 'cs',\n 'dut': 'nl',\n 'eng-fre': 'en-fr',\n 'fre': 'fr',\n 'ger': 'de',\n 'gre': 'el',\n 'sil': 'silent',\n 'silent': 'silent',\n 'sl': 'sl',\n 'sr': 'sr',\n }\n value = exceptions.get(lang)\n if value:\n return value\n try:\n return pycountry.languages.get(alpha_2=lang[0:2]).alpha_2\n except (KeyError, AttributeError):\n # FIXME log somewhere\n return 'silent'",
"def make_language(iso639row):\n return Language (name=iso639row['Ref_Name'], code=iso639row['Id'])",
"def validate_language(language):\n\n try:\n lang_code = language_dict[language]\n except KeyError:\n lang_code = None\n return lang_code",
"def test_languages(self):\n for i, item in enumerate(self._letters_proto.item):\n for code in item.language:\n # The language code should be in ISO 639 format and consists of\n # two letters for ISO 639-1 languages and three letters otherwise.\n self.assertLess(1, len(code))\n self.assertGreater(4, len(code))\n self.assertTrue(code.islower(), f'Line {i}: Language code should be '\n 'lower-case')\n if len(code) == 3:\n lang = pycountry.languages.get(alpha_3=code)\n self.assertTrue(lang, f'Failed to find language for code {code}')\n if hasattr(lang, 'alpha_2'):\n self.fail(f'Letter {i}: Please use two-letter code `{lang.alpha_2}`'\n f' instead of `{lang.alpha_3}` for {lang.name}')\n else:\n lang = pycountry.languages.get(alpha_2=code)\n self.assertTrue(lang, f'Failed to find language for code {code}')",
"def language_code(self) -> str:\n return pulumi.get(self, \"language_code\")",
"def get_country_code(country_name):\n # worldmap_chart = pygal.maps.world.World()\n # for code, name in worldmap_chart:\n\n for code, name in i18n.COUNTRIES:\n\n # for code, name in COUNTRIES.items():\n if name == country_name:\n print(code)\n return code\n # If the country wasn't found, return None.\n return None",
"def get_full_language(self, language):\n if language:\n language = pycountry.languages.get(alpha_2=language)\n if language:\n language = language.name\n return language.title()",
"def get_language(lang_code) -> str:\n langs = defaultdict(lambda: \"en\", {\"ru\": \"ru\"})\n return langs[lang_code.split(\"-\")[0]] if lang_code else \"en\"",
"def country_code(self) -> str | None:\n pass",
"def get_country_code(country_name):\n for code, name in COUNTRIES.items():\n if name == country_name:\n return code\n elif country_name == 'Yemen, Rep.':\n return 'ye'\n elif country_name == 'Vietnam':\n return 'vn'\n elif country_name == 'Tanzania':\n return 'tz'\n elif country_name == 'Moldova':\n return 'md'\n elif country_name == 'Macao SAR, China':\n return 'mo'\n elif country_name == 'Macedonia, FYR':\n return 'mk'\n elif country_name == 'Libya':\n return 'ly'\n elif country_name == 'Lao PDR':\n return 'la'\n elif country_name == 'Korea, Dem. Rep.':\n return 'kp'\n elif country_name == 'Korea, Rep.':\n return 'kr'\n elif country_name == 'Gambia':\n return 'gm'\n elif country_name == 'Iran, Islamic Rep.':\n return 'ir'\n elif country_name == 'Hong Kong SAR':\n return 'hk'\n elif country_name == 'Congo, Dem. Rep.':\n return 'cd'\n elif country_name == 'Congo, Rep.':\n return 'cf'\n elif country_name == 'Macao SAR, China':\n return 'mo'\n elif country_name == 'Macedonia, FYR':\n return 'mk'\n elif country_name == 'Libya':\n return 'ly'\n elif country_name == 'Lao PDR':\n return 'la'\n elif country_name == 'Korea, Dem. Rep.':\n return 'kp'\n elif country_name == 'Korea, Rep.':\n return 'kr'\n elif country_name == 'Gambia':\n return 'gm'\n # If the country wasn't found, return None.\n return None",
"def __expandLanguage(self, language):\n\n # Priority Chain:\n # de_DE => de => C (default language) => code\n\n all = [language]\n if \"_\" in language:\n all.append(language[:language.index(\"_\")])\n all.append(\"C\")\n\n return all",
"def country() -> str:",
"def country_codes():\n\n iso_sel = [\n Freedom_short.iso_code,\n Freedom_short.year,\n Freedom_short.country,\n Freedom_short.region,\n Freedom_short.hf_score,\n Freedom_short.hf_rank,\n Freedom_short.hf_quartile,\n ]\n\n # Use Pandas to perform the sql query\n #Grab 2017 Data Only for Dropdown\n codes_stmt = db.session.query(*iso_sel).filter(Freedom_short.year == 2017).order_by(Freedom_short.iso_code).statement\n codes_df = pd.read_sql_query(codes_stmt, db.session.bind)\n\n # Return a list of the column names (sample names)\n return jsonify(list(codes_df[\"iso_code\"]))",
"def get_country_code(self):\n\n try:\n sub_div = next(sub_div for sub_div in pycountry.subdivisions if sub_div.name == self.location)\n country = next(country for country in pycountry.countries if country.alpha_2 == sub_div.country_code)\n return country.alpha_3\n except StopIteration as exc:\n print(\"Cannot find subdivision in\" + str(exc))\n return 'XXX'",
"def flag(countrycode: str) -> str:\r\n\r\n code = [c for c in countrycode.lower() if c in ASCII_LOWER]\r\n if len(code) == 2:\r\n # Regional indicator symbols\r\n return flag_regional_indicator(code)\r\n if len(code) > 2 and len(code) < 7:\r\n # Tag sequence\r\n return flag_tag_sequence(code)\r\n found = ''.join(code)\r\n raise ValueError(\r\n 'invalid countrycode, found %d (%r) in %r.' %\r\n (len(found), found, countrycode))",
"def get_country(self, country):\n if country == \"United Kingdom\": return \"en\"\n if country == \"Portugal\": return \"pt\"\n\n result = self.session.get(\"https://en.ogame.gameforge.com\")\n soup = BeautifulSoup(result.content, \"html.parser\")\n\n code_list = soup.find(\"ul\", {\"id\": \"mmoList1\"})\n countries = {}\n for tag in code_list.find_all(\"li\"):\n link = tag.find(\"a\")[\"href\"]\n name = tag.string.strip() # name of the country\n code = link.split(\".\")[0].replace(\"//\", \"\")\n countries[name] = code # save to the dict\n\n # check if input was ok\n if not country in countries.keys():\n self.crash(\"Country\", country, \"was not found on the list.\")\n if len(countries[country]) != 2:\n self.crash(\"Can't fetch code for country\", country)\n\n return countries[country]",
"def find(lang):\n try:\n return as_dict(pycountry.languages.lookup(lang))\n except LookupError:\n return {}",
"def read_publisher_qualified_isocode(self):\n self.CATEGORIES = kpi_from_db_config.CATEGORIES\n self.SELECTED_ISOCODE = kpi_from_db_config.SELECTED_ISOCODE\n self.ID_PUBLISHER_QUALIFIED_ISOCODE = kpi_from_db_config.ID_PUBLISHER_QUALIFIED_ISOCODE\n\n size_m = len(self.CATEGORIES)\n size_n = len(self.SELECTED_ISOCODE)\n list_id = self.ID_PUBLISHER_QUALIFIED_ISOCODE\n\n self.cursor.execute('''\n SELECT value\n FROM public.kpi_report\n WHERE id = %s\n ORDER BY created_at DESC\n LIMIT %s\n ''', [list_id, size_m*size_n])\n\n list_result = [[0]*size_n for _ in range(size_m)]\n\n count_result = size_m*size_n - 1\n for doc in self.cursor:\n list_result[count_result%size_m][count_result/size_m] = doc[0]\n count_result -= 1\n\n for i in range(size_m):\n list_result[i][0:0] = [sum([list_result[i][j] for j in range(size_n)])]\n\n list_result[0:0] = [[0]*(size_n + 1)]\n for i in range(size_n + 1):\n list_result[0][i] = sum([list_result[j][i] for j in range(size_m + 1)])\n\n categories = ['--All--'] + self.CATEGORIES\n\n return list_result, categories",
"def get_country_code(country_name) :\n for code, name in COUNTRIES.items() :\n if name==country_name :\n return code\n # if the country wasn't found, return None\n return None",
"def get_for_language(self, language):\r\n assert isinstance(language, str)\r\n\r\n language = language.strip().lower()\r\n if language in self.__languages:\r\n code = self.__languages[language]\r\n return code\r\n return None",
"def country_flag(country):\n\tif not country:\n\t\treturn u''\n\tresult = Country.objects.filter(name__icontains=country)\n\tif result:\n\t\tc = result[0]\n\t\tiso = c.iso\n\t\tflag_location = iso_flag(iso)\n\t\treturn flag_location\n\treturn u''",
"def language_code(self):\n return self._language_code"
]
| [
"0.70005405",
"0.66463333",
"0.6645652",
"0.6610556",
"0.6142019",
"0.60717714",
"0.6025807",
"0.59968865",
"0.5969312",
"0.5852153",
"0.5746316",
"0.5722876",
"0.5703971",
"0.56456345",
"0.5644901",
"0.5622933",
"0.558205",
"0.5573778",
"0.5565431",
"0.55480707",
"0.55263364",
"0.5494966",
"0.54580617",
"0.5443744",
"0.54141974",
"0.539147",
"0.5386731",
"0.53689665",
"0.5350561",
"0.53467214"
]
| 0.6822768 | 1 |
Print last `n` lines of file | def file_tail(filename, n):
    result = ''
    with open(filename, 'r') as f:
        for line in (f.readlines()[-n:]):
            result += line
    return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tail(filepath, n):\n with open(filepath) as file_fd:\n lines = ''.join(file_fd.readlines())\n lines = lines.splitlines()[-n:]\n return lines",
"def tail(fname, n):\n try:\n f = open(fname, 'r')\n except IOError:\n print \"IOError: No such file or directory: '\" + fname + \"'\"\n return\n \n # NOT IMPLEMENTED...\n f.close()",
"def file_tail(f, n):\n prc = subprocess.Popen(\n \"tail -n \" + str(n) + \" \" + os.path.normpath(f),\n shell=True, stdout=subprocess.PIPE)\n return prc.communicate()[0].decode().rstrip()",
"def tail(file_name):\n with open('../test_files/' + file_name, 'r') as infile:\n list = infile.readlines()\n #calculating the last 10 lines using len(list)-10:len(list)\n print('list of last 10 lines',list[len(list)-10:len(list)])",
"def tailFile(logFile, n):\n return tailFile2(logFile,n)",
"def tailNLinesFromFile(file, n):\n\n if not os.path.isfile(file):\n return None\n\n command = ['tail', '-n', str(n), file]\n\n output = subprocess.Popen(command, stdout=subprocess.PIPE).communicate()[0]\n\n return output.split('\\n')",
"def tail(filep, n=10):\n with open(filep) as f:\n return list(deque(f, maxlen=n))",
"def print_tail_from_jotter(self, n_lines):\n try:\n tail_lines = deque((), n_lines)\n with open(self._filename, 'r') as f:\n line = f.readline()\n while line:\n l = line.rstrip(\"\\n\")\n tail_lines.append(l)\n line = f.readline()\n\n for l in tail_lines:\n print(l)\n except OSError:\n pass",
"def _seek_to_n_lines_from_end_ng(f, numlines=10):\n\tline_count = 0;\n\n\tfor line in f:\n\t\tline_count += 1;\n\tpos = line_count - numlines;\n\tif (pos >= 0):\n\t\tf.seek(pos, 0);\n\telse:\n\t\tf.seek(0, 0);",
"def tail(filename, n):\n p=subprocess.Popen(['tail','-n',str(n),filename], stdout=subprocess.PIPE)\n soutput, _=p.communicate()\n lines = soutput.decode('utf8').split('\\r')\n return lines",
"def tail(f, n, offset=0):\n avg_line_length = 74\n to_read = n + offset\n while 1:\n try:\n f.seek(-(avg_line_length * to_read), 2)\n except IOError:\n # woops. apparently file is smaller than what we want\n # to step back, go to the beginning instead\n f.seek(0)\n pos = f.tell()\n lines = f.read().splitlines()\n if len(lines) >= to_read or pos == 0:\n return lines[-to_read:offset and -offset or None]\n avg_line_length *= 1.3",
"def newtail(f, n, offset=0):\n for i, line in enumerate(f):\n print(\"newtail stats\", i, n, line, )\n if i == n:\n return line",
"def tail(f, lines=1, _buffer=4098):\n # place holder for the lines found\n lines_found = []\n\n # block counter will be multiplied by buffer\n # to get the block size from the end\n block_counter = -1\n\n # loop until we find X lines\n while len(lines_found) < lines:\n try:\n f.seek(block_counter * _buffer, os.SEEK_END)\n except IOError: # either file is too small, or too many lines requested\n f.seek(0)\n lines_found = f.readlines()\n break\n\n lines_found = f.readlines()\n\n # we found enough lines, get out\n if len(lines_found) > lines:\n break\n\n # decrement the block counter to get the\n # next X bytes\n block_counter -= 1\n\n return lines_found[-lines:]",
"def _seek_to_n_lines_from_end(f, numlines=10):\n\tbuf = \"\"\n\tbuf_pos = 0\n\tf.seek(0, 2) # seek to the end of the file\n\tline_count = 0\n\n\twhile line_count < numlines:\n\t\tnewline_pos = buf.rfind(\"\\n\", 0, buf_pos)\n\t\tfile_pos = f.tell()\n\n\t\tif newline_pos == -1:\n\t\t\tif file_pos == 0:\n\t\t\t\t# start of file\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\ttoread = min(1024, file_pos)\n\t\t\t\tf.seek(-toread, 1)\n\t\t\t\tbuf = f.read(toread) + buf[:buf_pos]\n\t\t\t\tf.seek(-toread, 1)\n\t\t\t\tbuf_pos = len(buf) - 1\n\t\telse:\n\t\t\t# found a line\n\t\t\tbuf_pos = newline_pos\n\t\t\tline_count += 1\n\n\tif line_count == numlines:\n\t\tf.seek(buf_pos + 1, 1)",
"def tail(self, n=10):\n nend = self.len()\n nstart = nend - n\n if nstart < 0:\n nstart = 0\n return self.contents(nstart, nend)",
"def get_last_n_commands(n):\n # Ignore warnings generated by the HistoryAccessor. This can be removed\n # when https://github.com/ipython/ipython/pull/11054 reaches our release\n # environment\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', UserWarning)\n ha = HistoryAccessor()\n # Parse the last n commands of the IPython history, joining printed\n # messages\n return '\\n'.join([cmd[2] for cmd in ha.get_tail(n, include_latest=True)])",
"def tailFile2(logFile, n): \n try:\n tempFile = logFile + '.temp' \n cmd1 = 'tail -n'+str(n) + ' ' +logFile + ' > ' + tempFile\n cmd2 = 'rm ' + tempFile\n os.system(cmd1)\n f = open(tempFile)\n lines = f.readlines()\n f.close() \n os.system(cmd2)\n except:\n lines=[]\n return lines",
"def read_lines(filename=\"\", nb_lines=0):\n with open(filename) as file:\n n_lines = 0\n for line in file:\n n_lines += 1\n if nb_lines <= 0 or nb_lines >= n_lines:\n file.seek(0)\n for line in file:\n print(line, end=\"\")\n else:\n file.seek(0)\n for line in range(nb_lines):\n print(file.readline(), end=\"\")",
"def read_lines(filename=\"\", nb_lines=0):\n with open(filename, \"r\", encoding=\"utf-8\") as file1:\n lines = file1.readlines()\n if nb_lines <= 0 or nb_lines > len(lines):\n print(\"\".join(lines), end='')\n else:\n print(\"\".join(lines[:nb_lines]), end='')",
"def tailLines(filename,linesback):\r\n\tavgcharsperline=150\r\n\t\r\n\tfile = open(filename,'r')\r\n\twhile 1:\r\n\t\ttry: file.seek(-1 * avgcharsperline * linesback,2)\r\n\t\texcept IOError: file.seek(0) \r\n\t\tif file.tell() == 0: atstart=1 \r\n\t\telse: atstart=0\r\n\t\tlines=file.read().split(\"\\n\")\r\n\t\tif (len(lines) > (linesback+1)) or atstart: break\r\n\t\t#The lines are bigger than we thought\r\n\t\tavgcharsperline=avgcharsperline * 1.3 #Inc avg for retry\r\n\tfile.close()\r\n\t\r\n\tif len(lines) > linesback: start=len(lines)-linesback -1\r\n\telse: start=0\r\n\treturn lines[start:len(lines)-1]",
"def read_lines(filename=\"\", nb_lines=0):\n with open(filename, 'r', encoding='utf8') as f:\n if nb_lines <= 0:\n print(f.read(), end=\"\")\n else:\n for line in f:\n if nb_lines == 0:\n break\n print(line, end=\"\")\n nb_lines -= 1",
"def read_lines(filename=\"\", nb_lines=0):\n line_count = 0\n with open(filename, mode='r', encoding='utf-8') as f:\n for line_count, lines in enumerate(f):\n pass\n if nb_lines <= 0 or nb_lines > (line_count + 1):\n f.seek(0)\n print(f.read(), end='')\n else:\n f.seek(0) # return to file beginning\n for line in range(nb_lines):\n print(f.readline(), end='')",
"def tail_lines(fd, linesback=10):\n avgcharsperline = 75\n\n while True:\n try:\n fd.seek(-1 * avgcharsperline * linesback, 2)\n except IOError:\n fd.seek(0)\n\n if fd.tell() == 0:\n atstart = 1\n else:\n atstart = 0\n\n lines = fd.read().split(\"\\n\")\n if (len(lines) > (linesback+1)) or atstart:\n break\n\n avgcharsperline=avgcharsperline * 1.3\n\n if len(lines) > linesback:\n start = len(lines) - linesback - 1\n else:\n start = 0\n\n return lines[start:len(lines)-1]",
"def read_lines(filename=\"\", nb_lines=0):\n\n line_counter = 0\n with open(filename, 'r', encoding='utf-8') as my_file:\n for lines in my_file:\n line_counter += 1\n my_file.seek(0)\n if nb_lines <= 0 or nb_lines >= line_counter:\n print(my_file.read(), end=\"\")\n else:\n for i in range(nb_lines):\n print(my_file.readline(), end=\"\")",
"def keep_last_lines(self, num_lines):\n self.data = self.data[-num_lines:]",
"def list_recent_lines(self, num):\n return self.list_lines_gen(self.go_backward, num=num)",
"def read_lines(filename=\"\", nb_lines=0):\n with open(filename, encoding=\"utf-8\") as myFile:\n if nb_lines <= 0:\n print(myFile.read(), end=\"\")\n for i in range(nb_lines):\n print(myFile.readline(), end=\"\")",
"def getLastLine(f, blockSize=3072): \n \n f.seek(0,os.SEEK_END) \n \n totalBytes = f.tell() \n \n if totalBytes > blockSize: \n f.seek(-blockSize,os.SEEK_END) \n else: \n f.seek(0) \n \n lastLines = f.readlines() \n lastLine = lastLines[-1] \n \n if lastLine =='': \n lastLine = lastLines[-2] \n \n return lastLine",
"def print_a_line(line_count, f):\n\tprint line_count, f.readline()",
"def tail(self, seconds=1, max_lines=50):\n # Read file\n with open(self.tailed_file, 'r') as file_:\n # Go to EOF and get file size\n file_.seek(0, 2)\n fsize = file_.tell()\n\n # Get position of last 10K characters, then read to the end\n file_.seek(max(fsize-10000, 0), 0)\n lines = file_.readlines() # Read to end\n\n # Print last max_lines number of lines\n lines = lines[-max_lines:]\n for line in lines:\n print(line.strip())\n\n # Process file\n with open(self.tailed_file) as file_:\n # Go to the end of file\n file_.seek(0, 2)\n while True:\n # Tail file. Exit if CTRL-C is pressed\n try:\n # Get the byte offset of the most recent file read op\n # In other words get current size of file\n curr_position = file_.tell()\n\n # Read line\n line = file_.readline().strip()\n\n # If nothing new, then sleep\n if not line:\n # Go to the current end of file\n file_.seek(curr_position)\n time.sleep(seconds)\n else:\n print(line)\n except KeyboardInterrupt:\n sys.exit(0)"
]
| [
"0.7867674",
"0.75946647",
"0.75466275",
"0.75340295",
"0.749208",
"0.7458377",
"0.74379706",
"0.73325574",
"0.7127996",
"0.7094037",
"0.7092303",
"0.703745",
"0.6764117",
"0.67202204",
"0.6649864",
"0.6596833",
"0.6591324",
"0.6550445",
"0.6541051",
"0.6526451",
"0.64327246",
"0.6408257",
"0.6397874",
"0.6345531",
"0.631884",
"0.6285348",
"0.62655795",
"0.6221062",
"0.62076885",
"0.61757946"
]
| 0.822962 | 0 |
Get the disk file names from the domain XML description. | def GetFilesToBackup(domainXml):
    # Parse the domain XML description first (assumes xml.etree.ElementTree is imported as ElementTree);
    # the original snippet used an undefined name `root` here.
    root = ElementTree.fromstring(domainXml)
    disks = root.findall("./devices/disk/source")
    files = []
    for disk in disks:
        files.append(disk.get("file"))
    return files | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_disks(self):\n # root node\n root = ElementTree.fromstring(self.libvirt_domain.XMLDesc())\n\n # search <disk type='file' device='disk'> entries\n disks = root.findall(\"./devices/disk[@device='disk']\")\n\n # for every disk get drivers, sources and targets\n drivers = [disk.find(\"driver\").attrib for disk in disks]\n sources = [disk.find(\"source\").attrib for disk in disks]\n targets = [disk.find(\"target\").attrib for disk in disks]\n\n # iterate drivers, sources and targets\n if len(drivers) != len(sources) != len(targets):\n raise RuntimeError(\"Drivers, sources and targets lengths are different %s:%s:%s\" % (\n len(drivers), len(sources), len(targets)))\n\n disk_info = namedtuple('DiskInfo', ['device', 'file', 'format'])\n\n # all disks info\n disks_info = []\n\n for i in range(len(sources)):\n disks_info.append(disk_info(targets[i][\"dev\"], sources[i][\"file\"], drivers[i][\"type\"]))\n\n return disks_info",
"def get_disks():\n disks = []\n\n try:\n # Run script\n result = run_diskpart(['list disk'])\n except subprocess.CalledProcessError:\n pass\n else:\n # Append disk numbers\n output = result.stdout.decode().strip()\n for tmp in re.findall(r'Disk (\\d+)\\s+\\w+\\s+(\\d+\\s+\\w+)', output):\n num = tmp[0]\n size = human_readable_size(tmp[1])\n disks.append({'Number': num, 'Size': size})\n\n return disks",
"def list_disks(self, instance_name):\n return ['A_DISK']",
"def get_disks():\n\n if system() != \"Windows\":\n raise OSError(\"For use with Windows platforms.\")\n\n logicaldisks=run(\n [\"wmic\", \"logicaldisk\", \"get\", \"name\"],\n capture_output=True\n )\n\n return findall(\"[A-Z]:\", str(logicaldisks.stdout))",
"def get_all_disk():\n\t\tdisks = []\n\t\tdisks_lines = linux.exe_shell(\"lsblk -o NAME,VENDOR|grep -P '^sd.*[A-Z]'\")\n\t\tfor line in disks_lines.splitlines():\n\t\t\tdisk_t = line.split()\n\t\t\tif len(disk_t) > 1 and \"LSI\" not in disk_t[1]:\n\t\t\t\tdisks.append(disk_t[0])\n\t\tds = []\n\t\tfor i in disks:\n\t\t\td_t = DiskFromLsiSas3(\"\", i)\n\t\t\td_t.fill_attrs()\n\t\t\tds.append(d_t)\n\t\treturn ds",
"def getListOfSDs(name = None,selector = None):\n sds = rhevGet(\"/api/datacenters/%s/storagedomains\"%getDcData(rhev_settings.DC,\"id\"))\n doc = libxml2.parseDoc(sds)\n ctxt = doc.xpathNewContext()\n res = ctxt.xpathEval(\"/storage_domains/storage_domain\")\n sdlist = []\n for sd in res:\n sdin = {}\n sdin[\"name\"] = sd.firstElementChild().get_content()\n sdin[\"id\"] = sd.prop(\"id\")\n sdlist.append(sdin)\n result = []\n if name:\n result = [sdin for sdin in sdlist if sdin[\"name\"].find(name) != -1]\n return result or sdlist",
"def fs_get_disk_list(self):\n\t\treturn Job(SDK.PrlSrv_FsGetDiskList(self.handle)[0])",
"def get_disks_name(hw_lst, without_bootable=False):\n disks = []\n for entry in hw_lst:\n if entry[0] == 'disk' and entry[2] == 'size':\n if without_bootable and is_booted_storage_device(entry[1]):\n sys.stderr.write(\"Skipping disk %s in destructive mode, \"\n \"this is the booted device !\\n\" % entry[1])\n elif 'I:' in entry[1]:\n pass\n else:\n disks.append(entry[1])\n return disks",
"def get_device_file_dict():\n cmd = 'lshw -class disk'\n desc = \"description\"\n log_name = \"logical name\"\n serial = \"serial\"\n\n dev = []\n dev_list = []\n\n ret, output, err = run_gluster_command(cmd)\n output = output.decode('ASCII')\n dev_info = output.split('\\n')\n for line in dev_info:\n if re.search(desc, line):\n if dev:\n dev_list.append(dev)\n\n dev = []\n if re.search(log_name, line) or re.search(serial, line):\n temp = line.split(':')\n temp[1] = temp[1].strip(' ')\n dev.append(temp[1])\n dev_list.append(dev)\n for line in dev_list:\n print(line)",
"def get_all_disks():\n return DISKS_API.get(abs_link=False)",
"def filedescription(self):\n txt = []\n dd_desc = M.Globals[\"^DIC\"][self.fileid]['%D']\n for k,v in dd_desc.keys_with_decendants():\n v = dd_desc[k][0].value\n if v:\n txt.append(v)\n return '\\n'.join(txt)",
"def _get_disk_extension(self, disk_list):\r\n\r\n _extn_list = []\r\n for each_disk in disk_list:\r\n _disk_name, _extn_name = os.path.splitext(each_disk)\r\n _extn_list.append(_extn_name)\r\n\r\n _extn_list = list(set(_extn_list))\r\n\r\n if len(_extn_list) > 1:\r\n return _extn_list\r\n else:\r\n return _extn_list[0]",
"def get_disk_labels(self):\n path = '/dev/disk/by-label/'\n labels = {}\n if not os.path.isdir(path):\n return labels\n\n for label in os.listdir(path):\n label = label.replace('\\\\x2f', '/')\n device = os.path.realpath(path + '/' + label)\n labels[device] = label\n\n return labels",
"def get_band_filenames(xmldoc):\n band_dict = {}\n bands = xmldoc.find('.//bands')\n for bandxml in bands:\n band_name = (bandxml.get('name'))\n file = bandxml.find('.//file_name')\n band_file_name = file.text\n band_dict[band_name] = band_file_name\n return (band_dict)",
"def getFilenamesAndGuid(thisfile):\n\n pfn = str(thisfile.getElementsByTagName(\"pfn\")[0].getAttribute(\"name\"))\n filename = os.path.basename(pfn)",
"def _ListUsbDisks(self):\n disk_list = []\n for disk in glob.glob('/sys/block/sd*'):\n with open(disk + '/removable', 'r') as fd:\n if int(fd.readline()) == 1:\n device = '/dev/%s' % disk.split('/')[-1]\n manuf = self._GetDiskInfo(disk, 'manufacturer')\n product = self._GetDiskInfo(disk, 'product')\n capacity = self._GetDiskCapacity(device)\n if capacity:\n desc = '%s: %s %s %d GB' % (device, manuf, product, capacity)\n disk_list.append([device, manuf, product, capacity, desc])\n return disk_list",
"def files(self) -> List[str]:\n return [packet.name for packet in self.packets.file_description.values()]",
"def getDiskDrives(self, df: str = None, ts: str = None, cursor: str = None, pageSize: int = None):\n params = {\n 'df': df,\n 'ts': ts,\n 'cursor': cursor,\n 'pageSize': pageSize\n }\n return self.api_get_request(f'{self.NINJA_API_QUERIES_DISKS}', params=params)",
"def getFileListDAS(dataset,blacklist=[ ]):\n dataset = dataset.replace('__','/')\n if dataset[0]!='/':\n dataset = '/'+dataset\n instance = 'prod/global'\n if 'USER' in dataset:\n instance = 'prod/phys03'\n #cmd='das_client --limit=0 --query=\"file dataset=%s instance=%s\"'%(dataset,instance)\n cmd = 'das_client --limit=0 --query=\"file dataset=%s instance=%s status=*\"'%(dataset,instance)\n if args.verbose:\n print \"Executing \",cmd\n cmd_out = getoutput( cmd )\n tmpList = cmd_out.split(os.linesep)\n filelist = [ ]\n for line in tmpList:\n if '.root' in line and line not in blacklist:\n #files.append(\"root://cms-xrd-global.cern.ch/\"+line) # global\n filelist.append(\"root://xrootd-cms.infn.it/\"+line) # Eurasia\n filelist.sort()\n return filelist",
"def getzKVMdisks():\n result = []\n\n devices = getAllHardDisks()\n\n # get disk that has 7 partitions\n for dev in devices:\n parts = getPartitions(dev)\n\n if len(parts) == 7:\n result.append(dev)\n\n return result",
"def GetDataDiskName(cls, instance):\n name = cls.DATA_DISK_NAME_FMT.format(instance=instance)\n return cls._FormalizeName(name)",
"def disk(self):\n disk_size_list = []\n precision = 2\n size = self.random.randint(1, 1099511627776)\n suffixes=['B','KB','MB','GB','TB']\n suffixIndex = 0\n while size > 1024 and suffixIndex < 4:\n suffixIndex += 1 #increment the index of the suffix\n size = size/1024.0 #apply the division\n disk_size_list.append(f\"{size:.2f} {suffixes[suffixIndex]}\") #.format(size,suffixes[suffixIndex]))\n return disk_size_list",
"def get_dsc(self):\n for path in self.get_all_files():\n if path.endswith('.dsc'):\n return path\n return None",
"def get_ceph_disk():\n disks = []\n for srv in get_srv_list():\n cfg = get_srv_config(srv)\n for key in ['osd_data', 'osd_journal', 'mds_data', 'mon_data']:\n mnt_point = cfg[key]\n disk = get_disk_by_mountpoint(find_mount_point(mnt_point))\n if disk not in disks:\n disks.append(disk)\n return disks",
"def _get_disk_name(disk_type, instance, short=False):\n prefix = '%s_' % (disk_type[0] if short else disk_type)\n base = ('%s_%s' % (instance.name[:8], instance.uuid[:4]) if short\n else instance.name)\n return pvm_util.sanitize_file_name_for_api(\n base, prefix=prefix, max_len=pvm_const.MaxLen.VDISK_NAME if short\n else pvm_const.MaxLen.FILENAME_DEFAULT)",
"def disk_ids(self):\n return list(self._disks)",
"def get_kb_location(self):\n return ['dav',]",
"def getGuestDisk(self, oSession, oTxsSession, eStorageController):\n lstDisks = None;\n\n # The naming scheme for NVMe is different and we don't have\n # to query the guest for unformatted disks here because the disk with the OS\n # is not attached to a NVMe controller.\n if eStorageController == vboxcon.StorageControllerType_NVMe:\n lstDisks = [ '/dev/nvme0n1' ];\n else:\n # Find a unformatted disk (no partition).\n # @todo: This is a hack because LIST and STAT are not yet implemented\n # in TXS (get to this eventually)\n lstBlkDev = [ '/dev/sda', '/dev/sdb' ];\n for sBlkDev in lstBlkDev:\n fRc = oTxsSession.syncExec('/usr/bin/ls', ('ls', sBlkDev + '1'));\n if not fRc:\n lstDisks = [ sBlkDev ];\n break;\n\n _ = oSession;\n return lstDisks;",
"def show_disks(self):\n icon = Icons.Icons() # Icon\n\n # For every disk, listing information\n icon.show_icon(\"disk\")\n for disk in DISKS:\n self.__get_info(disk)",
"def getDescriptors(self, dsc = \"\"):\r\n return \"\""
]
| [
"0.70366144",
"0.60827386",
"0.6071412",
"0.60200167",
"0.58255386",
"0.57854563",
"0.5739114",
"0.5662612",
"0.5511752",
"0.55094075",
"0.5422538",
"0.54208255",
"0.5409883",
"0.53792715",
"0.53779566",
"0.5372456",
"0.5366111",
"0.5341019",
"0.53092587",
"0.52980816",
"0.52783585",
"0.5243566",
"0.52349937",
"0.5225987",
"0.52049786",
"0.5203337",
"0.5163684",
"0.5134659",
"0.5127707",
"0.5116763"
]
| 0.6748915 | 1 |
Reads a CSV file for a catalog into a long format Python dictionary. The first line is assumed to be the header line, and must contain the field 'item_name'. | def _read_csv_to_dictionary_list(file_name):
    catalog_list = []
    with open(file_name) as csvfile:
        reader = csv.DictReader(csvfile)
        for item in reader:
            catalog_list.append(item)
    return catalog_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_file(file):\n \n dictionary = {}\n csv_fp = csv.reader(file)\n #L[46] = manufacturer, L[63] = year\n #L[4]= city mileage, L[34]=highway mileage\n for line in csv_fp:\n #Skip the headings and the year 2017\n if (not (line[46] == 'make')) and (not (line[63] == '2017')):\n if line[46] in dictionary:\n #Add the city and highway mileage if the year has been made\n if line[63] in dictionary[line[46]]:\n dictionary[line[46]][line[63]][0] += [int(line[4])]\n dictionary[line[46]][line[63]][1] += [int(line[34])]\n #Add the year and data if it was not made previously\n else:\n dictionary[line[46]][line[63]] = [[int(line[4])],\\\n [int(line[34])]]\n #Adds a new manufacturer\n else:\n dictionary[line[46]] = {line[63]:[[int(line[4])],\\\n [int(line[34])]]}\n return dictionary",
"def read_from_csv(self, csv_file):\n data = []\n with codecs.open(csv_file, 'r', encoding='utf-8') as csvfile:\n header = None\n for i, line in enumerate(csvfile):\n line_split = [x.strip() for x in line.split(\"|\")]\n line_data = [x for x in line_split if len(x) > 0]\n if i == 0:\n header = line_data\n else:\n entry = {}\n for i,datum in enumerate(line_data):\n entry[header[i]] = datum\n data.append(entry)\n print \"Loaded %d entries from %s\" % (len(data), csv_file)\n return data",
"def load_csv(input):\n with open(input['csv'], 'r', encoding=input['encoding']) as f:\n invoice_dict = dict()\n reader = csv.reader(f, delimiter=';')\n\n for row in reader:\n invoice_id = row[0]\n\n if invoice_id in invoice_dict:\n invoice_dict[invoice_id].add_entry(row[1:])\n else:\n invoice_dict[invoice_id] = Invoice(row)\n\n return invoice_dict",
"def read_file(filename):\n\n data = {}\n with open(filename, encoding=\"utf8\") as file:\n reader = csv.DictReader(file)\n for line in reader:\n data[line['id']] = line\n return data",
"def read_csv(product_name=str, directory=DIRS['EOIR_DATA_DIR']):\n filename = ('%s.csv' % product_name)\n path = get_dir(os.path.join(directory, filename))\n with io.open(path, mode='r', encoding='utf-8-sig') as f:\n spec_dict = {}\n filtered = (line.replace(\"\\n\", '') for line in f) # Removes \\n from the created as a byproduct of encoding\n for line in filtered:\n field, value = line.split(',')\n if has_number(value) and value.find('\"') == -1:\n if value.find('x') != -1:\n if value.find('.') != -1:\n value = [float(i) for i in value.split('x')]\n else:\n value = [int(i) for i in value.split('x')]\n else:\n value = float(value)\n else:\n value = value.replace('\"', '')\n if value.find('/') != -1:\n value = [str(i) for i in value.split('/')]\n elif (value.lower()).find('true') != -1:\n value = True\n elif (value.lower()).find('false') != -1:\n value = False\n else:\n value = str(value)\n spec_dict['%s' % str(field)] = value\n f.close()\n return spec_dict",
"def parse_csv_input(input_file): # {{{\n parsed_infile = []\n try:\n with open(input_file) as infile:\n for line in csv.reader(infile):\n parsed_infile.append(line)\n\n temp_object_storage = []\n\n for line_index, line in enumerate(parsed_infile[1:]):\n temp_object_storage.append({})\n for category_index, category in enumerate(parsed_infile[0]):\n if category_index == 0:\n category = category[3:]\n temp_object_storage[line_index][category] = line[category_index]\n\n return temp_object_storage\n except FileNotFoundError as excep:\n LOGGER.info(\"error parsing csv file: %s\", excep) # }}}",
"def dictionary_formation():\r\n sales_data = {}\r\n with open('beer_data.csv', \"r\") as data_file:\r\n file_contents = csv.reader(data_file, delimiter=',')\r\n #Value of lines_read used as key value for each dictionary\r\n #in sales_data\r\n lines_read = 1\r\n for line in file_contents:\r\n if lines_read == 1:\r\n lines_read = lines_read + 1\r\n else:\r\n #Stores each column in row as key value in dictionary\r\n sales_data[str(lines_read)] = {\r\n \"invoice_number\": line[0],\r\n \"customer\": line[1],\r\n \"date_required\": line[2],\r\n \"recipe\": line[3],\r\n \"gyle_number\": line[4],\r\n \"quantity_ordered\": int(line[5])\r\n }\r\n lines_read = lines_read + 1\r\n data_file.close()\r\n return sales_data",
"def csv_dict_reader(file_path):\r\n with open(file_path, 'r') as file_obj:\r\n\r\n reader = csv.DictReader(file_obj, delimiter=',')\r\n for line in reader:\r\n #print(line[\"variable_name \"]),\r\n print(line[\"dataset\"])",
"def create_meta_dict_L1(adcp_meta):\n meta_dict = {}\n with open(adcp_meta) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n next(csv_reader, None) # Skip header row\n for row in csv_reader:\n # extract all metadata from csv file into dictionary -- some items not passed to netCDF file but are extracted anyway\n if row[0] == '' and row[1] == '':\n print('Metadata file contains a blank row; skipping this row !')\n elif row[0] != '' and row[1] == '':\n print('Metadata item in csv file has blank value; skipping this row '\n 'in metadata file !')\n else:\n meta_dict[row[0]] = row[1]\n\n # Add conventions metadata to meta_dict\n meta_dict['deployment_type'] = 'Sub Surface'\n meta_dict['flag_meaning'] = 'no_quality_control, good_value, probably_good_value, probably_bad_value, ' \\\n 'bad_value, changed_value, value_below_detection, value_in_excess, ' \\\n 'interpolated_value, missing_value'\n meta_dict['flag_references'] = 'BODC SeaDataNet'\n meta_dict['flag_values'] = '0, 1, 2, 3, 4, 5, 6, 7, 8, 9'\n meta_dict['keywords'] = 'Oceans > Ocean Circulation > Ocean Currents'\n meta_dict['keywords_vocabulary'] = 'GCMD Science Keywords'\n meta_dict['naming_authority'] = 'BODC, MEDS, CF v72'\n meta_dict['variable_code_reference'] = 'BODC P01'\n meta_dict['Conventions'] = \"CF-1.8\"\n\n return meta_dict",
"def read_2tuple_dictionary(filename):\r\n dictionaryoutput = {}\r\n with open(filename) as file:\r\n entries = csv.reader(file)\r\n for item in entries:\r\n # use tuple of company (i.e., VEST01, etc) and item\r\n # companies have different prices\r\n dictionaryoutput[(item[0], item[1])] = item[2]\r\n return dictionaryoutput",
"def load_file(filename):\n with open(filename, 'r', encoding='utf-8') as fin:\n reader = csv.DictReader(fin, delimiter=',')\n purchases = []\n for row in reader:\n # print(type(row), row)\n p = Purchase.create_from_dict(row)\n purchases.append(p)\n return purchases",
"def csvfileUsage(self):\n with open(self.csv_path, \"rb+\") as file_obj:\n reader = csv.DictReader(file_obj, delimiter=',') # CSV DictReader object\n \"\"\" reader.fieldnames returns header , slicing intial 'Month' and\n 'Year' header from list\n \"\"\"\n for com_names in reader.fieldnames[2:]:\n self.company_data[com_names] = {}\n # iterating each row\n for row in reader:\n month, year = self.parse_my(row) # parsing the year and month from row\n # pop the `Month` and `Year` Key to minimize iteration below\n row.pop('Month'), row.pop('Year')\n \"\"\" saving and updating the data at same point of time\n each iteration time, checking the max value and updating \n `Month` `Year` and `Value`\n \"\"\"\n self.prepare_company_data(month, year, row, self.company_data)\n file_obj.close() # close file\n return self.company_data",
"def open_csv(file, dct):\n with open(file) as csv_file:\n f_csv = csv.reader(csv_file)\n column_headings = next(f_csv)\n csv_row = namedtuple('Row', column_headings)\n for rows in f_csv:\n row = csv_row(*rows)\n dct[row.term] = row.definition",
"def csv_dict_reader(file_obj):\n #import re\n #file = open(file_obj)\n\n # reader = csv.DictReader(file_obj)\n # for line in reader:\n # print(line[\"Name\"])",
"def dictparse(csvfilename, keyfield):\n table = {}\n with open(csvfilename, \"rt\", newline='') as csvfile:\n csvreader = csv.DictReader(csvfile,\n skipinitialspace=True)\n for row in csvreader:\n table[row[keyfield]] = row\n return table",
"def creating_dict_from_csv(self) -> dict:\n dictionary = {}\n for row in self.__read_csv():\n if dictionary.get(row[0]):\n dictionary[row[0]].append((row[1], row[2]))\n else:\n dictionary[row[0]] = [(row[1], row[2])]\n\n for key, value in dictionary.items():\n dictionary[key] = sorted(value, key=lambda x: x[1], reverse=True)\n\n return dictionary",
"def cart_from_csv(csv_file_path):\n prices = {}\n with open(csv_file_path) as csvfile:\n for i, row in enumerate(csv.reader(csvfile, delimiter=',')):\n if len(row) != 2:\n raise MalformedCSV('Each CSV row should contain exactly 2'\n ' rows, not %s. -> name,price')\n prices[utf8(row[0])] = float(row[1])\n return Cart(prices)",
"def readcsv(csvfile):\n logger = log.getLogger('obslog.readcsv')\n\n if not os.path.exists(csvfile):\n logger.error('Cannot access %s', csvfile)\n raise SystemExit\n\n data = {}\n with open(csvfile, mode='r') as csv_file:\n reader = csv.DictReader(csv_file)\n for row in reader:\n data[row['FITSFILE']] = row\n logger.debug('Data: %s', data)\n return data",
"def _read_csv(self):\n with open(self._file_path, 'rb') as f:\n reader = csv.DictReader(f, delimiter=',')\n self._content = [row for row in reader]",
"def load_csv(filename):\n results = defaultdict(list)\n with open(filename, 'r') as f:\n reader = csv.DictReader(f)\n for line in reader:\n results[line['sid']].append(line)\n return results",
"def parse_csv_input_file(input_file):\n with open(input_file) as csvfile:\n reader = csv.DictReader(csvfile)\n for item in reader:\n dict = {i: x for i, x in item.items()}\n yield(dict)",
"def read_stock_list():\n print(\"Reading list of stocks.\")\n stocks = {}\n with open(STOCKS_FILE) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n stocks[row['Symbol']] = (row['Name'], row['Sector'])\n return stocks",
"def read_csv_as_dicts(csv_input_file_name):\n input_table = read_csv_as_table(csv_input_file_name, skip_first_line=False)\n\n # first line should contain headers\n header = input_table[0]\n # rest lines would contain actual data\n data = input_table[1:]\n\n output = []\n # process all lines with data\n for input_line in data:\n record = {}\n for i in range(len(header)):\n record[header[i]] = input_line[i]\n output.append(record)\n return output",
"def ReadGBMCatalogueOneLine(nameGrb, path_catlogue_csv = \"/disk/gamma/cta/store/takhsm/FermiData/catalogue/GBM-BusrtCatalogue_20170623.csv\"):\n dct_info = {}\n num_lines = sum(1 for line in open(path_catlogue_csv))\n csv = pandas.read_csv(path_catlogue_csv)\n for iGrb in range(num_lines-1):\n if int(nameGrb) == int(csv.ix[iGrb,'name']):\n dct_info['ra'] = float(csv.ix[iGrb,'ra'])\n dct_info['dec'] = float(csv.ix[iGrb,'dec']) \n dct_info['trigger_time'] = pMETandMJD.ConvertMjdToMet(float(csv.ix[iGrb,'trigger_time']))\n dct_info['error_radius'] = float(csv.ix[iGrb,'error_radius'])\n return dct_info",
"def read_strong_csv(strong_meta_csv_path):\n with open(strong_meta_csv_path, 'r') as fr:\n reader = csv.reader(fr, delimiter='\\t')\n lines = list(reader)\n \n meta_dict = {}\n for line in lines:\n [audio_name, begin_time, end_time, label] = line\n meta = {'begin_time': begin_time, 'end_time': end_time, 'label': label}\n if audio_name in meta_dict:\n meta_dict[audio_name].append(meta)\n else:\n meta_dict[audio_name] = [meta]\n \n return meta_dict",
"def lcia_methods__metadata(self):\r\n with UnicodeReader(os.path.join(dirpath, \"categoryUUIDs.csv\"), \r\n encoding='latin-1', \r\n delimiter=dt) as csv_file:\r\n next(csv_file) \r\n csv_data = [{'name': (line[0], line[2], line[4]),\r\n 'description': line[7]\r\n } for line in csv_file]\r\n \r\n filename = \"LCIA_implementation_2019.xlsx\" # this was donwloaded and updated on Oct 2019 from ecoinvent website. \r\n wb = xlrd.open_workbook(os.path.join(dirpath, filename))\r\n #characterizaton factors\r\n sheet= wb.sheet_by_name(\"CFs\")\r\n cf_data = [{\r\n 'method': (sheet.cell(row, 0).value,\r\n sheet.cell(row, 1).value,\r\n sheet.cell(row, 2).value),\r\n 'name': sheet.cell(row, 3).value,\r\n 'categories': (sheet.cell(row, 4).value, sheet.cell(row, 5).value),\r\n 'amount': sheet.cell(row, 7).value\r\n }\r\n for row in range(1, sheet.nrows)\r\n if sheet.cell(row, 0).value not in \r\n {'selected LCI results, additional', 'selected LCI results'} and isinstance(sheet.cell(row, 7).value, Number)]\r\n #units\r\n sheet= wb.sheet_by_name(\"units\")\r\n units = {(sheet.cell(row, 0).value, sheet.cell(row, 1).value, \r\n sheet.cell(row, 2).value): sheet.cell(row, 4).value for row in range(1, sheet.nrows)}\r\n return csv_data, cf_data, units, filename",
"def read_dictionary(filename):\r\n dictionaryoutput = {}\r\n with open(filename) as file:\r\n entries = csv.reader(file)\r\n for item in entries:\r\n dictionaryoutput[item[0]] = item[1]\r\n return dictionaryoutput",
"def readIEDB(filename, key='Epitope ID'):\n #cr = csv.reader(open(filename,'r'))\n cr = csv.DictReader(open(filename,'r'),quotechar='\"')\n cr.fieldnames = [field.strip() for field in cr.fieldnames]\n D={}\n for r in cr:\n k = r[key]\n D[k] = r\n return D",
"def read_list():\n import glob\n import csv\n\n slist = {}\n csvfile = glob.glob(_path + '/*.csv').pop()\n if csvfile is None:\n return slist\n _logger.info(\"Found file '%s'\" % csvfile)\n\n hcsv = csv.reader(open(csvfile), delimiter=';')\n # Skip header\n next(hcsv)\n # Work on every line\n for line in hcsv:\n if len(line) < 2:\n continue\n # print(line)\n slist[line[0]] = {'name': line[2], 'surname': line[1]}\n\n # print(slist)\n # print(\"LEN\", len(slist))\n # exit(1)\n return slist",
"def dictparse(csvfilename, keyfield, separator, quote, quotestrategy):\n table = {}\n with open(csvfilename, \"rt\", newline='') as csvfile:\n csvreader = csv.DictReader(csvfile,\n skipinitialspace=True,\n delimiter=separator,\n quotechar=quote,\n quoting=quotestrategy)\n for row in csvreader:\n table[row[keyfield]] = row\n return table, csvreader.fieldnames"
]
| [
"0.63216645",
"0.62248164",
"0.62061137",
"0.6192558",
"0.6182252",
"0.6102906",
"0.60775477",
"0.60724336",
"0.6050117",
"0.60413975",
"0.6032285",
"0.6017968",
"0.5999429",
"0.59157795",
"0.5901393",
"0.5890702",
"0.58753383",
"0.5854863",
"0.58544135",
"0.5853177",
"0.58308727",
"0.5804178",
"0.5801951",
"0.57882345",
"0.578488",
"0.5780162",
"0.5778297",
"0.57698977",
"0.5759611",
"0.57540077"
]
| 0.6988612 | 0 |
This function will return the grid size of the UI based on the difficulty level. | def get_grid_size(game_level):
    grid_length = 0
    grid_width = 0
    minecount = 0
    if game_level == DifficultyLevel.BeginnerLevel:
        grid_length = GridSize.BeginnerLength
        grid_width = GridSize.BeginnerWidth
        minecount = 10
    elif game_level == DifficultyLevel.IntermediateLevel:
        grid_length = GridSize.IntermediateLength
        grid_width = GridSize.IntermediateWidth
        minecount = 40
    elif game_level == DifficultyLevel.ExpertLevel:
        grid_length = GridSize.ExpertLength
        grid_width = GridSize.ExpertWidth
        minecount = 99
    return (grid_length, grid_width, minecount) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_grid_width(self):\n # replace with your code\n return 0",
"def get_grid_width(self):\r\n # replace with your code\r\n return self._grid_width",
"def get_grid_width(self):\r\n # replace with your code\r\n return self._grid_width",
"def get_grid_width(self):\n # replace with your code\n return self.grid_width",
"def get_grid_width(self):\n # replace with your code\n return self.grid_width",
"def get_grid_width(self):\r\n # replace with your code\r\n return self.grid_width",
"def get_grid_width(self):\n # replace with your code\n return self._grid_width",
"def get_grid_width(self):\n # replace with your code\n return self._grid_width",
"def _grid_hint_size(self) -> int:",
"def get_grid_width(self):\r\n # replace with your code\r\n return self._width",
"def get_grid_width(self):\n # replace with your code\n return self._width",
"def get_grid_width(self):\n # replace with your code\n return self._width",
"def get_size_of_grid(self):\n row = 0\n column = 0\n if int(self.var1.get()) == 1:\n row, column = 6, 6\n\n if int(self.var2.get()) == 1:\n row, column = 7, 6\n\n if int(self.var3.get()) == 1:\n row, column = 7, 7\n\n if int(self.var4.get()) == 1:\n row, column = 8, 8\n\n return row, column",
"def get_grid_height(self):\n # replace with your code\n return 0",
"def getGridSize(self):\n # This is set by the mosaic module, but other modules need to\n # know the values to take the proper size grid.\n return self.grid_size",
"def get_grid_width(puzzle: str) -> int:\r\n return int(len(puzzle) ** (1 / 2))",
"def get_grid_width(self):\r\n return self.width",
"def grid_size(self):\n return self._grid_size",
"def get_grid_height(self):\r\n # replace with your code\r\n return self.grid_height",
"def get_grid_height(self):\r\n # replace with your code\r\n return self._grid_height",
"def get_grid_height(self):\r\n # replace with your code\r\n return self._grid_height",
"def get_grid_height(self):\n # replace with your code\n return self.grid_height",
"def get_grid_height(self):\n # replace with your code\n return self.grid_height",
"def get_grid_height(self):\n # replace with your code\n return self._grid_height",
"def get_grid_height(self):\n # replace with your code\n return self._grid_height",
"def get_grid_height(self):\n # replace with your code\n return self._height",
"def get_grid_width(self):\n return self.grid_width",
"def get_grid_width(self):\n return self.grid_width",
"def get_grid_width(self):\r\n return self._grid_width",
"def get_grid_width(self):\r\n\r\n return self._grid_width"
]
| [
"0.7599145",
"0.7387938",
"0.7387938",
"0.73785955",
"0.73785955",
"0.73658544",
"0.7362196",
"0.7362196",
"0.73549694",
"0.73397446",
"0.7320358",
"0.7320358",
"0.7248299",
"0.71656466",
"0.7147601",
"0.7137378",
"0.70814574",
"0.7061186",
"0.70521265",
"0.7037405",
"0.7037405",
"0.6998048",
"0.6998048",
"0.69592494",
"0.69592494",
"0.6845783",
"0.6844529",
"0.6844529",
"0.679626",
"0.6748037"
]
| 0.7612548 | 0 |
This function updates the timer LCD. | def timer_change(self):
    if self.time < 999:
        self.time += 1
        self.time_lcd.display(self.time)
    else:
        self.timer.stop() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_timer(self):\r\n frmt_time = \"%d:%02d\" % (self.time_minutes, self.time_seconds)\r\n self.time_seconds += 1\r\n if self.time_seconds == 60:\r\n self.time_seconds = 0\r\n self.time_minutes += 1\r\n\r\n self.mainWidget.statusLabel.setText(\"{} {} --- {} {} --- {}\".format(self.elapsedTimeString,\r\n frmt_time,\r\n self.freeSpaceString,\r\n get_free_space(self.config.videodir),\r\n self.recordingString))",
"def tick():\n\n global time1\n # get the current local time from the PC\n time2 = time.strftime(\"%H:%M:%S\")\n # if time string has changed, update it\n if time2 != time1:\n time1 = time2\n timeLabel.config(text=time2)\n # calls itself every 200 milliseconds\n # to update the time display as needed\n # could use >200 ms, but display gets jerky\n timeLabel.after(200, tick)",
"def on_system_time_textChanged(self, p0):\n # Updated 12/28/16\n time_start = time.time()\n \n self.Init_prog.setValue( percentCheck(self.current_Init_Time.text(), self.Init_tot.text()) ) \n self.Side_1_prog.setValue( percentCheck(self.current_Side_1_Time.text(), self.Side_1_tot.text()) ) \n self.Side_2_prog.setValue( percentCheck(self.current_Side_2_Time.text(), self.Side_2_tot.text()) ) \n self.Total_prog.setValue( percentCheck(self.current_Rep_Cycle.text(), self.Rep_Cycle_tot.text()) ) \n \n # Get Current Bus Values For Mode Discovery\n current_bus_A = I2C.read_byte_data(In_bus, pinIn_A) # bus with valves\n current_bus_B = I2C.read_byte_data(In_bus, pinIn_B) # bus with pumps + magnets\n self.System_Mode.setText( disoverMode( current_bus_A + current_bus_B ) )\n \n # update top GUI info field\n self.as_of_time.setText(\"System Status as of \"+ time.strftime(\"%B %d, %Y at %H:%M:%S\"))\n \n # update individual device status on GUI\n self.FP101_val.setText( returnStatus(var1, 0) )\n self.FP102_val.setText( returnStatus(var2, 0) )\n self.FP103_val.setText( returnStatus(var3, 0) )\n \n self.FV201_val.setText( returnStatus(var4, 'open') )\n self.FV202_val.setText( returnStatus(var5, 'open') )\n self.FV203_val.setText( returnStatus(var6, 'open') )\n self.FV204_val.setText( returnStatus(var7, 'open') )\n \n self.EM201_val.setText( returnStatus(var8, 0) )\n self.EM202_val.setText( returnStatus(var9, 0) )\n\n # update Temperatures\n tempString = str(var10)\n self.temp_val_1.setText(tempString[0:4])\n\n tempString = str(var11)\n self.temp_val_2.setText(tempString[0:4])\n\n # update pH\n pHString = str(var12)\n self.pH_val.setText(pHString[0:6])\n \n if (time.time()-time_start > update_GUI_interval):\n print(\"GUI update longer than update interval...\")",
"def OnTimer(self, event):\n if self.timerNotZero: # When timer runs, subtract one second and update text\n self.start_time = self.start_time.Subtract(wx.TimeSpan(0, sec=1))\n self.timertext.SetLabel(self.start_time.Format(\"%M:%S\"))\n if self.start_time.GetMinutes() == 0 and self.start_time.GetSeconds() == 0: # Timer reached zero\n self.timerNotZero = False\n self.button1.SetBackgroundColour('red')\n else: # Once timer stops, makes the text blink red\n if self.blinkPhase == 0:\n self.timertext.SetForegroundColour('red')\n self.timertext.SetLabel(\"00:00\")\n self.blinkPhase = 1\n elif self.blinkPhase == 1:\n self.timertext.SetForegroundColour('black')\n self.timertext.SetLabel(\"00:00\")\n self.blinkPhase = 0",
"def update(self, func):\n if self.current_time == 0:\n func()\n return\n self.current_time -= 1\n hours = self.current_time // 3600\n minutes = self.current_time % 3600 // 60\n seconds = self.current_time % 60\n try:\n self.timer_label.setText('%02d:%02d:%02d' % (hours, minutes, seconds))\n if self.current_time <= 10:\n self.timer_label.setStyleSheet('color: red')\n Qt.QTimer().singleShot(1000, lambda: self.update(func))\n except RuntimeError:\n return",
"def on_timer(self):\n self.read_serial_data()\n # self.update_monitor()",
"def timertick(self):\r\n if self.secs > 120:\r\n self.countdownString.setText(\"%d min.\" % (self.secs / 60 + 1)) # e.g., 5 min\r\n else:\r\n self.countdownString.setText(\"%02d:%02d\" % (self.secs / 60, self.secs % 60)) # e.g., 01:36\r\n\r\n # Flash the screen when there is 1 minute and when there is 30 seconds left\r\n if self.secs == 60 or self.secs == 30:\r\n self.start_flash_timer()\r\n\r\n # In the last 10 seconds, display countdown in red\r\n if self.secs <= 10:\r\n self.countdownString.setStyleSheet(\"QLabel { background-color : white; color : red; }\")\r\n\r\n self.secs -= 1\r\n if self.secs < 0:\r\n self.stop_timer()\r\n self.countdownString.setStyleSheet(\"QLabel { background-color : white; color : black; }\")",
"def timer(self):\n if self.board.reset_timer:\n self.time_count.set(\"000\")\n self.time_widget.config(text=self.time_count.get())\n return 0\n elif self.board.stop_timer:\n return 0\n now_time = time.time()\n self.board.time = now_time - self.board.last_frame_time\n self.time_count.set(self.numToStrLabel(int(self.board.time)))\n self.time_widget.config(text=self.time_count.get())\n self.board.time = self.board.last_frame_time\n self.after(50, self.timer)",
"def tick(self):\n uh.rotation(270)\n while True:\n self.show_time()\n time.sleep(60)\n uh.off()",
"def on_refreshTime(self, control):\n\n self.txtFecha.set_text(str(self.localTime))",
"def OnButton1(self):\n self.start_time = self.start_time.Minutes(DEFAULT_TIMER)\n self.timertext.SetLabel(self.start_time.Format(\"%M:%S\"))\n self.timerNotZero = True\n self.blinkPhase = 0\n self.timertext.SetForegroundColour('black')\n self.button1.SetBackgroundColour('white')",
"def update_lcd(self):\n # Do nothing if display was not initialised OK\n if self.lcd_display_error:\n return\n\n # Detect front-panel button presses to cycle through the LCD pages\n if GPIO.event_detected(\"P9_11\"):\n self.lcd.previous_page()\n elif GPIO.event_detected(\"P9_12\"):\n self.lcd.next_page()\n\n # Set the LCD backlight colour depending on the overall system health\n if self.__healthy:\n self.lcd.set_colour(LcdDisplay.GREEN)\n else:\n self.lcd.set_colour(LcdDisplay.RED)\n\n # Update the LCD content\n self.lcd.update()",
"def one_second_update(self):\n\n val[\"timeRunning\"] += 1\n total_btc_value = self.calc_total_btc()\n\n self.mw.total_btc_label.setText(\"<span style='font-size: 14px; color: #f3ba2e; font-family: Arial Black;'>\" + total_btc_value + \"</span>\")\n\n total_usd_value = '{number:,.{digits}f}'.format(number=float(total_btc_value.replace(\" BTC\", \"\")) * float(val[\"tickers\"][\"BTCUSDT\"][\"lastPrice\"]), digits=2) + \"$\"\n\n self.mw.total_usd_label.setText(\"<span style='font-size: 14px; color: white; font-family: Arial Black;'>\" + total_usd_value + \"</span>\")\n\n last_btc_price = float(val[\"tickers\"][\"BTCUSDT\"][\"lastPrice\"])\n last_btc_price_formatted = '{number:,.{digits}f}'.format(number=last_btc_price, digits=2) + \"$\"\n \n\n if last_btc_price > self.last_btc_price:\n last_color = Colors.color_green\n elif last_btc_price == self.last_btc_price:\n last_color = Colors.color_lightgrey\n else:\n last_color = Colors.color_pink\n\n self.mw.btc_price_label.setText(\"<span style='color: \" + last_color + \"'>\" + last_btc_price_formatted + \"</span>\")\n self.last_btc_price = last_btc_price\n\n operator = \"\"\n percent_change = float(val[\"tickers\"][\"BTCUSDT\"][\"priceChangePercent\"])\n if percent_change > 0:\n operator = \"+\"\n percent_color = Colors.color_green\n else:\n percent_color = Colors.color_pink\n\n btc_percent = operator + '{number:,.{digits}f}'.format(number=percent_change, digits=2) + \"%\"\n self.mw.btc_percent_label.setText(\"<span style='color: \" + percent_color + \"'>\" + btc_percent + \"</span>\")\n\n high = float(val[\"tickers\"][\"BTCUSDT\"][\"highPrice\"])\n low = float(val[\"tickers\"][\"BTCUSDT\"][\"lowPrice\"])\n vol = float(val[\"tickers\"][\"BTCUSDT\"][\"volume\"])\n\n high_formatted = '{number:,.{digits}f}'.format(number=high, digits=2) + \"$\"\n low_formatted = '{number:,.{digits}f}'.format(number=low, digits=2) + \"$\"\n vol_formatted = '{number:,.{digits}f}'.format(number=vol, digits=2) + \" BTC\"\n\n self.mw.btc_high_label.setText(\"<span style='color: \" + Colors.color_green + \"'>\" + high_formatted + \"</span>\")\n self.mw.btc_low_label.setText(\"<span style='color: \" + Colors.color_pink + \"'>\" + low_formatted + \"</span>\")\n self.mw.btc_vol_label.setText(\"<span style='color: \" + Colors.color_lightgrey + \"'>\" + vol_formatted + \"</span>\")\n\n\n self.mw.debug.setText(str(val[\"volDirection\"]))\n\n self.mw.debug.setText('{number:.{digits}f}'.format(number=float(val[\"volDirection\"]), digits=4) + \"BTC\")\n\n self.percent_changes()\n self.volume_values()\n\n self.check_websocket()\n\n self.update_stats()\n # only update the currently active table\n tab_index_botLeft = self.mw.tabsBotLeft.currentIndex()\n\n if tab_index_botLeft == 3:\n self.mw.holdings_table.update_holding_prices()\n val[\"indexTabOpen\"] = False\n elif tab_index_botLeft == 0:\n self.mw.coin_index.update_coin_index_prices()\n\n # decouple eventually\n val[\"indexTabOpen\"] = True\n # self.start_kline_iterator()\n else:\n val[\"indexTabOpen\"] = False\n self.mw.coin_index.start_kline_iterator()",
"def update_display(self):\r\n\t\tfor message in self._scheduled_messages:\r\n\t\t\tmessage['Delay'] -= 1\r\n\t\t\tif (message['Delay'] == 0):\r\n\t\t\t\tif (message['Parameter'] != None):\r\n\t\t\t\t\tmessage['Message'](message['Parameter'])\r\n\t\t\t\telse:\r\n\t\t\t\t\tmessage['Message']()\r\n\t\t\t\t\tdel self._scheduled_messages[self._scheduled_messages.index(message)]\r\n\r\n\t\tfor callback in self._timer_callbacks:\r\n\t\t\tcallback()\r\n\t\tself._timer = (self._timer + 1) % 256\r\n\t\tif(self._timer == 0):\r\n\t\t\tself._selector._shift_pressed_timer = -12\r\n\t\tself.flash()",
"def display_time(self, display='LEDScreen'):\r\n self.bin_time = self._update_time()\r\n wide = False # defines one or two LEDS for display\r\n if display == 'LEDScreen':\r\n if not wide:\r\n for frame_updates in range(30):\r\n for time_slot in range(len(self.bin_time)):\r\n for bit in range(len(self.bin_time[time_slot])):\r\n if self.bin_time[time_slot][bit] == 1:\r\n self.display.light_led(6 - time_slot, 6 - bit, 0.001)\r\n else:\r\n for frame_updates in range(30):\r\n for time_slot in range(3):\r\n for bit in range(6):\r\n if self.bin_time[time_slot][bit] == 1:\r\n coord = 2 * time_slot\r\n self.display.light_led(7 - coord, 5 - bit, 0.0001)\r\n self.display.light_led(7 - coord - 1, 5 - bit, 0.0001)\r\n\r\n else:\r\n for time_slot in range(3):\r\n if time_slot == 0:\r\n current_leds = self.second_leds\r\n elif time_slot == 1:\r\n current_leds = self.minute_leds\r\n else:\r\n current_leds = self.hour_leds\r\n\r\n bin_position = 0\r\n for pin in range(len(current_leds)):\r\n bin_value = self.bin_time[time_slot][bin_position]\r\n if bin_value > 0:\r\n current_leds[bin_position].on()\r\n else:\r\n current_leds[bin_position].off()\r\n bin_position += 1\r\n return",
"def timer(papirus, seconds):\n image = Image.new('1', papirus.size, WHITE)\n\n draw = ImageDraw.Draw(image)\n width, height = image.size\n\n timer_font_size = int((width - 4)/(5*0.65))\n timer_font = ImageFont.truetype(CLOCK_FONT_FILE, timer_font_size)\n\n draw.rectangle((0, 0, width, height), fill=WHITE, outline=WHITE)\n previous_remaining = 0\n\n start = time.time()\n remaining = seconds # seconds\n\n light_on((100,100,0))\n while remaining > 0:\n while remaining > 0:\n now = time.time()\n remaining = seconds - (now - start)\n if int(remaining) == previous_remaining:\n break\n if remaining < 0:\n break\n time.sleep(0.1)\n\n draw.rectangle((5, 10, width - 5, 10 + timer_font_size), fill=WHITE, outline=WHITE)\n draw.text((5, 10), '{m:02d}:{s:02d}'.format(m=int(remaining // 60), s=int(remaining % 60)), fill=BLACK, font=timer_font)\n\n # display image on the panel\n papirus.display(image)\n if int(remaining % 60) == 0:\n papirus.update() # full update every minute\n else:\n papirus.partial_update()\n previous_remaining = int(remaining)\n light_off()\n papirus.clear()",
"def timer_update(self):\n if self.mineboard.gamestate is not None:\n return\n time_so_far = round(time.time()-self.start_time)\n if time_so_far == 1:\n self.now.set(f\"Time so far: {time_so_far} second\")\n else:\n self.now.set(f\"Time so far: {time_so_far} seconds\")\n self.after(1000, self.timer_update) # calls this function every second",
"def timer():\r\n\r\n T = 0\r\n while True:\r\n print (term.white + term.move_xy(82,1) + 'TIMER : ', end='')\r\n print(T, end='\\r')\r\n time.sleep(1)\r\n T = T + 1",
"def tick(self):\n if self.display_seconds:\n new_time = time.strftime('%I:%M:%S %p')\n else:\n new_time = time.strftime('%I:%M:%S %p').lstrip('0')\n if new_time != self.time:\n self.time = new_time\n self.display_time = self.time\n self.config(text=self.display_time)\n self.after(200, self.tick)",
"async def on_timer_update(self, secs: int):\n pass",
"def update_rec_timer(self, time_s):\n self._stop_section.ids.rec_time_lbl.text = format_time_str(int(round(time_s)))",
"def tick(self):\r\n if self.display_seconds:\r\n new_time = time.strftime('%H:%M:%S')\r\n else:\r\n new_time = time.strftime('%I:%M %p').lstrip('0')\r\n if new_time != self.time:\r\n self.time = new_time\r\n self.display_time = self.time\r\n self.config(text=self.display_time)\r\n self.after(200, self.tick)",
"def refresh_label(self):\n finalText = \"%s\" % (player.get_time())\n self.label.configure(text=finalText)",
"def refresh():\n now = datetime.now()\n time_now = datetime.time(now)\n curr_timelabel = Label(root, text=\"Current Time: \" + str(time_now)[:8])\n curr_timelabel.config(font=(\"Calibri 16\"))\n curr_timelabel.place(relx=0.5, rely=0.065, anchor=CENTER)\n\n if time_now > times[0] and time_now < times[1]:\n curr_salahlabel = Label(root, text=\"Current Salah: Fajr\")\n curr_salahlabel.config(font=(\"Calibri, 18\"))\n curr_salahlabel.place(relx=0.1, rely=0.29, anchor=CENTER)\n\n next_salahlabel = Label(root, text=\"Sunrise: \" + times[1])\n next_salahlabel.config(font=(\"Calibri, 18\"))\n next_salahlabel.place(relx=0.3, rely=0.29, anchor=CENTER)\n\n elif time_now > times[1] and time_now < times[2]:\n curr_salahlabel = Label(root, text=\"Sunrise - No Salah\")\n curr_salahlabel.config(font=(\"Calibri, 18\"))\n curr_salahlabel.place(relx=0.1, rely=0.29, anchor=CENTER)\n\n next_salahlabel = Label(\n root, text=\"Next Salah: Zuhr \" + \"(\" + str(times[2]) + \")\"\n )\n next_salahlabel.config(font=(\"Calibri, 18\"))\n next_salahlabel.place(relx=0.3, rely=0.29, anchor=CENTER)\n\n elif time_now > times[2] and time_now < times[3]:\n curr_salahlabel = Label(root, text=\"Current Salah: Zuhr\")\n curr_salahlabel.config(font=(\"Calibri, 18\"))\n curr_salahlabel.place(relx=0.1, rely=0.29, anchor=CENTER)\n\n next_salahlabel = Label(\n root, text=\"Next Salah: Asr \" + \"(\" + str(times[3]) + \")\"\n )\n next_salahlabel.config(font=(\"Calibri, 18\"))\n next_salahlabel.place(relx=0.3, rely=0.29, anchor=CENTER)\n\n elif time_now > times[3] and time_now < times[4]:\n curr_salahlabel = Label(root, text=\"Current Salah: Asr\")\n curr_salahlabel.config(font=(\"Calibri, 18\"))\n curr_salahlabel.place(relx=0.1, rely=0.29, anchor=CENTER)\n\n next_salahlabel = Label(\n root, text=\"Next Salah: Maghrib \" + \"(\" + str(times[4]) + \")\"\n )\n next_salahlabel.config(font=(\"Calibri, 18\"))\n next_salahlabel.place(relx=0.3, rely=0.29, anchor=CENTER)\n\n elif time_now > times[4] and time_now < times[5]:\n curr_salahlabel = Label(root, text=\"Current Salah: Maghrib\")\n curr_salahlabel.config(font=(\"Calibri, 18\"))\n curr_salahlabel.place(relx=0.1, rely=0.29, anchor=CENTER)\n\n next_salahlabel = Label(\n root, text=\"Next Salah: Isha \" + \"(\" + str(times[5]) + \")\"\n )\n next_salahlabel.config(font=(\"Calibri, 18\"))\n next_salahlabel.place(relx=0.3, rely=0.29, anchor=CENTER)\n\n elif time_now > times[5]:\n curr_salahlabel = Label(root, text=\"Current Salah: Isha\")\n curr_salahlabel.config(font=(\"Calibri, 18\"))\n curr_salahlabel.place(relx=0.1, rely=0.29, anchor=CENTER)\n\n next_salahlabel = Label(\n root, text=\"Next Salah: Fajr \" + \"(\" + str(times[0]) + \"~)\"\n )\n next_salahlabel.config(font=(\"Calibri, 18\"))\n next_salahlabel.place(relx=0.3, rely=0.29, anchor=CENTER)\n\n root.after(1000, refresh)\n root.after(1000, curr_salahlabel.pack_forget)\n root.after(1000, next_salahlabel.pack_forget)\n root.after(1000, curr_timelabel.pack_forget)",
"def led_duty_cycle(val):\n set_tmr_ocr(TMR1, OCRxB, val)",
"def update_time_base(self, event):\n print(\"TimeBase.update_time_base()\")\n print(\"Base de temps : \", self.scale_T.get())\n if not isinstance(self.parent, Tk):\n self.parent.update_time(self.scale_T.get())",
"def timer_handler():\r\n \r\n global elapsed_time\r\n elapsed_time += 1",
"def _countdown(self):\n self._game.deleteBall()\n self._game.draw()\n # reset paddle speed\n self._game.updatePaddle(self.input)\n if ZERO_SECS <= self.time < ONE_SEC:\n self._mssg = (GLabel(text='3', x=GAME_WIDTH/2, y=GAME_HEIGHT/2,\n font_size=COUNTDOWN_FONT_SIZE))\n if ONE_SEC <= self.time < TWO_SECS:\n self._mssg = (GLabel(text='2', x=GAME_WIDTH/2, y=GAME_HEIGHT/2,\n font_size=COUNTDOWN_FONT_SIZE))\n if TWO_SECS <= self.time < THREE_SECS:\n self._mssg = (GLabel(text='1', x=GAME_WIDTH/2, y=GAME_HEIGHT/2,\n font_size=COUNTDOWN_FONT_SIZE))\n if self.time >= THREE_SECS:\n self._mssg = None\n self._game.serveBall()\n self._state = STATE_ACTIVE\n self._points_mssg = (GLabel(text='Points: 0', x=POINTS_X, y=POINTS_Y, font_size=24))",
"def _on_band_timer(self):\n self._update_band_state()",
"def update_io(self, dt):\n self.light.change_color(traffic_lights_binary())\n self.seven_segment_display.activate_segments(seven_segment_binary())\n self.ascii.update_ascii_grid()"
]
| [
"0.76056844",
"0.71149886",
"0.70156574",
"0.69157463",
"0.68125004",
"0.6781015",
"0.67532974",
"0.67068124",
"0.6642209",
"0.66009486",
"0.65852726",
"0.658475",
"0.6575836",
"0.65547466",
"0.6551715",
"0.65476876",
"0.6518006",
"0.64748704",
"0.64519894",
"0.6446896",
"0.6446173",
"0.64248616",
"0.64113355",
"0.62782794",
"0.6232916",
"0.62215364",
"0.6220454",
"0.6219623",
"0.6191893",
"0.6181568"
]
| 0.76348394 | 0 |
This function handles the left-click action on each grid cell. It also handles the actions required to update the board and the game state after the click. | def handle_left_click(self):
    if not self.game_in_progress:
        return
    if self.first_click:
        self.first_click = False
        self.timer.start(1000)
    sender = self.sender()
    row = 0
    col = 0
    for row in range(self.rows):
        for col in range(self.cols):
            if self.button_array[row][col] == sender:
                break
        else:
            continue
        break
    # print 'Received left click:', row, ',', col
    celllist = self.board.opencell(row, col)
    if celllist == []:
        return
    for cell in celllist:
        row = cell[0]
        col = cell[1]
        cell_property = self.board.getcellproperty(row, col)
        if cell_property == CellProperty.Empty:
            self.button_array[row][col].setIcon(QtGui.QIcon("icons/OpenedSquare.png"))
        elif cell_property == CellProperty.Mine:
            # Game over
            for row in range(self.rows):
                for col in range(self.cols):
                    cell_property = self.board.getcellproperty(row, col)
                    if cell_property == CellProperty.Mine:
                        self.button_array[row][col].setIcon(QtGui.QIcon("icons/mine.ico"))
            self.status_button.setIcon(QtGui.QIcon("icons/smiley3.ico"))
            self.game_in_progress = False
            self.timer.stop()
            return
        elif cell_property == CellProperty.MineCountOne:
            self.button_array[row][col].setIcon(QtGui.QIcon("icons/1.png"))
        elif cell_property == CellProperty.MineCountTwo:
            self.button_array[row][col].setIcon(QtGui.QIcon("icons/2.png"))
        elif cell_property == CellProperty.MineCountThree:
            self.button_array[row][col].setIcon(QtGui.QIcon("icons/3.png"))
        elif cell_property == CellProperty.MineCountFour:
            self.button_array[row][col].setIcon(QtGui.QIcon("icons/4.png"))
        elif cell_property == CellProperty.MineCountFive:
            self.button_array[row][col].setIcon(QtGui.QIcon("icons/5.png"))
        elif cell_property == CellProperty.MineCountSix:
            self.button_array[row][col].setIcon(QtGui.QIcon("icons/6.png"))
        elif cell_property == CellProperty.MineCountSeven:
            self.button_array[row][col].setIcon(QtGui.QIcon("icons/7.png"))
        elif cell_property == CellProperty.MineCountEight:
            self.button_array[row][col].setIcon(QtGui.QIcon("icons/8.png"))
    game_status = self.board.continuegame()
    print 'Game Status:', game_status
    if game_status == GameStatus.GameWon:
        self.timer.stop()
        self.game_in_progress = False
        player_name = QtGui.QInputDialog.getText(self, "Name Please !!",\
            "Enter your name for leader board:")
        # TODO: Replace 1 with the time taken by the end user.
        LeaderBoard.insertnewscore(CURRENT_GAME_LEVEL, player_name[0], self.time)
        self.status_button.setIcon(QtGui.QIcon("icons/smiley.ico"))
        print "You have won the game" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.running = False\n if event.type == pygame.MOUSEBUTTONDOWN:\n self.set_selected(self.mouse_on_grid())\n if self.get_selected() is not None and event.type == pygame.KEYDOWN:\n self.event_seletect_moved(event)\n self.event_cell_update(event)",
"def on_mouse_press(self, x, y, button, modifiers):\n\n # Change the x/y screen coordinates to grid coordinates\n column = int(x // (WIDTH + MARGIN))\n row = int(y // (HEIGHT + MARGIN))\n\n # print(f\"Click coordinates: ({x}, {y}). Grid coordinates: ({row}, {column})\")\n\n # Make sure we are on-grid. It is possible to click in the upper right\n # corner in the margin and go to a grid location that doesn't exist\n # AKA: make sure you are clicking w/in the grid - TH\n if row < ROW_COUNT and column < COLUMN_COUNT:\n # Flip the location between 1 and 0.\n # this will reset value for the recreate grid\n # and change the color - TH\n # if self.grid[row][column] == 0:\n # self.grid[row][column] = self.num_key\n # else:\n # self.grid[row][column] = 0\n self.current_selected = (row, column)\n\n self.recreate_grid()",
"def OnLabelRightClick(self, evt):\n \n self.actRow = evt.Row\n self.actCol = evt.Col\n \n if evt.Row<0 and evt.Col>=0: #right click on column label\n\n menu = wx.Menu()\n \n miX = menu.Append(self.ID_popup_Column_SetX,\n \"Set this Column as X\",\n kind = wx.ITEM_CHECK)\n if self.Table.colhasmark(evt.Col, 'X'):\n miX.Check()\n\n miY1 = menu.Append(self.ID_popup_Column_SetY1,\n \"Set this Column as Y1\",\n kind = wx.ITEM_CHECK)\n if self.Table.colhasmark(evt.Col, 'Y1'):\n miY1.Check()\n\n miY2 = menu.Append(self.ID_popup_Column_SetY2,\n \"Set this Column as Y2\",\n kind = wx.ITEM_CHECK)\n if self.Table.colhasmark(evt.Col, 'Y2'):\n miY2.Check()\n\n miG = menu.Append(self.ID_popup_Column_SetG,\n \"Set this Column as Group By\",\n kind = wx.ITEM_CHECK)\n if self.Table.colhasmark(evt.Col, 'G'):\n miG.Check()\n\n if self.Table.colsel[evt.Col] in self.Table.dynamic_cols:\n menu.Append(self.ID_popup_Column_SetExpression,\n \"Set expression ...\")\n menu.Append(self.ID_popup_Column_Recalculate,\n \"Recalculate all values\")\n \n menu.Append(self.ID_popup_Select_Columns,\n \"Display Columns ...\")\n menu.Append(self.ID_popup_Set_Column_Label,\n \"Set Column Label ...\")\n\n self.PopupMenu(menu)\n menu.Destroy()\n \n\n elif evt.Col<0 and evt.Row>=0: #right click on row label\n menu = wx.Menu()\n \n miM = menu.Append(self.ID_popup_MaskRow,\n \"Mask Row\",\n kind = wx.ITEM_CHECK)\n if self.Table.rowmask[evt.Row]:\n miM.Check()\n\n if self.Table.GetValueNamed(evt.Row, 'Filename'):\n menu.Append(self.ID_popup_ReloadRow, 'Reload image')\n \n if self.IsSelection():\n menu.Append(self.ID_popup_MaskSelection, \"Mask Selection\")\n menu.Append(self.ID_popup_UnmaskSelection, \"Unmask Selection\")\n menu.Append(self.ID_popup_OmitSelection, \"Omit Selection\")\n\n self.actRowSelection = self.GetSelectedRows()\n \n self.PopupMenu(menu)\n menu.Destroy()\n \n evt.Skip()",
"def event_click_left(self, event):\r\n\r\n i, j = self.grid.get_position(event.x, event.y)\r\n\r\n if self.grid[i][j].fixed:\r\n return\r\n\r\n # Places the entry\r\n self.entry[\"text\"] = self.grid[i][j]\r\n self.entry.place(\r\n x=j * self.grid.box_size + 2,\r\n y=i * self.grid.box_size + 2,\r\n width=self.grid.box_size + 1,\r\n height=self.grid.box_size + 1\r\n )\r\n self.entry.focus_set()",
"def handle_right_click(self):\n if not self.game_in_progress:\n return\n if self.first_click:\n self.first_click = False\n self.timer.start(1000)\n sender = self.sender()\n row = 0\n col = 0\n for row in range(self.rows):\n for col in range(self.cols):\n if self.button_array[row][col] == sender:\n break\n else:\n continue\n break\n # print 'Received right click:', row, ',', col\n status = self.board.getcellstatus(row, col)\n if status == CellStatus.Opened:\n return\n elif status == CellStatus.Closed:\n self.remainingminecount = self.remainingminecount - 1\n self.mines_lcd.display(str(self.remainingminecount))\n self.board.setcellstatus(row, col, CellStatus.MarkedAsMine)\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/Flag.png\"))\n elif status == CellStatus.MarkedAsMine:\n self.remainingminecount = self.remainingminecount + 1\n self.mines_lcd.display(str(self.remainingminecount))\n self.board.setcellstatus(row, col, CellStatus.MarkedAsSuspectedMine)\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/questionmark.png\"))\n elif status == CellStatus.MarkedAsSuspectedMine:\n self.board.setcellstatus(row, col, CellStatus.Closed)\n self.button_array[row][col].setIcon(QtGui.QIcon(\"icons/unopenedsquare.png\"))",
"def _onclick(self,event):\r\n if self.NumCells > 0:\r\n ShapeMask = np.shape(self.Mask)\r\n # get coorinates at selected location in image coordinates\r\n if event.xdata == None or event.ydata == None:\r\n return\r\n xcoor = min(max(int(event.xdata),0),ShapeMask[1])\r\n ycoor = min(max(int(event.ydata),0),ShapeMask[0])\r\n \r\n # search for the mask coresponding to the selected cell\r\n for EachCell in range(self.NumCells):\r\n if self.Mask[ycoor,xcoor,EachCell]:\r\n self.SelectedCellIndex = EachCell\r\n break\r\n \r\n # highlight selected cell\r\n if self.SelectedCellIndex not in self.selected_ML_Index:\r\n # Get the selected cell's contour coordinates and mask patch\r\n self.contour_verts, self.Cell_patch = self.get_cell_polygon(self.Mask[:,:,self.SelectedCellIndex])\r\n \r\n self.Matdisplay_Figure_axis.add_patch(self.Cell_patch)\r\n self.Matdisplay_Canvas.draw()\r\n \r\n self.selected_ML_Index.append(self.SelectedCellIndex)\r\n self.selected_cells_infor_dict['cell{}_verts'.format(str(self.SelectedCellIndex))] = self.contour_verts\r\n else:\r\n # If click on the same cell\r\n self.Cell_patch.remove()\r\n self.Matdisplay_Canvas.draw()\r\n self.selected_ML_Index.remove(self.SelectedCellIndex)\r\n self.selected_cells_infor_dict.pop('cell{}_verts'.format(str(self.SelectedCellIndex)))",
"def left_click(self, event):\n if self.text == 'True': # player clicks on bomb\n messagebox.showerror('Minesweeper', 'Kaboom! You lose.', parent=self)\n self.parentGrid.reveal_bombs()\n elif self.text != '0': # reveal cell\n self['text'] = self.text\n self['fg'] = self.textColor\n self['bg'] = 'light gray'\n self['relief'] = SUNKEN\n self.parentGrid.exposed_new_cell(self.coord)\n else: # reveal cell(s) if it is a blank cell\n self.parentGrid.reveal_blank_cells(self)",
"def on_left_click(self, client, game) -> None:\n pass",
"def clickCell(self, row, col):\n self.clicked[row, col] = 1",
"def right_click(self, event):\n if self['text'] == '*' and self.text == '*': # player right-clicks again\n self.auto_expose()\n self.parentGrid.update_number(True)\n else: # first right-click\n self['text'] = '*'\n self['fg'] = 'black'\n self.parentGrid.update_number()",
"def grid_clicked(self, x, y):\n if self.__game.get_tile(x, y) is MarkerType.NONE:\n player = self.__game.get_player()\n next_player = self.__game.get_next_player()\n\n # Next move the positions are swapped\n self.__infobar.update_info(next_player, player)\n\n self.__tilegrid.set_tile_marker(x, y, player)\n self.__tilegrid.set_tile_color(x, y, Color.DARK_TONE)\n\n state, winner, loser, win_tiles = self.__game.make_move(x, y)\n # Display winner info if found\n if state is GameState.WINNER:\n self.__infobar.show_results(state, winner, loser)\n self.__tilegrid.highlight_tiles(win_tiles)\n self.__buttonbar.set_disabled(False)\n elif state is GameState.TIE:\n self.__infobar.show_results(state, None, None)\n self.__buttonbar.set_disabled(False)\n\n # Play sound according to the player\n if player is MarkerType.CROSS:\n winsound.PlaySound(\"sound/click_x.wav\", winsound.SND_ASYNC)\n else:\n winsound.PlaySound(\"sound/click_o.wav\", winsound.SND_ASYNC)\n else:\n self.__tilegrid.set_tile_color(x, y, Color.FAIL_COLOR)",
"def handle_left_click(self,event):\n\n c = self.seqframe\n if 'textlabel' in c.gettags(CURRENT):\n self.show_item(event)\n elif 'comparison_seq' in c.gettags(CURRENT):\n self.show_sequence_label(event)\n else:\n self.start_selection(event)\n return",
"def _handle_left_click(self, pixel):\n position = self.pixel_to_position(pixel)\n index = self.position_to_index(position, self._grid_size)\n if self._board.check_pokemon(index):\n\n for index in self._board.get_pokemon_location():\n self._board.replace_character_at_index(index, POKEMON)\n self.draw_board(self._board)\n\n if self._board.check_loss():\n messagebox.showinfo(\"Game over\",\"You lose! Would you like to play again?\")\n raise SystemExit\n\n\n else:\n self._board.reveal_cells(self._grid_size, self._board.get_pokemon_location(), index)\n self.draw_board(self._board)\n\n if self._board.check_win():\n messagebox.showinfo(\"Game over\",\"You win!\")\n raise SystemExit",
"def _left_button_release_event(self, obj, event):\n #self.OnLeftButtonUp()\n pixel_x, pixel_y = self.parent.vtk_interactor.GetEventPosition()\n #selector = vtk.vtkVisibleCellSelector()\n\n self.picker_points.append((pixel_x, pixel_y))\n\n #print(self.picker_points)\n if len(self.picker_points) == 2:\n p1x, p1y = self.picker_points[0]\n p2x, p2y = self.picker_points[1]\n self.picker_points = []\n xmin = min(p1x, p2x)\n ymin = min(p1y, p2y)\n xmax = max(p1x, p2x)\n ymax = max(p1y, p2y)\n #print(self.picker_points)\n #print('_area_pick_left_button_release', cell_id)\n\n dx = abs(p1x - p2x)\n dy = abs(p1y - p2y)\n self.picker_points = []\n if dx > 0 and dy > 0:\n if self._pick_visible:\n self._pick_visible_ids(xmin, ymin, xmax, ymax)\n else:\n self._pick_depth_ids(xmin, ymin, xmax, ymax)\n self.parent.vtk_interactor.Render()\n self.picker_points = []",
"def pressAdjTiles(self, event):\n clickedTile = event.widget\n if clickedTile.isInPlay(): self.changeSmile(2)\n for adjTile in self.getAdjacentTiles(clickedTile.row, clickedTile.col):\n if not adjTile.isFlagged(): adjTile.buttonPress()",
"def _pressed(self, evt):\n x, y, widget = evt.x, evt.y, evt.widget\n item = widget.identify_row(y)\n column = widget.identify_column(x)\n\n if not column or not item in self._items:\n # clicked in the weekdays row or just outside the columns\n return\n\n item_values = widget.item(item)['values']\n if not len(item_values): # row is empty for this month\n return\n\n text = item_values[int(column[1]) - 1]\n if not text: # date is empty\n return\n\n bbox = widget.bbox(item, column)\n if not bbox: # calendar not visible yet\n return\n\n # update and then show selection\n text = '%02d' % text\n self._selection = (text, item, column)\n self._show_selection(text, bbox)",
"def onMoveLeft(self):\n self.mainGrid.moveLeft()",
"def handle_left_shift_click(self, event):\n #placeholder to prevent handle_left_click being called\n return",
"def onCellClicked(self, row, column):\n # Any cell click is always in column 0\n column = 0\n self.wparm = self.ui_FileList.cellWidget(row, column)\n if self.wparm is not None:\n if \"text\" in self.wparm.img:\n self.ui_SelectedName.setText(self.wparm.text)\n if self.show_dirs_only and \"folder\" in self.wparm.img:\n self.ui_SelectedName.setText(self.wparm.text)",
"def _left_button_press_event(self, obj, event):\n #print('area_picker - left_button_press_event')\n self.OnLeftButtonDown()\n pixel_x, pixel_y = self.parent.vtk_interactor.GetEventPosition()\n self.picker_points.append((pixel_x, pixel_y))",
"def ev_mousebuttondown(self, event):\n if self.engine.game_map.in_bounds(*event.tile):\n if event.button == 1:\n return self.on_index_selected(*event.tile)\n return super().ev_mousebuttondown(event)",
"def on_mouse_click(self, event):\n if not self.is_game_over:\n try:\n # i, j coordinates of the click event\n i = int(round(event.ydata))\n j = int(round(event.xdata))\n\n # Left button\n if event.button == 1 or event.button == 2:\n self.reveal(i, j)\n\n # Right button\n elif event.button == 3:\n self.flag(i, j)\n\n except (TypeError, IndexError):\n pass",
"def left_mouse_down_handler(self, event):\r\n\r\n self.is_left_mouse_down = True\r\n if not self.is_game_over:\r\n self.update_reset_button()\r\n\r\n tile = self.board.get_event_tile(event.pos)\r\n if tile is not None:\r\n self.board.update_tile_hover(tile, self.is_left_mouse_down, self.is_right_mouse_down)",
"def __on_click(self, evt):\n if evt.button() == Qt.LeftButton:\n return self._on_left_click(evt)\n if evt.button() == Qt.RightButton:\n return self._on_right_click(evt)",
"def handle_event(self, event):\n\n\t\tif event.type == QUIT:\n\t\t\treturn\n\n\t\tif event.type == KEYDOWN:\n\t\t\tif event.key == pygame.K_ESCAPE:\n\t\t\t pygame.quit()\n\t\t\t return\n\n\t\tfor column in range(len(self.model.block_ranges)):\n\t\t\tfor row in range(len(self.model.block_ranges[column])):\n\t\t\t\tif cursor_position[0] in range(self.model.block_ranges[column][row][0][0],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.model.block_ranges[column][row][0][1]):\n\t\t\t\t\tif cursor_position[1] in range(self.model.block_ranges[column][row][1][0],\n\t\t\t\t\t\t\t\t\t\t\t\tself.model.block_ranges[column][row][1][1]):\n\t\t\t\t\t\tif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tclicked = self.model.blocks[column][row]\n\n\t\t\t\t\t\t\tif self.toggle[column][row] == 0:\n\t\t\t\t\t\t\t\tclicked.color= grey\n\t\t\t\t\t\t\t\tself.toggle[column][row] = 1\n\t\t\t\t\t\t\telse: #toggle[column][row] == 1:\n\t\t\t\t\t\t\t\tclicked.color= dark_grey\n\t\t\t\t\t\t\t\tself.toggle[column][row] = 0",
"def image_tree_button_press_event(self, treeview, event, data=None):\n #show pop-up menu\n if (event.button == 3): #right mouse click\n x = int(event.x)\n y = int(event.y)\n time = event.time\n pthinfo = treeview.get_path_at_pos(x, y)\n if pthinfo is not None:\n path, col, cellx, celly = pthinfo\n treeview.grab_focus()\n treeview.set_cursor(path, col, 0)\n self.image_tree_menu.popup(None, None, None, event.button, time)\n return 1",
"def LeftClick(self):\n self._PressLeftButton()\n self._ReleaseAllButtons()",
"def mouse_left_down(self):\n pass",
"def on_cell_clicked(self, modelIndex):\n self.catalogue_map.select([self.catalogue_model.event_at(modelIndex)])",
"def on_right_click(self, client, game) -> None:\n pass"
]
| [
"0.68467045",
"0.6845051",
"0.67083263",
"0.66543925",
"0.66423655",
"0.6475575",
"0.6378554",
"0.6321787",
"0.62958163",
"0.6274282",
"0.6253774",
"0.6249839",
"0.6205736",
"0.6193917",
"0.61531395",
"0.61405647",
"0.61154956",
"0.610005",
"0.6096469",
"0.6071234",
"0.6050459",
"0.600813",
"0.6003617",
"0.60005885",
"0.59989405",
"0.59759325",
"0.5966998",
"0.59625787",
"0.5956109",
"0.59549475"
]
| 0.70425814 | 0 |
This function handles the right-click action on a grid cell. | def handle_right_click(self):
if not self.game_in_progress:
return
if self.first_click:
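            # First click of the game: start the clock, ticking once per second.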
self.first_click = False
self.timer.start(1000)
sender = self.sender()
row = 0
col = 0
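        # Locate the (row, col) of the button that emitted the signal; the for-else/break idiom exits both loops once it is found.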
for row in range(self.rows):
for col in range(self.cols):
if self.button_array[row][col] == sender:
break
else:
continue
break
# print 'Received right click:', row, ',', col
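        # Right-clicking cycles a cell: Closed -> Flag -> Question mark -> Closed; the remaining-mine counter tracks flags.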
status = self.board.getcellstatus(row, col)
if status == CellStatus.Opened:
return
elif status == CellStatus.Closed:
self.remainingminecount = self.remainingminecount - 1
self.mines_lcd.display(str(self.remainingminecount))
self.board.setcellstatus(row, col, CellStatus.MarkedAsMine)
self.button_array[row][col].setIcon(QtGui.QIcon("icons/Flag.png"))
elif status == CellStatus.MarkedAsMine:
self.remainingminecount = self.remainingminecount + 1
self.mines_lcd.display(str(self.remainingminecount))
self.board.setcellstatus(row, col, CellStatus.MarkedAsSuspectedMine)
self.button_array[row][col].setIcon(QtGui.QIcon("icons/questionmark.png"))
elif status == CellStatus.MarkedAsSuspectedMine:
self.board.setcellstatus(row, col, CellStatus.Closed)
self.button_array[row][col].setIcon(QtGui.QIcon("icons/unopenedsquare.png")) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _handle_right_click(self, pixel):\n position = self.pixel_to_position(pixel)\n index = self.position_to_index(position, self._grid_size)\n\n self._board.flag_cell(index)\n self.draw_board(self._board)",
"def OnLabelRightClick(self, evt):\n \n self.actRow = evt.Row\n self.actCol = evt.Col\n \n if evt.Row<0 and evt.Col>=0: #right click on column label\n\n menu = wx.Menu()\n \n miX = menu.Append(self.ID_popup_Column_SetX,\n \"Set this Column as X\",\n kind = wx.ITEM_CHECK)\n if self.Table.colhasmark(evt.Col, 'X'):\n miX.Check()\n\n miY1 = menu.Append(self.ID_popup_Column_SetY1,\n \"Set this Column as Y1\",\n kind = wx.ITEM_CHECK)\n if self.Table.colhasmark(evt.Col, 'Y1'):\n miY1.Check()\n\n miY2 = menu.Append(self.ID_popup_Column_SetY2,\n \"Set this Column as Y2\",\n kind = wx.ITEM_CHECK)\n if self.Table.colhasmark(evt.Col, 'Y2'):\n miY2.Check()\n\n miG = menu.Append(self.ID_popup_Column_SetG,\n \"Set this Column as Group By\",\n kind = wx.ITEM_CHECK)\n if self.Table.colhasmark(evt.Col, 'G'):\n miG.Check()\n\n if self.Table.colsel[evt.Col] in self.Table.dynamic_cols:\n menu.Append(self.ID_popup_Column_SetExpression,\n \"Set expression ...\")\n menu.Append(self.ID_popup_Column_Recalculate,\n \"Recalculate all values\")\n \n menu.Append(self.ID_popup_Select_Columns,\n \"Display Columns ...\")\n menu.Append(self.ID_popup_Set_Column_Label,\n \"Set Column Label ...\")\n\n self.PopupMenu(menu)\n menu.Destroy()\n \n\n elif evt.Col<0 and evt.Row>=0: #right click on row label\n menu = wx.Menu()\n \n miM = menu.Append(self.ID_popup_MaskRow,\n \"Mask Row\",\n kind = wx.ITEM_CHECK)\n if self.Table.rowmask[evt.Row]:\n miM.Check()\n\n if self.Table.GetValueNamed(evt.Row, 'Filename'):\n menu.Append(self.ID_popup_ReloadRow, 'Reload image')\n \n if self.IsSelection():\n menu.Append(self.ID_popup_MaskSelection, \"Mask Selection\")\n menu.Append(self.ID_popup_UnmaskSelection, \"Unmask Selection\")\n menu.Append(self.ID_popup_OmitSelection, \"Omit Selection\")\n\n self.actRowSelection = self.GetSelectedRows()\n \n self.PopupMenu(menu)\n menu.Destroy()\n \n evt.Skip()",
"def right_click(self, event):\n if self['text'] == '*' and self.text == '*': # player right-clicks again\n self.auto_expose()\n self.parentGrid.update_number(True)\n else: # first right-click\n self['text'] = '*'\n self['fg'] = 'black'\n self.parentGrid.update_number()",
"def right_click(self, *args):\n return _ida_hexrays.Hexrays_Hooks_right_click(self, *args)",
"def on_right_click(self, client, game) -> None:\n pass",
"def _right_click(self, event):\n\n position = event.x, event.y\n cell_position = self._game.grid.pixel_to_cell(position)\n\n removed_tower = self._game.remove(cell_position)\n self._coins += removed_tower.get_value() * 0.8\n\n #updates coins string var to display coins\n self._status_bar.set_coins(self._coins)\n\n #update availability for tower views\n for tower, view in self._tower_views:\n if self._coins < tower.get_value():\n view.set_available(False)\n else: \n view.set_available(True)",
"def mouse_right_down(self):\n pass",
"def _right_click(self, event):\n if self.disabled is False:\n self.menu.tk_popup(event.x_root, event.y_root)",
"def clickCell(self, row, col):\n self.clicked[row, col] = 1",
"def right_click(self, event):\n\n super().right_click(event)\n self.popup_menu.add_command(label=\"Edit..\", command=self.edit)\n\n self.popup_menu.tk_popup(event.x_root, event.y_root, 0)",
"def right_click(self, event):\n\n super().right_click(event)\n self.popup_menu.add_command(label=\"Edit..\", command=self.edit)\n\n self.popup_menu.tk_popup(event.x_root, event.y_root, 0)",
"def RightClick(self):\n self._PressRightButton()\n self._ReleaseAllButtons()",
"def image_tree_button_press_event(self, treeview, event, data=None):\n #show pop-up menu\n if (event.button == 3): #right mouse click\n x = int(event.x)\n y = int(event.y)\n time = event.time\n pthinfo = treeview.get_path_at_pos(x, y)\n if pthinfo is not None:\n path, col, cellx, celly = pthinfo\n treeview.grab_focus()\n treeview.set_cursor(path, col, 0)\n self.image_tree_menu.popup(None, None, None, event.button, time)\n return 1",
"def rightClick(self):\n cmdId = self.executeCommand(Command.CLICK, {'button': 2})\n return cmdId",
"def right_click(coords=(0, 0)):\n _perform_click_input(button='right', coords=coords)",
"def on_mouse_press(self, x, y, button, modifiers):\n\n # Change the x/y screen coordinates to grid coordinates\n column = int(x // (WIDTH + MARGIN))\n row = int(y // (HEIGHT + MARGIN))\n\n # print(f\"Click coordinates: ({x}, {y}). Grid coordinates: ({row}, {column})\")\n\n # Make sure we are on-grid. It is possible to click in the upper right\n # corner in the margin and go to a grid location that doesn't exist\n # AKA: make sure you are clicking w/in the grid - TH\n if row < ROW_COUNT and column < COLUMN_COUNT:\n # Flip the location between 1 and 0.\n # this will reset value for the recreate grid\n # and change the color - TH\n # if self.grid[row][column] == 0:\n # self.grid[row][column] = self.num_key\n # else:\n # self.grid[row][column] = 0\n self.current_selected = (row, column)\n\n self.recreate_grid()",
"def rightButtonDown(self):\n\t\tautopy.mouse.toggle(True,autopy.mouse.RIGHT_BUTTON)",
"def handle_right_click(self, event):\n c=self.seqframe\n if 'textlabel' in c.gettags(CURRENT):\n self.currobjs = c.find_withtag(CURRENT)\n return",
"def right_mouse_down_handler(self, event):\r\n\r\n self.is_right_mouse_down = True\r\n\r\n tile = self.board.get_event_tile(event.pos)\r\n if not self.is_new_game and not self.is_game_over and tile is not None:\r\n if not self.is_left_mouse_down:\r\n change_in_unflagged_mines = tile.toggle_flag()\r\n self.mine_counter.update(change_in_unflagged_mines)\r\n self.board.update_tile_hover(tile, self.is_left_mouse_down, self.is_right_mouse_down)",
"def onRightClick(self, event): \n\t\tpt = event.GetPosition()\n\t\titem, flags = self.tree.HitTest(pt)\n\t\tif not item:\n\t\t\tLogging.info(\"No item to select\", kw = \"ui\")\n\t\t\treturn\n\t\tself.tree.SelectItem(item)\n\t\tself.selectedItem = item\n\t\tself.PopupMenu(self.menu, event.GetPosition())",
"def _param_right_click(self,event,param_name):\n self._update_dynamic_menu_entry(param_name)\n self.popup_menu.tk_popup(event.x_root,event.y_root)",
"def _right_click(self, event, widget):\n self._currently_selected_widget = widget\n\n # need an actual mechanism for populating the menu, rather than this!!\n ### copied from edit_PO_in_currently...\n param_name = None\n for name,representation in self.representations.items():\n if self._currently_selected_widget is representation['widget']:\n param_name=name\n break\n # CEBALERT: should have used get_parameter_value(param_name)?\n PO_to_edit = self._string2object(param_name,self._tkvars[param_name].get())\n ###\n\n if hasattr(PO_to_edit,'params'):\n self.menu.tk_popup(event.x_root, event.y_root)",
"def onRightDown(self, event):\n\n pass",
"def ev_mousebuttondown(self, event):\n if self.engine.game_map.in_bounds(*event.tile):\n if event.button == 1:\n return self.on_index_selected(*event.tile)\n return super().ev_mousebuttondown(event)",
"def _onCellClicked(self, widget):\n\n if not orca_state.activeScript:\n return\n\n fakeKeyPress = {}\n fakeKeyPress['command'] = brlapi.KEY_CMD_ROUTE\n fakeKeyPress['argument'] = self._position\n event = BrailleEvent(fakeKeyPress)\n orca_state.activeScript.processRoutingKey(event)",
"def cell_clicked(self,i,j):\n return lambda:self.cell_test(i,j)",
"def right_mouse_up_handler(self, event):\r\n\r\n self.is_right_mouse_down = False\r\n\r\n if self.is_left_mouse_down:\r\n self.shortcut_click(event)\r\n\r\n tile = self.board.get_event_tile(event.pos)\r\n if not self.is_game_over and tile is not None:\r\n self.board.update_tile_hover(tile, self.is_left_mouse_down, self.is_right_mouse_down)",
"def onCellClicked(self, row, column):\n # Any cell click is always in column 0\n column = 0\n self.wparm = self.ui_FileList.cellWidget(row, column)\n if self.wparm is not None:\n if \"text\" in self.wparm.img:\n self.ui_SelectedName.setText(self.wparm.text)\n if self.show_dirs_only and \"folder\" in self.wparm.img:\n self.ui_SelectedName.setText(self.wparm.text)",
"def __on_click(self, evt):\n if evt.button() == Qt.LeftButton:\n return self._on_left_click(evt)\n if evt.button() == Qt.RightButton:\n return self._on_right_click(evt)",
"def on_tableWidget_Trade_Args_cellClicked(self, row, column):\n # TODO: not implemented yet\n raise NotImplementedError"
]
| [
"0.7525203",
"0.6962284",
"0.6924462",
"0.6863856",
"0.68567437",
"0.6788664",
"0.6637235",
"0.66249645",
"0.65822953",
"0.65635735",
"0.65635735",
"0.65568894",
"0.65074617",
"0.64916843",
"0.6483928",
"0.6479727",
"0.6405769",
"0.6323705",
"0.6301398",
"0.6299117",
"0.62538546",
"0.6232644",
"0.6216099",
"0.6191822",
"0.61693937",
"0.61680925",
"0.6166735",
"0.6146281",
"0.6117477",
"0.60993946"
]
| 0.7056489 | 1 |
This function displays help about the game by popping up a message box to the user. | def game_help(self):
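        # Present the rules and hints as rich (HTML) text in a Qt message box.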
QtGui.QMessageBox.about(self, "How to Play game",
"<b>How to Play</b><br>"
"The rules in Minesweeper are simple:<br><br>"
"<b>1.</b> Uncover a mine and that's end of game <br>"
"<b>2.</b> Uncover empty cell and "
"it opens surrounding empty cells too<br>"
"<b>3.</b> Uncover a number "
"and it tells you how many mines are hidden in"
"surrounding 8 cells.<br>"
"<b>4.</b> Use this information to "
"deduce which squares are safe to click.<br>"
"<b>5.</b> Uncover all cells and "
"mark cells with mine to win the game <br><br>"
"<b>Hints</b> <br>"
"<b>1.Mark as Mine </b> <br>"
" If you suspect that cell as mine, "
"right click twice to put a question mark.<br>"
"<b>2.Study surrounding cells </b><br>"
" Study all neighbour cells before opening any cell"
"to make sure whether its mine or not.<br><br>"
"Enjoy the game :) <br>") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_help():\n messagebox.showinfo(title='How to Use', message=\"It's really easy.\")",
"def help():\n print \"Help comes to those who ask\"",
"def helpHelp(self):\r\n QtGui.QMessageBox.about(self, \"Help me!\",\"\"\"\r\n <p> Program sucks and you need help?\r\n <p>Email: \r\n <p><b>[email protected]</b>\r\n <p>Or visit him in Room 230U!\r\n \"\"\")",
"def displayHelpMessage(self):\n if self.dialogBox == None:\n if len(self.help) > 0:\n message = self.help.pop()\n if 'SCANNING RESEARCH' in message:\n color = ['cyan']\n elif 'SCANNING INDUSTRY' in message:\n color = ['orange']\n elif 'SCANNING MILITARY' in message:\n color = ['red']\n self.createDialogBox(x=-0.1,y=0.7,texts=[message],textColors=color)",
"def helpMsg():\n for i in range(max_y):\n screen.addch(\"=\")\n screen.addstr(\"\\n\\n\")\n screen.addstr(\"Start the server type: !start\\n\")\n screen.addstr(\"Stop the server: !stop\\n\")\n screen.addstr(\"Send message to all rooms: !broadcast <msg>\\n\")\n screen.addstr(\"Reload the server: !reload\\n\")\n screen.addstr(\"Display this message: !help\\n\")\n screen.addstr(\"\\n\")\n \n \n screen.refresh()\n for i in range(max_y):\n screen.addch(\"=\")\n \n screen.refresh()\n screen.addstr(\"\\n\\n\")",
"def help_general(game):\n game.window.clear()\n\n game.window.addstr(1, 1, \"Use vim keys or arrows for movement\")\n\n game.window.addstr(12, 1, \"move towards enemies to attack them\")\n\n game.window.addstr(14, 1, \"press e to enter inventory view\")\n game.window.addstr(15, 1, \"press ? for help\")\n game.window.addstr(15, 1, \"press q to quit\")\n\n game.window.getch()",
"def help(self):\n print(\"GAME HELP\")\n print(\"Command\\t\\t\\t\\tDescription\\n\")\n print(\"{0} <{1}>:\\t\\t\\t{2}\".format(GO, \"direction\", \"Move through an exit.\"))\n print(\"{0} <{1}>:\\t\\t{2}\".format(GO, \"exit description\", \"Move through an exit.\"))\n print(\"{0} <{1}>:\\t\\t\\t{2}\".format(TAKE, \"item\", \"Take an item.\"))\n print(\"{0} <{1}>:\\t\\t\\t{2}\".format(DROP, \"item\", \"Drop an item.\"))\n print(\"{0} <{1}>:\\t\\t{2}\".format(TALK, \"character\", \"Talk to a character.\"))\n print(\"{0}:\\t\\t\\t\\t{1}\".format(LOOK, \"Print the current space description again.\"))\n print(\"{0}:\\t\\t\\t{1}\".format(SAVEGAME, \"Save your current game.\"))\n print(\"{0}:\\t\\t\\t\\t{1}\".format(QUIT, \"Quit the game.\"))\n print(\"{0} <{1}>:\\t\\t\\t{2}\".format(LOOK_AT, \"item\", \"Look more closely at an item.\"))\n print(\"{0}:\\t\\t\\t\\t{1}\".format(LISTEN, \"Listen more closely to the sounds around you.\"))\n print(\"{0} <{1}>:\\t\\t\\t{2}\".format(PULL, \"item\", \"Pull an item.\"))\n print(\"{0} <{1}>:\\t\\t\\t{2}\".format(PUSH, \"item\", \"Push an item.\"))\n print(\"{0}:\\t\\t\\t{1}\".format(CHARGE, \"Charge your batteries in a charger.\"))\n print(\"{0} <{1}>:\\t\\t\\t{2}\".format(USE, \"item\", \"Use an item you are carrying.\"))\n print(\"{0}:\\t\\t\\t\\t{1}\".format(WAIT, \"Wait for something to happen.\"))\n print(\"{0}:\\t\\t\\t\\t{1}\".format(HELP, \"Print this help message.\"))\n print(\"{0}:\\t\\t\\t{1}\".format(INVENTORY, \"Print the items you are currently carrying.\"))\n print(\"{0}:\\t\\t\\t{1}\".format(LOADGAME, \"Load a previously saved game.\"))",
"def OnHelp(self, event):\r\n d = wx.MessageDialog(self, \"... ... ... ... ... ... ... ... ...\", \"No help for you!\", wx.OK)\r\n d.ShowModal()\r\n d.Destroy()",
"def help_menu():\n print('\\n##################################################')\n print('################ Help Menu ###############') \n print('##################################################')\n print(' Type move or examine for each turn') \n print(' If moving, type up, down, left, or right')\n print(' If examining, you may need to answer yes or no')\n print('##################################################\\n')\n title_screen_selections()",
"def help():\n print(UI.HELP)",
"def about(self):\n QtGui.QMessageBox.about(self, \"About Menu\",\n \"MineSweeper 1.0 \\n\"\n \"This is python implementation of famous Minesweeper Game \\n\\n\"\n \"For Source code, check following link:\\n\"\n \"https://github.com/maulik-vaghela/MineSweeper\\n\\n\"\n \"Enjoy the game :) \\n\")",
"def help(self):\n self.openCam.close()\n msgbox = QtWidgets.QMessageBox()\n msgbox.setWindowTitle(\"Help !!\")\n msgbox.setText(\n \"Moildev-Apps\\n\\n\"\n \"Moildev-Apps is software to process fisheye \"\n \"image with the result panorama view and Anypoint\"\n \" view. \\n\\nThe panoramic view may present a horizontal\"\n \"view in a specific immersed environment to meet the\"\n \"common human visual perception, while the Anypoint\"\n \"view is an image that has been undistorted in a certain\"\n \"area according to the input coordinates.\"\n \"\\n\\nMore reference about Moildev, contact us\\n\\n\")\n msgbox.setIconPixmap(QtGui.QPixmap('images/moildev.png'))\n msgbox.exec()",
"def display_help_screen():\r\n\tsys.exit(0)",
"def print_help():\n\tprint(\"Help text\")",
"def help_command(update, context):\n update.message.reply_text('Let me help you. \\r\\n /help print this help \\r\\n /safety prints safety instructions \\r\\n /play start the game\\r\\n /joingroup Join CTF tg group')",
"def show_general_help(self):\n QMessageBox.question(self, 'General help', get_general_help(), QMessageBox.Ok | QMessageBox.NoButton)",
"def aboutmenu(self):\n tkMessageBox.showinfo(\"About This Program\", \"The project of PSIT subject in 2014.\\nThis program is unit converter program.\")",
"def help():",
"def welcome():\r\n e.msgbox(\"So, you want to run a marathon? No problem! Just follow the \"\r\n \"instructions to generate a perfect training plan. You \"\r\n \"may click 'cancel' in any entry box to exit the program.\")",
"def show_about():\n messagebox.showinfo(\n title='About', message=\"PyLNP - Lazy Newb Pack Python Edition\\n\\n\"\n \"Port by Pidgeot\\n\\nOriginal program: LucasUP, TolyK/aTolyK\")",
"def draw_help(self):\n gameDisplay.fill((50, 50, 50))\n font1 = pygame.font.SysFont(\"courier\", 24)\n font2 = pygame.font.SysFont(\"serif\", 24)\n data = []\n data.append([\"F1\", \"Show Help\"])\n data.append([\"R\", \"Restart\"])\n data.append([\"P\", \"Pause/Play\"])\n data.append([\"Num+\", \"More points\"])\n data.append([\"Num-\", \"Less points\"])\n data.append([\"\", \"\"])\n data.append([str(steps), \"Current points\"])\n\n pygame.draw.lines(gameDisplay, (255, 50, 50, 255), True, [\n (0, 0), (800, 0), (800, 600), (0, 600)], 5)\n for i, text in enumerate(data):\n gameDisplay.blit(font1.render(\n text[0], True, (128, 128, 255)), (100, 100 + 30 * i))\n gameDisplay.blit(font2.render(\n text[1], True, (128, 128, 255)), (200, 100 + 30 * i))",
"def show_help(self):\n\n message = QMessageBox()\n message.setWindowTitle(\"Help\")\n message.setMinimumHeight(1000)\n message.setMinimumWidth(1000)\n\n message.setText(\"1) How to annotate?\\n\"\n \"Move the mouse up and down inside the doted rectangle.\\n\\n\\n\"\n\n \"2) Why is 'wide mode' inactivated?\\n\"\n \"Wide mode and record mode are not allowed to work together.\\n\"\n \"Make sure to exit record mode to access wide mode. \\n\\n\\n\"\n\n \"3) Mouse shortcuts (outside the diagram widget):\\n\\n\"\n \"\\t Right click\\tPlay/pause\\n\"\n \"\\t Scroll\\t\\tFast forward/ backward\\n\"\n \"\\t Dubble click\\tSave\\n\"\n \"\\t Wheel click\\tToggle record mode\\n\\n\\n\"\n\n \"4) Keyboard shortcuts:\\n\\n\"\n \"\\t CTRL+S\\t\\tSave\\n\"\n \"\\t CTRL+O\\t\\tOpen video\\n\"\n \"\\t CTRL+I\\t\\tOpen annotation\\n\"\n \"\\t CTRL+N\\t\\tNew file\\n\"\n \"\\t CTRL+C\\t\\tClear annotation\\n\"\n \"\\t CTRL+Q\\t\\tQuit\\n\"\n \"\\t CTRL+H\\t\\tHelp\\n\\n\"\n \"\\t S\\t\\tPlay/ stop\\n\"\n \"\\t Z\\t\\tFast bakward 50 ms\\n\"\n \"\\t C\\t\\tFast forward 50 ms\\n\"\n \"\\t A\\t\\tFast bakward 200 ms\\n\"\n \"\\t D\\t\\tFast forward 200 ms\\n\"\n \"\\t Q\\t\\tFast bakward 5 s\\n\"\n \"\\t E\\t\\tFast forward 5 s\\n\"\n \"\\t R\\t\\tToggle record mode\\n\\n\"\n \"\\t 1\\t\\tPlayback rate: 0.5\\n\"\n \"\\t 2\\t\\tPlayback rate: 0.75\\n\"\n \"\\t 3\\t\\tPlayback rate: 1\\n\"\n \"\\t 4\\t\\tPlayback rate: 1.25\\n\"\n \"\\t 5\\t\\tPlayback rate: 1.5\\n\"\n \"\\t 6\\t\\tPlayback rate: 1.75\\n\")\n\n x = message.exec_() # this will show our messagebox",
"def help(self, update, context):\n\n message = \"Do you need help \\n Help menu shows here🤞\"\n update.message.reply_text(message)",
"def OnAbout(self, event):\r\n d = wx.MessageDialog(self, \"This program was created by REAL PANDAS (Adam Sorrin '10, Lauren DiCristofaro '10, Norris Xu '11, Mark Broomfield '11, Sally Tao '10\", \"About\", wx.OK)\r\n d.ShowModal()\r\n d.Destroy()",
"def Help(self, event):\n Help(self)",
"def help():\n \n pass",
"def help(self, msg, status, desc):\n\n msg.Chat.SendMessage(HELP_TEXT)",
"def show_about():\r\n\tmsg = messagebox\r\n\tmsg.showinfo(\"\", '''Creator: Ellis, Kevin\r\nOrganization: n/a\r\nDescription: Retrieve the network information from a database\r\nDate: 2020208\r\nVersion: 1.4''')",
"def help(update, context):\n update.message.reply_text(\"Ayudame!\")",
"def _show_help(self):\n QMessageBox.information(\n self, 'Help',\n \"Help:\\n\"\n \"'+'/'-': zoom\\nleft/right arrow: left/right\\n\"\n \"up/down arrow: superior/inferior\\n\"\n \"left angle bracket/right angle bracket: anterior/posterior\")"
]
| [
"0.789727",
"0.7568061",
"0.7517027",
"0.7386369",
"0.7310226",
"0.7303696",
"0.72714776",
"0.71972084",
"0.71936107",
"0.71525615",
"0.7152087",
"0.7150587",
"0.7049491",
"0.7043867",
"0.7041325",
"0.7036719",
"0.70153403",
"0.70116055",
"0.69836533",
"0.69385743",
"0.6901193",
"0.6889994",
"0.6880487",
"0.6869124",
"0.68646634",
"0.68416953",
"0.6832415",
"0.6826309",
"0.6816433",
"0.67922753"
]
| 0.87623113 | 0 |
This function changes the game level. When the user selects a new level from the File menu, it changes the height and width of the grid. | def change_game_level(self, change_level):
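        # Persist the selected difficulty to Level.txt so it is restored on the next launch.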
global CURRENT_GAME_LEVEL
file_object = open("Level.txt", "w")
file_object.write(str(change_level))
file_object.close()
CURRENT_GAME_LEVEL = change_level
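        # Map the chosen difficulty to grid dimensions and mine count, then rebuild the UI.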
if change_level == DifficultyLevel.BeginnerLevel:
grid_length = GridSize.BeginnerLength
grid_width = GridSize.BeginnerWidth
minecount = 10
elif change_level == DifficultyLevel.IntermediateLevel:
grid_length = GridSize.IntermediateLength
grid_width = GridSize.IntermediateWidth
minecount = 40
elif change_level == DifficultyLevel.ExpertLevel:
grid_length = GridSize.ExpertLength
grid_width = GridSize.ExpertWidth
minecount = 99
self.close()
self.__init__(grid_length, grid_width, minecount) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setupLevel(self):\n\n self.state = GameState.SETUP\n\n # vado a leggere il dizionario corrispondente\n # al numero di livello corrente facendo in modo\n # che se il numero di livello richiesto non esiste\n # carico quello più vicino a quello richiesto\n if self.levelIndex>= len(levels):\n self.levelIndex = len(levels) -1\n elif self.levelIndex <0:\n self.levelIndex = 0\n\n level = levels[self.levelIndex]\n\n # nome del livello\n self.level_name = level.get(\"name\", \"Livello %s\" % (self.levelIndex+1))\n\n # dimensione del labirinto (numero di righe e di colonne)\n self.nrows = level.get(\"nrows\", 20)\n self.ncols = level.get(\"ncols\", 20)\n\n # l'algoritmo di generazione del labirinto supporta solo un numero di\n # righe e di colonne dispari, quindi approssimiamo le dimensioni ai\n # valori dispari più vicini\n if self.nrows % 2 == 0:\n self.nrows+=1\n if self.ncols % 2 == 0:\n self.ncols+=1\n\n\n # fattore di scala del labirinto\n # attenzione che, fattori di scala molto\n # grandi, rallentano le prestazioni di gioco\n self.scale = level.get(\"scale\", 30)\n\n background_image_filename = level.get(\"background_image\", None)\n if background_image_filename!=None:\n self.background_image = pygame.image.load(background_image_filename).convert()\n else:\n self.background_image = None\n\n # parametri usati dall'algoritmo di generazione del labirinto\n # si veda https://en.wikipedia.org/wiki/Maze_generation_algorithm\n self.maze_density = level.get(\"maze_density\", Game.MAZE_DENSITY)\n self.maze_complexity = level.get(\"maze_complexity\", Game.MAZE_COMPLEXITY)\n\n # colore delle monete\n self.coin_color = level.get(\"coin_color\", Game.YELLOW)\n\n # tempo a disposizione per completare il livello\n self.time = level.get(\"time\", 240)\n self.clockTime = level.get(\"clock\", 80)\n\n # numero di nemici\n self.numEnemies = level.get(\"num_enemies\", 0)\n\n # numero di ricaricatori temporali\n self.numTimeReloaders = level.get(\"time_reloaders\", 0)\n\n # numero di bombe \"distruggi monete\"\n self.bonus_bombs = level.get(\"bombs\", [])\n # numero di bombe \"distruggi muri\"\n self.bonus_wall_bombs = level.get(\"wall_bombs\", [])\n # numero di bombe \"distruggi nemici\"\n self.bonus_enemy_killers = level.get(\"enemy_killers\", [])\n # numero di pizze che rendono i nemici golosi di monete\n self.bonus_greedy_enemies = level.get(\"greedy_enemies\", 0)\n # numero di portali (teletrasporto del giocatore)\n self.bonus_portals = level.get(\"portals\", 0)\n\n # proiettili a disposizione del giocatore per un certo periodo di tempo\n self.bonus_player_bullets = level.get(\"player_bullets\", [])\n\n #numero di bonus che rendono il giocatore invisibile per un certo periodo di tempo\n self.bonus_invisibility_players = level.get(\"invisibility_players\", [])\n\n # numero di shooters (nemici che sparano contro il giocatore)\n self.numShooters = level.get(\"num_shooters\" , [])\n\n\n # suoni di collisione\n self.sound_explosion = pygame.mixer.Sound(\"Effects/smc-wwvi/big_explosion.ogg\")\n self.sound_bomb_explosion = pygame.mixer.Sound(\"Effects/smc-wwvi/bombexplosion.ogg\")\n\n\n # suono della moneta raccolta\n #self.sound_coin = pygame.mixer.Sound(\"Effects/SFX/beep_7.wav\")\n self.sound_coin = pygame.mixer.Sound(\"Effects/jute-dh/gold.wav\")\n\n # suono del timeReloader\n self.sound_time_reloader = pygame.mixer.Sound(\"Effects/SFX/echo_5.wav\")\n\n # suono di collisione con enemy killer\n self.sound_enemy_killer = pygame.mixer.Sound(\"Effects/smc-wwvi/big_explosion.ogg\")\n\n # suono dell'invisibility 
player\n self.sound_invisibility_player = pygame.mixer.Sound(\"Effects/sound_effects/trekscan.wav\")\n\n # suono del teletrasporto\n self.sound_portal = pygame.mixer.Sound(\"Effects/sound_effects/trekscan.wav\")\n\n # suono dell'arma presa e del proiettile sparato\n self.sound_weapon = pygame.mixer.Sound(\"Effects/jute-dh/hit_2m.wav\")\n\n # suono dei greedy enemies\n self.sound_greedy_enemies = pygame.mixer.Sound(\"Effects/sound_effects/squeak2.wav\")\n\n # suono del levello completato\n self.sound_completed_level = pygame.mixer.Sound(\"Effects/sound_effects/level_completed.wav\")\n\n #\n # IMMAGINI DEGLI SPRITE DI GIOCO: CONFIGURABILE DA FILE DI CONFIGURAZIONE!!\n #\n\n # immagine delle pareti del labirinto\n self.wall_filename = level.get(\"wall\", \"Backgrounds/Dim/Boards.jpg\")\n\n # immagine dei nemici del labirinto\n self.enemies_filename = level.get(\"enemies\", \"Sprites/Animals/duck.png\")\n\n # immagine dei nemici del labirinto che possono anche sparare\n # di default gli shooters hanno lo stesso aspetto dei nemici normali\n self.shooters_filename = level.get(\"shooters\", self.enemies_filename)\n\n # immagine della bomba distruggi monete\n self.bomb_filename = level.get(\"bomb\", \"Sprites/bomb_bonus.png\")\n # immagine della bomba distruggi muri\n self.wall_bomb_filename = level.get(\"wall_bomb\", \"Sprites/bomb_wall_bonus.png\")\n\n self.time_reloaders_filename = level.get(\"time_reloader\", \"Sprites/clessidra.png\")\n self.enemy_killers_filename = level.get(\"enemy_killer\", \"Sprites/skull2.png\")\n self.greedy_enemies_filename = level.get(\"greedy_enemy\", \"Sprites/pizza.png\")\n self.portals_filename = level.get(\"portal\", \"Sprites/CrawlStone/portal.png\")\n self.invisibility_players_filename = level.get(\"invisibility_player\", \"Sprites/CrawlStone/wizard_hat_2.png\")\n\n # lo sprite che fornisce i proiettili ha la stessa immagine dei proiettili\n self.player_bullets_filename = level.get(\"player_bullet\", \"Sprites/CrawlStone/apple.png\")\n self.bonus_player_bullets_filename = self.player_bullets_filename\n\n self.shooters_bullets_filename = level.get(\"shooter_bullet\", \"Sprites/CrawlStone/apple.png\")\n\n #\n # GRUPPI DI SPRITES\n #\n\n # i muri del mio labirinto\n self.walls = pygame.sprite.Group()\n\n # i nemici\n self.enemies = pygame.sprite.Group()\n\n # i nemici che sparano fanno parte dello stesso gruppo dei nemici!\n #self.shooters = pygame.sprite.Group()\n\n # le bombe\n self.bombs = pygame.sprite.Group()\n\n # gli attivatori/disattivatori di nemici golosi\n self.greedyEnemies = pygame.sprite.Group()\n\n # le bombe che spaccano i muri\n self.wallBombs = pygame.sprite.Group()\n\n # i ricaritori temporali\n self.timeReloaders = pygame.sprite.Group()\n\n # le monete da raccogliere\n self.coins = pygame.sprite.Group()\n\n # i killer dei nemici\n self.enemyKillers = pygame.sprite.Group()\n\n # i portali per spostarsi in nuove aree\n self.portals = pygame.sprite.Group()\n\n # i nemici che rendono invisibile il giocatore\n self.invisibilityPlayers = pygame.sprite.Group()\n\n # i proiettili sparati dal giocatore\n self.playerBullets = pygame.sprite.Group()\n\n # i proiettili sparati dagli shooters\n self.shooterBullets = pygame.sprite.Group()\n\n # il bonus che fornisce proiettili sparati dal giocatore\n self.bonusPlayerBullets = pygame.sprite.Group()\n\n\n self.free_locations = []\n\n # genero il labirinto che prescinde dai fattori di scala\n self.maze = self.generate_maze()\n #print(self.maze)\n\n # il giocatore e i nemici hanno una dimensione che dipende dal 
fattore di scala\n self.player = pygame.sprite.GroupSingle(Player(int(self.scale * 0.8), int(self.scale * 0.8),\n self.scale, 1,\n \"Sprites/pac-classic/ghost-red-front.png\",\n )\n )\n self.player.sprite.setWalls(self.walls)\n # imposto le immagini del giocatore sulla base della posizione\n # l'ordine è UP, DOWN , RIGHT, LEFT\n\n self.player.sprite.setImages([\n [\"Sprites/pac-classic/ghost-red-rear.png\",\n \"Sprites/pac-classic/ghost-red-front.png\",\n \"Sprites/pac-classic/ghost-red-right.png\",\n \"Sprites/pac-classic/ghost-red-left.png\",\n ],\n\n [\"Sprites/pac-classic/ghost-orange-rear.png\",\n \"Sprites/pac-classic/ghost-orange-front.png\",\n \"Sprites/pac-classic/ghost-orange-right.png\",\n \"Sprites/pac-classic/ghost-orange-left.png\",\n ],\n\n [\"Sprites/pac-classic/ghost-lblue-rear.png\",\n \"Sprites/pac-classic/ghost-lblue-front.png\",\n \"Sprites/pac-classic/ghost-lblue-right.png\",\n \"Sprites/pac-classic/ghost-lblue-left.png\",\n ],\n\n ]\n )\n\n\n\n\n #\n # CREAZIONE DEGLI SPRITES\n #\n\n # CREO I MIEI NEMICI\n self.createEnemies(self.numEnemies,self.enemies_filename,self.enemies)\n\n # CREO I MIEI NEMICI CHE SPARANO che aggiungo allo stesso gruppo dei nemici!\n self.createShooters(self.numShooters, self.shooters_filename, self.shooters_bullets_filename,self.shooterBullets,\n self.sound_weapon, self.enemies)\n\n # CREO LE BOMBE che sono ObjectDestroyer che distruggono le monete\n self.createObjectDestroyers(self.bonus_bombs,self.bomb_filename,self.bombs, self.coins)\n\n\n # CREO LE WALL BOMBS che sono WallDestroyer che consentono di distruggere i muri\n # interni del labirinto\n self.createInnerObjectDestroyers(self.ncols, self.nrows,self.bonus_wall_bombs,\n self.wall_bomb_filename,self.wallBombs,self.walls)\n # CREO GLI ENEMY KILLERS che sono ObjectDestroyer che consentono di eliminare i nemici\n self.createObjectDestroyers(self.bonus_enemy_killers, self.enemy_killers_filename, self.enemyKillers, self.enemies)\n\n # Creo GREEDY_ENEMIES come ENEMY che consentono di rendere, alternativamente, i nemici golosi di monete oppure no\n self.createEnemies(self.bonus_greedy_enemies, self.greedy_enemies_filename, self.greedyEnemies)\n\n # Alternativamente potrei creare GREED ENEMIES come ObjectDestroyer che in realtà non distruggono niente, ma rendono \"golosi\"\n # i nemici che stanno intorno a loro in modo che inizino a mangiare monete. Se stanno già mangiando\n # monete, al contrario, dovrebbero smettere. 
CHIEDERLO COME ESERCIZIO\n\n # CREO I TIME RELOADERS che consentono di ripristinare il tempo\n self.createEnemies(self.numTimeReloaders, self.time_reloaders_filename, self.timeReloaders)\n\n # CREO I PORTALI che consentono di trasferirsi in una nuova locazione random\n self.createEnemies(self.bonus_portals, self.portals_filename, self.portals)\n\n # CREO I TIME LIMITED POWERS, come quello che rende invisibile il giocatore\n self.createTimeLimitedPowers(self.bonus_invisibility_players, self.invisibility_players_filename, self.invisibilityPlayers)\n # e come il ricaricatore di proiettili\n self.createTimeLimitedPowers(self.bonus_player_bullets, self.bonus_player_bullets_filename, self.bonusPlayerBullets)\n\n self.mazeSurf = pygame.Surface((self.ncols * self.scale, self.nrows * self.scale))\n # disegno il labirinto coi suoi muri\n self.drawMaze()\n\n self.scrollSurface = self.mazeSurf.copy()\n #self.scrollSurface.fill((0, 0, 0))\n\n pos = random.choice(self.free_locations)\n print(\"Loc Player:%s\" % str(pos))\n\n self.player.sprite.setPosition(pos)\n\n # imposto posizione e movimento iniziale\n # ai vari gruppi di sprites\n\n self.setInitialPosition(self.enemies.sprites())\n self.setInitialPosition(self.bombs.sprites())\n self.setInitialPosition(self.wallBombs.sprites())\n self.setInitialPosition(self.timeReloaders.sprites())\n self.setInitialPosition(self.enemyKillers.sprites())\n self.setInitialPosition(self.greedyEnemies.sprites())\n self.setInitialPosition(self.portals.sprites())\n self.setInitialPosition(self.invisibilityPlayers.sprites())\n self.setInitialPosition(self.bonusPlayerBullets.sprites())\n\n #self.setInitialPosition(self.shooters.sprites())\n\n # normalmente i nemici non mangiano monete...\n self.enemies_eater = False\n\n\n # a inizio livello si dà tempo di 5 secondi al Giocatore per divincolarsi\n # da eventuali nemici che compaiono negli immediati dintorni\n # della posizione (casuale) in cui si viene a trovare\n # il giocatore a inizio livello\n self.player.sprite.addPower(PlayerPowers.INVISIBILITY, (self.time,5))\n\n # imposto la musica del livello e la mando in esecuzione\n self.music = level.get(\"music\", \"./Music/Soundimage/Techno-Gameplay_Looping.ogg\")\n pygame.mixer.music.load(self.music)\n # mando in esecuzione in modalità loop (valore -1)\n pygame.mixer.music.play(-1)\n\n # barra di stato del gioco con informazioni sul punteggio\n self.setupGamebarSurface()",
"def change_level(self):\n new_level = GameLevel[self.scoreboard.current_level]\n self.greeterboard.reset(level=new_level, msg='')\n self.end_game(i18n.OUT_MSG_NEW_GAME)\n self.init_game_metrics()",
"def get_grid_size(game_level):\n grid_length = 0\n grid_width = 0\n minecount = 0\n if game_level == DifficultyLevel.BeginnerLevel:\n grid_length = GridSize.BeginnerLength\n grid_width = GridSize.BeginnerWidth\n minecount = 10\n\n elif game_level == DifficultyLevel.IntermediateLevel:\n grid_length = GridSize.IntermediateLength\n grid_width = GridSize.IntermediateWidth\n minecount = 40\n\n elif game_level == DifficultyLevel.ExpertLevel:\n grid_length = GridSize.ExpertLength\n grid_width = GridSize.ExpertWidth\n minecount = 99\n\n return (grid_length, grid_width, minecount)",
"def prep_level(self):\n\t\tself.level_image = self.font.render(\"Level: \" + str(self.stats.level), True, self.text_color,self.ai_settings.bg_color)\n\n\t\t#Display the score 10 pixels below the scoreboard.\n\t\tself.level_rect = self.level_image.get_rect()\n\t\tself.level_rect.right = self.score_rect.right\n\t\tself.level_rect.top = self.score_rect.bottom + 10",
"def main():\n global CURRENT_GAME_LEVEL\n app = QtGui.QApplication(sys.argv)\n\n file_existence = os.path.exists(\"Level.txt\")\n\n # If file exist read level from file to restore previous level.\n if file_existence is True:\n file_object = open(\"Level.txt\", \"r\")\n level = int(file_object.read())\n file_object.close()\n # If file doesn't exist, assume default level as beginner and start game.\n else:\n file_object = open(\"Level.txt\", \"w\")\n level = DifficultyLevel.BeginnerLevel\n file_object.write(str(level))\n file_object.close()\n\n # save current game level in global which can be used by others.\n CURRENT_GAME_LEVEL = level\n (length, width, minecount) = get_grid_size(level)\n\n GameUI(length, width, minecount)\n sys.exit(app.exec_())",
"def options(self):\n opt = self.main_window.toplevel()\n cur_l = tkinter.Scale(opt, length=200, label=\"Number of lines:\",\n orient=tkinter.HORIZONTAL, from_=1, to=12,\n command=self.update_nb_rows)\n cur_l.set(self.game.n_row) # initial position of the cursor\n cur_l.pack()\n cur_h = tkinter.Scale(opt, length=200, label=\"Number of columns:\",\n orient=tkinter.HORIZONTAL, from_=1, to=12,\n command=self.update_nb_cols)\n cur_h.set(self.game.n_col)\n cur_h.pack()",
"def updateScreenTiling(self,level):\n\n self.tile_list=[]\n self.objList=[]\n self.level=level\n\n self.rowCount=0\n \n for row in worldData[self.level]:\n self.colCount=0\n for tile in row:\n if tile!=0:\n img11=self.tilType[tile-1]\n img=pygame.transform.scale(img11,(self.tileSize,self.tileSize))\n img_rect = img.get_rect()\n img_rect.x = self.colCount * self.tileSize\n img_rect.y = self.rowCount * self.tileSize\n tile= (img, img_rect)\n self.tile_list.append(tile)\n self.colCount+=1\n self.rowCount+=1\n \n self.rowCount=0\n for row in objectData[self.level]:\n self.colCount=0\n for tile in row:\n if tile!=0:\n img11=self.objType[tile-1]\n img=pygame.transform.scale(img11,(self.tileSize,self.tileSize))\n img_rect = img.get_rect()\n img_rect.x = self.colCount * self.tileSize\n img_rect.y = self.rowCount * self.tileSize\n tile= (img, img_rect)\n self.objList.append(tile)\n self.colCount+=1\n self.rowCount+=1",
"def update_score_and_level(self, board):\n # level\n self.stdscr.addstr(\n 5+BORDER_WIDTH,\n PREVIEW_COLUMN*BLOCK_WIDTH-2+BORDER_WIDTH,\n 'LEVEL: %d' % board.level,\n curses.color_pair(7)\n )\n # score\n self.stdscr.addstr(\n 6+BORDER_WIDTH,\n PREVIEW_COLUMN*BLOCK_WIDTH-2+BORDER_WIDTH,\n 'SCORE: %d' % board.score,\n curses.color_pair(7)\n )",
"def grid_levels(self, nlevels):\n for j in np.arange(nlevels):\n if j in self.gridded['levels'].keys():\n continue\n self.gridded['levels'][j] = self.grid_param(self.levels[j])\n self.jmax = max(self.gridded['levels'].keys())\n if self.verbose:\n print('Gridded the first %d energy levels.' % (self.jmax))\n print('Use self.grid_levels() to read in more.\\n')\n return",
"def resizeGL(self,Width,Height):\n return",
"def grid_levels(self, nlevels):\n for j in np.arange(nlevels):\n if j in self.gridded['levels'].keys():\n continue\n self.gridded['levels'][j] = self.grid_param(self.levels[j],\n self.method)\n self.jmax = max(self.gridded['levels'].keys())\n if self.verbose:\n print('Gridded the first %d energy levels.' % (self.jmax))\n print('Use self.grid_levels() to read in more.\\n')\n return",
"def load_level(level):\n\n global spawn_boxes\n\n level = pytmx.load_pygame('maps/level_' + level + '.tmx')\n\n y_num = 0\n for x, y, gid in level.get_layer_by_name('Objects'):\n if level.get_tile_image_by_gid(gid) != None:\n matrix[y_num].append(1)\n else:\n matrix[y_num].append(0)\n \n if x == 19: y_num += 1\n\n spawn_boxes = [] # Areas in which enemies can spawn. Requires tiled type 'spawn_box'\n for obj in level.get_layer_by_name('Triggers'):\n if obj.type == 'spawn_box':\n rect = pygame.rect.Rect(obj.x, obj.y, obj.width, obj.height)\n if obj.name == 'north': \n rect = rect.move(0, -64)\n rect.height += 64\n if obj.name == 'east': \n rect = rect.move(64, 0)\n rect.width += 64\n if obj.name == 'south': \n rect = rect.move(0, 64)\n rect.height += 64\n if obj.name == 'west': \n rect = rect.move(-64, 0)\n rect.width += 64\n spawn_boxes.append(rect)\n\n return level",
"def mainmenu(self):\n l=[2,4,8,16,32,64,0,0,0,0,0]\n try:\n pickle_in = open('.\\\\2048.txt','rb')\n a = pickle.load(pickle_in)\n GameBoard.highScore = a[4,3]\n pickle_in.close()\n except:\n GameBoard.highScore = 0\n board=GameBoard(self.Display)\n board.restore(array([choice(l) for i in range(16)]+[0,0,0,GameBoard.highScore]).reshape((5,4)))\n board.display()\n for i in range(100,85,-1):\n font=pygame.font.SysFont('comicsansms', i)\n _2048=font.render('2048',True,[i*2,i*2,i*2])\n self.Display.blit(_2048,(205-_2048.get_width()//2,100-_2048.get_height()//2))\n pygame.display.update()\n font=pygame.font.SysFont('comicsansms', 85)\n _2048=font.render('2048',True,[106,106,150])\n self.Display.blit(_2048,(205-_2048.get_width()//2,100-_2048.get_height()//2))\n pygame.draw.rect(self.Display,[200,97,48],(60,140,290,70))\n pygame.draw.rect(self.Display,[200,97,48],(60,230,290,70))\n pygame.draw.rect(self.Display,[200,97,48],(60,320,290,70))\n pygame.draw.rect(self.Display,[106,106,150],(65,145,280,60))\n pygame.draw.rect(self.Display,[106,106,150],(65,235,280,60))\n pygame.draw.rect(self.Display,[106,106,150],(65,325,280,60))\n pygame.display.update()\n self.Display.blit(self.Continue,[205-self.Continue.get_width()//2,175-self.Continue.get_height()//2])\n self.Display.blit(self.Newgame,[205-self.Newgame.get_width()//2,265-self.Newgame.get_height()//2])\n self.Display.blit(self.Exit,[205-self.Exit.get_width()//2,355-self.Exit.get_height()//2])\n pygame.display.update()\n running=True\n while running:\n x,y=pygame.mouse.get_pos()\n if x>64 and x<345:\n if y>144 and y<206:\n pygame.draw.rect(self.Display,[200,97,48],(65,145,280,60))\n self.Display.blit(self.Continue,[205-self.Continue.get_width()//2,175-self.Continue.get_height()//2])\n else:\n pygame.draw.rect(self.Display,[106,106,150],(65,145,280,60))\n self.Display.blit(self.Continue,[205-self.Continue.get_width()//2,175-self.Continue.get_height()//2])\n if y>234 and y<295:\n pygame.draw.rect(self.Display,[200,97,48],(65,235,280,60))\n self.Display.blit(self.Newgame,[205-self.Newgame.get_width()//2,265-self.Newgame.get_height()//2])\n else:\n pygame.draw.rect(self.Display,[106,106,150],(65,235,280,60))\n self.Display.blit(self.Newgame,[205-self.Newgame.get_width()//2,265-self.Newgame.get_height()//2])\n if y>324 and y<386:\n pygame.draw.rect(self.Display,[200,97,48],(65,325,280,60))\n self.Display.blit(self.Exit,[205-self.Exit.get_width()//2,355-self.Exit.get_height()//2])\n else:\n pygame.draw.rect(self.Display,[106,106,150],(65,325,280,60))\n self.Display.blit(self.Exit,[205-self.Exit.get_width()//2,355-self.Exit.get_height()//2])\n pygame.display.update()\n for event in pygame.event.get():\n if event.type==pygame.QUIT:\n return 'exit'\n\n if event.type==pygame.MOUSEBUTTONDOWN:\n x,y=pygame.mouse.get_pos()\n if x>64 and x<345:\n if y>144 and y<206:\n self.resume()\n return\n\n elif y>234 and y<295:\n self.Game=Game(self.Display)\n return\n\n elif y>324 and y<386:\n return 'exit'",
"def grid_init(self):\n # draw.line(surface, color, start_pos, end_pos, width/thickness)\n pygame.draw.line(\n self.game_screen,\n GameData.line_color,\n (0, GameData.square_size),\n (GameData.screen_dim, GameData.square_size),\n GameData.line_width\n )\n # # 2 horizontal\n pygame.draw.line(\n self.game_screen,\n GameData.line_color,\n (0, 2 * GameData.square_size),\n (GameData.screen_dim,2 * GameData.square_size),\n GameData.line_width\n )\n\n # # 1 vertical\n pygame.draw.line(\n self.game_screen,\n GameData.line_color,\n (GameData.square_size, 0),\n (GameData.square_size, GameData.screen_dim),\n GameData.line_width\n )\n # # 2 vertical\n pygame.draw.line(\n self.game_screen,\n GameData.line_color,\n (2 * GameData.square_size, 0),\n (2 * GameData.square_size, GameData.screen_dim),\n GameData.line_width)",
"def choose_level(self):\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n if event.type == pygame.MOUSEBUTTONDOWN:\n pygame.event.post(event)\n\n background()\n position = ((width / 2), (height / 3))\n text_display(\"Wybierz poziom\", 70, black, position)\n mouse = pygame.mouse.get_pos()\n button_easy = Buttton(320, 250, 150, 50, button_green, \"Łatwy\", 30)\n button_easy.show()\n button_easy.backlight(mouse)\n button_medium = Buttton(320, 320, 150, 50, button_green, \"Średni\", 30)\n button_medium.show()\n button_medium.backlight(mouse)\n button_hard = Buttton(320, 390, 150, 50, button_green, \"Trudny\", 30)\n button_hard.show()\n button_hard.backlight(mouse)\n if button_easy.is_clicked(mouse):\n self.board.generate(4)\n self.result = \"\"\n self.game_loop()\n if button_medium.is_clicked(mouse):\n self.board.generate(6)\n self.result = \"\"\n self.game_loop()\n if button_hard.is_clicked(mouse):\n self.board.generate(8)\n self.result = \"\"\n self.game_loop()\n\n pygame.display.update()\n clock.tick(15)",
"def __init__(self, rows, cols, mines):\n tk.Tk.__init__(self)\n \n #load all needed images into Tile.images\n for i in range(14):\n Tile.images.append(tk.PhotoImage(file = \"images/tile-\"+str(i)+\".gif\"))\n \n self.menu = tk.Menu(self)\n self.configure(menu=self.menu)\n self.title(\"Minesweeper\")\n self.myBoard = Board(rows, cols, mines, self)\n self.menuVar = tk.IntVar(self)\n self.menuVar.set(1)\n self.checkVar = tk.IntVar(self)\n self.checkVar.set(1)\n self.gamemenu = tk.Menu(self.menu, tearoff = False)\n self.menu.add_cascade(label=\"Game\", menu=self.gamemenu)\n self.gamemenu.add_command(label=\"New Game\", command=self.myBoard.replay)\n self.gamemenu.add_separator()\n self.gamemenu.add_radiobutton(variable = self.menuVar, value=1, label=\"Beginner\", command=lambda: self.resize(8,8,10))\n self.gamemenu.add_radiobutton(variable = self.menuVar, value=2, label=\"Intermediate\", command=lambda: self.resize(16,16,40))\n self.gamemenu.add_radiobutton(variable = self.menuVar, value=3, label=\"Expert\", command=lambda: self.resize(16,30,99))\n self.gamemenu.add_separator()\n self.gamemenu.add_checkbutton(variable = self.checkVar, onvalue=4, offvalue=0, label=\"Custom\", command= self.options)\n self.gamemenu.add_separator()\n self.gamemenu.add_command(label=\"Exit\", command=self.exitGame)\n windowWidth = str(20*cols+40)\n windowHeight = str(20*rows+60)\n self.protocol(\"WM_DELETE_WINDOW\", self.exitGame)\n self.minsize(windowWidth, windowHeight)\n self.maxsize(windowWidth, windowHeight)\n self.geometry(windowWidth+'x'+windowHeight)\n self.mainloop()",
"def setup_level_2() -> object:\n #create level object\n level = Level()\n\n #create vertical walls for level\n create_and_add_vertical_walls_to_list(4, 19, 4, level.wall_list)\n create_and_add_vertical_walls_to_list(12, 54, 19, level.wall_list)\n create_and_add_vertical_walls_to_list(0, 5, 23, level.wall_list)\n create_and_add_vertical_walls_to_list(0, 4, 30, level.wall_list)\n create_and_add_vertical_walls_to_list(55, settings.HEIGHT, 23, level.wall_list)\n create_and_add_vertical_walls_to_list(55, settings.HEIGHT, 30, level.wall_list)\n create_and_add_vertical_walls_to_list(4, 15, 34, level.wall_list)\n create_and_add_vertical_walls_to_list(24, 54, 34, level.wall_list)\n create_and_add_vertical_walls_to_list(29, 45, 47, level.wall_list)\n create_and_add_vertical_walls_to_list(24, 29, 54, level.wall_list)\n create_and_add_vertical_walls_to_list(44, 54, 54, level.wall_list)\n create_and_add_vertical_walls_to_list(14, 55, 73, level.wall_list)\n\n #create horizontal walls for level\n create_and_add_horiontal_walls_to_list(4, 24, 4, level.wall_list)\n create_and_add_horiontal_walls_to_list(30, 34, 4, level.wall_list)\n create_and_add_horiontal_walls_to_list(20, 24, 14, level.wall_list)\n create_and_add_horiontal_walls_to_list(30, 74, 14, level.wall_list)\n create_and_add_horiontal_walls_to_list(4, 19, 19, level.wall_list)\n create_and_add_horiontal_walls_to_list(34, 54, 24, level.wall_list)\n create_and_add_horiontal_walls_to_list(48, 60, 29, level.wall_list)\n create_and_add_horiontal_walls_to_list(68, 74, 29, level.wall_list)\n create_and_add_horiontal_walls_to_list(48, 60, 44, level.wall_list)\n create_and_add_horiontal_walls_to_list(68, 74, 44, level.wall_list)\n create_and_add_horiontal_walls_to_list(54, 73, 54, level.wall_list)\n create_and_add_horiontal_walls_to_list(19, 24, 54, level.wall_list)\n create_and_add_horiontal_walls_to_list(30, 35, 54, level.wall_list) \n\n #create sword item for \"outfit change\" \n create_and_add_item_to_list(\"pics\\sword_item.png\", 0.05, 75, 100, level.item_list)\n\n #create mysterious figure for level\n create_and_add_character_to_list(\"pics\\mystery_figure.png\", 0.095, 270, 350, level.character_list)\n\n #create dialogue for mysterious figure character\n find_disguise_convo = Dialogue(300, 390, 300, 50, \"Someone will notice you!\\n I've hidden something in the servant's quarters,\\n to make you fit in with the nobility.\")\n level.dialogue_list.append(find_disguise_convo)\n\n #info prompts and text for level\n balcony = RoomInfo(640, 500, \"Balcony. Along with the forest and sea, you can see that a battle is coming.\")\n level.room_info_list.append(balcony)\n kitchen = RoomInfo(270, 90, \"Kitchen. There are plentry of servants around. Your torn clothes are eye-catching, and may sabotage your escape\")\n level.room_info_list.append(kitchen)\n great_hall = RoomInfo(270, 470, \"Great hall. You could have sworn that someone recognized you, but nobody acts to capture you.\")\n level.room_info_list.append(great_hall)\n sitting_room = RoomInfo(650, 230, \"Private sitting room. You find several sketches... sketches that look like a richer, healthier version of you.\")\n level.room_info_list.append(sitting_room)\n\n return level",
"def change_level(self):\r\n error = False\r\n\r\n try:\r\n char_lvl = int(self.__char_lvl.get())\r\n except ValueError:\r\n error = True\r\n\r\n if error or char_lvl <= 0:\r\n self.__skill_points_indicator.configure(\r\n text=\"Level must be a positive whole number\")\r\n for skill_string in self.__skills:\r\n self.skill_up_disable(skill_string)\r\n self.skill_down_disable(skill_string)\r\n\r\n else:\r\n self.reset_all();\r\n self.__skill_points = 10 + 20 * (char_lvl - 1)\r\n self.__skill_points_indicator.configure(\r\n text=\"Available skillpoints: \" + str(\r\n self.__skill_points))\r\n for skill in self.__skills:\r\n self.check_skill_requirements(skill)",
"def __init__(self, screen_size, grid_size):\n super(MainScreen, self).__init__(screen_size)\n self.gamegrid = QuadraticGrid(grid_size[0], grid_size[1])\n self.grid_width = grid_size[0]\n self.grid_height = grid_size[1]\n self.block_width = screen_size[0] / grid_size[0]\n self.block_height = screen_size[1] / grid_size[1]\n print str(self.block_width) + \" \" + str(self.block_height)\n \n self.game_model = GameModel(grid_size)\n self.dragon_group = pygame.sprite.Group()\n self.gun_group = pygame.sprite.Group()\n self.hat_group = pygame.sprite.Group()",
"def use_level(self, level):\n\n if self.min_level <= level <= self.max_level:\n map_extent = self.tiles.use_level(level)\n if map_extent:\n self.level = level\n (self.map_width, self.map_height,\n self.ppd_x, self.ppd_y) = map_extent\n (self.map_llon, self.map_rlon,\n self.map_blat, self.map_tlat) = self.tiles.extent\n\n # do level change callback\n self.handleLevelChangeCallback(level)\n\n return True\n\n return False",
"def prepare(self, level):\n self.greeterboard.welcome_player(\n i18n.OUT_MSG_LUCK.format(self.player_name)\n )\n self.scoreboard.set_labels()\n self.scoreboard.set_level(level)\n self.word_view.setText(i18n.OUT_MSG_NEW_GAME)\n self.init_game_metrics()",
"def setup_level_1() -> object:\n #create level object\n level = Level()\n\n #create vertical walls for level\n create_and_add_vertical_walls_to_list(4, 39, 4, level.wall_list)\n create_and_add_vertical_walls_to_list(4, 25, 19, level.wall_list)\n create_and_add_vertical_walls_to_list(33, 54, 19, level.wall_list)\n create_and_add_vertical_walls_to_list(4, 25, 34, level.wall_list)\n create_and_add_vertical_walls_to_list(33, 54, 34, level.wall_list)\n create_and_add_vertical_walls_to_list(14, 25, 54, level.wall_list)\n create_and_add_vertical_walls_to_list(33, 44, 54, level.wall_list)\n create_and_add_vertical_walls_to_list(14, 45, 74, level.wall_list)\n create_and_add_vertical_walls_to_list(54, settings.HEIGHT, 23, level.wall_list)\n create_and_add_vertical_walls_to_list(54, settings.HEIGHT, 30, level.wall_list)\n\n #create horizontal walls for level\n create_and_add_horiontal_walls_to_list(4, 34, 4, level.wall_list)\n create_and_add_horiontal_walls_to_list(4, 9, 19, level.wall_list)\n create_and_add_horiontal_walls_to_list(15, 24, 19, level.wall_list)\n create_and_add_horiontal_walls_to_list(30, 54, 19, level.wall_list)\n create_and_add_horiontal_walls_to_list(54, 74, 14, level.wall_list)\n create_and_add_horiontal_walls_to_list(4, 24, 39, level.wall_list)\n create_and_add_horiontal_walls_to_list(30, 54, 39, level.wall_list)\n create_and_add_horiontal_walls_to_list(54, 74, 44, level.wall_list)\n create_and_add_horiontal_walls_to_list(19, 24, 54, level.wall_list)\n create_and_add_horiontal_walls_to_list(30, 35, 54, level.wall_list)\n\n #create knight character for level\n create_and_add_character_to_list(\"pics\\prison_guard.png\", 0.2, 270, 470, level.character_list)\n\n #knight asks for bribe\n guard_convo = Dialogue(300, 500, 150, 50, \"I know who you are...\\n if you pay me,\\n I'll turn a blind eye.\")\n level.dialogue_list.append(guard_convo)\n\n #create coin item to bribe knight character\n create_and_add_item_to_list(\"pics\\gold_1.png\", 0.5, 400, 250, level.item_list)\n\n #create prompts and info for rooms for object\n cell = RoomInfo(120, 100, \"Dungeon cell. There's a note and key. Someone's waiting for you in the garden.\")\n level.room_info_list.append(cell)\n guard_room = RoomInfo(450, 280, \"Guardroom. There's the unconconsious bodies of the guards. Your saviours must've gone to great lengths...\")\n level.room_info_list.append(guard_room)\n torture_chamber = RoomInfo(120, 280, \"Torture chamber. You've been here before. They were questioning you, but you didn't answer.\")\n level.room_info_list.append(torture_chamber)\n battle_room = RoomInfo(650, 280, \"Battle room. You see that your captors are fighting revolutionaries- those who seek to bring back a lost king.\")\n level.room_info_list.append(battle_room)\n stairwell = RoomInfo(220, 520, \"Stairwell. There's a lone guard who doesn't look surprised to see you\")\n level.room_info_list.append(stairwell)\n\n return level",
"def resize(self, rows, cols, mines):\n if self.menuVar.get() != 4: self.checkVar.set(0)\n self.myBoard.resize(rows, cols, mines)",
"def prep_level(self):\r\n\t\tlevel_str=\"Level: \"+format(self.stats.level)\r\n\t\tself.level_image=self.font.render(level_str, True,\r\n\t\t\tself.text_color, self.ai_settings.bg_color)\r\n\r\n\t\t#Position the level below the score.\r\n\t\tself.level_rect=self.level_image.get_rect()\r\n\t\tself.level_rect.centerx=self.screen_rect.centerx*1.5\r\n\t\tself.level_rect.top=self.score_rect.top",
"def __change_level(self, level):\n self.level = level",
"def on_zoom_change(self, event) -> None:\r\n\r\n zoom_level = int(self.zoom_scale.get())\r\n self.painter.zoom = zoom_level\r\n self.painter.draw_board()",
"def updateWHM(self, hardnessLevel):\n\n if hardnessLevel == 1:\n self.labelLevelOneWHM.setPixmap(self.images['imageLevelOneWHMgreen'])\n self.labelLevelTwoWHM.setPixmap(self.images['imageLevelTwoWHMgray'])\n self.labelLevelThreeWHM.setPixmap(self.images['imageLevelThreeWHMgray'])\n self.labelLevelFourWHM.setPixmap(self.images['imageLevelFourWHMgray'])\n\n\n elif hardnessLevel == 2:\n self.labelLevelTwoWHM.setPixmap(self.images['imageLevelTwoWHMyellow'])\n self.labelLevelOneWHM.setPixmap(self.images['imageLevelOneWHMgray'])\n self.labelLevelThreeWHM.setPixmap(self.images['imageLevelThreeWHMgray'])\n self.labelLevelFourWHM.setPixmap(self.images['imageLevelFourWHMgray'])\n\n elif hardnessLevel == 3:\n self.labelLevelThreeWHM.setPixmap(self.images['imageLevelThreeWHMorange'])\n self.labelLevelOneWHM.setPixmap(self.images['imageLevelOneWHMgray'])\n self.labelLevelTwoWHM.setPixmap(self.images['imageLevelTwoWHMgray'])\n self.labelLevelFourWHM.setPixmap(self.images['imageLevelFourWHMgray'])\n\n elif hardnessLevel == 4:\n self.labelLevelFourWHM.setPixmap(self.images['imageLevelFourWHMred'])\n self.labelLevelOneWHM.setPixmap(self.images['imageLevelOneWHMgray'])\n self.labelLevelTwoWHM.setPixmap(self.images['imageLevelTwoWHMgray'])\n self.labelLevelThreeWHM.setPixmap(self.images['imageLevelThreeWHMgray'])\n\n self.outcomesWHM = np.append(self.outcomesWHM, hardnessLevel)\n\n if self.outcomesWHM.size > 0:\n elapsed = self.tmr.elapsed() / float(1000)\n self.save_file = open(os.path.join(args.parent_img_path, self.save_file_name), \"a\")\n self.save_file.write(\"Time Elapsed: (\" + str(elapsed) + \" sec) - Hardness Level: \"+str(hardnessLevel)+\",\\n\")\n self.save_file.close()\n self.dataWHM.append({'x': elapsed, 'y': hardnessLevel})\n self.WHMx = [item['x'] for item in self.dataWHM]\n self.WHMy = [item['y'] for item in self.dataWHM]\n self.WHMx = np.asarray(self.WHMx).flatten()\n self.WHMy = np.asarray(self.WHMy).flatten()\n self.curveWHMGraph.setData(x=self.WHMx, y=self.WHMy)\n\n # Stuff for Verbose - Helpful for debugging\n if self.setPointsWHM.verbose > 0:\n if self.setPointsWHM.verbose > 0:\n print 'Receive Time: {:0.6f}ms | Pre-process + SVM Run Time: {:0.6f}ms | Total Run Time: {:0.6f}ms'.format(\n (self.setPointsWHM.algo_time - self.setPointsWHM.receive_time) * 1000.,\n (self.setPointsWHM.final_time - self.setPointsWHM.algo_time) * 1000.,\n (time.time() - self.showtime) * 1000.)\n # reset Showtime\n self.showtime = time.time()\n # print ' Total Showtime: {:0.6f}ms \\n \\n'.format((time.time() - self.showtime) * 1000.)",
"def set_tile(self, row, col, value):\n # replace with your code\n self.grid[row][col] = value",
"def run(self):\n #Level Musik starten\n pygame.mixer.music.stop()\n pygame.mouse.set_cursor(*pygame.cursors.broken_x)\n pygame.key.set_repeat(10)\n pygame.mixer.music.load(os.path.join(self.sourceFileDir,\"Musik\",\"Decktonic - Night Drive (Strong Suit Remix).wav\"))\n \n pygame.mixer.music.play(-1)\n \n #Level Daten aus Datei einlesen\n for i in range(25):\n \n self.level += 1 \n spiel = Spiel(self,self.data[\"levels\"][self.level-1],self.win,godmode= self.godmode)\n #Wenn der Spieler das Level nicht geschafft haben sollte\n if not spiel.schleife_haupt():\n return self.level, self.kills\n \n geschwindigkeit = 10\n\n #Grundlevel erstellen\n lvl_data = {\n \n \"level\": self.level,\n \"hindernisse\": 2,\n \n \"GBall_Shoot\": {\n \"Anzahl\": 2,\n \"geschwindigkeit\": geschwindigkeit\n },\n \"GBall_Normal\": {\n \"Anzahl\": 2,\n \"geschwindigkeit\": geschwindigkeit\n },\n \"GBall_Verdoppler\": {\n \"Anzahl\": 3,\n \"geschwindigkeit\": geschwindigkeit\n },\n \"GBall_RNG\": {\n \"Anzahl\": 4,\n \"geschwindigkeit\": geschwindigkeit\n },\n \"GBall_Two\": {\n \"Anzahl\": 3,\n \"geschwindigkeit\": geschwindigkeit\n }\n \n }\n #Level immer schwerer machen\n for k in range(self.maxlvl):\n\n\n self.level += 1\n geschwindigkeit += 1\n spiel = Spiel(self,lvl_data,self.win,godmode= self.godmode)\n if not spiel.schleife_haupt():\n return self.level, self.kills",
"def upgrade(self):\n if self.level < len(self.tower_images):\n self.level_up_animation = True\n self.level += 1\n self.base_damage += 3\n self.damage = self.base_damage\n\n #Since level does not upgrade in menu we have to manually do it here\n self.menu.tower_level += 1"
]
| [
"0.6549542",
"0.65455246",
"0.5846803",
"0.5705636",
"0.56784075",
"0.56639093",
"0.56557",
"0.5648059",
"0.56119126",
"0.55274254",
"0.5508719",
"0.54641163",
"0.54441774",
"0.5439524",
"0.54059404",
"0.5385119",
"0.53591174",
"0.5351808",
"0.5346402",
"0.5331341",
"0.53282493",
"0.5322578",
"0.53179425",
"0.5316699",
"0.52886784",
"0.5278627",
"0.52727896",
"0.52719593",
"0.525157",
"0.52496356"
]
| 0.70741254 | 0 |
Parse ``define``'s of constants and of types. | def parse_defines(self):
for line in self.header.splitlines():
if line.lower().startswith("#define"):
_, line = line.strip().split(None, 1) # remove #define
if " " in line:
symbol, value = line.split(None, 1)
if value.isdigit():
value = int(value)
elif value.startswith("0x"):
value = int(value, 16)
elif value in self.types:
self.types[symbol] = self.types[value]
else:
symbol = line
value = ""
self.constants[symbol] = value
return self.constants | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_define_variable(self):\n self.assertEqual(['define', 'test', '\"test\"'],\n grammar._DEFINE_VAR.parseString(\"#define test \\\"test\\\"\").asList())\n\n self.assertEqual(['define', 'test', \"f(w,x)\"],\n grammar._DEFINE_VAR.parseString(\"#define test f(w,x)\").asList())\n\n self.assertEqual(['define', 'test', '\"test1 test2\"'],\n grammar._DEFINE_VAR.parseString(\"#define test \\\"test1 test2\\\"\").asList())",
"def readDefinedTypes(self):\n types = {}\n for m in re.finditer(\"TYPE (.*) = (.*);\", self.data):\n typename, typetype = m.groups() \n if typetype in self.types.keys():\n types[typename] = typetype\n \n return types",
"def test_define_function(self):\n self.assertEqual(['define', 'test', ['a1', 'a2', 'a3'], 'f($a1, $a2, $a3)'],\n grammar._DEFINE_FUNCTION.parseString(\"#define test(a1,a2,a3) f($a1, $a2, $a3)\").asList())",
"def define(self):\n initial_pos = self.pos\n try:\n self.match_value(Identifier, \"define\")\n (identifier, args) = self.macro_definition()\n\n # Any remaining tokens are the macro expansion\n if not self.eol():\n expansion = self.tokens[self.pos:]\n self.pos = len(self.tokens)\n else:\n expansion = []\n\n return DefineNode(identifier, args, expansion)\n except ParseError:\n self.pos = initial_pos\n raise ParseError(\"Invalid define directive.\")",
"def define(parser, token):\n\n bits = list(token.split_contents())\n\n if len(bits) != 2:\n raise TemplateSyntaxError(\"Expected format is: {% define variable %}\")\n\n name = bits[1]\n nodelist = parser.parse(('enddefine',))\n parser.delete_first_token()\n\n return DefineNode(name, nodelist)",
"def parseDef(self, firstLine, lines):\n\n m = re.match(r'\\s*typedef\\s+struct\\s*\\{\\s*(?P<rest>.*)', firstLine,\n re.VERBOSE)\n if not m:\n raise RuntimeError('No prefix mathed in %s' % (firstLine))\n \n l = m.groupdict()['rest']\n\n nlines = 0\n parts = []\n while l != None:\n nlines += 1\n l = l.strip()\n\n # Finished with this line, fetch the next one.\n if l == '' or l.startswith('#'):\n l = next(lines)\n continue\n\n # Look for the ending struct name.\n m = self.typedefNameRE.match(l)\n if m:\n g = m.groupdict()\n name = g['name']\n return nlines, name, parts\n\n # Get the next column definition.\n m = self.typedefRE.match(l)\n if not m:\n raise RuntimeError('unmatched struct definition at %s' % (l))\n \n g = m.groupdict()\n if g['arr1']:\n arrSize = int(g['arr1'])\n else:\n arrSize = 1\n \n if g['arr2']:\n arrSize = (int(g['arr2']), arrSize)\n\n defn = (g['name'], g['type'], arrSize)\n parts.append(defn)\n\n # Process the rest of the line.\n l = g['rest']",
"def parse_def(self, sql):\n parsed = sqlparse.parse(sql)[0]\n\n # extract the parenthesis which holds column definitions\n _, par = parsed.token_next_by(i=sqlparse.sql.Parenthesis)\n columns = self.extract_definitions(par)\n\n r = []\n for column in columns:\n s = {}\n s['key'] = column[0]\n s['type'] = column[1:]\n r.append(s)\n #print('NAME: {name!s:12} DEFINITION: {definition}'.format(\n # name=column[0], definition=' '.join(str(t) for t in column[1:])))\n return r",
"def get_defines(self):\n defines = []\n for defs in self['DEFS']:\n defines.extend(re.split('[ ,]', defs))\n return defines",
"def readTypes(self):\r\n types = {}\r\n for m in re.finditer(\"TYPE (.*) = (.*);\", self.data):\r\n typename, typetype = m.groups() \r\n if typetype in self.SIMPLETYPES:\r\n types[typename] = typetype\r\n else:\r\n types[typename] = \"#\" + typetype\r\n \r\n return types",
"def HandlePredefines(predefines, app):\n selecteddefine = None\n if predefines == \"archive\":\n selecteddefine = types_archive\n elif predefines == \"text\":\n selecteddefine = types_text\n elif predefines == \"audio\":\n selecteddefine = types_audio\n # Set types if app is defined\n if app:\n Mime_Set_All(selecteddefine, app)\n # Query types\n Mime_Query_All(selecteddefine)",
"def _parse_col_constants() -> Dict[str, List[str]]:\n\n col_type_map = {\n k: [] for k in TEST_ARGUMENT_DATA_TYPES.keys()\n }\n return col_type_map",
"def fixupTypedefs(self):\n import types\n # now iterate over looking to fix up the structure defines\n for sym in self.data:\n # was this a typedef, if so do we need to change the kalimba define\n if sym.getType() == \"typedef\":\n # did we know what the original is\n if sym.define.getType() == \"value\":\n # we didn't recognise it, is it something we actually know\n # about, first check if its a union or structure\n if type(sym.getDefineValue()) == types.UnicodeType:\n name = sym.getDefineValue().split()\n if len(name) == 1:\n if self.declare.has_symbol(name[0]):\n sym.setStruct(self.declare[name[0]])\n elif self.declare.has_symbol(\"typedef\"+name[0]):\n sym.setStruct(self.declare[\"typedef\"+name[0]])\n else:\n if self.declare.has_symbol(name[0]+name[1]):\n sym.setStruct(self.declare[name[0]+name[1]])\n else:\n if self.declare.has_symbol(sym.getDefineValue()):\n sym.setStruct(self.declare[sym.getDefineValue()])",
"def update_defines(self) -> None:\n self.variables = apache_util.parse_defines(self.configurator.options.get_defines_cmd)",
"def _parse_definition_V4X(par, parfile):\n line = None\n while line != '':\n pos = parfile.tell()\n line = parfile.readline().strip()\n #Parse the useful parts of the definition entry:\n #the identifier-valid name, the number of columns, and the type\n m = re.search(r'# ([^<>\\(\\)\\[\\]]*[a-zA-Z]).*\\((\\d+)?[\\*]?(\\w+)\\)', line)\n if not m:\n if not par.fields:\n continue\n else:\n parfile.seek(pos)\n break\n var_descrip, type_len, type_descrip = m.group(1, 2, 3)\n var_name = _sanitize_to_identifer(var_descrip).lower()\n if type_len:\n type_len = int(type_len)\n else:\n type_len = 1\n #'string' should be interpreted as integer regardless\n if type_descrip == 'integer' or type_descrip == 'string':\n type_code = np.int64\n elif type_descrip == 'float':\n type_code = np.float64 # Same as MATLAB double\n else:\n raise ValueError(descrip)\n #Sub variables exist for variables that have size > 1\n #We add an underscore plus the name of the sub variable\n #i.e. image_angulation_x, image_angulation_y, image_angulation_z\n if type_len > 1:\n par.fields.extend(tuple((var_name + '_' + s, type_code)\n for s in _SUBVAR_NAMES[var_name]))\n else:\n par.fields.append((var_name, type_code))\n par.field_len = len(par.fields)\n return par.fields",
"def testConstants(self):\n with self.assertRaises(pyparsing.ParseException):\n text_parser.PyparsingConstants.MONTH.parseString('MMo')\n with self.assertRaises(pyparsing.ParseException):\n text_parser.PyparsingConstants.MONTH.parseString('M')\n with self.assertRaises(pyparsing.ParseException):\n text_parser.PyparsingConstants.MONTH.parseString('March', parseAll=True)\n\n self.assertTrue(text_parser.PyparsingConstants.MONTH.parseString('Jan'))\n\n line = '# This is a comment.'\n parsed_line = text_parser.PyparsingConstants.COMMENT_LINE_HASH.parseString(\n line)\n self.assertEqual(parsed_line[-1], 'This is a comment.')\n self.assertEqual(len(parsed_line), 2)",
"def __parse_macro(self, buffer):\n\t\t\n\t\t# Parse an optional id\n\t\tspell = self.__read_number(buffer)\n\t\t\n\t\t##\n\t\t# FIXME technically, the effects do not exist, as\n\t\t# WoW parses the identifier alphanumerically\n\t\t# Do we really want that? It's easier to go back.\n\t\t\n\t\t# Parse an alphabetic identifier\n\t\tidentifier = self.__read_alpha(buffer)\n\t\t\n\t\t# effect id should be the next char\n\t\teffect = buffer.read(1)\n\t\t\n\t\t# However it's optional. If it's not here\n\t\t# we have to go back one char.\n\t\tif effect and effect not in (\"1\", \"2\", \"3\"):\n\t\t\tbuffer.seek(-1, SEEK_CUR)\n\t\t\teffect = \"\"\n\t\t\n\t\treturn spell, identifier, effect",
"def readEnumTypes(self):\n types = {}\n for m in re.finditer(\"TYPE (\\w*) = ENUMERATION OF\\s*\\(([\\,\\w\\_\\s]*)\\);\\s*END_TYPE;\", self.data, re.DOTALL):\n typename, types_enum_string = m.groups() \n typestring = re.sub('\\s', '', types_enum_string)\n types[typename] = typestring.split(',')\n \n return types",
"def get_definitions(cfg,regs):\n def_l = list()\n for reg in regs.keys():\n print \"Implementing register {}\".format(reg)\n reg_uc = reg.upper()\n reg_lc = reg.lower()\n r_prefix = get_reg_definition_prefix(cfg,reg_uc)\n def_l.append(get_def_reg_prop(cfg,regs,reg,'addr'))\n for f in regs[reg]['fields']:\n print \" Implementing field {}\".format(f)\n f_prefix = get_field_definition_prefix(cfg,reg_uc, f)\n for d in definition_list:\n def_l.append((f_prefix+d.upper(),regs[reg]['fields'][f][d]))\n return def_l",
"def readSelectTypes(self):\n types = {}\n for m in re.finditer(\"TYPE (\\w*) = SELECT\\s*\\(([\\,\\w\\_\\s]*)\\);\\s*END_TYPE;\", self.data, re.DOTALL):\n typename, types_select_string = m.groups() \n typestring = re.sub('\\s', '', types_select_string)\n types[typename] = typestring.split(',')\n \n return types",
"def _parse_consts(self, consts):\n logger.debug(\"Start to parse consts from proto.\")\n for const in consts:\n if not const.key:\n logger.warning(\"Finding a const with an empty key will not save it.\")\n continue\n check_invalid_character(const.key)\n node = Node(name=const.key, node_id=const.key, full_name=const.full_name)\n node.type = NodeTypeEnum.CONST.value\n if const.value.ByteSize() > self.MAX_NODE_ATTRIBUTE_VALUE_BYTES:\n node.add_attr({const.key: 'dtype: ' + DataType.Name(const.value.dtype)})\n else:\n node.add_attr({const.key: str(const.value)})\n\n if const.value.dtype == DataType.DT_TENSOR:\n shape = list(const.value.tensor_val.dims)\n node.output_shape.append(shape)\n if const.value.tensor_val.HasField('data_type'):\n node.elem_types.append(DataType.Name(const.value.tensor_val.data_type))\n else:\n node.elem_types.append(DataType.Name(const.value.dtype))\n # dim is zero\n node.output_shape.append([])\n\n node.output_nums = len(node.output_shape)\n\n self._cache_node(node)",
"def readSimpleTypes(self):\n types = {}\n for m in re.finditer(\"TYPE (.*) = (.*);\", self.data):\n typename, typetype = m.groups() \n if typetype in self.SIMPLETYPES:\n types[typename] = typetype\n \n return types",
"def consts(consts):\n\n namespace = { }\n\n for c in consts:\n constname = c[\"constname\"]\n consttype = c[\"consttype\"]\n constval = c[\"constval\"]\n\n # Correct various values that won't evaluate in python.\n if constval == \"( SteamItemInstanceID_t ) ~ 0\":\n constval = \"-1\"\n elif constval == \"( ( uint32 ) 'd' << 16U ) | ( ( uint32 ) 'e' << 8U ) | ( uint32 ) 'v'\":\n constval = \"6579574\"\n else:\n constval = re.sub(r\"(0x[0-9a-fA-F]*)ull\", r\"\\1\", constval)\n\n # Evaluate the result, and place it into the namespace.\n value = eval(constval, namespace, namespace)\n namespace[constname] = value\n\n # Generate.\n mapped = map_type(consttype)\n\n if value > 0:\n p(f\"{constname} = {mapped}(0x{value:x})\")\n else:\n p(f\"{constname} = {mapped}({value})\")",
"def get_defined_constants():\n raise NotImplementedError()",
"def configure(conf):\n\n\tconf.load('compiler_c')\n\n\tconf.define('AAA', 1)\n\ttt('define AAA', \"['AAA=1']\", repr(conf.env.DEFINES))\n\n\tconf.undefine('AAA')\n\ttt('undefine AAA', [], conf.env.DEFINES)\n\n\tconf.define('BB', 32)\n\tconf.define('CC', 'test')\n\tconf.define('inline', 'inline', quote=False)\n\n\tconf.write_config_header()\n\ttt('empty config header', [], conf.env.DEFINES)\n\n\tconf.define('somestring', 'test')\n\tconf.check(fragment=FRAG1, define_name='MMM', mandatory=False)\n\ttt('is_defined(MMM)', True, conf.is_defined('MMM'))\n\tconf.check(fragment=FRAG2, define_name='NNN', mandatory=False)\n\ttt('defines are propagated to tests', True, conf.is_defined('NNN'))\n\n\tconf.undefine('AAA')\n\tconf.write_config_header('config.2.h', remove=False)\n\ttt('defines are not removed', 3, len(conf.env.DEFINES))\n\n\ttt('have_define', 'HAVE_FOO', conf.have_define('FOO'))\n\n\tconf.env.DEFINES = []\n\tconf.define('AAA', 1)\n\tconf.define('AAA', 2)\n\ttt('AAA', '2', conf.get_define('AAA'))\n\ttt('defines count', 1, len(conf.env.DEFINES))",
"def run(self, context):\n i = context.skip_ws(0)\n if len(context.history) > 1 and context.history[-2] == \"IsFuncDeclaration\":\n self.check_function_declaration(context)\n if type(context.scope) is not GlobalScope:\n if type(context.scope) == Function and context.scope.multiline == False:\n pass\n else:\n context.new_error(\"PREPROC_GLOBAL\", context.peek_token(0))\n if context.check_token(i, \"DEFINE\") is False:\n return False, 0\n val = context.peek_token(i).value.split(\"define\", 1)[1]\n content = Lexer(val, context.peek_token(i).pos[0])\n tkns = content.get_tokens()\n i = 0\n identifiers = []\n protection = context.filename.upper().split(\"/\")[-1].replace(\".\", \"_\")\n for tkn in tkns:\n if tkn.type == \"ESCAPED_NEWLINE\":\n context.new_error(\"NEWLINE_DEFINE\", tkn)\n elif tkn.type in [\"TAB\", \"SPACE\"]:\n i += 1\n continue\n elif tkn.type == \"IDENTIFIER\" and len(identifiers) == 0:\n if tkn.value.isupper() is False:\n context.new_error(\"MACRO_NAME_CAPITAL\", tkn)\n identifiers.append(tkn)\n tmp = i\n while tmp < len(tkns) - 1 and tkns[tmp].type in [\n \"SPACE\",\n \"TAB\",\n \"IDENTIFIER\",\n ]:\n tmp += 1\n if tmp == (len(tkns) - 1) and context.filetype == \"h\":\n if context.scope.header_protection == 0:\n if identifiers[0].value == protection:\n context.scope.header_protection = 1\n elif identifiers[0].value != protection:\n context.new_error(\"HEADER_PROT_NAME\", tkns[1])\n elif (\n context.filetype == \"c\"\n and context.scope.include_allowed == True\n and (\n len(tkns) > tmp + 1\n or (\n len(tkns) == tmp + 1\n and identifiers[0].value != protection\n and context.scope.header_protection == -1\n )\n )\n ):\n context.scope.include_allowed = False\n\n elif tkn.type in [\"IDENTIFIER\", \"STRING\", \"CONSTANT\"]:\n if context.skip_define_error == True:\n continue\n if len(identifiers) == 1:\n if tkn.type == \"IDENTIFIER\" and tkn.value.isupper() is False:\n context.new_error(\"PREPROC_CONSTANT\", tkn)\n identifiers.append(tkn)\n elif len(identifiers) == 0:\n context.new_error(\"INCORRECT_DEFINE\", tkn)\n else:\n context.new_error(\"TOO_MANY_VALS\", tkn)\n elif tkn.type == \"LPARENTHESIS\":\n if context.skip_define_error == True:\n continue\n if len(identifiers) == 0:\n continue\n elif len(identifiers) == 1 and tkns[i - 1].type in [\"SPACE\", \"TAB\"]:\n continue\n else:\n context.new_error(\"PREPROC_CONSTANT\", tkn)\n elif tkn.type in [\"LBRACKET\", \"LBRACE\"]:\n if context.skip_define_error == True:\n continue\n context.new_error(\"PREPROC_CONSTANT\", tkn)\n\n i += 1\n if context.filetype == \"h\" and context.scope.header_protection != 1:\n context.new_error(\"HEADER_PROT_ALL\", context.peek_token(0))\n return False, 0",
"def load_defs():\n # Load word definitions\n fname = 'word-definitions.txt'\n with open(fname) as fh:\n lines = fh.readlines()\n \n # Create dictionary keyed by lowercase word\n def_tbl = dict()\n for line in lines:\n # split the dictionary line at the first space\n word, word_def = line.split(sep=None, maxsplit=1)\n # add this entry to the dictionary\n word = word.lower()\n def_tbl[word] = word_def.rstrip()\n return def_tbl",
"def parse(self):\n try:\n self.match_value(Operator, \"#\")\n\n # Check for a match against known directives\n candidates = [self.define, self.undef, self.include, self.ifdef,\n self.ifndef, self.if_, self.elif_, self.else_, self.endif, self.pragma]\n for f in candidates:\n try:\n directive = f()\n if not self.eol():\n log.warning(\"Additional tokens at end of preprocessor directive\")\n return directive\n except ParseError:\n pass\n\n # Any other line beginning with '#' is a preprocessor\n # directive, we just don't handle it (yet). Suppress\n # warnings for common directives that shouldn't impact\n # correctness.\n common_unhandled = [\"line\", \"warning\", \"error\"]\n if len(self.tokens) > 2 and str(self.tokens[1]) not in common_unhandled:\n log.warning(\"Unrecognized directive\")\n return UnrecognizedDirectiveNode(self.tokens)\n except ParseError:\n raise ParseError(\"Not a directive.\")",
"def _parse_types(self):\n for root in self.roots:\n for types in root.iter('types'):\n for node in types.iter('type'):\n type_name = GLGenerator.get_name(node)\n text = GLGenerator.get_text(node).strip()\n if '*' in text and not text.startswith('struct'):\n self.pointer_types.append(type_name)",
"def readOtherTypes(self):\n types = {}\n for m in re.finditer(\"TYPE (\\w*) = (.*);\", self.data):\n typename, type_string = m.groups() \n if typename not in self.types.keys():\n types[typename] = type_string\n \n return types",
"def _parse_types(self, die):\n if die.offset in self._visited_die_offset:\n return\n else:\n self._visited_die_offset.append(die.offset)\n\n if die.tag == \"DW_TAG_base_type\":\n self._parse_base_type(die)\n\n elif die.tag == \"DW_TAG_const_type\":\n self._parse_const_type(die)\n\n elif die.tag == \"DW_TAG_volatile_type\":\n self._parse_volatile_type(die)\n\n elif die.tag == \"DW_TAG_typedef\":\n self._parse_typedef(die)\n\n elif die.tag == \"DW_TAG_pointer_type\":\n self._parse_pointer_type(die)\n\n elif die.tag == \"DW_TAG_array_type\":\n self._parse_array_type(die)\n\n elif die.tag == \"DW_TAG_enumeration_type\":\n self._parse_enums_type(die)\n\n # union and class are not implemented yet, use structure.\n elif die.tag == \"DW_TAG_structure_type\":\n self._parse_structure_type(die)\n elif die.tag == \"DW_TAG_union_type\":\n self._parse_structure_type(die)\n elif die.tag == \"DW_TAG_class_type\":\n self._parse_structure_type(die)\n\n elif die.tag == \"DW_TAG_subroutine_type\":\n self._parse_subroutine_type(die)\n\n else:\n ...\n\n if die.tag == \"DW_TAG_compile_unit\":\n return\n\n # if has children, iter them, except DW_TAG_compile_unit.\n for child_die in die.iter_children():\n self._parse_types(child_die)"
]
| [
"0.636398",
"0.57366693",
"0.5598086",
"0.55355525",
"0.53910017",
"0.53386915",
"0.5338318",
"0.53181356",
"0.52843934",
"0.51965386",
"0.5190159",
"0.5169622",
"0.51104206",
"0.504494",
"0.50343925",
"0.50150555",
"0.5005637",
"0.49932948",
"0.49319997",
"0.4893059",
"0.48915902",
"0.48876718",
"0.48273998",
"0.48245054",
"0.48074362",
"0.47959617",
"0.479527",
"0.47828996",
"0.47787535",
"0.47567222"
]
| 0.7308531 | 0 |
Cast a ctypes object or byref into a Python object. | def deref(obj):
try:
return obj._obj.value # byref
except AttributeError:
try:
return obj.value # plain ctypes
except AttributeError:
return obj # plain python | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def as_pyobj(space, w_obj, w_userdata=None, immortal=False):\n assert not is_pyobj(w_obj)\n if w_obj is not None:\n py_obj = w_obj._cpyext_as_pyobj(space)\n if not py_obj:\n py_obj = create_ref(space, w_obj, w_userdata, immortal=immortal)\n #\n # Try to crash here, instead of randomly, if we don't keep w_obj alive\n ll_assert(py_obj.c_ob_refcnt >= rawrefcount.REFCNT_FROM_PYPY,\n \"Bug in cpyext: The W_Root object was garbage-collected \"\n \"while being converted to PyObject.\")\n return py_obj\n else:\n return lltype.nullptr(PyObject.TO)",
"def C_to_Python(c_object):\n try :\n cast_function = c_to_py_registry[(c_object.dtype, c_object.precision)]\n except KeyError:\n errors.report(PYCCEL_RESTRICTION_TODO, symbol=c_object.dtype,severity='fatal')\n\n cast_func = FunctionDef(name = cast_function,\n body = [],\n arguments = [Variable(dtype=c_object.dtype, name = 'v', precision = c_object.precision)],\n results = [Variable(dtype=PyccelPyObject(), name = 'o', is_pointer=True)])\n\n return cast_func",
"def Python_to_C(c_object):\n try :\n cast_function = py_to_c_registry[(c_object.dtype, c_object.precision)]\n except KeyError:\n errors.report(PYCCEL_RESTRICTION_TODO, symbol=c_object.dtype,severity='fatal')\n cast_func = FunctionDef(name = cast_function,\n body = [],\n arguments = [Variable(dtype=PyccelPyObject(), name = 'o', is_pointer=True)],\n results = [Variable(dtype=c_object.dtype, name = 'v', precision = c_object.precision)])\n\n return cast_func",
"def make_ref(space, w_obj, w_userdata=None, immortal=False):\n assert not is_pyobj(w_obj)\n if w_obj is not None and space.type(w_obj) is space.w_int:\n state = space.fromcache(State)\n intval = space.int_w(w_obj)\n return state.ccall(\"PyInt_FromLong\", intval)\n return get_pyobj_and_incref(space, w_obj, w_userdata, immortal=False)",
"def from_ref(space, ref):\n assert is_pyobj(ref)\n if not ref:\n return None\n w_obj = rawrefcount.to_obj(W_Root, ref)\n if w_obj is not None:\n if w_obj is not w_marker_deallocating:\n return w_obj\n fatalerror(\n \"*** Invalid usage of a dying CPython object ***\\n\"\n \"\\n\"\n \"cpyext, the emulation layer, detected that while it is calling\\n\"\n \"an object's tp_dealloc, the C code calls back a function that\\n\"\n \"tries to recreate the PyPy version of the object. Usually it\\n\"\n \"means that tp_dealloc calls some general PyXxx() API. It is\\n\"\n \"a dangerous and potentially buggy thing to do: even in CPython\\n\"\n \"the PyXxx() function could, in theory, cause a reference to the\\n\"\n \"object to be taken and stored somewhere, for an amount of time\\n\"\n \"exceeding tp_dealloc itself. Afterwards, the object will be\\n\"\n \"freed, making that reference point to garbage.\\n\"\n \">>> PyPy could contain some workaround to still work if\\n\"\n \"you are lucky, but it is not done so far; better fix the bug in\\n\"\n \"the CPython extension.\")\n\n # This reference is not yet a real interpreter object.\n # Realize it.\n ref_type = rffi.cast(PyObject, ref.c_ob_type)\n if ref_type == ref: # recursion!\n raise InvalidPointerException(str(ref))\n w_type = from_ref(space, ref_type)\n assert isinstance(w_type, W_TypeObject)\n return get_typedescr(w_type.layout.typedef).realize(space, ref)",
"def cast(object, class_, instanceof=object, *args, **kwargs):\n\n\tobject = copy(object)\n\tif isinstance(object, instanceof):\n\t\tobject.__class__ = class_\n\t\tobject.__init__(*args, **kwargs)\n\telse:\n\t\traise TypeError(\"Object is not an instance of {}\".format(instanceof.__name__))\n\treturn object",
"def _type_realize(space, py_obj):\n # missing:\n # unsupported:\n # tp_mro, tp_subclasses\n py_type = rffi.cast(PyTypeObjectPtr, py_obj)\n\n if not py_type.c_tp_base:\n # borrowed reference, but w_object is unlikely to disappear\n base = as_pyobj(space, space.w_object)\n py_type.c_tp_base = rffi.cast(PyTypeObjectPtr, base)\n\n finish_type_1(space, py_type)\n\n if py_type.c_ob_type:\n w_metatype = from_ref(space, rffi.cast(PyObject, py_type.c_ob_type))\n else:\n # Somehow the tp_base type is created with no ob_type, notably\n # PyString_Type and PyBaseString_Type\n # While this is a hack, cpython does it as well.\n w_metatype = space.w_type\n\n w_obj = space.allocate_instance(W_PyCTypeObject, w_metatype)\n track_reference(space, py_obj, w_obj)\n # __init__ wraps all slotdefs functions from py_type via add_operators\n w_obj.__init__(space, py_type)\n w_obj.ready()\n\n finish_type_2(space, py_type, w_obj)\n base = py_type.c_tp_base\n if base:\n # XXX refactor - parts of this are done in finish_type_2 -> inherit_slots\n if not py_type.c_tp_as_number:\n py_type.c_tp_as_number = base.c_tp_as_number\n py_type.c_tp_flags |= base.c_tp_flags & Py_TPFLAGS_CHECKTYPES\n py_type.c_tp_flags |= base.c_tp_flags & Py_TPFLAGS_HAVE_INPLACEOPS\n if not py_type.c_tp_as_sequence:\n py_type.c_tp_as_sequence = base.c_tp_as_sequence\n py_type.c_tp_flags |= base.c_tp_flags & Py_TPFLAGS_HAVE_INPLACEOPS\n if not py_type.c_tp_as_mapping:\n py_type.c_tp_as_mapping = base.c_tp_as_mapping\n #if not py_type.c_tp_as_buffer: py_type.c_tp_as_buffer = base.c_tp_as_buffer\n\n return w_obj",
"def ns_from_py(pyobj):\n\n if isinstance(pyobj, enum.Enum):\n pyobj = pyobj.value\n\n # Many Objective-C method calls here use the convert_result=False kwarg to\n # disable automatic conversion of return values, because otherwise most of\n # the Objective-C objects would be converted back to Python objects.\n if pyobj is None or isinstance(pyobj, ObjCInstance):\n return pyobj\n elif isinstance(pyobj, str):\n return ObjCInstance(\n NSString.stringWithUTF8String_(pyobj.encode(\"utf-8\"), convert_result=False)\n )\n elif isinstance(pyobj, bytes):\n return ObjCInstance(NSData.dataWithBytes(pyobj, length=len(pyobj)))\n elif isinstance(pyobj, decimal.Decimal):\n return ObjCInstance(\n NSDecimalNumber.decimalNumberWithString_(\n pyobj.to_eng_string(), convert_result=False\n )\n )\n elif isinstance(pyobj, dict):\n dikt = NSMutableDictionary.dictionaryWithCapacity(len(pyobj))\n for k, v in pyobj.items():\n dikt.setObject(v, forKey=k)\n return dikt\n elif isinstance(pyobj, list):\n array = NSMutableArray.arrayWithCapacity(len(pyobj))\n for v in pyobj:\n array.addObject(v)\n return array\n elif isinstance(pyobj, bool):\n return ObjCInstance(NSNumber.numberWithBool_(pyobj, convert_result=False))\n elif isinstance(pyobj, int):\n return ObjCInstance(NSNumber.numberWithLong_(pyobj, convert_result=False))\n elif isinstance(pyobj, float):\n return ObjCInstance(NSNumber.numberWithDouble_(pyobj, convert_result=False))\n else:\n raise TypeError(\n f\"Don't know how to convert a {type(pyobj).__module__}.{type(pyobj).__qualname__} to a Foundation object\"\n )",
"def create_ref(space, w_obj, w_userdata=None, immortal=False):\n w_type = space.type(w_obj)\n pytype = rffi.cast(PyTypeObjectPtr, as_pyobj(space, w_type))\n typedescr = get_typedescr(w_obj.typedef)\n if pytype.c_tp_itemsize != 0:\n itemcount = space.len_w(w_obj) # PyBytesObject and subclasses\n else:\n itemcount = 0\n py_obj = typedescr.allocate(space, w_type, itemcount=itemcount, immortal=immortal)\n track_reference(space, py_obj, w_obj)\n #\n # py_obj.c_ob_refcnt should be exactly REFCNT_FROM_PYPY + 1 here,\n # and we want only REFCNT_FROM_PYPY, i.e. only count as attached\n # to the W_Root but not with any reference from the py_obj side.\n assert py_obj.c_ob_refcnt > rawrefcount.REFCNT_FROM_PYPY\n py_obj.c_ob_refcnt -= 1\n #\n typedescr.attach(space, py_obj, w_obj, w_userdata)\n return py_obj",
"def type_attach(space, py_obj, w_type, w_userdata=None):\n assert isinstance(w_type, W_TypeObject)\n\n pto = rffi.cast(PyTypeObjectPtr, py_obj)\n\n typedescr = get_typedescr(w_type.layout.typedef)\n\n if space.is_w(w_type, space.w_bytes):\n pto.c_tp_itemsize = 1\n elif space.is_w(w_type, space.w_tuple):\n pto.c_tp_itemsize = rffi.sizeof(PyObject)\n # buffer protocol\n setup_buffer_procs(space, w_type, pto)\n\n state = space.fromcache(State)\n pto.c_tp_free = state.C.PyObject_Free\n pto.c_tp_alloc = state.C.PyType_GenericAlloc\n builder = state.builder\n if ((pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE) != 0\n and builder.cpyext_type_init is None):\n # this ^^^ is not None only during startup of cpyext. At that\n # point we might get into troubles by doing make_ref() when\n # things are not initialized yet. So in this case, simply use\n # str2charp() and \"leak\" the string.\n w_typename = space.getattr(w_type, space.newtext('__name__'))\n heaptype = cts.cast('PyHeapTypeObject*', pto)\n heaptype.c_ht_name = make_ref(space, w_typename)\n from pypy.module.cpyext.bytesobject import PyString_AsString\n pto.c_tp_name = cts.cast('const char *',\n PyString_AsString(space, heaptype.c_ht_name))\n else:\n pto.c_tp_name = cts.cast('const char*', rffi.str2charp(w_type.name))\n # uninitialized fields:\n # c_tp_print\n # XXX implement\n # c_tp_compare and more?\n w_base = best_base(space, w_type.bases_w)\n pto.c_tp_base = rffi.cast(PyTypeObjectPtr, make_ref(space, w_base))\n\n # dealloc\n if space.gettypeobject(w_type.layout.typedef) is w_type:\n # only for the exact type, like 'space.w_tuple' or 'space.w_list'\n pto.c_tp_dealloc = typedescr.get_dealloc(space)\n else:\n # for all subtypes, use base's dealloc (requires sorting in attach_all)\n pto.c_tp_dealloc = pto.c_tp_base.c_tp_dealloc\n if not pto.c_tp_dealloc:\n # strange, but happens (ABCMeta)\n pto.c_tp_dealloc = state.C._PyPy_subtype_dealloc\n\n if builder.cpyext_type_init is not None:\n builder.cpyext_type_init.append((pto, w_type))\n else:\n finish_type_1(space, pto, w_type.bases_w)\n finish_type_2(space, pto, w_type)\n\n pto.c_tp_basicsize = rffi.sizeof(typedescr.basestruct)\n if pto.c_tp_base:\n if pto.c_tp_base.c_tp_basicsize > pto.c_tp_basicsize:\n pto.c_tp_basicsize = pto.c_tp_base.c_tp_basicsize\n if pto.c_tp_itemsize < pto.c_tp_base.c_tp_itemsize:\n pto.c_tp_itemsize = pto.c_tp_base.c_tp_itemsize\n\n if w_type.is_heaptype():\n update_all_slots(space, w_type, pto)\n else:\n update_all_slots_builtin(space, w_type, pto)\n if not pto.c_tp_new:\n base_object_pyo = make_ref(space, space.w_object)\n base_object_pto = rffi.cast(PyTypeObjectPtr, base_object_pyo)\n flags = rffi.cast(lltype.Signed, pto.c_tp_flags)\n if pto.c_tp_base != base_object_pto or flags & Py_TPFLAGS_HEAPTYPE:\n pto.c_tp_new = pto.c_tp_base.c_tp_new\n decref(space, base_object_pyo)\n pto.c_tp_flags |= Py_TPFLAGS_READY\n return pto",
"def _val(obj):\n if isinstance(obj, ctypes._SimpleCData):\n return obj.value\n else:\n return obj",
"def from_binary(cls, data):\n if isinstance(data, basestring):\n data = ctypes.c_buffer(data)\n return ctypes.cast(data, cls)",
"def get_w_obj_and_decref(space, pyobj):\n assert is_pyobj(pyobj)\n pyobj = rffi.cast(PyObject, pyobj)\n w_obj = from_ref(space, pyobj)\n if pyobj:\n pyobj.c_ob_refcnt -= 1\n assert pyobj.c_ob_refcnt >= rawrefcount.REFCNT_FROM_PYPY\n keepalive_until_here(w_obj)\n return w_obj",
"def py_from_ns(nsobj):\n\n if isinstance(nsobj, (objc_id, Class)):\n nsobj = ObjCInstance(nsobj)\n if not isinstance(nsobj, ObjCInstance):\n return nsobj\n\n if nsobj.isKindOfClass(NSDecimalNumber):\n return decimal.Decimal(str(nsobj.descriptionWithLocale(None)))\n elif nsobj.isKindOfClass(NSNumber):\n # Choose the property to access based on the type encoding. The actual\n # conversion is done by ctypes. Signed and unsigned integers are in\n # separate cases to prevent overflow with unsigned long longs.\n objc_type = nsobj.objCType\n if objc_type == b\"B\":\n return nsobj.boolValue\n elif objc_type in b\"csilq\":\n return nsobj.longLongValue\n elif objc_type in b\"CSILQ\":\n return nsobj.unsignedLongLongValue\n elif objc_type in b\"fd\":\n return nsobj.doubleValue\n else:\n raise TypeError(\n f\"NSNumber containing unsupported type {objc_type!r} \"\n \"cannot be converted to a Python object\"\n )\n elif nsobj.isKindOfClass(NSString):\n return str(nsobj)\n elif nsobj.isKindOfClass(NSData):\n # Despite the name, string_at converts the data at the address to a\n # bytes object, not str.\n return string_at(\n send_message(nsobj, \"bytes\", restype=POINTER(c_uint8), argtypes=[]),\n nsobj.length,\n )\n elif nsobj.isKindOfClass(NSDictionary):\n return {py_from_ns(k): py_from_ns(v) for k, v in nsobj.items()}\n elif nsobj.isKindOfClass(NSArray):\n return [py_from_ns(o) for o in nsobj]\n else:\n return nsobj",
"def __init__(self, ptr):\n if not ptr:\n raise Exception(\"Attempt to create NULL StructObject\")\n\n self.struct = ctypes.cast(ptr, self.PtrType)",
"def add_direct_pyobj_storage(cls):\n\n cls._cpy_ref = lltype.nullptr(PyObject.TO)\n\n def _cpyext_as_pyobj(self, space):\n return self._cpy_ref\n cls._cpyext_as_pyobj = _cpyext_as_pyobj\n\n def _cpyext_attach_pyobj(self, space, py_obj):\n self._cpy_ref = py_obj\n rawrefcount.create_link_pypy(self, py_obj)\n cls._cpyext_attach_pyobj = _cpyext_attach_pyobj",
"def _make_array(self, c):\n return (c * ctypes.py_object)()",
"def uiBoxPointer(obj):\n\n return ctypes.cast(obj, ctypes.POINTER(uiBox))",
"def _py2java(gateway, obj):\n if isinstance(obj, RDD):\n obj = _to_java_object_rdd(obj)\n elif isinstance(obj, DataFrame):\n obj = obj._jdf\n elif isinstance(obj, SparkContext):\n obj = obj._jsc\n elif isinstance(obj, SQLContext):\n obj = obj._jsqlContext\n elif isinstance(obj, (list, tuple)):\n obj = ListConverter().convert([_py2java(gateway, x) for x in obj],\n gateway._gateway_client)\n elif isinstance(obj, dict):\n result = {}\n for (key, value) in obj.items():\n result[key] = _py2java(gateway, value)\n obj = MapConverter().convert(result, gateway._gateway_client)\n elif isinstance(obj, JavaValue):\n obj = obj.value\n elif isinstance(obj, JavaObject):\n pass\n elif isinstance(obj, (int, long, float, bool, bytes, unicode)):\n pass\n else:\n data = bytearray(PickleSerializer().dumps(obj))\n obj = gateway.jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.loads(data)\n return obj",
"def ptr(self, space, w_name, w_argtypes, w_restype, flags=FUNCFLAG_CDECL):\n resshape = unpack_resshape(space, w_restype)\n if resshape is None:\n w_resshape = space.w_None\n else:\n w_resshape = resshape\n argtypes_w = space.fixedview(w_argtypes)\n w_argtypes = space.newtuple(argtypes_w)\n w_key = space.newtuple([w_name, w_argtypes, w_resshape])\n try:\n return space.getitem(self.w_cache, w_key)\n except OperationError as e:\n if e.match(space, space.w_KeyError):\n pass\n else:\n raise\n # Array arguments not supported directly (in C, an array argument\n # will be just a pointer). And the result cannot be an array (at all).\n argshapes = unpack_argshapes(space, w_argtypes)\n ffi_argtypes = [shape.get_basic_ffi_type() for shape in argshapes]\n if resshape is not None:\n ffi_restype = resshape.get_basic_ffi_type()\n else:\n ffi_restype = ffi_type_void\n\n if space.isinstance_w(w_name, space.w_text):\n name = space.text_w(w_name)\n\n try:\n ptr = self.cdll.getrawpointer(name, ffi_argtypes, ffi_restype,\n flags)\n except KeyError:\n raise oefmt(space.w_AttributeError,\n \"No symbol %s found in library %s\",\n name, self.name)\n except LibFFIError:\n raise got_libffi_error(space)\n\n elif (_MS_WINDOWS and space.isinstance_w(w_name, space.w_int)):\n ordinal = space.int_w(w_name)\n try:\n ptr = self.cdll.getrawpointer_byordinal(ordinal, ffi_argtypes,\n ffi_restype, flags)\n except KeyError:\n raise oefmt(space.w_AttributeError,\n \"No symbol %d found in library %s\",\n ordinal, self.name)\n except LibFFIError:\n raise got_libffi_error(space)\n else:\n raise oefmt(space.w_TypeError,\n \"function name must be string or integer\")\n\n w_funcptr = W_FuncPtr(space, ptr, argshapes, resshape)\n space.setitem(self.w_cache, w_key, w_funcptr)\n return w_funcptr",
"def relay_unsafe_static_cast(c, val, ty):\n assert ty.is_constant(AbstractTaggedUnion)\n assert isinstance(val.abstract, AbstractTaggedUnion)\n return c.ref(val)",
"def finish_type_2(space, pto, w_obj):\n pto.c_tp_mro = make_ref(space, space.newtuple(w_obj.mro_w))\n base = pto.c_tp_base\n if base:\n inherit_special(space, pto, w_obj, base)\n for w_base in space.fixedview(from_ref(space, pto.c_tp_bases)):\n if isinstance(w_base, W_TypeObject):\n inherit_slots(space, pto, w_base)\n #else:\n # w_base is a W_ClassObject, ignore it\n\n if not pto.c_tp_setattro:\n from pypy.module.cpyext.object import PyObject_GenericSetAttr\n pto.c_tp_setattro = llslot(space, PyObject_GenericSetAttr)\n\n if not pto.c_tp_getattro:\n from pypy.module.cpyext.object import PyObject_GenericGetAttr\n pto.c_tp_getattro = llslot(space, PyObject_GenericGetAttr)\n\n if w_obj.is_cpytype():\n decref(space, pto.c_tp_dict)\n w_dict = w_obj.getdict(space)\n # pass in the w_obj to convert any values that are\n # unbound GetSetProperty into bound PyGetSetDescrObject\n pto.c_tp_dict = make_ref(space, w_dict, w_obj)",
"def dummy_ptrtype(*args):\n return _ida_hexrays.dummy_ptrtype(*args)",
"def fl_make_object(flobjclass, otype, xpos, ypos, width, height, label,\n pyfn_HandlePtr):\n #FL_HANDLEPTR = cty.CFUNCTYPE(cty.c_int, cty.POINTER(xfdata.FL_OBJECT),\n # cty.c_int, xfdata.FL_Coord, xfdata.FL_Coord, cty.c_int, cty.c_void_p)\n _fl_make_object = library.cfuncproto(\n library.load_so_libforms(), \"fl_make_object\",\\\n cty.POINTER(xfdata.FL_OBJECT), [cty.c_int, cty.c_int, xfdata.FL_Coord,\n xfdata.FL_Coord, xfdata.FL_Coord, xfdata.FL_Coord, xfdata.STRING,\n xfdata.FL_HANDLEPTR],\n \"\"\"FL_OBJECT * fl_make_object(int objclass, int type, FL_Coord x,\n FL_Coord y, FL_Coord w, FL_Coord h, const char * label,\n FL_HANDLEPTR handle)\"\"\")\n library.check_if_flinitialized()\n library.checkfatal_allowed_value_in_list(flobjclass, \\\n xfdata.OBJCLASS_list)\n i_flobjclass = library.convert_to_intc(flobjclass)\n i_otype = library.convert_to_intc(otype)\n i_xpos = library.convert_to_FL_Coord(xpos)\n i_ypos = library.convert_to_FL_Coord(ypos)\n i_width = library.convert_to_FL_Coord(width)\n i_height = library.convert_to_FL_Coord(height)\n s_label = library.convert_to_bytestrc(label)\n library.verify_function_type(pyfn_HandlePtr)\n cfn_HandlePtr = xfdata.FL_HANDLEPTR(pyfn_HandlePtr)\n library.keep_cfunc_refs(cfn_HandlePtr, pyfn_HandlePtr)\n library.keep_elem_refs(flobjclass, otype, xpos, ypos, width, \\\n height, label, i_flobjclass, i_otype, i_xpos, i_ypos, \\\n i_width, i_height, s_label)\n retval = _fl_make_object(i_flobjclass, i_otype, i_xpos, i_ypos, \\\n i_width, i_height, s_label, cfn_HandlePtr)\n return retval",
"def __call__(self, struct, objtype=None):\n\n if objtype == None:\n objtype = self.Obj\n\n if not struct:\n raise Exception(\"Tried to find NULL StructObject\")\n\n # Get an integer from the pointer, and try and find a matching\n # StructObject in the weak dictionary.\n ptr = cutil.ptr2int(struct)\n obj = self.by_ptr.get(ptr)\n\n # If a StructObject already exists, return it\n if obj != None:\n return obj\n\n # Otherwise, create a new one, save it to the weak dictionary,\n # and return it.\n obj = objtype(struct)\n self.by_ptr[ptr] = obj\n\n return obj",
"def _PythonToCtype(data, c_type):\n if c_type is actuator_util.Vec3:\n # Handle Vec3.\n assert len(data) == 3\n c_data = c_type()\n c_data.x = data[0]\n c_data.y = data[1]\n c_data.z = data[2]\n return c_data\n elif hasattr(c_type, '_length_'):\n # Handle arrays.\n length = getattr(c_type, '_length_')\n assert len(data) == length\n\n c_data = c_type()\n for i in range(length):\n c_data[i] = _PythonToCtype(data[i], getattr(c_type, '_type_'))\n\n elif hasattr(c_type, '_fields_'):\n # Handle structures.\n fields = autogen_util.GetCFields(c_type)\n assert set(data.keys()) == {field for field, _ in fields}\n\n c_data = c_type()\n for field, field_type in fields:\n setattr(c_data, field, _PythonToCtype(data[field], field_type))\n\n else:\n c_data = c_type(data)\n\n return c_data",
"def object_to_bytes(obj):\n if isinstance(obj, str):\n return bytearray(obj, \"UTF-8\")\n elif isinstance(obj, bool):\n return bytearray()\n elif isinstance(obj, int):\n return pack(\"<L\", obj)\n elif obj == None:\n return bytearray()\n elif isinstance(obj, bytearray):\n return obj\n else:\n #print type(obj), obj\n return obj.get_raw()",
"def is_ctypes_instance(obj):\n return issubclass(type(obj), ctypes.Structure) or issubclass(type(obj), ctypes.Union)",
"def build_ctypes_proxy(longsize, pointersize, longdoublesize):\n if (longsize, pointersize, longdoublesize) in __PROXIES:\n instance = __PROXIES[(longsize, pointersize, longdoublesize)]\n return instance\n instance = CTypesProxy(longsize, pointersize, longdoublesize)\n __PROXIES[(longsize, pointersize, longdoublesize)] = instance\n return instance",
"def get_pointer(self, name, timeout=None):\r\n exist = self._exist(name)\r\n isobject = self._isobject(name, exist)\r\n\r\n if exist == 0:\r\n raise Oct2PyError('\"%s\" is undefined' % name)\r\n\r\n elif exist == 1:\r\n return _make_variable_ptr_instance(self, name)\r\n\r\n elif isobject:\r\n return self._get_user_class(name)\r\n\r\n elif exist in [2, 3, 5]:\r\n return self._get_function_ptr(name)\r\n\r\n raise Oct2PyError('Unknown type for object \"%s\"' % name)"
]
| [
"0.6561695",
"0.6400061",
"0.6284424",
"0.6150552",
"0.6112126",
"0.5926656",
"0.58083993",
"0.58023477",
"0.57096577",
"0.56859213",
"0.56555754",
"0.55901396",
"0.5577375",
"0.5572786",
"0.55587715",
"0.55309314",
"0.5499725",
"0.54556036",
"0.5441602",
"0.54098374",
"0.5399149",
"0.5385448",
"0.53672427",
"0.5360356",
"0.5301636",
"0.52954715",
"0.5278014",
"0.5213731",
"0.52095",
"0.5193567"
]
| 0.7112424 | 0 |
Add a method with specific success codes. | def _set_success_codes(self, fname, success_codes):
func = getattr(self._dll, fname)
argtypes, func.argtuple_t, restype = self._fundecls[fname]
argtypes = [argtype
if not (isinstance(argtype, type(ctypes.POINTER(ctypes.c_int))) and
argtype._type_.__module__ != "ctypes") # remove struct (nested) pointers
else ctypes.c_voidp for argtype in argtypes]
func.argtypes = argtypes
try:
success_code_type, = set(type(code) for code in success_codes)
except ValueError:
raise AssertionError("Success code of different types")
if success_code_type == restype:
func.success_codes = success_codes
func.errcheck = errcheck
else:
func.restype = restype
setattr(self, fname, func) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _add_status_code(runner, return_value):\n if isinstance(return_value, Mapping):\n status_code = return_value.get('statusCode')\n if status_code:\n runner.resource['metadata']['status_code'] = status_code",
"def add_status_code(code):\n def class_decorator(cls):\n cls.status_code = code\n return cls\n return class_decorator",
"def set_status( code ):",
"def addSuccess(self, test):\n test.status = \"success\"",
"def assign_message_code(success: bool):\n return (HTTPStatus.OK.phrase, HTTPStatus.OK) if success\\\n else (HTTPStatus.INTERNAL_SERVER_ERROR.phrase, HTTPStatus.INTERNAL_SERVER_ERROR)",
"def success(self, result):\r\n raise NotImplementedError",
"def reply_with_code(self, code: int) -> None:",
"def setResponseCode(code, message=None):",
"def status(self, code, content_length=None):",
"def add_codes(cls):\n\n class ErrorsWithCodes: # pylint: disable=too-few-public-methods\n \"\"\"Add error messages with Code for easy debugging\n \"\"\"\n\n def __getattribute__(self, code):\n msg = getattr(cls, code)\n return f'[{code}] {msg}'\n\n return ErrorsWithCodes()",
"def status(self, action, code):\n if code != 20002:\n print(action + \"returned with code %s \" % (ERROR_CODE[code]))\n elif self._verbosity:\n print(action + \"completed successfully %s \" % (ERROR_CODE[code]))",
"def indicate_success(self):\n pass",
"def success(self, message, *args, **kwargs):\n self.counters[\"success\"] += 1\n self._write(message.format(*args, **kwargs), SUCCESS)",
"def __call__(self, *status_codes: str):\n def decorator(function: FuncSpeechArg):\n self._add_attr(function, {self.KEY_STATUS_CODES: status_codes})\n return function\n return decorator",
"def addSuccess(self, test):\n self.passing.append(proto_test(test))",
"def verify_response(success_code):\n def decorator(func):\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n response = requests.get(self.request_url)\n if response.status_code != success_code:\n raise Exception('The request to Tableau Server returned code \\n'\n ' {} instead of {} in function {}'.format(response.status_code,\n success_code,\n func.__name__))\n return func(self, *args, **kwargs)\n return wrapper\n return decorator",
"def _success(self, msg=\"\"):\n if msg:\n self.result[\"message\"] = msg\n self.module.exit_json(**self.result)",
"def test_add_success(self):\n self.protocol.addSuccess(self.test)\n self.assertEqual(\n self.io.getvalue(), compat._b(\"successful: %s\\n\" % self.test.id()))",
"def success(cls, retval, retvalname='value'):\r\n if isinstance(retval, dict) and retvalname is None:\r\n retval[\"__result__\"] = \"success\" # TODO: right here just modified input dict. That's not good\r\n else:\r\n retval = {\"__result__\": \"success\", retvalname: retval}\r\n return PlatformMessage(method=\"__reply__\", kwargs=retval)",
"def action_success(self, resp):\n return resp[0] in SUCCESS_CODES",
"def action_success(self, resp):\n return resp[0] in SUCCESS_CODES",
"def return_code(self) -> int:\n raise NotImplementedError(\"Base method not implemented\")",
"def add(self, method: str, pattern: str, handler: Callable) -> None:",
"def addUnexpectedSuccess(self, test):\r\n self.unexpectedSuccesses.append(test)",
"def markSuccess(self, *args):\n self.add(True)",
"def status_code(self) -> int:\n raise NotImplementedError # pragma: no cover",
"def error_code(self, obj, statusCode):\n pass",
"def add_success(self, task: Task) -> None: # noqa: DAR101\n super().add_success(task)\n self._add_summary(task, _TaskExitCode.PASS)",
"def response_code(self,code,argument):\n\t\tresponse_code = f\"{code!s} {argument}\\r\\n\"\n\t\tself.wfile.write(bytes(response_code,\"ascii\"))",
"def status(code):\r\n def has_status(client, response, testcase):\r\n testcase.assertEqual(\r\n response.status_code,\r\n code\r\n )\r\n return has_status"
]
| [
"0.6259414",
"0.6249232",
"0.61892796",
"0.6104332",
"0.60523224",
"0.60154104",
"0.5901302",
"0.58486927",
"0.5820936",
"0.5702256",
"0.56624115",
"0.56536525",
"0.562311",
"0.55835485",
"0.5566436",
"0.554873",
"0.5535785",
"0.5483932",
"0.54706943",
"0.54671884",
"0.54671884",
"0.5437561",
"0.54235256",
"0.5419958",
"0.5390853",
"0.53697175",
"0.53518134",
"0.5333091",
"0.5329901",
"0.53195846"
]
| 0.6456779 | 0 |
Return nth value of the modified Tribonnaci sequence Expand the sequence if necessary | def get_tribonnaci(self, n):
if n not in self.numbers:
current_n = max(self.numbers)
while current_n < n:
current_n += 1
self.numbers[current_n] = self.numbers[current_n - 1] + \
self.numbers[current_n - 2] + \
self.numbers[current_n - 3]
return self.numbers[n] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def lucas(n):\n if n == 0:\n return 2\n elif n == 1:\n return 1\n else:\n nth = lucas(n-1) + lucas(n-2)\n return nth",
"def solve(n, seq):\n\n return sum(seq) - (n-1) * (n-2) / 2",
"def nth(n, seq):\n try:\n return seq[n]\n except TypeError:\n return next(itertools.islice(seq, n, None))",
"def tribonacci(self, n: int) -> int:\n # Solution 1 - 24 ms\n # Solution 2 - 12 ms\n if n in self.mem:\n return self.mem[n]\n self.mem[n] = self.tribonacci(n - 1) + self.tribonacci(n - 2) + self.tribonacci(n - 3)\n return self.mem[n]",
"def g_iter(n):\n \"*** YOUR CODE HERE ***\"\n vals = [1, 2, 3]\n if n <= 3:\n return vals[n-1]\n for i in range(n - 3):\n new_val = 3 * vals[0] + 2 * vals[1] + 1 * vals[2]\n vals = vals[1:] + [new_val]\n return vals[-1]",
"def lucas_iter(n):\n f = []\n for x in range(n + 1):\n if x == 0:\n f.append(2)\n elif x == 1:\n f.append(1)\n else:\n f.append(f[-1] + f[-2])\n return f[-1]",
"def lucas(n):\n\tlucas_seq = []\n\n\tfor i in range(0,n+1):\n\t\tif i == 0:\n\t\t\tlucas_seq.append(2)\n\t\tif i == 1:\n\t\t\tlucas_seq.append(1)\n\t\tif i > 1:\n\t\t\tnth_term = lucas_seq[-1] + lucas_seq[-2]\n\t\t\tlucas_seq.append(nth_term)\n\t\n\tprint(lucas_seq)\n\tprint(lucas_seq[n])\n\treturn(lucas_seq[n])",
"def triangular_number(n):\n return n*(n+1) / 2",
"def J (self, n):",
"def triangular_number_solution():\n return 5 * partial_sum(199) + 3 * partial_sum(333) - 15 * partial_sum(66)",
"def lucas(n):\n if n == 0:\n return 2\n elif n == 1:\n return n\n else:\n return lucas(n-1) + lucas(n-2)",
"def last_n_ver(seq, n):\r\n return first_n_ver(list(reversed(seq)), n)",
"def lucas(n):\n if n==0:\n return 2\n elif n==1:\n return 1\n else:\n return lucas(n-1) + lucas(n-2)",
"def I (self, n):",
"def get_t(self, n, c):\n t = 1\n while t * n + t * t * n * n < 2 * c:\n t += 1\n return t - 1",
"def Arn(r, n):\n ret = 1\n for t in range(n, n-r+1-1, -1):\n ret *= t\n return ret",
"def lucas(n):\n if n == 1:\n return 2\n elif n == 2:\n return 1\n else:\n return lucas(n-2) + lucas(n-1)",
"def seq(n,x=0, y=1):\r\n if n==1:\r\n return x\r\n elif n==2:\r\n return y\r\n else:\r\n return seq(n-1,x,y)+seq(n-2,x,y)",
"def lucas(n):\n\n if (n == 0):\n return 2\n elif (n == 1):\n return 1\n else:\n return lucas(n - 1) + lucas(n - 2)",
"def g_iter(n):\n \"*** YOUR CODE HERE ***\"\n counter = 0\n term1 = 3\n term2 = 2\n term3 = 1\n loop = n-3\n\n if n<=3:\n return n\n\n while counter<loop:\n term1,term2,term3=term1+2*term2+3*term3,term1,term2\n counter +=1\n return term1",
"def sw(n):\n return 4*n*n + 2*n + 1",
"def poly_nth(f, n):\n if n < 0 or n > len(f)-1:\n raise IndexError\n else:\n return f[zzx_degree(f)-n]",
"def cw(i):\n return (i - 1) % 3",
"def next_term(x):\n if x%2 == 0:\n return x/2\n else:\n return 3*x + 1",
"def triple_step_simplified(n):\n\ta = 0\n\tb = 0\n\tc = 1\n\tfor i in range(n):\n\t\ttemp = a + b + c\n\t\ta, b, c = b, c, temp\n\treturn temp",
"def fn(n):\n if n == 0: return [\"\"]\n if n == 1: return [\"0\", \"1\", \"8\"]\n return [x+y+xx for x, xx in mp for y in fn(n-2)]",
"def lucas(n):\n if n == 0:\n return 2\n elif n == 1:\n return 1\n else:\n return lucas(n - 1) + lucas(n - 2)",
"def lucas(n):\n if n == 0:\n return 2\n elif n == 1:\n return 1\n else:\n return lucas(n - 1) + lucas(n - 2)",
"def last_fib_digit(n):\n\n # global seq\n seq = []\n seq.append(1)\n seq.append(1)\n\n if n <= 2:\n return(1)\n\n for i in range(2, n):\n seq.append(last_digit(seq[i-1] + seq[i-2]))\n\n return seq[n-1]",
"def cyclic_index_i_plus_1(i, length):\n return i + 1 if i + 1 < length else 0"
]
| [
"0.6478033",
"0.6209465",
"0.6194479",
"0.6160907",
"0.61453307",
"0.61131483",
"0.6061283",
"0.5964612",
"0.59022593",
"0.58862644",
"0.5838626",
"0.5835178",
"0.5829389",
"0.582663",
"0.5818929",
"0.5817166",
"0.58149713",
"0.58006334",
"0.57880753",
"0.5778549",
"0.577671",
"0.5771904",
"0.5760964",
"0.57606393",
"0.57558215",
"0.57421625",
"0.5728751",
"0.5728751",
"0.5721766",
"0.57140356"
]
| 0.68224144 | 0 |
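The memoized getter in the record above assumes a pre-seeded `self.numbers` dict. A minimal, self-contained sketch of how such a class might be seeded and used follows; the seed terms {1: 0, 2: 1, 3: 1} are an assumption, since the record does not show how the "modified" sequence starts.

```python
class Tribonacci:
    def __init__(self):
        # Assumed seed terms for the first three positions (not from the record).
        self.numbers = {1: 0, 2: 1, 3: 1}

    def get_tribonnaci(self, n):
        # Extend the memo dict up to n, each term being the sum of the previous three.
        if n not in self.numbers:
            current_n = max(self.numbers)
            while current_n < n:
                current_n += 1
                self.numbers[current_n] = (self.numbers[current_n - 1]
                                           + self.numbers[current_n - 2]
                                           + self.numbers[current_n - 3])
        return self.numbers[n]


print(Tribonacci().get_tribonnaci(10))  # -> 81 with the assumed seed terms
```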
This is a hook for providing more complex voting once logical reasoning has been performed. | def _vote(self, team):
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def opinion_vote(mode, verbose, revision):\n judge = VotingJudge(mode, revision)\n flags = judge.vote()\n if verbose is True:\n click.echo(\"Vote resulted in %i flags:\" % len(flags))\n for f in flags:\n format_flag(f)",
"def process_VOTED(self, msg):\n\n result = parseYesOrNo(' '.join(msg[1:]))\n if result is not None:\n assert self._vote is not None\n self._vote.set(result)",
"async def vote(self, ctx):\n embed = discord.Embed(title = \"Here are some bot lists that you can vote for me on, voters may soon™ recieve perks\", color = discord.Color.blurple())\n embed.add_field(name = \"Bots For Discord\", value = \"[Click Here](https://botsfordiscord.com/bot/592811241756688405/vote)\")\n embed.add_field(name = \"Discord Boats\", value = \"[Click Here](https://discord.boats/bot/592811241756688405/vote)\")\n embed.add_field(name = \"Divine Discord Bots\", value = \"[Click Here](https://divinediscordbots.com/bot/592811241756688405/vote)\") \n embed.add_field(name = \"Botlist.space\", value = \"[Click Here](https://botlist.space/bot/592811241756688405/upvote)\") \n embed.set_thumbnail(url = self.bot.user.avatar_url)\n await ctx.send(embed = embed)",
"def tpc_vote(self, transaction):\n raise NotImplementedError",
"async def vote(ctx: commands.Context):\n await ctx.send(\"this isn't a command\")",
"def process_vote(self, comment_id, username, value):\n raise NotImplementedError()",
"def _transit_to_voting(self, **kwargs):\n\n handler = kwargs['handler']\n\n plus_id = kwargs['plus_id']\n card_id = kwargs['card_id']\n game = models.Hangout.get_by_id(self.hangout_id).current_game.get()\n if not game:\n if handler:\n handler.accumulate_response(\n {'status': 'ERROR',\n 'message': \"Game for hangout %s not found\" % (self.hangout_id,)})\n return False\n if not game.state == self.state_name:\n if handler:\n handler.accumulate_response(\n {'status': 'ERROR',\n 'message': (\n \"Can't vote now, wrong game state %s.\" % (game.state,))})\n return False \n # try to get the id of the voted-for player based on their selected card\n # via memcache first.\n selections = memcache.get(\n self._selections_key(game.key.id(), game.current_round))\n if selections: # if cache hit\n logging.info(\"got selections cache hit: %s\", selections)\n pvid = selections.get(card_id)\n if not pvid:\n # cache list was present, but not info for that card\n pvid = self._get_pid_from_selcard(card_id)\n else: # cache miss on selections list\n logging.info(\"did not get selections cache hit\")\n pvid = self._get_pid_from_selcard(card_id)\n logging.debug(\"in _transit_to_voting, with plus id %s and pvid %s\",\n plus_id, pvid)\n if not plus_id or not pvid:\n if handler:\n handler.accumulate_response(\n {'status': 'ERROR',\n 'message': 'Voting information not properly specified'})\n return False\n if plus_id == pvid:\n if handler:\n handler.accumulate_response(\n {'status': 'ERROR',\n 'message': 'Participants cannot vote for themselves.'})\n return False\n\n participant_key = model.Key(models.Participant, plus_id, parent=game.key)\n participant = participant_key.get()\n if not participant:\n if handler:\n handler.accumulate_response(\n {'status': 'ERROR',\n 'message': \"Could not retrieve indicated participant\"})\n return False\n # TODO: also check that entity exists for given participant key\n vpkey = model.Key(models.Participant, pvid, parent=game.key)\n participant.vote = vpkey\n participant.put()\n return True",
"def test_vote_nopermission(self):\r\n mock_module = CHModuleFactory.create(user_voted=True)\r\n json_in = {'answer': '24.0', 'hint': 1, 'pk_list': json.dumps([['24.0', 1], ['24.0', 3]])}\r\n old_hints = copy.deepcopy(mock_module.hints)\r\n mock_module.tally_vote(json_in)\r\n self.assertTrue(mock_module.hints == old_hints)",
"def use_voting_classifier(self):\n\t\tself.model = VotingClassifier(estimators=[('nb', self.models[\"naive_bayes\"]), ('et', self.models[\"extra_tree\"]), ('gb', self.models[\"gradient_boost\"])], voting='hard', weights=[2,3,1.5])",
"def update_vote(self):\n if not self.answer_id:\n return False\n try:\n con = psycopg2.connect(**self.config)\n cur = con.cursor(cursor_factory=RealDictCursor)\n query = \"UPDATE votes SET vote=%s WHERE answer_id=%s AND user_id=%s\"\n cur.execute(query, (self.vote_value, self.answer_id, self.user_id))\n con.commit()\n except Exception as e:\n print(e)\n con.close()\n return False\n return True",
"def tally_vote(self, data):\r\n if self.user_voted:\r\n return {'error': 'Sorry, but you have already voted!'}\r\n ans = data['answer']\r\n if not self.validate_answer(ans):\r\n # Uh oh. Invalid answer.\r\n log.exception('Failure in hinter tally_vote: Unable to parse answer: {ans}'.format(ans=ans))\r\n return {'error': 'Failure in voting!'}\r\n hint_pk = str(data['hint'])\r\n # We use temp_dict because we need to do a direct write for the database to update.\r\n temp_dict = self.hints\r\n try:\r\n temp_dict[ans][hint_pk][1] += 1\r\n except KeyError:\r\n log.exception('''Failure in hinter tally_vote: User voted for non-existant hint:\r\n Answer={ans} pk={hint_pk}'''.format(ans=ans, hint_pk=hint_pk))\r\n return {'error': 'Failure in voting!'}\r\n self.hints = temp_dict\r\n # Don't let the user vote again!\r\n self.user_voted = True\r\n\r\n # Return a list of how many votes each hint got.\r\n pk_list = json.loads(data['pk_list'])\r\n hint_and_votes = []\r\n for answer, vote_pk in pk_list:\r\n if not self.validate_answer(answer):\r\n log.exception('In hinter tally_vote, couldn\\'t parse {ans}'.format(ans=answer))\r\n continue\r\n try:\r\n hint_and_votes.append(temp_dict[answer][str(vote_pk)])\r\n except KeyError:\r\n log.exception('In hinter tally_vote, couldn\\'t find: {ans}, {vote_pk}'.format(\r\n ans=answer, vote_pk=str(vote_pk)))\r\n\r\n hint_and_votes.sort(key=lambda pair: pair[1], reverse=True)\r\n # Reset self.previous_answers and user_submissions.\r\n self.previous_answers = []\r\n self.user_submissions = []\r\n return {'hint_and_votes': hint_and_votes}",
"def tagme_vote(self, e1, candidate_set):\n try:\n return sum([self.entity_relatedness(e1, e2) * score\n for score, e2 in candidate_set]) / len(candidate_set)\n except ZeroDivisionError:\n return 0.0",
"def votePost(votePostEvent):\n userID = votePostEvent[\"data\"][\"user_id\"]\n postID = votePostEvent[\"data\"][\"post_id\"]\n vote = int(votePostEvent[\"data\"][\"vote\"])\n query = ('SELECT * FROM vote WHERE post_id = \\\"{}\\\" AND user_id = \\\"{}\\\"'.format(postID, userID))\n with conn.cursor() as cur:\n affectedRow = cur.execute(query)\n if affectedRow > 0:\n row = cur.fetchone()\n if vote > 0 and not row[2]:\n query = (\n 'UPDATE vote SET upvote = true, downvote = false WHERE post_id = \\\"{}\\\" AND user_id = \\\"{}\\\"'.format(\n postID, userID))\n cur.execute(query)\n query = 'UPDATE post SET upvote = upvote+1, downvote = downvote-1 WHERE post_id = \\\"{}\\\"'.format(postID)\n cur.execute(query)\n elif vote < 0 and not row[3]:\n query = (\n 'UPDATE vote SET upvote = false, downvote = true WHERE post_id = \\\"{}\\\" AND user_id = \\\"{}\\\"'.format(\n postID, userID))\n cur.execute(query)\n query = 'UPDATE post SET upvote = upvote-1, downvote = downvote+1 WHERE post_id = \\\"{}\\\"'.format(postID)\n cur.execute(query)\n else:\n if vote > 0:\n query = (\n 'INSERT INTO vote (user_id, post_id, upvote, downvote) VALUES ( \\\"{}\\\", \\\"{}\\\", true, false)'.format(\n userID, postID))\n cur.execute(query)\n query = 'UPDATE post SET upvote = upvote+1 WHERE post_id = \\\"{}\\\"'.format(postID)\n cur.execute(query)\n else:\n query = (\n 'INSERT INTO vote (user_id, post_id, upvote, downvote) VALUES ( \\\"{}\\\", \\\"{}\\\", false, true)'.format(\n userID, postID))\n cur.execute(query)\n query = 'UPDATE post SET downvote = downvote+1 WHERE post_id = \\\"{}\\\"'.format(postID)\n cur.execute(query)\n conn.commit()",
"def toggle_vote(self):\n\n self.vote = 1 - self.vote",
"def test_upvote_then_downvote_same_user_leaves_post_score_one_less(self):\n post = Post.objects.get(body=\"123ABC Body\")\n # self.assertEqual(len(post_qs), 1)\n self.assertEqual(post.score, DEFAULT_SCORE)\n post = Post.objects.get(body=\"123ABC Body\")\n\n vote1 = Vote.create(post=post, value=1, voter=self.user)\n post = Post.objects.get(body=\"123ABC Body\")\n self.assertEqual(post.score, DEFAULT_SCORE + 1)\n\n vote2 = Vote.create(post=post, value=-1, voter=self.user)\n post = Post.objects.get(body=\"123ABC Body\")\n self.assertEqual(post.score, DEFAULT_SCORE - 1)",
"def _sense_and_act(self):\n pass",
"def test_vote_twice(self):\n idea = models.Idea(creator=random_user(), title='Transit subsidy to Mars', \n text='Aliens need assistance.', state=self.state)\n idea.save()\n\n self.client.login(username='testuser', password='password')\n resp = self.client.post(reverse('upvote_idea'), {'idea_id':idea.id, 'next':reverse('idea_detail', args=(idea.id,))})\n self.assertEqual(resp.status_code, 302)\n self.assertEqual(len(idea.vote_set.all()), 1)\n\n resp = self.client.post(reverse('upvote_idea'), {'idea_id':idea.id, 'next':reverse('idea_detail', args=(idea.id,))})\n self.assertEqual(resp.status_code, 302)\n self.assertEqual(len(idea.vote_set.all()), 1)",
"def vote(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n try:\n configure()\n selected_choice = question.choice_set.get(pk=request.POST['choice'])\n except (KeyError, Choice.DoesNotExist):\n configure()\n # Redisplay the question voting form.\n return render(request, 'polls/detail.html', {\n 'question': question,\n 'error_message': \"You didn't select a choice.\",\n })\n else:\n if Vote.objects.filter(pk=question_id, user_id=request.user.id).exists():\n configure()\n user_vote = question.vote_set.get(user=request.user)\n user_vote.choice = selected_choice\n user_vote.choice.votes += 1\n user_vote.choice.save()\n user_vote.save()\n else:\n configure()\n selected_choice.vote_set.create(user=request.user, question=question)\n\n return HttpResponseRedirect(reverse('polls:results', args=(question_id,)))",
"def _transit_to_voting(self, **kwargs):\n logging.debug(\"in _transit_to_voting\")\n handler = kwargs['handler']\n\n game = models.Hangout.get_by_id(self.hangout_id).current_game.get()\n if not game:\n if handler:\n handler.accumulate_response(\n {'status': 'ERROR',\n 'message': \"Game for hangout %s not found\" % (self.hangout_id,)})\n return False\n if game.state != self.state_name:\n logging.info(\"game state %s not valid\", game.state)\n return False\n game.state = 'voting'\n game.put()\n return True",
"def up_vote(cls, user, message):\r\n pass",
"def test_upvote_then_downvote_same_user_leaves_comment_score_one_less(self):\n comment = Comment.objects.get(body=\"987XYZ\")\n # self.assertEqual(len(post_qs), 1)\n self.assertEqual(comment.score, DEFAULT_SCORE)\n comment = Comment.objects.get(body=\"987XYZ\")\n\n vote1 = Vote.create(comment=comment, value=1, voter=self.user)\n comment = Comment.objects.get(body=\"987XYZ\")\n self.assertEqual(comment.score, DEFAULT_SCORE + 1)\n\n vote2 = Vote.create(comment=comment, value=-1, voter=self.user)\n comment = Comment.objects.get(body=\"987XYZ\")\n self.assertEqual(comment.score, DEFAULT_SCORE - 1)",
"def can_vote(age):\n return age >= 18",
"def sense_and_act(self):\n pass",
"def test_40_message_vote(self):\n cr, uid = self.cr, self.uid\n # Data: post a message on Pigs\n msg_id = self.group_pigs.message_post(body='My Body', subject='1')\n msg = self.mail_message.browse(cr, uid, msg_id)\n msg_raoul = self.mail_message.browse(cr, self.user_raoul_id, msg_id)\n\n # Do: Admin vote for msg\n self.mail_message.vote_toggle(cr, uid, [msg.id])\n msg.refresh()\n # Test: msg has Admin as voter\n self.assertEqual(set(msg.vote_user_ids), set([self.user_admin]), 'mail_message vote: after voting, Admin should be in the voter')\n # Do: Bert vote for msg\n self.mail_message.vote_toggle(cr, self.user_raoul_id, [msg.id])\n msg_raoul.refresh()\n # Test: msg has Admin and Bert as voters\n self.assertEqual(set(msg_raoul.vote_user_ids), set([self.user_admin, self.user_raoul]), 'mail_message vote: after voting, Admin and Bert should be in the voters')\n # Do: Admin unvote for msg\n self.mail_message.vote_toggle(cr, uid, [msg.id])\n msg.refresh()\n msg_raoul.refresh()\n # Test: msg has Bert as voter\n self.assertEqual(set(msg.vote_user_ids), set([self.user_raoul]), 'mail_message vote: after unvoting, Bert should be in the voter')\n self.assertEqual(set(msg_raoul.vote_user_ids), set([self.user_raoul]), 'mail_message vote: after unvoting, Bert should be in the voter')",
"def score_voting(self, det_bboxes, det_labels, mlvl_bboxes,\n mlvl_nms_scores, score_thr):\n candidate_mask = mlvl_nms_scores > score_thr\n # print(\"candidate_mask\")\n # print(candidate_mask)\n candidate_mask_nozeros = candidate_mask.nonzero()\n # print(\"candidate_mask_nozeros\")\n # print(candidate_mask_nozeros)\n candidate_inds = candidate_mask_nozeros[:, 0]\n candidate_labels = candidate_mask_nozeros[:, 1]\n candidate_bboxes = mlvl_bboxes[candidate_inds]\n candidate_scores = mlvl_nms_scores[candidate_mask]\n det_bboxes_voted = []\n det_labels_voted = []\n # print(\"self.cls_out_channels\")\n # print(self.cls_out_channels)\n for cls in range(self.cls_out_channels):\n candidate_cls_mask = candidate_labels == cls\n if not candidate_cls_mask.any():\n continue\n candidate_cls_scores = candidate_scores[candidate_cls_mask]\n candidate_cls_bboxes = candidate_bboxes[candidate_cls_mask]\n det_cls_mask = det_labels == cls\n det_cls_bboxes = det_bboxes[det_cls_mask].view(\n -1, det_bboxes.size(-1))\n det_candidate_ious = bbox_overlaps(det_cls_bboxes[:, :4],\n candidate_cls_bboxes)\n for det_ind in range(len(det_cls_bboxes)):\n single_det_ious = det_candidate_ious[det_ind]\n pos_ious_mask = single_det_ious > 0.01\n pos_ious = single_det_ious[pos_ious_mask]\n pos_bboxes = candidate_cls_bboxes[pos_ious_mask]\n pos_scores = candidate_cls_scores[pos_ious_mask]\n pis = (torch.exp(-(1 - pos_ious)**2 / 0.025) *\n pos_scores)[:, None]\n voted_box = torch.sum(\n pis * pos_bboxes, dim=0) / torch.sum(\n pis, dim=0)\n voted_score = det_cls_bboxes[det_ind][-1:][None, :]\n det_bboxes_voted.append(\n torch.cat((voted_box[None, :], voted_score), dim=1))\n det_labels_voted.append(cls)\n\n det_bboxes_voted = torch.cat(det_bboxes_voted, dim=0)\n det_labels_voted = det_labels.new_tensor(det_labels_voted)\n return det_bboxes_voted, det_labels_voted",
"def test_basic(self):\n with build_video(self.user, votes=0) as video:\n votes = video.votes\n add_vote(video)\n video = Video.objects.get(pk=video.pk)\n eq_(video.votes, votes + 1)",
"def vote(self):\n if self.vote_exists():\n return self.update_vote()\n return self.create_vote()",
"def test_vote_withpermission(self):\r\n mock_module = CHModuleFactory.create(\r\n previous_answers=[['24.0', [0, 3, None]]])\r\n json_in = {'answer': '24.0', 'hint': 3, 'pk_list': json.dumps([['24.0', 0], ['24.0', 3]])}\r\n dict_out = mock_module.tally_vote(json_in)\r\n self.assertTrue(mock_module.hints['24.0']['0'][1] == 40)\r\n self.assertTrue(mock_module.hints['24.0']['3'][1] == 31)\r\n self.assertTrue(['Best hint', 40] in dict_out['hint_and_votes'])\r\n self.assertTrue(['Another hint', 31] in dict_out['hint_and_votes'])",
"def test_vote_nohint(self):\r\n mock_module = CHModuleFactory.create()\r\n json_in = {'answer': '24.0', 'hint': '25', 'pk_list': '[]'}\r\n dict_out = mock_module.tally_vote(json_in)\r\n self.assertTrue(dict_out == {'error': 'Failure in voting!'})",
"def vote(self, data, suffix=''): # pylint: disable=unused-argument\n # Here is where we would prevent a student from voting twice, but then\n # we couldn't click more than once in the demo!\n #\n # if self.voted:\n # log.error(\"cheater!\")\n # return\n\n votes = json.load(self.fs.open(u\"thumbsvotes.json\"))\n self.upvotes = votes['up']\n self.downvotes = votes['down']\n\n if data['voteType'] not in ('up', 'down'):\n log.error('error!')\n return\n\n if data['voteType'] == 'up':\n self.upvotes += 1\n else:\n self.downvotes += 1\n\n with self.fs.open(u'thumbsvotes.json', 'wb') as file_output:\n file_output.write(\n json.dumps({'up': self.upvotes, 'down': self.downvotes}).encode()\n )\n\n self.voted = True\n\n return {'up': self.upvotes, 'down': self.downvotes}"
]
| [
"0.6289021",
"0.5948813",
"0.58756876",
"0.5792382",
"0.5758893",
"0.5732524",
"0.57292676",
"0.5664396",
"0.5635303",
"0.5612909",
"0.5588355",
"0.55865514",
"0.5553387",
"0.5463278",
"0.5462347",
"0.54601604",
"0.5458112",
"0.54575557",
"0.54467076",
"0.5443048",
"0.5429653",
"0.53901964",
"0.53899276",
"0.5387119",
"0.534758",
"0.52954",
"0.5292817",
"0.52768356",
"0.52752525",
"0.5249843"
]
| 0.6151692 | 1 |
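The `_vote` hook in the record above returns a permissive default, with the intent that richer logic is supplied elsewhere. A hypothetical sketch of that pattern (the class names and the `trusted_teams` attribute are invented for illustration):

```python
class Voter:
    def _vote(self, team):
        # Permissive default, as in the record above.
        return True


class SelectiveVoter(Voter):
    def _vote(self, team):
        # Hypothetical override: only approve teams this voter trusts.
        return team in getattr(self, "trusted_teams", ())
```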
Get the argument parser for passing times. | def get_parser_times():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"start_time", action="store", type=pandas.Timestamp)
parser.add_argument(
"end_time", action="store", type=pandas.Timestamp)
parser.add_argument(
"--area", action="store", type=str,
help="Resample to this area")
parser.add_argument(
"--sector", action="store", type=str,
help="Sector of ABI data to read",
choices=("C", "F", "M1", "M2"))
parser.add_argument(
"--outdir", action="store", type=pathlib.Path,
help="Directory where to write resulting images.",
default=io.plotdir())
_add_common_to_parser(parser)
return parser | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--date\", \"-d\", help=\"Date of this lab session\")\n parser.add_argument(\"--time-in\", \"-ti\", help=\"Time string representing the time lab began\")\n parser.add_argument(\"--time-out\", \"-to\", help=\"Time string representing the time lab ended\")\n \n return parser.parse_args()",
"def make_argument_parser():\n parser = Benchmark.make_argument_parser()\n parser.add_argument('--skip-reference',\n action='store_true',\n help='Skip the reference simulation run.')\n return parser",
"def get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-r', '--record', required=True,\n help='Win/Loss record of team specified as <Wins>-<Losses>')\n parser.add_argument('-s', '--streak', required=True,\n help='Current streak of team specified as (W|L)\\d+')\n parser.add_argument('-l', '--last', required=True,\n help='Record over last 10 games specified as <Wins>-<Losses>')\n return parser",
"def get_argument_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'names',\n help=(\n 'list of name-location pairs '\n '(location can be nat/hhs/cen/state or specific location labels)'))\n parser.add_argument(\n '--first',\n '-f',\n type=int,\n help='first epiweek override')\n parser.add_argument(\n '--last',\n '-l',\n type=int,\n help='last epiweek override')\n parser.add_argument(\n '--epiweek',\n '-w',\n type=int,\n help='epiweek override')\n parser.add_argument(\n '--test',\n '-t',\n default=False,\n action='store_true',\n help='dry run only')\n parser.add_argument(\n '--valid',\n '-v',\n default=False,\n action='store_true',\n help='do not fall back to stable wILI; require unstable wILI')\n return parser",
"def parse_args(parser):\n options, args = parser.parse_args()\n options.period = max(int(options.period), 65)\n return options, args,",
"def get_parser():\n parser = ArgumentParser(\n description=__doc__, formatter_class=ArgumentDefaultsHelpFormatter\n )\n parser.add_argument(\n \"-s\", \"--sentence\", dest=\"sentence\", help=\"sentence, splitted by ';'\"\n )\n return parser",
"def get_parser():\n\n parser = parser.ArgumentParser()\n return parser",
"def get_argparser(self):\n parser = argparse.ArgumentParser(description='Command Configuration')\n parser.add_argument('--coin', choices=['bitcoin', 'ethereum', 'litecoin'], default='bitcoin')\n parser.add_argument('--start_date', default='2019-10-21')\n parser.add_argument('--end_date', default='2019-10-31')\n parser.add_argument('--language', choices=['en', 'it', 'es', 'fr', 'de', 'ru', 'zh'], default='en')\n\n argparser = parser.parse_args()\n return argparser.__dict__",
"def get_parser():\n p = argparse.ArgumentParser(description='such a good program')\n p.add_argument('infile')\n p.add_argument('outfile')\n return p",
"def parser(self):\n return self.arg_parser",
"def get_parser(self):\n parser = argparse.ArgumentParser(description='Short sample app')\n\n parser.add_argument('-a', action=\"store_true\", default=False)\n parser.add_argument('-b', action=\"store\", dest=\"b\")\n parser.add_argument('-c', action=\"store\", dest=\"c\", type=int)\n return parser",
"def get_parser():\n\tparser = argparse.ArgumentParser('tallyup.py',\n\t\tformatter_class=argparse.RawDescriptionHelpFormatter,\n\t\tdescription=\"\"\"\nTally up a student score file.\n\t\t\"\"\"\n\t\t)\n\tversion = '%(prog)s ' + __version__\n\tparser.add_argument('--version', '-v', action='version', version=version,\n\t\thelp='show version of this command.')\n\tparser.add_argument('--csvfile', '-i', type=str, required=True, \n\t\thelp='input csv file.')\n\treturn parser",
"def parse_args():\n parser = ArgumentParser()\n parser.add_argument('-t', '--timer', action='store_true', \\\n help='Time the first random generation')\n parser.add_argument('-i', '--ibmq', default='', help='IBMQ token')\n parser.add_argument('-b', '--backend', default='', help='IBMQ backend')\n return parser.parse_args()",
"def get_parser(name):\n parser = argparse.ArgumentParser(name, formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n # print default value always\n parser.add_argument = partial(parser.add_argument, help=' ')\n return parser",
"def get_parser(name):\n parser = argparse.ArgumentParser(name, formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n # print default value always\n parser.add_argument = partial(parser.add_argument, help=' ')\n return parser",
"def arg_parse():\n p = ap.ArgumentParser()\n p.add_argument()\n return p.parse_args()",
"def get_parser_arguments():\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-s', '--students', type=str, default=None, help='Path to the students json file')\n parser.add_argument('-r', '--rooms', type=str, default=None, help='Path to the rooms json file')\n parser.add_argument('-f', '--format', choices=['xml', 'json'], type=str.lower, default=None,\n help='Output format of the results')\n return parser",
"def get_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-r\", \"--rule\", \n dest=\"rule\", type=int, \n help=\"Number of rule used. Valid options --> 0-255\")\n parser.add_argument(\"-w\", \"--width\", \n dest=\"width\", type=int, \n help=\"Number of columns, preferably odd number. Valid options --> >15\")\n parser.add_argument(\"-he\", \"--height\", \n dest=\"height\", type=int, \n help=\"Number of rows that you want printed. Do not use argument if you want infinite generations.\")\n parser.add_argument(\"-t\", \"--time\", \n dest=\"time\", type=int, \n help=\"Time that it takes to print two consecutive lines in ms. Default = 100ms\")\n \n args = parser.parse_args()\n \n # Rule Validation\n if not args.rule:\n parser.error(\"You need to provide a rule (0-255)\")\n elif (args.rule > 255) | (args.rule < 0):\n parser.error(\"Rulse needs to be a integer between 0 and 255.\")\n \n # Width Validation\n if not args.width:\n parser.error(\"You need to provide a width larget than 15 columns\")\n elif (args.width < 15):\n parser.error(\"width needs to be more than 15 columns.\")\n \n # Time Validation\n if not args.time:\n args.time = 100\n \n # Height Validation\n if args.height:\n if args.height < 1:\n parser.error(\"Height needs to be larger than 15 columns.\")\n \n \n return create_ruleset(args.rule), args.width, args.height, args.time",
"def get_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(\n description=f\"mobile_modem_exporter version {__version__}. Exports signal quality information for mobile modems. See the manpage or ReadTheDocs for more info.\"\n )\n\n parser.add_argument(\n \"PROMPATH\",\n type=str,\n help=\"The path to the prometheus node_exporter textfile collector file to write output to.\",\n )\n\n parser.add_argument(\n \"SERIALDEVICE\",\n nargs=\"+\",\n type=str,\n help=\"The path to a serial device to get signal quality from. Can be specified multiple times.\",\n )\n\n parser.add_argument(\n \"-d\",\n \"--debug\",\n action=\"store_const\",\n dest=\"loglevel\",\n const=\"DEBUG\",\n help=\"Debug mode. Equal to setting --log-level=DEBUG.\",\n default=argparse.SUPPRESS,\n )\n\n parser.add_argument(\n \"-l\",\n \"--log-level\",\n dest=\"loglevel\",\n choices=[\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", \"CRITICAL\"],\n help=\"Logging level. One of DEBUG, INFO, WARNING, ERROR, CRITICAL. Defaults to INFO.\",\n default=\"INFO\",\n )\n\n parser.add_argument(\n \"-s\",\n \"--sleep\",\n type=int,\n nargs=\"?\",\n help=\"Sleep this many seconds between runs, default: %(default)s\",\n default=10,\n )\n\n parser.add_argument(\n \"-q\",\n \"--quiet\",\n action=\"store_const\",\n dest=\"loglevel\",\n const=\"WARNING\",\n help=\"Quiet mode. No output at all if no errors are encountered. Equal to setting --log-level=WARNING.\",\n default=argparse.SUPPRESS,\n )\n\n parser.add_argument(\n \"-v\",\n \"--version\",\n action=\"version\",\n version=f\"%(prog)s version {__version__}\",\n help=\"Show mobile_modem_exporter version and exit.\",\n )\n\n return parser",
"def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('n_iter',\n help='number of iteration',\n type=int)\n parser.add_argument('n_processes',\n help='number of processes',\n type=int)\n parser.add_argument('method',\n help='mutual exclusion method')\n parser.add_argument('duration',\n help='Duration of each process',\n type=float)\n return parser.parse_args()",
"def cmdline_parser():\n parser = argparse.ArgumentParser(description=\"\"\" \"\"\")\n parser.add_argument(\"-g\", \"--gta\",\n help=\"\"\"gta sequences\"\"\",\n dest=\"gta\",\n required=True)\n return parser",
"def get_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', type=str)\n parser.add_argument('--method', type=str)\n parser.add_argument('--size_part', type=float, default=None)\n parser.add_argument('--start', type=int, default=0)\n parser.add_argument('--count', type=int, default=None)\n return parser",
"def arg_parser():\n import argparse\n return argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)",
"def get_parser():\n\tparser = argparse.ArgumentParser(description=\"Twitter Searcher\")\n\tparser.add_argument(\"-q\",\n\t\t\t\t\t\t\"--query\",\n\t\t\t\t\t\tdest=\"query\",\n\t\t\t\t\t\thelp=\"Query/Filter\",\n\t\t\t\t\t\tdefault='*')\n\tparser.add_argument(\"-d\",\n\t\t\t\t\t\"--data-dir\",\n\t\t\t\t\tdest=\"city\",\n\t\t\t\t\thelp=\"Output/Data Directory\")\n\treturn parser",
"def create_parser():\n parser = argparse.ArgumentParser(description='Watching for files containing magictext')\n parser.add_argument('--ext', help='File extensions to filter on, default=.txt', default='.txt')\n parser.add_argument('--poll', help=\"Polling interval in seconds, default=1.0\", type=float, default=1.0)\n parser.add_argument('directory', help='Directory to watch.')\n parser.add_argument('magictext', help='Text to search for within matching files.')\n return parser",
"def get_parser():\n parser = ArgumentParser(\n description='phpMyAdmin work reporting tool\\n\\nGenerates list of commits and issues handled in given period.',\n epilog='Credentials can be also stored in ~/.config/phpmyadmin:\\n\\n[github]\\nuser=USER\\ntoken=TOKEN',\n formatter_class=RawDescriptionHelpFormatter,\n )\n parser.add_argument(\n '-u', '--user',\n help='GitHub username, used for both reporting and authentication'\n )\n parser.add_argument(\n '-t', '--token',\n help='GitHub authentication token'\n )\n parser.add_argument(\n '-s', '--start-date',\n type=dateutil.parser.parse,\n default=datetime.now() - timedelta(days=7),\n help='Starting datetime, defaults to 7 days ago'\n )\n parser.add_argument(\n '-e', '--end-date',\n type=dateutil.parser.parse,\n default=datetime.now(),\n help='Ending datetime, defaults to current timestamp'\n )\n parser.add_argument(\n '-f', '--format',\n choices=('markdown', ),\n default='markdown',\n help='Output format',\n )\n parser.add_argument(\n '-w', '--weekly',\n action='store_true',\n help='Weekly report not including private repositories'\n )\n parser.add_argument(\n '-W', '--last-week',\n action='store_true',\n help='Create report for last week'\n )\n parser.add_argument(\n '-M', '--last-month',\n action='store_true',\n help='Create report for last month'\n )\n parser.add_argument(\n '--this-week',\n action='store_true',\n help='Create report for this week'\n )\n return parser",
"def command_line_argument_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(\n description=description(),\n epilog=epilog(),\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n return parser",
"def command_line_argument_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(\n description=description(),\n epilog=epilog(),\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n return parser",
"def parse_args():\n\n parser = argparse.ArgumentParser(description=\"Benchmark Thing WoT server\")\n parser = utils.extend_server_arg_parser(parser)\n\n return parser.parse_args()",
"def get_parser():\n parser = argparse.ArgumentParser(description='Parser des liens sur les sites Jahia et Wordpress.')\n parser.add_argument('ficher_des_sites', help='le fichier contenant les sites a parser.')\n parser.add_argument('-v', '--version', help='affiche la version du parser',\n action='version', version='%(prog)s ' + __version__)\n return parser"
]
| [
"0.6568716",
"0.59897435",
"0.59649503",
"0.5915204",
"0.5913862",
"0.5864289",
"0.5827687",
"0.5750248",
"0.57498866",
"0.57411927",
"0.57269704",
"0.56963646",
"0.56855893",
"0.5682023",
"0.5682023",
"0.56393063",
"0.56157017",
"0.56120384",
"0.5598651",
"0.55702114",
"0.5544521",
"0.5542296",
"0.5532885",
"0.5525756",
"0.55219704",
"0.5512593",
"0.54621184",
"0.54621184",
"0.5455501",
"0.543132"
]
| 0.74796635 | 0 |
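The parser in the record above relies on `pandas.Timestamp` being usable directly as an argparse `type`, so the positional arguments parse straight into timestamps. A self-contained sketch of that pattern, with the project-specific pieces (`_add_common_to_parser`, `io.plotdir`, `--area`, `--outdir`) omitted:

```python
import argparse

import pandas

parser = argparse.ArgumentParser(
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# pandas.Timestamp is a callable that accepts a string, so it works as a type.
parser.add_argument("start_time", action="store", type=pandas.Timestamp)
parser.add_argument("end_time", action="store", type=pandas.Timestamp)
parser.add_argument("--sector", action="store", type=str,
                    choices=("C", "F", "M1", "M2"))

args = parser.parse_args(["2021-06-01T00:00", "2021-06-01T06:00", "--sector", "C"])
print(args.start_time, args.end_time, args.sector)
```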
Checks if Python version is supported by Cuckoo. | def check_python_version():
version = sys.version.split()[0]
if version < "2.6" or version >= "3":
raise CuckooStartupError("You are running an incompatible version of Python, please use 2.6 or 2.7") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def python_compatible():\n result = False\n req_ver = vers.convert('3.9.5')\n pythonver = vers.convert('{major}.{minor}.{micro}'.format(major=sys.version_info.major,\n minor=sys.version_info.minor,\n micro=sys.version_info.micro))\n\n result = pythonver >= req_ver\n\n return result",
"def good_py_version() -> bool:\n return sys.version_info.major >= 3 and sys.version_info.minor >= 6",
"def check_pythonver(reqver_text):\n\treqver = map(int, reqver_text.split('.'))\n\tpythonver = sys.version_info[:3]\n\treturn check_ver(pythonver, reqver)",
"def _check_python_version(self):\n python_exe = tools.which(\"python\")\n if not python_exe:\n msg = (\"Python must be available in PATH \"\n \"in order to build v8\")\n raise ConanInvalidConfiguration(msg)\n # In any case, check its actual version for compatibility\n from six import StringIO # Python 2 and 3 compatible\n version_buf = StringIO()\n cmd_v = \"{} --version\".format(python_exe)\n self.run(cmd_v, output=version_buf)\n p = re.compile(r'Python (\\d+\\.\\d+\\.\\d+)')\n verstr = p.match(version_buf.getvalue().strip()).group(1)\n if verstr.endswith('+'):\n verstr = verstr[:-1]\n version = tools.Version(verstr)\n # >= 2.7.5 & < 3\n py2_min = \"2.7.5\"\n py2_max = \"3.0.0\"\n py3_min = \"3.8.0\"\n if (version >= py2_min) and (version < py2_max):\n msg = (\"Found valid Python 2 required for v8:\"\n \" version={}, path={}\".format(version_buf.getvalue().strip(), python_exe))\n self.output.success(msg)\n elif version >= py3_min:\n msg = (\"Found valid Python 3 required for v8:\"\n \" version={}, path={}\".format(version_buf.getvalue().strip(), python_exe))\n self.output.success(msg)\n else:\n msg = (\"Found Python in path, but with invalid version {}\"\n \" (v8 requires >= {} and < \"\n \"{} or >= {})\".format(verstr, py2_min, py2_max, py3_min))\n raise ConanInvalidConfiguration(msg)",
"def test_python_version(self):\n assert 2 == sys.version_info.major\n assert 7 == sys.version_info.minor\n assert 6 <= sys.version_info.micro",
"def check_pyversion() -> None:\n pyversion = float(str(sys.version_info[0]) + '.' + str(sys.version_info[1]))\n if not pyversion >= 3.6:\n text = f'''FAIL: You are using python {pyversion}. This pipeline was built with python 3.7.\nFAIL: use 3.6 <= python version < 3.8\nFAIL: exiting cmh_test.py'''\n print(ColorText(text).fail())\n exit()\n if not pyversion < 3.8:\n print(ColorText(\"FAIL: python 3.8 has issues with the ipyparallel engine returns.\").fail())\n print(ColorText(\"FAIL: use 3.6 <= python version < 3.8\").fail())\n print(ColorText(\"FAIL: exiting cmh_test.py\").fail())\n exit()",
"def check_python_version():\n if sys.version_info < MINIMUM_PYTHON_VERSION:\n sys.exit(\"Python {}.{}+ is required.\".format(*MINIMUM_PYTHON_VERSION))",
"def check_python_version():\n if sys.version_info < MINIMUM_PYTHON_VERSION:\n sys.exit(\"Python {}.{}+ is required.\".format(*MINIMUM_PYTHON_VERSION))",
"def check_python_version():\n if sys.version_info < MINIMUM_PYTHON_VERSION:\n sys.exit(\"Python {}.{}+ is required.\".format(*MINIMUM_PYTHON_VERSION))",
"def has_set_up_py_in(self):\n return (self.version_info >= (4, 10))",
"def check_python():\n out_info(\"Installed Python: {0} {1}\".format(PY_VERSION[0],\n PY_VERSION[1]))\n if not (PY_VERSION[0].split(\".\")[0] == \"3\"\n and PY_VERSION[0].split(\".\")[1] in (\"3\", \"4\", \"5\", \"6\")\n and PY_VERSION[1] == \"64bit\"):\n out_error(\"Please run this script with Python version 3.3, 3.4, 3.5 or 3.6 \"\n \"64bit and try again.\")\n exit(1)",
"def test_python_after_38():\n import sys\n assert sys.version_info >= (3, 8)",
"def is_running_py3():\n return sys.version_info >= (3, 0)",
"def python_version_check():\n min_version_list = PYTHON_MIN_VERSION.split(\".\")\n # Truncate if the list is more the 4 items\n if len(min_version_list) > 4:\n min_version_list = min_version_list[:4]\n # Fill if the list is less then 4 items\n if len(min_version_list) == 1:\n min_version_list.append(\"0\")\n if len(min_version_list) == 2:\n min_version_list.append(\"0\")\n if len(min_version_list) == 3:\n min_version_list.append(\"f0\")\n # Calculate the minimum version and an integer, which, when displayed as\n # hex, is easily recognised as the version. E.g. 0x30502f0 is 3.5.2\n min_version_value = 0\n for index, item in enumerate(min_version_list[::-1]):\n min_version_value = min_version_value + int(item, 16) * 2**(index * 8)\n if debug: print(\"Python Version Minimum:{}, Decimal:{}, Hex:{}\"\n .format(PYTHON_MIN_VERSION, min_version_value,\n hex(min_version_value)))\n # test value and exit if below minimum revision\n if sys.hexversion < min_version_value:\n print(\"Python Version: {}. Required minimum version is: {}. Exiting...\"\n .format(sys.version.split(\" \")[0], PYTHON_MIN_VERSION))\n sys.exit()",
"def _check_python_version(min_version):\n if sys.version_info < min_version:\n raise RuntimeError(\"Scikit-lr requires Python {0} or later. \"\n \"The current Python version is {1} installed \"\n \"in {2}.\".format(python_version(), min_version,\n sys.executable))",
"def is_py3():\n return sys.version_info >= (3, 0)",
"def exists():\n return PYTHON_VERSION is not None",
"def test_python_supported_version(self):\r\n min_acceptable_version = (2, 7, 0)\r\n min_unacceptable_version = (3, 0, 0)\r\n\r\n command = 'python --version'\r\n proc = Popen(command, shell=True, universal_newlines=True,\r\n stdout=PIPE, stderr=STDOUT)\r\n stdout = proc.stdout.read()\r\n\r\n version_str_matches = re.findall('Python\\s+(\\S+)\\s*', stdout.strip())\r\n self.assertEqual(len(version_str_matches), 1,\r\n \"Could not determine the Python version in '%s'.\" %\r\n stdout)\r\n version_string = version_str_matches[0]\r\n\r\n try:\r\n if version_string[-1] == '+':\r\n version_string = version_string[:-1]\r\n version = tuple(map(int, version_string.split('.')))\r\n if len(version) == 2:\r\n version = (version[0], version[1], 0)\r\n pass_test = (version >= min_acceptable_version and\r\n version < min_unacceptable_version)\r\n except ValueError:\r\n pass_test = False\r\n version_string = stdout\r\n self.assertTrue(pass_test,\r\n \"Unsupported Python version. Must be >= %s and < %s, \"\r\n \"but running %s.\"\r\n % ('.'.join(map(str, min_acceptable_version)),\r\n '.'.join(map(str, min_unacceptable_version)),\r\n version_string))",
"def test_python_version():\n assert sys.version_info.major == 3",
"def _is_python_version(s: str) -> bool:\n\n return s.startswith(\"2\") or s.startswith(\"3\")",
"def _check_version () -> None:\n py_version_info: typing.Tuple = sys.version_info[:2]\n\n if py_version_info < MIN_PY_VERSION:\n error_msg = \"This version of pytextrank requires Python {} or later ({} detected)\\n\"\n raise RuntimeError(error_msg.format(_versify(MIN_PY_VERSION), _versify(py_version_info)))",
"def is_py3() -> bool:\n return sys.version_info[0] == 3",
"def verify_system_python(self):\n system_python_bin = Path(\"/usr/bin/python3\").resolve()\n system_version = system_python_bin.name.split(\".\")\n if system_version[0] != \"python3\" or len(system_version) == 1:\n raise BriefcaseCommandError(\"Can't determine the system python version\")\n\n if system_version[1] != str(self.tools.sys.version_info.minor):\n raise BriefcaseCommandError(\n f\"The version of Python being used to run Briefcase ({self.python_version_tag}) \"\n f\"is not the system python3 (3.{system_version[1]}).\"\n )",
"def is_py3():\n return sys.version_info[0] == 3",
"def test_version():\n versions = ((2, 7, 16), (3, 5, 7), (3, 6, 8), (3, 7, 3))\n assert sys.version_info[:3] in versions",
"def check_py_version(self, cur_version):\n\n # convert cur_version to string, in case of erroneous type being passed\n cur_version = str(cur_version)\n\n acceptable_python_versions_regex = r\"(^(2\\.[6-9])(\\.?\\d{1,2})?$)|(^(3\\.[3-9])(\\.?\\d{1,2})?$)\"\n pyversions_regex_compiled = re.compile(acceptable_python_versions_regex)\n pyversions_match = pyversions_regex_compiled.match(cur_version)\n\n # If match is found, return True. If no match, return False\n if pyversions_match:\n return True\n else:\n return False",
"def test_python3(self):\n if sys.version.startswith(\"3.\"):\n self.assertTrue(_PY3)",
"def check_supported_features(self):",
"def verify_python(self, app):\n output = self.tools[app].app_context.check_output(\n [\n f\"python{app.python_version_tag}\",\n \"-c\",\n (\n \"import sys; \"\n \"print(f'{sys.version_info.major}.{sys.version_info.minor}')\"\n ),\n ]\n )\n # Update the python version tag with the *actual* python version.\n app.python_version_tag = output.split(\"\\n\")[0]\n target_python_version = tuple(int(v) for v in app.python_version_tag.split(\".\"))\n\n if target_python_version < self.briefcase_required_python_version:\n briefcase_min_version = \".\".join(\n str(v) for v in self.briefcase_required_python_version\n )\n raise BriefcaseCommandError(\n f\"The system python3 version provided by {app.target_image} \"\n f\"is {app.python_version_tag}; Briefcase requires a \"\n f\"minimum Python3 version of {briefcase_min_version}.\"\n )\n elif target_python_version != (\n self.tools.sys.version_info.major,\n self.tools.sys.version_info.minor,\n ):\n self.logger.warning(\n f\"\"\"\n*************************************************************************\n** WARNING: Python version mismatch! **\n*************************************************************************\n\n The system python3 provided by {app.target_image} is {app.python_version_tag}.\n This is not the same as your local system ({self.python_version_tag}).\n\n Ensure you have tested for Python version compatibility before\n releasing this app.\n\n*************************************************************************\n\"\"\"\n )",
"def compatible_version(self):\n note_version = self.py_version\n py_version = sys.version_info\n if note_version[0] != py_version[0]:\n return False\n if len(note_version) > 1 and note_version[1] > py_version[1]:\n return False\n return True"
]
| [
"0.7637709",
"0.7555077",
"0.72389746",
"0.7111935",
"0.70349747",
"0.7027567",
"0.70126307",
"0.70126307",
"0.70126307",
"0.70080096",
"0.6967738",
"0.68840957",
"0.6819389",
"0.6783709",
"0.676768",
"0.6756775",
"0.6746007",
"0.67407185",
"0.6651795",
"0.66268104",
"0.65999544",
"0.6599474",
"0.6578414",
"0.65332925",
"0.65253156",
"0.6480844",
"0.64506835",
"0.6358027",
"0.63381594",
"0.6302274"
]
| 0.795639 | 0 |
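The document in the record above compares version strings lexicographically, which works for the 2.6/2.7 range it targets but is fragile in general (for example, "2.10" < "2.6" as strings). A sketch of the same check written against `sys.version_info` tuples; `RuntimeError` stands in for the project-specific `CuckooStartupError`:

```python
import sys


def check_python_version():
    # Tuple comparison avoids the lexicographic pitfalls of string comparison.
    if not ((2, 6) <= sys.version_info[:2] < (3, 0)):
        raise RuntimeError("Incompatible Python version; please use 2.6 or 2.7")
```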
Checks if dependencies are installed. | def check_dependencies():
check_python_version()
dependencies = ["sqlite3"]
for dependency in dependencies:
try:
__import__(dependency)
except ImportError as e:
raise CuckooStartupError("Unable to import \"%s\"" % dependency)
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_dependencies(self):\n imgmin = exists('imgmin')\n image_optim = exists('image_optim')\n\n if not imgmin or not image_optim:\n puts(p('Dependencies have not been installed:'))\n\n message = 'imgmin - https://github.com/rflynn/imgmin'\n message = s('✓ ' + message) if imgmin else e('✗ ' + message)\n puts(message)\n\n message = 'image_optim - http://rubygems.org/gems/image_optim'\n message = s('✓ ' + message) if image_optim else e('✗ ' + message)\n puts(message)\n\n sys.exit(0)",
"def check_dependencies(cls):\n\n missing = []\n for name in cls.DEPENDENCIES:\n try:\n import_module(name)\n except ModuleNotFoundError:\n missing.append(name)\n\n if any(missing):\n msg = ('The sup3r stitching module depends on the following '\n 'special dependencies that were not found in the active '\n 'environment: {}'.format(missing))\n logger.error(msg)\n raise ModuleNotFoundError(msg)",
"def check_deps(self):\n\t\tfor plugin in self.__plugins.values():\n\t\t\tif plugin.require:\n\t\t\t\tfor req in plugin.require.split(','):\n\t\t\t\t\tn, v = req.split(':')\n\t\t\t\t\tif not n in self.__plugins or \\\n\t\t\t\t\tv != self.__plugins[n].version:\n\t\t\t\t\t\treturn False\n\t\treturn True",
"def check_dependencies():\n required_found = True\n recommended_found = True\n print 'Checking dependencies ...\\n'\n print 'Required dependencies:'\n try:\n import Image\n assert Image.VERSION >= '1.1.5'\n print ' Python Imaging Library ....... OK'\n except ImportError:\n print ' !!! Python Imaging Library ... Not found'\n required_found = False\n except AssertionError:\n print ' !!! Python Imaging Library ... version', Image.VERSION,\n print 'found'\n print ' !!! Python Imaging Library 1.1.5 or higher is required'\n required_found = False\n if not required_found:\n print '\\nCould not find all required dependencies!'\n print 'Please install them and try again.'\n sys.exit(1)\n print",
"def _sufficient_deps(cls, deps):\n if cls.MODEL_PACKAGE is None:\n return True\n else:\n for d in deps.conda:\n if cls.MODEL_PACKAGE in d:\n return True\n for d in deps.pip:\n if cls.MODEL_PACKAGE in d:\n return True\n return False",
"def check_dependencies():\n\n # Check for python version\n print(\"Python location : {}\".format(sys.executable))\n print(\"Python version : {}\".format(sys.version))\n if sys.version_info[0] < 3:\n warnings.warn(\n \"WARNING : Using python 2. This Python version is no longer maintained. Use at your own risk.\"\n )\n\n # Check FSL installation\n try:\n print(f\"Your fsl directory is located here: {os.environ['FSLDIR']}\")\n except KeyError:\n raise AssertionError(\n \"You do not have FSL installed! See installation instructions here: https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FslInstallation\"\n )\n\n # Check AFNI installation\n try:\n print(\n f\"Your AFNI directory is located here: {subprocess.check_output('which afni', shell=True, universal_newlines=True)}\"\n )\n except subprocess.CalledProcessError:\n raise AssertionError(\n \"You do not have AFNI installed! See installation instructions here: https://afni.nimh.nih.gov/pub/dist/doc/htmldoc/background_install/main_toc.html\"\n )",
"def check_requirements():\n process_output = subprocess.check_output([sys.executable, '-m', 'pip', 'freeze'])\n installed_packages = [r.decode().split('==')[0] for r in process_output.split()]\n if 'pandas' and 'matplotlib' in installed_packages:\n return True\n else:\n print('You don`t have one of required libralies\\n'\n 'I can`t create histogram\\n'\n 'Required libralies: \\n'\n '->pandas\\n'\n '->matplotlib\\n')\n return False",
"def dependencies_met():\n # Check Java VM command line runner.\n try:\n Popen(['java'], shell=False, stderr=PIPE).communicate()[1]\n except:\n print 'Dependecy unmet. Java virtual machine command line runner not ' \\\n 'found.'\n return False\n # Check selenium-server.jar is ready to run.\n output = Popen(('java -jar %s -unrecognized_argument' % SELENIUM_RC_PATH\n ).split(), shell=False, stderr=PIPE).communicate()[1]\n if not re.search('Usage: java -jar selenium-server.jar', output):\n print 'Dependecy unmet. Selenium RC server (selenium-server.jar) not ' \\\n 'found.'\n return False\n # Check selenium RC python driver is available.\n try:\n import selenium\n except:\n print 'Dependecy unmet. Selenium RC python driver (selenium.py) not ' \\\n 'found.'\n return False\n # Check CherryPy wsgi server is available.\n try:\n import wsgiserver\n except:\n print 'Dependecy unmet. CherryPy wsgi server (wsgiserver.py) not found.'\n return False\n # Check fixture support is implemented for the database engine.\n if not settings.DATABASE_ENGINE in ['sqlite3', 'postgresql_psycopg2']:\n print 'Dependecy unmet. Fixture support for database engine %s not ' \\\n 'implemented.' % settings.DATABASE_ENGINE\n return False\n return True",
"def install_dependencies(self):\n return False",
"def check_missing_dep():\n global MISSING_PACKAGES, INSTALLED_PACKAGES, ENABLE_CUDA\n if ENABLE_CUDA and IS_MACOS:\n REQUIRED_PACKAGES.extend(MACOS_REQUIRED_PACKAGES)\n MISSING_PACKAGES = []\n for pkg in REQUIRED_PACKAGES:\n key = pkg.split(\"==\")[0]\n if key not in INSTALLED_PACKAGES:\n MISSING_PACKAGES.append(pkg)\n continue\n else:\n if len(pkg.split(\"==\")) > 1:\n if pkg.split(\"==\")[1] != INSTALLED_PACKAGES.get(key):\n MISSING_PACKAGES.append(pkg)\n continue",
"def test_dependencies_are_installed(self):\n installed = [p['id'] for p in self.qi.listInstalledProducts()]\n self.assertIn('plone.restapi', installed)\n self.assertIn('plone.app.contenttypes', installed)\n self.assertIn('plone.app.multilingual', installed)",
"def test_is_installed():\n assert _is_installed('coverage') is True # regular dependency\n assert _is_installed('pytest') is True # dev dependency\n assert _is_installed('missing') is False # missing dependency",
"def checkDependencies(check=True):\n modules = []\n f = open(CONST_REQUIREMENTS_FILE)\n for line in f:\n if line.find('#'):\n modules.append([line[:line.index('=')], (line[line.index('=')+2:]).strip()])\n f.close()\n\n for module in modules:\n try:\n __import__(module[0])\n except ImportError: \n if query_user_bool(\"Missing module %s.\" \\\n \" Do you wish to install it?\" % module[0]):\n subprocess.call([\"pip2\", \"install\", \"%s==%s\" %\n (module[0], module[1])])\n \n else:\n return False\n return True",
"def check_dependencies(module):\n try:\n from fhempy import lib\n\n initfile = inspect.getfile(lib)\n fhempy_root = os.path.dirname(initfile)\n with open(fhempy_root + \"/\" + module + \"/manifest.json\", \"r\") as f:\n manifest = json.load(f)\n\n if \"requirements\" in manifest:\n for req in manifest[\"requirements\"]:\n logger.debug(\"Check requirement: \" + req)\n if is_installed(req) == False:\n logger.debug(\" NOK\")\n return False\n else:\n logger.debug(\" OK\")\n except FileNotFoundError:\n logger.error(\"manifest.json not found!\")\n\n return True",
"def check_system_dependencies():\n out_info(\"Checking System Dependencies...\")\n check_cmake()\n if OS_VERSION[0] == \"Windows\":\n check_visual_studio()\n check_cplus_plus()\n if OS_VERSION[0] == \"Linux\":\n check_gcc()\n check_gpp()",
"def checkRequiredDependencies(self):\n \n # skip dependency check for downloading only\n if( self.downloadOnly ):\n return\n\n # hard dependencies\n for req in self.reqmodules:\n if( self.parent.module(req) == None ):\n # check if there is an auto detected module\n if( self.parent.module(req, True) == None ):\n self.abort( self.name + \" requires \" + req \\\n + \" and it wasn't found in your config file!!\" )\n else:\n # use auto detected module\n self.parent.use( self.parent.module(req, True) )\n self.parent.module( req ).init()\n\n print self.name + \": auto-detected \" + req + \" version \" + self.parent.module( req ).version\n \n # build only dependencies\n if( self.mode == \"install\" ):\n mods = self.reqmodules_buildonly + self.reqmodules_external\n for req in mods:\n if( self.parent.module(req) == None ):\n # check if there is an auto detected module\n if( self.parent.module(req, True) == None ):\n self.abort( req + \" not found in your config file!! \" + self.name \\\n + \" cannot be built without \" + req )\n else:\n # use auto detected module\n self.parent.use( self.parent.module(req, True) )\n self.parent.module( req ).init()\n\n print \" - \" + self.name + \": auto-detected \" + req + \" version \" + self.parent.module( req ).version",
"def check_go_dependencies(self, gocat_dir):\n for d in self.dependencies:\n dep_result = subprocess.run('go list \"{}\"'.format(d), shell=True, cwd=gocat_dir,\n stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)\n if (dep_result.stdout.decode()).strip() != d:\n return False\n return True",
"def dependency_check(dependency_set=CORE, exit_on_failure=True):\n verify_python_version()\n \n disable_warnings()\n\n platform = get_current_platform()\n\n #\n # Check for missing python modules\n #\n failed_deps = []\n pip_distributions = pip.get_installed_distributions()\n \n for w3af_req in platform.PIP_PACKAGES[dependency_set]:\n for dist in pip_distributions:\n if w3af_req.package_name.lower() == dist.project_name.lower():\n\n w3af_req_version = str(Version(w3af_req.package_version))\n dist_version = str(dist.version)\n\n if w3af_req_version == dist_version:\n # It's installed and the version matches!\n break\n else:\n failed_deps.append(w3af_req)\n\n #\n # Check for missing operating system packages\n #\n missing_os_packages = []\n for os_package in platform.SYSTEM_PACKAGES[dependency_set]:\n if not platform.os_package_is_installed(os_package):\n missing_os_packages.append(os_package)\n \n os_packages = list(set(missing_os_packages))\n\n # All installed?\n if not failed_deps and not os_packages:\n # False means: do not exit()\n enable_warnings()\n return False\n\n generate_requirements_txt(failed_deps)\n script_path = generate_helper_script(platform.PKG_MANAGER_CMD, os_packages,\n platform.PIP_CMD, failed_deps)\n\n #\n # Report the missing system packages\n #\n msg = ('w3af\\'s requirements are not met, one or more third-party'\n ' libraries need to be installed.\\n\\n')\n \n if os_packages:\n missing_pkgs = ' '.join(os_packages)\n \n msg += ('On %s systems please install the following operating'\n ' system packages before running the pip installer:\\n'\n ' %s %s\\n')\n print(msg % (platform.SYSTEM_NAME, platform.PKG_MANAGER_CMD,\n missing_pkgs))\n \n #\n # Report all missing python modules\n # \n if failed_deps:\n # pylint: disable=E1101\n msg = ('Your python installation needs the following modules'\n ' to run w3af:\\n')\n msg += ' ' + ' '.join([fdep.module_name for fdep in failed_deps])\n print(msg)\n print('\\n')\n # pylint: enable=E1101\n \n #\n # Report missing pip packages\n #\n not_git_pkgs = [fdep for fdep in failed_deps if not fdep.is_git]\n git_pkgs = [fdep.git_src for fdep in failed_deps if fdep.is_git]\n \n msg = ('After installing any missing operating system packages, use'\n ' pip to install the remaining modules:\\n')\n \n if not_git_pkgs:\n cmd = generate_pip_install_non_git(platform.PIP_CMD, not_git_pkgs)\n msg += ' %s\\n' % cmd\n \n if git_pkgs:\n for missing_git_pkg in git_pkgs:\n msg += ' %s\\n' % generate_pip_install_git(platform.PIP_CMD,\n missing_git_pkg)\n \n print(msg)\n \n msg = 'A script with these commands has been created for you at %s'\n print(msg % script_path)\n \n enable_warnings()\n platform.after_hook()\n \n if exit_on_failure:\n sys.exit(1)\n else:\n return True",
"def check_requirements():\n if not os.path.exists(REQUIREMENTS):\n sys.exit(\n ansi.error() + ' %s is missing. Please check it in.' % ansi.underline(REQUIREMENTS)\n )\n\n with open(REQUIREMENTS, 'r', encoding='utf-8') as f:\n dependencies = f.readlines()\n\n vcs = [d for d in dependencies if re.match(r'^(-e )?(git|svn|hg|bzr).*', d)]\n\n dependencies = list(set(dependencies) - set(vcs))\n\n missing = []\n try:\n pkg_resources.require(dependencies)\n except (\n pkg_resources.ContextualVersionConflict,\n pkg_resources.DistributionNotFound,\n pkg_resources.VersionConflict\n ) as error:\n missing.append(str(error))\n except pkg_resources.RequirementParseError:\n pass\n\n if missing:\n missing = ' missing requirement:\\n ' + os.linesep.join(missing)\n if '--env-checked' in sys.argv:\n sys.exit(ansi.error() + missing + '\\nRequirement installation failure, please check for errors in:\\n $ lore install\\n')\n else:\n print(ansi.warning() + missing)\n import lore.__main__\n lore.__main__.install_requirements(None)\n reboot('--env-checked')",
"def check_packages(options):\n print '\\033[1;33m# Checking direct VIKI dependencies\\033[1;m'\n installed_ok = dependencies.check_installed_packages()\n print '\\n\\033[1;33m# Checking second level ROS dependencies, using rosdep\\033[1;m'\n second_level_ok = dependencies.get_second_level_dependencies()\n\n if installed_ok and second_level_ok:\n print '\\033[1;32mAll dependencies satisfied!\\033[1;m'\n else:\n print '\\033[1;31mTry running [viki install-dependencies] to install the dependencies\\033[1;m'",
"def test_check_dependencies_with_found(self):\n self.spy_on(check_install, op=kgb.SpyOpMatchAny([\n {\n 'args': (['cm', 'version'],),\n 'op': kgb.SpyOpReturn(True),\n },\n ]))\n\n client = self.build_client(setup=False)\n client.check_dependencies()\n\n self.assertSpyCallCount(check_install, 1)\n self.assertSpyCalledWith(check_install, ['cm', 'version'])",
"def check_requirements():\n debug(\"check_requirements\")\n needed = Requirements(Project).find_missing_requirements()\n if needed:\n info(\"Please add the following to your %s file:\\n\" % 'requirements.txt')\n info(\"\\n\".join(str(needed)))\n else:\n info(\"Your %s includes all known herringlib task requirements\" % 'requirements.txt')",
"def check_requirements():\n\n # Which programs are reqired?\n required_programs = ['virtualbox', 'vagrant']\n\n # Make sure the required programs are installed.\n for program in required_programs:\n\n # What's the path to the executable?\n try:\n subprocess.check_output(['which', program])\n except subprocess.CalledProcessError:\n message = \"Please install \" + program + \" before proceeding.\"\n Utilities.log(message)\n exit(1)",
"def test_check_dependencies_with_missing(self):\n self.spy_on(check_install, op=kgb.SpyOpReturn(False))\n\n client = self.build_client(setup=False)\n\n message = \"Command line tools ('cm') are missing.\"\n\n with self.assertRaisesMessage(SCMClientDependencyError, message):\n client.check_dependencies()\n\n self.assertSpyCallCount(check_install, 1)\n self.assertSpyCalledWith(check_install, ['cm', 'version'])",
"def test_packages_present(self):\n packages = [\"ca-certificates\", \"sudo\", \"wget\", \"unzip\"]\n for pkg in packages:\n with self.subTest(package=pkg):\n self.assertTrue(self.host.package(pkg).is_installed)",
"def dependencies_satisfied(self, plugin):\n for depends in plugin.dependencies:\n if depends not in self.config['plugins']:\n log.error(\"{0} depends on {1}, but {1} wasn't in the \"\n \"config file. To use {0}, install {1} and add \"\n \"it to the config.\".format(plugin.name, depends))\n return False\n return True",
"def requirement_missing(script):\n if \"requires\" in script:\n if script[\"requires\"] is None:\n return False\n for package in script[\"requires\"].split():\n try:\n pkg_resources.working_set.require(package)\n except Exception:\n return True\n return False",
"def check_import():\n print('[GenHub] Checking Python modules.')\n\n basemod = [('yaml', 'pyyaml'), ('pycurl', 'pycurl')]\n devmod = ['pep8', 'pytest', 'pytest-cov', 'coverage']\n\n packages = dict()\n for importname, packagename in basemod:\n try:\n importlib.import_module(importname)\n packages[packagename] = True\n except ImportError:\n packages[packagename] = False\n for packagename in devmod:\n try:\n importlib.import_module(packagename)\n packages[packagename] = True\n except ImportError:\n packages[packagename] = False\n\n rundep = False\n for pkg in packages:\n char = '+'\n msg = 'Installed.'\n if packages[pkg] is False:\n char = '-'\n msg = 'Not installed!'\n rundep = True\n print('%c package %-12s: %s' % (char, pkg, msg))\n if rundep is True:\n print('Please install these dependencies before proceding')\n print('')",
"def checkDeps( self ):\n\n # skip dependency check for downloading only\n if( self.downloadOnly ):\n return True\n\n # skip dependency check if package is going to be installed\n if( self.mode == \"install\" ):\n return True\n\n log.debug( 'Checking dependencies of %s', self.name )\n \n file = self.realPath() + \"/.dependencies\"\n \n r = True\n\n # if file doesn't exist return True\n if( not os.path.exists( file )):\n return True\n\n # open dependencies file\n f = open( file )\n filedeplist = {}\n for line in f.readlines():\n line = line.strip()\n if( (not line.startswith(os.linesep)) and (not line.startswith(\"#\")) \\\n and (len(line) > 0 )):\n tokens = line.split(\":\")\n filedeplist[ tokens[0] ] = tokens[1]\n f.close()\n\n log.debug( 'Dependencies read from file: %s', filedeplist )\n\n # get actual dependecies\n deplist={}\n self.getDepList(deplist)\n del deplist[self.name]\n\n log.debug( 'Dependencies found in current cfg file: %s', deplist )\n \n # compare dependencies\n for k, v in filedeplist.iteritems():\n if( deplist.has_key( k )):\n if( deplist[k] != v ):\n if( os.path.basename(deplist[k]) != os.path.basename(v) ):\n if( r ):\n print \"*** WARNING: ***\\n***\\tFollowing dependencies from \" + self.name + \" located at [ \" \\\n + self.realPath() + \" ] failed:\\n***\"\n print \"***\\t * \" + k + \" \" + os.path.basename(v) + \" differs from version \" \\\n + os.path.basename(deplist[k]) + \" defined in your config file..\"\n r = False\n else:\n if( r ): #just print this once\n print \"*** WARNING: ***\\n***\\tFollowing dependencies from \" + self.name + \" located at [ \" + self.realPath() \\\n + \" ] failed:\\n***\"\n print \"***\\t * \" + k + \" not found in your config file!!\"\n r = False\n \n\n if( not r ):\n print \"***\"\n if( self.useLink ):\n print \"***\\t\" + self.name + \" is in \\\"link\\\" mode, if you want to rebuild it with the new dependencies set it to \\\"use\\\" mode...\"\n r = True\n else:\n if( not self.parent.noAutomaticRebuilds ):\n print \"***\\t * \" + self.name + \" changed to \\\"install\\\" mode and rebuild flag set to True...\"\n self.mode = \"install\"\n self.rebuild = True\n self.preCheckDeps()\n print \"***\\n***\\tUpdating dependency tree ( modules that depend on \" + self.name + \" need also to be rebuilt )...\\n***\"\n self.updateDepTree([])\n print \"***\\n***\\tif you do NOT want to rebuild this module(s) just answer \\\"no\\\" later on in the installation process,\\n\" \\\n + \"***\\tor set the global flag ilcsoft.noAutomaticRebuilds=True in your config file...\"\n else:\n print \"***\\n***\\tglobal flag ilcsoft.noAutomaticRebuilds is set to True, nothing will be done...\\n***\"\n return r",
"def test_dependencies_installed(self):\n installer = getattr(self.portal, 'portal_quickinstaller')\n self.assertTrue(installer.isProductInstalled('plone.app.dexterity'))"
]
| [
"0.793422",
"0.7930604",
"0.7912953",
"0.78336424",
"0.76999456",
"0.76947135",
"0.7686539",
"0.7666271",
"0.76288325",
"0.7572292",
"0.75151587",
"0.7492932",
"0.74447614",
"0.74242735",
"0.73918897",
"0.736468",
"0.7360159",
"0.7314458",
"0.72915107",
"0.7267136",
"0.72565717",
"0.72332567",
"0.7224241",
"0.71477485",
"0.7147573",
"0.7136367",
"0.7120639",
"0.7106981",
"0.70779365",
"0.7077837"
]
| 0.82182187 | 0 |
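The negative snippets listed above all verify installed third-party dependencies in some form (pip distributions, pkg_resources, system packages, ROS packages). As a rough, self-contained sketch of the pattern they share, and not taken from any of the snippets, the same check can be written against importlib.metadata on Python 3.8+; the requirement names below are placeholders:

# Minimal sketch of a dependency check (placeholder requirement names).
from importlib.metadata import PackageNotFoundError, version

REQUIRED = ["requests", "pyyaml"]  # hypothetical requirement list

def missing_requirements(required=REQUIRED):
    missing = []
    for name in required:
        try:
            version(name)  # raises PackageNotFoundError if not installed
        except PackageNotFoundError:
            missing.append(name)
    return missing

if __name__ == "__main__":
    names = missing_requirements()
    if names:
        print("Missing requirements: " + ", ".join(names))
    else:
        print("All requirements satisfied.")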
Checks if config files exist. | def check_configs():
    configs = [os.path.join(CUCKOO_ROOT, "conf", "cuckoo.conf"),
               os.path.join(CUCKOO_ROOT, "conf", "reporting.conf")]
    for config in configs:
        if not os.path.exists(config):
            raise CuckooStartupError("Config file does not exist at path: %s" % config)
    return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_configfiles():\n return (all(os.path.isdir(x) for x in CONFIG_DIRS) and\n os.path.isfile(CONFIG_FILE) and os.path.isfile(LOG_CONFIG_FILE))",
"def __check_config(self):\n if not os.path.exists(self.__config_path):\n return False\n else:\n return True",
"def is_config_exist(self) -> bool:\n return True",
"def is_config_exist(self) -> bool:\n pass",
"def check_config(outconfig):\n self.log.info(\"Checking if all the necessary files exist.\")\n\n # Perform necessary checks\n\n log.info(\"All necessary files exist for {} configuration.\".format(outconfig[\"Flavor\"]))\n\n return",
"def _ensure_config_file_exists():\n config_file = Path(ELIBConfig.config_file_path).absolute()\n if not config_file.exists():\n raise ConfigFileNotFoundError(ELIBConfig.config_file_path)",
"def has_configuration(config_file=CONFIG_FILE):\n return os.path.exists(config_file)",
"def is_config_exist(self) -> bool:\n return os.path.isfile(self.connection_string)",
"def _check(self, config: Dict):\n if 'path' not in config:\n raise FileNotFoundError(\"File not found.\")",
"def user_conf_dir_exists(self):\n return os.path.exists(self.user_conf_dir())",
"def check_config_file(path):\n\tif not os.path.exists (path):\n\t\t# create the directories\n\t\ttry:\n\t\t\tos.makedirs (os.path.join (os.path.split (path)[0]))\n\t\texcept os.error:\n\t\t\tprint \"Error while creating neccessary directories: %s\"\\\n\t\t\t\t% (os.error)\n\t\t\treturn False\n\n\t\ttry:\n\t\t\tf = file (path, \"w\")\n\t\texcept BaseException,e:\n\t\t\tprint \"Error while creating config file: %s\" % (e)\n\t\t\treturn False\n\t\telse:\n\t\t\tf.close()\n\n\t\treturn True\n\telse:\n\t\treturn True\n\treturn False",
"def test_files_non_existing_config(self, context):\n\n context.config_exists.return_value = False\n\n runner = CliRunner()\n result = runner.invoke(cli_node_files, ['--name', 'non-existing'])\n\n # Check that error is produced\n self.assertEqual(result.output[:7], \"[error]\")\n\n # check for non zero exit-code\n self.assertNotEqual(result.exit_code, 0)",
"def check() -> bool:\n\ttry:\n\t\twith open(configPath, 'r') as file:\n\t\t\tcfg = json.load(file) # load the file\n\texcept FileNotFoundError:\n\t\treturn False\n\t# check if EVERY config exists\n\tfor i in default_config.keys():\n\t\tif i in cfg.keys():\n\t\t\tcontinue\n\t\treturn False\n\t# final check\n\treturn cfg['config_type'] == default_config['config_type']",
"def check_configs(self):\n\n pass",
"async def check_if_config_exists(self, server_id):\n if not os.path.exists(\n '%s.ini' % (\n os.path.join(\n self.server_settings_path,\n str(server_id)\n )\n )\n ):\n return False\n return True",
"def path_exists(config_items):\n for section, options in config_items.items():\n if type(options) != dict:\n continue\n for key, val in options.items():\n if key == 'output_pattern':\n continue\n if not type(val) == str:\n continue\n if val.endswith('.nc') and not os.path.exists(val):\n print \"File {key}: {value} does not exist, exiting.\".format(key=key, value=val)\n sys.exit(1)",
"def check_configuration(self):\n\n return bool(os.path.isfile(self.config_path) and\n self.validate_configuration_file())",
"def check_configure_scan(project_path):\n for file_name in CONFIGURE_AC_NAMES:\n file_path = os.path.join(project_path, file_name)\n if os.path.exists(file_path):\n return file_path\n return None",
"def missingConfigFiles(self):\n return [ conf\n for conf in self.configFiles\n if not os.path.exists(conf)\n and not os.path.isfile(conf)\n ]",
"def checkIfFileExistsInPossibleLocations(testConfig):\n assert \"name\" in testConfig\n assert \"file\" in testConfig\n assert \"file_locations\" in testConfig\n testPass = False\n for filePath in testConfig[\"file_locations\"]:\n if isfile(join(filePath,testConfig[\"file\"])):\n testPass=True\n \n assert testPass,\"Failure for package \"+testConfig[\"name\"]+\"\\n File: \"+\\\n testConfig[\"file\"]+\" does not exist\"+\"\\nSearched in \"+\\\n str(testConfig[\"file_locations\"])",
"def init_config():\n config_file = create_config_file.CreateConfigFile()\n config_file.check_if_config_file_exists()",
"def exists(self):\n if self.host.exists(self.remote_path):\n print 'Yes, config exists already.'\n return True\n else:\n print 'Config doesn\\'t exist yet'\n return False",
"def check_config(configd):\n # XXX: verify first that the required config parameters are present\n if not os.path.exists(configd['player']):\n logging.error(\"player %s wasn't found\" % (configd['player'],))\n return False\n if not os.path.isdir(configd['sound_dir']):\n logging.error(\"sound directory %s wasn't found\" % (configd['sound_dir'],))\n return False\n return True",
"def exists(self):\n basedir = os.path.dirname(self.path)\n\n for filename in self.files:\n path = os.path.join(basedir, filename)\n if not os.path.exists(path):\n return False\n\n return True",
"def test_check():\n for f in cfg.required_files:\n assert os.path.isfile(f)",
"def test_config_must_exist(cls, values):\n configs = [c.config for c in values.get('configs')]\n for test in values.get('tests'):\n if test.config not in configs:\n raise ValueError(\n f\"Test '{test.test}' gave the config '{test.config}', but \"\n \"this config does not exist in the file \"\n f\"'{values.get('yaml')}'. Configs detected : {configs} \\n\")\n return values",
"def cloudwatch_config_exists(config, config_type, file_name):\n\n cfg = config.get(\"cloudwatch\", {}).get(config_type, {}).get(file_name)\n if cfg:\n assert os.path.isfile(cfg), \\\n \"Invalid CloudWatch Config File Path: {}\".format(cfg)\n return bool(cfg)",
"def check_paths( self ):\n check_a = utility_code.checkDirectoryExistence( self.PATH_TO_SOURCE_FILE_DIRECTORY )\n check_b = utility_code.checkDirectoryExistence( self.PATH_TO_ARCHIVES_ORIGINALS_DIRECTORY )\n check_c = utility_code.checkDirectoryExistence( self.PATH_TO_ARCHIVES_PARSED_DIRECTORY )\n check_d = utility_code.checkDirectoryExistence( self.PATH_TO_PARSED_ANNEX_DATA_DIRECTORY )\n check_e = utility_code.checkDirectoryExistence( self.PATH_TO_PARSED_ANNEX_COUNT_DIRECTORY )\n if check_a == 'exists' and check_b == 'exists' and check_c == 'exists' and check_d == 'exists' and check_e == 'exists':\n log.debug( 'path check passed' )\n else:\n message='path check failed; quitting'\n log.error( message )\n sys.exit( message )\n return",
"def check_file_exist(self):\n return False",
"def test_config():\n if not os.path.exists(CONFIG_DIR):\n raise mupub.BadConfiguration('Configuration folder not found.')\n if not os.path.exists(_CONFIG_FNM):\n raise mupub.BadConfiguration('Configuration file not found.')\n if not os.path.exists(getDBPath()):\n raise mupub.BadConfiguration('Local database not found.')\n if len(CONFIG_DICT) == 0:\n raise mupub.BadConfiguration('Configuration was not loaded.')"
]
| [
"0.8346882",
"0.78028643",
"0.77522177",
"0.77137613",
"0.7490861",
"0.72511786",
"0.7087832",
"0.7071221",
"0.70583725",
"0.7033269",
"0.7024421",
"0.69919306",
"0.69578743",
"0.6946059",
"0.6932018",
"0.68946105",
"0.6867093",
"0.6830978",
"0.6827687",
"0.6827188",
"0.67988324",
"0.6774735",
"0.67271996",
"0.67031157",
"0.66672295",
"0.66374785",
"0.6611249",
"0.6592881",
"0.6586195",
"0.6560553"
]
| 0.8165395 | 1 |
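The row above pairs the query "Checks if config files exist." with a Cuckoo-style check_configs document. As a quick illustration of the same idea, here is a minimal sketch using pathlib; the root path, file names, and exception type are hypothetical stand-ins, not the CUCKOO_ROOT constant or CuckooStartupError exception used in that snippet:

# Minimal sketch of a config-existence check (hypothetical paths and exception).
from pathlib import Path

ROOT = Path("/opt/app")  # placeholder for an application root

class StartupError(Exception):
    """Raised when a required config file is missing."""

def check_configs(root=ROOT):
    for name in ("app.conf", "reporting.conf"):
        path = root / "conf" / name
        if not path.exists():
            raise StartupError("Config file does not exist at path: %s" % path)
    return True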
Randomly transform image data. Given an input image list (possibly multimodal) and an optional corresponding segmentation image list, this function will perform data augmentation with spatial transformations, added image noise, simulated bias fields, and histogram intensity warping. | def data_augmentation(input_image_list,
                      segmentation_image_list=None,
                      number_of_simulations=10,
                      reference_image=None,
                      transform_type='affineAndDeformation',
                      noise_model='additivegaussian',
                      noise_parameters=(0.0, 0.05),
                      sd_simulated_bias_field=0.05,
                      sd_histogram_warping=0.05,
                      output_numpy_file_prefix=None,
                      verbose=False
                      ):
    from ..utilities import histogram_warp_image_intensities
    from ..utilities import simulate_bias_field
    if reference_image is None:
        reference_image = input_image_list[0][0]
    number_of_modalities = len(input_image_list[0])
    # Set up numpy arrays if outputting to file.
    batch_X = None
    batch_Y = None
    if output_numpy_file_prefix is not None:
        batch_X = np.zeros((number_of_simulations, *reference_image.shape, number_of_modalities))
        if segmentation_image_list is not None:
            batch_Y = np.zeros((number_of_simulations, *reference_image.shape))
    # Spatially transform input image data
    if verbose:
        print("Randomly spatially transforming the image data.")
    transform_augmentation = antspynet.randomly_transform_image_data(reference_image,
        input_image_list=input_image_list,
        segmentation_image_list=segmentation_image_list,
        number_of_simulations=number_of_simulations,
        transform_type=transform_type,
        sd_affine=0.01,
        deformation_transform_type="bspline",
        number_of_random_points=1000,
        sd_noise=2.0,
        number_of_fitting_levels=4,
        mesh_size=1,
        sd_smoothing=4.0,
        input_image_interpolator='linear',
        segmentation_image_interpolator='nearestNeighbor')
    simulated_image_list = list()
    simulated_segmentation_image_list = list()
    for i in range(number_of_simulations):
        if verbose:
            print("Processing simulation " + str(i))
        segmentation = None
        if segmentation_image_list is not None:
            segmentation = transform_augmentation['simulated_segmentation_images'][i]
            simulated_segmentation_image_list.append(segmentation)
            if batch_Y is not None:
                if reference_image.dimension == 2:
                    batch_Y[i, :, :] = segmentation.numpy()
                else:
                    batch_Y[i, :, :, :] = segmentation.numpy()
        simulated_local_image_list = list()
        for j in range(number_of_modalities):
            if verbose:
                print("    Modality " + str(j))
            image = transform_augmentation['simulated_images'][i][j]
            image_range = image.range()
            # Normalize to [0, 1] before applying augmentation
            if verbose:
                print("    Normalizing to [0, 1].")
            image = ants.iMath(image, "Normalize")
            # Noise
            if noise_model is not None:
                if verbose:
                    print("    Adding noise (" + noise_model + ").")
                if noise_model.lower() == "additivegaussian":
                    parameters = (noise_parameters[0], random.uniform(0.0, noise_parameters[1]))
                    image = ants.add_noise_to_image(image,
                        noise_model="additivegaussian",
                        noise_parameters=parameters)
                elif noise_model.lower() == "saltandpepper":
                    parameters = (random.uniform(0.0, noise_parameters[0]), noise_parameters[1], noise_parameters[2])
                    image = ants.add_noise_to_image(image,
                        noise_model="saltandpepper",
                        noise_parameters=parameters)
                elif noise_model.lower() == "shot":
                    parameters = (random.uniform(0.0, noise_parameters[0]))
                    image = ants.add_noise_to_image(image,
                        noise_model="shot",
                        noise_parameters=parameters)
                elif noise_model.lower() == "speckle":
                    parameters = (random.uniform(0.0, noise_parameters[0]))
                    image = ants.add_noise_to_image(image,
                        noise_model="speckle",
                        noise_parameters=parameters)
                else:
                    raise ValueError("Unrecognized noise model.")
            # Simulated bias field
            if sd_simulated_bias_field > 0:
                if verbose:
                    print("    Adding simulated bias field.")
                bias_field = antspynet.simulate_bias_field(image,
                    sd_bias_field=sd_simulated_bias_field)
                image = image * (bias_field + 1)
            # Histogram intensity warping
            if sd_histogram_warping > 0:
                if verbose:
                    print("    Performing intensity histogram warping.")
                break_points = [0.2, 0.4, 0.6, 0.8]
                displacements = list()
                for b in range(len(break_points)):
                    displacements.append(random.gauss(0, sd_histogram_warping))
                image = antspynet.histogram_warp_image_intensities(image,
                    break_points=break_points,
                    clamp_end_points=(False, False),
                    displacements=displacements)
            # Rescale to original intensity range
            if verbose:
                print("    Rescaling to original intensity range.")
            image = ants.iMath(image, "Normalize") * (image_range[1] - image_range[0]) + image_range[0]
            simulated_local_image_list.append(image)
            if batch_X is not None:
                if reference_image.dimension == 2:
                    batch_X[i, :, :, j] = image.numpy()
                else:
                    batch_X[i, :, :, :, j] = image.numpy()
        simulated_image_list.append(simulated_local_image_list)
    if batch_X is not None:
        if verbose:
            print("Writing images to numpy array.")
        np.save(output_numpy_file_prefix + "SimulatedImages.npy", batch_X)
    if batch_Y is not None:
        if verbose:
            print("Writing segmentation images to numpy array.")
        np.save(output_numpy_file_prefix + "SimulatedSegmentationImages.npy", batch_Y)
    if segmentation_image_list is None:
        return({'simulated_images' : simulated_image_list})
    else:
        return({'simulated_images' : simulated_image_list,
                'simulated_segmentation_images' : simulated_segmentation_image_list}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def data_augmentation(image, aug):\n if (aug == \"random_crop\") and (random.randint(0,1)):\n image = random_crop(image) \n if (aug == \"random_rotation\") and (random.randint(0,1)): \n image = random_rotation(image) \n if (aug == \"random_flip\") and (random.randint(0,1)): \n image = random_flip(image)\n if (aug == \"affine_transformation\") and (random.randint(0,1)): \n image = affine_transformation(image)\n if (aug == \"random_gaussian_noise\") and (random.randint(0,1)): \n image = random_gaussian_noise(image)\n if (aug == \"random_erasing\") and (random.randint(0,1)): \n image = random_erasing(image) \n return image",
"def data_augmentation(image_data, mask_data, rotate=False, vertical_flip=False, horizontal_flip=False):\n aug_images = []\n aug_masks = []\n\n for _ in range(len(image_data)):\n if rotate:\n rotation = A.RandomRotate90(p=1)\n rotated_data = rotation(image=image_data[_], mask=mask_data[_])\n rotated_image = rotated_data['image']\n rotated_mask = rotated_data['mask']\n aug_images.append(rotated_image)\n aug_masks.append(rotated_mask)\n\n if vertical_flip:\n flip_v = A.VerticalFlip(p=1)\n vertical_data = flip_v(image=image_data[_], mask=mask_data[_])\n vertical_image = vertical_data['image']\n vertical_mask = vertical_data['mask']\n aug_images.append(vertical_image)\n aug_masks.append(vertical_mask)\n\n if horizontal_flip:\n flip_h = A.HorizontalFlip(p=1)\n horizontal_data = flip_h(image=image_data[_], mask=mask_data[_])\n horizontal_image = horizontal_data['image']\n horizontal_mask = horizontal_data['mask']\n aug_images.append(horizontal_image)\n aug_masks.append(horizontal_mask)\n\n nd_images = make_ndarray(aug_images)\n nd_masks = make_ndarray(aug_masks)\n #nd_images = np.zeros((len(aug_images), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.float32)\n #nd_masks = np.zeros((len(aug_masks), IMG_HEIGHT, IMG_WIDTH), dtype=np.float32)\n\n #for _ in range(len(aug_images)): # Load into ndarray\n # nd_images[_] = aug_images[_]\n # nd_masks[_] = aug_masks[_] # load mask without channel variable\n\n return nd_images, nd_masks",
"def data_augmentation(im_list, mode='standard', tag=False, params=None, im_size=224,\n filemode='local', mean_RGB=None):\n if mean_RGB is None:\n mean_RGB = np.array([107.59348955, 112.1047813, 80.9982362])\n else:\n mean_RGB = np.array(mean_RGB)\n rot_ang = [0, 90, 180, 270]\n batch = []\n if tag:\n tag_list = im_list[:, 1]\n im_list = im_list[:, 0]\n if mode == 'minimal':\n params = {'mirror': False, 'rescale': False, 'crop_size': False}\n if mode == 'standard':\n params = {'mirror': True, 'rescale': 0.3, 'zoom': 0.3, 'crop_size': 1.}\n if mode == 'test':\n params = {'mirror': True, 'rescale': 0.1, 'zoom': 0.1, 'crop_size': .9}\n for i, filename in enumerate(im_list):\n if filemode == 'local':\n im = Image.open(filename)\n im = im.convert('RGB')\n elif filemode == 'url':\n filename = BytesIO(requests.get(filename).content)\n im = Image.open(filename)\n im = im.convert('RGB')\n if params['mirror'] and np.random.random() > 0.5:\n im = im.transpose(Image.FLIP_LEFT_RIGHT)\n if params['mirror'] and tag and tag_list[i] != 'habit':\n if np.random.random() > 0.5:\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\n rot = np.random.choice(rot_ang)\n if rot == 90:\n im = im.transpose(Image.ROTATE_90)\n if rot == 180:\n im = im.transpose(Image.ROTATE_180)\n if rot == 270:\n im = im.transpose(Image.ROTATE_270)\n if params['rescale']:\n rescale = params['rescale']\n new_scale = np.random.uniform(low=1.-rescale, high=1.+rescale, size=2)\n im = im.resize((im.size * new_scale).astype(int))\n if params['crop_size']:\n zoom = np.random.rand() * params['zoom']\n crop_size = params['crop_size'] * (1.-zoom)\n ly, lx = im.size\n min_side = min([ly, lx])\n if crop_size == 1:\n crop_size -= 1e-10 # avoid low=high problem of randint generator\n if ly > lx:\n rand_x = np.random.randint(low=0, high=lx*(1.-crop_size))\n rand_y = np.random.randint(low=0, high=ly-lx*crop_size)\n else:\n rand_x = np.random.randint(low=0, high=lx-ly*crop_size)\n rand_y = np.random.randint(low=0, high=ly*(1.-crop_size))\n rand_xy = np.array([rand_y, rand_x])\n im = im.crop(np.concatenate((rand_xy, rand_xy+crop_size*min_side)))\n im = im.resize((im_size, im_size))\n batch.append(np.array(im)) # shape (N, 224, 224, 3)\n\n batch = np.array(batch) - mean_RGB[None, None, None, :] # mean centering\n batch = batch.transpose(0, 3, 1, 2) # shape(N, 3, 224, 224)\n batch = batch[:, ::-1, :, :] # switch from RGB to BGR\n return batch.astype(np.float32)",
"def data_augmenter(image, label, shift, rotate, scale, intensity, flip):\n image2 = np.zeros(image.shape, dtype=np.float32)\n label2 = np.zeros(label.shape, dtype=np.int32)\n for i in range(image.shape[0]):\n # For each image slice, generate random affine transformation parameters\n # using the Gaussian distribution\n shift_val = [np.clip(np.random.normal(), -3, 3) * shift,\n np.clip(np.random.normal(), -3, 3) * shift]\n rotate_val = np.clip(np.random.normal(), -3, 3) * rotate\n scale_val = 1 + np.clip(np.random.normal(), -3, 3) * scale\n intensity_val = 1 + np.clip(np.random.normal(), -3, 3) * intensity\n\n # Apply the affine transformation (rotation + scale + shift) to the image\n row, col = image.shape[1:3]\n M = cv2.getRotationMatrix2D((row / 2, col / 2), rotate_val, 1.0 / scale_val)\n M[:, 2] += shift_val\n for c in range(image.shape[3]):\n image2[i, :, :, c] = ndimage.interpolation.affine_transform(image[i, :, :, c],\n M[:, :2], M[:, 2], order=1)\n\n # Apply the affine transformation (rotation + scale + shift) to the label map\n label2[i, :, :] = ndimage.interpolation.affine_transform(label[i, :, :],\n M[:, :2], M[:, 2], order=0)\n\n # Apply intensity variation\n image2[i] *= intensity_val\n\n # Apply random horizontal or vertical flipping\n if flip:\n if np.random.uniform() >= 0.5:\n image2[i] = image2[i, ::-1, :, :]\n label2[i] = label2[i, ::-1, :]\n else:\n image2[i] = image2[i, :, ::-1, :]\n label2[i] = label2[i, :, ::-1]\n return image2, label2",
"def perform_augmentations(image, gt_image, augmentations, probabilities):\n for i in range(len(augmentations)):\n if np.random.rand(1) < probabilities[i]:\n image, gt_image = augmentations[i](image, gt_image)\n\n return image, gt_image",
"def image_augmentation(dataset_dict):\n dataset_dict = copy.deepcopy(dataset_dict)\n image = utils.read_image(dataset_dict[\"file_name\"], format=\"BGR\")\n\n transform_list = [\n T.RandomCrop(crop_type=\"relative_range\", crop_size=[0.95, 0.87]),\n T.RandomBrightness(0.9, 1.5),\n T.RandomContrast(0.8, 1.6),\n T.RandomSaturation(1.0, 1.6),\n T.RandomRotation(angle=[15, 0, 5, 6, 15], expand=False),\n T.RandomFlip(prob=0.5, horizontal=True, vertical=False),\n T.ResizeScale(1.0, 2.0, target_height=900, target_width=700)\n ]\n\n image, transforms = T.apply_transform_gens(transform_list, image)\n dataset_dict[\"image\"] = torch.as_tensor(image.transpose(2, 0, 1).astype(\"float32\"))\n\n annotations = [\n utils.transform_instance_annotations(obj, transforms, image.shape[:2])\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n\n instances = utils.annotations_to_instances(annotations, image.shape[:2])\n dataset_dict[\"instances\"] = utils.filter_empty_instances(instances)\n\n return dataset_dict",
"def __call__(self, in_data):\n # There are five data augmentation steps\n # 1. Color augmentation\n # 2. Random expansion\n # 3. Random cropping\n # 4. Resizing with random interpolation\n # 5. Random horizontal flipping\n if self.count % 10 == 0 and self.count % self.batchsize == 0 and self.count != 0:\n self.i += 1\n i = self.i % len(self.dim)\n self.output_shape = (self.dim[i], self.dim[i])\n # print(self.count, self.i, self.output_shape)\n self.count += 1\n\n img, bbox, label = in_data\n\n # 1. Color augmentation\n img = random_distort(img, brightness_delta=32,\n contrast_low=0.5, contrast_high=1.5,\n saturation_low=0.5, saturation_high=1.5,\n hue_delta=25)\n\n # Normalize. range is [0, 1]\n img /= 255.0\n\n _, H, W = img.shape\n scale = np.random.uniform(0.25, 2)\n random_expand = np.random.uniform(0.8, 1.2, 2)\n net_h, net_w = self.output_shape\n out_h = net_h * scale # random_expand[0]\n out_w = net_w * scale # random_expand[1]\n if H > W:\n out_w = out_h * (float(W) / H) * np.random.uniform(0.8, 1.2)\n elif H < W:\n out_h = out_w * (float(H) / W) * np.random.uniform(0.8, 1.2)\n\n out_h = int(out_h)\n out_w = int(out_w)\n\n img = resize_with_random_interpolation(img, (out_h, out_w))\n bbox = transforms.resize_bbox(bbox, (H, W), (out_h, out_w))\n\n if out_h < net_h and out_w < net_w:\n img, param = expand(img, out_h=net_h, out_w=net_w,\n fill=self.value, return_param=True)\n bbox = transforms.translate_bbox(\n bbox, y_offset=param['y_offset'], x_offset=param['x_offset'])\n else:\n out_h = net_h if net_h > out_h else int(out_h * 1.05)\n out_w = net_w if net_w > out_w else int(out_w * 1.05)\n img, param = expand(img, out_h=out_h, out_w=out_w,\n fill=self.value, return_param=True)\n bbox = transforms.translate_bbox(\n bbox, y_offset=param['y_offset'], x_offset=param['x_offset'])\n\n img, param = crop_with_bbox_constraints(\n img, bbox, return_param=True,\n crop_height=net_h, crop_width=net_w)\n bbox, param = transforms.crop_bbox(\n bbox, y_slice=param['y_slice'], x_slice=param['x_slice'],\n allow_outside_center=False, return_param=True)\n label = label[param['index']]\n\n\n # 5. Random horizontal flipping # OK\n img, params = transforms.random_flip(\n img, x_random=True, return_param=True)\n bbox = transforms.flip_bbox(\n bbox, self.output_shape, x_flip=params['x_flip'])\n\n # Preparation for Yolov2 network\n bbox[:, ::2] /= self.output_shape[0] # y\n bbox[:, 1::2] /= self.output_shape[1] # x\n\n num_bbox = len(bbox)\n len_max = max(num_bbox, self.max_target)\n\n gmap = create_map_anchor_gt(bbox, self.anchors, self.output_shape,\n self.downscale, self.n_boxes, len_max)\n\n out_bbox = np.zeros((len_max, 4), dtype='f')\n out_bbox[:num_bbox] = bbox[:num_bbox]\n out_label = np.zeros((len_max), dtype='i')\n out_label[:num_bbox] = label\n\n gmap = gmap[:self.max_target]\n out_bbox = out_bbox[:self.max_target]\n out_label = out_label[:self.max_target]\n num_array = min(num_bbox, self.max_target)\n\n img = np.clip(img, 0, 1)\n return img, out_bbox, out_label, gmap, np.array([num_array], dtype='i')",
"def apply_augmentation_list(data, aug_list):\n pipeline = build_augmentation_pipeline(aug_list)\n\n # Transform\n data_tfmd = pipeline.fit_transform(data)\n\n return data_tfmd",
"def augment():\n print(\"augmenting......\")\n path1 = '../trainp1/'\n path2 = '../trainp2/'\n # path of pair1 and pair2 similar to img & mask task for segmentation\n p = Augmentor.Pipeline(path1) # pair1\n p.ground_truth(path2) # pair2\n p.rotate(probability=0.3, max_left_rotation=3, max_right_rotation=3) \n p.flip_left_right(probability=0.2) \n p.random_distortion(0.5, 2, 2, 2)\n p.zoom(probability=0.5, min_factor=0.95, max_factor=1.05)\n p.process()",
"def augment_images(folder, augmenter, images, size = (224, 224), start_index=0, iterations=1):\n # Get the total number of images\n n = len(images)\n \n # Main iteration that applies random transformations to the images\n for i in range(iterations):\n # Apply transformations to the images\n images_augmented = augmenter(images=images)\n \n # Save the augmented images on the disk\n save_images_in_folder(folder=folder, images=images_augmented, size=size, start_index=i*n)",
"def preprocess_train(im, boxes, classes, inst_masks, mask, input_size, min_size=2,\n use_augment=False, training_scale=[0.3, 0.5, 0.7, 1.0]):\n ori_im = np.copy(im)\n target_h, target_w = input_size\n\n # ---------- old data_augmentation ----------\n if use_augment:\n if np.random.choice([0, 1]) != 0:\n scale = np.random.choice(training_scale) # adding more small objects\n im, inst_masks, mask, boxes, classes = random_scale(im, inst_masks, mask, boxes, classes, scale=scale)\n min_obj_cover = np.random.choice([0.8, 0.9, 1.0])\n # truncted examples may lead to multiple-detections..\n im, inst_masks, mask, boxes, classes = random_aspect_ratio(im, inst_masks, mask, boxes, classes,\n min_aspect_ratio=0.5, max_aspect_ratio=2.0,\n min_obj_cover=min_obj_cover)\n #\n # # r = np.random.randint(0, 3)\n # if np.random.rand() < 0.75:\n # im, inst_masks, mask, boxes, classes = fixed_scale(im, inst_masks, mask, boxes, classes, target_h, target_w)\n # else:\n # im, inst_masks, mask, boxes, classes = center_crop2fixed_pad(im, inst_masks, mask, boxes, classes, target_w, target_h,\n # min_size=min_size)\n\n # ---------- old data_augmentation ----------\n\n # ---------- none data_augmentation ----------\n im, inst_masks, mask, boxes, classes = fixed_scale(im, inst_masks, mask, boxes, classes, target_h, target_w)\n im, inst_masks, mask, boxes, classes = random_flip(im, inst_masks, mask, boxes, classes)\n # ---------- none data_augmentation ----------\n\n # ---------- old data_augmentation ----------\n im = distort_color(im)\n # ---------- old data_augmentation ----------\n\n im = imcv2_recolor(im)\n\n # add this because zeros numpy array will cause errors in torch Dataloader\n inst_masks = np.zeros([1, target_h, target_w], dtype=inst_masks.dtype) if inst_masks.size == 0 else inst_masks\n\n boxes = np.asarray(boxes, dtype=np.float32)\n return im, boxes, classes, inst_masks, mask, ori_im",
"def aortic_data_augmenter(image, label, shift, rotate, scale, intensity, flip):\n image2 = np.zeros(image.shape, dtype=np.float32)\n label2 = np.zeros(label.shape, dtype=np.int32)\n\n # For N image. which come come from the same subject in the LSTM model,\n # generate the same random affine transformation parameters.\n shift_val = [np.clip(np.random.normal(), -3, 3) * shift,\n np.clip(np.random.normal(), -3, 3) * shift]\n rotate_val = np.clip(np.random.normal(), -3, 3) * rotate\n scale_val = 1 + np.clip(np.random.normal(), -3, 3) * scale\n intensity_val = 1 + np.clip(np.random.normal(), -3, 3) * intensity\n\n # The affine transformation (rotation + scale + shift)\n row, col = image.shape[1:3]\n M = cv2.getRotationMatrix2D(\n (row / 2, col / 2), rotate_val, 1.0 / scale_val)\n M[:, 2] += shift_val\n\n # Apply the transformation to the image\n for i in range(image.shape[0]):\n for c in range(image.shape[3]):\n image2[i, :, :, c] = ndimage.interpolation.affine_transform(\n image[i, :, :, c], M[:, :2], M[:, 2], order=1)\n\n label2[i, :, :] = ndimage.interpolation.affine_transform(\n label[i, :, :], M[:, :2], M[:, 2], order=0)\n\n # Apply intensity variation\n image2[i] *= intensity_val\n\n # Apply random horizontal or vertical flipping\n if flip:\n if np.random.uniform() >= 0.5:\n image2[i] = image2[i, ::-1, :, :]\n label2[i] = label2[i, ::-1, :]\n else:\n image2[i] = image2[i, :, ::-1, :]\n label2[i] = label2[i, :, ::-1]\n return image2, label2",
"def bulk_augment_images(input_path, output_path, extension, augmentation, label_type, label_threshold=-1):\n for dir_path, dir_names, filenames in os.walk(input_path):\n structure = os.path.join(output_path, dir_path[len(input_path) + 1:])\n for file in filenames:\n if file.endswith(extension):\n src = os.path.join(dir_path, file)\n label = get_labels([src], label_type)[0]\n if label > label_threshold:\n img = cv2.imread(src, 0)\n f_name, f_ext = os.path.splitext(file)\n if augmentation == 'flip':\n img = np.flip(img, axis=-1)\n file = f_name + \"_flipped\" + f_ext\n elif augmentation == 'original':\n file = f_name + \"_original\" + f_ext\n elif augmentation == 'rotate_crop':\n rotation = np.random.choice((-10, 10))\n img = rotate_and_crop_image(img, rotation)\n file = f_name + \"_rotated\" + f_ext\n else:\n raise ValueError(\n \"Invalid value for 'augmentation'. Value can be 'flip', 'original', 'rotate_crop, \"\n \"value was: {}\".format(augmentation))\n dest = os.path.join(structure, file)\n cv2.imwrite(dest, img)",
"def augment_training_data(images, labels):\n from scipy import ndimage\n\n # Empty lists to fill\n expanded_images = []\n expanded_labels = []\n\n # Looping through samples, modifying them, and appending them to the empty lists\n j = 0 # counter\n for x, y in zip(images, labels):\n j = j + 1\n if j % 10000 == 0:\n print('Expanding data: %03d / %03d' % (j, np.size(images, 0)))\n\n # register original data\n expanded_images.append(x)\n expanded_labels.append(y)\n\n # get a value for the background\n # zero is the expected value, but median() is used to estimate background's value\n bg_value = np.median(x) # this is regarded as background's value\n image = np.reshape(x, (-1, 28))\n\n for i in range(4):\n # rotate the image with random degree\n angle = np.random.randint(-15, 15, 1)\n new_img = ndimage.rotate(\n image, angle, reshape=False, cval=bg_value)\n\n # shift the image with random distance\n shift = np.random.randint(-2, 2, 2)\n new_img_ = ndimage.shift(new_img, shift, cval=bg_value)\n\n # register new training data\n expanded_images.append(np.reshape(new_img_, (28, 28, 1)))\n expanded_labels.append(y)\n\n return expanded_images, expanded_labels",
"def cifar_image_augmentation(images):\n images = tf.image.resize_image_with_crop_or_pad(images, 40, 40)\n images = tf.random_crop(images, [32, 32, 3])\n images = tf.image.random_flip_left_right(images)\n return images",
"def augment(image,masks):\n\n # Random horizontal flipping\n if random.random() > 0.5:\n image = TF.hflip(image)\n masks = TF.hflip(masks)\n\n # Random vertical flipping\n if random.random() > 0.5:\n image = TF.vflip(image)\n masks = TF.vflip(masks)\n return image,masks",
"def get_augmentation_sequence():\n # Macro to apply something with 50% chance\n sometimes = lambda aug: iaa.Sometimes(0.5, aug) # 50%\n rarely = lambda aug: iaa.Sometimes(0.1, aug) # 10%\n\n # Augmentation applied to every image\n # Augmentors sampled one value per channel\n aug_sequence = iaa.Sequential(\n [\n # apply the following augmenters to most images\n iaa.Fliplr(0.5), # horizontally flip 50% of all images\n iaa.Flipud(0.5), # vertically flip 50% of all images\n\n # crop images by -0.25% to 0.25% of their height/width\n # positive values crop the image, negative pad\n sometimes(iaa.CropAndPad(\n percent=(-0.25, 0.25),\n pad_mode=['constant', 'edge'], # pad with constant value of the edge value\n pad_cval=(0, 0) # if mode is constant, use a cval between 0 and 0 to ensure mask background is preserved\n )),\n sometimes(iaa.Affine(\n scale={\"x\": (0.8, 1.2), \"y\": (0.8, 1.2)}, # scale images to 80-120% of their size, individually per axis\n translate_percent={\"x\": (-0.2, 0.2), \"y\": (-0.2, 0.2)}, # translate by -20 to +20 percent (per axis)\n rotate=(-45, 45), # rotate by -45 to +45 degrees\n shear=(-16, 16), # shear by -16 to +16 degrees\n order=[0, 1], # use nearest neighbour or bilinear interpolation (fast)\n cval=(0, 0), # if mode is constant, use a cval between 0 and 0 to ensure mask background is preserved\n mode='constant' # ia.ALL # use any of scikit-image's warping modes (see 2nd image from the top for examples)\n )),\n # rarely(iaa.Superpixels(p_replace=(0, 1.0), n_segments=(20, 200))),\n iaa.GaussianBlur((0, 3.0)),\n iaa.Add((-10, 10), per_channel=0.7), # change brightness of images (by -10 to 10 of original value)\n iaa.AddToHueAndSaturation((-20, 20)),\n # sometimes(iaa.PerspectiveTransform(scale=(0.01, 0.1)))\n ],\n random_order=True\n )\n\n return aug_sequence",
"def _augment_images(self, images, random_state, parents, hooks):\n nb_images = len(images)\n samples = self.p.draw_samples((nb_images,), random_state=random_state)\n for i in sm.xrange(nb_images):\n if samples[i] == 1:\n if self.axis == 1:\n images[i] = np.fliplr(images[i])\n elif self.axis == 0:\n images[i] = np.flipud(images[i])\n self.samples = samples\n return images",
"def test_data_augmentation_transforms():\n\n transforms_list = get_data_augmentation_transforms(inp_size=(100, 50), pixel_mean=[0.5], pixel_std=[0.3]).transforms\n\n assert len(transforms_list) > 3\n\n # last 3 should be fundamental\n augmentation_transforms = Compose(transforms_list[:-3])\n\n try:\n inp_img = Image.fromarray(np.loadtxt(\"proj6_code/proj6_unit_tests/test_data/transform_inp.txt\", dtype=\"uint8\"))\n\n except:\n inp_img = Image.fromarray(\n np.loadtxt(\"../proj6_code/proj6_unit_tests/test_data/transform_inp.txt\", dtype=\"uint8\")\n )\n augmented_img = augmentation_transforms(inp_img)\n assert isinstance(augmented_img, type(inp_img))\n assert not np.array_equal(augmented_img, inp_img)",
"def _sync_transform(self, epoch_data: Dict[str, Union[Image.Image, List]]) -> None:\n scale_func = lambda x: int(self.scale_factor * x / 32.0) * 32\n output_shape = [scale_func(x) for x in self.output_size]\n\n # random mirror\n if self.augmentations.get('rand_flip', False) and random.random() < 0.5:\n for key, data in epoch_data.items():\n if key not in ['labels', 'bboxes']:\n epoch_data[key] = torchvision.transforms.functional.hflip(data)\n elif key == 'bboxes':\n epoch_data[key] = self._mirror_bbox(data, CityScapesDataset.base_size)\n\n if 'rand_brightness' in self.augmentations:\n brightness_scale = random.uniform(\n 1-self.augmentations['rand_brightness']/100,\n 1+self.augmentations['rand_brightness']/100)\n for key, data in epoch_data.items():\n if key in [\"l_img\", \"r_img\", \"l_seq\", \"r_seq\"]:\n epoch_data[key] = torchvision.transforms.functional.adjust_brightness(\n data, brightness_scale)\n\n if 'rand_rot' in self.augmentations:\n # TODO Add mask for bboxes which is required for attention head\n angle = random.uniform(0, self.augmentations['rand_rot'])\n for key, data in epoch_data.items():\n if key in [\"l_img\", \"r_img\", \"l_seq\", \"r_seq\"]:\n epoch_data[key] = torchvision.transforms.functional.rotate(\n data, angle, resample=Image.BILINEAR)\n elif key in ['disparity', 'seg']:\n epoch_data[key] = torchvision.transforms.functional.rotate(\n data, angle, resample=Image.NEAREST, fill=-1)\n elif key == 'bboxes':\n self._rotate_bbox(epoch_data, angle)\n\n for key, data in epoch_data.items():\n if key in [\"l_img\", \"r_img\", \"l_seq\", \"r_seq\", 'center', 'offset']:\n epoch_data[key] = torchvision.transforms.functional.resize(\n data, tuple(output_shape[::-1]), Image.BILINEAR)\n elif key in ['disparity', 'seg', 'foreground'] or key.endswith('mask'):\n epoch_data[key] = torchvision.transforms.functional.resize(\n data, tuple(output_shape[::-1]), Image.NEAREST)\n elif key == \"center_points\":\n new_points = []\n rescale_func = lambda x: [tgt * new / old for (tgt, new, old) \\\n in zip(x, output_shape, CityScapesDataset.base_size)]\n for points in data:\n new_points.append(rescale_func(points))\n epoch_data[key] = new_points\n elif key == \"bboxes\":\n epoch_data[key] = self._scale_bbox(data, output_shape)\n\n for key in [\"l_img\", \"r_img\", \"l_seq\", \"r_seq\"]:\n if key in epoch_data:\n epoch_data[key] = self._img_transform(epoch_data[key])\n\n if 'disparity' in epoch_data:\n epoch_data['disparity'] = self._depth_transform(epoch_data['disparity'])\n\n if 'seg' in epoch_data:\n epoch_data['seg'] = self._seg_transform(epoch_data['seg'])\n\n if 'offset' in epoch_data:\n epoch_data['offset'][:,0,:,:] *= output_shape[0] / CityScapesDataset.base_size[0]\n epoch_data['offset'][:,1,:,:] *= output_shape[1] / CityScapesDataset.base_size[1]\n\n if 'crop_fraction' in self.augmentations:\n # random crop\n crop_fraction = self.augmentations['crop_fraction']\n crop_h = int(epoch_data[\"l_img\"].shape[1] / crop_fraction / 32.0) * 32\n crop_w = int(epoch_data[\"l_img\"].shape[2] / crop_fraction / 32.0) * 32\n crop_x = random.randint(0, epoch_data[\"l_img\"].shape[2] - crop_w)\n crop_y = random.randint(0, epoch_data[\"l_img\"].shape[1] - crop_h)\n\n for key, data in epoch_data.items():\n if key in [\"l_img\", \"r_img\", \"l_seq\", \"r_seq\"]:\n epoch_data[key] = data[:, crop_y:crop_y+crop_h, crop_x:crop_x+crop_w]\n elif key in [\"seg\", \"disparity\"]:\n epoch_data[key] = data[crop_y:crop_y+crop_h, crop_x:crop_x+crop_w]\n elif key == 'bboxes':\n self._crop_bbox(data, (crop_x, crop_y, 
crop_w, crop_h))",
"def augment(image, n,\n hflip=False, vflip=False, scale_to_percent=1.0, scale_axis_equally=True,\n rotation_deg=0, shear_deg=0, translation_x_px=0, translation_y_px=0,\n brightness_change=0.0, noise_mean=0.0, noise_std=0.0):\n assert n >= 0\n result = []\n if n == 0:\n return result\n\n width = image.shape[0]\n height = image.shape[1]\n matrices = create_aug_matrices(n, img_width_px=width, img_height_px=height,\n scale_to_percent=scale_to_percent,\n scale_axis_equally=scale_axis_equally,\n rotation_deg=rotation_deg,\n shear_deg=shear_deg,\n translation_x_px=translation_x_px,\n translation_y_px=translation_y_px)\n for i in range(n):\n img = np.copy(image)\n matrix = matrices[i]\n\n # random horizontal / vertical flip\n if hflip and i % 2 == 0:\n img = np.fliplr(img)\n if vflip and random.random() > 0.5:\n img = np.flipud(img)\n\n # random brightness adjustment\n by_percent = random.uniform(1.0 - brightness_change, 1.0 + brightness_change)\n img = img * by_percent\n\n # gaussian noise\n # numpy requires a std above 0\n if noise_std > 0:\n img = img + (255 * np.random.normal(noise_mean, noise_std, (img.shape)))\n\n # clip to 0-255\n img = np.clip(img, 0, 255).astype(np.uint8)\n\n arr = tf.warp(img, matrix, mode=\"nearest\") # projects to float 0-1\n img = np.array(arr * 255, dtype=np.uint8)\n result.append(img)\n\n return result",
"def data_augmentation_and_vectorization(self,imlist, lb,im_labels, average_image = None):\n\t\tX,Y,X_original = [] ,[], []\n\n\t\ti = 0\n\t\tfor im in imlist:\n\t\t\tim=Image.fromarray(im,mode=self.mode)\n\t\t\t#try:\n\t\t\t#im_ini = im\n\t\t\tim_original = np.asarray(im, dtype=theano.config.floatX) / 256.\n\t\t\t#im = self.substract_average_image(im, average_image)\n\t\t\t#print 'i:{} is a: {}' .format(i,im_labels[i])\n\t\t\t#im.show()\n\t\t\tX_original.append(im_original)\n\n\t\t\t#Rotations \n\t\t\t#im_r = im.rotate(15)\n\t\t\t# im_r_2 = im.rotate(-15)\n\t\t\t# im_r_3 = im.rotate(180)\n\t\t\t#im_r.show()\n\t\t\t#im_r_2.show()\n\n\t\t\t#Filters\n\t\t\t#im_f = im_ini.filter(ImageFilter.DETAIL)\n\t\t\t#im_f = im.filter(ImageFilter.FIND_EDGES)\n\t\t\t\n\t\t\tif self.mode == 'RGB':\n\t\t\t\tim = np.asarray(im, dtype=theano.config.floatX) / 256.\n\t\t\t\t#Uncomment this if you want to use cross-correlate for 2D arrays http://docs.scipy.org/doc/scipy-0.16.0/reference/generated/scipy.signal.correlate2d.html\n\t\t\t\t# im = np.asarray(im, dtype=theano.config.floatX)\n\t\t\t\t# im = sp.inner(im, [299, 587, 114]) / 1000.0\n\t\t\t\t# im = np.asarray(im, dtype=theano.config.floatX)\n\t\t\t\t# # normalize per http://en.wikipedia.org/wiki/Cross-correlation\n\t\t\t\t# im = (im - im.mean()) / im.std()\n\n\t\t\tif self.mode == 'L':\n\t\t\t\t# im = np.asarray(im, dtype='float64')\n\t\t\t\t# im = filters.sobel(im)\n\t\t\t\t#im = filters.roberts(im)\n\t\t\t\tim = np.asarray(im, dtype=theano.config.floatX) / 256.\n\t\t\t\t#im = np.asarray(im, dtype=theano.config.floatX)\n\n\t\t\t#im = np.asarray(im, dtype=theano.config.floatX)\n\t\t\t\n\t\t\t#im = np.asarray(im, dtype=np.uint8)\n\t\t\t#print im.shape\n\t\t\t#print im.shape\n\t\t\t#im = np.asarray(im, dtype=theano.config.floatX)\n\t\t\t#im = self.flaten_aux(im)\n\t\t\t#print im.shape\n\t\t\t#im = data.coins() # or any NumPy arr\n\t\t\t#print im.shape\n\t\t\t#image = data.coins() # or any NumPy array!\n\t\t\t#print im\n\t\t\t#im = filter.sobel(im)\n\t\t\t#im = filter.roberts(im)\n\n\t\t\t# im_original = sp.inner(im, [299, 587, 114]) / 1000.0\n\t\t\t# im_original = np.asarray(im_original, dtype=theano.config.floatX)\n\t\t\t# # normalize per http://en.wikipedia.org/wiki/Cross-correlation\n\t\t\t# im = (im_original - im_original.mean()) / im_original.std()\n\t\t\t#print im.shape\n\t\t\t#print edges\n\t\t\t# edges = np.asarray(edges, dtype=np.uint8)\n\t\t\t#Image.fromarray(edges,mode=self.mode).show()\n\n\t\t\t#print edges\n\n\t\t\t#im = np.asarray(im, dtype=theano.config.floatX) / 256.\n\n\t\t\t#print edges.shape\n\t\t\t# io.imshow(im)\n\t\t\t# io.show()\n\t\t\t#im = np.asarray(im, dtype=theano.config.floatX)\n\t\t\t\n\t\t\t# plt.suptitle(im_labels[i], size=16)\n\t\t\t# plt.imshow(im, cmap=plt.cm.gray, interpolation='nearest')\n\t\t\t# plt.show()\n\t\t\t#im = np.asarray(im, dtype=theano.config.floatX)\n\t\t\t#print im.shape\n\t\t\t#self.reconstructImage(im).show()\n\n\t\t\t#im_r = np.asarray(im_r, dtype=theano.config.floatX) / 256.\n\t\t\t# im_r_2 = np.asarray(im_r_2, dtype=theano.config.floatX) / 256.\n\t\t\t# im_r_3 = np.asarray(im_r_3, dtype=theano.config.floatX) / 256.\n\t\t\t#im_f = np.asarray(im_f, dtype=theano.config.floatX) / 256.\n\t\t\t\n\t\t\t#im = im.transpose(2, 0, 1)\n\t\t\t#X.append(np.array(im, dtype=theano.config.floatX))\n\t\t\t#X.append(np.array(im_raw, dtype=theano.config.floatX))\n\t\t\t#X.append(im)\n\t\t\tX.append(im)\n\t\t\t# if i % 100 == 0:\n\t\t\t# \tX.append(im)\n\t\t\t#X.append(im_r)\n\t\t\t# X.append(im_r_2)\n\t\t\t# 
X.append(im_r_3)\n\t\t\t#X.append(im_f)\n\t\t\t#X_original.append(im)\n\n\t\t\t# X.append(np.array(im_r, dtype=theano.config.floatX))\n\t\t\t# X.append(np.array(im_r_2, dtype=theano.config.floatX))\n\n\t\t\t#Uncomment this if you want to work with monochrome\n\t\t\t# im = im.convert('L')\n\t\t\t# pixels_monochrome = np.array(list(im.getdata()), dtype=np.float)\n\t\t\t\t\t\t\n\t\t\t# # scale between 0-1 to speed up computations\n\t\t\t# min_max_scaler = preprocessing.MinMaxScaler(feature_range=(0,1), copy=True)\n\t\t\t# pixels_monochrome = min_max_scaler.fit_transform(pixels_monochrome)\n\n\t\t\t# X.append(pixels_monochrome)\n\n\t\t\t#Y.append(lb.transform([im_labels[i]])[0][0])\n\t\t\t#print lb.transform([im_labels[i]])\n\t\t\t\n\t\t\tlabel = lb.transform([im_labels[i]])[0][0]\n\t\t\t#print lb.transform([im_labels[i]])\n\t\t\t# label_vector = lb.transform([im_labels[i]])[0]\n\t\t\t# label = np.where( label_vector == 1 )[0][0]\n\t\t\t# print \"Label: {}\".format(label)\n\t\t\t#print label\n\t\t\t#Y.append(label)\n\t\t\tY.append(label)\n\t\t\t#Y.append(im_labels[i])\t\n\n\t\t\t\n\t\t\t#Y.append(label)\t\n\t\t\t# Y.append(label)\t\n\t\t\t# except Exception, e:\n\t\t\t# \tprint e\n\t\t\t# \t#raise e\n\n\t\t\t# if i == 30:\n\t\t\t# \tbreak\n\n\t\t\ti += 1\n\t\t\tif self.verbose:\n\t\t\t\tsys.stdout.write(\"\\r Process: {0}/{1}\".format(i, len(imlist)))\n\t\t\t\tsys.stdout.flush()\n\t\t\n\t\t# output = open(self.data_path + 'X_original.pkl', 'wb')\n\t\t# cPickle.dump(X_original, output,protocol=-1)\n\t\t# output.close()\n\n\t\treturn X,Y",
"def Dev_Image_data_generator(folderlist,resize = (920,1200),Transformation = True, scaling = True, batch_size = 16):\n\n while True:\n total_classes = len(folderlist.keys())\n keys = folderlist.keys()\n Images = []\n Image_label = []\n for key in folderlist.keys():\n img_label = random.choice(folderlist[key])\n img = Image.open(img_label,'r')\n h = resize[1]\n l = int(img.size[1]*h/img.size[0])\n img = img.resize((h,l), Image.ANTIALIAS)\n background = Image.new('RGB', (resize[1], resize[0]), (255, 255, 255))\n img_w, img_h = img.size\n bg_w, bg_h = background.size\n offset = (int((bg_w - img_w) / 2), int((bg_h - img_h) / 2))\n background.paste(img, offset)\n background = np.asarray(background)\n if Transformation == True:\n rotation = rotate(background,random.choice(range(360)))\n translate = translate_xy(background,random.choice(range(resize[0]/4)),random.choice(range(resize[1]/4)))\n flip = cv2.flip(rotation,1)\n Y = np.concatenate((rotation[np.newaxis,:,:,:],flip[np.newaxis,:,:,:],translate[np.newaxis,:,:,:]))\n Images.append(Y)\n Images.append(background[np.newaxis,:,:,:])\n Image_label.append([key for i in range(4)])\n else:\n Images.append(background[np.newaxis,:,:,:])\n Image_label.append([key])\n Image_label = np.concatenate(Image_label)\n Images = np.concatenate(Images)\n Image_label = np.array(pd.get_dummies(Image_label))\n X_Image , Y_Image = shuffle(Images,Image_label,random_state=0)\n if scaling == True:\n X_Image = X_Image/255\n else:\n X_Image = X_Image\n batches = int(len(X_Image)/batch_size)\n for batch in range(batches):\n x = X_Image[batch*batch_size:(batch+1)*batch_size,:,:,:]\n y = Y_Image[batch*batch_size:(batch+1)*batch_size]\n yield((x,y))",
"def get_augmenter():\n\n augmenter = iaa.Sequential([\n iaa.Fliplr(0.5), # horizontal flips\n iaa.Crop(percent=(0, 0.1)), # random crops\n # Small gaussian blur with random sigma between 0 and 0.5.\n # But we only blur about 50% of all images.\n iaa.Sometimes(\n 0.5,\n iaa.GaussianBlur(sigma=(0, 0.5))\n ),\n # Strengthen or weaken the contrast in each image.\n iaa.LinearContrast((0.75, 1.5)),\n # Add gaussian noise.\n # For 50% of all images, we sample the noise once per pixel.\n # For the other 50% of all images, we sample the noise per pixel AND\n # channel. This can change the color (not only brightness) of the\n # pixels.\n iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5),\n # Make some images brighter and some darker.\n # In 20% of all cases, we sample the multiplier once per channel,\n # which can end up changing the color of the images.\n iaa.Multiply((0.8, 1.2), per_channel=0.2),\n # Apply affine transformations to each image.\n # Scale/zoom them, translate/move them, rotate them and shear them.\n iaa.Affine(\n scale={\"x\": (0.80, 1.2), \"y\": (0.80, 1.2)},\n translate_percent={\"x\": (-0.2, 0.2), \"y\": (-0.2, 0.2)},\n rotate=(-25, 25),\n shear=(-6, 6)\n )\n], random_order=True) # apply augmenters in random order\n\n return augmenter",
"def load_images(input_dir=\"/tmp/mapswipe/project-1\", n_images=2000, seed=1):\n class_map = {1: \"1\", 0: \"5\"}\n output_dir = \"/Users/thead/git/dreamview/data/\"\n\n X_ = []\n y_ = []\n for new_klass in class_map:\n images = []\n for klass in class_map[new_klass]:\n for img in glob.glob(input_dir + \"/%s/*/*/*/aerial.jpeg\" % klass):\n if os.stat(img).st_size > 0:\n images.append(img)\n\n images = shuffle(images, random_state=seed+42+new_klass)\n images = images[:n_images]\n X_ += images\n y_ += [new_klass] * len(images)\n\n # XXX deduce array size from an actual image\n X = np.zeros((2*n_images, 256*256), dtype=np.ubyte)\n y = np.zeros(2*n_images, dtype=np.int)\n\n for n, (img_path, klass) in enumerate(zip(X_, y_)):\n # the order of these OPs has been chosen on purpose, don't mess\n # without checking what happens\n img = imread(img_path)\n img = equalize_adapthist(img)\n img = rgb2grey(img)\n img = img_as_ubyte(img)\n\n if not n % 10:\n fname = os.path.split(img_path)[:-1]\n fname = os.path.join(*fname, \"aerial-processed.jpeg\")\n imsave(fname, img)\n\n X[n,:] = img.ravel()\n y[n] = klass\n\n return X, y",
"def image_mask_augmentation(x, y, batch_size=4, transformations=None, seed=6):\n # Always perform some basic transformations\n if transformations is None:\n transformations = dict(\n rotation_range=10.0,\n height_shift_range=0.02,\n shear_range=5,\n horizontal_flip=True,\n vertical_flip=False,\n fill_mode=\"constant\"\n )\n\n datagen_x = ImageDataGenerator(**transformations)\n datagen_x.fit(x, augment=True, seed=seed)\n datagen_y = ImageDataGenerator(**transformations)\n datagen_y.fit(y, augment=True, seed=seed)\n\n x_aug = datagen_x.flow(x, batch_size=batch_size, seed=seed)\n y_aug = datagen_y.flow(y, batch_size=batch_size, seed=seed)\n\n generator = zip(x_aug, y_aug)\n\n return generator",
"def processImage(fpaths_src, label_map, fnames_src, img_idx):\n global counter\n \n n_imgs = len(fpaths_src)\n print(\"Processing %s -- %s/%s (%s%%)\"%(fnames_src[img_idx],counter,n_imgs,round(100.*counter/n_imgs)))\n \n path = fpaths_src[img_idx]\n src_image_raw = Image.open(path, 'r')\n \n # size normalization of the image\n if not (args.resize == None):\n src_image_raw = src_image_raw.resize(size=(int(args.resize), int(args.resize)), resample=Image.BILINEAR)\n \n # convert to writable numpy array\n src_image = np.asarray(src_image_raw, dtype=np.uint8)\n src_image.setflags(write=True)\n \n # some dummy label\n label = -99.99\n # the labels\n if not (label_map == {}):\n # let the label start at 1, instead of 0\n label = int(label_map[fnames_src[img_idx]])+1\n else:\n # add a dummy label (between 0 and 1)\n label = np.random.rand()\n \n image_features = []\n \n # add the original\n image_features.append(generateFeatures(src_image,label,args.knn))\n \n if args.augment == 1:\n print \"Augmenting dataset...\"\n # data augmentation techniques\n rotation_angles = [i for i in xrange(36,360,36)] # samples are transformed by these rotation angles\n \n flip_x = True # data augmentation by flipping around x axis\n flip_y = True # data augmentation by flipping around y axis\n flip_xy= True # data augmentation by flipping around x AND y axis\n \n for angle in rotation_angles:\n rot_matrix = cv2.getRotationMatrix2D(\n (src_image.shape[1]/2.,src_image.shape[0]/2.),\n angle,\n 1.0)\n rot_sample_crop = np.array([])\n rot_sample_crop = cv2.warpAffine(src_image,\n rot_matrix,\n (src_image.shape[1],src_image.shape[0]),\n rot_sample_crop,\n cv2.INTER_LINEAR,\n cv2.BORDER_REFLECT_101)\n \n # add the sample to the dataset\n image_features.append(generateFeatures(rot_sample_crop,label,args.knn))\n \n # add 3 flipped copies\n if flip_x:\n rot_sample_crop_x = cv2.flip(rot_sample_crop,0)\n image_features.append(generateFeatures(rot_sample_crop_x,label,args.knn))\n if flip_y:\n rot_sample_crop_y = cv2.flip(rot_sample_crop,1)\n image_features.append(generateFeatures(rot_sample_crop_y,label,args.knn))\n if flip_xy:\n rot_sample_crop_xy = cv2.flip(rot_sample_crop,-1)\n image_features.append(generateFeatures(rot_sample_crop_xy,label,args.knn))\n \n counter+=1\n\n # return a nx128 or nxk matrix for the features of all modifications of this image\n feat_matrix = np.asarray(image_features)\n return feat_matrix",
"def image_augmentations(\n image,\n data_augmentations,\n model_input_image_size,\n label=None):\n if image.get_shape() == None:\n im_size = model_input_image_size\n else:\n im_size = image.get_shape().as_list()\n im_size_check = True # np.any(\n # np.less_equal(\n # model_input_image_size[:2],\n # im_size[:2]))\n if data_augmentations is not None:\n for aug in data_augmentations:\n # Pixel/image-level augmentations\n if aug == 'image_float32':\n image = tf.cast(image, tf.float32)\n if aug == 'label_float32':\n label = tf.cast(label, tf.float32)\n if aug == 'bfloat16':\n image = tf.cast(image, tf.bfloat16)\n if aug == 'singleton':\n image = tf.expand_dims(image, axis=-1)\n print 'Adding singleton dimension to image.'\n if aug == 'sgl_label' or aug == 'singleton_label':\n label = tf.expand_dims(label, axis=-1)\n print 'Adding singleton dimension to label.'\n if aug == 'coco_labels':\n label = tf.nn.relu(label - 91)\n if aug == 'contrastive_loss':\n label = tf.stack(\n [tf.ones_like(label), tf.zeros_like(label)], -1)\n if aug == 'bsds_normalize':\n data = np.load(\n '/media/data_cifs/image_datasets/BSDS500/images/train/file_paths.npz')\n mean = data['mean'].squeeze(0)\n stds = data['stds'].squeeze(0)\n image = (image - mean) / stds\n if aug == 'bsds_crop' and im_size_check:\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n scale_choices = tf.convert_to_tensor(\n # [1. / 2., 1.1 / 2., 1.2 / 2.])\n [1., 1, 1.1, 1.2])\n samples = tf.multinomial(\n tf.log([tf.ones_like(scale_choices)]), 1)\n image_shape = image.get_shape().as_list()\n scale = scale_choices[tf.cast(samples[0][0], tf.int32)]\n scale_tf = tf.cast(\n tf.round(\n np.asarray(\n image_shape[:2]).astype(\n np.float32) * scale),\n tf.int32)\n combined = tf.concat([image, label], axis=-1)\n combo_shape = combined.get_shape().as_list()\n combined_resize = tf.squeeze(\n tf.image.resize_nearest_neighbor(\n tf.expand_dims(combined, axis=0),\n scale_tf,\n align_corners=True),\n axis=0)\n combined_crop = tf.random_crop(\n combined_resize,\n tf.concat(\n [model_input_image_size[:2], [combo_shape[-1]]], 0))\n image = combined_crop[:, :, :image_shape[-1]]\n label = combined_crop[:, :, image_shape[-1]:]\n image.set_shape(model_input_image_size)\n label.set_shape(\n model_input_image_size[:2] + [\n combo_shape[-1] - model_input_image_size[-1]])\n print 'Applying BSDS crop.'\n if aug == 'hed_resize' and im_size_check:\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n scale_choices = tf.convert_to_tensor(\n # [1. 
/ 2., 1.1 / 2., 1.2 / 2.])\n np.arange(1, 1.51, 0.1)) # 0.7, 1.5\n samples = tf.multinomial(\n tf.log([tf.ones_like(scale_choices)]), 1)\n image_shape = image.get_shape().as_list()\n scale = scale_choices[tf.cast(samples[0][0], tf.int32)]\n scale_tf = tf.cast(\n tf.round(\n np.asarray(\n image_shape[:2]).astype(\n np.float32) * scale),\n tf.int32)\n combined = tf.concat([image, label], axis=-1)\n combo_shape = combined.get_shape().as_list()\n combined_resize = tf.squeeze(\n tf.image.resize_bilinear(\n tf.expand_dims(combined, axis=0),\n scale_tf,\n align_corners=True),\n axis=0)\n print 'Applying HED resize.'\n if aug == 'uint8_rescale':\n image = tf.cast(image, tf.float32) / 255.\n print 'Applying uint8 rescale to the image.'\n if aug == 'cube_plus_rescale':\n image = tf.cast(image, tf.float32) / 13273.\n print 'Applying uint8 rescale to the image.'\n if aug == 'uint8_rescale_label':\n label = tf.cast(label, tf.float32) / 255.\n print 'Applying uint8 rescale to the label.'\n if aug == 'uint8_rescale_-1_1':\n image = 2 * (tf.cast(image, tf.float32) / 255.) - 1\n print 'Applying uint8 rescale.'\n if aug == 'image_to_bgr':\n image = tf.stack(\n [image[..., 2], image[..., 1], image[..., 0]], axis=-1)\n if aug == 'pascal_normalize':\n image = image - [123.68, 116.78, 103.94]\n if aug == 'ilsvrc12_normalize':\n MEAN_RGB = [0.485 * 255, 0.456 * 255, 0.406 * 255]\n STDDEV_RGB = [0.229 * 255, 0.224 * 255, 0.225 * 255]\n image = (image - MEAN_RGB) / STDDEV_RGB\n if aug == 'random_contrast':\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n image = tf.image.random_contrast(image, lower=0.2, upper=1.8)\n print 'Applying random contrast.'\n if aug == 'random_brightness':\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n image = tf.image.random_brightness(image, max_delta=63.)\n print 'Applying random brightness.'\n if aug == 'grayscale' and im_size_check:\n # image = tf.image.rgb_to_grayscale(image)\n if len(image.get_shape().as_list()) == 2:\n image = tf.expand_dims(image, axis=-1)\n else:\n image = tf.expand_dims(image[..., 0], axis=-1)\n print 'Converting to grayscale.'\n if aug == 'rgb2gray' and im_size_check:\n image = tf.image.rgb_to_grayscale(image)\n print 'Converting rgb2gray.'\n if aug == 'clip_uint8' and im_size_check:\n image = tf.minimum(image, 255.)\n image = tf.maximum(image, 0.)\n if aug == 'cube_plus_crop':\n image = cube_plus_crop(image, model_input_image_size)\n # Affine augmentations\n if aug == 'rotate' and im_size_check:\n max_theta = 22.\n angle_rad = (max_theta / 180.) 
* math.pi\n angles = tf.random_uniform([], -angle_rad, angle_rad)\n transform = tf.contrib.image.angles_to_projective_transforms(\n angles,\n im_size[0],\n im_size[1])\n image = tf.contrib.image.transform(\n image,\n tf.contrib.image.compose_transforms(transform),\n interpolation='BILINEAR') # or 'NEAREST'\n print 'Applying random rotate.'\n if aug == 'rotate90' and im_size_check:\n image = tf.image.rot90(\n image,\n tf.random_uniform(\n shape=[],\n minval=0,\n maxval=4,\n dtype=tf.int32))\n print 'Applying random 90 degree rotate.'\n if aug == 'rotate90_image_label' and im_size_check:\n concat = tf.image.rot90(\n tf.concat([image, label], -1),\n tf.random_uniform(\n shape=[],\n minval=0,\n maxval=4,\n dtype=tf.int32))\n image = concat[..., :im_size[-1]]\n label = concat[..., im_size[-1]:]\n print 'Applying random 90 degree rotate to images and labels.'\n if aug == 'stack3d':\n image = tf.concat([image, image, image], axis=-1)\n if aug == 'rot_image_label' and im_size_check:\n max_theta = 30.\n angle_rad = (max_theta / 180.) * math.pi\n angles = tf.random_uniform([], -angle_rad, angle_rad)\n transform = tf.contrib.image.angles_to_projective_transforms(\n angles,\n im_size[0],\n im_size[1])\n image = tf.contrib.image.transform(\n image,\n tf.contrib.image.compose_transforms(transform),\n interpolation='BILINEAR') # or 'NEAREST'\n label = tf.contrib.image.transform(\n label,\n tf.contrib.image.compose_transforms(transform),\n interpolation='BILINEAR') # or 'NEAREST'\n print 'Applying random rotate.'\n if aug == 'random_scale_crop_image_label'\\\n and im_size_check:\n scale_choices = tf.convert_to_tensor(\n [1., 1.04, 1.08, 1.12, 1.16])\n samples = tf.multinomial(\n tf.log([tf.ones_like(scale_choices)]), 1)\n image_shape = image.get_shape().as_list()\n scale = scale_choices[tf.cast(samples[0][0], tf.int32)]\n scale_tf = tf.cast(\n tf.round(\n np.asarray(\n model_input_image_size[:2]).astype(\n np.float32) * scale),\n tf.int32)\n combined = tf.concat([image, label], axis=-1)\n combo_shape = combined.get_shape().as_list()\n combined_resize = tf.squeeze(\n tf.image.resize_bicubic(\n tf.expand_dims(combined, axis=0),\n scale_tf,\n align_corners=True),\n axis=0)\n combined_crop = tf.random_crop(\n combined_resize, tf.concat(\n [model_input_image_size[:2], [combo_shape[-1]]], 0))\n image = combined_crop[:, :, :image_shape[-1]]\n label = combined_crop[:, :, image_shape[-1]:]\n image.set_shape(model_input_image_size)\n label.set_shape(\n model_input_image_size[:2] + [\n combo_shape[-1] - model_input_image_size[-1]])\n if aug == 'rc_res' and im_size_check:\n image = random_crop(image, model_input_image_size)\n if len(model_input_image_size) > 2:\n model_input_image_size = model_input_image_size[:2]\n ms = [x // 2 for x in model_input_image_size]\n image = resize_image_label(\n im=image,\n model_input_image_size=ms,\n f='bicubic')\n print 'Applying random crop and resize.'\n if aug == 'cc_res' and im_size_check:\n image = center_crop(image, model_input_image_size)\n if len(model_input_image_size) > 2:\n model_input_image_size = model_input_image_size[:2]\n ms = [x // 2 for x in model_input_image_size]\n image = resize_image_label(\n im=image,\n model_input_image_size=ms,\n f='bicubic')\n print 'Applying center crop and resize.'\n if aug == 'random_crop' and im_size_check:\n image = random_crop(image, model_input_image_size)\n print 'Applying random crop.'\n if aug == 'center_crop' and im_size_check:\n image = center_crop(image, model_input_image_size)\n print 'Applying center crop.'\n if aug == 
'rc_image_label' and im_size_check:\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n image, label = crop_image_label(\n image=image,\n label=label,\n size=model_input_image_size,\n crop='random')\n if aug == 'cc_image_label' and im_size_check:\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n image, label = crop_image_label(\n image=image,\n label=label,\n size=model_input_image_size,\n crop='center')\n if aug == 'resize' and im_size_check:\n if len(model_input_image_size) > 2:\n model_input_image_size = model_input_image_size[:2]\n image = resize_image_label(\n im=image,\n model_input_image_size=model_input_image_size,\n f='bicubic')\n print 'Applying area resize.'\n if aug == 'jk_resize' and im_size_check:\n if len(model_input_image_size) > 2:\n model_input_image_size = model_input_image_size[:2]\n image = tf.image.resize_image_with_crop_or_pad(\n image,\n model_input_image_size[0],\n model_input_image_size[1])\n print 'Applying area resize.'\n if aug == 'random_crop_and_res_cube_plus' and im_size_check:\n im_shape = image.get_shape().as_list()\n im_shape[0] /= 4\n im_shape[1] /= 4\n image = resize_image_label(\n im=image,\n model_input_image_size=im_shape[:2],\n f='bicubic')\n image = random_crop(image, model_input_image_size)\n if aug == 'center_crop_and_res_cube_plus' and im_size_check:\n im_shape = image.get_shape().as_list()\n im_shape[0] /= 4\n im_shape[1] /= 4\n image = resize_image_label(\n im=image,\n model_input_image_size=im_shape[:2],\n f='bicubic')\n image = center_crop(image, model_input_image_size)\n if aug == 'res_and_crop' and im_size_check:\n model_input_image_size_1 = np.asarray(\n model_input_image_size[:2]) + 28\n image = resize_image_label(\n im=image,\n model_input_image_size=model_input_image_size_1,\n f='area')\n image = center_crop(image, model_input_image_size)\n print 'Applying area resize.'\n if aug == 'res_nn' and im_size_check:\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n if len(model_input_image_size) > 2:\n model_input_image_size = model_input_image_size[:2]\n image = resize_image_label(\n im=image,\n model_input_image_size=model_input_image_size,\n f='nearest')\n print 'Applying nearest resize.'\n if aug == 'res_image_label' and im_size_check:\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n if len(model_input_image_size) > 2:\n model_input_image_size = model_input_image_size[:2]\n image = resize_image_label(\n im=image,\n model_input_image_size=model_input_image_size,\n f='bicubic')\n label = resize_image_label(\n im=label,\n model_input_image_size=model_input_image_size,\n f='bicubic')\n print 'Applying bilinear resize.'\n if aug == 'res_nn_image_label' and im_size_check:\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n if len(model_input_image_size) > 2:\n model_input_image_size = model_input_image_size[:2]\n image = resize_image_label(\n im=image,\n model_input_image_size=model_input_image_size,\n f='nearest')\n label = resize_image_label(\n im=label,\n model_input_image_size=model_input_image_size,\n f='nearest')\n print 'Applying nearest resize.'\n if aug == 'left_right':\n image = image_flip(image, direction='left_right')\n print 'Applying random flip left-right.'\n if aug == 'up_down':\n image = image_flip(image, direction='up_down')\n print 'Applying random flip up-down.'\n if aug == 'lr_viz_flip':\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n image, label = lr_viz_flip(image, label)\n image, label = ud_viz_flip(image, label)\n if aug == 
'lr_flip_image_label':\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n image, label = lr_flip_image_label(image, label)\n if aug == 'ud_flip_image_label':\n assert len(image.get_shape()) == 3, '4D not implemented yet.'\n image, label = ud_flip_image_label(image, label)\n if aug == 'gratings_modulate':\n modulate = 10\n image //= modulate\n offset = (255 / 2) - ((255 / modulate) / 2)\n image += offset\n if aug == 'gaussian_noise':\n im_shape = image.get_shape().as_list()\n assert len(im_shape) == 3, '4D not implemented yet.'\n sigma = 1. / 10.\n mu = 0.\n image = image + tf.random_normal(\n im_shape,\n mean=mu,\n stddev=sigma)\n print 'Applying gaussian noise.'\n if aug == 'gaussian_noise_small':\n im_shape = image.get_shape().as_list()\n assert len(im_shape) == 3, '4D not implemented yet.'\n sigma = 1. / 20.\n mu = 0.\n image = image + tf.random_normal(\n im_shape,\n mean=mu,\n stddev=sigma)\n print 'Applying gaussian noise.'\n if aug == 'mixup':\n raise RuntimeError('Mixup not properly implemented yet.')\n alpha = 0.4\n dist = tf.distributions.Beta(alpha, alpha)\n image = image * dist + (1 - dist) * tf.roll(image, 0, 1)\n label = label * dist + (1 - dist) * tf.roll(label, 0, 1)\n if aug == 'hed_brightness':\n image = tf.image.random_brightness(image, 63)\n if aug == 'hed_contrast':\n image = tf.image.random_contrast(image, lower=0.4, upper=1.5)\n if aug == 'blur_labels':\n label = tf_blur(\n image=label,\n kernel_size=3, # extent\n name='label_blur',\n normalize=True,\n sigma=1.)\n if aug == 'calculate_rate_time_crop':\n im_shape = image.get_shape().as_list()\n minval = im_shape[0] // 3\n time_crop = tf.random_uniform(\n [],\n minval=minval,\n maxval=im_shape[0],\n dtype=tf.int32)\n\n # For now always pull from the beginning\n indices = tf.range(0, time_crop, dtype=tf.int32)\n selected_image = tf.gather(image, indices)\n padded_image = tf.zeros(\n [im_shape[0] - time_crop] + im_shape[1:],\n dtype=selected_image.dtype)\n\n # Randomly concatenate pad to front or back\n image = tf.cond(\n pred=tf.greater(\n tf.random_uniform(\n [],\n minval=0,\n maxval=1,\n dtype=tf.float32),\n 0.5),\n true_fn=lambda: tf.concat(\n [selected_image, padded_image], axis=0),\n false_fn=lambda: tf.concat(\n [padded_image, selected_image], axis=0)\n )\n image.set_shape(im_shape)\n\n # Convert label to rate\n label = label / im_shape[0]\n if aug == 'calculate_rate':\n label = label / image.get_shape().as_list()[0]\n print 'Applying rate transformation.'\n if aug == 'threshold':\n image = tf.cast(tf.greater(image, 0.1), tf.float32)\n print 'Applying threshold.'\n if aug == 'nonzero_label':\n label = tf.cast(tf.greater(label, 0.2), tf.float32)\n print 'Applying threshold.'\n if aug == 'zero_one':\n image = tf.minimum(tf.maximum(image, 0.), 1.)\n print 'Applying threshold.'\n if aug == 'timestep_duplication':\n image = tf.stack([image for iid in range(7)])\n print 'Applying timestep duplication.'\n if aug == 'per_image_standardization':\n image = tf.image.per_image_standardization(image)\n print 'Applying per-image zscore.'\n if aug == 'flip_image_polarity':\n image = tf.abs(image - 1.)\n if aug == 'flip_label_polarity':\n label = tf.abs(label - 1.)\n if aug == 'NCHW':\n image = tf.transpose(image, (2, 0, 1))\n if aug == 'bfloat16_image':\n image = tf.cast(image, tf.bfloat16)\n if aug == 'bfloat16_label':\n label = tf.cast(label, tf.bfloat16)\n if aug == 'hfloat16_image':\n image = tf.cast(image, tf.float16)\n if aug == 'hfloat16_label':\n label = tf.cast(label, tf.float16)\n if aug == 
'threshold_label':\n label = tf.cast(tf.greater(label, 0.999), tf.float32)\n print 'Applying threshold of 0.999 to the label.'\n if aug == 'threshold_label_255':\n # cABC label = tf.cast(tf.greater(label, 200), tf.float32)\n label = tf.cast(tf.greater(label, 10), tf.float32)\n print 'Applying threshold of 127.5 to the label.'\n if aug == 'normalize_label':\n label = tf.cast(label, tf.float32)\n label = label / tf.reduce_max(label) # tf.cast(tf.greater(label, 25), tf.float32)\n print 'Normalizing label to [0, 1].'\n if aug == 'scale_to_255':\n image = image * 255.\n if aug == 'clip_255':\n image = tf.maximum(tf.minimum(255., image), 0.)\n # else:\n # assert len(image.get_shape()) == 3, '4D not implemented yet.'\n # image = tf.image.resize_image_with_crop_or_pad(\n # image, model_input_image_size[0], model_input_image_size[1])\n return image, label",
"def Valid_Image_data_generator(folderlist,resize = (920,1200),Transformation = True, scaling = True):\n\n while True:\n total_classes = len(folderlist.keys())\n keys = folderlist.keys()\n Images = []\n Image_label = []\n for key in tqdm(folderlist.keys()):\n for j in range(len(folderlist[key])):\n img_label = folderlist[key][j]\n img = Image.open(img_label,'r')\n h = resize[1]\n l = int(img.size[1]*h/img.size[0])\n img = img.resize((h,l), Image.ANTIALIAS)\n background = Image.new('RGB', (resize[1], resize[0]), (255, 255, 255))\n img_w, img_h = img.size\n bg_w, bg_h = background.size\n offset = (int((bg_w - img_w) / 2), int((bg_h - img_h) / 2))\n background.paste(img, offset)\n background = np.asarray(background)\n if Transformation == True:\n rotation = rotate(background,random.choice(range(360)))\n translate = translate_xy(background,random.choice(range(resize[0]/4)),random.choice(range(resize[1]/4)))\n flip = cv2.flip(rotation,1)\n Y = np.concatenate((rotation[np.newaxis,:,:,:],flip[np.newaxis,:,:,:],translate[np.newaxis,:,:,:]))\n Images.append(Y)\n Images.append(background[np.newaxis,:,:,:])\n Image_label.append([key for i in range(4)]) # Four because we are doing rot,trans,flip and one original Image\n else:\n Images.append(background[np.newaxis,:,:,:])\n Image_label.append([key])\n Image_label = np.concatenate(Image_label)\n Images = np.concatenate(Images)\n Image_label = np.array(pd.get_dummies(Image_label))\n X_Image , Y_Image = shuffle(Images,Image_label,random_state=0)\n if scaling == True:\n X_Image = X_Image/255\n else:\n X_Image = X_Image\n return (X_Image,Y_Image)",
"def _data_augmentation(feature_dict):\n image_features = feature_dict[_transformed_name(constants.IMAGE_KEY)]\n image_features = _image_augmentation(image_features)\n feature_dict[_transformed_name(constants.IMAGE_KEY)] = image_features\n return feature_dict"
]
| [
"0.6911291",
"0.6776172",
"0.6606806",
"0.6512269",
"0.64750767",
"0.64673746",
"0.6444081",
"0.62625486",
"0.6222153",
"0.62075675",
"0.61686075",
"0.6148961",
"0.6134152",
"0.61139226",
"0.6082042",
"0.6072987",
"0.605287",
"0.6020677",
"0.6015514",
"0.6012553",
"0.6000056",
"0.5983223",
"0.59828985",
"0.5971195",
"0.59456605",
"0.59071976",
"0.58877075",
"0.58437115",
"0.5840834",
"0.58279777"
]
| 0.7687681 | 0 |
Returns an ``Iterable`` containing all the instances in the specified dataset. If ``self.lazy`` is False, this calls ``self._read()``, ensures that the result is a list, then returns the resulting list. If ``self.lazy`` is True, this returns an object whose ``__iter__`` method calls ``self._read()`` each iteration. In this case your implementation of ``_read()`` must also be lazy (that is, not load all instances into memory at once), otherwise you will get a ``ConfigurationError``. In either case, the returned ``Iterable`` can be iterated over multiple times. It's unlikely you want to override this function, but if you do your result should likewise be repeatedly iterable. | def read(self, *args, **kwargs) -> Iterable[Instance]:
lazy = getattr(self, 'lazy', None)
if lazy is None:
logger.warning("DatasetReader.lazy is not set, "
"did you forget to call the superclass constructor?")
if lazy:
return _LazyInstances(lambda: iter(self._read(*args, **kwargs)))
else:
instances = self._read(*args, **kwargs)
if not isinstance(instances, list):
instances = [instance for instance in Tqdm.tqdm(instances)]
if not instances:
raise ConfigurationError(
f"No instances were read from the given args ({args}). "
f"and kwargs ({kwargs})Is the path correct?")
return instances | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __iter__(self):\n return iter(self._datasets)",
"def __iter__(self) -> Iterator:\n return iter(self.get_data_loader())",
"def __iter__(self) -> Union[Iterator[int], Iterator[Tuple[int, Any]]]:\n self.size = self._data._dataset_size\n if (not self._data._fully_cached or\n self._data._should_call_prefetch_source):\n self._data._start_iteration()\n # First epoch of lazy loading, calling prefetch, and returning\n # indices and examples.\n iterator = self._iterator_unknown_size()\n else:\n # Non-lazy loading, or when dataset has been fully iterated.\n assert self.size is not None\n iterator = self._iterator_given_size(self.size)\n\n if self._data._should_call_prefetch_processed:\n # Processing routine is performed in main process. Yield\n # processed examples instead.\n map_fn = lambda idx: (idx, self._data._processed_cache[idx])\n elif self._data._should_yield_raw_example:\n # Return indices and examples for any epoch in this case.\n map_fn = lambda idx: (idx, self._data._source[idx])\n else:\n map_fn = None # type: ignore\n if map_fn is not None:\n return map(map_fn, iterator)\n\n return iterator",
"def __iter__(self):\n # deterministically shuffle based on epoch\n if self.shuffle:\n g = torch.Generator()\n g.manual_seed(self.epoch)\n indices = torch.randperm(len(self.dataset), generator=g).tolist()\n else:\n indices = torch.arange(len(self.dataset)).tolist()\n\n # add extra samples to make it evenly divisible\n indices += indices[:(self.total_size - len(indices))]\n if not len(indices) == self.total_size:\n raise ValueError('the length of the indices should be equal to total_size')\n\n # subsample\n indices = indices[self.rank:self.total_size:self.num_replicas]\n if not len(indices) == self.num_samples:\n raise ValueError(\"the length of the indices should be equal to num_samples in subsample\")\n\n return iter(indices)",
"def __iter__(self):\n items = self._fetch()\n for item in items:\n yield item",
"def __iter__(self):\n return self.data_container.__iter__()",
"def get_iterator(self,\n dataset_name: Optional[str] = None) -> Iterator[Batch]:\n if dataset_name is not None or self._current_dataset_name is None:\n dataset_name = self._validate_dataset_name(dataset_name)\n elif self._current_dataset_name is not None:\n dataset_name = self._current_dataset_name\n else:\n raise ValueError(\"No dataset is selected.\")\n\n return iter(self._datasets[dataset_name])",
"def __iter__(self):\n for item in self._reader:\n yield item",
"def __iter__(self):\n return self._data.__iter__()",
"def fetch_all(self):\n return list(iter(self))",
"def __iter__(self):\n try:\n self._load(False)\n except KeyError:\n return iter([])\n\n return self._iter(self.head - self.count, self.count)",
"def __iter__(self):\n for datum in self.data[self.name]:\n yield datum",
"def __iter__(self):\n self._fetch_all()\n return iter(self._result_cache)",
"def __iter__(self):\n with self.handler as handler:\n if self.shuffle:\n # load all samples into memory\n samples = []\n while True:\n sample = handler.read()\n if sample is None:\n break\n sample = self.transform(sample)\n samples.append(sample)\n random.shuffle(samples)\n for sample in samples:\n yield sample\n else:\n # lazy-loading mode\n while True:\n sample = handler.read()\n if sample is None:\n break\n sample = self.transform(sample)\n yield sample",
"def instances(self):\n for d in os.listdir(self.directory):\n yield self.instance(self.directory, d)",
"def iter(self, **kwargs):\n\n reader = self.legacy_get_reader(**kwargs)\n for image in reader:\n yield image",
"def __iter__(self):\n\t\tfor i, data in enumerate(self.dataloader):\n\t\t\tif i * self.opt.batch_size >= self.opt.max_dataset_size:\n\t\t\t\tbreak\n\t\t\tyield data",
"def get_train_iterator(self) -> Iterable[Batch]:\n if self._train_name not in self._datasets:\n raise ValueError(\"Training data not provided.\")\n return self.get_iterator(self._train_name)",
"def __iter__(self):\n return iter(self._get_storage())",
"def __iter__(self):\n batch = []\n for sample in self.dataset:\n batch.append(sample)\n if len(batch) == self.size:\n yield self.transform(batch)\n batch = []\n if batch:\n # the last batch may be less then batch size.\n yield self.transform(batch)",
"def __iter__(self) -> Generator:\n\t\treturn (article for article in self._articles)",
"def __iter__(self):\n for i, data in enumerate(self.dataloader):\n if i * self.opt.batch_size >= self.opt.max_dataset_size:\n break\n yield data",
"def __iter__(self):\n\n # Open the data reader\n self.data.open()\n\n starts = np.arange(self.start, self.stop, self.chunksize)\n for a, b in zip_longest(starts, starts[1:], fillvalue=self.stop):\n yield self.data.read(a, b, **self.kwargs)",
"def numpy_iterator(self, **as_dataset_kwargs):\n def iterate():\n dataset = self.as_dataset(**as_dataset_kwargs)\n dataset = dataset.prefetch(128)\n return dataset_utils.iterate_over_dataset(dataset)\n\n if tf.executing_eagerly():\n return iterate()\n else:\n with tf.Graph().as_default():\n return iterate()",
"def __iter__(self):\n return iter(self._d)",
"async def find_all(self, lazy=None, alias=None):\n to_list_arguments = {}\n if self._limit is not None:\n to_list_arguments[\"length\"] = self._limit\n else:\n to_list_arguments[\"length\"] = DEFAULT_LIMIT\n\n cursor = self._get_find_cursor(alias=alias)\n\n self._filters = {}\n\n docs = await cursor.to_list(**to_list_arguments)\n\n # if _loaded_fields is not empty then documents are partly loaded\n is_partly_loaded = bool(self._loaded_fields)\n\n result = []\n for doc in docs:\n obj = self.__klass__.from_son(\n doc,\n # set projections for references (if any)\n _reference_loaded_fields=self._reference_loaded_fields,\n _is_partly_loaded=is_partly_loaded,\n )\n\n if (lazy is not None and not lazy) or not obj.is_lazy:\n await obj.load_references(obj._fields)\n\n result.append(obj)\n\n return result",
"def __iter__(self):\n return iter(self.data)",
"def __iter__(self):\n return iter(self.data)",
"def __iter__(self):\n return iter(self.data)",
"def get_iterator(dataset):\n if context.executing_eagerly():\n iterator = dataset_ops.make_one_shot_iterator(dataset)\n else:\n iterator = dataset_ops.make_initializable_iterator(dataset)\n initialize_iterator(iterator)\n return iterator"
]
| [
"0.7040597",
"0.6643148",
"0.6294192",
"0.62414855",
"0.61743397",
"0.61443645",
"0.60582954",
"0.6047906",
"0.59985113",
"0.5981316",
"0.5973242",
"0.59701633",
"0.5944178",
"0.5882069",
"0.58764064",
"0.58696884",
"0.5857583",
"0.5841389",
"0.58389926",
"0.5835787",
"0.5801935",
"0.57820415",
"0.57759523",
"0.5755827",
"0.5739001",
"0.5731905",
"0.57232124",
"0.57232124",
"0.57232124",
"0.57121956"
]
| 0.8134299 | 0 |
Test that the login page elements such as the button, username field, and password field are loaded properly. | def test_login_page_elements(self):
response = self.client.get(reverse('users:login'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'users/login.html')
self.assertContains(response, ' <input type="text" name="username"')
self.assertContains(response, '<input type="password" name="password"')
self.assertContains(response, '<button class="btn btn-outline-info" type="submit">') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_login(self):\n url_extend = 'user_auth/login/'\n self.browser.get(self.url + url_extend)\n\n # enter the username and password.\n username_field = self.browser.find_element_by_name('user_name')\n username_field.send_keys('user4')\n password_field = self.browser.find_element_by_name('password')\n password_field.send_keys('user')\n\n # click login button.\n # get the first input button under the first form in login page.\n login_button = self.browser.find_element_by_xpath(\"//form[1]/fieldset[1]/input[@type='submit']\")\n try:\n login_button.click()\n except:\n raise Exception(\"Login Error!\")",
"def test_login_page_has_fields(testapp):\n html = testapp.get('/login').html\n assert len(html.find_all('input'))",
"def test_login(self):\n self.driver.find_element_by_link_text(\"Sign in\").click()\n self.driver.find_element_by_id(\"email\").send_keys(\"[email protected]\")\n self.driver.find_element_by_id(\"passwd\").send_keys(\"control123\")\n self.driver.find_element_by_id(\"SubmitLogin\").click()\n time.sleep(5)",
"def test_login_page(self):\n r = requests.get(self.url)\n self.assertEqual(r.status_code, 200)\n soup = BeautifulSoup(r.content)\n self.assertEqual(soup.findAll('legend')[0].contents[0], 'Sign In')",
"def login(self,username,password):\n try:\n userNameField = self.driver.findElement(element = self.locators.lblUsernameField, elementType = \"id\", timeout = \"4\")\n if not userNameField:\n return False\n\n userNameField.click()\n userNameField.send_keys(username)\n time.sleep(self.waitShort)\n passwordField = self.driver.findElement(element = self.locators.lblPasswordField, elementType = \"id\", timeout = \"4\")\n if not passwordField:\n return False\n\n passwordField.click()\n passwordField.send_keys(password)\n time.sleep(self.waitShort)\n\n signIn = self.driver.findElement(element = self.locators.signIn, elementType = \"id\", timeout = \"4\")\n if not signIn:\n return False\n\n signIn.click()\n time.sleep(self.waitLong)\n #handler for No Thanks pop-up\n noThanks = self.driver.findElement(element = self.locators.btnNoThanks, elementType = \"id\", timeout = \"4\")\n if noThanks:\n noThanks.click()\n return True\n except Exception as exp:\n print \"Error in login(): {}\".format(exp)\n return False\n return True",
"def click_login_button(self):",
"def if_the_login_page_appears_enter_root_and_testing(driver, user, password):\n if not is_element_present(driver, '//mat-list-item[@ix-auto=\"option__Dashboard\"]'):\n assert wait_on_element(driver, 10, '//input[@data-placeholder=\"Username\"]')\n driver.find_element_by_xpath('//input[@data-placeholder=\"Username\"]').clear()\n driver.find_element_by_xpath('//input[@data-placeholder=\"Username\"]').send_keys(user)\n driver.find_element_by_xpath('//input[@data-placeholder=\"Password\"]').clear()\n driver.find_element_by_xpath('//input[@data-placeholder=\"Password\"]').send_keys(password)\n assert wait_on_element(driver, 5, '//button[@name=\"signin_button\"]', 'clickable')\n driver.find_element_by_xpath('//button[@name=\"signin_button\"]').click()\n else:\n driver.find_element_by_xpath('//mat-list-item[@ix-auto=\"option__Dashboard\"]').click()",
"def validate_login():\n # Locating the button on the top navigation bar\n button_login = My.search_clickable_webelement(\n driver, By.XPATH, \"//*[@id='ypgBody']/div[1]/header/div/div/div/div/div[3]/ul/li[5]\")\n assert button_login\n button_login.click()\n\n # Validating that the pop up window is present\n window = My.search_presence_webelement(driver, By.XPATH, \"//*[@id='ypModal']/div/div\")\n assert window",
"def test_login(self):\n # Open the admin index page\n self.open(reverse('admin:index'))\n\n # Selenium knows it has to wait for page loads (except for AJAX requests)\n # so we don't need to do anything about that, and can just\n # call find_css. Since we can chain methods, we can\n # call the built-in send_keys method right away to change the\n # value of the field\n self.wd.find_css('#id_username').send_keys(\"admin\")\n # for the password, we can now just call find_css since we know the page\n # has been rendered\n self.wd.find_css(\"#id_password\").send_keys('pw')\n # You're not limited to CSS selectors only, check\n # http://seleniumhq.org/docs/03_webdriver.html for\n # a more compreehensive documentation.\n self.wd.find_element_by_xpath('//input[@value=\"Log in\"]').click()\n # Again, after submiting the form, we'll use the find_css helper\n # method and pass as a CSS selector, an id that will only exist\n # on the index page and not the login page\n self.wd.find_css(\"#content-main\")",
"def test_login_page_has_form(testapp):\n html = testapp.get('/login').html\n assert len(html.find_all('input'))",
"def test_login_field(self):\n field = self.record.find('field[@name=\\'login\\']')\n self.assertEqual(field.text, 'adt', 'Incorrect login Field')",
"def test_index_successful_login():\n driver = webdriver.Chrome()\n driver.get('https://cmput404group10.herokuapp.com')\n\n username = driver.find_element_by_id(\"username\")\n username.send_keys(\"ronWeasley\")\n\n password = driver.find_element_by_name(\"password\")\n password.send_keys(\"ualberta123\")\n\n driver.find_element_by_xpath('/html/body/div/div/div/div/div/div/div[2]/div/form/div[3]/button').click()\n\n header = driver.find_element_by_xpath('//*[@id=\"accordionSidebar\"]/a/div')\n assert header is not None",
"def test_username_not_exist(self):\n\n url_extend = 'user_auth/login/'\n # get the first input button under the first form in login page.\n username = 'usersomerandomeuser'\n password = 'user'\n login_button = login(self.browser, self.url + url_extend, username, password)\n try:\n login_button.click()\n except:\n raise Exception(\"Login Error!\")\n\n ## check the current url\n assert self.browser.current_url == self.url + url_extend",
"def test_login(self):\n self.facebook_page.login()\n self.assertIsNotNone(self.facebook_page.webdriver.find_element_by_name('requests'))",
"def login(self, username, password):\n\n # username\n if self.phone:\n user_elem = self._find_element(class_name='UIATextField')\n else:\n for e in self.driver.find_elements_by_xpath(\"//UIATextField[@value='Email']\"):\n if e.is_displayed():\n user_elem = e\n break\n\n user_elem.click()\n self.send_keys(element=user_elem, data=username)\n\n # password\n if self.phone:\n pwd_elem = self._find_element(class_name='UIASecureTextField')\n else:\n for e in self.driver.find_elements_by_xpath(\"//UIASecureTextField[@value='Password']\"):\n if e.is_displayed():\n pwd_elem = e\n break\n\n pwd_elem.click()\n self.send_keys(element=pwd_elem, data=password)\n\n # sign in button\n if self.phone:\n sign_in_button = self._find_element(xpath=\"//UIAButton[@name='SIGN IN']\")\n else:\n for e in self.driver.find_elements_by_xpath(\"//UIAButton[@name='SIGN IN']\"):\n if e.is_displayed():\n sign_in_button = e\n break\n\n sleep(3)\n sign_in_button.click()\n\n # continue_button = self.exists(accessibility_id='CONTINUE', timeout=300)\n # agreement = \"By registering you become a member of the CBS Interactive family of sites and you have read and agree to the Terms of Use, Privacy Policy, and Video Services Policy. You agree to receive updates, alerts and promotions from CBS and that CBS may share information about you with our marketing partners so that they may contact you by email or otherwise about their products or services.\"\n # agreement_elem = self._find_element(accessibility_id=agreement)\n # loc = agreement_elem.location\n #\n # for button in self.driver.find_elements_by_xpath(\"//UIAButton[@name='']\"):\n # button_loc = button.location\n # loc_y_above = loc['y'] - 15\n # loc_y_below = loc['y'] + 15\n # if (button_loc['x'] < loc['x'] and\n # loc_y_above < button_loc['y'] < loc_y_below):\n # self.click_by_location(button)\n # break\n\n # continue_button = self.exists(accessibility_id='CONTINUE', timeout=300)\n # loc = continue_button.location\n #\n # for button in self.driver.find_elements_by_xpath(\"//UIAButton[@name='']\"):\n # button_loc = button.location\n # if (button_loc['x'] < loc['x'] and\n # button_loc['y'] < loc['y']):\n # self.click_by_location(button)\n # sleep(1)\n # break\n\n continue_button = self.exists(accessibility_id='CONTINUE', timeout=180)\n\n if continue_button:\n for button in self.driver.find_elements_by_xpath(\"//UIAButton[@name='']\"):\n try:\n button.click()\n except Exception:\n pass\n\n self.safe_screenshot()\n continue_button.click()\n\n # wait for the login to happen\n self.not_exists(accessibility_id='CONTINUE', timeout=300)\n\n self.goto_settings()\n self.assertTrueWithScreenShot(self.exists(accessibility_id='Sign Out', timeout=0),\n screenshot=True,\n msg=\"Verify 'Sign Out' button on Settings page.\")",
"def test_valid_login_form_but_failed_authentication(self):\n\n\n\t\tpass",
"def test_login_page_loads(self):\n response = self.client.get('/users/login')\n self.assertIn(b'Please login', response.data)",
"def if_login_page_appear_enter_user_and_password(driver, user, password):\n if not is_element_present(driver, xpaths.side_Menu.dashboard):\n assert wait_on_element(driver, 7, xpaths.login.user_Input)\n driver.find_element_by_xpath(xpaths.login.user_Input).clear()\n driver.find_element_by_xpath(xpaths.login.user_Input).send_keys(user)\n driver.find_element_by_xpath(xpaths.login.password_Input).clear()\n driver.find_element_by_xpath(xpaths.login.password_Input).send_keys(password)\n assert wait_on_element(driver, 7, xpaths.login.signin_Button, 'clickable')\n driver.find_element_by_xpath(xpaths.login.signin_Button).click()\n else:\n assert wait_on_element(driver, 10, xpaths.side_Menu.dashboard, 'clickable')\n driver.find_element_by_xpath(xpaths.side_Menu.dashboard).click()",
"def test_unsuccessful_login():\n driver = webdriver.Chrome()\n driver.get('https://cmput404group10.herokuapp.com')\n\n username = driver.find_element_by_id(\"username\")\n username.send_keys(\"yar\")\n\n password = driver.find_element_by_name(\"password\")\n password.send_keys(\"jessica123\")\n\n driver.find_element_by_xpath('/html/body/div/div/div/div/div/div/div[2]/div/form/div[3]/button').click()\n\n exists = driver.find_element_by_xpath('/html/body/div/div/div/div/div/div/div[2]/div/form/div[4]/ul/li')\n assert \"Sorry, the username and password could not be found.\" == exists.text",
"def test_login():\n My.search_merchant_page(driver, My.Testing_Env_EN)\n validate_login()\n print('----------')\n My.search_merchant_page(driver, My.Testing_Env_FR)\n validate_login()\n driver.quit()",
"def test_Login(self):\n\n driver = self.driver\n facebook_username = eval(input(\"\\nEnter Your FaceBook Username:\"))\n facebook_password = eval(input(\"\\nEnter Your FaceBook Password:\"))\n emailFieldId = \"email\"\n pwdFieldId = \"pass\"\n loginButtonXpath = \"//input[@value='Log In']\"\n fbLogoPath = \"(//a[contains(@href,'logo')])[1]\"\n\n emailFieldElement = WebDriverWait(driver, 10).until(\n lambda driver: driver.find_element_by_id(emailFieldId)\n )\n passwordFieldElement = WebDriverWait(driver, 10).until(\n lambda driver: driver.find_element_by_id(pwdFieldId)\n )\n loginButtonElement = WebDriverWait(driver, 10).until(\n lambda driver: driver.find_element_by_xpath(loginButtonXpath)\n )\n\n emailFieldElement.clear()\n emailFieldElement.send_keys(facebook_username)\n passwordFieldElement.clear()\n passwordFieldElement.send_keys(facebook_password)\n loginButtonElement.click()\n\n WebDriverWait(driver, 10).until(\n lambda driver: driver.find_element_by_xpath(fbLogoPath)\n )",
"def test_4_create_new_sign_up(self):\n self.log.debug(\"Click on Sign up link\")\n self.page.click_on_sign_up_link()\n\n self.log.debug(\"Enter value in user name field\")\n username_txt_css = setting.get('Locators', 'username_txt_css')\n print username_txt_css\n username_value = setting.get('Input_Value', 'username_value')\n username_placeholder = setting.get('Input_Value', 'username_placeholder')\n self.page.enter_value_in_txt_field(username_value,username_placeholder, username_txt_css)\n\n self.log.debug(\"Enter value in Email field\")\n email_txt_css = setting.get('Locators', 'email_txt_css')\n email_value = setting.get('Input_Value', 'email_value')\n email_placeholder = setting.get('Input_Value', 'email_placeholder')\n self.page.enter_value_in_txt_field(email_value,email_placeholder,email_txt_css)\n\n self.log.debug(\"Enter value in Password field\")\n password_txt_css = setting.get('Locators', 'password_txt_css')\n password_value = setting.get('Input_Value', 'password_value')\n password_placeholder = setting.get('Input_Value', 'password_placeholder')\n self.page.enter_value_in_txt_field(password_value,password_placeholder,password_txt_css)\n\n self.log.debug(\"Enter value in repeat password field\")\n repeat_password_txt_css = setting.get('Locators', 'repeat_password_txt_css')\n repeat_password_value = setting.get('Input_Value', 'repeat_password_value')\n repeat_password_placeholder = setting.get('Input_Value', 'repeat_password_placeholder')\n self.page.enter_value_in_txt_field(repeat_password_value,repeat_password_placeholder,repeat_password_txt_css)\n\n self.log.debug(\"Click on privacy link and validate privacy page\")\n privacy_link = setting.get('Locators', 'privacy_link')\n privacy_landing_page_css = setting.get('Locators', 'privacy_landing_page_css')\n self.page.click_on_link(privacy_link, privacy_landing_page_css)\n\n self.log.debug(\"Select age checkbox\")\n age_confirm_check_box = setting.get('Locators', 'age_confirm_check_box')\n self.page.select_checkbox(age_confirm_check_box)\n\n self.log.debug(\"Select agreement checkbox\")\n agreement_checkbox = setting.get('Locators', 'agreement_checkbox')\n self.page.select_checkbox(agreement_checkbox)\n\n self.log.debug(\"Select mail system checkbox\")\n mailing_system_checkbox = setting.get('Locators', 'mailing_system_checkbox')\n self.page.select_checkbox(mailing_system_checkbox)\n\n self.log.debug(\"Select mail product checkbox\")\n mailing_product_checkbox = setting.get('Locators', 'mailing_product_checkbox')\n self.page.select_checkbox(mailing_product_checkbox)\n\n self.log.debug(\"Select mail news checkbox\")\n mailing_news_checkbox = setting.get('Locators', 'mailing_news_checkbox')\n self.page.select_checkbox(mailing_news_checkbox)\n\n self.log.debug(\"Click on Submit account button\")\n self.page.click_create_account_button()",
"def test_SQL_injection(self):\n main_page = loginPage.MainPage(self.driver)\n self.assertEqual(main_page.is_title_matches(), \"Example Login Page\")\n main_page.fill_in_credentials(\"'or''='\",'mode')\n main_page.click_login_button()\n self.assertEqual(main_page.is_title_matches(), \"Login error\")",
"def login(self):\n driver = self.selenium_test.driver\n driver.get(self.selenium_test.get_server_url())\n self.selenium_test.wait_fn(self.preenche_username)\n driver.find_element_by_id('btnlogin').click()\n self.selenium_test.wait_to_be_logged_in()",
"def login(self):\n try:\n self.driver = webdriver.Ie()\n self.driver.maximize_window()\n self.driver.get(self.url_login)\n self.driver.find_element_by_id('txtName').send_keys(self.username)\n self.driver.find_element_by_id('txtPassword').send_keys(self.password)\n self.driver.execute_script('frmLogin.action = \"login.aspx?action=login\";frmLogin.submit();')\n # Add/Edit User\n self.wait = WebDriverWait(self.driver, 10)\n self.wait.until(EC.element_to_be_clickable((By.ID, 'a151'))) # Add edit user button\n time.sleep(1)\n return\n except Exception:\n raise",
"def wait_for_the_login_page_to_appear(driver):\n # to make sure the UI is refresh for the login page\n assert wait_on_element(driver, 240, '//input[@data-placeholder=\"Username\"]')\n assert wait_on_element(driver, 240, '//p[text()=\"HA is enabled.\"]')",
"def test_login_url(self):\r\n res = self.testapp.get('/login', status=200)\r\n\r\n body_str = u\"Log In\"\r\n form_str = u'name=\"login\"'\r\n\r\n self.assertTrue(\r\n body_str in res.body,\r\n msg=\"Request should contain Log In: \" + res.body)\r\n\r\n # There should be a login form on there.\r\n self.assertTrue(\r\n form_str in res.body,\r\n msg=\"The login input should be visible in the body:\" + res.body)",
"def test_02_account_login(self):\n self.login(email='[email protected]', password='Abcd@1234')\n self.assertEquals(\n self.selenium.current_url, self.get_absolute_url())\n print 'valid login test completed'",
"def test_professor_can_login_to_web_portal(professor):",
"def test_login_to_youtube(self):\n\n # Finding elements on the page and actions.\n self.wait_clickable_by_css(\n \"#buttons a > .style-scope.ytd-button-renderer\"\n \".style-suggestive.size-small[role='button']\"\n ).click()\n self.wait_clickable_by_id(\"identifierId\").send_keys(config.USER1[\"email\"])\n self.wait_clickable_by_id(\"identifierNext\").click()\n self.wait_invisibility_by_id(\"identifierId\")\n self.wait_clickable_by_css(\".whsOnd.zHQkBf\").send_keys(config.USER1[\"password\"])\n self.wait_clickable_by_id(\"passwordNext\").click()\n\n try:\n self.wait_clickable_by_css(\".ZFr60d.CeoRYc\").click()\n\n except:\n\n pass\n\n # Waiting for button to appear.\n self.wait_visibility_by_css(\"#avatar-btn\")\n self.make_screenshot()\n print(\"Test 1: User is successfully logged in.\")"
]
| [
"0.81608397",
"0.7807469",
"0.7750099",
"0.7648665",
"0.7499904",
"0.74693316",
"0.7445929",
"0.7401844",
"0.7391671",
"0.73524094",
"0.73394597",
"0.7300667",
"0.72945255",
"0.7244298",
"0.7213896",
"0.7185873",
"0.7171691",
"0.7159399",
"0.7155342",
"0.7133687",
"0.7129667",
"0.71255505",
"0.70879966",
"0.70666265",
"0.70420456",
"0.7012021",
"0.70094055",
"0.6957092",
"0.69554025",
"0.69525677"
]
| 0.8222996 | 0 |
Check that a valid username with an invalid password should not be able to log in | def test_valid_username_invalid_password(self):
response = self.client.post(reverse('users:login'), {'username': self.user['username'], 'password': '1sfsdf'})
self.assertEqual(response.status_code, 200)
self.assertFormError(response, 'form', None, ERROR_MSG) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_authentication(self, username, password):\n return self.user_table[username]['pwd'] == password",
"def test_auth_user_fail_bad_username(self):\n\n self.assertFalse(User.authenticate(\"invalid\", \"allison\"))",
"def check_auth(username, password, expected_user, expected_pw):\n return username == expected_user and password == expected_pw",
"def validate_login(self, username, password):\n user = User(self).get(username)\n return user and user['Password'] == password",
"def is_valid_login(self, username, password):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_IS_LOGIN_INFORMATION_VALID, username + '|' + password)",
"def test_invalid_username_valid_password(self):\n response = self.client.post(reverse('users:login'), {'username': 'xyzabe', 'password': self.user['password1']})\n self.assertEqual(response.status_code, 200)\n self.assertFormError(response, 'form', None, ERROR_MSG)",
"def test_login_wrong_credentials(self):\n\t\tdata = {'username' : 'nonexistentuser', 'password' : 'nopasswordlol'}\n\t\tresponse = self.login(data)\n\n\t\terror_text = \"Unable to log in with provided credentials.\"\n\n\t\tself.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\t\ttry:\n\t\t\tif error_text not in response.data[\"non_field_errors\"]:\n\t\t\t\tself.fail(\"Error text must be : '\" + error_text + \"'\")\n\t\texcept AttributeError:\n\t\t\tself.fail(\"There must be at least one entry in 'non_field_errors'\")",
"def testLoginBadUsernameAndPassword(self):\n self.assertEquals(models.SUCCESS, self.users.add(\"userJ\", \"password\"))\n self.assertEquals(models.ERR_BAD_CREDENTIALS, self.users.login(\"nobody_user\", \"nobody_password\"))",
"def test_incorrect_username(self):\n input = (\"\", \"password\")\n if is_travis():\n self.login_test(*input, True)\n else:\n self.login_test(*input)",
"def test_invalid_username_invalid_password(self):\n response = self.client.post(\n reverse('users:login'), {\n 'username': self.create_user_data()['username'],\n 'password': self.create_user_data()['password1']\n }\n )\n self.assertEqual(response.status_code, 200)\n self.assertFormError(response, 'form', None, ERROR_MSG)",
"def validate_authentication(self, username, password, handler):\n hash = md5(password).hexdigest()\n msg = \"Authentication failed.\"\n if not self.has_user(username):\n if username == 'anonymous':\n msg = \"Anonymous access not allowed.\"\n raise AuthenticationFailed(msg)\n if username != 'anonymous':\n if self.user_table[username]['pwd'] != hash:\n raise AuthenticationFailed(msg)",
"def test_user_authenticate_invalid_username(self):\n\n user = User.authenticate(\"wrong_username\", \"password\")\n\n self.assertEqual(user, False)",
"def test_authenticate_invalid_username(self):\r\n print(\"Authenticate user invalid username\")\r\n username = \"test9999user\"\r\n password = \"password\"\r\n\r\n self.assertEqual(self.system.authenticate(username, password), False)",
"def test_auth_user_fail_bad_password(self):\n\n self.assertFalse(User.authenticate(self.user1.username, \"invalid\"))",
"def is_logged_in_user_valid(user_name, password):\n if user_name.upper() == \"HELLO\" and password == \"World\":\n return True # User input matches user name and password.\n else:\n return False # User input does not match user name and password.s",
"def is_correct_user(self, login, password):\n pass",
"def test_wrong_login_input(self):\n self.user.list_of_accounts = [{'username': 'dalton',\n 'pwd': 'chromelegend',\n 'email': '[email protected]'}]\n msg = self.user.login(\"[email protected]\", \"legendchrome\")\n self.assertEqual(msg, \"Invalid email, password combination\")",
"def check_auth(username, password):\n return username == USERNAME and password == PASSWORD",
"def check_auth_password(self, username, password):\n return AUTH_FAILED",
"def check_auth(username, password):\n return username == 'admin' and password == 'password'",
"def test_valid_login_form_but_failed_authentication(self):\n\n\n\t\tpass",
"def check_auth(username, password):\n return username == 'nicholas' and password == ADMIN_PASS",
"def check_auth(username, password):\n return username == 'admin' and password == 'worcester'",
"def clean(self):\n cleaned_data = super().clean()\n username = cleaned_data['username']\n password = cleaned_data['password']\n\n if authenticate(username=username, password=password) is None:\n raise ValidationError('Your username or password is incorrect.')",
"def check_auth(username, password):\n return username == 'admin' and password == 'pebble'",
"def check_auth(username, password):\n return username == 'aweber' and password == 'aweber1100'",
"def test_credentials(self):\r\n data = self._deep_clean('[email protected]')\r\n error = data.get(ERROR_CODE, None)\r\n if error in (1,2):\r\n raise InvalidCredentialsError(\"Credentials are invalid for user '{}'\".format(self._username))\r\n return True",
"def check_auth(username, password):\n return username == 'admin' and password == 'admin'",
"def check_auth(username, password):\n return username == 'jeffkoons' and password == 'likesweirdbaloons'",
"def invalid_credentials( form , field ): \n\tusername_entered = form.username.data\n\tpassword_entered = field.data \n\tuser_object = User.query.filter_by(username = username_entered).first()\n\tif user_object is None : \n\t\traise ValidationError(\"Username or Password is incorrect !\")\n\telif not pbkdf2_sha256.verify(password_entered , user_object.password) : \n\t\traise ValidationError(\"Username or Password is incorrect !\")"
]
| [
"0.7802883",
"0.778277",
"0.7756373",
"0.7752627",
"0.7586923",
"0.75375336",
"0.7489758",
"0.7483796",
"0.74812096",
"0.7474361",
"0.74624926",
"0.7450919",
"0.74466735",
"0.7442546",
"0.74277145",
"0.7423208",
"0.74196374",
"0.74130267",
"0.7404624",
"0.7398643",
"0.73966765",
"0.739318",
"0.73885375",
"0.7382583",
"0.73676443",
"0.7363194",
"0.73543376",
"0.7346598",
"0.7344426",
"0.73354495"
]
| 0.7786559 | 1 |
Check that a blank username and a blank password should not be able to log in | def test_blank_username_blank_password(self):
response = self.client.post(reverse('users:login'), {'username': '', 'password': ''})
self.assertEqual(response.status_code, 200)
self.assertFormError(response, 'form', 'username', 'This field is required.')
self.assertFormError(response, 'form', 'password', 'This field is required.') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_login_empty_username(self):\n self.client.post('api/v2/auth/signup', json=self.user,\n headers={'Content-Type': 'application/json'})\n\n res_other = self.client.post('/api/v2/auth/login', json={\n 'username': None, 'password': 'mikemike'}, headers={'Content-Type': 'application/json'})\n data_other = res_other.get_json()\n\n self.assertEqual(res_other.status_code, 400)\n self.assertEqual(data_other['message'], 'Invalid data. Please fill all required fields')",
"def test_incorrect_username(self):\n input = (\"\", \"password\")\n if is_travis():\n self.login_test(*input, True)\n else:\n self.login_test(*input)",
"def validate_login(self, username, password):\n user = User(self).get(username)\n return user and user['Password'] == password",
"def clean(self):\n cleaned_data = super().clean()\n username = cleaned_data['username']\n password = cleaned_data['password']\n\n if authenticate(username=username, password=password) is None:\n raise ValidationError('Your username or password is incorrect.')",
"def test_login_empty_password(self):\n self.client.post('api/v2/auth/signup', json=self.user,\n headers={'Content-Type': 'application/json'})\n\n res_other = self.client.post('/api/v2/auth/login', json={\n 'username': \"SirMike\", 'password': None}, headers={'Content-Type': 'application/json'})\n data_other = res_other.get_json()\n\n self.assertEqual(res_other.status_code, 400)\n self.assertEqual(data_other['message'], 'Invalid data. Please fill all required fields')",
"def check_auth(username, password, expected_user, expected_pw):\n return username == expected_user and password == expected_pw",
"def test_valid_username_invalid_password(self):\n response = self.client.post(reverse('users:login'), {'username': self.user['username'], 'password': '1sfsdf'})\n self.assertEqual(response.status_code, 200)\n self.assertFormError(response, 'form', None, ERROR_MSG)",
"def test_login_missing_username(self):\n resp = self.client.post(\n reverse('login'),\n json.dumps({\n \"password\": \"pass\",\n }),\n content_type=\"application/json\"\n )\n assert resp.status_code == 400, resp.content.decode('utf-8')\n assert not self.is_authenticated(self.user)",
"def check_auth_none(self, username):\n return AUTH_FAILED",
"def can_log_in_without_cas(self):\n return self.password is not None and self.password != \"\"",
"def check_auth(username, password):\n # return username == app.config['USER'] and password == app.config['PASS']\n\n return username == app.config['USER'] and password == app.config['PASS']",
"def validate_authentication(self, username, password):\n return self.user_table[username]['pwd'] == password",
"def test_authenticate_invalid_username(self):\r\n print(\"Authenticate user invalid username\")\r\n username = \"test9999user\"\r\n password = \"password\"\r\n\r\n self.assertEqual(self.system.authenticate(username, password), False)",
"def check_auth(username, password):\n return username == app.config['USERNAME'] and (\n password == app.config['PASSWORD'])",
"def clean(self):\n\n clean = super(LoginForm, self).clean()\n username = clean.get(\"username\")\n password = clean.get(\"password\")\n\n if (username and password):\n if User.objects.filter(username=username).exists():\n user = authenticate(username=username, password=password)\n if not(user and user.is_active):\n self._errors['username'] = self.error_class([\n \"Username and Password you enterd do not match\"])\n raise forms.ValidationError(\"\")\n\n else:\n self._errors['username'] = self.error_class([\n \"That username does not exist in our database\"])\n raise forms.ValidationError(\"\")\n\n return clean",
"def test_login_missing_password(self):\n response = self.client.post('/api/v2/auth/login',\n data=json.dumps(users[7]),\n content_type='application/json')\n self.assertEqual(response.status_code, 400)\n assert 'Missing required parameter ', str(response.data)",
"def check_auth(username, password):\n return username == 'admin' and password == 'password'",
"def test_authenticate_user_with_no_username(self):\n data = {\n 'username': '',\n 'password': 'testpassword'\n }\n response = self.client.post(self.authenticate_url, data, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(response.data['username']), 1)",
"def is_valid_login(self, username, password):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_IS_LOGIN_INFORMATION_VALID, username + '|' + password)",
"def check_auth(username, password):\r\n return username == current_app.config['ADMIN_USERNAME'] \\\r\n and password == current_app.config['ADMIN_PASSWORD']",
"def check_auth(username, password):\n return username == 'admin' and password == 'worcester'",
"def check_auth(username, password):\n return (username == app.config['USERNAME'] and\n password == app.config['PASSWORD'])",
"def test_auth_user_fail_bad_username(self):\n\n self.assertFalse(User.authenticate(\"invalid\", \"allison\"))",
"def check_auth(username, password):\n return username == USERNAME and password == PASSWORD",
"def _check_user_pass(self):\n if not self.username:\n self.username = input(' 请输入手机号:')\n if self.username.isdigit() and '+86' not in self.username:\n self.username = '+86' + self.username\n\n if not self.password:\n self.password = input(' 请输入密码:')",
"def is_correct_user(self, login, password):\n pass",
"def check_auth(username, password):\n return username == 'admin' and password == 'root'",
"def check_auth(username, password):\n return username == 'admin' and password == 'Passw0rd'",
"def check_auth(username, password):\n return username == 'admin' and password == 'admin'",
"def test_login_with_bad_username(self):\n resp = self.client.post(\n reverse('login'),\n json.dumps({\n \"username\": \"\",\n \"password\": self.PASSWORD,\n }),\n content_type=\"application/json\"\n )\n assert resp.status_code == 403, resp.content.decode('utf-8')\n assert not self.is_authenticated(self.user)"
]
| [
"0.7467626",
"0.7417053",
"0.73873866",
"0.73845154",
"0.7267287",
"0.7184474",
"0.7178433",
"0.7175828",
"0.7163327",
"0.71527857",
"0.71351755",
"0.71197534",
"0.70886356",
"0.7087536",
"0.70774215",
"0.7072684",
"0.7067899",
"0.70463395",
"0.7042235",
"0.7033119",
"0.70229065",
"0.7022398",
"0.70187277",
"0.7008216",
"0.69996953",
"0.69912094",
"0.6991156",
"0.6989328",
"0.69890547",
"0.6983199"
]
| 0.76699674 | 0 |
Retrieves all info needed for the worker and puts it into a dictionary | def get_task_worker(self):
start, end = self.get_block()
return {
'task_id':self.id,
'finished':self.finished,
'free_block':(start != end),
'keyword':self.keyword,
'chars':self.chars,
'algorithm':self.algorithm,
'start_point':start,
'end_point':end
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def read_worker_metadata(self) -> Dict[str, Any]:\n response = await self._client.get(\"collections/views/aggregate-worker-metadata\")\n response.raise_for_status()\n return response.json()",
"def get_information(self):\n info_dict = dict()\n info_dict['run'] = self._runNumber\n info_dict['reduced'] = self._isReduced\n if self._slicerKey is None:\n # regular reduced data\n info_dict['slicer_key'] = None\n else:\n # chopped run\n info_dict['slicer_key'] = self._slicerKey\n info_dict['workspaces'] = self._choppedWorkspaceNameList[:]\n info_dict['raw_files'] = self._choppedNeXusFileList[:]\n if self._reducedFiles is not None:\n info_dict['files'] = self._reducedFiles[:]\n else:\n info_dict['files'] = None\n\n return info_dict",
"def getWorker(self):\n pass",
"def info(self) -> dict:",
"def fixture_info(self):\n stats_list = []\n print(\"Getting fixture info..\")\n with Pool(self.pool) as p:\n fixture_info = list(tqdm(p.imap(self.fixture_info_singel, self.fixture_ids, chunksize=1), total=len(self.fixture_ids)))\n print('Getting data from workers..')\n i = 0\n for info in fixture_info:\n stats = {}\n if info:\n stats = info\n else:\n i += 1\n if stats:\n stats_list.append(stats)\n\n print('Completed')\n if i >0:\n print(f'{i} games retreived had no stats')\n self.save_completed('fixtureinfo', stats_list, StorageConfig.STATS_DIR)",
"def get_info(self):\n return {}",
"def get_stats():\r\n stats = {\r\n \"progress_precent\": 100.0*finished_work_units_amount/work_units_amount,\r\n \"results\": None if work_status == Db.WorkStatusNames.finished_work.value else Db.collect_results(),\r\n #If it's already finished, then all the results were already sent to the main server.\r\n }\r\n return stats",
"def get_worker_list(self):\n return [{WORKER_ID_KEY: worker_id, REGISTRATION_STATUS_KEY: value}\n for worker_id, value in self.registered_workers.items()]",
"async def _fetch_data(self) -> JobInfo:\n return await self.api.get_job()",
"def info() -> Dict[str, Any]:",
"def get_job_dict(self, selector):\n pass",
"def get_info(self) -> Optional[Dict[str, Any]]:",
"def get(self):\n\t\treturn {\n\t\t\t'system': self.get_system_information(),\n\t\t\t'cpu': self.get_cpu_stats(),\n\t\t\t'gpu': self.get_gpu_stats(),\n\t\t\t'ram': self.get_ram_stats(),\n\t\t\t'storage': self.get_storage_stats(),\n\t\t\t'battery': self.get_battery_stats(),\n\t\t\t'temps': self.get_temperatures()\n\t\t}",
"def get_worker(self, worker_id: str) -> Mapping[str, Any]:\n return self.__get_one_by_id(\"workers\", \"worker_id\", worker_id)",
"def build_info(self) -> Dict[str, str]:\n manager_bl = self.safe_buildinfo(\"/api/buildinfo\")\n gateway_bl = self.safe_buildinfo(\"/gateway/buildinfo\")\n monitoring_bl = self.safe_buildinfo(\"/monitoring/buildinfo\")\n rootcause_bl = self.safe_buildinfo(\"/rootcause/buildinfo\")\n visualization_bl = self.safe_buildinfo(\"/visualization/buildinfo\")\n stat_bl = self.safe_buildinfo(\"/stat/buildinfo\")\n return {\n \"manager\": manager_bl,\n \"gateway\": gateway_bl,\n \"monitoring\": monitoring_bl,\n \"rootcase\": rootcause_bl,\n \"visualization\": visualization_bl,\n \"stat\": stat_bl,\n }",
"def getInfo():",
"def getwork(self, data: Optional[str] = None) -> Dict[str, Any]:\n assert data is None or type(data) == str\n return self.rpc_call(\"getwork\", data)",
"def get_job_details():\n job = dict()\n job['dids'] = json.loads(os.getenv('DIDS', None))\n job['metadata'] = dict()\n job['files'] = dict()\n job['algo'] = dict()\n job['secret'] = os.getenv('secret', None)\n algo_did = os.getenv('TRANSFORMATION_DID', None)\n if job['dids'] is not None:\n for did in job['dids']:\n # get the ddo from disk\n filename = '/data/ddos/' + did\n print(f'Reading json from {filename}')\n with open(filename) as json_file:\n ddo = json.load(json_file)\n # search for metadata service\n for service in ddo['service']:\n if service['type'] == 'metadata':\n job['files'][did] = list()\n index = 0\n for file in service['attributes']['main']['files']:\n job['files'][did].append(\n '/data/inputs/' + did + '/' + str(index))\n index = index + 1\n if algo_did is not None:\n job['algo']['did'] = algo_did\n job['algo']['ddo_path'] = '/data/ddos/' + algo_did\n return job",
"def get_job_details():\n job = dict()\n job['dids'] = json.loads(os.getenv('DIDS', None))\n job['metadata'] = dict()\n job['files'] = dict()\n job['algo'] = dict()\n job['secret'] = os.getenv('secret', None)\n algo_did = os.getenv('TRANSFORMATION_DID', None)\n if job['dids'] is not None:\n for did in job['dids']:\n # get the ddo from disk\n filename = '/data/ddos/' + did\n print(f'Reading json from {filename}')\n with open(filename) as json_file:\n ddo = json.load(json_file)\n # search for metadata service\n for service in ddo['service']:\n if service['type'] == 'metadata':\n job['files'][did] = list()\n index = 0\n for file in service['attributes']['main']['files']:\n job['files'][did].append(\n '/data/inputs/' + did + '/' + str(index))\n index = index + 1\n if algo_did is not None:\n job['algo']['did'] = algo_did\n job['algo']['ddo_path'] = '/data/ddos/' + algo_did\n return job",
"def get_cluster_info(self) -> Dict[str, Any]:\n pass",
"def info(self):\n import string\n results = self.info_list()\n labels = \"%-8s %-9s %-4s %-8s %-8s %-4s\" % \\\n ('MACHINE','CPU','GHZ','MB TOTAL',\n 'MB FREE','LOAD')\n print labels\n for i in range(len(self.workers)):\n name = string.split(self.workers[i].host,'.')[0]\n res = results[i]\n s = \"%-8s %2dx%-6s %4.1f %8.1f %8.1f %4.2f\" % \\\n (name[-8:], res['cpu_count'],res['cpu_type'][-6:], \\\n res['cpu_speed'],res['mem_total'],res['mem_free'],\\\n res['load_1'])\n print s",
"def fetchObjects(self):\n try:\n for i in service.Service.get_workers():\n yield i\n except Exception as e:\n Events.Status.emit(f\"unable to fetch worker information: {e}\")",
"def get_info(self): \n return {\n \"ident\": self.ident,\n \"interval\": self._interval,\n \"exception\": self._exception,\n \"execute\": self._execute,\n \"args\": self._args,\n \"kwargs\": self._kwargs}",
"def t_info_process(self, *args, **kwargs):\n\n self.dp.qprint(\"In info process...\")\n\n d_request = {}\n d_ret = {}\n b_status = False\n hits = 0\n for k, v in kwargs.items():\n if k == 'request': d_request = v\n\n d_search = self.t_search_process(request = d_request)['d_ret']\n\n p = self._ptree\n for j in d_search.keys():\n d_j = d_search[j]\n for job in d_j.keys():\n str_pathStart = '/api/v1/' + job + '/startInfo'\n str_pathEnd = '/api/v1/' + job + '/endInfo'\n d_ret[str(hits)+'.0'] = {}\n d_ret[str(hits)+'.0'] = self.DB_get(path = str_pathStart)\n d_ret[str(hits)+'.1'] = {}\n d_ret[str(hits)+'.1'] = self.DB_get(path = str_pathEnd)\n hits += 1\n if not hits:\n d_ret = {\n \"-1\": {\n \"noJobFound\": {\n \"endInfo\": {\"allJobsDone\": None}\n }\n }\n }\n else:\n b_status = True\n return {\"d_ret\": d_ret,\n \"status\": b_status}",
"def getJobDict(self):\n c = \"/cli:python /app:matrix /cmd:getinfo /dev:joblist\"\n self.sendCMDstring(c)\n time.sleep(self.delay)\n answers = self.readandparseCAM()\n joblist = {}\n for a in answers:\n if a['dev']=='joblist':\n for i in range(int(a['count'])):\n nr = a['jobid' +str(i+1)]\n name = a['jobname' +str(i+1)].lower()\n joblist[name]=nr\n else:\n print \"no joblist in answers\"\n return joblist",
"def get_task_info(self):\n\n print()\n employee_name = self.task.get_employee_name()\n task_name = self.task.get_task_name()\n mins = self.task.get_time_spent()\n notes = self.task.get_notes()\n date = self.task.get_date()\n\n task = {\n 'employee_name': employee_name,\n 'task_name': task_name,\n 'mins': mins,\n 'notes': notes,\n 'date': date\n }\n\n return task",
"def run(self) -> Dict[str, Union[float, str]]:\n try:\n self.is_run = True\n deque(self, maxlen=0) # feed the entire iterator into a zero-length deque\n info = gather_info(\n self.start_time, self.train_collector, self.test_collector,\n self.best_reward, self.best_reward_std\n )\n finally:\n self.is_run = False\n\n return info",
"def info(self):\n return {}",
"def get_worker_nodes(self):\n worker_nodes_count = input('enter number of worker nodes\\n'\n 'default [2]: ')\n default = 2\n worker_nodes_count = set_values(worker_nodes_count, default, check='integer')\n worker_keys = ['name','ip','mac']\n self.inventory_dict['csah']['vars']['worker_nodes'] = []\n for num in range(worker_nodes_count):\n worker_values = []\n default = 'worker-{}'.format(num)\n worker_name = input('enter the worker {} node name\\n'\n 'default [{}]: '.format(num, default))\n worker_name = set_values(worker_name, default)\n worker_ip = get_ip(node_name=worker_name, ip_type='os')\n worker_mac = get_network_device_mac(node_name=worker_name, ip_type='idrac')\n worker_values.append(worker_name)\n worker_values.append(worker_ip)\n worker_values.append(worker_mac)\n worker_node_dict_pairs = dict(zip(worker_keys, worker_values))\n logging.info('adding {} values as name: {} ip: {} mac: {}'.format(worker_name, worker_name,\n worker_ip, worker_mac)) \n self.inventory_dict['csah']['vars']['worker_nodes'].append(worker_node_dict_pairs)\n self.clear_screen()\n self.inventory_dict['csah']['vars']['number_of_workers'] = worker_nodes_count",
"def get_stats(self): \n return dict(l.split('\\t', 1) \\\n for l in wait(self.proto.stat()).splitlines() if l)"
]
| [
"0.678594",
"0.6314804",
"0.61548144",
"0.60795724",
"0.6061691",
"0.60179424",
"0.60070586",
"0.6003615",
"0.6000852",
"0.5975921",
"0.5956673",
"0.59529394",
"0.5952804",
"0.59031546",
"0.5901462",
"0.58881104",
"0.58605975",
"0.58536565",
"0.58536565",
"0.5828623",
"0.5828349",
"0.5791624",
"0.57615167",
"0.5731367",
"0.57249075",
"0.5724364",
"0.5723612",
"0.57193935",
"0.56717426",
"0.5656823"
]
| 0.6506152 | 1 |
Gets the Updated job template right now changes from active to inactive. | def get_updated_jobtemplate(self):
return self.response_json | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Updated(self):\n return self._get_attr('Updated')",
"def getChanges():",
"def last_status_update(self):\n try:\n return StatusUpdate.objects.filter(section=self).latest(\"created_at\")\n except StatusUpdate.DoesNotExist:\n return None",
"def changes(self) -> dict:\n return self.config['changes']",
"def last_status_change(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"last_status_change\")",
"def job_templates(self):\n return self._tower.job_templates.filter({'project__exact': self.id})",
"def GetChanges(self):\n return self._changes",
"def last_update(self):\n # get modification time of QWC2 themes config file\n config_updated_at = None\n if os.path.isfile(self.themes_config_path):\n config_updated_at = datetime.utcfromtimestamp(\n os.path.getmtime(self.themes_config_path)\n )\n\n # create session for ConfigDB\n session = self.config_models.session()\n\n # query timestamp\n LastUpdate = self.config_models.model('last_update')\n query = session.query(LastUpdate.updated_at)\n last_update = query.first()\n if last_update is not None:\n if config_updated_at is not None:\n # use latest of both timestamps\n updated_at = max(last_update.updated_at, config_updated_at)\n else:\n # use timestamp from ConfigDB\n updated_at = last_update.updated_at\n else:\n # no entry in ConfigDB, use config timestamp or now\n updated_at = config_updated_at or datetime.utcnow()\n\n # close session\n session.close()\n\n return {\n 'permissions_updated_at': updated_at.strftime(\"%Y-%m-%d %H:%M:%S\")\n }",
"def modified(self):\n return self.properties.get(\"Modified\", datetime.min)",
"def get_working_status(self):\n #TODO: fix some issue on restarting and so on about current status\n return self.working_map[self.get_status()]",
"def get_change(self, ):\n return self.get_parameter('change')",
"def updated(self):\n return getattr(self, self.schema._updated.name, None)",
"def modified(self):\n return self._modified",
"def modified(self):\n return self._modified",
"def modified(self):\n return self._modified",
"def modified(self):\n return self._modified",
"def update_status(self) -> str:\n return pulumi.get(self, \"update_status\")",
"def was_modified(self):\n return self.modified",
"def get_job_applied():\n\n return JobCompletedApplication.query.all()",
"def modified(self):\n return self.__modified",
"def modified(self):\n return self.__modified",
"def updated(self) -> str:\n return self._updated",
"def getCompileStatus():\n logger.debug(\"[FLASKWEB] Retrieving current active compilation status\")\n\n jobs = compileService.getActiveState()\n title = \"Active Compiling Tasks\" if jobs else \"NO Active Compiling Jobs\"\n\n if request.headers['Accept'] == 'application/json':\n return jsonify(jobs), 200\n else:\n return render_template(\"keyvalue.html\", title=title, store=jobs)",
"def latest_job(self):\n return self.jobmanagers[self.current_network].latest_job",
"def is_update_active(self):\n return self._update_action",
"def diff(self):\n if self.event == 'Create':\n old = ''\n else:\n # Get the Change just ahead of _this_ change because that has the\n # state of the Resource before this Change occurred.\n # TODO(nickpegg): Get rid of this if we change the behavior of\n # Change to store the previous version of the object\n old_change = Change.objects.filter(\n change_at__lt=self.change_at,\n resource_id=self.resource_id,\n resource_name=self.resource_name\n ).order_by(\n '-change_at'\n ).first()\n old = json.dumps(old_change._resource, indent=2, sort_keys=True)\n\n if self.event == 'Delete':\n current = ''\n else:\n resource = apps.get_model(self._meta.app_label, self.resource_name)\n obj = resource.objects.get(pk=self.resource_id)\n\n serializer_class = self.get_serializer_for_resource(\n self.resource_name)\n serializer = serializer_class(obj)\n current = json.dumps(serializer.data, indent=2, sort_keys=True)\n\n diff = \"\\n\".join(difflib.ndiff(\n old.splitlines(),\n current.splitlines()\n ))\n\n return diff",
"def get_step_changes_after(\n project: 'projects.Project',\n timestamp: float,\n write_running: bool = False\n) -> typing.List[dict]:\n return [\n _get_step_changes(project, step, write_running)\n for step in project.steps\n if step.report.last_update_time >= timestamp\n or (step.last_modified or 0) >= timestamp\n ]",
"def _get_current_job_status(acq_tframes):\n cur_job = dict()\n if acq_tframes:\n cur_job['employer'] = f'{str_sep}'.join(\n {tframe.employer if tframe.category == 'O' else tframe.category for tframe in\n acq_tframes}).replace(',', '')\n cur_job['start'] = [tf.start for tf in acq_tframes][0]\n cur_job['end'] = sorted([tf.end for tf in acq_tframes])[-1]\n return cur_job",
"def get_initial(self):\n initial = {'user': self.request.user,\n 'start': timezone.now()}\n try:\n latest = Log.objects.latest()\n initial['job'] = latest.job\n except Log.DoesNotExist:\n pass\n\n return initial",
"def updates(self):\r\n return list(self.state_updates)"
]
| [
"0.5720681",
"0.5635518",
"0.5623788",
"0.5566456",
"0.55534095",
"0.5552222",
"0.5476115",
"0.5407608",
"0.5343847",
"0.5331713",
"0.53309363",
"0.5325143",
"0.5319297",
"0.5319297",
"0.5319297",
"0.5319297",
"0.5312014",
"0.5278158",
"0.52175415",
"0.52158743",
"0.52158743",
"0.5173634",
"0.5158782",
"0.5141392",
"0.510916",
"0.51036954",
"0.50964606",
"0.50776285",
"0.5072619",
"0.5056966"
]
| 0.73913294 | 0 |
Display all videos in a playlist with a given name. | def show_playlist(self, playlist_name):
if self.playlists[playlist_name.lower()]!=[]:
print(f"Showing playlist: {playlist_name}")
for i in self.playlists[playlist_name.lower()]:
videos = self._video_library.get_all_videos()
templist = []
def converttostr(input_seq, seperator):
# Join all the strings in list
final_str = seperator.join(input_seq)
return final_str
for vid in videos:
if i == vid.video_id:
templist.append([vid.title,vid.video_id,vid.tags])
print(f" {templist[0][0]} ({templist[0][1]}) [{converttostr(list(templist[0][2]), ' ')}]")
else:
print(f"Showing playlist: {playlist_name}")
print(" No videos here yet")
#print("show_playlist needs implementation") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_playlist(self, playlist_name):\n if playlist_name.lower() not in self._playlists:\n print(f\"Cannot show playlist {playlist_name}: Playlist does not exist\")\n return\n playlist = self._playlists[playlist_name.lower()]\n print(f\"Showing playlist: {playlist_name}\")\n if not playlist.videos:\n print(\"No videos here yet\")\n for video in playlist.videos:\n print(video)",
"def show_playlist(self, playlist_name):\n playlist_id = playlist_name.lower()\n if not playlist_id in self.playlists.keys():\n print(f\"Cannot show playlist {playlist_name}: Playlist does not exist\")\n return\n\n playlist = self.playlists.get(playlist_id)\n videos = playlist.videos\n\n if len(videos) == 0:\n print(f\"Showing playlist: {playlist_name}\")\n print(\"No videos here yet\")\n return\n\n print(f\"Showing playlist: {playlist_name}\")\n for video_id in videos:\n print(self._video_library.get_video(video_id))\n return",
"def show_playlist(self, playlist_name):\n print(f\"Showing playlist: {playlist_name}\")\n print(\" No videos here yet\")",
"def show_playlist(self, playlist_name):\n playlist_exists = False\n for playlist in list(self.playlists.keys()):\n if playlist_name.upper() == playlist.upper():\n playlist_exists = True\n real_playlist_name = playlist\n break\n if playlist_exists:\n print(f\"Showing playlist: {playlist_name}\")\n if len(self.playlists[real_playlist_name]) == 0:\n print(\"\\tNo videos here yet\")\n else:\n for song in self.playlists[real_playlist_name]:\n video = self._video_library.get_video(song)\n tags = str(video.tags)\n tags=tags.replace(\"'\",\"\")\n tags=tags.replace(\",\", \"\") \n tags=tags.replace(\")\", \"\") \n tags=tags.replace(\"(\", \"\") \n print(f\"{video.title} ({video.video_id}) [{tags}]\")\n\n else:\n print(f\"\\tCannot show playlist {playlist_name}: Playlist does not exist\")\n\n # print(\"show_playlist needs implementation\")",
"def show_playlist(self, playlist_name):\n \n if playlist_name.lower() not in self.playlists:\n print(\"Cannot show playlist\", playlist_name, end=\"\")\n print(\": Playlist does not exist\")\n elif len(self.playlists[playlist_name.lower()]) == 0:\n print(\"Showing playlist:\", playlist_name)\n print(\"No videos here yet\")\n else:\n print(\"Showing playlist:\", playlist_name)\n for video in self.playlists[playlist_name.lower()]:\n if video.flagged:\n print(f\"{self.videos_dict[video]} - FLAGGED (reason: {video.flag_reason})\")\n else:\n print(self.videos_dict[video])",
"def getAllPlaylists(self,name):\n return [p for p in self.playlists if p.title == name]",
"def get_videos_in_playlist(self):\n\n self.ydl = youtube_dl.YoutubeDL()\n # uses the youtube_dl as a context manager\n with self.ydl:\n self.result = self.ydl.extract_info(\n self.url, extra_info={'listformats': True}, download=False)\n for video in (self. result['entries']):\n video_id = video['id']\n self. url = f'https://www.youtube.com/watch?v={video_id}'\n self. show_formats()",
"def index():\n seen = set()\n seen_add = seen.add\n videos = []\n all_videos = mythVideo.searchVideos(insertedafter = '1900-01-01 00:00:00')\n\n for video in all_videos:\n path = video.filename.split('/')[0]\n if path not in seen and not seen_add(path):\n video.url = url_for('.with_path', path=path)\n video.label = path\n videos.append(video)\n\n videos = sorted(videos, key = lambda video: video.label.lowercase())\n return render_template('list.html', items = videos, page_title = 'Videos')",
"def get_all_playlist_videos( playlistURL ):\r\n \r\n request = youtube.playlistItems().list(\r\n part=\"contentDetails,id,snippet\",\r\n maxResults=50,\r\n playlistId=\"PLxgoClQQBFjgTMrhvedWk8Q_CVLWwy3ak\"\r\n )\r\n response = request.execute()",
"def list_videos(movie,thumb):\n\n videos = get_videos(movie)\n listing = []\n for video in videos:\n list_item = xbmcgui.ListItem(label=video[0])\n list_item.setArt({'thumb': thumb,\n 'icon': thumb,\n 'fanart': thumb})\n list_item.setInfo('video', {'title': video[0]})\n list_item.setProperty('IsPlayable', 'true')\n url = '{0}?action=play&video={1}'.format(_url, video[1])\n is_folder = False\n listing.append((url, list_item, is_folder))\n\n xbmcplugin.addDirectoryItems(_handle, listing, len(listing))\n xbmcplugin.endOfDirectory(_handle)",
"def with_season(title, season):\n videos = list(mythVideo.searchVideos(title = title, season = season))\n\n for video in videos:\n video.label = video.title + \" - \" + video.subtitle\n video.url = \"/videos/\" + video.title + \"/\" + video.hash\n\n videos = sorted(videos, key = lambda video: video.episode)\n return render_template('list.html', items = videos, page_title = title + \" Season \" + str(season))",
"def get_videos(town):\n\n entries = get_town_videos(town)\n\n print entries\n\n if entries:\n return render_template('videos.html', videos=entries, town=town)\n else:\n flash('No se encontraron videos.')\n return render_template('videos.html', town=town)",
"def fetch_videos():\n channels = get_channels_from_file()\n\n channels_request = service.channels().list(\n part='id, contentDetails',\n forUsername=channels[0]['channelUsername'] # first channel for now\n )\n\n video_list = []\n\n channels_response = channels_request.execute()\n for channel in channels_response['items']:\n uploads_list_id = channel['contentDetails']['relatedPlaylists']['uploads']\n\n next_page_token = ''\n while next_page_token is not None:\n playlistitems_response = service.playlistItems().list(\n playlistId=uploads_list_id,\n part='snippet',\n maxResults=50,\n pageToken=next_page_token\n ).execute()\n\n for playlist_item in playlistitems_response['items']:\n title = playlist_item['snippet']['title']\n video_id = playlist_item['snippet']['resourceId']['videoId']\n print(f'{title}, {video_id}')\n video_list.append({'title': title, 'video_id': video_id})\n\n next_page_token = playlistitems_response.get('nextPageToken')\n\n return video_list",
"def list(self):\n\n query = \"\"\"\n SELECT id, uri, filename, description\n FROM videos\n \"\"\"\n\n result = Model.execute(query)\n\n return result.fetchall()",
"def clear_playlist(self, playlist_name):\n playlist_id = playlist_name.lower()\n if not playlist_id in self.playlists.keys():\n print(f\"Cannot clear playlist {playlist_name}: Playlist does not exist\")\n return\n\n self.playlists.get(playlist_id).videos = []\n print(f\"Successfully removed all videos from {playlist_name}\")",
"def playlist_videos(playlist_id):\r\n url = PLAYLIST_ITEMS_URL.format(API_KEY, playlist_id)\r\n response = util.web.http_get(url=url, json=True, referer='https://tinychat.com')\r\n\r\n if response['json'] is not None:\r\n video_list = []\r\n # next_page_token = response['json']['nextPageToken']\r\n try:\r\n if 'items' in response['json']:\r\n for item in response['json']['items']:\r\n video_id = item['snippet']['resourceId']['videoId']\r\n details = video_details(video_id)\r\n if details is not None:\r\n info = {\r\n 'type': 'youTube',\r\n 'video_id': video_id,\r\n 'video_title': details['video_title'],\r\n 'video_time': details['video_time']\r\n }\r\n video_list.append(info)\r\n return video_list\r\n except KeyError as ke:\r\n log.error(ke, exc_info=True)\r\n return None",
"def remove_from_playlist(self, playlist_name, video_id):\n for i in playlist_name:\n if i.video_id == video_id:\n print(f\"Removed video from {playlist_name}: {self.title}\")",
"def show_playing(self):\n\n temp_list = []\n if self.is_playing is False:\n print(\"No video is currently playing\")\n else:\n videos = self._video_library.get_all_videos()\n for vid in videos:\n if vid.title == self.playing_now:\n tags = \"[\"\n for tag in vid.tags:\n tags = tags + tag + \" \"\n tags = tags + \"]\"\n if tags != \"[]\":\n tags = tags[0:len(tags) - 2] + \"]\"\n\n temp_list += [f\"{vid.title} ({vid.video_id}) {tags}\"]\n if self.is_paused is True:\n print(\"Currently playing: \" + temp_list[0] + \" - PAUSED\")\n if self.is_paused is False:\n print(\"Currently playing: \" + temp_list[0])",
"def all_videos(request):\n\n videos = Video.objects.all()\n query = None\n categories = None\n sort = None\n direction = None\n\n if request.GET:\n if 'sort' in request.GET:\n sortkey = request.GET['sort']\n sort = sortkey\n if sortkey == 'name':\n sortkey = 'lower_name'\n videos = videos.annotate(lower_name=Lower('name'))\n if sortkey == 'category':\n sortkey = 'category__name'\n if 'direction' in request.GET:\n direction = request.GET['direction']\n if direction == 'desc':\n sortkey = f'-{sortkey}'\n videos = videos.order_by(sortkey)\n\n if 'category' in request.GET:\n categories = request.GET['category'].split(',')\n videos = videos.filter(category__name__in=categories)\n categories = Category.objects.filter(name__in=categories)\n\n if 'q' in request.GET:\n query = request.GET['q']\n if not query:\n messages.error(request, \"You didn't enter search criteria\")\n return redirect(reverse('videos'))\n \n queries = Q(title__icontains=query) | Q(description__icontains=query)\n videos = videos.filter(queries)\n\n current_sorting = f'{sort}_{direction}'\n\n context = {\n 'videos': videos,\n 'search_term': query,\n 'current_categories': categories,\n 'current_sorting': current_sorting,\n }\n\n return render(request, 'videos.html', context)",
"def clear_playlist(self, playlist_name):\n if playlist_name.lower() not in self._playlists:\n print(f\"Cannot clear playlist {playlist_name}: Playlist does not exist\")\n return\n print(f\"Successfully removed all videos from {playlist_name}\")\n self._playlists[playlist_name.lower()].videos = []",
"def get_liked_videos(self):\n request = self.youtube_client.playlists().list(\n part=\"snippet\",\n mine=True\n )\n playlistid_ = 0\n response = request.execute()\n for item in response[\"items\"]:\n if item[\"snippet\"][\"title\"] == self.nameOfPlaylist:\n playlistid_ = item[\"id\"]\n if playlistid_ == 0:\n raise\n request2 = self.youtube_client.playlistItems().list(\n part=\"snippet\",\n playlistId=playlistid_,\n maxResults=\"50\"\n )\n response = request2.execute()\n nextToken = response.get('nextPageToken')\n while('nextPageToken' in response):\n nextpage = self.youtube_client.playlistItems().list(\n part=\"snippet\",\n playlistId=playlistid_,\n maxResults=\"50\",\n pageToken=nextToken\n ).execute()\n response['items'] += nextpage['items']\n if 'nextPageToken' not in nextpage:\n response.pop('nextPageToken',None)\n else:\n nextToken = nextpage['nextPageToken']\n for item in response[\"items\"]:\n video_title = item[\"snippet\"][\"title\"]\n youtube_url = f\"https://www.youtube.com/watch?v={item['snippet']['resourceId']['videoId']}\"\n\n #use youtube_dl to collect the song name & artist name\n try:\n video = youtube_dl.YoutubeDL({}).extract_info(youtube_url,download=False)\n except:\n continue\n song_name= video[\"track\"]\n artist = video[\"artist\"]\n\n #save all important info\n songUri= self.get_spotify_uri(song_name,artist)\n if songUri != None:\n self.all_song_info[video_title] ={\n \"youtube_url\": youtube_url,\n \"song_name\": song_name,\n \"artist\": artist,\n\n #add the uri, easy to get song to put into playlist\n \"spotify_uri\": self.get_spotify_uri(song_name,artist)\n }",
"def add_to_playlist(self, playlist_name, video_id):\n video = self._video_library.get_video(video_id)\n for i in playlist_name:\n if i.title == video.title:\n print(f\"Cannot add video to {playlist_name}: Video already added\")\n else:\n print(f\"Added video to {playlist_name}: {video.title}\")",
"def get_videos_of_folder(folder):\n\n Settings.dev_print(\"getting videos of folder: {}\".format(folder.get_title()))\n if not folder: return []\n videos = []\n files = []\n valid_videos = [\".mp4\",\".mov\"]\n for f in os.listdir(folder.get_path()):\n ext = os.path.splitext(f)[1]\n if ext.lower() not in valid_videos:\n continue\n file = File()\n setattr(file, \"path\", os.path.join(folder.get_path(),f))\n files.append(file)\n Settings.maybe_print(\"video path: {}\".format(os.path.join(folder.get_path(),f)))\n return files",
"def getPlaylist(self,name):\n playlist = self.getAllPlaylists(name)\n return playlist[0] if playlist else None",
"def video_list(self) -> list:\n return self._video_list",
"def get_playlists_for_user_by_name(self, request): \n user = Account.find_by_username(request.username)\n playlists = Playlist.find_by_owner(user.key).fetch(20)\n return self.build_playlist_response(playlists)",
"def get_videos_by_playlist(playlist_id='', parse_video=True):\n videos = api.get_playlist_items(playlist_id=playlist_id, count=None)\n video_ids = []\n for item in videos.items:\n item = item.to_dict()\n video_ids.append(item['contentDetails']['videoId'])\n if parse_video:\n results = []\n for video_id in video_ids:\n results.append(get_video_by_id(video_id))\n return results\n else:\n return video_ids",
"def add_videos(playlist):\n surl = playlist['link']\n # 작은 playlist의 url을 surl에 저장\n soup = get_soup(surl)\n # 작은 플레이리스트의 html 파싱하여 soup에 저장\n print(f\" getting videos for playlist: {playlist['title']}\")\n\n videos = []\n\n # items are list of video a links from list\n items = soup('a', class_='yt-uix-tile-link')\n # a 태그의 class가 'yt-uix-tile-link'인 태그 items에 저장\n # items는 작은 플레이리스트의 동영상 목록들임\n\n # note first part of look get info from playlist page item,\n # and the the last part opens the video and gets more details\n if len(items) > 0:\n for i in items:\n # 각각의 items i에 하나씩 저장\n d = dict()\n vurl = fix_url(i['href'])\n # 동영상 url을 vurl에 저장\n t = i.find_next('span', {'aria-label': True})\n # 동영상의 span 태그 중 aria=label값이 존재하는 것 t에 저장\n # t는 동영상의 재생 시간임\n d['time'] = t.text if t else 'NA'\n # d 딕셔너리에 t저장\n\n d.update(parse_video(vurl))\n videos.append(d)\n # videos에 d를 append\n\n else: # must be only one video\n d = {'time': 'NA'}\n d.update(parse_video(surl))\n videos.append(d)\n\n # add new key to this playlist of list of video infos\n playlist['videos'] = videos\n print()",
"def play_url(url, name):\n video_url = scraper.get_media_url(url)\n if video_url == -1:\n GUI.info_box(u\"Vesen\", u\"Fann ekki upptöku\")\n else:\n player.play(video_url, name)",
"async def get_videos(self) -> APIReturn:\n return await self._request(\"GET\", \"/getVideos\")"
]
| [
"0.795203",
"0.7930741",
"0.7693881",
"0.7545777",
"0.74915266",
"0.68660384",
"0.6755695",
"0.6657861",
"0.6496447",
"0.62669796",
"0.6146096",
"0.6144877",
"0.6072285",
"0.6068287",
"0.6064832",
"0.60442716",
"0.6008929",
"0.59817594",
"0.5926295",
"0.59068966",
"0.59018767",
"0.58664745",
"0.5857419",
"0.58224547",
"0.57823354",
"0.5781823",
"0.578004",
"0.5769995",
"0.5761922",
"0.57205826"
]
| 0.7973328 | 0 |
Display all the videos whose titles contain the search_term. | def search_videos(self, search_term):
videos = self._video_library.get_all_videos()
temp_list = []
for vid in videos:
# Convoluted way to display tags in required format
tags = "["
for tag in vid.tags:
tags = tags + tag + " "
tags = tags + "]"
print(f"{vid.title}")
if tags != "[]":
tags = tags[0:len(tags) - 2] + "]"
if str(search_term.lower()) in str(vid.title):
temp_list += [f"{vid.title} ({vid.video_id}) {tags}"]
# Sort the list and display
sorted_list = sorted(temp_list)
print(f"Here are the results for {search_term}:")
for x in sorted_list:
print(" " + f"{sorted_list.index(x) + 1}) " + x) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def search_videos(self, search_term):\n results = []\n for video in self._video_library.get_all_videos():\n if search_term.lower() in video.title.lower() and video.flag is None:\n results.append(video)\n self.output_search_results(results, search_term)",
"def search_videos(self, search_term):\n print(\"search_videos needs implementation\")",
"def search_videos(self, search_term):\n all_videos = self._video_library.get_all_videos()\n all_videos.sort(key=lambda x: x.title)\n matching_videos = []\n for video in all_videos:\n if search_term.lower() in video.title.lower():\n matching_videos.append(video)\n\n matching_videos.sort(key=lambda x: x.title)\n\n if len(matching_videos) == 0:\n print(f\"No search results for {search_term}\")\n return\n\n print(\"Here are the results for cat:\")\n for i, matching_video in enumerate(matching_videos):\n print(f\"{i + 1}) {str(matching_video)}\")\n\n print(\n \"Would you like to play any of the above? If yes, specify the number of the video.\\nIf your answer is not a valid number, we will assume it's a no.\")\n video_number = input()\n\n # print(video_number)\n\n try:\n int_video_number = int(video_number)\n if int_video_number > len(matching_videos) or int_video_number < 0:\n return\n else:\n self.play_video(matching_videos[int_video_number - 1].video_id)\n except ValueError:\n return",
"def search_videos(self, search_term):\n videos = self._video_library.get_all_videos()\n videos.sort(key=lambda x: x.title, reverse=False)\n matched_id = []\n for v in videos:\n if search_term.lower() in v.title.lower():\n matched_id.append(v.video_id)\n \n if matched_id:\n i = 1\n print(f\"Here are the results for {search_term}:\")\n for id in matched_id:\n video = self._video_library.get_video(id)\n tags = str(video.tags)\n tags=tags.replace(\"'\",\"\")\n tags=tags.replace(\",\", \"\") \n tags=tags.replace(\")\", \"\") \n tags=tags.replace(\"(\", \"\") \n print(f\" {i}) {video.title} ({video.video_id}) [{tags}]\")\n\n i = i+1\n \n print(\"Would you like to play any of the above? If yes, \"\n \"specify the number of the video.\")\n print(\"If your answer is not a valid number, we will assume it's a no.\")\n option = input()\n # option = input(\"Would you like to play any of the above? If yes, \"\n # \"specify the number of the video. \\n If your answer is not a valid number, we will assume it's a no.\")\n\n try:\n value = int(option)\n if value > 1 and value < len(matched_id)+1 :\n self.play_video(matched_id[value-1])\n except ValueError:\n pass\n\n else:\n print(f\"No search results for {search_term}\")\n \n \n # print(\"search_videos needs implementation\")",
"def search_videos(self, search_term):\n recommendations = []\n for video in self.videos_dict:\n if not video.flagged and search_term in self.videos_dict[video]:\n recommendations.append(self.videos_dict[video])\n \n recommendations.sort()\n n = len(recommendations)\n\n\n if n == 0:\n print(f\"No search results for {search_term}\")\n else:\n print(f\"Here are the results for {search_term}:\")\n for i in range(n):\n print(f\"{i+1}) {recommendations[i]}\")\n print(\"Would you like to play any of the above? If yes, specify the number of the video.\")\n print(\"If your answer is not a valid number, we will assume it's a no.\")\n\n try:\n response = int(input())\n if response in range(1,n+1):\n wanted_video_info = recommendations[response-1]\n #print(wanted_video_info)\n s = wanted_video_info\n result = re.search(r\"\\(([A-Za-z0-9_]+)\\)\", s)\n #print(result.group(1))\n self.play_video(result.group(1))\n except ValueError:\n pass",
"def search(pattern):\n query = pattern.lower()\n videolist = getvideolist()\n results = []\n for video in videolist:\n for value in video.values():\n if query in str(value).lower():\n results.append(Colors.YELLOW + video[\"file\"] + Colors.END + \" - \" + video[\"source\"] + \" - \" +\n video[\"title\"])\n if results:\n for result in results:\n safeprint(result)\n else:\n safeprint(\"No video matching the given query was found.\")",
"def all_videos(request):\n\n videos = Video.objects.all()\n query = None\n categories = None\n sort = None\n direction = None\n\n if request.GET:\n if 'sort' in request.GET:\n sortkey = request.GET['sort']\n sort = sortkey\n if sortkey == 'name':\n sortkey = 'lower_name'\n videos = videos.annotate(lower_name=Lower('name'))\n if sortkey == 'category':\n sortkey = 'category__name'\n if 'direction' in request.GET:\n direction = request.GET['direction']\n if direction == 'desc':\n sortkey = f'-{sortkey}'\n videos = videos.order_by(sortkey)\n\n if 'category' in request.GET:\n categories = request.GET['category'].split(',')\n videos = videos.filter(category__name__in=categories)\n categories = Category.objects.filter(name__in=categories)\n\n if 'q' in request.GET:\n query = request.GET['q']\n if not query:\n messages.error(request, \"You didn't enter search criteria\")\n return redirect(reverse('videos'))\n \n queries = Q(title__icontains=query) | Q(description__icontains=query)\n videos = videos.filter(queries)\n\n current_sorting = f'{sort}_{direction}'\n\n context = {\n 'videos': videos,\n 'search_term': query,\n 'current_categories': categories,\n 'current_sorting': current_sorting,\n }\n\n return render(request, 'videos.html', context)",
"def search(self, query):\n URL = \"https://www.youtube.com/results\"\n r = requests.get(URL, params={'search_query': query})\n results = re.findall(r'href=\\\"\\/watch\\?v=(.{11})', r.content)\n self.results = results[::2]\n return self.results",
"def search_videos_tag(self, video_tag):\n videos = self._video_library.get_all_videos()\n\n temp_list = []\n for vid in videos:\n\n # Convoluted way to display tags in required format\n tags =\"[\"\n for tag in vid.tags:\n tags = tags + tag + \" \"\n tags = tags + \"]\"\n\n if tags != \"[]\":\n tags = tags[0:len(tags)-2] + \"]\"\n if video_tag.lower() in tags:\n temp_list += [f\"{vid.title} ({vid.video_id}) {tags}\"]\n\n # Sort the list and display\n sorted_list = sorted(temp_list)\n print(f\"Here are the results for {video_tag}:\")\n numberlist=[]\n for x in sorted_list:\n numberlist.append(sorted_list.index(x)+1)\n print(\" \" + f\"{sorted_list.index(x)+1}) \" + x)\n print(\"Would you like to play any of the above? If yes, specify the number of the video.\")\n print(\"If your answer is not a valid number, we will assume it's a no.\")",
"def search(request):\n if 'q' in request.GET:\n term = request.GET['q']\n story_list = Story.objects.filter(Q(title__contains=term)|Q(markdown_content__contains=term))\n heading = \"Search results\"\n return render_to_response(\"cms/story_list.html\",locals())",
"def _searchOnTitle(self, title, media, quality, results, offset=0):\n try:\n params = {\n 'cid': 210, # Movie/Video category\n 'offset': offset,\n 'limit': T411.limit\n }\n url = self.urls['search'].format(simplifyString(title),\n tryUrlencode(params))\n data = self.getJsonData(url, headers=self.headers)\n self.checkError(data)\n now = datetime.now()\n for torrent in data['torrents']:\n category = int(torrent['category'])\n # Filter on animations, movies & documentaries\n if category in [455, 631, 634]:\n added = datetime.strptime(torrent['added'],\n '%Y-%m-%d %H:%M:%S')\n # Convert size from byte to kilobyte\n size = int(torrent['size'])/1024\n id_ = int(torrent['id'])\n result = {\n 'id': id_,\n 'name': torrent['name'],\n 'seeders': int(torrent['seeders']),\n 'leechers': int(torrent['leechers']),\n 'size': self.parseSize(str(size)+self.size_kb[0]),\n 'age': (now - added).days,\n 'url': self.urls['url'].format(id_),\n 'detail_url': self.urls['detail_url'].format(id_),\n 'verified': bool(int(torrent['isVerified'])),\n 'get_more_info': self.getMoreInfo,\n 'extra_check': self.extraCheck\n }\n T411.log.debug('{0}|{1}'.format(result.get('id'),\n simplifyString(result.get('name'))))\n results.append(result)\n # Get next page if we don't have all results\n if int(data['total']) > len(data['torrents'])+offset:\n self._searchOnTitle(title, media, quality, results,\n offset+T411.limit)\n except:\n T411.log.error('Failed searching release from {0}: {1}'.\n format(self.getName(), traceback.format_exc()))",
"def search_movies(title: str) -> list[tuple]:\n with connection:\n search = '%' + title + '%'\n return list(connection.execute(SEARCH_MOVIE, (search,)))",
"def search_videos_tag(self, video_tag):\n videos = self._video_library.get_all_videos()\n videos.sort(key=lambda x: x.title, reverse=False)\n matched_id = []\n for v in videos:\n if video_tag.lower() in v.tags:\n matched_id.append(v.video_id)\n \n if matched_id:\n i = 1\n print(f\"Here are the results for {video_tag}:\")\n for id in matched_id:\n video = self._video_library.get_video(id)\n tags = str(video.tags)\n tags=tags.replace(\"'\",\"\")\n tags=tags.replace(\",\", \"\") \n tags=tags.replace(\")\", \"\") \n tags=tags.replace(\"(\", \"\") \n print(f\" {i}) {video.title} ({video.video_id}) [{tags}]\")\n\n i = i+1\n \n print(\"Would you like to play any of the above? If yes, \"\n \"specify the number of the video.\")\n print(\"If your answer is not a valid number, we will assume it's a no.\")\n option = input()\n\n try:\n value = int(option)\n if value > 1 and value < len(matched_id)+1 :\n self.play_video(matched_id[value-1])\n except ValueError:\n pass\n\n else:\n print(f\"No search results for {video_tag}\")\n \n # print(\"search_videos_tag needs implementation\")",
"def search_videos_tag(self, video_tag):\n if not video_tag.startswith('#'):\n print(f\"No search results for {video_tag}\")\n return\n\n all_videos = self._video_library.get_all_videos()\n matching_videos = []\n for video in all_videos:\n if video_tag.lower() in list(map(str.lower, video.tags)):\n matching_videos.append(video)\n\n matching_videos.sort(key=lambda x: x.title)\n\n if len(matching_videos) == 0:\n print(f\"No search results for {video_tag}\")\n return\n\n print(f\"Here are the results for {video_tag}:\")\n for i, matching_video in enumerate(matching_videos):\n print(f\"{i + 1}) {str(matching_video)}\")\n\n print(\n \"Would you like to play any of the above? If yes, specify the number of the video.\\nIf your answer is not a valid number, we will assume it's a no.\")\n video_number = input()\n\n # print(video_number)\n\n try:\n int_video_number = int(video_number)\n if int_video_number > len(matching_videos) or int_video_number < 0:\n return\n else:\n self.play_video(matching_videos[int_video_number - 1].video_id)\n except ValueError:\n return",
"def search_videos_tag(self, video_tag):\n results = []\n for video in self._video_library.get_all_videos():\n if video_tag.lower() in video.tags and video.flag is None:\n results.append(video)\n self.output_search_results(results, video_tag)",
"def get_individual_video_link(self):\r\n self.filter_url_portion = '' # ignore the filter option.\r\n\r\n target_search_results_obj = []\r\n # in case we want to search more pages just change this and make a loop\r\n self.page_url_portion = '&page=1'\r\n\r\n # start with forming the search\r\n self.form_search_url()\r\n\r\n # Get the dom object from the search page\r\n search_result_dom = self.get_dom_object(self.target_yt_search_url_str)\r\n\r\n # Get the search results\r\n target_search_results_obj.extend(self.tag_element_results(search_result_dom,\r\n 'div[class=\"yt-lockup-content\"] h3[class=\"yt-lockup-title\"] a'))\r\n\r\n #print 'results len: ', len(target_search_results_obj)\r\n\r\n each_video_link_title_dict = {}\r\n for n in target_search_results_obj:\r\n video_link = n.attributes['href']\r\n ## modified video link\r\n # video_link = re.sub('watch\\?v=',r'v/',video_link)\r\n\r\n video_title = n.attributes['title'] #\"Mix\" in video_title[:4] or \"mix\" i(n video_title[:4] or\r\n ile = video_title.lower()\r\n if \"cover\" in ile or \"live\" in ile or \"acustic\" in ile or \"acoustic\" in ile or \"lesson\" in ile:\r\n print \"found blacklisted term, bypassing song: \" + ile\r\n pass #dont want these\r\n else:\r\n each_video_link_title_dict[video_title] = 'https://www.youtube.com' + video_link\r\n\r\n self.video_link_title_dict.update(each_video_link_title_dict)",
"def search_videos_tag(self, video_tag):\n print(\"search_videos_tag needs implementation\")",
"def search_by_title(title):\n\turl = tmdb_api(\"search/movie\")+\"&query=\"+urllib.quote_plus(title)\n\tresponse = json.load(urllib2.urlopen(url))\n\treturn JSONResponse(response)",
"def search_for_title(session, search_term):\n try:\n s_result = session.search_movie(search_term)\n shows = {}\n\n # made the keys of the namedtuple a digit for ease of selecting the correct one later\n for count, result in enumerate(s_result):\n show_id = count\n movie_id = result.movieID\n title = result['long imdb canonical title']\n url = f'http://www.imdb.com/title/tt{movie_id}/parentalguide'\n shows[count] = Show(show_id, movie_id, title, url)\n return shows\n except imdb._exceptions.IMDbDataAccessError:\n display_error()",
"def query_youtube(movie_title):\n #convert movie_title to “percent-encoded” string, then open search\n query_string = urllib.urlencode({\"search_query\" : movie_title + \" trailer\"})\n html_content = urllib.urlopen(\"http://www.youtube.com/results?\" +\n query_string)\n #use regular expressions to find all 11 character videos IDs\n query_results = re.findall(r'href=\\\"\\/watch\\?v=(.{11})',\n html_content.read())\n return \"http://www.youtube.com/watch?v=\" + query_results[0]",
"def full_text_search_title(self, term: str):\n if not self.client:\n self.connect()\n query = templates.full_text_search_title(term)\n return self.client.moviebuff.engtitles.aggregate(query)",
"def with_season(title, season):\n videos = list(mythVideo.searchVideos(title = title, season = season))\n\n for video in videos:\n video.label = video.title + \" - \" + video.subtitle\n video.url = \"/videos/\" + video.title + \"/\" + video.hash\n\n videos = sorted(videos, key = lambda video: video.episode)\n return render_template('list.html', items = videos, page_title = title + \" Season \" + str(season))",
"def search(self, title):\n close_matches = self.get_close_matches_by_title(title)\n count = 0\n for item in self.item_list.values():\n if item.title in close_matches:\n print(item)\n count += 1\n if count == 0:\n print(\"No result found.\")",
"def search(search_term):\r\n if search_term:\r\n if 'list' in search_term:\r\n search_term = search_term.split('?list')[0]\r\n\r\n url = SEARCH_URL.format(API_KEY, util.web.quote(search_term.encode('ascii', 'ignore')))\r\n response = util.web.http_get(url=url, json=True, referer='http://tinychat.com')\r\n\r\n if response['json'] is not None:\r\n try:\r\n if 'items' in response['json']:\r\n for item in response['json']['items']:\r\n video_id = item['id']['videoId']\r\n details = video_details(video_id)\r\n if details is not None:\r\n return {\r\n 'type': 'youTube',\r\n 'video_id': video_id,\r\n 'video_time': details['video_time'],\r\n 'video_title': details['video_title']\r\n }\r\n except KeyError as ke:\r\n log.error(ke, exc_info=True)\r\n return None",
"def index():\n seen = set()\n seen_add = seen.add\n videos = []\n all_videos = mythVideo.searchVideos(insertedafter = '1900-01-01 00:00:00')\n\n for video in all_videos:\n path = video.filename.split('/')[0]\n if path not in seen and not seen_add(path):\n video.url = url_for('.with_path', path=path)\n video.label = path\n videos.append(video)\n\n videos = sorted(videos, key = lambda video: video.label.lowercase())\n return render_template('list.html', items = videos, page_title = 'Videos')",
"def search_videos_tag(self, video_tag):\n recommendations = []\n\n if not video_tag.startswith(\"#\"):\n print(f\"No search results for {video_tag}\")\n else:\n for video in self.videos_dict:\n #s = self.videos_dict[video]\n #result = re.search(r\"\\[([A-Za-z0-9_]+)\\]\", s)\n #print(result.group(1))\n #tag_string = str(result.group(1))\n #if video_tag in tag_string:\n # recommendations.append(self.videos_dict[video])\n if video_tag in video._tags and not video.flagged:\n recommendations.append(self.videos_dict[video])\n \n recommendations.sort()\n n = len(recommendations)\n\n if n == 0:\n print(f\"No search results for {video_tag}\")\n else:\n print(f\"Here are the results for {video_tag}:\")\n for i in range(n):\n print(f\"{i+1}) {recommendations[i]}\")\n print(\"Would you like to play any of the above? If yes, specify the number of the video.\")\n print(\"If your answer is not a valid number, we will assume it's a no.\")\n\n try:\n response = int(input())\n if response in range(1,n+1):\n wanted_video_info = recommendations[response-1]\n #print(wanted_video_info)\n s = wanted_video_info\n result = re.search(r\"\\(([A-Za-z0-9_]+)\\)\", s)\n #print(result.group(1))\n self.play_video(result.group(1))\n except ValueError:\n pass",
"def do_search(request):\n products = Product.objects.filter(title__icontains=request.GET['q'])\n return render(request, \"products.html\", {\"products\": products})",
"def searchMovie(api, title, date, count, retweets=False):\n\n since = date\n until = datetime.strptime(since, '%Y-%m-%d') + timedelta(days=1)\n until = until.strftime('%Y-%m-%d')\n\n query = \"\\\"{0}\\\" since:{1} until:{2} -filter:links\".format(processTitle(title), since, until)\n if retweets == False:\n query += \" -filter:retweets\"\n\n rawtweets = tweepy.Cursor(api.search, q=query, result_type=\"recent\", lang=\"en\").items(count)\n results = []\n\n for i, tweet in enumerate(rawtweets):\n try:\n results.append(processTweet(title, tweet, remove_title=True))\n except tweepy.error.TweepError as err:\n print(\"\\nThere was an error processing tweet #{0} for title [{1}]\".format(i, title))\n print(err.messages[0]['code'])\n\n results = pd.DataFrame(results)\n results['title'] = title\n results['tweet_date'] = date\n return results",
"def video(title, hash = None, season = None, episode = None):\n if not hash:\n video = list(mythVideo.searchVideos(title = title, season = season, episode = episode))[0]\n else:\n video = [video for video in mythVideo.searchVideos(title = title) if video.hash == hash][0]\n\n return render_template('recording.html', item = video)",
"def search_venues():\n\n # TODO: implement search on artists with partial string search. Ensure it is case-insensitive.\n # seach for Hop should return \"The Musical Hop\". (DONE)\n # search for \"Music\" should return \"The Musical Hop\" and \"Park Square Live Music & Coffee\" (DONE)\n response = request.form.get('search_term', '')\n response = response.lower()\n\n venues = db.session.query(Venue).filter(Venue.name.ilike('%' + response + '%')).all()\n results = []\n \n for v in venues:\n print(v.name)\n results.append({\n 'id': v.id,\n 'name' : v.name\n })\n\n response={\n \"count\": len(results),\n \"data\": results\n }\n\n return render_template(\n \"pages/search_venues.html\",\n results=response,\n search_term=request.form.get(\"search_term\", \"\"),\n )"
]
| [
"0.8331212",
"0.79130685",
"0.78181237",
"0.7745555",
"0.7296771",
"0.71350473",
"0.6829117",
"0.65758127",
"0.6539812",
"0.65275896",
"0.6482727",
"0.6481391",
"0.64698297",
"0.640734",
"0.638748",
"0.6354398",
"0.63542914",
"0.63518894",
"0.6330999",
"0.6296946",
"0.6252102",
"0.62304425",
"0.6166756",
"0.6154516",
"0.61328864",
"0.60951537",
"0.6048668",
"0.6028948",
"0.6019666",
"0.6015907"
]
| 0.8280379 | 1 |
Display all videos whose tags contains the provided tag. | def search_videos_tag(self, video_tag):
videos = self._video_library.get_all_videos()
temp_list = []
for vid in videos:
# Convoluted way to display tags in required format
tags ="["
for tag in vid.tags:
tags = tags + tag + " "
tags = tags + "]"
if tags != "[]":
tags = tags[0:len(tags)-2] + "]"
if video_tag.lower() in tags:
temp_list += [f"{vid.title} ({vid.video_id}) {tags}"]
# Sort the list and display
sorted_list = sorted(temp_list)
print(f"Here are the results for {video_tag}:")
numberlist=[]
for x in sorted_list:
numberlist.append(sorted_list.index(x)+1)
print(" " + f"{sorted_list.index(x)+1}) " + x)
print("Would you like to play any of the above? If yes, specify the number of the video.")
print("If your answer is not a valid number, we will assume it's a no.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def search_videos_tag(self, video_tag):\n results = []\n for video in self._video_library.get_all_videos():\n if video_tag.lower() in video.tags and video.flag is None:\n results.append(video)\n self.output_search_results(results, video_tag)",
"def search_videos_tag(self, video_tag):\n videos = self._video_library.get_all_videos()\n videos.sort(key=lambda x: x.title, reverse=False)\n matched_id = []\n for v in videos:\n if video_tag.lower() in v.tags:\n matched_id.append(v.video_id)\n \n if matched_id:\n i = 1\n print(f\"Here are the results for {video_tag}:\")\n for id in matched_id:\n video = self._video_library.get_video(id)\n tags = str(video.tags)\n tags=tags.replace(\"'\",\"\")\n tags=tags.replace(\",\", \"\") \n tags=tags.replace(\")\", \"\") \n tags=tags.replace(\"(\", \"\") \n print(f\" {i}) {video.title} ({video.video_id}) [{tags}]\")\n\n i = i+1\n \n print(\"Would you like to play any of the above? If yes, \"\n \"specify the number of the video.\")\n print(\"If your answer is not a valid number, we will assume it's a no.\")\n option = input()\n\n try:\n value = int(option)\n if value > 1 and value < len(matched_id)+1 :\n self.play_video(matched_id[value-1])\n except ValueError:\n pass\n\n else:\n print(f\"No search results for {video_tag}\")\n \n # print(\"search_videos_tag needs implementation\")",
"def search_videos_tag(self, video_tag):\n print(\"search_videos_tag needs implementation\")",
"def search_videos_tag(self, video_tag):\n if not video_tag.startswith('#'):\n print(f\"No search results for {video_tag}\")\n return\n\n all_videos = self._video_library.get_all_videos()\n matching_videos = []\n for video in all_videos:\n if video_tag.lower() in list(map(str.lower, video.tags)):\n matching_videos.append(video)\n\n matching_videos.sort(key=lambda x: x.title)\n\n if len(matching_videos) == 0:\n print(f\"No search results for {video_tag}\")\n return\n\n print(f\"Here are the results for {video_tag}:\")\n for i, matching_video in enumerate(matching_videos):\n print(f\"{i + 1}) {str(matching_video)}\")\n\n print(\n \"Would you like to play any of the above? If yes, specify the number of the video.\\nIf your answer is not a valid number, we will assume it's a no.\")\n video_number = input()\n\n # print(video_number)\n\n try:\n int_video_number = int(video_number)\n if int_video_number > len(matching_videos) or int_video_number < 0:\n return\n else:\n self.play_video(matching_videos[int_video_number - 1].video_id)\n except ValueError:\n return",
"def search_videos_tag(self, video_tag):\n recommendations = []\n\n if not video_tag.startswith(\"#\"):\n print(f\"No search results for {video_tag}\")\n else:\n for video in self.videos_dict:\n #s = self.videos_dict[video]\n #result = re.search(r\"\\[([A-Za-z0-9_]+)\\]\", s)\n #print(result.group(1))\n #tag_string = str(result.group(1))\n #if video_tag in tag_string:\n # recommendations.append(self.videos_dict[video])\n if video_tag in video._tags and not video.flagged:\n recommendations.append(self.videos_dict[video])\n \n recommendations.sort()\n n = len(recommendations)\n\n if n == 0:\n print(f\"No search results for {video_tag}\")\n else:\n print(f\"Here are the results for {video_tag}:\")\n for i in range(n):\n print(f\"{i+1}) {recommendations[i]}\")\n print(\"Would you like to play any of the above? If yes, specify the number of the video.\")\n print(\"If your answer is not a valid number, we will assume it's a no.\")\n\n try:\n response = int(input())\n if response in range(1,n+1):\n wanted_video_info = recommendations[response-1]\n #print(wanted_video_info)\n s = wanted_video_info\n result = re.search(r\"\\(([A-Za-z0-9_]+)\\)\", s)\n #print(result.group(1))\n self.play_video(result.group(1))\n except ValueError:\n pass",
"def search_videos(self, search_term):\n videos = self._video_library.get_all_videos()\n\n temp_list = []\n for vid in videos:\n\n # Convoluted way to display tags in required format\n tags = \"[\"\n for tag in vid.tags:\n tags = tags + tag + \" \"\n tags = tags + \"]\"\n print(f\"{vid.title}\")\n if tags != \"[]\":\n tags = tags[0:len(tags) - 2] + \"]\"\n if str(search_term.lower()) in str(vid.title):\n temp_list += [f\"{vid.title} ({vid.video_id}) {tags}\"]\n\n # Sort the list and display\n sorted_list = sorted(temp_list)\n print(f\"Here are the results for {search_term}:\")\n for x in sorted_list:\n print(\" \" + f\"{sorted_list.index(x) + 1}) \" + x)",
"def search_videos(self, search_term):\n results = []\n for video in self._video_library.get_all_videos():\n if search_term.lower() in video.title.lower() and video.flag is None:\n results.append(video)\n self.output_search_results(results, search_term)",
"def search_videos(self, search_term):\n videos = self._video_library.get_all_videos()\n videos.sort(key=lambda x: x.title, reverse=False)\n matched_id = []\n for v in videos:\n if search_term.lower() in v.title.lower():\n matched_id.append(v.video_id)\n \n if matched_id:\n i = 1\n print(f\"Here are the results for {search_term}:\")\n for id in matched_id:\n video = self._video_library.get_video(id)\n tags = str(video.tags)\n tags=tags.replace(\"'\",\"\")\n tags=tags.replace(\",\", \"\") \n tags=tags.replace(\")\", \"\") \n tags=tags.replace(\"(\", \"\") \n print(f\" {i}) {video.title} ({video.video_id}) [{tags}]\")\n\n i = i+1\n \n print(\"Would you like to play any of the above? If yes, \"\n \"specify the number of the video.\")\n print(\"If your answer is not a valid number, we will assume it's a no.\")\n option = input()\n # option = input(\"Would you like to play any of the above? If yes, \"\n # \"specify the number of the video. \\n If your answer is not a valid number, we will assume it's a no.\")\n\n try:\n value = int(option)\n if value > 1 and value < len(matched_id)+1 :\n self.play_video(matched_id[value-1])\n except ValueError:\n pass\n\n else:\n print(f\"No search results for {search_term}\")\n \n \n # print(\"search_videos needs implementation\")",
"def search_videos(self, search_term):\n print(\"search_videos needs implementation\")",
"async def tags(self, ctx, tag=None):\r\n\t\tnum = 0\r\n\t\tTags = self.settings.ServerConfig(ctx.guild.id, 'Tags')\r\n\t\tfuz = self.bot.get_cog('FuzzySearch')\r\n\t\tif not fuz:\r\n\t\t\treturn await ctx.send('Can\\'t find FuzzySearch Cog')\r\n\r\n\t\tRes = fuz.fuzSearch(ctx, tag, Tags)\r\n\r\n\t\t\t\t\r\n\t\tif ctx.author.top_role.colour:\r\n\t\t\tcol = ctx.author.top_role.colour\r\n\t\telse:\r\n\t\t\tcol =self.settings.randomColor()\r\n\r\n\t\tembed = discord.Embed(\r\n\t\t\ttitle = Res[num],\r\n\t\t\tdescription = Tags[Res]['data'],\r\n\t\t\tcolour = col\r\n\t\t)\r\n\t\tembed.set_footer(text='Last Edited {}'.format(Tags[Res]['time']))\r\n\t\tawait ctx.send(embed=embed)",
"def tagged(tag = ''):\n\tresults = queries.tagged(tag)\n\ttags = queries.tags()\n\treturn render_template('index.html', packages=results, tags=tags, currentFilter=tag)",
"def search_videos(self, search_term):\n all_videos = self._video_library.get_all_videos()\n all_videos.sort(key=lambda x: x.title)\n matching_videos = []\n for video in all_videos:\n if search_term.lower() in video.title.lower():\n matching_videos.append(video)\n\n matching_videos.sort(key=lambda x: x.title)\n\n if len(matching_videos) == 0:\n print(f\"No search results for {search_term}\")\n return\n\n print(\"Here are the results for cat:\")\n for i, matching_video in enumerate(matching_videos):\n print(f\"{i + 1}) {str(matching_video)}\")\n\n print(\n \"Would you like to play any of the above? If yes, specify the number of the video.\\nIf your answer is not a valid number, we will assume it's a no.\")\n video_number = input()\n\n # print(video_number)\n\n try:\n int_video_number = int(video_number)\n if int_video_number > len(matching_videos) or int_video_number < 0:\n return\n else:\n self.play_video(matching_videos[int_video_number - 1].video_id)\n except ValueError:\n return",
"def show_tags():\n\n tags = Tag.query.all()\n\n return render_template(\"tags/tag_list.html\", tags=tags)",
"def show_tags():\n tags = Tag.query.all()\n\n return render_template('tags/show_tags.html', tags=tags)",
"def tagged(request,slug):\n\n tag = get_object_or_404(Tag, slug=slug)\n books = Book.objects.filter(tags=tag)\n \n for book in books:\n book\n\n context = {\n 'tag':tag,\n 'books':books,\n }\n return render(request, 'favorite.html', context)",
"def latest_tagged_video(tag):\n if not isinstance(tag, Tag):\n try:\n tag = Tag.objects.get(text=tag)\n except Tag.DoesNotExist:\n return mark_safe('')\n video = first_or_none(Video.objects.filter(tags=tag)\n .order_by('-issue__issue_date'))\n if video:\n return mark_safe(video.key)\n return mark_safe('')",
"async def video(self, ctx, *, arg: str):\n await ctx.send(site + self.extraire(search + self.traduire(arg.split(' ')), watch_))",
"def get_videos(town):\n\n entries = get_town_videos(town)\n\n print entries\n\n if entries:\n return render_template('videos.html', videos=entries, town=town)\n else:\n flash('No se encontraron videos.')\n return render_template('videos.html', town=town)",
"def search_tag(self, tag):\n self.driver.get(self.tag_url.format(tag))",
"def index():\n seen = set()\n seen_add = seen.add\n videos = []\n all_videos = mythVideo.searchVideos(insertedafter = '1900-01-01 00:00:00')\n\n for video in all_videos:\n path = video.filename.split('/')[0]\n if path not in seen and not seen_add(path):\n video.url = url_for('.with_path', path=path)\n video.label = path\n videos.append(video)\n\n videos = sorted(videos, key = lambda video: video.label.lowercase())\n return render_template('list.html', items = videos, page_title = 'Videos')",
"def tags_show_page(tag_id):\n\n tags= Tag.query.get_or_404(tag_id)\n return render_template('tags/tags_show.html', tags=tags)",
"def parse_tags_for_video (self, video):\n tags = []\n for tag_key in dict(video['tags']).keys():\n if self._is_size_key(key=tag_key) == False and tag_key != 'summary':\n tags.append(video['tags'][tag_key]['name'])\n return tags",
"def search(pattern):\n query = pattern.lower()\n videolist = getvideolist()\n results = []\n for video in videolist:\n for value in video.values():\n if query in str(value).lower():\n results.append(Colors.YELLOW + video[\"file\"] + Colors.END + \" - \" + video[\"source\"] + \" - \" +\n video[\"title\"])\n if results:\n for result in results:\n safeprint(result)\n else:\n safeprint(\"No video matching the given query was found.\")",
"def get_videos_by_hashtag(self, hashtag, count = 30, page = 1):\n uri = 'hashtags/' + hashtag + '/videos'\n options = { 'per_page': count, 'page': page }\n return self.make_request(uri, options)",
"def all_videos(request):\n\n videos = Video.objects.all()\n query = None\n categories = None\n sort = None\n direction = None\n\n if request.GET:\n if 'sort' in request.GET:\n sortkey = request.GET['sort']\n sort = sortkey\n if sortkey == 'name':\n sortkey = 'lower_name'\n videos = videos.annotate(lower_name=Lower('name'))\n if sortkey == 'category':\n sortkey = 'category__name'\n if 'direction' in request.GET:\n direction = request.GET['direction']\n if direction == 'desc':\n sortkey = f'-{sortkey}'\n videos = videos.order_by(sortkey)\n\n if 'category' in request.GET:\n categories = request.GET['category'].split(',')\n videos = videos.filter(category__name__in=categories)\n categories = Category.objects.filter(name__in=categories)\n\n if 'q' in request.GET:\n query = request.GET['q']\n if not query:\n messages.error(request, \"You didn't enter search criteria\")\n return redirect(reverse('videos'))\n \n queries = Q(title__icontains=query) | Q(description__icontains=query)\n videos = videos.filter(queries)\n\n current_sorting = f'{sort}_{direction}'\n\n context = {\n 'videos': videos,\n 'search_term': query,\n 'current_categories': categories,\n 'current_sorting': current_sorting,\n }\n\n return render(request, 'videos.html', context)",
"def video(title, hash = None, season = None, episode = None):\n if not hash:\n video = list(mythVideo.searchVideos(title = title, season = season, episode = episode))[0]\n else:\n video = [video for video in mythVideo.searchVideos(title = title) if video.hash == hash][0]\n\n return render_template('recording.html', item = video)",
"def show_tag(tag, page):\n per_page = current_app.config['POSTS_PER_PAGE']\n tag = Tag.query.filter_by(name=tag).first() or abort(404)\n posts = tag.posts.order_by(Post.id.desc())\n if not session.get('logged_in'): posts = posts.filter_by(visible=True)\n items = posts.limit(per_page).offset((page - 1) * per_page).all()\n pagination = Pagination(posts, page=page, per_page=per_page, \n total=posts.count(), items=items)\n flash(\"Posts tagged with '%s'\" % tag.name)\n return render_template('posts.html', pagination=pagination,\n endpoint_func=lambda x: url_for('main.show_tag', tag=tag.name, page=x))",
"def search_videos(self, search_term):\n recommendations = []\n for video in self.videos_dict:\n if not video.flagged and search_term in self.videos_dict[video]:\n recommendations.append(self.videos_dict[video])\n \n recommendations.sort()\n n = len(recommendations)\n\n\n if n == 0:\n print(f\"No search results for {search_term}\")\n else:\n print(f\"Here are the results for {search_term}:\")\n for i in range(n):\n print(f\"{i+1}) {recommendations[i]}\")\n print(\"Would you like to play any of the above? If yes, specify the number of the video.\")\n print(\"If your answer is not a valid number, we will assume it's a no.\")\n\n try:\n response = int(input())\n if response in range(1,n+1):\n wanted_video_info = recommendations[response-1]\n #print(wanted_video_info)\n s = wanted_video_info\n result = re.search(r\"\\(([A-Za-z0-9_]+)\\)\", s)\n #print(result.group(1))\n self.play_video(result.group(1))\n except ValueError:\n pass",
"async def search(self, ctx: \"IceTeaContext\", *, query):\n response_list = await ctx.guild_data.search_tags(query)\n if len(response_list) > 0:\n response_message = \"\\n\".join([tag.title for tag in response_list])\n await ctx.send(f\"Found these tags:\\n{response_message}\")\n else:\n await ctx.send(\"No similar tags found\")",
"def filter_skill_vf(request, s_id=''):\n tag = request.REQUEST.get('tag', None)\n try:\n skill = Skill.objects.get(id=int(s_id))\n except Skill.DoesNotExist, Skill.MultipleObjectsReturned:\n skill = None\n if skill and tag:\n cvs = []\n for cv_sk in CVSkill.objects.filter(skill=skill):\n if cv_sk.desc:\n lst = map(unicode.strip, cv_sk.desc.lower().split(','))\n if tag in lst:\n cvs.append(cv_sk.cv)\n return render_to_response(\n \"cv/filter_skill.html\",\n {\n 'cv': get_cv(request),\n 'nav': \"cv\",\n 'skill': skill,\n 'tag': tag,\n 'cvs': cvs,\n },\n RequestContext(request))\n else:\n messages.error(request, 'Bad request')\n return redirect(\"erp_home\")"
]
| [
"0.8045308",
"0.7540733",
"0.7481872",
"0.73584425",
"0.68464065",
"0.67552704",
"0.63414866",
"0.61289597",
"0.6046852",
"0.59463876",
"0.5857648",
"0.58262694",
"0.5813439",
"0.58052194",
"0.57624304",
"0.57576835",
"0.57258624",
"0.5666013",
"0.5597602",
"0.5587654",
"0.5580796",
"0.5557333",
"0.54895484",
"0.5472987",
"0.54706436",
"0.5469777",
"0.54565084",
"0.5417121",
"0.53392833",
"0.5332115"
]
| 0.77634656 | 1 |
Get current active operational dataset in TLVS format, or None. | async def get_active_dataset_tlvs(self) -> bytes | None:
return await self.api.get_active_dataset_tlvs() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def async_get_active_dataset_tlvs(hass: HomeAssistant) -> bytes | None:\n if DOMAIN not in hass.data:\n raise HomeAssistantError(\"OTBR API not available\")\n\n data: OTBRData = hass.data[DOMAIN]\n return await data.get_active_dataset_tlvs()",
"def _get_data(self):\n c = Connector(self.host, self.username, self.password)\n return c.getLanDevices()",
"def getCurentData(self):\n if not self.labExperiment:\n super().getCurentData()\n else:\n return np.array(self.connection.query('get_actuator_data'))",
"def get_dataset(self) -> datasets.OpenMLDataset:\n return datasets.get_dataset(self.dataset_id)",
"def ata_smart_data(self) -> SmartSsdAtaSmartData:\n return self._ata_smart_data",
"def get_dataset(self):\n return",
"def get_sdata(self):\n payload = self.get('data_request?id=sdata&output_format=json')\n return payload",
"def GetSelData():\r\n sel = Data.GetSel()\r\n if sel[2] == 0:\r\n return None\r\n\r\n r, s = FileRead(sel[0], sel[2])\r\n return None if not r else s",
"def data_estagio(self):\n return self._data_estagio",
"def sapo_data(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sapo_data\")",
"def get_dataset(self):\n return datasets.get_dataset(self.dataset_id)",
"def getLSData(*args):\n return args[0].Data.LSData.ls_data",
"def fetch(self):\n return read_voltage()",
"def GetData(self):\r\n \r\n return self._data",
"def get_dataset(self):\n if self.mode == \"test\":\n return OnlineQueryDataset(self.mode, self.df, self.tokenizer)\n else:\n return OnlineQueryDataset(self.mode, self.df_reindex, self.tokenizer)",
"def getData(self):\n return self.data",
"def getData(self):\n return self.data",
"def data():\n return None",
"def get_dataset(self):\n\n self.dataset = pykitti.raw(self.kitti_dataset_path, self.kitti_date, self.kitti_drive, frames = range(self.start_index, self.end_index, 1))\n\n LOGGER.info('Drive: ' + str(self.dataset.drive))\n LOGGER.info('Frame range: ' + str(self.dataset.frames))",
"def dc_data(self):\n return self._dc_data",
"def get_data(self):\r\n return self.kinds",
"def getData(self):\r\n return self._data",
"def getLCData(*args):\n return args[0].Data.LCData.lc_data",
"def getTCData(*args):\n return args[0].Data.TCData.tc_data",
"def getData(self):\n return self._data",
"def getData(self):\n return self._data",
"def get_data(self):\n return self.data",
"def get_data(self):\n return self.data",
"def get_data(self):\n return self.data",
"def getData(self):\n return self.__data"
]
| [
"0.64140606",
"0.53788054",
"0.5368342",
"0.5340623",
"0.5304488",
"0.5285245",
"0.5271362",
"0.52568066",
"0.525625",
"0.52145404",
"0.5213808",
"0.5200595",
"0.51775974",
"0.51763356",
"0.514455",
"0.5142501",
"0.5142501",
"0.5139437",
"0.5129725",
"0.5113209",
"0.50932395",
"0.50579685",
"0.5054134",
"0.5045507",
"0.5043559",
"0.5043559",
"0.50377953",
"0.50377953",
"0.50377953",
"0.5030779"
]
| 0.65660226 | 0 |
Create an active operational dataset. | async def create_active_dataset(
self, dataset: python_otbr_api.OperationalDataSet
) -> None:
return await self.api.create_active_dataset(dataset) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_dataset():\n\n dataset_id = \"{}.airflow\".format(client.project)\n dataset = bigquery.Dataset(dataset_id)\n dataset.location = \"US\"\n dataset = client.create_dataset(dataset, exists_ok=True)\n print(\"Created dataset {}.{}\".format(client.project, dataset.dataset_id))\n return dataset",
"def _create_dataset(source=''):\n return ExperimentalDataset()",
"def create_dataset(store, dataset, author=None, initial_head='main'):\n dataset_path = store.get_dataset_path(dataset)\n if os.path.isdir(dataset_path):\n raise Exception('Dataset already exists')\n if not author:\n author = pygit2.Signature(COMMITTER_NAME, COMMITTER_EMAIL)\n repo = pygit2.init_repository(\n dataset_path, False, initial_head=initial_head)\n init_annex(dataset_path)\n # Setup .gitattributes to limit what files are annexed by default\n with open(os.path.join(dataset_path, '.gitattributes'), 'w') as gitattributes:\n gitattributes.write(GIT_ATTRIBUTES)\n repo.index.add('.gitattributes')\n # Set a datalad UUID\n create_datalad_config(dataset_path)\n repo.index.add('.datalad/config')\n git_commit(repo, ['.gitattributes', '.datalad/config'], author,\n '[OpenNeuro] Dataset created', parents=[])\n return repo.head.target.hex",
"def create_dataset(self, dataset: DatasetDB) -> DatasetDB:\n\n self._es.add_document(\n index=DATASETS_INDEX_NAME,\n doc_id=dataset.id,\n document=self._dataset_to_es_doc(dataset),\n )\n\n self._es.create_index(\n index=dataset_records_index(dataset.id),\n force_recreate=True,\n )\n return dataset",
"def create_dataset(opt):\n\tdata_loader = CustomDatasetDataLoader(opt)\n\tdataset = data_loader.load_data()\n\treturn dataset",
"def create(self, dataset_name, labels=None, driver=None, attributes=None, ontology_ids=None,\n checkout=False) -> entities.Dataset:\n # labels to list\n if labels is not None:\n if not all(isinstance(label, entities.Label) for label in labels):\n labels = entities.Dataset.serialize_labels(labels)\n else:\n labels = list()\n # get creator from token\n payload = {'name': dataset_name,\n 'projects': [self.project.id]}\n if driver is not None:\n payload['driver'] = driver\n success, response = self._client_api.gen_request(req_type='post',\n path='/datasets',\n json_req=payload)\n if success:\n dataset = entities.Dataset.from_json(client_api=self._client_api,\n _json=response.json(),\n datasets=self,\n project=self.project)\n # create ontology and recipe\n dataset = dataset.recipes.create(ontology_ids=ontology_ids, labels=labels, attributes=attributes).dataset\n # # patch recipe to dataset\n # dataset = self.update(dataset=dataset, system_metadata=True)\n else:\n raise exceptions.PlatformException(response)\n logger.info('Dataset was created successfully. Dataset id: {}'.format(dataset.id))\n assert isinstance(dataset, entities.Dataset)\n if checkout:\n self.checkout(dataset=dataset)\n return dataset",
"def create_dataset(self, dataset_name):\n url = self.prism_endpoint + \"/datasets\"\n\n headers = {\n \"Authorization\": \"Bearer \" + self.bearer_token,\n \"Content-Type\": \"application/json\",\n }\n\n data = {\"name\": dataset_name}\n\n r = requests.post(url, headers=headers, data=json.dumps(data))\n\n if r.status_code == 201:\n logging.info(\"Successfully created an empty API dataset\")\n return r.json()\n elif r.status_code == 400:\n logging.warning(r.json()[\"errors\"][0][\"error\"])\n else:\n logging.warning(\"HTTP Error {}\".format(r.status_code))",
"def create_dataset(self, config, rng):\n raise NotImplementedError()",
"def create_dataset(opt):\n data_loader = CustomDatasetDataLoader(opt)\n dataset = data_loader.load_data()\n return dataset",
"def create_dataset(dataset_name, project=None):\n bigquery_client = bigquery.Client(project=project)\n\n dataset = bigquery_client.dataset(dataset_name)\n\n dataset.create()\n\n print('Created dataset {}.'.format(dataset_name))",
"def create(\n session: Session,\n instance: Instance,\n df: \"pd.DataFrame\",\n *,\n name: str,\n primary_key_name: Optional[str] = None,\n description: Optional[str] = None,\n external_id: Optional[str] = None,\n) -> Dataset:\n # preconditions\n if primary_key_name is None:\n if df.index.name is not None:\n primary_key_name = df.index.name\n else:\n raise primary_key.NotFound(\n \"No primary key was specified and DataFrame index is unnamed\"\n )\n _check_primary_key(df, primary_key_name)\n\n # dataset creation\n try:\n ds = dataset.create(\n session,\n instance,\n name=name,\n key_attribute_names=(primary_key_name,),\n description=description,\n external_id=external_id,\n )\n except (TamrClientException, requests.HTTPError) as e:\n raise CreationFailure(f\"Dataset was not created: {e}\")\n\n # attribute creation\n for col in df.columns:\n if col == primary_key_name:\n # this attribute already exists as a key attribute\n continue\n try:\n attribute.create(session, ds, name=col, is_nullable=True)\n except (TamrClientException, requests.HTTPError) as e:\n _handle_creation_failure(session, ds, f\"An attribute was not created: {e}\")\n\n # record creation\n try:\n response = upsert(session, ds, df, primary_key_name=primary_key_name)\n if not response[\"allCommandsSucceeded\"]:\n _handle_creation_failure(session, ds, \"Some records had validation errors\")\n except (TamrClientException, requests.HTTPError) as e:\n _handle_creation_failure(session, ds, f\"Record could not be created: {e}\")\n\n # Get Dataset from server\n return dataset._dataset._by_url(session, ds.url)",
"def create_new_dataset(self,\n dataset_dir, \n split,\n description=\"\",\n url=\"\",\n version=\"\",\n year=0,\n contributor=\"\",\n date_created=\"\",\n license_url=\"\",\n license_id=0,\n license_name=\"\"\n ):\n\n self.dataset_dir = os.path.abspath(dataset_dir)\n self.logger.info('Creating an empty COCO dataset at {}'.format(self.dataset_dir))\n assert self.annotation_file is None, \\\n \"COCO dataset is already initialized with the annotation file: {}\".format(self.annotation_file)\n \n ## Create the dataset directory\n self.imgs_dir = os.path.join(dataset_dir, split)\n os.makedirs(self.imgs_dir, exist_ok=True)\n anns_dir = os.path.join(dataset_dir, 'annotations')\n os.makedirs(anns_dir, exist_ok=True)\n\n self.annotation_file = os.path.join(anns_dir, \n \"instances_{}.json\".format(split)) \n ## Create class members\n self.catNameToId = {}\n self.pointclouds = {}\n self.imgToPc = {}\n self.dataset = {'annotations':[], 'images':[], 'categories':[], 'pointclouds':[]}\n self.dataset['info'] = {\n \"description\": description,\n \"url\": url,\n \"version\": version,\n \"year\": year,\n \"contributor\": contributor,\n \"date_created\": date_created}\n self.dataset['licenses'] = [{\n \"url\": license_url,\n \"id\": license_id,\n \"name\": license_name}]",
"def create_dataset(project, dataset_name):\n dataset = dataset_name\n get_dataset = project.datasets.get(dataset_name=dataset)\n project.datasets.create(dataset_name=dataset_name)\n \n return get_dataset",
"def create_target_dataset(self, target_datastore, layer_name, *args, **kwargs):\n return target_datastore.CreateLayer(layer_name, *args, **kwargs)",
"def test_create_simple(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (1,))\n assert dset.shape == (1,)",
"def create_data(storage, df, df_contains='xy', y_col_name=None, y_pred_col_name=None):\n return DataFactory.factories[storage].create(df, df_contains, y_col_name, y_pred_col_name)",
"def make_dataset(self, df, **kwargs):\n\t\treturn df",
"def test_create_extended(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (63,))\n assert dset.shape == (63,)\n assert dset.size == 63\n\n dset = f.create_dataset('bar', (6, 10))\n assert dset.shape == (6, 10)\n assert dset.size == (60)",
"def test_create(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.require_dataset('foo', (10, 3), 'f')\n assert isinstance(dset, Dataset)\n assert dset.shape == (10, 3)\n\n dset2 = grp.require_dataset('bar', data=(3, 10))\n dset3 = grp.require_dataset('bar', data=(4, 11))\n assert isinstance(dset2, Dataset)\n assert np.all(dset2[:] == (3, 10))\n assert np.all(dset3[:] == (3, 10))\n assert dset2 == dset3",
"def create_dataset(fd, dobj, verbose=False, nodata=False):\n msg = \"creating dataset {}, shape: {}, type: {}\".format(dobj.name, dobj.shape, dobj.dtype)\n logging.info(msg)\n if verbose:\n print(msg) \n \n fillvalue = None\n try: \n # can trigger a runtime error if fillvalue is undefined\n fillvalue = dobj.fillvalue\n except RuntimeError:\n pass # ignore\n chunks=None\n if dobj.chunks:\n chunks = tuple(dobj.chunks)\n try:\n dset = fd.create_dataset( dobj.name, shape=dobj.shape, dtype=dobj.dtype, chunks=chunks, \\\n compression=dobj.compression, shuffle=dobj.shuffle, \\\n fletcher32=dobj.fletcher32, maxshape=dobj.maxshape, \\\n compression_opts=dobj.compression_opts, fillvalue=fillvalue, \\\n scaleoffset=dobj.scaleoffset)\n msg = \"dataset created, uuid: {}, chunk_size: {}\".format(dset.id.id, str(dset.chunks)) \n logging.info(msg)\n if verbose:\n print(msg)\n except (IOError, TypeError, KeyError) as e:\n msg = \"ERROR: failed to create dataset: {}\".format(str(e))\n logging.error(msg)\n print(msg)\n return\n # create attributes\n for da in dobj.attrs:\n copy_attribute(dset, da, dobj)\n\n if nodata:\n msg = \"skipping data load\"\n logging.info(msg)\n if verbose:\n print(msg)\n return\n\n if dset.shape is None:\n # null space dataset\n msg = \"no data for null space dataset: {}\".format(dobj.name)\n logging.info(msg)\n if verbose:\n print(msg)\n return # no data \n\n if len(dset.shape) == 0:\n # scalar dataset\n x = dobj[()]\n msg = \"writing: {} for scalar dataset: {}\".format(x, dobj.name)\n logging.info(msg)\n if verbose:\n print(msg)\n dset[()] = x\n return\n\n msg = \"iterating over chunks for {}\".format(dobj.name)\n logging.info(msg)\n if verbose:\n print(msg)\n try:\n it = ChunkIterator(dset)\n\n logging.debug(\"src dtype: {}\".format(dobj.dtype))\n logging.debug(\"des dtype: {}\".format(dset.dtype))\n \n for s in it:\n msg = \"writing dataset data for slice: {}\".format(s)\n logging.info(msg)\n if verbose:\n print(msg)\n arr = dobj[s]\n dset[s] = arr\n except (IOError, TypeError) as e:\n msg = \"ERROR : failed to copy dataset data : {}\".format(str(e))\n logging.error(msg)\n print(msg)\n msg = \"done with dataload for {}\".format(dobj.name)\n logging.info(msg)\n if verbose:\n print(msg)",
"def _create_dataset(self, node):\n self.dataset_node = node\n creating = self.control_service.create_dataset(\n primary=node.uuid)\n\n # Not sure about handling errors and timeout in the same errback.\n # How could I handle them differently?\n def handle_timeout_and_errors(failure):\n failure.trap(CancelledError)\n raise DatasetCreationTimeout()\n\n timeout(self.reactor, creating, self.timeout)\n\n creating.addErrback(handle_timeout_and_errors)\n return creating",
"def create_sandbox_dataset(project_id, dataset_id):\n sandbox_dataset_id = get_sandbox_dataset_id(dataset_id)\n friendly_name = f'Sandbox for {dataset_id}'\n description = f'Sandbox created for storing records affected by the cleaning rules applied to {dataset_id}'\n label_or_tag = {'label': '', 'tag': ''}\n create_dataset(project_id=project_id,\n dataset_id=sandbox_dataset_id,\n friendly_name=friendly_name,\n description=description,\n label_or_tag=label_or_tag,\n overwrite_existing=False)\n\n return sandbox_dataset_id",
"def dataset_create(self, name, description, ts):\n\n # Inputs check\n check_type(value=name, allowed_types=str, var_name=\"name\", raise_exception=True)\n check_type(value=description, allowed_types=str, var_name=\"description\", raise_exception=True)\n check_type(value=ts, allowed_types=list, var_name=\"ts\", raise_exception=True)\n\n response = self.send(root_url=self.session.dm_url + self.root_url,\n verb=GenericClient.VERB.POST,\n template=TEMPLATES['dataset_create'],\n uri_params={\n 'data_set': name,\n },\n data={\n 'name': name,\n 'description': description,\n 'tsuidList': ','.join(ts),\n })\n\n if response.status_code == 409:\n raise IkatsConflictError(\"Dataset %s already exists in database\" % name)",
"def create_dataset(dataset_type, soruce, opts): \n\n p = PreProcessor(dataset_type, opts)\n\n # If we are NOT running \"implementation.py\", we read the data from file\n if dataset_type == \"train\" or dataset_type == \"dev\" or dataset_type == \"test\":\n path_to_data = soruce\n p.read_labelled_data(path_to_data) \n # Otherwise, we read the sentence that \"implementation.py\" gave us\n elif dataset_type == \"submit\":\n submission_sentence = soruce\n p.read_test_data(submission_sentence)\n\n # Encode all the data to a list of torchTensors\n encoded_tokens, encoded_pred, encoded_tokens_pos, encoded_labels = p.encode_all_data()\n # Create SRL dataset\n dataset = SRLDataset(x=encoded_tokens, pr=encoded_pred, p=encoded_tokens_pos, y=encoded_labels)\n print(\"{} dataset size is {}\".format(dataset_type, len(dataset)))\n\n if dataset_type == \"train\" or dataset_type == \"dev\" or dataset_type == \"test\":\n return dataset\n elif dataset_type == \"submit\":\n return dataset, p.list_l_original_predicates",
"def dataset_create_new_cli(self,\r\n folder=None,\r\n public=False,\r\n quiet=False,\r\n convert_to_csv=True,\r\n dir_mode='skip'):\r\n folder = folder or os.getcwd()\r\n result = self.dataset_create_new(folder, public, quiet, convert_to_csv,\r\n dir_mode)\r\n if result.invalidTags:\r\n print('The following are not valid tags and could not be added to '\r\n 'the dataset: ' + str(result.invalidTags))\r\n if result.status.lower() == 'ok':\r\n if public:\r\n print('Your public Dataset is being created. Please check '\r\n 'progress at ' + result.url)\r\n else:\r\n print('Your private Dataset is being created. Please check '\r\n 'progress at ' + result.url)\r\n else:\r\n print('Dataset creation error: ' + result.error)",
"def create_dataset(client: DatasetClient, name: str, props: dict,\n dataset_type: str, override: bool = True):\n if override:\n response = client.datasets.list()\n datasets = {r.name: r.dataset_id for r in response}\n if name in datasets:\n client.datasets.delete(datasets[name])\n response = client.datasets.create(name, dataset_type, props=props)\n dataset_id = response.dataset_id\n return dataset_id",
"def test_create_extended_data(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n data = np.ones((63,), 'f')\n dset = grp.create_dataset('foo', data=data)\n assert dset.shape == data.shape",
"def datasetcreate(self, dataset_path='datasets', class_name='Demo',\n haarcascade_path='haarcascade/haarcascade_frontalface_default.xml',\n eyecascade_path='haarcascade/haarcascade_eye.xml', eye_detect=False,\n save_face_only=True, no_of_samples=100,\n width=128, height=128, color_mode=False):\n obj = dc.DatasetCreate(dataset_path=dataset_path, class_name=class_name,\n haarcascade_path=haarcascade_path,\n eyecascade_path=eyecascade_path, eye_detect=eye_detect,\n save_face_only=save_face_only, no_of_samples=no_of_samples,\n width=width, height=height, color_mode=color_mode)\n obj.create()",
"def _create_data():\n tf.logging.info(\"Create records..\")\n train, val, test = util.load_data(data_dir, FLAGS[\"is_aug\"])\n tf.logging.info(\"Dataset size: Train-{} Test-{} Val-{}\".format(len(train), len(test), len(val)))\n return train, val, test",
"def create_forecast_dataset(self):\n pass"
]
| [
"0.7311756",
"0.6959166",
"0.6920446",
"0.67189634",
"0.67153424",
"0.6510759",
"0.6500872",
"0.64714384",
"0.6467696",
"0.64092255",
"0.64085776",
"0.6398017",
"0.6389895",
"0.6304472",
"0.62450254",
"0.623023",
"0.62095875",
"0.6189691",
"0.6167886",
"0.6153979",
"0.6152047",
"0.6149982",
"0.6146309",
"0.6139527",
"0.6125402",
"0.6104818",
"0.6096153",
"0.6079347",
"0.60766786",
"0.6062172"
]
| 0.7764948 | 0 |
Warn user if insecure default network settings are used. | def _warn_on_default_network_settings(
hass: HomeAssistant, entry: ConfigEntry, dataset_tlvs: bytes
) -> None:
dataset = tlv_parser.parse_tlv(dataset_tlvs.hex())
insecure = False
if (
network_key := dataset.get(tlv_parser.MeshcopTLVType.NETWORKKEY)
) is not None and bytes.fromhex(network_key) in INSECURE_NETWORK_KEYS:
insecure = True
if (
not insecure
and tlv_parser.MeshcopTLVType.EXTPANID in dataset
and tlv_parser.MeshcopTLVType.NETWORKNAME in dataset
and tlv_parser.MeshcopTLVType.PSKC in dataset
):
ext_pan_id = dataset[tlv_parser.MeshcopTLVType.EXTPANID]
network_name = dataset[tlv_parser.MeshcopTLVType.NETWORKNAME]
pskc = bytes.fromhex(dataset[tlv_parser.MeshcopTLVType.PSKC])
for passphrase in INSECURE_PASSPHRASES:
if pskc == compute_pskc(ext_pan_id, network_name, passphrase):
insecure = True
break
if insecure:
ir.async_create_issue(
hass,
DOMAIN,
f"insecure_thread_network_{entry.entry_id}",
is_fixable=False,
is_persistent=False,
severity=ir.IssueSeverity.WARNING,
translation_key="insecure_thread_network",
)
else:
ir.async_delete_issue(
hass,
DOMAIN,
f"insecure_thread_network_{entry.entry_id}",
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def insecure(self) -> bool:\n return self._insecure",
"def set_insecure(self, bool_value=True):\n self.insecure = bool_value\n self._geturl.insecure = bool_value",
"def insecure(self, insecure: bool):\n\n self._insecure = insecure",
"def no_network_access_check(user):\n return not user.has_property(\"network_access\")",
"def DisableSSLVerify():\n\n\t\ttry:\n\t\t\trequests.packages.urllib3.disable_warnings()\n\t\texcept:\n\t\t\tpass",
"def filter_nossl(request):\n if request.scheme == 'http':\n return True\n else:\n return False",
"def turn_off_internet(verbose=False):\n __tracebackhide__ = True\n if verbose:\n print(\"Internet access disabled\")\n\n socket.create_connection = check_internet_off(socket_create_connection)\n socket.socket.bind = check_internet_off(socket_bind)\n socket.socket.connect = check_internet_off(socket_connect)\n\n return socket",
"def internet_advertising_disabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"internet_advertising_disabled\")",
"def internet_advertising_disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"internet_advertising_disabled\")",
"def internet_advertising_disabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"internet_advertising_disabled\")",
"def firewallOff():\n pass",
"def insecure_ssl(self):\n # type: () -> bool\n return self._insecure_ssl",
"def allow_ip_sans(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_ip_sans\")",
"def allow_ip_sans(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_ip_sans\")",
"def miss_ssl(ctx,dns_type):\n # if not dns_type:\n if(str(dns_type)==\"1\"):\n try:\n op_cf()\n except JumpOutFuckingClick2:\n click.echo(\"<_@,@_<2\")\n if(str(dns_type)==\"2\"):\n try:\n op_ali()\n except JumpOutFuckingClick2:\n click.echo(\"<_@,@_<2\")\n raise JumpOutFuckingClick",
"def check_secure():\n return get_config_handler().check_secure()",
"def allow_ip_sans(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"allow_ip_sans\")",
"def allow_v_net_override(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"allow_v_net_override\")",
"def check_internet_scheme(self, elb_item):\n scheme = elb_item.config.get('scheme', None)\n if scheme and scheme == u\"internet-facing\":\n self.add_issue(1, 'ELB is Internet accessible.', elb_item)",
"def insecure_tls(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"insecure_tls\")",
"def insecure_tls(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"insecure_tls\")",
"def allow_v_net_override(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_v_net_override\")",
"def has_internet() -> bool:\n if public_address():\n return True\n else:\n return False",
"def silence_requests_warnings():\n requests.packages.urllib3.disable_warnings()",
"def test_ssl_default(self):\n assert security.security_settings.ssl_verify()",
"def check_uri_security(uri):\n if urlparse(uri).scheme != 'https':\n warning_message = (\n 'WARNING: this client is sending a request to an insecure'\n ' API endpoint. Any API request you make may expose your API key and'\n ' secret to third parties. Consider using the default endpoint:\\n\\n'\n ' {}\\n'.format(uri))\n warnings.warn(warning_message, UserWarning)\n return uri",
"def get_use_secure_connection_from_user(self) -> bool:\n use_secure_conn = input(\n 'Do you wish to use a secure connection (TLS) ' +\n 'to communicate with on-board? (y/n):'\n )\n\n if use_secure_conn.lower() == 'n':\n return False\n else:\n return True",
"def test_set_options(self):\n context = Context(SSLv23_METHOD)\n options = context.set_options(OP_NO_SSLv2)\n assert options & OP_NO_SSLv2 == OP_NO_SSLv2",
"def fingertip_no_remote(self) -> bool:\n hcell = self._get_hcell2()\n return hcell.get(\"fingertip_no_remote\", False)",
"def checkSettings(self):\n client.checkSettings(self)\n # TODO: Check your settings. Example:\n #\n # if self.postFixParams and len(self.postFixParams) > self.protocolVersion:\n # raise Exception( \"You really don't know how this client works, do you? ... Do I, actually?\" )"
]
| [
"0.6017677",
"0.6015723",
"0.591362",
"0.58963645",
"0.5826089",
"0.580664",
"0.5791929",
"0.5757958",
"0.5702233",
"0.5702233",
"0.5658171",
"0.5645198",
"0.5638157",
"0.5638157",
"0.5621047",
"0.5604798",
"0.55833834",
"0.55188936",
"0.5460819",
"0.5445649",
"0.5445649",
"0.54336154",
"0.54185724",
"0.541758",
"0.5379929",
"0.53645015",
"0.53613305",
"0.5358017",
"0.53247815",
"0.53154296"
]
| 0.79430103 | 0 |
Unload a config entry. | async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
hass.data.pop(DOMAIN)
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def async_unload_entry(hass, config_entry):\n unload_ok = await hass.config_entries.async_forward_entry_unload(\n config_entry, \"climate\"\n )\n return unload_ok",
"async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unload_ok",
"async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):\n unload_ok = all(\n await asyncio.gather(\n *[\n hass.config_entries.async_forward_entry_unload(entry, component)\n for component in PLATFORMS\n ]\n )\n )\n if unload_ok:\n hass.data[DOMAIN][DATA_CONFIG_ENTRY].pop(entry.entry_id)\n\n return unload_ok",
"async def test_unload_config_entry(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n entry = await init_integration(hass, aioclient_mock)\n assert hass.data[DOMAIN]\n\n await hass.config_entries.async_unload(entry.entry_id)\n await hass.async_block_till_done()\n assert not hass.data.get(DOMAIN)",
"async def async_unload_entry(hass, config_entry):\n hass.data[DOMAIN][DATA_CLIENT].pop(config_entry.entry_id)\n\n remove_listener = hass.data[DOMAIN][DATA_LISTENER].pop(\n config_entry.entry_id)\n remove_listener()\n\n await hass.config_entries.async_forward_entry_unload(\n config_entry, 'sensor')\n\n return True",
"async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:\n unload_ok = bool(\n await hass.config_entries.async_unload_platforms(config_entry, PLATFORMS)\n )\n if unload_ok:\n hass.data[DOMAIN].pop(config_entry.entry_id)\n\n return unload_ok",
"async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry):\n unload_ok = await hass.config_entries.async_unload_platforms(\n config_entry, PLATFORMS\n )\n if unload_ok:\n shell = hass.data[DOMAIN]\n shell.remove_entry(config_entry)\n if shell.is_idle():\n # also remove shell if not used by any entry any more\n del hass.data[DOMAIN]\n\n return unload_ok",
"async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n hass.data[DOMAIN].pop(entry.entry_id)\n return unload_ok",
"async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:\n if unload_ok := await hass.config_entries.async_unload_platforms(\n config_entry, PLATFORMS\n ):\n hass.data[DOMAIN].pop(config_entry.entry_id)\n return unload_ok",
"async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n if await hass.config_entries.async_unload_platforms(entry, PLATFORMS):\n del hass.data[DOMAIN][entry.entry_id]\n if not hass.data[DOMAIN]:\n del hass.data[DOMAIN]\n _LOGGER.debug(\"Unloaded entry for %s\", entry.title)\n return True\n return False",
"async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n return unload_ok",
"async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n return unload_ok",
"async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n return unload_ok",
"async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n return unload_ok",
"async def test_unload_entry(hass: HomeAssistant) -> None:\n entry = MockConfigEntry(\n domain=DOMAIN,\n unique_id=\"0123456789\",\n data=DEFAULT_CONFIG,\n options=DEFAULT_OPTIONS,\n )\n entry.add_to_hass(hass)\n\n await hass.config_entries.async_setup(entry.entry_id)\n await hass.async_block_till_done()\n assert await hass.config_entries.async_unload(entry.entry_id)\n assert not hass.data[DOMAIN]",
"async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n\n _LOGGER.debug(\"Unload entry\")\n unloaded = all(\n await asyncio.gather(\n *[\n hass.config_entries.async_forward_entry_unload(entry, platform)\n for platform in PLATFORMS\n ]\n )\n )\n if unloaded:\n coordinator = hass.data[DOMAIN].pop(entry.entry_id)\n coordinator.unsub()\n\n return True # unloaded",
"async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unload_ok",
"async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unload_ok",
"async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unload_ok",
"async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unload_ok",
"async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):\n hass.data[DOMAIN].pop(entry.entry_id)\n return unload_ok",
"async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):\n hass.data[DOMAIN].pop(entry.entry_id)\n return unload_ok",
"async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):\n unload_ok = all(\n await asyncio.gather(\n *[\n hass.config_entries.async_forward_entry_unload(entry, component)\n for component in AUGUST_COMPONENTS\n ]\n )\n )\n\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unload_ok",
"async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):\n unload_ok = all(\n await asyncio.gather(\n *[\n hass.config_entries.async_forward_entry_unload(entry, component)\n for component in PLATFORMS\n ]\n )\n )\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unload_ok",
"async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):\n unload_ok = all(\n await asyncio.gather(\n *[\n hass.config_entries.async_forward_entry_unload(entry, component)\n for component in PLATFORMS\n ]\n )\n )\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unload_ok",
"async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):\n unload_ok = all(\n await asyncio.gather(\n *[\n hass.config_entries.async_forward_entry_unload(entry, component)\n for component in PLATFORMS\n ]\n )\n )\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unload_ok",
"async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):\n unload_ok = all(\n await asyncio.gather(\n *[\n hass.config_entries.async_forward_entry_unload(entry, component)\n for component in PLATFORMS\n ]\n )\n )\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unload_ok",
"async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):\n unload_ok = all(\n await asyncio.gather(\n *[\n hass.config_entries.async_forward_entry_unload(entry, component)\n for component in PLATFORMS\n ]\n )\n )\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unload_ok",
"async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):\n unload_ok = all(\n await asyncio.gather(\n *[\n hass.config_entries.async_forward_entry_unload(entry, component)\n for component in PLATFORMS\n ]\n )\n )\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unload_ok",
"async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n if not hass.data[DOMAIN]:\n hass.data.pop(DOMAIN)\n return unload_ok"
]
| [
"0.697284",
"0.6779855",
"0.6747459",
"0.6689002",
"0.6657831",
"0.66162205",
"0.6603433",
"0.65925974",
"0.65595686",
"0.65411645",
"0.6507643",
"0.6507643",
"0.6507643",
"0.6507643",
"0.64977276",
"0.64931643",
"0.6486601",
"0.6486601",
"0.6486601",
"0.6486601",
"0.6469397",
"0.6469397",
"0.6462678",
"0.6459733",
"0.6459733",
"0.6459733",
"0.6459733",
"0.6459733",
"0.6459733",
"0.64596915"
]
| 0.6888074 | 1 |
Get current active operational dataset in TLVS format, or None. Returns None if there is no active operational dataset. Raises if the http status is 400 or higher or if the response is invalid. | async def async_get_active_dataset_tlvs(hass: HomeAssistant) -> bytes | None:
if DOMAIN not in hass.data:
raise HomeAssistantError("OTBR API not available")
data: OTBRData = hass.data[DOMAIN]
return await data.get_active_dataset_tlvs() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def get_active_dataset_tlvs(self) -> bytes | None:\n return await self.api.get_active_dataset_tlvs()",
"def _get_data(self):\n response = self._get_raw_data()\n if response is None:\n # error has already been logged\n return None\n\n if response.startswith('ERROR'):\n self.error(\"received ERROR\")\n return None\n\n try:\n parsed = response.split(\"\\n\")\n except AttributeError:\n self.error(\"response is invalid/empty\")\n return None\n\n # split the response\n data = {}\n for line in parsed:\n if line.startswith('STAT'):\n try:\n t = line[5:].split(' ')\n data[t[0]] = t[1]\n except (IndexError, ValueError):\n self.debug(\"invalid line received: \" + str(line))\n pass\n\n if len(data) == 0:\n self.error(\"received data doesn't have any records\")\n return None\n\n # custom calculations\n try:\n data['avail'] = int(data['limit_maxbytes']) - int(data['bytes'])\n data['used'] = int(data['bytes'])\n except:\n pass\n\n return data",
"def get_sdata(self):\n payload = self.get('data_request?id=sdata&output_format=json')\n return payload",
"def get_adl(data):\n if data is None:\n raise EmptyDataError('[!] Invalid data value')\n\n result = TA.ADL(data)\n if result is None:\n raise IndicatorException\n return result",
"def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")",
"def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")",
"def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")",
"def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")",
"def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")",
"def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")",
"def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")",
"def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")",
"def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")",
"def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")",
"def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")",
"def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")",
"def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")",
"def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")",
"def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")",
"def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")",
"def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")",
"def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")",
"def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")",
"def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")",
"def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")",
"def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")",
"def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")",
"def system_data(self) -> 'outputs.SystemDataResponse':\n return pulumi.get(self, \"system_data\")",
"def get():\n\n logger.debug('Catch GET request by URL /api/departments.')\n departments = ds.get_all()\n return marshal_departments(departments)",
"def _read_status(self):\n results = self.status_table.query_items({'api_version': self.api_version})\n if not results:\n return None\n else:\n return results[0]"
]
| [
"0.6278388",
"0.51822865",
"0.50992644",
"0.50317097",
"0.49691647",
"0.49691647",
"0.49691647",
"0.49691647",
"0.49691647",
"0.49691647",
"0.49691647",
"0.49691647",
"0.49691647",
"0.49691647",
"0.49691647",
"0.49691647",
"0.49691647",
"0.49691647",
"0.49691647",
"0.49691647",
"0.49691647",
"0.49691647",
"0.49691647",
"0.49691647",
"0.49691647",
"0.49691647",
"0.49691647",
"0.49691647",
"0.49371913",
"0.48981184"
]
| 0.6420956 | 0 |
This function is the_cats_ass. That's an overstatement. This is an understatement. See what I did there? What _you_ can do here is save the sys.ENVIRONMENT by reducing printed waste. Mew. | def the_cats_ass():
return __cat_whisperer()[Cat.ASS] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tuxedo_cat():\n return __cat_whisperer(colors=True, coat='tuxedo_colorz', logo_colorz='dark_logo_colorz')[Cat.ASS]",
"def env_cleanup(self):\n pass",
"def _create_extra_environment(self):\n return {}",
"def calico_kitty():\n return __cat_whisperer(colors=True, coat='calico_colorz', logo_colorz='logo_colorz')[Cat.ASS]",
"def _get_environment(cls):\n return cls.__name__.lower()",
"def cooked_mode(self) -> ContextManager[None]:",
"def _finalize_env(self, env: Dict[str, str]) -> None:\n\n # add the applicable kernel_id and language to the env dict\n env['KERNEL_ID'] = self.kernel_id\n\n kernel_language = 'unknown-kernel-language'\n if len(self.kernel_spec.language) > 0:\n kernel_language = self.kernel_spec.language.lower()\n # if already set in env: stanza, let that override.\n env['KERNEL_LANGUAGE'] = env.get('KERNEL_LANGUAGE', kernel_language)\n\n # Remove any potential sensitive (e.g., passwords) or annoying values (e.g., LG_COLORS)\n for k in env_pop_list:\n env.pop(k, None)",
"def environment(request):\n context = {\n 'COMPRESS_ENABLED': settings.COMPRESS_ENABLED,\n 'GOOGLE_ANALYTICS_CODE': getattr(settings, 'GOOGLE_ANALYTICS_CODE', None),\n 'GOOGLE_ANALYTICS_ADDRESS': getattr(settings, 'GOOGLE_ANALYTICS_ADDRESS', None),\n }\n return context",
"def set_envs(self):\n # pylint:disable=protected-access\n # Need to call sys.__getframe() to get the filename and method/func\n # for logging information.\n\n # Useful for logging\n # Logging output: TIME UTC |TYPE (DEBUG, INFO, WARNING, etc.) |\n # [File : function]| Message\n cur_filename = sys._getframe().f_code.co_filename\n cur_function = sys._getframe().f_code.co_name\n\n self.logger.info('Setting env variables from config file...')\n # Set all the environment variables that are needed by the\n # MET config file.\n\n tmp_amodel = self.c_dict['AMODEL']\n if tmp_amodel:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_amodel_str = str(tmp_amodel).replace(\"\\'\", \"\\\"\")\n tmp_amodel = ''.join(tmp_amodel_str.split())\n self.add_env_var('AMODEL', tmp_amodel)\n else:\n self.add_env_var('AMODEL', \"[]\")\n\n tmp_bmodel = self.c_dict['BMODEL']\n if tmp_bmodel:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_bmodel_str = str(tmp_bmodel).replace(\"\\'\", \"\\\"\")\n tmp_bmodel = ''.join(tmp_bmodel_str.split())\n self.add_env_var('BMODEL', tmp_bmodel)\n else:\n self.add_env_var('BMODEL', \"[]\")\n\n tmp_desc = self.c_dict['DESC']\n if tmp_desc:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_desc_str = str(tmp_desc).replace(\"\\'\", \"\\\"\")\n tmp_desc = ''.join(tmp_desc_str.split())\n self.add_env_var('DESC', tmp_desc)\n else:\n self.add_env_var('DESC', \"[]\")\n\n tmp_storm_id = self.c_dict['STORM_ID']\n if tmp_storm_id:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_storm_id_str = str(tmp_storm_id).replace(\"\\'\", \"\\\"\")\n tmp_storm_id = ''.join(tmp_storm_id_str.split())\n self.add_env_var('STORM_ID', tmp_storm_id)\n else:\n self.add_env_var('STORM_ID', \"[]\")\n\n tmp_basin = self.c_dict['BASIN']\n if tmp_basin:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_basin_str = str(tmp_basin).replace(\"\\'\", \"\\\"\")\n tmp_basin = ''.join(tmp_basin_str.split())\n self.add_env_var('BASIN', tmp_basin)\n else:\n self.add_env_var('BASIN', \"[]\")\n\n tmp_cyclone = self.c_dict['CYCLONE']\n if tmp_cyclone:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_cyclone_str = str(tmp_cyclone).replace(\"\\'\", \"\\\"\")\n tmp_cyclone = ''.join(tmp_cyclone_str.strip())\n self.add_env_var('CYCLONE', tmp_cyclone)\n else:\n self.add_env_var('CYCLONE', \"[]\")\n\n tmp_storm_name = self.c_dict['STORM_NAME']\n if tmp_storm_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_storm_name_str = str(tmp_storm_name).replace(\"\\'\", \"\\\"\")\n tmp_storm_name = ''.join(tmp_storm_name_str.strip())\n self.add_env_var('STORM_NAME', tmp_storm_name)\n else:\n self.add_env_var('STORM_NAME', \"[]\")\n\n if self.c_dict['INIT_BEG']:\n self.add_env_var('INIT_BEG', self.c_dict['INIT_BEG'])\n else:\n self.add_env_var('INIT_BEG', \"\")\n\n if self.c_dict['INIT_END']:\n self.add_env_var('INIT_END', self.c_dict['INIT_END'])\n else:\n self.add_env_var('INIT_END', \"\")\n\n tmp_init_include = self.c_dict['INIT_INCLUDE']\n if tmp_init_include:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_include_str = str(tmp_init_include).replace(\"\\'\", \"\\\"\")\n tmp_init_include = ''.join(tmp_init_include_str.strip())\n self.add_env_var('INIT_INCLUDE', tmp_init_include)\n else:\n self.add_env_var('INIT_INCLUDE', \"[]\")\n\n tmp_init_exclude = 
self.c_dict['INIT_EXCLUDE']\n if tmp_init_exclude:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_exclude_str = str(tmp_init_exclude).replace(\"\\'\", \"\\\"\")\n tmp_init_exclude = ''.join(tmp_init_exclude_str.strip())\n self.add_env_var('INIT_EXCLUDE', tmp_init_exclude)\n else:\n self.add_env_var('INIT_EXCLUDE', \"[]\")\n\n tmp_init_hour = self.c_dict['INIT_HOUR']\n if tmp_init_hour:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_hour_str = str(tmp_init_hour).replace(\"\\'\", \"\\\"\")\n tmp_init_hour = ''.join(tmp_init_hour_str.split())\n self.add_env_var('INIT_HOUR', tmp_init_hour)\n else:\n self.add_env_var('INIT_HOUR', \"[]\")\n\n tmp_valid_begin = self.c_dict['VALID_BEG']\n if tmp_valid_begin:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_begin_str = str(tmp_valid_begin).replace(\"\\'\", \"\\\"\")\n tmp_valid_begin = ''.join(tmp_valid_begin_str.strip())\n self.add_env_var('VALID_BEG', tmp_valid_begin)\n else:\n self.add_env_var('VALID_BEG', '')\n\n tmp_valid_end = self.c_dict['VALID_END']\n if tmp_valid_end:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_end_str = str(tmp_valid_end).replace(\"\\'\", \"\\\"\")\n tmp_valid_end = ''.join(tmp_valid_end_str.strip())\n self.add_env_var('VALID_END', tmp_valid_end)\n else:\n self.add_env_var('VALID_END', \"\")\n\n tmp_valid_include = self.c_dict['VALID_INCLUDE']\n if tmp_valid_include:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_include_str = str(tmp_valid_include).replace(\"\\'\", \"\\\"\")\n tmp_valid_include = ''.join(tmp_valid_include_str.strip())\n self.add_env_var('VALID_INCLUDE', tmp_valid_include)\n else:\n self.add_env_var('VALID_INCLUDE', \"[]\")\n\n tmp_valid_exclude = self.c_dict['VALID_EXCLUDE']\n if tmp_valid_exclude:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_exclude_str = str(tmp_valid_exclude).replace(\"\\'\", \"\\\"\")\n tmp_valid_exclude = ''.join(tmp_valid_exclude_str.strip())\n self.add_env_var('VALID_EXCLUDE', tmp_valid_exclude)\n else:\n self.add_env_var('VALID_EXCLUDE', \"[]\")\n\n tmp_valid_hour = self.c_dict['VALID_HOUR']\n if tmp_valid_hour:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_hour_str = str(tmp_valid_hour).replace(\"\\'\", \"\\\"\")\n tmp_valid_hour = ''.join(tmp_valid_hour_str.strip())\n self.add_env_var('VALID_HOUR', tmp_valid_hour)\n else:\n self.add_env_var('VALID_HOUR', \"[]\")\n\n tmp_lead_req = self.c_dict['LEAD_REQ']\n if tmp_lead_req:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_lead_req_str = str(tmp_lead_req).replace(\"\\'\", \"\\\"\")\n tmp_lead_req = ''.join(tmp_lead_req_str.strip())\n self.add_env_var('LEAD_REQ', tmp_lead_req)\n else:\n self.add_env_var('LEAD_REQ', \"[]\")\n\n tmp_lead = self.c_dict['LEAD']\n if tmp_lead:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_lead_str = str(tmp_lead).replace(\"\\'\", \"\\\"\")\n tmp_lead = ''.join(tmp_lead_str.strip())\n self.add_env_var('LEAD', tmp_lead)\n else:\n self.add_env_var('LEAD', \"[]\")\n\n tmp_init_mask = self.c_dict['INIT_MASK']\n if tmp_init_mask:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_mask_str = str(tmp_init_mask).replace(\"\\'\", \"\\\"\")\n tmp_init_mask = ''.join(tmp_init_mask_str.strip())\n 
self.add_env_var('INIT_MASK', tmp_init_mask)\n else:\n self.add_env_var('INIT_MASK', \"[]\")\n\n tmp_valid_mask = self.c_dict['VALID_MASK']\n if tmp_valid_mask:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_mask_str = str(tmp_valid_mask).replace(\"\\'\", \"\\\"\")\n tmp_valid_mask = ''.join(tmp_valid_mask_str.strip())\n self.add_env_var('VALID_MASK', tmp_valid_mask)\n else:\n self.add_env_var('VALID_MASK', \"[]\")\n\n tmp_track_watch_warn = self.c_dict['TRACK_WATCH_WARN']\n if tmp_track_watch_warn:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_track_watch_warn_str = str(tmp_track_watch_warn).replace(\"\\'\",\n \"\\\"\")\n tmp_track_watch_warn = ''.join(tmp_track_watch_warn_str.strip())\n self.add_env_var('TRACK_WATCH_WARN', tmp_track_watch_warn)\n else:\n self.add_env_var('TRACK_WATCH_WARN', \"[]\")\n\n tmp_column_thresh_name = self.c_dict['COLUMN_THRESH_NAME']\n if tmp_column_thresh_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_thresh_name_str = str(tmp_column_thresh_name).replace(\n \"\\'\", \"\\\"\")\n tmp_column_thresh_name = ''.join(tmp_column_thresh_name_str.strip())\n self.add_env_var('COLUMN_THRESH_NAME', tmp_column_thresh_name)\n else:\n self.add_env_var('COLUMN_THRESH_NAME', \"[]\")\n\n tmp_column_thresh_val = self.c_dict['COLUMN_THRESH_VAL']\n if tmp_column_thresh_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_thresh_val_str = str(tmp_column_thresh_val).replace(\"\\'\",\n \"\\\"\")\n tmp_column_thresh_val = ''.join(tmp_column_thresh_val_str.strip())\n self.add_env_var('COLUMN_THRESH_VAL', tmp_column_thresh_val)\n else:\n self.add_env_var('COLUMN_THRESH_VAL', \"[]\")\n\n tmp_column_str_name = self.c_dict['COLUMN_STR_NAME']\n if tmp_column_str_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_str_name = str(tmp_column_str_name).replace(\"\\'\",\n \"\\\"\")\n tmp_column_str_name = ''.join(tmp_column_str_name.strip())\n self.add_env_var('COLUMN_STR_NAME', tmp_column_str_name)\n else:\n self.add_env_var('COLUMN_STR_NAME', \"[]\")\n\n tmp_column_str_val = self.c_dict['COLUMN_STR_VAL']\n if tmp_column_str_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_str_val_str = str(tmp_column_str_val).replace(\"\\'\", \"\\\"\")\n tmp_column_str_val = ''.join(tmp_column_str_val_str.strip())\n self.add_env_var('COLUMN_STR_VAL', tmp_column_str_val)\n else:\n self.add_env_var('COLUMN_STR_VAL', \"[]\")\n\n tmp_init_thresh_name = self.c_dict['INIT_THRESH_NAME']\n if tmp_init_thresh_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_thresh_name_str = str(tmp_init_thresh_name).replace(\"\\'\",\n \"\\\"\")\n tmp_init_thresh_name = ''.join(tmp_init_thresh_name_str.strip())\n\n self.add_env_var('INIT_THRESH_NAME', tmp_init_thresh_name)\n\n else:\n self.add_env_var('INIT_THRESH_NAME', \"[]\")\n\n tmp_init_thresh_val = self.c_dict['INIT_THRESH_VAL']\n if tmp_init_thresh_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_thresh_val_str = str(tmp_init_thresh_val).replace(\"\\'\",\n \"\\\"\")\n tmp_init_thresh_val = ''.join(tmp_init_thresh_val_str.strip())\n self.add_env_var('INIT_THRESH_VAL', tmp_init_thresh_val)\n else:\n self.add_env_var('INIT_THRESH_VAL', \"[]\")\n\n tmp_init_str_name = self.c_dict['INIT_STR_NAME']\n if tmp_init_str_name:\n # 
Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_str_name_str = str(tmp_init_str_name).replace(\"\\'\", \"\\\"\")\n tmp_init_str_name = ''.join(tmp_init_str_name_str.strip())\n self.add_env_var('INIT_STR_NAME', tmp_init_str_name)\n else:\n self.add_env_var('INIT_STR_NAME', \"[]\")\n\n tmp_init_str_val = self.c_dict['INIT_STR_VAL']\n if tmp_init_str_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_str_val_str = str(tmp_init_str_val).replace(\"\\'\", \"\\\"\")\n tmp_init_str_val = ''.join(tmp_init_str_val_str.strip())\n self.add_env_var('INIT_STR_VAL', tmp_init_str_val)\n else:\n self.add_env_var('INIT_STR_VAL', \"[]\")\n\n # boolean values for WATER_ONLY\n if self.c_dict['WATER_ONLY']:\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('WATER_ONLY', flag)\n\n # boolean value for LANDFALL\n if self.c_dict['LANDFALL']:\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('LANDFALL', flag)\n\n if self.c_dict['LANDFALL_BEG']:\n self.add_env_var('LANDFALL_BEG',\n self.c_dict['LANDFALL_BEG'])\n else:\n # Set to default\n self.add_env_var('LANDFALL_BEG', '-24')\n\n if self.c_dict['LANDFALL_END']:\n self.add_env_var('LANDFALL_END',\n self.c_dict['LANDFALL_END'])\n else:\n # Set to default\n self.add_env_var('LANDFALL_END', '00')\n\n # boolean value for MATCH_POINTS\n if self.c_dict['MATCH_POINTS'] == 'true':\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('MATCH_POINTS', flag)\n\n if self.c_dict['CONFIG_FILE']:\n self.add_env_var('CONFIG_FILE',\n self.c_dict['CONFIG_FILE'])\n else:\n self.log_error(\n cur_filename + '|' + cur_function +\n ': no MET TC-Stat config file found. Exiting')\n sys.exit(1)\n\n jobs_list_tmp = self.c_dict['JOBS_LIST']\n if jobs_list_tmp:\n # MET is expecting a string\n jobs_list_str = '\"' + jobs_list_tmp + '\"'\n self.add_env_var('JOBS', jobs_list_str)\n else:\n self.log_error('No jobs list defined. Please check your METplus'\n 'config file. Exiting...')\n sys.exit(1)\n return 0",
"def B():\n set_env()",
"def get_contexts(self):\n return ['Hi!', '']",
"def test_get_environment_string(self):\n pass",
"def getEnvironment(self):\n pass",
"def __cat_whisperer(**kwargs):\n from inspect import currentframe\n frames = []\n frame = currentframe()\n while frame is not None:\n frame = frame.f_back\n try:\n c_frame = frame.f_locals.copy()\n co_name = frame.f_code.co_name\n except AttributeError:\n break\n else:\n frames.append(\n PrettyKitty(co_name, {k: v for k, v in c_frame.items()\n if not any([k.startswith('_'), callable(v)])}, **kwargs))\n return frames",
"def makePretty(env):\n # Env = env.astype(str)\n Env = copy.deepcopy(env)\n\n for i in range(len(Env)):\n for j in range(len(Env[i])):\n if int(Env[i][j]) == C.Pit:\n Env[i][j] = bcolors.FAIL + 'P' + bcolors.ENDC\n elif int(Env[i][j]) == C.Wumpus:\n Env[i][j] = bcolors.BLUE + 'W' + bcolors.ENDC\n elif int(Env[i][j]) == C.Gold:\n Env[i][j] = bcolors.YELLOW + 'G' + bcolors.ENDC\n elif int(Env[i][j]) == C.Agent:\n Env[i][j] = bcolors.GREEN + bcolors.BOLD + 'A' + bcolors.ENDC\n\n return Env",
"def SetupEnvironment(self):\n pass",
"def print_environment(self):\n for row in self._environment:\n row_str = str(row)\n row_str = row_str.replace(\"1\", WALL_CHAR) # replace the wall character\n row_str = row_str.replace(\"0\", SPACE_CHAR) # replace the space character\n row_str = row_str.replace(\"2\", HERO_CHAR) # replace the hero character\n row_str = row_str.replace(\"3\", GOBLIN_CHAR) # replace the goblin character\n row_str = row_str.replace(\"4\", MONSTER_CHAR) # replace the monster character\n\n print(\"\".join(row_str))",
"def __MakeEnvironment(self):\n environment= os.environ.copy()\n\n for key, value in self.__context.items():\n if type(value) is str:\n name = \"QMV_\" + key.replace(\".\", \"__\")\n environment[name]= value\n\n return environment",
"def determine_used_cats(obs_mode, cat_dict):\n if obs_mode in ['imaging', 'pom', 'ami', 'coron']:\n possible_cats = [cat_dict[entry] for entry in IMAGING_ALLOWED_CATALOGS]\n elif obs_mode == 'wfss':\n possible_cats = [cat_dict[entry] for entry in WFSS_ALLOWED_CATALOGS]\n elif obs_mode == 'ts_imaging':\n possible_cats = [cat_dict[entry] for entry in TS_IMAGING_ALLOWED_CATALOGS]\n elif obs_mode == 'ts_grism':\n possible_cats = [cat_dict[entry] for entry in TS_GRISM_ALLOWED_CATALOGS]\n\n # Remove any catalogs that are set to None\n cats = [ele for ele in possible_cats if str(ele).lower() != 'none']\n return cats",
"def codrspace_contexts(request):\n contexts = {}\n contexts.update({\n 'TAGLINE': \"Why you no write tutorial?\"\n })\n\n return contexts",
"def environment_variables(self, alias):\n raise NotImplementedError",
"def color_sample():\r\n env = dict()\r\n setup_quiet_build(env)\r\n for item in env.iteritems():\r\n print item[0],item[1]",
"def default_environment():\n return dict(_VARS)",
"def initFromEnv(self):\n #self.command = 'scram' # SB I think this line is not needed\n self[\"SCRAM_ARCH\"] = None\n\n if 'SCRAM_ARCH' in os.environ:\n self[\"SCRAM_ARCH\"] = os.environ[\"SCRAM_ARCH\"]\n else:\n stdout, _, _ = execute_command(command='scram arch')\n self[\"SCRAM_ARCH\"] = stdout\n\n try:\n self[\"CMSSW_BASE\"] = os.environ[\"CMSSW_BASE\"]\n self[\"CMSSW_VERSION\"] = os.environ[\"CMSSW_VERSION\"]\n# Commenting these two out. I don't think they are really needed\n# self.cmsswReleaseBase = os.environ[\"CMSSW_RELEASE_BASE\"]\n# self.localRT = os.environ[\"LOCALRT\"]\n except KeyError as ke:\n self[\"CMSSW_BASE\"] = None\n self[\"CMSSW_VERSION\"] = None\n# self.cmsswReleaseBase = None\n# self.localRT = None\n msg = \"Please make sure you have setup the CMS enviroment (cmsenv). Cannot find %s in your env\" % str(ke)\n msg += \"\\nPlease refer to https://twiki.cern.ch/twiki/bin/view/CMSPublic/WorkBookCRAB3Tutorial#CMS_environment for how to setup the CMS enviroment.\"\n raise EnvironmentException(msg)",
"def ugly():\n\n global _pretty\n _pretty = False",
"def wrapper_environment(args):\n\n return {\n ENVIRONMENT_KEY: json.dumps({\n 'verbose': args.verbose,\n 'cc': shlex.split(args.cc),\n 'cxx': shlex.split(args.cxx)\n })\n }",
"def warmup():\n\treturn ''",
"def warmup():\n\treturn ''",
"def _transform_env(self) -> None:\n self.env = None if self.env == {} else self.env",
"def __attrs_post_init__(self):\n self.path = (Path(CONFIG['conda_folder']) / 'envs' / self.name)"
]
| [
"0.5378321",
"0.5314823",
"0.51957476",
"0.5176816",
"0.50836897",
"0.5041659",
"0.50129634",
"0.5007439",
"0.49973828",
"0.49863762",
"0.4961463",
"0.4959659",
"0.49336305",
"0.492728",
"0.48846954",
"0.48724553",
"0.48644373",
"0.48611596",
"0.48404202",
"0.48205215",
"0.48087224",
"0.4804478",
"0.48031053",
"0.48003727",
"0.480001",
"0.4790801",
"0.4787212",
"0.4787212",
"0.47858024",
"0.47766227"
]
| 0.616136 | 0 |
You really shouldn't be poking cats. But if you insist, it is recommended to bring catnip as it's not unusual for cats to attack dicks who poke them. | def poke_the_cat(where, catnip=False):
if not catnip:
from random import randint
class BadCat(InterruptedError):
pass
if randint(1, 10) == 7:
mew = "You attempt to poke the cat but it attacks. " \
"Maybe if you gave it some catnip?"
raise BadCat(mew)
return __cat_whisperer()[where] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def schrodingers_cat(peek=False):\n from random import choice, randint\n if peek:\n if randint(1, 10) % 2 == 0:\n # RIP\n return \"Nothing at all\"\n else:\n return poke_the_cat(Cat.LEGS, catnip=True)\n else:\n garbled_cries = \"mew meow wokka beocat ekkie\".split()\n return choice(garbled_cries)",
"def cat_command(update: Update, context: CallbackContext) -> None:\n try:\n r = randint(0, 100000000)\n context.bot.send_photo(chat_id=update.effective_chat.id, photo=f\"https://cataas.com/cat?_nocache={r}\")\n except telegram.error.BadRequest:\n update.message.reply_text(BAD_REQUEST)",
"def calico_kitty():\n return __cat_whisperer(colors=True, coat='calico_colorz', logo_colorz='logo_colorz')[Cat.ASS]",
"def cat(var, wrapper, message):\n wrapper.send(messages[\"cat_toss\"].format(wrapper.source), messages[\"cat_land\"].format(), sep=\"\\n\")",
"async def cat(self, interaction: Interaction, animal: Literal['cat', 'dog', 'fox', 'duck']):\n await post_animal_command(interaction, animal)",
"def DoMaliciousThings():\r\n\tprint(\"You are infected\")",
"def the_cats_ass():\n return __cat_whisperer()[Cat.ASS]",
"def cointoss(self, mess, args):\n return random.choice(['heads', 'tails'])",
"async def cat(self, ctx: Message):\n\t\timage_url = requests.get(\"https://api.thecatapi.com/v1/images/search\", headers={\"x-api-key\": \"37b77c23-9000-46c8-b808-a224a26f2d2a\"}).json()[0][\"url\"]\n\t\tawait self.send(image_url, whisper=[ctx.author.id])",
"def brute_force_cow_transport(cows,limit=10):\n # TODO: Your code here\n pass",
"def substantiate():",
"def dogcat():\n return render_template(\n 'dogcat.html',\n title='dogcat',\n year=datetime.now().year,\n message='Your contact page.'\n )",
"async def category(self,ctx):\n await ctx.send(\"Yes this is a category.\")",
"def animal_eats(self):\n self.update_fodder()\n self.herbivore_eats()\n self.carnivore_eats()",
"async def cat(self, ctx):\n async with ctx.session.get('https://api.thecatapi.com/v1/images/search') as resp:\n if resp.status != 200:\n return await ctx.send('No cat found :(')\n js = await resp.json()\n await ctx.send(embed=discord.Embed(title='Random Cat').set_image(url=js[0]['url']))",
"def tuxedo_cat():\n return __cat_whisperer(colors=True, coat='tuxedo_colorz', logo_colorz='dark_logo_colorz')[Cat.ASS]",
"async def random_cat_text(self):\r\n data_response = await self.http.endpoint(\"cat\")\r\n return result.TextResult(data_response, target=\"cat\")",
"def top_cat_command(update: Update, context: CallbackContext) -> None:\n context.bot.send_photo(chat_id=update.effective_chat.id,\n photo=get_random_top_cat_photo(),\n caption=get_random_top_cat_text())",
"def cat_turn():\r\n\tglobal men\r\n\tl = [bat, pounce, legkick]\r\n\tx = randint(0, 3)\r\n\tif men >= 85 and x == 3:\r\n\t\tx = randint(0,2)\r\n\tif x != 3 and men - l[x][5] >= 0:\r\n\t\treturn cat.hit(*l[x])\r\n\telse:\r\n\t\tmen += cat.sleep(*csleep)\r\n\t\treturn 0",
"def attack_command(s, atck_type=None, victim_ip=None):\n global BOT_STATES\n global PORT\n valid_attack = {\n '0' : 'spread worm',\n '1' : 'syn flood'\n }\n\n # Validate user input options\n if not atck_type or not victim_ip:\n print(\"Invalid input. Enter 'help for options.\")\n return\n if atck_type not in valid_attack:\n print(\"Invalid attack type. Enter 'help' for options.\")\n return\n try:\n socket.inet_aton(victim_ip)\n except socket.error as error:\n print(\"Invalid IPv4 address. Reason: {}\".format(error))\n return\n\n logger.info(\"Starting {} on {}\".format(valid_attack[atck_type], victim_ip))\n\n for addr in BOT_STATES.keys():\n if BOT_STATES[addr] == 1:\n logger.info(\"\\tCommand sent to {}\".format(addr))\n s.sendto(\"ATCK::{}::{}\".format(atck_type, victim_ip), (addr, PORT))\n BOT_STATES[addr] = 2",
"def test_cat_basic(self):\n\n utils.compare_tracing_methods(\n SimpleCatModule(0, 1, 2),\n torch.randn(2, 3, 4),\n torch.randn(2, 3, 4),\n torch.randn(2, 3, 4),\n fusible_ops={\"prim::FusedConcat\"},\n )",
"def funny_command(update: Update, context: CallbackContext) -> None:\n try:\n r = randint(0, 100000000)\n context.bot.send_photo(chat_id=update.effective_chat.id,\n photo=f\"https://cataas.com/cat/cute?_nocache={r}\",\n caption=get_random_story(intro=choice([4, 4, 4, 8, 11])))\n except telegram.error.BadRequest:\n update.message.reply_text(BAD_REQUEST)",
"def is_cooling(action_data):\n return (action_data == COOLING_ACTION) | (action_data == TWO_STAGE_COOLING_ACTION)",
"def cute_command(update: Update, context: CallbackContext) -> None:\n try:\n r = randint(0, 100000000)\n context.bot.send_photo(chat_id=update.effective_chat.id, photo=f\"https://cataas.com/cat/cute?_nocache={r}\")\n except telegram.error.BadRequest:\n update.message.reply_text(BAD_REQUEST)",
"async def pizza(ctx):\r\n author = ctx.message.author\r\n await ctx.send(author.mention + \" has eaten \" + str(randint(2, 120)) + \" slices of pizza today.\")\r\n ctx.counter(n)",
"async def funnypts(message, client, extra_args):\n\n await message.channel.send(\"CHECK WHAT YOU CAN DO WITH `help funnypts`\")",
"def control_ai(current, cat, dogs, goal, field):\n return _cat_move",
"def keyword_classifier(utterance):\n categories = {\n 'hello': ['hi ', 'greetings', 'hello', 'what\\'s up', 'hey ', 'how are you?', 'good morning', 'good night',\n 'good evening', 'good day', 'howdy', 'hi-ya', 'hey ya'],\n 'bye': ['bye', 'cheerio', 'adios', 'sayonara', 'peace out', 'see ya', 'see you', 'c ya', 'c you', 'ciao'],\n 'ack': ['okay', 'whatever', 'ok ', 'o.k. ', 'kay ', 'fine '],\n 'confirm': ['is it', 'is that', 'make sure', 'confirm', 'double check', 'check again', 'does it'],\n 'deny': ['dont want', 'don\\'t want', 'wrong', 'dont like', 'don\\'t like'],\n 'inform': ['dont care', 'don\\'t care', 'whatever', 'bakery', 'bar', 'cafe', 'coffeeshop', 'pub', 'restaurants',\n 'roadhouse', 'african',\n 'american', 'arabian', 'asian', 'international', 'european', 'central american', 'middle eastern',\n 'world', 'vegan', 'vegetarian', 'free', 'kosher', 'traditional', 'fusion', 'modern', 'afghan',\n 'algerian', 'angolan', 'argentine',\n 'austrian', 'australian', 'bangladeshi', 'belarusian', 'belgian', 'bolivian', 'bosnian',\n 'herzegovinian', 'brazilian', 'british', 'bulgarian', 'cambodian',\n 'cameroonian', 'canadian', 'cantonese', 'catalan', 'caribbean', 'chadian', 'chinese', 'colombian',\n 'costa rican', 'czech', 'congolese', 'cuban', 'danish', 'ecuadorian', 'salvadoran', 'emirati',\n 'english', 'eritrean',\n 'estonian',\n 'ethiopian', 'finnish', 'french', 'german', 'ghanaian', 'greek', 'guatemalan', 'dutch', 'honduran',\n 'hungarian', 'icelandic',\n 'indian', 'indonesian', 'iranian', 'iraqi', 'irish', 'israeli', 'italian', 'ivorian', 'jamaican',\n 'japanese',\n 'jordanian', 'kazakh', 'kenyan', 'korean', 'lao', 'latvian', 'lebanese', 'libyan', 'lithuanian',\n 'malagasy', 'malaysian',\n 'mali', 'mauritanian', 'mediterranean', 'mexican', 'moroccan', 'namibian', 'new zealand',\n 'nicaraguan',\n 'nigerien', 'nigerian', 'norwegian', 'omani', 'pakistani', 'panamanian', 'paraguayan', 'peruvian',\n 'persian', 'philippine', 'polynesian', 'polish', 'portuguese', 'romanian', 'russian', 'scottish',\n 'senegalese', 'serbian',\n 'singaporean', 'slovak', 'somalian', 'spanish', 'sudanese', 'swedish', 'swiss', 'syrian', 'thai',\n 'tunisian', 'turkish',\n 'ukranian', 'uruguayan', 'vietnamese', 'welsh', 'zambian', 'zimbabwean', 'west', 'north', 'south',\n 'east', 'part of town', 'moderate', 'expensive', 'cheap', 'any ', 'priced', 'barbecue', 'burger',\n 'chicken',\n 'doughnut', 'fast food',\n 'fish and chips', 'hamburger', 'hot dog', 'ice cream', 'noodles', 'pasta', 'pancake', 'pizza',\n 'ramen', 'restaurant', 'seafood', 'steak',\n 'sandwich', 'sushi'],\n 'negate': ['no ', 'false', 'nope'],\n 'repeat': ['repeat', 'say again', 'what was that'],\n 'reqalts': ['how about', 'what about', 'anything else'],\n 'reqmore': ['more', 'additional information'],\n 'request': ['what', 'whats' 'what\\'s', 'why', 'where', 'when', 'how much', 'may', 'address', 'post code',\n 'location', 'phone number'],\n 'restart': ['reset', 'start over', 'restart'],\n 'thankyou': ['thank you', 'cheers', 'thanks'],\n 'affirm': ['ye ', 'yes', 'right ']\n }\n\n classification = []\n sentence_to_classify = utterance.lower()\n for category, keywords in categories.items():\n keywords_found = [keyword for keyword in keywords if keyword in sentence_to_classify]\n if len(keywords_found) > 0: classification.append(category)\n\n return classification if len(classification) > 0 else ['null']",
"def testEchoCatCat(self):\n pl = Pipeline(loadInitFile=False)\n repl = REPL(pl)\n repl.runCommandLine('echo hi | cat | cat')\n self.assertEqual(['hi'], pl.stdin)\n self.assertEqual(REPL.DEFAULT_PS1, repl.prompt)",
"def greedy_cow_transport(cows,limit=10):\n # TODO: Your code here\n pass"
]
| [
"0.61793715",
"0.5548466",
"0.52614313",
"0.5206707",
"0.5189093",
"0.5179284",
"0.5168439",
"0.51067805",
"0.51002204",
"0.5057231",
"0.5049274",
"0.4982071",
"0.49557567",
"0.4942977",
"0.4941298",
"0.48946017",
"0.48391628",
"0.48205256",
"0.47957787",
"0.47827622",
"0.47161993",
"0.4685319",
"0.46787497",
"0.46659744",
"0.46504727",
"0.46502528",
"0.464977",
"0.46423498",
"0.46397996",
"0.4636374"
]
| 0.66019756 | 0 |
Peek in the box for a 50/50 shot of retrieving your desired output, while the other half of the time the cat is dead and the function returns nothing at all. If you decide not to peek, the cat being neither dead nor alive responds with random nonsense. | def schrodingers_cat(peek=False):
from random import choice, randint
if peek:
if randint(1, 10) % 2 == 0:
# RIP
return "Nothing at all"
else:
return poke_the_cat(Cat.LEGS, catnip=True)
else:
garbled_cries = "mew meow wokka beocat ekkie".split()
return choice(garbled_cries) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_peek_shows_value_of_current_tail(dq_3):\n assert dq_3.peek() == 'ragtime'",
"def poke_the_cat(where, catnip=False):\n if not catnip:\n from random import randint\n\n class BadCat(InterruptedError):\n pass\n\n if randint(1, 10) == 7:\n mew = \"You attempt to poke the cat but it attacks. \" \\\n \"Maybe if you gave it some catnip?\"\n raise BadCat(mew)\n\n return __cat_whisperer()[where]",
"def main():\n deli = troll_fight()\n if not empty_stack(deli):\n pick_berries(deli)\n else:\n print(\"The Troll has defeated the Goats! /sadface\")",
"def test_peek(self):\n server, client = loopback()\n server.send(b\"xy\")\n assert client.recv(2, MSG_PEEK) == b\"xy\"\n assert client.recv(2, MSG_PEEK) == b\"xy\"\n assert client.recv(2) == b\"xy\"",
"def test_5_false(self):\n\t\tself.spawn(\"./quidditch\").stdin(\"5\").stdin(\"0\").stdout(\"50\\n\").exit(0)",
"def catExists():\n currentTime = datetime.now().strftime('%H:%M:%S')\n while True:\n #wlbt.Trigger()\n target = wlbt.GetSensorTargets()\n if target:\n breathing = isBreathing()\n if breathing == 1:\n #print(\"the cat is alive!\")\n catStatus = 2\n else:\n #print(\"the cat is dead!\")\n catStatus = 1\n else:\n #print(\"There's no cat in this box\")\n catStatus = 0\n return catStatus",
"def peek(self):",
"def test_peek_returns_value(full_deque):\n assert full_deque.peek() == 1",
"def dequeue_loop():\n while True:\n result = dequeue_function()\n if not result:\n break\n print(result)",
"def peek(self):\n if self._peeked is not None:\n return self._peeked\n else:\n # This must echo the implementation in next()\n line = self.proc.stdout.readline()\n self._peeked = line[:-1] if line else None\n return self._peeked",
"def test_peek_return_none_when_empty(new_empty_deque):\n assert new_empty_deque.peek() is None",
"def test_peek_shows_value_of_current_head(dq_3):\n assert dq_3.peek_left() == 'snine'",
"def sniff(self, func=None, timeout=None):\n msg = None\n while True:\n msg = self.shell.client.get_stream_packet(type_=\"packet\", timeout=timeout)\n if func is not None:\n func(msg)\n else:\n break\n return msg",
"def test_that_peek_returns_tail(filled_deque):\n assert filled_deque.peek() == 1",
"def appearing(ghost):\n print_pause(\"You walk on.\", 2)\n print_pause(\"The\" + \" \" + ghost + \" \" + \"appears!!!\", 2)",
"def ping():\n return '{}'",
"def peek(self):\n pass",
"def peek(self):\n pass",
"def liveness_probe():\n return \"I am still alive!\"",
"def lets_get_punny():\n \n text = '\\033[35;1m' # text color\n background = '\\033[30;1;45m'\n \n chat = True\n while chat:\n\n # Get a message from the user\n msg = input(background + 'You say \\U0001F4AC:\\t')\n out_msg = None\n \n #Checks if input has question mark\n question = is_question(msg)\n defined_question = how_question(msg)\n \n # Checks if input has exclamation point\n exclamation = is_screaming(msg)\n\n # Prepare the input message\n msg = prepare_text(msg)\n\n # Check for an end msg = \n if end_chat(msg):\n out_msg = '¡Adiós! \\U0001F44B'\n print(out_msg)\n break\n \n # all my message outputs here \n if not out_msg:\n \n outs = []\n \n outs.append(selector(msg, GREETING_IN, GREETING_OUT)) # Greetings\n \n outs.append(selector(msg, QUESTION_GREETING_IN, QUESTION_GREETING_OUT))\n \n outs.append(selector(msg, JOKE_REQUEST_IN, JOKE_REQUEST_OUT)) # Responses for certain questions\n outs.append(selector(msg, NO_JOKE_IN, NO_JOKE_OUT))\n outs.append(selector(msg, NO_JOKE_REPLY_IN, NO_JOKE_REPLY_OUT))\n \n outs.append(selector(msg, YES_JOKE_IN, YES_JOKE_OUT))\n outs.append(selector(msg, YES_JOKE_REPLY_IN, JOKE_REPLY_OUT))\n \n # How jokes get responses works\n msg_str = ' '.join(msg)\n msg_str = msg_str.lower()\n \n if msg_str in JOKE_REPLY_IN_2:\n name = find_in_list(msg, JOKE_REPLY_IN_2)\n outs.append(joke_reply_2(msg))\n \n outs.append(respond_echo(selector(msg, LAUGH_IN, LAUGH_OUT), 1, \"\\U0001F923 \"))\n \n options = list(filter(None, outs))\n \n if options:\n out_msg = random.choice(options)\n \n if not out_msg and exclamation: \n out_msg = random.choice(SCREAMING)\n \n if not out_msg and question:\n out_msg = text + random.choice(UNKNOWN_QUESTION)\n\n # Catch-all to say something if msg not caught & processed so far\n if not out_msg:\n out_msg = random.choice(UNKNOWN)\n\n print(text + 'JokeBot \\U0001F47E:\\t', out_msg + '\\n')",
"def head_surprised():\n print (hair_spiky())\n print (eye_wide())\n print (nose_leftwards())\n print (mouth_open())\n print (chin_combo())",
"def testEchoCatCat(self):\n pl = Pipeline(loadInitFile=False)\n repl = REPL(pl)\n repl.runCommandLine('echo hi | cat | cat')\n self.assertEqual(['hi'], pl.stdin)\n self.assertEqual(REPL.DEFAULT_PS1, repl.prompt)",
"def testEchoCat(self):\n pl = Pipeline(loadInitFile=False)\n repl = REPL(pl)\n repl.runCommandLine('echo hi | cat')\n self.assertEqual(['hi'], pl.stdin)\n self.assertEqual(REPL.DEFAULT_PS1, repl.prompt)",
"def throwaway():\n print \"Nothing to execute (how about writing some code?)\"",
"def test_fetchSpecificPeek(self):\n d = self.client.fetchSpecific('6', peek=True)\n self.assertEqual(\n self.transport.value(), b'0001 FETCH 6 BODY.PEEK[]\\r\\n')\n # BODY.PEEK responses are just BODY\n self.client.lineReceived(b'* 6 FETCH (BODY[] \"Some body\")')\n self.client.lineReceived(b'0001 OK FETCH completed')\n self.assertEqual(\n self.successResultOf(d), {6: [['BODY', [], \"Some body\"]]})",
"def test_cant_peek_empty(empty_deque):\n assert empty_deque.peek() is None",
"def test_2_true(self):\n\t\tself.spawn(\"./quidditch\").stdin(\"2\").stdin(\"1\").stdout(\"170\\n\").exit(0)",
"def __call__(self):\n return self.shoot()",
"def run(topic, port, server_address):\n global print_counter\n number_of_prints_on_this_roll = math.floor(roll_length / each_print_length)\n t = tappy_typing()\n while True:\n value = next(t)\n print \"taking a pic with settings:\"\n print \"iso {}, exposure_mode {}, mode_or_iso {}\".format(iso,\n exposure_mode,\n mode_or_iso)\n take_a_picture(filepath=\"live.jpg\", # values from globals\n exposure_mode=exposure_mode,\n iso=iso,\n mode_or_iso=mode_or_iso)\n if value != \"exit please\":\n payload = json.dumps({\"handle\": \"turkClient\",\n \"text\": value})\n print \"dump\", payload\n publish.single(topic,\n payload=payload,\n hostname=server_address,\n port=port)\n else:\n # ws.close()\n print \"thread terminating...\"\n return True\n\n print_counter += 1\n m = \"{} of {} prints left on this roll\"\n print m.format(int(number_of_prints_on_this_roll - print_counter),\n int(number_of_prints_on_this_roll))\n if print_counter == (number_of_prints_on_this_roll -\n number_of_prints_before_end_to_warn):\n paper_warning = (\"Please warn my human helper that my paper \"\n \"is running low or I will be silenced.\")\n publish.single(topic,\n payload=paper_warning,\n hostname=server_address,\n port=port)\n\n time.sleep(1)",
"def hot_potato(name_list, num):\n queue = ArrayQueue()\n for i in name_list:\n queue.enqueue(i)\n\n stop = False\n while not stop:\n for i in range(1,num):\n s = queue.dequeue()\n queue.enqueue(s)\n print(s)\n print()\n print(\" Drop it {0}\".format(queue.dequeue()))\n if queue.__len__() == 1:\n return queue.dequeue()"
]
| [
"0.5551094",
"0.5481319",
"0.54402834",
"0.52876055",
"0.5216225",
"0.5209968",
"0.5209621",
"0.51197696",
"0.509371",
"0.50888205",
"0.5036659",
"0.5000694",
"0.49975404",
"0.4992409",
"0.4970316",
"0.49431387",
"0.49377355",
"0.49363643",
"0.4902926",
"0.48842472",
"0.48525676",
"0.4849756",
"0.48459873",
"0.4845809",
"0.48445398",
"0.48432836",
"0.48359388",
"0.48336393",
"0.48001584",
"0.4762659"
]
| 0.6159067 | 0 |
Thread that polls to get the current force on the FSR. Populates the self._average_force value. | def _force_loop(self):
NUM_SAMPLES = 10.0
# Get the initial readings
time.sleep(1)
readings = []
for i in range(0, int(NUM_SAMPLES)):
readings.insert(0, self._sensor.value)
time.sleep(self._sampling_rate)
self._average_force = sum(r for r in readings) / NUM_SAMPLES
# Average the readings
while True:
readings.insert(0, self._sensor.value)
readings.pop()
self._average_force = sum(r for r in readings) / NUM_SAMPLES
time.sleep(self._sampling_rate) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start(self):\n lastbeat = time.time()\n averages = []\n for x in range(self.keep_amount):\n averages.append(1)\n while self.run:\n cur_amount = len(averages)\n if self.keep_amount != cur_amount:\n if self.keep_amount < cur_amount:\n while self.keep_amount != len(averages):\n averages.pop(0)\n elif self.keep_amount < cur_amount:\n avg = sum(averages)/cur_amount\n while self.keep_amount != len(averages):\n averages.append(avg)\n self.do_fft()\n mag = self.get_average(self.s_freq, self.e_freq)\n avg = sum(averages)/self.keep_amount\n averages.pop(0)\n if (mag/avg)-1 > self.percent:\n now = time.time()\n if ((now-lastbeat) > self.wait_time) and self.relative_range() > .009:\n lastbeat = now\n self.detect_func()\n averages.append(avg)\n else:\n averages.append(mag)\n else:\n averages.append(mag)",
"def getForce(self):\n if self.connected:\n if not self.lock:\n self.lock = True\n measured_voltage1 = _read_once(1,self.serial)\n measured_voltage2 = _read_once(2,self.serial)\n self.lock = False\n else:\n print(\"forceSensorArduino.py: getForce, already locked.\")\n measured_voltage1 = None\n measured_voltage2 = None\n else:\n measured_voltage1 = None\n measured_voltage2 = None\n print(\"forceSensorArduino.py: getForce, serial not connected.\",file=sys.stderr)\n return (measured_voltage1, measured_voltage2)",
"def go(self):\n # ipdb.set_trace()\n self.ang_data = np.zeros(int(self.avg.get()))\n self.amp_a2 = np.zeros(int(self.avg.get()))\n self.amp_b2 = np.zeros(int(self.avg.get()))\n self.ab_re = np.zeros(int(self.avg.get()))\n self.ab_im = np.zeros(int(self.avg.get()))\n df = bw*1.0/channels\n self.index = trunc(float(self.freq2meas.get())/df)\n self.index_offset = trunc(int(self.chann_span.get()))\n self.enabler = 1\n #self.t = threading.Thread(target=self.obtain_data, name='data_thread')\n print(enabler)\n self.t.start()\n return 1",
"def get_force(self):\n # @todo: Probably need to check the state of the landing gear for this (e.g. are they on the track?).\n # Note: you can get the state of the landing gear by going through self.sim \n return 0.0",
"def force(self):\n return self._divisor / self._average_force",
"def work(self):\n while(True):\n debug_print = False\n if debug_print == True:\n start = time.time()\n\n flow = self.gauge.read_flow_from_dp()\n self.flw_q.put([time.time(), flow])\n\n if debug_print == True:\n flow_time = time.time()\n print(f\"Runtime - calc_flow: {1000 * (flow_time - start):.0f} ms\")\n\n pressure = self.gauge.read_pressure()\n self.prs_q.put([time.time(), pressure])\n\n if debug_print == True:\n pressure_time = time.time()\n print(f\"Runtime - read_pressure: {1000 * (pressure_time - flow_time):.0f} ms\")\n \n if debug_print == True:\n runtime = time.time() - start\n print(f\"Runtime - total: {1000 * runtime:.1f} ms\")\n print(f\"Frequency: {1 / runtime:.1f} Hz\")",
"async def run(self):\n\n\t\tawait asyncio.sleep(self.delay)\n\t\tR_load = self.lock.mag/(self.sense - self.lock.mag)*self.R_ref\n\t\tawait self.resistance.push(R_load)\n\t\tawait self.current.push(self.lock.dc/(self.R_ref+R_load))\n\t\tawait self.voltage.push(self.lock.dc*R_load/(self.R_ref+R_load))\n\n\t\tlogger.debug(\"Stream has filled {} of {} points\".format(self.resistance.points_taken,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.resistance.num_points() ))\n\n\t\t#await asyncio.sleep(2*self.integration_time) # Give the filters some time to catch up?",
"def update_forces(self):\r\n # update all the functions\r\n self.compute_gravity()\r\n self.compute_tides()\r\n self.compute_centrifugal()\r\n self.compute_coriolis()\r\n\r\n # add together the forces into the summation function\r\n self.forcing.assign(self.ftides+self.gravity +\r\n self.centrifugal+self.coriolis)",
"def platform_auto_calibrate_magnetometer(self):\n self._platform_auto_calibrate_check()\n return self.platform.auto_calibrate_magnetometer()",
"def read_line(self):\n self.read_calibrated()\n\n avg = 0\n summ = 0\n online = False\n\n for i in range(0, self.NUM_SENSORS):\n val = self.sensorValues[i]\n if val > 500: online = True\n if val > 50:\n multiplier = i * 1000\n avg += val * multiplier\n summ += val\n\n if online == False:\n if self.lastValue < (self.NUM_SENSORS-1)*1000/2:\n return 0\n else:\n return (self.NUM_SENSORS-1)*1000\n\n self.lastValue = avg/summ\n return self.lastValue",
"def run(self):\n result = self.Take_Voltage_Measurement()\n self.result_queue.put(result)",
"def run(self):\n result = self.Take_Voltage_Measurement()\n self.result_queue.put(result)",
"def update(self):\n self.value = self.sensor.update()",
"def _force_read(self, gas_measurement_enabled):\n self._set_gas_measurement(gas_measurement_enabled)\n\n temp = self._read_register_1ubyte(self.BME680_CTRL_MEAS)\n temp |= OperationModes.ForcedMode\n self._write_register(self.BME680_CTRL_MEAS, temp)\n\n while(self._get_measuring_status()):\n time.sleep(0.001)\n\n if (gas_measurement_enabled):\n while (self._get_gas_measuring_status()):\n time.sleep(0.001)",
"def update_forces(self):\n\n pass",
"def wait_for_fingerscore(self):\n while True:\n self.recv_event()\n if self.last_event_code == DEFS.EF_FPFTR:\n return self.parse_score_fp_event()",
"def _update_feedback(self):\n #First read in the current voltage (power)\n #Read in numReadsPerCycle signals (arb) to average\n #TODO: allow user to select reads per signal\n currSignal = self._ai_client.get_ai_voltage(self._ai_channel, self.numReadsPerCycle, max_range=self.max_input_voltage)\n\n #Add new data to the pid\n self.pid.set_pv(np.atleast_1d(np.mean(currSignal)))\n\n #Now compute the new control value and update the AO\n self.pid.set_cv()\n self._curr_output_voltage = self._curr_output_voltage + self.pid.cv\n if self._curr_output_voltage < self.min_voltage:\n self._curr_output_voltage = self.min_voltage\n elif self._curr_output_voltage > self.max_voltage:\n self._curr_output_voltage = self.max_voltage\n\n\n #Finally updating the analog output\n\n #Do a final check to make sure that if you are in hardware control mode that the voltage control is still HIGH\n #This is to avoid the potential error if the voltage control is toggled low between the last call of _check_hardware_control\n #and update_feedback, whcih would mean that currSignal would be 0 (assuming a pulsed experiment), and causing a garbage\n #feedback which could be an issue in the next pulse.\n if (~self._under_hardware_control or self.ai_client.get_ai_voltage(self._hwc_ai_channel)[-1] > self._hwc_thresh):\n self._ao_client.set_ao_voltage(self._ao_channel, self._curr_output_voltage)",
"def read_gas_resistance(self):\n #Declare global variables\n global calAmbTemp\n\n self._force_read(True)\n\n tempADC = (self._read_register_1ubyte(self.BME680_TEMP_MSB) << 12) | (self._read_register_1ubyte(self.BME680_TEMP_LSB) << 4) | (self._read_register_1ubyte(self.BME680_TEMP_XLSB) >> 4)\n gasResADC = (self._read_register_1ubyte(self.BME680_GAS_R_MSB) << 2) | (self._read_register_1ubyte(self.BME680_GAS_R_LSB) >> 6)\n gasRange = self._read_register_1ubyte(self.BME680_GAS_R_LSB) & 0x0F\n\n calAmbTemp = self._compensate_temperature(tempADC)\n val = self._calculate_gas_resistance(gasResADC, gasRange)\n\n return float(val)",
"def update(self):\n \n self.accelerometer()\n self.magnetometer()",
"def raw_force(self):\n if self.live:\n return self._sensor.value\n\n # For testing\n return self._test_raw_force",
"def _calibrate_measurement(self):\n\n cold_blackbody = bb_radiance(self.cbb.header.cbb_temperature + 273.15,\n self.cbb.data.wavelength)\n warm_blackbody = bb_radiance(self.wbb.header.wbb_temperature + 273.15,\n self.wbb.data.wavelength)\n\n self.wbb.data.average_spectrum[0] = 1\n self.wbb.data.average_spectrum[2047] = 1\n\n calibration_slope = ((warm_blackbody - cold_blackbody) /\n (self.wbb.data.average_spectrum - self.cbb.data.average_spectrum))\n calibration_offset = warm_blackbody - (self.wbb.data.average_spectrum * \n calibration_slope)\n\n self.wbb.calibrate_file(calibration_slope, calibration_offset)\n self.cbb.calibrate_file(calibration_slope, calibration_offset)\n self.sam.calibrate_file(calibration_slope, calibration_offset)\n\n if not self.dwr is None:\n self.dwr.calibrate_file(calibration_slope, calibration_offset)\n\n plate_temperature = self.dwr.header.spare_f[0]\n if (self.plate == -1) :\n plate_emissivity = self.dwr.header.spare_f[1]\n\n plate_blackbody = bb_radiance(plate_temperature + 273.15,\n self.dwr.data.wavelength)\n plate_emission = plate_emissivity * plate_blackbody\n\n self.dwr.data.average_spectrum = ((self.dwr.data.average_spectrum - \n plate_emission) / (1 - plate_emissivity))",
"def calculateAverage(self): \n if not self.lastTransferAverage: \n size=[0,0,0,0]\n for i in range(0,4):\n if self.lastTransferredChannel & (1 << i):\n size[i]=self.lastNbrSamplesPerSeg\n self.lastAverageArray = [zeros(size[0]),zeros(size[1]),zeros(size[2]),zeros(size[3])]\n nbrSamp=self.lastNbrSamplesPerSeg\n for i in range(0,4):\n if self.lastTransferredChannel & (1 << i):\n nbrSeg=self.lastNbrSegmentsArray[i]\n for j in range (0,nbrSamp):\n for k in range(0,nbrSeg): \n self.lastAverageArray[i][j]+=self.lastWaveformArray[i][k*nbrSamp+j]\n self.lastAverageArray[i][j]/=nbrSeg\n self.lastAverageCalculated=True\n else: print \"NOn averaged data are not available\"",
"def _calculate_fuel(self):\n self._fuel = self._calculate_fuel_r(self._mass)",
"def run(self):\n old_sampling = rospy.Time(0)\n while not rospy.is_shutdown():\n self.mutex.acquire()\n reference_received = all(self.reference_flags.values())\n if reference_received:\n if not self.ready:\n # first value of ni_ref\n self.ni_ref.last_value[0:3] = self.eta1_ref_body.dot + self.speed_ref\n self.ni_ref.last_value[3:6] = self.controller.vehicle.ned2body_angular(self.eta2_ref.dot, self.eta2)\n self.ni_ref.last_sampling = rospy.Time.now()\n\n # error\n old_sampling = rospy.Time.now()\n\n # Node is ready to call controller\n self.ready = True\n else:\n # Set ni_ref\n self.ni_ref.value[0:3] = self.eta1_ref_body.dot + self.speed_ref\n self.ni_ref.value[3:6] = self.controller.vehicle.ned2body_angular(self.eta2_ref.dot, self.eta2)\n dt = rospy.Time.now() - self.ni_ref.last_sampling\n\n # compute derivative of ni_ref\n self.ni_ref.dot = (self.ni_ref.value - self.ni_ref.last_value) / dt.to_sec()\n self.ni_ref.last_value = deepcopy(self.ni_ref.value)\n self.ni_ref.last_sampling = rospy.Time.now()\n\n # Set PI of controller with error value\n self.ni_tilde = self.ni - self.ni_ref.value\n dt = rospy.Time.now() - old_sampling\n self.controller.PI.update(self.ni_tilde, dt.to_sec())\n\n # compute tau with eta2, ni and ni_ref_dot\n tau = self.controller.control_law(self.eta2, self.ni, self.ni_ref.dot)\n\n # publish messages\n self.publish(tau)\n self.tester(tau)\n\n self.mutex.release()\n self.node_loop.sleep()",
"def get_frequency(self):\r\n # print '*********in get freq'\r\n self.cntr.run('FREQ 1')\r\n f_0_ = self.cntr.get_measurements(1)\r\n self.f_0 = f_0_[0]\r\n self.cntr.run('FREQ 2')\r\n f_rep_ = self.cntr.get_measurements(1)\r\n self.f_rep = f_rep_[0]",
"def calculate(self):\n if self.sampling_timer.IsRunning():\n return\n if self.daq.data0 == []:\n average = 0.0\n else:\n average = mean(self.daq.data0)\n res_string = '%.2f' %average\n self.control_box.result_box.SetLabel(res_string)",
"def safe_calibrate(self):\n\n status = -1\n while status < 3:\n ifMutexAcquire(self.use_mutex)\n try:\n new_status = self.BNO055.get_calibration_status()[3]\n except:\n new_status = -1\n finally:\n ifMutexRelease(self.use_mutex)\n if new_status != status:\n status = new_status",
"def update(self):\n\n obstVals = self.robot.getDepth(self.startCol, self.startRow,\n self.sampleWidth, self.sampleHeight)\n\n masked_obstVals = numpy.ma.masked_array(obstVals, obstVals == 0)\n\n if numpy.ma.count(masked_obstVals) == 0:\n meanDistance = 500\n else:\n meanDistance = numpy.mean(masked_obstVals)\n if meanDistance < 500:\n meanDistance = 500\n\n if meanDistance < 1200: # Changing this value will change how sensitive robot is to walls\n self.setVector(self.speedMult / meanDistance, 180 - self.angle)\n else:\n self.setVector(0.0, 0.0)",
"def lineDetectThread(self):\n \n while True:\n\n tmpList = self.octo.analog_read_all()\n avg = 0\n for x in tmpList:\n avg = avg + x\n\n avg = avg / len(tmpList)\n\n if avg <= self.cutoff:\n if not self.seenLine: # first time seeing the white line\n print(\"Line detected\")\n \n self.seenLine = True\n \n # change directions of the motors to drive away from the line\n if self.lmd == self.BWD:\n self.lmd = self.FWD\n else:\n self.lmd = self.BWD\n \n if self.rmd == self.BWD:\n self.rmd = self.FWD\n else:\n self.rmd = self.BWD\n\n self.myMotor.set_drive(self.L_MTR, self.lmd, self.lmp)\n self.myMotor.set_drive(self.R_MTR, self.rmd, self.rmp)\n else:\n if self.seenLine:\n print(\"back to black\")\n self.seenLine = False",
"def compute_scl(self):\n \n # We can initialize run-once stuff here\n interval = 5.0 # time between samples\n ch = 0 # channel index\n\n # ----------- Process loop for acquiring secondary data ---------------\n while self.run_state.value:\n\n # 1. Snapshot current data buffer (last 90 seconds)\n data,times = self.data_snapshot([90, 90])\n\n # 2. Calculate the desired metric and grab the current time-stamp\n if len(data[0])>0:\n new_value = np.mean(data[0])\n else:\n new_value = 0\n time_stamp = lsl.local_clock()\n\n # 3. Update secondary buffer \n self.push_sample_secondary(ch,time_stamp,new_value) \n\n # 4. Sleep until its time to calculate another value \n time.sleep(interval)\n # ---------------------------------------------------------------------"
]
| [
"0.6141744",
"0.57550305",
"0.5539875",
"0.551071",
"0.54476047",
"0.5357573",
"0.53213555",
"0.5257272",
"0.52194697",
"0.5190291",
"0.51847667",
"0.51847667",
"0.5159389",
"0.5158231",
"0.51533365",
"0.51522845",
"0.5108973",
"0.5106741",
"0.5078752",
"0.5078129",
"0.50639135",
"0.5061199",
"0.5050533",
"0.5025298",
"0.50076723",
"0.49896404",
"0.49722317",
"0.49405548",
"0.4933604",
"0.49187723"
]
| 0.71686685 | 0 |
Find a natural gutter between start and stop, inclusive. | def get_gutter(start_obj, stop_obj):
log.debug(f'get_gutter({start_obj.group(0)}, {stop_obj.group(0)})')
start = start_obj.end(0)
stop = stop_obj.start(0)-1
gutters = list()
for column in range(start, stop+1):
if all(line.rjust(column+1)[column] == ' ' for line in lines):
gutters.append(column)
if gutters:
if len(gutters) == 1:
return gutters[0]
if gutters[-1] - gutters[0] + 1 == len(gutters):
return gutters[0] # there's more than one potential gutter but they're all contiguous. We'll return the first column
else:
log.fatal(f'Two or more potential gutters found between columns {start} ({start_obj.group(0)}) and {stop} ({stop_obj.group(0)}), inclusive: {gutters}')
exit(1)
else:
log.fatal(f'No gutter found between columns {start} ({start_obj.group(0)}) and {stop} ({stop_obj.group(0)}), inclusive')
exit(1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def GetNiceExtentsBySpacing(minval,maxval,spacing,tolerance):\n pass",
"def find_midpoint(start, end):\n mid = (start + end) / 2\n return int(mid)",
"def gutter_spacing(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"gutter_spacing\")",
"def gutter_spacing(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"gutter_spacing\")",
"def gutter_spacing(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"gutter_spacing\")",
"def make_slice_inclusive(start, stop=None, step=None):\n if stop is None:\n return start, stop, step\n\n if step is None or step > 0:\n if stop == -1:\n stop = None\n else:\n stop += 1\n else:\n if stop == 0:\n stop = None\n else:\n stop -= 1\n return start, stop, step",
"def range_inclusive(start, stop):\n return range(start, stop + 1)",
"def range_1(stop: int) -> range:\n return range(1, stop + 1)",
"def scanner_pos_at_round( layer, round_num ):\n\tpos = round_num % ( layer.range * 2 - 2 )\n\treturn pos",
"def geomspace(start, ratio=None, stop=False, num=50):\n if not ratio and stop != False:\n ratio = (stop/start)**(1/(num-1))\n seq = []\n seq.append(start)\n if stop == False:\n for j in range(1, num):\n seq.append(seq[j-1]*ratio)\n return seq\n else:\n val, j = start, 1\n while val <= stop or np.allclose(val, stop, ):\n val = seq[j-1]*ratio\n seq.append(val)\n j+=1\n return seq[:-1]",
"def margin_range(base, val):\n from math import ceil\n if isinstance(val, float):\n margin = ceil(val * base)\n r = range(base - margin, base + margin + 1)\n elif isinstance(val, int):\n r = range(base - val, base + val + 1)\n else:\n raise Exception(\"Value must be either float or int.\")\n return r",
"def compute_skiprows(start, end) -> List[int]:\n return list(range(start - 1)) + list(range(end, end + 20))",
"def grange(start=1, step=1, stop=None):\n \n if stop is None:\n x = int(start)\n dif = int(step)\n while True:\n yield x\n x += dif\n else:\n for x in range(start, stop, step):\n yield x",
"def calculate_segment_bin_start(startbin, stopbin, nbin, fraction_step=1):\n st = np.arange(startbin, stopbin, int(nbin * fraction_step), dtype=int)\n if st[-1] + nbin > stopbin:\n return st[:-1]\n return st",
"def geomspace(start, stop, num=50, include_endpoint=True, dtype=None, constant=False):\n return Tensor(np.geomspace(start, stop, num, include_endpoint, dtype), constant=constant)",
"def fraction(amount, start, stop, truncated, sequence):\n ratio = stop\n for x in range(start, amount):\n y = abs(round(ratio / (abs(x) + 1)))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence",
"def pixel_spacing_range(self) -> Optional[float]:\n return self._get_property(PIXEL_SPACING_RANGE_PROP, float)",
"def findOverlapOrNearest(gs, ts, tree, start, end):\n #step 1, find overlaps\n rs = set()\n for i in range(start, end + 1):\n if i in gs:\n rs.add(gs[i])\n if len(rs) > 0:\n rs = list(rs)\n return rs, [0] * len(rs)\n #find the nearest one\n else:\n d, i = tree.query([(start + end) / 2], k=1)\n g = gs[ts[i][0]]\n #d = ts[i][0] - (start+end)/2\n d = int(d)\n return [g], [d]",
"def lenRange(start, stop, step=1):\n return (stop - start + step - 1 + 2 * (step < 0)) // step",
"def slice_spacing(self):\n return np.median(np.diff(self.slice_zvals))",
"def get_range(start, stop):\n \n nums = []\n\n for num in range(start, stop):\n nums.append(num)\n\n return nums",
"def getRange (start, stop, step=1):\r\n result = [n for n in range(start, stop, step)]\r\n return result",
"def findspan(self, u):\n #if u >= self.kv[-self.p-1]:\n # return self.kv.size - self.p - 2 # last interval\n #else:\n # return self.kv.searchsorted(u, side='right') - 1\n return pyx_findspan(self.kv, self.p, u)",
"def mid(start_row, start_peg, end_row, end_peg):\n if start_row + 2 == end_row:\n mid_row = start_row + 1\n elif start_row == end_row + 2:\n mid_row = start_row - 1\n elif start_row == end_row:\n mid_row = start_row\n\n if start_peg + 2 == end_peg:\n mid_peg = start_peg + 1\n elif start_peg == end_peg + 2:\n mid_peg = start_peg - 1\n elif start_peg == end_peg:\n mid_peg = start_peg\n\n return (mid_row, mid_peg)",
"def span_num(center: float, span: float, num: int, endpoint: bool=True):\n return np.linspace(center-span/2, center+span/2, num, endpoint=endpoint)",
"def clamp(num,start,end):\n if num >= start and num <= end: return num\n elif num < start: return start\n elif num > end: return end",
"def boundary(gap, min_tags_in_window, average):\n\tassert min_tags_in_window >= 1;\n\ttemp = 0;\n\tfor i in range(0, min_tags_in_window): temp += poisson(i, average);\n\ttemp = pow(temp, gap+1); \n\treturn temp*temp; # start & end ",
"def _find_break_points(self, start, end, num_classes):\r\n if start >= end:\r\n raise ValueError(\"Cannot find breakpoints because the starting \"\r\n \"point is greater than or equal to the ending \"\r\n \"point.\")\r\n if num_classes < 1:\r\n raise ValueError(\"Cannot have fewer than one distance class.\")\r\n\r\n width = (end - start) / num_classes\r\n break_points = [start + width * class_num\r\n for class_num in range(num_classes)]\r\n break_points.append(float(end))\r\n\r\n # Move the first breakpoint a little bit to the left. Machine epsilon\r\n # is taken from:\r\n # http://en.wikipedia.org/wiki/Machine_epsilon#\r\n # Approximation_using_Python\r\n epsilon = finfo(float).eps\r\n break_points[0] = break_points[0] - epsilon\r\n\r\n return break_points",
"def linspace(start, stop, n, istart=True, istop=True):\r\n n = n-1\r\n arr = [start + ((stop-start)/n) * i for i in range(n+1)]\r\n return arr",
"def get_range_of_claims(start, stop):\n num_trucks = get_claimed_objects_in_range(start, stop)\n print('('*50)\n print('NUM TRUCKS FROM CLAIMS', num_trucks.count())\n print('(' * 50)\n\n return num_trucks.count()"
]
| [
"0.6075604",
"0.57111067",
"0.57065743",
"0.57065743",
"0.57065743",
"0.55262905",
"0.5473661",
"0.5458969",
"0.538811",
"0.5384582",
"0.5346922",
"0.5319414",
"0.5292439",
"0.51302606",
"0.5105891",
"0.51037973",
"0.5075034",
"0.5074723",
"0.5058725",
"0.50488275",
"0.4984003",
"0.4963664",
"0.49566147",
"0.49442953",
"0.48995295",
"0.48814225",
"0.48804206",
"0.48718053",
"0.48348707",
"0.48241854"
]
| 0.80115026 | 0 |
Returns a list of request names of requests in runningopen state | def getRunningOpen(self, teamName):
result = []
for request in self.status:
if self.status[request] == 'running-open':
result.append(request)
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _GetRequestsByState(self, state):\n requests_dir = self._spool_state_dirs[state]\n return sorted(os.listdir(requests_dir))",
"def get_available_request(self):\n for med in self.__state.__class__.__dict__:\n if med.startswith('request_'): yield med",
"def get_requests(self):\n\t\tself.last_processed = self.last_modified\n\t\treturn self.requests",
"def get_requests(self):\r\n\t\tself.last_processed = self.last_modified\r\n\t\treturn self.requests",
"def queryAllRequests(self):\n logging.info(\"Querying all requests at ReqMgr instance ...\")\n r = self.reqMgrService.getRequestNames()\n print \"Found %s requests:\" % len(r)\n for req in r:\n print req",
"def listRequests(self):\n reqmgr = RequestManagerImpl()\n retval = []\n for request in reqmgr.listRequests(self.endpoint):\n tmpRequest = Request()\n tmpRequest.setReqmgrUrl( self.endpoint )\n tmpRequest.setWorkflowName( request['request_name'] )\n retval.append( tmpRequest )\n return retval",
"def _GetNewRequests(self):\n new_requests = self._GetRequestsByState(self._REQUESTED)\n if new_requests:\n while self._MakeRequestId() == new_requests[-1]:\n pass\n for request_id in new_requests:\n self._TransitionRequest(request_id, self._REQUESTED, self._PENDING)\n return new_requests",
"def current_requests(self):\n return len(self._current_requests)",
"def get_ffdc_get_request_index(self):\n return FFDC_GET_REQUEST.keys()",
"def get_requests(self):\n return self.dbsession.query(RequestModel).all()",
"def get_waiting_jobs(self):\n open_jobs = []\n with closing(self._conn.cursor()) as cursor:\n for row in cursor.execute( \"select job_name, job_version from jobs where job_state in ('\"\n + JobState.WAITING.value + \"','\" + JobState.WAITING_PRED.value + \"','\" + JobState.RUNNING.value +\"')\"):\n open_jobs.append((row[0], row[1]))\n return open_jobs",
"def ReadFlowProcessingRequests(self):\n return list(self.flow_processing_requests.values())",
"def getBuildRequests():",
"def get_opened_windows_list():\n\n global opened_windows_names\n EnumWindows(EnumWindowsProc(foreach_window), 0)\n return opened_windows_names",
"def list_pending_tasks():\n inspector = current_app.control.inspect()\n\n return inspector.reserved()",
"def get_request_candidates(self):\n return os.listdir(self.cache_dir_)",
"def get_job_names(self):\n return []",
"def list_requested_files(request):\n request_datetime_range = datetime_range(\n request.start_datetime,\n request.end_datetime,\n request.time_resolution\n )\n # Translate request datetime into files\n request_file_names = [\n datetime_to_file_name(ts) for ts in request_datetime_range\n ]\n return request_file_names",
"def _GetAbortRequests(self):\n new_requests = self._GetRequestsByState(self._ABORTING)\n for request_id in new_requests:\n logging.info('Abort requested for %s', request_id)\n self._ClearRequest(request_id, self._ABORTING)\n return new_requests",
"def get_request_states(self, request, relations):\n complete = []\n requests = {}\n for relation in relations:\n complete = False\n previous_request = self.get_previous_request(relation)\n if request == previous_request:\n sent = True\n complete = self.is_request_complete_for_relation(\n previous_request,\n relation)\n else:\n sent = False\n complete = False\n\n rid = \"{}:{}\".format(relation.name, relation.id)\n requests[rid] = {\n 'sent': sent,\n 'complete': complete,\n }\n\n return requests",
"def check_active_requests():\n\n active_requests = jobtracker.query(\"SELECT * FROM requests \" \\\n \"WHERE status='waiting'\")\n for request in active_requests:\n\n\t# Check requested status \n\tif DownloaderSPAN512.check_request_done(request):\n\t dlm_cout.outs(\"Restore (GUID: %s) has succeeded. Will create file entries.\\n\" % request['guid'])\n\t create_file_entries(request)\n\n\telse:\n#\t dlm_cout.outs(\"Request (GUID: %s) has failed.\\n\" \\\n#\t \"\\tDatabase failed to report the data as restored.\" % request['guid'])\n#\t jobtracker.query(\"UPDATE requests SET status='failed', \" \\\n# \"details='Request failed. Why ?', \" \\\n# \"updated_at='%s' \" \\\n# \"WHERE guid='%s'\" % (jobtracker.nowstr(), request['guid']))\n\n query = \"SELECT (TO_SECONDS('%s')-TO_SECONDS(created_at)) \" \\\n \"AS deltaT_seconds \" \\\n \"FROM requests \" \\\n \"WHERE guid='%s'\" % \\\n (jobtracker.nowstr(), request['guid'])\n row = jobtracker.query(query, fetchone=True)\n #if row['deltaT_seconds']/3600. > config.download.request_timeout:\n if row/3600. > config.download.request_timeout:\n dlm_cout.outs(\"Restore (GUID: %s) is over %d hr old \" \\\n \"and still not ready. Marking \" \\\n \"it as failed.\" % \\\n (request['guid'], config.download.request_timeout))\n jobtracker.query(\"UPDATE requests \" \\\n \"SET status='failed', \" \\\n \"details='Request took too long (> %d hr)', \" \\\n \"updated_at='%s' \" \\\n \"WHERE guid='%s'\" % \\\n (config.download.request_timeout, jobtracker.nowstr(), \\\n request['guid']))",
"def getOpenWorkflowStates(self):\n return OPEN_STATES",
"def list_running_tasks():\n inspector = current_app.control.inspect()\n\n return inspector.active()",
"def _list(self, req):\n list_type = None\n status_prefix = 'STATUS LIST '\n if req:\n list_type = req.pop(0)\n if list_type and list_type == SPECTATE:\n games = self.server.get_unfinished_games()\n status_prefix += SPECTATE + ' '\n else:\n games = self.server.get_open_games()\n self.send_line(status_prefix + ' '.join(\n [str(g.id) for g in games if not self.game or self.game is not g]))",
"def list_requesters():\n from mephisto.core.local_database import LocalMephistoDB\n from tabulate import tabulate\n\n db = LocalMephistoDB()\n requesters = db.find_requesters()\n dict_requesters = [r.to_dict() for r in requesters]\n click.echo(tabulate(dict_requesters, headers=\"keys\"))",
"def get_waiting_jobs(self):\n return []",
"def view_requests(self):\n requests = self.caller.db.scene_requests or {}\n table = EvTable(\"{wName{n\", \"{wSummary{n\", width=78, border=\"cells\")\n for tup in requests.values():\n table.add_row(tup[0], tup[1])\n self.msg(str(table))",
"def get_request_journal(self):\n response = requests.get(self.requests_url)\n if response.status_code != http_client.OK:\n raise ValueError(response.text, response.status_code)\n response_body = json.loads(response.text)\n return response_body[\"requests\"]",
"def Run(self, args):\n p = parent.GetParent(args)\n return requests.List(parent=p, filter=(\n args.state.upper() if args.state else None))",
"def getRequestList(self):\n\n result = RequestsDAO().getRequests()\n mapped_result = []\n\n if not result:\n return jsonify(Error=\"NOT FOUND\"), 404\n\n else:\n for r in result:\n mapped_result.append(self.mapToUserRequestDict(r))\n\n return jsonify(TURN=mapped_result), 200"
]
| [
"0.69532305",
"0.67579085",
"0.6556441",
"0.65352577",
"0.6497508",
"0.64858216",
"0.6394398",
"0.6211951",
"0.60207194",
"0.60156894",
"0.59925896",
"0.5909057",
"0.5878461",
"0.58699447",
"0.58357877",
"0.57668465",
"0.57654035",
"0.576168",
"0.5757679",
"0.5750489",
"0.56673974",
"0.5646558",
"0.5637824",
"0.56258506",
"0.56146145",
"0.5609619",
"0.55907524",
"0.5578814",
"0.5550742",
"0.55008"
]
| 0.7229156 | 0 |
Function for rendering the home page | def render_home():
return render_template("index.html") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def home():\n\n return render_template('home_page.html')",
"def home():\n return render_template('homepage.html')",
"def home():\n\n return render_template(\"home.html\")",
"def home():\n return render_template('home.html')",
"def home():\n return render_template('home.html')",
"def home():\n return render_template('home.html')",
"def home():\n return render_template('home.html')",
"def home():\n return render_template('home.html')",
"def home():\n return render_template('home.html')",
"def home():\n return render_template('home.html')",
"def home():\n return render_template('home.html')",
"def home():\n return render_template('home.html')",
"def home():\n return render_template('home.html')",
"def home():\n return render_template('home.html')",
"def home():\n return render_template('home.html')",
"def home():\n return render_template('home.html')",
"def home():\n return render_template('home.html')",
"def home():\n return render_template('home.html')",
"def home():\n return render_template('home.html')",
"def home():\n return render_template(\"home.html\")",
"def home():\n return render_template(\"home.html\")",
"def home_page() -> str:\r\n return render_template(\"home.html\")",
"def home():\n return render_template('home.html', title=\"Home\")",
"def home_page():\n\n return render_template('index.html')",
"def home():\n return render_template('Main_Page.html')",
"def homepage():\n return render_template('home/index.html', title=\"Home\")",
"def homepage():\n return render_template(\"home/index.html\")",
"def home():\n return render_template(\n 'home.html',\n title='Home Page',\n year=datetime.now().year,\n )",
"def homepage():\n return render_template(\"home/index.html\", title=\"Welcome\")",
"def homepage():\n\treturn render_template(\"home/a_homepage.html\", title=\"Welcome\")"
]
| [
"0.88329524",
"0.86391366",
"0.8630416",
"0.86062366",
"0.86062366",
"0.86062366",
"0.86062366",
"0.86062366",
"0.86062366",
"0.86062366",
"0.86062366",
"0.86062366",
"0.86062366",
"0.86062366",
"0.86062366",
"0.86062366",
"0.86062366",
"0.86062366",
"0.86062366",
"0.8602575",
"0.8602575",
"0.8553502",
"0.85482097",
"0.8533194",
"0.8482488",
"0.8467323",
"0.8461552",
"0.8454767",
"0.8442521",
"0.8437944"
]
| 0.88435775 | 0 |
Function for rendering an existing session page | def render_player_session(id):
sess = get_player_session(id)
if sess:
pass
#return render_template("session.html")
else:
pass
#some kind of 404
| {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def loginPage():\n\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\n \t for x in xrange(32))\n login_session['state'] = state\n return render_template('login.html', STATE=state)",
"def index():\n print(\"Inside index()\")\n if \"display_name\" not in session:\n return render_template(\"create_account.html\")\n\n return f\"Hello, {session['display_name']}\"",
"def angular_main_page():\n print(session)\n return render_template(\"/angular_main_page.html\")",
"def secure_page():\n return render_template('secure_page.html')",
"def render(self, session: Session) -> str:\n raise NotImplementedError()",
"def show_login():\n # Generate a unique session token\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\n for x in xrange(32))\n login_session['state'] = state\n # return \"The current session state is %s\" % login_session['state']\n return render_template('login.html', STATE=state)",
"def render_DM_session(id):\r\n\tsess = get_DM_session(id)\r\n\tif sess:\r\n\t\tpass\r\n\telse:\r\n\t\tpass",
"def view_session(request , session_name):\n sessions_path=settings.MEDIA_ROOT + \"Sessions\"\n s_li=os.listdir(sessions_path)\n if session_name+\".ngl\" in s_li:\n \n mdsrv_url=obtain_domain_url(request)\n redirect_url='/html/session.html?load=pufa.ngl'\n\n return redirect(mdsrv_url+redirect_url)",
"def index():\n if 'name' in session:\n return render_template('home.html')\n return redirect(url_for('log_in'))",
"def online():\r\n if current_user.is_authenticated:\r\n session['name'] = current_user.username\r\n name = current_user.username\r\n else:\r\n name = 'Guest' + str(secrets.token_hex(8))\r\n session['name'] = name\r\n # room = session.get('room', '123')\r\n return render_template('online.html', name=name)",
"def showLogin():\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\n for x in xrange(32))\n login_session['state'] = state\n return render_template(\"login.html\", state=state)",
"def showLogin():\r\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\r\n for x in xrange(32))\r\n login_session['state'] = state\r\n return render_template('login.html', STATE=state)",
"def showLogin():\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\n for x in xrange(32))\n login_session['state'] = state\n return render_template('login.html', STATE=state)",
"def showLogin():\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\n for x in xrange(32))\n login_session['state'] = state\n return render_template('login.html', STATE=state)",
"def get_index_page() -> Any:\n return render_template(\"home.html\", img_upload=session['img_full_path'],\n img_rec1=session['img_rec1'], img_rec2=session['img_rec2'],\n img_rec3=session['img_rec3'], img_rec4=session['img_rec4'],\n img_rec5=session['img_rec5'], user_rec1=session['user_rec1'],\n fols_rec1=session['fols_rec1'], user_rec2=session['user_rec2'],\n fols_rec2=session['fols_rec2'], user_rec3=session['user_rec3'],\n fols_rec3=session['fols_rec3'], user_rec4=session['user_rec4'],\n fols_rec4=session['fols_rec4'], user_rec5=session['user_rec5'],\n fols_rec5=session['fols_rec5'], likes_rec1=session['likes_rec1'],\n likes_rec2=session['likes_rec2'], likes_rec3=session['likes_rec3'],\n likes_rec4=session['likes_rec4'], likes_rec5=session['likes_rec5'])",
"def display():\n\n #still needs some cleanup on imagry and what the site is about. \n\n return render_template(\"index.html\")",
"def showLogin():\n if(checkLogin()):\n return redirect(url_for('catelog'))\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\n for x in xrange(32))\n login_session['state'] = state\n return render_template('login.html', STATE=state, isLogin=checkLogin())",
"def login():\n return render_template('login.html', next=flask.request.args.get(\"next\",\"/sessions\"))",
"def session_coverpage(course, session, coverpage):\n if course.is_link():\n naucse.utils.views.forks_raise_if_disabled()\n\n try:\n data_from_fork = course.render_session_coverpage(\n session, coverpage, request_url=request.path)\n record_content_urls(data_from_fork, f\"/{course.slug}/\")\n\n content = data_from_fork.get(\"content\")\n if content is None:\n raise InvalidInfo(\"Content of the page can't be None.\")\n\n kwargs = {\n \"course\": process_course_data(data_from_fork.get(\"course\"), slug=course.slug),\n \"session\": process_session_data(data_from_fork.get(\"session\"), slug=session),\n \"edit_info\": links.process_edit_info(data_from_fork.get(\"edit_info\")),\n \"content\": content\n }\n except POSSIBLE_FORK_EXCEPTIONS as e:\n if raise_errors_from_forks():\n raise\n\n # there's no way to replace this page, render an error page instead\n logger.error(\"There was an error rendering url %s for course '%s'\", request.path, course.slug)\n logger.exception(e)\n return render_template(\n \"error_in_fork.html\",\n malfunctioning_course=course,\n edit_info=get_edit_info(course.edit_path),\n faulty_page=f\"session_{coverpage}\",\n session=session,\n root_slug=model.meta.slug,\n travis_build_id=os.environ.get(\"TRAVIS_BUILD_ID\"),\n )\n else:\n session = course.sessions.get(session)\n\n content = session_coverpage_content(course, session, coverpage)\n allowed_elements_parser.reset_and_feed(content)\n\n kwargs = {\n \"course\": course,\n \"session\": session,\n \"edit_info\": get_edit_info(session.get_edit_path(course, coverpage)),\n \"content\": content\n }\n\n return render_template(\"coverpage.html\", **kwargs)",
"def display():\n return render_template(\"signin.html\")",
"def renderPage():\n return render_template(\"index.html\")",
"def index():\n return render_template('index.html', username=session['username'])",
"def game():\n\tif \"username\" in session:\n\t\treturn render_template(\"index.html\")\n\telse:\n\t\treturn redirect(url_for(\"default\"))",
"def play_page():\n session.permanent = True\n if 'tracks' not in session:\n session['tracks'] = {}\n if 'id' not in session:\n session['id'] = uuid4().int\n return app.send_static_file('play.html')",
"def settings_page():\n log.info(\":WEB:/settings\")\n if \"username\" in session.keys():\n if \"logged-in\" in session.keys():\n if session[\"logged-in\"]:\n session[\"user\"] = db[\"users\"].find_one(username=session[\"username\"])\n if session[\"user\"]:\n return render_template(\"settings.html\")\n return redirect(\"/\")",
"def user_profile():\n if CURR_USER_KEY in session:\n return render_template('/profile/detail.html')\n else:\n return redirect('/login')",
"def index():\n if (session_get_int(\"user_id\") is not None):\n return render_template(\"dashboard.html\")\n else:\n return render_template(\"index.html\")",
"def start():\n #test session has been started\n if session.has_key('id'):\n return render_template('index.html', id=session['id'], error=\"Unique ID has already been generated,\")\n\n randId = generateID()\n #continue to generate a new id if records are returned\n while (Submission.hasDuplicate(randId)):\n randId = generateID()\n \n #start the session\n session['id'] = randId\n return render_template('index.html', id=randId)",
"def home(request):\n if 'member_id' not in request.session:\n return redirect(\"/login/\")\n return render(request, 'esihapp/index1.html')",
"def index(page=''):\n LOGGER.info(\"Session: %s\", flask.session)\n LOGGER.info(\"Request path: %s\", flask.request.path)\n\n if 'me' in flask.session:\n return flask.render_template_string(\n r\"\"\"\n<p>Hello {{profile.name or me}}.\nWant to <a href=\"{{url_for('logout', redir=request.path[1:])}}\">log out</a>?</p>\n\n{% if profile %}\n<p>Profile data:</p>\n<ul>\n{% for k,v in profile.items() %}\n<li>{{k}}: {{v}}</li>\n{% endfor %}\n</ul>\n{% endif %}\"\"\",\n me=flask.session['me'],\n profile=flask.session.get('profile')\n )\n\n return 'You are not logged in. Want to <a href=\"{login}\">log in</a>?'.format(\n login=flask.url_for('authl.login', redir=flask.request.path[1:]))"
]
| [
"0.68190837",
"0.6669858",
"0.66534925",
"0.6565649",
"0.65498805",
"0.6485435",
"0.63670814",
"0.635986",
"0.6321983",
"0.6302406",
"0.62669337",
"0.6244736",
"0.62421983",
"0.62421983",
"0.62374693",
"0.6187892",
"0.6185891",
"0.6181225",
"0.61790746",
"0.61767036",
"0.61660385",
"0.6144776",
"0.61333936",
"0.6104799",
"0.61044943",
"0.6085644",
"0.6082293",
"0.6081402",
"0.6066309",
"0.60369366"
]
| 0.7415087 | 0 |
Function for rendering a DM session page | def render_DM_session(id):
sess = get_DM_session(id)
if sess:
pass
else:
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def render_player_session(id):\r\n\tsess = get_player_session(id)\r\n\tif sess:\r\n\t\tpass\r\n\t\t#return render_template(\"session.html\")\r\n\telse:\r\n\t\tpass\t\t\r\n\t\t#some kind of 404\r",
"def loginPage():\n\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\n \t for x in xrange(32))\n login_session['state'] = state\n return render_template('login.html', STATE=state)",
"def show_login():\n # Generate a unique session token\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\n for x in xrange(32))\n login_session['state'] = state\n # return \"The current session state is %s\" % login_session['state']\n return render_template('login.html', STATE=state)",
"def showLogin():\r\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\r\n for x in xrange(32))\r\n login_session['state'] = state\r\n return render_template('login.html', STATE=state)",
"def showLogin():\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\n for x in xrange(32))\n login_session['state'] = state\n return render_template('login.html', STATE=state)",
"def showLogin():\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\n for x in xrange(32))\n login_session['state'] = state\n return render_template('login.html', STATE=state)",
"def showLogin():\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\n for x in xrange(32))\n login_session['state'] = state\n return render_template(\"login.html\", state=state)",
"def render(self, session: Session) -> str:\n raise NotImplementedError()",
"def angular_main_page():\n print(session)\n return render_template(\"/angular_main_page.html\")",
"def data_pair_page() :\r\n\r\n script = server_document( url=f'http://localhost:5006/{data_explore_route}',\r\n arguments={'session_id' : session[ session_info.session_id_key ] } )\r\n\r\n return render_template( 'data_explore_page.html', script=script )",
"def display():\n return render_template(\"signin.html\")",
"def showLogin():\n if(checkLogin()):\n return redirect(url_for('catelog'))\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\n for x in xrange(32))\n login_session['state'] = state\n return render_template('login.html', STATE=state, isLogin=checkLogin())",
"def secure_page():\n return render_template('secure_page.html')",
"def showLogin():\r\n # crate anti-forgery state token\r\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\r\n for x in xrange(32))\r\n login_session['state'] = state\r\n return render_template('login.html', STATE=state)",
"def data_page():\n\n return render_template('Data_Page.html')",
"def get(self):\n return render_template(LOGIN_TEMPLATE)",
"def get(self):\n self.render(\"login.html\")",
"def adpanel():\n if 'user_id' not in session or session['user_id'] != 'admin':\n return redirect(url_for('login'))\n return render_template('adminpanel.html')",
"def index():\n print(\"Inside index()\")\n if \"display_name\" not in session:\n return render_template(\"create_account.html\")\n\n return f\"Hello, {session['display_name']}\"",
"def feedpage():\n\n # This page should not be seen for people who aren't login. \n # http://flask.pocoo.org/docs/1.0/patterns/viewdecorators/\n # Use login required decorator @login_required.\n\n return render_template('feedpage.html')",
"def view_session(request , session_name):\n sessions_path=settings.MEDIA_ROOT + \"Sessions\"\n s_li=os.listdir(sessions_path)\n if session_name+\".ngl\" in s_li:\n \n mdsrv_url=obtain_domain_url(request)\n redirect_url='/html/session.html?load=pufa.ngl'\n\n return redirect(mdsrv_url+redirect_url)",
"def display():\n\n #still needs some cleanup on imagry and what the site is about. \n\n return render_template(\"index.html\")",
"def login():\n return render_template('login.html', next=flask.request.args.get(\"next\",\"/sessions\"))",
"def show_profile():\n print('LOGIN SESSION:', login_session)\n if 'userid' in login_session:\n category = session.query(Category).first()\n item = session.query(Item).first()\n return render_template('profile.html', login_session=login_session, root=app.instance_path, category=category,\n item=item)\n flash('Unfortunately you need to be logged in to see your profile', 'error')\n return redirect(url_for('show_homepage'))",
"def render(self):\n navbar = self.render_navbar()\n postcards = self.render_postcards()\n sm_metadata = self.render_social_media_metadata()\n\n return PAGE_TEMPLATE % (sm_metadata, navbar, postcards)",
"def get_patron_login():\n return render_template('login.html')",
"def login():\n\n return render_template('login.html')",
"def view_html_page():\n\n return render_template(\"moby.html\")",
"def online():\r\n if current_user.is_authenticated:\r\n session['name'] = current_user.username\r\n name = current_user.username\r\n else:\r\n name = 'Guest' + str(secrets.token_hex(8))\r\n session['name'] = name\r\n # room = session.get('room', '123')\r\n return render_template('online.html', name=name)",
"def showLoginPage(request):\n return render(request, \"core/login.html\", {\n\n })"
]
| [
"0.69160146",
"0.6585566",
"0.6536112",
"0.62708634",
"0.6269151",
"0.6269151",
"0.62596005",
"0.6238901",
"0.61822796",
"0.6137672",
"0.6050705",
"0.603166",
"0.6014789",
"0.59822565",
"0.59557796",
"0.59547424",
"0.5920551",
"0.589151",
"0.58809185",
"0.5834792",
"0.5823835",
"0.5793246",
"0.5781567",
"0.57783896",
"0.57648367",
"0.57552075",
"0.57509124",
"0.56974626",
"0.56884396",
"0.5654286"
]
| 0.7526199 | 0 |
Return user properties from OGDS. Always returns a minimal set of the properties 'ogg.user.userid' and 'ogg.user.title', even when no OGDS user is found. | def _collect_properties(self):
properties = {
'userid': self.user_id,
'title': self.get_fullname()
}
if not self.ogds_user:
return properties
for attribute_name in self.ogds_user_attributes:
value = getattr(self.ogds_user, attribute_name)
properties[attribute_name] = value
return properties | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def extract_user_gql(data):\n return {\n \"pk\": int(data[\"id\"]),\n \"username\": data[\"username\"],\n \"full_name\": data[\"full_name\"],\n \"is_private\": data[\"is_private\"],\n \"profile_pic_url\": data[\"profile_pic_url\"],\n \"is_verified\": data.get(\"is_verified\"),\n \"media_count\": data[\"edge_owner_to_timeline_media\"][\"count\"],\n \"follower_count\": data[\"edge_followed_by\"][\"count\"],\n \"following_count\": data[\"edge_follow\"][\"count\"],\n \"biography\": data[\"biography\"],\n \"external_url\": data[\"external_url\"],\n \"is_business\": data[\"is_business_account\"],\n }",
"def extract_user_short(data):\n user_pk = data.get(\"id\", data.get(\"pk\"))\n assert user_pk, 'User without pk \"%s\"' % data\n return {\n \"pk\": int(user_pk),\n \"username\": data[\"username\"],\n \"full_name\": data[\"full_name\"],\n \"is_private\": data.get(\"is_private\"),\n \"profile_pic_url\": data[\"profile_pic_url\"],\n \"is_verified\": data.get(\"is_verified\"),\n # \"is_unpublished\": data.get(\"is_unpublished\"),\n }",
"def getdat(user):\r\n profile = user.profile\r\n return [user.username, user.email] + [getattr(profile, xkey, '') for xkey in profkeys]",
"def get_pet_user_info():\n\n return db.session.query(User.email, Pet.pet_name, Pet.pet_type, Pet.pet_breed, \n Pet.pet_color, Pet.pet.status, Pet.last_address).filter(User.user_id == Pet.user_id).all()",
"def populate_og_data(self, user):\n title = user.username if not user.profile.name else user.profile.name\n\n image_field = None\n # For image use reel thumbnail\n if user.profile.reel_thumbnail_16_9:\n image_field = user.profile.reel_thumbnail_16_9\n # Otherwise use the avatar\n elif user.profile.avatar:\n image_field = user.profile.avatar\n # Otherwise use the thumbnails of the latest posts\n elif user.post_set.filter(status='published'):\n latest_post = user.post_set.filter(status='published').last()\n if latest_post.thumbnail:\n image_field = latest_post.thumbnail\n\n image_alt = f\"{title} on anima.to\"\n\n return OgData(\n title=title,\n description=user.profile.bio,\n image_field=image_field,\n image_alt=image_alt,\n )",
"def get_users_info(): \n \n data = user_obj.get_users_info()\n return data",
"def rootuser_info(self, datadict):\n\n dict1 = OrderedDict()\n dict1 = datadict['entry_data']['ProfilePage'][0]['graphql']['user']\n\n userdict = OrderedDict()\n keylist = ['id', 'username', 'full_name', 'biography', 'edge_follow', 'edge_followed_by', 'is_private', 'external_url', 'profile_pic_url_hd']\n\n for key in keylist:\n if key is 'edge_follow':\n userdict['following'] = dict1[key]\n elif key is 'edge_followed_by':\n userdict['followers'] = dict1[key]\n else:\n userdict[key] = dict1[key]\n\n userdict['platform'] = datadict['platform']\n\n return (json.dumps(userdict, indent=4))",
"def fetch_user_data(self, user_id):\n\n log.info('Fetching user data from Twitter for ID %s' % user_id)\n user = self.api.get_user(user_id)\n props = user.__dict__ # user properties\n\n del props['_api'], props['status'] # no embedded objects\n\n props['accessed'] = datetime.datetime.now()\n props['detail'] = 'full'\n props['type'] = 'user'\n\n return props",
"def get_users(self):\n # remove some user media fields that we can't submit back\n def clean_media(entry):\n entry.pop(\"mediaid\", None)\n entry.pop(\"userid\", None)\n entry.pop(\"description\", None)\n return entry\n zabbix_users = self.conn.user.get(selectMedias=\"extend\", selectUsrgrps=\"extend\")\n zabbix_users = {user[\"alias\"].lower(): User(\n id=user[\"userid\"],\n name=user[\"name\"],\n surname=user[\"surname\"],\n alias=user[\"alias\"],\n groups=set(g[\"usrgrpid\"] for g in user[\"usrgrps\"]),\n media=[clean_media(entry) for entry in user[\"medias\"]],\n ) for user in zabbix_users}\n return zabbix_users",
"def get_user_profiles(self):\n print 'inside get user profiles'\n print 'self.username :' + self.username\n g = GoogleAnalyticsAPI(self.username)\n if g:\n print 'GA client exists'\n user_accounts = g.get_user_accounts()\n return user_accounts.get('items')\n else:\n print 'GA client does not exist'\n return []",
"def get_user():\n with open(app.config['DATA_XML'], 'r') as xmlfile:\n root = ElementTree.parse(xmlfile).getroot()\n\n for item in root.iter('server'):\n result = '{}://{}'.format(\n item.find('protocol').text,\n item.find('host').text\n )\n\n data = {\n user.attrib['id']: {\n 'name': user.find('name').text,\n 'avatar': '{}{}'.format(\n result,\n user.find('avatar').text\n )\n }\n for user in root.iter('user')\n }\n return OrderedDict(\n sorted(\n data.items(),\n key=lambda result: itemgetter('name')(itemgetter(1)(result)),\n cmp=locale.strcoll\n )\n )",
"def list_users(order_by=\"id\"):\n ret = {}\n status, result = _query(action=\"meta\", command=\"users\")\n root = ET.fromstring(result)\n for user in root:\n user_id = None\n user_ret = {}\n for item in user.items():\n user_ret[item[0]] = item[1]\n if item[0] == \"id\":\n user_id = item[1]\n for item in user:\n user_ret[item.tag] = item.text\n ret[user_ret[order_by]] = user_ret\n return ret",
"def showORGusers(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n ORG_ID = kwargs['ORG_ID']\n strCSPProdURL = kwargs['strCSPProdURL']\n jsonResponse = get_csp_users_json(strCSPProdURL, ORG_ID, sessiontoken)\n if jsonResponse == None:\n print(\"API Error\")\n sys.exit(1)\n\n users = jsonResponse['results']\n table = PrettyTable(['First Name', 'Last Name', 'User Name'])\n for i in users:\n table.add_row([i['user']['firstName'],i['user']['lastName'],i['user']['username']])\n print (table.get_string(sortby=\"Last Name\"))",
"def get_users(self):\n fields = ['name', ]\n return self.get_data(\"myUsers\", fields)",
"def get_gensec(self, obj):\n if obj.gensec is None:\n return None\n serializer = UserProfileSerializer(obj.gensec)\n return serializer.data",
"def users_get(self) -> Dict[str, list]:\n self.__logger.debug('Eva.users_get called')\n return self.__http_client.users_get()",
"def get_user_info_by_id(self, user_id: int) -> dict:",
"def user_data(self, access_token, *args, **kwargs):\n fields_selectors = ','.join(set(['id', 'first-name', 'last-name'] +\n self.setting('FIELD_SELECTORS', [])))\n # use set() over fields_selectors since LinkedIn fails when values are\n # duplicated\n url = 'https://api.linkedin.com/v1/people/~:(%s)' % fields_selectors\n raw_xml = self.oauth_request(access_token, url).content\n try:\n return to_dict(ElementTree.fromstring(raw_xml))\n except (ExpatError, KeyError, IndexError):\n return None",
"def test_get_user_info(self):\n \n name = app.get_user_info(\"skullphish\", name=True)\n vocab_count = app.get_user_info(\"skullphish\", vocab_count=True)\n userId = app.get_user_info(\"skullphish\", userId=True)\n \n self.assertTrue(name == \"damian rodbari\")\n self.assertEqual(vocab_count,0)\n self.assertTrue(userId == ObjectId(\"5bb8a0c006f1f8105bc3bb23\"))",
"def get_user_info(self, access_token, openid):\n url = get_config(\"login.wechat.user_info_url\") % (access_token, openid)\n return self._access_wxapi_or_raise(url)",
"def locate_users_with_gapps(self):\n return self.ldap_connection.search_s(\"ou=mx,dc=redhat,dc=com\",\n ldap.SCOPE_SUBTREE, \"(|(sendmailMTAAliasValue=*@gapps.redhat.com)(sendmailMTAKey=*@gapps.redhat.com))\",\n [\"sendmailMTAKey\", \"sendmailMTAAliasValue\", \"rhatMTAAllowExternal\"])",
"def list():\n\n result = {}\n status = 404\n\n # nodes=Property.query.all()\n props_by_user = Property.query.filter(Property.users.contains(current_user)).all()\n if props_by_user:\n result['props'] = props_by_user\n status = 200\n\n return result, status",
"def get_users(self):\n return {key: value.user for key, value in self}",
"def get_user_info(self) -> str:\n return self._searcher.get_user_info()",
"def get_node_query_if_properties(self, uid):\n\n return ((\"MATCH (n:user) \"\n \"WHERE (n.id = %s AND HAS(n.name)) \"\n \"RETURN n\") % str(uid))",
"def properties_with_uid(self):\n return \"uid\", \"first_name\", \"last_name\", \"email\", \"phone\", \"description\", \"company_uid\"",
"def get_user_data(prs, client_id, client_secret):\n users = {}\n for owner, repo, number, pr in prs:\n username = pr.username\n\n # Initialize the User if needed\n if username not in users:\n print(pr.user_url, file=sys.stderr)\n payload = {\n 'client_id': client_id,\n 'client_secret': client_secret\n }\n resp = requests.get(pr.user_url, params=payload)\n\n # Abort if the return is an error\n out = resp.json()\n if 'message' in out:\n pprint.pprint(out, file=sys.stderr)\n raise Exception(resp.text)\n\n user = User(out)\n users[username] = user\n\n users[username].add_pr(pr)\n\n return users",
"def _parse_user_data (self, netflix_page_data):\n user_data = {};\n important_fields = [\n 'authURL',\n 'gpsModel',\n 'guid'\n ]\n\n # values are accessible via dict (sloppy parsing successfull)\n if type(netflix_page_data) == dict:\n for important_field in important_fields:\n user_data.update({important_field: netflix_page_data.get(important_field, '')})\n return user_data\n\n # values are stored in lists (returned from JS parser)\n for item in netflix_page_data:\n if 'memberContext' in dict(item).keys():\n for important_field in important_fields:\n user_data.update({important_field: item['memberContext']['data']['userInfo'][important_field]})\n\n return user_data",
"def get(\n user_id=None, discord_id=None, google_id=None, email=None,\n ):\n temp_cursor = user_db.cursor()\n\n pos_selectors = {\n \"user_id\": user_id,\n \"discord_id\": discord_id,\n \"google_id\": google_id,\n \"email\": email,\n }\n\n user = None\n for selector in pos_selectors.keys():\n sel_value = pos_selectors[selector]\n if sel_value is None:\n continue\n user = temp_cursor.execute(\n \"SELECT * FROM users WHERE \" + selector + \" = ?\", (sel_value,)\n ).fetchone()\n\n if user is not None:\n return User_Info.init_from_db(user)\n\n return None",
"def parse_user_fields(json_data):\n # Populate the fields\n user_info = {}\n for db_field, json_field in Users.UserJSON.fields.items():\n try:\n user_info[db_field] = get_json_field(json_data, json_field)\n if db_field == 'user_address_street':\n user_info[db_field] = user_info.get(db_field).replace('\\n', '')\n elif (db_field == 'user_first_login') or (db_field == 'user_last_login'):\n raw_timestamp = user_info.get(db_field)[:19]\n user_info[db_field] = core_utils.validate_timestamp(raw_timestamp)\n elif db_field == 'user_tags':\n user_info[db_field] = ', '.join(user_info.get(db_field))\n elif db_field == 'user_profile':\n profile = user_info[db_field]\n for idx in range(len(profile)):\n if profile[idx]['jive_label'] in Users.UserJSON.profile_fields:\n profile_field_name = Users.UserJSON.profile_fields.get(profile[idx]['jive_label'])\n user_info[profile_field_name] = profile[idx]['value']\n del user_info['user_profile']\n except (KeyError, IndexError, AttributeError):\n # Continue on to the next field\n continue\n # Return the user information\n return user_info"
]
| [
"0.64163625",
"0.59113324",
"0.58720744",
"0.58373237",
"0.569525",
"0.558774",
"0.5474033",
"0.5464169",
"0.54381526",
"0.5365471",
"0.53464866",
"0.53319556",
"0.53300774",
"0.5273262",
"0.5270092",
"0.5262506",
"0.52574897",
"0.5255697",
"0.5240036",
"0.5235572",
"0.5226835",
"0.52178824",
"0.5211456",
"0.51819503",
"0.518032",
"0.51619536",
"0.51571715",
"0.51570815",
"0.51382774",
"0.5136459"
]
| 0.7212165 | 0 |
Calls functions in each NN file to get results. Starts with the Python versions, then calls the run file for the C++ versions. | def get_results():
#Get python results
import mnist_nn
import mnist_nn_gpu
mnist_nn.save_results()
mnist_nn_gpu.save_results()
#Get cpp results
import subprocess
subprocess.call(['c++//./run.sh']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_cpp(self):",
"def main():\n\n config = None\n\n try:\n args = get_args()\n config = process_config(args.config)\n raise RuntimeError(\"Missing or invalid arguments\")\n except Exception as e:\n logging.error(\"Failed\", exc_info=e)\n\n print(\"Create the data generator.\")\n # data_loader = MnistDataLoader(config=config)\n data_loader = IrisDataLoader(config=config)\n train_data = data_loader.get_train_data()\n test_data = data_loader.get_test_data()\n\n print(\"Build the model\")\n # cnn_model = ConvModel(config=config).build_model()\n cnn_model = ANNModel(config=config).build_model()\n\n print(\"Load the best weights\")\n cnn_model.load_weights(\"experiments/{}/{}/checkpoints/{}-weights.best.hdf5\".format(\n config.evaluation.date, config.exp.name, config.exp.name))\n\n print(\"Evaluate the model\")\n print(\"Training Metrics\")\n evaluate(model=cnn_model, data=train_data)\n print(\"Testing Metrics\")\n evaluate(model=cnn_model, data=test_data)\n\n # print(\"Visualize loss and accuracy for Training and Validation data\")\n # plot_history(config=config)\n\n # print(\"Plotting ROC Curve\")\n # plot_roc(model=cnn_model, data=test_data)\n\n print(\"Classifcation Accuracy Report\")\n classification_accuracy_report(model=cnn_model, data=test_data)",
"def main():\n\n # set up output directory and file\n output_file_folder = \"output/{}\".format(args.experiment_name)\n Path(output_file_folder).mkdir(parents=True, exist_ok=True)\n args.output_file_name = \"{}/{}.csv\".format(output_file_folder, args.model_name)\n args.checkpoint_name = \"{}/{}.pt\".format(output_file_folder, args.model_name + \"_best_model\")\n\n # read lcquad merged data\n if args.dataset_name == \"lcquad\":\n df_train = pd.read_csv(\"./data/lcquad/gold_db/train_gold.csv\")\n df_valid = pd.read_csv(\"./data/lcquad/gold_db/valid_gold.csv\")\n df_test = pd.read_csv(\"./data/lcquad/gold_db/lcquad_test_sorted.csv\")\n args.gold_file_name = \"lcquad/lcquad_gt_5000.csv\"\n # elif args.dataset_name == \"qald9\":\n # df_train = pd.read_csv(\"./data/qald-9/train_gold.csv\")\n # df_valid = pd.read_csv(\"./data/qald-9/valid_gold.csv\")\n # df_test = pd.read_csv(\"./data/qald-9/test_gold.csv\")\n # args.gold_file_name = \"qald/qald_data_gt.csv\"\n # elif args.dataset_name == \"webqsp\":\n # df_train = pd.read_csv(\"./data/webqsp/train_gold.csv\")\n # df_valid = pd.read_csv(\"./data/webqsp/valid_gold.csv\")\n # df_test = pd.read_csv(\"./data/webqsp/test_gold.csv\")\n # args.gold_file_name = \"webqsp/webqsp_data_gt.csv\"\n\n train_data = read_data_file(df_train, device, \"train\")\n valid_data = read_data_file(df_valid, device, \"valid\")\n test_data = read_data_file(df_test, device, \"test\")\n\n # train model and evaluate\n if args.model_name == \"pure\":\n model = PureNameLNN(args.alpha, -1, False)\n elif args.model_name == \"ctx\":\n model = None\n elif args.model_name == 'type':\n model = None\n elif args.model_name == \"pure_ctx\":\n model = None\n elif args.model_name == \"pure_type\":\n model = None\n elif args.model_name == \"ctx_type\":\n model = None\n\n model = model.to(device)\n print(\"model: \", args.model_name, args.alpha)\n\n # training\n train(model, train_data, valid_data, test_data, args.checkpoint_name, args.num_epoch, args.margin, args.learning_rate)",
"def main():\n dataset_idx = [11]\n network_idx = [0]\n reshape_input = [False]\n output_idxs = [0, 1]\n lrs = [0, 1, 2]\n dataset_ft_idx = [0,1,2,3]\n counter_exp = 0\n freeze = [0]\n percentages = [12]\n for dts in range(len(dataset_idx)):\n for nt in range(len(network_idx)):\n for opt in output_idxs:\n for dft in dataset_ft_idx:\n for pr in percentages:\n for rsi in range(len(reshape_input)):\n for fr in freeze:\n for lr in lrs:\n config = configuration(dataset_idx=dataset_idx[dts],\n network_idx=network_idx[nt],\n output_idx=opt,\n usage_modus_idx=5,\n dataset_fine_tuning_idx=dft,\n reshape_input=reshape_input[rsi],\n learning_rates_idx=lr,\n name_counter=counter_exp,\n freeze=fr,\n percentage_idx=pr,\n fully_convolutional=False)\n\n setup_experiment_logger(logging_level=logging.DEBUG,\n filename=config['folder_exp'] + \"logger.txt\")\n\n logging.info('Finished')\n\n modus = Modus_Selecter(config)\n\n # Starting process\n modus.net_modus()\n counter_exp += 1\n\n\n return",
"def main():\n\n clues_file = \"data/part1-clues.txt\"\n parsed_clues_file = \"data/part1-parsedclues.txt\"\n cp = ClueParser()\n\n clues = loadList(clues_file)\n gold_parsed_clues = loadList(parsed_clues_file)\n assert(len(clues) == len(gold_parsed_clues))\n\n cp.train(clues, gold_parsed_clues)\n parsed_clues = cp.parseClues(clues)\n cp.evaluate(parsed_clues, gold_parsed_clues)",
"def main():\n parser = argparse.ArgumentParser(\n description='Runs test for C++ implementation of M*')\n parser.add_argument('test_file', help='File describing test cases')\n parser.add_argument('output_file', help='Name of output file')\n parser.add_argument('num_processors', type=int, action='store',\n help='Number of processes to run on each node. ' +\n 'The local host running the primary server will ' +\n 'run one fewer worker processes')\n parser.add_argument('-i', action='store', type=float, default=1.0,\n help='Set inflation factor for the heuristic, ' +\n 'defaults to 1', metavar='INF', dest='inflation')\n parser.add_argument('-t', action='store', type=int, default=120,\n help='Set time limit for planning. Defaults to 2 ' +\n 'minutes', dest='time_limit')\n parser.add_argument('--hosts', action='store',\n default=('python', 'cobra', 'viper', 'anaconda'),\n help='Hostnames/IPs to use as processing nodes.',\n nargs='*', metavar='HOSTNAME')\n\n args = parser.parse_args()\n\n run_cpp_mstar_trial(args.test_file, args.output_file,\n inflation=args.inflation, time_limit=args.time_limit,\n hosts=args.hosts, num_processors=args.num_processors)",
"def main():\n logger.info(\"=> creating model ...\")\n logger.info(\"Classes: %s\", cfg.classes)\n\n value_scale = 255\n mean = [0.485, 0.456, 0.406]\n mean = [item * value_scale for item in mean]\n std = [0.229, 0.224, 0.225]\n std = [item * value_scale for item in std]\n gray_folder = os.path.join(cfg.result_path, 'gray')\n color_folder = os.path.join(cfg.result_path, 'color')\n\n test_transform = pt_transform.Compose([pt_transform.Normalize(mean=mean, std=std, is_train=False)])\n\n if cfg.data_root[-1] == \"/\":\n val_list = cfg.data_root + cfg.val_list\n else:\n val_list = cfg.data_root + '/' + cfg.val_list\n\n test_data = pt_dataset.SemData(\n split='val', data_root=cfg.data_root,\n data_list=val_list,\n transform=test_transform)\n\n test_loader = ds.GeneratorDataset(test_data, column_names=[\"data\", \"label\"],\n shuffle=False)\n test_loader.batch(1)\n colors = numpy.loadtxt(cfg.color_txt).astype('uint8')\n\n from src.model import cpnet\n\n CPNet = cpnet.CPNet(\n prior_channels=256,\n proir__size=60,\n am_kernel_size=11,\n pretrained=True,\n pretrained_path=cfg.pretrain_path,\n deep_base=True\n )\n\n ms_checkpoint = load_checkpoint(cfg.ckpt)\n load_param_into_net(CPNet, ms_checkpoint, strict_load=True)\n CPNet.set_train(False)\n test(test_loader, test_data.data_list, CPNet, cfg.classes, mean, std, cfg.base_size, cfg.test_h,\n cfg.test_w, cfg.scales, gray_folder, color_folder, colors)\n if cfg.split != 'test':\n cal_acc(test_data.data_list, gray_folder, cfg.classes)",
"def task_process(args):\n if args.mode == 'change model':\n for i in range(config.center_len):\n h, w = config.center_list[i][0], config.center_list[i][1]\n os.system('rm -rf ctpn_change_{}x{}.onnx'.format(h, w))\n for i in range(config.center_len):\n h, w = config.center_list[i][0], config.center_list[i][1]\n os.system('{} change_model.py --input_path={}/ctpn_{}x{}.onnx --output_path={}/ctpn_change_{}x{}.onnx' \\\n .format(args.interpreter, args.src_dir, h, w,args.res_dir, h, w)) \n if args.mode == 'preprocess':\n for i in range(config.center_len):\n os.system('mkdir -p {}_{}x{}'.format(args.res_dir, config.center_list[i][0], config.center_list[i][1]))\n os.system('{} ctpn_preprocess.py --src_dir={} --save_path={}' \\\n .format(args.interpreter, args.src_dir, args.res_dir))\n if args.mode == 'ais_infer':\n fps_all = 0\n os.system('mkdir -p {}/inf_output'.format(args.res_dir))\n for i in range(config.center_len):\n h, w = config.center_list[i][0], config.center_list[i][1]\n\n os.system('{} --model={} --input={}_{}x{} --dymHW {},{} --device {} --batchsize={} --output={}/inf_output' \\\n .format(args.interpreter, args.om_path, args.src_dir ,h , w, h, w,args.device, args.batch_size, args.res_dir))\n\n sumary_path = glob.glob('{}/inf_output/*ary.json'.format(args.res_dir))[0]\n with open(sumary_path, 'r') as f:\n output = json.load(f)\n throughput = output['throughput'] \n fps_all = fps_all + throughput * config.center_count[i]\n os.system('rm -f {}'.format(sumary_path))\n os.system('mv {}/inf_output/*/*.bin {}'.format(args.res_dir, args.res_dir))\n os.system('rm {}/inf_output -rf'.format(args.res_dir))\n fps_all = fps_all / config.imgs_len\n print(\"====performance data====\")\n print('CTPN bs{} models fps:{}'.format(args.batch_size, fps_all))",
"def main():\r\n # Read dataset.\r\n reader = DatasetReader\r\n train_filename = sys.argv[1]\r\n test_filename = train_filename.replace('_train_', '_dev_')\r\n term_index, tag_index, train_data, test_data = reader.ReadData(train_filename, test_filename)\r\n (train_terms, train_tags, train_lengths) = train_data\r\n (test_terms, test_tags, test_lengths) = test_data\r\n\r\n model = SequenceModel(train_tags.shape[1], len(term_index), len(tag_index))\r\n model.build_inference()\r\n model.build_training()\r\n for j in range(5):\r\n model.train_epoch(train_terms,train_tags, train_lengths)\r\n print('Finished epoch %i. Evaluating ...' % (j+1))\r\n model.evaluate(test_terms, test_tags, test_lengths)",
"def main():\n import sys\n param = MODULE_HELPER.initialize_module()\n\n\n outfile = param['module_dir']+param['outstub']\n call = [param['featureCount_exec']]\n\n\n if param['paired']:\n call = call + ['-P', '-p']\n if param['featureCount_by_meta'] == False:\n call.append('-f')\n\n call = call + ['-t', param['featureCount_t']]\n call = call + ['-s', param['featureCount_s']]\n call = call + ['-g', param['featureCount_id']]\n call = call + ['-a', param['genome_annotation_gft']]\n call = call + ['-o', outfile]\n call.append(param['working_file'])\n\n param['file_handle'].write('CALL: '+' '.join(call)+'\\n')\n output, error = subprocess.Popen(call,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE).communicate()\n\n param['file_handle'].write(error)\n param['file_handle'].write(output)\n\n if not os.path.exists(outfile+'.summary'):\n param['file_handle'].write('featureCount run failed \\n')\n sys.exit(0)\n\n #wrap up and return the current workingfile\n MODULE_HELPER.wrapup_module(param, [outfile])",
"def main(params):\n\n train = []\n test = []\n imdir = params['dest'] + '/{0}/COCO_{0}_{1:012d}.jpg'\n\n if params['v'] == 2:\n train_annotations_file = params['dir'] + '/v2_mscoco_train2014_annotations.json'\n val_annotations_file = params['dir'] + '/v2_mscoco_val2014_annotations.json'\n train_questions_file = params['dir'] + '/v2_OpenEnded_mscoco_train2014_questions.json'\n val_questions_file = params['dir'] + '/v2_OpenEnded_mscoco_val2014_questions.json'\n test_questions_file = params['dir'] + '/v2_Questions_Test_mscoco/v2_OpenEnded_mscoco_test2015_questions.json'\n else:\n train_annotations_file = params['dir'] + '/mscoco_train2014_annotations.json'\n val_annotations_file = params['dir'] + '/mscoco_val2014_annotations.json'\n train_questions_file = params['dir'] + '/OpenEnded_mscoco_train2014_questions.json'\n val_questions_file = params['dir'] + '/OpenEnded_mscoco_val2014_questions.json'\n test_questions_file = params['dir'] + '/Questions_Test_mscoco/v2_OpenEnded_mscoco_test2015_questions.json'\n\n if params['split'] == 1:\n\n print('Loading annotations and questions...')\n train_anno = json.load(open(train_annotations_file, 'r'))\n val_anno = json.load(open(val_annotations_file, 'r'))\n\n train_ques = json.load(open(train_questions_file, 'r'))\n val_ques = json.load(open(val_questions_file, 'r'))\n\n subtype = 'train2014'\n for i in range(len(train_anno['annotations'])):\n ans = train_anno['annotations'][i]['multiple_choice_answer']\n\n answer_dict = sum_over_occurences(train_anno['annotations'][i]['answers'])\n question_id = train_anno['annotations'][i]['question_id']\n image_path = imdir.format(subtype, train_anno['annotations'][i]['image_id'])\n\n question = train_ques['questions'][i]['question']\n\n train.append(\n {'ques_id': question_id, 'img_path': image_path, 'question': question, 'ans': ans,\n 'answers': answer_dict})\n\n subtype = 'val2014'\n for i in range(len(val_anno['annotations'])):\n ans = val_anno['annotations'][i]['multiple_choice_answer']\n\n # A modification to count the number of occurences of each answer and then store\n # them in the json file as well\n answer_dict = sum_over_occurences(val_anno['annotations'][i]['answers'])\n\n question_id = val_anno['annotations'][i]['question_id']\n image_path = imdir.format(subtype, val_anno['annotations'][i]['image_id'])\n\n question = val_ques['questions'][i]['question']\n\n test.append({'ques_id': question_id, 'img_path': image_path, 'question': question, 'ans': ans,\n 'answers': answer_dict})\n else:\n print('Loading annotations and questions...')\n train_anno = json.load(open(train_annotations_file, 'r'))\n val_anno = json.load(open(val_annotations_file, 'r'))\n\n train_ques = json.load(open(train_questions_file, 'r'))\n val_ques = json.load(open(val_questions_file, 'r'))\n test_ques = json.load(open(test_questions_file, 'r'))\n\n subtype = 'train2014'\n for i in range(len(train_anno['annotations'])):\n ans = train_anno['annotations'][i]['multiple_choice_answer']\n question_id = train_anno['annotations'][i]['question_id']\n image_path = imdir.format(subtype, train_anno['annotations'][i]['image_id'])\n\n question = train_ques['questions'][i]['question']\n\n train.append(\n {'ques_id': question_id, 'img_path': image_path, 'question': question, 'ans': ans})\n\n subtype = 'val2014'\n for i in range(len(val_anno['annotations'])):\n ans = val_anno['annotations'][i]['multiple_choice_answer']\n question_id = val_anno['annotations'][i]['question_id']\n image_path = imdir.format(subtype, 
val_anno['annotations'][i]['image_id'])\n\n question = val_ques['questions'][i]['question']\n\n train.append(\n {'ques_id': question_id, 'img_path': image_path, 'question': question, 'ans': ans})\n\n subtype = 'test2015'\n for i in range(len(test_ques['questions'])):\n print(test_ques.keys())\n ans = val_anno['annotations'][i]['multiple_choice_answer']\n question_id = test_ques['questions'][i]['question_id']\n image_path = imdir.format(subtype, test_ques['questions'][i]['image_id'])\n\n question = test_ques['questions'][i]['question']\n\n test.append({'ques_id': question_id, 'img_path': image_path, 'question': question, 'ans': ans})\n\n print('Training sample %d, Testing sample %d...' % (len(train), len(test)))\n\n if v2:\n json.dump(train, open('data/vqa_raw_train.json', 'w'))\n json.dump(test, open('data/vqa_raw_test.json', 'w'))\n else:\n json.dump(train, open('data/VQAv1/vqa_raw_train.json', 'w'))\n json.dump(test, open('data/VQAv1/vqa_raw_test.json', 'w'))",
"def main():\n get_engine(onnx_file_path, engine_file_path)",
"def main(unused_argv):\n make_dir(FLAGS.raw_dir)\n\n # Get paths of download/extracted training and evaluation files.\n print(\"Downloading data from source\")\n train_files = get_raw_files(FLAGS.raw_dir, constants.TRAIN_DATA_SOURCES)\n eval_files = get_raw_files(FLAGS.raw_dir, constants.EVAL_DATA_SOURCES)",
"def main():\n argparser = ArgumentParser()\n argparser.add_argument('--case', type=int, required=True,\n help='case number to create observations e.g. 1 if 1.json')\n args = argparser.parse_args()\n\n case = args.case\n observation_file = os.path.join(OBSERVATION_DIR, '{}.json'.format(case))\n with open(observation_file, 'r') as f:\n observation_config = json.load(f)\n\n nodes = observation_config['nodes']\n edges = observation_config['edges']\n observations = observation_config['observations']\n\n # solution part\n parameters = _get_learned_parameters(nodes=nodes, edges=edges, observations=observations)\n # end solution part\n\n # json only recognises floats, not np.float, so we need to cast the values into floats.\n for node, node_params in parameters.items():\n for param, val in node_params.items():\n node_params[param] = float(val)\n parameters[node] = node_params\n\n if not os.path.exists(PREDICTION_DIR):\n os.makedirs(PREDICTION_DIR)\n prediction_file = os.path.join(PREDICTION_DIR, '{}.json'.format(case))\n\n with open(prediction_file, 'w') as f:\n json.dump(parameters, f, indent=1)\n print('INFO: Results for test case {} are stored in {}'.format(case, prediction_file))",
"def main():\n parser = argparse.ArgumentParser(\n description=\"Lite version of the CNVnator written in Python.\\nA tool for CNV discovery from depth of read mapping.\")\n parser.add_argument('-version', '--version', action='store_true', help='show version number and exit')\n parser.add_argument('-root', '--root', type=str, nargs=\"+\",\n help=\"CNVnator hd5 file: data storage for all calculations\", default=None)\n\n parser.add_argument('-download', '--download_resources', action='store_true', help='download resource files')\n\n parser.add_argument('-chrom', '--chrom', type=str, nargs=\"+\", help=\"list of chromosomes to apply calculation\",\n default=[])\n parser.add_argument('-v', '--verbose', type=str,\n choices=[\"none\", \"debug\", \"info\", \"warning\", \"error\", \"d\", \"e\", \"i\", \"w\"],\n help=\"verbose level: debug, info (default), warning, error\", default=\"info\")\n parser.add_argument('-log', '--log_file', type=str, help='log file')\n parser.add_argument('-j', '--max_cores', type=int,\n help=\"maximal number of cores to use in calculation\", default=8)\n parser.add_argument('-rd', '--rd', nargs=\"+\", type=str, help=\"read bam/sam/cram and store read depth information\")\n parser.add_argument('-T', '--reference_filename', type=str, help=\"reference fasta for CRAM\")\n\n parser.add_argument('-gc', '--gc', type=str, help=\"read fasta file and store GC/AT content\")\n parser.add_argument('-cgc', '--copy_gc', type=str, help=\"copy GC/AT content from another cnvnator file\")\n parser.add_argument('-his', '--his', type=binsize_type, nargs=\"+\",\n help=\"create histograms for specified bin size (multiple bin sizes separate by space)\")\n parser.add_argument('-snp2his', '--his_from_snp', type=binsize_type, nargs=\"+\",\n help=\"create histograms for specified bin size (multiple bin sizes separate by space)\")\n parser.add_argument('-stat', '--stat', type=binsize_type, nargs=\"+\",\n help=\"calculate statistics for specified bin size (multiple bin sizes separate by space)\")\n parser.add_argument('-partition', '--partition', type=binsize_type, nargs=\"+\",\n help=\"calculate segmentation for specified bin size (multiple bin sizes separate by space)\")\n parser.add_argument('-call', '--call', type=str, nargs=\"+\",\n help=\"CNV caller: [baf] bin_size [bin_size2 ...] 
(multiple bin sizes separate by space)\")\n parser.add_argument('-vcf', '-snp', '--vcf', nargs=\"+\", type=str, help=\"read SNP data from vcf files\")\n parser.add_argument('-somatic_snv', '--somatic_snv', nargs=\"+\", type=str, help=\"read SNP data from vcf files\")\n\n parser.add_argument('-minc', '--min_count', type=int,\n help=\"minimal count of haterozygous SNPs\", default=None)\n parser.add_argument('-vcf2rd', '--rd_from_vcf', type=str, help=\"read SNP data from vcf files\")\n parser.add_argument('-noAD', '--no_snp_counts', action='store_true',\n help=\"read positions of variants, not counts (AD tag)\")\n parser.add_argument('-nofilter', '--no_filter', action='store_true',\n help=\"read all variants (not only PASS)\")\n parser.add_argument('-ad', '--ad_tag', type=str, help=\"counts tag (default: AD)\", default=\"AD\")\n parser.add_argument('-gt', '--gt_tag', type=str, help=\"genotype tag (default: GT)\", default=\"GT\")\n parser.add_argument('-dp', '--dp_tag', type=str, help=\"read depth tag (default: DP)\", default=\"DP\")\n parser.add_argument('-callset', '--callset', type=str, help=\"name for somatic VCF signal\", default=None)\n parser.add_argument('-maxcn', '--max_copy_number', type=int, help=\"maximal copy number\", default=10)\n parser.add_argument('-mindbaf', '--baf_threshold', type=float, help=\"threshold for change in BAF level\",\n default=0.0)\n parser.add_argument('-bafres', '--baf_resolution', type=int, help=\"Resolution for unphased BAF likelihood\",\n default=200)\n parser.add_argument('-nolh', '--no_save_likelihood', action='store_true',\n help=\"do not save likelihood histograms (reduce size of pytor file)\")\n parser.add_argument('-oth', '--overlap_threshold', type=float, help=\"likelihood overlap threshold\",\n default=None)\n parser.add_argument('-mincf', '--min_cell_fraction', type=float, help=\"minimal cell fraction\", default=0.0)\n\n parser.add_argument('-pileup', '--pileup_bam', nargs=\"+\", type=str, help=\"calculate SNP counts from bam files\")\n parser.add_argument('-snp2rd', '--rd_from_snp', action='store_true', help=\"calculate RD from SNP counts\")\n parser.add_argument('-sbin', '--s_bin_size', type=binsize_type, help=\"Super bin size (use with -snp2rd)\",\n default=10000)\n\n parser.add_argument('-mask', '--mask', type=str, help=\"read fasta mask file and flag SNPs in P region\")\n parser.add_argument('-mask_snps', '--mask_snps', action='store_true', help=\"flag SNPs in P region\")\n parser.add_argument('-trio_phase', '--trio_phase', action='store_true', help=\"Phase trio\")\n parser.add_argument('-parents', '--phase_parents', action='store_true', help=\"Phase parents\")\n parser.add_argument('-mask_snvs', '--mask_snvs', type=str, help=\"flag SNVs in P region\")\n parser.add_argument('-idvar', '--idvar', type=str, help=\"read vcf file and flag SNPs that exist in database file\")\n parser.add_argument('-random_phase', '--random_phase', action='store_true', help=\"randomly phase SNPs\")\n parser.add_argument('-baf', '--baf', type=binsize_type, nargs=\"+\",\n help=\"create BAF histograms for specified bin size (multiple bin sizes separate by space)\")\n parser.add_argument('-nomask', '--no_mask', action='store_true', help=\"do not use P mask in BAF histograms\")\n parser.add_argument('-useid', '--use_id', action='store_true', help=\"use id flag filtering in SNP histograms\")\n parser.add_argument('-usehom', '--use_hom', action='store_true', help=\"use hom\")\n parser.add_argument('-usephase', '--use_phase', action='store_true',\n help=\"use information 
about phase while processing SNP data\")\n parser.add_argument('-reducenoise', '--reduce_noise', action='store_true',\n help=\"reduce noise in processing SNP data\")\n parser.add_argument('-blw', '--baf_likelihood_width', type=float,\n help=\"likelihood width used in processing SNP data (default=0.8)\", default=0.8)\n parser.add_argument('-altc', '--alt_corr', action='store_true',\n help=\"Remove alt/ref bias\")\n\n parser.add_argument('-plot', '--plot', type=str, nargs=\"+\", help=\"plotting\")\n parser.add_argument('-view', '--view', type=binsize_type,\n help=\"Enters interactive ploting mode\")\n parser.add_argument('-agg', '--force_agg', action='store_true', help=\"Force Agg matplotlib backend\")\n\n parser.add_argument('-panels', '--panels', type=str, nargs=\"+\", default=[\"rd\"], choices=[\"rd\", \"baf\", \"likelihood\"],\n help=\"plot panels (with -plot regions)\")\n\n parser.add_argument('-style', '--plot_style', type=str,\n help=\"available plot styles: \" + \", \".join(plt.style.available), choices=plt.style.available)\n parser.add_argument('-o', '--plot_output_file', type=str, help=\"output filename prefix and extension\", default=\"\")\n parser.add_argument('-anim', '--animation', type=str, help=\"animation folder/prefix\", default=\"\")\n\n parser.add_argument('-make_gc_file', '--make_gc_genome_file', action='store_true',\n help=\"used with -gc will create genome gc file\")\n parser.add_argument('-make_mask_file', '--make_mask_genome_file', action='store_true',\n help=\"used with -mask will create genome mask file\")\n parser.add_argument('-rd_use_mask', '--use_mask_with_rd', action='store_true', help=\"used P mask in RD histograms\")\n parser.add_argument('-nogc', '--no_gc_corr', action='store_true', help=\"do not use GC correction in RD histograms\")\n parser.add_argument('-rg', '--reference_genome', type=str, help=\"Manually set reference genome\", default=None)\n parser.add_argument('-sample', '--vcf_sample', type=str, help=\"Sample name in vcf file\", default=\"\")\n parser.add_argument('-conf', '--reference_genomes_conf', type=str, help=\"Configuration with reference genomes\",\n default=None)\n\n parser.add_argument('-ls', '--ls', action='store_true', help='list pytor file(s) content')\n parser.add_argument('-gc_info', '--gc_info', action='store_true', help='list pytor file(s) gc content stat')\n parser.add_argument('-rg_info', '--rg_info', action='store_true', help='list loaded reference gnomes')\n parser.add_argument('-info', '--info', type=binsize_type, nargs=\"*\", help='print statistics for pythor file(s)')\n parser.add_argument('-qc', '--qc', type=binsize_type, nargs=\"*\", help='print quality control statistics')\n parser.add_argument('-rdqc', '--rd_qc', type=binsize_type, nargs=\"*\",\n help='print quality control statistics without SNP data')\n parser.add_argument('-comp', '--compare', type=str, nargs=\"*\", help='compere two regions: -comp reg1 reg2 [n_bins]')\n parser.add_argument('-genotype', '--genotype', type=str, nargs=\"*\")\n parser.add_argument('-a', '--all', action='store_true', help='Genotype with all columns')\n parser.add_argument('-meta', '--metadata', action='store_true', help='list Metadata')\n parser.add_argument('-fasta2rg', '--reference_genome_template', type=str,\n help=\"create template for reference genome using chromosome lengths from fasta file\")\n parser.add_argument('-export', '--export', type=str, nargs=\"*\", help='Export to jbrowse and cnvnator')\n args = parser.parse_args(sys.argv[1:])\n\n log_format = '%(asctime)s - %(name)s - 
%(levelname)s - %(message)s'\n if args.verbose in {\"debug\", \"d\"}:\n level = logging.DEBUG\n elif args.verbose in {\"info\", \"i\"}:\n level = logging.INFO\n elif args.verbose in {\"warning\", \"w\"}:\n level = logging.WARNING\n elif args.verbose in {\"error\", \"e\"}:\n level = logging.ERROR\n else:\n level = logging.CRITICAL\n\n if args.log_file:\n logging.basicConfig(filename=args.log_file, level=logging.DEBUG, format=log_format)\n logger = logging.getLogger('cnvpytor')\n ch = logging.StreamHandler()\n formatter = logging.Formatter(log_format)\n ch.setFormatter(formatter)\n ch.setLevel(level)\n logger.addHandler(ch)\n else:\n logging.basicConfig(level=level, format=log_format)\n logger = logging.getLogger('cnvpytor')\n logger.debug(\"Start logging...\")\n\n if args.reference_genome_template is not None:\n Fasta(args.reference_genome_template).print_reference_genome_template()\n\n if args.download_resources:\n Genome.download_resources()\n return 0\n\n if not Genome.check_resources():\n logger.error(\"Some reference genome resource files are missing. \"\n \"Run 'cnvpytor -download' as same user who has installed cnvpytor.\")\n return 0\n\n if args.version:\n print('CNVpytor {}'.format(__version__))\n return 0\n\n if args.reference_genomes_conf:\n Genome.load_reference_genomes(args.reference_genomes_conf)\n elif os.path.exists(os.path.expanduser('~/.cnvpytor/reference_genomes_conf.py')):\n Genome.load_reference_genomes(os.path.expanduser('~/.cnvpytor/reference_genomes_conf.py'))\n\n if args.rg_info:\n Genome.print_reference_genomes()\n\n if args.root is not None:\n\n if args.ls:\n show = Show(args.root)\n show.ls()\n\n if args.gc_info:\n show = Show(args.root)\n show.gc_info()\n\n if args.export:\n if len(args.export) > 0:\n dir_name_list = args.export[1:]\n dir_name = ''\n if len(dir_name_list) > 0:\n dir_name = dir_name_list[0]\n export_program = args.export[0].lower()\n if export_program in ['jbrowse', 'cnvnator']:\n if export_program == 'jbrowse':\n export_j = ExportJBrowse(args.root, dir_name)\n export_j.create_reference_json()\n export_j.rd_signal()\n export_j.snp_signal()\n export_j.create_tracklist_json()\n elif export_program == 'cnvnator':\n logger.info(\"Under Development\")\n else:\n logger.error(\"Incorrect export program name\")\n\n if args.metadata:\n show = Show(args.root)\n show.meta()\n\n if args.info is not None:\n show = Show(args.root)\n show.info(args.info)\n\n\n if args.genotype is not None:\n params = {\"output_filename\": args.plot_output_file,\n \"chrom\": args.chrom,\n \"panels\": args.panels,\n \"snp_use_mask\": not args.no_mask,\n \"snp_use_id\": args.use_id,\n \"rd_use_mask\": args.use_mask_with_rd\n }\n view = Viewer(args.root, params, force_agg=args.force_agg)\n view.genotype_prompt(list(map(binsize_type, args.genotype)), all=args.all)\n\n if args.qc is not None:\n params = {\"bin_size\": binsize_type(args.qc[-1]),\n \"chrom\": args.chrom,\n \"snp_use_mask\": not args.no_mask,\n \"snp_use_id\": args.use_id,\n \"rd_use_mask\": args.use_mask_with_rd,\n \"rd_use_gc_corr\": not args.no_gc_corr\n }\n view = Viewer(args.root, params, force_agg=args.force_agg)\n view.qc()\n\n if args.rd_qc is not None:\n params = {\"bin_size\": binsize_type(args.rd_qc[-1]),\n \"chrom\": args.chrom,\n \"snp_use_mask\": not args.no_mask,\n \"snp_use_id\": args.use_id,\n \"rd_use_mask\": args.use_mask_with_rd,\n \"rd_use_gc_corr\": not args.no_gc_corr\n }\n view = Viewer(args.root, params, force_agg=args.force_agg)\n view.qc(snp_qc=False)\n\n\n if args.compare is not None:\n 
params = {\"bin_size\": binsize_type(args.compare[-1]),\n \"rd_use_gc_corr\": not args.no_gc_corr,\n \"rd_use_mask\": args.use_mask_with_rd\n }\n view = Viewer(args.root, params, force_agg=args.force_agg)\n if len(args.compare) == 3:\n view.compare(args.compare[0], args.compare[1])\n elif len(args.compare) == 4:\n view.compare(args.compare[0], args.compare[1], int(args.compare[2]))\n\n if args.rd:\n app = Root(args.root[0], create=True, max_cores=args.max_cores)\n app.rd(args.rd, chroms=args.chrom, reference_filename=args.reference_filename)\n\n if args.reference_genome:\n app = Root(args.root[0], max_cores=args.max_cores)\n app.set_reference_genome(args.reference_genome)\n\n if args.plot:\n params = {\"output_filename\": args.plot_output_file,\n \"chrom\": args.chrom,\n \"panels\": args.panels,\n \"snp_use_mask\": not args.no_mask,\n \"snp_use_id\": args.use_id,\n \"rd_use_mask\": args.use_mask_with_rd,\n \"rd_use_gc_corr\": not args.no_gc_corr\n }\n if args.plot_style:\n params[\"style\"] = args.plot_style\n view = Viewer(args.root, params)\n view.plot_command(args.plot)\n\n if args.view:\n params = {\"bin_size\": args.view,\n \"output_filename\": args.plot_output_file,\n \"chrom\": args.chrom,\n \"panels\": args.panels,\n \"snp_use_mask\": not args.no_mask,\n \"snp_use_id\": args.use_id,\n \"rd_use_mask\": args.use_mask_with_rd,\n \"rd_use_gc_corr\": not args.no_gc_corr\n }\n if args.plot_style:\n params[\"style\"] = args.plot_style\n view = Viewer(args.root, params, force_agg=args.force_agg)\n view.prompt()\n\n if args.gc:\n app = Root(args.root[0], create=True, max_cores=args.max_cores)\n app.gc(args.gc, chroms=args.chrom, make_gc_genome_file=args.make_gc_genome_file)\n\n if args.copy_gc:\n app = Root(args.root[0], create=True, max_cores=args.max_cores)\n app.copy_gc(args.copy_gc, chroms=args.chrom)\n\n if args.vcf:\n app = Root(args.root[0], create=True, max_cores=args.max_cores)\n app.vcf(args.vcf, chroms=args.chrom, sample=args.vcf_sample, no_counts=args.no_snp_counts,\n ad_tag=args.ad_tag, gt_tag=args.gt_tag, filter=not args.no_filter)\n\n if args.idvar:\n app = Root(args.root[0], create=True, max_cores=args.max_cores)\n app.variant_id(args.idvar, chroms=args.chrom)\n\n if args.somatic_snv:\n app = Root(args.root[0], create=True, max_cores=args.max_cores)\n callset = \"default\" if args.callset is None else args.callset\n app.vcf(args.somatic_snv, chroms=args.chrom, sample=args.vcf_sample, no_counts=args.no_snp_counts,\n ad_tag=args.ad_tag, gt_tag=args.gt_tag, filter=not args.no_filter, callset=callset)\n\n if args.rd_from_vcf:\n app = Root(args.root[0], create=True, max_cores=args.max_cores)\n app.rd_from_vcf(args.rd_from_vcf, chroms=args.chrom, sample=args.vcf_sample, ad_tag=args.ad_tag,\n dp_tag=args.dp_tag)\n\n if args.pileup_bam:\n app = Root(args.root[0], max_cores=args.max_cores)\n app.pileup(args.pileup_bam, chroms=args.chrom, reference_filename=args.reference_filename)\n\n if args.rd_from_snp:\n app = Root(args.root[0], max_cores=args.max_cores)\n app.rd_from_snp(chroms=args.chrom, use_mask=not args.no_mask, use_id=args.use_id,\n s_bin_size=args.s_bin_size)\n\n if args.mask:\n app = Root(args.root[0], create=True, max_cores=args.max_cores)\n app.mask(args.mask, chroms=args.chrom, make_mask_genome_file=args.make_mask_genome_file)\n\n if args.mask_snps:\n app = Root(args.root[0], max_cores=args.max_cores)\n app.mask_snps()\n\n if args.mask_snvs:\n app = Root(args.root[0], max_cores=args.max_cores)\n app.mask_snps(callset=args.mask_snvs)\n\n if args.random_phase:\n app = 
Root(args.root[0], max_cores=args.max_cores)\n app.random_phase()\n\n if args.trio_phase:\n app = Trio(args.root)\n app.trio_phase(parents=args.phase_parents)\n\n if args.stat:\n app = Root(args.root[0], max_cores=args.max_cores)\n app.rd_stat(chroms=args.chrom)\n\n if args.his:\n app = Root(args.root[0], max_cores=args.max_cores)\n app.calculate_histograms(args.his, chroms=args.chrom)\n\n if args.his_from_snp:\n app = Root(args.root[0], max_cores=args.max_cores)\n app.calculate_histograms_from_snp_counts(args.his_from_snp, chroms=args.chrom, use_mask=not args.no_mask,\n use_id=args.use_id, callset=args.callset,\n min_count=args.min_count)\n if args.baf:\n app = Root(args.root[0], max_cores=args.max_cores)\n app.calculate_baf(args.baf, chroms=args.chrom, use_mask=not args.no_mask, use_id=args.use_id,\n use_phase=args.use_phase, res=args.baf_resolution, reduce_noise=args.reduce_noise, blw=args.baf_likelihood_width,\n use_hom=args.use_hom, alt_ref_correct=args.alt_corr, save_likelihood=not args.no_save_likelihood)\n if args.partition:\n app = Root(args.root[0], max_cores=args.max_cores)\n app.partition(args.partition, chroms=args.chrom, use_gc_corr=not args.no_gc_corr,\n use_mask=args.use_mask_with_rd)\n\n if args.call:\n app = Root(args.root[0], max_cores=args.max_cores)\n if args.call[0] == \"baf\":\n if args.call[1] in [\"mosaic\", \"germline\"]:\n event_type = args.call[1]\n bins = list(map(binsize_type, args.call[2:]))\n else:\n event_type = \"both\"\n bins = list(map(binsize_type, args.call[1:]))\n if args.use_phase:\n app.call_baf_phased(bins, chroms=args.chrom, event_type=event_type, print_calls=True,\n use_gc_corr=not args.no_gc_corr,\n rd_use_mask=args.use_mask_with_rd, snp_use_mask=not args.no_mask, snp_use_id=args.use_id,\n mcount=args.min_count, max_copy_number=args.max_copy_number,\n min_cell_fraction=args.min_cell_fraction, baf_threshold=args.baf_threshold,\n omin=args.overlap_threshold, use_hom=args.use_hom, anim=args.animation)\n else:\n app.call_baf(bins, chroms=args.chrom, event_type=event_type, print_calls=True,\n use_gc_corr=not args.no_gc_corr,\n rd_use_mask=args.use_mask_with_rd, snp_use_mask=not args.no_mask, snp_use_id=args.use_id,\n mcount=args.min_count, max_copy_number=args.max_copy_number,\n min_cell_fraction=args.min_cell_fraction, baf_threshold=args.baf_threshold,\n omin=args.overlap_threshold, use_hom=args.use_hom, anim=args.animation)\n #app.call_baf_old([binsize_type(x) for x in args.call[1:]], chroms=args.chrom, use_id=args.use_id,\n # use_mask=not args.no_mask, mcount=args.min_count, anim=args.animation)\n elif args.call[0] == \"mosaic\":\n app.call_mosaic(list(map(binsize_type, args.call[1:])), chroms=args.chrom,\n use_gc_corr=not args.no_gc_corr,\n use_mask=args.use_mask_with_rd, anim=args.animation)\n elif args.call[0] == \"subclones\":\n bins = list(map(binsize_type, args.call[1:]))\n app.call_subclones(bins, chroms=args.chrom, cnv_calls=\"calls combined\", print_calls=True,\n use_gc_corr=not args.no_gc_corr, rd_use_mask=args.use_mask_with_rd,\n snp_use_mask=not args.no_mask, snp_use_id=args.use_id,\n max_copy_number=args.max_copy_number,\n min_cell_fraction=args.min_cell_fraction, baf_threshold=args.baf_threshold)\n elif args.call[0] == \"combined\":\n if args.call[1] in [\"mosaic\", \"germline\"]:\n event_type = args.call[1]\n bins = list(map(binsize_type, args.call[2:]))\n else:\n event_type = \"both\"\n bins = list(map(binsize_type, args.call[1:]))\n if args.use_phase:\n app.call_2d_phased(bins, chroms=args.chrom, event_type=event_type, 
print_calls=True,\n use_gc_corr=not args.no_gc_corr,\n rd_use_mask=args.use_mask_with_rd, snp_use_mask=not args.no_mask, snp_use_id=args.use_id,\n mcount=args.min_count, max_copy_number=args.max_copy_number,\n min_cell_fraction=args.min_cell_fraction, baf_threshold=args.baf_threshold,\n omin=args.overlap_threshold, use_hom=args.use_hom, anim=args.animation)\n else:\n app.call_2d(bins, chroms=args.chrom, event_type=event_type, print_calls=True,\n use_gc_corr=not args.no_gc_corr,\n rd_use_mask=args.use_mask_with_rd, snp_use_mask=not args.no_mask, snp_use_id=args.use_id,\n mcount=args.min_count, max_copy_number=args.max_copy_number,\n min_cell_fraction=args.min_cell_fraction, baf_threshold=args.baf_threshold,\n omin=args.overlap_threshold, use_hom=args.use_hom, anim=args.animation)\n else:\n app.call(list(map(binsize_type, args.call)), chroms=args.chrom, print_calls=True,\n use_gc_corr=not args.no_gc_corr, use_mask=args.use_mask_with_rd)",
"def main():\n os.makedirs(PATH)\n fetch_data()\n convert_to_json(model_list, 'models.json', is_model=True)\n convert_to_json(backend_list, 'backends.json')\n convert_to_json(type_list, 'types.json')\n convert_to_json(featurizer_list, 'featurizers.json')",
"def main():\r\n parser = get_parser()\r\n config = parser.parse_args(['--cfg', 'config.yaml'])\r\n result_filing.init_config_vars(config)\r\n run_id = config.info.run_id\r\n logger = custom_logger.CustomLogger(run_id+':'+file_id)\r\n\r\n operation = config.info.operation_type\r\n logger.info(\"Selected operation type %s.\"%(operation))\r\n if operation == const.TRAIN_OP:\r\n train.train_model(config)\r\n elif operation == const.DEPLOY_OP:\r\n test.test_model(config)",
"def main():\n run_test_summary1a()\n run_test_summary1c()\n run_test_summary1c()",
"def main(model_path='models/Nakakuki_Cell_2010_ODE'):\n n_file = []\n fitparam_files = os.listdir(model_path.strip('/') + '/fitparam')\n for file in fitparam_files:\n if re.match(r'\\d', file):\n n_file.append(int(file))\n for nth_paramset in n_file:\n os.makedirs(\n model_path.strip('/') \n + '/dat2npy/out/{:d}'.format(nth_paramset), exist_ok=True\n )\n nth_fitparam_files = os.listdir(\n model_path.strip('/') + '/fitparam/{:d}'.format(nth_paramset)\n )\n for dat_file in nth_fitparam_files:\n if 'fit' in dat_file:\n \"\"\"\n - fit_param%d.dat -> fit_param%d.npy\n - best_fitness.dat -> best_fitness.npy\n \"\"\"\n try:\n data = np.loadtxt(\n model_path.strip('/') + '/fitparam/{:d}/{}'.format(\n nth_paramset, dat_file\n ), dtype='float'\n )\n except ValueError:\n pass\n else:\n \"\"\"\n - count_num.dat -> count_num.npy\n - generation.dat -> generation.npy\n \"\"\"\n data = np.loadtxt(\n model_path.strip('/') + '/fitparam/{:d}/{}'.format(\n nth_paramset, dat_file\n ), dtype='int'\n )\n np.save(\n model_path.strip('/') + '/dat2npy/out/{:d}/'.format(nth_paramset)\n + dat_file.replace('.dat', '.npy'), data\n )\n if os.path.isfile(\n './logs/{:d}.log'.format(nth_paramset)):\n shutil.copyfile(\n './logs/{:d}.log'.format(nth_paramset),\n model_path.strip('/') \n + '/dat2npy/out/{:d}/optimization.log'.format(nth_paramset)\n )",
"def main(Args):\n norm = [1.9844158727667542, 413.83759806375525,\n 51.2789974336363, 1038.4760551905683]\n input_pull = False\n input_model_mapping = False\n max_number = 2\n count = 40000\n catalog_name = os.path.join(DATA_PATH, 'OneDegSq.fits')\n # Define parameters for mrcnn model with btk here\n resid_model = btk_utils.Resid_btk_model(\n Args.model_name, Args.model_path, MODEL_DIR, training=True,\n images_per_gpu=4, validation_for_training=True)\n # Load parameters for dataset and load model\n resid_model.config.WEIGHT_DECAY = 0.001\n resid_model.config.STEPS_PER_EPOCH = 1000\n resid_model.config.VALIDATION_STEPS = 20\n sampling_function = None\n layers = 'all'\n if Args.model_name == 'model1':\n resid_model.config.BACKBONE = 'resnet41'\n elif Args.model_name == 'model2':\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model3':\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model4':\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model5':\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet35'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model4_large':\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = '4+' # '3+'\n elif Args.model_name == 'model6':\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 51.2789974336363, 1038.4760551905683]\n input_pull = True\n elif Args.model_name == 'model7':\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model8': # stretch = 0.1, Q = 3\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model9': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n 
resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1., 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_again': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_again2': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_again3': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_2': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0., 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model11': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1., 0., 1.] 
# [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model11_2': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1., 0., 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model12': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n max_number = 6\n elif Args.model_name == 'model12_again': # stretch = 2000, Q = 0.5 # larger learning rate\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n max_number = 10 # changed from 6 to 10 for run 4\n elif Args.model_name == 'model12_again2': # stretch = 2000, Q = 0.5 # larger learning rate val set reduced to 10\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n max_number = 6\n resid_model.config.VALIDATION_STEPS = 10\n else:\n raise AttributeError(\"model not found\", Args.model_name)\n print(\"Train in model:\", Args.model_name)\n resid_model.config.display()\n resid_model.make_resid_model(catalog_name, count=count,\n max_number=max_number, augmentation=True,\n norm_val=norm, input_pull=input_pull,\n sampling_function=sampling_function,\n input_model_mapping=input_model_mapping)\n learning_rate = resid_model.config.LEARNING_RATE/10.\n np.random.seed(Args.epochs)\n history = resid_model.model.train(resid_model.dataset,\n resid_model.dataset_val,\n learning_rate=learning_rate,\n epochs=Args.epochs,\n layers=layers)\n name = Args.model_name + '_run2'\n with open(name + \".dill\", 'wb') as handle:\n dill.dump(history.history, handle)\n learning_rate = resid_model.config.LEARNING_RATE/10.\n np.random.seed(Args.epochs + 10)\n history = resid_model.model.train(resid_model.dataset,\n resid_model.dataset_val,\n learning_rate=learning_rate,\n epochs=Args.epochs+10,\n layers=layers)\n name = Args.model_name + '_run3'\n with open(name + \".dill\", 'wb') as handle:\n dill.dump(history.history, handle)",
"def run(pars, #parameter files\n #directory of scenario files\n scen_dir = r'C:\\LS\\03_TOOLS\\_git\\COVID_01\\scenarios',\n \n #map to scenario files\n scen_d = {\n 'NoNPI':'NPI_Scenario1_None.R',\n 'BI1918':'NPI_Scenario2_Bootsma_1918Influenza.R',\n 'SouthKorea':'NPI_Scenario3_SouthKorea.R',\n 'Reduced':'NPI_Scenario4_ReducedGamma.R', \n }\n ):\n \n \n \n #===========================================================================\n # precheck \n #===========================================================================\n assert len(pars)==4, 'unexpected inputs count'\n print('pars: \\n%s'%pars)\n \n #check the R Environment variables\n assert 'R_USER' in os.environ\n assert 'R_HOME' in os.environ\n \n #print('R_USER=%s \\nR_HOME=%s'%(os.getenv('R_USER'), os.getenv('R_HOME')))\n\n \n \n \n \n #===========================================================================\n # setup\n #===========================================================================\n s = setup.Setup(setup_name = 'mid_utah_'+pars[2],\n spatial_setup = WestCoastSpatialSetup(),\n nsim = int(pars[1]),\n ti = datetime.date(2020, 3, 6),\n tf = datetime.date(2020, 10, 1),\n interactive = False,\n write_csv = True,\n dt = 1/4)\n \n #===========================================================================\n # set the scenario parmaters\n #===========================================================================\n\n \n \n assert pars[2] in scen_d, 'unrecognized scenario: %s'%pars[2]\n \n rfp = os.path.join(scen_dir, scen_d[pars[2]])\n assert os.path.exists(rfp)\n \n s.script_npi = rfp\n \n print('set script_npi=%s'%s.script_npi)\n\n #===========================================================================\n # execute\n #===========================================================================\n\n print()\n print()\n print(f\">>> Starting {s.nsim} model runs on {pars[3]} processes\")\n print(f\">>> Setup *** {s.setup_name} *** from {s.ti} to {s.tf} !\")\n print(f\">>> writing to folder : {s.datadir}{s.setup_name}\")\n print()\n print()\n \n tic = time.time()\n \n res_l = seir.run_parallel(s, int(pars[3]))\n print(f\">>> Runs done in {time.time()-tic} seconds...\")",
"def main():\n parser = make_argument_parser()\n args = parser.parse_args()\n\n input_dirs = args.inputdirs\n tf = args.factor\n valid_chroms = args.validchroms\n valid_input_dirs = args.validinputdirs\n test_chroms = args.testchroms\n epochs = args.epochs\n patience = args.patience\n learningrate = args.learningrate\n seed = args.seed\n utils.set_seed(seed)\n dropout_rate = args.dropout\n L = args.seqlen\n w = args.motifwidth\n utils.L = L\n utils.w = w\n utils.w2 = w/2\n negatives = args.negatives\n assert negatives > 0\n meta = args.meta\n gencode = args.gencode\n motif = args.motif\n\n num_motifs = args.kernels\n num_recurrent = args.recurrent\n num_dense = args.dense\n \n features = ['bigwig'] \n\n if tf:\n print 'Single-task training:', tf\n singleTask = True\n if meta:\n print 'Including metadata features'\n features.append('meta')\n if gencode:\n print 'Including genome annotations'\n features.append('gencode')\n else:\n print 'Multi-task training'\n singleTask = False\n #Cannot use any metadata features\n assert not meta\n assert not gencode\n\n if args.outputdir is None:\n clobber = True\n output_dir = args.outputdirc\n else:\n clobber = False\n output_dir = args.outputdir\n\n try: # adapted from dreme.py by T. Bailey\n os.makedirs(output_dir)\n except OSError as exc:\n if exc.errno == errno.EEXIST:\n if not clobber:\n print >> sys.stderr, ('output directory (%s) already exists '\n 'but you specified not to clobber it') % output_dir\n sys.exit(1)\n else:\n print >> sys.stderr, ('output directory (%s) already exists '\n 'so it will be clobbered') % output_dir\n\n print 'Loading genome'\n genome = utils.load_genome()\n if valid_input_dirs:\n print 'You specified at least one validation input directory'\n assert singleTask # This option only works for single-task training\n print 'Loading ChIP labels'\n if singleTask:\n chip_bed_list, nonnegative_regions_bed_list = \\\n utils.load_chip_singleTask(input_dirs, tf)\n if valid_input_dirs:\n valid_chip_bed_list, valid_nonnegative_regions_bed_list = \\\n utils.load_chip_singleTask(valid_input_dirs, tf)\n num_tfs = 1\n else:\n assert len(input_dirs) == 1 # multi-task training only supports one cell line\n input_dir = input_dirs[0]\n tfs, positive_windows, y_positive, nonnegative_regions_bed = \\\n utils.load_chip_multiTask(input_dir)\n num_tfs = len(tfs)\n print 'Loading bigWig data'\n bigwig_names, bigwig_files_list = utils.load_bigwigs(input_dirs)\n num_bigwigs = len(bigwig_names)\n if valid_input_dirs:\n valid_bigwig_names, valid_bigwig_files_list = utils.load_bigwigs(valid_input_dirs)\n assert valid_bigwig_names == bigwig_names\n if not singleTask:\n bigwig_files = bigwig_files_list[0]\n if meta:\n print 'Loading metadata features'\n meta_names, meta_list = utils.load_meta(input_dirs)\n if valid_input_dirs:\n valid_meta_names, valid_meta_list = utils.load_load(valid_input_dirs)\n assert valid_meta_names == meta_names\n else:# meta option was not selected, pass empty metadata features to the functions\n meta_list = [[] for bigwig_files in bigwig_files_list]\n if valid_input_dirs:\n valid_meta_list = [[] for bigwig_files in valid_bigwig_files_list]\n \n print 'Making features'\n if singleTask:\n if not valid_input_dirs: #validation directories not used, must pass placeholder values\n valid_chip_bed_list = None\n valid_nonnegative_regions_bed_list = None\n valid_bigwig_files_list = None\n valid_meta_list = None \n datagen_train, datagen_valid = \\\n utils.make_features_singleTask(chip_bed_list,\n nonnegative_regions_bed_list, 
bigwig_files_list, bigwig_names,\n meta_list, gencode, genome, epochs, negatives, valid_chroms, test_chroms, \n valid_chip_bed_list, valid_nonnegative_regions_bed_list, \n valid_bigwig_files_list, valid_meta_list)\n else:\n datagen_train, datagen_valid = \\\n utils.make_features_multiTask(positive_windows, y_positive,\n nonnegative_regions_bed, bigwig_files, bigwig_names,\n genome, epochs, valid_chroms, test_chroms)\n print 'Building model'\n if num_recurrent == 0:\n print 'You specified 0 LSTM units. Omitting BLSTM layer'\n if num_recurrent < 0:\n print 'You specified less than 0 LSTM units. Replacing BLSTM layer with global max-pooling layer'\n if meta or gencode:\n num_meta = 0\n if meta:\n num_meta = len(meta_names)\n if gencode:\n num_meta += 6\n model = utils.make_meta_model(num_tfs, num_bigwigs, num_meta, num_motifs, num_recurrent, num_dense, dropout_rate)\n else:\n model = utils.make_model(num_tfs, num_bigwigs, num_motifs, num_recurrent, num_dense, dropout_rate)\n\n if motif:\n assert singleTask # This option only works with single-task training\n motifs_db = utils.load_motif_db('resources/HOCOMOCOv9.meme')\n if tf in motifs_db:\n print 'Injecting canonical motif'\n pwm = motifs_db[tf]\n pwm += 0.001\n pwm = pwm / pwm.sum(axis=1)[:, np.newaxis]\n pwm = np.log2(pwm/0.25)\n utils.inject_pwm(model, pwm)\n output_tf_file = open(output_dir + '/chip.txt', 'w')\n if singleTask:\n output_tf_file.write(\"%s\\n\" % tf)\n else:\n for tf in tfs:\n output_tf_file.write(\"%s\\n\" % tf)\n output_tf_file.close()\n output_feature_file = open(output_dir + '/feature.txt', 'w')\n for feature in features:\n output_feature_file.write(\"%s\\n\" % feature)\n output_feature_file.close()\n output_bw_file = open(output_dir + '/bigwig.txt', 'w')\n for bw in bigwig_names:\n output_bw_file.write(\"%s\\n\" % bw)\n output_bw_file.close()\n if meta:\n output_meta_file = open(output_dir + '/meta.txt', 'w')\n for meta_name in meta_names:\n output_meta_file.write(\"%s\\n\" % meta_name)\n output_meta_file.close()\n model_json = model.to_json()\n output_json_file = open(output_dir + '/model.json', 'w')\n output_json_file.write(model_json)\n output_json_file.close()\n train(datagen_train, datagen_valid, model, epochs, patience, learningrate, output_dir)",
"def main(args):\n ## Starting time\n start_time = datetime.now()\n ## Reading all elements and converting to python dictionary\n param_dict = vars(args)\n ## Checking for correct input\n param_vals_test(param_dict)\n #\n # Creating instance of `ReadML` with the input parameters\n param_dict['ml_args'] = ReadML(**param_dict)\n ## Program message\n prog_msg = param_dict['Prog_msg']\n # Adding additional parameters\n param_dict = add_to_dict(param_dict)\n ##\n ## Creating Folder Structure\n # proj_dict = cwpaths.cookiecutter_paths(__file__)\n proj_dict = param_dict['ml_args'].proj_dict\n proj_dict = directory_skeleton(param_dict, proj_dict)\n ##\n ## Printing out project variables\n print('\\n'+50*'='+'\\n')\n for key, key_val in sorted(param_dict.items()):\n if key != 'Prog_msg':\n print('{0} `{1}`: {2}'.format(prog_msg, key, key_val))\n print('\\n'+50*'='+'\\n')\n ##\n ## Feature keys\n param_dict['feat_cols_dict'] = param_dict['ml_args'].feat_cols_names_dict(\n return_all=True)\n ##\n ## Reading in the main catalogue\n catl_pd = catl_file_read_clean(param_dict, proj_dict)\n ###\n ### ------ Figures ------ ###\n ##\n ## Comparison of estimated group masses via HAM and Dynamical Masses\n frac_diff_model(param_dict, proj_dict, plot_opt=param_dict['plot_opt'])\n #\n # Covariance Matrix\n covariance_plot(catl_pd, param_dict, proj_dict)\n #\n # Traditional methods for estimating masses\n # pred_masses_halo_mass(param_dict, proj_dict)\n #\n # Fractional Difference plots vs True mass of galaxy GROUPS\n # frac_diff_groups_model(param_dict, proj_dict,\n # plot_opt=param_dict['plot_opt'])\n ##\n ## End time for running the catalogues\n end_time = datetime.now()\n total_time = end_time - start_time\n print('{0} Total Time taken (Create): {1}'.format(prog_msg, total_time))",
"def run_tests():\n source1 = TextModel('CS111 Syllabus')\n source1.add_file('CS111_Syllabus.txt')\n\n source2 = TextModel('AR Syllabus')\n source2.add_file('AR_Syllabus.txt')\n\n new1 = TextModel('WR120 Syllabus')\n new1.add_file('WR120_Syllabus.txt')\n new1.classify(source1, source2)\n \n new2 = TextModel('CS131 Syllabus')\n new2.add_file('CS131_Syllabus.txt')\n new2.classify(source1, source2)\n \n new3 = TextModel('My Paper 2 for WR120')\n new3.add_file('WR_Paper_2.txt')\n new3.classify(source1, source2)\n \n new4 = TextModel('CS111 PS9PR0')\n new4.add_file('ps9pr0.txt')\n new4.classify(source1, source2)",
"def main():\r\n run_processes('tests.csv', 'labs.csv')",
"def run_tests():\r\n source1 = TextModel('50 Shades of Gray')\r\n source1.add_file('50.txt')\r\n \r\n print()\r\n \r\n source2 = TextModel('King James Version of the Bible')\r\n source2.add_file('kjv.txt')\r\n\r\n print()\r\n\r\n new1 = TextModel('Shakespeare')\r\n new1.add_file('shake.txt')\r\n new1.classify(source1, source2)\r\n \r\n print()\r\n \r\n new2 = TextModel('JK Rowling')\r\n new2.add_file('hp.txt')\r\n new2.classify(source1, source2)\r\n \r\n print()\r\n \r\n new3 = TextModel('Breitbart News Network')\r\n new3.add_file('bnn.txt')\r\n new3.classify(source1, source2)\r\n \r\n print()\r\n \r\n new4 = TextModel('Chaucer')\r\n new4.add_file('tct.txt')\r\n new4.classify(source1, source2)",
"def main():\n\n ############################ variable settings #################################\n parser = argparse.ArgumentParser(description='Run Subtask C of GermEval 2017 Using Pre-Trained Language Model.')\n parser.add_argument('--seed', type=int, default=42, help='Random seed.')\n parser.add_argument('--lang_model', type=str, default='bert-base-german-dbmdz-uncased', help='The pre-trained language model.')\n parser.add_argument('--epochs', type=int, default=4, help='Number of epochs for training.')\n parser.add_argument('--lr', type=float, default=5e-5, help='The learning rate.')\n parser.add_argument('--max_len', type=int, default=256, help='The maximum sequence length of the input text.')\n parser.add_argument('--batch_size', type=int, default=32, help='Your train set batch size.')\n parser.add_argument('--df_path', type=str, default='./data/', help='The data directory.') \n parser.add_argument('--train_data', type=str, default='train_df_cat.tsv', help='The filename of the input train data.')\n parser.add_argument('--dev_data', type=str, default='dev_df_cat.tsv', help='The filename of the input development data.')\n parser.add_argument('--test_data1', type=str, default='test_syn_df_cat.tsv', help='The filename of the first input test data (synchronic).')\n parser.add_argument('--test_data2', type=str, default='test_dia_df_cat.tsv', help='The filename of the second input test data (diachronic).')\n parser.add_argument('--output_path', type=str, default='./output/subtaskC/', help='The output directory of the model and predictions.')\n parser.add_argument(\"--train\", default=True, action=\"store_true\", help=\"Flag for training.\")\n parser.add_argument(\"--save_prediction\", default=False, action=\"store_true\", help=\"Flag for saving predictions.\")\n parser.add_argument(\"--save_cr\", default=False, action=\"store_true\", help=\"Flag for saving confusion matrix.\")\n parser.add_argument(\"--exclude_general\", default=False, action=\"store_true\", help=\"Flag for excluding category Allgemein.\")\n parser.add_argument(\"--exclude_neutral\", default=False, action=\"store_true\", help=\"Flag for excluding neutral polarity.\")\n parser.add_argument(\"--exclude_general_neutral\", default=False, action=\"store_true\", help=\"Flag for excluding category Allgemein:neutral.\")\n args = parser.parse_args()\n ################################################################################\n set_all_seeds(args.seed)\n device, n_gpu = initialize_device_settings(use_cuda=True)\n \n # Load data\n train_df = pd.read_csv(args.df_path + args.train_data, delimiter = '\\t')\n dev_df = pd.read_csv(args.df_path + args.dev_data, delimiter = '\\t')\n test_syn_df = pd.read_csv(args.df_path + args.test_data1, delimiter = '\\t')\n test_dia_df = pd.read_csv(args.df_path + args.test_data2, delimiter = '\\t')\n \n # Create a tokenizer\n lower_case = False\n if args.lang_model[-7:] == \"uncased\":\n lower_case = True\n\n if args.lang_model[:4] == \"bert\":\n model_class = \"BERT\"\n tokenizer = BertTokenizer.from_pretrained(args.lang_model, do_lower_case=lower_case, max_length=args.max_len)\n \n if args.lang_model[:10] == \"distilbert\":\n model_class = \"DistilBERT\"\n tokenizer = DistilBertTokenizer.from_pretrained(args.lang_model, do_lower_case=lower_case, max_length=args.max_len)\n \n\n # get training features\n cats = train_df.columns[5:]\n end = \"full\"\n # exclude categories if required\n if (args.exclude_general):\n cats = [i for i in list(cats) if \"Allgemein\" not in i]\n end = \"excl_gen\"\n if 
(args.exclude_neutral):\n cats = [i for i in list(cats) if \"neutral\" not in i]\n end = \"excl_neu\"\n if (args.exclude_general_neutral):\n cats = [i for i in list(cats) if \"Allgemein:neutral\" not in i]\n end = \"excl_genneu\"\n \n num_labels = len(list(cats))\n\n # create one hot labels\n train_df['one_hot_labels'] = list(train_df[list(cats)].values)\n dev_df['one_hot_labels'] = list(dev_df[list(cats)].values)\n test_syn_df['one_hot_labels'] = list(test_syn_df[list(cats)].values)\n test_dia_df['one_hot_labels'] = list(test_dia_df[list(cats)].values)\n\n # retrieve sentences and labels\n df = pd.concat([train_df, dev_df])\n sentences = df.text.values\n labels = list(df.one_hot_labels.values) \n\n sentences_syn = test_syn_df.text.values\n labels_syn = list(test_syn_df.one_hot_labels.values)\n\n sentences_dia = test_dia_df.text.values\n labels_dia = list(test_dia_df.one_hot_labels.values)\n \n print(\"number of categories:\", len(list(cats)))\n\n # Tokenize all of the sentences and map the tokens to their word IDs. \n input_ids = [tokenizer.encode(sent, add_special_tokens=True, truncation=True, \n max_length=args.max_len) for sent in sentences]\n input_ids = pad_sequences(input_ids, maxlen=args.max_len, dtype=\"long\", \n value=0.0, truncating=\"post\", padding=\"post\")\n # Create attention masks\n attention_masks = [[int(token_id > 0) for token_id in sent] for sent in input_ids]\n \n # synchronic test data\n input_ids_syn = [tokenizer.encode(sent, add_special_tokens=True, truncation=True) for sent in sentences_syn]\n input_ids_syn = pad_sequences(input_ids_syn, maxlen=args.max_len, dtype=\"long\", \n value=0.0, truncating=\"post\", padding=\"post\")\n attention_masks_syn = [[int(token_id > 0) for token_id in sent] for sent in input_ids_syn]\n \n # diachronic test data\n input_ids_dia = [tokenizer.encode(sent, add_special_tokens=True, truncation=True) for sent in sentences_dia]\n input_ids_dia = pad_sequences(input_ids_dia, maxlen=args.max_len, dtype=\"long\", \n value=0.0, truncating=\"post\", padding=\"post\")\n attention_masks_dia = [[int(token_id > 0) for token_id in sent] for sent in input_ids_dia]\n\n # split train, dev\n train_inputs, train_labels, dev_inputs, dev_labels, train_masks, dev_masks = split_train_dev(\n train_df, dev_df, attention_masks, input_ids, labels)\n \n # transform to torch tensor\n train_inputs = torch.tensor(train_inputs)\n dev_inputs = torch.tensor(dev_inputs)\n\n train_labels = torch.tensor(train_labels)\n dev_labels = torch.tensor(dev_labels)\n\n train_masks = torch.tensor(train_masks)\n dev_masks = torch.tensor(dev_masks)\n\n test_syn_inputs = torch.tensor(input_ids_syn)\n test_syn_masks = torch.tensor(attention_masks_syn)\n test_syn_labels = torch.tensor(labels_syn)\n\n test_dia_inputs = torch.tensor(input_ids_dia)\n test_dia_masks = torch.tensor(attention_masks_dia)\n test_dia_labels = torch.tensor(labels_dia)\n\n # Create the DataLoader\n train_dataloader = create_dataloader(train_inputs, train_masks, \n train_labels, args.batch_size, train = True)\n\n dev_dataloader = create_dataloader(dev_inputs, dev_masks, \n dev_labels, args.batch_size, train = False)\n\n test_syn_dataloader = create_dataloader(test_syn_inputs, test_syn_masks, \n test_syn_labels, args.batch_size, \n train = False)\n\n test_dia_dataloader = create_dataloader(test_dia_inputs, test_dia_masks, \n test_dia_labels, args.batch_size, \n train = False)\n\n # Create model\n if args.train:\n if model_class == \"BERT\":\n config = BertConfig.from_pretrained(args.lang_model, 
num_labels=num_labels) \n config.hidden_dropout_prob = 0.1 \n model = BertForSequenceClassification.from_pretrained(\n args.lang_model,\n num_labels = num_labels,\n output_attentions = False,\n output_hidden_states = False\n )\n\n if model_class == \"DistilBERT\":\n config = DistilBertConfig.from_pretrained(args.lang_model, num_labels=num_labels) \n config.hidden_dropout_prob = 0.1 \n model = DistilBertForSequenceClassification.from_pretrained(\n args.lang_model,\n num_labels = num_labels,\n output_attentions = False,\n output_hidden_states = False\n )\n model.cuda()\n\n\n # Create an optimizer\n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],\n 'weight_decay_rate': 0.01},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],\n 'weight_decay_rate': 0.0}\n ]\n optimizer = AdamW(\n optimizer_grouped_parameters,\n lr=args.lr,\n eps = 1e-8\n )\n # Total number of training steps = number of batches * number of epochs\n total_steps = len(train_dataloader) * args.epochs\n # Create the learning rate scheduler\n scheduler = get_linear_schedule_with_warmup(\n optimizer,\n num_warmup_steps=0,\n num_training_steps=total_steps\n )\n \n # train model\n # Main Loop\n print(\"=================== Train ================\")\n print(\"##### Language Model:\", args.lang_model, \",\", \"learning rate:\", args.lr)\n print()\n\n track_time = time.time()\n # trange is a tqdm wrapper around the normal python range\n for epoch in trange(args.epochs, desc=\"Epoch\"):\n print(\"Epoch: %4i\"%epoch, dt.datetime.now())\n\n model, optimizer, scheduler, tr_loss = train_multilabel(\n train_dataloader=train_dataloader, \n model=model, \n device=device, \n optimizer=optimizer, \n scheduler=scheduler, \n num_labels=num_labels\n )\n # EVALUATION: TRAIN SET\n pred_bools_train, true_bools_train, f1_train = eval_multilabel(\n train_dataloader, model=model, device=device)\n print(\"TRAIN: micro F1 %.3f\"%(f1_train))\n \n # EVALUATION: DEV SET\n pred_bools_dev, true_bools_dev, f1_dev = eval_multilabel(\n dev_dataloader, model=model, device=device)\n print(\"EVAL: micro F1 %.3f\"%(f1_dev))\n \n\n print(\" Training and validation took in total: {:}\".format(format_time(time.time()-track_time)))\n\n # EVALUATION: TEST SYN SET\n pred_bools_syn, true_bools_syn, f1_test_syn = eval_multilabel(\n test_syn_dataloader, model=model, device=device)\n print(\"TEST SYN: micro F1 %.4f\"%(f1_test_syn))\n\n # classification report\n clf_report_syn = classification_report(true_bools_syn, pred_bools_syn, target_names=cats, digits=3)\n print(clf_report_syn)\n\n\n # EVALUATION: TEST DIA SET\n pred_bools_dia, true_bools_dia, f1_test_dia = eval_multilabel(\n test_dia_dataloader, model=model, device=device\n )\n print(\"TEST DIA: micro F1 %.4f\"%(f1_test_dia))\n\n # classification report\n clf_report_dia = classification_report(true_bools_dia, pred_bools_dia, target_names=cats, digits=3)\n print(clf_report_dia)\n \n if args.save_cr:\n pickle.dump(clf_report_syn, open(args.output_path+'clf_report_'+args.lang_model+'_test_syn_'+str(num_labels)+end+'.txt','wb'))\n pickle.dump(clf_report_dia, open(args.output_path+'clf_report_'+args.lang_model+'_test_dia_'+str(num_labels)+end+'.txt','wb'))\n\n\n if args.save_prediction:\n test_syn_df[\"category_pred\"] = pred_bools_syn\n test_dia_df[\"category_pred\"] = pred_bools_dia\n 
test_syn_df.category_pred.to_csv(args.output_path+args.lang_model+'_test_syn_'+str(num_labels)+end+\".tsv\", \n sep=\"\\t\", index = False, header = True, encoding = \"utf-8-sig\")\n test_dia_df.category_pred.to_csv(args.output_path+args.lang_model+'_test_dia_'+str(num_labels)+end+\".tsv\", \n sep=\"\\t\", index = False, header = True, encoding = \"utf-8-sig\")",
"def main():\n test_runner = TestRunner(\n FLAGS.workspace, FLAGS.bench_home, imagenet_dir=FLAGS.train_data_dir)\n test_runner.run_tests(FLAGS.test_list.split(','))",
"def main():\n # Path used in assembly and previously discovered min year value.\n split_in_dir_path = \"../../data/split\"\n avg_5_in_dir_path = \"../../data/averaged_5\"\n avg_25_in_dir_path = \"../../data/averaged_25\"\n avg_50_in_dir_path = \"../../data/averaged_50\"\n dates_mat_path = \"../../data/dates_matrix/dates_matrix.npy\"\n min_year = 1962\n data_out_dir_path = \"../../data/rnn_set/data\"\n labels_out_dir_path = \"../../data/rnn_set/labels\"\n assemble_set(\n split_in_dir_path, avg_5_in_dir_path, avg_25_in_dir_path,\n avg_50_in_dir_path, dates_mat_path, min_year,\n data_out_dir_path, labels_out_dir_path\n )",
"def main():\n data = load_data()\n analyze_features(data['full_features'])\n model = train(data)\n\n with open('model.pickle', 'wb') as f:\n pickle.dump(model, f)\n evaluate(model, data)"
]
| [
"0.65926754",
"0.6401703",
"0.63215774",
"0.63125354",
"0.6269683",
"0.62658095",
"0.6245949",
"0.6243756",
"0.622921",
"0.62033784",
"0.61332804",
"0.6127914",
"0.61143494",
"0.6087046",
"0.60750556",
"0.6072328",
"0.606391",
"0.6054718",
"0.6037091",
"0.6021028",
"0.6000744",
"0.5995008",
"0.59776133",
"0.59758276",
"0.59525025",
"0.5936515",
"0.5933709",
"0.5927292",
"0.59225243",
"0.5908597"
]
| 0.668048 | 0 |
Create tables. Start with the Times table: load in the data and put it into useful lists. Load the lists into a numpy array, then create the cell text. Create columns and column colors, then create the table and save it. Repeat for the accuracy table. | def create_tables(times, accuracies, batch_sizes):
#Get time data
p_cpu_times = list(times[0].values())
p_gpu_times = list(times[1].values())
c_cpu_times = list(times[2].values())
c_gpu_times = list(times[3].values())
#Get differences in times
p_diff_times = [a - b for a, b in zip(p_cpu_times, p_gpu_times)]
c_diff_times = [a - b for a, b in zip(c_cpu_times, c_gpu_times)]
cpu_diff_times = [a - b for a, b in zip(p_cpu_times, c_cpu_times)]
gpu_diff_times = [a - b for a, b in zip(p_gpu_times, c_gpu_times)]
#Set data in np array for table
data = np.array([p_cpu_times,
p_gpu_times,
p_diff_times,
c_cpu_times,
c_gpu_times,
c_diff_times,
cpu_diff_times,
gpu_diff_times]).T
#Get data in text format
n_rows = data.shape[0]
cell_text = []
for row in range(n_rows):
cell_text.append(['%1.3f' % x for x in data[row]])
#Get rows and cols for table
columns = ('P CPU Time (s)', 'P GPU Time (s)', 'P Diff (s)', 'C CPU Time (s)', 'C GPU Time (s)', 'C Diff (s)', 'CPU Diff (s)', 'GPU Diff (s)')
row_colors = plt.cm.BuPu(np.linspace(0, 0.5, n_rows))
col_colors = np.array([192/255,192/255,192/255, 1])
col_colors = np.repeat(col_colors.reshape((1, col_colors.shape[0])), len(columns), axis=0)
#Create table
plt.figure(figsize=(10.8,9.4)).canvas.set_window_title('CPU vs GPU MNIST Neural Network')
plt.table(cellText=cell_text,
rowLabels=batch_sizes,
rowColours=row_colors,
colLabels=columns,
colColours=col_colors,
loc='center')
ax = plt.gca()
ax.axis('off')
plt.savefig('results\\figures\\table_time.png')
#Get accuracy table
#Get accuracy data
p_cpu_accuracy = list(accuracies[0].values())
p_gpu_accuracy = list(accuracies[1].values())
c_cpu_accuracy = list(accuracies[2].values())
c_gpu_accuracy = list(accuracies[3].values())
#Get max of each batch
p_cpu_max = [max(x) for x in p_cpu_accuracy]
p_gpu_max = [max(x) for x in p_gpu_accuracy]
c_cpu_max = [max(x) for x in c_cpu_accuracy]
c_gpu_max = [max(x) for x in c_gpu_accuracy]
#Get differences in accuracies
p_diff_acc = [a - b for a, b in zip(p_cpu_max, p_gpu_max)]
c_diff_acc = [a - b for a, b in zip(c_cpu_max, c_gpu_max)]
cpu_diff_acc = [a - b for a, b in zip(p_cpu_max, c_cpu_max)]
gpu_diff_acc = [a - b for a, b in zip(p_gpu_max, c_gpu_max)]
#Set data in np array for table
data = np.array([p_cpu_max,
p_gpu_max,
p_diff_acc,
c_cpu_max,
c_gpu_max,
c_diff_acc,
cpu_diff_acc,
gpu_diff_acc]).T
#Get data in text format
n_rows = data.shape[0]
cell_text = []
for row in range(n_rows):
cell_text.append(['%1.3f' % x for x in data[row]])
#Get rows and cols for table
columns = ('P CPU Acc (%)', 'P GPU Acc (%)', 'P Diff (%)', 'C CPU Acc (%)', 'C GPU Acc (%)', 'C Diff (%)', 'CPU Diff (%)', 'GPU Diff (%)')
#Create table
plt.clf()
plt.figure(figsize=(10.8,9.4)).canvas.set_window_title('CPU vs GPU MNIST Neural Network')
plt.table(cellText=cell_text,
rowLabels=batch_sizes,
rowColours=row_colors,
colLabels=columns,
colColours=col_colors,
loc='center')
ax = plt.gca()
ax.axis('off')
plt.savefig('results\\figures\\table_acc.png') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_display_data_table():\n\n for ccd in range(0, 10):\n for node in range(0, 4):\n file = 'ccd' + str(ccd) + '_' + str(node)\n infile = data_dir + file\n outfile = web_dir + 'Data/' + file\n\n f = open(infile, 'r')\n data = [line.strip() for line in f.readlines()]\n f.close()\n\n fo = open(outfile, 'w')\n#\n#--- adding heading\n#\n line = \"#\\n#Date Mn K alpha Al K alpha Ti K alpha Slope Sigma Int Sigma\\n#\\n\"\n fo.write(line)\n for ent in data:\n atemp = re.split('\\s+', ent)\n stime = int(atemp[0])\n#\n#--- converting the date into <mon> <year> form (e.g. May 2013)\n#\n ltime = tcnv.axTimeMTA(stime)\n btemp = re.split(':', ltime)\n year = btemp[0]\n [mon, mdate] = tcnv.changeYdateToMonDate(int(year), int(btemp[1]))\n lmon = tcnv.changeMonthFormat(mon)\n line = lmon + ' ' + year \n for j in range(1, len(atemp)):\n line = line + '\\t' + atemp[j]\n\n line = line + '\\n'\n fo.write(line)\n fo.close()",
"def create_table(self, title: str, columns: List[str], data: Dict[str, str]) -> None:\n table = Table(title=title, box=box.SIMPLE)\n for column in columns:\n table.add_column(column, justify=\"right\", style=\"bright_yellow\", no_wrap=True)\n\n for model, percentage in data.items():\n table.add_row(model, percentage)\n\n console = Console()\n console.print(table)",
"def table_maker():\r\n try:\r\n off_copy = off.copy()\r\n man_copy = man.copy()\r\n exe_copy = exe.copy()\r\n ceo_copy = ceo.copy()\r\n list_of_lists = [off_copy, man_copy, exe_copy, ceo_copy]\r\n\r\n for i in list_of_lists:\r\n for j in i:\r\n if type(j) == str:\r\n continue\r\n else:\r\n raise ValueError('All elements must be strings')\r\n\r\n row_num = max(len(off_copy), len(man_copy),\r\n len(exe_copy), len(ceo_copy))\r\n for i in list_of_lists:\r\n if len(i) != row_num:\r\n diff = row_num - len(i)\r\n for j in range(diff):\r\n i.append('')\r\n\r\n t = PrettyTable(\r\n ['Office Workers', 'Managers', 'Executives', 'CEO'])\r\n for i in range(row_num):\r\n t.add_row([off_copy[i], man_copy[i], exe_copy[i], ceo_copy[i]])\r\n\r\n with open('Employee Table.txt', 'w') as f:\r\n f.write(str(t))\r\n\r\n except FileNotFoundError:\r\n print(\"Error: No file entered\")",
"def trial_table():\n # Read data\n included = read_npy_file('trials.included.npy')\n fb_type = read_npy_file('trials.feedbackType.npy')\n fb_type = fb_type.astype(int)\n fb_time = read_npy_file('trials.feedback_times.npy')\n go_cue = read_npy_file('trials.goCue_times.npy')\n trial_intervals = read_npy_file('trials.intervals.npy')\n rep_num = read_npy_file('trials.repNum.npy')\n response_choice = read_npy_file('trials.response_choice.npy')\n response_times = read_npy_file('trials.response_times.npy')\n visual_left = read_npy_file('trials.visualStim_contrastLeft.npy')\n visual_right = read_npy_file('trials.visualStim_contrastRight.npy')\n visual_times = read_npy_file('trials.visualStim_times.npy')\n\n for j in range(len(trial_intervals)):\n nwb_file.add_trial(trial_intervals[j, 0], trial_intervals[j, 1])\n\n nwb_file.add_trial_column(\n 'included',\n 'Importantly, while this variable gives inclusion criteria according '\n 'to the definition of disengagement (see manuscript Methods), it does '\n 'not give inclusion criteria based on the time of response, as used '\n 'for most analyses in the paper.',\n np.ravel(included)\n )\n nwb_file.add_trial_column(\n 'go_cue',\n 'The \\'goCue\\' is referred to as the \\'auditory tone cue\\' in the manuscript.',\n np.ravel(go_cue)\n )\n nwb_file.add_trial_column(\n 'visual_stimulus_time',\n 'Times are relative to the same time base as every other time in the dataset, '\n 'not to the start of the trial.',\n np.ravel(visual_times)\n )\n nwb_file.add_trial_column(\n 'visual_stimulus_left_contrast',\n 'Proportion contrast. A value of 0.5 means 50% contrast. 0 is a blank '\n 'screen: no change to any pixel values on that side (completely undetectable).',\n np.ravel(visual_left)\n )\n nwb_file.add_trial_column(\n 'visual_stimulus_right_contrast',\n 'Proportion contrast. A value of 0.5 means 50% contrast. 0 is a blank '\n 'screen: no change to any pixel values on that side (completely undetectable).',\n np.ravel(visual_right)\n )\n nwb_file.add_trial_column(\n 'response_time',\n 'Times are relative to the same time base as every other time in the dataset, '\n 'not to the start of the trial.',\n np.ravel(response_times)\n )\n nwb_file.add_trial_column(\n 'response_choice',\n 'Enumerated type. The response registered at the end of the trial, '\n 'which determines the feedback according to the contrast condition. '\n 'Note that in a small percentage of cases (~4%, see manuscript Methods) '\n 'the initial wheel turn was in the opposite direction. -1 for Right '\n 'choice (i.e. correct when stimuli are on the right); +1 for left '\n 'choice; 0 for Nogo choice.',\n np.ravel(response_choice)\n )\n nwb_file.add_trial_column(\n 'feedback_time',\n 'Times are relative to the same time base as every other time in the dataset, '\n 'not to the start of the trial.',\n np.ravel(fb_time)\n )\n nwb_file.add_trial_column(\n 'feedback_type',\n 'Enumerated type. -1 for negative feedback (white noise burst); +1 for '\n 'positive feedback (water reward delivery).',\n np.ravel(fb_type)\n )\n nwb_file.add_trial_column(\n 'rep_num',\n 'Trials are repeated if they are \"easy\" trials (high contrast stimuli '\n 'with large difference between the two sides, or the blank screen '\n 'condition) and this keeps track of how many times the current '\n 'trial\\'s condition has been repeated.',\n np.ravel(rep_num)\n )",
"def make_table(ranked_means):\n fp = open(\"table.tex\", \"w\")\n fp.write(\"\"\"\\\\begin{tabular}{|l|c||l|c|}\n \\\\hline\n \\\\multicolumn{2}{|c||}{Slowest} & \\\\multicolumn{2}{|c|}{Fastest} \\\\\\\\ \\\\hline\n Feature & Rate & Feature & Rate \\\\\\\\ \\\\hline\n\"\"\")\n top_10 = ranked_means[0:10]\n bottom_10 = ranked_means[-10:]\n for ((f_rate, f_name),(s_rate,s_name)) in zip(top_10, bottom_10):\n f_name = f_name.split(\":\")[-1]\n f_name = f_name.rsplit(\" \", 1)[0] if f_name.endswith(\"(V)\") else f_name\n s_name = s_name.split(\":\")[-1]\n s_name = s_name.rsplit(\" \", 1)[0] if s_name.endswith(\"(V)\") else s_name\n fp.write(\" %s & %.2f & %s & %.2f \\\\\\\\ \\n\" % \\\n (f_name, f_rate, s_name, s_rate))\n fp.write(\"\\\\hline\\n\")\n fp.write(\"\\\\end{tabular}\\n\")\n fp.close()\n\n fp = open(\"supp_meaning_table.tex\", \"w\")\n fp.write(\"\"\"\\\\begin{tabular}{|l|c||l|c||l|c||l|c|}\n \\\\hline\n Meaning & Category & Meaning & Category & Meaning & Category & Meaning & Category\\\\\\\\ \\\\hline\n\n\"\"\")\n feature_names = [f.split(\":\")[-1] for (r,f) in ranked_means]\n feature_names.sort(key=lambda s: s.lower())\n col1 = feature_names[0:25]\n col2 = feature_names[25:50]\n col3 = feature_names[50:75]\n col4 = feature_names[75:]\n for a,b,c,d in zip(col1,col2,col3,col4):\n x,y,z,w = [get_meaning_category(i) or \"Excluded\" for i in (a,b,c,d)]\n # Lop off (V)s (we needed them above for get_meaning_category to work)\n a,b,c,d = [f.rsplit(\" \", 1)[0] if f.endswith(\"(V)\") else f for f in (a,b,c,d)]\n fp.write(\"%s & %s & %s & %s & %s & %s & %s & %s\\\\\\\\ \\n\" % (a, x, b, y, c, z, d, w))\n fp.write(\"\\\\hline\\n\")\n fp.write(\"\\\\end{tabular}\\n\")\n fp.close()",
"def build_table(type_, test_type, device_name, thresholds):\n x = PrettyTable() \n x.field_names = [device_name] + thresholds\n \n \"Chrome,\" + test_type + \",\" + str(notAfter_date) + \",\" + thresholds[index], \",fail\"\n \n ##read all Chromep entries\n ##get all test_type rows\n ##loop rows\n ##show table",
"def generate_table(start_int=0, end_int=10, table_type='Addition'):\n lines = [r'\\documentclass{article}',\n r'\\usepackage{geometry}',\n r'\\geometry{landscape,a4paper,total={170mm,257mm},left=10mm,right=10mm,top=10mm}',\n r'\\usepackage{amsmath}',\n r'\\usepackage{amsfonts}',\n r'\\usepackage{amssymb}',\n r'\\usepackage{dcolumn}',\n r'\\newcolumntype{2}{D{.}{}{2.0}}',\n r'\\begin{document}',\n r'\\begin{large}',\n r'\\begin{center}',\n r'{\\Large ' + table_type + r' Table version 0.1\\par}',\n r'\\vspace*{25px}',\n r'\\renewcommand\\arraystretch{1.3}',\n r'\\setlength\\doublerulesep{0pt}',\n r'\\pagenumbering{gobble}',\n r'\\begin{tabular}{r||*{' + str(end_int - start_int + 1) + '}{3|}}']\n\n operator = {'Addition': r'$+$',\n 'Subtraction': r'$-$',\n 'Multiplication': r'$\\times$'}\n\n lines.append(operator[table_type] + ''.join([' & {} '.format(x) for x in range(start_int, end_int + 1)]) + r'\\\\')\n lines.append('\\hline\\hline')\n for i in range(start_int, end_int + 1):\n if table_type == 'Addition':\n lines.append(str(i) + ''.join([' & {} '.format(x + i) for x in range(start_int, end_int + 1)]) + r'\\\\')\n if table_type == 'Subtraction':\n lines.append(str(i) + ''.join([' & {} '.format(x - i) for x in range(start_int, end_int + 1)]) + r'\\\\')\n if table_type == 'Multiplication':\n lines.append(str(i) + ''.join([' & {} '.format(x * i) for x in range(start_int, end_int + 1)]) + r'\\\\')\n lines.append('\\hline')\n\n lines.append(r'\\end{tabular}')\n lines.append(r'\\end{center}')\n lines.append(r'\\end{large}')\n lines.append(r'\\end{document}')\n\n return '\\n'.join(lines)",
"def _buildtable(self):\n\n tabrows = []\n\n for i, (expid, exfiles) in enumerate(self._exposure_files.items()):\n specflux_b, specflux_r, specflux_z = [], [], []\n tab = None\n\n if len(exfiles) == 0:\n continue\n\n print(expid)\n for exfile in exfiles:\n print(exfile)\n hdu = fits.open(exfile)\n\n # The following tables are present in the redux sframes and the\n # nightwatch qcframes.\n wave = hdu['WAVELENGTH'].data\n\n # However, in the nightwatch files the wavelength data are a\n # table of size nfiber x nwavelength.\n if self._filetype == 'nightwatch':\n if wave.ndim > 1:\n wave = wave[0]\n\n fluxhead = hdu['FLUX'].header\n fluxdata = hdu['FLUX'].data\n ivardata = hdu['IVAR'].data\n fibermap = hdu['FIBERMAP'].data\n exptime = fluxhead['EXPTIME']\n if not np.all(self._unditherfa['FIBER'] ==\n np.arange(len(self._unditherfa))):\n raise ValueError('weird fiberassign file format!')\n fibermap = self._unditherfa[fibermap['FIBER']]\n\n target_id = fibermap['TARGETID']\n target_ra = fibermap['TARGET_RA']\n target_dec = fibermap['TARGET_DEC']\n fiber = fibermap['FIBER']\n objtype = fibermap['OBJTYPE']\n flux_g = fibermap['FLUX_G']\n flux_r = fibermap['FLUX_R']\n flux_z = fibermap['FLUX_Z']\n x, y = [fibermap['FIBERASSIGN_{}'.format(val)] for val in ('X', 'Y')]\n\n camera = fluxhead['CAMERA'][0].upper()\n\n if getattr(self, '_deltara', None) is not None:\n dra = self._deltara[i]*np.ones(len(fiber))\n ddec = self._deltadec[i]*np.ones(len(fiber))\n elif self._dithertype == 'telescope':\n dithra = self._ditherfa['target_ra']\n dithdec = self._ditherfa['target_dec']\n udithra = self._unditherfa['target_ra']\n udithdec = self._unditherfa['target_dec']\n ontarget = ((self._ditherfa['targetid'] ==\n self._unditherfa['targetid']) &\n (self._ditherfa['objtype'] == 'TGT'))\n dfiberra = (dithra-udithra)*np.cos(np.radians(udithdec))*60*60\n dfiberdec = (dithdec-udithdec)*60*60\n if not np.all(self._ditherfa['FIBER'] ==\n np.arange(len(self._ditherfa))):\n raise ValueError('unexpected shape of dither file')\n dfiberra[~ontarget] = np.nan\n dfiberdec[~ontarget] = np.nan\n dfiberra = dfiberra[fiber]\n dfiberdec = dfiberdec[fiber]\n wcs = self.lookup_wcs(fluxhead['MJD-OBS'])\n centralwcs = self._central_wcs\n if (~np.isfinite(centralwcs['cenra'][1]) or\n ~np.isfinite(centralwcs['cendec'][1])):\n raise ValueError('central pointing ra/dec is NaN!')\n dtelra = (wcs['cenra'][1]-centralwcs['cenra'][1])\n dtelra *= np.cos(np.radians(centralwcs['cendec'][1]))\n dteldec = wcs['cendec'][1]-centralwcs['cendec'][1]\n dra = dfiberra + dtelra*60*60\n ddec = dfiberdec + dteldec*60*60\n if np.all(~np.isfinite(dra)):\n print('warning: no good telescope offset for %s' %\n exfile)\n else:\n raise ValueError('not implemented')\n \n for j, fiber_id in enumerate(fiber):\n flux = fluxdata[j]\n ivar = ivardata[j]\n if not np.any(ivar > 0):\n specflux = 0\n specflux_ivar = 0\n else:\n meanivar = np.mean(ivar[ivar > 0])\n mask = ivar > meanivar / 100\n specflux = np.trapz(flux*mask, wave)\n specflux_ivar = 1./np.sum(ivar[mask]**-1)\n # Schlegel: sum over correct wavelengths, all three\n # filters, plus 11 pixel median filter to reject\n # cosmics.\n # will require being better about reading in\n # the spectrographs together.\n tabrows.append((expid, exptime,\n target_id[j], target_ra[j], target_dec[j],\n fiber[j], objtype[j],\n flux_g[j], flux_r[j], flux_z[j],\n specflux, specflux_ivar, camera,\n dra[j], ddec[j],\n x[j], y[j]))\n\n tab = Table(rows=tabrows,\n names=('EXPID', 'EXPTIME',\n 'TARGETID', 'TARGET_RA', 'TARGET_DEC',\n 
'FIBER', 'OBJTYPE',\n 'FLUX_G', 'FLUX_R', 'FLUX_Z',\n 'SPECTROFLUX', 'SPECTROFLUX_IVAR', 'CAMERA',\n 'DELTA_X_ARCSEC', 'DELTA_Y_ARCSEC',\n 'XFOCAL', 'YFOCAL'),\n meta={'EXTNAME' : 'DITHER',\n 'TILEID' : '{}'.format(self._tileid)})\n\n return tab",
"def make_performance_table(self):\n table = Table()\n table.add_column(\"Classifier\", ratio=25)\n table.add_column(\"Score\", ratio=10, justify=\"center\", no_wrap=True)\n table.add_column(\"Params\", ratio=25, no_wrap=False)\n table.add_column(\"Model ID\",ratio=40, no_wrap=True)\n\n for name, stuff in self.trainer.performance.items():\n score, params, hash_id = stuff\n style = \"bold green\" if name == self.trainer.best_classifier__name else \"\"\n best_one = \" ***\" if name == self.trainer.best_classifier__name else \"\"\n \n table.add_row(\n str(name),\n str(np.round(score, 3)), \n str(params), \n f\"{str(hash_id)}{best_one}\",\n style=style)\n \n return table",
"def _make_table(\n self,\n epoch,\n total_loss_training,\n total_loss_validation=None,\n losses_training=None,\n losses_validation=None,\n metrics=None,\n learning_rate=None,\n ):\n col_width = 9\n multi_target = losses_training is not None and len(losses_training) > 1\n\n title = \"\\n\\n Training history\\n\"\n\n # Calculate width of table and columns\n epoch_width = 18\n if not multi_target:\n train_width = 20\n else:\n train_width = min(40, (col_width + 2) * (len(losses_training) + 1))\n\n val_width = 0\n if total_loss_validation is not None:\n val_width = 20\n if multi_target:\n val_width = min(40, (col_width + 2) * (len(losses_training) + 1))\n\n all_metrics_width = 0\n if metrics is not None:\n if not multi_target:\n metrics_width = col_width + 2\n else:\n metrics_width = len(losses_training) * (col_width + 2) + 1\n all_metrics_width = len(metrics) * metrics_width\n\n table_width = epoch_width + train_width + val_width + all_metrics_width\n\n self.table = rich.table.Table(\n expand=False, box=rich.box.SIMPLE, title=title, width=table_width, leading=0\n )\n\n self.table.add_column(\n Text(\"Epoch\", style=\"Grey\"), justify=\"center\", width=epoch_width\n )\n self.table.add_column(\n Text(\"Training loss\", style=\"red bold\"), justify=\"center\", width=train_width\n )\n if total_loss_validation is not None:\n self.table.add_column(\n Text(\"Validation loss\", style=\"blue bold\"),\n justify=\"center\",\n width=val_width,\n )\n if metrics is not None:\n for name, m in metrics.items():\n self.table.add_column(\n Text(name, style=\"purple bold\"),\n justify=\"center\",\n width=metrics_width,\n )\n\n def make_header_columns():\n # Epoch and LR\n columns = [Text(\"#\", justify=\"right\", style=\"bold\")]\n if learning_rate is not None:\n columns += [Text(\"LR\", justify=\"right\")]\n yield Columns(columns, align=\"center\", width=6)\n\n # Training losses\n text = Align(\n Text(\"Total\", justify=\"right\", style=\"bold red\"),\n width=col_width,\n align=\"center\",\n )\n if multi_target:\n columns = [text] + [\n Align(Text(n, justify=\"right\", style=\"red\"), width=col_width)\n for n in losses_training.keys()\n ]\n yield Columns(columns, align=\"center\", width=col_width)\n else:\n yield text\n\n # Validation losses\n if total_loss_validation is not None:\n text = Align(\n Text(\"Total\", justify=\"center\", style=\"bold blue\"),\n width=col_width,\n align=\"center\",\n )\n if multi_target:\n columns = [text] + [\n Align(Text(n, justify=\"center\", style=\"blue\"), width=col_width)\n for n in losses_validation.keys()\n ]\n yield Columns(columns, align=\"center\", width=col_width)\n else:\n yield text\n\n # Metrics\n if metrics is not None:\n for name, values in metrics.items():\n if isinstance(values, dict):\n columns = [\n Align(\n Text(n, justify=\"center\", style=\"purple\"),\n width=col_width,\n )\n for n in values.keys()\n ]\n yield Columns(columns, align=\"center\", width=col_width)\n else:\n yield Align(Text(\"\"), width=col_width)\n\n self.table.add_row(*make_header_columns())\n self.table.add_row()",
"def create_latex_table(data, id):\n bd = data['bd']\n sd = data['sd']\n \n filename = 'LatestResults.tex'\n file = r'..\\latex\\tables\\\\' + filename\n\n if os.path.exists(file):\n f_temp = os.path.splitext(file)[0] # without extension\n os.rename(file, f_temp + '_' + id + '.tex')\n\n f = codecs.open(file, 'w', 'utf-8')\n \n f.write('\\n' + r'\\begin{table}' + '\\n')\n f.write(r' \\centering' + '\\n')\n f.write(r' \\caption{Results for each drum instrument with batch sizes 64, 256 and 512.}' + '\\n')\n f.write(r' \\begin{tabular}{l c c c}' + '\\n')\n f.write(r' \\textbf{Batch size} & Metric & BD & SD \\\\' + '\\n')\n f.write(r' \\midrule' + '\\n')\n f.write(r' \\midrule' + '\\n')\n \n for batch_size in BATCHES:\n f.write(' ' + str(batch_size).rstrip('\\n'))\n # 0.805 +- 0.02\n f.write(r' & P & ' + r'$' + '{:.3}'.format(bd[batch_size]['p_mean']) + r' \\pm ' + '{:.3f}'.format(bd[batch_size]['p_std']) + '$' + r' & ' + r'$' + '{:.3}'.format(sd[batch_size]['p_mean']) + r' \\pm ' + '{:.3f}'.format(sd[batch_size]['p_std']) + '$' + r' \\\\' + '\\n')\n f.write(r' & R & ' + r'$' + '{:.3}'.format(bd[batch_size]['r_mean']) + r' \\pm ' + '{:.3f}'.format(bd[batch_size]['r_std']) + '$' + r' & ' + r'$' + '{:.3}'.format(sd[batch_size]['r_mean']) + r' \\pm ' + '{:.3f}'.format(sd[batch_size]['r_std']) + '$' + r' \\\\' + '\\n')\n f.write(r' & F & ' + r'$' + '{:.3}'.format(bd[batch_size]['f_mean']) + r' \\pm ' + '{:.3f}'.format(bd[batch_size]['f_std']) + '$' + r' & ' + r'$' + '{:.3}'.format(sd[batch_size]['f_mean']) + r' \\pm ' + '{:.3f}'.format(sd[batch_size]['f_std']) + '$' + r' \\\\' + '\\n')\n # Don't write horizontal line on the last batch.\n if batch_size != BATCHES[-1]:\n f.write(r' \\midrule' + '\\n')\n\n f.write(r' \\end{tabular}' + '\\n')\n f.write(r' \\label{tab:ResultsTable}' + '\\n')\n f.write(r'\\end{table}' + '\\n')\n f.close()",
"def generate_table(self, outtablename,\n cols=['A', 'B', 'AB'],\n generateTable=True):\n if generateTable:\n new_indices = ['time (s)', 'mean counts']\n for idx in self.data[cols].describe().index[2:]:\n new_indices.append(idx)\n outTable = self.data[cols].describe()\\\n .set_index(pd.Index(new_indices))\n outTable.to_latex(\n self.tables_dir + outtablename + \".tex\", float_format=\"%d\")\n print(\"Outtable: \", outTable)",
"def generate_table(self, rows):\n ...",
"def tables(args):\n\n config_file = args.setupfn\n conf_base = os.path.basename(config_file).split('.')[0]\n statfile = os.path.join(args.outputdir,\n \"{}_radvel.stat\".format(conf_base))\n status = load_status(statfile)\n\n assert status.getboolean('mcmc', 'run'), \\\n \"Must run MCMC before making tables\"\n\n P, post = radvel.utils.initialize_posterior(config_file)\n post = radvel.posterior.load(status.get('fit', 'postfile'))\n chains = pd.read_csv(status.get('mcmc', 'chainfile'))\n minafactor = status.get('mcmc', 'minafactor')\n maxarchange = status.get('mcmc', 'maxarchange')\n maxgr = status.get('mcmc', 'maxgr')\n mintz = status.get('mcmc', 'mintz')\n if 'derive' in status.sections() and status.getboolean('derive', 'run'):\n dchains = pd.read_csv(status.get('derive', 'chainfile'))\n chains = chains.join(dchains, rsuffix='_derived')\n derived = True\n else:\n derived = False\n report = radvel.report.RadvelReport(P, post, chains, minafactor, maxarchange, maxgr, mintz, derived=derived)\n tabletex = radvel.report.TexTable(report)\n attrdict = {'priors': 'tab_prior_summary', 'rv': 'tab_rv',\n 'params': 'tab_params', 'derived': 'tab_derived',\n 'crit': 'tab_crit'}\n for tabtype in args.type:\n print(\"Generating LaTeX code for {} table\".format(tabtype))\n\n if tabtype == 'ic_compare':\n assert status.has_option('ic_compare', 'ic'), \\\n \"Must run Information Criteria comparison before making comparison tables\"\n\n compstats = eval(status.get('ic_compare', 'ic'))\n report = radvel.report.RadvelReport(\n P, post, chains, minafactor, maxarchange, maxgr, mintz, compstats=compstats\n )\n tabletex = radvel.report.TexTable(report)\n tex = tabletex.tab_comparison()\n elif tabtype == 'rv':\n tex = getattr(tabletex, attrdict[tabtype])(name_in_title=args.name_in_title, max_lines=None)\n elif tabtype == 'crit':\n tex = getattr(tabletex, attrdict[tabtype])(name_in_title=args.name_in_title)\n else:\n if tabtype == 'derived':\n assert status.has_option('derive', 'run'), \\\n \"Must run `radvel derive` before making derived parameter table\"\n assert tabtype in attrdict, 'Invalid Table Type %s ' % tabtype\n tex = getattr(tabletex, attrdict[tabtype])(name_in_title=args.name_in_title)\n\n saveto = os.path.join(\n args.outputdir, '{}_{}.tex'.format(conf_base, tabtype)\n )\n with open(saveto, 'w+') as f:\n f.write(tex)\n\n savestate = {'{}_tex'.format(tabtype): os.path.relpath(saveto)}\n save_status(statfile, 'table', savestate)",
"def write_the_table(what):\n global count_row\n count_row += 1\n\n if what.get('rank') == 0:\n background_blue.append(count_row)\n\n struct = what.get('structure')\n link = what.get('link')\n exams_1 = what.get('exams_1')\n exams_2 = what.get('exams_2')\n exams_empty = [['', '', '', '', '', '', '', '', '', '', '', '']] \\\n if self.training.session_type != '1' else \\\n [['', '', '', '', '', '']]\n\n def formated(number):\n \"\"\"\n Remove trailing 0\n \"\"\"\n frac, whole = modf(number)\n if frac == 0:\n return int(whole)\n return str(number).rstrip('0')\n\n def write_exams(list_1, list_2):\n exam_table = []\n for ex_1, ex_2 in itertools.zip_longest(list_1, list_2):\n ex_1_table = [\n formated(ex_1.coefficient) if ex_1 is not None else '',\n [\n Paragraph(filter_content(ex_1.label) if ex_1 else '',\n self.styles['SmallNormal']),\n Paragraph(\n \"<para textColor=grey>\" + filter_content(ex_1.additionnal_info) \\\n if ex_1 and ex_1.additionnal_info \\\n else \"\" + \"</para\\>\",\n self.styles['SmallNormal'])\n ],\n ex_1.type_exam if ex_1 is not None else '',\n ex_1.text_duration if ex_1 is not None else '',\n '' if ex_1 is None \\\n else ex_1.convocation if not training_is_ccct \\\n else ex_1.get_type_ccct_display(),\n ex_1.eliminatory_grade if ex_1 is not None else '',\n ex_1.threshold_session_2 if ex_1 is not None else '',\n ]\n\n ex_2_table = [\n formated(ex_2.coefficient) if ex_2 is not None else '',\n [Paragraph(filter_content(ex_2.label) if ex_2 is not None else '', self.styles[\n 'SmallNormal']), Paragraph(\"<para textColor=grey\\\n >\" + ex_2.additionnal_info + \"</para\\\n >\" if ex_2.additionnal_info is not None else \"\",\n self.styles['SmallNormal'])],\n ex_2.type_exam if ex_2 is not None else '',\n ex_2.text_duration if ex_2 is not None else '',\n ex_2.eliminatory_grade if ex_2 is not None else '',\n ] if ex_2 is not None else ['', '', '', '', '']\n if self.training.session_type != '1':\n ex_1_table.extend(ex_2_table)\n else:\n ex_1_table.pop()\n exam_table.append(ex_1_table)\n exam_table = exam_table if len(exam_table) > 0 else exams_empty\n if exam_table == exams_empty:\n # TODO: calculate empty space to set rowHeights in order to\n # avoid blank in table\n pass\n inner_table = Table(\n exam_table, colWidths=width_exams, rowHeights=None)\n inner_table.setStyle(TableStyle(\n [('INNERGRID', (0, 0), (-1, -1), 0.1, colors.black),\n ('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),\n ('ALIGN', (0, 0), (-1, -1), 'CENTER'),\n ('FONTSIZE', (0, 0), (-1, -1), 8),\n # ('LEFTPADDING', (0, 0), (-1, -1), 0),\n # ('RIGHTPADDING', (0, 0), (-1, -1), 0),\n ('BOTTOMPADDING', (0, 0), (-1, -1), 0),\n ('TOPPADDING', (0, 0), (-1, -1), 0),\n ]))\n return inner_table\n\n ref_scol = struct.ref_si_scol if struct.ref_si_scol else \"\" # FIX bug with rof data\n ref_data = (\n Paragraph(struct.ROF_ref, self.styles['CenterSmall']),\n Paragraph(ref_scol, self.styles['CenterSmall'])\n ) if self.reference == 'both' \\\n else Paragraph(struct.ROF_ref, self.styles['CenterSmall']) if self.reference == 'with_rof' \\\n else Paragraph(ref_scol, self.styles['CenterSmall']) if self.reference == 'with_si' \\\n else Paragraph('', self.styles['CenterSmall'])\n\n object_line = [\n Paragraph(\n \"<para leftIndent=%s>%s</para> \" % (what.get('rank')*10, filter_content(struct.label)),\n self.styles['SmallBold'] if what.get('rank') == 0 \\\n or what.get('structure').nature == 'UE' \\\n else self.styles['SmallNormal']\n ),\n Paragraph(\n struct.get_respens_name if not struct.external_name \\\n else struct.external_name,\n 
self.styles['CenterSmall'] if not struct.external_name else \\\n self.styles['CenterSmallItalic']\n ),\n [ref_data],\n '30' if self.training.degree_type.ROF_code in self.training_types_for_which_to_display_30_ects\\\n and struct.nature == 'SE'\\\n else struct.ECTS_credit if struct.ECTS_credit else '-',\n formated(link.coefficient) if link.coefficient else '',\n link.eliminatory_grade,\n write_exams(exams_1, exams_2)\n ]\n if self.respforms:\n if self.reference == 'without':\n object_line.pop(2)\n else:\n object_line.pop(1)\n if self.reference == 'without':\n object_line.pop(1)\n\n big_table.append(object_line)\n\n for e in what.get('children'):\n write_the_table(e)",
"def test_make_HTML_table(self):\r\n\r\n # test pie charts\r\n fracs_labels_other, fracs_labels, all_counts, other_cat, red, other_frac = \\\r\n get_fracs(self.counts1, 5, 10, 'pie')\r\n\r\n img_data = make_HTML_table(\"Phylum\", other_frac, 10, red, other_cat,\r\n fracs_labels_other, fracs_labels,\r\n self.dir_path, all_counts, 1, self.prefs,\r\n self.color_prefs, 'black', 'white', 'pie',\r\n 'Test1',\r\n self.generate_image_type, self.plot_width,\r\n self.plot_height, self.bar_width, self.dpi, 0,\r\n 'categorical', False, False)\r\n\r\n self.assertEqual(len(img_data), 2)\r\n\r\n # test area charts\r\n fracs_labels_other, fracs_labels, all_counts, other_cat, red, other_frac = \\\r\n get_fracs(self.counts1, 5, 10, 'area')\r\n\r\n img_data = make_HTML_table(\"Phylum\", other_frac, 10, red, other_cat,\r\n fracs_labels_other, fracs_labels,\r\n self.dir_path, all_counts, 1, self.prefs,\r\n self.color_prefs, 'black', 'white', 'pie',\r\n 'Test1',\r\n self.generate_image_type, self.plot_width,\r\n self.plot_height, self.bar_width, self.dpi, 0,\r\n 'categorical', False, False)\r\n\r\n self.assertEqual(len(img_data), 2)\r\n\r\n # test bar charts\r\n fracs_labels_other, fracs_labels, all_counts, other_cat, red, other_frac = \\\r\n get_fracs(self.counts1, 5, 10, 'bar')\r\n\r\n img_data = make_HTML_table(\"Phylum\", other_frac, 10, red, other_cat,\r\n fracs_labels_other, fracs_labels,\r\n self.dir_path, all_counts, 1, self.prefs,\r\n self.color_prefs, 'black', 'white', 'pie',\r\n 'Test1',\r\n self.generate_image_type, self.plot_width,\r\n self.plot_height, self.bar_width, self.dpi, 0,\r\n 'categorical', False, False)\r\n\r\n self.assertEqual(len(img_data), 2)\r\n self._paths_to_clean_up = [\"/tmp/qiimewebfiles/charts/\" + f\r\n for f in listdir(\"/tmp/qiimewebfiles/charts\")]",
"def _make_tables(self, df):\n # Time table:\n time_keys = ['time', 'endtime', 'event_number_nv']\n self.df_event_time = df.loc[:, time_keys]\n\n # Properties tables:\n pos_keys = ['angle', 'pos_x', 'pos_x_spread', 'pos_y',\n 'pos_y_spread', 'pos_z', 'pos_z_spread']\n self.df_event_position = df.loc[:, pos_keys]\n\n keys = df.columns.values\n keys = [k for k in keys if k not in time_keys + pos_keys]\n self.df_event_properties = df.loc[:, keys]\n\n # Table panels:\n index = self.evt_sel_slid.value\n self.time_table = pn.panel(self.df_event_time.loc[index],\n )\n self.pos_table = pn.panel(self.df_event_position.loc[index:index, :],\n sizing_mode='scale_width')\n\n self.prop_table = pn.panel(self.df_event_properties.loc[index:index, :],\n sizing_mode='scale_width')",
"def table_gen(NamesL_pairs, p_pL, m_mL, p_mL, m_pL, p_valsL, p_vals_BonferoniL, RatiosL, p_valsL_divergent_convergent,\n p_valsL_divergent_convergent_BonferoniL, RatiosL_divergent_convergent, output_table):\n datafile = open(output_table, \"w\")\n datafile.write(\n \"Feature_1\" + '\\t' + \"Feature_2\" + \"\\t\" + \"plus_plus\" + '\\t' + \"minus_minus\" + '\\t' + \"plus_minus\" + '\\t' + \"minus_plus\" + '\\t' + \"p_value_same_opposite\" + '\\t' + \"p-value_same_opposite_Bonferoni_corrected\" + '\\t' + \"Ratio_same_opposite\" + '\\t' + \"p_value_divergent_convergent\" + '\\t' + \"p_value_divergent_convergent Bonferoni corrected\" + '\\t' + \"Ratio divergent convergent\" + '\\n')\n for i in range(len(NamesL_pairs)):\n datafile.write(\n NamesL_pairs[i][0] + '\\t' + NamesL_pairs[i][1] + '\\t' + str(p_pL[i]) + '\\t' + str(m_mL[i]) + '\\t' + str(\n p_mL[i]) + '\\t' + str(m_pL[i]) + '\\t' + str(p_valsL[i]) + '\\t' + str(p_vals_BonferoniL[i]) + '\\t' + str(\n RatiosL[i]) + '\\t' + str(p_valsL_divergent_convergent[i]) + '\\t' + str(\n p_valsL_divergent_convergent_BonferoniL[i]) + '\\t' + str(RatiosL_divergent_convergent[i]) + '\\n')\n datafile.close()\n return",
"def column_creator(path):\n if not os.path.exists(path+'tables'):\n os.makedirs(path+'tables')\n\n\n # Sequences\n if os.path.exists(path+'SEQ.txt'):\n with open(os.path.join(path+'SEQ.txt')) as f1, open(os.path.join(path+'tables/sequences_table.txt'), 'a') as f2:\n c = Counter(x.strip() for x in f1)\n for x in c:\n f2.write(\"%s\\t%s\\n\" % (x, str(c[x])))\n f1.close()\n f2.close()\n\n # Modifications\n if os.path.exists(path + 'modifications.txt'):\n\n with open(os.path.join(path+'modifications.txt')) as f1, open(os.path.join(path+'tables/modifications_table.txt'), 'a') as f2:\n c = Counter(x.strip() for x in f1)\n for x in c:\n f2.write(\"%s\\t%s\\n\" % (x, str(c[x])))\n f1.close()\n f2.close()\n\n # Spectrum identify:\n if os.path.exists(path + 'spectrum_identify.txt'):\n\n with open(os.path.join(path+'spectrum_identify.txt')) as f1, open(path+'tables/spectrum_ide_table.txt', 'a') as f3:\n lines1 = f1.read().count('\\n')\n f3.write(\"%s\\n%s\\n\" % (\"Spectrum Number\",lines1))\n f1.close()\n f3.close()\n\n if os.path.exists(path + 'spectrum_unidentify.txt'):\n with open(os.path.join(path+'spectrum_unidentify.txt')) as f2, open(path+'tables/spectrum_unide_table.txt', 'a') as f3:\n lines2 = f2.read().count('\\n')\n f3.write(\"%s\\n%s\\n\" % (\"Spectrum Number\",lines2))\n f2.close()\n f3.close()\n\n if os.path.exists(path+'taxonomy_identify.txt'):\n # Taxonomy ide:\n with open(os.path.join(path+'taxonomy_identify.txt')) as f1, open(os.path.join(path+'tables/taxonomy_ide_table.txt'), 'a') as f2:\n c = Counter(x.strip() for x in f1)\n for x in c:\n f2.write(\"%s\\t%s\\n\" % (x, str(c[x])))\n f1.close()\n f2.close()\n\n\n if os.path.exists(path + 'taxonomy_unidentify.txt'):\n # Taxonomy unide:\n with open(os.path.join(path+'taxonomy_unidentify.txt')) as f1, open(os.path.join(path+'tables/taxonomy_unide_table.txt'), 'a') as f2:\n c = Counter(x.strip() for x in f1)\n for x in c:\n f2.write(\"%s\\t%s\\n\" % (x, str(c[x])))\n f1.close()\n f2.close()",
"def create_tables():\n\tlog_msg4(\"No hay tablas para el año \" + txt_year + \". Creando\")\n\n\tcreate_table('visited')\n\tcreate_table('saved')\n\tcreate_table('actions')\n\n\tglobal new_tables_created\n\tnew_tables_created = True\n\n\tlog_msg_ok4()",
"def tabular_data(self):\n path = CFG.GRAPHS_DIR\n chdir(path)\n\n if self.experiment_count == 1:\n f = open(self.tablefile, 'w')\n f.write(self.print_border_line())\n f.write(self.table_header())\n f.write(self.print_border_line())\n f.write(self.pretty_string(\"Functions\"))\n f.write(self.pretty_string(\"Batch Size\"))\n f.write(self.pretty_string(\"Training (%)\"))\n f.write(self.pretty_string(\"Testing (%)\", True))\n f.write('\\n')\n f.write(self.print_border_line())\n f.close()\n\n f = open(self.tablefile, 'a')\n f.write(self.pretty_string(self.function_name))\n f.write(self.pretty_string(str(self.batch_size)))\n f.write(self.pretty_string(self.tr_mean_str))\n f.write(self.pretty_string(self.test_mean_str, True))\n f.write('\\n')\n f.close()",
"def defstuff():\n\t\n\tglobal PA, PB, col, col2, rng, xlimits, nbin, lPbw, WJK, outTab\n\t\n\tPA = ['Per1', 'Per2', 'Per3', 'Per4', 'Per5', 'Per6', 'Per7', 'Per8', 'Per9', 'Per10'] # Period columns for A sample\n\tPB = ['P_1', 'P_2', 'P_3'] # Period columns for B sample\n\t# logPB = ['logP_1', 'logP_2', 'logP_3'] \n\tcol = {1:'r', 2:'g', 3:'b'} \n\tcol2 = {1:'m', 2:'y', 3:'k'}\n\trng = (8,14) # Magnitude range\n\txlimits = (0.3 ,3.0) # X-axis plot limits\n\tbw = 0.01 # histogram bin width -- not global!\n\tnbin = (max(rng)-min(rng))/bw # How many bins for histogram.\n\n\t################# CAREFUL!!!!! #####################\n\tlPbw = 0.025 # log period bin width\n\t\n\toutTab = Table(np.zeros((len(B), 11)), names=('ID', 'WJK', 'est_mag', 'delta_mag', 'delta1', 'delta2', 'delta3', 'KDE_mag', 'KDEdelta_mag', 'sigma', 'nstar'), dtype=('string', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64' ))",
"def table_example():\n\n print(\"\\nExample making a new table from scratch:\\n\")\n # Make a new (empty) table object\n tbl = table(\"A table with random data\")\n # Add three columns called \"x\", \"x^2\" and \"1/x\"\n tbl.addcolumn(\"x\")\n tbl.addcolumn(\"x^2\")\n tbl.addcolumn(\"1/x\")\n # Add some rows of data\n for i in range(0, 10):\n row = dict()\n row[\"x\"] = i\n row[\"x^2\"] = i * i\n if i != 0:\n row[\"1/x\"] = 1.0 / float(i)\n else:\n row[\"1/x\"] = \"?\"\n tbl.add_data(row)\n # Define some graphs\n tbl.definegraph(\"Y = X(squared)\", (\"x\", \"x^2\"))\n tbl.definegraph(\"Y = 1/X\", (\"x\", \"1/x\"))\n tbl.definegraph(\"All data\", (\"x\", \"x^2\", \"1/x\"))\n # Print out the data as a simple \"table\" and in loggraph markup\n print(tbl.show())\n print(tbl.loggraph())",
"def make_tables(self):\n for t in self.tables:\n self.add_table(groupname=t['groupname'],\n tablename=t['tablename'],\n description=t['description'],\n tabletitle=t['tabletitle'])",
"def __init__(self, titles: list, rows: list, file_name: str, colour: list = False, convert_columns: bool = False,\n title_colours: list = False):\n self.titles = titles\n self.rows = rows\n self.table = None\n self.table_to_image = None\n self.file_name = file_name\n self.colour = colour\n self.convert_columns = convert_columns\n self.title_colours = title_colours\n\n # Setup and add rows to the table\n self.setup()\n self.add_rows()\n\n # Create the image from the table\n self.turn_into_image()\n self.save_image()",
"def make_HTML_table(l, other_frac, total, red, other_cat, fracs_labels_other,\r\n fracs_labels, dir_path, all_counts, level,\r\n prefs, pref_colors, background_color, label_color, chart_type,\r\n label, generate_image_type,\r\n plot_width, plot_height, bar_width, dpi, resize_nth_label,\r\n label_type, include_html_legend, include_html_counts):\r\n img_data = []\r\n\r\n # generate html for pie charts\r\n if chart_type == 'pie':\r\n # in the case the user wants to trim down the number of taxa\r\n if other_cat > 0:\r\n # first generate the pie charts containing an other group for all\r\n # taxa below the cutoff.\r\n fracs_labels_other.append((\"All Other Categories\", other_frac))\r\n title = TITLE_include % (l, total, total,\r\n len(fracs_labels_other), total - red, other_cat)\r\n all_taxons = [l]\r\n pie_charts_placement = []\r\n\r\n # make pie chart image\r\n pie = make_pie_chart(fracs_labels_other, dir_path, level,\r\n prefs, pref_colors, background_color, label_color,\r\n generate_image_type,\r\n plot_width, plot_height, bar_width, dpi,\r\n include_html_legend,\r\n props={'title': title})\r\n\r\n pie_charts_placement.append(pie[0] + ' ' + pie[1] +\r\n '</td></tr><tr><td>' + pie[2] +\r\n '</td></tr><tr><td class=\"ntitle\">')\r\n\r\n # second generate the pie charts where the other category is removed\r\n # and percents are recalculated\r\n title = TITLE_exclude % (l, red, total, len(fracs_labels),\r\n total - red, other_cat)\r\n\r\n # make pie chart image\r\n pie = make_pie_chart(fracs_labels, dir_path, level,\r\n prefs, pref_colors, background_color, label_color,\r\n generate_image_type,\r\n plot_width, plot_height, bar_width, dpi,\r\n include_html_legend,\r\n props={'title': title})\r\n\r\n pie_charts_placement.append(pie[0] + ' ' + pie[1] +\r\n '</td></tr><tr><td class=\"ntitle\">' +\r\n pie[2])\r\n\r\n all_taxons.extend(pie_charts_placement)\r\n all_taxons.extend((\" \", \" \"))\r\n\r\n # put the charts into the html image data\r\n img_data.append(TABLE_graph % tuple(all_taxons))\r\n img_data.append(DATA_TABLE_HTML % ''.join(all_counts))\r\n\r\n else:\r\n # if there is no category cutoff generate plots, without other cat\r\n title = TITLE % (l, total, total, len(fracs_labels_other))\r\n all_taxons = [l]\r\n\r\n # make pie chart image\r\n pie = make_pie_chart(fracs_labels_other, dir_path, level,\r\n prefs, pref_colors, background_color, label_color,\r\n generate_image_type,\r\n plot_width, plot_height, bar_width, dpi,\r\n include_html_legend,\r\n props={'title': title})\r\n\r\n all_taxons.extend(pie)\r\n\r\n # put the charts into the html image data\r\n img_data.append(TABLE_graph % tuple(all_taxons))\r\n img_data.append(DATA_TABLE_HTML % ''.join(all_counts))\r\n\r\n # generate html for bar and area charts\r\n elif chart_type == 'area' or chart_type == 'bar':\r\n\r\n taxa_percents = fracs_labels_other\r\n sample_ids = l\r\n taxa = other_cat\r\n\r\n all_categories = []\r\n title = TITLE % (label, total, total, len(fracs_labels_other))\r\n all_taxons = [label]\r\n\r\n # make area chart image\r\n area = make_area_bar_chart(sample_ids, taxa_percents, taxa, dir_path,\r\n level, prefs, pref_colors,\r\n background_color, label_color, chart_type,\r\n generate_image_type,\r\n plot_width, plot_height, bar_width, dpi,\r\n resize_nth_label, label_type,\r\n include_html_legend, include_html_counts,\r\n props={'title': title})\r\n\r\n all_taxons.extend(area)\r\n\r\n # put the charts into the html image data\r\n img_data.append(TABLE_graph % tuple(all_taxons))\r\n\r\n return img_data",
"def table_plot(true4_found4_corr, true4_found4_incorr, true4_found3,\n true3_found4, true3_found3, savename=None):\n \n # Prepare plot on which to place table\n _, ax = plt.subplots()\n plt.xlim(-0.1,5.1)\n plt.ylim(-0.1,3.7)\n ax.axis('off')\n\n n_events = sum([true4_found4_corr, true4_found4_incorr, true4_found3,\n true3_found4, true3_found3])\n\n n_col_1 = sum([true4_found4_corr, true4_found4_incorr, true4_found3])\n n_col_2 = sum([true3_found4, true3_found3])\n\n n_row_1 = sum([true4_found4_corr, true4_found4_incorr, true3_found4])\n n_row_2 = sum([true4_found3, true3_found3])\n \n if n_col_1 != 0:\n true4_found4_corr_pc = true4_found4_corr / n_col_1 * 100\n true4_found4_incorr_pc = true4_found4_incorr / n_col_1 * 100\n true4_found3_pc = true4_found3 / n_col_1 * 100\n else:\n true4_found4_corr_pc = 0\n true4_found4_incorr_pc = 0\n true4_found3_pc = 0\n if n_col_2 != 0:\n true3_found4_pc = true3_found4 / n_col_2 * 100\n true3_found3_pc = true3_found3 / n_col_2 * 100\n else:\n true3_found4_pc = 0\n true3_found3_pc = 0\n\n # add a whole bunch of squares and text\n ax.text(0.5,1, \"4th Jet\\nReco\", fontsize=18, verticalalignment='center', horizontalalignment='center', fontweight='heavy')\n ax.add_patch(patches.Rectangle((0,0),1,2,linewidth=1,edgecolor='#262626',facecolor='w'))\n\n ax.text(1.5,1+1/3, \"4th jet\\nfound\", fontsize=13, verticalalignment='center', horizontalalignment='center', fontweight='heavy')\n ax.text(1.5,1, f\"({n_row_1:.0f})\", fontsize=10, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((1,0),1,1-1/3,linewidth=1,edgecolor='#262626',facecolor='w'))\n\n ax.text(3-0.05,1/3, f\"{true4_found3_pc:.1f}%\", fontsize=14, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.text(3-0.05,1/9, f\"({true4_found3:.0f})\", fontsize=10, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((2,0),2-0.1,1-1/3,linewidth=1,edgecolor='#262626',facecolor='#ffff66'))\n\n ax.text(4.45,1/3, f\"{true3_found3_pc:.1f}%\", fontsize=14, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.text(4.45,1/9, f\"({true3_found3:.0f})\", fontsize=10, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((4-0.1,0),1+0.1,1-1/3,linewidth=1,edgecolor='#262626',facecolor='#00ff66'))\n\n ax.text(1.5,0.4, \"No 4th jet\\nfound\", fontsize=13, verticalalignment='center', horizontalalignment='center', fontweight='heavy')\n ax.text(1.5,1/9, f\"({n_row_2:.0f})\", fontsize=10, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((1,1-1/3),1,1+1/3,linewidth=1,edgecolor='#262626',facecolor='w'))\n\n ax.text(2.5,1+2/3, \"Correct\\n4th jet\", fontsize=14, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((2,1-1/3),1,0.5*(1+1/3),linewidth=1,edgecolor='#262626',facecolor='w'))\n\n ax.text(2.5,1, \"Incorrect\\n4th jet\", fontsize=14, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((2,1-1/3+0.5*(1+1/3)),1,0.5*(1+1/3),linewidth=1,edgecolor='#262626',facecolor='w'))\n\n ax.text(3.45,1+2/3, f\"{true4_found4_corr_pc:.1f}%\", fontsize=14, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.text(3.45,1+2/3-2/9, f\"({true4_found4_corr:.0f})\", fontsize=10, 
verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((3,1-1/3),1-0.1,0.5*(1+1/3),linewidth=1,edgecolor='k',facecolor='#ff6666'))\n\n ax.text(3.45,1, f\"{true4_found4_incorr_pc:.1f}%\", fontsize=14, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.text(3.45,1-2/9, f\"({true4_found4_incorr:.0f})\", fontsize=10, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((3,1-1/3+0.5*(1+1/3)),1-0.1,0.5*(1+1/3),linewidth=1,edgecolor='#262626',facecolor='#00ff66'))\n\n ax.text(4.45,1+1/3, f\"{true3_found4_pc:.1f}%\", fontsize=14, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.text(4.45,1+1/3-2/9, f\"({true3_found4:.0f})\", fontsize=10, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((4-0.1,1-1/3),1+0.1,1+1/3,linewidth=1,edgecolor='#262626',facecolor='#ff6666'))\n\n ax.text(3,2.375, \"4th tag exists\", fontsize=14, verticalalignment='center', horizontalalignment='center', fontweight='heavy')\n ax.text(3,2.375-2/9, f\"({n_col_1:.0f})\", fontsize=10, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((2,2),2-0.1,0.75,linewidth=1,edgecolor='#262626',facecolor='w'))\n\n ax.text(4.45,2.375, \"No 4th tag\", fontsize=14, verticalalignment='center', horizontalalignment='center', fontweight='heavy')\n ax.text(4.45,2.375-2/9, f\"({n_col_2:.0f})\", fontsize=10, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((4-0.1,2),1+0.1,0.75,linewidth=1,edgecolor='#262626',facecolor='w'))\n\n ax.text(3.5,3.1, \"Truth-Matching\", fontsize=18, verticalalignment='center', horizontalalignment='center', fontweight='heavy')\n\n ax.text(1,2.375, f\"(# events={n_events:.0f})\", fontsize=14, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((2,2+0.75),3,0.75,linewidth=1,edgecolor='#262626',facecolor='w'))\n\n # format and show/save\n plt.tight_layout()\n if savename:\n plt.savefig(f\"table_{savename}.png\", dpi=300)\n plt.show()",
"def create_numbers_table():\n work_tuples = parse_columns()\n print('\\n\\n\\n ----- Tableau récapitulatif -----')\n print('-----------------------')\n for ii in work_tuples:\n line = '|'\n for ij in ii:\n line += ' ij |'\n print(line)\n print('-----------------------')",
"def construct_table(self):\n table_str = self.header_row\n row_lbls, col_lbls = self.get_idxvals()\n for r,rlbl in enumerate(row_lbls):\n row_data = [self.data[rlbl,clbl] for clbl in col_lbls]\n table_str += self.construct_row(r, row_data)\n \n return table_str",
"def start_table(self):\n self.col_widths = []\n self.result = \"\""
]
| [
"0.69752",
"0.6746598",
"0.66616607",
"0.657273",
"0.6527887",
"0.6494611",
"0.6466136",
"0.6444683",
"0.64372617",
"0.63944316",
"0.63084507",
"0.63014144",
"0.6282424",
"0.62796277",
"0.62533253",
"0.6238252",
"0.62094986",
"0.61975396",
"0.61698943",
"0.6117473",
"0.6115017",
"0.6107175",
"0.6093482",
"0.6071403",
"0.60564655",
"0.6032475",
"0.601465",
"0.5985491",
"0.5965248",
"0.5959575"
]
| 0.7961645 | 0 |
Create accuracy plots for each batch size. Load in accuracy information into useful lists, then create a plot. Each plot has the accuracy for all 4 datasets listed. | def create_batch_plots(accuracies, batch_sizes):
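#Expected inputs (inferred from the usage below, not stated in the original): accuracies is a sequence of 4 dicts
#(Python CPU, Python GPU, C++ CPU, C++ GPU), each mapping a batch size to a list of per-epoch accuracy percentages;
#batch_sizes lists the batch sizes in the same order as the dict keys.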
#Get accuracy data
p_cpu_accuracy = list(accuracies[0].values())
p_gpu_accuracy = list(accuracies[1].values())
c_cpu_accuracy = list(accuracies[2].values())
c_gpu_accuracy = list(accuracies[3].values())
#Create plot for each batch size comparing CPU and GPU accuracy
x_axis = list(range(1, len(p_cpu_accuracy[0]) + 1))
#Find the minimum accuracy over every batch size in all 4 datasets, subtract a 0.05 margin, and floor the result at 0 to set the y-axis lower limit.
min_y = max(min([min(x) for x in [[min(dataset[key]) for key in dataset] for dataset in accuracies]]) - 0.05, 0.0)
for item in range(len(p_cpu_accuracy)):
plt.clf()
plt.close()
plt.figure(figsize=(10.8,9.4)).canvas.set_window_title('CPU vs GPU MNIST Neural Network Accuracy, Python and C++, Batch Size {}'.format(batch_sizes[item]))
l1, = plt.plot(x_axis, p_cpu_accuracy[item], '-o')
l2, = plt.plot(x_axis, p_gpu_accuracy[item], '-s')
l3, = plt.plot(x_axis, c_cpu_accuracy[item], '-o')
l4, = plt.plot(x_axis, c_gpu_accuracy[item], '-s')
plt.legend((l1, l2, l3, l4), ('P CPU Acc', 'P GPU Acc', 'C CPU Acc', 'C GPU Acc'))
plt.xlabel('Epoch')
plt.ylabel('Accuracy (%)')
plt.ylim((min_y, 100.0))
plt.grid()
plt.savefig('results\\figures\\size_{}_acc.png'.format(batch_sizes[item])) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def accuracy_plot(LS_sizes, data_fun):\r\n\r\n opt_neigh = []\r\n\r\n #plot of optimal n_neighbors as a function of the LS size\r\n\r\n for size in LS_sizes:\r\n\r\n acc = []\r\n neighbors_values = np.arange(1,size+1,1)\r\n\r\n # For a given LS size, plots of accuracy(n_neighbors)\r\n\r\n for value in neighbors_values:\r\n\r\n X_train, y_train, X_test, y_test = data_fun(n_ts=500, n_ls=size)\r\n\r\n clf = KNeighborsClassifier(n_neighbors = value)\r\n clf = clf.fit(X_train, y_train)\r\n acc.append(clf.score(X_test,y_test))\r\n\r\n plt.figure()\r\n plt.plot(neighbors_values,acc, '.')\r\n plt.title(\"Evolution of accuracy as a function \\nof n_neighbors for LS_size = {} samples, for {}.\".format(size, data_fun.__name__))\r\n plt.savefig(\"acc(n_neigh)_{}_{}.pdf\".format(size, data_fun.__name__))\r\n\r\n opt_neigh.append(np.argmax(acc)+1)\r\n\r\n plt.figure()\r\n plt.plot(LS_sizes, opt_neigh, '.')\r\n plt.title(\"Optimal n_neighbors as a function \\nof the size of the learning sample, for {}.\".format(data_fun.__name__))\r\n plt.savefig(\"opt_n_neigh(LS_size)_{}.pdf\".format(data_fun.__name__))",
"def make_accuracy_plot(num_trials=10):\n data = load_digits()\n # print data.DESCR\n train_percentages = range(5, 95, 5)\n test_accuracies = numpy.zeros(len(train_percentages))\n\n for i in range(len(train_percentages)):\n individual_trial_accuracies = []\n for j in range(num_trials):\n X_train, X_test, y_train, y_test = train_test_split(data.data, data.target, train_size=train_percentages[i]*.01)\n model = LogisticRegression(C=10**-10)\n model.fit(X_train, y_train)\n individual_trial_accuracies.append(model.score(X_test, y_test))\n test_accuracies[i] = numpy.mean(individual_trial_accuracies)\n\n fig = plt.figure()\n plt.plot(train_percentages, test_accuracies, 'b')\n plt.xlabel('Percentage of Data Used for Training')\n plt.ylabel('Accuracy on Test Set')\n plt.show()",
"def plot_accuracy(self):\n plot_title, img_title = self.prep_titles(\"\")\n test_legend = ['training data', 'test data']\n\n # Data for plotting x- and y-axis\n x = np.arange(1, CFG.EPOCHS + 1)\n y = [self.tr_accuracy, self.test_accuracy]\n\n # prints x and y-axis values\n print(f'x: {x}')\n print(f'training: {self.tr_accuracy}')\n print(f'test: {self.test_accuracy}')\n\n plt.figure(figsize=(CFG.FIG_WIDTH, CFG.FIG_HEIGHT))\n\n # Create the lineplot\n for line in range(2):\n ax = sns.lineplot(x=x, y=y[line], color=CFG.COLOR_ACCURACY[line], label=test_legend[line])\n\n if CFG.ANNOTATE:\n ax.set(xlabel='Epochs',\n ylabel='Accuracy (%)',\n title=plot_title,\n xlim=(1, CFG.EPOCHS + 2),\n ylim=(0, 119))\n\n for line in range(2):\n for e in range(0, CFG.EPOCHS):\n if y[line][e] > CFG.ANNOTATE_LEVEL:\n value = \"{:.2f}\".format(y[line][e])\n label = \"epoch \" + str(e + 1) + \"\\n\" + value + \"%\"\n plt.annotate(label,\n xy=(x[e], y[line][e]),\n alpha=1,\n size=9,\n rotation=45,\n textcoords='offset pixels', xytext=(0, 7),\n ha='left', va='bottom')\n else:\n ax.set(xlabel='Epochs',\n ylabel='Accuracy (%)',\n title=plot_title,\n xlim=(1, CFG.EPOCHS),\n ylim=(0, 102))\n\n ax.legend(loc='best')\n\n self.save_plot(img_title)\n plt.show()",
"def cross_validation_visualization_accuracy_multiple(epochs, accs, save=False, filename=\"cross_validation_acc_multiple\"):\n \n for i in range(accs.shape[0]):\n plt.plot(epochs, accs[i], marker=\".\", color='r', label=str(i+1)+'th accuracy')\n \n plt.xlabel(\"epoch\")\n plt.ylabel(\"accuracy\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n if (save):\n plt.savefig(filename)",
"def train_visualization(output_path): \n log_path = output_path + 'output.log'\n Train_Cost, Valid_Cost, Test_Cost, Train_Acc, Valid_Acc, Test_Acc = log_reader(log_path)\n n_epoch = len(Train_Cost)\n\n x1 = range(n_epoch)\n x2 = range(n_epoch)\n y1 = Train_Cost\n y2 = Valid_Cost\n y3 = Test_Cost\n y4 = Train_Acc\n y5 = Valid_Acc\n y6 = Test_Acc\n plt.subplot(2, 1, 1)\n plt.plot(x1, y1, label=\"Train_Cost\", linewidth=2)\n plt.plot(x1, y2, label=\"Valid_Cost\", linewidth=2)\n plt.plot(x1, y3, label=\"Test_Cost\", linewidth=2)\n\n plt.title('binary cross entropy vs. epoches')\n plt.ylabel('binary cross entropy')\n plt.legend(loc='best')\n plt.subplot(2, 1, 2)\n plt.plot(x2, y4, label=\"Train_Acc\", linewidth=2)\n plt.plot(x2, y5, label=\"Valid_Acc\", linewidth=2)\n plt.plot(x2, y6, label=\"Test_Acc\", linewidth=2)\n plt.xlabel('Accuracy@20 vs. epoches')\n plt.ylabel('Accuracy@20')\n plt.legend(loc='best')\n plt.savefig(output_path + 'loss_fig.png')\n # plt.show()",
"def main():\n args = parse_args()\n\n with open(args.train_details_json, mode='r', encoding='utf-8') as json_f:\n results_dict = json.load(json_f)[-1]\n\n losses_plot = plt.figure()\n plt.plot(range(1, len(results_dict['train_loss']) + 1),\n results_dict['train_loss'])\n plt.plot(range(1, len(results_dict['val_loss']) + 1),\n results_dict['val_loss'])\n plt.plot(range(1, len(results_dict['test_loss']) + 1),\n results_dict['test_loss'])\n plt.legend(['train', 'val', 'test'])\n plt.title(f'loss vs epoch for {args.model} model on {args.dataset} dataset')\n plt.xlabel('epoch')\n plt.ylabel('loss')\n plt.grid(True)\n losses_plot.set_size_inches((8, 8))\n losses_plot.savefig(\n os.path.join(FIGURES_DIR,\n f'{args.dataset}_{args.model}_losses_plot.png'))\n\n accuracies_plot = plt.figure()\n plt.plot(range(1, len(results_dict['train_acc']) + 1),\n results_dict['train_acc'])\n plt.plot(range(1, len(results_dict['val_acc']) + 1),\n results_dict['val_acc'])\n plt.plot(range(1, len(results_dict['test_acc']) + 1),\n results_dict['test_acc'])\n plt.legend(['train', 'val', 'test'])\n plt.title(f'accuracy vs epoch for {args.model} '\n f'model on {args.dataset} dataset')\n plt.xlabel('epoch')\n plt.ylabel('accuracy')\n plt.grid(True)\n accuracies_plot.set_size_inches((8, 8))\n accuracies_plot.savefig(\n os.path.join(FIGURES_DIR,\n f'{args.dataset}_{args.model}_accuracies_plot.png'))",
"def accuracy_plot(training, test, layers, data_size, n_neighbours, learning_rate, dropout_rate):\n\n plt.figure()\n plt.plot(training, label=\"Training\")\n plt.plot(test, label=\"Test\")\n plt.xlabel(\"Iterations\", size='medium')\n plt.ylabel(\"Accuracy function (%)\", size='medium')\n plt.suptitle(\"Accuracy function while training the neural network\", size='medium', ha='center')\n plt.title(\"layers: {} with dropout rate of {}, learning rate: {}\".format(layers, dropout_rate, learning_rate),\n size='small', ha='center')\n if n_neighbours == 0:\n plt.figtext(0.83, 0.80, \"Neighbours\\nexcluded\", size='medium')\n else:\n plt.figtext(0.83, 0.80, \"{} neighbours\\nincluded\".format(n_neighbours), size='medium')\n plt.figtext(0.83, 0.70, \"{}\\nsamples\".format(data_size), size='medium')\n plt.legend(loc='right', bbox_to_anchor=(1.3, 0.5))\n plt.subplots_adjust(right=0.8)\n\n working_dir = os.path.dirname(os.path.abspath(__file__))\n saving(working_dir + \"/output_ANN/accuracy_plots/{}_accuracy_{}\".format(n_neighbours, data_size))",
"def plot_testacc_numlabels(dataset, models, results, path, suffix):\n res = results[results['dataset'] == dataset]\n plt.close()\n for mod in models:\n res_model = res[res['model'] == mod]\n res_model.sort_values(by=['num_labels'], inplace=True)\n plt.scatter(res_model['num_labels'], res_model['test_acc'],\n label=mod, marker='o')\n plt.ylabel('test accuracy')\n plt.xlabel('num labels')\n plt.title('Dataset: {}'.format(dataset))\n plt.legend()\n plt.tight_layout()\n if suffix is None:\n plt.savefig('{}{}_performance.png'.format(path, dataset))\n else:\n plt.savefig('{}{}_performance_{}.png'.format(path, dataset,\n suffix))",
"def plot_accuracy_and_loss(histories=None):\n fig = subplots.make_subplots(rows=2, cols=2, subplot_titles=('Training accuracy', 'Validation accuracy',\n 'Training loss ', 'Validation loss'))\n\n def append_trace(model_name, acc, val_acc, loss, val_loss, epochs):\n e = list(range(epochs))\n color = random.choice(hex_colors_only)\n trace_ta = create_trace(e, acc, model_name, color)\n trace_va = create_trace(e, val_acc, model_name, color)\n trace_tl = create_trace(e, loss, model_name, color)\n trace_vl = create_trace(e, val_loss, model_name, color)\n\n fig.append_trace(trace_ta, 1, 1)\n fig.append_trace(trace_va, 1, 2)\n fig.append_trace(trace_tl, 2, 1)\n fig.append_trace(trace_vl, 2, 2)\n\n if histories is None:\n df_accuracies, df_losses = get_tensorboard_scalars()\n for model_name in df_accuracies.model_name.unique():\n df_acc = df_accuracies.loc[df_accuracies.model_name == model_name]\n df_l = df_losses.loc[df_losses.model_name == model_name]\n\n acc = df_acc.loc[df_acc.result_of == 'train'].accuracy.values.tolist()\n val_acc = df_acc.loc[df_acc.result_of == 'validation'].accuracy.values.tolist()\n loss = df_l.loc[df_l.result_of == 'train'].loss.values.tolist()\n val_loss = df_l.loc[df_l.result_of == 'validation'].loss.values.tolist()\n epochs = len(df_acc)\n\n append_trace(model_name, acc, val_acc, loss, val_loss, epochs)\n\n else:\n for model_name, history in histories.items():\n acc = history['accuracy']\n val_acc = history['val_accuracy']\n loss = history['loss']\n val_loss = history['val_loss']\n epochs = list(range(1, len(acc) + 1))\n\n append_trace(model_name, acc, val_acc, loss, val_loss, epochs)\n fig['layout']['xaxis'].update(title='Epoch')\n fig['layout']['xaxis2'].update(title='Epoch')\n fig['layout']['yaxis'].update(title='Accuracy', range=[0, 1])\n fig['layout']['yaxis2'].update(title='Loss', range=[0, 1])\n\n iplot(fig, filename='accuracies-losses')",
"def make_accuracy_plot(ax,\n groundtruth_boxes,\n hpu_boxes,\n cpu_boxes,\n hpu_strategy,\n label,\n N=10,\n num_graph_points=20,\n match_mode=\"ellipse\",\n):\n print \"Making plot for\", repr(label)\n print \"TODO: this should graph seconds per image\"\n mix_fractions = np.linspace(0, 1.0, num_graph_points)\n # Plot confidence intervals\n min_ci = []\n max_ci = []\n mean_accs = []\n stderr_accs = []\n for mix_fraction in mix_fractions:\n accuracies = [\n maximum_F_score(\n groundtruth_boxes,\n hpu_strategy(hpu_boxes, cpu_boxes, mix_fraction),\n match_mode=match_mode,\n )\n for _ in xrange(N)\n ]\n mean_accs.append(np.mean(accuracies))\n stderr_accs.append(np.std(accuracies, ddof=1) / np.sqrt(N))\n #print mix_fraction, np.mean(accuracies)\n ax.errorbar(mix_fractions, mean_accs, stderr_accs, label=label)\n ax.set_xlabel(\"Fraction of HPU-labeled images\")\n ax.set_ylabel(\"Maximum F-score\")",
"def create_val_plots(x_vals, vals_zeros,vals_ones):\n plt.plot(x_vals, vals_zeros,label=\"non-fraud\")\n plt.plot(x_vals, vals_ones,label=\"fraud\")\n plt.title('Accuracy per number of iterations')\n plt.xlabel('Number of Iterations')\n plt.ylabel('Accuracy')\n plt.xticks(np.arange(100, 210, 10))\n plt.legend() \n plt.show()\n # plt.savefig('./analysis_deliverable/visualizations/accuracy_plot.png')",
"def plot_accuracy(model_fit, save_folder): \n train_acc = model_fit.history['binary_accuracy']\n val_acc = model_fit.history['val_binary_accuracy']\n epoch_axis = np.arange(1, len(train_acc) + 1)\n plt.title('Train vs Validation Accuracy')\n plt.plot(epoch_axis, train_acc, 'b', label='Train Acc')\n plt.plot(epoch_axis, val_acc,'r', label='Val Acc')\n plt.xlim([1, len(train_acc)])\n plt.xticks(np.arange(min(epoch_axis), max(epoch_axis) + 1, round((len(train_acc) / 10) + 0.5)))\n plt.legend(loc='lower right')\n plt.ylabel('Accuracy')\n plt.xlabel('Epochs')\n plt.savefig(save_folder + '/accuracy.png')\n plt.show()\n plt.close()",
"def show_plots(history):\n loss_vals = history['loss']\n val_loss_vals = history['val_loss']\n epochs = range(1, len(history['accuracy'])+1)\n \n f, ax = plt.subplots(nrows=1,ncols=2,figsize=(16,4))\n \n # plot losses on ax[0]\n ax[0].plot(epochs, loss_vals, color='navy',marker='o', linestyle=' ', label='Training Loss')\n ax[0].plot(epochs, val_loss_vals, color='firebrick', marker='*', label='Validation Loss')\n ax[0].set_title('Training & Validation Loss')\n ax[0].set_xlabel('Epochs')\n ax[0].set_ylabel('Loss')\n ax[0].legend(loc='best')\n ax[0].grid(True)\n \n # plot accuracies\n acc_vals = history['accuracy']\n val_acc_vals = history['val_accuracy']\n\n ax[1].plot(epochs, acc_vals, color='navy', marker='o', ls=' ', label='Training Accuracy')\n ax[1].plot(epochs, val_acc_vals, color='firebrick', marker='*', label='Validation Accuracy')\n ax[1].set_title('Training & Validation Accuracy')\n ax[1].set_xlabel('Epochs')\n ax[1].set_ylabel('Accuracy')\n ax[1].legend(loc='best')\n ax[1].grid(True)\n \n plt.show()\n plt.close()\n \n # delete locals from heap before exiting\n del loss_vals, val_loss_vals, epochs, acc_vals, val_acc_vals",
"def plot_results(\n train_data: tuple[Tensor, Tensor],\n test_data: tuple[Tensor, Tensor],\n correct_class: Tensor\n):\n #fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(21,7), subplot_kw=dict(box_aspect=1))\n fig1, (ax1, ax2) = plt.subplots(1, 2, figsize=(14,7), subplot_kw=dict(box_aspect=1))\n fig2, ax3 = plt.subplots(figsize=(7,7), subplot_kw=dict(box_aspect=1))\n ax1.set_title('Training data')\n plot_dataset(train_data, ax1)\n\n ax2.set_title('Test data')\n plot_dataset(test_data, ax2)\n\n ax3.set_title('Test prediction correctness')\n plot_dataset((test_data[0], correct_class.int()), ax3, cmap={0: '#ff0000', 1: '#00ff00'})\n \n fig1.savefig('plots/datasets')\n fig2.savefig('plots/predictions')\n plt.show()",
"def plot_acc(acc_v, acc_t, save_plots_path):\n\n plt.figure()\n plt.plot(acc_v, label='Validation acc')\n plt.plot(acc_t, label='Training acc')\n plt.legend()\n title = 'Accuracy per epoch'\n plt.title(title)\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Accuracy\")\n plt.savefig(save_plots_path + \"swag_accuracy_plot.png\")",
"def show_train_images(train_data, train_labels):\n plt.figure(1, figsize=(8, 8))\n n = 0\n\n for i in range(16):\n n += 1\n # each time random images are loaded\n # r = np.random.randint(0, train_data.shape[0], 1)\n plt.subplot(4, 4, n)\n plt.subplots_adjust(hspace=0.5, wspace=0.5)\n plt.imshow(train_data[i] / 255.)\n plt.title('{}'.format(train_labels[i]))\n plt.xticks([]), plt.yticks([])\n plt.show()",
"def plot_eval_3(trained_model, X_val, y_val, image_name):\n # FOR EACH CLASS\n # val_pred = trained_model.predict_proba(X_val, num_iteration=iteration)\n \n iterations = trained_model.booster_.current_iteration()\n# results = np.zeros((2, iterations))\n results = np.zeros((iterations,))\n for pos in range(iterations):\n \n # Calculate the current iteration (from 1 to iterations)\n iteration = pos + 1\n \n # Predict validation set for the current iteration\n# start_time = timeit.default_timer()\n val_pred = trained_model.predict(X_val, num_iteration=iteration)\n# end_time = timeit.default_timer()\n# time = end_time - start_time\n# speed = int(X_val.shape[0] / time)\n \n # Number of hits\n val_ok = (val_pred == y_val)\n \n # Percentage of hits\n val_acc = val_ok.sum() / val_ok.size\n \n # Actualize data for plotting results\n# results[0][pos] = time\n# results[1][pos] = val_acc\n results[pos] = val_acc\n \n # Generate accuracy plot\n plt.figure()\n# plt.plot(results[0], results[1], 'b')\n plt.plot(results, 'b')\n plt.title('Validation accuracy')\n plt.xlabel('iterations')\n plt.ylabel('accuracy')\n plt.legend()\n \n # Save validation plot\n plot_file = os.path.join(OUTPUT_DIR, \"{}_val_accuracy\".format(image_name))\n plt.savefig(plot_file + \".svg\", bbox_inches='tight', format='svg')",
"def plot_results(self):\n\n\n f1, ax1 = plt.subplots()\n h1, = ax1.plot(self.history[\"step\"], self.history[\"trainLoss\"],\\\n \"b-\", label=\"Loss - Train\")\n h2, = ax1.plot(self.history[\"step\"], self.history[\"validLoss\"],\\\n \"b.\", label=\"Loss - Validation\")\n\n ax1.set_ylabel(\"Loss\", color = \"blue\")\n ax1.tick_params(\"y\", color = \"blue\")\n ax1.yaxis.label.set_color(\"blue\")\n ax1.set_xlabel(\"Training Steps [{}]\".format(self.FLAGS.eval_every))\n\n ax2 = ax1.twinx()\n h3, = ax2.plot(self.history[\"step\"], self.history[\"trainAccr\"], \"r-\",\\\n label = \"Accuracy - Train\")\n h4, = ax2.plot(self.history[\"step\"], self.history[\"validAccr\"], \"r.\",\\\n label = \"Accuracy - Validation\")\n\n ax2.set_ylabel(\"Accuracy\", color = \"red\")\n ax2.tick_params(\"y\", color = \"red\")\n ax2.yaxis.label.set_color(\"red\")\n\n hds = [h1,h2,h3,h4]\n lbs = [l.get_label() for l in hds]\n ax1.legend(hds, lbs)\n f1.tight_layout()\n plt.savefig(\"trainingHistory.png\")\n\n plt.close(f1)\n #plt.show()",
"def visualise_dataset_balancer_results_multi_dataset(dataset_results):\n current_time = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n file_name = \"raw_dump_{0}.txt\".format(current_time)\n with open(os.path.dirname(os.path.realpath(__file__)) + \"/../results/\" + file_name, \"wb\") as output_file:\n output_file.write(str(dataset_results))\n sns.set(style='ticks')\n fig = plt.figure(figsize=(10, 10))\n ax = fig.add_subplot(1, 1, 1)\n markers = [\"s\", \"o\", \"^\", \"d\", \"*\"]\n sizes = [150, 200, 200, 200, 250]\n colors = [\"#64B3DE\", \"#1f78b4\", \"#B9B914\", \"#FBAC44\", \"#bc1659\", \"#33a02c\", \"#6ABF20\", \"#ff7f00\", \"#6a3d9a\", \"grey\", \"#b15928\", \"#e31a1c\", \"black\"]\n hatches = [None, \"////\", \"..\"]\n color_dict = {}\n index = 0\n for (classifier_description, result_arr) in dataset_results[0][1]:\n color_dict[classifier_description] = colors[index]\n index += 1\n\n # Move left y-axis and bottom x-axis to centre, passing through (0,0)\n ax.spines['left'].set_position('center')\n ax.spines['bottom'].set_position((\"axes\", 0.5))\n\n # Eliminate upper and right axes\n ax.spines['right'].set_color('none')\n ax.spines['top'].set_color('none')\n\n # Show ticks in the left and lower axes only\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n ax.set_axis_on()\n ax.spines['left'].set_color('black')\n ax.spines['bottom'].set_color('black')\n plt.xlabel(\"Change in TPR\")\n plt.ylabel(\"Change in TNR\")\n\n ax.xaxis.set_label_coords(0.1, 0.53)\n ax.yaxis.set_label_coords(0.53, 0.9)\n\n plt.ylim(-0.3, 0.3)\n plt.xlim(-0.3, 0.3)\n data_set_labels = ([], [])\n balancer_labels = ([], [])\n data_set_index = 0\n for (data_set, dataset_result) in dataset_results:\n balancer_result_pos = {}\n balancer_result_neg = {}\n\n for (classifier_description, result_arr) in dataset_result:\n for (balancer_description, results) in result_arr:\n if balancer_description in balancer_result_pos:\n balancer_result_pos[balancer_description] = balancer_result_pos[balancer_description] + results[3]\n balancer_result_neg[balancer_description] = balancer_result_neg[balancer_description] + results[4]\n else:\n balancer_result_pos[balancer_description] = results[3]\n balancer_result_neg[balancer_description] = results[4]\n\n for (balancer_description, _) in dataset_result[0][1]:\n balancer_result_pos[balancer_description] = balancer_result_pos[balancer_description] / float(len(dataset_result))\n balancer_result_neg[balancer_description] = balancer_result_neg[balancer_description] / float(len(dataset_result))\n\n true_pos_arr = [value for _, value in balancer_result_pos.iteritems()]\n true_neg_arr = [value for _, value in balancer_result_neg.iteritems()]\n\n i = 0\n hatch_index = 0\n for key, value in balancer_result_pos.iteritems():\n if key != \"None\":\n if i != 0 and hatch_index == 0 and i % len(colors) == 0:\n hatch_index += 1\n\n if data_set_index == 0:\n balancer_labels[0].append(mpatches.Patch(facecolor=colors[i % len(colors)], hatch=hatches[hatch_index], label=key, alpha=0.8, edgecolor=\"black\"))\n balancer_labels[1].append(key)\n\n ax.scatter(value - balancer_result_pos[\"None\"], balancer_result_neg[key] - balancer_result_neg[\"None\"], marker=markers[data_set_index % len(markers)], hatch=hatches[hatch_index],\n s=sizes[data_set_index % len(markers)], alpha=0.8, color=colors[i % len(colors)], edgecolor=\"black\" if colors[i % len(colors)] != \"black\" else \"grey\",\n zorder=i % len(markers), lw=0.8)\n # Work around to get legend entries correct\n pt = 
ax.scatter(-99999999999, -9999999999, marker=markers[data_set_index % len(markers)], s=sizes[data_set_index % len(markers)], alpha=0.8, color=\"white\", edgecolor=\"black\",\n zorder=data_set_index, lw=0.8)\n if i == 0:\n data_set_labels[0].append(pt)\n data_set_labels[1].append(data_set)\n i += 1\n hatch_index = (hatch_index + 1) % len(hatches)\n data_set_index += 1\n legend = plt.legend(data_set_labels[0] + balancer_labels[0], data_set_labels[1] + balancer_labels[1], loc='upper right', bbox_to_anchor=(1, 1), fancybox=False, frameon=False, ncol=1)\n legend.get_frame().set_facecolor('#ffffff')\n\n sns.despine()\n current_time = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n plt.savefig(os.path.dirname(os.path.realpath(__file__)) + \"/../results/classifier_dataset_plt_{0}.png\".format(current_time), bbox_extra_artists=((legend,)), bbox_inches='tight')\n plt.close(fig)",
"def cross_validation_visualization_accuracy(epochs, accs, save=False, filename=\"cross_validation_acc\"):\n plt.plot(epochs, accs, marker=\".\", color='r', label='accuracy')\n plt.xlabel(\"epoch\")\n plt.ylabel(\"accuracy\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n if (save):\n plt.savefig(filename)",
"def show_batch(dataloader):\n bs = dataloader.batch_size\n num_samples = dataloader.dataset.data.shape[0]\n batches = num_samples // bs\n batch_id = np.random.choice(batches)\n one_batch = list(dataloader)[batch_id]\n batch_imgs, batch_labels = one_batch[0], one_batch[1]\n class_idx = dataloader.dataset.class_to_idx\n idx_class = idx_to_class(class_idx)\n n_rows = n_cols = int(np.sqrt(len(batch_imgs)))\n fig, axes = plt.subplots(n_rows, n_cols, figsize=(10, 10))\n if batch_imgs.shape[1] == 1:\n cmap = 'gray'\n else:\n cmap = None\n for i, ax in enumerate(axes.flatten()):\n ax.axis('off')\n title = f'Class : {idx_class[batch_labels[i].item()]}'\n single_img = np.clip(batch_imgs[i].squeeze().permute(1, 2, 0).numpy(), 0, 1)\n ax.imshow(single_img, cmap=cmap)\n ax.set_title(title)\n fig.tight_layout()",
"def plot_acc(model_dir):\n ## extract loss from csv\n file_dir = os.path.join(model_dir, 'acc.csv')\n data = pd.read_csv(file_dir)\n epochs = data['epoch'].ravel()\n acc_train = data['acc_train'].ravel()\n acc_test = data['acc_test'].ravel()\n # epoch,acc_train,acc_test\n\n ## Theoretical Loss\n fig, ax = plt.subplots(1, 1, figsize=(7, 5), sharey=True, sharex=True, dpi=400)\n ax.plot(epochs, acc_train, label='train', color='green', alpha=0.8)\n ax.plot(epochs, acc_test, label='test', color='red', alpha=0.8)\n ax.set_ylabel('Accuracy', fontsize=10)\n ax.set_xlabel('Epoch', fontsize=10)\n ax.legend(loc='lower right', prop={\"size\": 15}, ncol=3, framealpha=0.5)\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n plt.tight_layout()\n\n ## create saving directory\n acc_dir = os.path.join(model_dir, 'figures', 'acc')\n os.makedirs(acc_dir, exist_ok=True)\n file_name = os.path.join(acc_dir, 'accuracy.png')\n plt.savefig(file_name, dpi=400)\n print(\"Plot saved to: {}\".format(file_name))\n file_name = os.path.join(acc_dir, 'accuracy.pdf')\n plt.savefig(file_name, dpi=400)\n plt.close()\n print(\"Plot saved to: {}\".format(file_name))",
"def plot_scatter_n_accuracy_joint(self, data_objects, labels, label_self, markers):\n dataframes = [self.df] + [data.df for data in data_objects]\n labels = [label_self] + labels\n\n acc = []\n n = []\n statistics = []\n for df, label in zip(dataframes, labels):\n acc = df.groupby('worker')['correct'].mean()\n n = df.groupby('worker')['question'].count()\n df_new = pd.concat([acc, n], axis=1)\n df_new['dataset'] = label\n statistics.append(df_new)\n\n df = pd.concat(statistics, axis=0)\n sns.lmplot('question', 'correct', data=df, hue='dataset',\n markers=markers, fit_reg=False)\n plt.xlabel('Number of questions answered')\n plt.ylabel('Accuracy')\n plt.xlim((0, None))\n plt.ylim((0, 1))\n plt.title('')\n return plt.gca()",
"def plot_results(models,\n data,\n batch_size=128,\n model_name=None):\n\n encoder, decoder = models\n x_test, y_test = data\n # display a 2D plot of the digit classes in the latent space\n z_mean = encoder(x_test)\n plt.figure(figsize=(12, 10))\n plt.scatter(z_mean[:, 0], z_mean[:, 1], c=y_test)\n plt.colorbar()\n plt.xlabel(\"z[0]\")\n plt.ylabel(\"z[1]\")\n \n plt.show()\n if not model_name is None:\n os.makedirs(model_name, exist_ok=True)\n filename = os.path.join(model_name, \"vae_mean.png\")\n plt.savefig(filename)\n filename = os.path.join(model_name, \"digits_over_latent.png\")\n # display a 30x30 2D manifold of digits\n n = 30\n digit_size = 28\n figure = np.zeros((digit_size * n, digit_size * n))\n # linearly spaced coordinates corresponding to the 2D plot\n # of digit classes in the latent space\n grid_x = np.linspace(-4, 4, n)\n grid_y = np.linspace(-4, 4, n)[::-1]\n\n for i, yi in enumerate(grid_y):\n for j, xi in enumerate(grid_x):\n z_sample = np.array([[xi, yi]])\n x_decoded = decoder(z_sample)\n digit = x_decoded[0].reshape(digit_size, digit_size)\n figure[i * digit_size: (i + 1) * digit_size,\n j * digit_size: (j + 1) * digit_size] = digit\n\n plt.figure(figsize=(10, 10))\n start_range = digit_size // 2\n end_range = n * digit_size + start_range + 1\n pixel_range = np.arange(start_range, end_range, digit_size)\n sample_range_x = np.round(grid_x, 1)\n sample_range_y = np.round(grid_y, 1)\n plt.xticks(pixel_range, sample_range_x)\n plt.yticks(pixel_range, sample_range_y)\n plt.xlabel(\"z[0]\")\n plt.ylabel(\"z[1]\")\n plt.imshow(figure, cmap='Greys_r')\n if not model_name is None:\n plt.savefig(filename)\n plt.show()",
"def sample_list3(data_list, rows=15, cols=4, start_with=0, show_every=2, scale=4, fig_name=None, start_inx=0):\n\n n_batch = len(data_list)\n _, ax = plt.subplots(rows, cols, figsize=[scale * cols, scale * rows])\n\n for ind in range(n_batch):\n # read data and calculate average precision\n input1 = data_list[ind]['slice1']\n input2 = data_list[ind]['slice2']\n label = data_list[ind]['label']\n hu0050 = data_list[ind]['hu0050']\n overlap = data_list[ind]['overlap']\n f_score = data_list[ind]['f1']\n mix_overlap = data_list[ind]['mix_overlap']\n noncal_eval = data_list[ind]['noncal_eval']\n file_path = data_list[ind]['file_path']\n if (ind - start_with) % show_every == 0:\n i = (ind - start_with) // show_every\n if i < rows:\n ax[i, 0].imshow(input1, cmap='gray')\n ax[i, 0].set_title(\"Slice {} ({}) \\n {}\".format(ind + start_inx, file_path, 'Input with HU(-100~155)'), loc='left')\n ax[i, 0].axis('off')\n\n ax[i, 1].imshow(input2, cmap='gray')\n ax[i, 1].set_title(\"{}\".format('Input with HU(200~1200)'))\n ax[i, 1].axis('off')\n\n ax[i, 2].imshow(gray2rgb(label))\n ax[i, 2].set_title('{}'.format('Label'))\n ax[i, 2].axis('off')\n\n ax[i, 3].imshow(gray2rgb(hu0050))\n ax[i, 3].set_title('{}'.format('Mask HU(0~50)'))\n ax[i, 3].axis('off')\n\n ax[i, 4].imshow(gray2rgb(overlap))\n ax[i, 4].set_title('{} (F1= {:.4f})'.format('Overlap', f_score))\n ax[i, 4].axis('off')\n\n # not all red pixels are within HU range 0~50\n\n if(np.sum(overlap == 76)) != 0:\n n_above50, n_below0, topk, buttomk = noncal_eval[0], noncal_eval[1], noncal_eval[2:7], noncal_eval[7:12]\n ax[i, 4].text(5, 30, \"top5 HU: {}\".format(topk), color='red')\n ax[i, 4].text(5, 60, \"but5 HU: {}\".format(buttomk), color='red')\n ax[i, 4].text(5, 90, \"Num of pixels HU>50: {}\".format(n_above50), color='red')\n ax[i, 4].text(5, 120, \"Num of pixels HU<0: {}\".format(n_below0), color='red')\n\n ax[i, 5].imshow(gray2rgb(mix_overlap))\n ax[i, 5].set_title('{} (F1= {:.4f})'.format('Label+Overlap', f_score))\n ax[i, 5].axis('off')\n\n # ax[i, 3].scatter(range(0, n_class), f_score)\n # ax[i, 3].set_title('Slice %d : Ave F-score = %0.2f' % (ind + start_inx, ave_f_score))\n # ax[i, 3].set_ylabel('F score')\n # ax[i, 3].set_ylim([-0.1, 1.1])\n\n # plt.show()\n if fig_name:\n plt.savefig(fig_name + '.pdf')\n plt.close()",
"def plot_data(losses, accuracies, name):\n # convert accuracies to percentages\n accuracies['Train'] = [acc * 100 for acc in accuracies['Train']]\n accuracies['Valid'] = [acc * 100 for acc in accuracies['Valid']]\n # set fontsize\n plt.rcParams.update({'font.size': 13})\n fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(8,5))\n ax1.set_xlabel('Number of Epochs')\n ax1.set_ylabel('Cross Entropy Loss')\n ax1.set_ylim(0,2)\n ax1.plot(losses['Train'], label='Training')\n ax1.plot(losses['Valid'], label='Validation')\n ax1.legend(loc='upper right')\n\n ax2.set_xlabel('Number of Epochs')\n ax2.set_ylabel('Accuracy (%)')\n ax2.set_ylim(0,100)\n ax2.plot(accuracies['Train'], label='Training')\n ax2.plot(accuracies['Valid'], label='Validation')\n ax2.legend(loc='upper left')\n\n fig.tight_layout()\n fig.savefig('../outputs/' + name)",
"def plot_train_results(metrics2record, loss_metric,\n train_metrics, test_metrics):\n pyplot.figure(figsize=(10, 5))\n min_, max_ = np.min(loss_metric), np.max(loss_metric)\n lg, = pyplot.plot(loss_metric)\n pyplot.yticks(min_ + np.arange(5) * (max_ - min_))\n # if learning_rate is not None:\n # lg, = pyplot.plot(learning_rate)\n pyplot.title('Loss')\n pyplot.xlabel('Epoch')\n pyplot.yscale('log')\n pyplot.show()\n\n for prm in basic_metrics:\n if prm in metrics2record:\n leg = []\n met_idx = metrics2record.index(prm)\n pyplot.figure(figsize=(10, 5))\n lg, = pyplot.plot(train_metrics[:, met_idx], label=('train'))\n leg.append(lg)\n lg, = pyplot.plot(test_metrics[:, met_idx], label=('test'))\n leg.append(lg)\n\n pyplot.legend(handles=leg)\n pyplot.title(prm)\n pyplot.xlabel('Epoch')\n pyplot.show()\n\n has_prf = any([(prm in PRF_metrics) for prm in metrics2record])\n if has_prf:\n pyplot.figure(figsize=(10, 5))\n leg = []\n for prm in PRF_metrics:\n if prm in metrics2record:\n met_idx = metrics2record.index(prm)\n lg, = pyplot.plot(train_metrics[:, met_idx],\n label=(prm + ':train'))\n leg.append(lg)\n\n for prm in PRF_metrics:\n if prm in metrics2record:\n met_idx = metrics2record.index(prm)\n lg, = pyplot.plot(test_metrics[:, met_idx],\n label=(prm + ':test'))\n leg.append(lg)\n\n pyplot.legend(handles=leg)\n pyplot.title('Precision / Recall')\n pyplot.xlabel('Epoch')\n pyplot.show()",
"def classificationError():\n print('Computing Classification Test Error')\n\n dl1, dl2 = CIFAR10TestLoader() \n err1, err2 = [], []\n\n # load every model \n with open('classification_error.csv','w') as clf:\n writer = csv.writer(clf)\n writer.writerow(['Classification Error Dataset 1','Classification Error Dataset 2'])\n\n for i in tqdm(range(5,95,2)):\n mtl = MTL()\n PATH = os.getcwd() + '/mtl_model_{d1}_vs_{d2}.pth'.format(d1=i, d2=100-i)\n mtl.load_state_dict(state_dict=torch.load(PATH))\n\n e1, e2 = accuracy(dl1, mtl), accuracy(dl2, mtl)\n err1.append(e1)\n err2.append(e2)\n writer.writerow([e1,e2])\n \n fig, ax_left = plt.subplots()\n ax_right = ax_left.twinx()\n ax_left.plot([0.05 + 0.02* i for i in range(45)], err1, color='black')\n ax_right.plot([0.05 + 0.02* i for i in range(45)], err2, color='red')\n ax_right.set_ylabel('Dataset 2 (red) accuracy')\n ax_left.set_ylabel('Dataset 1 (black) accuracy')\n ax_left.set_xlabel('Percentage of points from dataset 1 in training set')\n plt.title('Classification Accuracy over both datasets')\n plt.savefig('classification_error_plots.pdf')",
"def visualize_training(features, labels, pl):\n print(\"Visualizing training\")\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n # Take out each feature type, one at a time\n label_map = get_label_map(labels)\n\n for key in label_map.keys():\n like_ind = label_map[key]\n like_data = np.array([features[i] for i in like_ind])\n\n plt.scatter(like_data[:,0],like_data[:,1],label=key)\n\n # get limits\n xmin = features.column_min(0) - .5\n xmax = features.column_max(0) + .5\n ymin = features.column_min(1) - .5\n ymax = features.column_max(1) + .5\n\n plt.xlim(xmin,xmax)\n plt.ylim(ymin,ymax)\n\n # Track the current dividing line, as well as the number of epochs passed\n divider, = plt.plot([],[])\n epoch_tracker = plt.text(-1,.9, '', fontsize=15)\n\n def update(i):\n \"\"\"\n 1.) Get the next set of weights from the tracker\n 2.) Calculate and draw the new divider line\n 3.) Update the epoch counter\n 4.) If we are at the end of an epoch, plot a dashed divider line to track progress\n \"\"\"\n epoch = i//features.instance_count\n w = pl.weights_tracker[i]\n a = pl.accuracy_tracker[epoch]\n divider.set_data([xmin,xmax],[(-xmin * w[0] - w[2]) / w[1], (-xmax * w[0] - w[2]) / w[1]])\n epoch_tracker.set_text(\"{} {}\".format(epoch + 1, a))\n\n # Keep a shadow of the hyperplane at the end of each epoch\n if i % features.instance_count == 0:\n plot_hyperplane(w,xmin,xmax,iter = i, alpha = .3, color='black',linestyle='dashed')\n\n return divider\n\n ani = animation.FuncAnimation(fig, update, frames=range(len(pl.weights_tracker)), interval=250,repeat=False)\n plt.legend()\n\n # optional save file\n if len(sys.argv) >= 3 :\n ani.save(sys.argv[2], writer='imagemagick', fps=5)\n\n plt.show()",
"def draw_num_classes_graphs():\n values = [10, 50, 100, 250, 1000, 4000]\n for num_classes in values:\n print(\"Training model on {} most common classes.\".format(num_classes))\n model = create_pretrained_model(num_classes=num_classes)\n histories = train(model, num_classes, epochs=50)\n run_name = get_run_name(\"{}classes\".format(num_classes))\n save_learning_curves(histories, run_name)\n csv_path = os.path.join(\"plots/\", run_name, \"data.csv\")\n ut.write_csv_dict(histories,\n keys=['loss', 'acc', 'val_loss', 'val_acc'],\n filename=csv_path)"
]
| [
"0.73184514",
"0.7299639",
"0.70774925",
"0.69942814",
"0.6946279",
"0.6922432",
"0.68750584",
"0.6874822",
"0.6822587",
"0.6822159",
"0.67851657",
"0.6782538",
"0.6775013",
"0.6749557",
"0.6698696",
"0.6659146",
"0.6657379",
"0.6631417",
"0.6601273",
"0.6561536",
"0.6523589",
"0.6515112",
"0.650381",
"0.6482415",
"0.6480102",
"0.6479879",
"0.64569193",
"0.6448005",
"0.64406526",
"0.6423089"
]
| 0.78852516 | 0 |
Enables some of the global JAX flags for debugging. | def enable_jax_debugging_flags():
# Enable the NaN-checker behavior to cause JAX to hard-break on the first
# occurrence of a NaN.
jax.config.update('jax_debug_nans', True)
# Enable the compilation logger to check whether or not we're accidentally
# causing a lot of re-compilation (inspect logs for excessive jitting).
jax.config.update('jax_log_compiles', True)
  # Detect numpy-style automatic rank promotion. With 'warn' this only logs a
  # warning; use 'raise' instead to turn rank promotion into an error and force
  # strict, explicit casts.
  jax.config.update('jax_numpy_rank_promotion', 'warn')
# Print global JAX flags in logs.
logging.info('Global JAX flags: %s', jax.config.values) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_debug(flag):\n global debug\n debug = flag\n XLM.XLM_Object.debug = flag\n XLM.xlm_library.debug = flag\n XLM.ms_stack_transformer.debug = flag\n XLM.stack_transformer.debug = flag\n XLM.excel2007.debug = flag",
"def setDebug():\n\tglobal debug\n\tdebug = True",
"def _pma_set_debug_flag(flag):\n global _pma_debug\n\n if not isinstance(flag, (bool)):\n raise Exception(\"flag argument must be of class bool\")\n _pma_debug = flag\n if flag is True:\n print(\"Debug flag enabled. You will receive extra feedback and messages from pma_python (like this one)\")",
"def set_global_flags(self):\n\n import rpki.http, rpki.x509, rpki.sql, rpki.async, rpki.log\n\n try:\n rpki.http.debug_http = self.getboolean(\"debug_http\")\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.http.want_persistent_client = self.getboolean(\"want_persistent_client\")\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.http.want_persistent_server = self.getboolean(\"want_persistent_server\")\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.http.use_adns = self.getboolean(\"use_adns\")\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.http.enable_ipv6_clients = self.getboolean(\"enable_ipv6_clients\")\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.http.enable_ipv6_servers = self.getboolean(\"enable_ipv6_servers\")\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.x509.CMS_object.debug_cms_certs = self.getboolean(\"debug_cms_certs\")\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.sql.sql_persistent.sql_debug = self.getboolean(\"sql_debug\")\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.async.timer.gc_debug = self.getboolean(\"gc_debug\")\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.async.timer.run_debug = self.getboolean(\"timer_debug\")\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.x509.XML_CMS_object.dump_outbound_cms = rpki.x509.DeadDrop(self.get(\"dump_outbound_cms\"))\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.x509.XML_CMS_object.dump_inbound_cms = rpki.x509.DeadDrop(self.get(\"dump_inbound_cms\"))\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.async.gc_summary(self.getint(\"gc_summary\"), self.getint(\"gc_summary_threshold\", 0))\n except ConfigParser.NoOptionError:\n pass\n\n try:\n rpki.log.enable_tracebacks = self.getboolean(\"enable_tracebacks\")\n except ConfigParser.NoOptionError:\n pass",
"def vv_flag():\n log.setLevel(logging.DEBUG)",
"def _set_debug(debug):\n global _DEBUG\n _DEBUG = debug\n if debug:\n logging.disable(logging.NOTSET)\n else:\n logging.disable(logging.DEBUG)",
"def set_debug_flag(flag):\n pma._pma_set_debug_flag(flag)",
"def debug_mode(x):\n if x:\n logger.setLevel(logging.DEBUG)\n ch.setLevel(logging.DEBUG)\n _DEBUG = True\n KEEP_TEMPFILES = True\n logger.info(\n 'Debug mode enabled. You may also want to set '\n 'pybedtools.KEEP_TEMPFILES=True to prevent automatic deletion '\n 'of files upon exit.')\n else:\n logger.setLevel(logging.INFO)\n ch.setLevel(logging.INFO)\n _DEBUG = False\n KEEP_TEMPFILES = False\n logger.info('Debug mode disabled')",
"def set_debug(state):\n global _DEBUG\n _DEBUG = bool(state)",
"def set_debug_mode(self):\n self.debug_mode = True",
"def fault_debug(value: bool = False) -> None:",
"def set_debug_mode(debug_bool):\n\n MKL.MKL_DEBUG = debug_bool",
"def _enableDebugPrint(self):\n self._dbPrint = Printer(debugPrint=True)",
"def v_flag():\n log.setLevel(logging.INFO)",
"def SetDebugMode(enabled=True):\n global option\n option['debug_mode'] = enabled",
"def set_debug(debug_val):\n global _DEBUG # noqa: PLW0603\n _DEBUG = debug_val",
"def toggle_debug(self):\n self.__debug = not self.__debug",
"def setup_requests_debugging(self):\n\n # These two lines enable debugging at httplib level (requests->urllib3->http.client)\n # You will see the REQUEST, including HEADERS and DATA, and RESPONSE with HEADERS but without DATA.\n # The only thing missing will be the response.body which is not logged.\n try:\n import http.client as http_client\n except ImportError:\n # Python 2\n import httplib as http_client\n http_client.HTTPConnection.debuglevel = 1\n\n # You must initialize logging, otherwise you'll not see debug output.\n self.logger.setLevel(logging.DEBUG)\n requests_log = logging.getLogger(\"requests.packages.urllib3\")\n requests_log.setLevel(logging.DEBUG)\n requests_log.propagate = True",
"def set_debug(self, value=True):\n self.debug = value",
"def setVerboseDebugOn(self):\n self.edLogging.setVerboseDebugOn()",
"def debug_requests_on():\n HTTPConnection.debuglevel = 2\n\n logging.basicConfig(filename='example1.log', filemode='w', level=logging.INFO, format='%(asctime)s %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p')\n logging.getLogger().setLevel(logging.DEBUG)\n\n requests_log = logging.getLogger(\"requests.packages.urllib3\")\n requests_log.setLevel(logging.DEBUG)\n requests_log.propagate = True",
"def debug():\n return bool(_environ.get(\"ACCELPY_DEBUG\", False))",
"def debug(mode=True):\r\n global DEBUG\r\n DEBUG = bool(mode)",
"def catalogSetDebug(level):\n ret = libxml2mod.xmlCatalogSetDebug(level)\n return ret",
"def enable_debugger(app):\n import aiohttp_debugtoolbar\n\n # dev mode only\n # this will be served at API_SERVER_URL/_debugtoolbar\n aiohttp_debugtoolbar.setup(app)",
"def enable_debugger(app):\n import aiohttp_debugtoolbar\n\n # dev mode only\n # this will be served at API_SERVER_URL/_debugtoolbar\n aiohttp_debugtoolbar.setup(app)",
"def enable_rendering(flag=True):\n global _rendering_enabled\n _rendering_enabled = flag",
"def debug():\n assert current_app.debug == False, \"Don't panic! You're here by request of debug()\"",
"def _debug():\n return _DEBUG",
"def enableCLangLogger(self):"
]
| [
"0.7106474",
"0.6122721",
"0.6115534",
"0.59994066",
"0.59944767",
"0.599425",
"0.5859546",
"0.5831815",
"0.57899874",
"0.574849",
"0.5723658",
"0.57184064",
"0.5690623",
"0.56572646",
"0.56189394",
"0.56183857",
"0.55708146",
"0.55087733",
"0.55059654",
"0.549461",
"0.54930943",
"0.54761434",
"0.5454504",
"0.54366446",
"0.537866",
"0.537866",
"0.53625095",
"0.5348113",
"0.5342462",
"0.5334421"
]
| 0.84278125 | 0 |
Parse an input spec into a jax.ShapeDtypeStruct. | def input_spec_to_jax_shape_dtype_struct(
spec: Union[Tuple[Tuple[int, ...], jnp.dtype], Tuple[int, ...]],
batch_size: Optional[int] = None) -> jax.ShapeDtypeStruct:
spec = tuple(spec)
if len(spec) == 2 and isinstance(spec[0], collections.abc.Iterable):
shape = (batch_size,) + tuple(spec[0][1:]) if batch_size else spec[0]
dtype = spec[1]
else:
shape = (batch_size,) + tuple(spec[1:]) if batch_size else spec
dtype = jnp.float32
return jax.ShapeDtypeStruct(shape, dtype) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_spec (spec_file):\n spec_object = None\n spec_name = spec_file.replace(\".\", \"_\")\n params = []\n default_params = {}\n int_conversion = []\n namedtuple = False\n delimiter = \"\\n\"\n\n spec_file = open(spec_file, \"r\")\n spec = spec_file.readlines()\n spec_file.close()\n\n for line in spec:\n line = line.strip()\n param_name = None\n default_param = None\n if line.startswith(\"%id\"):\n spec_name = line.split(\" \", 1)[1]\n elif line.startswith(\"%delim\"):\n delimiter = line.split(\" \", 1)[1].strip()\n elif line.startswith(\"$\"):\n line = line.split(\" \", 1)\n if len(line) >= 1:\n param_name = line[0].strip(\"$\")\n if len(line) == 2:\n default_param = line[1].strip()\n if param_name and not param_name.isdigit():\n namedtuple = True\n if default_param and param_name.isdigit():\n assert param_name != \"0\"\n params.append(param_name)\n if default_param:\n default_params[param_name]=default_param\n elif line.startswith(\"%int\"):\n var = line.split(\" \", 1)[1].strip()\n int_conversion.append(var)\n\n if namedtuple:\n class parent (object):\n def __init__ (self, *args, **kwargs):\n self.__name__ = spec_name\n if len(args) == len(params):\n # arg for arg\n for key, value in zip(params, args):\n self.__dict__[key] = value\n elif len(kwargs) == len(params):\n for key, value in kwargs.iteritems():\n self.__dict__[key] = value\n else:\n assert not \"Didn't get the right number of arguments!\"\n def __repr__ (self):\n values = \"\"\n for key in params:\n values += \"%s=%s,\" % (key, repr(self.__dict__[key]))\n return \"<%s %s>\" % (self.__name__, values.strip(\", \"))\n else:\n parent = list\n\n class spec_object (parent):\n def __init__ (self, block):\n self.__name__ = spec_name\n if isinstance(block, str):\n block = split_escaped_delim(delimiter, block.strip())\n assert len(block) + len(default_params) >= len(params)\n if len(block) < len(params):\n for key, default in default_params.iteritems():\n if key.isdigit():\n assert int(key) >= len(block)\n block.insert(int(key), default)\n else:\n block.append(\"%s=%s\" % (key, default))\n\n if not namedtuple:\n if int_conversion:\n for conv in int_conversion:\n block[conv] = int(block[conv])\n parent.__init__(self, block)\n else:\n new_data = {}\n for item in block:\n new_item = split_escaped_delim(\"=\", item, 1)\n if len(new_item) == 1:\n new_item = split_escaped_delim(\":\", item, 1)\n if len(new_item) == 1:\n raise DatabaseError, \"Corrupted line? %s\" % item\n item = new_item\n if int_conversion and item[0] in int_conversion:\n item[1] = int(item[1])\n assert len(item) == 2\n # Don't overwrite real data with default values!\n if item[0] not in new_data:\n new_data[item[0]] = item[1]\n\n parent.__init__(self, **new_data)\n elif isinstance(block, list):\n if not namedtuple:\n parent.__init__(self, block)\n else:\n parent.__init__(self, *block)\n elif isinstance(block, dict):\n assert namedtuple\n parent.__init__(self, **block)\n def __repr__ (self):\n if namedtuple:\n return parent.__repr__(self)\n else:\n return \"<%s %s>\" % (self.__name__, parent.__repr__(self))\n\n return spec_object",
"def _determine_dtypes_and_shapes(self):\r\n while True:\r\n raw_entry = next(self.entry_generator(yield_just_one=True))\r\n if raw_entry is None:\r\n continue\r\n preprocessed_entry_dict = self.preprocess_entry(raw_entry)\r\n if preprocessed_entry_dict is not None:\r\n break\r\n labels, values = zip(*list(preprocessed_entry_dict.items()))\r\n dtypes = [value.dtype for value in values]\r\n shapes = [value.shape for value in values]\r\n return labels, dtypes, shapes",
"def from_specs(cls, fields_specs, **kwargs):\n names = []\n lengths = []\n positions = []\n for field_str in fields_specs.split():\n atoms = field_str.split(':')\n name = atoms[0]\n position = None\n if len(atoms) == 2:\n if atoms[1].startswith('@'): # NAME:@NUM_BITS\n length = int(atoms[1][1:])\n else: # NAME:BIT_POSITION\n length = 1\n position = int(atoms[1])\n elif len(atoms) == 3: # NAME:START_POSITION:END_POSITION\n start, end = int(atoms[1]), int(atoms[2])\n # we will make it that we always have HIGHER:LOWER\n if end > start:\n start, end = end, start\n length = abs(end - start) + 1\n position = (start, end)\n else:\n print('ERROR: ignoring field \"%s\"' % field_str)\n continue\n names.append(name)\n lengths.append(length)\n positions.append(position)\n\n # if not even two positions have been specified, do not assume order\n n_positions = sum(pos is not None for pos in positions)\n # assert n_positions > 1, 'At least 2 positions are required to assume order of bits'\n if n_positions < 2:\n # assume increasing\n # print('Assuming increasing order (from 0) because only %d positions were given' % n_positions)\n positions_given = (0, 1)\n else:\n positions_given = list(filter(None, positions))\n\n # for min/max on either int or a tuple\n def min_any(arg):\n if isinstance(arg, tuple):\n return min(arg)\n else:\n return arg\n def max_any(arg):\n if isinstance(arg, tuple):\n return max(arg)\n else:\n return arg\n\n # assume order\n p1, p2 = positions_given[:2]\n # simplify if tuples\n p1 = min_any(p1)\n p2 = min_any(p2)\n assert p1 != p2, 'Could not assume order, positions: %s' % positions\n assuming_decreasing = p1 > p2\n\n # sort positions based on the assumed order\n positions_sorted = []\n for pos in positions:\n if isinstance(pos, tuple):\n ps = (min(pos), max(pos))\n positions_sorted.append(tuple(reversed(ps)) if assuming_decreasing else ps)\n else:\n positions_sorted.append(pos)\n positions = positions_sorted\n\n # make the list be always increasing for consistency\n if assuming_decreasing:\n positions = list(reversed(positions))\n lengths = list(reversed(lengths))\n names = list(reversed(names))\n\n # fill the Nones\n if positions[0] is None:\n if lengths[0] == 1:\n positions[0] = 0\n else:\n positions[0] = (0, lengths[0] - 1)\n for i in range(0, len(positions)):\n if positions[i] is None:\n prev = max_any(positions[i - 1])\n if lengths[i] == 1:\n positions[i] = prev + 1\n else:\n positions[i] = (prev + 1, prev + 1 + lengths[i] - 1)\n\n # test if everything is consistent, i.e. 
all increasing or all decreasing\n # flatten the position data\n positions_flat = []\n for pos in positions:\n if isinstance(pos, tuple):\n positions_flat.append(max(pos))\n positions_flat.append(min(pos))\n else:\n positions_flat.append(pos)\n descreasing = all(earlier > later for earlier, later in zip(positions_flat, positions_flat[1:]))\n\n # if it was not decreasing than check if it is increasing\n if not descreasing:\n positions_flat = []\n for pos in positions:\n if isinstance(pos, tuple):\n positions_flat.append(min(pos))\n positions_flat.append(max(pos))\n else:\n positions_flat.append(pos)\n increasing = all(earlier < later for earlier, later in zip(positions_flat, positions_flat[1:]))\n\n assert increasing, 'Positions list was neither increasing nor descreasing: %s' % positions\n\n # ok, now fill in the missing holes in positions with reserved areas\n assert len(names) == len(lengths) == len(positions), 'Yyy...something is not yes...'\n new_names = []\n new_lengths = []\n new_positions = []\n prev_max = -1\n for i in range(len(positions)):\n if min_any(positions[i]) - (prev_max + 1) > 0:\n new_names.append('_')\n new_lengths.append(min_any(positions[i]) - (prev_max + 1))\n if new_lengths[-1] == 1:\n new_positions.append(prev_max + 1)\n else:\n new_positions.append((prev_max + 1, min_any(positions[i]) - 1))\n new_names.append(names[i])\n new_lengths.append(lengths[i])\n new_positions.append(positions[i])\n prev_max = max_any(positions[i])\n\n names = new_names\n lengths = new_lengths\n positions = new_positions\n\n # create register object\n return cls(names, lengths, positions=positions, **kwargs)",
"def _setup_type_shapes(self, named_ops, extra_type_shapes):\n type_shape_set = set()\n for op in six.itervalues(named_ops):\n type_shape_set.update(op.input_type_shapes)\n type_shape_set.update(op.output_type_shapes)\n if extra_type_shapes is not None:\n type_shape_set.update(extra_type_shapes)\n\n # _type_shapes: a list of all the typeshapes this loom object supports.\n self._type_shapes = sorted(type_shape_set)\n\n # Enforce uniqueness for non-empty TypeShape tags.\n non_empty_tags = set()\n for ts in self._type_shapes:\n if ts.tag:\n if ts.tag in non_empty_tags:\n raise TypeError('Tags on tagged TypeShapes must be unique; '\n '%s occured more than once.' % (ts.tag,))\n else:\n non_empty_tags.add(ts.tag)\n\n # _type_shape_to_idx: a dict mapping TypeShape objects to their indices in\n # '_type_shapes'.\n self._type_shape_to_idx = {ts: idx for idx, ts in\n enumerate(self._type_shapes)}",
"def infer_signature(configs: Sequence[Config],\n step_spec: reverb_types.SpecNest) -> reverb_types.SpecNest:\n if not configs:\n raise ValueError('At least one config must be provided.')\n\n if any(c.pattern_structure != configs[0].pattern_structure for c in configs):\n raise ValueError(\n 'All configs must have exactly the same pattern_structure.')\n\n if any(c.table != configs[0].table for c in configs):\n raise ValueError(\n f'All configs must target the same table but provided configs '\n f'included {\", \".join(sorted(set(c.table for c in configs)))}.')\n\n flat_step_spec = tree.flatten(step_spec)\n\n def _validate_and_convert_to_spec(path, *nodes):\n # Check that all nodes share the same dtype.\n dtypes = [flat_step_spec[node.flat_source_index].dtype for node in nodes]\n if any(dtype != dtypes[0] for dtype in dtypes):\n raise ValueError(\n f'Configs produce trajectories with multiple dtypes at {path}. '\n f'Got {dtypes}.')\n\n # Create shapes for all nodes.\n shapes = []\n for node in nodes:\n shape = list(flat_step_spec[node.flat_source_index].shape)\n if node.HasField('start'):\n length = (node.stop - node.start) // (node.step or 1)\n shape = [length, *shape]\n\n shapes.append(tensor_shape.TensorShape(shape))\n\n # Check that all shapes are either completely identical or at least\n # identical in all dimensions but the first.\n if (any(shape.rank != shapes[0].rank for shape in shapes) or\n (shapes[0].rank > 1 and\n any(shape[1:] != shapes[0][1:] for shape in shapes))):\n raise ValueError(\n f'Configs produce trajectories with incompatible shapes at {path}. '\n f'Got {shapes}.')\n\n # Merge the shapes into a single shape. If the first dimension varies then\n # we set the leading dimension as undefined.\n if all(shape == shapes[0] for shape in shapes):\n merged_shape = shapes[0]\n else:\n merged_shape = [None, *shapes[0][1:]]\n\n return tensor_spec.TensorSpec(\n shape=merged_shape,\n dtype=dtypes[0],\n name='/'.join(str(x) for x in path))\n\n patterns = [unpack_pattern(config) for config in configs]\n return tree.map_structure_with_path(_validate_and_convert_to_spec, *patterns)",
"def print_parsed(specs):\n observed_types = set()\n for i in specs.values():\n observed_types.update(i['types'])\n observed_types = sorted(observed_types)\n\n s = ['# Observed types from the parsed document']\n s.append('TRACKTYPES = [')\n for i in observed_types:\n s.append(\" '{}',\".format(i))\n s.append(']')\n print('\\n'.join(s) + '\\n')\n\n data_types = specs['bigDataUrl']['types']\n\n s = ['# Tracks for which the definition specifies bigDataUrl']\n s.append('DATA_TRACKTYPES = [')\n for i in data_types:\n s.append(\" '{}',\".format(i))\n s.append(']')\n print('\\n'.join(s) + '\\n')\n print('param_defs = [')\n print()\n for k, v in sorted(specs.items()):\n print(\n (\n '''\n Param(\n name=\"{k}\",\n fmt={v[format]},\n types={v[types]},\n required={v[required]},\n validator=str),'''.format(**locals())\n )\n )",
"def _parse(self):\n\n self.specification = {}\n\n while True:\n try:\n line = self._lines.current\n if ':' in line:\n self.specification.update(self._parse_spec())\n elif line.startswith('NODE_COORD_SECTION'):\n next(self._lines)\n self.coords = self._parse_coords()\n elif line.startswith('EDGE_WEIGHT_SECTION'):\n next(self._lines)\n self.weights = self._parse_weights()\n elif line.startswith('DISPLAY_DATA_SECTION'):\n next(self._lines)\n self.display = self._parse_coords()\n else:\n break\n except StopIteration:\n break\n\n del self._lines",
"def parse_spec(inp_file):\n try:\n y_spec = yaml.load(inp_file, Loader=yaml.SafeLoader)\n spec = create_spec(y_spec)\n except jsonschema.exceptions.RefResolutionError:\n logging.error(\"Could not load specification. Check your network or try again\")\n raise err.BeaconTestError()\n except openapi_spec_validator.exceptions.OpenAPIValidationError:\n logging.error(\"Could not read specification. Check tat your file is valid\")\n raise err.BeaconTestError()\n return spec",
"def unpack_element_spec_from(\n buffer: bytes, offset: int = 0\n) -> tuple[structure_utils.Structure[tf.TypeSpec], int]:\n length, length_size = _unpack_length_from(buffer, offset=offset)\n offset += length_size\n element_spec_bytes, *_ = struct.unpack_from(\n f'!{length}s', buffer, offset=offset\n )\n partial_bytes = pickle.loads(element_spec_bytes)\n\n def _deserialize_tensor_spec(buffer: bytes) -> tf.TensorSpec:\n proto = tf.TensorSpec.experimental_type_proto().FromString(buffer)\n return tf.TensorSpec.experimental_from_proto(proto)\n\n element_spec = structure_utils.map_structure(\n _deserialize_tensor_spec, partial_bytes\n )\n return element_spec, length_size + length",
"def read_input(self, specs):\n print('DEBUGG specs:', specs)",
"def parseDef(self, firstLine, lines):\n\n m = re.match(r'\\s*typedef\\s+struct\\s*\\{\\s*(?P<rest>.*)', firstLine,\n re.VERBOSE)\n if not m:\n raise RuntimeError('No prefix mathed in %s' % (firstLine))\n \n l = m.groupdict()['rest']\n\n nlines = 0\n parts = []\n while l != None:\n nlines += 1\n l = l.strip()\n\n # Finished with this line, fetch the next one.\n if l == '' or l.startswith('#'):\n l = next(lines)\n continue\n\n # Look for the ending struct name.\n m = self.typedefNameRE.match(l)\n if m:\n g = m.groupdict()\n name = g['name']\n return nlines, name, parts\n\n # Get the next column definition.\n m = self.typedefRE.match(l)\n if not m:\n raise RuntimeError('unmatched struct definition at %s' % (l))\n \n g = m.groupdict()\n if g['arr1']:\n arrSize = int(g['arr1'])\n else:\n arrSize = 1\n \n if g['arr2']:\n arrSize = (int(g['arr2']), arrSize)\n\n defn = (g['name'], g['type'], arrSize)\n parts.append(defn)\n\n # Process the rest of the line.\n l = g['rest']",
"def _read_and_decode(example_proto,data_shape,dtypes):\n features = {}\n for name in data_shape:\n features[name] = tf.FixedLenFeature([], tf.string)\n parsed_features = tf.parse_single_example(example_proto, features)\n count = 0\n res = {}\n for name in data_shape:\n res[name] = parsed_features[name]\n if dtypes[count]!=str:\n res[name]=tf.decode_raw(res[name],dtypes[count])\n if dtypes[count]==tf.float32 or dtypes[count]==tf.float64:\n res[name]=tf.convert_to_tensor(res[name],dtype=dtypes[count])\n if data_shape[name]:\n res[name]=tf.reshape(res[name],shape=data_shape[name])\n count += 1\n return res",
"def parse_json(json_filepath):\n file = open(json_filepath, \"r\")\n file_str = file.read()\n json_dict = json.loads(file_str)\n input_struct = DriverConfig()\n\n input_struct.device_name = json_dict['devices'][0]['name']\n input_struct.device_type = DeviceType.FPGA\n input_struct.device_name_abbrev = input_struct.device_name\n input_struct.compatible = f'dev,{input_struct.vendor}-{input_struct.device_name}' \n attributes = json_dict['devices'][0]['registers']\n input_struct.device_attributes = []\n input_struct.device_attributes.append(DeviceAttribute(\"name\", DataType(\"string\", 32), \"0444\"))\n for attr in attributes:\n input_struct.device_attributes.append(DeviceAttribute.parse_json(attr, input_struct.device_type))\n return input_struct",
"def get_dtype_and_shape(self, val):\n # get type of object as string\n val_type = str(type(val))\n matchObj = re.match(r\"<(type|class) '([^']+)'>\", val_type)\n if not matchObj:\n raise SystemError(\"** Error: Unable to find type in %s\" % val_type)\n val_type = matchObj.group(2)\n # check for \"value_info\" passed in through calling script (e.g. Matlab)\n # if so, then type and shape is given in val (it does not contain the actual data\n # to store.\n if val_type == 'str' and self.file.options['storage_method'] == 'none':\n # value_info string looks like the following:\n # value_info: type=\"float\", shape=\"[5]\" *OR*\n # value_info: type=\"float\", shape=\"[scalar]\"\n matchObj = re.match(r'^value_info: type=\"([^\"]+)\", shape=\"\\[([^\\]]+)\\]\"$', val)\n if matchObj:\n dtype = matchObj.group(1)\n shape = matchObj.group(2)\n if shape != 'scalar':\n # convert dimensions from string (like '4 5') to integer list\n shape = map(int, shape.split())\n return (dtype, shape)\n # check data shape and type \n if val_type in ('str', 'int', 'float', 'long', 'unicode', 'bool'):\n shape = \"scalar\"\n dtype = val_type\n elif val_type == 'list':\n # convert from list to np array to get shape\n a = np.array(val)\n shape = a.shape\n dtype = str(a.dtype)\n # print \"found list, dtype is %s, shape is:\" % dtype\n # pp.pprint (shape)\n elif 'numpy' in val_type or type(val) is h5py._hl.dataset.Dataset: \n shape = val.shape\n dtype = str(val.dtype)\n # print \"found numpy or h5py dataset, dtype is %s\", dtype\n else:\n print \"** Error, unable to determine shape of value assiged to dataset\"\n print \"value type is '%s'\" % val_type\n traceback.print_stack()\n sys.exit(1)\n return (dtype, shape)",
"def input_grammar():\r\n dummy = Word(alphas.lower(), exact=1)\r\n axis = Or([positive_integer('size'), dummy('identifier')])\r\n shape = squareBracketedExpr( ( delimitedList(axis) ) )('shape')\r\n input = type_argument + Optional(shape) + identifier\r\n\r\n return Optional(delimitedList(Group(input)))",
"def load_structure(self, **kwargs):\n\n\t\t# PDB fields\n\t\tself.s_name = kwargs[\"s_name\"]\t\t\t\t\t\t\t\t# Name of the structure\n\t\tself.l_s_leading_data = kwargs[\"l_s_leading_data\"]\t\t\t# PDB information written above the atom properties\n\t\tself.l_s_trailing_data = kwargs[\"l_s_trailing_data\"]\t\t# PDB information written under the atom properties\n\n\t\t# Structural fields\n\t\tself.i_atom_count = len(kwargs[\"d_atoms\"][\"element_type\"])\t\t# Retrieves the number of atoms\n\t\tself.a_atoms = np.arange(self.i_atom_count).astype(\t\t\t\t# Array of atoms properties\n\t\t\tnp.dtype([\n\t\t\t\t(\"element_type\", np.str, 6),\t\t\t\t# ATOM or HETATM\n\t\t\t\t(\"atom_serial\", np.uint16, 1),\t\t\t\t# Atom serial number\n\t\t\t\t(\"atom_name\", np.str, 4),\t\t\t\t\t# Atom name\n\t\t\t\t(\"alternative_location\", np.str, 1),\t\t# Alternate location indicator\n\t\t\t\t(\"residue_name\", np.str, 3),\t\t\t\t# Residue name\n\t\t\t\t(\"chain_id\", np.str, 1),\t\t\t\t\t# Chain identifier\n\t\t\t\t(\"residue_serial\", np.int16, 1),\t\t\t# Residue sequence number\n\t\t\t\t(\"residue_insertion\", np.str, 1),\t\t\t# Code for insertion of residues\n\t\t\t\t(\"coord_x\", np.float32, 1),\t\t\t\t\t# Orthogonal coordinates for X in Angstroms\n\t\t\t\t(\"coord_y\", np.float32, 1),\t\t\t\t\t# Orthogonal coordinates for Y in Angstroms\n\t\t\t\t(\"coord_z\", np.float32, 1),\t\t\t\t\t# Orthogonal coordinates for Z in Angstroms\n\t\t\t\t(\"occupancy\", np.float16, 1),\t\t\t\t# Occupancy\n\t\t\t\t(\"temperature_factor\", np.float16, 1),\t\t# Temperature factor\n\t\t\t\t(\"element_symbol\", np.str, 2),\t\t\t\t# Element symbol\n\t\t\t\t(\"element_charge\", np.str, 2),\t\t\t\t# Charge on the atom\n\t\t\t\t(\"element_mass\", np.float16, 1),\t\t\t# Mass of the atom\n\t\t\t\t(\"grid_x\", np.int16, 1),\t\t\t\t\t# X coordinates in the grid\n\t\t\t\t(\"grid_y\", np.int16, 1),\t\t\t\t\t# Y coordinates in the grid\n\t\t\t\t(\"grid_z\", np.int16, 1),\t\t\t\t\t# Z coordinates in the grid\n\t\t\t\t(\"custom_type\", np.str, 3),\t\t\t\t\t# A custom name for the element\n\t\t\t])\n\t\t)\n\n\t\t# For each field to save\n\t\tfor s_key in kwargs[\"d_atoms\"]:\n\t\t\tself.a_atoms[s_key] = kwargs[\"d_atoms\"][s_key]\t\t# Saves each field of the dictionary of atom properties\n\n\t\tself.a_atoms[\"element_mass\"] = retrieve_element_mass(\t\t# Retrieves the atomic mass of the given elements\n\t\t\tx_element_symbol=self.a_atoms[\"element_symbol\"],\t\t# Element symbol\n\t\t\tx_backup_symbol=self.a_atoms[\"atom_name\"]\t\t\t\t# Element symbol in case of fail\n\t\t)\n\t\tself.translate_custom_types()\t\t# Translates to the custom element types\n\n\t\tself.l_l_elements = set(self.a_atoms[\"element_symbol\"])\t\t# List all the different elements contained in the structure\n\t\tl_s_elements = [None] * len(gp.D_ELEMENT_NUMBER)\t\t\t# Creates an empty list with a slot for each possible element\n\n\t\t# For each chemical element\n\t\tfor s_element in self.l_l_elements:\n\n\t\t\ti_element_number = gp.D_ELEMENT_NUMBER[s_element]\t\t\t# Retrieves the atomic number of the element\n\t\t\ta_element_indexes = np.where(\t\t\t\t\t\t\t\t# Retrieves the indexes of the elements\n\t\t\t\tself.a_atoms[\"element_symbol\"] == s_element\n\t\t\t)\n\n\t\t\tl_s_elements[i_element_number] = [\t\t# Orders each element by their atomic number\n\t\t\t\ts_element,\t\t\t\t\t\t\t# Element symbol\n\t\t\t\ti_element_number,\t\t\t\t\t# Atomic number of the element\n\t\t\t\ta_element_indexes,\t\t\t\t\t# Indexes of the element in the 
structure\n\t\t\t\tNone,\t\t\t\t\t\t\t\t# Coordinates of the element in the grid\n\t\t\t\tNone,\t\t\t\t\t\t\t\t# VdW radius of the element\n\t\t\t\tNone\t\t\t\t\t\t\t\t# Sphere coordinates of the element\n\t\t\t]\n\t\t# End for\n\n\t\tself.l_l_elements = list(filter(None, l_s_elements))\t\t# Removes empty elements in the list\n\n\t\t# Miscellaneous fields\n\t\tself.f_mass = sum(self.a_atoms[\"element_mass\"])\t\t# Sums the mass of each element",
"def process_input_file(filename):\n\n # Parse the input file\n try:\n ast = parser.parse(open(filename, 'r').read())\n except pyparsing.ParseBaseException as e:\n print \"Parse error in %s: %s\" % (os.path.basename(filename), str(e))\n sys.exit(1)\n\n ofinput = of_g.OFInput()\n\n # Now for each structure, generate lists for each member\n for s in ast:\n if s[0] == 'struct':\n name = s[1].replace(\"ofp_\", \"of_\", 1)\n members = [dict(m_type=x[0], name=x[1]) for x in s[2]]\n ofinput.classes[name] = members\n ofinput.ordered_classes.append(name)\n if name in type_maps.inheritance_map:\n # Clone class into header class and add to list\n ofinput.classes[name + \"_header\"] = members[:]\n ofinput.ordered_classes.append(name + \"_header\")\n elif s[0] == 'metadata':\n if s[1] == 'version':\n log(\"Found version: wire version \" + s[2])\n if s[2] == 'any':\n ofinput.wire_versions.update(of_g.wire_ver_map.keys())\n elif int(s[2]) in of_g.supported_wire_protos:\n ofinput.wire_versions.add(int(s[2]))\n else:\n debug(\"Unrecognized wire protocol version\")\n sys.exit(1)\n found_wire_version = True\n\n if not ofinput.wire_versions:\n debug(\"Missing #version metadata\")\n sys.exit(1)\n\n return ofinput",
"def parse_DESI_brick(hdulist, select=0, **kwargs):\n fx = hdulist[0].data\n # Sig\n if hdulist[1].name in ['ERROR', 'SIG']:\n sig = hdulist[1].data\n else:\n ivar = hdulist[1].data\n sig = np.zeros_like(ivar)\n gdi = ivar > 0.\n sig[gdi] = np.sqrt(1./ivar[gdi])\n # Wave\n wave = hdulist[2].data\n wave = give_wv_units(wave)\n if wave.shape != fx.shape:\n wave = np.tile(wave, (fx.shape[0],1))\n # Finish\n xspec1d = XSpectrum1D(wave, fx, sig, select=select, **kwargs)\n return xspec1d",
"def get_specification(self) -> Dict:\n specification = {\n 'version': VERSION,\n 'metadata': {\n 'twoColumn': True,\n 'layout': self.autolayout\n },\n 'nodes': []\n }\n\n def strip_io(io_list: list, direction) -> list:\n \"\"\"\n Strips every input/output from metadata and leaves only\n `name` and `type` keys.\n \"\"\"\n return [\n {\n 'name': io['name'],\n 'type': io['type'],\n 'direction': direction\n }\n for io in io_list\n ]\n\n toremove = set()\n for key, node in self.nodes.items():\n try:\n node_cls = load_class(node.cls_name)\n except (ModuleNotFoundError, ImportError, Exception) as err:\n msg = f'Could not add {node_cls}. Reason:'\n _LOGGER.warn('-' * len(msg))\n _LOGGER.warn(msg)\n _LOGGER.warn(err)\n _LOGGER.warn('-' * len(msg))\n toremove.add(key)\n continue\n parameterschema = node_cls.form_parameterschema()\n\n properties = []\n for name, props in parameterschema['properties'].items():\n new_property = {'name': name}\n\n if 'default' in props:\n new_property['default'] = props['default']\n\n if 'description' in props:\n new_property['description'] = props['description']\n\n def add_default(default_val):\n if new_property.get('default') is None:\n new_property['default'] = default_val\n\n # Case for an input with range defined\n if 'enum' in props:\n new_property['type'] = 'select'\n new_property['values'] = list(map(str, props['enum']))\n add_default(new_property['values'][0])\n # Case for a single value input\n elif 'type' in props:\n if 'array' in props['type']:\n new_property['type'] = 'list'\n if 'items' in props and 'type' in props['items']:\n dtype = props['items']['type']\n new_property['dtype'] = dtype\n add_default([])\n elif 'boolean' in props['type']:\n new_property['type'] = 'checkbox'\n add_default(False)\n elif 'string' in props['type']:\n new_property['type'] = 'text'\n add_default('')\n elif 'integer' in props['type']:\n new_property['type'] = 'integer'\n add_default(0)\n elif 'number' in props['type']:\n new_property['type'] = 'number'\n add_default(0)\n elif 'object' in props['type']:\n # Object arguments should be defined in specification\n # as node inputs, rather than properties\n new_property = None\n else:\n new_property['type'] = 'text'\n add_default('')\n # If no type is specified then text is used\n else:\n new_property['type'] = 'text'\n add_default('')\n\n if new_property is not None:\n properties.append(new_property)\n\n specification['nodes'].append({\n 'name': node.name,\n 'type': node.type,\n 'category': node.category,\n 'properties': properties,\n 'interfaces': strip_io(\n self.io_mapping[node.type]['inputs'],\n 'input'\n ) + strip_io(\n self.io_mapping[node.type]['outputs'],\n 'output'\n )\n })\n\n for key in toremove:\n del self.nodes[key]\n return specification",
"def input_type_shapes(self):\n return self._input_type_shapes",
"def test_convert_dosdp(self):\n schema = self._convert('dosdp_schema', 'yaml',\n name='dosdp',\n root_class_name='Pattern',\n data_files=['OMIM_disease_series_by_gene.yaml'],\n target_class='')\n #print(yaml_dumper.dumps(schema))\n axiom_type_options = schema.enums['axiom_type_options']\n self.assertIn('equivalentTo', axiom_type_options.permissible_values)\n self.assertIn('axiom_type', schema.slots)\n self.assertIn('PrintfClause', schema.classes)",
"def parse(source):\r\n #remove shared indentation\r\n source = dedent(source)\r\n print source\r\n\r\n structure = structure_parse(source)\r\n\r\n print structure.signature\r\n print structure.annotation\r\n print structure.body\r\n\r\n\r\n signature = signature_parse(structure.signature)\r\n\r\n kernel = kernel_parse(signature.kernel)\r\n inputs = input_parse(signature.input)\r\n\r\n print signature.kernel\r\n print signature.input\r\n print signature.output\r\n\r\n\r\n quit()",
"def parse_type(fobj, data_type):\n if data_type == \"Boolean\": ## False if 0x00 else True\n return bool(fobj.read(1))\n elif data_type == \"Byte\": ## 1 byte int\n return fobj.read(1)[0]\n elif data_type == \"DateTime\": ## 8 bytes signed int\n return struct.unpack(\"<q\", fobj.read(8))[0]\n elif data_type == \"Double\": ## 8 bytes floating point\n return struct.unpack(\"<d\", fobj.read(8))[0]\n elif data_type == \"Int\": ## 4 bytes unsigned int\n return struct.unpack(\"<I\", fobj.read(4))[0]\n elif data_type == \"Int-Double pair\": ## 0x08-Int-0x0d-Double with AssertionError\n bb = fobj.read(1)[0]\n if bb != 0x08:\n raise AssertionError('parse_type(fobj, data_type): '\n '1st byte(%s) of \"Int-Double pair\" != 0x08' % bb)\n first_int = parse_type(fobj, \"Int\")\n bb = fobj.read(1)[0]\n if bb != 0x0d:\n raise AssertionError('parse_type(fobj, data_type): '\n '6th byte(%s) of \"Int-Double pair\" != 0x0d' % bb)\n return [first_int, parse_type(fobj, \"Double\")]\n elif data_type == \"Int-Double pair*\": ## int(n) - \"Int-Double pair\"*n\n return [parse_type(fobj, \"Int-Double pair\") for i in range(parse_type(fobj, \"Int\"))]\n elif data_type == \"Long\": ## 8 bytes unsigned int\n return struct.unpack(\"<Q\", fobj.read(8))[0]\n elif data_type == \"Short\": ## 2 bytes unsigned int\n return struct.unpack(\"<H\", fobj.read(2))[0]\n elif data_type == \"Single\": ## 4 bytes floating point\n return struct.unpack(\"<f\", fobj.read(4))[0]\n elif data_type == \"String\": ## 0x00 or 0x0b - ULE128(n) - UTF-8(length=n)\n bb = fobj.read(1)[0]\n if bb == 0x00:\n return None\n elif bb != 0x0b:\n ## TODO: show integers in assertion error in hexadecimal and decimal\n ## to make debug more convenient (cause I may inspect the file in a byte reader.\n raise AssertionError('parse_type(fobj, data_type): '\n '1st byte(%s) of \"String\" not in {0x00, 0x0b}' % bb)\n strlen = parse_type(fobj, \"ULEB128\")\n return fobj.read(strlen).decode(\"utf-8\")\n elif data_type == \"ULEB128\": ## https://en.wikipedia.org/wiki/LEB128#Decode_unsigned_integer\n i = 0 ## derived from the wiki psuedo code\n res = 0\n shift = 0\n while True:\n bb = fobj.read(1)[0]\n i += 1\n res |= ((bb & 0b1111111) << shift)\n if (bb & 0b10000000) == 0:\n break\n shift += 7\n return res\n elif data_type == \"Timing point\": ## Double - Double - Boolean\n return parse_types(fobj, [\"Double\", \"Double\", \"Boolean\"])\n elif data_type == \"Timing point+\": ## int(n) - \"Timing point\"*n\n return [parse_type(fobj, \"Timing point\") for i in range(parse_type(fobj, \"Int\"))]\n else:\n raise NotImplementedError('parse_type(fobj, data_type): Unknown data type: \"%s\".' % data_type)",
"def validate(self):\n import os\n\n if self.kind == KDM.INTEROP:\n with open(os.path.join(os.path.dirname(__file__), 'xsd', 'interop.xsd'), 'r') as f:\n schema = f.read()\n elif self.kind == KDM.SMPTE:\n with open(os.path.join(os.path.dirname(__file__), 'xsd', 'smpte.xsd'), 'r') as f:\n schema = f.read()\n\n base_dir = os.getcwd()\n os.chdir(os.path.join(os.path.dirname(__file__), 'xsd'))\n try:\n schema = ET.XMLSchema(ET.XML(schema))\n xmlparser = ET.XMLParser(schema=schema)\n ET.fromstring(self.raw, xmlparser)\n finally:\n os.chdir(base_dir)",
"def getInputSpecification(cls):\n specs = super().getInputSpecification()\n specs.description = r\"\"\"The \\xmlString{DMD} ROM aimed to construct a time-dependent (or any other monotonic\n variable) surrogate model based on Dynamic Mode Decomposition\n This surrogate is aimed to perform a ``dimensionality reduction regression'', where, given time\n series (or any monotonic-dependent variable) of data, a set of modes each of which is associated\n with a fixed oscillation frequency and decay/growth rate is computed\n in order to represent the data-set.\n In order to use this Reduced Order Model, the \\xmlNode{ROM} attribute\n \\xmlAttr{subType} needs to be set equal to \\xmlString{DMD}.\n \\\\\n Once the ROM is trained (\\textbf{Step} \\xmlNode{RomTrainer}), its parameters/coefficients can be exported into an XML file\n via an \\xmlNode{OutStream} of type \\xmlAttr{Print}. The following variable/parameters can be exported (i.e. \\xmlNode{what} node\n in \\xmlNode{OutStream} of type \\xmlAttr{Print}):\n \\begin{itemize}\n \\item \\xmlNode{rankSVD}, see XML input specifications below\n \\item \\xmlNode{energyRankSVD}, see XML input specifications below\n \\item \\xmlNode{rankTLSQ}, see XML input specifications below\n \\item \\xmlNode{exactModes}, see XML input specifications below\n \\item \\xmlNode{optimized}, see XML input specifications below\n \\item \\xmlNode{features}, see XML input specifications below\n \\item \\xmlNode{timeScale}, XML node containing the array of the training time steps values\n \\item \\xmlNode{dmdTimeScale}, XML node containing the array of time scale in the DMD space (can be used as mapping\n between the \\xmlNode{timeScale} and \\xmlNode{dmdTimeScale})\n \\item \\xmlNode{eigs}, XML node containing the eigenvalues (imaginary and real part)\n \\item \\xmlNode{amplitudes}, XML node containing the amplitudes (imaginary and real part)\n \\item \\xmlNode{modes}, XML node containing the dynamic modes (imaginary and real part)\n \\end{itemize}\"\"\"\n specs.addSub(InputData.parameterInputFactory(\"dmdType\", contentType=InputTypes.makeEnumType(\"dmd\", \"dmdType\", [\"dmd\", \"hodmd\"]),\n descr=r\"\"\"the type of Dynamic Mode Decomposition to apply.Available are:\n \\begin{itemize}\n \\item \\textit{dmd}, for classical DMD\n \\item \\textit{hodmd}, for high order DMD.\n \\end{itemize}\"\"\", default=\"dmd\"))\n specs.addSub(InputData.parameterInputFactory(\"pivotParameter\", contentType=InputTypes.StringType,\n descr=r\"\"\"defines the pivot variable (e.g., time) that represents the\n independent monotonic variable\"\"\", default=\"time\"))\n specs.addSub(InputData.parameterInputFactory(\"rankSVD\", contentType=InputTypes.IntegerType,\n descr=r\"\"\"defines the truncation rank to be used for the SVD.\n Available options are:\n \\begin{itemize}\n \\item \\textit{-1}, no truncation is performed\n \\item \\textit{0}, optimal rank is internally computed\n \\item \\textit{>1}, this rank is going to be used for the truncation\n \\end{itemize}\"\"\", default=None))\n specs.addSub(InputData.parameterInputFactory(\"energyRankSVD\", contentType=InputTypes.FloatType,\n descr=r\"\"\"energy level ($0.0 < float < 1.0$) used to compute the rank such\n as computed rank is the number of the biggest singular values needed to reach the energy identified by\n \\xmlNode{energyRankSVD}. 
This node has always priority over \\xmlNode{rankSVD}\"\"\", default=None))\n specs.addSub(InputData.parameterInputFactory(\"rankTLSQ\", contentType=InputTypes.IntegerType,\n descr=r\"\"\"$int > 0$ that defines the truncation rank to be used for the total\n least square problem. If not inputted, no truncation is applied\"\"\", default=None))\n specs.addSub(InputData.parameterInputFactory(\"exactModes\", contentType=InputTypes.BoolType,\n descr=r\"\"\"True if the exact modes need to be computed (eigenvalues and\n eigenvectors), otherwise the projected ones (using the left-singular matrix after SVD).\"\"\", default=True))\n specs.addSub(InputData.parameterInputFactory(\"optimized\", contentType=InputTypes.FloatType,\n descr=r\"\"\"True if the amplitudes need to be computed minimizing the error\n between the modes and all the time-steps or False, if only the 1st timestep only needs to be considered\"\"\", default=False))\n return specs",
"def generateDataHDU(input_file, \n header_file='lib/header_dataHDU.txt',\n coldef_file='lib/coldefs_dataHDU.txt'):\n \n sd_in = pf.open(input_file)\n sd_data = sd_in[1].data\n num_rows = sd_data.shape[0]\n \n cols = []\n \n # The column definitions are loaded from an external file, which is\n # parsed line-by-line, using regular experssions.\n \n unit_pat = \"unit\\s*\\=\\s*'([\\w/%]+)'\"\n name_pat = \"name\\s*\\=\\s*'([\\w-]+)'\"\n dim_pat = \"dim\\s*\\=\\s*'(\\([\\d,]+\\))'\"\n format_pat = \"format\\s*\\=\\s*'(\\w+)'\" \n \n # Loop through, matching on each line\n cfile = open(coldef_file)\n for line in cfile.readlines():\n unit = name = dim = format = None\n name_match = re.search(name_pat, line)\n if name_match:\n name = name_match.group(1)\n \n format_match = re.search(format_pat, line)\n dim_match = re.search(dim_pat, line)\n unit_match = re.search(unit_pat, line)\n \n if unit_match: \n unit = unit_match.group(1)\n \n \n if dim_match: \n dim = dim_match.group(1)\n \n arr_shape = sd_data[name].shape\n \n if format_match: \n fits_fmt = format_match.group(1)\n zarr=None\n\n try:\n if name == 'DATA' or name == 'FLAGGED':\n np_dtype, data_len, data_fmt = formatLookup(fits_fmt)\n print name, \" no data\"\n else:\n # Data array must be flattened (e.g. (2,2) -> 4)\n np_dtype, data_len, data_fmt = formatLookup(fits_fmt)\n if data_len > 1 and data_fmt != 'str_':\n z_shape = (sd_data[name].shape[0], data_len)\n else:\n z_shape = sd_data[name].shape\n #print name, z_shape, sd_data[name].shape\n zarr = sd_data[name].reshape(z_shape)\n \n except:\n print \"Error with %s\"%name\n \n # Append the column to the column list\n cols.append(pf.Column(name=name, format=fits_fmt, unit=unit, dim=dim, array=zarr))\n \n # Now we have made a list of columns, we can make a new table\n #print cols\n coldefs = pf.ColDefs(cols)\n #print coldefs\n tbhdu = pf.new_table(coldefs)\n \n # If that all worked, we can populate with the final header values\n cards = generateCards(header_file)\n \n for card in cards:\n if card.keyword == 'COMMENT':\n pass\n tbhdu.header.add_comment(card.value)\n elif card.keyword == 'HISTORY':\n pass\n tbhdu.header.add_history(card.value)\n else:\n tbhdu.header.set(card.keyword, card.value, card.comment)\n \n return tbhdu",
"def parse(location):\n if not is_podspec(location):\n return\n\n podspec_object = Spec()\n podspec_data = podspec_object.parse_spec(location)\n return build_package(podspec_data)",
"def _parse_types(self):\n for root in self.roots:\n for types in root.iter('types'):\n for node in types.iter('type'):\n type_name = GLGenerator.get_name(node)\n text = GLGenerator.get_text(node).strip()\n if '*' in text and not text.startswith('struct'):\n self.pointer_types.append(type_name)",
"def structure_parse(source):\r\n return structure_grammar().parseString(source)",
"def read_input():\n\n filenames = sorted(glob.glob(\"%s/openflow_input/*\" % root_dir))\n\n for filename in filenames:\n log(\"Processing struct file: \" + filename)\n ofinput = process_input_file(filename)\n\n # Populate global state\n for wire_version in ofinput.wire_versions:\n version_name = of_g.of_version_wire2name[wire_version]\n versions[version_name]['classes'].update(copy.deepcopy(ofinput.classes))\n of_g.ordered_classes[wire_version].extend(ofinput.ordered_classes)"
]
| [
"0.53998345",
"0.5399715",
"0.53511655",
"0.52155787",
"0.5175422",
"0.50308526",
"0.4997735",
"0.4945539",
"0.48813435",
"0.48683077",
"0.4854738",
"0.485412",
"0.4849468",
"0.48474205",
"0.4809551",
"0.47939855",
"0.4788989",
"0.47868943",
"0.47620076",
"0.4761896",
"0.47330627",
"0.4729263",
"0.47251692",
"0.47215077",
"0.47023204",
"0.46912834",
"0.46869153",
"0.46847418",
"0.46821472",
"0.46791136"
]
| 0.6715354 | 0 |
Load dataset from file '../data/dataset.txt' and return it as a list of lines. | def load_data():
with open('../data/dataset.txt', 'r') as data_file:
return data_file.read().split('\n') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_datasets(filepath):\n\n data_file = open(filepath, 'r')\n data_list = data_file.readlines()\n data_file.close()\n\n return data_list",
"def load_data(path_dataset):\n data = read_txt(path_dataset)[1:]\n return preprocess_data(data)",
"def load_data(text_file) -> list:\n\n file = open(text_file)\n data = []\n\n line = file.readline().strip(\"\\n\")\n while line:\n data.append(line.split(\",\"))\n line = file.readline().strip(\"\\n\")\n\n file.close()\n\n return data",
"def load_data(path):\n with open(path) as f:\n return f.readlines()",
"def load_data(loc='../data/SICK/'):\n trainA, trainB, testA, testB = [],[],[],[]\n trainS, testS = [],[]\n\n with open(loc + 'SICK_train.txt', 'rb') as f:\n for line in f:\n text = line.strip().split('\\t')\n trainA.append(text[1])\n trainB.append(text[2])\n trainS.append(text[3])\n with open(loc + 'SICK_test_annotated.txt', 'rb') as f:\n for line in f:\n text = line.strip().split('\\t')\n testA.append(text[1])\n testB.append(text[2])\n testS.append(text[3])\n\n trainS = [float(s) for s in trainS[1:]]\n testS = [float(s) for s in testS[1:]]\n\n return [trainA[1:], trainB[1:]], [testA[1:], testB[1:]], [trainS, testS]",
"def load_dataset(filepath):\n \n X = list()\n x = list()\n\n Y = list()\n y = list()\n \n for line in open(filepath):\n # blank lines separate sequences\n if len(line) <= 1:\n X.append(x)\n Y.append(y)\n\n x = list()\n y = list()\n else:\n a, b = line.strip().split('\\t')\n x.append(a)\n y.append(b)\n \n return X, Y",
"def load_data():\n data = []\n with euler.Resource('triangle.txt') as datafile:\n for line in datafile.readlines():\n data.insert(0, map(int, line.strip().split()))\n return data",
"def load_nli_file(data_path, num_par=2):\n tokenizer = tokenization.NltkTokenizer()\n dataset = tf.data.TextLineDataset(data_path)\n dataset = dataset.map(\n functools.partial(_nli_line_to_tensors, tokenizer=tokenizer),\n num_parallel_calls=num_par)\n dataset = dataset.filter(lambda x: tf.greater_equal(x[\"label\"], 0))\n return dataset",
"def load_dataset(file_handle) -> list:\n output = []\n lines = file_handle.readlines()\n name = None\n for line in lines:\n line = line.replace(\"\\n\", \"\")\n if line.startswith(\">\"):\n if name:\n output.append(sequence)\n name = line[1:]\n sequence = \"\"\n else:\n sequence += line\n\n if name:\n output.append(sequence)\n \n return output",
"def load_dataset(self):\n # Get all the files in the directory\n file_list = self.get_file_list()\n\n # Concatenate the data corresponding to a list of files\n data = self.concatenate_file_data(file_list)\n\n # Shuffle the data and create the training and the validation datasets\n data = self.shuffle_data_dictionary(data)\n self.training_dataset, self.validation_dataset = self.split_data_into_training_and_validation(data)",
"def load(self, file):\n with open(file) as file:\n self.dataset = [line.strip() for line in file]\n\n return self.dataset",
"def loadtrainData():\n train_x = []\n train_y = []\n fileIn = open(PATH + 'traindata_Subtask4.txt')\n for line in fileIn.readlines():\n lineArr = line.strip().split()\n train_x.append([float(lineArr[i]) for i in range(len(lineArr) - 1)])\n train_y.append(int(lineArr[-1]))\n return np.mat(train_x), np.mat(train_y).transpose()",
"def load_data():\n # Load and preprocess data\n sentences, labels = load_data_and_labels()\n sentences_padded = pad_sentences(sentences)\n vocabulary, vocabulary_inv = build_vocab(sentences_padded)\n x, y = build_input_data(sentences_padded, labels, vocabulary)\n return [x, y, vocabulary, vocabulary_inv]",
"def import_dataset(fpath):\r\n data = read_csv(fpath)\r\n print(data.head())\r\n print(data.shape)\r\n return data",
"def loadData(self, dataType): \n if dataType == \"train\":\n f = self.urls[0]\n elif dataType == \"valid\":\n f = self.urls[1]\n elif dataType == \"test\":\n f = self.urls[2] \n\n \"\"\" Load text file \"\"\"\n corpus = list()\n with io.open(f, encoding='UTF-8') as f:\n for line in f: \n if len(line) > self.minSeq and len(line) < self.maxLen:\n corpus.append(line.lstrip().rstrip().split(' '))\n return corpus",
"def load_data(self, data_path):\n data = []\n with open(data_path, \"r\") as f:\n data = [line.split(\"\\t\") for line in f if len(line.strip()) > 0 and\n line.strip()[0] != '#']\n return data",
"def read_dataset(filename: str) -> List[str]:\n with open(filename, encoding=\"utf8\") as file:\n f = (line.strip() for line in file)\n return [line for line in f if line]",
"def collect_datset(self):\n response = requests.get(self.url)\n lines = response.text.splitlines()\n data = []\n for item in lines:\n item = item.split(\",\")\n data.append(item)\n data.pop(0) # to remove labels from list\n dataset = np.matrix(data)\n return dataset",
"def load_dataset(path):\n training_data = []\n with open(path) as csv_file:\n reader = csv.reader(csv_file, delimiter=\",\")\n next(reader)\n for row in reader:\n training_data.append(row[1])\n return training_data",
"def load_data(self):\n with open(self.file_name) as f:\n lines = f.readlines()\n\n labels = list()\n all_dat = list()\n for i, l in enumerate(lines):\n\n labels.append(int(l[0]))\n\n l = gensim.utils.any2unicode(l)\n all_dat.append(LabeledSentence(l.split(\"\\t\")[-1], [i]))\n\n return all_dat, np.asarray(labels)",
"def load_data(filename):\n data = []\n with open(filename, 'r') as file:\n for line in file:\n data.append(line.strip().split(','))\n return data",
"def load_dataset(filename):\n return [(\n lambda point: {\n 'coordinate': tuple(map(float, point[:-1])),\n 'label': int(point[-1])})\n (string.strip().rstrip().split(','))\n for string in open(filename, 'r').read()\n .strip().rstrip().split('\\n')]",
"def __read_data(self):\n data_list = []\n file_stream = open(self.data_id, \"r\")\n for line in file_stream:\n data_list.append(line.strip().split(\",\"))\n file_stream.close()\n return data_list",
"def read_file_data_to_list(file_name):\r\n file = open(file_name, \"r\")\r\n data = file.readlines() # reads rows of data into a list object\r\n file.close()\r\n return data",
"def load_data_list(self) -> List[dict]: # noqa: E501\n try:\n import lvis\n if getattr(lvis, '__version__', '0') >= '10.5.3':\n warnings.warn(\n 'mmlvis is deprecated, please install official lvis-api by \"pip install git+https://github.com/lvis-dataset/lvis-api.git\"', # noqa: E501\n UserWarning)\n from lvis import LVIS\n except ImportError:\n raise ImportError(\n 'Package lvis is not installed. Please run \"pip install git+https://github.com/lvis-dataset/lvis-api.git\".' # noqa: E501\n )\n with self.file_client.get_local_path(self.ann_file) as local_path:\n self.lvis = LVIS(local_path)\n self.cat_ids = self.lvis.get_cat_ids()\n self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}\n self.cat_img_map = copy.deepcopy(self.lvis.cat_img_map)\n\n img_ids = self.lvis.get_img_ids()\n data_list = []\n total_ann_ids = []\n for img_id in img_ids:\n raw_img_info = self.lvis.load_imgs([img_id])[0]\n raw_img_info['img_id'] = img_id\n if raw_img_info['file_name'].startswith('COCO'):\n # Convert form the COCO 2014 file naming convention of\n # COCO_[train/val/test]2014_000000000000.jpg to the 2017\n # naming convention of 000000000000.jpg\n # (LVIS v1 will fix this naming issue)\n raw_img_info['file_name'] = raw_img_info['file_name'][-16:]\n ann_ids = self.lvis.get_ann_ids(img_ids=[img_id])\n raw_ann_info = self.lvis.load_anns(ann_ids)\n total_ann_ids.extend(ann_ids)\n\n parsed_data_info = self.parse_data_info({\n 'raw_ann_info':\n raw_ann_info,\n 'raw_img_info':\n raw_img_info\n })\n data_list.append(parsed_data_info)\n if self.ANN_ID_UNIQUE:\n assert len(set(total_ann_ids)) == len(\n total_ann_ids\n ), f\"Annotation ids in '{self.ann_file}' are not unique!\"\n\n del self.lvis\n\n return data_list",
"def load_data():\r\n train = convert_corpus_to_lists(ConllCorpusReader('CoNLL-2003', 'train.txt', ['words', 'pos', 'ignore', 'chunk']))\r\n val = convert_corpus_to_lists(ConllCorpusReader('CoNLL-2003', 'valid.txt', ['words', 'pos', 'ignore', 'chunk'])) # testa will be our val set\r\n test = convert_corpus_to_lists(ConllCorpusReader('CoNLL-2003', 'test.txt', ['words', 'pos', 'ignore', 'chunk']))\r\n\r\n return train, val, test",
"def load_dataset(input_path):\n with open(input_path, \"r\") as f:\n smiles_list = f.read().strip().split(\"\\n\")\n return smiles_list",
"def load_data_list(self):\n\n data = mat4py.loadmat(self.ann_file)['images']\n names = data['name']\n labels = data['class']\n parts = data['set']\n num = len(names)\n assert num == len(labels) == len(parts), 'get error ann file'\n\n if self.split == 'train':\n target_set = {1}\n elif self.split == 'val':\n target_set = {2}\n elif self.split == 'test':\n target_set = {3}\n else:\n target_set = {1, 2}\n\n data_list = []\n for i in range(num):\n if parts[i] in target_set:\n img_name = names[i]\n img_path = self.backend.join_path(self.img_prefix, img_name)\n gt_label = labels[i] - 1\n info = dict(img_path=img_path, gt_label=gt_label)\n data_list.append(info)\n\n return data_list",
"def get_data(filename=\"../data/d.txt\"):\r\n a=[]\r\n for line in open(filename):\r\n items=line.split(\",\")\r\n a.append(items)\r\n print \"finishd reading file\", filename\r\n return a",
"def load_dataset(path, test_or_train):\n senta_batch, sentb_batch, scores_batch = [], [], []\n with open(path, encoding='utf-8') as f:\n for i, line in enumerate(f):\n items = line.strip().split('\\t')\n if test_or_train == 'train':\n senta, sentb, score = items[-2], items[-1], float(items[-3])\n elif test_or_train in ['dev', 'test']:\n senta, sentb, score = items[-2], items[-1], float(items[-3])\n else:\n raise Exception(\"{} error\".format(test_or_train))\n senta_batch.append(senta)\n sentb_batch.append(sentb)\n scores_batch.append(score)\n return senta_batch, sentb_batch, scores_batch"
]
| [
"0.7310075",
"0.7081436",
"0.69368184",
"0.6830235",
"0.681044",
"0.6765116",
"0.6706157",
"0.6666456",
"0.6652845",
"0.6651481",
"0.664702",
"0.6612349",
"0.660296",
"0.65759784",
"0.6559554",
"0.65574807",
"0.65524375",
"0.6543727",
"0.6543159",
"0.6526245",
"0.651906",
"0.6513335",
"0.650974",
"0.6491854",
"0.64859456",
"0.6483561",
"0.6475999",
"0.64702994",
"0.64373285",
"0.6421506"
]
| 0.8187677 | 0 |
Load stop words from file '../data/stop_words.txt' and return them as a list. | def load_stop_words():
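    # stop_words.txt holds whitespace-separated stop words; split() turns them into a list.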
with open('../data/stop_words.txt', 'r') as stop_words_file:
return stop_words_file.read().split() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_stop_words() -> list:\r\n with open(f'{ENGINE}/stop_words.txt', 'r') as i:\r\n stop_words = i.read().splitlines()\r\n stop_words = list(map(lambda x: x.upper(), stop_words)) # Force all stop words to UPPER case.\r\n return stop_words",
"def load_stop_list():\n stop_list = []\n with open(STOP_LIST, \"r\") as f:\n lines = f.readlines()\n stop_list = [word.strip() for word in lines]\n return stop_list",
"def load_stop_words(stop_word_file):\n stop_words = []\n for line in open(stop_word_file):\n if line.strip()[0:1] != \"#\":\n for word in line.split(): # in case more than one per line\n stop_words.append(word)\n return stop_words",
"def getstopwords():\n file = open('stopWords.txt', 'r')\n stoplist = []\n for word in file.readlines():\n word = word.strip('\\n')\n stoplist.append(word)\n return stoplist",
"def loadStopWordList(swFile):\n f = open(swFile, 'r')\n lines = f.readlines()\n f.close()\n result = list()\n for line in lines:\n sWord = line.strip('\\n')\n result.append(sWord)\n return result",
"def _stopwords():\n global _stopword_set\n if _stopword_set:\n return _stopword_set\n f_name = \"stopword.list\"\n if os.path.isfile(f_name):\n res = set()\n with open(f_name) as f:\n for line in f:\n res.add(line.strip())\n _stopword_set = res\n return res\n else:\n error(\"stop words - not a file: %s\" % f_name)",
"def load_stopwords():\r\n\tglobal stopwords\r\n\tif os.path.exists(paths.path_data_stopwords_txt):\r\n\t\tprint('\\nloading stopwords')\r\n\t\twith open(paths.path_data_stopwords_txt,'r') as inf:\r\n\t\t\tstopwords = inf.read().split('\\n')\r\n\t\treturn True\r\n\telse:\r\n\t\treturn False",
"def stopwords(self):\n with open(STOPWORDS_LIST, 'r') as content:\n return content.read().splitlines()",
"def loadWords():\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n return wordlist",
"def loadWords() -> List[str]:\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # wordList: list of strings\n wordList = []\n for line in inFile:\n wordList.append(line.strip().lower())\n print(\" \", len(wordList), \"words loaded.\")\n\n return wordList",
"def loadWords():\n inFile = open(wordFile, 'r')\n wordlist = []\n for line in inFile:\n wordlist.append(line)\n return wordlist",
"def load_words():\r\n## print \"Loading word list from file...\"\r\n # inFile: file\r\n inFile = open(WORDLIST_FILENAME, 'r', 0)\r\n # wordlist: list of strings\r\n wordlist = []\r\n for line in inFile:\r\n wordlist.append(line.strip().lower())\r\n## print \" \", len(wordlist), \"words loaded.\"\r\n return wordlist",
"def load_words():\n print(\"Loading word list from file..\")\n WORDLIST_FILENAME = \"words.txt\"\n # with open('words.txt', 'r') as f:\n # inFile = f.read()\n inFile = open(WORDLIST_FILENAME, 'r')\n wordlist = []\n\n for line in inFile:\n wordlist.append(line.strip().lower())\n return wordlist",
"def __init__(self,dir_stopwords):\n \n arc = open(dir_stopwords, \"r\", encoding='utf-8')\n self.stp_wrds = [line.strip() for line in arc]\n arc.close()",
"def loadWords():\n print \"Loading word list from file...\"\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r', 0)\n # wordList: list of strings\n wordList = []\n for line in inFile:\n wordList.append(line.strip().lower())\n print \" \", len(wordList), \"words loaded.\"\n return wordList",
"def loadWords():\n print \"Loading word list from file...\"\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r', 0)\n # wordList: list of strings\n wordList = []\n for line in inFile:\n wordList.append(line.strip().lower())\n print \" \", len(wordList), \"words loaded.\"\n return wordList",
"def loadWords():\n print \"Loading word list from file...\"\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r', 0)\n # wordList: list of strings\n wordList = []\n for line in inFile:\n wordList.append(line.strip().lower())\n print \" \", len(wordList), \"words loaded.\"\n return wordList",
"def make_stopwords(filepath='stopwords.txt'):\n sw = open(filepath, \"r\")\n my_stopwords = sw.read()\n my_stopwords = my_stopwords.split(\", \")\n sw.close()\n\n all_stopwords = stopwords.words('english')\n all_stopwords.extend(my_stopwords)\n return all_stopwords",
"def load_words():\r\n \r\n my_file = open(\"words.txt\")\r\n words = my_file.read()\r\n words_list = words.split(\" \")\r\n return (words_list)\r\n my_file.close()",
"def loadWords():\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # wordList: list of strings\n wordList = []\n for line in inFile:\n wordList.append(line.strip().lower())\n print(\" \", len(wordList), \"words loaded.\")\n return wordList",
"def load_words():\n \n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # wordlist: list of strings\n wordlist = []\n for line in inFile:\n wordlist.append(line.strip().lower())\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist",
"def load_words(file_path: str) -> List[Word]:\n \n words = load_words_raw(file_path)\n \n \n words = remove_stop_words(words)\n\n \n words = remove_duplicates(words)\n \n return words",
"def readStopList():\n f = None\n try:\n f = open('documents/stoplist.txt', 'r')\n except FileNotFoundError:\n print(\"ERROR: File not found.\")\n exit(-1)\n if f is None:\n print(\"ERROR: Error loading stoplist\")\n exit(-1)\n\n return str(f.read()).split()",
"def load_words():\n print \"Loading word list from file...\"\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r', 0)\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = string.split(line)\n print \" \", len(wordlist), \"words loaded.\"\n return wordlist",
"def load_words():\n print \"Loading word list from file...\"\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r', 0)\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = string.split(line)\n print \" \", len(wordlist), \"words loaded.\"\n return wordlist",
"def load_words():\n print\n \"Loading word list from file...\"\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r', 0)\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = string.split(line)\n print\n \" \", len(wordlist), \"words loaded.\"\n return wordlist",
"def loadWords():\n print \"Loading word list from file...\"\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r', 0)\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = string.split(line)\n print \" \", len(wordlist), \"words loaded.\"\n return wordlist",
"def loadWords():\n print \"Loading word list from file...\"\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r', 0)\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = string.split(line)\n print \" \", len(wordlist), \"words loaded.\"\n return wordlist",
"def load_words():\n print \"Loading word list from file...\"\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r', 0)\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print \" \", len(wordlist), \"words loaded.\"\n return wordlist",
"def loadWords():\r\n print(\"Loading word list from file...\")\r\n # inFile: file\r\n inFile = open(WORDLIST_FILENAME, 'r')\r\n # line: string\r\n line = inFile.readline()\r\n # wordlist: list of strings\r\n wordList = line.split()\r\n\r\n print(\" \", len(wordList), \"words loaded.\")\r\n return wordList"
]
| [
"0.86854786",
"0.85702604",
"0.82870823",
"0.8085797",
"0.8026547",
"0.80048776",
"0.7859866",
"0.7739571",
"0.75412995",
"0.75381225",
"0.7501206",
"0.74701536",
"0.7468609",
"0.7460182",
"0.7407307",
"0.7407307",
"0.7407307",
"0.7394937",
"0.7389229",
"0.7364115",
"0.7340056",
"0.7317379",
"0.726052",
"0.72397876",
"0.72397876",
"0.7236634",
"0.72182614",
"0.72182614",
"0.7213081",
"0.71948975"
]
| 0.8828992 | 0 |
Generate a sorted label list from the first word of each line in the dataset. | def generate_labels():
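    # Collect the unique first tokens (the labels) of every dataset line, then sort them.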
label_set = set([])
for data in load_data():
label = data.split(' ', 1)[0]
label_set.add(label)
labels = list(label_set)
labels.sort()
return labels | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_data_labels(datasets):\n # Split by words\n x_text = datasets['data']\n x_text = [clean_str(sent) for sent in x_text]\n # Generate labels\n labels = [0, 1, 2, 3, 4]\n print(len(x_text))\n for i in range(len(x_text)):\n label = [0 for j in datasets['target_names']] \n label[datasets['target'][i]] = labels[i]\n labels.append(label)\n y = np.array(labels)\n return [x_text, y]",
"def load_data_labels(datasets):\n # Split by words\n x_text = datasets['data']\n x_text = [clean_str(sent) for sent in x_text]\n # Generate labels\n labels = []\n for i in range(len(x_text)):\n label = [0 for j in datasets['target_names']]\n #print('target={}, i={}'.format(datasets['target'], i))\n label[datasets['target'][i]] = 1\n labels.append(label)\n y = np.array(labels)\n return [x_text, y]",
"def _gen_words(sentence, labels):\r\n word = \"\"\r\n words = []\r\n for token, label in zip(sentence, labels):\r\n word += token\r\n if label in [1, 3]:\r\n words.append(word)\r\n word = \"\"\r\n return words",
"def get_labels(train_f_path):\n results = []\n with open(train_f_path, 'r') as f:\n for line in f:\n n_line = line.strip()\n if n_line:\n results.append(n_line.split()[0])\n return results",
"def label_data(data):\n if data == 'cat': return [1, 0]\n elif data == 'dog': return [0, 1]",
"def first_label(self):\r\n return self.labels.split(',')[0]",
"def get_fashion_mnist_labels(labels): #@save\n text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',\n 'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']\n return [text_labels[int(i)] for i in labels]",
"def _labels_of_sentence(self, sentence, split):\n labels = torch.ones(1)\n labels[0] = self.category_int_of_label_string(sentence[0][self.name_to_index_dict['label']]) #\n return labels",
"def format_for_nltk(labels, dataset):\n if len(labels) != len(dataset):\n return []\n return [(v, labels[i]) for i,v in enumerate(dataset)]",
"def feature_label(features):\n f=[]\n l=[]\n for item in features:\n f.append(item[0])\n l.append(item[1])\n return f,l",
"def _string_labels_of_sentence(self, sentence):\n label_strings = []\n ongoing_label = 'O'\n for token_index, token_attribute_list in enumerate(sentence):\n raw_label_string = token_attribute_list[self.task_label_index].strip('*')\n if '(' in raw_label_string:\n ongoing_label = raw_label_string.strip('(').strip(')')\n beginning = True\n #labels[token_index] = self.category_int_of_label_string(ongoing_label)\n if ongoing_label == 'O':\n label_strings.append(ongoing_label)\n else:\n label_strings.append('{}-{}'.format('B' if beginning else 'I', ongoing_label))\n beginning = False\n if ')' in raw_label_string:\n ongoing_label = 'O'\n #bioes_tags = bio2_to_bioes(label_strings)\n bioes_tags = label_strings\n return bioes_tags",
"def make_labels(self, ilines):\n\n llist = []\n for lind, lstr in enumerate(ilines):\n # get label and value list\n rv, label, vals = self.get_label_vals(lstr)\n if rv < 1: continue\n\n nvals = len(vals)\n\n # label = self.find_parent_label(label)\n\n if self.verb > 2: print('++ label: %s, %d val(s)' % (label, nvals))\n\n llist.append(label)\n self.maxcounts[label] = nvals\n self.subjcounts[label] = 0\n\n if not UTIL.vals_are_unique(llist):\n print('** warning: labels are not unique, will use only last values')\n llist = UTIL.get_unique_sublist(llist)\n\n return 0, llist",
"def get_fasta_labels(input_fasta_fp):\r\n\r\n fasta_labels = []\r\n\r\n fasta_f = open(input_fasta_fp, \"U\")\r\n\r\n for label, seq in parse_fasta(fasta_f):\r\n fasta_labels.append(label.split()[0])\r\n\r\n return fasta_labels",
"def _labels_of_sentence(self, sentence, esplit):\n\n labels = torch.zeros(len(sentence))\n for token_index, token_attribute_list in enumerate(sentence):\n label_string = token_attribute_list[self.task_label_index]\n labels[token_index] = self.category_int_of_label_string(label_string)\n return labels",
"def _labels_of_sentence(self, sentence, split):\n #print(self.label_vocab)\n self.category_int_of_label_string('O')\n bioes_tags = self._string_labels_of_sentence(sentence)\n labels = torch.zeros(len(sentence))\n for index, label in enumerate(bioes_tags):\n labels[index] = self.category_int_of_label_string(label)\n return labels",
"def format_labels(self, data):\n ret = []\n for sentence, labels, attr in data:\n sentence_length = len(sentence)\n labels_copy = copy.deepcopy(labels)\n labels_copy = [label[0] for label in labels_copy if type(label) is list ]\n ret.append((sentence, labels_copy, attr))\n return ret",
"def label_sentences(corpus, label_type):\r\n labeled = []\r\n for i, v in enumerate(corpus):\r\n label = label_type + '_' + str(i)\r\n labeled.append(doc2vec.TaggedDocument(v.split(), [label]))\r\n return labeled",
"def sample_labels(self, y, num_of_sents = 5, num_of_samples = 10,\n num_of_classes = 3, start_index = 5, get_prob = True):\n classes = self.classes_()\n ret = []\n for sent in y[:num_of_sents]:\n cur = []\n for word in sent[start_index: start_index + num_of_samples]:\n sorted_prob = am(word)\n cur.append([(classes[ind], word[ind]) if get_prob else classes[ind]\n for ind in sorted_prob[:num_of_classes]])\n ret.append(cur)\n return ret",
"def fix_label(self, label):\n if label is None:\n return None\n assert len(label) == len(self.data)\n fixed_labels = []\n for y, x in zip(label, self.data):\n assert len(y) == len(x)\n encode = self.transform_function(' '.join(x))\n fixed_label = list(chain(*[\n [label] + [PAD_TOKEN_LABEL_ID] * (len(self.transform_function.tokenize(word)) - 1)\n for label, word in zip(y, x)]))\n if encode['input_ids'][0] in self.transform_function.all_special_ids:\n fixed_label = [PAD_TOKEN_LABEL_ID] + fixed_label\n fixed_label += [PAD_TOKEN_LABEL_ID] * (len(encode['input_ids']) - len(fixed_label))\n fixed_label = fixed_label[:self.transform_function.max_seq_length]\n fixed_labels.append(fixed_label)\n return fixed_labels",
"def label(l):\r\n def action(string, loc, tokens):\r\n newlist = [l]\r\n newlist.extend(tokens)\r\n return newlist\r\n return action",
"def sample_labels(self, y, num_of_sents = 5, num_of_samples = 10,\n num_of_classes = 3, start_index = 5, get_prob = True):\n classes = self.classes_()\n ret = []\n am = lambda myList: [i[0] for i in sorted(enumerate(myList), key=lambda x:x[1], reverse= True)]\n\n for sent in y[:num_of_sents]:\n cur = []\n for word in sent[start_index: start_index + num_of_samples]:\n sorted_prob = am(word)\n cur.append([(classes[ind], word[ind]) if get_prob else classes[ind]\n for ind in sorted_prob[:num_of_classes]])\n ret.append(cur)\n return ret",
"def label_sentences(corpus, label_type):\n labeled = []\n for i, v in enumerate(corpus):\n label = label_type + '_' + str(i)\n labeled.append(TaggedDocument(v.split(), [label]))\n return labeled",
"def preprocess_data(extracted_data: List[Tuple[str, str]]) -> List[str]:\n return [f'__label__{data[0]} {clean_formatting(data[1])}' for data in extracted_data]",
"def infer_label(self, string):\n label_bits = []\n if string:\n label_bits.append(string)\n for k, v in self.table.items():\n # The automatic label includes any keys with multiple values\n if len(v) > 1:\n # If a key has multiple values, add both its name and its key.\n # That is, if @key1@ has multiple values, label_bits will have\n # 'key1' + '@key1@' appended. This means the label includes\n # both the key's name and the particular value it has for a\n # given job.\n label_bits.append(re.search(self.key_pattern, k).group(1) + k)\n label = '-'.join(label_bits)\n # Add the label as a key-values pair to the weird data structure\n # This is as if there were in the bp file the line,\n # label\n if not label:\n raise ValueError, \"The label is blank. No label was supplied \"\\\n \"and none can be inferred.\"\n self.table[self.label_key] = [label]",
"def label_sentences(self, corpus, label_type):\n labeled = []\n for i, v in enumerate(corpus):\n label = label_type + '_' + str(i)\n labeled.append(TaggedDocument(v.split(), [label]))\n return labeled",
"def load_data(self):\n with open(self.file_name) as f:\n lines = f.readlines()\n\n labels = list()\n all_dat = list()\n for i, l in enumerate(lines):\n\n labels.append(int(l[0]))\n\n l = gensim.utils.any2unicode(l)\n all_dat.append(LabeledSentence(l.split(\"\\t\")[-1], [i]))\n\n return all_dat, np.asarray(labels)",
"def build_label_vocab(labels: Iterable[str]):\n labels_set = set()\n for l in labels:\n labels_set.add(l)\n label_list = sorted(list(labels_set))\n return label_list",
"def get_labels(df):\n labels = []\n for i in df.index:\n label = sample_label_from_sample_name(i)\n labels.append(label)\n return labels",
"def _load_labels(self, label_path: str) -> List[str]:\n with open(label_path, 'r') as f:\n return [line.strip() for _, line in enumerate(f.readlines())]",
"def get_labels(self) -> Set[str]:"
]
| [
"0.67625064",
"0.67520624",
"0.67273957",
"0.6714275",
"0.6602932",
"0.64764017",
"0.6445512",
"0.6442318",
"0.64260095",
"0.64215446",
"0.6377378",
"0.63764703",
"0.63636106",
"0.62697333",
"0.6204013",
"0.6184638",
"0.616843",
"0.6156621",
"0.6108328",
"0.6107273",
"0.60736483",
"0.6067465",
"0.6058491",
"0.6047519",
"0.60405314",
"0.60367185",
"0.60268945",
"0.602022",
"0.60016274",
"0.5943446"
]
| 0.7592209 | 0 |
Write labels to file '../data/labels.txt', one label per line. | def write_labels():
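    # Write the sorted labels produced by generate_labels(), one per line.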
with open('../data/labels.txt', 'w') as labels_file:
labels = generate_labels()
labels_file.write('\n'.join(labels)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _write_labels(self, labels: List[str], labels_path: Path):\n labels_path.write_text(escape_line_delimited_texts(labels))",
"def _write_labels(self, labels: List[str], labels_path: Path):\n labels_path.write_text(escape_line_delimited_texts(labels))",
"def SaveLabels(filepath, labels):\n # 1) Create a string with all the text to be stored\n text = '\\n'.join(labels)\n\n # 2) Open the datafile and save the text\n with open(filepath, 'w') as outfile:\n outfile.write(text)",
"def write_labels_txt(labels: pd.DataFrame, path: str):\n\n # If the file containing the labels already exist, delete it\n if os.path.isfile(path):\n print('\\nA labels file already exists at {}, deleting it...'.format(path))\n os.remove(path)\n\n # Write the names of the labels on a txt\n labels.to_csv(path, header=None, index=None, sep=' ', mode='a')\n\n print('\\nThe labels file has been written at', path)",
"def write_label_file(labels_to_class_names, dataset_dir, filename='labels.txt'):\n labels_filename = os.path.join(dataset_dir, filename)\n with tf.gfile.Open(labels_filename, 'w') as f:\n for label in labels_to_class_names:\n class_name = labels_to_class_names[label]\n f.write('%d:%s\\n' % (label, class_name))",
"def write_labels_file(labels_to_class_names, dataset_dir,\n filename='labels.txt'):\n labels_path = os.path.join(dataset_dir, filename)\n with open(labels_path, 'w') as f:\n for label in labels_to_class_names:\n class_name = labels_to_class_names[label]\n f.write('%d:%s\\n' % (label, class_name))",
"def write_label_file(labels_to_class_names, labels_filename):\n with tf.gfile.Open(labels_filename, \"w\") as f:\n for label in labels_to_class_names:\n class_name = labels_to_class_names[label]\n f.write('%d:%s\\n'%(label, class_name))",
"def output_classLabel_to_txt(save_path):\n file_obj = open(save_path,'w')\n length = len(class_label)\n for i in range(0,length):\n line = '%d:%s'%(i,class_label[i])\n file_obj.writelines(line+'\\n')\n return True",
"def write_label(filename, label, verbose=None):\n\n with open(filename, 'wb') as fid:\n n_vertices = len(label.vertices)\n data = np.zeros((n_vertices, 5), dtype=np.float)\n data[:, 0] = label.vertices\n data[:, 1:4] = label.coords # self.pos #1e3 *\n data[:, 4] = label.values\n fid.write(b(\"#%s\\n\" % label.comment))\n fid.write(b(\"%d\\n\" % n_vertices))\n for d in data:\n fid.write(b(\"%d %f %f %f %f\\n\" % tuple(d)))\n return label",
"def writeLabel(self, label):\r\n\r\n # Label declaration.\r\n self.filename.write(\"({}:{})\\n\".format(self.actualFile.upper(), label.upper()))",
"def write_label(self, label):\n self._write_line('label ' + label) # TODO generate unique labels?",
"def writeLabels(labels, filepath, includeAlpha = True):\n def label2line(label):\n # convert lower case category to KITTI categories\n category = label['category']\n if category == 'dontcare':\n category = 'DontCare'\n else:\n category = category[0].upper() + category[1:]\n\n # compute alpha if required\n if includeAlpha:\n # set to object orientation\n alpha = label['box3D']['rotation_y']\n # adjust to X/Z observation angle of object center\n alpha -= -math.atan2(label['box3D']['location']['z'], label['box3D']['location']['x']) - 1.5*math.pi\n # wrap to +/-Pi\n alpha = pyNormalizeAngle(alpha)\n # convert to string\n alpha = '%.2f' % alpha\n else:\n # set to KITTI default (invalid) value\n alpha = '-10'\n\n label_line = '%(category)s %(truncated).2f %(occluded)d %(alpha)s %(left).2f %(top).2f %(right).2f %(bottom).2f %(height).2f %(width).2f %(length).2f %(x).2f %(y).2f %(z).2f %(rotation_y).2f %(score).2f\\n' % {\n 'category': category,\n 'truncated': label['info']['truncated'],\n 'occluded': -1, # invalid value to be ignored by KITTI evaluation\n 'alpha': alpha,\n 'left': label['box2D']['left'],\n 'top': label['box2D']['top'],\n 'right': label['box2D']['right'],\n 'bottom': label['box2D']['bottom'],\n 'height': label['box3D']['dimensions']['height'],\n 'width': label['box3D']['dimensions']['width'],\n 'length': label['box3D']['dimensions']['length'],\n 'x': label['box3D']['location']['x'],\n 'y': label['box3D']['location']['y'] + label['box3D']['dimensions']['height'] / 2.0,\n 'z': label['box3D']['location']['z'],\n 'rotation_y': label['box3D']['rotation_y'],\n 'score': label['info']['weight']*100, # multiply by 100 to avoid precision loss\n }\n return label_line\n\n with open(filepath, mode='w') as f:\n for label in labels:\n f.write(label2line(label))",
"def load_labels():\n filename = os.path.join(config['inference']['model_dir'], 'output_labels.txt')\n global labels\n labels = [line.rstrip() for line in tf.gfile.FastGFile(filename)]",
"def add_label_info(filepath, label_filepath):\n label_lines = []\n with open(label_filepath, 'r') as f:\n inside_label = False\n for line in f:\n stripped_line = line.strip()\n if not inside_label and stripped_line == '__label__':\n inside_label = True\n elif inside_label:\n # stop if blank line or next section starts\n if not stripped_line or line.startswith('__'):\n break\n # save label content (in case it's the last line, force newline)\n label_lines.append(f'{stripped_line}\\n')\n\n with open(filepath, 'r') as f:\n # create a temporary file with the modified content before it replaces the original file\n temp_dir = tempfile.mkdtemp()\n try:\n temp_filepath = os.path.join(temp_dir, 'test.p8')\n with open(temp_filepath, 'w') as temp_f:\n inside_label = False\n for line in f:\n stripped_line = line.strip()\n if inside_label:\n # reset inside_label if blank line or next section starts\n if not stripped_line or line.startswith('__'):\n inside_label = False\n else:\n temp_f.write(line)\n if stripped_line == '__label__':\n inside_label = True\n # immediately print all label lines\n for label_line in label_lines:\n temp_f.write(label_line)\n\n shutil.copy(temp_filepath, filepath)\n finally:\n shutil.rmtree(temp_dir)",
"def write_feature_labels(output, feature_labels):\n with open(os.path.join(output, 'features.list'), 'w') as out_file:\n out_file.write('\\n'.join(feature_labels))",
"def write_label(self, label: str) -> None:\n self._write(f'({self._file_name}${label})')",
"def save_labels_to_disk(labels: list, label_path: str):\n\n with open(label_path, \"w\") as result_file:\n wr = csv.writer(result_file, dialect=\"excel\")\n wr.writerows(labels)",
"def create_labels(filename, class_indices):\n \n _logger.debug(\"Mapping labels\")\n label={}\n label['category']=[]\n for key in class_indices:\n label['category'].append({\n 'name' : key,\n 'index' : class_indices[key]\n })\n label_path = os.path.join(config.TRAINED_MODELS_DATA, filename)\n with open(os.path.join(label_path, 'labels.txt'), 'w') as outfile:\n json.dump(label, outfile)\n return label_path",
"def create_labels_file(mapping_file, metadata_category, labels_file, simple_id=True, metadata_value=None):\n label_dict = ml_parse.parse_metadata_category_from_mapping_file(mapping_file, \\\n metadata_category)\n output = open(labels_file, 'w')\n output.write('label\\n')\n for key, value in label_dict.iteritems(): \n if simple_id: key = key.split('.')[0] \n if metadata_value is not None: value = str(value) in metadata_value\n output.write('%s\\t%s\\n' % (key, str(value)))\n output.close()",
"def write_processed_data_to_file(labeled_texts: List[Tuple[list, str]], file):\n\n try:\n for text, label in labeled_texts:\n output_tagged_sents(text, out=file)\n print(f'#{label}#', file=file)\n return True\n except IOError:\n print('Could not write to stream', file)\n return False",
"def generate_labels(path_to_classes: str, path_to_dataset: str):\n\n print('Generating the labels...')\n\n path_to_labels = os.path.join(path_to_dataset, 'labels')\n\n if not os.path.isdir(path_to_labels):\n print('Creating labels folder at {}...'.format(path_to_labels))\n os.makedirs(path_to_labels)\n\n path_to_csv = os.path.join(path_to_labels, 'class_name_to_number.csv')\n path_to_txt = os.path.join(path_to_labels, 'labels.txt')\n\n # Read the list of characters into a dataframe\n classes = pd.read_csv(path_to_classes)\n\n # Write the class-label mapping to csv file\n write_class_label_map(classes, path_to_csv)\n\n # Write the labels to txt file\n write_labels_txt(pd.DataFrame(classes['Unicode']), path_to_txt)",
"def export_labels(self, export_dir: str, label_filename: str = 'labels.txt'):\n if not tf.io.gfile.exists(export_dir):\n tf.io.gfile.makedirs(export_dir)\n\n label_filepath = os.path.join(export_dir, label_filename)\n tf.compat.v1.logging.info('Saving labels in %s', label_filepath)\n with tf.io.gfile.GFile(label_filepath, 'w') as f:\n f.write('\\n'.join(self._label_names))",
"def dump_labels(label_indices, labels_path):\n\n with open(labels_path, 'w') as json_file:\n json.dump(label_indices, json_file)",
"def _labels_for_write(self, labels):\n if labels == \"all\":\n labels = list(self.labels)\n elif not all(label in self.labels for label in labels):\n for label in labels:\n if label not in self.labels:\n raise ValueError(\n \"The label {} is not present in the file\".format(label)\n )\n return labels",
"def write_label_file(file_path, over_write_existing=False, root_path=None):\n\n if root_path:\n file_path = os.path.join(root_path, file_path)\n else:\n root_path = os.path.dirname(file_path)\n\n tracks = get_tracks(path_to_file=file_path)\n\n labels_file = os.path.join(root_path, 'labels.txt')\n if os.path.isfile(labels_file) and not over_write_existing:\n raise IOError('Can\\'t create [%s]. This file already '\n 'exists. Set the overwrite option' % labels_file)\n with open(labels_file, 'w') as f_pt:\n if os.path.isfile(labels_file) and over_write_existing:\n f_pt.truncate()\n for track in tracks[:-1]:\n f_pt.write(\"%s\\n\" % track)\n f_pt.write(\"%s\" % tracks[-1])",
"def _reflow_labels(self, filename=\"Dockerfile\"):\n\n dfp = DockerfileParser(path=filename)\n labels = dict(dfp.labels) # Make a copy of the labels we need to add back\n\n # Delete any labels from the modeled content\n for key in dfp.labels:\n del dfp.labels[key]\n\n # Capture content without labels\n df_content = dfp.content.strip()\n\n # Write the file back out and append the labels to the end\n with open(filename, 'w') as df:\n df.write(\"%s\\n\\n\" % df_content)\n if labels:\n df.write(\"LABEL\")\n for k, v in labels.iteritems():\n df.write(\" \\\\\\n\") # All but the last line should have line extension backslash \"\\\"\n escaped_v = v.replace('\"', '\\\\\"') # Escape any \" with \\\"\n df.write(\" %s=\\\"%s\\\"\" % (k, escaped_v))\n df.write(\"\\n\\n\")",
"def label_file(input_file):\n file_name, file_ext = os.path.splitext(input_file)\n output_file = file_name + \".label\" + file_ext\n\n # read input file and save them in dict\n features = load_protobuf(input_file)\n\n # for each obstacle ID, sort dict by their timestamp\n fea_trajs = build_trajectory(features)\n\n # for each obstacle ID, label them, remove record cannot be labeled\n for fea_key, fea_traj in fea_trajs.items():\n fea_traj = fea_trajs[fea_key]\n fea_traj = TrajectoryToSample.clean(fea_traj)\n fea_traj = TrajectoryToSample.label(fea_traj)\n for i, fea in enumerate(fea_traj):\n if not fea.HasField('label_update_time_delta'):\n del fea_traj[i]\n continue\n if fea.label_update_time_delta < parameters['feature']['threshold_label_time_delta']:\n del fea_traj[i]\n fea_trajs[fea_key] = fea_traj\n # save them in the output file with the same format as the input file\n save_protobuf(output_file, fea_trajs.values())",
"def add_labels(self, labels: dict):\n self.status = \"Creating labels\"\n for lname, value in labels.items():\n self.labels.add_label(lname, value)",
"def create_label_map(self, outpath):\n cnt = 1\n with open(outpath, 'w') as fp:\n for itm in self.clazzes:\n fp.write('item {\\n')\n fp.write('\\tname: \"{}\"\\n'.format(itm))\n fp.write('\\tid: {}\\n'.format(cnt))\n fp.write('}\\n')\n cnt += 1",
"def write_output(label1, label2, label3, submission_file):\n with open(submission_file, 'w') as f:\n f.write('Id,Bound'+ '\\n')\n for index, lab in enumerate(label1):\n f.write(str(index) + ',' + str(int(lab)) + '\\n')\n for index, lab in enumerate(label2):\n f.write(str(len(label1) + index) + ',' + str(int(lab)) + '\\n')\n for index, lab in enumerate(label3):\n f.write(str(len(label1) + len(label2) + index) + ',' + str(int(lab)))\n if index < len(label3) - 1:\n f.write('\\n')"
]
| [
"0.8117613",
"0.8117613",
"0.7926602",
"0.78796244",
"0.77901435",
"0.7717908",
"0.75571084",
"0.73993284",
"0.7089501",
"0.70833546",
"0.70000726",
"0.6994793",
"0.6964666",
"0.69481635",
"0.68660444",
"0.68279904",
"0.6752153",
"0.66779125",
"0.6638492",
"0.6627301",
"0.66157776",
"0.65944356",
"0.6570272",
"0.6557797",
"0.6497534",
"0.64523906",
"0.64088863",
"0.6371773",
"0.63593596",
"0.6357438"
]
| 0.92478794 | 0 |
Generate a corpus from the dataset by removing the label from every question. | def generate_corpus():
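    # Assumes every line is "<label> <question>"; keep the text after the first space, lowercased.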
data = load_data()
questions = [s.split(' ', 1)[1].lower() for s in data]
return questions | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_text_classifier_del_training_samples_all(self):\n pass",
"def create_corpus(df):\r\n corpus=[]\r\n for tweet in tqdm(df['text']):\r\n words=[word.lower() for word in word_tokenize(tweet) if((word.isalpha()==1))]\r\n corpus.append(words)\r\n return corpus",
"def test_text_classifier_del_training_samples(self):\n pass",
"def get_corpus():\n all_text = []\n\n for _, _, files in os.walk(DATA_DIRECTORY):\n for f in files:\n with open(os.path.join(DATA_DIRECTORY, f), 'r') as article:\n # Quotation marks rarely come out as pairs in finished chains.\n # So we remove them before adding the article text:\n all_text.append(re.sub(r'[„“]', '', article.read()))\n\n return markovify.Text(\"\".join(all_text), state_size=2)",
"def test_text_classifier_del_testing_samples_all(self):\n pass",
"def load_delete_corpus():\n corpus = set() # set<list<action, status, sentence>>\n with open(os.path.join(BASE, \"data/corpus.csv\")) as fp:\n for line in fp:\n corpus.add(line.split(\",\"))\n return corpus",
"def test_text_classifier_del_testing_samples(self):\n pass",
"def generate_corpus(model, sample):\r\n \r\n dl_corpus = []\r\n for word in sample:\r\n if word in model:\r\n dl_corpus.append(model[word])\r\n else:\r\n dl_corpus.append([0]*VECTOR_DIM)\r\n\r\n return [dl_corpus]",
"def preprocess_corpus(self) -> List[str]:\n return self.tidy_text(self.corpus)",
"def generate(size, data_dim=5, n_phrase_labels=4, n_words=3,\n n_phrase_words=3, n_phrases=5, label_noise=0.,\n min_sent_len=5, max_sent_len=5, tag_end=True):\n assert n_words < 256\n assert max_sent_len >= n_phrase_words\n global dictionary, phrases\n\n # generate dictionary\n dictionary = uniform(-1.0, 1.0, size=(n_words, data_dim))\n\n # generate n_phrases unique word sequences of length n_phrase_words\n print \"Generating %d phrases\" % n_phrases\n phrases = []\n phrase_labels = []\n while len(phrases) != n_phrases:\n phrases = np.unique(np.array([\"\".join(map(chr, randint(n_words, size=n_phrase_words)))\n for i in xrange(n_phrases)], dtype=np.object))\n assert np.unique(map(len, phrases)) == n_phrase_words\n phrase_labels = 1+randint(n_phrase_labels-1, size=n_phrases)\n\n # generate 'sentences'\n print \"Generating %d sentences\" % sum(size)\n Xind = []\n Y = []\n for i in xrange(sum(size)):\n while True:\n sent_len = randint(min_sent_len, max_sent_len+1)\n sent = \"\".join(map(chr, randint(n_words, size=sent_len)))\n if contains_any_phrase(sent, phrases):\n print \".\",\n break\n Y.append(np.zeros(sent_len,dtype=np.int))\n Xind.append(sent)\n\n # generate labels for dataset\n print \"Generating labels for the sentences...\"\n for phrase, plabel in zip(phrases, phrase_labels):\n for idx, sent in enumerate(Xind):\n start = 0\n while True:\n sidx = sent.find(phrase, start)\n if sidx < 0:\n break\n if tag_end:\n Y[idx][sidx+len(phrase)-1] = plabel\n else:\n Y[idx][sidx] = plabel\n start += 1\n\n print \"Trafo...\"\n # transform dataset to code\n if data_dim > 1:\n X = [[dictionary[ord(c)] for c in sent] for sent in Xind]\n else:\n X = [[ord(c) for c in sent] for sent in Xind]\n\n Xtrain, Xtest = X[:size[0]], X[size[0]:]\n Ytrain, Ytest = Y[:size[0]], Y[size[0]:]\n\n # training label noise\n for sent in Ytrain:\n mask = uniform(size=sent.size) < label_noise\n sent[mask] = randint(n_phrase_labels, size=mask.sum())\n print \"Done.\"\n\n return Xtrain, Xtest, Ytrain, Ytest",
"def test_corpus_labeling(self):\n corpusName = \"test\"\n built_corpus_Path = corpus_tools.getDataPath(corpusName)\n filename = built_corpus_Path + \"-GT\"\n reader = LinguoDatasetReader()\n with tempfile.TemporaryDirectory() as temp_dir:\n outpath = temp_dir + \"-labeled\"\n corpus_tools.labelCorpus(filename, outpath,\n g_label=0, ug_type=\"WS\")\n original = corpus_tools.load_tokenized_corpus(filename)\n loaded = reader.read(outpath)\n for original_sent, loaded_sent in zip(original, loaded):\n self.assertEqual(loaded_sent.fields[\"g_label\"].label,\n \"ungrammatical\")\n self.assertEqual(loaded_sent.fields[\"ug_type\"].label, \"WS\")\n plain_loaded = [str(token) for\n token in loaded_sent.fields[\"sentence\"].tokens]\n self.assertEqual(plain_loaded, original_sent)",
"def generate_corpus(series, documents):\r\n all_words = []\r\n for data_chunk in pd.read_csv(documents, chunksize=10000):\r\n for record in data_chunk.itertuples(index=True, name='Pandas'):\r\n text = getattr(record, 'text')\r\n lang = getattr(record, 'language')\r\n words = text.split(\" \")\r\n\r\n if lang == 'en':\r\n if series != None:\r\n show = getattr(record, 'category')\r\n if show == series:\r\n all_words.append(words)\r\n if series == None:\r\n all_words.append(words)\r\n \r\n return all_words",
"def prepare_corpus(self, final_training):\n if final_training:\n df_seq = self.data_processor.create_user_click_sequence()\n else:\n df_seq = self.data_processor.create_user_click_sequence(\n end_date=self.config[\"test_split_date\"]\n )\n sentences = df_seq[\"merchant_seq\"].values.tolist()\n sentences = [list(map(str, sent)) for sent in sentences]\n return sentences",
"def fit(self, data):\n self.uniq_words = set()\n results = Parallel(n_jobs=self.n_jobs)(delayed(self._get_words)(sentence) for sentence in data)\n for s in results:\n self.uniq_words = self.uniq_words.union(s)\n self.uniq_words = list(self.uniq_words)\n self.uniq_words.append('Unknown')",
"def generate_labels():\n label_set = set([])\n for data in load_data():\n label = data.split(' ', 1)[0]\n label_set.add(label)\n labels = list(label_set)\n labels.sort()\n return labels",
"def transform(self, dataset, labels):\n print(f\"Dropping {len(self.deficient)} deficient features...\")\n dataset.drop(columns=self.deficient, inplace=True)\n print(f\"Scanning {len(dataset)} samples for duplicates...\")\n duplicates = dataset.duplicated()\n print(f\"Dropping {sum(duplicates)} duplicate samples...\")\n dataset.drop(index=dataset.index[duplicates], inplace=True)\n dataset.reset_index(drop=True, inplace=True)\n labels.drop(labels=labels.index[duplicates], inplace=True)\n labels.reset_index(drop=True, inplace=True)\n return dataset, labels",
"def generate_corpus(self, text):\n if isinstance(text, str):\n sentences = self.sentence_split(text)\n else:\n sentences = []\n for line in text:\n sentences += self.sentence_split(line)\n passing = filter(self.test_sentence_input, sentences)\n runs = map(self.word_split, passing)\n return runs",
"def set_up_data():\r\n \r\n X, Y = pretreatment.import_dataset()\r\n \r\n print('Applying cleansing...')\r\n X = pretreatment.pretreatment(X)\r\n Y = pretreatment.pretreatment(Y)\r\n \r\n indice = [i for i in range(len(X)) if (len(X[i]) > SENTENCE_LENGTH-2 and len(X[i]) < SENTENCE_LENGTH+1 and len(Y[i]) > SENTENCE_LENGTH-2 and len(Y[i]) < SENTENCE_LENGTH+1)]#(len(X[i]) > SENTENCE_LENGTH and len(X[i]) < 2 * SENTENCE_LENGTH and len(Y[i]) > SENTENCE_LENGTH and len(Y[i]) < 2 * SENTENCE_LENGTH)]\r\n X = [X[i] for i in indice]\r\n Y = [Y[i] for i in indice]\r\n \r\n X = pretreatment.standardize_sentence_length(X)\r\n Y = pretreatment.standardize_sentence_length(Y)\r\n \r\n print('Computing the corpus sizes...')\r\n compute_T(X, 'english')\r\n compute_T(Y, 'french')\r\n compute_S(X, 'english')\r\n compute_S(Y, 'french')\r\n compute_N(X, 'french')\r\n compute_N(Y, 'english')\r\n \r\n print('English corpus: %d tokens' % T_ENGLISH)\r\n print('French corpus: %d tokens' % T_FRENCH)\r\n print('English sentence length: %d' % S_ENGLISH)\r\n print('French sentence length: %d' % S_FRENCH)\r\n print('Number of sentences (both english and french): %d / %d' % (N_ENGLISH, N_FRENCH))\r\n \r\n print('Converting in one hot vectors')\r\n global CORPUS_ENGLISH, CORPUS_FRENCH\r\n params_ENGLISH = (N_ENGLISH, S_ENGLISH, T_ENGLISH)\r\n params_FRENCH = (N_FRENCH, S_FRENCH, T_FRENCH)\r\n X, CORPUS_ENGLISH= treatment.convert_to_one_hot(X, params_ENGLISH)\r\n Y, CORPUS_FRENCH= treatment.convert_to_one_hot(Y, params_FRENCH)\r\n \r\n return (X, Y)",
"def load_data_labels(datasets):\n # Split by words\n x_text = datasets['data']\n x_text = [clean_str(sent) for sent in x_text]\n # Generate labels\n labels = []\n for i in range(len(x_text)):\n label = [0 for j in datasets['target_names']]\n #print('target={}, i={}'.format(datasets['target'], i))\n label[datasets['target'][i]] = 1\n labels.append(label)\n y = np.array(labels)\n return [x_text, y]",
"def load_data_labels(datasets):\n # Split by words\n x_text = datasets['data']\n x_text = [clean_str(sent) for sent in x_text]\n # Generate labels\n labels = [0, 1, 2, 3, 4]\n print(len(x_text))\n for i in range(len(x_text)):\n label = [0 for j in datasets['target_names']] \n label[datasets['target'][i]] = labels[i]\n labels.append(label)\n y = np.array(labels)\n return [x_text, y]",
"def corpus_group():",
"def clean(corpus):\n # Initiate clean_corpus\n clean_corpus = [] \n \n for speech in corpus:\n \n # Removes meaningless intro \n speech = speech[5:] \n\n for i in range(len(speech)):\n # Removes 'meaningless text hear (min:sec)\\n' at the beginning of each paragraph\n speech[i] = speech[i][speech[i].find('\\n') + 1:] \n # Replaces brackets with paranthesis\n speech[i] = speech[i].replace('[', '(') \n speech[i] = speech[i].replace(']', ')')\n # Removes meaningless text in parantheses\n speech[i] = re.sub(r'\\([^)]*\\)', '', speech[i]) \n\n # Join all of the paragraphs into one speech\n speech = ','.join(speech) \n\n clean_corpus.append(speech)\n \n # Combined all of the speeches into one document\n \n if len(clean_corpus) == 1:\n clean_corpus = clean_corpus[0]\n if len(clean_corpus) == 2:\n clean_corpus = clean_corpus[0] + clean_corpus[1]\n if len(clean_corpus) == 3:\n clean_corpus = clean_corpus[0] + clean_corpus[1] + clean_corpus[2]\n if len(clean_corpus) == 8:\n clean_corpus = clean_corpus[0] + clean_corpus[1] + clean_corpus[2] + clean_corpus[3] + clean_corpus[4] + \\\n clean_corpus[5] + clean_corpus[6] + clean_corpus[7]\n \n return clean_corpus",
"def makedata():\n # train\n print('Clean Train Dataset and separate questions')\n df = pd.read_csv(TRAIN_DATASET).replace(np.nan, ' ')\n t = df.shape[0] * 2\n print t\n df['question1'] = cleanText(df['question1'])\n df['question2'] = cleanText(df['question2'])\n\n df.to_csv(os.path.join(rootpathdata_cleaned, 'train.csx'), index=False)\n overallquestions = df['question1'].tolist() + df['question2'].tolist()\n tpm = pd.DataFrame()\n tpm['question'] = overallquestions\n tpm.to_csv(os.path.join(rootpathdata_cleaned, 'train_allquestions.csx'), index=False)\n # test\n\n print('Clean Test Dataset and separate questions')\n df = pd.read_csv(TEST_DATASET).fillna(' ')\n t1 = df.shape[0] * 2\n df['question1'] = cleanText(df['question1'])\n df['question2'] = cleanText(df['question2'])\n df.to_csv(os.path.join(rootpathdata_cleaned, 'test.csx'), index=False)\n\n overallquestions += df['question1'].tolist() + df['question2'].tolist()\n tpm = pd.DataFrame()\n tpm['question'] = overallquestions\n tpm.to_csv(os.path.join(rootpathdata_cleaned, 'test_allquestions.csx'), index=False)\n print len(overallquestions), t1 + t",
"def prepare_datasets(data, validation_data, language, target, tfidf_parameters, top_k_words, positive_count, negative_count):\n vectorizer = TfidfVectorizer(**tfidf_parameters)\n\n # Learn vocabulary from training texts and vectorize training texts.\n x_train = vectorizer.fit_transform(data['clean'])\n train_labels = data[target]\n \n # Vectorize validation texts.\n x_val = vectorizer.transform(validation_data['clean'])\n val_labels = validation_data[target]\n\n # Select top words of the vectorized features.\n selector = SelectKBest(f_classif, k=min(top_k_words, x_train.shape[1]))\n selector.fit(x_train, train_labels)\n x_train = selector.transform(x_train).astype('float32')\n x_val = selector.transform(x_val).astype('float32')\n\n output = [\n ' '.join([\n language,\n target,\n str(positive_count),\n str(negative_count)\n ]),\n x_train,\n train_labels,\n x_val,\n val_labels\n ]\n\n return output",
"def train(self, corpus):\n for sentence in corpus.corpus:\n for datum in sentence.data: \n self.unigramCounts[datum.word] += 1\n self.totalCount += 1",
"def test_unlabeled_corpus_saving(self):\n\n original_corpus = [[\"Yo\", \"soy\", \"una\", \"oración\", \"gramatical\", \",\",\n \"regocíjense\", \"en\", \"mi\", \"glória\", \".\"],\n [\"Yo\", \"ungrammatical\", \"es\", \"oración\", \",\"\n \"tú\", \"presumido\", \"elitista\", \".\"]]\n with tempfile.TemporaryDirectory() as temp_dir:\n fileName = temp_dir + \"testfile\"\n corpus_tools.save_tokenized_corpus(fileName, original_corpus)\n loaded_corpus = corpus_tools.load_tokenized_corpus(fileName)\n assert len(original_corpus) == len(loaded_corpus)\n for original_sent, loaded_sent in zip(original_corpus,\n loaded_corpus):\n self.assertEqual(original_sent, loaded_sent)",
"def train(self, corpus):\n for sentence in corpus.corpus:\n cleanSentence = sentence.cleanSentence()\n for datum in cleanSentence.data:\n token = datum.word\n self.unigramCounts[token] = self.unigramCounts[token] + 1\n self.total += 1\n\n i = 0\n while i < len(sentence.data) - 1:\n token = str(cleanSentence.get(i))\n self.followingWords[token].add(str(cleanSentence.get(i+1)))\n i += 1\n\n i = 1\n while i < len(sentence.data):\n bigram = str(cleanSentence.get(i-1)) + \" \" + str(cleanSentence.get(i))\n self.bigramCounts[bigram] = self.bigramCounts[bigram] + 1\n\n self.precedingWords[str(cleanSentence.get(i))].add(str(cleanSentence.get(i-1)))\n i += 1\n self.precedingWordsTotal = sum(map(lambda x: len(x), self.precedingWords.values()))\n\n i = 2\n while i < len(sentence.data):\n trigram = str(cleanSentence.get(i-2)) + \" \" + str(cleanSentence.get(i-1)) + \" \" + str(cleanSentence.get(i))\n self.trigramCounts[trigram] = self.trigramCounts[trigram] + 1\n i += 1\n\n #print('precedingWords')\n #print(self.precedingWords)\n #print('followingWords')\n #print(self.followingWords)\n #print('unigrams')\n #print(self.unigramCounts)\n #print('bigrams')\n #print(self.bigramCounts)\n\n #self.discount(self.trigramCounts)\n #self.discount(self.bigramCounts)\n #self.discount(self.unigramCounts)",
"def get_random_texts(self):\n texts=[]\n nodes=self.get_random_nodes()\n for node in nodes:\n texts+=self.get_corpus_from_node(node)\n return texts",
"def train(self, corpus):\n\n\n temp = \"\"\n for sentence in corpus.corpus:\n\n i = 0\n for datum in sentence.data:\n # print str(sentence.data)\n self.total=self.total+1\n token = datum.word\n self.unigramCounts[token] = self.unigramCounts[token] + 1\n if (i == 0):\n temp = datum.word\n i = i + 1\n continue\n\n i = i + 1\n\n key = temp + \",\" + token\n self.bigramCounts[key] = self.bigramCounts[key] + 1\n # print token\n temp = token\n\n pass",
"def clean_corpus(self, corpus, flatten=True):\n if flatten:\n t = [self.process_text(sent) for sent in list(sent_tokenize(corpus))]\n return [item for sublist in t for item in sublist]\n return [self.process_text(sent) for sent in list(sent_tokenize(corpus))]"
]
| [
"0.6179051",
"0.61695355",
"0.612701",
"0.60080475",
"0.5991426",
"0.5968233",
"0.5966245",
"0.5962449",
"0.5961979",
"0.5824949",
"0.58028716",
"0.57764983",
"0.5754227",
"0.57507855",
"0.5742542",
"0.57408285",
"0.57311505",
"0.57246226",
"0.5599109",
"0.5594712",
"0.5587724",
"0.5578667",
"0.55575216",
"0.55429286",
"0.55353403",
"0.55220175",
"0.5512764",
"0.5501683",
"0.5501139",
"0.5494171"
]
| 0.7546315 | 0 |
Write vocabulary to '../data/vocabulary.txt', each line containing a word and its frequency. | def write_vocabulary():
with open('../data/vocabulary.txt', 'w') as vocabulary_file:
vocabulary = generate_vocabulary()
word_count = sum(vocabulary.values())
print(word_count)
vocabs_str = [("%s %d" % (key, value)) for key, value in vocabulary.items()]
vocabulary_file.write('\n'.join(vocabs_str)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dump_vocab(vocab, path, encoding=\"Utf-8\"):\n with open(path, \"w\", encoding=encoding) as fout:\n for word, freq in vocab:\n fout.write(\"%s\\t%d\\n\" % (word, freq))",
"def write_vocabulary(vocab_processor, outfile):\n vocab_size = len(vocab_processor.vocabulary_)\n with open(outfile, \"w\") as vocabfile:\n for id in range(vocab_size):\n word = vocab_processor.vocabulary_._reverse_mapping[id]\n vocabfile.write(word + \"\\n\")\n print(\"Saved vocabulary to {}\".format(outfile))",
"def write_vocab(vocab, filename):\n print(\"Writing vocab...\")\n with open(filename, \"w\", encoding='utf-8') as f:\n for i, word in enumerate(vocab):\n if i != len(vocab) - 1:\n f.write(\"{}\\n\".format(word))\n else:\n f.write(word)\n print(\"- done. {} tokens\".format(len(vocab)))",
"def get_vocabulary(text_fname, vocab_fname):\n with codecs.open(text_fname,'r','utf-8') as infile, \\\n codecs.open(vocab_fname,'w','utf-8') as outfile: \n\n count_map={}\n for line in infile:\n sent=line.strip().split(' ')\n for w in sent:\n count_map[w]=count_map.get(w,0.0)+1.0\n\n for w,c in count_map.iteritems(): \n outfile.write(u'{}|{}\\n'.format(w,c))",
"def save_vocab(self):\n opts = self._options\n with open(os.path.join(opts.save_path, \"vocab.txt\"), \"w\") as f:\n for i in xrange(opts.vocab_size):\n f.write(\"%s %d\\n\" % (tf.compat.as_text(opts.vocab_words[i]),\n opts.vocab_counts[i]))",
"def save_vocabulary(self, save_dir: str) -> None:\n vocab_f: str = os.path.join(save_dir, 'vocab.tsv')\n with open(vocab_f, 'w') as ofile:\n for i, word_type in enumerate(self.get_instruction_vocabulary()):\n ofile.write(str(i) + '\\t' + word_type + '\\n')",
"def save_vocab(lines, filename):\n data = '\\n'.join(lines)\n file = open(filename, 'w')\n file.write(data)\n file.close()",
"def saveVocabulary(self, filepath, vocabulary):\n\t\timport numpy as np\n\t\tnp.save( filepath, list(vocabulary.items()) )\n\t\tprint ('\\tVocabulary saved in: {}'.format(filepath))",
"def save_vocab(count, name='vocab.txt'):\n pwd = os.getcwd()\n vocabulary_size = len(count)\n with open(os.path.join(pwd, name), \"w\") as f:\n for i in xrange(vocabulary_size):\n f.write(\"%s %d\\n\" % (tf.compat.as_text(count[i][0]), count[i][1]))\n print(\"%d vocab saved to %s in %s\" % (vocabulary_size, name, pwd))",
"def save(self, filename):\n with open(filename, \"w\", encoding=\"utf8\") as f:\n f.write(\n \"\\n\".join(\n [\n w + \" \" + \" \".join([str(x) for x in v])\n for w, v in zip(self._words, self._vecs)\n ]\n )\n )\n print(\"Wrote\", self.n, \"words to\", filename)",
"def save_vocabulary(self, vocab_path):\n index = 0\n if os.path.isdir(vocab_path):\n vocab_file = os.path.join(vocab_path, VOCAB_NAME)\n else:\n vocab_file = vocab_path\n with open(vocab_file, 'w', encoding='utf-8') as writer:\n for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):\n if index != token_index:\n logger.warning('Saving vocabulary to {}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!'.format(vocab_file))\n index = token_index\n writer.write(token + u'\\n')\n index += 1\n return vocab_file",
"def save_vocabulary(path, vocab):\n print('saving vocabulary..')\n with open(path, 'wb') as handle:\n pickle.dump(vocab, handle, protocol=pickle.HIGHEST_PROTOCOL)\n print('vocabulary was saved successfully!')",
"def save_vocab(self, output_path: str):\n logger.info(\"Saving vocab to `{}`\".format(output_path))\n with open(output_path, 'w') as f:\n f.write(\"\\n\".join(self.vocab))",
"def build_vocab(path, fname):\r\n\twords = open(path, 'r', encoding='utf-8').read().split()\r\n\twordCount = Counter(words)\r\n\tif not os.path.exists(pm.vocab_path):\r\n\t\tos.makedirs(pm.vocab_path)\r\n\twith open(pm.vocab_path + fname, 'w', encoding='utf-8') as f:\r\n\t\tf.write(\"{}\\t1000000000\\n{}\\t1000000000\\n{}\\t1000000000\\n{}\\t1000000000\\n\".format(\"<PAD>\", \"<UNK>\", \"<SOS>\", \"<EOS>\"))\r\n\t\tfor word, count in wordCount.most_common(len(wordCount)):\r\n\t\t\tf.write(u\"{}\\t{}\\n\".format(word, count))",
"def vocabulary(self):\n lst = []\n for key in self.frequencies().keys():\n lst.append(key)\n return sorted(lst)\n #for lines in self.lines:\n # line = lines.strip(os.linesep)\n # wordslst = line.split()\n # for word in wordslst:\n # if word not in lst:\n # lst.append(word.lower())\n #return sorted(lst)",
"def create_vocabulary(sentences, path):\n print('creating vocab..')\n\n word_dict = dict(); vocabulary = dict()\n for sentence in sentences:\n for word in nltk.word_tokenize(sentence):\n if word not in word_dict:\n word_dict[word] = ''\n word_dict['<s>'] = ''\n word_dict['</s>'] = ''\n\n with open(path, encoding=\"utf8\") as f:\n for line in f:\n word, vec = line.split(' ', 1)\n if word in word_dict:\n vocabulary[word] = np.fromstring(vec, sep=' ')\n\n print('vocabulary was created successfully!')\n return vocabulary",
"def _build_vocabulary(input_files):\n if FLAGS.vocab_file:\n tf.logging.info(\"Loading existing vocab file.\")\n vocab = collections.OrderedDict()\n with tf.gfile.GFile(FLAGS.vocab_file, mode=\"r\") as f:\n for i, line in enumerate(f):\n word = line.decode(\"utf-8\").strip()\n assert word not in vocab, \"Attempting to add word twice: %s\" % word\n vocab[word] = i\n tf.logging.info(\"Read vocab of size %d from %s\",\n len(vocab), FLAGS.vocab_file)\n return vocab\n\n tf.logging.info(\"Creating vocabulary.\")\n num = 0\n wordcount = collections.Counter()\n for input_file in input_files:\n tf.logging.info(\"Processing file: %s\", input_file)\n for sentence in tf.gfile.FastGFile(input_file):\n wordcount.update(sentence.split())\n\n num += 1\n if num % 1000000 == 0:\n tf.logging.info(\"Processed %d sentences\", num)\n\n tf.logging.info(\"Processed %d sentences total\", num)\n\n words = wordcount.keys()\n freqs = wordcount.values()\n sorted_indices = np.argsort(freqs)[::-1]\n\n vocab = collections.OrderedDict()\n vocab[special_words.EOS] = special_words.EOS_ID\n vocab[special_words.UNK] = special_words.UNK_ID\n for w_id, w_index in enumerate(sorted_indices[0:FLAGS.num_words - 2]):\n vocab[words[w_index]] = w_id + 2 # 0: EOS, 1: UNK.\n\n tf.logging.info(\"Created vocab with %d words\", len(vocab))\n\n vocab_file = os.path.join(FLAGS.output_dir, \"vocab.txt\")\n with tf.gfile.FastGFile(vocab_file, \"w\") as f:\n f.write(\"\\n\".join(vocab.keys()))\n tf.logging.info(\"Wrote vocab file to %s\", vocab_file)\n\n word_counts_file = os.path.join(FLAGS.output_dir, \"word_counts.txt\")\n with tf.gfile.FastGFile(word_counts_file, \"w\") as f:\n for i in sorted_indices:\n f.write(\"%s %d\\n\" % (words[i], freqs[i]))\n tf.logging.info(\"Wrote word counts file to %s\", word_counts_file)\n\n return vocab",
"def save_txt(words,data,fname):\n\n out=open(fname,\"w\")\n\n rows,dims=data.shape\n print(\"{} {}\".format(rows,dims),file=out)\n counter=0\n\n for i,w in enumerate(words):\n print(w,\" \".join((\"{:6f}\".format(x) for x in data[i,:])),file=out)\n out.close()",
"def create_vocabulary(vocabulary_path, words, max_vocabulary_size, normalize_digits=True):\n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s with max size %d\" % (vocabulary_path, max_vocabulary_size))\n vocab = {}\n counter = 0\n for w in words:\n counter += 1\n if counter % 10000 == 0:\n print(\" processing word %d = %s\" % (counter, w))\n word = re.sub(_DIGIT_RE, \"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)\n if len(vocab_list) > max_vocabulary_size:\n vocab_list = vocab_list[:max_vocabulary_size]\n with gfile.GFile(vocabulary_path, mode=\"w\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + \"\\n\")",
"def save_frequency(count_table, input_file):\n # Opens new file to output to\n with open(f\"{input_file}.out\", \"w\") as text:\n # Total sum of every word's occurrence in the file.\n totalCount = sum(count_table.values())\n\n # Loop through each key and corresponding value in the dictionary\n for word, count in count_table.items():\n # Output the word, the count and the relative frequency of the word.\n text.write(f\"{word} {count} {round(count / totalCount, 3)}\\n\")",
"def build_doc_sense_vec(self):\n\t\twith codecs.open(self.vocab_file, encoding='utf-8', mode='r') as infile:\n\t\t\tline = infile.readline()\n\t\t\ti = 0\n\t\t\twhile line:\n\t\t\t\tword = line.split()[0]\n\t\t\t\tif not self.word2IdVocabulary.has_key(word):\n\t\t\t\t\t# print i, word\n\t\t\t\t\t# else:\n\t\t\t\t\tself.word2IdVocabulary[word] = i\n\t\t\t\tif not self.id2WordVocabulary.has_key(i):\n\t\t\t\t\tself.id2WordVocabulary[i] = word\n\t\t\t\tline = infile.readline()\n\t\t\t\ti += 1\n\t\t\tself.vocab_num = len(self.word2IdVocabulary)\n\t\t\tprint \"vocabulary number:\" + str(self.vocab_num)\n\n\t\twith codecs.open(self.vec_file, encoding='utf-8', mode='r') as vecfile:\n\t\t\twith codecs.open(self.vec_out_file, encoding='utf-8', mode='a+') as vec_outfile:\n\n\t\t\t\tfor i, line in enumerate(vecfile):\n\t\t\t\t\tif i % 10000 == 0:\n\t\t\t\t\t\tprint i\n\t\t\t\t\t# if i > 72:\n\t\t\t\t\t# \tbreak\n\t\t\t\t\tif i == 0:\n\t\t\t\t\t\ta, b, c = map(int, line.split()[:3])\n\t\t\t\t\t\tprint('Number of sememes: {}\\n'\n\t\t\t\t\t\t\t 'Number of words: {}\\n'\n\t\t\t\t\t\t\t 'Dimension of vectors: {}'.format(a, b, c))\n\t\t\t\t\telif i > 462667:\n\t\t\t\t\t\tsline = line.strip('\\n').split()\n\t\t\t\t\t\tword = sline[0]\n\t\t\t\t\t\tvector_list = []\n\t\t\t\t\t\tvector_list.append(sline[1:])\n\t\t\t\t\t\tvector_array = np.array(vector_list)\n\t\t\t\t\t\tword_id = self.word2IdVocabulary[word]\n\t\t\t\t\t\tif not self.vectors.has_key(word_id):\n\t\t\t\t\t\t\tself.vectors[word_id] = vector_array\n\t\t\t\t\t\t# vector_mean = np.mean(vector_array, axis=0)\n\t\t\t\t\t\tif not self.vector_mean.has_key(word_id):\n\t\t\t\t\t\t\tself.vector_mean[word_id] = vector_array\n\t\t\t\t\t\t# vec_outfile.write(line)\n\t\t\t\t\telif i > 462887:\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tsline = line.strip('\\n').split()\n\t\t\t\t\t\tword = sline[0]\n\t\t\t\t\t\tsense_num = int(sline[1])\n\t\t\t\t\t\tvectors = sline[2:sense_num*c+2] # (sense_num*c+2)\n\t\t\t\t\t\tvector_list = []\n\t\t\t\t\t\tfor start in range(0, len(vectors), c):\n\t\t\t\t\t\t\tvector_list.append(list(map(float, vectors[start: start+c])))\n\t\t\t\t\t\tvector_array = np.array(vector_list)\n\t\t\t\t\t\tword_id = self.word2IdVocabulary[word]\n\t\t\t\t\t\tif not self.vectors.has_key(word_id):\n\t\t\t\t\t\t\tself.vectors[word_id] = vector_array\n\t\t\t\t\t\tvector_mean = np.mean(vector_array, axis=0)\n\t\t\t\t\t\tif not self.vector_mean.has_key(word_id):\n\t\t\t\t\t\t\tself.vector_mean[word_id] = vector_mean\n\t\t\t\t\t\t'''j = 0\n\t\t\t\t\t\tfor each_sense_vec in vector_array:\n\t\t\t\t\t\t\tif len(vector_array) > 1:\n\t\t\t\t\t\t\t\tnew_line = word + '_' + str(j) + ' ' + np.array2string(each_sense_vec, max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tformatter={'float_kind': lambda x: '%6f' % x})[1:-1] + '\\n'\n\t\t\t\t\t\t\t\tj += 1\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tnew_line = word + ' ' + np.array2string(each_sense_vec, max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t formatter={'float_kind': lambda\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t x: '%6f' % x})[1:-1] + '\\n'\n\n\t\t\t\t\t\t\tvec_outfile.write(new_line)'''\n\n\t\twith codecs.open(self.doc_file, encoding='utf-8', mode='r') as docfile:\n\t\t\twith codecs.open(self.doc_out_file, encoding='utf-8', mode='a+') as doc_outfile:\n\t\t\t\twith codecs.open(self.vec_out_file_bydoc, encoding='utf-8', mode='a+') as vec_outfile_bydoc:\n\t\t\t\t\tprint \"Processing document file......\"\n\t\t\t\t\tline = docfile.readline().strip('\\n')\n\t\t\t\t\twhile 
line:\n\t\t\t\t\t\twords = line.split()\n\t\t\t\t\t\tnew_words = [x for x in words]\n\t\t\t\t\t\tfor i in range(len(words)):\n\t\t\t\t\t\t\tword_id = self.word2IdVocabulary[words[i]]\n\t\t\t\t\t\t\tsense_vecs = self.vectors[word_id]\n\t\t\t\t\t\t\tsense_num = len(sense_vecs)\n\t\t\t\t\t\t\tif sense_num > 1:\n\t\t\t\t\t\t\t\tcontext_words = []\n\t\t\t\t\t\t\t\tfor x in range(i-int(self.context_num), i+int(self.context_num)+1):\n\t\t\t\t\t\t\t\t\tif x != i and 0 <= x < len(words):\n\t\t\t\t\t\t\t\t\t\tcontext_words.append(words[x])\n\t\t\t\t\t\t\t\tsense_index = self.select_attention(context_words, sense_vecs)\n\t\t\t\t\t\t\t\tword_vec_i = sense_vecs[sense_index]\n\t\t\t\t\t\t\t\tnew_wordi = words[i] + '_' + str(sense_index)\n\t\t\t\t\t\t\t\tself.vector_word_doc[new_wordi.encode('utf-8')] = word_vec_i\n\t\t\t\t\t\t\t\tnew_words[i] = new_wordi\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tword_vec_i = sense_vecs[0]\n\t\t\t\t\t\t\t\tself.vector_word_doc[words[i].encode('utf-8')] = word_vec_i\n\t\t\t\t\t\t\tvec_outfile_bydoc.write(new_words[i] + ' ' + np.array2string(word_vec_i, max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t formatter={'float_kind': lambda x: '%6f' % x})[1:-1] + '\\n')\n\n\t\t\t\t\t\tdoc_outfile.write(' '.join(new_words) + '\\n')\n\n\t\t\t\t\t\tline = docfile.readline()\n\n\t\treturn self.vector_word_doc",
"def update_from_vocabulary(self, vocab_path):\n with open(vocab_path, 'r') as vocab_file:\n for word in vocab_file:\n word = word.strip()\n self._add_new_word(word)",
"def save_to_file(self, vocab_file = None):\n if len(self.words_new) > 0:\n # Use file path which is given either by the constructor or this method's argument.\n # This method's argument takes priority.\n if not vocab_file:\n vocab_file = self.vocab_file\n\n if vocab_file:\n self.export_appended = False\n if os.path.exists(vocab_file):\n # Append the data to the existing vocab file.\n self.export_appended = True\n else:\n # If the vocab file is to be newly created, initialize the file with special tokens first.\n with open(vocab_file, 'w', encoding='utf8') as fp:\n for d in special_tokens:\n fp.write(\"%s\\n\" % d)\n\n # Append the newly added data\n with open(vocab_file, 'a', encoding='utf8') as fp:\n for d in self.words_new:\n fp.write(\"%s\\n\" % d)\n self.export_num += 1",
"def save_vocabulary(self):\n out_vocab_file = 'xlnet_vocab.txt'\n if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):\n copyfile(self.vocab_file, out_vocab_file)\n\n return (out_vocab_file,)",
"def creating_feature_vector():\r\n\twordlist = []\r\n\tlabel = \"\"\r\n\tfw = open(\"feature_vector.txt\", \"w+\", encoding = \"utf-8\")\r\n\twith open(\"D:\\\\Python_Prac\\\\wordstag\\\\modules\\\\HI_EN_TRAIN.txt\", \"r\", encoding = \"utf-8\") as f:\r\n\t\tfor line in f:\r\n\t\t\twordlist.append(line)\r\n\t\tfor index, line in enumerate(wordlist):\r\n\t\t\tif line == \"\\n\":\r\n\t\t\t\tcontinue\r\n\t\t\tcontext = line.split(\"\\t\")\r\n\t\t\tlabel = context[1]\r\n\t\t\tfeature_vector = label+\" \"\r\n\t\t\tngram_vector = ngram_frequency(str(context[0]))\r\n\t\t\tfor vector in ngram_vector:\r\n\t\t\t\tfeature_vector += str(vector)+\" \"\r\n\t\t\tfeature_vector += str(is_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_hindi(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_abbr(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_hindi(context[0]))+\" \"\r\n\t\t\tbefore = [0,0,0]\r\n\t\t\tafter = [0,0,0]\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index-i) < 0 or (index-i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tbefore[2-i] = get_word_context(wordlist[index-i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index+i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tafter[2-i] = get_word_context(wordlist[index+i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in before:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfor i in after:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfeature_vector += \"\\n\"\r\n\t\t\tfw.write(feature_vector)\r\n\t\t\tprint(\"Proceeding...\"+str(index+1)+\" of 16683\")\r\n\r\n\tfw.close()",
"def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,\n tokenizer=None, normalize_digits=True):\n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s from data %s\" % (vocabulary_path, data_path))\n vocab = {}\n with gfile.GFile(data_path, mode=\"r\") as f:\n counter = 0\n for line in f:\n counter += 1\n line = line.strip().split('\\t')[0]\n if counter % 100000 == 0:\n print(\" processing line %d\" % counter)\n tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)\n for w in tokens:\n word = re.sub(_DIGIT_RE, \"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n sorted_vocab = sorted(vocab, key=vocab.get, reverse=True)\n vocab_list = _START_VOCAB + sorted_vocab\n if len(vocab_list) > max_vocabulary_size:\n vocab_list = vocab_list[:max_vocabulary_size]\n print(\"Corpus %s has %d tokens, %d uniq words, %d vocab at cutoff %d.\" % (\n data_path, sum(vocab.values()), len(vocab), max_vocabulary_size, vocab[sorted_vocab[max_vocabulary_size - len(_START_VOCAB)]] ) )\n else:\n print(\"Corpus %s has %d tokens, %d uniq words, %d vocab at cutoff %d.\" % (\n data_path, sum(vocab.values()), len(vocab), len(vocab), 0))\n\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + \"\\n\")",
"def create_vocabulary(vocabulary_path, json_vocab_path):\n if not gfile.Exists(vocabulary_path):\n print(\"Transform vocabulary to %s\" % vocabulary_path)\n with gfile.GFile(json_vocab_path, mode=\"rb\") as f:\n jvocab = json.load(f)\n vocab = jvocab['w2id']\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get)\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + b\"\\n\")",
"def save(self, dirname=None):\n self.genio.save(dirname)\n logging.info(\n f'Saved word vectorizations for {dirname}')",
"def save_embeddings(self, filename, binary=True):\n with open(filename, \"wb\", encoding=\"utf8\") as fout:\n fout.write(\"%s %s\\n\" % self._vecs.shape)\n # store in sorted order: most frequent words at the top\n for i, word in enumerate(self._words):\n row = self._vecs[i]\n if binary:\n fout.write(word + b\" \" + row.tostring())\n else:\n fout.write(\n \"%s %s\\n\" % (word, \" \".join(\"%f\" % val for val in row))\n )",
"def build_vocab(sentences_list, vocab_size, visual_fld):\n words = [word for sentence in sentences_list for word in sentence]\n utils.safe_mkdir(visual_fld)\n with open(os.path.join(visual_fld, 'vocab.tsv'), 'w') as fd:\n dictionary = {}\n index_dictionary = {}\n count = [('UNK', -1)]\n count.extend(Counter(words).most_common(vocab_size - 1))\n for index, (word, _) in enumerate(count):\n dictionary[word] = index\n index_dictionary[index] = word\n fd.write(word + '\\n')\n\n return dictionary, index_dictionary"
]
| [
"0.77696854",
"0.76118624",
"0.75237185",
"0.74841684",
"0.7428175",
"0.7263195",
"0.7193403",
"0.7069946",
"0.6974747",
"0.6899833",
"0.67634916",
"0.67428726",
"0.67143416",
"0.66633344",
"0.6374416",
"0.63653886",
"0.6339275",
"0.6330938",
"0.6319669",
"0.631088",
"0.6263414",
"0.62480754",
"0.6216911",
"0.6189386",
"0.61578244",
"0.6152151",
"0.61463964",
"0.6135035",
"0.6123537",
"0.61039567"
]
| 0.8666452 | 0 |
Returns variable value from launch params | def get_action_var_val_from_launch_params(launch_vars, var_name):
filtered_launch_vars = list(
filter(
lambda e: e["name"] == var_name,
launch_vars,
)
)
if len(filtered_launch_vars) > 1:
LOG.error(
"Unable to populate runtime editables: Multiple matches for value of variable '{}'".format(
var_name
)
)
sys.exit(-1)
if len(filtered_launch_vars) == 1:
return filtered_launch_vars[0].get("value", {}).get("value", None)
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getParameter(self, value):\n if value in self.commandLineDefaults:\n return self.commandLineDefaults[value]\n if value in self.defaults:\n return self.defaults[value]\n return None",
"def get_parm_value(parameters, name, env_name, default_value):\n value = parameters.get(name, '')\n return os.environ.get(env_name, default=default_value) if not value else value",
"def gui_get_param(self,param_name):\n return self._tkvars[param_name].get()",
"def getParam(key):\n \n if globalParams == {}:\n warning(\"WARNING: runtime parameters not yet initialized\")\n LoadParams(\"_defaults\")\n \n if key in globalParams.keys():\n return globalParams[key]\n else:\n raise ValueError()",
"def get_var(var_name: str):\n return os.environ[var_name]",
"def get_a_param(self):\n value = self.memory[self.exec_ptr + 1]\n Vm.validate_value(value)\n return value",
"def get_parameter(par_name):\r\n config_file = open('./config.txt', 'r')\r\n lines = config_file.readlines()\r\n for line in lines:\r\n line = line.rstrip('\\n\\r')\r\n if line.startswith(par_name):\r\n return line.split('=')[1]",
"def get_launch_params_filepath():\n try:\n cli_args = sys.argv\n return sys.argv[sys.argv.index(\"--params-file\") + 1]\n except ValueError:\n return \"Failed to parse params file path from command line arguments. Check that --params-file command line argument is specified.\"",
"def __getitem__(self, name):\n return os.environ[name]",
"def get_global_arg(self, key):\n return self.args[key]",
"def parameter(name,default_value=None):\n settings = file(settings_file()).read()\n for line in settings.split(\"\\n\"):\n line = line.strip(\" \\n\\r\")\n if len(line.split(\"=\")) != 2: continue\n keyword,value = line.split(\" = \")\n keyword = keyword.strip(\" \")\n if keyword == name: return eval(value)\n return default_value",
"def params():\n return utils.Params('../experiments/base-model/params.json')",
"def get_setting(param_name, default='None'):\n\n try:\n value = os.environ.get(param_name.upper())\n if value is None:\n value = get_ssm_parameter(param_name.lower())\n # logging.error(f'Param Name: {param_name} not set in env file')\n\n except (IndexError, KeyError) as e:\n logging.warn(e)\n value = default\n\n return value",
"def arg(self, get, default=None):\n req = self.locate(iweb.IRequest)\n return req.args.get(get, [default])[0]",
"def variable(self, name):\n\n status, stdout, stderr = self.__xcall__(['--variable=%s' % name])\n\n if status != 0:\n raise RuntimeError(\"error querying --variable=%s for package `%s': %s\" % (name, self.name, stderr))\n\n return stdout.strip()",
"def get_c_param(self):\n value = self.memory[self.exec_ptr + 3]\n Vm.validate_value(value)\n return value",
"def Var(key):\n return vars[key]",
"def getParameter(self, name):",
"def __getitem__(self, key):\n return os.environ[key]",
"def _GetRunParameters() -> dict[str, str]:\n result = {}\n for kv in FLAGS.ycsb_run_parameters:\n param, value = kv.split('=', 1)\n result[param] = value\n return result",
"def parameters(tel_inst):\n\n if tel_inst not in implemented_instruments:\n print(tel_inst+' not implemented. Exiting...')\n exit()\n \n tel_param = params[tel_inst]\n \n return tel_param",
"def constructor_env_variables(loader, node) -> Any: # type: ignore\n value = loader.construct_scalar(node)\n match = pattern.findall(value) # to find all env variables in line\n if match:\n full_value = value\n for g in match:\n (env_var, default_val) = g.split(\"::\")\n value = os.environ.get(env_var, default_val)\n full_value = full_value.replace(f\"${{{g}}}\", value)\n if not full_value:\n full_value = None\n _logger.debug(f\"injected ENV parameter {env_var} resolved to {value}\")\n return full_value\n return value",
"def get_airflow_variable(key: str) -> str:\n return models.Variable.get(key)",
"def DoIt(self, host, vm, variable):\n\n vm = Operation.GetVm(host, vm)\n\n variableComponents = variable.split('.', 1)\n device = vm.GetDevice(variableComponents[0])\n if device:\n if len(variableComponents) > 1:\n return rec_getattr(device, variableComponents[1])\n else:\n return device\n\n\n value = vm.GetExtraConfig().get(variable, None)\n if value: return value\n\n return rec_getattr(vm, self.GetVmodlProperty(variable))",
"def env_get_var_value(var_name, allow_missing=False):\n if allow_missing:\n if var_name not in os.environ.keys():\n return None\n assert var_name in os.environ.keys(), \"Please supply %s in environment\" % var_name\n return os.environ[var_name]",
"def set_variable_value():\n\n mp_rp_conf_file = 'entitlement-tests/CCI/ReportPortal/mp_rp_conf.json'\n \n # 1. Set project name which is just the test product name with upper case letter\n cmd = \"sed -i -e 's/PROJECT_NAME/{0}/g' {1}\".format(test_product.upper(), mp_rp_conf_file)\n (ret, output) = commands.getstatusoutput(cmd)\n \n # 2. Set launch name\n # Launch name examples - Errata-49798_RHEL7_Server_x86_64_Full_ProdCDN; Errata-53717_RHEL8_x86_64_Full_ProdCDN\n cmd = \"sed -i -e 's/LAUNCH_NAME/{0}/g' {1}\".format(get_launch_name(), mp_rp_conf_file)\n (ret, output) = commands.getstatusoutput(cmd)\n \n # 3. Set variables value in description of launch\n # a) Set Errata url in description of launch\n errata_url = \"[{0}](https:\\/\\/errata.devel.redhat.com\\/advisory\\/{1})\".format(errata_id, errata_id)\n cmd = \"sed -i -e 's/ERRATA_URL/{0}/g' {1}\".format(errata_url, mp_rp_conf_file)\n (ret, output) = commands.getstatusoutput(cmd)\n \n # b) Set jenkins job url in description of launch\n build_id = build_url.strip('/').split('/')[-1]\n build_url_str = \"[{0}]({1})\".format(build_id, build_url.replace(\"/\",\"\\/\"))\n \n cmd = \"sed -i -e 's/BUILD_URL/{0}/g' {1}\".format(build_url_str, mp_rp_conf_file)\n (ret, output) = commands.getstatusoutput(cmd)\n \n # 4. Set launch tag\n # Tag examples - OpenStack16; Ceph3; CNV2\n cmd = \"cat product_version.txt\"\n (ret, output) = commands.getstatusoutput(cmd)\n \n cmd = \"sed -i -e 's/LAUNCH_TAG/{0}{1}/g' {2}\".format(test_product, output, mp_rp_conf_file)\n (ret, output) = commands.getstatusoutput(cmd)",
"def getvar(self, varname):\n try:\n val = self.get(MAIN_SECTION, varname)\n except ConfigParser.NoOptionError:\n val = VARS_OPT[varname]\n \n return val",
"def get_current_param(self, t=None):\n if self.current_context is None:\n raise Exception(\"The MAB game is not started.\")\n \n return self.get_param(self.current_context)",
"def get(self):\n if not self.__name in g_platform_variables:\n raise RuntimeError(\"unknown platform variable '%s'\" % (self.__name))\n current_var = g_platform_variables[self.__name]\n combinations = get_platform_combinations()\n for ii in combinations:\n if ii in current_var:\n return current_var[ii]\n raise RuntimeError(\"current platform %s not supported for variable '%s'\" % (str(combinations), self.__name))",
"def GetEnvVariable(name):\n return os.environ.get(name)"
]
| [
"0.65366197",
"0.6387033",
"0.6228277",
"0.6148368",
"0.6089504",
"0.59744775",
"0.5972712",
"0.59664136",
"0.59335375",
"0.5925959",
"0.58825845",
"0.58721554",
"0.58434135",
"0.5822617",
"0.5805199",
"0.57746196",
"0.576227",
"0.5749721",
"0.5739871",
"0.57335603",
"0.5729944",
"0.56985277",
"0.5688333",
"0.5671001",
"0.56610763",
"0.56534564",
"0.5647314",
"0.56423396",
"0.5634083",
"0.5629382"
]
| 0.7026889 | 0 |
Returns patch arguments or variable data | def get_patch_runtime_args(
app_uuid, deployments, patch_payload, ignore_runtime_variables, runtime_params_file
):
patch_name = patch_payload["name"]
patch_args = {}
patch_args["patch"] = patch_payload
patch_args["variables"] = []
attrs_list = patch_payload["attrs_list"]
if ignore_runtime_variables:
return patch_args
def disk_in_use(substrate, disk):
boot_disk = substrate["create_spec"]["resources"]["boot_config"]["boot_device"]
return (
disk["disk_address"]["adapter_type"]
== boot_disk["disk_address"]["adapter_type"]
and disk["disk_address"]["device_index"]
== boot_disk["disk_address"]["device_index"]
)
def nic_name(nic):
return nic["subnet_reference"]["name"] if nic["subnet_reference"] else ""
def disk_name(disk):
return "{}-{}".format(
disk["device_properties"]["disk_address"]["adapter_type"],
disk["device_properties"]["disk_address"]["device_index"],
)
nic_index_pattern = r".+?\[([0-9]*)\]"
# If file is supplied for launch params
if runtime_params_file:
click.echo("Patching values for runtime variables under patch action ...")
for attrs in attrs_list:
patch_items = attrs["data"]
target_deployment_uuid = attrs["target_any_local_reference"]["uuid"]
target_deployment = next(
filter(
lambda deployment: deployment["uuid"] == target_deployment_uuid,
deployments,
),
None,
)
if target_deployment == None:
LOG.info(
"Target deployment with uuid {} not found. Skipping patch attributes editables".format(
target_deployment_uuid
)
)
continue
substrate = target_deployment["substrate"]
nic_in_use = -1
nic_address = substrate["readiness_probe"]["address"]
readiness_probe_disabled = substrate["readiness_probe"][
"disable_readiness_probe"
]
if nic_address:
matches = re.search(nic_index_pattern, nic_address)
if matches != None and not readiness_probe_disabled:
nic_in_use = int(matches.group(1))
# Skip nics that are being used by the vm
nics = (
patch_items["pre_defined_nic_list"]
if nic_in_use == -1
else patch_items["pre_defined_nic_list"][nic_in_use + 1 :]
)
disks = patch_items["pre_defined_disk_list"]
patch_attrs_editables = parse_launch_params_attribute(
launch_params=runtime_params_file, parse_attribute="patch_attrs"
)
editables = next(
filter(
lambda patch_attrs: patch_attrs["patch_attributes_uuid"]
== attrs["uuid"],
patch_attrs_editables,
),
None,
)
if editables == None:
LOG.info(
"No patch editables found for patch attributes with uuid {}".format(
attrs["uuid"]
)
)
continue
vm_config_editables = editables.get("vm_config", {})
nic_editables = editables.get("nics", {})
disk_editables = editables.get("disks", {})
category_editables = editables.get("categories", {})
# VM config editables
for key, value in vm_config_editables.items():
patch_item = patch_items[key + "_ruleset"]
if (
patch_item["editable"]
and patch_item["min_value"] <= value <= patch_item["max_value"]
):
if patch_item["value"] != value:
LOG.info(
"Attribute {} marked for modify with value {}".format(
key, value
)
)
patch_item["value"] = value
# NIC delete
if patch_items["nic_delete_allowed"]:
for i, nic in enumerate(nics):
nic_index = i if nic_in_use == -1 else i + nic_in_use
if nic_index in nic_editables.get("delete", []):
LOG.info('NIC "{}" marked for deletion'.format(nic_name(nic)))
nic["operation"] = "delete"
nics_not_added = []
# NIC add
for i, nic in enumerate(nics):
if nic["operation"] == "add" and nic["editable"]:
nic_edit = next(
filter(
lambda n: n["identifier"] == nic["identifier"],
nic_editables.get("add", []),
),
None,
)
if (
nic_edit
and nic["subnet_reference"]["uuid"]
!= nic_edit["subnet_reference"]["uuid"]
):
LOG.info(
"NIC with identifier {} marked for modify with subnet {}".format(
nic["identifier"], nic_name(nic_edit)
)
)
nic["subnet_reference"] = nic_edit["subnet_reference"]
if nic["operation"] == "add" and i in nic_editables.get("delete", []):
LOG.info(
"NIC with identifier {} skipped from addition".format(
nic["identifier"]
)
)
nics_not_added.append(i)
# Skip adding nics that are deleted
nics = [nic for i, nic in enumerate(nics) if i not in nics_not_added]
patch_items["pre_defined_nic_list"] = nics
# Disk delete
if patch_items["disk_delete_allowed"]:
for i, disk in enumerate(disks):
if i in disk_editables.get("delete", []) and not disk_in_use(
substrate, disk["device_properties"]
):
LOG.info("Disk {} marked for deletion".format(disk_name(disk)))
disk["operation"] = "delete"
# Disk modify
for disk in disks:
if (
disk["operation"] == "modify"
and disk["disk_size_mib"]
and disk["disk_size_mib"]["editable"]
):
disk_edit = next(
filter(
lambda d: disk_name(d) == disk_name(disk),
disk_editables.get("modify", []),
),
None,
)
if (
disk_edit
and disk["disk_size_mib"]["min_value"]
<= disk_edit["disk_size_mib"]["value"]
<= disk["disk_size_mib"]["max_value"]
):
if (
disk["disk_size_mib"]["value"]
!= disk_edit["disk_size_mib"]["value"]
):
LOG.info(
"Disk {} marked for modify with size {}".format(
disk_name(disk), disk_edit["disk_size_mib"]["value"]
)
)
disk["disk_size_mib"]["value"] = disk_edit["disk_size_mib"][
"value"
]
disks_not_added = []
# Disk add
for i, disk in enumerate(disks):
if (
disk["operation"] == "add"
and disk["disk_size_mib"]
and disk["disk_size_mib"]["editable"]
):
disk_edit = next(
filter(
lambda d: i == d["index"],
disk_editables.get("add", []),
),
None,
)
if (
disk_edit
and disk["disk_size_mib"]["min_value"]
<= disk_edit["disk_size_mib"]["value"]
<= disk["disk_size_mib"]["max_value"]
):
if (
disk["disk_size_mib"]["value"]
!= disk_edit["disk_size_mib"]["value"]
):
LOG.info(
"Disk {} marked for addition with size {}".format(
disk_name(disk), disk_edit["disk_size_mib"]["value"]
)
)
disk["disk_size_mib"]["value"] = disk_edit["disk_size_mib"][
"value"
]
if disk["operation"] == "add" and i in disk_editables.get("delete", []):
LOG.info("Disk {} skipped from addition".format(disk_name(disk)))
disks_not_added.append(i)
# Skip adding disks that are deleted
disks = [disk for i, disk in enumerate(disks) if i not in disks_not_added]
patch_items["pre_defined_disk_list"] = disks
categories = patch_items["pre_defined_categories"]
# Category delete
if patch_items["categories_delete_allowed"]:
for i, category in enumerate(categories):
if i in category_editables.get("delete", []):
LOG.info(
"Category {} marked for deletion".format(category["value"])
)
category["operation"] = "delete"
# Category add
if patch_items["categories_add_allowed"]:
for category in category_editables.get("add", []):
LOG.info("Category {} marked for addition".format(category))
patch_items["pre_defined_categories"].append(
{"operation": "add", "value": category}
)
return patch_args
# Else prompt for runtime variable values
click.echo("Please provide values for runtime variables in the patch action")
for attrs in attrs_list:
patch_items = attrs["data"]
target_deployment_uuid = attrs["target_any_local_reference"]["uuid"]
click.echo(
"Patch editables targeted at deployment {} are as follows \n {}".format(
target_deployment_uuid,
json.dumps(patch_items, indent=4, separators=(",", ": ")),
)
)
nic_in_use = -1
disk_in_use = ""
# find out which nic and disk is currently used
for deployment in deployments:
if deployment["uuid"] == target_deployment_uuid:
substrate = deployment["substrate"]
nic_address = substrate["readiness_probe"]["address"]
readiness_probe_disabled = substrate["readiness_probe"][
"disable_readiness_probe"
]
if nic_address:
matches = re.search(nic_index_pattern, nic_address)
if matches != None and not readiness_probe_disabled:
nic_in_use = int(matches.group(1))
disk_address = substrate["create_spec"]["resources"]["boot_config"][
"boot_device"
]["disk_address"]
disk = "{}-{}".format(
disk_address["adapter_type"], disk_address["device_index"]
)
disk_in_use = disk
def prompt_value(patch_item, display_message):
min_value = (
patch_item["value"]
if patch_item["operation"] == "increase"
else patch_item["min_value"]
)
max_value = (
patch_item["value"]
if patch_item["operation"] == "decrease"
else patch_item["max_value"]
)
click.echo()
return click.prompt(
display_message,
default=highlight_text(patch_item["value"]),
type=click.IntRange(min=min_value, max=max_value),
)
def prompt_bool(display_message):
click.echo()
return click.prompt(
display_message,
default=highlight_text("n"),
type=click.Choice(["y", "n"]),
)
click.echo("\n\t\t\t", nl=False)
click.secho("VM CONFIGURATION", underline=True, bold=True)
# Sockets, cores and memory modify
display_names = {
"num_sockets_ruleset": "vCPUs",
"num_vcpus_per_socket_ruleset": "Cores per vCPU",
"memory_size_mib_ruleset": "Memory (MiB)",
}
for ruleset in display_names:
patch_item = patch_items[ruleset]
if patch_item["editable"]:
new_val = prompt_value(
patch_item,
"Enter value for {}".format(display_names[ruleset]),
)
patch_item["value"] = new_val
nics = (
patch_items["pre_defined_nic_list"]
if nic_in_use == -1
else patch_items["pre_defined_nic_list"][nic_in_use + 1 :]
)
click.echo("\n\t\t\t", nl=False)
click.secho("NETWORK CONFIGURATION", underline=True, bold=True)
# NIC add
nics_not_added = []
for i, nic in enumerate(nics):
if nic["operation"] == "add":
to_add = prompt_bool(
'Do you want to add the NIC "{}" with identifier {}'.format(
nic["subnet_reference"]["name"], nic["identifier"]
)
)
if to_add == "n":
nics_not_added.append(i)
# remove NICs not added from patch list
nics = [nic for i, nic in enumerate(nics) if i not in nics_not_added]
# NIC delete
if patch_items["nic_delete_allowed"] and len(nics) > 0:
to_delete = prompt_bool("Do you want to delete a NIC")
if to_delete == "y":
click.echo()
click.echo("Choose from following options")
for i, nic in enumerate(nics):
click.echo(
"\t{}. NIC-{} {}".format(
highlight_text(i), i + 1, nic["subnet_reference"]["name"]
)
)
click.echo()
nic_to_delete = click.prompt(
"Choose nic to delete",
default=0,
type=click.IntRange(max=len(nics)),
)
nics[nic_to_delete]["operation"] = "delete"
LOG.info(
"Delete NIC-{} {}".format(
nic_to_delete + 1,
nics[nic_to_delete]["subnet_reference"]["name"],
)
)
patch_items["pre_defined_nic_list"] = nics
click.echo("\n\t\t\t", nl=False)
click.secho("STORAGE CONFIGURATION", underline=True, bold=True)
# Disk delete
disks = list(
filter(
lambda disk: disk_name(disk) != disk_in_use,
patch_items["pre_defined_disk_list"],
)
)
if patch_items["disk_delete_allowed"] and len(disks) > 0:
to_delete = prompt_bool("Do you want to delete a disk")
if to_delete == "y":
click.echo()
click.echo("Choose from following options")
for i, disk in enumerate(disks):
click.echo(
"\t{}. DISK-{} {} {}".format(
highlight_text(i),
i + 1,
disk_name(disk),
disk["disk_size_mib"]["value"],
)
)
click.echo()
disk_to_delete = click.prompt(
"Choose disk to delete",
default=0,
type=click.IntRange(max=len(disks)),
)
disks[disk_to_delete]["operation"] = "delete"
LOG.info(
"Delete DISK-{} {}".format(
disk_to_delete + 1, disk_name(disks[disk_to_delete])
)
)
# Disk modify
for disk in disks:
disk_size = disk["disk_size_mib"]
if disk_size != None and disk_size["editable"]:
new_val = prompt_value(
disk_size,
"Enter size for disk {}".format(disk_name(disk)),
)
disk_size["value"] = new_val
patch_items["pre_defined_disk_list"] = disks
click.echo("\n\t\t\t", nl=False)
click.secho("CATEGORIES", underline=True, bold=True)
# Category delete
categories = patch_items["pre_defined_categories"]
if patch_items["categories_delete_allowed"] and len(categories) > 0:
to_delete = prompt_bool("Do you want to delete a category")
if to_delete == "y":
click.echo()
click.echo("Choose from following options")
for i, category in enumerate(categories):
click.echo("\t{}. {}".format(highlight_text(i), category["value"]))
click.echo()
category_to_delete = click.prompt(
"Choose category to delete",
default=0,
type=click.IntRange(max=len(categories)),
)
categories[category_to_delete]["operation"] = "delete"
LOG.info(
"Delete category {}".format(categories[category_to_delete]["value"])
)
# Category add
if patch_items["categories_add_allowed"]:
to_add = prompt_bool("Add a category?")
while to_add == "y":
click.echo()
new_val = click.prompt(
"Enter value for category", default="", show_default=False
)
patch_items["pre_defined_categories"].append(
{"operation": "add", "value": new_val}
)
to_add = prompt_bool("Add another category?")
return patch_args | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def patch_data(self):\n return get_json()",
"def patch(self):\n return self._get(\"patch\")",
"def arguments(self):\n return parse_arguments(self['data'])",
"def punkte(self):\n return self.args",
"def view_patch():\n\n return jsonify(\n get_dict(\"url\", \"args\", \"form\", \"data\", \"origin\", \"headers\", \"files\", \"json\")\n )",
"def _patches(s):\n if hasattr(s,'dict'):\n pp=s.patches\n elif isinstance(s,dispatch._dispatch._patch):\n pp=[s]\n else:\n pp=s\n return pp",
"def get_data_config(args):\n diff_data(args, \".\")",
"def patch_data():\n return json.loads('{\"success\":true, \"message\":\"Field of data updated (but not really)\" }')",
"def _get_updates(self, patch):\n updates = {}\n for p in patch:\n attribute = p['path'] if p['path'][0] != '/' else p['path'][1:]\n updates[attribute] = p['value']\n return updates",
"def getData(self, product, variables, attributes, variable, *args):\r\n\r\n data = None\r\n return data",
"def get(self):\n return self.args, self.kwargs",
"def args_extract(self, args, kwargs):\n # make popable (can't pop tuple of args)\n args = list(args)\n\n def getarg(name, num):\n if args and len(args) > num:\n return args.pop(num)\n elif kwargs.get('files'):\n return kwargs.pop('files')\n return None\n\n # First to not affect data = args.pop(0)\n files = getarg('files', 1)\n data = getarg('data', 0)\n\n # make mutable if something\n if files:\n files = MultiValueDict(files)\n if data:\n data = MultiValueDict(data)\n\n return data, files, args, kwargs",
"def get_args(self):\r\n return self.args",
"def get_args():\n return {\"id\": fields.UUID(required=True, location=\"view_args\")}",
"def getPTData(*args):\n return args[0].Data.PTData.pt_data",
"def args(self, default_args=(), diff=()):\n args = self._args\n if not args:\n args = default_args\n\n return self.expand_vars(args, diff=diff)",
"def getCloneArgs(self):\n\n values = {\n \"source_code\": self.subnode_source_code.makeClone(),\n \"globals_arg\": self.subnode_globals_arg.makeClone()\n if self.subnode_globals_arg is not None\n else None,\n \"locals_arg\": self.subnode_locals_arg.makeClone()\n if self.subnode_locals_arg is not None\n else None,\n }\n\n values.update(self.getDetails())\n\n return values",
"def _get_reproducing_arguments(self):\n reproducing_arguments = {\n 'include': self.include,\n 'exclude': self.exclude,\n 'copy': self.copy,\n }\n args_names = {name: getattr(self, name) for name in self.args_names}\n reproducing_arguments.update(args_names)\n return reproducing_arguments",
"def _get_base_patch_attribute(self, name):\n return getattr(self.patches[0],name)",
"def patch(*args, **kwargs):\n return update(*args, patch=True, **kwargs)",
"def parameters(self) -> Dict[str, Any]:\n return self.data[\"args\"].get(\"parameters\", {})",
"def dataargs(self):\n return self.argsbytype(Data)",
"def getCloneArgs(self):\n\n values = {\n \"locals_arg\": self.subnode_locals_arg.makeClone(),\n }\n\n values.update(self.getDetails())\n\n return values",
"def api_patchset(request):\n comments = request.GET.get('comments', 'false').lower() == 'true'\n values = _patchset_as_dict(request.patchset, comments, request)\n return values",
"def get_args(self):\n return {\n 'contents': self.get_formatted_code()\n }",
"def get_partial_arguments(self):\n return (), {}",
"def __call__(self, data: bytearray):\n for name in self._argo:\n value = getattr(self._argv, name, None)\n if value is self.PendingUpdate:\n raise RuntimeError(F'Attempting to resolve {name} while an update for this argument is in flight')\n if value and pending(value):\n self._args[name] = self.PendingUpdate\n self._args[name] = manifest(value, data)\n self._store(_guid=id(data))\n return data",
"def getPositionalArgs():",
"def get_args(self):\n rqst = self.request\n args = rqst.arguments()\n resp = {}\n for arg in args:\n resp[arg] = repr(rqst.get_all(arg))\n return resp",
"def test_get_patch_info_returns(self):\n # This test assumes IIQ isn't installed, thus the pile of errors that'll\n # occur shouldn't prevent us from getting a PatchInfo object\n fake_log = MagicMock()\n patch_info = versions.get_patch_info('bogus-patch.tgz', fake_log)\n\n self.assertTrue(isinstance(patch_info, versions._PatchInfo))\n self.assertEqual(patch_info.iiq_dir, '')"
]
| [
"0.6376717",
"0.635689",
"0.5775939",
"0.57362425",
"0.5714351",
"0.56508195",
"0.55597174",
"0.55584913",
"0.54944223",
"0.5489795",
"0.5393315",
"0.5366492",
"0.53662485",
"0.53613627",
"0.5359275",
"0.5359163",
"0.5354778",
"0.53506154",
"0.53359705",
"0.53352034",
"0.5322791",
"0.5312758",
"0.5295935",
"0.5291624",
"0.528383",
"0.5260206",
"0.5250114",
"0.51966715",
"0.5180992",
"0.51711774"
]
| 0.64711076 | 0 |
Download runlogs, given runlog uuid and app name | def download_runlog(runlog_id, app_name, file_name):
client = get_api_client()
app = _get_app(client, app_name)
app_id = app["metadata"]["uuid"]
if not file_name:
file_name = "runlog_{}.zip".format(runlog_id)
res, err = client.application.download_runlog(app_id, runlog_id)
if not err:
with open(file_name, "wb") as fw:
fw.write(res.content)
click.echo("Runlogs saved as {}".format(highlight_text(file_name)))
else:
LOG.error("[{}] - {}".format(err["code"], err["error"])) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fetch_run_logs(id_, **kwargs):\n run = get_run_object(id_)\n check_run_permission(run, kwargs[\"token_info\"])\n query = \"ilyde-run-{}\".format(run.id)\n return query_elasticsearch(query)",
"def download_workflow_log_files(repo, github_token, workflow_run_id, data_root_dir):\n headers = {\n 'Accept': 'application/vnd.github.v3+json',\n }\n query_url = f\"https://api.github.com/repos/{repo.owner.login}/{repo.name}/actions/runs/{workflow_run_id}/logs\"\n response = requests.get(query_url, headers=headers,\n auth=('username', github_token))\n if 'zip' in response.headers['Content-Type']:\n zip_obj = zipfile.ZipFile(io.BytesIO(response.content))\n data_dir = Path(data_root_dir, Workflows.WORKFLOWS_DIR, str(workflow_run_id))\n zip_obj.extractall(data_dir)\n return len(zip_obj.namelist())\n else:\n return None",
"def _process_logs_download(self, logfile):\r\n\r\n print 'Downloading PCU logs'\r\n command = 'robot --outputdir \"C:\\Robot Framework\\Output\\PCU_logs\" {}.robot'.format(self.name)\r\n\r\n return self._run_command(command, logfile)",
"def PullLogs(ssh, log_files, download_folder):\n for log_file in log_files:\n target_file = os.path.join(download_folder, os.path.basename(log_file))\n ssh.ScpPullFile(log_file, target_file)\n _DisplayPullResult(download_folder)",
"def getLogs(sessionId):\n try:\n zipFileName = sessionId + \"/logs.zip\"\n dirname = util.getPath(STATIC_GENERATED_FILE_LOCATION + sessionId)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n zipFilePath = util.getPath(\n STATIC_GENERATED_FILE_LOCATION) + zipFileName\n if os.path.exists(zipFilePath):\n os.remove(zipFilePath)\n zipFile = zipfile.ZipFile(zipFilePath, mode=\"w\")\n for f in [\"/var/log/admin.log\", \"/var/log/monitoring.log\", \"/var/log/federation.log\",\n \"/var/log/streaming.log\", \"/var/log/occupancy.log\", \"/var/log/flask/federation.log\", \"/var/log/servicecontrol.log\",\n \"/var/log/flask/spectrumbrowser.log\", \"/var/log/flask/spectrumdb.log\", \"/var/log/nginx/access.log\", \"/var/log/nginx/error.log\"]:\n if os.path.exists(f):\n zipFile.write(f, compress_type=zipfile.ZIP_DEFLATED)\n zipFile.close()\n url = Config.getGeneratedDataPath() + \"/\" + zipFileName\n return jsonify({\"status\": \"OK\", \"url\": url})\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n print sys.exc_info()\n traceback.print_exc()\n util.logStackTrace(sys.exc_info())\n raise",
"def get_run_log(self, run_id):\n postresult = requests.get(\n f\"{self.proto}://{self.host}/ga4gh/wes/v1/runs/{run_id}\",\n headers=self.auth,\n )\n return wes_reponse(postresult)",
"def get_run_log():\r\n params=request.values\r\n result = ExecRunLog.query.filter(ExecRunLog.exec_id==params['exec_id']).all()\r\n return json_response(result=result)",
"def download_result_archive(run_id):\n from robflask.service import service\n with service() as api:\n ioBuffer = api.runs().get_result_archive(run_id=run_id)\n return send_file(\n ioBuffer.open(),\n as_attachment=True,\n attachment_filename='run.tar.gz',\n mimetype='application/gzip'\n )",
"def getLogs():",
"def getLogs():",
"def get_logs():\n callback = bottle.request.query.get('callback')\n folder = os.path.dirname(os.path.abspath(__file__))\n test_run_title = bottle.request.query.test_run_id\n results = {'logs': {'monitor': '', 'testrun': ''}, 'host': bottle.request.headers.get('host')}\n try:\n with open(os.path.join(folder, 'monitor.log'), 'r+') as _f:\n results['logs'].update({'monitor': tools.get_last_logs(_f.readlines())})\n with open(os.path.join(folder, '%s-testrun.log' % test_run_title), 'r+') as _f:\n results['logs'].update({'testrun': tools.get_last_logs(_f.readlines())})\n except IOError as err:\n key = 'monitor' if 'monitor' in str(err) else 'testrun'\n results['logs'].update({key: 'Could not find logs: %s' % err})\n return '{0}({1})'.format(callback, [results])",
"def main(args):\n try:\n _download_mjlog(args.log_id, args.output)\n except requests.exceptions.HTTPError as error:\n if error.response.status_code == 404:\n _LG.error('Log file (%s) not found.', args.log_id)\n else:\n _LG.exception('Unexpected error.')\n sys.exit(1)",
"def download_csv_log():\n # reads the session\n session = request.args.get('session', type=str)\n # reads the requested process name\n process = request.args.get('process', default='receipt', type=str)\n if Configuration.enable_download:\n if check_session_validity(session):\n user = get_user_from_session(session)\n if lh.check_user_log_visibility(user, process):\n if lh.check_user_enabled_download(user, process):\n content = lh.get_handler_for_process_and_session(process, session).download_csv_log()\n return jsonify({\"content\": content})\n return jsonify({\"content\": \"\"})",
"def get_logs(ctx, num):\n app = ctx.obj['app']\n api_client = ctx.obj['api_client']\n colors = dict()\n logs = api_client.get_application_logs(app, lines=num)\n for log in reversed(logs):\n if log['process'] not in colors:\n index = len(colors)\n colors[log['process']] = _available_colors[index % len(_available_colors)]\n for log in logs:\n color = colors[log['process']]\n header = click.style('{timestamp} {app_name}[{process}]:'.format(\n timestamp=log['timestamp'],\n app_name=log['app'],\n process=log['process'],\n ), fg=color)\n click.echo('{header} {message}'.format(header=header, message=log['message']))",
"def download_xes_log():\n # reads the session\n session = request.args.get('session', type=str)\n # reads the requested process name\n process = request.args.get('process', default='receipt', type=str)\n if Configuration.enable_download:\n if check_session_validity(session):\n user = get_user_from_session(session)\n if lh.check_user_log_visibility(user, process):\n if lh.check_user_enabled_download(user, process):\n content = lh.get_handler_for_process_and_session(process, session).download_xes_log()\n return jsonify({\"content\": content.decode('utf-8')})\n return jsonify({\"content\": \"\"})",
"def main():\n for i, url in enumerate(opts.thread, start=1):\n opts.archived_md5 = reload_archive()\n thread = DownloadableThread(i, url)\n thread.resolve_path()\n asyncio.run(thread.download(), debug=False)",
"def download(self, output):\n self.wait()\n path = 'auditlogEntryReport/download'\n with open(output, 'w') as f:\n f.write(self._session.get(path))\n LOGGER.info('log downloaded: {}'.format(output))",
"async def logs(id: UUID):\n page_size = 200\n offset = 0\n more_logs = True\n log_filter = LogFilter(flow_run_id={\"any_\": [id]})\n\n async with get_client() as client:\n # Get the flow run\n try:\n flow_run = await client.read_flow_run(id)\n except ObjectNotFound as exc:\n exit_with_error(f\"Flow run {str(id)!r} not found!\")\n\n while more_logs:\n # Get the next page of logs\n page_logs = await client.read_logs(\n log_filter=log_filter, limit=page_size, offset=offset\n )\n\n # Print the logs\n for log in page_logs:\n app.console.print(\n # Print following the flow run format (declared in logging.yml)\n f\"{pendulum.instance(log.timestamp).to_datetime_string()}.{log.timestamp.microsecond // 1000:03d} | {logging.getLevelName(log.level):7s} | Flow run {flow_run.name!r} - {log.message}\",\n soft_wrap=True,\n )\n\n if len(page_logs) == page_size:\n offset += page_size\n else:\n # No more logs to show, exit\n more_logs = False",
"def download_appstats(servername, appid, path, secure,\n rpc_server_factory, filename, appdir,\n merge, java_application):\n\n\n\n\n if os.path.isdir(appdir):\n sys.path.insert(0, appdir)\n try:\n logging.info('Importing appengine_config from %s', appdir)\n import appengine_config\n except ImportError, err:\n logging.warn('Failed to load appengine_config: %s', err)\n\n\n remote_api_stub.ConfigureRemoteApi(appid, path, auth_func,\n servername=servername,\n save_cookies=True, secure=secure,\n rpc_server_factory=rpc_server_factory)\n remote_api_stub.MaybeInvokeAuthentication()\n\n\n os.environ['SERVER_SOFTWARE'] = 'Development (remote_api_shell)/1.0'\n\n if not appid:\n\n appid = os.environ['APPLICATION_ID']\n download_data(filename, merge, java_application)",
"def get(self, audit_uuid):\n\n schema = AuditDownloadInputSchema()\n params, errors = schema.load(request.args)\n if errors:\n abort(400, errors)\n\n audit_query = AuditTable.select().where(AuditTable.uuid == audit_uuid)\n\n audit = audit_query.dicts()[0]\n output = audit[\"name\"] + \"\\n\" + audit[\"description\"] + \"\\n\\n\"\n\n scan_ids = []\n for scan in audit_query[0].scans.dicts():\n if scan[\"processed\"] is True:\n scan_ids.append(scan[\"id\"])\n\n results = (\n ResultTable.select(ResultTable, ScanTable, VulnTable)\n .join(ScanTable)\n .join(VulnTable, on=(ResultTable.oid == VulnTable.oid))\n .where(ResultTable.scan_id.in_(scan_ids))\n .order_by(ResultTable.scan_id)\n )\n\n with tempfile.TemporaryFile(\"r+\") as f:\n writer = csv.DictWriter(f, AuditDownload.AUDIT_CSV_COLUMNS, extrasaction=\"ignore\")\n writer.writeheader()\n for result in results.dicts():\n result[\"started_at\"] = result[\"started_at\"] + timedelta(minutes=params[\"tz_offset\"])\n result[\"ended_at\"] = result[\"ended_at\"] + timedelta(minutes=params[\"tz_offset\"])\n result[\"description\"] = Utils.format_openvas_description(result[\"description\"])\n writer.writerow(result)\n f.flush()\n f.seek(0)\n output += f.read()\n\n headers = {\"Content-Type\": \"text/csv\", \"Content-Disposition\": \"attachment\"}\n return Response(response=output, status=200, headers=headers)",
"def get_logs(self):\n logs_directory = self.protocol_config['logs']\n protocol_name = self.protocol_config['protocol']\n os.system(f'fab -f Execution/fabfile.py get_logs:{logs_directory} --parallel | '\n f' tee WebApp/ExecutionLogs/{protocol_name}.log')",
"def CreateRunJoblog(loop, isOldRstdirDeleted, g_params):#{{{\n gen_logfile = g_params['gen_logfile']\n # gen_errfile = g_params['gen_errfile']\n name_server = g_params['name_server']\n\n webcom.loginfo(\"CreateRunJoblog for server %s...\"%(name_server), gen_logfile)\n\n path_static = g_params['path_static']\n # path_cache = g_params['path_cache']\n\n path_result = os.path.join(path_static, 'result')\n path_log = os.path.join(path_static, 'log')\n\n submitjoblogfile = f\"{path_log}/submitted_seq.log\"\n runjoblogfile = f\"{path_log}/runjob_log.log\"\n finishedjoblogfile = f\"{path_log}/finished_job.log\"\n\n # Read entries from submitjoblogfile, checking in the result folder and\n # generate two logfiles:\n # 1. runjoblogfile\n # 2. finishedjoblogfile\n # when loop == 0, for unfinished jobs, regenerate finished_seqs.txt\n hdl = myfunc.ReadLineByBlock(submitjoblogfile)\n if hdl.failure:\n return 1\n\n finished_job_dict = {}\n if os.path.exists(finishedjoblogfile):\n finished_job_dict = myfunc.ReadFinishedJobLog(finishedjoblogfile)\n\n # these two list try to update the finished list and submitted list so that\n # deleted jobs will not be included, there is a separate list started with\n # all_xxx which keeps also the historical jobs\n new_finished_list = [] # Finished or Failed\n new_submitted_list = []\n\n new_runjob_list = [] # Running\n new_waitjob_list = [] # Queued\n lines = hdl.readlines()\n while lines is not None:\n for line in lines:\n strs = line.split(\"\\t\")\n if len(strs) < 8:\n continue\n submit_date_str = strs[0]\n jobid = strs[1]\n ip = strs[2]\n numseq_str = strs[3]\n jobname = strs[5]\n email = strs[6].strip()\n method_submission = strs[7]\n start_date_str = \"\"\n finish_date_str = \"\"\n rstdir = os.path.join(path_result, jobid)\n\n numseq = 1\n try:\n numseq = int(numseq_str)\n except ValueError:\n pass\n\n isRstFolderExist = False\n if not isOldRstdirDeleted or os.path.exists(rstdir):\n isRstFolderExist = True\n\n if isRstFolderExist:\n new_submitted_list.append([jobid, line])\n\n if jobid in finished_job_dict:\n if isRstFolderExist:\n li = [jobid] + finished_job_dict[jobid]\n new_finished_list.append(li)\n continue\n\n status = webcom.get_job_status(jobid, numseq, path_result)\n if 'DEBUG_JOB_STATUS' in g_params and g_params['DEBUG_JOB_STATUS']:\n webcom.loginfo(\"status(%s): %s\"%(jobid, status), gen_logfile)\n\n starttagfile = \"%s/%s\"%(rstdir, \"runjob.start\")\n finishtagfile = \"%s/%s\"%(rstdir, \"runjob.finish\")\n if os.path.exists(starttagfile):\n start_date_str = myfunc.ReadFile(starttagfile).strip()\n if os.path.exists(finishtagfile):\n finish_date_str = myfunc.ReadFile(finishtagfile).strip()\n\n li = [jobid, status, jobname, ip, email, numseq_str,\n method_submission, submit_date_str, start_date_str,\n finish_date_str]\n if status in [\"Finished\", \"Failed\"]:\n new_finished_list.append(li)\n\n isValidSubmitDate = True\n try:\n submit_date = webcom.datetime_str_to_time(submit_date_str)\n except ValueError:\n isValidSubmitDate = False\n\n if isValidSubmitDate:\n current_time = datetime.now(submit_date.tzinfo)\n timeDiff = current_time - submit_date\n queuetime_in_sec = timeDiff.seconds\n else:\n queuetime_in_sec = g_params['UPPER_WAIT_TIME_IN_SEC'] + 1\n\n # for servers not in the list [\"topcons2\"] all jobs are handled by the qd_fe\n if (name_server.lower() not in [\"topcons2\"]\n or (numseq > 1\n or method_submission == \"wsdl\" \n or queuetime_in_sec > g_params['UPPER_WAIT_TIME_IN_SEC'])):\n if status == \"Running\":\n new_runjob_list.append(li)\n 
elif status == \"Wait\":\n new_waitjob_list.append(li)\n lines = hdl.readlines()\n hdl.close()\n\n# rewrite logs of submitted jobs\n li_str = []\n for li in new_submitted_list:\n li_str.append(li[1])\n if len(li_str)>0:\n myfunc.WriteFile(\"\\n\".join(li_str)+\"\\n\", submitjoblogfile, \"w\", True)\n else:\n myfunc.WriteFile(\"\", submitjoblogfile, \"w\", True)\n\n# rewrite logs of finished jobs\n li_str = []\n for li in new_finished_list:\n li = [str(x) for x in li]\n li_str.append(\"\\t\".join(li))\n if len(li_str) > 0:\n myfunc.WriteFile(\"\\n\".join(li_str)+\"\\n\", finishedjoblogfile, \"w\", True)\n else:\n myfunc.WriteFile(\"\", finishedjoblogfile, \"w\", True)\n# rewrite logs of finished jobs for each IP\n new_finished_dict = {}\n for li in new_finished_list:\n ip = li[3]\n if not ip in new_finished_dict:\n new_finished_dict[ip] = []\n new_finished_dict[ip].append(li)\n for ip in new_finished_dict:\n finished_list_for_this_ip = new_finished_dict[ip]\n divide_finishedjoblogfile = \"%s/divided/%s_finished_job.log\"%(path_log, ip)\n li_str = []\n for li in finished_list_for_this_ip:\n li = [str(x) for x in li]\n li_str.append(\"\\t\".join(li))\n if len(li_str)>0:\n myfunc.WriteFile(\"\\n\".join(li_str)+\"\\n\", divide_finishedjoblogfile, \"w\", True)\n else:\n myfunc.WriteFile(\"\", divide_finishedjoblogfile, \"w\", True)\n\n# update allfinished jobs\n allfinishedjoblogfile = \"%s/all_finished_job.log\"%(path_log)\n allfinished_jobid_set = set(myfunc.ReadIDList2(allfinishedjoblogfile, col=0, delim=\"\\t\"))\n li_str = []\n for li in new_finished_list:\n li = [str(x) for x in li]\n jobid = li[0]\n if not jobid in allfinished_jobid_set:\n li_str.append(\"\\t\".join(li))\n if len(li_str)>0:\n myfunc.WriteFile(\"\\n\".join(li_str)+\"\\n\", allfinishedjoblogfile, \"a\", True)\n\n# update all_submitted jobs\n allsubmitjoblogfile = \"%s/all_submitted_seq.log\"%(path_log)\n allsubmitted_jobid_set = set(myfunc.ReadIDList2(allsubmitjoblogfile, col=1, delim=\"\\t\"))\n li_str = []\n for li in new_submitted_list:\n jobid = li[0]\n if not jobid in allsubmitted_jobid_set:\n li_str.append(li[1])\n if len(li_str)>0:\n myfunc.WriteFile(\"\\n\".join(li_str)+\"\\n\", allsubmitjoblogfile, \"a\", True)\n\n# write logs of running and queuing jobs\n# the queuing jobs are sorted in descending order by the suq priority\n# frist get numseq_this_user for each jobs\n# format of numseq_this_user: {'jobid': numseq_this_user}\n numseq_user_dict = webcom.GetNumSeqSameUserDict(new_runjob_list + new_waitjob_list)\n\n# now append numseq_this_user and priority score to new_waitjob_list and\n# new_runjob_list\n\n for joblist in [new_waitjob_list, new_runjob_list]:\n for li in joblist:\n jobid = li[0]\n ip = li[3]\n email = li[4].strip()\n rstdir = \"%s/%s\"%(path_result, jobid)\n outpath_result = \"%s/%s\"%(rstdir, jobid)\n\n # if loop == 0 , for new_waitjob_list and new_runjob_list\n # regenerate finished_seqs.txt\n runjob_lockfile = \"%s/%s.lock\"%(rstdir, \"runjob.lock\")\n if 'DEBUG' in g_params and g_params['DEBUG'] and os.path.exists(runjob_lockfile):\n webcom.loginfo(\"runjob_lockfile %s exists. 
\"%(runjob_lockfile), gen_logfile)\n if loop == 0 and os.path.exists(outpath_result) and not os.path.exists(runjob_lockfile):#{{{\n finished_seq_file = \"%s/finished_seqs.txt\"%(outpath_result)\n finished_idx_file = \"%s/finished_seqindex.txt\"%(rstdir)\n finished_idx_set = set([])\n\n finished_seqs_idlist = []\n if os.path.exists(finished_seq_file):\n finished_seqs_idlist = myfunc.ReadIDList2(finished_seq_file, col=0, delim=\"\\t\")\n finished_seqs_idset = set(finished_seqs_idlist)\n finished_info_list = []\n queryfile = \"%s/query.fa\"%(rstdir)\n (seqIDList, seqAnnoList, seqList) = myfunc.ReadFasta(queryfile)\n try:\n dirlist = os.listdir(outpath_result)\n except Exception as e:\n webcom.loginfo(\"Failed to os.listdir(%s) with errmsg=%s\"%(outpath_result, str(e)), gen_logfile)\n for dd in dirlist:\n if dd.find(\"seq_\") == 0:\n origIndex_str = dd.split(\"_\")[1]\n finished_idx_set.add(origIndex_str)\n\n if dd.find(\"seq_\") == 0 and dd not in finished_seqs_idset:\n origIndex = int(dd.split(\"_\")[1])\n outpath_this_seq = \"%s/%s\"%(outpath_result, dd)\n timefile = \"%s/time.txt\"%(outpath_this_seq)\n runtime = webcom.ReadRuntimeFromFile(timefile, default_runtime=0.0)\n # get origIndex and then read description the description list\n try:\n description = seqAnnoList[origIndex].replace('\\t', ' ')\n except:\n description = \"seq_%d\"%(origIndex)\n try:\n seq = seqList[origIndex]\n except:\n seq = \"\"\n info_finish = webcom.GetInfoFinish(name_server, outpath_this_seq,\n origIndex, len(seq), description,\n source_result=\"newrun\", runtime=runtime)\n finished_info_list.append(\"\\t\".join(info_finish))\n if len(finished_info_list)>0:\n myfunc.WriteFile(\"\\n\".join(finished_info_list)+\"\\n\", finished_seq_file, \"a\", True)\n if len(finished_idx_set) > 0:\n myfunc.WriteFile(\"\\n\".join(list(finished_idx_set))+\"\\n\", finished_idx_file, \"w\", True)\n else:\n myfunc.WriteFile(\"\", finished_idx_file, \"w\", True)\n #}}}\n\n try:\n numseq = int(li[5])\n except (IndexError, ValueError):\n numseq = 1\n pass\n try:\n numseq_this_user = numseq_user_dict[jobid]\n except KeyError:\n numseq_this_user = numseq\n pass\n # note that the priority is deducted by numseq so that for jobs\n # from the same user, jobs with fewer sequences are placed with\n # higher priority\n priority = myfunc.FloatDivision( myfunc.GetSuqPriority(numseq_this_user) - numseq, math.sqrt(numseq))\n\n if ip in g_params['blackiplist']:\n priority = priority/1000.0\n\n if email in g_params['vip_user_list']:\n numseq_this_user = 1\n priority = 999999999.0\n webcom.loginfo(\"email %s in vip_user_list\"%(email), gen_logfile)\n\n li.append(numseq_this_user)\n li.append(priority)\n\n # sort the new_waitjob_list in descending order by priority\n new_waitjob_list = sorted(new_waitjob_list, key=lambda x: x[11], reverse=True)\n new_runjob_list = sorted(new_runjob_list, key=lambda x: x[11], reverse=True)\n\n # write to runjoblogfile\n li_str = []\n for joblist in [new_waitjob_list, new_runjob_list]:\n for li in joblist:\n li2 = li[:10]+[str(li[10]), str(li[11])]\n li_str.append(\"\\t\".join(li2))\n# print \"write to\", runjoblogfile\n# print \"\\n\".join(li_str)\n if len(li_str) > 0:\n myfunc.WriteFile(\"\\n\".join(li_str)+\"\\n\", runjoblogfile, \"w\", True)\n else:\n myfunc.WriteFile(\"\", runjoblogfile, \"w\", True)",
"def download_all(conn, logger):\n # setup slices, 24 in total\n slices = [f'year{x}month{y}' for x in [2, 1] for y in range(12, 0, -1)]\n for slice in slices:\n download_intraday_extended(conn, logger, slice)",
"def check_run_detail(run, db, run_dir):\n run_id = run['_id']\n host_name = os.uname().nodename\n json_logs = glob(op.join(run_dir, \"*.log.json\"))\n\n if len(json_logs)<1:\n return\n if len(json_logs)>1:\n print(\"There are two json logs for %s\"%run_id)\n\n json_logs.sort() \n log_dict = load_json_logs(json_logs)\n\n log_data = defaultdict(list)\n keys_list = list()\n for epoch in log_dict:\n keys_list.extend(log_dict[epoch].keys())\n keys_list = list(set(keys_list))\n for epoch in log_dict: \n for k in [x for x in keys_list if x in log_dict[epoch].keys()]:\n log_data[k].extend(log_dict[epoch][k])\n\n mtime = datetime.datetime.fromtimestamp(int(op.getmtime(json_logs[-1])))\n db.run.update_one({\"_id\": run_id},\n {\"$set\": {\"log_data_0\": log_data,\n \"host\":host_name,\n \"log_last_update\":mtime}})",
"def process_logs(logs):\n all_data = {}\n for log in logs:\n with open(log) as f:\n data = json.load(f)\n scenario = data[0].get(\"scenario\", None)\n if scenario is None:\n # No scenario name, no way to organize the data\n continue\n\n # Use the log's date as the run identifier\n # This assumes the format is SCENARIO-YYYY-MM-DD.json\n # NOTE: This may not match the GitHub Action run dates due to tests taking\n # a very long time.\n day = datetime.strptime(log[1+len(scenario):-5], \"%Y-%m-%d\").strftime(\"%Y%m%d\")\n if day not in all_data:\n all_data[day] = {}\n\n # Group them by scenario, assume each file is from one scenario per day\n all_data[day][scenario] = data\n return all_data",
"def get_log(request, **kwargs):\n\n #Creating the command for the logs \n try:\n\tprint(kwargs)\n\tprint(request.GET['project_id'])\n\toutputStr = sidecar.events.test_logs(project_id=request.GET['project_id'])\n\tlog_data = outputStr.log_data\n\toutputStr = \" <br>\".join(log_data.split(\"\\n\"))\n except Exception, e:\n outputStr = \"Updating the logs...\"\t\n #Making the output\n context = {\n \"page_title\": _(\"Test Details\"),\n \"test_lists\": 'report_list', #tests_list\n \"log_data\": outputStr\n }\n return render(request, 'rally_dashboard/events/test_logs.html', context)",
"def download_artifacts(token, artifacts):\n zipfiles = []\n for a in artifacts:\n updated_at = datetime.fromisoformat(a[\"updated_at\"][:-1])\n datename = a[\"name\"]+updated_at.strftime(\"-%Y-%m-%d\")\n filename = datename + \".zip\"\n if os.path.exists(filename):\n zipfiles.append((a[\"name\"], datename, filename))\n print(f\"{filename} skipped, already downloaded\")\n continue\n\n print(f\"Fetching {filename}\")\n ok = run_curl(token, a[\"archive_download_url\"], filename)\n if not ok:\n continue\n\n zipfiles.append((a[\"name\"], datename, filename))\n\n return zipfiles",
"def download_files(self):",
"def main(eid, auth_key, ignore_rows):\n uri = usage_data.get_previous_6_months_uri(eid)\n blob_path = usage_data.get_report_blob_uri(uri, auth_key)\n\n cur_time = datetime.utcnow()\n cur_time = cur_time.replace(tzinfo=timezone.utc, microsecond=0)\n\n file_name, file_size = usage_data.download_file(blob_path, cur_time, ignore_rows)\n\n return (file_name, file_size)",
"def download_result_file(run_id, file_id):\n print('download {} {}'.format(run_id, file_id))\n from robflask.service import service\n with service() as api:\n # Authentication of the user from the expected api_token in the header\n # will fail if no token is given or if the user is not logged in.\n fh = api.runs().get_result_file(run_id=run_id, file_id=file_id)\n return send_file(\n fh.open(),\n as_attachment=True,\n attachment_filename=fh.name,\n mimetype=fh.mime_type\n )"
]
| [
"0.64194244",
"0.59884953",
"0.5788865",
"0.57788175",
"0.5744236",
"0.5743501",
"0.5706302",
"0.5553888",
"0.5492111",
"0.5492111",
"0.5474755",
"0.5382931",
"0.53668857",
"0.53567404",
"0.52949184",
"0.5284501",
"0.5284132",
"0.52608335",
"0.522752",
"0.5216262",
"0.5213479",
"0.51250464",
"0.5121373",
"0.51180226",
"0.5115106",
"0.5110504",
"0.5107724",
"0.51026833",
"0.50854284",
"0.5068286"
]
| 0.77006644 | 0 |
Add an auto configured output to the layout. This will place the output in a sensible location in the layout. The coordinates of the output in the layout may adjust dynamically when the layout changes. If the output is already in the layout, it will become auto configured. If the position of the output is set such as with `wlr_output_layout_move()`, the output will become manually configured. | def add_auto(self, output: Output) -> None:
lib.wlr_output_layout_add_auto(self._ptr, output._ptr) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add(self, output: Output, lx: int, ly: int) -> None:\n lib.wlr_output_layout_add(self._ptr, output._ptr, lx, ly)",
"def move(self, output: Output, lx: int, ly: int) -> None:\n lib.wlr_output_layout_move(self._ptr, output._ptr, lx, ly)",
"def output(self, layout: Optional[dict] = None) -> OutputWidget:\n return OutputWidget(self, layout)",
"def output_position(self, output_position: int):\n\n self._output_position = output_position",
"def add_full_grid_output(self,output_filename,output_type, start, step):\n self.ricom.nopt = output_type \n self.ricom.noptstart = start\n self.ricom.nskip = step\n self.ricom.outputFileFull = output_filename",
"def output_at(self, x: float, y: float) -> Output | None:\n output_ptr = lib.wlr_output_layout_output_at(self._ptr, x, y)\n if output_ptr == ffi.NULL:\n return None\n return Output(output_ptr)",
"def placement_automatic(args):\n clarity_epp.placement.plate.copy_layout(lims, args.process_id)",
"def update_output_window(self):\n\n self.output_window.refresh(self.coordinate_dict['output_y'], 0, 0,\n self.coordinate_dict['output_x'] + 1,\n self.full_height_out - 1,\n self.full_width_out - 1)",
"def _configure(self):\n OutputSoln._configure(self)",
"def remove(self, output: Output) -> None:\n lib.wlr_output_layout_remove(self._ptr, output._ptr)",
"def add_output(self, output, number, logid='default-log'):\n cell = self.get_cell(number, logid)\n out_element = ET.SubElement(cell, 'output')\n out_element.text = output",
"def setup_output(self, job_to_use=None, output_type=\"hazard_map\"):\n job = job_to_use if job_to_use else self.setup_classic_job()\n output = Output(owner=job.owner, oq_job=job, output_type=output_type)\n output.path = self.touch(\n dir=os.path.join(job.path, \"computed_output\"), suffix=\".xml\",\n prefix=\"hzrd.\" if output_type == \"hazard_map\" else \"loss.\")\n output.display_name = os.path.basename(output.path)\n output.save()\n return output",
"def add_output_item(self,\n data_constant = False,\n array_variable = True,\n absolute_relative = False,\n wrap = False,\n linear = False,\n preferred = True,\n null = False,\n volatile = False):\n item_flags = ItemFlags.build({\n \"data_constant\": data_constant,\n \"array_variable\": array_variable,\n \"absolute_relative\": absolute_relative,\n \"wrap\": wrap,\n \"linear\": linear,\n \"nPreferred\": ~preferred,\n \"null\": null,\n \"volatile\": volatile,\n })\n self.add_report_item(HIDPrefix.OUTPUT, ord(item_flags))",
"def addLayout(self, *args):\n return _libsbml.LayoutModelPlugin_addLayout(self, *args)",
"def _generate_layout(self):\n\n pass",
"def add_output(self, id: str, type: str, initial_value: float=None, **kwargs):\n output = Output(id=id, type=type, initial_value=initial_value)\n for key, value in kwargs.items():\n output.add_parameter(key, value)\n self.outputs.append(output)",
"def add_element_output_locations(self, xy, epsgIN,start,end,step): \n elementIds = self.grid.get_element_output_locations(xy,epsgIN)\n if(elementIds != []):\n self.run_nc.add_element_output_locations(elementIds,start,end,step)",
"def add_layout_pins(self):\n en_offset = self.dc_inst.get_pin(\"in\").ll()\n self.add_layout_pin(text=\"en\",\n layer=\"metal1\",\n offset=en_offset.scale(1,0),\n width=self.m1_width,\n height=en_offset.y)\n\n out_offset = self.rbl_inv_inst.get_pin(\"Z\").ll()\n self.add_layout_pin(text=\"out\",\n layer=\"metal1\",\n offset=out_offset.scale(1,0),\n width=self.m1_width,\n height=out_offset.y)",
"def _do_layout(self):\n return",
"def output(self, output):\n self._output = output",
"def create_layout( self ):",
"def normalOutputWritten(self, text):\r\n # Maybe QTextEdit.append() works as well, but this is how I do it:\r\n cursor = self.console_text_edit.textCursor()\r\n cursor.movePosition(QTextCursor.End)\r\n cursor.insertText(text)\r\n self.console_text_edit.setTextCursor(cursor)\r\n self.console_text_edit.ensureCursorVisible() \r\n\r\n #**************************************************************************************************************************************\r\n #--------------------------------------------------------------------------------------------------------------------------------------\r\n #-----------------------------------------------------------Fucs for Motor movement----------------------------------------------------\r\n #-------------------------------------------------------------------------------------------------------------------------------------- \r\n #************************************************************************************************************************************** \r",
"def do_layout(self, *args, **kw):\n if self.use_draw_order and self.component is not None:\n self._layout_as_overlay(*args, **kw)\n else:\n super(PlotGrid, self).do_layout(*args, **kw)\n return",
"def add_output(self, variable):\n self.outputs.append(variable)",
"def updatePlotLayout(self):\n self.plotLayoutType = self.plotLayoutDropdown.currentText() # ['paper', 'poster', 'talk']\n\n self.updatePlotLayoutGrid()\n\n self.update2()",
"def _output_section_write(self):\n if not self.args.old_galaxy:\n self.output_sect = etree.SubElement(self.inputs, 'section', name='output_opt', title='Additional Output Parameters', expanded='False')\n else:\n self.output_sect = etree.SubElement(self.inputs, 'conditional', name='output_opt')\n self.output_sect_sel = etree.SubElement(self.output_sect, 'param', name='output_opt_sel', type='select',\n label='Additional output parameters?')\n self.opt_yes = etree.SubElement(self.output_sect_sel, 'option', value='yes')\n self.opt_yes.text = 'yes'\n self.opt_no = etree.SubElement(self.output_sect_sel, 'option', value='no', selected='true')\n self.opt_no.text = 'no'\n self.when_yes = etree.SubElement(self.output_sect, 'when', value='yes')",
"def output(self, output):\n\n self._output = output",
"def output(self, output):\n\n self._output = output",
"def output(self, output):\n\n self._output = output",
"def show_outputpad(self, frame2, outputpad):\n frame2.pack(side=GUI.TOP, fill=GUI.BOTH, expand=GUI.YES)\n outputpad.pack(side=GUI.TOP, fill=GUI.BOTH, expand=GUI.YES)"
]
| [
"0.70198876",
"0.63294667",
"0.5761119",
"0.56359804",
"0.5577666",
"0.55675364",
"0.55489993",
"0.5502449",
"0.54143834",
"0.53270626",
"0.5263954",
"0.5225019",
"0.5154998",
"0.5103272",
"0.5067622",
"0.50329566",
"0.4984036",
"0.49752924",
"0.49733323",
"0.49177164",
"0.4917258",
"0.4912085",
"0.49054855",
"0.49007654",
"0.4890106",
"0.4873064",
"0.48687148",
"0.48687148",
"0.48687148",
"0.483547"
]
| 0.80270034 | 0 |
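A minimal usage sketch for the `add_auto` binding documented in this record. The `wlroots.wlr_types` import path, the `OutputLayout()` constructor, and the surrounding `handle_new_output` wiring are assumptions for illustration only:

```python
# Illustrative sketch, not a definitive pywlroots example.
from wlroots.wlr_types import Output, OutputLayout  # assumed import path

layout = OutputLayout()  # assumed to wrap wlr_output_layout_create()

def handle_new_output(output: Output) -> None:
    # Let the layout pick a sensible position; the coordinates may shift
    # automatically as other outputs are added or removed.
    layout.add_auto(output)
```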
Determine coordinates of the output in the layout. Given x and y in layout coordinates, adjusts them to local output coordinates relative to the given reference output. | def output_coords(self, output: Output) -> tuple[float, float]:
ox = ffi.new("double *")
oy = ffi.new("double *")
lib.wlr_output_layout_output_coords(self._ptr, output._ptr, ox, oy)
return ox[0], oy[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_position(layout):\n\n if layout.children:\n toyplot_vertical_align = layout.style[\"-toyplot-vertical-align\"]\n # Align the first line's baseline with the anchor.\n if toyplot_vertical_align == \"first-baseline\":\n offset_y = 0\n # Align the last line's baseline with the anchor.\n elif toyplot_vertical_align == \"last-baseline\":\n offset_y = -(layout.height + layout.children[0].top - layout.children[-1].bottom)\n # Align the top of the layout with the anchor.\n elif toyplot_vertical_align == \"top\":\n offset_y = -layout.children[0].top\n # Align the middle of the layout with the anchor.\n elif toyplot_vertical_align == \"middle\":\n offset_y = -((layout.height * 0.5) + layout.children[0].top)\n # Align the bottom of the layout with the anchor.\n elif toyplot_vertical_align == \"bottom\":\n offset_y = -(layout.height + layout.children[0].top)\n else:\n raise ValueError(\"Unknown -toyplot-vertical-align value: %s\" % toyplot_vertical_align) # pragma: no cover\n\n for line in layout.children:\n text_anchor = line.style[\"text-anchor\"] if line.children else \"middle\"\n if text_anchor == \"start\":\n anchor_offset = 0\n elif text_anchor == \"middle\":\n anchor_offset = -line.width * 0.5\n elif text_anchor == \"end\":\n anchor_offset = -line.width\n else:\n raise ValueError(\"Unknown text-anchor value: %s\" % text_anchor)\n anchor_offset += layout.style[\"-toyplot-anchor-shift\"]\n\n offset_x = anchor_offset\n\n # Line left/right/bottom/top are relative offsets from the layout anchor in canvas coordinates.\n line.left = offset_x\n line.right = offset_x + line.width\n line.top += offset_y\n line.baseline = offset_y\n line.bottom += offset_y\n\n for child in line.children:\n # Child left/right/bottom/top are relative offsets from the layout anchor in canvas coordinates.\n child.left = offset_x\n child.right = child.left + child.width\n child.top += offset_y\n child.baseline += offset_y\n child.bottom += offset_y\n # Note that baseline-shift is the opposite of canvas coordinates (positive values shift UP)\n child.baseline -= child.style[\"baseline-shift\"]\n\n offset_x += child.width\n offset_y += line.height\n\n layout.top = layout.children[0].top\n layout.left = numpy.min([line.left for line in layout.children])\n layout.right = numpy.max([line.right for line in layout.children])\n layout.bottom = layout.children[-1].bottom\n\n else:\n layout.top = 0\n layout.left = 0\n layout.right = 0\n layout.bottom = 0\n\n # Layout top/left/right/bottom are relative offsets from the layout anchor in canvas coordinates\n layout.width = layout.right - layout.left\n layout.height = layout.bottom - layout.top",
"def output_at(self, x: float, y: float) -> Output | None:\n output_ptr = lib.wlr_output_layout_output_at(self._ptr, x, y)\n if output_ptr == ffi.NULL:\n return None\n return Output(output_ptr)",
"def closest_point(\n self, lx: float, ly: float, reference: Output | None = None\n ) -> tuple[float, float]:\n if reference:\n reference_ptr = reference._ptr\n else:\n reference_ptr = ffi.NULL\n\n dest_lx = ffi.new(\"double *\")\n dest_ly = ffi.new(\"double *\")\n lib.wlr_output_layout_closest_point(\n self._ptr, reference_ptr, lx, ly, dest_lx, dest_ly\n )\n return dest_lx[0], dest_ly[0]",
"def absolute_to_relative(self, x, y):\n rel_x = (x - self.width / 2) / (self.width / 2)\n if rel_x > 1:\n rel_x = 1\n elif rel_x < -1:\n rel_x = -1\n\n rel_y = (self.height / 2 - y) / (self.height / 2)\n if rel_y > 1:\n rel_y = 1\n elif rel_y < -1:\n rel_y = -1\n\n return rel_x, rel_y",
"def _update_coords(self, change=None):\n if self.node_id:\n x, y = self.layout[self.node_id]\n self.coords = (x - self.dist, x + self.dist, y - self.dist, y + self.dist)",
"def img2widgetcoords(self, x,y):\n\t\tif self._i2w_matrix is None: self._calc_matrix()\n\t\treturn self._i2w_matrix.transform_point(x,y)",
"def _coord(self, x, y):\n gridEdge = 7 # originally 5\n y = gridEdge - y\n cx = 100 * (x - 1) + 50\n cy = 100 * (y - 1) + 50\n r = 20\n return (cx - r, cy - r, cx + r, cy + r)",
"def update_location(self):\n if self.simulation:\n return (self.y, self.x)\n else:\n raise NotImplementedError\n\n self.y = new_y\n self.x = new_x\n\n return (new_y, new_x)",
"def calculate_coordinates(self):\n # get coordinates for lef side of equation\n self._calculate_for_one_side(self.left_side)\n\n # set process glyph x coordinate\n self.process_glyph_x = self.x_limit + 150\n\n self._calculate_for_one_side(self.right_side, side=\"right_side\")\n\n self.x_limit, self.y_limit = self._generate_real_coordinates_according_to_compartment()\n\n # set process glyph y coordinate\n self.process_glyph_y = self.y_limit / 2\n\n # set final image width, height\n self.x_limit += 10\n self.y_limit += 20",
"def compute_coordinates(self):\n self._x, self._y = self.board.index_to_coordinates(self.index)",
"def adjust_position(self):\n\n # Adjust position for x-axis\n r = self.rect.x % 30\n if r != 0:\n if r <= 16:\n x = self.rect.x - r\n else:\n x = self.rect.x + (30 - r)\n\n else:\n x = self.rect.x\n\n # Adjust position for y-axis\n r = self.rect.y % 30\n if r != 0:\n if r <= 16:\n y = self.rect.y - r\n else:\n y = self.rect.y + (30 - r)\n else:\n y = self.rect.y\n\n return x, y",
"def xyxy(x, y, hd1, hd2):\n wcs1 = wcs.WCS(hd1)\n wcs2 = wcs.WCS(hd2)\n ra, dec = wcs1.all_pix2world(x, y, 1)\n x2, y2 = wcs2.all_world2pix(ra, dec, 1)\n return x2, y2",
"def calcOffset(self, x, y):\r\n # Datalayout\r\n # X = longitude\r\n # Y = latitude\r\n # Sample for size 1201x1201\r\n # ( 0/1200) ( 1/1200) ... (1199/1200) (1200/1200)\r\n # ( 0/1199) ( 1/1199) ... (1199/1199) (1200/1199)\r\n # ... ... ... ...\r\n # ( 0/ 1) ( 1/ 1) ... (1199/ 1) (1200/ 1)\r\n # ( 0/ 0) ( 1/ 0) ... (1199/ 0) (1200/ 0)\r\n # Some offsets:\r\n # (0/1200) 0\r\n # (1200/1200) 1200\r\n # (0/1199) 1201\r\n # (1200/1199) 2401\r\n # (0/0) 1201*1200\r\n # (1200/0) 1201*1201-1\r\n return x + self.size * (self.size - y - 1)",
"def dest_xy(self) -> Tuple[int, int]:\n return self.entity.x + self.dx, self.entity.y + self.dy",
"def reproject_coordinates(x_in, y_in, spatial_reference_source, spatial_reference_target=None): \n if spatial_reference_target is not None:\n pass\n else:\n spatial_reference_target = osr.SpatialReference()\n spatial_reference_target.ImportFromEPSG(4326) \n pass\n \n if int(osgeo.__version__[0]) >= 3:\n # GDAL 3 changes axis order: https://github.com/OSGeo/gdal/issues/1546\n \n spatial_reference_source.SetAxisMappingStrategy(osgeo.osr.OAMS_TRADITIONAL_GIS_ORDER)\n spatial_reference_target.SetAxisMappingStrategy(osgeo.osr.OAMS_TRADITIONAL_GIS_ORDER)\n\n \n pTransform = osr.CoordinateTransformation( spatial_reference_source, spatial_reference_target)\n \n x_new,y_new, z = pTransform.TransformPoint( x_in,y_in)\n \n return x_new,y_new",
"def action_to_coords(self, x, y):\n self.scene.center_on(x, y)",
"def calculate_entry_exit(self, pos_y, pos_x):\r\n return (pos_y * 70 + 35), (pos_x * 70 + 35)",
"def _do_layout(self):\n if self.stack_order == \"bottom_to_top\":\n components = (self.zoomed_plot, self.reference_plot)\n relative_sizes = (4 / 3., 2 / 3.)\n else:\n components = (self.reference_plot, self.zoomed_plot)\n relative_sizes = (2 / 3., 4 / 3.)\n if self.halign == \"left\":\n align = \"min\"\n elif self.halign == \"center\":\n align = \"center\"\n else:\n align = \"max\"\n #import pdb; pdb.set_trace()\n return self._do_stack_layout(components, relative_sizes, align)",
"def widget2imgcoords(self, x,y):\n\t\tif self._w2i_matrix is None: self._calc_matrix()\n\t\treturn self._w2i_matrix.transform_point(x,y)",
"def __getxy(x1, y1, x2, y2):\n\t\treturn x1*27+y1*9+x2*3+y2",
"def update_pos(self):\n self.imgx=self.pathX[min(self.x,len(self.pathX)-1)]\\\n [min(self.y,len(self.pathX[self.x])-1)]\n self.imgy=self.pathY[min(self.x,len(self.pathY)-1)]\\\n [min(self.y,len(self.pathY[self.x])-1)]",
"def get_correct_coords(start_x=0,viewing_distance=12.0,field_height=10,field_width=10,pixel_width=0.282,pixel_height=0.282,**config):\n \n x = (start_x + np.arange(np.ceil(-field_width/2.0),np.ceil(field_width/2.0),1))*pixel_width\n y = np.arange(np.ceil(-field_height/2.0),np.ceil(field_height/2.0),1)*pixel_height\n x,y = np.meshgrid(x,y)\n coords = np.vstack((x.ravel(),y.ravel())).T\n return coords",
"def world_to_screen(self, x, y):\n return x-self.x, self.h-(y-self.y)",
"def position(self, x, y):\n if self.portrait:\n # HMSB\n index = (x + y * self.size[0]) >> 3\n offset = 7 - (x & 0x07)\n else:\n # VMSB\n index = (y >> 3) * self.size[0] + x\n offset = 7 - (y & 0x07)\n return index, offset",
"def convert_coords(x, y, conversion):\n if conversion == \"cartesian\" :\n # convert to cartesian plane coordinates \n x_new = x - (width/2)\n y_new = (height/2) + y \n\n elif conversion == \"pygame\":\n # only needed to place images in pygame\n x_new = x + (width/2)\n y_new = (height/2) - y\n \n return x_new, y_new",
"def det_to_world(self, x, y):\n ra, dec = x, y\n return ra, dec",
"def relative_position(self) -> Tuple[int, int]:\n return self.position[0] - self.region.rect.x, self.position[1] - self.region.rect.y",
"def relative_position(self) -> Tuple[int, int]:\n return self.position[0] - self.region.rect.x, self.position[1] - self.region.rect.y",
"def _target_xy(header, outwcs):\n tgt_x, tgt_y = None, None\n tgt_ra = header.get('TGTRA', None)\n tgt_dec = header.get('TGTDEC', None)\n if tgt_ra is not None and tgt_dec is not None \\\n and not np.allclose([tgt_ra, tgt_dec], 0):\n # convert from hours to degrees\n tgt_ra *= 15.0\n if outwcs.wcs.naxis == 2:\n tgt_x, tgt_y = \\\n outwcs.wcs_world2pix(tgt_ra, tgt_dec, 0)\n else:\n tgt_w, tgt_y, tgt_x = \\\n outwcs.wcs_world2pix(0, tgt_dec, tgt_ra, 0)\n return tgt_x, tgt_y",
"def move(self, output: Output, lx: int, ly: int) -> None:\n lib.wlr_output_layout_move(self._ptr, output._ptr, lx, ly)"
]
| [
"0.6292434",
"0.60976356",
"0.60090214",
"0.5698411",
"0.567165",
"0.5587634",
"0.5562373",
"0.5496815",
"0.54904616",
"0.5438968",
"0.5402502",
"0.53729147",
"0.53563195",
"0.53547263",
"0.53337777",
"0.53318155",
"0.53305453",
"0.5305409",
"0.5298191",
"0.5286754",
"0.525534",
"0.52546334",
"0.5232022",
"0.52286744",
"0.52114934",
"0.5195527",
"0.5191395",
"0.5191395",
"0.5187763",
"0.51871604"
]
| 0.6738363 | 0 |
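A sketch of translating a layout-space point into output-local space with `output_coords`; the helper name and the assumption that the returned offsets are simply added to the layout coordinates are illustrative:

```python
from wlroots.wlr_types import Output, OutputLayout  # assumed import path

def to_output_local(layout: OutputLayout, output: Output,
                    lx: float, ly: float) -> tuple[float, float]:
    # ox, oy are the offsets relating layout coordinates to this output's
    # local coordinate space (sketch; exact semantics follow wlroots).
    ox, oy = layout.output_coords(output)
    return lx + ox, ly + oy
```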
Use the output layout in a context manager | def __enter__(self) -> OutputLayout:
return self | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _do_layout(self):\n return",
"def layout(self):\n pass",
"def _generate_layout(self):\n\n pass",
"def output(self, layout: Optional[dict] = None) -> OutputWidget:\n return OutputWidget(self, layout)",
"def create_layout( self ):",
"def context(subcontext=None) -> None:\n if subcontext is None:\n subcontext = []\n args = subcontext\n\n if len(args) == 0:\n args = config_context_sections.split()\n\n sections = [(\"legend\", lambda *args, **kwargs: [M.legend()])] if args else []\n sections += [(arg, context_sections.get(arg[0], None)) for arg in args]\n\n result = defaultdict(list)\n result_settings: DefaultDict[str, dict] = defaultdict(dict)\n for section, func in sections:\n if func:\n target = output(section)\n # Last section of an output decides about output settings\n settings = output_settings.get(section, {})\n result_settings[target].update(settings)\n with target as out:\n result[target].extend(\n func(\n target=out,\n width=settings.get(\"width\", None),\n with_banner=settings.get(\"banner_top\", True),\n )\n )\n\n for target, res in result.items():\n settings = result_settings[target]\n if len(res) > 0 and settings.get(\"banner_bottom\", True):\n with target as out:\n res.append(pwndbg.ui.banner(\"\", target=out, width=settings.get(\"width\", None)))\n\n for target, lines in result.items():\n with target as out:\n if result_settings[target].get(\"clearing\", config_clear_screen) and lines:\n clear_screen(out)\n out.write(\"\\n\".join(lines))\n if out is sys.stdout:\n out.write(\"\\n\")\n out.flush()",
"def render_layout(self):\n return self._layout.format(*self._options.keys(), self.task)",
"def _populate_output(self):\n pass",
"def render(self, mode='human', close=False):\n pass",
"def get_renderer_context(self):\n context = super().get_renderer_context()\n context['indent'] = 2\n return context",
"def setup(self) -> \"None\":\n # Patch the renderer to extend the output height\n renderer._output_screen_diff = _patched_output_screen_diff\n\n if config.page and sys.stdout.isatty():\n # Use a temporary file as display output if we are going to page the output\n from tempfile import TemporaryFile\n\n self.out_file = TemporaryFile(\"w+\")\n\n else:\n if config.page:\n log.warning(\"Cannot page output because standard output is not a TTY\")\n # If we are not paging output, determine when to print it\n if config.dump_file is None or str(config.dump_file) in (\n \"-\",\n \"/dev/stdout\",\n ):\n self.out_file = sys.stdout\n elif str(config.dump_file) == \"/dev/stderr\":\n self.out_file = sys.stderr\n else:\n try:\n self.out_file = open(config.dump_file, \"w+\")\n except (\n FileNotFoundError,\n PermissionError,\n io.UnsupportedOperation,\n ) as error:\n log.error(error)\n log.error(\n f\"Output file `{config.dump_file}` cannot be opened. \"\n \"Standard output will be used.\"\n )\n self.out_file = sys.stdout\n\n # Ensure we do not recieve the \"Output is not a terminal\" message\n Vt100_Output._fds_not_a_terminal.add(self.out_file.fileno())\n # Do not use stderr instead of stdout if stdout is not a tty\n self.out_file = cast(\"TextIO\", self.out_file)\n self.output = create_output(self.out_file, always_prefer_tty=False)\n\n # Use the width and height of stderr (this gives us the terminal size even if\n # output is being piped to a non-tty)\n # if hasattr(self.output, '_get_size'):\n setattr(self.output, \"get_size\", create_output(stdout=sys.stderr).get_size)\n\n # Disable character position requests when dumping output to stop extra output\n # This also speeds things up as we do not need to wait for the response\n # Ignore typing here as mypy does not understand __class__\n class DumpingOutput(self.output.__class__): # type: ignore\n # Disable character position requests when dumping output\n responds_to_cpr = False\n\n # Patch the output to prevent CPR detection\n self.output.__class__ = DumpingOutput\n\n # Set pre-run commands\n self.pre_run.append(self.post_dump)",
"def use(self, layout):\n self._wid.setLayout(layout)\n return layout",
"def __enter__(self):\n sys.stdout.flush()\n sys.stdout = open(self._path, mode=\"w\")\n sys.stdout.flush()\n return self",
"def render(self, mode='human', close=False):\n pass",
"def __call__(self):\n return ILayoutAware(self.context).content",
"def render_layout(self, page, task):\n return self._layout.format(page + 1, len(self.result), task)",
"def __init__(self, ctx, layout):\n self.ctx = ctx\n self.layout = layout",
"def __enter__(self):\n if self.back_flag:\n # Set LaTeX params\n matplotlib.rcParams.update({ \n \"pgf.texsystem\": \"pdflatex\",\n 'font.family': 'serif',\n 'text.usetex': True,\n 'pgf.rcfonts': False,\n \"pgf.preamble\": \"\\n\".join( self.packages ),\n })\n plt.rc('font', size=self.SMALL_SIZE) # controls default text sizes\n plt.rc('axes', titlesize=self.BIGGER_SIZE) # fontsize of the axes title\n plt.rc('axes', labelsize=self.MEDIUM_SIZE) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=self.SMALL_SIZE) # fontsize of the tick labels\n plt.rc('ytick', labelsize=self.SMALL_SIZE) # fontsize of the tick labels\n plt.rc('legend', fontsize=self.MEDIUM_SIZE) # legend fontsize\n plt.rc('figure', titlesize=self.BIGGEST_SIZE) # fontsize of the figure title",
"def render(self, cli):\n output = Output(self.stdout)\n\n # Create screen and write layout to it.\n screen = Screen(size=output.get_size())\n\n height = self._last_screen.current_height if self._last_screen else 0\n height = max(self._min_available_height, height)\n self.layout.write_to_screen(cli, screen, height)\n\n accept_or_abort = cli.is_exiting or cli.is_aborting or cli.is_returning\n\n # Process diff and write to output.\n output_buffer = []\n self._cursor_pos, self._last_char = output_screen_diff(\n output, screen, self._cursor_pos,\n self._last_screen, self._last_char, accept_or_abort,\n style=self._style, grayed=cli.is_aborting,\n )\n self._last_screen = screen\n\n output.flush()",
"def render(self, mode='human'):",
"def __enter__(self):\n self._stdout = sys.stdout\n sys.stdout = self._stringio = StringIO()\n return self",
"def generate(self):\n logger=self.logger\n outputter=self.OutputType()\n outputter.make_runner(parser=self.parser,dry_run=self.dry_run,\n setarith=self.setarith)\n con=fileless_context(\n scopes=[self.parse_result],verbose=self.verbose,logger=logger,\n run_mode=self.run_mode)\n self.make_more(self.parse_result,con)",
"def dspyRender(self):\n pass",
"def layoutFunction(self): # real signature unknown; restored from __doc__\n pass",
"def render(self, mode='human', close=False):\n return None",
"def render(self, mode='human'):\n pass # no use in this situation",
"def context(n, content):\n file = OpenFile(n, \"w\")\n file.__enter__(content)\n file.__exit__()",
"def layoutDefault(self): # real signature unknown; restored from __doc__\n pass",
"def layout(self, format=\"\", fname=\"\", **kw):\n # Only do relayout if changed\n format, fname = map(encode_page, [format, fname])\n\n if self.changed:\n # print \"gv.layout(g, '%s')\" % (self.engine)\n gv.layout(self.handle, self.engine)\n self.changed = 0\n\n if fname:\n if not format:\n format = 'dot'\n # print \"gv.render(g, '%s', '%s')\" % (format, file)\n gv.render(self.handle, format, fname)\n elif format:\n # Render to stdout, FIXME when gv improves\n gv.render(self.handle, format)\n else:\n # Render to attrs\n gv.render(self.handle)",
"def layoutTwice(self):\n\t\treturn True"
]
| [
"0.64091706",
"0.6069155",
"0.5997315",
"0.5928173",
"0.58884573",
"0.5709779",
"0.5672082",
"0.5537715",
"0.5532014",
"0.5498159",
"0.548384",
"0.5475992",
"0.5458588",
"0.54453886",
"0.53986293",
"0.53619075",
"0.53441685",
"0.5322948",
"0.53207755",
"0.5281953",
"0.5273164",
"0.52647907",
"0.5211496",
"0.51861477",
"0.51854455",
"0.51560265",
"0.51335907",
"0.5132165",
"0.51032233",
"0.5097745"
]
| 0.719726 | 0 |
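Because `__enter__` returns the layout itself, the object can be used in a `with` block. The sketch below assumes `__exit__` releases the underlying wlroots resources when the block ends and that `some_output` is an existing handle:

```python
with OutputLayout() as layout:      # assumed constructor
    layout.add_auto(some_output)    # `some_output` is an assumed Output handle
    # ... run the compositor ...
# the layout is presumably destroyed on exit
```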
Get the output at the specified layout coordinates. Returns None if no output matches the coordinates. | def output_at(self, x: float, y: float) -> Output | None:
output_ptr = lib.wlr_output_layout_output_at(self._ptr, x, y)
if output_ptr == ffi.NULL:
return None
return Output(output_ptr) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def output_coords(self, output: Output) -> tuple[float, float]:\n ox = ffi.new(\"double *\")\n oy = ffi.new(\"double *\")\n lib.wlr_output_layout_output_coords(self._ptr, output._ptr, ox, oy)\n\n return ox[0], oy[0]",
"def get_output_by_name(self, name):\n for var in self.outputs:\n if var.get_object().name == name:\n return var\n logger.exception(\"Output variable with name {0} not found\".format(name))\n return None",
"def get_output_anchor(\n self, output_connection_name: str\n ) -> Optional[\"OutputAnchor\"]:\n return self._output_anchor_map.get(output_connection_name)",
"def FirstPwmOutput():\n devRef = YRefParam()\n neededsizeRef = YRefParam()\n serialRef = YRefParam()\n funcIdRef = YRefParam()\n funcNameRef = YRefParam()\n funcValRef = YRefParam()\n errmsgRef = YRefParam()\n size = YAPI.C_INTSIZE\n #noinspection PyTypeChecker,PyCallingNonCallable\n p = (ctypes.c_int * 1)()\n err = YAPI.apiGetFunctionsByClass(\"PwmOutput\", 0, p, size, neededsizeRef, errmsgRef)\n\n if YAPI.YISERR(err) or not neededsizeRef.value:\n return None\n\n if YAPI.YISERR(\n YAPI.yapiGetFunctionInfo(p[0], devRef, serialRef, funcIdRef, funcNameRef, funcValRef, errmsgRef)):\n return None\n\n return YPwmOutput.FindPwmOutput(serialRef.value + \".\" + funcIdRef.value)",
"def closest_point(\n self, lx: float, ly: float, reference: Output | None = None\n ) -> tuple[float, float]:\n if reference:\n reference_ptr = reference._ptr\n else:\n reference_ptr = ffi.NULL\n\n dest_lx = ffi.new(\"double *\")\n dest_ly = ffi.new(\"double *\")\n lib.wlr_output_layout_closest_point(\n self._ptr, reference_ptr, lx, ly, dest_lx, dest_ly\n )\n return dest_lx[0], dest_ly[0]",
"def get_output(self, name='0'):\n if name not in self._outputs:\n raise ValueError(\"Invalid port name '{0}'\".format(name))\n return self._outputs[name]",
"def get_pipeline_topology_output(account_name: Optional[pulumi.Input[str]] = None,\n pipeline_topology_name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPipelineTopologyResult]:\n ...",
"def nextPwmOutput(self):\n hwidRef = YRefParam()\n if YAPI.YISERR(self._nextFunction(hwidRef)):\n return None\n if hwidRef.value == \"\":\n return None\n return YPwmOutput.FindPwmOutput(hwidRef.value)",
"def getOutputPoint(self):\n return self[1].getPoint()",
"def GetOutput(self, *args) -> \"itkPointSetD2 *\":\n return _itkMeshSourcePython.itkMeshSourcePSD2_GetOutput(self, *args)",
"def get_stack_output_value(stack_outputs, output_key):\n return next((o.get(\"OutputValue\") for o in stack_outputs if o.get(\"OutputKey\") == output_key), None)",
"def get_output(self, **kwargs):\n return self.out",
"def output(self):\n text_list = self.q(css='#output').text\n\n if len(text_list) < 1:\n return None\n return text_list[0]",
"def get_output(self):\n return None, None",
"def get_output(self):\n return None, None",
"def whichInput(input, output):\n debugLog(\"whichInput called to check input: %s and output: %s\" % (input.name(), output.name()))\n numInputs = output.inputs()\n idx = 0\n while idx < numInputs:\n # first need to check that name is not None otherwise it will crash\n # this occurs where unconnected inputs > 1 exist ie. a Viewer node\n if output.input(idx) is not None:\n if output.input(idx).name() == input.name():\n return idx\n else:\n idx += 1\n\n\n #TODO: write func to display a list of gizmos used in script",
"def get_output(self, **kwargs):\n with tf.variable_scope(self.layer_scope):\n return self.out",
"def get_output(self, idx):\n raise NotImplementedError",
"def get_coords( self, query, metadata, **kw ):\n hash_str = self.make_hash_str( query, **kw )\n\n graph = self.do_graph( query, metadata, True, **kw )\n cache_data = self.check_cache( hash_str )\n if cache_data:\n return cache_data[0]\n else:\n return None",
"def get_output(self):\r\n x = self.query('OUTP?')\r\n if x == None: return None\r\n return int(x)",
"def get_value(self):\n coord = np.round(self.coordinates).astype(int)\n if self.multichannel:\n shape = self._data_view.shape[:-1]\n else:\n shape = self._data_view.shape\n\n if all(0 <= c < s for c, s in zip(coord[self.dims.displayed], shape)):\n value = (\n self.data_level,\n self._data_view[tuple(coord[self.dims.displayed])],\n )\n else:\n value = None\n\n return value",
"def output(self, layout: Optional[dict] = None) -> OutputWidget:\n return OutputWidget(self, layout)",
"def GetOutput(self, *args) -> \"itkPointSetD3 *\":\n return _itkMeshSourcePython.itkMeshSourcePSD3_GetOutput(self, *args)",
"def GetOutput(self, *args):\n return _itkMeshSourcePython.itkMeshSourceMD2Q_GetOutput(self, *args)",
"def get_output(self):\n if self.cursor.description is None:\n return None\n names = [i[0] for i in self.cursor.description]\n names = self.fix_duplicate_field_names(names)\n try:\n output = self.cursor.fetchall()\n except Exception as error:\n logger.warn(str(error))\n return None\n if not output or len(output) == 0:\n return None\n else:\n output = np.rec.fromrecords(output,names=names)\n return output",
"def retrieve_graph(self):\n\n g = self.g\n\n if 'grid' in g['name']:\n my_layout = g.layout(\"grid\")\n else:\n my_layout = g.layout(\"kk\")\n\n return g, my_layout",
"def get_graph_layout(self, graph_id, layout_id):\n\n\t\tresponse = self._make_request(\"GET\", '/api/v1/graphs/%s/layouts/%s' % (graph_id, layout_id)).json()\n\t\treturn None if 'id' not in response else response",
"def default_output(self):\r\n\r\n do = getattr(self.op, 'default_output', None)\r\n if do is None:\r\n if len(self.outputs) == 1:\r\n return self.outputs[0]\r\n else:\r\n raise AttributeError(\"%s.default_output should be an output index.\" % self.op)\r\n elif do < 0 or do >= len(self.outputs):\r\n raise AttributeError(\"%s.default_output is out of range.\" % self.op)\r\n return self.outputs[do]",
"def eval(self, coordinates, **kwargs):\n\n output = kwargs.get(\"output\", None)\n # check crs compatibility\n if output is not None and \"crs\" in output.attrs and output.attrs[\"crs\"] != coordinates.crs:\n raise ValueError(\n \"Output coordinate reference system ({}) does not match\".format(output.crs)\n + \"request Coordinates coordinate reference system ({})\".format(coordinates.crs)\n )\n\n if settings[\"DEBUG\"]:\n self._requested_coordinates = coordinates\n key = \"output\"\n cache_coordinates = coordinates.transpose(*sorted(coordinates.dims)) # order agnostic caching\n\n if not self.force_eval and self.cache_output and self.has_cache(key, cache_coordinates):\n data = self.get_cache(key, cache_coordinates)\n if output is not None:\n order = [dim for dim in output.dims if dim not in data.dims] + list(data.dims)\n output.transpose(*order)[:] = data\n self._from_cache = True\n else:\n data = self._eval(coordinates, **kwargs)\n if self.cache_output:\n self.put_cache(data, key, cache_coordinates)\n self._from_cache = False\n\n # extract single output, if necessary\n # subclasses should extract single outputs themselves if possible, but this provides a backup\n if \"output\" in data.dims and self.output is not None:\n data = data.sel(output=self.output)\n\n # transpose data to match the dims order of the requested coordinates\n order = [dim for dim in coordinates.xdims if dim in data.dims]\n if \"output\" in data.dims:\n order.append(\"output\")\n data = data.part_transpose(order)\n\n if settings[\"DEBUG\"]:\n self._output = data\n\n # Add style information\n data.attrs[\"layer_style\"] = self.style\n\n if self.units is not None:\n data.attrs[\"units\"]\n\n # Add crs if it is missing\n if \"crs\" not in data.attrs:\n data.attrs[\"crs\"] = coordinates.crs\n\n return data",
"def GetOutput(self, *args) -> \"itkPointSetUS2 *\":\n return _itkMeshSourcePython.itkMeshSourcePSUS2_GetOutput(self, *args)"
]
| [
"0.6571606",
"0.5839956",
"0.56035906",
"0.54842794",
"0.5437366",
"0.5349081",
"0.5301348",
"0.5281909",
"0.51899874",
"0.5155416",
"0.5151213",
"0.51356196",
"0.51304936",
"0.5116058",
"0.5116058",
"0.5091115",
"0.5090367",
"0.5016866",
"0.5016222",
"0.49573547",
"0.4944155",
"0.49270037",
"0.49145505",
"0.490345",
"0.48745203",
"0.48644283",
"0.48639056",
"0.48532754",
"0.4846427",
"0.48416024"
]
| 0.7370843 | 0 |
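A sketch of hit-testing a pointer position against the layout with `output_at`; the cursor coordinates and the `focus_output` helper are assumptions:

```python
target = layout.output_at(cursor_x, cursor_y)  # layout-space coordinates
if target is None:
    pass  # the point does not fall on any output in the layout
else:
    focus_output(target)  # hypothetical helper for the output under the pointer
```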
Add the output to the layout at the specified coordinates. If the output is already part of the output layout, this moves the output. | def add(self, output: Output, lx: int, ly: int) -> None:
lib.wlr_output_layout_add(self._ptr, output._ptr, lx, ly) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def move(self, output: Output, lx: int, ly: int) -> None:\n lib.wlr_output_layout_move(self._ptr, output._ptr, lx, ly)",
"def add_node_output_locations(self, xy,epsgIN,start,end,step): \n nodeIds = self.grid.get_node_output_locations(xy,epsgIN)\n if(elementIds != []):\n self.run_nc.add_node_output_locations(nodeIds,start,end,step)",
"def add_element_output_locations(self, xy, epsgIN,start,end,step): \n elementIds = self.grid.get_element_output_locations(xy,epsgIN)\n if(elementIds != []):\n self.run_nc.add_element_output_locations(elementIds,start,end,step)",
"def output_position(self, output_position: int):\n\n self._output_position = output_position",
"def output_at(self, x: float, y: float) -> Output | None:\n output_ptr = lib.wlr_output_layout_output_at(self._ptr, x, y)\n if output_ptr == ffi.NULL:\n return None\n return Output(output_ptr)",
"def move_stage_to_xy(self, coordinates):\n raise NotImplementedError",
"def add_auto(self, output: Output) -> None:\n lib.wlr_output_layout_add_auto(self._ptr, output._ptr)",
"def add_output(self, output, number, logid='default-log'):\n cell = self.get_cell(number, logid)\n out_element = ET.SubElement(cell, 'output')\n out_element.text = output",
"def add_out(self, *outputs: 'Output') -> None:\n self.outputs.extend(outputs)",
"def add_output(self, variable):\n self.outputs.append(variable)",
"def output_coords(self, output: Output) -> tuple[float, float]:\n ox = ffi.new(\"double *\")\n oy = ffi.new(\"double *\")\n lib.wlr_output_layout_output_coords(self._ptr, output._ptr, ox, oy)\n\n return ox[0], oy[0]",
"def update_output_window(self):\n\n self.output_window.refresh(self.coordinate_dict['output_y'], 0, 0,\n self.coordinate_dict['output_x'] + 1,\n self.full_height_out - 1,\n self.full_width_out - 1)",
"def insert_outputs(self, children):\n parents = self.get_direct_outputs()\n for parent in parents:\n print parent.remove_input(self)\n for child in children:\n for parent in parents:\n parent.add_input(child)\n child.add_input(self)\n return True",
"def add_output(self):\r\n if self.slots[self.length-1].item is not Item.E:\r\n self.outputs.append(self.slots[self.length-1].item)",
"def insert_output(self, action):\n parents = self.get_direct_outputs()\n action.add_input(self)\n for parent in parents:\n parent.remove_input(self)\n parent.add_input(action)\n return True",
"def addOutput(self, *args):\n return _libsbml.Transition_addOutput(self, *args)",
"def instantiate_output_move(row, col, row_idx_bitwidth, col_idx_bitwidth):\n group_name = py_ast.CompVar(\n NAME_SCHEME[\"out mem move\"].format(pe=f\"pe_{row}_{col}\")\n )\n pe = py_ast.CompVar(f\"pe_{row}_{col}\")\n return py_ast.Group(\n group_name,\n connections=[\n py_ast.Connect(\n py_ast.ConstantPort(row_idx_bitwidth, row),\n py_ast.CompPort(OUT_MEM, \"addr0\"),\n ),\n py_ast.Connect(\n py_ast.ConstantPort(col_idx_bitwidth, col),\n py_ast.CompPort(OUT_MEM, \"addr1\"),\n ),\n py_ast.Connect(\n py_ast.CompPort(pe, \"out\"), py_ast.CompPort(OUT_MEM, \"write_data\")\n ),\n py_ast.Connect(\n py_ast.ConstantPort(1, 1), py_ast.CompPort(OUT_MEM, \"write_en\")\n ),\n py_ast.Connect(\n py_ast.CompPort(OUT_MEM, \"done\"), py_ast.HolePort(group_name, \"done\")\n ),\n ],\n )",
"def move_to(self, destination_coords):\n self.x = destination_coords[0]\n self.y = destination_coords[1]\n return",
"def add_output(self, result):\n if not isinstance(result, six.integer_types):\n raise TypeError('add_output must be called with an integer '\n '(LoomResult id.) Did you forget to call constant?')\n if not self._weaver.AddOutput(result):\n raise AssertionError('Weaver AddOutput failed: %s' %\n self._weaver.error_string())",
"def place_ptx(self):\n\n # Compute the other pmos2 location, but determining offset to overlap the\n # source and drain pins\n self.overlap_offset = self.pmos.get_pin(\"D\").ll() - self.pmos.get_pin(\"S\").ll()\n \n # adds the lower pmos to layout\n #base = vector(self.width - 2*self.pmos.width + self.overlap_offset.x, 0)\n self.lower_pmos_position = vector(self.bitcell.get_pin(self.bitcell_bl).lx(),\n self.pmos.active_offset.y)\n self.lower_pmos_inst.place(self.lower_pmos_position)\n\n # adds the upper pmos(s) to layout\n ydiff = self.pmos.height + 2*self.m1_space + contact.poly.width\n self.upper_pmos1_pos = self.lower_pmos_position + vector(0, ydiff)\n self.upper_pmos1_inst.place(self.upper_pmos1_pos)\n\n upper_pmos2_pos = self.upper_pmos1_pos + self.overlap_offset\n self.upper_pmos2_inst.place(upper_pmos2_pos)",
"def addLayout(self, *args):\n return _libsbml.LayoutModelPlugin_addLayout(self, *args)",
"def add_full_grid_output(self,output_filename,output_type, start, step):\n self.ricom.nopt = output_type \n self.ricom.noptstart = start\n self.ricom.nskip = step\n self.ricom.outputFileFull = output_filename",
"def remove(self, output: Output) -> None:\n lib.wlr_output_layout_remove(self._ptr, output._ptr)",
"def relmoveto(self, x = 0, y = 0):\n self.cur_x += x\n self.cur_y += y\n if x < 0:\n self.out.write(self.csi + \"%sD\" % -x)\n elif x > 0:\n self.out.write(self.csi + \"%sC\" % x)\n if y < 0:\n self.out.write(self.csi + \"%sA\" % -y)\n elif y > 0:\n self.out.write(self.csi + \"%sB\" % y)",
"def add_layout(self, layout: Layout, position: Coordinate, row_span: int = 1, column_span: int = 1):\n\n if position.x < 0:\n raise ValueError(\"X position can not be negative\")\n if position.y < 0:\n raise ValueError(\"Y position can not be negative\")\n if row_span <= 0:\n raise ValueError(\"Cannot span less than one row\")\n if column_span <= 0:\n raise ValueError(\"Cannot span less than one column\")\n if position.x + column_span > self.grid_width or position.y + row_span > self.grid_height:\n raise ValueError(f\"Out of bounds: position: {position} column span: {column_span} row span: {row_span}\")\n\n self.layouts[layout] = position, row_span, column_span",
"def set_outputs(self, outputs):\n if not isinstance(outputs, Bus):\n raise TypeError(\"ERROR: Invalid output Bus\")\n\n if (outputs.width != self.outputs.width):\n raise TypeError(\"ERROR: Output width mismatch.\")\n\n with AutoUpdater._lock:\n AutoUpdater.remove_link(self.outputs)\n AutoUpdater.add_link(\n self.outputs,\n outputs)",
"def set_from_left_to_right(self, leftmost_output_name):\n leftmost_output_id = r.output_name_to_id(leftmost_output_name)\n width, height = self.set(leftmost_output_id, 0)\n screen_width, screen_height = width, height\n for output_id in list(self.connected_outputs.keys()):\n if output_id == leftmost_output_id:\n continue\n r.get_resources()\n width, height = r.set(output_id, screen_width)\n screen_width += width\n screen_height = max(screen_height, height)\n self.set_screen_size(screen_width, screen_height, 0, 0)",
"def set_outputs_from_pairs(self, new_outputs: Iterable[Tuple[tf.DType,\n tf.shape]]):\n self._outputs = []\n i = 0\n for (dtype, shape) in new_outputs:\n self._outputs.append(tensor.Tensor(self, i, dtype, shape))\n i += 1\n self._graph.increment_version_counter() # Just in case",
"def set_new_location(self, xPos, yPos):",
"def _add_output(self, name, input_layer):\n self.model.add_output(name=name, input=input_layer)\n self.output = name"
]
| [
"0.6960685",
"0.60513806",
"0.60398746",
"0.5813254",
"0.575669",
"0.56998533",
"0.56029904",
"0.5567559",
"0.54616624",
"0.5377717",
"0.53622025",
"0.5348922",
"0.5346724",
"0.5326581",
"0.53180915",
"0.5304156",
"0.5291399",
"0.52477604",
"0.51817405",
"0.5181331",
"0.5157834",
"0.51545167",
"0.51203257",
"0.5117162",
"0.51130813",
"0.5069146",
"0.50540334",
"0.50374293",
"0.5010588",
"0.50020677"
]
| 0.7201141 | 0 |
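A sketch of pinning outputs at explicit layout coordinates with `add` (manual configuration); the output handles and the 1920px width are assumptions:

```python
layout.add(primary, 0, 0)        # assumed `primary` output at the origin
layout.add(secondary, 1920, 0)   # assumed `secondary` placed right of a 1920px-wide output
```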
Move an output to specified coordinates. | def move(self, output: Output, lx: int, ly: int) -> None:
lib.wlr_output_layout_move(self._ptr, output._ptr, lx, ly) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def move_to(self, destination_coords):\n self.x = destination_coords[0]\n self.y = destination_coords[1]\n return",
"def move(self, coordinates, direction):\n pass",
"def instantiate_output_move(row, col, row_idx_bitwidth, col_idx_bitwidth):\n group_name = py_ast.CompVar(\n NAME_SCHEME[\"out mem move\"].format(pe=f\"pe_{row}_{col}\")\n )\n pe = py_ast.CompVar(f\"pe_{row}_{col}\")\n return py_ast.Group(\n group_name,\n connections=[\n py_ast.Connect(\n py_ast.ConstantPort(row_idx_bitwidth, row),\n py_ast.CompPort(OUT_MEM, \"addr0\"),\n ),\n py_ast.Connect(\n py_ast.ConstantPort(col_idx_bitwidth, col),\n py_ast.CompPort(OUT_MEM, \"addr1\"),\n ),\n py_ast.Connect(\n py_ast.CompPort(pe, \"out\"), py_ast.CompPort(OUT_MEM, \"write_data\")\n ),\n py_ast.Connect(\n py_ast.ConstantPort(1, 1), py_ast.CompPort(OUT_MEM, \"write_en\")\n ),\n py_ast.Connect(\n py_ast.CompPort(OUT_MEM, \"done\"), py_ast.HolePort(group_name, \"done\")\n ),\n ],\n )",
"def move_stage_to_xy(self, coordinates):\n raise NotImplementedError",
"def MoveTo(self, x, y):\n return _Terminal.move % (y,x)",
"def move(self, output_file, input_file, no_input=False):\n input = open(input_file, \"r\")\n for command in input:\n cleaned_cmd = command.strip()\n if cleaned_cmd in self.movements:\n movement, move_str = self.pick_movement(cleaned_cmd)\n if move_str == 'forward' or move_str == 'backward':\n movement(4)\n else:\n movement(2)\n # for debug purposes\n self.log_arr.append(move_str)\n drone_pose = self.record_pose()\n self.get_np_image(True, \"curr_image.png\")\n airsim.time.sleep(1)\n input.close()\n self.write_log(output_file)\n print(\"finished episode\")",
"def output_position(self, output_position: int):\n\n self._output_position = output_position",
"def make_move(self, playername, coordinates, direction):\n\n pass",
"def move(x,y):\r\n pass",
"def move(self, x, y):\n\n #log.info(\"MOVE x:%s y:%s\", x, y)",
"def move(self, absolutePosition):\n if self.connection is not None:\n c = self.connection.getChannel(self.chanNamePrefix % 'start_one')\n\n c.write(absolutePosition)\n\n w = SpecWaitObject.SpecWaitObject(self.connection)\n w.waitChannelUpdate(self.chanNamePrefix % 'move_done', waitValue = 0) #move_done is set to 0 when move has finished",
"def move_to(self, x, y):\n return _Terminal.move % (y, x)",
"def _move(self, pos):\n self.put_par(\"drive\", pos)",
"def move(self, location):\n disp_x = location[0] - self._x_coord\n disp_y = location[1] - self._y_coord\n board = self._board\n\n # Instantiate dictionary of displaced locations to value they will take\n mov_map = dict()\n for position in self._area:\n mov_map[(position[0] + disp_x, position[1] + disp_y)] = board[position[0]][position[1]]\n\n # Clear previous locations\n for position in self._area:\n board[position[0]][position[1]] = \" \"\n\n # Place stones to displaced location\n for position in self._area:\n board[position[0] + disp_x][position[1] + disp_y] = \\\n mov_map[(position[0] + disp_x, position[1] + disp_y)]\n\n # Return the new stone locations for processing\n return set(mov_map.keys())",
"def move_to(self, ypos, xpos):\n # the screen's coordinates are 1 based, but the command is 0 based\n xpos -= 1\n ypos -= 1\n self.exec_command(\"MoveCursor({0}, {1})\".format(ypos, xpos).encode(\"utf-8\"))",
"def relmoveto(self, x = 0, y = 0):\n self.cur_x += x\n self.cur_y += y\n if x < 0:\n self.out.write(self.csi + \"%sD\" % -x)\n elif x > 0:\n self.out.write(self.csi + \"%sC\" % x)\n if y < 0:\n self.out.write(self.csi + \"%sA\" % -y)\n elif y > 0:\n self.out.write(self.csi + \"%sB\" % y)",
"def move_to(self, x, y):\r\n self.__current_room = x, y",
"def move_to(xy):\n (x,y) = xy\n win32api.SetCursorPos((x,y))",
"def move_to_position2(self):",
"def move_to(self, x, y):\n self.x = x\n self.y = y",
"def move_to(self, x, y):\n self.x = x\n self.y = y",
"def move_to_pos(self, x, y, z):\n try:\n angles = self.ik_to(x, y, z)\n self.move_to_angle(*angles)\n\n self.footPosition = np.array([x, y, z])\n self.angles = angles\n\n except Exception as exc:\n print (exc)",
"def move(self, x, y):\n self.x = x\n self.y = y\n self.call('move', x, y)",
"def MoveTo(*args, **kwargs):\n return _gdi_.NativePixelData_Accessor_MoveTo(*args, **kwargs)",
"def move(self):\n \n self.position = self.explore()",
"def output_coords(self, output: Output) -> tuple[float, float]:\n ox = ffi.new(\"double *\")\n oy = ffi.new(\"double *\")\n lib.wlr_output_layout_output_coords(self._ptr, output._ptr, ox, oy)\n\n return ox[0], oy[0]",
"def move(self):\n pass",
"def print_move(origin, destination):\n print(\"Move the top disk from rod\", origin, \"to rod\", destination)",
"def print_move(origin, destination):\n print(\"Move the top disk from rod\", origin, \"to rod\", destination)",
"def print_move(origin, destination):\n print(\"Move the top disk from rod\", origin, \"to rod\", destination)"
]
| [
"0.66784644",
"0.64650375",
"0.63968575",
"0.6348587",
"0.6258059",
"0.6240109",
"0.62277734",
"0.62160134",
"0.61788344",
"0.6122616",
"0.60436296",
"0.60330397",
"0.60210365",
"0.59689134",
"0.5945228",
"0.59423894",
"0.59325606",
"0.5914747",
"0.58816713",
"0.5859127",
"0.5859127",
"0.58449966",
"0.5844212",
"0.584106",
"0.58354425",
"0.58102316",
"0.5784193",
"0.578198",
"0.578198",
"0.578198"
]
| 0.7212036 | 0 |
Remove an output from the layout. | def remove(self, output: Output) -> None:
lib.wlr_output_layout_remove(self._ptr, output._ptr) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_layout(self, layout: Layout):\n self.layouts.pop(layout, None)",
"def removeLayout(self, *args):\n return _libsbml.LayoutModelPlugin_removeLayout(self, *args)",
"def removeOutput(self, *args):\n return _libsbml.Transition_removeOutput(self, *args)",
"def destroy(self):\n self.window.destroy_output_panel(self.name)",
"def remove(self):\n self.inp.inputs.discard(self)\n self.out.outputs.discard(self)",
"def remove_output(nb):\n for cell in nb['cells']:\n if 'outputs' in cell:\n cell['outputs'] = []",
"def remove_layout(self, layout):\n\n #we can get 'layout' or 'layout (variant)'\n (layout, variant) = parse_layout_variant(layout)\n\n layouts_variants = zip(self._rec.layouts, self._rec.variants)\n\n if not (layout, variant) in layouts_variants:\n msg = \"'%s (%s)' not in the list of added layouts\" % (layout,\n variant)\n raise XklWrapperError(msg)\n\n idx = layouts_variants.index((layout, variant))\n # pylint: disable=unsubscriptable-object\n new_layouts = self._rec.layouts[:idx] + self._rec.layouts[(idx + 1):]\n # pylint: disable=unsubscriptable-object\n new_variants = self._rec.variants[:idx] + self._rec.variants[(idx + 1):]\n\n self._rec.set_layouts(new_layouts)\n self._rec.set_variants(new_variants)\n\n if not self._rec.activate(self._engine):\n raise XklWrapperError(\"Failed to remove layout '%s (%s)'\" % (layout,\n variant))",
"def remove_output(self, ch):\n outs = self._outs\n for i, out in enumerate(outs):\n out_ch, _ = out\n if ch is out_ch:\n del outs[i]\n break",
"def unset_output(self):\n self._custom_setter('presenting', False)",
"def cleanup_output(output_name):\n print(\"Removing {}\".format(output_name))\n os.remove(output_name)",
"def cleanWorkspace(self):\n self.window.labelMessage.setText(\"\")\n\n if self.inspectinoAnalyzer:\n del self.analyzerWidget\n self.inspectinoAnalyzer = False\n\n for index in reversed(range(self.window.layoutDepthermInpesction.count())):\n layoutItem = self.window.layoutDepthermInpesction.itemAt(index)\n widgetToRemove = layoutItem.widget()\n print(\"found widget: \" + str(widgetToRemove))\n widgetToRemove.setParent(None)\n self.window.layoutDepthermInpesction.removeWidget(widgetToRemove)",
"def clear_output(self):\n\n self.output_text.config(state=NORMAL)\n self.output_text.delete(1.0, END)\n self.output_text.config(state=DISABLED)",
"def remove_expected_output(self, name):\n if name in self._expected_outputs:\n del self._expected_outputs[name]",
"def del_layout(layout): # FIXME delete it\n for i in reversed(range(layout.count())):\n if layout.itemAt(i).widget() is not None:\n layout.itemAt(i).widget().setParent(None)\n elif layout.itemAt(i).layout() is not None:\n del_layout(layout.itemAt(i).layout())\n layout.itemAt(i).layout().setParent(None)\n else:\n layout.removeItem(layout.itemAt(i))",
"def removeAfterRender(call, args=(), kwargs={}, nodeClass='Write'):",
"def remove(self):\n mod_file = self.layout.filename\n if os.path.exists(mod_file):\n try:\n os.remove(mod_file) # Remove the module file\n os.removedirs(\n os.path.dirname(mod_file)\n ) # Remove all the empty directories from the leaf up\n except OSError:\n # removedirs throws OSError on first non-empty directory found\n pass",
"def remove(self, *args):\n return _libsbml.ListOfLayouts_remove(self, *args)",
"def purge_output(out):\n debug = out is None\n if not debug:\n output_path = out.output_path\n stop_output(out)\n new_out = start_output(debug=debug, output_path=output_path)\n return new_out\n return None",
"def reset_output(self):\r\n self.output.seek(0)\r\n self.output.truncate(0)",
"def strip_output(nb):\n nb.metadata.pop(\"signature\", None)\n for cell in _cells(nb):\n if \"outputs\" in cell:\n cell[\"outputs\"] = []\n if \"prompt_number\" in cell:\n cell[\"prompt_number\"] = None\n return nb",
"def discard(self) -> None:\n\n self.plot.close()",
"def remove_acro_output(self, acro_in):\n if acro_in in self.acros_output:\n del self.acros_output[acro_in]",
"def suppress_output_after_render(app, out_text):\n if not hasattr(app.pargs, 'output_handler_override'):\n return\n elif app.pargs.output_handler_override == 'yaml':\n app._suppress_output()",
"def erase_plot(self, line_position=0):\n self.axplot.lines.pop(line_position).remove\n self.fig.canvas.draw()\n return",
"def remove_render_function(self):\n self.index_wid.remove_render_function()\n self._render_function = None",
"def __clear_layout(self):\r\n\r\n # Test if layout is empty\r\n if self.__layout.count():\r\n for i in reversed(range(self.__layout.count())):\r\n widget = self.__layout.takeAt(i).widget()\r\n if widget is not None:\r\n widget.setParent(None)",
"def clearLayout(self, layout):\n while layout.count(): #loop while layout has children\n child = layout.takeAt(0) #first child of layout\n if child.widget():\n child.widget().deleteLater() #delete widget\n elif child.layout():\n self.clearLayout(child.layout()) #delete layout",
"def removeLatticeFrame(self):\n self.latticeFrame.remove()",
"def removeExistWidget(self, layout):\n for index in range(layout.count()):\n if layout.itemAt(index).widget():\n layout.itemAt(index).widget().deleteLater()",
"def teardown_output(self, output, teardown_job=True, filesystem_only=True):\n job = output.oq_job\n if not filesystem_only:\n output.delete()\n if teardown_job:\n self.teardown_job(job, filesystem_only=filesystem_only)"
]
| [
"0.7089926",
"0.68975186",
"0.6884714",
"0.6528211",
"0.63225365",
"0.6296304",
"0.6290189",
"0.6283299",
"0.62830955",
"0.62702155",
"0.626779",
"0.62561774",
"0.62509525",
"0.6069614",
"0.6065491",
"0.6051975",
"0.60506624",
"0.5988264",
"0.5982314",
"0.59661174",
"0.5852069",
"0.5850642",
"0.58503354",
"0.58473134",
"0.58366483",
"0.5815233",
"0.58120483",
"0.5800117",
"0.5787845",
"0.5761979"
]
| 0.849233 | 0 |
Get the box of the layout for the given reference output in layout coordinates. If `reference` is None, the box will be for the extents of the entire layout. If the output isn't in the layout, the box will be empty. | def get_box(
self, reference: Output | None = None, dest_box: Box | None = None
) -> Box:
if reference:
reference_ptr = reference._ptr
else:
reference_ptr = ffi.NULL
if not dest_box:
dest_box = Box(ptr=ffi.new("struct wlr_box *"))
lib.wlr_output_layout_get_box(self._ptr, reference_ptr, dest_box._ptr)
return dest_box | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_bounding_box(self):\n if not isinstance(self.ref_cell, Cell):\n return None\n if self.rotation is None or self.rotation % 90 == 0:\n cell_bbox = self.ref_cell.get_bounding_box()\n if cell_bbox is None:\n return None\n polygons = self._transform_polygons([cell_bbox])\n else:\n # For non-cardinal rotations of a reference, we must use the\n # flattened polygons for the reference\n polygons = self.get_polygons()\n if len(polygons) == 0:\n bb = None\n else:\n all_points = numpy.concatenate(polygons).transpose()\n bb = numpy.array(\n (\n (all_points[0].min(), all_points[1].min()),\n (all_points[0].max(), all_points[1].max()),\n )\n )\n return bb",
"def get_bounding_box(self):\n if not isinstance(self.ref_cell, Cell):\n return None\n\n if self.rotation is None or self.rotation % 90 == 0:\n cell_bbox = self.ref_cell.get_bounding_box()\n if cell_bbox is None:\n return None\n polygons = self._transform_polygons([cell_bbox])\n else:\n # For non-cardinal rotations of a reference, we must use the\n # flattened polygons for the reference\n polygons = self.get_polygons()\n if len(polygons) == 0:\n bb = None\n else:\n all_points = numpy.concatenate(polygons).transpose()\n bb = numpy.array(\n (\n (all_points[0].min(), all_points[1].min()),\n (all_points[0].max(), all_points[1].max()),\n )\n )\n return bb",
"def get_bounding_box(self):\n if not isinstance(self.ref_cell, Cell):\n return None\n key = (self.ref_cell, self.rotation, self.magnification,\n self.x_reflection, self.columns, self.rows, self.spacing[0],\n self.spacing[1])\n deps = self.ref_cell.get_dependencies(True)\n if not (self.ref_cell._bb_valid and\n all(ref._bb_valid for ref in deps) and key in _bounding_boxes):\n for ref in deps:\n ref.get_bounding_box()\n self.ref_cell.get_bounding_box()\n tmp = self.origin\n self.origin = None\n polygons = self.get_polygons()\n self.origin = tmp\n if len(polygons) == 0:\n bb = None\n else:\n all_points = numpy.concatenate(polygons).transpose()\n bb = numpy.array(((all_points[0].min(), all_points[1].min()),\n (all_points[0].max(), all_points[1].max())))\n _bounding_boxes[key] = bb\n else:\n bb = _bounding_boxes[key]\n if self.origin is None or bb is None:\n return bb\n else:\n return bb + numpy.array(((self.origin[0], self.origin[1]),\n (self.origin[0], self.origin[1])))",
"def get_bounding_box(self):\n deps_still_valid = all(ref._bb_valid for ref in self.get_dependencies(True))\n cached_bbox_still_valid = self._bb_valid and deps_still_valid\n if not cached_bbox_still_valid:\n bb = numpy.array(((1e300, 1e300), (-1e300, -1e300)))\n all_polygons = []\n for polygon in self.polygons:\n all_polygons.extend(polygon.polygons)\n for path in self.paths:\n all_polygons.extend(path.to_polygonset().polygons)\n for reference in self.references:\n reference_bb = reference.get_bounding_box()\n if reference_bb is not None:\n all_polygons.append(reference_bb)\n if len(all_polygons) > 0:\n all_points = numpy.concatenate(all_polygons).transpose()\n bb[0, 0] = min(bb[0, 0], all_points[0].min())\n bb[0, 1] = min(bb[0, 1], all_points[1].min())\n bb[1, 0] = max(bb[1, 0], all_points[0].max())\n bb[1, 1] = max(bb[1, 1], all_points[1].max())\n self._bounding_box = bb\n else:\n self._bounding_box = None\n self._bb_valid = True\n\n if self._bounding_box is None:\n return None\n else:\n # return a *copy* of the cached bounding box to ensure it doesn't get inadvertently modified\n return numpy.array(self._bounding_box)",
"def get_bounding_box(self):\n if not isinstance(self.ref_cell, Cell):\n return None\n if (self.rotation is None and self.magnification is None and\n self.x_reflection is None):\n key = self\n else:\n key = (self.ref_cell, self.rotation, self.magnification,\n self.x_reflection)\n deps = self.ref_cell.get_dependencies(True)\n if not (self.ref_cell._bb_valid and\n all(ref._bb_valid for ref in deps) and key in _bounding_boxes):\n for ref in deps:\n ref.get_bounding_box()\n self.ref_cell.get_bounding_box()\n tmp = self.origin\n self.origin = None\n polygons = self.get_polygons()\n self.origin = tmp\n if len(polygons) == 0:\n bb = None\n else:\n all_points = numpy.concatenate(polygons).transpose()\n bb = numpy.array(((all_points[0].min(), all_points[1].min()),\n (all_points[0].max(), all_points[1].max())))\n _bounding_boxes[key] = bb\n else:\n bb = _bounding_boxes[key]\n if self.origin is None or bb is None:\n return bb\n else:\n return bb + numpy.array(((self.origin[0], self.origin[1]),\n (self.origin[0], self.origin[1])))",
"def node_format(surface, node, reference=True):\n reference_size = 'xy' if reference else (0, 0)\n width = size(surface, node.get('width', '100%'), reference_size[0])\n height = size(surface, node.get('height', '100%'), reference_size[1])\n viewbox = node.get('viewBox')\n if viewbox:\n viewbox = re.sub('[ \\n\\r\\t,]+', ' ', viewbox)\n viewbox = tuple(float(position) for position in viewbox.split())\n width = width or viewbox[2]\n height = height or viewbox[3]\n return width, height, viewbox",
"def box(self) -> math.Box:\n area = self.__dict__[\"area\"]\n if area is None:\n return math.Box()\n return math.Box(math.Point(*area[:2]), math.Point(*area[-2:]))",
"def exact_box(self, use_shapetolerance=False):\n b = Bnd_Box()\n use_triangulation = True\n brepbndlib_AddOptimal(self.topods_shape(), b, use_triangulation, use_shapetolerance)\n return geom_utils.box_to_geometry(b)",
"def bounding_box(self):\n frames = self.available_frames\n transform_0 = self.get_transform(frames[0], frames[1])\n try:\n bb = transform_0.bounding_box\n except NotImplementedError:\n return None\n if transform_0.n_inputs == 1:\n return bb\n try:\n axes_order = self.input_frame.axes_order\n except AttributeError:\n axes_order = np.arange(transform_0.n_inputs)\n # Model.bounding_box is in python order, need to reverse it first.\n return tuple(bb[::-1][i] for i in axes_order)",
"def bounding_box(self):\n return None",
"def __get_box(self, position):\n return self.__board[position//self.__length][position%self.__length]",
"def get_reference(self, compare_path):\n if compare_path:\n with open(compare_path, 'r') as f:\n reference = json.load(f)\n return reference[\"boxes\"]\n return None",
"def visual_reference(self) -> pulumi.Output[Optional['outputs.CanaryVisualReference']]:\n return pulumi.get(self, \"visual_reference\")",
"def visual_reference(self) -> Optional[pulumi.Input['CanaryVisualReferenceArgs']]:\n return pulumi.get(self, \"visual_reference\")",
"def resolve_output_reference(reference, workflow_dict, input_dict):\n log(\"Resolving reference \" + str(reference))\n source = reference.split(sep='/')\n if len(source) == 1:\n if reference not in input_dict:\n input_def = [d for d in workflow_dict['inputs'] if d['id'] == reference]\n if input_def:\n input_def = input_def[0]\n if 'default' in input_def:\n return input_def['default'], True\n else:\n if 'type' in input_def and input_def['type'].endswith('?'):\n return None, True\n else:\n exit_perm_fail(\"No input and no default for required input {}\".format(reference))\n else:\n exit_perm_fail(\"Source reference {} not found\".format(reference))\n exit_perm_fail(\"Could not resolve input \" + reference)\n return input_dict[reference], True\n\n if len(source) != 2:\n exit_perm_fail(\"Source reference with more than one /\")\n\n step_id = source[0]\n output_id = source[1]\n for step in workflow_dict['steps']:\n if 'id' in step and step['id'] == step_id:\n if 'cwltiny_output_available' not in step:\n return None, False\n if not 'out' in step:\n exit_perm_fail('Step {} does not have an \"out\" member'.format(step_id))\n for output in step['out']:\n if output['id'] == output_id:\n return output['cwltiny_value'], True",
"def box(self):\n b = Bnd_Box()\n brepbndlib_Add(self.topods_shape(), b)\n return geom_utils.box_to_geometry(b)",
"def GetBox(*args, **kwargs):\n return _gdi_.GraphicsPath_GetBox(*args, **kwargs)",
"def consume_output(self, confidence, wait):\n consumed, detections = super().consume_output(wait)\n if consumed and detections is not None:\n face_box = self._get_bounding_box(detections, confidence=confidence) \n else:\n face_box = None\n return consumed, face_box",
"def _viewer_by_reference(self, reference):\n viewer_item = self._viewer_item_by_reference(reference)\n\n return self._viewer_store[viewer_item['id']]",
"def get_position_to(self, reference):\n if isinstance(reference, Item):\n return self.get_position_to_item(reference)\n elif isinstance(reference, ItemGroup):\n return self.get_position_to_item_group(reference)\n else:\n raise TypeError(\"reference must be instance of Item or ItemGroup\")",
"def _get_render_area(self):\n # take off our page margins\n render_area = Box2d(\n self._margin,\n self._margin,\n self._pagesize[0] -\n self._margin,\n self._pagesize[1] -\n self._margin)\n\n # then if user specified a box to render get intersection with that\n if self._box:\n return render_area.intersect(self._box)\n\n return render_area",
"def get_bounding_box(self):\n if len(self.elements) == 0:\n return None\n if not (self._bb_valid and\n all(ref._bb_valid for ref in self.get_dependencies(True))):\n bb = numpy.array(((1e300, 1e300), (-1e300, -1e300)))\n all_polygons = []\n for element in self.elements:\n if isinstance(element, PolygonSet):\n all_polygons.extend(element.polygons)\n elif isinstance(element, CellReference) or isinstance(\n element, CellArray):\n element_bb = element.get_bounding_box()\n if element_bb is not None:\n bb[0, 0] = min(bb[0, 0], element_bb[0, 0])\n bb[0, 1] = min(bb[0, 1], element_bb[0, 1])\n bb[1, 0] = max(bb[1, 0], element_bb[1, 0])\n bb[1, 1] = max(bb[1, 1], element_bb[1, 1])\n if len(all_polygons) > 0:\n all_points = numpy.concatenate(all_polygons).transpose()\n bb[0, 0] = min(bb[0, 0], all_points[0].min())\n bb[0, 1] = min(bb[0, 1], all_points[1].min())\n bb[1, 0] = max(bb[1, 0], all_points[0].max())\n bb[1, 1] = max(bb[1, 1], all_points[1].max())\n self._bb_valid = True\n _bounding_boxes[self] = bb\n return _bounding_boxes[self]",
"def _viewer_item_by_reference(self, reference):\n def find_viewer_item(stack_items):\n out_viewer_item = None\n\n for stack_item in stack_items:\n for viewer_item in stack_item.get('viewers'):\n if viewer_item['reference'] == reference:\n out_viewer_item = viewer_item\n break\n\n if len(stack_item.get('children')) > 0:\n out_viewer_item = find_viewer_item(stack_item.get('children'))\n\n return out_viewer_item\n\n viewer_item = find_viewer_item(self.state.stack_items)\n\n return viewer_item",
"def get_boundingbox(face, width, height, scale=1.3, minsize=None):\n x1 = face.left()\n y1 = face.top()\n x2 = face.right()\n y2 = face.bottom()\n size_bb = int(max(x2 - x1, y2 - y1) * scale)\n if minsize:\n if size_bb < minsize:\n size_bb = minsize\n center_x, center_y = (x1 + x2) // 2, (y1 + y2) // 2\n\n # Check for out of bounds, x-y top left corner\n x1 = max(int(center_x - size_bb // 2), 0)\n y1 = max(int(center_y - size_bb // 2), 0)\n # Check for too big bb size for given x, y\n size_bb = min(width - x1, size_bb)\n size_bb = min(height - y1, size_bb)\n\n return x1, y1, size_bb",
"def get_bounding_box(self):\n if len(self.polygons) == 0:\n return None\n return numpy.array(((min(pts[:, 0].min() for pts in self.polygons),\n min(pts[:, 1].min() for pts in self.polygons)),\n (max(pts[:, 0].max() for pts in self.polygons),\n max(pts[:, 1].max() for pts in self.polygons))))",
"def GetBox(quad):\n x0, y0, _, _, x1, y1, _, _ = quad\n return (x0, y0, x1, y1)",
"def bbox(self, node):\n node_id = node.get('id')\n #inkex.utils.debug(\"Check if \" + str(node_id) + \" is in \" + str(self.node_info))\n info = self.node_info[node_id] \n \n x = info.x\n y = info.y\n width = info.width\n height = info.height\n\n return Box(Point(x, y),\n Point(x + width, y),\n Point(x + width, y + height),\n Point(x, y + height))",
"def get_iounet_box(self, pos, sz, sample_pos, sample_scale):\n box_center = (pos - sample_pos) / sample_scale + (self.iou_img_sample_sz - 1) / 2\n box_sz = sz / sample_scale\n target_ul = box_center - (box_sz - 1) / 2\n return torch.cat([target_ul.flip((0,)), box_sz.flip((0,))])",
"def get_iounet_box(self, pos, sz, sample_pos, sample_scale):\n box_center = (pos - sample_pos) / sample_scale + (self.iou_img_sample_sz - 1) / 2\n box_sz = sz / sample_scale\n target_ul = box_center - (box_sz - 1) / 2\n return torch.cat([target_ul.flip((0,)), box_sz.flip((0,))])",
"def bounding_box(self, index_or_id):\n\n\t\tcell_index = self.grid.insure_index(index_or_id)\n\n\t\tleft = self.cell_size[0] * cell_index[1] + self.origin[0]\n\t\ttop = self.cell_size[1] * cell_index[0] + self.origin[1]\n\t\tright = left + self.cell_size[0]\n\t\tbottom = top + self.cell_size[1]\n\t\treturn (left, top, right, bottom)"
]
| [
"0.5901545",
"0.58824706",
"0.5489302",
"0.5370161",
"0.53481007",
"0.5327489",
"0.5279537",
"0.5278215",
"0.5267045",
"0.51329285",
"0.51328117",
"0.5108612",
"0.5098542",
"0.5029901",
"0.49236295",
"0.49177372",
"0.48856136",
"0.48354876",
"0.47964993",
"0.47919694",
"0.47916692",
"0.47695938",
"0.4726149",
"0.4699542",
"0.4696562",
"0.46792057",
"0.46592033",
"0.4637716",
"0.4637716",
"0.46164715"
]
| 0.7389529 | 0 |
Get the closest point on this layout to the given point, within the given reference output. If reference is None, gets the closest point from the entire layout. | def closest_point(
self, lx: float, ly: float, reference: Output | None = None
) -> tuple[float, float]:
if reference:
reference_ptr = reference._ptr
else:
reference_ptr = ffi.NULL
dest_lx = ffi.new("double *")
dest_ly = ffi.new("double *")
lib.wlr_output_layout_closest_point(
self._ptr, reference_ptr, lx, ly, dest_lx, dest_ly
)
return dest_lx[0], dest_ly[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def closest_point(self, point, maxdist=0.0, return_param=False):\n return self.xyz",
"def closest_point_to(self, x):\n x = np.array(x)\n v = self.p1 - self.p0\n b = self.p0 - x\n\n t = -np.dot(v, b) / np.dot(v, v)\n if (0 <= t <= 1):\n closest = t*(self.p1 - self.p0) + self.p0\n return closest\n else:\n if np.linalg.norm(x - self.p0) < np.linalg.norm(x - self.p1):\n return self.p0\n else:\n return self.p1",
"def FindClosestPoint(self, ):\n ...",
"def closest_point(point, points):\n return points[cdist([point], points).argmin()]",
"def closest_point(self, point, start_param=None, Ns=25):\n x, z = self.rotate_to_xz_plane(point)\n la = self._closest_point(x, z, start_param, Ns)\n return la",
"def nearest_on_boundary(self, point):\n _, minpt = self._nearest_to_point(point)\n return Point(minpt, crs=self.crs)",
"def closest(point, points):\n pts = [(Point.distance(point, p), p) for p in points]\n pts.sort()\n return pts[0][1]",
"def closest(self, x):\n # http://www.ahinson.com/algorithms_general/Sections/Geometry/PluckerLine.pdf\n # has different equation for moment, the negative\n\n x = arg.getvector(x, 3)\n\n lam = np.dot(x - self.pp, self.uw)\n p = self.point(lam) # is the closest point on the line\n d = np.linalg.norm( x - p)\n \n return namedtuple('closest', 'p d lam')(p, d, lam)",
"def get_tangent(self, reference_point):\r\n closest_distance = inf\r\n current_piece = None\r\n # Iterate over all track's pieces to find the piece which is closest to the reference point.\r\n for piece in self.pieces:\r\n closest = piece.get_closest_to_point(reference_point)\r\n distance = closest.distance(reference_point)\r\n if distance < closest_distance:\r\n closest_distance = distance\r\n current_piece = piece\r\n # Returns the tangent of the closest point to the reference point in this piece\r\n return current_piece.get_tangent(reference_point)",
"def getNearestNode(self, point):\n nodes = list(mm.nodeidx.nearest((point.getPoint().x, point.getPoint().y)))\n return self.node_counter__node.get(nodes[0])",
"def get_closest_waypoint(self, pose):\n if self.kdtree:\n return kdtree_closest_point(self.kdtree,\n (pose.position.x,\n pose.position.y))\n else:\n return 0",
"def get_closest_waypoint(self, pose):\n if self.kdtree:\n return kdtree_closest_point(self.kdtree,\n (pose.x,\n pose.y))\n else:\n return 0",
"def _nearest_to_point(self, point):\n ptvertex = point.get_vertex(crs=self.crs)\n segments = zip(self.vertices.slice(0, -1), self.vertices.slice(1, 0))\n\n if isinstance(self.crs, CartesianCRS):\n func = _cvectorgeo.pt_nearest_planar\n def func(seg):\n return _cvectorgeo.pt_nearest_planar(ptvertex[0], ptvertex[1],\n seg[0][0], seg[0][1], seg[1][0], seg[1][1])\n else:\n fwd = self.crs.forward\n inv = self.crs.inverse\n def func(seg):\n return _cvectorgeo.pt_nearest_proj(fwd, inv, ptvertex,\n seg[0], seg[1], tol=0.01)\n\n point_dist = map(func, segments)\n min_point = None\n min_dist = -1.0\n for i, (point, dist) in enumerate(point_dist):\n if dist < min_dist or (i == 0):\n min_point = point\n min_dist = dist\n\n return min_dist, min_point",
"def closest_point(self, point, maxdist=0.0):\n face, point = self.geometry.ClosestPoint(Rhino.Geometry.Point3d(*point), maxdist)\n return list(point)",
"def LineClosestPoint(line, testpoint):\n line = rhutil.coerceline(line, True)\n testpoint = rhutil.coerce3dpoint(testpoint, True)\n return line.ClosestPoint(testpoint, False)",
"def get_closest_waypoint(self, x, y):\n # TODO implement\n closest_idx = self.waypoint_tree.query([x, y], 1)[1]\n return closest_idx",
"def getNearestEdge(self, point):\n edge = mm.idx.nearest((point.getPoint().x, point.getPoint().y), objects=True)\n edges = [e.object for e in edge]\n if len(edges) == 1:\n result = edges[0]\n else:\n dist = 99999999999999999999999999999999999999999\n for edge in edges:\n distance = point.getPoint().distance(edge.getGeometry())\n if distance < dist:\n dist = distance\n result = edge\n return result",
"def get_closest_point(path, point):\n np_path = convert_path_type(path) # modify path to be a numpy array\n np_point = convert_point_type(point) # modify point to be a [x,y,z] numpy array\n\n # compute the distance from current location to every point in path and find index of the min distance\n distances = ((np_path[:,0] - np_point[0])**2 + (np_path[:,1] - np_point[1])**2)**0.5\n closest_idx = np.argmin(distances)\n\n if closest_idx != len(np_path) - 1: # check if this point is behind current location, if so use index+1\n closest_point = np_path[closest_idx]\n next_closest_point = np_path[closest_idx+1]\n\n # create vectors between the three points\n path_vector = next_closest_point - closest_point\n current_vector = np_point - closest_point\n\n # compute dot product to figure out whether location is behind or in front of closest_point\n dot_prod = np.dot(path_vector, current_vector)\n\n if dot_prod >= 0: # closest point is behind current location\n closest_idx += 1\n\n closest_point = path[closest_idx] # retrieve point from original `path` argument for type consistency\n\n return closest_point, closest_idx",
"def find_point(self, point: Point):\n for internal_point in self.points:\n if internal_point == point:\n return internal_point\n return None",
"def nearest_point_index(self, point):\n return _nearest_point_index(self._points, point)",
"def closest(self, x, y):\n if self.direction == 'horizontal':\n p_pts = np.array([\n self.ax.transData.transform((p, 0))[0] for p in self.positions\n ])\n dist = abs(p_pts - x)\n else:\n p_pts = np.array([\n self.ax.transData.transform((0, p))[1] for p in self.positions\n ])\n dist = abs(p_pts - y)\n index = np.argmin(dist)\n return index, dist[index]",
"def closest_point_on_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n ap = subtract_vectors(point, a)\n c = vector_component(ap, ab)\n return add_vectors(a, c)",
"def closest_point(graph, point_3d):\n current_point = (point_3d[0], point_3d[1])\n closest_point = None\n dist = 100000\n for p in graph.nodes:\n d = LA.norm(np.array(p) - np.array(current_point))\n if d < dist:\n closest_point = p\n dist = d\n return closest_point",
"def __get_closest_waypoint_index(self, x, y):\n return self.__waypoint_tree.query([x, y], 1)[1]",
"def closest_point_in_cloud(point, cloud):\n data = sort_points(point, cloud)\n return data[0]",
"def get_distance(self, point):\n if not isinstance(point, Point):\n point = Point(*point)\n\n distances = [(point.distance_to_point(p), p) for p in self.points]\n sortpoints = sorted(distances, key=lambda x: x[0])\n closest = sortpoints[0][1]\n\n vc = Vector(*closest)\n d1 = vc.dot(vc)\n\n secondc = sortpoints[1][1]\n vs = Vector(*secondc)\n v1 = Vector(*point) - (vc+vs)/2\n v2 = vs-vc\n v2.unitize()\n d2 = v1.dot(v2)\n\n return abs(min(d1, d2)) - self.thickness/2",
"def find_nearest_d(self, point, layers):\n if isinstance(point, tuple):\n point = geojson.Point(coordinates=[point[0], point[1]])\n if len(point) == 3:\n point = self.transform(point, point[2], self.db_proj)\n gd = self.find_nearest(point, layers)\n if gd:\n return gd, distance(point, gd.data)\n else:\n return None, None",
"def closest(self, x, y):\n pts = np.column_stack([self.x, self.y])\n # Transform data coordinates to pixel coordinates.\n pts = self.ax.transData.transform(pts)\n diff = pts - [x, y]\n dist = np.hypot(*diff.T)\n min_index = np.argmin(dist)\n return min_index, dist[min_index]",
"def closest_point(self, shape, inf_dist=1.0, homogenous=True):\n\n self._check_pyb()\n\n if not _pyb: # pragma nocover\n raise ImportError(\n \"The package PyBullet is required for collision \"\n \"functionality. Install using pip install pybullet\"\n )\n\n if not self.pinit:\n self._init_pob()\n self._update_pyb()\n\n self._update_pyb()\n\n if not shape.pinit:\n shape._init_pob()\n shape._update_pyb()\n\n ret = p.getClosestPoints(self.co, shape.co, inf_dist)\n\n if homogenous:\n try:\n return ret[0][8], np.append(np.array(ret[0][5]), 1.0), np.append(np.array(ret[0][6]), 1.0)\n except ValueError:\n return None, None, None\n except IndexError:\n # Obstacle is further away than inf_dist\n return None, None, None\n else:\n try:\n return ret[0][8], np.array(ret[0][5]), np.array(ret[0][6])\n except ValueError:\n return None, None, None\n except IndexError:\n # Obstacle is further away than inf_dist\n return None, None, None",
"def find_closest_pt(ref_lon, ref_lat, tlon, tlat):\n\n # compute great circle distance from location to model grid points\n dist = gc_dist(ref_lon, ref_lat, tlon, tlat)\n\n # find j index of closest grid point\n work = N.take(dist,N.argmin(dist,0),0).diagonal()\n jj = N.argsort(work)[0]\n\n # find i index of closest grid point\n work = N.take(dist,N.argmin(dist,1),1).diagonal()\n ii = N.argsort(work)[0]\n\n return ii, jj"
]
| [
"0.68781555",
"0.6766535",
"0.6657588",
"0.66149604",
"0.6594103",
"0.65055597",
"0.64913166",
"0.6426414",
"0.64132017",
"0.63651127",
"0.62755954",
"0.624393",
"0.6239674",
"0.62271637",
"0.6206133",
"0.60887647",
"0.6084108",
"0.60477453",
"0.60337806",
"0.6023641",
"0.60132015",
"0.5971383",
"0.5966573",
"0.5964847",
"0.5950204",
"0.5931008",
"0.59233266",
"0.5895234",
"0.5876592",
"0.58474606"
]
| 0.76994383 | 0 |
Sets the current_state to the initial_state (0) and sets input_symbol to None. | def reset (self):
self.currentState = self.initialState
self.inputSymbol = None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _reset_state(self):\n self.state = self.start_state.copy()",
"def _reset_for_new_walk(self):\n # Starting State\n self.state = State('start', 0, 1, 0, 0, self.state_space_parameters.input_size, 0, 0, False)\n\n # Architecture String\n self.state_list = [self.state.copy()]",
"def reset(self):\n self.set_state(self._initial_state)",
"def reset(self):\n self.state.fill(EMPTY)",
"def reset(self):\n self.state = self.resolve_et({NFA.START})",
"def reset(self):\n self._current_state = self._initial_state\n return self._current_state",
"def reset(self, state: nx.Graph = None):\n if state is None:\n self._state = self.init_mol\n else:\n self._state = state\n\n self.action_space.update_actions(self._state, self.observation_space)\n if self.record_path:\n self._path = [self._state]\n self._counter = 0",
"def unsetSymbol(self):\n return _libsbml.InitialAssignment_unsetSymbol(self)",
"def reset(self):\n self.state = [\n ['R', 'N', 'B', 'Q', 'K', 'B', 'N', 'R'],\n ['P'] * 8,\n [' '] * 8,\n [' '] * 8,\n [' '] * 8,\n [' '] * 8,\n ['p'] * 8,\n ['r', 'n', 'b', 'q', 'k', 'b', 'n', 'r']\n ]",
"def process (self, inputSymbol):\n \n self.inputSymbol = inputSymbol\n (self.action, self.nextState) = self.getTransition (self.inputSymbol, self.currentState)\n \n if self.action is not None:\n self.action (self)\n \n self.memoryState.append(self.currentState)\n self.currentState = self.nextState\n self.nextState = None",
"def reset(self):\n self.state = \"YYYYRRRRGGGGOOOOBBBBWWWW\"",
"def reset(self):\n self.previous = None\n self.state = None\n self.args = None\n self.context = None",
"def Reset(self):\r\n #if self.originalState != self.currentState:\r\n # self._set_SS_State(self.originalState)\r\n #self.enable()\r\n self._set_SS_State(self.originalState)",
"def initial_state(self):\n return None",
"def reset(self):\r\n self.state = copy.copy(self.mu)",
"def set_input(self, input):\r\n\r\n self.reset()\r\n self.input = input",
"def restore_input(cls):\n del globals()[\"input\"]",
"def reset_state(self, name=None):\n if name is None:\n self.solver.reset_solver(self.initial_state.copy())\n else:\n # TODO: Raise a nice ProtocolError if state not defined\n self.solver.reset_solver(self.saved_states[name].copy())",
"def reset(self):\n self.state = copy.copy(self.mu)",
"def reset(self):\n self.state = copy.copy(self.mu)",
"def reset(self):\n self.state = copy.copy(self.mu)",
"def reset(self):\n self.state = copy.copy(self.mu)",
"def reset(self):\n self.state = copy.copy(self.mu)",
"def reset(self):\n self.state = copy.copy(self.mu)",
"def reset(self):\n self.state = copy.copy(self.mu)",
"def reset(self):\n self.state = copy.copy(self.mu)",
"def reset(self):\n self.state = copy.copy(self.mu)",
"def reset(self):\n self.state = copy.copy(self.mu)",
"def reset(self):\n self.state = copy.copy(self.mu)",
"def reset(self):\n# \n self.end_and_close()\n# self.sim.start()\n\n # Start the next simulation\n self.sim._model.swmm_open()\n self.sim._model.swmm_start()\n\n # get the state\n state = self._state()\n return state"
]
| [
"0.65661925",
"0.64960563",
"0.6418424",
"0.6401293",
"0.6351065",
"0.6345368",
"0.6306686",
"0.62734896",
"0.6161061",
"0.6148739",
"0.6071161",
"0.60412455",
"0.6023714",
"0.6023049",
"0.6021861",
"0.6006364",
"0.598042",
"0.5976253",
"0.5970858",
"0.5970858",
"0.5970858",
"0.5970858",
"0.5970858",
"0.5970858",
"0.5970858",
"0.5970858",
"0.5970858",
"0.5970858",
"0.5970858",
"0.5948646"
]
| 0.83541876 | 0 |
This sets the default transition. This defines an action and next_state to use if the FSM cannot find the combination of the input symbol provided by the user and the current state in the transition list. The default transition can be removed by setting the attribute defaultTransition to None. In this case, the default nextState will be the current state of the FSM. | def setDefaultTransition (self, action, nextState):
if nextState is not None:
self.defaultTransition = (action, nextState)
else:
self.defaultTransition = (action, self.initialState) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getTransition (self, inputSymbol, state):\n\n if (inputSymbol, state) in self.stateTransitions:\n return self.stateTransitions[(inputSymbol, state)]\n elif self.defaultTransition is not None:\n return self.defaultTransition\n else:\n raise ExceptionFSM ('Transition is undefined: (%s, %s).' %\n (str(inputSymbol), str(state)) )",
"def input(self, symbol, *args, **kwargs):\n if self.__state is None:\n raise ValueError(\"FSM state is undefined\")\n try:\n transition = self.__get_state_attr(self._transition_prefix)\n except AttributeError:\n raise Exception(\"unable to find transition function or target\")\n if callable(transition):\n new_state = transition(symbol)\n elif isinstance(transition, dict):\n new_state = transition[symbol]\n else:\n new_state = transition\n return None if new_state is None else self.enter(new_state, *args, **kwargs)",
"def _transition(next, symbols=[0, 1]):\n return {'next': next, 'value': choice(symbols)}",
"def defaultInitialState():\n raise NotImplementedError",
"def reset(self):\n if self._initial_state is None:\n self._current_state = random.choice(tuple(self._transition_probs.keys()))\n elif self._initial_state in self._transition_probs:\n self._current_state = self._initial_state\n elif callable(self._initial_state):\n self._current_state = self._initial_state()\n else:\n raise ValueError(\"initial state %s should be either a state or a function() -> state\" % self._initial_state)\n return self._current_state",
"def next_state(self, action):\n self.state = self.states[action][self.state]",
"def initial_step(self, state, action):\n next_state = self.state_transition(state, action)\n env_action = random.choice(list(self.action_space(next_state)[1]))\n next_state = self.state_transition(next_state, env_action)\n return next_state",
"def _set_transition(\n self, current_state, current_symbol, next_symbol, direction, next_state\n ):\n self._set_symbol(current_symbol)\n self._set_symbol(next_symbol)\n self._set_state(current_state)\n self._set_state(next_state)\n\n if self._transitions.get(current_state) is None:\n self._transitions[current_state] = {}\n\n self._transitions[current_state][current_symbol] = (\n next_symbol,\n direction,\n next_state,\n )",
"def default_action(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"default_action\")",
"def state_transition(self, curr_state, curr_action):\n #The state transition happens from the current state to the next state based on agent's action\n curr_state[curr_action[0]]=curr_action[1]\n return curr_state",
"def setDefaultTerm(self, *args):\n return _libsbml.Transition_setDefaultTerm(self, *args)",
"def state_transition(self, curr_state, curr_action):\n next_state = curr_state.copy()\n next_state[curr_action[0]] = curr_action[1]\n return next_state",
"def get_next_state(self, state, action):\n pass",
"def set_default(self, state: _base.State):\n\n if self.default is not None:\n fmt = \"Overwriting current default state '%s' with '%s'\"\n _logger.warning(fmt % (self.default, state))\n self.default = state",
"def state_transition(self, curr_state, curr_action):\n curr_state[curr_action[0]] = curr_action[1]\n return curr_state",
"def default() -> \"SwitchTo\":\n return SwitchTo(None)",
"def set_default_state(self, state_id):\n\n if self._changing_state:\n return False\n\n if state_id not in self._states:\n return False\n\n current_state_id = self.current_state_id\n\n if state_id == current_state_id:\n return False\n\n self._changing_state = True\n state = self._states[state_id]\n state.enter(current_state_id)\n\n if self.is_state_binder():\n self._set_state_bindings(state_id)\n\n self._default_state_id = state_id\n self.current_state_id = state_id\n self._changing_state = False\n\n return True",
"def default_action(self) -> Optional[str]:\n return pulumi.get(self, \"default_action\")",
"def _next_state(self, state, action):\n\n # Transition table to define movement for each action\n if self.action_type == 'VonNeumann':\n transitions = {0: [-1, 0], 1: [+1, 0], 2: [0, -1], 3: [0, +1]}\n elif self.action_type == 'Moore':\n transitions = {0: [-1, 0], 1: [+1, 0], 2: [0, -1], 3: [0, +1],\n 4: [-1, +1], 5: [+1, +1], 6: [-1, -1], 7: [+1, -1]}\n\n new_state = [state[0] + transitions[action][0], state[1] + transitions[action][1]]\n if self.maze[new_state[0]][new_state[1]] == 1: # Hit wall, stay there\n return state\n else: # Valid move for 0, 2, 3, 4\n return new_state",
"def createDefaultTerm(self):\n return _libsbml.Transition_createDefaultTerm(self)",
"def gen_default(self, stmt: statements.Default) -> None:\n block = self.builder.new_block()\n assert self.switch_options is not None\n self.switch_options[\"default\"] = block\n self.builder.emit_jump(block) # fall through\n self.builder.set_block(block)\n self.gen_stmt(stmt.statement)",
"def default_action(self) -> str:\n return pulumi.get(self, \"default_action\")",
"def choose_action(self):\n\n # Set the agent state and default action\n action=None\n if len(self.action_sequence) >=1:\n action = self.action_sequence[0] \n if len(self.action_sequence) >=2:\n self.action_sequence=self.action_sequence[1:]\n else:\n self.action_sequence=[]\n return action",
"def change_state(self):\n transitions = self.transition_map[self.current_state]\n self.current_state = select_from_probability_dict(random(),transitions)",
"def set_initial_machine_state(self, machine_state):\n\t\tself.machine_state = machine_state",
"def set_transition(self, s_transition) :\n logger.debug('In %s.set_transition received state: %s' % (self._name, s_transition))",
"def default_action(self):\n pass",
"def setTransition(self,input,target):\n\t\tself.trans[input] = target\n\t\t# self.outputs[input] = output\n\t\t# self.direction[input] = direction if direction >= -1 and direction <= 1 else 1",
"def generate_next_state(self, action) :\n raise NotImplementedError",
"def transition_to_state(state):\n\n # Clear our \"time-in-state\" counter.\n m.d.ss += cycles_in_state.eq(0)\n\n # If we have any additional entry conditions for the given state, apply them.\n if state in tasks_on_entry:\n m.d.ss += tasks_on_entry[state]\n\n m.next = state"
]
| [
"0.5937508",
"0.58190686",
"0.5759606",
"0.57087535",
"0.5651682",
"0.565062",
"0.5649066",
"0.56352",
"0.5580734",
"0.5491866",
"0.5468211",
"0.5421407",
"0.53243065",
"0.52767915",
"0.5274182",
"0.5246616",
"0.5239235",
"0.5184615",
"0.51798594",
"0.5175904",
"0.5171481",
"0.51306957",
"0.50692",
"0.50543267",
"0.50277287",
"0.5011134",
"0.49714303",
"0.49687213",
"0.49421173",
"0.4915305"
]
| 0.77794313 | 0 |
This method returns the tuple (action, next state) given an inputSymbol and state. | def getTransition (self, inputSymbol, state):
if (inputSymbol, state) in self.stateTransitions:
return self.stateTransitions[(inputSymbol, state)]
elif self.defaultTransition is not None:
return self.defaultTransition
else:
raise ExceptionFSM ('Transition is undefined: (%s, %s).' %
(str(inputSymbol), str(state)) ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_next_state(self, state, action):\n pass",
"def p(self, next_state, state, action):\n\n return self._p[state, next_state, action]",
"def process (self, inputSymbol):\n \n self.inputSymbol = inputSymbol\n (self.action, self.nextState) = self.getTransition (self.inputSymbol, self.currentState)\n \n if self.action is not None:\n self.action (self)\n \n self.memoryState.append(self.currentState)\n self.currentState = self.nextState\n self.nextState = None",
"def nextState(self, state, action):\n return state + action",
"def get_next_state(self, pos, action):\n new_pos = [0, 0]\n if action in [5, 0, 4]:\n new_pos[0] = pos[0]-1\n elif action in [7, 1, 6]:\n new_pos[0] = pos[0]+1\n else:\n new_pos[0] = pos[0]\n\n if action in [5, 3, 7]:\n new_pos[1] = pos[1]-1\n elif action in [4, 2, 6]:\n new_pos[1] = pos[1]+1\n else:\n new_pos[1] = pos[1]\n return tuple(new_pos)",
"def nextState(self, state, action):\n j = 0\n i = 0\n while (j != action[1]):\n if(state[i] == ' '): j += 1\n i += 1\n j = i\n while (i < len(state) and state[i] != ' '): i+=1\n return state[0:j] + action[0] + state[i:len(state)]",
"def input(self, symbol, *args, **kwargs):\n if self.__state is None:\n raise ValueError(\"FSM state is undefined\")\n try:\n transition = self.__get_state_attr(self._transition_prefix)\n except AttributeError:\n raise Exception(\"unable to find transition function or target\")\n if callable(transition):\n new_state = transition(symbol)\n elif isinstance(transition, dict):\n new_state = transition[symbol]\n else:\n new_state = transition\n return None if new_state is None else self.enter(new_state, *args, **kwargs)",
"def _next_state(self, state, action):\n\n # Transition table to define movement for each action\n if self.action_type == 'VonNeumann':\n transitions = {0: [-1, 0], 1: [+1, 0], 2: [0, -1], 3: [0, +1]}\n elif self.action_type == 'Moore':\n transitions = {0: [-1, 0], 1: [+1, 0], 2: [0, -1], 3: [0, +1],\n 4: [-1, +1], 5: [+1, +1], 6: [-1, -1], 7: [+1, -1]}\n\n new_state = [state[0] + transitions[action][0], state[1] + transitions[action][1]]\n if self.maze[new_state[0]][new_state[1]] == 1: # Hit wall, stay there\n return state\n else: # Valid move for 0, 2, 3, 4\n return new_state",
"def step(self, action):\n possible_states, probs = zip(*self.get_next_states(self._current_state, action).items())\n next_state = weighted_choice(possible_states, p=probs)\n reward = self.get_reward(self._current_state, action, next_state)\n is_done = self.is_terminal(next_state)\n self._current_state = next_state\n return next_state, reward, is_done, {}",
"def r(self, next_state, state, action):\n\n return self._r[state, next_state, action]",
"def successor(self, state):\n for action in self.actions:\n nexts = state.move(action)\n if nexts is not None:\n yield (action,nexts)",
"def get_action(self, state):\r\n\r\n action = self.minimax(state)\r\n print(self.exp, self.pru, self.recCall)\r\n\r\n return action",
"def next_state(self, action):\n self.state = self.states[action][self.state]",
"def generate_next_state(self, action) :\n raise NotImplementedError",
"def query_model(self, state, action):\n\n state = np.asarray(state)\n if(action == 0):\n return [(1, (1, state[1], state[2], state[3]), 0, False)]\n if(action == 1):\n return [(1, (2, state[1], state[2], state[3]), 0, False)]\n if(action == 2):\n return [(1, (3, state[1], state[2], state[3]), 0, False)]\n if(state[state[0]] > self.observation_space.low[state[0]]):\n state[state[0]] -= 1\n reward = 1\n else:\n reward = 0\n\n return [(1, tuple(state), reward, False)]",
"def next_state_func(self, state, action, Time_matrix):\n curr_loc, curr_time, curr_day = state\n pickup_loc, drop_loc = action\n \n rewards = self.reward_func(state, action, Time_matrix)\n total_time = 0\n \n if action == (0,0):\n # update time by 1 hour\n curr_time, curr_day = self.time_day_update_func(curr_time, curr_day, 1)\n next_state = (curr_loc, curr_time, curr_day)\n total_time = 1\n else:\n # time from curr_loc to reach pickup_loc\n t1 = int(Time_matrix[curr_loc][pickup_loc][curr_time][curr_day])\n curr_time, curr_day = self.time_day_update_func(curr_time, curr_day, t1)\n \n # time from pickup_loc to reach drop_loc\n t2 = int(Time_matrix[pickup_loc][drop_loc][curr_time][curr_day])\n curr_time, curr_day = self.time_day_update_func(curr_time, curr_day, t2)\n \n total_time = t1 + t2\n next_state = (drop_loc, curr_time, curr_day)\n \n return next_state, rewards, total_time",
"def applyAction(state, action):\r\n if action == 'N':\r\n return (state[0] - 1, state[1])\r\n\r\n if action == 'E':\r\n return (state[0], state[1] + 1)\r\n\r\n if action == 'W':\r\n return (state[0], state[1] - 1)\r\n\r\n if action == 'S':\r\n return (state[0] + 1, state[1])",
"def getNextState(self,input):\n\t\tif input in self.trans:\n\t\t\treturn self.trans[input]\n\t\telse:\n\t\t\treturn None",
"def go_to(self, state, symbol):\n C = []\n # in state search for LR(0) item that has dot in front of symbol\n for production in state:\n dot_index = production[1].index('.')\n alpha = production[1][:dot_index]\n xbeta = production[1][dot_index + 1:]\n if len(xbeta) == 0:\n continue\n X, beta = xbeta[0], xbeta[1:]\n if X == symbol:\n # move the dot after the symbol\n res = alpha + [X] + ['.'] + beta\n result_prod = (production[0], res)\n C += [result_prod]\n # call closure on this new item\n return self.closure(C)",
"def __get_actions(self, state, next_states):\r\n val_tok_mov = np.zeros((4, 4))\r\n for token_id in range(4):\r\n val_tok_mov[token_id] = self.__valid_token_moves(state, next_states[token_id], token_id)\r\n\r\n actions = np.logical_or.reduce((val_tok_mov[0,:], val_tok_mov[1,:], val_tok_mov[2,:], val_tok_mov[3,:]))\r\n\r\n return actions, val_tok_mov",
"def state_transition(self, curr_state, curr_action):\n next_state = curr_state.copy()\n next_state[curr_action[0]] = curr_action[1]\n return next_state",
"def get_action_outcomes(self, state, action):\r\n temp_state = tuple([max(0, min(self.pond_size[i]-1, state[i] + self.action_directions[action][i]))\r\n for i in range(2)])\r\n return self.transition_lists[temp_state]",
"def getNextState(self, current_state, coordinator_action):\n \n # if robot_action == human_action:\n # return current_state\n \n robot_action = coordinator_action[1]\n human_action = self.getHumanAction(current_state, coordinator_action)\n total_action = tuple(map(add, human_action, robot_action))\n state = (tuple(map(add, current_state[0], total_action)), current_state[1])\n return state",
"def get_possible_actions(self, state):\n return tuple(self._transition_probs.get(state, {}).keys())",
"def step(self, curr_state, curr_action):\n next_state = self.state_transition(curr_state, curr_action)\n terminal_state = self.is_terminal(next_state)\n if terminal_state[0]:\n r = self.reward(\"agent\", terminal_state[1])\n return (next_state, r, terminal_state[0])\n env_action = random.choice(list(self.action_space(next_state)[1]))\n next_state = self.state_transition(next_state, env_action)\n terminal_state = self.is_terminal(next_state)\n if terminal_state[0]:\n r = self.reward(\"env\", terminal_state[1])\n return (next_state, r, terminal_state[0]) \n return (next_state, -1, False)",
"def step(action, state):\n observation = state\n reward = 0\n done = False\n if action == \"right\":\n if state == N_STATES - 2:\n observation = \"terminal\"\n reward = 1\n done = True\n else:\n observation = state + 1\n else:\n # move left\n if state != 0:\n observation = state - 1\n return observation, reward, done",
"def getAction(self, state):\n if 'actionIndex' not in dir(self): self.actionIndex = 0\n\n if self.actionIndex == 0:\n self.actions = self.spreadOutAndFindDot(state)\n if len(self.actions) == 1:\n return self.actions[0]\n else:\n self.actionIndex += 1\n return self.actions[0]\n else:\n i = self.actionIndex\n self.actionIndex += 1\n if i < len(self.actions):\n return self.actions[i]\n else:\n self.actionIndex = 0\n return Directions.STOP\n # self.actions = self.spreadOutAndFindDot(state)\n # if len(self.actions) == 1:\n # return self.actions[0]\n # else:\n # self.actionIndex += 1\n # return self.actions[0]\n\n #raise NotImplementedError()",
"def nextAction(state) :\n newState = copy.deepcopy(state)\n # Updates the timeline to be at the next action, i.e. the action with the \n # lowest timestamp in the list of next actions\n newState['timeline']['currentAction'] = newState['timeline']['nextActions'][0][1]\n newState['timeline']['timestamp'] = newState['timeline']['nextActions'][0][0]\n newState['timeline']['nextActions'] = newState['timeline']['nextActions'][1::]\n return newState",
"def result(self, state, action):\n\n # blank is the index of the blank square\n blank = self.find_blank_square(state)\n new_state = list(state)\n\n delta = {'UP': -3, 'DOWN': 3, 'LEFT': -1, 'RIGHT': 1}\n neighbor = blank + delta[action]\n new_state[blank], new_state[neighbor] = new_state[neighbor], new_state[blank]\n\n return tuple(new_state)",
"def _transition(next, symbols=[0, 1]):\n return {'next': next, 'value': choice(symbols)}"
]
| [
"0.7005093",
"0.6508219",
"0.6507524",
"0.6235809",
"0.6222719",
"0.6205359",
"0.6205105",
"0.60368776",
"0.5969271",
"0.5955513",
"0.59514",
"0.59455127",
"0.5928941",
"0.5909269",
"0.5861482",
"0.58402",
"0.5836453",
"0.58101356",
"0.58081824",
"0.58023584",
"0.5797929",
"0.5794363",
"0.5785467",
"0.5763307",
"0.5752268",
"0.5701096",
"0.5667724",
"0.5657228",
"0.56360435",
"0.56288517"
]
| 0.6625956 | 1 |
This is the main method that processes user input. This causes the FSM to change state and call an action. This method calls getTransition() to find the correct action and nextState associated with the inputSymbol and currentState. It processes one complete input symbol. | def process (self, inputSymbol):
self.inputSymbol = inputSymbol
(self.action, self.nextState) = self.getTransition (self.inputSymbol, self.currentState)
if self.action is not None:
self.action (self)
self.memoryState.append(self.currentState)
self.currentState = self.nextState
self.nextState = None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def input(self, symbol, *args, **kwargs):\n if self.__state is None:\n raise ValueError(\"FSM state is undefined\")\n try:\n transition = self.__get_state_attr(self._transition_prefix)\n except AttributeError:\n raise Exception(\"unable to find transition function or target\")\n if callable(transition):\n new_state = transition(symbol)\n elif isinstance(transition, dict):\n new_state = transition[symbol]\n else:\n new_state = transition\n return None if new_state is None else self.enter(new_state, *args, **kwargs)",
"def _act_user_input(self, global_env):\n # get input from user\n # validate input\n # update location and global_env\n\n print(\"please insert an action:\")\n print(\"numbers: (1,2,3,...) will move the Pc if an edge allow it\")\n print(\"letters: \\\"tr\\\" to traverse, \\\"p\\\" to pick up people, \\\"d\\\" to drop off at shelter \")\n print(\"letters: \\\"a\\\" to annihilate an adjacent edge, \\\"te\\\" to terminate\")\n traversable_nodes = self._get_traversable_nodes()\n options = [\"tr\", \"p\", \"d\", \"a\", \"te\"] # p - pick up, d - drop off, a - annihilate.\n if len(options) == 0:\n print(\"no option for Agent {} to traverse and is terminated\".format(self.name))\n self.change_state(\"terminated\")\n return\n\n input_ok = False\n user_input = None\n while not input_ok:\n user_input = input()\n if not (user_input in options):\n print(\"{} is not a valid option\".format(user_input, self.location))\n continue\n if (user_input == \"tr\"):\n print(\"Please type destination: \")\n try:\n destination = int(input())\n if (destination) in traversable_nodes:\n self.traverse_to_node(destination, global_env)\n input_ok = True\n continue\n else:\n print(\"{} is not a neighbor to {}\".format(destination, self.location))\n continue\n except ValueError as e:\n print(\"{} is not a valid input as destination\".format(user_input))\n input_ok = False\n\n if ((user_input) == \"te\"):\n input_ok = True\n self.change_state(\"terminated\")\n self._act_terminated(global_env)\n\n if ((user_input) == \"p\"):\n print(\"picked up: {} people\".format(global_env.get_attr(self.location, \"people\")))\n self.people_carried += global_env.get_attr(self.location, \"people\")\n global_env.change_attr(self.location, \"people\", 0)\n\n if ((user_input == \"d\")):\n if self.local_environment.get_attr(self.location, \"shelter\") > 0:\n print(\"Dropping off {} people\".format(self.people_carried))\n self.people_saved += self.people_carried\n self.people_carried = 0\n else:\n print(\"Not a valid drop-off location, NO MAN LEFT BEHIND!\")",
"def __process_input(self, input_):\n if self.state.game_over:\n if input_.key_pressed:\n self.state.exit = True\n else:\n if input_.action == 'PLAYER_UP':\n self.state.player.direction = 'U'\n elif input_.action == 'PLAYER_DOWN':\n self.state.player.direction = 'D'\n elif input_.action == 'PLAYER_LEFT':\n self.state.player.direction = 'L'\n elif input_.action == 'PLAYER_RIGHT':\n self.state.player.direction = 'R'",
"def act_on_input(self, input):\n if not input:\n return\n self.parse_input(input)\n commands = self.extract_commands()\n self.execute_commands(commands)",
"def main():\n user_input = user_input_state()\n check_user_input(user_input)",
"def handle_inputs(self):\n user_input = \"\"\n while user_input != \"exit\":\n self.print_divider()\n user_input = input()\n self.do_action_for_input(user_input)",
"def parse(self, input_string):\n print(\"----------- Syntax analysis -----------\")\n table = self.generate_table()\n self.workingStack = ['0']\n self.inputStack = [char for char in input_string]\n self.output = []\n try:\n print(\"--------- Parsing ---------\")\n while len(self.workingStack) != 0:\n state = int(self.workingStack[-1]) # which dict from parsing table, index of state\n if len(self.inputStack) > 0:\n char = self.inputStack.pop(0)\n else:\n char = None\n if table[state]['action'] == 'shift':\n # Shift operation on the stack\n if char not in table[state]:\n raise (Exception(\"Syntax error! Expected \" + str(table[state]) +\n \"!\\nCannot parse shift. Character: \" + char))\n self.workingStack.append(char)\n self.workingStack.append(table[state][char])\n elif table[state]['action'] == 'acc':\n # Accept operation, sequence is accepted\n if len(self.inputStack) != 0:\n raise (Exception(\"Syntax error! Expected \" + str(table[state]) +\n \"!\\nCannot parse accept. Character: \" + char))\n self.workingStack.clear()\n else:\n # Reduce operation on the stack\n reduce_state = int(table[state]['action'].split(' ')[1])\n reduce_production = self.grammar.P[reduce_state]\n to_remove_from_working_stack = [symbol for symbol in reduce_production[1]]\n while len(to_remove_from_working_stack) > 0 and len(self.workingStack) > 0:\n if self.workingStack[-1] == to_remove_from_working_stack[-1]:\n to_remove_from_working_stack.pop()\n self.workingStack.pop()\n if len(to_remove_from_working_stack) != 0:\n raise (Exception('Syntax error!' +\n '!\\nCannot parse reduce. Character: ', char))\n self.inputStack.insert(0, char)\n self.inputStack.insert(0, reduce_production[0])\n self.output.insert(0, reduce_state)\n print('Syntax analysis successfully. Yay!')\n except Exception as ex:\n raise Exception(ex)\n print()\n return self.output",
"def getTransition (self, inputSymbol, state):\n\n if (inputSymbol, state) in self.stateTransitions:\n return self.stateTransitions[(inputSymbol, state)]\n elif self.defaultTransition is not None:\n return self.defaultTransition\n else:\n raise ExceptionFSM ('Transition is undefined: (%s, %s).' %\n (str(inputSymbol), str(state)) )",
"def do_action_for_input(self, user_input):\n if user_input == CommandLineProgram.ACTION.HELP:\n self.print_help()\n elif user_input == CommandLineProgram.ACTION.ADD_USER:\n self.input_and_create_user()\n elif user_input == CommandLineProgram.ACTION.LIST_USERS:\n self.print_users()\n elif user_input == CommandLineProgram.ACTION.ADD_TRANSACTION:\n self.select_user_and_add_transaction()\n elif user_input == CommandLineProgram.ACTION.GENERATE_REPORT:\n self.select_user_and_print_report()",
"def step(self,inp): ## function responsible for exciting the machine with a SINGLE INPUT VALUE\n (s, o) = self.getNextValues(self.state,inp)\n # will store the state and return the output\n self.state =s\n return o",
"def step(self):\n self.emit_symbol()\n self.change_state()",
"def process_event(self, event):\n\n print('Current state: {}, Event to process: {}'.format(\n self._current_state.name,\n event))\n\n # # Is the event expected?\n # try:\n # transition = self._current_state.events[type(event)]\n # except KeyError:\n # raise UnexpectedEventError()\n #\n # if not transition.guard(event):\n # return\n #\n # transition.action(event)\n #\n # self._current_state = transition.to_state",
"def parseInputLine(self, action):\r\n output = None\r\n if action is not None and action is not '':\r\n func = getattr(self, 'cmd_' + str(action[0]).upper(), None)\r\n if func is not None:\r\n output = func()\r\n else:\r\n return fail(InvalidRequest(\"No such action\"))\r\n return succeed(output)",
"def parse(self, input_string, game_data):\n\n settings = game_data['settings']\n assert 'debug' in settings\n assert 'verbose' in settings\n\n self.setup_parse(input_string, game_data)\n # pylint: disable=no-member\n self.state_machine = StateMachine(\n state_classes=MJScoreState.__subclasses__(),\n initial_state='GameOpening',\n debug=settings.debug and settings.verbose)\n self.state_machine.config = settings\n\n input_lines = tuple(i.strip() for i in input_string.split('\\n'))\n\n self.state_machine.run(input_lines, context=game_data)\n self.state_machine.unlink()\n self.finish_parse()",
"def main():\n \n\n f = FiniteStatesMachine('stopped', [])\n f.setDefaultTransition(Error, None)\n \n f.addTransitionList('start', 'stopped', starFSMVariables, 'started')\n f.addTransitionList('collect', 'started', collectData, 'collecting')\n f.addTransitionList('collect', 'processing', collectData, 'collecting')\n f.addTransitionList('stop', 'started', stopFSM, 'stopped')\n f.addTransitionList('process', 'collecting', processData, 'processing')\n f.addTransitionList('stop', 'collecting', stopFSM, 'stopped')\n f.addTransitionList('stop', 'processing', stopFSM, 'stopped')\n \n f.addTransitionList('start', 'stopped', starFSMVariables, 'started')\n \n \n print('This is a Finite Machine State system')\n print('You can change the FSM state by sending an input to the system')\n print('The possible states are: started, collecting, processing and stopped')\n print('Different inputs cause a state change in the FSM')\n print('The possible inputs are: start, collect, process and stop')\n print('You can also check for the current and previous state of the FSM')\n print('The possible inputs for this case are: current and previous')\n print('The initial state of the FSM system is stopped')\n print('Please, provide an input for the FSM system')\n \n inputstr = \"\"\n \n while inputstr != \"exit\":\n inputstr = input('> ')\n \n if inputstr != \"exit\":\n \n if inputstr == \"current\":\n print(f.getCurrentState())\n \n elif inputstr == \"previous\":\n print(f.getPreviousState())\n \n else:\n f.process(inputstr)\n \n if inputstr == \"process\":\n # After process go the 'collecting' state again\n f.process('collect')",
"def obtain_action(self, timestep):\r\n\t\t# Loops constantly until a valid input is obtained.\r\n\t\twhile True:\r\n\t\t\ttry:\r\n\t\t\t\t# Tries to obtain a valid input manually and convert it to an\r\n\t\t\t\t# integer.\r\n\t\t\t\taction = int(input('Please provide an input action index between 0 and (number of actions - 1): %i: ' % (self.num_actions-1)))\r\n\r\n\t\t\texcept ValueError:\r\n\t\t\t\tprint('Invalid input detected, try again.')\r\n\t\t\t\tcontinue\r\n\r\n\t\t\t# Checks if the input is within the acceptable range of action\r\n\t\t\t# index values.\r\n\t\t\tif 0 <= action < self.num_actions:\r\n\t\t\t\tbreak\r\n\t\t\telse:\r\n\t\t\t\tprint('Action should be an index between 0 and (number of actions - 1): %i' % (self.num_actions-1))\r\n\r\n\t\treturn action",
"def process_user_input(game, player, other, buttons):\n money = 100\n\n move = get_user_input(buttons)\n if move:\n print(move)\n print(player.wager)\n print(other.wager)\n if move == \"fold\":\n player.fold()\n\n elif move == \"raise\": #player bets an amount\n player.call(other.wager)\n player.bet(money)\n\n\n elif move == \"check\" or move == \"call\" or move == \"match\":\n if player.funds - money < 0:\n print(\"You don't have enough money. Sorry\")\n process_user_input(game, player, other, buttons)\n if other.wager < player.wager:\n print(\"You can't match when you're ahead on betting!\")\n process_user_input(game, player, other, buttons)\n player.call(other.wager)\n player.check()\n\n\n return player.wager\n else:\n pass",
"def step(self, action_dict):\n curr_player_id = self.curr_time_step.observations[\"current_player\"]\n legal_actions = self.curr_time_step.observations[\"legal_actions\"][curr_player_id]\n\n player_action = action_dict[self.player_map(curr_player_id)]\n orig_player_action = player_action\n\n if self._continuous_action_space or \\\n (self._individual_players_with_continuous_action_space and curr_player_id in self._individual_players_with_continuous_action_space):\n player_action = parse_discrete_poker_action_from_continuous_space(\n continuous_action=player_action, legal_actions_list=legal_actions,\n total_num_discrete_actions_including_dummy=self.num_discrete_actions)\n\n if self.dummy_action_multiplier != 1:\n # extended dummy action space is just the base discrete actions repeated multiple times\n # convert to the base discrete action space.\n player_action = player_action % self.base_num_discrete_actions\n\n if player_action not in self._base_action_space:\n raise ValueError(\"Processed player action isn't in the base action space.\\n\"\n f\"orig action: {orig_player_action}\\n\"\n f\"processed action: {player_action}\\n\"\n f\"action space: {self.action_space}\\n\"\n f\"base action space: {self._base_action_space}\")\n\n if player_action not in legal_actions:\n legal_actions_mask = np.zeros(self.openspiel_env.action_spec()[\"num_actions\"])\n legal_actions_mask[legal_actions] = 1.0\n raise ValueError(f\"illegal actions are not allowed.\\n\"\n f\"Action was {player_action}.\\n\"\n f\"Legal actions are {legal_actions}\\n\"\n f\"Legal actions vector is {legal_actions_mask}\")\n try:\n self.curr_time_step = self.openspiel_env.step([player_action])\n except SpielError:\n # if not self._is_universal_poker:\n raise\n # Enforce a time limit on universal poker if the infostate size becomes larger\n # than the observation array size and throws an error.\n # self.curr_time_step = TimeStep(observations=self.curr_time_step.observations,\n # rewards=np.zeros_like(self.curr_time_step.rewards),\n # discounts=self.curr_time_step.discounts,\n # step_type=StepType.LAST)\n\n new_curr_player_id = self.curr_time_step.observations[\"current_player\"]\n obs = self._get_current_obs()\n done = self.curr_time_step.last()\n\n dones = {self.player_map(new_curr_player_id): done, \"__all__\": done}\n\n if done:\n rewards = {self.player_map(0): self.curr_time_step.rewards[0],\n self.player_map(1): self.curr_time_step.rewards[1]}\n\n assert self.curr_time_step.rewards[0] == -self.curr_time_step.rewards[1]\n\n infos = {0: {}, 1: {}}\n\n infos[self.player_map(0)]['game_result_was_invalid'] = False\n infos[self.player_map(1)]['game_result_was_invalid'] = False\n\n assert sum(\n self.curr_time_step.rewards) == 0.0, \"curr_time_step rewards in are terminal state are {} (they should sum to zero)\".format(\n self.curr_time_step.rewards)\n\n infos[self.player_map(0)]['rewards'] = self.curr_time_step.rewards[0]\n infos[self.player_map(1)]['rewards'] = self.curr_time_step.rewards[1]\n\n if self.curr_time_step.rewards[0] > 0:\n infos[self.player_map(0)]['game_result'] = 'won'\n infos[self.player_map(1)]['game_result'] = 'lost'\n elif self.curr_time_step.rewards[1] > 0:\n infos[self.player_map(1)]['game_result'] = 'won'\n infos[self.player_map(0)]['game_result'] = 'lost'\n else:\n infos[self.player_map(1)]['game_result'] = 'tied'\n infos[self.player_map(0)]['game_result'] = 'tied'\n else:\n assert self.curr_time_step.rewards[\n new_curr_player_id] == 0, \"curr_time_step rewards in non terminal state are {}\".format(\n 
self.curr_time_step.rewards)\n assert self.curr_time_step.rewards[-(new_curr_player_id - 1)] == 0\n\n rewards = {self.player_map(new_curr_player_id): self.curr_time_step.rewards[new_curr_player_id]}\n assert self.curr_time_step.rewards[1 - new_curr_player_id] == 0.0\n infos = {}\n\n if self._apply_penalty_for_invalid_actions:\n for player_id, penalty in enumerate(self._invalid_action_penalties):\n if penalty and self.player_map(player_id) in rewards:\n rewards[self.player_map(player_id)] -= 4.0\n self._invalid_action_penalties[player_id] = False\n\n if self._is_universal_poker:\n # normalize magnitude of rewards for universal poker\n rewards = {p: r / (self._stack_size * 0.1) for p, r in rewards.items()}\n\n return obs, rewards, dones, infos",
"def forward(self, sentence, actions=None):\n self.refresh() # clear up hidden states from last run, if need be\n\n padded_sent = sentence + [END_OF_INPUT_TOK]\n\n # Initialize the parser state\n sentence_embs = self.word_embedding_component(padded_sent)\n\n parser_state = ParserState(padded_sent, sentence_embs, self.combiner, null_stack_tok_embed=self.null_stack_tok_embed)\n outputs = [] # Holds the output of each action decision\n actions_done = [] # Holds all actions we have done\n\n dep_graph = set() # Build this up as you go\n\n # Make the action queue if we have it\n if actions is not None:\n action_queue = deque()\n action_queue.extend([ Actions.action_to_ix[a] for a in actions ])\n have_gold_actions = True\n else:\n have_gold_actions = False\n # STUDENT\n # END STUDENT\n while(not parser_state.done_parsing()):\n features = self.feature_extractor.get_features(parser_state)\n log_probs = self.action_chooser(features)\n outputs.append(log_probs)\n do = utils.argmax(log_probs) if (not have_gold_actions) else action_queue.popleft()\n if do == 0 and parser_state.input_buffer_len() > 1 :\n parser_state.shift()\n actions_done.append(Actions.SHIFT)\n elif do == 1 and (parser_state.stack_len()>1):\n dependency_edge = parser_state.reduce_left()\n dep_graph.add(dependency_edge)\n actions_done.append(Actions.REDUCE_L)\n\n else:\n dependency_edge = parser_state.reduce_right()\n dep_graph.add(dependency_edge)\n actions_done.append(Actions.REDUCE_R)\n \n dep_graph.add(DepGraphEdge((ROOT_TOK, -1), (parser_state.stack[-1].headword, parser_state.stack[-1].headword_pos)))\n return outputs, dep_graph, actions_done",
"def nextState(self, state, action):\n return state + action",
"def step(self):\n # parse opcode and parameter mode(s) from instruction\n # (convert integer into 5-digit string with zeroes before parsing)\n instruction = str(self.program[self.index]).zfill(5)\n opcode = int(instruction[-2:])\n param1_mode = int(instruction[-3])\n param2_mode = int(instruction[-4])\n param3_mode = int(instruction[-5])\n\n # opcode to halt program\n if opcode == 99:\n self.halted = True\n return\n\n # opcodes for addition or multiplication\n if opcode in (1, 2):\n val1 = self.get_value(self.index+1, param1_mode)\n val2 = self.get_value(self.index+2, param2_mode)\n\n if opcode == 1:\n total = val1 + val2\n elif opcode == 2:\n total = val1 * val2\n\n self.set_value(self.index+3, param3_mode, total)\n self.index += 4\n return\n\n # opcode for input\n if opcode == 3:\n try:\n inputval = self.inputs.pop(0)\n except IndexError:\n # no input available, halt program until external process\n # adds input and restarts the process\n self.halted = True\n return\n\n self.set_value(self.index+1, param1_mode, inputval)\n self.index += 2\n return\n\n # opcode for output\n if opcode == 4:\n self.outputs += [self.get_value(self.index+1, param1_mode)]\n self.index += 2\n return\n\n # opcodes for jump-if-true / jump-if-false\n if opcode in (5, 6):\n val1 = self.get_value(self.index+1, param1_mode)\n val2 = self.get_value(self.index+2, param2_mode)\n\n # Should jump; update instruction pointer directly\n if (opcode == 5 and val1 != 0) or (opcode == 6 and val1 == 0):\n self.index = val2\n return\n\n # No action, continue to next instruction\n self.index += 3\n return\n\n # opcode for less than / equal to\n if opcode in (7, 8):\n val1 = self.get_value(self.index+1, param1_mode)\n val2 = self.get_value(self.index+2, param2_mode)\n\n # Default 0 (False), set to 1 if True\n result = 0\n if opcode == 7 and val1 < val2:\n result = 1\n elif opcode == 8 and val1 == val2:\n result = 1\n\n self.set_value(self.index+3, param3_mode, result)\n self.index += 4\n return\n\n # opcode for relative base offset\n if opcode == 9:\n self.relative_base += self.get_value(self.index+1, param1_mode)\n self.index += 2\n return\n\n raise Exception(\"unknown opcode, something went wrong\")",
"def step(self, action: nx.Graph):\n # Get the SMILES string associated with this action\n self._state = action\n if self.record_path:\n self._path.append(self._state)\n\n # Update the action space\n self.action_space.update_actions(self._state, self.observation_space)\n self._counter += 1\n\n # Check if we have finished\n # Out of steps or no more moves\n done = len(self.action_space.get_possible_actions()) == 0\n\n # Compute the fingerprints for the state\n return self._state, self.reward(), done, {}",
"def fsm_loop(self):\n while not rospy.is_shutdown():\n rospy.sleep(0.1)\n self.reset_fsm_preempted()\n self.reset_continued()\n\n state = self.get_state()\n if state is None:\n rospy.logerr(\"State is None. Error\")\n continue\n #\n if state not in self._fxns.keys():\n rospy.logdebug(\"{}: no function for given state. May be a bug\".format(state))\n continue\n ###\n # call the correct callback\n try:\n self._fxns[state](self._goal)\n except ActionServerError as error:\n # current state aborted, so return to the previous state\n rospy.logerr(traceback.format_exc())\n rospy.logdebug(\"{}: Aborted, reverting to previous state\".format(self._state_enums(state)))\n # self.print_prior_states()\n # pdb.set_trace()\n self.revert_state()\n\n # set result\n self._fill_result()",
"def parse(self):\r\n readnext = False\r\n error = False\r\n errorInfo = (-100, -100)\r\n token = self.tokens.nextToken()\r\n accepted = False\r\n while len(self.stack) > 0:\r\n (type, value) = self.stack.pop()\r\n self.runtime_transitions.append((type, value))\r\n\r\n if(readnext):\r\n token = self.tokens.nextToken()\r\n\r\n if (type == TERMINAL):\r\n if (value == token.category_num or value == EPSILON):\r\n if(value != EPSILON):\r\n print(token)\r\n\r\n if (not self.tokens.hasToken() and len(self.stack) == 0):\r\n print(\"Input Accepted!\")\r\n error = False\r\n accepted = True\r\n\r\n if(value != EPSILON):\r\n readnext = True\r\n\r\n else:\r\n error = True\r\n errorInfo = (value, token.category_num)\r\n\r\n\r\n\r\n elif (type == RULE):\r\n if(token.category_num > -1):\r\n rule = self.table[value][token.category_num]\r\n\r\n self.print_production(rule, value)\r\n self.last_transition = rule\r\n\r\n if(rule == -1):\r\n print(toRuleString[value], self.terminal_to_str(token.category_num))\r\n break\r\n for r in reversed(self.rules[rule]):\r\n self.stack.append(r)\r\n\r\n readnext = False\r\n\r\n if(error):\r\n break;\r\n\r\n if(error):\r\n print(\"Syntax error on position (%d, %d): [%s] expected. Got [%s]\" % (token.line, token.column, self.terminal_to_str(errorInfo[0]), self.terminal_to_str(errorInfo[1])))\r\n print(token)\r\n self.printStack()",
"def handle_input(self):\n\n\t\tline = sys.stdin.readline().strip()\n\n\t\tif line == '':\n\t\t\t# print('')\n\t\t\tself.print_prompt()\n\t\t\treturn\n\n\t\tcommand_name, *parts = line.split()\n\n\t\tif command_name in self.commands:\n\t\t\t# Call given command and unpack parts into args\n\t\t\tself.commands[command_name]['callback'](*parts)\n\t\telse:\n\t\t\tprint(command_name + ' : command not found')\n\t\t\tself.print_available_commands()\n\n\n\t\tself.print_prompt()",
"def get_input(self):\n result = None\n\n try:\n while True:\n result = self.console.read_for_condition(prompt=\">>> \", condition=self.is_valid_input)\n\n if result is not None:\n break\n except KeyboardInterrupt:\n quit()\n\n # run command for next condition\n self.game_branch[result]()",
"def sysinput(rockstate):\n raise NotImplementedError",
"def input_(self, op):\n value = input(\"Enter your input: \")\n self.set_value(op.address, value, op.type_, op.is_global)",
"def processUserAction(self, user_action):\n self.history[\"user_action\"] = user_action\n dialogue_act = user_action[\"action\"]\n self.current_function = None\n self.dont_know = False\n\n\n def provideQuery():\n self.query = user_action[\"query\"]\n self.query_vector = self.dataset.getVectorForQuery(self.query)\n self.dataset.updateResults(query = self.query)\n self.result_index=0\n self.list_current = False\n return user_action\n\n def provideKw():\n self.keywords[\"provided\"].add(user_action[\"keyword\"])\n self.keywords[\"rejected\"].discard(user_action[\"keyword\"])\n self.dataset.updateResults(keywords = self.keywords)\n self.list_current = False\n self.result_index=0\n return user_action\n\n def rejectKws():\n self.keywords[\"provided\"].difference_update(user_action[\"keywords\"])\n self.keywords[\"rejected\"].update(user_action[\"keywords\"])\n self.dataset.updateResults(keywords = self.keywords)\n self.list_current = False\n return user_action\n\n def rejectFunctions():\n self.functions_rejected.update(user_action[\"functions\"])\n self.dataset.updateResults(not_functions = self.functions_rejected)\n self.list_current = False\n return user_action\n\n def eliSugg():\n return user_action\n\n def eliInfo():\n self.current_function = user_action[\"function\"]\n return user_action\n\n def eliInfoAll():\n self.current_function = user_action[\"function\"]\n return user_action\n\n def changePage():\n return user_action\n\n def dontKnow():\n self.dont_know = True\n\n\n switcher = {\n 'provide-query':provideQuery,\n 'provide-kw':provideKw,\n 'reject-kws':rejectKws,\n 'reject-functions':rejectFunctions,\n 'eli-sugg':eliSugg,\n 'eli-sugg-all':eliSugg,\n 'eli-info':eliInfo,\n 'eli-info-all':eliInfo,\n 'change-page':changePage,\n 'dont-know':dontKnow\n }\n\n if dialogue_act in switcher:\n return switcher[dialogue_act]()\n else: return user_action",
"def scan(text,transition_table,accept_states):\n\t\n\t# initial state\n\tpos = 0\n\tstate = 'q0'\n\t\n\twhile True:\n\t\t\n\t\tc = getchar(text,pos)\t# get next char\n\t\t\n\t\tif state in transition_table and c in transition_table[state]:\n\t\t\n\t\t\tstate = transition_table[state][c]\t# set new state\n\t\t\tpos += 1\t# advance to next char\n\t\t\t\n\t\telse:\t# no transition found\n\n\t\t\t# check if current state is accepting\n\t\t\tif state in accept_states:\n\t\t\t\treturn accept_states[state],pos \t#if current state is accepting, scan() returns it.\n\n\t\t\t# current state is not accepting\n\t\t\treturn 'ERROR_TOKEN',pos \t#if current state is not accepting, scan() returns 'ERROR_TOKEN'.\t"
]
| [
"0.7126944",
"0.6313032",
"0.6273754",
"0.6034373",
"0.5996154",
"0.5985508",
"0.59294426",
"0.58302075",
"0.58178115",
"0.5808534",
"0.57349867",
"0.5706727",
"0.5656725",
"0.5618711",
"0.56094056",
"0.5534585",
"0.5453117",
"0.5448634",
"0.54410994",
"0.5429249",
"0.5428419",
"0.5417247",
"0.54120284",
"0.5405997",
"0.5352301",
"0.535142",
"0.53403676",
"0.53403205",
"0.53186166",
"0.5303647"
]
| 0.80351883 | 0 |
Lookup all of the IP addresses for a given AWS instance name. Multiple instances with the same name are a result of instances belonging to an auto scale group. Useful when an action needs to happen to all machines in an auto scale group. | def machine_lookup_all(session, hostname, public_ip = True):
client = session.client('ec2')
response = client.describe_instances(Filters=[{"Name":"tag:Name", "Values":[hostname]},
{"Name":"instance-state-name", "Values":["running"]}])
addresses = []
items = response['Reservations']
if len(items) > 0:
for i in items:
item = i['Instances'][0]
if 'PublicIpAddress' in item and public_ip:
addresses.append(item['PublicIpAddress'])
elif 'PrivateIpAddress' in item and not public_ip:
addresses.append(item['PrivateIpAddress'])
return addresses | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_instances_by_name_mask(self, mask_name):\n\n instances = []\n\n instance_list = self.nova_cli.servers.list()\n logger.info('Instances list is {0}'.format(instance_list))\n logger.info(\n 'Expected instance name should inlude {0}'.format(mask_name))\n\n for inst in instance_list:\n logger.info('Instance name is {0}'.format(inst.name))\n if inst.name.startswith(mask_name):\n instances.append(inst)\n return instances",
"def get_ips(self, instances):\n public_ips = []\n for instance in instances:\n public_ips.append(instance.public_dns_name)\n return public_ips",
"def get_ips(rg_name, vmss_name):\n\n script = \"az vmss list-instance-public-ips --resource-group {rg} --name {vmss} | grep ipAddress\".format(\n rg=rg_name,\n vmss=vmss_name\n )\n run_script(script)",
"def _get_public_ips(self, ec2_ids):\n public_ips = []\n\n for ec2_id in ec2_ids:\n while True:\n response = self.ec2_client.describe_instances(InstanceIds=[ec2_id])\n ec2_info = response['Reservations'][0]['Instances'][0]\n if ec2_info['PublicIpAddress']:\n logger.info(\"Public IP for EC2 instance \" + ec2_id + \": \" + ec2_info['PublicIpAddress'])\n public_ips.append(ec2_info['PublicIpAddress'])\n break\n else:\n logger.info(\"Still waiting for Public IP to be configured for \" + ec2_id)\n time.sleep(10)\n\n return public_ips",
"def aws_get_instances_by_name(region, name, raw=True):\n return _aws_get_instance_by_tag(region, name, 'tag:Name', raw)",
"def running_instances(hostnames=None):\n\n global api\n\n all_inst = []\n try:\n all_inst = api.get_all_instances()\n except Exception, e:\n logging.error(\"Can't get list of instances (maybe wrong credentials?)\")\n return None\n\n # Resolve IPs\n if hostnames is not None:\n ips = []\n for h in hostnames:\n try:\n ipv4 = gethostbyname(h)\n ips.append(ipv4)\n except Exception:\n # Don't add host if IP address could not be found\n logging.warning(\"Ignoring hostname %s: can't reslove IPv4 address\" % h)\n ips=list(set(ips))\n\n if hostnames is not None:\n logging.debug(\"Input hostnames: %s\" % (','.join(hostnames)))\n logging.debug(\"Input IPs: %s\" % (','.join(ips)))\n else:\n logging.debug(\"No input hostnames given\")\n\n # Add only running instances\n inst = []\n for i in all_inst:\n if i.status(token_id=api.keystone.token_id) == 'running':\n if hostnames is None:\n # Append all\n inst.append(i)\n else:\n found = False\n for ipv4 in ips:\n if i.network_ip(network_name=cf[\"api\"][\"network_name\"]) == ipv4:\n inst.append(i)\n logging.debug(\"Found IP %s corresponding to instance\" % ipv4)\n found = True\n break\n if not found:\n logging.warning(\"Cannot find instance %s in the list of known IPs\" % i.network_ip(network_name=cf[\"api\"][\"network_name\"]))\n\n return inst",
"def get_as_instances(as_connection, as_group_name):\n as_all_instances_list = []\n as_instances_list = []\n get_as_instances = as_connection.get_all_autoscaling_instances()\n as_all_instances_list.extend(get_as_instances)\n\n token = get_as_instances.next_token\n while token is not None:\n get_as_groups = as_connection.get_all_autoscaling_instances(\n next_token=token)\n as_all_instances_list.extend(get_as_groups)\n token = get_as_groups.next_token\n\n for instance in as_all_instances_list:\n if instance.group_name in as_group_name:\n as_instances_list.append(instance.instance_id)\n return as_instances_list",
"def get_ec2(self, name: str) -> list:\n filters = [\n {\n 'Name': 'tag:Name',\n 'Values': [name]\n },\n {\n 'Name': 'instance-state-name',\n 'Values': ['running']\n }\n ]\n\n return list(self.ec2.instances.filter(Filters=filters).all())",
"def get_elb_instance_ids(elbclient, elbname):\r\n try:\r\n resp = elbclient.describe_load_balancers(LoadBalancerNames=[elbname])\r\n except:\r\n print(ex.message)\r\n return None\r\n return list(map(\r\n lambda x:x['InstanceId'],\r\n resp['LoadBalancerDescriptions'][0]['Instances']\r\n ))",
"def get_instances() -> dict:\n url = f\"{app.config['COMPUTE_SERVERS_REF']}/detail\"\n instances_rq = request(\n method=\"GET\", url=url, headers=build_header(), params={\"vm_state\": \"active\"},\n )\n\n if not instances_rq.ok:\n HTTPError(instances_rq.status_code)\n\n answer = {\"servers\": list()}\n for instance in instances_rq.json()[\"servers\"]:\n instance_info = dict(name=instance[\"name\"])\n instance_info[\"ip_addresses\"] = list()\n for network, info in instance[\"addresses\"].items():\n instance_info[\"ip_addresses\"].extend(entry[\"addr\"] for entry in info)\n answer[\"servers\"].append(instance_info)\n\n return answer",
"def list_as_instances(access_key, secret_key, region, autoscaling_group):\n\n aws_as = init_aws_as_conn(access_key, secret_key, region)\n aws_ec2 = init_aws_ec2_conn(access_key, secret_key, region)\n autoscaling_instances = []\n\n vm = aws_as.get_all_groups([autoscaling_group])\n autoscaling_instances_id = [j.instance_id for i in vm for j in i.instances]\n\n for instance_id in autoscaling_instances_id:\n vm = boto.ec2.instance.Instance(aws_ec2)\n vm.id = instance_id\n vm.update()\n autoscaling_instances.append(vm)\n\n return autoscaling_instances",
"def instances(self, endpoint_name=None):\n if endpoint_name is None:\n _, body = self.request('/v1.1/endpoint/instances', 'GET')\n else:\n _, body = self.request('/v1.1/endpoints/%s/instances' % endpoint_name, 'GET')\n return body",
"def get_instances_in_instance_group(\n self, name, zone, max_results=None, page_token=None):\n params = {}\n if max_results:\n params['maxResults'] = max_results\n if page_token:\n params['pageToken'] = page_token\n return self.call_api(\n '/zones/%s/instanceGroups/%s/listInstances' % (zone, name),\n method='POST',\n params=params,\n )",
"def get_instances(instance_ids):\n\n instances = dict()\n conn = connect_to_region(REGION, aws_access_key_id=KEY_ID, aws_secret_access_key=ACCESS_KEY)\n try:\n reservations = conn.get_all_instances(instance_ids)\n except EC2ResponseError, ex:\n print 'Got exception when calling EC2 for instances (%s): %s' % \\\n (\", \".join(instance_ids), ex.error_message)\n return instances\n\n for r in reservations:\n if len(r.instances) and r.instances[0].id in instance_ids:\n instances[r.instances[0].id] = r.instances[0].tags[\"Name\"]\n\n return instances",
"def list_instances(self):\n print '# AWS EC2 instances'\n self.compute.list_instances()",
"def ip(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n lookup = self.get(\"enable_ip_lookup\", False)\n ip = vmrun.getGuestIPAddress(lookup=lookup)\n if ip:\n puts_err(colored.green(ip))\n else:\n puts_err(colored.red(\"Unknown IP address\"))",
"def instances(self):\r\n # It would be more efficient to do this with filters now\r\n # but not all services that implement EC2 API support filters.\r\n instances = []\r\n rs = self.connection.get_all_instances()\r\n for reservation in rs:\r\n uses_group = [g.name for g in reservation.groups if g.name == self.name]\r\n if uses_group:\r\n instances.extend(reservation.instances)\r\n return instances",
"def ip_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IpMappingArgs']]]]:\n return pulumi.get(self, \"ip_addresses\")",
"def ip_addresses(self) -> pulumi.Output[Sequence['outputs.IpMappingResponse']]:\n return pulumi.get(self, \"ip_addresses\")",
"def _get_IP_addresses(hostname):\n try:\n answers, auth, addit = yield DNSclient.lookupAddress(hostname)\n except Exception as exc: # Too many different DNS failures to catch...\n log.exception('DNS Resolution failure: %r for name: %r', exc, hostname)\n returnValue([])\n\n returnValue(\n [answer.payload.dottedQuad()\n for answer in answers if answer.type == dns.A])",
"def endpoint_ips(self, endpoint_name=None):\n if endpoint_name is None:\n _, body = self.request('/v1.1/endpoint/ips', 'GET')\n else:\n _, body = self.request('/v1.1/endpoints/%s/ips' % endpoint_name, 'GET')\n return body",
"def ex_describe_addresses(self, nodes):\n if not nodes:\n return {}\n\n params = { 'Action': 'DescribeAddresses' }\n\n if len(nodes) == 1:\n params.update({\n 'Filter.0.Name': 'instance-id',\n 'Filter.0.Value.0': nodes[0].id\n })\n\n result = self.connection.request(self.path,\n params=params.copy()).object\n\n node_instance_ids = [ node.id for node in nodes ]\n nodes_elastic_ip_mappings = {}\n\n for node_id in node_instance_ids:\n nodes_elastic_ip_mappings.setdefault(node_id, [])\n for element in self._findall(result, 'addressesSet/item'):\n instance_id = self._findtext(element, 'instanceId')\n ip_address = self._findtext(element, 'publicIp')\n\n if instance_id not in node_instance_ids:\n continue\n\n nodes_elastic_ip_mappings[instance_id].append(ip_address)\n return nodes_elastic_ip_mappings",
"def public_ip_addresses(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"public_ip_addresses\")",
"def _get_instances(instance_tags, region):\n return ec2_conn[region].get_all_instances(filters={\"tag:Name\": instance_tags})",
"def get_instance_and_ip_list_for_stack_id(self,heatcln,stack_id):\n #Get the instance list for this stack\n resources = heatcln.resources.list(stack_id)\n instance_list = []\n ip_list = []\n \n for resource in resources:\n res_info = resource._info\n \n #Add those resources that are instances\n if res_info['resource_type'] == 'AWS::EC2::Instance':\n instance_list.append(resource)\n if res_info['resource_type'] == 'AWS::EC2::EIPAssociation':\n ip_list.append(resource)\n return instance_list,ip_list",
"def describe_elastic_ips(InstanceId=None, StackId=None, Ips=None):\n pass",
"def target_dns_ip_addresses(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"target_dns_ip_addresses\")",
"def public_ip_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"public_ip_addresses\")",
"def get_instances(self):\n for server in self.cs.servers.list():\n match = self.cluster_re.match(server.name)\n if match:\n for ip in server.networks['public']:\n if ip.count('.'):\n v4ip = ip\n yield (match.group('role'), v4ip)",
"def _instancelist(self):\n\n rv = []\n self.iname = {}\n for resv in self.conn.get_all_reservations():\n for inst in resv.instances:\n if inst.state != 'terminated':\n name = inst.tags.get('Name',None)\n rv.append([inst.id,inst.state])\n if name is not None:\n rv.append([name,inst.state])\n else:\n rv.append([inst.id+'-needsName',inst.state])\n self.iname[name] = inst.id\n self.iname[inst.id] = inst.id\n return rv"
]
| [
"0.690664",
"0.684622",
"0.6602771",
"0.61565304",
"0.6142604",
"0.60926044",
"0.60590464",
"0.60518944",
"0.60219425",
"0.60186154",
"0.6005966",
"0.5953812",
"0.5896932",
"0.5894132",
"0.5887551",
"0.5881828",
"0.5880497",
"0.58594406",
"0.58584875",
"0.5843627",
"0.57725513",
"0.5771376",
"0.5764534",
"0.57623094",
"0.5738564",
"0.57189965",
"0.5708412",
"0.5692253",
"0.56806165",
"0.56643283"
]
| 0.6998335 | 0 |
Lookup the public DNS for a given AWS RDS instance name. | def rds_lookup(session, hostname):
client = session.client('rds')
response = client.describe_db_instances(DBInstanceIdentifier=hostname)
item = response['DBInstances']
if len(item) == 0:
print("Could not find DNS for '{}'".format(hostname))
return None
else:
return item[0]['Endpoint']['Address'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def instance_public_lookup(session, hostname):\n if session is None:\n return None\n\n client = session.client('ec2')\n response = client.describe_instances(\n Filters=[{\"Name\": \"tag:Name\", \"Values\": [hostname]},\n {\"Name\": \"instance-state-name\", \"Values\": [\"running\"]}])\n\n item = response['Reservations']\n if len(item) == 0:\n return None\n else:\n item = item[0]['Instances']\n if len(item) == 0:\n return None\n else:\n item = item[0]\n if 'PublicDnsName' in item:\n return item['PublicDnsName']\n return None",
"def get_instance_by_cname ( ec2_conn, r53_conn, dns_cname ) :\n record = get_r53_record_by_name( r53_conn, dns_cname, record_type = 'CNAME' )\n if record :\n instance_dns_name = record.resource_records[ 0 ]\n print \"Getting ec2 instance with DNS name: \" + instance_dns_name\n instances = ec2_conn.get_only_instances( filters = { \"dns-name\": [ instance_dns_name ] } )\n for instance in instances :\n return instance",
"def public_dns(self) -> Optional[str]:\n return pulumi.get(self, \"public_dns\")",
"def cloudfront_public_lookup(session, hostname):\n if session is None:\n return None\n\n client = session.client('cloudfront')\n response = client.list_distributions(\n MaxItems='100'\n )\n items = response[\"DistributionList\"][\"Items\"]\n for item in items:\n cloud_front_domain_name = item[\"DomainName\"]\n if item[\"Aliases\"][\"Quantity\"] > 0:\n if hostname in item[\"Aliases\"][\"Items\"]:\n return cloud_front_domain_name\n return None",
"def amazon_public_address() -> str:\n check_url = 'http://169.254.169.254/latest/meta-data/public-ipv4'\n check_timeout = float(CONFIG['network']['check_timeout'])\n try:\n with urllib.request.urlopen(\n url=check_url, timeout=check_timeout,\n ) as response:\n return response.read().decode().strip()\n except Exception as error:\n return None",
"def elb_public_lookup(session, hostname):\n\n if session is None:\n return None\n\n client = session.client('elb')\n responses = client.describe_load_balancers()\n\n hostname_ = hostname.replace(\".\", \"-\")\n\n for response in responses[\"LoadBalancerDescriptions\"]:\n if response[\"LoadBalancerName\"].startswith(hostname_):\n return response[\"DNSName\"]\n return None",
"def get_ip_by_name ( route53_conn, dns_name ) :\n record = get_r53_record_by_name( route53_conn, dns_name )\n if record :\n return record.resource_records[ 0 ]\n\n return None",
"def nslookup(name, dnsserver='', prevent_unqualified_dns=True):\n # if it looks like an IP address, don't try to resolve it\n if digitsAndDots.match(name):\n return (0, \"OK\")\n if name != \"localhost\" and prevent_unqualified_dns:\n name = name + \".\" # prevent unqualified DNS lookups\n\n # TODO: we really want to call something along the lines of\n # google2/io/safe_gethostbyname, this will require some python trickery.\n\n # If dnsserver is an empty string, then mkarg() will escape it with\n # quotes and the host call will try to use \"''\" as a dns server and fail\n # So call mkarg only if actually given a non-empty-string dnsserver\n if not dnsserver:\n dnsserver = ''\n if dnsserver != '':\n dnsserver = commands.mkarg(dnsserver)\n\n executestring = commands.mkarg(\n \"host -t a %s %s 2>/dev/null | grep has\\ address | wc -l\"\n % (commands.mkarg(name), dnsserver))\n\n (stat, out) = commands.getstatusoutput('alarm 5 sh -c %s' % executestring)\n if stat != 0:\n return (1, \"TIMEOUT\") # E.g. DNS server does not respond\n\n if int(out) == 0:\n return (2, \"cannot resolve\")\n\n return (0, \"OK\")",
"def public_ip_dns(resolv, nameservers, rdatatype, server, responsetype):\n for ns in nameservers:\n try:\n answer = resolv.query(ns, rdatatype)\n nameserver = answer[0].to_text()\n except Exception as e:\n print(e)\n continue\n resolve_public_ip(nameserver, server, responsetype)",
"def machine_lookup(session, hostname, public_ip = True):\n\n try:\n idx, target = hostname.split('.', 1)\n idx = int(idx) # if it is not a valid number, then it is a hostname\n hostname = target\n except:\n idx = 0\n\n client = session.client('ec2')\n response = client.describe_instances(Filters=[{\"Name\":\"tag:Name\", \"Values\":[hostname]},\n {\"Name\":\"instance-state-name\", \"Values\":[\"running\"]}])\n\n item = response['Reservations']\n if len(item) == 0:\n print(\"Could not find IP address for '{}'\".format(hostname))\n return None\n else:\n item.sort(key = lambda i: i['Instances'][0][\"InstanceId\"])\n\n if len(item) <= idx:\n print(\"Could not find IP address for '{}' index '{}'\".format(hostname, idx))\n return None\n else:\n item = item[idx]['Instances'][0]\n if 'PublicIpAddress' in item and public_ip:\n return item['PublicIpAddress']\n elif 'PrivateIpAddress' in item and not public_ip:\n return item['PrivateIpAddress']\n else:\n print(\"Could not find IP address for '{}'\".format(hostname))\n return None",
"def get_r53_record_by_name ( route53_conn, dns_name, record_type = 'A' ) :\n dns_name = dns_name.lower( )\n zone_info = route53_conn.get_hosted_zone( route_53_hosted_zoneid )\n zone_name = zone_info[ 'GetHostedZoneResponse' ][ 'HostedZone' ][ 'Name' ]\n zone = route53_conn.get_zone( zone_name )\n return zone.find_records( name = dns_name, type = record_type )",
"def use_public_dns(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"use_public_dns\")",
"def dns(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"dns\")",
"def resolv(hostname):\n\n ips = list()\n\n # Create resolver object\n res = resolver.Resolver()\n\n # Choose the correct DNS servers\n # Blue DNS servers\n if hostname.startswith('b-'):\n res.nameservers = ['172.16.2.10', '172.16.2.11']\n # Green DNS servers\n elif hostname.startswith('g-'):\n res.nameservers = ['10.0.2.10', '10.0.2.11']\n # Default to white DNS servers\n else:\n res.nameservers = ['194.47.252.134', '194.47.252.135']\n\n # Query\n try:\n query = res.query(hostname)\n for answer in query:\n ips.append(answer.address)\n except resolver.NXDOMAIN:\n raise CouldNotResolv\n\n # Return query result\n return ips",
"def host_dns_name(self):\n res = self.search(base='', scope=ldb.SCOPE_BASE, attrs=['dNSHostName'])\n return str(res[0]['dNSHostName'][0])",
"def nslookup(self):\n if len(self.hostnames) == 0:\n st, out = commands.getstatusoutput('get_instance_by_service %s' % self.bns)\n assert st == 0, \"Failure:'get_instance_by_service %s', errno=%d\" % (self.bns, st)\n self.hostnames = out.split('\\n')\n assert self.hostnames, 'No hosts found for bns: \"%s\"' % self.bns",
"def reverse_dns_sna(ipaddress):\n\n r = requests.get(\"http://api.statdns.com/x/%s\" % ipaddress)\n\n if r.status_code == 200:\n names = []\n\n for item in r.json()['answer']:\n name = str(item['rdata']).strip(\".\")\n names.append(name)\n\n return names\n elif r.json()['code'] == 503:\n # NXDOMAIN - no PTR record\n return None",
"def public_ip():\n found_public_ip = False\n try:\n metadata = get_instance_metadata()\n for key, value in metadata.items():\n LOG.info(\"{0}: {1}\".format(key, value))\n\n if metadata['public-ipv4'] is not None:\n try:\n socket.inet_aton(metadata['public-ipv4'])\n found_public_ip = True\n except socket.error:\n found_public_ip = False\n\n except Exception:\n LOG.exception('check_database_connection')\n return False\n return found_public_ip",
"def instanceid_lookup(session, hostname):\n if session is None:\n return None\n\n client = session.client('ec2')\n response = client.describe_instances(\n Filters=[{\"Name\": \"tag:Name\", \"Values\": [hostname]}])\n\n item = response['Reservations']\n if len(item) == 0:\n return None\n else:\n item = item[0]['Instances']\n if len(item) == 0:\n return None\n else:\n item = item[0]\n if 'InstanceId' in item:\n return item['InstanceId']\n return None",
"def dns_lookup(self, hostname, aux):\n\n resolver = Resolver()\n\n # If the host doesn't have the A record (IPv4),\n # trying to find its AAAA record (IPv6).\n try:\n addr = resolver.query(hostname, \"A\")[0] # <---+\n ver = 4 # |\n except Exception as e: # From the dnspython lib. --------+\n try: # |\n addr = resolver.query(hostname, \"AAAA\")[0] # <---+\n ver = 6\n except Exception as e:\n addr = ver = aux._ERR_PREFIX\n\n return (addr, ver)",
"def get_dns(self):\n return self.mycam.devicemgmt.GetDNS()",
"def get_dns_info(self, name_or_ip) :\n self._logger.debug(\"get_dns_info: entering with name_or_ip=%s\" % \\\n (name_or_ip))\n if not is_name(name_or_ip) : # check for matching ipaddress\n for hostname in afs.CONFIG.hosts :\n if name_or_ip in afs.CONFIG.hosts[hostname] :\n self._logger.debug(\"%s is hard-mapped to (%s,%s)\" % \\\n (name_or_ip, [hostname,],afs.CONFIG.hosts[hostname]))\n self._logger.debug(\"returning %s\" % ({ \"names\" : [hostname, ], \\\n \"ipaddrs\" : afs.CONFIG.hosts[hostname] }) )\n return { \"names\" : [hostname, ], \\\n \"ipaddrs\" : afs.CONFIG.hosts[hostname] }\n\n # is a hostname\n \n # hard-mapped, primary Hostname given \n if name_or_ip in afs.CONFIG.hosts.keys() :\n self._logger.debug(\"%s is hard-mapped to (%s,%s)\" % ( name_or_ip, \\\n [name_or_ip, ], afs.CONFIG.hosts[name_or_ip]))\n self._logger.debug(\"returning %s\" % ({\"names\" : [name_or_ip,], \"ipaddrs\" : \\\n afs.CONFIG.hosts[name_or_ip] }) )\n return {\"names\" : [name_or_ip,], \"ipaddrs\" : \\\n afs.CONFIG.hosts[name_or_ip] }\n\n \n # memory_cache \n if name_or_ip in self.memory_cache[\"dns_info\"] :\n self._logger.debug(\"%s in localcache hard-mapped (%s)\" % \\\n (name_or_ip,self.memory_cache[\"dns_info\"][name_or_ip] ))\n self._logger.debug(\"returning %s\" % (self.memory_cache[\"dns_info\"][name_or_ip]))\n return self.memory_cache[\"dns_info\"][name_or_ip]\n \n for srv in self.memory_cache[\"dns_info\"] :\n if name_or_ip in self.memory_cache[\"dns_info\"][srv][\"names\"] :\n self._logger.debug(\"%s is hard-mapped to %s\" % (name_or_ip, \\\n self.memory_cache[\"dns_info\"][srv] ))\n self._logger.debug(\"returning %s\" % (self.memory_cache[\"dns_info\"][srv]) )\n return self.memory_cache[\"dns_info\"][srv]\n\n # lookup from OS\n \n try : \n dns_info = socket.gethostbyaddr(name_or_ip)\n servernames = [dns_info[0]] + dns_info[1]\n ipaddrs = dns_info[2]\n except socket.gaierror :\n if is_name(name_or_ip) :\n raise LookupUtilError(\"Cannot resolve %s\" % name_or_ip)\n else :\n self._logger.warn(\"Cannot resolve %s\" % name_or_ip)\n self._logger.debug(\"returning %s\" % ({\"names\": [], \"ipaddrs\" : [name_or_ip,]}) )\n return {\"names\": [], \"ipaddrs\" : [name_or_ip,]}\n\n\n self._logger.debug(\"%s resolves to %s\" % (name_or_ip, dns_info)) \n # check if resolved ip-address matches (if hostalias was used)\n for hostname in afs.CONFIG.hosts :\n for ipaddr in ipaddrs :\n # ignore IP if we're asked to do so.\n if ipaddr in afs.CONFIG.ignoreIPList : continue\n if ipaddr in afs.CONFIG.hosts[hostname] :\n self._logger.debug(\"%s is hard-mapped to (%s,%s)\" % \\\n (ipaddrs, [hostname,],afs.CONFIG.hosts[hostname]))\n # add this hostalias to list in memory_cache\n if self.memory_cache[\"dns_info\"].has_key(hostname) :\n self.memory_cache[\"dns_info\"][hostname][\"names\"] = \\\n [hostname, ]\n self.memory_cache[\"dns_info\"][hostname][\"ipaddrs\"] = \\\n afs.CONFIG.hosts[hostname]\n else :\n self.memory_cache[\"dns_info\"][hostname] = { \\\n \"names\" : [hostname,], \\\n \"ipaddrs\" : afs.CONFIG.hosts[hostname]}\n self._logger.debug(\"memory_cache = %s\" % \\\n (self.memory_cache))\n ipaddrs = []\n self._logger.debug(\"returning %s\" % ({ \"names\" : [hostname], \"ipaddrs\" : \\\n afs.CONFIG.hosts[hostname] }) )\n return { \"names\" : [hostname], \"ipaddrs\" : \\\n afs.CONFIG.hosts[hostname] }\n\n if \"nxdomain\" in servernames[0] : \n raise LookupUtilError(\"cannot resolve DNS-entry %s\" % name_or_ip)\n # fill up localcache\n self.memory_cache[\"dns_info\"][servernames[0]] = { \\\n \"names\" : servernames, 
\"ipaddrs\" : ipaddrs }\n self._logger.debug(\"memory_cache = %s\" % (self.memory_cache))\n self._logger.debug(\"returning %s\" % ({\"names\": servernames, \"ipaddrs\" : ipaddrs}) )\n return {\"names\": servernames, \"ipaddrs\" : ipaddrs}",
"def _lv_dns_lookup(name):\n if dns is None:\n return _lv_pydns_lookup(name)\n resp = dns.resolver.query(name, \"srv\")\n if resp.response.flags & dns.flags.TC:\n resp = dns.resolver.query(name, \"srv\", tcp=True)\n return [(a.priority, a.weight, a.port, a.target.to_text(True)) for a in resp]",
"def get_by_dns_name(cls, dns_name: str) -> \"ELB\":\n _, region, _ = dns_name.split(\".\", maxsplit=2)\n client = BotoClientProxy(\"elb\", region)\n\n response = client.describe_load_balancers()\n next_marker = response.get(\"NextMarker\")\n load_balancers = response[\"LoadBalancerDescriptions\"] # type: List\n while next_marker:\n response = client.describe_load_balancers(Marker=next_marker)\n next_marker = response.get(\"NextMarker\")\n load_balancers.extend(response[\"LoadBalancerDescriptions\"])\n\n for load_balancer in load_balancers:\n if load_balancer[\"DNSName\"] == dns_name:\n return cls.from_boto_dict(load_balancer)\n\n raise ELBNotFound(dns_name)",
"def resolve_public_ip(nameserver, server, responsetype):\n request_resolver = dns.resolver.Resolver()\n request_resolver.nameservers = [nameserver,]\n try:\n answer = request_resolver.query(server, responsetype)\n ip = answer[0].to_text().replace('\"','').strip()\n ipaddress.ip_address(ip)\n print(ip)\n sys.exit()\n except Exception as e:\n print(e)\n return None",
"def dnslookup(url) -> 'text': \n try:\n hn = socket.gethostbyaddr(url)[0] \n except socket.error as msg: \n hn = 'nohost'\n return hn",
"def host_ip(hostname: str) -> str:\n try:\n return socket.gethostbyname(hostname)\n except socket.gaierror:\n return \"No record found.\"",
"def get_dynamic_dns(self):\n return self.mycam.devicemgmt.GetDynamicDNS()",
"def query_dns_server(self) -> str:\n dig_command = [\n \"dig\",\n \"-p\",\n str(self.dns_port),\n \"-t\",\n self.record_type,\n \"+short\",\n self.query_address\n ]\n\n try:\n dig_process = subprocess.Popen(dig_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n universal_newlines=True)\n output, error = dig_process.communicate()\n\n if dig_process.returncode != 0:\n raise RuntimeError(f\"Failed to query the DNS server: {error.strip()}\")\n\n return output.strip()\n\n except subprocess.SubprocessError as e:\n raise RuntimeError(f\"Failed to query the DNS server: {str(e)}\")",
"def custom_dns_resolver(hostname, type='A'):\n nameservers = globals.config.service.initial_dns\n custom_resolver = dns.resolver.Resolver()\n custom_resolver.nameservers = nameservers\n answer = custom_resolver.query(hostname, type)\n\n return str(random.choice(answer))"
]
| [
"0.7454354",
"0.6793534",
"0.64854825",
"0.638419",
"0.637897",
"0.63712627",
"0.63638",
"0.6352668",
"0.62254494",
"0.6122659",
"0.60128444",
"0.59898543",
"0.59575254",
"0.59271836",
"0.58951855",
"0.58788455",
"0.5849263",
"0.58443725",
"0.5781869",
"0.5759335",
"0.5698314",
"0.5628929",
"0.562694",
"0.5549593",
"0.5546818",
"0.5541585",
"0.5488357",
"0.54814893",
"0.5476527",
"0.5470651"
]
| 0.780593 | 0 |
Locate an item in a list based on a predicate function. | def _find(xs, predicate):
for x in xs:
if predicate(x):
return x
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find(func, list_seq):\n for list_item in list_seq:\n if func(list_item):\n return list_item",
"def list_find(f, items):\n for i, x in enumerate(items):\n if f(x):\n return i\n return None",
"def finditem(func, seq):\n return next((item for item in seq if func(item)))",
"def get_from_list(list_search, key, value):\n # TODO: Handle iteration error better\n return next(element for element in list_search if getattr(element, key) == value)",
"def find(f, seq):\n for item in seq:\n if f(item): \n return item",
"def findItem(items: Iterable, condition):\n for item in items:\n if condition(item):\n return item",
"def find(function, seq):\r\n for item in seq:\r\n if function(item): \r\n return item\r\n return None",
"def find_item(self, func):\n return next((_(x) for x in self._ if func(x)), None)",
"def first(l: iter, predicate):\n for ele in l:\n if predicate(ele):\n return ele\n raise RuntimeError(\"Found nothing to match predicate\")",
"def find(items, term, key=None):\n if key is None:\n key = lambda other: term == other\n \n for item in items:\n if key(item):\n return item",
"def find(f, seq):\n\tfor num,item in enumerate(seq):\n\t\tif f(item): return num\n\treturn -1",
"def find(function, iterable):\n for x in iterable:\n if function(x) == True:\n return x",
"def search_list(search):\n fun_list = basic_list_exception.make_list()\n for x in range(len(fun_list)):\n try:\n location = fun_list.index(search)\n return location\n except ValueError:\n return -1",
"def find(listy, x):\n return listy.index(x) if x in listy else None",
"def index(predicate,iterable):\n for i,value in enumerate(iterable):\n if value.startswith(predicate):\n return i\n raise ValueError",
"def find_in_list_via_attribute(self, objlist, attribute, equals):\n\n for x in objlist:\n if hasattr(x, attribute):\n if getattr(x, attribute) == equals:\n return x\n\n return None",
"def find(f, seq):\n for item in seq:\n if f(item): \n return item\n\n \"\"\"\n Example usage of iterate: \n >>> c = []; \\\n c.append(node(0.5,1,'a')); \\\n c.append(node(0.25,2,'b')); \\\n c.append(node(0.125,3,'c')); \\\n c.append(node(0.125,4,'d')); \\\n iterate(c) ; reportcode(c) # doctest: +NORMALIZE_WHITESPACE\n #Symbol Count Codeword\n a (0.5) 1\n b (0.25) 01\n c (0.12) 000\n d (0.12) 001\n \"\"\"",
"def person_in_list(position: OrderedDict, lst: List[OrderedDict]):\n for p in filter(lambda x: x[\"person\"] == position[\"person\"], lst):\n return p\n return None",
"def find_an_item_in_list(self, level):\n for element in self.list:\n element.find_an_item(element, level)",
"def __call__ (self, item, * args, ** kw) :\n return self.predicate (item, * args, ** kw)",
"def linear_search(mylist, key):\r\n for i in range(len(mylist)):\r\n if mylist[i] == key:\r\n return i\r\n return -1",
"def find(self, item):\n current = self.head\n while current.next != None:\n if current.data == item:\n return current\n current = current.next",
"def onlist(listtocheck, item):\n\n # Return the result\n return item in listtocheck",
"def index_where(iterable, pred):\n # type: (Iterable[T], Callable[[T], bool]) -> Optional[int]\n for i, el in enumerate(iterable):\n if pred(el):\n return i\n return None",
"def linear_search(alist, key):\n for i in range(len(alist)):\n if alist[i] == key:\n return i\n return -1",
"def find_first_match(_list, _item):\n for _list_item in _list:\n if _item[\"pid\"] == _list_item[\"pid\"]:\n return _list_item\n return None",
"def find(self, list, key, value):\n for i, dic in enumerate(list):\n if dic[key] == value:\n return i\n return -1",
"def _search_list(list, key, status_date):\n\n for l in list:\n if l[key] == status_date:\n return l",
"def linear_search(vlist, srchval): # somewhat different from book\n#Look at each item in list. If it equals the value you are looking for, stop.\n # linear_search_2.py\n index = 0\n for item in vlist:\n if item == srchval:\n return index # implicit break\n index += 1\n \n return -1",
"def find(iteratee, seq):\n for item in filter(iteratee, seq):\n return item"
]
| [
"0.7345009",
"0.7329399",
"0.69864964",
"0.68509775",
"0.6848589",
"0.6835385",
"0.6782268",
"0.67755824",
"0.631964",
"0.63177335",
"0.62872744",
"0.6159832",
"0.60666436",
"0.60513777",
"0.60492444",
"0.60366356",
"0.59750605",
"0.59668785",
"0.5961629",
"0.595214",
"0.5874667",
"0.58687407",
"0.5856959",
"0.5836008",
"0.58270127",
"0.5805963",
"0.5777356",
"0.57386255",
"0.5737286",
"0.57169676"
]
| 0.7533438 | 0 |
Terminate all of the instances for an ASG, with the given timeout between each termination. | def asg_restart(session, hostname, timeout, callback=None):
client = session.client('ec2')
resource = session.resource('ec2')
response = client.describe_instances(Filters=[{"Name":"tag:Name", "Values":[hostname]},
{"Name":"instance-state-name", "Values":["running"]}])
for reservation in response['Reservations']:
for instance in reservation['Instances']:
id = instance['InstanceId']
print("Terminating {} instance {}".format(hostname, id))
resource.Instance(id).terminate()
print("Sleeping for {} minutes".format(timeout/60.0))
time.sleep(timeout)
if callback is not None:
callback() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def terminate_all(self):\n self._stop_all('terminate')",
"def terminate_instances(self, ids):\n self.conn.terminate_instances(instance_ids=ids)",
"def terminate_instances(self, props):\n return self._vm_async_apply(props, 'delete')",
"def terminate_instance_in_asg(instance_id):\n if not app_config['DRY_RUN']:\n logger.info('Terminating ec2 instance in ASG {}...'.format(instance_id))\n try:\n response = client.terminate_instance_in_auto_scaling_group(\n InstanceId=instance_id,\n ShouldDecrementDesiredCapacity=True\n )\n if response['ResponseMetadata']['HTTPStatusCode'] == requests.codes.ok:\n logger.info('Termination signal for instance is successfully sent.')\n else:\n logger.info('Termination signal for instance has failed. Response code was {}. Exiting.'.format(response['ResponseMetadata']['HTTPStatusCode']))\n raise Exception('Termination of instance failed. Response code was {}. Exiting.'.format(response['ResponseMetadata']['HTTPStatusCode']))\n\n except client.exceptions.ClientError as e:\n if 'DryRunOperation' not in str(e):\n raise",
"def stop_all_instances(self):\n print '# Stopping all the instances'\n number = self.compute.stop_all_instances()\n print '%d instances were stopped' % number",
"def delete_asg(client, asg):\n if len(asg['LoadBalancerNames']) > 0:\n client.detach_load_balancers(\n AutoScalingGroupName=asg['AutoScalingGroupName'],\n LoadBalancerNames=asg['LoadBalancerNames'],\n )\n client.update_auto_scaling_group(\n AutoScalingGroupName=asg['AutoScalingGroupName'],\n MinSize=0,\n MaxSize=0,\n DesiredCapacity=0,\n )\n client.resume_processes(\n AutoScalingGroupName=asg['AutoScalingGroupName'],\n )\n\n wait_for_instances(client, asg, 'Terminated')\n\n client.delete_auto_scaling_group(\n AutoScalingGroupName=asg['AutoScalingGroupName'],\n )",
"def terminate(self) -> None:\n self.robot.terminate_all()",
"def terminate_all_processes(processes):\n for process in processes:\n process.terminate()",
"def _terminateAll(self):\n\n # Termination of all processes\n try :\n for process in self.processes:\n process.terminate()\n except AttributeError:\n pass\n\n return",
"def terminate_preemptible_instances(self, context, instances):\n # NOTE(aloga): we should not delete them directly, but probably send\n # them a signal so that the user is able to save her work.\n elevated = context.elevated()\n for instance in instances:\n LOG.info(_LI(\"Deleting %(uuid)s\") % {\"uuid\": instance[\"uuid\"]})\n instance = self.compute_api.get(elevated,\n instance[\"uuid\"],\n want_objects=True)\n self.compute_api.delete(elevated, instance)",
"def cleanup():\n for th in THREAD_REGISTER.values():\n th.exit()\n th.join(timeout=3)",
"def DelGPActiveTunnels(asg_name):\n\n logger.info('Deleting GP Active Tunnels CloudWatch alarm for ASG: ' + asg_name)\n alarmname= asg_name + '-cw-gpat'\n common_alarm_func_del(alarmname)\n return",
"def kill_children(timeout=1) -> List[psutil.Process]:\n procs = child_manager.children_pop_all()\n for p in procs:\n try:\n p.terminate()\n except psutil.NoSuchProcess:\n pass\n gone, alive = psutil.wait_procs(procs, timeout=timeout)\n for p in alive:\n logger.warning(\"Cleaning up child: %d\", p.pid)\n p.kill()\n return alive",
"def terminateAll(self):\n with self.__queueLock:\n for queue in [self.__queue, self.__clientQueue]:\n queue.clear()\n\n for runList in [self.__running, self.__clientRunning]:\n unfinishedRuns = [run for run in runList if run is not None]\n for run in unfinishedRuns:\n run.kill()",
"def terminate_instance(self, instance_ids):\n instances_terminated = []\n if (len(instance_ids) > 0):\n self.__validate_instance_id(instance_ids)\n euca_conn = self.__make_connection()\n for instance_id in instance_ids:\n try:\n instance = euca_conn.terminate_instances(instance_id.encode(\"latin-1\"))\n instances_terminated.append(instance)\n except Exception, ex:\n self.euca.display_error_and_exit('%s' % ex)\n\n return instances_terminated\n else:\n return False",
"def killall(cleanup=lambda:None, wait_s=16):\n # TODO(infinity0): log this somewhere, maybe\n global _isTerminating, _CHILD_PROCS\n if _isTerminating: return\n _isTerminating = True\n # terminate all\n for proc in _CHILD_PROCS:\n if proc.poll() is None:\n proc.terminate()\n # wait and make sure they're dead\n for i in range(wait_s):\n _CHILD_PROCS = [proc for proc in _CHILD_PROCS\n if proc.poll() is None]\n if not _CHILD_PROCS: break\n time.sleep(1)\n # if still existing, kill them\n for proc in _CHILD_PROCS:\n if proc.poll() is None:\n proc.kill()\n time.sleep(0.5)\n # reap any zombies\n for proc in _CHILD_PROCS:\n proc.poll()\n cleanup()",
"def terminate_instances(self, instance_ids=None):\r\n params = {}\r\n if instance_ids:\r\n self.build_list_params(params, instance_ids, 'InstanceId')\r\n return self.get_list('TerminateInstances', params,\r\n [('item', Instance)], verb='POST')",
"def terminate(ctx):\n ctl = ctx.ctl\n jobs = ctl('list-avail', '--partition', 'main', flatten=False)\n\n for job in jobs:\n jobid = job['id']\n click.echo('Terminating {}'.format(jobid))\n ctl('terminate', '--jobid', jobid)",
"def terminate(\n instance_id=None,\n name=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n filters=None,\n):\n instances = find_instances(\n instance_id=instance_id,\n name=name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n return_objs=True,\n filters=filters,\n )\n if instances in (False, None, []):\n return instances\n\n if len(instances) == 1:\n instances[0].terminate()\n return True\n else:\n log.warning(\"Refusing to terminate multiple instances at once\")\n return False",
"def terminate(self):\n for processor in self._processors.values():\n Stats.decr(\n \"dag_processing.processes\", tags={\"file_path\": processor.file_path, \"action\": \"terminate\"}\n )\n processor.terminate()",
"def stop(self: AutoScalingCluster, wait: bool = False, timeout: int = None) -> None:\n self.server.stop(wait=wait, timeout=timeout)\n self.autoscaler.stop(wait=wait, timeout=timeout)\n super().stop(wait=wait, timeout=timeout)",
"def terminate(self, hostname):\n # unique names\n matches = list(filter(lambda x: x.name == hostname, self.instances))\n\n if len(matches) == 0:\n # already terminated\n return\n elif len(matches) == 1:\n instance = matches[0]\n # terminate it\n self.names.append(instance.name)\n self.instances.remove(instance)\n # actual deletion from openstack\n status = self.nova.servers.get(instance.id).status\n\n while status == 'BUILD':\n time.sleep(5)\n status = self.nova.servers.get(instance.id).status\n print(\"Waiting for VM to finish BUILD before terminating.\")\n instance.delete()\n print(\"Worker VM \" + hostname + \" deleted.\")\n else:\n # inconsistency in the system\n raise ValueError('More than one of same name in self.instances')",
"def terminate():\n with open (f\"{CLUSTER_FOLDER}/uuid\", \"r\") as f:\n uuid = f.read().strip()\n\n start_time = time.time() \n cluster = delete_cluster(os.environ[\"T2_URL\"], os.environ[\"T2_TOKEN\"], uuid) \n if(not cluster):\n log(\"Failed to terminate cluster via API.\")\n exit(1)\n\n log(f\"Started termination of cluster '{cluster['id']}'. Waiting for cluster to be terminated...\")\n cluster = get_cluster(os.environ[\"T2_URL\"], os.environ[\"T2_TOKEN\"], cluster['id'])\n while(TIMEOUT_SECONDS > (time.time()-start_time) and cluster['status']['state'] != 'TERMINATED' and not cluster['status']['failed']):\n time.sleep(5)\n cluster = get_cluster(os.environ[\"T2_URL\"], os.environ[\"T2_TOKEN\"], cluster['id'])\n\n if(cluster['status']['failed']):\n log(\"Cluster termination failed.\")\n exit(1)\n\n if(TIMEOUT_SECONDS <= (time.time()-start_time)):\n log(\"Timeout while launching cluster.\")\n exit(1)\n\n log(f\"Cluster '{cluster['id']}' is terminated.\")",
"def terminate(filter=\".*\"):\n list_instances,list_headers = ec2list(filter=filter)\n if not list_instances:\n print(\"No instance matched the filter\")\n sys.exit(1)\n title = \"Pick the instances to terminate\"\n options = [ '{} ---- {} ---- {} ---- {}'.format(\n x[\"name\"],\n x[\"privateip\"],\n x[\"id\"],\n x[\"launchtime\"],\n x[\"state\"]) for x in list_instances ]\n\n list_selected = pick(options, title, multiselect=True, default_index=len(options)-1)\n del(options[:-1])\n list_ips = []\n if not list_selected:\n print(\"No host selected, exiting\")\n return\n list_ids = []\n for option,index in list_selected:\n list_ids.append(list_instances[index]['id'])\n print(\"Terminating instances {}\".format(list_ids))\n boto3.client(\"ec2\").terminate_instances(InstanceIds=list_ids)",
"async def shutdown(self, timeout=10) -> None:\n running: list[asyncio.tasks.Task] = []\n\n # Cancel next task / get running list\n _LOGGER.info(\"Shutting down scheduled tasks\")\n for task in self._tasks:\n if task.next:\n task.next.cancel()\n if not task.job or task.job.done():\n continue\n running.append(task.job)\n task.job.cancel()\n\n if not running:\n return\n\n # Wait until all are shutdown\n try:\n async with async_timeout.timeout(timeout):\n await asyncio.wait(running)\n except asyncio.TimeoutError:\n _LOGGER.error(\"Timeout while waiting for jobs shutdown\")",
"def terminate_instances(self, instance_ids):\n response = instance.terminate_instances(self.url, self.verb,\n self.headers, self.version,\n instance_ids)\n if response is not None :\n res = TerminateInstancesResponse.TerminateInstancesResponse()\n parseString(str(response.text), res)\n return res\n else :\n return None",
"def DelActiveSessions(asg_name):\n logger.info('Deleting Active Sessions CloudWatch alarm for ASG: ' + asg_name)\n\n alarmname= asg_name + '-cw-as'\n common_alarm_func_del(alarmname)\n return",
"def TerminateExpiredMachine(instance_id):\n TerminateMachine(instance_id, enum.MACHINE_STATUS.EXPIRED)",
"def wait_for_instances(client, asg, desired_state=None, desired_health=None,\n desired_count=None):\n for i in range(61):\n if i == 60:\n raise Exception('Tried for 5 minutes, giving up.')\n sleep(10)\n _asg = client.describe_auto_scaling_groups(\n AutoScalingGroupNames=[asg['AutoScalingGroupName']],\n )['AutoScalingGroups'][0]\n\n if(\n desired_count is not None and\n len(_asg['Instances']) < desired_count\n ):\n continue\n\n # Check instance states\n all_matching = True\n for instance in _asg['Instances']:\n if(\n desired_state is not None and\n instance['LifecycleState'] != desired_state\n ):\n all_matching = False\n break\n if(\n desired_health is not None and\n instance['HealthStatus'] != desired_health\n ):\n all_matching = False\n break\n if all_matching:\n break",
"def terminate_all_publishers(self):\n for publisher in self.publishers:\n publisher.terminate()"
]
| [
"0.6357317",
"0.6248194",
"0.6084295",
"0.59850127",
"0.5979804",
"0.5917791",
"0.5914561",
"0.58749413",
"0.5839589",
"0.5790333",
"0.5714129",
"0.56740266",
"0.56631154",
"0.5644305",
"0.5632723",
"0.5591108",
"0.557721",
"0.5561397",
"0.55381894",
"0.5480454",
"0.5472366",
"0.54714304",
"0.5467639",
"0.5466995",
"0.5459285",
"0.54589003",
"0.543883",
"0.5431573",
"0.5428279",
"0.542111"
]
| 0.6897586 | 0 |
Lookup the Group name for the ASG creating the EC2 instances with the given hostname | def asg_name_lookup(session, hostname):
if session is None:
return None
client = session.client('autoscaling')
response = client.describe_auto_scaling_groups()
if len(response['AutoScalingGroups']) == 0:
return None
else:
        # DP NOTE: Unfortunately describe_auto_scaling_groups() doesn't allow filtering results
for g in response['AutoScalingGroups']:
t = _find(g['Tags'], lambda x: x['Key'] == 'Name')
if t and t['Value'] == hostname:
return g['AutoScalingGroupName']
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def instanceid_lookup(session, hostname):\n if session is None:\n return None\n\n client = session.client('ec2')\n response = client.describe_instances(\n Filters=[{\"Name\": \"tag:Name\", \"Values\": [hostname]}])\n\n item = response['Reservations']\n if len(item) == 0:\n return None\n else:\n item = item[0]['Instances']\n if len(item) == 0:\n return None\n else:\n item = item[0]\n if 'InstanceId' in item:\n return item['InstanceId']\n return None",
"def get_group_by_name_v2(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/GroupV2/NameV2/\"))",
"def find_group ( ec2_conn, base_name, group_type ) :\n secgrp = None\n secgrps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ get_secgrp_name( base_name, group_type ) ] } )\n for s in secgrps :\n secgrp = s\n break\n\n return secgrp",
"def find_asg(client, name):\n describe = client.describe_auto_scaling_groups()\n matches = []\n for row in describe['AutoScalingGroups']:\n _name = row['AutoScalingGroupName']\n if _name == name:\n matches.append((0, row))\n else:\n match = re.match(re.escape(name) + r'\\-([0-9]+)', _name)\n if match:\n ts = match.group(1)\n matches.append((ts, row))\n if len(matches) == 0:\n return None\n else:\n return sorted(matches, key=lambda x: x[0])[-1][1]",
"def _get_sg_name(sg_name, session):\n return session.resource(\"ec2\").SecurityGroup(sg_name).group_name",
"def instance_public_lookup(session, hostname):\n if session is None:\n return None\n\n client = session.client('ec2')\n response = client.describe_instances(\n Filters=[{\"Name\": \"tag:Name\", \"Values\": [hostname]},\n {\"Name\": \"instance-state-name\", \"Values\": [\"running\"]}])\n\n item = response['Reservations']\n if len(item) == 0:\n return None\n else:\n item = item[0]['Instances']\n if len(item) == 0:\n return None\n else:\n item = item[0]\n if 'PublicDnsName' in item:\n return item['PublicDnsName']\n return None",
"def get_description(self):\n return self['hostgroup_name']",
"def instance_group(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"instance_group\")",
"def server_group_name(self) -> str:\n return pulumi.get(self, \"server_group_name\")",
"def getHostKey(instance):\n return instance['hostname']",
"def get_hostgroup(self, object_name, user_key = None):\n\t\treturn self.get_object('hostgroup',object_name, user_key = user_key)",
"def get_sg_name(ec2,s_id):\n name = \"\"\n try:\n security_group = ec2.SecurityGroup(s_id)\n # name = security_group.description\n name = security_group.group_name\n except:\n name = s_id\n\n return name",
"def get_lb_secgrp_name ( base_name, app_name ) :\n return get_secgrp_name( base_name, get_lb_secgrp_type( app_name ) )",
"def sg_lookup(session, vpc_id, group_name):\n if session is None:\n return None\n\n client = session.client('ec2')\n response = client.describe_security_groups(Filters=[{\"Name\": \"vpc-id\", \"Values\": [vpc_id]},\n {\"Name\": \"tag:Name\", \"Values\": [group_name]}])\n\n if len(response['SecurityGroups']) == 0:\n return None\n else:\n return response['SecurityGroups'][0]['GroupId']",
"def _get_group():\n bus = dbus.SystemBus()\n server = dbus.Interface(\n bus.get_object('org.freedesktop.Avahi', '/'),\n 'org.freedesktop.Avahi.Server',\n )\n\n return dbus.Interface(\n bus.get_object('org.freedesktop.Avahi', server.EntryGroupNew()),\n 'org.freedesktop.Avahi.EntryGroup',\n )",
"def find_secgrp ( ec2_conn, secgrp_name ) :\n sec_grps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ secgrp_name ] } )\n if len( sec_grps ) > 0 :\n return sec_grps[ 0 ]\n \n return None",
"def machine_lookup(session, hostname, public_ip = True):\n\n try:\n idx, target = hostname.split('.', 1)\n idx = int(idx) # if it is not a valid number, then it is a hostname\n hostname = target\n except:\n idx = 0\n\n client = session.client('ec2')\n response = client.describe_instances(Filters=[{\"Name\":\"tag:Name\", \"Values\":[hostname]},\n {\"Name\":\"instance-state-name\", \"Values\":[\"running\"]}])\n\n item = response['Reservations']\n if len(item) == 0:\n print(\"Could not find IP address for '{}'\".format(hostname))\n return None\n else:\n item.sort(key = lambda i: i['Instances'][0][\"InstanceId\"])\n\n if len(item) <= idx:\n print(\"Could not find IP address for '{}' index '{}'\".format(hostname, idx))\n return None\n else:\n item = item[idx]['Instances'][0]\n if 'PublicIpAddress' in item and public_ip:\n return item['PublicIpAddress']\n elif 'PrivateIpAddress' in item and not public_ip:\n return item['PrivateIpAddress']\n else:\n print(\"Could not find IP address for '{}'\".format(hostname))\n return None",
"def _get_group_from_host(self, wanted_group):\n wanted_gid = \"\"\n if (isinstance(wanted_group, int) or\n re.match(\"^\\\\d+$\", wanted_group)):\n wanted_gid = str(wanted_group)\n wanted_group = \"\"\n if wanted_gid:\n try:\n hgr = grp.getgrgid(int(wanted_gid))\n except (IOError, OSError, KeyError):\n return (\"\", \"\", \"\")\n return (str(hgr.gr_name), str(hgr.gr_gid), str(hgr.gr_mem))\n\n try:\n hgr = grp.getgrnam(wanted_group)\n except (IOError, OSError, KeyError):\n return (\"\", \"\", \"\")\n return (str(hgr.gr_name), str(hgr.gr_gid), str(hgr.gr_mem))",
"def rds_lookup(session, hostname):\n\n client = session.client('rds')\n response = client.describe_db_instances(DBInstanceIdentifier=hostname)\n\n item = response['DBInstances']\n if len(item) == 0:\n print(\"Could not find DNS for '{}'\".format(hostname))\n return None\n else:\n return item[0]['Endpoint']['Address']",
"def groupId(self):\r\n return \"pdok-locatie-server\"",
"def get_or_make_group(ec2, name, vpc_id=None, quiet=False):\n groups = ec2.security_groups.all()\n groups = [g for g in groups if g.group_name == name and g.vpc_id == vpc_id]\n if len(groups) > 0:\n return groups[0]\n else:\n if not quiet:\n print(\"Creating security group \" + name)\n vpc_id = vpc_id if vpc_id is not None else ''\n sg = ec2.create_security_group(\n GroupName=name,\n Description='AbStar cluster group',\n VpcId=vpc_id)\n return sg",
"def get_group_name(self):\n return self.groupname",
"def group_name(self) -> str:\n return pulumi.get(self, \"group_name\")",
"def get_group(self, address):\n return self.groups[address]",
"def get_sg_id(sg_name):\n print()\n print(\"Searching for SG ID\")\n client = boto3.client('ec2')\n all_sg = client.describe_security_groups()\n print(sg_name)\n grp_id = \"None\"\n for sec_grp in all_sg['SecurityGroups']:\n print(sec_grp['GroupName'])\n if sg_name == sec_grp['GroupName']:\n grp_id = sec_grp['GroupId']\n print()\n return grp_id",
"def nfvi_get_instance_groups(callback):\n cmd_id = _compute_plugin.invoke_plugin('get_instance_groups',\n callback=callback)\n return cmd_id",
"def _get_group(self):\n if self.resource.group is not None:\n try:\n return grp.getgrnam(self.resource.group).gr_gid\n except KeyError:\n raise error.InvalidGroup()",
"def elb_public_lookup(session, hostname):\n\n if session is None:\n return None\n\n client = session.client('elb')\n responses = client.describe_load_balancers()\n\n hostname_ = hostname.replace(\".\", \"-\")\n\n for response in responses[\"LoadBalancerDescriptions\"]:\n if response[\"LoadBalancerName\"].startswith(hostname_):\n return response[\"DNSName\"]\n return None",
"def _get_group(self, cluster_config=None):\n if cluster_config is None:\n cluster_config = self._session._call_method(\n vutil, \"get_object_property\", self._cluster, \"configurationEx\")\n if not cluster_config:\n # that should never happen. we should not procede with whatever\n # called us\n msg = 'Cluster {} must have an attribute \"configurationEx\".'\n raise exception.ValidationError(msg.format(self._cluster))\n\n group_ret = getattr(cluster_config, 'group', None)\n if not group_ret:\n return None\n\n hg_name = CONF.vmware.bigvm_deployment_free_host_hostgroup_name\n if not hg_name:\n raise exception.ValidationError('Function for special spawning '\n 'were called, but the setting '\n '\"bigvm_deployment_free_host_hostgroup_name\" is unconfigured.')\n\n for group in group_ret:\n # we are only interested in one special group\n if group.name == hg_name:\n return group",
"def test_get_autoscaling_group_properties_valid_asg_name(self):\n mock_asg_resource = Mock(name=\"Mock Autoscaling Client\")\n mock_asg_resource.describe_auto_scaling_groups.return_value = \\\n {\n \"AutoScalingGroups\": [\n {\n \"DesiredCapacity\": 2,\n \"Tags\": [\n {\n \"ResourceType\": \"auto-scaling-group\",\n \"ResourceId\": \"alpha0-test-instance-ServerGroup\",\n \"PropagateAtLaunch\": \"true\",\n \"Value\": \"alpha0-test-instance\",\n \"Key\": \"Name\"\n }\n ],\n \"AutoScalingGroupName\": \"alpha0-test-instance-ServerGroup\"\n }\n ]\n }\n result = ef_utils.get_autoscaling_group_properties(mock_asg_resource, \"alpha0\", \"test-instance\")\n self.assertEquals(result[0][\"DesiredCapacity\"], 2)\n self.assertEquals(result[0][\"AutoScalingGroupName\"], \"alpha0-test-instance-ServerGroup\")\n self.assertEquals(result[0][\"Tags\"][0][\"ResourceId\"], \"alpha0-test-instance-ServerGroup\")"
]
| [
"0.63809437",
"0.63582885",
"0.62746876",
"0.62431866",
"0.61943334",
"0.6136143",
"0.6110822",
"0.6015477",
"0.5964197",
"0.5958102",
"0.59372044",
"0.5880964",
"0.583179",
"0.57972753",
"0.57745284",
"0.57554305",
"0.57206607",
"0.57174826",
"0.5665552",
"0.5599069",
"0.55933475",
"0.5591926",
"0.5571674",
"0.5537693",
"0.5533713",
"0.55279505",
"0.5504349",
"0.5474852",
"0.5455322",
"0.5455231"
]
| 0.7942209 | 0 |
Lookup the Id for the VPC with the given domain name. | def vpc_id_lookup(session, vpc_domain):
if session is None:
return None
client = session.client('ec2')
response = client.describe_vpcs(Filters=[{"Name": "tag:Name", "Values": [vpc_domain]}])
if len(response['Vpcs']) == 0:
return None
else:
return response['Vpcs'][0]['VpcId'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_keystone_v3_domain_id(self, domain_name):\n LOG_OBJ.debug(\"Get the domain ID.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/domains?name=\" + \\\n str(domain_name)\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while getting the \"\n \"ID of domain\")\n print (\"No response from Server while getting the \"\n \"ID of domain\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get domain ID Failed with status %s and error \"\n \": %s\" % (response.status, response.data))\n print (\"Get domain ID Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Domain details : %s \" % output)\n if len(output['domains']) != 1:\n LOG_OBJ.debug(\"No. of domains with name %s is %s\"\n % (domain_name, len(output['domains'])))\n print(\"No. of domains with name %s is %s\"\n % (domain_name, len(output['domains'])))\n return\n\n return output['domains'][0]['id']",
"def subnet_id_lookup(session, subnet_domain):\n if session is None:\n return None\n\n client = session.client('ec2')\n response = client.describe_subnets(Filters=[{\"Name\": \"tag:Name\", \"Values\": [subnet_domain]}])\n if len(response['Subnets']) == 0:\n return None\n else:\n return response['Subnets'][0]['SubnetId']",
"def _find_zone(self, domain):\n while \".\" in domain:\n result = self._get_request(\n \"/1/product?service_name=domain&customer_name={domain}\".format(domain=domain),\n )\n if len(result) == 1:\n return (\n result[0][\"id\"],\n domain,\n )\n domain = domain[domain.find(\".\") + 1:]\n raise errors.PluginError(\"Domain not found\")",
"def _get_sd_id(name):\n cohesity_client = _get_client()\n log.info(\"Getting storage domain with name %s\", name)\n resp = cohesity_client.view_boxes.get_view_boxes(names=name)\n if resp:\n return resp[0].id",
"def get_dnid_by_dnname(self, dnname):\r\n for dn in self.dns:\r\n if dn.name == dnname:\r\n return dn.id\r\n return None",
"def domain_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"domain_id\")",
"def domain_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"domain_id\")",
"def _find_managed_zone(self, domain, record_name):\n\n zone_dns_name_guesses = [record_name] + dns_common.base_domain_name_guesses(domain)\n\n logger.debug(\"Guesses: \")\n for zone_name in zone_dns_name_guesses:\n logger.debug(\" - %s\", zone_name)\n\n for zone_name in zone_dns_name_guesses:\n # get the zone id\n try:\n logger.debug(\"looking for zone: %s\", zone_name)\n try:\n response = self.dns_client.get_zone(zone_name)\n if response.status == 200:\n logger.debug(\"Response data %s\", response.data)\n logger.debug(\"Found zone: %s\", zone_name)\n logger.debug(\"OCID: %s\", response.data.id)\n logger.debug(\"Compartment: %s\", response.data.compartment_id)\n return response.data.id, zone_name\n except oci.exceptions.ServiceError as e:\n logger.debug(\"Zone '%s' not found\", zone_name)\n except errors.PluginError as e:\n pass\n return None, None",
"def _get_zone_id_from_name(self, name):\r\n results = self.client['Account'].getDomains(\r\n filter={\"domains\": {\"name\": query_filter(name)}})\r\n return [x['id'] for x in results]",
"def get_domain_id_by_domainurl(domain_url):\r\n db = connect()\r\n cursor = db.cursor()\r\n sql_statement = \"\"\"\r\n SELECT domain_id FROM `domains` WHERE domain_url = %(d)s\r\n \"\"\"\r\n try:\r\n cursor.execute(sql_statement, {'d':domain_url})\r\n page = cursor.fetchone()\r\n except:\r\n cursor.close()\r\n db.close()\r\n raise RuntimeError(\"An Exception happened with the Database, make sure you are connected\")\r\n cursor.close()\r\n db.close()\r\n return page",
"def domain_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"domain_id\")",
"def lookup(self, domain_name, validate=True):\n try:\n domain = self.get_domain(domain_name, validate)\n except:\n domain = None\n return domain",
"def get_website_id_by_domain(self, domain):\r\n\r\n # Fetch website list\r\n self.fetch_website_list()\r\n\r\n # Loop through website list\r\n for website in self.website_list:\r\n if(domain == website['domain']):\r\n return website['id']",
"def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")",
"def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")",
"def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")",
"def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")",
"def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")",
"def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")",
"def lookup(self, domain_name, validate=True):\r\n try:\r\n domain = self.get_domain(domain_name, validate)\r\n except:\r\n domain = None\r\n return domain",
"def get_id(\n name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n in_states=None,\n filters=None,\n):\n instance_ids = find_instances(\n name=name,\n tags=tags,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n in_states=in_states,\n filters=filters,\n )\n if instance_ids:\n log.info(\"Instance ids: %s\", \" \".join(instance_ids))\n if len(instance_ids) == 1:\n return instance_ids[0]\n else:\n raise CommandExecutionError(\n \"Found more than one instance matching the criteria.\"\n )\n else:\n log.warning(\"Could not find instance.\")\n return None",
"def _get_domain(self, name=None, domain_id=None):\n try:\n if name != None:\n domain = self.conn.lookupByName(name)\n elif domain_id != None:\n domain = self.conn.lookupByNamtoprettyxmle(domain_id)\n \n self.logger.debug('Get libvirt domain: %s' % name)\n return domain\n except libvirt.libvirtError, ex:\n self.logger.error(ex)\n raise VirtDomainMonitorError(ex)",
"def instanceid_lookup(session, hostname):\n if session is None:\n return None\n\n client = session.client('ec2')\n response = client.describe_instances(\n Filters=[{\"Name\": \"tag:Name\", \"Values\": [hostname]}])\n\n item = response['Reservations']\n if len(item) == 0:\n return None\n else:\n item = item[0]['Instances']\n if len(item) == 0:\n return None\n else:\n item = item[0]\n if 'InstanceId' in item:\n return item['InstanceId']\n return None",
"def vpc_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"vpc_id\")",
"def vpc_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"vpc_id\")",
"def vpc_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"vpc_id\")",
"def vpc_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"vpc_id\")",
"def vpc_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"vpc_id\")",
"def vpc_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"vpc_id\")",
"def vpc_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"vpc_id\")"
]
| [
"0.7072395",
"0.66890675",
"0.62828547",
"0.60626096",
"0.60512835",
"0.60456634",
"0.60456634",
"0.5988567",
"0.5966641",
"0.5933054",
"0.5929906",
"0.5865763",
"0.5865732",
"0.5855496",
"0.5855496",
"0.5855496",
"0.5855496",
"0.5855496",
"0.5855496",
"0.583406",
"0.5822499",
"0.5783424",
"0.57458276",
"0.57359296",
"0.57359296",
"0.57359296",
"0.57359296",
"0.57359296",
"0.57359296",
"0.57359296"
]
| 0.81332946 | 0 |
Lookup the Id for the Subnet with the given domain name. | def subnet_id_lookup(session, subnet_domain):
if session is None:
return None
client = session.client('ec2')
response = client.describe_subnets(Filters=[{"Name": "tag:Name", "Values": [subnet_domain]}])
if len(response['Subnets']) == 0:
return None
else:
return response['Subnets'][0]['SubnetId'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_keystone_v3_domain_id(self, domain_name):\n LOG_OBJ.debug(\"Get the domain ID.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/domains?name=\" + \\\n str(domain_name)\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while getting the \"\n \"ID of domain\")\n print (\"No response from Server while getting the \"\n \"ID of domain\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get domain ID Failed with status %s and error \"\n \": %s\" % (response.status, response.data))\n print (\"Get domain ID Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Domain details : %s \" % output)\n if len(output['domains']) != 1:\n LOG_OBJ.debug(\"No. of domains with name %s is %s\"\n % (domain_name, len(output['domains'])))\n print(\"No. of domains with name %s is %s\"\n % (domain_name, len(output['domains'])))\n return\n\n return output['domains'][0]['id']",
"def get_dnid_by_dnname(self, dnname):\r\n for dn in self.dns:\r\n if dn.name == dnname:\r\n return dn.id\r\n return None",
"def lookup_netid(self, netid):\n self.setQuery(\"\"\"Select ?uid where {\n ?who <http://vivo.dartmouth.edu/ontology/netId> \"%s\" .\n ?who <http://vivo.dartmouth.edu/ontology/geiselId> ?uid .\n }\"\"\" % (netid))\n\n try:\n rval = self.query()\n try:\n g = rval.convert()\n except:\n pass\n return g['results']['bindings'][0]['uid']['value']\n except:\n return None",
"def get_net_id(self, net_name):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/networks\"\n _headers = {'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n result = self.request(\"GET\", _url, _headers, _body)\n\n if result is None:\n LOG_OBJ.error(\n \"No response from Server while trying to\"\n \" get networks of tenant: %s\" %\n self.project_info[\"project_id\"])\n return result\n\n if result.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get network Failed with status %s \" % result.status)\n return result.status\n\n output = json.loads(result.data)\n LOG_OBJ.debug(\"Networks: %s\" % output['networks'])\n\n for nets in output['networks']:\n if nets['name'].lower() == net_name.lower() and \\\n net_name == config.extnet_name:\n LOG_OBJ.debug(\"Net ID : %s \" % nets['id'])\n return nets['id']\n if nets['name'].lower() == net_name.lower() and \\\n nets['tenant_id'] == self.project_info[\"project_id\"]:\n LOG_OBJ.debug(\"Net ID : %s \" % nets['id'])\n return nets['id']\n\n LOG_OBJ.debug(\"Net:%s Not Found\" % net_name)\n return",
"def subnet_id(self) -> str:\n return pulumi.get(self, \"subnet_id\")",
"def get_website_id_by_domain(self, domain):\r\n\r\n # Fetch website list\r\n self.fetch_website_list()\r\n\r\n # Loop through website list\r\n for website in self.website_list:\r\n if(domain == website['domain']):\r\n return website['id']",
"def _get_domain(self, name=None, domain_id=None):\n try:\n if name != None:\n domain = self.conn.lookupByName(name)\n elif domain_id != None:\n domain = self.conn.lookupByNamtoprettyxmle(domain_id)\n \n self.logger.debug('Get libvirt domain: %s' % name)\n return domain\n except libvirt.libvirtError, ex:\n self.logger.error(ex)\n raise VirtDomainMonitorError(ex)",
"def _get_sd_id(name):\n cohesity_client = _get_client()\n log.info(\"Getting storage domain with name %s\", name)\n resp = cohesity_client.view_boxes.get_view_boxes(names=name)\n if resp:\n return resp[0].id",
"def lookup(self, domain_name, validate=True):\r\n try:\r\n domain = self.get_domain(domain_name, validate)\r\n except:\r\n domain = None\r\n return domain",
"def subnet_id(self) -> Optional[str]:\n return pulumi.get(self, \"subnet_id\")",
"def lookup(self, domain_name, validate=True):\n try:\n domain = self.get_domain(domain_name, validate)\n except:\n domain = None\n return domain",
"def get_domain_ip_via_sni(self, path_tracefile, domain):\n packets = self.get_client_hello_packets(path_tracefile)\n for packet in packets:\n servername = self.get_client_hello_servername(packet)\n if servername == domain:\n ip = packet.getlayer(IP).dst\n return ip\n return -1",
"def get_id(self, fqdn):\n res = self.db.execute(sqlalchemy.select([ model.imaging_servers.c.id ],\n whereclause=(model.imaging_servers.c.fqdn==fqdn)))\n return self.singleton(res)",
"def get(cls, subdomain, name):\n return cls.get_by_key_name(subdomain + ':' + name)",
"def _find_zone(self, domain):\n while \".\" in domain:\n result = self._get_request(\n \"/1/product?service_name=domain&customer_name={domain}\".format(domain=domain),\n )\n if len(result) == 1:\n return (\n result[0][\"id\"],\n domain,\n )\n domain = domain[domain.find(\".\") + 1:]\n raise errors.PluginError(\"Domain not found\")",
"def network_id(tenant_id, auth_token, network_name):\r\n content = common_utils.do_request(\r\n tenant_id, auth_token,\r\n method='GET',\r\n body='', service=\"network\",\r\n path='networks.json')\r\n for network in range(len(content[\"networks\"])):\r\n if content[\"networks\"][network][\"name\"] == network_name:\r\n network_id = content[\"networks\"][network][\"id\"]\r\n return network_id",
"def subnet_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subnet_id\")",
"def subnet_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subnet_id\")",
"async def fetch_subdomain(self, subdomain: str):\n value = await self.http.check_subdomain(subdomain)\n if (value or {}).get('exists') is True:\n # currently this endpoint returns {} if the subdomain does not\n # exist, but just in case it eventually returns 204 or something,\n # we check more explicitly instead.\n if value.get('teamId'):\n using_subdomain = await self.getch_team(value.get('teamId'))\n elif value.get('userId'):\n using_subdomain = await self.getch_user(value.get('userId'))\n\n return using_subdomain\n\n else:\n return None",
"def domain_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"domain_id\")",
"def domain_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"domain_id\")",
"def domain_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"domain_id\")",
"def get_record_id(self):\n subdomain, record_id = self.key().name().split(':', 1)\n return record_id",
"def id(self): \n if self.cloudnet:\n return self.cloudnet.id\n else:\n return None",
"def id(self):\n return self._domain.id",
"def vpc_id_lookup(session, vpc_domain):\n if session is None:\n return None\n\n client = session.client('ec2')\n response = client.describe_vpcs(Filters=[{\"Name\": \"tag:Name\", \"Values\": [vpc_domain]}])\n if len(response['Vpcs']) == 0:\n return None\n else:\n return response['Vpcs'][0]['VpcId']",
"def get_dnid_by_dnname(aps, dns, dnname, wlan_support=True):\r\n cnt = _get_ApDnContainer_(aps, dns, wlan_support)\r\n return cnt.get_dnid_by_dnname(dnname)",
"def subnet_id(self):\n return self._subnet_id",
"def _get_domain_for_name(self, name):\n domain = self.connection.lookupByName(name)\n return domain",
"def get_domain_id_by_domainurl(domain_url):\r\n db = connect()\r\n cursor = db.cursor()\r\n sql_statement = \"\"\"\r\n SELECT domain_id FROM `domains` WHERE domain_url = %(d)s\r\n \"\"\"\r\n try:\r\n cursor.execute(sql_statement, {'d':domain_url})\r\n page = cursor.fetchone()\r\n except:\r\n cursor.close()\r\n db.close()\r\n raise RuntimeError(\"An Exception happened with the Database, make sure you are connected\")\r\n cursor.close()\r\n db.close()\r\n return page"
]
| [
"0.62816155",
"0.6252993",
"0.61216253",
"0.6116545",
"0.57401055",
"0.56873554",
"0.5656026",
"0.5638093",
"0.56223834",
"0.5614454",
"0.5596863",
"0.5574551",
"0.55497974",
"0.5529772",
"0.5517432",
"0.5475204",
"0.5445243",
"0.5445243",
"0.54308754",
"0.54301274",
"0.54301274",
"0.54282904",
"0.5391144",
"0.538563",
"0.53768206",
"0.537119",
"0.533256",
"0.53151345",
"0.531056",
"0.5281083"
]
| 0.7532417 | 0 |
Lookup the Id for the AMI with the given name. If ami_name ends with '.boss', the AMI_VERSION environment variable is used to either search for the latest commit hash tagged AMI ('.bossh') or for the AMI with the specific tag ('.boss'). | def ami_lookup(session, ami_name, version = None):
if session is None:
return None
specific = False
if ami_name.endswith(".boss"):
ami_version = os.environ["AMI_VERSION"] if version is None else version
if ami_version == "latest":
# limit latest searching to only versions tagged with hash information
ami_search = ami_name + "-h*"
else:
ami_search = ami_name + "-" + ami_version
specific = True
else:
ami_search = ami_name
client = session.client('ec2')
response = client.describe_images(Filters=[{"Name": "name", "Values": [ami_search]}])
if len(response['Images']) == 0:
if specific:
print("Could not locate AMI '{}', trying to find the latest '{}' AMI".format(ami_search, ami_name))
return ami_lookup(session, ami_name, version = "latest")
else:
return None
else:
response['Images'].sort(key=lambda x: x["CreationDate"], reverse=True)
image = response['Images'][0]
ami = image['ImageId']
tag = _find(image.get('Tags', []), lambda x: x["Key"] == "Commit")
commit = None if tag is None else tag["Value"]
return (ami, commit) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_ami_by_name ( ec2_conn, ami_name ) :\n amis = ec2_conn.get_all_images( filters = { \"name\": [ ami_name ] } )\n for ami in amis :\n return ami",
"def get_ami_by_id ( ec2_conn, ami_id ) :\n amis = ec2_conn.get_all_images( image_ids = [ ami_id ] )\n for ami in amis :\n return ami",
"def minimal_linux_ami(self):\n client = self.aws.get_client('ec2')\n try:\n res = client.describe_images(Owners=['self', '099720109477'],\n Filters=[\n {\n 'Name': 'virtualization-type',\n 'Values': ['hvm']\n },\n {\n 'Name': 'root-device-type',\n 'Values': ['ebs']\n },\n {\n 'Name': 'architecture',\n 'Values': ['x86_64']\n },\n {\n 'Name': 'description',\n 'Values': ['Canonical, Ubuntu, 16.04 LTS, amd64 xenial image*']\n }])\n except ClientError as ex:\n print(ex)\n sys.exit()\n timestep = None\n current_time = datetime.now(timezone.utc)\n ami_id = None\n for image in res['Images']:\n if timestep:\n create_time = parse(image['CreationDate'])\n current_timestep = current_time - create_time\n if current_timestep < timestep:\n timestep = current_timestep\n ami_id = image['ImageId']\n else:\n create_time = parse(image['CreationDate'])\n timestep = current_time - create_time\n ami_id = image['ImageId']\n return ami_id",
"def _get_vm_id_by_name(self, vm_name):\n vm_info = self.connection.compute.find_server(vm_name)\n return (vm_info.id if vm_info else None)",
"def get_apid_by_apname(self, apname):\r\n \r\n for ap in self.aps:\r\n if ap.name == apname:\r\n return ap.get_id()\r\n return None",
"def ami(self):\n return getattr(self._data.ami, self._name, None)",
"def get_ami_keyname ( app_name ) :\n return app_name + '.ami'",
"def get_vmid_by_name(cls, container, datacenter, name):\n\n obj = Query.get_obj(container, datacenter)\n\n # recurse through datacenter object attributes looking for vm that\n # matches hostname.\n if hasattr(obj, 'vmFolder'):\n for virtmachine in obj.vmFolder.childEntity:\n if hasattr(virtmachine, 'childEntity'):\n for virt in virtmachine.childEntity:\n if virt.name == name:\n return virt._moId\n else:\n if virt.name == name:\n return virt._moId\n return None",
"def find_image(image_name):\n imgs = pyrax.images\n image = imgs.list(name=image_name)[0]\n\n # print image.id\n return image.id",
"def ami_by_location(self, location):\n if self.region == 'pytest' or not self.region or not location:\n # Short-circuit if we're running a test or do not have data\n return 'ami-notfound'\n client = boto3.client('ec2', region_name=self.region)\n response = client.describe_images(Filters=[\n {'Name': 'manifest-location', 'Values': [location]},\n ])\n if len(response['Images']) == 0:\n raise RuntimeError('No AMIs found with location: %s' % location)\n if len(response['Images']) > 1:\n raise RuntimeError('Multiple AMIs found: %s' % response['Images'])\n return response['Images'][0]['ImageId']",
"def get_image_id(self, image_name):\n _url = \"http://\" + self.host_ip + \":8774/v2/\" +\\\n self.cloud_admin_info[\"project_id\"] + \"/images/detail\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.cloud_admin_info[\"token_project\"]}\n _body = None\n\n _result = self.request(\"GET\", _url, _headers, _body)\n if _result is None:\n LOG_OBJ.error(\"No response from server while getting images.\")\n return\n if _result.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get image ID Failed with status %s \" %\n _result.status)\n return _result.status\n\n _output = json.loads(_result.data)\n for _images in _output['images']:\n if _images['name'].lower() == image_name.lower():\n LOG_OBJ.info(\"Image Name: %s, Image ID : %s \" %\n (image_name, _images['id']))\n return _images['id']\n LOG_OBJ.error(\"The image: %s is NOT found\" % image_name)",
"def test_get_f1_ami_id():\n\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import get_f1_ami_id\n try:\n ami = get_f1_ami_id()\n except Exception as e:\n pytest.fail(f\"get_f1_ami_id() raised {e} and this likely means you need to run 'scripts/update_test_amis.py'\")\n\n if re.match(r\"^ami-[0-9a-f]+$\",ami) is None:\n pytest.fail(f\"'{ami}' doesn't look like a legit AMI ID and this likely means you need to run 'scripts/update_test_amis.py'\")",
"def get_id_from_name(item_name):\n try:\n return next(item for item in mapping if item[\"name\"].lower() == item_name.lower())[\"id\"]\n except StopIteration:\n return None",
"def get_id_from_name(self, the_name: str) -> Optional[str]:\n\n prospective = None\n for key, value in self.labels.items():\n if value == the_name:\n prospective = key\n break\n return prospective",
"def get_ami_by_id(self, image_id):\n images = self._driver.list_images(ex_owner=self.account_id)\n image = [i for i in images if i.id == image_id][0]\n return image",
"def image_version(self, image_name, image_tag, ignore_not_found=False):\n if image_tag == \"local\":\n image_tag = \"latest\"\n try:\n docker_info = self.host.client.inspect_image(\"{}:{}\".format(image_name, image_tag))\n return docker_info['Id']\n except NotFound:\n # TODO: Maybe auto-build if we can?\n if ignore_not_found:\n return None\n else:\n raise ImageNotFoundException(\n \"Cannot find image {}:{}\".format(image_name, image_tag),\n image=image_name,\n image_tag=image_tag,\n )",
"def instanceid_lookup(session, hostname):\n if session is None:\n return None\n\n client = session.client('ec2')\n response = client.describe_instances(\n Filters=[{\"Name\": \"tag:Name\", \"Values\": [hostname]}])\n\n item = response['Reservations']\n if len(item) == 0:\n return None\n else:\n item = item[0]['Instances']\n if len(item) == 0:\n return None\n else:\n item = item[0]\n if 'InstanceId' in item:\n return item['InstanceId']\n return None",
"def _get_image_id(image_name, instance_profile_arn=None,\n ec2_client=None, region_name=None):\n owners = []\n filters = []\n image_id = image_name\n if not image_name:\n # Amazon has its own Linux distribution that is largely binary\n # compatible with Red Hat Enterprise Linux.\n image_name = 'amzn2-ami-hvm-2.0.????????.?-x86_64-gp2'\n owners = ['amazon']\n filters = [\n {'Name': 'name', 'Values': [image_name]},\n ]\n elif not image_name.startswith('ami-'):\n if not instance_profile_arn:\n raise RuntimeError(\"instance_profile_arn must be defined when\"\\\n \" image_name is not already an id.\")\n look = re.match(r'arn:aws:iam::(\\d+):', instance_profile_arn)\n owners = [look.group(1)]\n filters = [\n {'Name': 'name', 'Values': [image_name]},\n ]\n\n if filters:\n if not ec2_client:\n ec2_client = boto3.client('ec2', region_name=region_name)\n resp = ec2_client.describe_images(Owners=owners, Filters=filters)\n images = sorted(resp['Images'], key=lambda item: item['CreationDate'],\n reverse=True)\n if len(images) > 1:\n LOGGER.warning(\n \"Found more than one image named '%s' in account '%s',\"\\\n \" picking the first one out of %s\",\n image_name, owners,\n [(image['CreationDate'], image['ImageId'])\n for image in images])\n image_id = images[0]['ImageId']\n return image_id",
"def test_get_image_id_by_name_in_uuid(self):\n img_id = str(uuid.uuid4())\n img_name = str(uuid.uuid4())\n self.my_image.id = img_id\n self.my_image.name = img_name\n self.sahara_client.images.get.side_effect = [\n sahara_base.APIException(error_code=400,\n error_name='IMAGE_NOT_REGISTERED')]\n\n self.sahara_client.images.find.return_value = [self.my_image]\n self.assertEqual(img_id, self.sahara_plugin.get_image_id(img_name))\n\n self.sahara_client.images.get.assert_called_once_with(img_name)\n self.sahara_client.images.find.assert_called_once_with(name=img_name)",
"def get_apid_by_apname(aps, dns, apname, wlan_support=True):\r\n cnt = _get_ApDnContainer_(aps, dns, wlan_support)\r\n return cnt.get_apid_by_apname(apname)",
"def get_ocid_by_name(self, db_name=None, db_workload=None):\n try:\n adb_list = \\\n self.get_client(self.oci_client).list_autonomous_databases(compartment_id=self.compartment_id,\n db_workload=self.db_workload).data\n if self.debug is True:\n self.log.info(\"ADB List: {0}\".format(adb_list))\n for db in adb_list:\n if db.db_name == self.db_name:\n self.database_id = db.id\n return db.id\n else:\n continue\n return None\n except AirflowException as e:\n self.log.error(e.response[\"Error\"][\"Message\"])",
"async def _get_account_id(db, name):\n assert name, 'no account name specified'\n _id = await db.query_one(\"SELECT id FROM hive_accounts WHERE name = :n\", n=name)\n assert _id, \"account not found: `%s`\" % name\n return _id",
"def get_image(name):\r\n return nova.images.find(name=name)",
"def _get_guid(adapter_name = \"\"):\n cmd = \"%s ei\" % _wlantool_cmd\n output = os.popen(cmd)\n buffer = \"\".join(line for line in output)\n\n if adapter_name:\n name_pattern = adapter_name\n else:\n name_pattern = \".*\"\n pattern = r\"Interface [0-9]+:[\\r\\n\\t]+GUID: ([0-9a-fA-F-]+)[\\r\\n\\t]+HWID: [\\\\_&0-9a-zA-Z]+[\\r\\n\\t]+Name: (%s)[\\r\\n]+\" % name_pattern\n\n match = re.search(pattern, buffer)\n if not match:\n raise Exception(\"Unable to get information: %s\" % buffer)\n\n return (match.group(1), match.group(2))",
"def get_bank_id_by_name(bank_name: str) -> int:\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select id from bank where name = '{}';\".format(bank_name)\n cursor.execute(query)\n data = cursor.fetchall()\n db.disconnect()\n return data[0][0]",
"def find_employee_id(self,name):\n nam = list(self.emp_id.values())\n val = nam.index(name)\n ids = list(self.emp_id.keys())\n id = ids[val]\n return id",
"def get_pkg_id(pkgs, name, version):\n for pinfo in pkgs:\n if pinfo[\"name\"] == name and pinfo[\"version\"] == version:\n return \"%(name)s/%(version)s/%(id)s\" % pinfo\n raise DerekError(\"No package %s %s in the branch\" % (name, version))",
"def get_id(self, name=None):\n\n # Support using integer IDs directly\n if isinstance(name, int):\n return name\n\n self.ensure_loaded()\n if name is not None:\n ems_systems = self.search('name', name.upper(), searchtype=\"match\")\n if ems_systems.empty:\n sys_names = self.list_all()['name'].to_list()\n raise ValueError(\n 'No matching systems found. You have access to: {0}'.format(sys_names))\n id = ems_systems.iloc[0]['id']\n else:\n ems_systems = self.list_all()\n if ems_systems.shape[0] == 1:\n id = ems_systems.iloc[0]['id']\n else:\n raise LookupError(\n 'Multiple ems systems found. Please select one from the available:\\n{0}'\n .format(ems_systems.loc[:, ['id', 'name']])\n )\n return id",
"def find_flavor_id(flavor_name: str):\n for flavor in get_flavors()[\"flavors\"]:\n if flavor_name == flavor[\"name\"]:\n return flavor[\"id\"]\n\n raise AttributeError(f\"No flavor '{flavor_name}' found\")",
"def GetVoucherAttributeManagerName(ivam):\n return kern.Symbolicate(unsigned(ivam))"
]
| [
"0.6669751",
"0.5976176",
"0.58265173",
"0.5811249",
"0.5770496",
"0.56842625",
"0.56431633",
"0.56096363",
"0.5589823",
"0.55881256",
"0.5372395",
"0.53390926",
"0.5291101",
"0.5243476",
"0.51454204",
"0.5121465",
"0.50918853",
"0.5083589",
"0.5083099",
"0.507428",
"0.5063579",
"0.4997817",
"0.4984547",
"0.49843115",
"0.49720752",
"0.49674743",
"0.4943311",
"0.492802",
"0.492792",
"0.4891916"
]
| 0.74430376 | 0 |
Lookup the Ids for all of the VPC Security Groups. | def sg_lookup_all(session, vpc_id):
if session is None:
return NoneDict()
client = session.client('ec2')
response = client.describe_security_groups(Filters=[{"Name": "vpc-id", "Values": [vpc_id]}])
if len(response['SecurityGroups']) == 0:
return NoneDict()
else:
sgs = NoneDict()
for sg in response['SecurityGroups']:
key = _find(sg.get('Tags', []), lambda x: x["Key"] == "Name")
if key:
key = key['Value']
sgs[key] = sg['GroupId']
return sgs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_sg_ids(vpc):\n list = [i.id for i in vpc.security_groups.all()]\n return list",
"def vpc_security_group_ids(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"vpc_security_group_ids\")",
"def vpc_security_group_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"vpc_security_group_ids\")",
"def vpc_security_group_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"vpc_security_group_ids\")",
"def security_group_ids(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"security_group_ids\")",
"def security_group_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"security_group_ids\")",
"def security_group_ids(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"security_group_ids\")",
"def _get_security_group_ids(group_names, tag_prefix,\n vpc_id=None, ec2_client=None, region_name=None):\n if not ec2_client:\n ec2_client = boto3.client('ec2', region_name=region_name)\n if not vpc_id:\n vpc_id, _ = _get_vpc_id(tag_prefix, ec2_client=ec2_client,\n region_name=region_name)\n resp = ec2_client.describe_security_groups(\n Filters=[{'Name': \"vpc-id\", 'Values': [vpc_id]}])\n group_ids = [None for _ in group_names]\n for security_group in resp['SecurityGroups']:\n for idx, group_name in enumerate(group_names):\n if security_group['GroupName'] == group_name:\n group_ids[idx] = security_group['GroupId']\n for group_id, group_name in zip(group_ids, group_names):\n if group_id:\n LOGGER.info(\"%s found %s security group %s\",\n tag_prefix, group_name, group_id)\n else:\n LOGGER.warning(\"%s cannot find security group %s\",\n tag_prefix, group_name)\n return group_ids",
"def security_group_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"security_group_ids\")",
"def security_group_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"security_group_ids\")",
"def sg_lookup(session, vpc_id, group_name):\n if session is None:\n return None\n\n client = session.client('ec2')\n response = client.describe_security_groups(Filters=[{\"Name\": \"vpc-id\", \"Values\": [vpc_id]},\n {\"Name\": \"tag:Name\", \"Values\": [group_name]}])\n\n if len(response['SecurityGroups']) == 0:\n return None\n else:\n return response['SecurityGroups'][0]['GroupId']",
"def getAdGroupIds(self):\n query = \"\"\"\n select adgroups.id as adgroup_id from adgroups \n join campaigns on campaigns.id = adgroups.campaign_id\n where adgroups.account_id = '%s'\n and campaigns.status = 'enabled'\n and adgroups.status = 'enabled'\n \n \"\"\" % (self.account_id)\n\n df = pd.read_sql(query, Database().createEngine())\n ids = list(df.adgroup_id.values)\n return ids",
"def get_srv_ppgrp_id(self):\n pp_grp_id_lst = list()\n for srv_grp in self.srv_grp_lst:\n pp_grp_id = list()\n for srv in srv_grp:\n pp_id = (\n self.conn.network.find_port(srv['name'] + '_pt_in').id,\n self.conn.network.find_port(srv['name'] + '_pt_out').id\n )\n pp_grp_id.append(pp_id)\n pp_grp_id_lst.append(pp_grp_id)\n return pp_grp_id_lst",
"def get_group_list(ip_address, headers):\n group_list = None\n group_url = 'https://%s/api/GroupService/Groups' % ip_address\n response = requests.get(group_url, headers=headers, verify=False)\n if response.status_code == 200:\n group_response = response.json()\n if group_response['@odata.count'] > 0:\n group_list = [x['Id'] for x in group_response['value']]\n else:\n print(\"No groups found at \", ip_address)\n else:\n print(\"No groups found at \", ip_address)\n return group_list",
"def groups_ids(self) -> List[int]:\n\n _, groups_ids = Skeleton._group_get_ids(self.groups)\n\n return groups_ids",
"def get_security(self):\n users = find_root(self)['users']\n userids_and_groups = []\n for userid in self._groups:\n if userid in users:\n userids_and_groups.append({'userid':userid, 'groups':self.get_groups(userid)})\n return userids_and_groups",
"def get_sg_id(sg_name):\n print()\n print(\"Searching for SG ID\")\n client = boto3.client('ec2')\n all_sg = client.describe_security_groups()\n print(sg_name)\n grp_id = \"None\"\n for sec_grp in all_sg['SecurityGroups']:\n print(sec_grp['GroupName'])\n if sg_name == sec_grp['GroupName']:\n grp_id = sec_grp['GroupId']\n print()\n return grp_id",
"def group_ids(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"group_ids\")",
"def get_edges_sg(ec2,sg_id,sg_ids_all):\n security_group = ec2.SecurityGroup(sg_id)\n asso_sgs = []\n rules = security_group.ip_permissions\n colours = False\n for rule in rules:\n if len(rule['UserIdGroupPairs']) > 0:\n for s in rule['UserIdGroupPairs']:\n if s['GroupId'] in sg_ids_all:\n asso_sgs.append(s['GroupId'])\n\n if len(rule['IpRanges']) > 0:\n for ip in rule['IpRanges']:\n if ip['CidrIp'] == '0.0.0.0/0':\n colours = True\n return asso_sgs,colours",
"def security_list_ids(self):\n return self._security_list_ids",
"def group_ids(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"group_ids\")",
"def test_aws_service_api_security_groups_get(self):\n pass",
"def security_groups(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"security_groups\")",
"def security_groups(self, oid):\n try:\n path = u'/servers/%s/os-security-groups' % oid\n res = self.client.call(path, u'GET', data=u'', \n token=self.manager.identity.token)\n self.logger.debug(u'Get openstack server security groups: %s' % truncate(res))\n return res[0][u'security_groups']\n except Exception as error:\n self.logger.error(error, exc_info=True)\n data = []\n return res",
"def list_secgroups(self, name=None):",
"def security_groups(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"security_groups\")",
"def security_groups(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"security_groups\")",
"def security_identities(self):\n return self._security_identities",
"def nsg_ids(self):\n return self._nsg_ids",
"def index(self, req, tenant_id):\n LOG.debug(\"Index() called with %s\" % (tenant_id))\n\n sec_groups = models.SecurityGroup().find_all(tenant_id=tenant_id,\n deleted=False)\n\n # Construct the mapping from Security Groups to Security Group Rules\n rules_map = dict([(g.id, g.get_rules()) for g in sec_groups])\n\n return wsgi.Result(\n views.SecurityGroupsView(sec_groups,\n rules_map,\n req, tenant_id).list(), 200)"
]
| [
"0.776428",
"0.7407091",
"0.7198566",
"0.7198566",
"0.68077105",
"0.68063724",
"0.67918974",
"0.6652626",
"0.66312367",
"0.66312367",
"0.63980854",
"0.6179441",
"0.6063443",
"0.5990153",
"0.59719115",
"0.59415746",
"0.5895164",
"0.58848244",
"0.5872261",
"0.5848759",
"0.5840419",
"0.5789772",
"0.576865",
"0.5683176",
"0.5668151",
"0.56417453",
"0.56417453",
"0.5639723",
"0.5620078",
"0.55962217"
]
| 0.7723206 | 1 |
Lookup the Id for the VPC Security Group with the given name. | def sg_lookup(session, vpc_id, group_name):
if session is None:
return None
client = session.client('ec2')
response = client.describe_security_groups(Filters=[{"Name": "vpc-id", "Values": [vpc_id]},
{"Name": "tag:Name", "Values": [group_name]}])
if len(response['SecurityGroups']) == 0:
return None
else:
return response['SecurityGroups'][0]['GroupId'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_sg_id(sg_name):\n print()\n print(\"Searching for SG ID\")\n client = boto3.client('ec2')\n all_sg = client.describe_security_groups()\n print(sg_name)\n grp_id = \"None\"\n for sec_grp in all_sg['SecurityGroups']:\n print(sec_grp['GroupName'])\n if sg_name == sec_grp['GroupName']:\n grp_id = sec_grp['GroupId']\n print()\n return grp_id",
"def find_secgrp ( ec2_conn, secgrp_name ) :\n sec_grps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ secgrp_name ] } )\n if len( sec_grps ) > 0 :\n return sec_grps[ 0 ]\n \n return None",
"def get_group_id(self, group_name):\n uri_vars = {\"q\": group_name, \"start\": 0, \"count\": \"Infinity\"}\n group_uri = urllib.parse.urlencode(uri_vars)\n full_url = self.base_url + f\"/group/group/findgroup.html?{group_uri}\"\n result_str = self.fetch(full_url)\n result = json.loads(result_str)\n group_id = result[\"items\"][0][\"id\"] # Choose ID of first result\n logger.debug(f\"Found {group_id} for group {group_name}\")\n return group_id",
"def security_group_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"security_group_id\")",
"def security_group_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"security_group_id\")",
"def security_group_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"security_group_id\")",
"def security_group_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"security_group_id\")",
"def find_group ( ec2_conn, base_name, group_type ) :\n secgrp = None\n secgrps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ get_secgrp_name( base_name, group_type ) ] } )\n for s in secgrps :\n secgrp = s\n break\n\n return secgrp",
"def getGroupId(groupName):\r\n return Group.getGroupId(str(groupName))",
"def security_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"security_group_id\")",
"def security_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"security_group_id\")",
"def get_or_make_group(ec2, name, vpc_id=None, quiet=False):\n groups = ec2.security_groups.all()\n groups = [g for g in groups if g.group_name == name and g.vpc_id == vpc_id]\n if len(groups) > 0:\n return groups[0]\n else:\n if not quiet:\n print(\"Creating security group \" + name)\n vpc_id = vpc_id if vpc_id is not None else ''\n sg = ec2.create_security_group(\n GroupName=name,\n Description='AbStar cluster group',\n VpcId=vpc_id)\n return sg",
"def _get_sg_name(sg_name, session):\n return session.resource(\"ec2\").SecurityGroup(sg_name).group_name",
"def security_group_exists(self, sg_id=None, name=None):\n if sg_id:\n return sg_id in [sg.id for sg in self.get_all_security_groups()]\n elif name:\n return name in [sg.name for sg in self.get_all_security_groups()]",
"def sg_lookup_all(session, vpc_id):\n if session is None:\n return NoneDict()\n\n client = session.client('ec2')\n response = client.describe_security_groups(Filters=[{\"Name\": \"vpc-id\", \"Values\": [vpc_id]}])\n\n if len(response['SecurityGroups']) == 0:\n return NoneDict()\n else:\n sgs = NoneDict()\n for sg in response['SecurityGroups']:\n key = _find(sg.get('Tags', []), lambda x: x[\"Key\"] == \"Name\")\n if key:\n key = key['Value']\n sgs[key] = sg['GroupId']\n\n return sgs",
"def get_group(self, group_name):\n\n return self._group[group_name]",
"def get_sg_name(ec2,s_id):\n name = \"\"\n try:\n security_group = ec2.SecurityGroup(s_id)\n # name = security_group.description\n name = security_group.group_name\n except:\n name = s_id\n\n return name",
"def get(self, sg_id):\n secgroup = self.client.show_security_group(sg_id).get('security_group')\n sg_dict = self._sg_name_dict(sg_id, secgroup['security_group_rules'])\n return SecurityGroup(secgroup, sg_dict)",
"def getGroupByName(self, name):\n for group in self.groups:\n if name == group.name:\n return group\n\n return None",
"def create_sg(vpc_id, description, group_name):\n client = boto3.client('ec2')\n security_group = str(group_name + \"_sg\")\n\n # get the security groups\n idle_sg = get_sg()\n\n print(idle_sg)\n print(security_group)\n\n # if security group doesnt exist, create it\n if security_group not in idle_sg:\n print(\"Creating SG\")\n return client.create_security_group(\n Description=description,\n GroupName=security_group,\n VpcId=vpc_id\n )\n return get_sg_id(security_group)",
"def group_id(self) -> str:\n return pulumi.get(self, \"group_id\")",
"def group_id(self) -> str:\n return pulumi.get(self, \"group_id\")",
"def vpc_id_lookup(session, vpc_domain):\n if session is None:\n return None\n\n client = session.client('ec2')\n response = client.describe_vpcs(Filters=[{\"Name\": \"tag:Name\", \"Values\": [vpc_domain]}])\n if len(response['Vpcs']) == 0:\n return None\n else:\n return response['Vpcs'][0]['VpcId']",
"def _get_security_group_ids(group_names, tag_prefix,\n vpc_id=None, ec2_client=None, region_name=None):\n if not ec2_client:\n ec2_client = boto3.client('ec2', region_name=region_name)\n if not vpc_id:\n vpc_id, _ = _get_vpc_id(tag_prefix, ec2_client=ec2_client,\n region_name=region_name)\n resp = ec2_client.describe_security_groups(\n Filters=[{'Name': \"vpc-id\", 'Values': [vpc_id]}])\n group_ids = [None for _ in group_names]\n for security_group in resp['SecurityGroups']:\n for idx, group_name in enumerate(group_names):\n if security_group['GroupName'] == group_name:\n group_ids[idx] = security_group['GroupId']\n for group_id, group_name in zip(group_ids, group_names):\n if group_id:\n LOGGER.info(\"%s found %s security group %s\",\n tag_prefix, group_name, group_id)\n else:\n LOGGER.warning(\"%s cannot find security group %s\",\n tag_prefix, group_name)\n return group_ids",
"def resolve_spritegroup(name):\n if name.value not in spritegroup_list:\n raise generic.ScriptError(\"Unknown identifier encountered: '{}'\".format(name.value), name.pos)\n return spritegroup_list[name.value]",
"def group_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"group_id\")",
"def group_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"group_id\")",
"def group_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"group_id\")",
"def vpc_security_group_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"vpc_security_group_ids\")",
"def vpc_security_group_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"vpc_security_group_ids\")"
]
| [
"0.7680698",
"0.7084317",
"0.6718247",
"0.66819984",
"0.66819984",
"0.6648008",
"0.6648008",
"0.6557056",
"0.65039665",
"0.6483536",
"0.6483536",
"0.6401095",
"0.63624",
"0.62879884",
"0.62802815",
"0.6271635",
"0.62600183",
"0.60661876",
"0.6065279",
"0.5980223",
"0.5909057",
"0.5909057",
"0.58907413",
"0.58625317",
"0.58477473",
"0.58337045",
"0.58337045",
"0.58337045",
"0.58257085",
"0.58257085"
]
| 0.81316435 | 0 |
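A minimal usage sketch for the sg_lookup document above; the AWS profile, VPC ID, and security group name are assumed placeholder values, not part of the original row.

# Hedged usage sketch (assumed IDs/names): resolve a security group by its Name tag.
import boto3

session = boto3.session.Session(profile_name="default")  # assumed AWS profile
sg_id = sg_lookup(session, vpc_id="vpc-0123456789abcdef0", group_name="internal.example.vpc")
print(sg_id or "Security group not found in the given VPC")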
Look up the ID for the VPC Route Table with the given name. | def rt_lookup(session, vpc_id, rt_name):
if session is None:
return None
client = session.client('ec2')
response = client.describe_route_tables(Filters=[{"Name": "vpc-id", "Values": [vpc_id]},
{"Name": "tag:Name", "Values": [rt_name]}])
if len(response['RouteTables']) == 0:
return None
else:
return response['RouteTables'][0]['RouteTableId'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def get_id(self, tag_name):\n response = await self.describe(tag_name)\n if response['RouteTables']:\n return response['RouteTables'][0][\"RouteTableId\"]\n else:\n raise RtbDoesntExists",
"def transit_router_route_table_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"transit_router_route_table_id\")",
"def transit_router_route_table_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"transit_router_route_table_id\")",
"def transit_router_route_table_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"transit_router_route_table_id\")",
"def route_table_id(self):\n return self._route_table_id",
"async def get_main_rtb_id_from_vpc(self, vpc_id):\n main_route_table = self._client.describe_route_tables(\n Filters=[\n {\n 'Name': 'vpc-id', 'Values': [vpc_id]\n },\n {\n 'Name': 'association.main', 'Values': [\"true\"]\n\n }\n ])\n try:\n return main_route_table['RouteTables'][0]['Associations'][0]['RouteTableId']\n except IndexError:\n raise RtbDoesntExists",
"def get_t1_router_id_from_name(self, router_name=\"Auto_Test_T1\"):\n T1_GATEWAYS = \"https://{ip}/policy/api/v1/infra/tier-1s\"\n url = T1_GATEWAYS.format(ip=self.nsxt_ip)\n print('Starting GET call to Retrieve information'\n ' about Logical Router : %s' % url)\n t1_id = None\n get_status = None\n\n try:\n response = self.rest.get(\n url, headers=self.headers, auth=(\n self.nsxt_user, self.nsxt_pwd))\n get_status = response.status_code\n print(\n 'Successfully got response object status code %s' % get_status)\n child = json.loads(response.text)\n for t1 in child['results']:\n if t1['display_name'] == router_name:\n t1_id = t1['id']\n print(\"Found router id %s\" % t1_id)\n return t1_id\n except Exception as e:\n print(traceback.format_exc())\n print('get_status: %s' % get_status)\n print('Exception in get a t1 id from name %s' % e)\n print(\"Router %s not found\" % router_name)\n return t1_id",
"async def get_assoc_id(self, tag_name):\n response = await self.describe(tag_name)\n if response['RouteTables'][0][\"Associations\"]:\n return response['RouteTables'][0][\"Associations\"][0][\"RouteTableAssociationId\"]\n else:\n raise RtbDoesntExists",
"def get_route_with_scariness_from_db(route_name):\n connection = administer_route_database.get_route_db_connection()\n route = administer_route_database.get_route_from_db(connection, route_name)\n return route",
"def get_vehicle_id(self, veh_name):\n cond = SQLBinaryExpr(SQLFuncExpr(self.db_func_map[DB_FUNC_NAME_LOWER],\n COL_NAME_VEHICLES_NAME),\n OP_EQ, SQLLiteral(veh_name.lower()))\n entries = self.select_generic_data(select_list=[COL_NAME_VEHICLES_VEHICLEID],\n table_list=[TABLE_NAME_VEHICLES],\n where=cond)\n if len(entries) == 1:\n return entries[0][COL_NAME_VEHICLES_VEHICLEID]\n elif len(entries) > 1:\n raise AdasDBError(\"Vehicle '%s' cannot be resolved because it is ambiguous. (%s)\" % (veh_name, entries))\n\n raise AdasDBError(\"No resolution of '%s'. (%s)\" % (veh_name, entries))",
"async def create(self, tag_name, vpc_id):\n try:\n self._client.create_route_table(\n VpcId=vpc_id,\n TagSpecifications=[\n {\n 'ResourceType': 'route-table',\n 'Tags': [{\n 'Key': 'Name',\n 'Value': tag_name\n }]\n }\n ]\n )\n except Exception:\n raise RtbAlreadyExists",
"def _find_table(name):\n tables = Base.metadata.tables\n table = tables.get(name, None)\n if table is not None:\n return table\n else:\n raise NameError('Unable to locate table: %s' % name)",
"def table_id(self) -> str:\n return pulumi.get(self, \"table_id\")",
"def name_for(cls, table, id):\n try:\n return cls.lookups[table][id]\n except KeyError:\n cur.execute('SELECT id, name FROM \"{}\"'.format(table))\n cls.lookups[table] = {row[0]: row[1] for row in cur.fetchall()}\n return cls.lookups[table][id]",
"def local_gateway_route_table_arn(self) -> Optional[str]:\n return pulumi.get(self, \"local_gateway_route_table_arn\")",
"def rt_name_default(session, vpc_id, new_rt_name):\n client = session.client('ec2')\n response = client.describe_route_tables(Filters=[{\"Name\": \"vpc-id\", \"Values\": [vpc_id]}])\n\n rt_id = None\n for rt in response['RouteTables']:\n nt = _find(rt['Tags'], lambda x: x['Key'] == 'Name')\n if nt is None or nt['Value'] == '':\n rt_id = rt['RouteTableId']\n\n if rt_id is None:\n print(\"Could not locate unnamed default route table\")\n return\n\n resource = session.resource('ec2')\n rt = resource.RouteTable(rt_id)\n response = rt.create_tags(Tags=[{\"Key\": \"Name\", \"Value\": new_rt_name}])",
"def lookup_name(self, name):\n if name not in self.rule_dict:\n raise PegvmException(\"Failed to find rule named '{}'\".format(name))\n return self.rule_dict[name]",
"def route_table(self) -> Optional['outputs.RouteTableResponse']:\n return pulumi.get(self, \"route_table\")",
"def get_route_id(self):\n\n return self.route_id",
"def get(table_name, record_id):\n with get_connection() as conn:\n return rethink.table(table_name).get(record_id).run(conn)",
"def _get_vm_id_by_name(self, vm_name):\n vm_info = self.connection.compute.find_server(vm_name)\n return (vm_info.id if vm_info else None)",
"async def _get_account_id(db, name):\n assert name, 'no account name specified'\n _id = await db.query_one(\"SELECT id FROM hive_accounts WHERE name = :n\", n=name)\n assert _id, \"account not found: `%s`\" % name\n return _id",
"def create_route_table(vpc_id):\n response = EC2.create_route_table(\n VpcId=vpc_id\n )\n return response",
"def get_id(cls, name):\n assert name, 'name is empty'\n if name in cls._ids:\n return cls._ids[name]\n sql = \"SELECT id FROM hive_communities WHERE name = :name\"\n cid = DB.query_one(sql, name=name)\n if cid:\n cls._ids[name] = cid\n cls._names[cid] = name\n return cid",
"def output_name_to_id(self, name):\n for i, o in list(r.outputs.items()):\n if o.name == name:\n return i",
"def route_table_id(self, route_table_id):\n self._route_table_id = route_table_id",
"def transit_router_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"transit_router_id\")",
"def transit_router_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"transit_router_id\")",
"def vpc_id_lookup(session, vpc_domain):\n if session is None:\n return None\n\n client = session.client('ec2')\n response = client.describe_vpcs(Filters=[{\"Name\": \"tag:Name\", \"Values\": [vpc_domain]}])\n if len(response['Vpcs']) == 0:\n return None\n else:\n return response['Vpcs'][0]['VpcId']",
"def get_team_id(self, team_name):\n\n teams = self.get_teams()\n for team in teams:\n if team['name'] == team_name:\n return team['id']\n\n return None"
]
| [
"0.7265778",
"0.6650314",
"0.6650314",
"0.65430045",
"0.65120596",
"0.64262307",
"0.6146517",
"0.60185033",
"0.56920457",
"0.5620676",
"0.5594443",
"0.5552543",
"0.5518078",
"0.5444758",
"0.54276913",
"0.5355043",
"0.5323631",
"0.5280393",
"0.5248865",
"0.52361125",
"0.5224945",
"0.52192634",
"0.5215014",
"0.521184",
"0.5206596",
"0.5189057",
"0.51348835",
"0.51348835",
"0.5123643",
"0.5119686"
]
| 0.7589303 | 0 |
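A matching usage sketch for the rt_lookup document above, under the same assumptions about the session and placeholder IDs.

# Hedged usage sketch (assumed IDs/names): find a named route table inside a VPC.
import boto3

session = boto3.session.Session()
rt_id = rt_lookup(session, vpc_id="vpc-0123456789abcdef0", rt_name="internal.example.rt")
print(rt_id or "Route table not found")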
Name the default Route Table that is created for a new VPC. Find the default VPC Route Table and give it a name so that it can be referenced later. This is needed because, by default, the Route Table has no name, so rt_lookup() will not find it. The default VPC Route Table is identified as the first Route Table without a name. | def rt_name_default(session, vpc_id, new_rt_name):
client = session.client('ec2')
response = client.describe_route_tables(Filters=[{"Name": "vpc-id", "Values": [vpc_id]}])
rt_id = None
for rt in response['RouteTables']:
nt = _find(rt['Tags'], lambda x: x['Key'] == 'Name')
if nt is None or nt['Value'] == '':
rt_id = rt['RouteTableId']
if rt_id is None:
print("Could not locate unnamed default route table")
return
resource = session.resource('ec2')
rt = resource.RouteTable(rt_id)
response = rt.create_tags(Tags=[{"Key": "Name", "Value": new_rt_name}]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rt_lookup(session, vpc_id, rt_name):\n if session is None:\n return None\n\n client = session.client('ec2')\n response = client.describe_route_tables(Filters=[{\"Name\": \"vpc-id\", \"Values\": [vpc_id]},\n {\"Name\": \"tag:Name\", \"Values\": [rt_name]}])\n\n if len(response['RouteTables']) == 0:\n return None\n else:\n return response['RouteTables'][0]['RouteTableId']",
"def add_default_route_to_namespace(node, namespace, default_route):\n cmd = f\"ip netns exec {namespace} ip route add default \" \\\n f\"via {default_route}\"\n exec_cmd_no_error(node, cmd, sudo=True)",
"async def create(self, tag_name, vpc_id):\n try:\n self._client.create_route_table(\n VpcId=vpc_id,\n TagSpecifications=[\n {\n 'ResourceType': 'route-table',\n 'Tags': [{\n 'Key': 'Name',\n 'Value': tag_name\n }]\n }\n ]\n )\n except Exception:\n raise RtbAlreadyExists",
"def create_route_table(vpc_id):\n response = EC2.create_route_table(\n VpcId=vpc_id\n )\n return response",
"def get_default_route():\n # Discover the active/preferred network interface \n # by connecting to Google's public DNS server\n try:\n with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:\n s.settimeout(2)\n s.connect((\"8.8.8.8\", 80))\n iface_ip = s.getsockname()[0]\n except socket.error:\n sys.stderr.write('IoT Inspector cannot run without network connectivity.\\n')\n sys.exit(1)\n\n while True:\n routes = _get_routes()\n default_route = None\n for route in routes:\n if route[4] == iface_ip:\n # Reassign scapy's default interface to the one we selected\n sc.conf.iface = route[3]\n default_route = route[2:5]\n break\n if default_route:\n break\n\n log('get_default_route: retrying')\n time.sleep(1)\n \n\n # If we are using windows, conf.route.routes table doesn't update.\n # We have to update routing table manually for packets\n # to pick the correct route. \n if sys.platform.startswith('win'):\n for i, route in enumerate(routes):\n # if we see our selected iface, update the metrics to 0\n if route[3] == default_route[1]:\n routes[i] = (*route[:-1], 0)\n\n return default_route",
"def s3_table_name_update_default(cls, data):\n\n table = current.s3db.s3_table\n field = table.name\n\n name = data.get(\"name\")\n if not name or name == field.default:\n # The name currently being written is the default,\n # => set a new default for subsequent writes\n field.default = \"%s_%s\" % (DYNAMIC_PREFIX, cls.s3_table_random_name())",
"def _link_route_table():\n if dry:\n print(\"Would link the VPC and subnet in the route table.\")\n return True\n\n vpc = _existing.vpc\n sub = _existing.sub\n igw = _existing.igw\n rt = [x for x in vpc.route_tables.all()]\n if len(rt) == 0:\n print('No route table have been created alongside the VPC. Not sure what to do here.')\n for r in rt:\n print('Linking sub {s} in route table {r}.'.format(\n s=sub.id,\n r=r.id\n ))\n r.associate_with_subnet(SubnetId=sub.id)\n _tag_resource(r)\n\n r.create_route(\n DestinationCidrBlock='0.0.0.0/0',\n GatewayId=igw.id,\n #InstanceId='string',\n #NetworkInterfaceId='string',\n #VpcPeeringConnectionId='string'\n )",
"def post_route_table_create(self, resource_dict):\n pass",
"def pre_route_table_create(self, resource_dict):\n pass",
"def setDefaultRoute( self, intf ):\n self.cmd( 'ip route flush root 0/0' )\n return self.cmd( 'route add default ' + intf )",
"def pre_interface_route_table_create(self, resource_dict):\n pass",
"def get_or_create(self, config):\n\n created_route_tables = []\n index = 0\n for vpc_id, vpc_config in config.iteritems():\n route_tables = filter_resources(\n self.ec2.route_tables, \"vpc-id\", vpc_id)\n\n if not route_tables:\n route_table = self.ec2.create_route_table(VpcId=vpc_id)\n else:\n route_table = self.ec2.RouteTable(route_tables[0].id)\n\n self.logger.info(\n \"A route table \" +\n \"with ID '%s' and attached to vpc '%s' has been created or already exists\",\n route_table.id,\n vpc_id\n )\n\n tag_with_name_with_suffix(\n route_table, \"rt\", index, self.tag_base_name)\n\n created_route_tables.append(\n {\n \"RouteTableId\": route_table.id\n }\n )\n\n index = index + 1\n return {\n vpc_config[\"VpcId\"]: {\n \"RouteTables\": created_route_tables\n }\n }",
"def name(self) -> str:\n return f\"lookup_table_{self.table_number}\"",
"def post_interface_route_table_create(self, resource_dict):\n pass",
"def route_table(self) -> Optional['outputs.RouteTableResponse']:\n return pulumi.get(self, \"route_table\")",
"def getDefaultName(self): # real signature unknown; restored from __doc__\n pass",
"def _get_default_route(self):\n\t\twith open(\"/proc/net/route\") as fh:\n\t\t\tfor line in fh:\n\t\t\t\tfields = line.strip().split()\n\t\t\t\tif fields[1] != '00000000' or not int(fields[3], 16) & 2:\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\treturn socket.inet_ntoa(struct.pack(\"=L\", int(fields[2], 16)))\n\n\t\t\treturn \"1.2.3.4\"",
"def getTableByName(self, tablename):\n pass",
"def __set_name(self):\n table_name = self.get_table_name()\n record, timestamp = self.__get_max_timestamp()\n self.name = \"%s_%s_%s\" % (table_name, record, timestamp)",
"def local_gateway_route_table_arn(self) -> Optional[str]:\n return pulumi.get(self, \"local_gateway_route_table_arn\")",
"async def associate(self, rtb_id, subnet_id):\n self._client.associate_route_table(\n RouteTableId=rtb_id,\n SubnetId=subnet_id,\n )",
"def associate_route_table(route_table_id, subnet_id):\n response = EC2.associate_route_table(\n RouteTableId=route_table_id,\n SubnetId=subnet_id\n )\n return response",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n disable_bgp_route_propagation: Optional[pulumi.Input[bool]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n routes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteTableRouteArgs']]]]] = None,\n subnets: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'RouteTable':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _RouteTableState.__new__(_RouteTableState)\n\n __props__.__dict__[\"disable_bgp_route_propagation\"] = disable_bgp_route_propagation\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"routes\"] = routes\n __props__.__dict__[\"subnets\"] = subnets\n __props__.__dict__[\"tags\"] = tags\n return RouteTable(resource_name, opts=opts, __props__=__props__)",
"def create_default_name_path_rule(endpoint):\r\n path_rule = RERule(**{})\r\n path_rule.pattern = '^(?P<path>.*)'\r\n path_rule.extract = 'template'\r\n path_rule.templates.append('%s\\g<path>' % endpoint)\r\n path_rule.tags.append('name')\r\n return path_rule",
"def __init__(__self__, *,\n resource_group_name: pulumi.Input[str],\n disable_bgp_route_propagation: Optional[pulumi.Input[bool]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n routes: Optional[pulumi.Input[Sequence[pulumi.Input['RouteTableRouteArgs']]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n if disable_bgp_route_propagation is not None:\n pulumi.set(__self__, \"disable_bgp_route_propagation\", disable_bgp_route_propagation)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if routes is not None:\n pulumi.set(__self__, \"routes\", routes)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)",
"def subnet(template, name, vpc, availability_zone='eu-west-1a', cidr='10.0.36.0/24', gateway=None, nat=None,\n map_public_ip=False, acl_table=None):\n s = Subnet(name, template=template)\n s.Tags = Tags(Name=aws_name(s.title))\n s.VpcId = Ref(vpc)\n s.CidrBlock = cidr\n s.MapPublicIpOnLaunch = map_public_ip\n\n if availability_zone:\n s.AvailabilityZone = Ref(availability_zone)\n\n if gateway and nat:\n raise(RuntimeError(\"Don't provide an internet gateway (public) and nat gateway (private) at the same time.\"))\n\n # add public route if an internet gateway is given\n if gateway:\n # route table\n rt = RouteTable('{}RouteTable'.format(name), template=template)\n rt.Tags = Tags(Name=aws_name(rt.title))\n rt.VpcId = Ref(vpc)\n\n # route\n r = Route('{}Route'.format(name), template=template)\n r.DestinationCidrBlock = '0.0.0.0/0'\n r.GatewayId = Ref(gateway)\n # r.DependsOn = InternetGatewayAttachment.title\n r.RouteTableId = Ref(rt)\n\n # associate\n SubnetRouteTableAssociation('{}SubnetRouteTableAssociation'.format(name), template=template,\n RouteTableId=Ref(rt), SubnetId=Ref(s))\n\n # add nat route if an nat gateway is given\n if nat:\n # route table\n rt = RouteTable('{}RouteTable'.format(name), template=template)\n rt.Tags = Tags(Name=aws_name(rt.title))\n rt.VpcId = Ref(vpc)\n\n # route\n r = Route('{}Route'.format(name), template=template)\n r.DestinationCidrBlock = '0.0.0.0/0'\n r.NatGatewayId = Ref(nat)\n # r.DependsOn = InternetGatewayAttachment.title\n r.RouteTableId = Ref(rt)\n\n # associate\n SubnetRouteTableAssociation('{}SubnetRouteTableAssociation'.format(name), template=template,\n RouteTableId=Ref(rt), SubnetId=Ref(s))\n\n # add acl table if one is provided. Defaults to vpc default acl if None is provided\n if acl_table:\n at = SubnetNetworkAclAssociation('{}SubnetAclTableAssociation'.format(name), template=template)\n at.SubnetId = Ref(s)\n at.NetworkAclId = Ref(acl_table)\n\n return s",
"def rename_table(base, tablename: str, table: Table) -> str:\n return snake_to_camel(tablename, upper=True)",
"def __init__(__self__, *,\n disable_bgp_route_propagation: Optional[pulumi.Input[bool]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n routes: Optional[pulumi.Input[Sequence[pulumi.Input['RouteTableRouteArgs']]]] = None,\n subnets: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n if disable_bgp_route_propagation is not None:\n pulumi.set(__self__, \"disable_bgp_route_propagation\", disable_bgp_route_propagation)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if resource_group_name is not None:\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n if routes is not None:\n pulumi.set(__self__, \"routes\", routes)\n if subnets is not None:\n pulumi.set(__self__, \"subnets\", subnets)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)",
"async def get_main_rtb_id_from_vpc(self, vpc_id):\n main_route_table = self._client.describe_route_tables(\n Filters=[\n {\n 'Name': 'vpc-id', 'Values': [vpc_id]\n },\n {\n 'Name': 'association.main', 'Values': [\"true\"]\n\n }\n ])\n try:\n return main_route_table['RouteTables'][0]['Associations'][0]['RouteTableId']\n except IndexError:\n raise RtbDoesntExists",
"def __init__(__self__,\n resource_name: str,\n args: RouteTableArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ..."
]
| [
"0.6255291",
"0.5958707",
"0.5942567",
"0.58998066",
"0.5847473",
"0.5646883",
"0.5562245",
"0.5402062",
"0.54012924",
"0.5368249",
"0.5318241",
"0.52420455",
"0.5207002",
"0.51904994",
"0.5187961",
"0.516698",
"0.51573324",
"0.5153503",
"0.5133534",
"0.51282156",
"0.5078571",
"0.5019892",
"0.49704787",
"0.49466917",
"0.49399337",
"0.49365458",
"0.49340075",
"0.49159068",
"0.4912565",
"0.49059203"
]
| 0.84361637 | 0 |
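A short sketch pairing rt_name_default with rt_lookup from the rows above; the VPC ID and route table name are assumed, and the final lookup only succeeds if the VPC actually had an unnamed default route table to tag.

# Hedged sketch (assumed values): tag the unnamed default route table, then resolve it by name.
import boto3

session = boto3.session.Session()
vpc = "vpc-0123456789abcdef0"  # assumed VPC ID
rt_name_default(session, vpc, new_rt_name="internal.example.rt")
print(rt_lookup(session, vpc, "internal.example.rt"))  # route table ID if tagging succeeded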
Look up the name of a valid Key Pair. If the SSH_KEY environment variable is defined and points to a valid keypair, that keypair name is returned. Otherwise, all of the keypairs are printed to stdout and the user is prompted to select which keypair to use. | def keypair_lookup(session):
if session is None:
return None
client = session.client('ec2')
response = client.describe_key_pairs()
# If SSH_KEY exists and points to a valid Key Pair, use it
key = os.environ.get("SSH_KEY", None) # reuse bastion.py env vars
if key is not None:
kp_name = os.path.basename(key)
if kp_name.endswith(".pem"):
kp_name = kp_name[:-4]
for kp in response['KeyPairs']:
if kp["KeyName"] == kp_name:
return kp_name
print("Key Pairs")
for i in range(len(response['KeyPairs'])):
print("{}: {}".format(i, response['KeyPairs'][i]['KeyName']))
if len(response['KeyPairs']) == 0:
return None
while True:
try:
idx = input("[0]: ")
idx = int(idx if len(idx) > 0 else "0")
return response['KeyPairs'][idx]['KeyName']
except KeyboardInterrupt:
sys.exit(1)
except:
print("Invalid Key Pair number, try again") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_key_pair(ec2, kp_name):\n if not [i for i in ec2.get_all_key_pairs() if str(i).split(':')[1] == kp_name]:\n sys.stderr.write(\"Key pair: {} does not exist, please import_key_pair prior to running.\\n\".format(kp_name))\n sys.exit(1)",
"def key_pair_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key_pair_name\")",
"def key_pair_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key_pair_name\")",
"def key_pair_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"key_pair_name\")",
"def find_n2vc_ssh_keys():\n\n paths = []\n paths.append(os.path.expanduser(\"~/.ssh/\"))\n\n for path in paths:\n if os.path.exists(path):\n private = os.path.expanduser(\"{}/id_n2vc_rsa\".format(path))\n public = os.path.expanduser(\"{}/id_n2vc_rsa.pub\".format(path))\n if os.path.exists(private) and os.path.exists(public):\n return (private, public)\n return (None, None)",
"def find_juju_ssh_keys():\n\n paths = []\n paths.append(os.path.expanduser(\"~/.local/share/juju/ssh\"))\n\n for path in paths:\n if os.path.exists(path):\n private = os.path.expanduser(\"{}/juju_id_rsa\".format(path))\n public = os.path.expanduser(\"{}/juju_id_rsa.pub\".format(path))\n if os.path.exists(private) and os.path.exists(public):\n return (private, public)\n return (None, None)",
"def key_is_present(host):\n if(config.HOST_TYPE == 'linux'):\n status, stdout, stderr = host.conn.execute_command('ls /root/.ssh')\n if status:\n return False\n if 'id_rsa.pub' in stdout[0]:\n return True\n return False\n else:\n status, stdout, stderr = host.conn.execute_command('cmd /c dir \"C:\\\\Program Files (x86)\\\\freeSSHd\"')\n if status:\n return False\n for value in stdout:\n if 'RSAKey.cfg' in value:\n return True\n return False",
"def get_key_pair(self, keyname):\r\n try:\r\n return self.get_all_key_pairs(keynames=[keyname])[0]\r\n except IndexError: # None of those key pairs available\r\n return None",
"def key_pair_name(self) -> str:\n return pulumi.get(self, \"key_pair_name\")",
"def keypair_exists(self, name):\n return name in [key.name for key in self.get_all_key_pairs()]",
"def RSA_KEYPAIR_PREFIX() :\n return os.environ.get( \"ATC_KEYPAIR_PREFIX\", \"atc-dev\" )",
"def cli(ctx):\n return ctx.gi.cannedkeys.get_keys()",
"def get_ssh_key_value_and_no_ssh_key(self) -> Tuple[str, bool]:\n # ssh_key_value\n # read the original value passed by the command\n raw_value = self.raw_param.get(\"ssh_key_value\")\n # try to read the property value corresponding to the parameter from the `mc` object\n value_obtained_from_mc = None\n if (\n self.mc and\n self.mc.linux_profile and\n self.mc.linux_profile.ssh and\n self.mc.linux_profile.ssh.public_keys\n ):\n public_key_obj = safe_list_get(\n self.mc.linux_profile.ssh.public_keys, 0, None\n )\n if public_key_obj:\n value_obtained_from_mc = public_key_obj.key_data\n\n # set default value\n read_from_mc = False\n if value_obtained_from_mc is not None:\n ssh_key_value = value_obtained_from_mc\n read_from_mc = True\n else:\n ssh_key_value = raw_value\n\n # no_ssh_key\n # read the original value passed by the command\n no_ssh_key = self.raw_param.get(\"no_ssh_key\")\n\n # consistent check\n if read_from_mc and no_ssh_key:\n raise CLIInternalError(\n \"Inconsistent state detected, ssh_key_value is read from the `mc` object while no_ssh_key is enabled.\"\n )\n\n # these parameters do not need dynamic completion\n\n # validation\n if not no_ssh_key:\n try:\n if not ssh_key_value or not is_valid_ssh_rsa_public_key(\n ssh_key_value\n ):\n raise ValueError()\n except (TypeError, ValueError):\n shortened_key = truncate_text(ssh_key_value)\n raise InvalidArgumentValueError(\n \"Provided ssh key ({}) is invalid or non-existent\".format(\n shortened_key\n )\n )\n return ssh_key_value, no_ssh_key",
"def _auto_prompt(self, key):\n for k in self._available_keywords:\n if k.name == key:\n if self._isbatch:\n if k.default:\n val = k.default\n else:\n break\n else:\n val = k.prompt()\n self._set(key, val)\n return val\n # Unable to prompt user for value\n raise KeyError(\"key '%s' was not found.\")",
"def check_ssh_key(self):\n return True",
"def get_ssh_key(self, profile):\n ssh_key = '/home/ssm-user/bastion'\n if self._value.has_option(profile, 'ssh_key'):\n ssh_key = self._value.get(profile, 'ssh_key')\n self.logger.info(\"%s is selected as a ssh user\" % ssh_key)\n return ssh_key",
"def find_keys(args):\n key = args['--key']\n if key:\n return [key]\n\n keyfile = args['--apikeys']\n if keyfile:\n return read_keyfile(keyfile)\n\n envkey = os.environ.get('TINYPNG_API_KEY', None)\n if envkey:\n return [envkey]\n\n local_keys = join(abspath(\".\"), \"tinypng.keys\")\n\n if isfile(local_keys):\n return read_keyfile(local_keys)\n\n home_keys = join(expanduser(\"~/.tinypng.keys\"))\n if isfile(home_keys):\n return read_keyfile(home_keys)\n\n return []",
"def get(self, name):\n path = '/os-keypairs/%s' % name\n res = self.client.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Get openstack key pair %s: %s' % (name, truncate(res)))\n return res[0]['keypair']",
"def public_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LinuxProfilePropertiesPublicKeysArgs']]]]:\n return pulumi.get(self, \"public_keys\")",
"def ssh_public_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SshPublicKeyArgs']]]]:\n return pulumi.get(self, \"ssh_public_keys\")",
"def describe_key_pairs(self):\n response = key_pair.describe_key_pairs(self.url, self.verb,\n self.headers, self.version)\n if response is not None :\n res = DescribeKeyPairsResponse.DescribeKeyPairsResponse()\n parseString(str(response.text), res)\n return res\n else :\n return None",
"def get_ssh_key():\n path = os.environ.get(\"TUNE_CLUSTER_SSH_KEY\",\n os.path.expanduser(\"~/ray_bootstrap_key.pem\"))\n if os.path.exists(path):\n return path\n return None",
"def getPubKey(User):\n with settings(key_filename='/Users/eric/.ssh/id_rsa.pub', host_string=watt):\n with cd('/home/%s/.ssh' % (User)):\n auth_keyfile = sudo(\n '( [ -f authorized_keys ] && echo \"authorized_keys\" ) || ( [ -f authorized_keys2 ] && echo \"authorized_keys2\" )')\n key = sudo('head -1 %s' % auth_keyfile)\n\n return key",
"def extract_keys(self, console_output=[], short_name=None):\n\n # TODO(dittrich): Simplify logic here.\n\n if console_output is None:\n raise RuntimeError('[-] no console output to process')\n in_fingerprints = False\n in_pubkeys = False\n fields = list()\n public_dns = None\n public_ip = None\n for line in console_output:\n # Find short_name at first opportunity.\n if short_name is None:\n match = self.digital_ocean_name.match(line)\n short_name = match.group(1)\n if line.startswith('ec2: '):\n line = line[5:].strip()\n else:\n line = line.strip()\n if line.find('(remote-exec): Host: ') >= 0:\n # DigitalOcean style from Terraform\n _host = line.split(' Host: ')[1]\n if _host != \"\":\n public_ip = _host\n public_dns = f'{short_name}.{self.domain}'\n if short_name not in self.hostdict:\n self.hostdict[short_name] = dict()\n self.hostdict[short_name]['public_ip'] = public_ip # noqa\n self.hostdict[short_name]['public_dns'] = public_dns # noqa\n elif line.find('[+] Host: ') >= 0:\n # AWS style from Pulumi\n _host = line.split(' Host: ')[1]\n if _host != \"\":\n public_ip = _host\n try:\n public_dns = socket.gethostbyaddr(public_ip)[0]\n except Exception:\n pass\n elif line.find('BEGIN SSH HOST KEY FINGERPRINTS') >= 0:\n in_fingerprints = True\n continue\n elif line.find('END SSH HOST KEY FINGERPRINTS') >= 0:\n in_fingerprints = False\n continue\n elif line.find('BEGIN SSH HOST PUBLIC KEYS') >= 0:\n in_pubkeys = True\n continue\n elif line.find('END SSH HOST PUBLIC KEYS') >= 0:\n in_pubkeys = False\n continue\n if in_fingerprints:\n if line.startswith('ec2:'):\n hostid, fingerprint = _parse_fingerprint_awsconsole(\n line,\n name=short_name)\n else:\n hostid, fingerprint = _parse_fingerprint_terraform(line)\n try:\n self.hostdict[short_name]['fingerprint'].extend([fingerprint]) # noqa\n except KeyError:\n self.hostdict[short_name]['fingerprint'] = [fingerprint]\n if self.debug:\n self.logger.info('fingerprint: %s', fingerprint)\n elif in_pubkeys:\n # TODO(dittrich): Should use regex instead.\n fields = line.split(' ')\n # 'digitalocean_droplet.red (remote-exec): ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIA69uuX+ItFoAAe+xE9c+XggGw7Z2Z7t3YVRJxSHMupv [email protected]' # noqa\n hostid = f\"{public_dns},{public_ip}\"\n if fields[1] == '(remote-exec):':\n pubkey = f\"{hostid} {fields[2]} {fields[3]}\"\n else:\n pubkey = f\"{hostid} {fields[0]} {fields[1]}\"\n try:\n self.hostdict[short_name]['hostkey'].extend([pubkey])\n except KeyError:\n self.hostdict[short_name]['hostkey'] = [pubkey]\n if self.debug:\n self.logger.info('pubkey: %s', pubkey)",
"def list(self, all_tenants=True):\n query = {}\n path = '/os-keypairs'\n if all_tenants is True:\n query['all_tenants'] = 1\n \n path = '%s?%s' % (path, urlencode(query)) \n \n res = self.client.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Get openstack key pairs: %s' % truncate(res))\n return res[0]['keypairs']",
"def get_keys(opts):\n hosts = KnownHostsStore()\n serverkey = hosts.serverkey(opts.vip_address)\n key_store = KeyStore()\n publickey = key_store.public\n secretkey = key_store.secret\n return {\"publickey\": publickey, \"secretkey\": secretkey,\n \"serverkey\": serverkey}",
"def get_public_keys(vm_):\n key_filename = config.get_cloud_config_value(\n \"ssh_public_key\", vm_, __opts__, search_global=False, default=None\n )\n if key_filename is not None:\n key_filename = os.path.expanduser(key_filename)\n if not os.path.isfile(key_filename):\n raise SaltCloudConfigError(\n \"The defined ssh_public_key '{}' does not exist\".format(key_filename)\n )\n ssh_keys = []\n with salt.utils.files.fopen(key_filename) as rfh:\n for key in rfh.readlines():\n ssh_keys.append(salt.utils.stringutils.to_unicode(key))\n\n return ssh_keys",
"def read_keys():\n with open('%s/.aws/credentials' % os.getenv('HOME'), 'rt') as infile:\n for line in infile:\n if 'aws_access_key_id' in line:\n aws_access_key_id = line.split('=')[-1].strip()\n if 'aws_secret_access_key' in line:\n aws_secret_access_key = line.split('=')[-1].strip()\n return aws_access_key_id, aws_secret_access_key",
"def _get_key_pair_by_id(key_pair_id):\n\n ec2_client = connection.EC2ConnectionClient().client()\n\n try:\n key_pairs = ec2_client.get_all_key_pairs(keynames=key_pair_id)\n except (boto.exception.EC2ResponseError,\n boto.exception.BotoServerError) as e:\n raise NonRecoverableError('{0}'.format(str(e)))\n\n return key_pairs[0] if key_pairs else None",
"def get_key_filename(vm_):\n key_filename = config.get_cloud_config_value(\n \"ssh_private_key\", vm_, __opts__, search_global=False, default=None\n )\n if key_filename is not None:\n key_filename = os.path.expanduser(key_filename)\n if not os.path.isfile(key_filename):\n raise SaltCloudConfigError(\n \"The defined ssh_private_key '{}' does not exist\".format(key_filename)\n )\n\n return key_filename"
]
| [
"0.6621088",
"0.61450076",
"0.61450076",
"0.6030742",
"0.5993051",
"0.5961809",
"0.58859694",
"0.5711315",
"0.55931115",
"0.5490621",
"0.54890466",
"0.5479529",
"0.54452103",
"0.5428441",
"0.54208297",
"0.5379685",
"0.5375169",
"0.53638816",
"0.53498614",
"0.5281988",
"0.5258914",
"0.52561593",
"0.52463216",
"0.5236901",
"0.5228688",
"0.5224669",
"0.5211085",
"0.5197583",
"0.51963615",
"0.515252"
]
| 0.78218615 | 0 |
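A sketch of the non-interactive path through keypair_lookup above: pre-seeding SSH_KEY skips the prompt, provided a key pair with that name exists in the account. The key path is an assumed example.

# Hedged sketch: pre-seed SSH_KEY (assumed path) so keypair_lookup resolves without prompting.
import os
import boto3

os.environ["SSH_KEY"] = "/home/user/.ssh/my-keypair.pem"  # assumed local key path
session = boto3.session.Session()
print(keypair_lookup(session))  # "my-keypair" if that key pair exists, else interactive prompt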
Look up an instance ID by its hostname (instance Name tag). | def instanceid_lookup(session, hostname):
if session is None:
return None
client = session.client('ec2')
response = client.describe_instances(
Filters=[{"Name": "tag:Name", "Values": [hostname]}])
item = response['Reservations']
if len(item) == 0:
return None
else:
item = item[0]['Instances']
if len(item) == 0:
return None
else:
item = item[0]
if 'InstanceId' in item:
return item['InstanceId']
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getHostKey(instance):\n return instance['hostname']",
"def get_instance_id(self):\n return \"{0}-{1}\".format(self._vc_name, self._host)",
"def instance_public_lookup(session, hostname):\n if session is None:\n return None\n\n client = session.client('ec2')\n response = client.describe_instances(\n Filters=[{\"Name\": \"tag:Name\", \"Values\": [hostname]},\n {\"Name\": \"instance-state-name\", \"Values\": [\"running\"]}])\n\n item = response['Reservations']\n if len(item) == 0:\n return None\n else:\n item = item[0]['Instances']\n if len(item) == 0:\n return None\n else:\n item = item[0]\n if 'PublicDnsName' in item:\n return item['PublicDnsName']\n return None",
"def machine_lookup(session, hostname, public_ip = True):\n\n try:\n idx, target = hostname.split('.', 1)\n idx = int(idx) # if it is not a valid number, then it is a hostname\n hostname = target\n except:\n idx = 0\n\n client = session.client('ec2')\n response = client.describe_instances(Filters=[{\"Name\":\"tag:Name\", \"Values\":[hostname]},\n {\"Name\":\"instance-state-name\", \"Values\":[\"running\"]}])\n\n item = response['Reservations']\n if len(item) == 0:\n print(\"Could not find IP address for '{}'\".format(hostname))\n return None\n else:\n item.sort(key = lambda i: i['Instances'][0][\"InstanceId\"])\n\n if len(item) <= idx:\n print(\"Could not find IP address for '{}' index '{}'\".format(hostname, idx))\n return None\n else:\n item = item[idx]['Instances'][0]\n if 'PublicIpAddress' in item and public_ip:\n return item['PublicIpAddress']\n elif 'PrivateIpAddress' in item and not public_ip:\n return item['PrivateIpAddress']\n else:\n print(\"Could not find IP address for '{}'\".format(hostname))\n return None",
"def select_host_id(hostname):\n return IMPL.select_host_id(hostname)",
"def get_self_instance_id():\n\n logging.debug('get_self_instance_id()')\n response = urllib2.urlopen('http://169.254.169.254/1.0/meta-data/instance-id')\n instance_id = response.read()\n return instance_id",
"def instance_id(self) -> str:\n return pulumi.get(self, \"instance_id\")",
"def _get_instance_id(self):\n return self.__instance_id",
"def _get_ids_from_hostname(self, hostname):\r\n results = self.list_instances(hostname=hostname, mask=\"id\")\r\n return [result['id'] for result in results]",
"def host_id(self) -> str:\n return pulumi.get(self, \"host_id\")",
"def rds_lookup(session, hostname):\n\n client = session.client('rds')\n response = client.describe_db_instances(DBInstanceIdentifier=hostname)\n\n item = response['DBInstances']\n if len(item) == 0:\n print(\"Could not find DNS for '{}'\".format(hostname))\n return None\n else:\n return item[0]['Endpoint']['Address']",
"def get_instance_id():\n global _instance_id\n if _instance_id == '__unset':\n try:\n _instance_id = _fetch_instance_id()\n except IOError:\n log.exception(\"Exception retrieving InstanceId\")\n _instance_id = None\n\n return _instance_id",
"def get_instance_id(self):\n return self.instance_id",
"def get_host_id(self, hostName):\n cmd = \"svcinfo lshost -filtervalue name=%s -delim :\" % (hostName)\n output = self._svc_command(cmd)[0]\n\n if len(output) != 2:\n return None\n\n header = output[0].split(':')\n values = output[1].split(':')\n index = header.index(SVC_KEY_HOST_ID)\n hostId = values[index]\n return hostId",
"def instance_id(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"instance_id\")",
"def instance_id(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"instance_id\")",
"def instance_id(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"instance_id\")",
"def get_id(self, fqdn):\n res = self.db.execute(sqlalchemy.select([ model.imaging_servers.c.id ],\n whereclause=(model.imaging_servers.c.fqdn==fqdn)))\n return self.singleton(res)",
"def instance_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_id\")",
"def instance_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_id\")",
"def instance_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_id\")",
"def instance_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_id\")",
"def instance_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_id\")",
"def instance_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"instance_id\")",
"def _get_vm_id_by_name(self, vm_name):\n vm_info = self.connection.compute.find_server(vm_name)\n return (vm_info.id if vm_info else None)",
"def instance_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"instance_id\")",
"def instance_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"instance_id\")",
"def instance_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"instance_id\")",
"def instance_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"instance_id\")",
"def instance_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"instance_id\")"
]
| [
"0.7457449",
"0.7274183",
"0.7065669",
"0.7018674",
"0.69828963",
"0.6804619",
"0.6804448",
"0.66520125",
"0.665048",
"0.66439986",
"0.65985245",
"0.6568026",
"0.654998",
"0.6524222",
"0.65042883",
"0.65042883",
"0.65042883",
"0.64500105",
"0.6423488",
"0.6423488",
"0.6423488",
"0.6423488",
"0.6423488",
"0.6423488",
"0.6414032",
"0.6401667",
"0.6401667",
"0.6401667",
"0.6401667",
"0.6401667"
]
| 0.8307429 | 0 |
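A usage sketch for the instanceid_lookup document above; the hostname is an assumed placeholder and must match the instance's Name tag.

# Hedged usage sketch (assumed hostname): resolve an EC2 instance ID from its Name tag.
import boto3

session = boto3.session.Session()
instance_id = instanceid_lookup(session, "bastion.example.internal")
print(instance_id or "No instance with that Name tag")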
Looks up the ARN for an SSL Certificate. | def cert_arn_lookup(session, domain_name):
if session is None:
return None
client = session.client('acm')
response = client.list_certificates()
for certs in response['CertificateSummaryList']:
if certs['DomainName'] == domain_name:
return certs['CertificateArn']
if certs['DomainName'].startswith('*'): # if it is a wildcard domain like "*.thebossdev.io"
cert_name = certs['DomainName'][1:] + '$'
if re.search(cert_name, domain_name) != None:
return certs['CertificateArn']
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_ssl_certificate_arn(environment):\n name = Constants['SslCertificateName'][environment]\n\n certificates = ACM.list_certificates(CertificateStatuses=[ 'ISSUED' ])['CertificateSummaryList']\n arns = [ c['CertificateArn'] for c in certificates if c['DomainName'] == name ]\n\n if len(arns) == 0:\n raise Exception('Missing certificate %s on AWS. Please create it and then re-run this script.' % name)\n\n return arns[0]",
"def get_certificate_from_arn(self, certificate_arn):\n with stats.timer('get_certificate_from_arn'):\n client = confidant.clients.get_boto_client('acm-pca')\n # When a certificate is issued, it may take a while before it's\n # available via get_certificate. We need to keep retrying until it's\n # fully issued.\n i = 0\n while True:\n try:\n response = client.get_certificate(\n CertificateAuthorityArn=self.settings['arn'],\n CertificateArn=certificate_arn,\n )\n break\n except client.exceptions.RequestInProgressException:\n # Sleep for a maximum of 10 seconds\n if i >= 50:\n raise\n logger.debug(\n 'Sleeping in get_certificate_from_arn for {}'.format(\n certificate_arn,\n )\n )\n time.sleep(.200)\n i = i + 1\n return {\n 'certificate': response['Certificate'],\n 'certificate_chain': response['CertificateChain'],\n }",
"def get_cert_arn(region: str, domain: str) -> str:\n client = boto3.client('acm', region_name=region)\n\n # as of 12/2021, we now need to tell boto3's ACM client to list other cyphers so that we see our new certs [jdw]\n includes = {\n 'keyTypes': ['RSA_2048', 'EC_prime256v1', 'EC_secp384r1']\n }\n response = client.list_certificates(Includes=includes)\n for x in response['CertificateSummaryList']:\n if domain in x['DomainName']:\n return x['CertificateArn']\n\n raise ValueError(f\"Cannot find ACM certificate for domain '{domain}' in region {region}\")",
"def get_ssl_certificate():",
"def get_ssl_certificate() :",
"def retrieve_key_and_cert_retryable(acm_util, acm_cert_arn, acm_key_passphrase):\n logger.info(\"Attempting to retrieve cert and key from AWS...\")\n return acm_util.export_certificate(\n CertificateArn=acm_cert_arn, Passphrase=acm_key_passphrase\n )",
"def find_certificate(p): # find_certificate(props, /)\n\n for page in acm.get_paginator('list_certificates').paginate():\n for certificate in page['CertificateSummaryList']:\n log_info(certificate)\n\n if p['DomainName'].lower() == certificate['DomainName']:\n tags = {tag['Key']: tag['Value'] for tag in\n acm.list_tags_for_certificate(**{'CertificateArn': certificate['CertificateArn']})['Tags']}\n\n if (tags.get('cloudformation:' + 'logical-id') == e['LogicalResourceId'] and\n tags.get('cloudformation:' + 'stack-id') == e['StackId'] and\n tags.get('cloudformation:' + 'properties') == hash_func(p)\n ):\n return certificate['CertificateArn']",
"def request_cert():\n\n api_request = shallow_copy(props)\n\n for key in ['ServiceToken', 'Region', 'Tags', 'Route53RoleArn']:\n api_request.pop(key, None)\n\n if 'ValidationMethod' in props:\n if props['ValidationMethod'] == 'DNS':\n\n # Check that we have all the hosted zone information we need to validate\n # before we create the certificate\n for name in set([props['DomainName']] + props.get('SubjectAlternativeNames', [])):\n get_zone_for(name)\n\n del api_request['DomainValidationOptions']\n\n e['PhysicalResourceId'] = acm.request_certificate(\n IdempotencyToken=i_token,\n **api_request\n )['CertificateArn']\n add_tags()",
"def ca_cert_resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ca_cert_resource_id\")",
"def ca_cert_resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ca_cert_resource_id\")",
"def ca_cert_resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ca_cert_resource_id\")",
"def ca_certificate(self) -> str:\n return pulumi.get(self, \"ca_certificate\")",
"def ca_certificate(self) -> str:\n return pulumi.get(self, \"ca_certificate\")",
"def ca_certificate(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ca_certificate\")",
"def ssl_cert(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ssl_cert\")",
"def ssl_cert(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ssl_cert\")",
"def ssl_cert(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"ssl_cert\")",
"def certificate_renewal_status_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:\n acm = session.client(\"acm\")\n iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()\n try: \n acm_certs = list_certificates(cache, session)\n for carn in acm_certs:\n # Get ACM Cert Details\n cert = acm.describe_certificate(CertificateArn=carn)[\"Certificate\"]\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(cert,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n cDomainName = str(cert['DomainName'])\n cIssuer = str(cert['Issuer'])\n cSerial = str(cert['Serial'])\n cStatus = str(cert['Status'])\n cKeyAlgo = str(cert['KeyAlgorithm'])\n \n #Will trigger key error if certificate type is not AMAZON_ISSUED\n renewal_status = cert['RenewalSummary'].get('RenewalStatus', '')\n if renewal_status == 'FAILED':\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": carn + \"/acm-cert-renewal-status-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": carn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks/AWS Security Best Practices\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"HIGH\"},\n \"Confidence\": 99,\n \"Title\": \"[ACM.4] ACM Certificates should be renewed successfully\",\n \"Description\": f\"ACM Certificate {carn} renewal has failed\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on certificate renewals, please refer to the Managed Renewal section of the AWS Certificate Manager User Guide.\",\n \"Url\": \"https://docs.aws.amazon.com/acm/latest/userguide/check-certificate-renewal-status.html\"\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Security Services\",\n \"AssetService\": \"Amazon Certificate Manager\",\n \"AssetComponent\": \"Certificate\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsCertificateManagerCertificate\",\n \"Id\": carn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsCertificateManagerCertificate\": {\n \"DomainName\": cDomainName,\n \"Issuer\": cIssuer,\n \"Serial\": cSerial,\n \"KeyAlgorithm\": cKeyAlgo,\n \"Status\": cStatus\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.MA-1\",\n \"NIST SP 800-53 Rev. 4 MA-2\",\n \"NIST SP 800-53 Rev. 4 MA-3\",\n \"NIST SP 800-53 Rev. 4 MA-5\",\n \"NIST SP 800-53 Rev. 
4 MA-6\",\n \"AICPA TSC CC8.1\",\n \"ISO 27001:2013 A.11.1.2\",\n \"ISO 27001:2013 A.11.2.4\",\n \"ISO 27001:2013 A.11.2.5\",\n \"ISO 27001:2013 A.11.2.6\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n elif renewal_status == 'PENDING_VALIDATION':\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": carn + \"/acm-cert-renewal-status-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": carn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks/AWS Security Best Practices\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"LOW\"},\n \"Confidence\": 99,\n \"Title\": \"[ACM.4] ACM Certificates should be renewed successfully\",\n \"Description\": f\"ACM Certificate {carn} renewal is pending user validation\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on certificate renewals, please refer to the Managed Renewal section of the AWS Certificate Manager User Guide.\",\n \"Url\": \"https://docs.aws.amazon.com/acm/latest/userguide/check-certificate-renewal-status.html\"\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Security Services\",\n \"AssetService\": \"Amazon Certificate Manager\",\n \"AssetComponent\": \"Certificate\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsCertificateManagerCertificate\",\n \"Id\": carn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsCertificateManagerCertificate\": {\n \"DomainName\": cDomainName,\n \"Issuer\": cIssuer,\n \"Serial\": cSerial,\n \"KeyAlgorithm\": cKeyAlgo,\n \"Status\": cStatus\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.MA-1\",\n \"NIST SP 800-53 Rev. 4 MA-2\",\n \"NIST SP 800-53 Rev. 4 MA-3\",\n \"NIST SP 800-53 Rev. 4 MA-5\",\n \"NIST SP 800-53 Rev. 
4 MA-6\",\n \"AICPA TSC CC8.1\",\n \"ISO 27001:2013 A.11.1.2\",\n \"ISO 27001:2013 A.11.2.4\",\n \"ISO 27001:2013 A.11.2.5\",\n \"ISO 27001:2013 A.11.2.6\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n elif renewal_status == 'PENDING_AUTO_RENEWAL' or renewal_status == 'SUCCESS':\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": carn + \"/acm-cert-renewal-status-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": carn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks/AWS Security Best Practices\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[ACM.4] ACM Certificates should be renewed successfully\",\n \"Description\": f\"ACM Certificate {carn} renewal is in a {str(cert['RenewalSummary']['RenewalStatus'])} state\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on certificate renewals, please refer to the Managed Renewal section of the AWS Certificate Manager User Guide.\",\n \"Url\": \"https://docs.aws.amazon.com/acm/latest/userguide/check-certificate-renewal-status.html\"\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Security Services\",\n \"AssetService\": \"Amazon Certificate Manager\",\n \"AssetComponent\": \"Certificate\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsCertificateManagerCertificate\",\n \"Id\": carn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsCertificateManagerCertificate\": {\n \"DomainName\": cDomainName,\n \"Issuer\": cIssuer,\n \"Serial\": cSerial,\n \"KeyAlgorithm\": cKeyAlgo,\n \"Status\": cStatus\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.MA-1\",\n \"NIST SP 800-53 Rev. 4 MA-2\",\n \"NIST SP 800-53 Rev. 4 MA-3\",\n \"NIST SP 800-53 Rev. 4 MA-5\",\n \"NIST SP 800-53 Rev. 4 MA-6\",\n \"AICPA TSC CC8.1\",\n \"ISO 27001:2013 A.11.1.2\",\n \"ISO 27001:2013 A.11.2.4\",\n \"ISO 27001:2013 A.11.2.5\",\n \"ISO 27001:2013 A.11.2.6\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding\n except KeyError as e:\n pass",
"def ssl_key(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"ssl_key\")",
"def certificate_status_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:\n acm = session.client(\"acm\")\n iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()\n acm_certs = list_certificates(cache, session)\n for carn in acm_certs:\n # Get ACM Cert Details\n cert = acm.describe_certificate(CertificateArn=carn)[\"Certificate\"]\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(cert,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n cDomainName = str(cert['DomainName'])\n cIssuer = str(cert['Issuer'])\n cSerial = str(cert['Serial'])\n cStatus = str(cert['Status'])\n cKeyAlgo = str(cert['KeyAlgorithm'])\n # this is a passing check\n if cStatus == 'ISSUED':\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": carn + \"/acm-cert-status-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": carn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks/AWS Security Best Practices\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[ACM.5] ACM Certificates should be correctly validated\",\n \"Description\": f\"ACM Certificate {carn} is successfully issued\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on certificate issuing, please refer to the Issuing Certificates section of the AWS Certificate Manager User Guide.\",\n \"Url\": \"https://docs.aws.amazon.com/acm/latest/userguide/gs.html\"\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Security Services\",\n \"AssetService\": \"Amazon Certificate Manager\",\n \"AssetComponent\": \"Certificate\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsCertificateManagerCertificate\",\n \"Id\": carn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsCertificateManagerCertificate\": {\n \"DomainName\": cDomainName,\n \"Issuer\": cIssuer,\n \"Serial\": cSerial,\n \"KeyAlgorithm\": cKeyAlgo,\n \"Status\": cStatus\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.MA-1\",\n \"NIST SP 800-53 Rev. 4 MA-2\",\n \"NIST SP 800-53 Rev. 4 MA-3\",\n \"NIST SP 800-53 Rev. 4 MA-5\",\n \"NIST SP 800-53 Rev. 
4 MA-6\",\n \"AICPA TSC CC8.1\",\n \"ISO 27001:2013 A.11.1.2\",\n \"ISO 27001:2013 A.11.2.4\",\n \"ISO 27001:2013 A.11.2.5\",\n \"ISO 27001:2013 A.11.2.6\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding\n elif cStatus == 'EXPIRED' or \\\n cStatus == 'VALIDATION_TIMED_OUT' or \\\n cStatus == 'REVOKED' or \\\n cStatus == 'FAILED':\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": carn + \"/acm-cert-renewal-status-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": carn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks/AWS Security Best Practices\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"HIGH\"},\n \"Confidence\": 99,\n \"Title\": \"[ACM.5] ACM Certificates should be correctly validated\",\n \"Description\": f\"ACM Certificate {carn} has not been successfully issued. State: {cStatus}\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on certificate issuing, please refer to the Issuing Certificates section of the AWS Certificate Manager User Guide.\",\n \"Url\": \"https://docs.aws.amazon.com/acm/latest/userguide/gs.html\"\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Security Services\",\n \"AssetService\": \"Amazon Certificate Manager\",\n \"AssetComponent\": \"Certificate\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsCertificateManagerCertificate\",\n \"Id\": carn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsCertificateManagerCertificate\": {\n \"DomainName\": cDomainName,\n \"Issuer\": cIssuer,\n \"Serial\": cSerial,\n \"KeyAlgorithm\": cKeyAlgo,\n \"Status\": cStatus\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.MA-1\",\n \"NIST SP 800-53 Rev. 4 MA-2\",\n \"NIST SP 800-53 Rev. 4 MA-3\",\n \"NIST SP 800-53 Rev. 4 MA-5\",\n \"NIST SP 800-53 Rev. 4 MA-6\",\n \"AICPA TSC CC8.1\",\n \"ISO 27001:2013 A.11.1.2\",\n \"ISO 27001:2013 A.11.2.4\",\n \"ISO 27001:2013 A.11.2.5\",\n \"ISO 27001:2013 A.11.2.6\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding",
"def get_certificate_authority_certificate(self):\n client = confidant.clients.get_boto_client('acm-pca')\n certificate = client.get_certificate_authority_certificate(\n CertificateAuthorityArn=self.settings['arn'],\n )\n # TODO: support pagination for this call\n tags = client.list_tags(\n CertificateAuthorityArn=self.settings['arn'],\n )\n _tags = {}\n for tag in tags['Tags']:\n _tags[tag['Key']] = tag['Value']\n return {\n 'ca': self.ca_name,\n 'certificate': certificate['Certificate'],\n 'certificate_chain': certificate['CertificateChain'],\n 'tags': _tags,\n }",
"def certificate_in_use_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:\n acm = session.client(\"acm\")\n iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()\n for carn in list_certificates(cache, session):\n # Get ACM Cert Details\n cert = acm.describe_certificate(CertificateArn=carn)[\"Certificate\"]\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(cert,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n cDomainName = str(cert['DomainName'])\n cIssuer = str(cert['Issuer'])\n cSerial = str(cert['Serial'])\n cStatus = str(cert['Status'])\n cKeyAlgo = str(cert['KeyAlgorithm'])\n useLen = len(cert[\"InUseBy\"])\n # this is a failing check\n if useLen == 0:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": carn + \"/acm-cert-in-use-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": carn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks/AWS Security Best Practices\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"LOW\"},\n \"Confidence\": 99,\n \"Title\": \"[ACM.2] ACM Certificates should be in use\",\n \"Description\": \"ACM Certificate \"\n + carn\n + \" is currently not in use, this can be indicative of an orphaned certificate or that the downstream workloads are no longer active (maliciously or not). Refer to the remediation instructions if this configuration is not intended\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on installing certifactes refer to the Services integrated with AWS Certificate Manager section of the AWS Certificate Manager User Guide.\",\n \"Url\": \"https://docs.aws.amazon.com/acm/latest/userguide/acm-services.html\"\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Security Services\",\n \"AssetService\": \"Amazon Certificate Manager\",\n \"AssetComponent\": \"Certificate\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsCertificateManagerCertificate\",\n \"Id\": carn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsCertificateManagerCertificate\": {\n \"DomainName\": cDomainName,\n \"Issuer\": cIssuer,\n \"Serial\": cSerial,\n \"KeyAlgorithm\": cKeyAlgo,\n \"Status\": cStatus\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 ID.AM-2\",\n \"NIST SP 800-53 Rev. 4 CM-8\",\n \"NIST SP 800-53 Rev. 
4 PM-5\",\n \"AICPA TSC CC3.2\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.8.1.1\",\n \"ISO 27001:2013 A.8.1.2\",\n \"ISO 27001:2013 A.12.5.1\",\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": carn + \"/acm-cert-in-use-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": carn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks/AWS Security Best Practices\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[ACM.2] ACM Certificates should be in use\",\n \"Description\": \"ACM Certificate \"\n + carn\n + \" is in use.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on installing certifactes refer to the Services integrated with AWS Certificate Manager section of the AWS Certificate Manager User Guide.\",\n \"Url\": \"https://docs.aws.amazon.com/acm/latest/userguide/acm-services.html\"\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Security Services\",\n \"AssetService\": \"Amazon Certificate Manager\",\n \"AssetComponent\": \"Certificate\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsCertificateManagerCertificate\",\n \"Id\": carn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsCertificateManagerCertificate\": {\n \"DomainName\": cDomainName,\n \"Issuer\": cIssuer,\n \"Serial\": cSerial,\n \"KeyAlgorithm\": cKeyAlgo,\n \"Status\": cStatus\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 ID.AM-2\",\n \"NIST SP 800-53 Rev. 4 CM-8\",\n \"NIST SP 800-53 Rev. 4 PM-5\",\n \"AICPA TSC CC3.2\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.8.1.1\",\n \"ISO 27001:2013 A.8.1.2\",\n \"ISO 27001:2013 A.12.5.1\",\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding",
"def ssl_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ssl_key\")",
"def ssl_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ssl_key\")",
"def certificate(self) -> str:\n return pulumi.get(self, \"certificate\")",
"def certificate_revocation_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:\n acm = session.client(\"acm\")\n iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()\n for carn in list_certificates(cache, session):\n # Get ACM Cert Details\n cert = acm.describe_certificate(CertificateArn=carn)[\"Certificate\"]\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(cert,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n cDomainName = str(cert['DomainName'])\n cIssuer = str(cert['Issuer'])\n cSerial = str(cert['Serial'])\n cStatus = str(cert['Status'])\n cKeyAlgo = str(cert['KeyAlgorithm'])\n try:\n # this is a failing check\n revokeReason = str(cert['RevocationReason'])\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": carn + \"/acm-cert-revoke-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": carn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices\",\n \"Effects/Denial of Service\"\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"CRITICAL\"},\n \"Confidence\": 99,\n \"Title\": \"[ACM.1] ACM Certificates should be monitored for revocation\",\n \"Description\": \"ACM Certificate \"\n + carn\n + \" is currently revoked due to \"\n + revokeReason\n + \". If the Certificate was in use by any applications they are likely unavailable or returning certificate revocation and invalidity warnings to end-users who are attempting to browse to your applications. You should immediately generate new certificates and distribute them to your applications (CloudFront, ALB Listeners, self-managed web applicaitons) and communicate with clients and other end-users. Refer to the remediation instructions if this configuration is not intended.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on revocation of certificates, review the ACM FAQ on the topic of 'Revoke'\",\n \"Url\": \"https://aws.amazon.com/certificate-manager/faqs/\"\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Security Services\",\n \"AssetService\": \"Amazon Certificate Manager\",\n \"AssetComponent\": \"Certificate\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsCertificateManagerCertificate\",\n \"Id\": carn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsCertificateManagerCertificate\": {\n \"DomainName\": cDomainName,\n \"Issuer\": cIssuer,\n \"Serial\": cSerial,\n \"KeyAlgorithm\": cKeyAlgo,\n \"Status\": cStatus\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.MA-1\",\n \"NIST SP 800-53 Rev. 4 MA-2\",\n \"NIST SP 800-53 Rev. 4 MA-3\",\n \"NIST SP 800-53 Rev. 4 MA-5\",\n \"NIST SP 800-53 Rev. 
4 MA-6\",\n \"AICPA TSC CC8.1\",\n \"ISO 27001:2013 A.11.1.2\",\n \"ISO 27001:2013 A.11.2.4\",\n \"ISO 27001:2013 A.11.2.5\",\n \"ISO 27001:2013 A.11.2.6\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n except Exception as e:\n if str(e) == \"'RevocationReason'\":\n # this is a passing check\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": carn + \"/acm-cert-revoke-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": carn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices\",\n \"Effects/Denial of Service\"\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[ACM.1] ACM Certificates should be monitored for revocation\",\n \"Description\": \"ACM Certificate \"\n + carn\n + \" is not currently revoked.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on revocation of certificates, review the ACM FAQ on the topic of 'Revoke'\",\n \"Url\": \"https://aws.amazon.com/certificate-manager/faqs/\"\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Security Services\",\n \"AssetService\": \"Amazon Certificate Manager\",\n \"AssetComponent\": \"Certificate\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsCertificateManagerCertificate\",\n \"Id\": carn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsCertificateManagerCertificate\": {\n \"DomainName\": cDomainName,\n \"Issuer\": cIssuer,\n \"Serial\": cSerial,\n \"KeyAlgorithm\": cKeyAlgo,\n \"Status\": cStatus\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.MA-1\",\n \"NIST SP 800-53 Rev. 4 MA-2\",\n \"NIST SP 800-53 Rev. 4 MA-3\",\n \"NIST SP 800-53 Rev. 4 MA-5\",\n \"NIST SP 800-53 Rev. 4 MA-6\",\n \"AICPA TSC CC8.1\",\n \"ISO 27001:2013 A.11.1.2\",\n \"ISO 27001:2013 A.11.2.4\",\n \"ISO 27001:2013 A.11.2.5\",\n \"ISO 27001:2013 A.11.2.6\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding\n else:\n print(e)",
"def Certificate(self) -> _n_8_t_0:",
"def Certificate(self) -> _n_8_t_0:",
"def certificate(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"certificate\")",
"def certificate(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"certificate\")"
]
| [
"0.7575733",
"0.67465764",
"0.64577544",
"0.64160997",
"0.628164",
"0.6254604",
"0.6073698",
"0.5756983",
"0.5599451",
"0.5599451",
"0.5599451",
"0.5577368",
"0.5577368",
"0.5577027",
"0.5553289",
"0.5553289",
"0.5549295",
"0.5541539",
"0.550942",
"0.5448711",
"0.5382661",
"0.5379329",
"0.5378888",
"0.5378888",
"0.53504974",
"0.53388226",
"0.53292376",
"0.53292376",
"0.5293488",
"0.5293488"
]
| 0.7114357 | 1 |
Look up the Public DNS name for an EC2 instance | def instance_public_lookup(session, hostname):
if session is None:
return None
client = session.client('ec2')
response = client.describe_instances(
Filters=[{"Name": "tag:Name", "Values": [hostname]},
{"Name": "instance-state-name", "Values": ["running"]}])
item = response['Reservations']
if len(item) == 0:
return None
else:
item = item[0]['Instances']
if len(item) == 0:
return None
else:
item = item[0]
if 'PublicDnsName' in item:
return item['PublicDnsName']
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rds_lookup(session, hostname):\n\n client = session.client('rds')\n response = client.describe_db_instances(DBInstanceIdentifier=hostname)\n\n item = response['DBInstances']\n if len(item) == 0:\n print(\"Could not find DNS for '{}'\".format(hostname))\n return None\n else:\n return item[0]['Endpoint']['Address']",
"def get_instance_by_cname ( ec2_conn, r53_conn, dns_cname ) :\n record = get_r53_record_by_name( r53_conn, dns_cname, record_type = 'CNAME' )\n if record :\n instance_dns_name = record.resource_records[ 0 ]\n print \"Getting ec2 instance with DNS name: \" + instance_dns_name\n instances = ec2_conn.get_only_instances( filters = { \"dns-name\": [ instance_dns_name ] } )\n for instance in instances :\n return instance",
"def machine_lookup(session, hostname, public_ip = True):\n\n try:\n idx, target = hostname.split('.', 1)\n idx = int(idx) # if it is not a valid number, then it is a hostname\n hostname = target\n except:\n idx = 0\n\n client = session.client('ec2')\n response = client.describe_instances(Filters=[{\"Name\":\"tag:Name\", \"Values\":[hostname]},\n {\"Name\":\"instance-state-name\", \"Values\":[\"running\"]}])\n\n item = response['Reservations']\n if len(item) == 0:\n print(\"Could not find IP address for '{}'\".format(hostname))\n return None\n else:\n item.sort(key = lambda i: i['Instances'][0][\"InstanceId\"])\n\n if len(item) <= idx:\n print(\"Could not find IP address for '{}' index '{}'\".format(hostname, idx))\n return None\n else:\n item = item[idx]['Instances'][0]\n if 'PublicIpAddress' in item and public_ip:\n return item['PublicIpAddress']\n elif 'PrivateIpAddress' in item and not public_ip:\n return item['PrivateIpAddress']\n else:\n print(\"Could not find IP address for '{}'\".format(hostname))\n return None",
"def amazon_public_address() -> str:\n check_url = 'http://169.254.169.254/latest/meta-data/public-ipv4'\n check_timeout = float(CONFIG['network']['check_timeout'])\n try:\n with urllib.request.urlopen(\n url=check_url, timeout=check_timeout,\n ) as response:\n return response.read().decode().strip()\n except Exception as error:\n return None",
"def elb_public_lookup(session, hostname):\n\n if session is None:\n return None\n\n client = session.client('elb')\n responses = client.describe_load_balancers()\n\n hostname_ = hostname.replace(\".\", \"-\")\n\n for response in responses[\"LoadBalancerDescriptions\"]:\n if response[\"LoadBalancerName\"].startswith(hostname_):\n return response[\"DNSName\"]\n return None",
"def public_dns(self) -> Optional[str]:\n return pulumi.get(self, \"public_dns\")",
"def instanceid_lookup(session, hostname):\n if session is None:\n return None\n\n client = session.client('ec2')\n response = client.describe_instances(\n Filters=[{\"Name\": \"tag:Name\", \"Values\": [hostname]}])\n\n item = response['Reservations']\n if len(item) == 0:\n return None\n else:\n item = item[0]['Instances']\n if len(item) == 0:\n return None\n else:\n item = item[0]\n if 'InstanceId' in item:\n return item['InstanceId']\n return None",
"def cloudfront_public_lookup(session, hostname):\n if session is None:\n return None\n\n client = session.client('cloudfront')\n response = client.list_distributions(\n MaxItems='100'\n )\n items = response[\"DistributionList\"][\"Items\"]\n for item in items:\n cloud_front_domain_name = item[\"DomainName\"]\n if item[\"Aliases\"][\"Quantity\"] > 0:\n if hostname in item[\"Aliases\"][\"Items\"]:\n return cloud_front_domain_name\n return None",
"def nslookup(self):\n if len(self.hostnames) == 0:\n st, out = commands.getstatusoutput('get_instance_by_service %s' % self.bns)\n assert st == 0, \"Failure:'get_instance_by_service %s', errno=%d\" % (self.bns, st)\n self.hostnames = out.split('\\n')\n assert self.hostnames, 'No hosts found for bns: \"%s\"' % self.bns",
"def dns(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"dns\")",
"def getHostKey(instance):\n return instance['hostname']",
"def get_dns(self):\n return self.mycam.devicemgmt.GetDNS()",
"def host_dns_name(self):\n res = self.search(base='', scope=ldb.SCOPE_BASE, attrs=['dNSHostName'])\n return str(res[0]['dNSHostName'][0])",
"def dnslookup(url) -> 'text': \n try:\n hn = socket.gethostbyaddr(url)[0] \n except socket.error as msg: \n hn = 'nohost'\n return hn",
"def public_ip_dns(resolv, nameservers, rdatatype, server, responsetype):\n for ns in nameservers:\n try:\n answer = resolv.query(ns, rdatatype)\n nameserver = answer[0].to_text()\n except Exception as e:\n print(e)\n continue\n resolve_public_ip(nameserver, server, responsetype)",
"def nslookup(name, dnsserver='', prevent_unqualified_dns=True):\n # if it looks like an IP address, don't try to resolve it\n if digitsAndDots.match(name):\n return (0, \"OK\")\n if name != \"localhost\" and prevent_unqualified_dns:\n name = name + \".\" # prevent unqualified DNS lookups\n\n # TODO: we really want to call something along the lines of\n # google2/io/safe_gethostbyname, this will require some python trickery.\n\n # If dnsserver is an empty string, then mkarg() will escape it with\n # quotes and the host call will try to use \"''\" as a dns server and fail\n # So call mkarg only if actually given a non-empty-string dnsserver\n if not dnsserver:\n dnsserver = ''\n if dnsserver != '':\n dnsserver = commands.mkarg(dnsserver)\n\n executestring = commands.mkarg(\n \"host -t a %s %s 2>/dev/null | grep has\\ address | wc -l\"\n % (commands.mkarg(name), dnsserver))\n\n (stat, out) = commands.getstatusoutput('alarm 5 sh -c %s' % executestring)\n if stat != 0:\n return (1, \"TIMEOUT\") # E.g. DNS server does not respond\n\n if int(out) == 0:\n return (2, \"cannot resolve\")\n\n return (0, \"OK\")",
"def get_ip_by_name ( route53_conn, dns_name ) :\n record = get_r53_record_by_name( route53_conn, dns_name )\n if record :\n return record.resource_records[ 0 ]\n\n return None",
"def resolve_public_ip(nameserver, server, responsetype):\n request_resolver = dns.resolver.Resolver()\n request_resolver.nameservers = [nameserver,]\n try:\n answer = request_resolver.query(server, responsetype)\n ip = answer[0].to_text().replace('\"','').strip()\n ipaddress.ip_address(ip)\n print(ip)\n sys.exit()\n except Exception as e:\n print(e)\n return None",
"def public_address() -> str:\n check_timeout = float(CONFIG['network']['check_timeout'])\n check_host_list = CONFIG.get_list('network', 'check_host_list')\n try:\n for check_url in check_host_list:\n with urllib.request.urlopen(\n url=check_url, timeout=check_timeout,\n ) as response:\n return response.read().decode().strip()\n return None\n except Exception as error:\n return None",
"def get_address(project, zone, instance):\n return gcloud(\n project,\n 'addresses',\n 'describe',\n '%s-ip' % instance,\n '--region=%s' % get_region(zone),\n '--format=value(address)',\n )",
"def host_ip(hostname: str) -> str:\n try:\n return socket.gethostbyname(hostname)\n except socket.gaierror:\n return \"No record found.\"",
"def dns_server(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"dns_server\")",
"def service_dns_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"service_dns_name\")",
"def __lookup_public_ip(self):\n\n response = requests.get('https://api.ipify.org?format=json', timeout=self.timeout)\n\n if response.status_code == 200:\n ip_data = response.json()\n if 'ip' not in ip_data.keys():\n return 'Unable to determine IP'\n else:\n return ip_data['ip']\n else:\n return 'Unable to determine IP'",
"def get_dns_info(self, name_or_ip) :\n self._logger.debug(\"get_dns_info: entering with name_or_ip=%s\" % \\\n (name_or_ip))\n if not is_name(name_or_ip) : # check for matching ipaddress\n for hostname in afs.CONFIG.hosts :\n if name_or_ip in afs.CONFIG.hosts[hostname] :\n self._logger.debug(\"%s is hard-mapped to (%s,%s)\" % \\\n (name_or_ip, [hostname,],afs.CONFIG.hosts[hostname]))\n self._logger.debug(\"returning %s\" % ({ \"names\" : [hostname, ], \\\n \"ipaddrs\" : afs.CONFIG.hosts[hostname] }) )\n return { \"names\" : [hostname, ], \\\n \"ipaddrs\" : afs.CONFIG.hosts[hostname] }\n\n # is a hostname\n \n # hard-mapped, primary Hostname given \n if name_or_ip in afs.CONFIG.hosts.keys() :\n self._logger.debug(\"%s is hard-mapped to (%s,%s)\" % ( name_or_ip, \\\n [name_or_ip, ], afs.CONFIG.hosts[name_or_ip]))\n self._logger.debug(\"returning %s\" % ({\"names\" : [name_or_ip,], \"ipaddrs\" : \\\n afs.CONFIG.hosts[name_or_ip] }) )\n return {\"names\" : [name_or_ip,], \"ipaddrs\" : \\\n afs.CONFIG.hosts[name_or_ip] }\n\n \n # memory_cache \n if name_or_ip in self.memory_cache[\"dns_info\"] :\n self._logger.debug(\"%s in localcache hard-mapped (%s)\" % \\\n (name_or_ip,self.memory_cache[\"dns_info\"][name_or_ip] ))\n self._logger.debug(\"returning %s\" % (self.memory_cache[\"dns_info\"][name_or_ip]))\n return self.memory_cache[\"dns_info\"][name_or_ip]\n \n for srv in self.memory_cache[\"dns_info\"] :\n if name_or_ip in self.memory_cache[\"dns_info\"][srv][\"names\"] :\n self._logger.debug(\"%s is hard-mapped to %s\" % (name_or_ip, \\\n self.memory_cache[\"dns_info\"][srv] ))\n self._logger.debug(\"returning %s\" % (self.memory_cache[\"dns_info\"][srv]) )\n return self.memory_cache[\"dns_info\"][srv]\n\n # lookup from OS\n \n try : \n dns_info = socket.gethostbyaddr(name_or_ip)\n servernames = [dns_info[0]] + dns_info[1]\n ipaddrs = dns_info[2]\n except socket.gaierror :\n if is_name(name_or_ip) :\n raise LookupUtilError(\"Cannot resolve %s\" % name_or_ip)\n else :\n self._logger.warn(\"Cannot resolve %s\" % name_or_ip)\n self._logger.debug(\"returning %s\" % ({\"names\": [], \"ipaddrs\" : [name_or_ip,]}) )\n return {\"names\": [], \"ipaddrs\" : [name_or_ip,]}\n\n\n self._logger.debug(\"%s resolves to %s\" % (name_or_ip, dns_info)) \n # check if resolved ip-address matches (if hostalias was used)\n for hostname in afs.CONFIG.hosts :\n for ipaddr in ipaddrs :\n # ignore IP if we're asked to do so.\n if ipaddr in afs.CONFIG.ignoreIPList : continue\n if ipaddr in afs.CONFIG.hosts[hostname] :\n self._logger.debug(\"%s is hard-mapped to (%s,%s)\" % \\\n (ipaddrs, [hostname,],afs.CONFIG.hosts[hostname]))\n # add this hostalias to list in memory_cache\n if self.memory_cache[\"dns_info\"].has_key(hostname) :\n self.memory_cache[\"dns_info\"][hostname][\"names\"] = \\\n [hostname, ]\n self.memory_cache[\"dns_info\"][hostname][\"ipaddrs\"] = \\\n afs.CONFIG.hosts[hostname]\n else :\n self.memory_cache[\"dns_info\"][hostname] = { \\\n \"names\" : [hostname,], \\\n \"ipaddrs\" : afs.CONFIG.hosts[hostname]}\n self._logger.debug(\"memory_cache = %s\" % \\\n (self.memory_cache))\n ipaddrs = []\n self._logger.debug(\"returning %s\" % ({ \"names\" : [hostname], \"ipaddrs\" : \\\n afs.CONFIG.hosts[hostname] }) )\n return { \"names\" : [hostname], \"ipaddrs\" : \\\n afs.CONFIG.hosts[hostname] }\n\n if \"nxdomain\" in servernames[0] : \n raise LookupUtilError(\"cannot resolve DNS-entry %s\" % name_or_ip)\n # fill up localcache\n self.memory_cache[\"dns_info\"][servernames[0]] = { \\\n \"names\" : servernames, 
\"ipaddrs\" : ipaddrs }\n self._logger.debug(\"memory_cache = %s\" % (self.memory_cache))\n self._logger.debug(\"returning %s\" % ({\"names\": servernames, \"ipaddrs\" : ipaddrs}) )\n return {\"names\": servernames, \"ipaddrs\" : ipaddrs}",
"def query_dns_server(self) -> str:\n dig_command = [\n \"dig\",\n \"-p\",\n str(self.dns_port),\n \"-t\",\n self.record_type,\n \"+short\",\n self.query_address\n ]\n\n try:\n dig_process = subprocess.Popen(dig_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n universal_newlines=True)\n output, error = dig_process.communicate()\n\n if dig_process.returncode != 0:\n raise RuntimeError(f\"Failed to query the DNS server: {error.strip()}\")\n\n return output.strip()\n\n except subprocess.SubprocessError as e:\n raise RuntimeError(f\"Failed to query the DNS server: {str(e)}\")",
"def getNodeDNS(self,node):\n data = self.connect('get','nodes/%s/dns' % (node),None)\n return data",
"def get_host_name(self):\n\t\treturn call_sdk_function('PrlSrvInfo_GetHostName', self.handle)",
"def get_persistent_address(instance):\n if instance.cloud == 'aws':\n client = boto3.client('ec2', instance.region)\n try:\n client.describe_addresses(PublicIps=[instance.ip_address])\n return instance.ip_address\n except botocore.client.ClientError as exc:\n if exc.response.get('Error', {}).get('Code') != 'InvalidAddress.NotFound':\n raise\n # Address is not public\n return None\n if instance.cloud == 'gcp':\n credentials = GoogleCredentials.get_application_default()\n compute = discovery.build('compute', 'v1', credentials=credentials)\n try:\n return compute.addresses().get(address=instance.name, project=instance.project, region=instance.region).execute()['address']\n except errors.HttpError as exc:\n if 'was not found' in str(exc):\n return None\n raise\n raise ValueError('Unknown cloud %s' % instance.cloud)",
"def custom_dns_resolver(hostname, type='A'):\n nameservers = globals.config.service.initial_dns\n custom_resolver = dns.resolver.Resolver()\n custom_resolver.nameservers = nameservers\n answer = custom_resolver.query(hostname, type)\n\n return str(random.choice(answer))"
]
| [
"0.7093993",
"0.6950115",
"0.69366145",
"0.6753267",
"0.6640436",
"0.66227126",
"0.6474817",
"0.6400321",
"0.6302448",
"0.62650925",
"0.6223052",
"0.6206862",
"0.61611843",
"0.6119791",
"0.60280055",
"0.6024374",
"0.6014244",
"0.6009959",
"0.5994625",
"0.5985246",
"0.59798884",
"0.59340245",
"0.5922899",
"0.58991104",
"0.5890907",
"0.58888674",
"0.5872953",
"0.5861304",
"0.5852564",
"0.5840921"
]
| 0.77868897 | 0 |
Look up the CloudFront public domain name which has the hostname as the origin. | def cloudfront_public_lookup(session, hostname):
if session is None:
return None
client = session.client('cloudfront')
response = client.list_distributions(
MaxItems='100'
)
items = response["DistributionList"]["Items"]
for item in items:
cloud_front_domain_name = item["DomainName"]
if item["Aliases"]["Quantity"] > 0:
if hostname in item["Aliases"]["Items"]:
return cloud_front_domain_name
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def host(self):\n if self.url.startswith(\"dns:\"):\n return self.url[4:]\n else:\n return urlparse(self.url).hostname",
"def get_domain():\n domain=\"\"\n for item in re.split(\"\\.\", env.host)[1:]:\n domain = domain + \".\" + item\n return domain.lstrip(\".\")",
"def _getHostname(fqdn):\n\treturn fqdn.split('.')[0]",
"def bucket_website_domain_name(self) -> str:\n ...",
"def server_domain(self):\n url = self.api.address\n domain_start = url.find('://') + 3 if url.find('://') >= 0 else 0\n domain_end = url.find(':', domain_start) if url.find(':', domain_start) >= 0 else \\\n url.find('/', domain_start) if url.find('/', domain_start) >= 0 else \\\n url.find('?', domain_start) if url.find('?', domain_start) >= 0 else \\\n len(url)\n regex = re.compile('[^a-zA-Z0-9\\.]') # being cautious as changing this later will invalidate everyone's cache\n return regex.sub('_', url[domain_start:domain_end]).lower()",
"def public_dns(self) -> Optional[str]:\n return pulumi.get(self, \"public_dns\")",
"def bucket_domain_name(self) -> str:\n ...",
"def get_hostname(url: str) -> str:\n return urlsplit(url).hostname",
"def __find_hostname(self, url):\n match = self.__REGEX_HOST.search(url)\n if match:\n return match.group(0)\n return None",
"def get_domain_name(url):\n\n parsed_uri = urlparse(url)\n return parsed_uri.netloc",
"def get_external_domain(self):\n if self.charm_config[\"external-domain\"]:\n return self.charm_config[\"external-domain\"]\n return self.get_server_name()",
"def domain_dns_name(self):\n domain_dn = self.get_default_basedn()\n return domain_dn.canonical_str().split('/')[0]",
"def getDomain(self):\n # ui = UrlInfo(url)\n # urlBytes = [ord(i) for i in url]\n host = self.url[self.host_head:self.host_tail]\n domain = self.url[self.domain_head:self.domain_tail]\n\n # domain = url[ui.getDomainHead():ui.getDomainTail()]\n m = re.match(self.ipUrlPattern, host)\n if m:\n domain = m.group(1)\n return domain",
"def resolve_domain(host: str) -> str:\n parts = host.split('.')[-2:]\n return ''.join(parts)",
"def get_subdomain(self):\n return self.key().name().split(':', 1)[0]",
"def get_domain_name(url):\n try:\n results = get_sub_domain_name(url).split('.')\n return results[-2] + '.' + results[-1]\n except:\n return ''",
"def getHostnameFromURL(self, url):\n hostname = urllib.splithost(urllib.splittype(url)[1])[0]\n logging.debug(\"Parsed hostname %r for cert CN matching.\" % hostname)\n return hostname",
"def bucket_website_domain_name(self) -> str:\n return jsii.get(self, \"bucketWebsiteDomainName\")",
"def bucket_website_domain_name(self) -> str:\n return jsii.get(self, \"bucketWebsiteDomainName\")",
"def get_cname(self, host):\n cname = None\n\n if self.is_local(host):\n # Don't perform DNS lookup for localhost.\n cname = host\n else:\n self.log(\"Resolving host: \" + host)\n\n try:\n ans = self._resolver.query(host, 'CNAME')\n\n if len(ans.rrset.items) == 1:\n # Remove last (blank) field from host name.\n labels = ans[0].target.labels[0:-1]\n labels = map(lambda s: str(s, 'utf-8'), labels)\n cname = '.'.join(labels)\n\n except dns.resolver.NoAnswer as e:\n self.log(\"No answer\")\n except dns.resolver.NXDOMAIN as e:\n pass\n except dns.exception.DNSException as e:\n self.log(\"Exception: \" + str(type(e)))\n\n return cname",
"def get_domain_name(self):\n return self.domain_name.get_text()",
"def _metaname_domain_name_for_hostname(self, hostname):\n\n hostname = hostname.strip(\".\").split(\".\", 1)[\n 1\n ] # remove the well-known prefix from the validation hostname\n try:\n zones_in_account = [\n i[\"name\"] for i in self._metaname_client().request(\"dns_zones\")\n ]\n except Exception as e:\n raise errors.PluginError(\n f\"Unable to request the list of hosted DNS zones: {e}\"\n ) from e\n guesses = dns_common.base_domain_name_guesses(hostname)\n for guess in guesses:\n if guess in zones_in_account:\n return guess\n raise errors.PluginError(f\"Unable to find a Metaname DNS zone for {hostname}\")",
"def get_host_name(url):\n return urlparse.urlparse(url)[1]",
"def get_domain(self, response):\n parts = urllib.parse.urlparse(response.url)\n domain = parts.netloc\n return domain",
"def bucket_domain_name(self) -> typing.Optional[str]:\n return self._values.get('bucket_domain_name')",
"def get_domain(url):\n a = urllib.parse.urlsplit(url)\n return str(a.scheme) + \"://\" + str(a.hostname)",
"def bucket_domain_name(self) -> str:\n return jsii.get(self, \"bucketDomainName\")",
"def bucket_domain_name(self) -> str:\n return jsii.get(self, \"bucketDomainName\")",
"def get_hostname():\n return re.split(\"\\.\", env.host)[0]",
"def hostname(self, code):\n return self.domain"
]
| [
"0.7322436",
"0.71760046",
"0.7071095",
"0.7029442",
"0.7000298",
"0.69341964",
"0.6933069",
"0.6917955",
"0.6914039",
"0.6898984",
"0.6894848",
"0.6891883",
"0.6890801",
"0.6877511",
"0.681708",
"0.6752261",
"0.6731664",
"0.6730647",
"0.6730647",
"0.67285687",
"0.6695391",
"0.6690316",
"0.66836226",
"0.66706747",
"0.6667506",
"0.6634545",
"0.6633161",
"0.6633161",
"0.65944725",
"0.6591646"
]
| 0.82743466 | 0 |
Look up the Public DNS name for an ELB | def elb_public_lookup(session, hostname):
if session is None:
return None
client = session.client('elb')
responses = client.describe_load_balancers()
hostname_ = hostname.replace(".", "-")
for response in responses["LoadBalancerDescriptions"]:
if response["LoadBalancerName"].startswith(hostname_):
return response["DNSName"]
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rds_lookup(session, hostname):\n\n client = session.client('rds')\n response = client.describe_db_instances(DBInstanceIdentifier=hostname)\n\n item = response['DBInstances']\n if len(item) == 0:\n print(\"Could not find DNS for '{}'\".format(hostname))\n return None\n else:\n return item[0]['Endpoint']['Address']",
"def instance_public_lookup(session, hostname):\n if session is None:\n return None\n\n client = session.client('ec2')\n response = client.describe_instances(\n Filters=[{\"Name\": \"tag:Name\", \"Values\": [hostname]},\n {\"Name\": \"instance-state-name\", \"Values\": [\"running\"]}])\n\n item = response['Reservations']\n if len(item) == 0:\n return None\n else:\n item = item[0]['Instances']\n if len(item) == 0:\n return None\n else:\n item = item[0]\n if 'PublicDnsName' in item:\n return item['PublicDnsName']\n return None",
"def get_by_dns_name(cls, dns_name: str) -> \"ELB\":\n _, region, _ = dns_name.split(\".\", maxsplit=2)\n client = BotoClientProxy(\"elb\", region)\n\n response = client.describe_load_balancers()\n next_marker = response.get(\"NextMarker\")\n load_balancers = response[\"LoadBalancerDescriptions\"] # type: List\n while next_marker:\n response = client.describe_load_balancers(Marker=next_marker)\n next_marker = response.get(\"NextMarker\")\n load_balancers.extend(response[\"LoadBalancerDescriptions\"])\n\n for load_balancer in load_balancers:\n if load_balancer[\"DNSName\"] == dns_name:\n return cls.from_boto_dict(load_balancer)\n\n raise ELBNotFound(dns_name)",
"def nslookup(self):\n if len(self.hostnames) == 0:\n st, out = commands.getstatusoutput('get_instance_by_service %s' % self.bns)\n assert st == 0, \"Failure:'get_instance_by_service %s', errno=%d\" % (self.bns, st)\n self.hostnames = out.split('\\n')\n assert self.hostnames, 'No hosts found for bns: \"%s\"' % self.bns",
"def find_elb ( elb_conn, elb_name ) :\n try :\n elb_r = elb_conn.get_all_load_balancers( load_balancer_names = [ elb_name ] )\n if len( elb_r ) > 0 :\n return elb_r[ 0 ]\n except :\n return None",
"def create_internal_elb_dns_name ( base_name, name ) :\n return 'lb.' + create_dns_name( base_name, name )",
"def get_balancer_dns(self):\n return self.get_balancer_info()['DNSName']",
"def machine_lookup(session, hostname, public_ip = True):\n\n try:\n idx, target = hostname.split('.', 1)\n idx = int(idx) # if it is not a valid number, then it is a hostname\n hostname = target\n except:\n idx = 0\n\n client = session.client('ec2')\n response = client.describe_instances(Filters=[{\"Name\":\"tag:Name\", \"Values\":[hostname]},\n {\"Name\":\"instance-state-name\", \"Values\":[\"running\"]}])\n\n item = response['Reservations']\n if len(item) == 0:\n print(\"Could not find IP address for '{}'\".format(hostname))\n return None\n else:\n item.sort(key = lambda i: i['Instances'][0][\"InstanceId\"])\n\n if len(item) <= idx:\n print(\"Could not find IP address for '{}' index '{}'\".format(hostname, idx))\n return None\n else:\n item = item[idx]['Instances'][0]\n if 'PublicIpAddress' in item and public_ip:\n return item['PublicIpAddress']\n elif 'PrivateIpAddress' in item and not public_ip:\n return item['PrivateIpAddress']\n else:\n print(\"Could not find IP address for '{}'\".format(hostname))\n return None",
"def cloudfront_public_lookup(session, hostname):\n if session is None:\n return None\n\n client = session.client('cloudfront')\n response = client.list_distributions(\n MaxItems='100'\n )\n items = response[\"DistributionList\"][\"Items\"]\n for item in items:\n cloud_front_domain_name = item[\"DomainName\"]\n if item[\"Aliases\"][\"Quantity\"] > 0:\n if hostname in item[\"Aliases\"][\"Items\"]:\n return cloud_front_domain_name\n return None",
"def lb_lookup(session, lb_name):\n if session is None:\n return None\n\n lb_name = lb_name.replace('.', '-')\n\n client = session.client('elb')\n response = client.describe_load_balancers()\n\n for i in range(len(response['LoadBalancerDescriptions'])):\n if (response['LoadBalancerDescriptions'][i]['LoadBalancerName']) == lb_name:\n return True\n return False",
"def get_hostname(config):\n KEY = os.environ.get(\"DWH_AWS_KEY\")\n SECRET = os.environ.get(\"DWH_AWS_SECRET\")\n redshift = boto3.client('redshift', region_name=\"us-west-2\",\n aws_access_key_id=KEY,\n aws_secret_access_key=SECRET)\n CLUSTER_IDENTIFIER = config.get(\"CLUSTER\", \"CLUSTER_IDENTIFIER\")\n cluster_props = redshift.describe_clusters(\n ClusterIdentifier=CLUSTER_IDENTIFIER)['Clusters'][0]\n endpoint = cluster_props[\"Endpoint\"][\"Address\"]\n return endpoint",
"def public_dns(self) -> Optional[str]:\n return pulumi.get(self, \"public_dns\")",
"def dnslookup(url) -> 'text': \n try:\n hn = socket.gethostbyaddr(url)[0] \n except socket.error as msg: \n hn = 'nohost'\n return hn",
"def test_missingName(self):\n servers = {\n ('1.1.2.3', 53): {\n (b'foo.example.com', A): {\n 'rCode': ENAME,\n },\n },\n }\n resolver = self._getResolver(servers)\n d = resolver.lookupAddress(b'foo.example.com')\n return self.assertFailure(d, DNSNameError)",
"def amazon_public_address() -> str:\n check_url = 'http://169.254.169.254/latest/meta-data/public-ipv4'\n check_timeout = float(CONFIG['network']['check_timeout'])\n try:\n with urllib.request.urlopen(\n url=check_url, timeout=check_timeout,\n ) as response:\n return response.read().decode().strip()\n except Exception as error:\n return None",
"def asg_name_lookup(session, hostname):\n if session is None:\n return None\n\n client = session.client('autoscaling')\n response = client.describe_auto_scaling_groups()\n if len(response['AutoScalingGroups']) == 0:\n return None\n else:\n # DP NOTE: Unfortunatly describe_auto_scaling_groups() doesn't allow filtering results\n for g in response['AutoScalingGroups']:\n t = _find(g['Tags'], lambda x: x['Key'] == 'Name')\n if t and t['Value'] == hostname:\n return g['AutoScalingGroupName']\n return None",
"def bucket_dual_stack_domain_name(self) -> str:\n ...",
"def getHostKey(instance):\n return instance['hostname']",
"def host_dns_name(self):\n res = self.search(base='', scope=ldb.SCOPE_BASE, attrs=['dNSHostName'])\n return str(res[0]['dNSHostName'][0])",
"def instanceid_lookup(session, hostname):\n if session is None:\n return None\n\n client = session.client('ec2')\n response = client.describe_instances(\n Filters=[{\"Name\": \"tag:Name\", \"Values\": [hostname]}])\n\n item = response['Reservations']\n if len(item) == 0:\n return None\n else:\n item = item[0]['Instances']\n if len(item) == 0:\n return None\n else:\n item = item[0]\n if 'InstanceId' in item:\n return item['InstanceId']\n return None",
"def get_ip_by_name ( route53_conn, dns_name ) :\n record = get_r53_record_by_name( route53_conn, dns_name )\n if record :\n return record.resource_records[ 0 ]\n\n return None",
"def get_hostname(self):\n return self.name",
"def resolve_hostname(request, hostname):\n try:\n ipaddress = usm_wrapper_utils.resolve_hostname(hostname)\n except Exception, e:\n log.exception(e)\n return Response(\n {'message': 'Error while resolving hostname'}, status=417)\n\n return Response({'IP_Address': ipaddress}, status=200)",
"def get_hostname(self):\n module = 'hostname'\n method = 'GET'\n response = self.axapi_call(module, method)\n hostname = response.json()['hostname']['value']\n print(self.device + ' Device hostname is: ' + hostname)",
"def host_ip(hostname: str) -> str:\n try:\n return socket.gethostbyname(hostname)\n except socket.gaierror:\n return \"No record found.\"",
"def hostname_lookup(hostname):\n try:\n # The {host} must be resolved to an IP address; if this fails, this\n # will throw a socket.gaierror.\n host_address = gethostbyname(hostname)\n\n # Reset {host} to the resolved address.\n LOG.debug(\n 'Resolved hostname %s to IP address %s.', hostname, host_address\n )\n return host_address\n\n except gaierror:\n # The {host}-as-hostname did not resolve to an IP address.\n LOG.debug('Could not resolve hostname %s to an IP address.', hostname)\n return hostname",
"def get_instance_by_cname ( ec2_conn, r53_conn, dns_cname ) :\n record = get_r53_record_by_name( r53_conn, dns_cname, record_type = 'CNAME' )\n if record :\n instance_dns_name = record.resource_records[ 0 ]\n print \"Getting ec2 instance with DNS name: \" + instance_dns_name\n instances = ec2_conn.get_only_instances( filters = { \"dns-name\": [ instance_dns_name ] } )\n for instance in instances :\n return instance",
"def get_host_name(self):\n\t\treturn call_sdk_function('PrlSrvInfo_GetHostName', self.handle)",
"def get_hostname(self):\n raise NotImplementedError('get_hostname')",
"def dns_lookup(self, hostname, aux):\n\n resolver = Resolver()\n\n # If the host doesn't have the A record (IPv4),\n # trying to find its AAAA record (IPv6).\n try:\n addr = resolver.query(hostname, \"A\")[0] # <---+\n ver = 4 # |\n except Exception as e: # From the dnspython lib. --------+\n try: # |\n addr = resolver.query(hostname, \"AAAA\")[0] # <---+\n ver = 6\n except Exception as e:\n addr = ver = aux._ERR_PREFIX\n\n return (addr, ver)"
]
| [
"0.7090508",
"0.6931591",
"0.66295487",
"0.6527898",
"0.64692634",
"0.6408615",
"0.6370166",
"0.6294847",
"0.62296087",
"0.6155501",
"0.61130655",
"0.60973746",
"0.6082369",
"0.6069316",
"0.60271686",
"0.5922691",
"0.59071743",
"0.58485824",
"0.58454686",
"0.58105326",
"0.5799236",
"0.5791437",
"0.57896054",
"0.5774254",
"0.5684887",
"0.566317",
"0.5652644",
"0.5636482",
"0.5633994",
"0.5633872"
]
| 0.82099265 | 0 |
Look up an SNS topic ARN given a topic name | def sns_topic_lookup(session, topic_name):
if session is None:
return None
client = session.client('sns')
response = client.list_topics()
topics_list = response['Topics']
for topic in topics_list:
arn_topic_name = topic["TopicArn"].split(':').pop()
if arn_topic_name == topic_name:
return topic["TopicArn"]
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_topic_arn(topic_name):\n # https://stackoverflow.com/a/37723278/1558022\n sts_client = boto3.client('sts')\n account_id = sts_client.get_caller_identity().get('Account')\n\n return f'arn:aws:sns:eu-west-1:{account_id}:{topic_name}'",
"def get_full_topicarn ( base_topicarn, topicname ) :\n return base_topicarn + ':' + topicname",
"def sns_topic_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sns_topic_arn\")",
"def sns_topic_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sns_topic_arn\")",
"def topic_arn(self) -> str:\n return self[\"Sns\"][\"TopicArn\"]",
"def sns_topic_arn(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"sns_topic_arn\")",
"def get_base_topicarn ( sns_conn ) :\n t_result = sns_conn.get_all_topics( )\n sample_topic_arn = t_result[ 'ListTopicsResponse' ][ 'ListTopicsResult' ][ 'Topics' ][ 0 ][ 'TopicArn' ]\n return sample_topic_arn[ 0 : sample_topic_arn.rfind( ':' ) ]",
"def topic_arn(self) -> Optional[str]:\n return pulumi.get(self, \"topic_arn\")",
"def sns_create_topic(session, topic):\n if session is None:\n return None\n\n client = session.client(\"sns\")\n response = client.create_topic(Name=topic)\n print(response)\n if response is None:\n return None\n else:\n return response['TopicArn']",
"def topic_arn(self) -> str:\n return pulumi.get(self, \"topic_arn\")",
"def test_get_snsname_arn_sanity(self, aws_res_mock):\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import get_snsname_arn\n\n arn = get_snsname_arn()\n\n client = boto3.client('sns')\n response = client.get_topic_attributes(TopicArn=arn)\n\n # output will normally be captured and suppressed but printed\n # iff the test fails. So, leaving in something that dumps the response\n # can be useful. See https://docs.pytest.org/en/4.6.x/capture.html\n pprint(response)\n response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n response['Attributes']['TopicArn'].should.equal(arn)\n\n # check that our mock of aws_resource_names was used\n aws_res_mock.assert_called_once()",
"def _create_topic_if_not_exists(self, topic_name):\n creation_result = self.conn.create_topic(topic_name)\n return creation_result['CreateTopicResponse']['CreateTopicResult']['TopicArn']",
"def base_topicarn ( self ) :\n if not self.topic_arn_prefix :\n self.topic_arn_prefix = get_base_topicarn( self.sns_conn( ) )\n return self.topic_arn_prefix",
"def create_topic ( sns_conn, topicname, subscription_email ) :\n t_result = sns_conn.create_topic( topicname )\n topic = t_result[ 'CreateTopicResponse' ][ 'CreateTopicResult' ][ 'TopicArn' ]\n sns_conn.subscribe( topic, 'email', subscription_email )\n\n return topic",
"def setTopic(self, topicName):\n self.topicName = topicName\n topicResponse = self.snsClient.create_topic(Name=topicName)\n self.topicArn = topicResponse['TopicArn']",
"def GetTopicName(args):\n if args.add_topic:\n topic_ref = args.CONCEPTS.add_topic.Parse()\n elif args.remove_topic:\n topic_ref = args.CONCEPTS.remove_topic.Parse()\n else:\n topic_ref = args.CONCEPTS.update_topic.Parse()\n\n return topic_ref.RelativeName()",
"def lambda_arn_lookup(session, lambda_name):\n if session is None:\n return None\n\n client = session.client(\"lambda\")\n response = client.get_function(FunctionName=lambda_name)\n if response is None:\n return None\n else:\n return response['Configuration']['FunctionArn']",
"def topic_identity(self, name=None):\n identity = self._wc2identity.get(name)\n if not identity:\n identity = self.next_topic_identity(name)\n if name:\n self._wc2identity[name] = identity\n return identity",
"def test_get_snsname_arn_auth_exception_handling(self, aws_res_mock):\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import get_snsname_arn\n\n # create a mock SNS client that returns what we tell it to\n client = boto3.client('sns')\n stub = Stubber(client)\n stub.add_client_error('create_topic', service_error_code='AuthorizationError')\n stub.activate()\n\n\n # since firesim manager code doesn't take clients as method parameters\n # now we mock boto3.client to return our stubbed client\n with patch.object(boto3._get_default_session(), 'client', return_value=client) as mock_session:\n topic_arn = get_snsname_arn()\n\n stub.assert_no_pending_responses()\n topic_arn.should.be.none\n\n # TODO we could mock rootLogger.critical to capture it's calls and args and validate that we're seeing the correct \"nice\" message\n\n # make sure get_snsname_arn() actually called out to get a sns\n # client, otherwise we aren't testing what we think we are\n mock_session.assert_called_once_with('sns')\n\n aws_res_mock.assert_called_once()",
"def publish_to_sns(topic_name, message, region=None):\n AWS = AWSCachedClient(region) # cached client object\n\n partition = None\n\n if region:\n partition = partition_from_region(region)\n else:\n partition = 'aws'\n region = 'us-east-1'\n\n topic_arn = 'arn:' + partition + ':sns:' + region + ':' + AWS.account + ':' + topic_name\n\n json_message = json.dumps({\"default\":json.dumps(message)})\n message_id = AWS.get_connection('sns', region).publish(\n TopicArn=topic_arn,\n Message=json_message,\n MessageStructure='json'\n ).get('MessageId', 'error')\n\n return message_id",
"def topic(self):\n return self._topic_name",
"def create_key_name(callback, topic):\n\t\treturn utils.get_hash_key_name(u'%s\\n%s' % (callback, topic))",
"def create_sns_topic(stack, name, endpoint, protocol='https'):\n\n return stack.stack.add_resource(\n Topic(\n '{0}Topic'.format(name.replace('-', '')),\n DisplayName=name,\n Subscription=[Subscription(Endpoint=endpoint, Protocol=protocol)],\n TopicName='{0}Topic'.format(name)))",
"def create_key_name(topic):\n\t\treturn utils.get_hash_key_name(topic)",
"def cert_arn_lookup(session, domain_name):\n if session is None:\n return None\n\n client = session.client('acm')\n response = client.list_certificates()\n for certs in response['CertificateSummaryList']:\n if certs['DomainName'] == domain_name:\n return certs['CertificateArn']\n if certs['DomainName'].startswith('*'): # if it is a wildcard domain like \"*.thebossdev.io\"\n cert_name = certs['DomainName'][1:] + '$'\n if re.search(cert_name, domain_name) != None:\n return certs['CertificateArn']\n return None",
"def read_message(self, topic_name, offset):\n try:\n return self.topics[topic_name][offset]\n except Exception:\n return None",
"def topic(self, topic):\n self.connection.topic(str(self), topic)",
"def get_session_id(cls, topic: str) -> typing.Optional[str]:\n match = re.match(AsrAudioCaptured.TOPIC_PATTERN, topic)\n assert match, \"Not an audioCaptured topic\"\n return match.group(2)",
"def topic(self):\n return self.config.get('topic', f'{NAMESPACE}/{self.id}')",
"def get_by_topic(cls, topic):\n\t\treturn cls.get_by_key_name(get_hash_key_name(topic))"
]
| [
"0.79431546",
"0.72963053",
"0.7135237",
"0.7135237",
"0.7130573",
"0.6910786",
"0.69055796",
"0.6537543",
"0.65364784",
"0.646367",
"0.6283038",
"0.59111613",
"0.5876922",
"0.5643607",
"0.56244606",
"0.55634636",
"0.5465788",
"0.54464096",
"0.54258615",
"0.5421659",
"0.5343895",
"0.53294945",
"0.5274255",
"0.5207474",
"0.51965153",
"0.5183813",
"0.51797855",
"0.5176359",
"0.5166237",
"0.5151378"
]
| 0.8113506 | 0 |
Delete all of the SQS Queues that start with the given domain name | def sqs_delete_all(session, domain):
client = session.client('sqs')
resp = client.list_queues(QueueNamePrefix=domain.replace('.','-'))
for url in resp.get('QueueUrls', []):
client.delete_queue(QueueUrl=url) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean_test_queues(prefix=TEST_NAME_PREFIX, region_name=None):\n sqs = boto3.resource('sqs', region_name=region_name)\n num_queues = 0\n try:\n for queue in sqs.queues.all():\n if re.match(r'.+%s\\d+' % TEST_NAME_PREFIX, queue.url):\n queue.delete()\n num_queues += 1\n finally:\n log.info('deleted %s test queues' % num_queues)",
"def delete_queue(client, vhost, queue):\n client.delete_queue(vhost, queue)",
"def deleteQueues(self, queueIDToDelete):\r\n #method = moduleName + '.' + self.className + '.' + 'deleteQueues'\r\n #errorMsg = \"Forcing deletion of management infrastructure for worker queue %s\" %queueIDToDelete\r\n #Graph.logQ.put( [logType , logLevel.ERROR , method , errorMsg])\r\n try: del self.workerQueues[queueIDToDelete]\r\n except: pass\r\n try: del self.depricatedWorkerQueues[queueIDToDelete]\r\n except: pass",
"def delete_queue(queue_name: str, server_url: Optional[str] = None):\n rpc = RemoteProcedure(handle_QMF2_exception,\n 'qmf.default.direct', server_url)\n delete_queue_message = create_QMF2_method_invoke(\n get_broker_id(server_url),\n 'delete', {\n 'type': 'queue',\n 'name': queue_name\n }\n )\n rpc.call(delete_queue_message, timedelta(seconds=5))",
"def delete_queues_handler(event, context):\n logger.info(\"Request: %s\", json.dumps(event))\n # In most other contexts that Lambda is used, the return value is\n # immediately available. However, CloudFormation doesn't use those standard\n # return values, and instead wants the agent to hit a URL to indicate\n # success or failure. If CloudFormation does not receive an explicit\n # notification, it can only rely on timeouts to detect failure, and that\n # can waste human time.\n #\n # We wrap all the logic in a try/except so that we always try to send\n # CloudFormation a status message.\n try:\n sqs_client = boto3.client('sqs')\n handler(event, context, sqs_client)\n send(event, context, SUCCESS)\n except Exception, e:\n logger.error(\"Exception: %r\", e)\n\t# There is no point in trying to notify if we for some reason already\n\t# failed to notify. CloudFormation will eventually time out.\n\tif not isinstance(e, urllib2.URLError): \n send(event, context, FAILED)\n raise",
"def list_queues(region: str = \"\", verbose: bool = False) -> List[str]:\n sqs_client = _client(region=region)\n return [\n (x if verbose else x.split(\"/\")[-1])\n for x in sqs_client.list_queues()[\"QueueUrls\"]\n ]",
"def queue_delete(queue):\n\n for job in queue.jobs:\n job_delete(job)\n if os.path.exists(queue.data_abspath):\n os.rmdir(queue.data_abspath)\n db.session.delete(queue)\n db.session.commit()",
"def remove_from_queue(self, confid):\n\n queued_ids = self.c.select(queued=1, gaid=confid)\n ids = [q.id for q in queued_ids]\n self.c.delete(ids)",
"def delete_sSMS():\n\tfor msg in client.messages.list():\n\t\ttemp = str(msg.from_)\n\t\tif(temp == base):\n\t\t\tclient.messages.delete(msg.sid)",
"def delete_queue(self, queue_name):\n amqp_session = self.__broker.getAmqpSession()\n amqp_session.queue_delete(queue_name)",
"def list_sqs(region, filter_by_kwargs):\n conn = boto.sqs.connect_to_region(region)\n queues = conn.get_all_queues()\n return lookup(queues, filter_by=filter_by_kwargs)",
"def delete_queue(qid):\r\n raise NotImplementedError()",
"def destroy_all(self):\n for name in self.app.config['SIMPLE_DOMAINS']:\n self.connection.delete_domain(name)",
"def clearQueueAll():",
"def removeall(subdomain):\n\tTarget.query.filter(Target.subdomain.like(f\"%{subdomain}%\")).delete(synchronize_session='fetch')\n\tdb.session.commit()\n\tprint(\"deleted\",sub)",
"def clearQueue(self, queue_name, project_id=None):\n if project_id is None:\n project_id = self.project_id\n\n url = \"%sprojects/%s/queues/%s/clear?oauth=%s\" % (self.url, project_id, queue_name, self.token)\n body = self.__post(url)\n return json.loads(body)",
"def _queue_delete(self, queue):\n\n queue.delete()",
"def delete_messages(self):\n if not self.processed_messages:\n LOGGER.error('No processed messages to delete')\n return\n\n while self.processed_messages:\n len_processed_messages = len(self.processed_messages)\n batch = len_processed_messages if len_processed_messages < 10 else 10\n\n # Delete_batch can only process up to 10 messages\n message_batch = [self.processed_messages.pop() for _ in range(batch)]\n\n resp = self.sqs_client.delete_message_batch(\n QueueUrl=self.athena_sqs_url,\n Entries=[{'Id': message['MessageId'],\n 'ReceiptHandle': message['ReceiptHandle']}\n for message in message_batch]\n )\n LOGGER.info('Successfully deleted %s messages from the queue',\n len(resp['Successful']))",
"def _flush_enqueued(self):\n\n msgs = self.RPC.query.all()\n for msg in msgs:\n if msg.enqueued:\n if 'plan_name' in list(msg.ctxt.keys()): # Python 3 Conversion -- dict object to list object\n LOG.info('Plan name: {}'.format(msg.ctxt['plan_name']))\n elif 'plan_name' in list(msg.args.keys()): # Python 3 Conversion -- dict object to list object\n LOG.info('Plan name: {}'.format(msg.args['plan_name']))\n msg.delete()",
"def delete_sqs_message(sqs_queue_url, msg_receipt_handle):\r\n\r\n # Delete the message from the SQS queue\r\n sqs_client = boto3.client('sqs',region_name=\"us-east-1\")\r\n sqs_client.delete_message(QueueUrl=sqs_queue_url,\r\n ReceiptHandle=msg_receipt_handle)",
"def delete_a_queue(self,index):\n try:\n del self.queues[index]\n return True\n except IndexError:\n return False",
"def purge_pending_jobs(event=None, site=None, queue=None):\n\tpurged_task_count = 0\n\tfor _queue in get_queue_list(queue):\n\t\tq = get_queue(_queue)\n\t\tfor job in q.jobs:\n\t\t\tif site and event:\n\t\t\t\tif job.kwargs[\"site\"] == site and job.kwargs[\"event\"] == event:\n\t\t\t\t\tjob.delete()\n\t\t\t\t\tpurged_task_count += 1\n\t\t\telif site:\n\t\t\t\tif job.kwargs[\"site\"] == site:\n\t\t\t\t\tjob.delete()\n\t\t\t\t\tpurged_task_count += 1\n\t\t\telif event:\n\t\t\t\tif job.kwargs[\"event\"] == event:\n\t\t\t\t\tjob.delete()\n\t\t\t\t\tpurged_task_count += 1\n\t\t\telse:\n\t\t\t\tpurged_task_count += q.count\n\t\t\t\tq.empty()\n\n\treturn purged_task_count",
"def clearQueue(targets):",
"def delete_queue(self, queue_name: str) -> None:\n if queue_name is None:\n raise TypeError(\"Queue name cannot be None.\")\n\n with self.get_conn() as service_mgmt_conn:\n service_mgmt_conn.delete_queue(queue_name)",
"def remove_all(self):\n if self._processed:\n res, messages = self._mailconn.search(None, 'ALL')\n if res == 'OK':\n for msg in messages[0].split():\n res, data = self._mailconn.store(msg.decode('utf-8'), '+FLAGS', '\\\\Deleted')\n print(res)",
"def safe_queue_delete(self, queue_name, channel=None):\n channel = channel or self.channel\n full_queue_name = self.full_name(queue_name)\n try:\n yield from channel.queue_delete(full_queue_name, no_wait=False, timeout=1.0)\n except asyncio.TimeoutError:\n logger.warning('Timeout on queue %s deletion', full_queue_name, exc_info=True)\n except Exception:\n logger.error('Unexpected error on queue %s deletion', full_queue_name, exc_info=True)",
"def delete_bucketlist():\n pass",
"def delete_all_onprogress_domains():\r\n db = connect()\r\n cursor = db.cursor()\r\n try:\r\n cursor.execute(\"DELETE FROM on_progress_domains\")\r\n db.commit()\r\n except:\r\n cursor.close()\r\n db.close()\r\n raise RuntimeError(\"An Exception happened with the Database, make sure you are connected\")\r\n cursor.close()\r\n db.close()",
"def purge(self):\n self._rpc(specification.Queue.Purge())",
"def delete_messages(self):\n msgs_body = []\n if not self.messages:\n u_print(\" Queue.delete_messages() ERR - There is no messages or malformed messages on queue. \")\n u_print(json.dumps(self.messages, indent=4))\n sys.exit(1)\n\n try:\n for m in self.messages:\n u_print_d(\" Queue.delete_messages() Deleting the message: {}\".format(m.message_id))\n r = self.queue.delete_messages(Entries=[\n {\n 'Id': m.message_id,\n 'ReceiptHandle': m.receipt_handle\n },\n ])\n u_print_d(\" Queue.delete_messages() Deletied: {}\".format(r))\n self.stats_update('msgs_deleted', m.message_id)\n except:\n raise\n\n return msgs_body"
]
| [
"0.7308705",
"0.6090805",
"0.60719943",
"0.5983654",
"0.59769964",
"0.59579396",
"0.5926879",
"0.5807085",
"0.57812786",
"0.57787776",
"0.57786417",
"0.57765925",
"0.57747895",
"0.5687993",
"0.5681951",
"0.56519437",
"0.5629436",
"0.5589249",
"0.5585626",
"0.55741715",
"0.5567231",
"0.55487067",
"0.5536714",
"0.54918736",
"0.54034376",
"0.5388223",
"0.5370753",
"0.5357685",
"0.53375834",
"0.53321856"
]
| 0.83173805 | 0 |
Lookup up SQS url given a name. | def sqs_lookup_url(session, queue_name):
client = session.client('sqs')
resp = client.get_queue_url(QueueName=queue_name)
return resp['QueueUrl'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def play_name(self, name):\n self.name = name\n self._stream_from_name()\n return self.URL",
"def get_by_url(self, url, pool_name=None):\n\t\tif not pool_name:\n\t\t\treturn self.pool[url]\n\t\treturn getattr(self, pool_name)[url]",
"def image_url(self, name):\r\n s3_key = self._generate_s3_key(name)\r\n return s3_key.generate_url(self.IMAGE_LINK_DURATION)",
"def build_bucket_url(bucket_name) -> str:\n return \"https://s3.console.aws.amazon.com/s3/buckets/{0}\".format(bucket_name)",
"def url(self, name):\n return self.path(name)",
"def get_url(name, version=None):\n global urls\n\n # Only download the URL look up table once.\n if urls is None:\n from six.moves.urllib.request import urlopen\n import json\n f = urlopen(\"http://sncosmo.github.io/data/urls.json\")\n reader = codecs.getreader(\"utf-8\")\n urls = json.load(reader(f))\n f.close()\n\n key = name if (version is None) else \"{0}_v{1}\".format(name, version)\n\n return urls[key]",
"def url_for(self, name):\n return self.app.router.named_resources()[name].url()",
"def get_queue_by_name(name):\n sqs = boto3.resource('sqs')\n return sqs.get_queue_by_name(QueueName=name)",
"def getBucketLocation(self, bucketName):\n\t\t_bucket \t\t= f\"http://{bucketName}.s3.eu-west-1.amazonaws.com\"\n\t\trequest \t\t= get(_bucket)\n\t\tsourceCode \t\t= request.content.decode('UTF-8')\n\t\tregex \t\t\t= r'\\<Endpoint\\>(.*?)\\<\\/Endpoint\\>'\n\t\tlocation \t\t= parseRegex(regex, sourceCode)\n\t\tresult \t\t\t= \"\"\n\t\t\n\t\tif \"s3.amazonaws.com\" in str(location): \n\t\t\tresult \t\t= f\"http://{bucketName}.{location[0]}\"\n\t\t\n\t\telif len(location) == 0: \n\t\t\tresult \t\t= _bucket\n\t\t\n\t\telse: \n\t\t\tresult \t\t= f\"http://{location[0]}\"\n\n\t\twrite(var=\"$\", color=w, data=result)\n\t\treturn(result)",
"def get_url_name(self, row_id):\n return self.con.execute(\n \"SELECT url FROM urllist WHERE rowid={}\".format(row_id)\n ).fetchone()[0]",
"def get_url_name(self, url_id):\n return self.con.execute(\"select url from urllist where rowid=%d\"\n % url_id).fetchone()[0]",
"def get_url(self, resource_name):\r\n return self.__resource_meta.get(resource_name,{}).get(\"url\", None)",
"def lookup(name):",
"def lookup(name):",
"def get_queue_name(namelength = 513):\n\n appender = \"/queues/\" + binascii.b2a_hex(os.urandom(namelength))\n url = common.functionlib.create_url_from_appender(appender)\n return url",
"def _lookup_url(self, endpoint, values):\r\n try:\r\n cont = self.get_container(values['container'])\r\n if cont.cdn_enabled:\r\n return \"%s/%s\" % (cont.cdn_uri, values['filename'])\r\n else:\r\n return None\r\n except: # pragma: no cover\r\n return None",
"def getReferenceImageUrl(self, name):\n bucket = self.productSearch.bucket\n blobName = self._getReferenceImageBlobName(name)\n return bucket.blob(blobName).public_url",
"def find_data_source_url(a_name, url_prefs):\n for row in url_prefs:\n if row[0] == a_name:\n return row[1]\n return None",
"def url(self, name):\n raise NotImplementedError(\"subclasses of Storage must provide a url() method\")",
"def s3_url(row):\n return f's3://{row[\"Bucket\"]}/{row[\"Key\"]}'",
"def get_url_from_album_name(browser, name: str) -> str:\n album_name = name.split(\"-\")[1].strip()\n artist_name = name.split(\"-\")[0].strip()\n artist_url = get_url_from_artist_name(browser, artist_name)\n\n logger.debug(\"Searching for %s at %s\", album_name, artist_url)\n browser.get_url(artist_url)\n soup = browser.get_soup()\n artist_album_list = [\n [x.text.strip(), \"https://rateyourmusic.com\" + x.find(\"a\")[\"href\"]]\n for x in soup.find_all(\"div\", {\"class\": \"disco_mainline\"})\n ]\n artist_album_url = [x[1] for x in artist_album_list]\n artist_album_name = [x[0] for x in artist_album_list]\n\n url_match = artist_album_url[\n artist_album_name.index(\n get_close_matches_icase(album_name, artist_album_name)[0]\n )\n ]\n logger.debug(\"Best match : %s\", url_match)\n return url_match",
"def url_for(filename):\n return \"{}{}\".format(S3_LOCATION, filename)",
"def url_python(name):\n\n return reverse(name)",
"def keybase_lookup_url(username):\n return \"https://keybase.io/_/api/1.0/user/lookup.json?usernames=%s\" \\\n % username",
"def get_url_from_xml(name):\n url_list = []\n url_path = os.path.join(proDir, 'testFile', 'interfaceURL.xml')\n tree = ElementTree.parse(url_path)\n for u in tree.findall('url'):\n url_name = u.get('name')\n if url_name == name:\n for c in u.getchildren():\n url_list.append(c.text)\n\n url ='/'.join(url_list)\n return url",
"def lookup(self, name):\n if not self.running:\n return succeed(None)\n\n return self.resolv.lookup(name)",
"def get_s3_url(iid):\n return \"http://%s.s3-website.%s.amazonaws.com/%s\" % (\n BUCKET_NAME,\n AWS_CLIENT_CONFIG['region_name'],\n iid\n )",
"def getURLForThing(thing):",
"def get_bucket_name_from_url(file_url):\n\tparts = urlparse(file_url)\n\tpaths = parts.path.split(\"/\")\n\treturn paths[1]",
"def url(self, name):\n return '%s/%s' % (self.container_url, name)"
]
| [
"0.570739",
"0.56668997",
"0.55974376",
"0.55352825",
"0.55016017",
"0.54813766",
"0.5397629",
"0.53500366",
"0.5313121",
"0.53008205",
"0.5254056",
"0.5221821",
"0.52081007",
"0.52081007",
"0.5193847",
"0.5184935",
"0.5153158",
"0.5135208",
"0.5130682",
"0.5095509",
"0.5065089",
"0.5051591",
"0.5048215",
"0.50477326",
"0.50351",
"0.5025956",
"0.5009767",
"0.49924684",
"0.49879208",
"0.49837312"
]
| 0.75557625 | 0 |
Requests a certificate in the AWS Certificate Manager for the domain name | def request_cert(session, domain_name, validation_domain):
if session is None:
return None
client = session.client('acm')
validation_options = [
{
'DomainName': domain_name,
'ValidationDomain': validation_domain
},
]
response = client.request_certificate(DomainName=domain_name,
DomainValidationOptions=validation_options)
return response | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def request(domain):\n if not domain:\n logger.error(\n \"ctl:info:generate\", \"Choose a fully-qualified domain name of the \"\n \"certificate. Must match a domain present on the system\"\n )\n domain = click.prompt(\"Domain name\")\n try:\n client().certificates.request_acme_certificate(domain)\n except Exception as e:\n raise CLIException(str(e))",
"def get_ssl_certificate():",
"def get_ssl_certificate() :",
"def request_cert():\n\n api_request = shallow_copy(props)\n\n for key in ['ServiceToken', 'Region', 'Tags', 'Route53RoleArn']:\n api_request.pop(key, None)\n\n if 'ValidationMethod' in props:\n if props['ValidationMethod'] == 'DNS':\n\n # Check that we have all the hosted zone information we need to validate\n # before we create the certificate\n for name in set([props['DomainName']] + props.get('SubjectAlternativeNames', [])):\n get_zone_for(name)\n\n del api_request['DomainValidationOptions']\n\n e['PhysicalResourceId'] = acm.request_certificate(\n IdempotencyToken=i_token,\n **api_request\n )['CertificateArn']\n add_tags()",
"def fetch_domain_certs(domain):\n url = BASE_URL.format(domain)\n result = requests.get(url)\n if result.status_code != 200:\n result.raise_for_status()\n return result.json()",
"def test_cert_request(self):\n oim = OIM()\n rc, _, _, msg = oim.request('--hostname', 'test.' + DOMAIN)\n self.assertEqual(rc, 0, \"Failed to request certificate\\n%s\" % msg)\n self.assert_(oim.reqid != '', msg)",
"def find_cert(domain):\n print(CERT_MANAGER.find_matching_cert(domain))",
"def has_certificate(domain):\n all_certs = fetch_domain_certs(domain)\n for cert in all_certs:\n if cert[\"name_value\"] == domain:\n return cert",
"def cert_arn_lookup(session, domain_name):\n if session is None:\n return None\n\n client = session.client('acm')\n response = client.list_certificates()\n for certs in response['CertificateSummaryList']:\n if certs['DomainName'] == domain_name:\n return certs['CertificateArn']\n if certs['DomainName'].startswith('*'): # if it is a wildcard domain like \"*.thebossdev.io\"\n cert_name = certs['DomainName'][1:] + '$'\n if re.search(cert_name, domain_name) != None:\n return certs['CertificateArn']\n return None",
"def get_certificate_request(self, vault_name: str,\n certificate_name: str,\n certificate_version: str) -> dict[str, Any]:\n url = f'https://{vault_name}{self.azure_cloud.suffixes.keyvault_dns}/certificates/{certificate_name}'\n if certificate_version:\n url = url + f'/{certificate_version}'\n response = self.http_request(\n 'GET', full_url=url,\n resource=self.get_vault_resource())\n\n return response",
"def get_certificate(self, url):\n bearer = 'Authorization: Bearer '+str(self.exchanged_token).split('\\n', 1)[0]\n data = json.dumps({\"service_id\": \"x509\"})\n\n headers = StringIO()\n buffers = StringIO()\n\n c = pycurl.Curl()\n c.setopt(pycurl.URL, url)\n c.setopt(pycurl.HTTPHEADER, [bearer, 'Content-Type: application/json'])\n c.setopt(pycurl.POST, 1)\n c.setopt(pycurl.POSTFIELDS, data)\n c.setopt(c.WRITEFUNCTION, buffers.write)\n c.setopt(c.HEADERFUNCTION, headers.write)\n c.setopt(c.VERBOSE, True)\n\n try:\n c.perform()\n status = c.getinfo(c.RESPONSE_CODE)\n c.close()\n body = buffers.getvalue()\n\n if str(status) != \"303\" :\n self.log.error(\"On \\\"get redirect curl\\\": %s , http error: %s \" % (body, str(status)))\n return False \n except pycurl.error, error:\n errno, errstr = error\n self.log.info('An error occurred: %s' % errstr)\n return False\n \n redirect = self.tts\n for item in headers.getvalue().split(\"\\n\"):\n if \"location\" in item:\n redirect = redirect + item.strip().replace(\"location: \", \"\")\n\n headers = {'Authorization': 'Bearer ' + self.exchanged_token.strip()}\n response = requests.get(redirect, headers=headers)\n\n try:\n response.raise_for_status()\n except requests.exceptions.HTTPError as e:\n # Whoops it wasn't a 200\n self.log.error(\"get_certificate() Error: %s \" %str(e))\n return False\n\n with open('/tmp/output.json', 'w') as outf:\n outf.write(response.content)\n else:\n self.log.error(\"No location in redirect response\")\n\n return True",
"def create_ssl_cert_request ( ssl_hostnames ) :\n first_hostname = ssl_hostnames[ 0 ]\n csr_filename = get_ssl_csr_filename( first_hostname )\n key_filename = get_ssl_key_filename( first_hostname )\n openssl_cnf = \"\"\"\n[req]\ndistinguished_name = req_distinguished_name\nreq_extensions = san_ext\n\n[req_distinguished_name]\ncountryName_default = US\nstateOrProvinceName_default = New York\nlocalityName_default = New York\norganizationalUnitName_default = Home Box Office, Inc\ncommonName_default = \"\"\" + first_hostname + \"\"\"\n\n[san_ext]\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nsubjectAltName = @sans\n\n[sans]\n\"\"\"\n counter = 0\n for hostname in ssl_hostnames :\n counter += 1\n openssl_cnf += 'DNS.' + str( counter ) + ' = ' + hostname + '\\n'\n\n with open( first_hostname, 'w' ) as f :\n f.write( openssl_cnf )\n cmd = 'openssl req -new -newkey rsa:2048 -nodes -out ' + csr_filename + ' -keyout ' + key_filename\n cmd += ' -config ' + first_hostname + ' -subj \"/C=US/ST=New York/L=New York/O=Home Box Office Inc/CN=' + first_hostname + '\"'\n keygen = subprocess.call( cmd, shell = True )\n os.remove( first_hostname )\n if keygen != 0 :\n print \"Generation of SSL request failed!\"\n return None\n\n return { 'csr-filename' : csr_filename, 'key-filename' : key_filename }",
"def cert_challenge_dns(self) -> 'outputs.CertDnsChallengeResponse':\n return pulumi.get(self, \"cert_challenge_dns\")",
"def get_cert_arn(region: str, domain: str) -> str:\n client = boto3.client('acm', region_name=region)\n\n # as of 12/2021, we now need to tell boto3's ACM client to list other cyphers so that we see our new certs [jdw]\n includes = {\n 'keyTypes': ['RSA_2048', 'EC_prime256v1', 'EC_secp384r1']\n }\n response = client.list_certificates(Includes=includes)\n for x in response['CertificateSummaryList']:\n if domain in x['DomainName']:\n return x['CertificateArn']\n\n raise ValueError(f\"Cannot find ACM certificate for domain '{domain}' in region {region}\")",
"def get_server_certificate(self, cert_name):\r\n params = {'ServerCertificateName' : cert_name}\r\n return self.get_response('GetServerCertificate', params)",
"def get_certificate(self, cert_name, callback=None):\n # TODO: get certificate from DHT (alternative to getting from disk).\n# _log.debug(\"get_certificate:\\n\\tmy_node_name={}\\n\\tcert_name={}\\n\\tcallback={}\".format(self.node_name, cert_name, callback))\n try:\n cert = self.get_certificate_locally(cert_name)\n if cert and callback:\n callback(certstring=cert)\n elif cert:\n return cert\n else:\n try:\n self.node.storage.get_index(['certificate',cert_name],\n cb=CalvinCB(self._get_certificate_from_storage_cb,\n callback=callback))\n except Exception as err:\n _log.debug(\"Certificate could not be found in storage, err={}\".format(err))\n raise\n except Exception as err:\n _log.debug(\"Failed searching for certificate locally, cert_name={}, err={}\".format(cert_name, err))",
"def ssl_get_cert_from_request(request):\r\n certkey = \"SSL_CLIENT_S_DN\" # specify the request.META field to use\r\n\r\n cert = request.META.get(certkey, '')\r\n if not cert:\r\n cert = request.META.get('HTTP_' + certkey, '')\r\n if not cert:\r\n try:\r\n # try the direct apache2 SSL key\r\n cert = request._req.subprocess_env.get(certkey, '')\r\n except Exception:\r\n return ''\r\n\r\n return cert",
"def certificate_auth():\r\n url = 'https://www.12306.cn'\r\n response = requests.get(url, verify=False)\r\n print(response.status_code)\r\n print(response.text)",
"def get(domain_name=None):\n url = 'https://api.cloudns.net/dns/soa-details.json'\n\n params = Parameters({'domain-name': domain_name})\n\n return requests.get(url, params=params.to_dict())",
"def certificate(self) -> str:\n return pulumi.get(self, \"certificate\")",
"def find_certificate(p): # find_certificate(props, /)\n\n for page in acm.get_paginator('list_certificates').paginate():\n for certificate in page['CertificateSummaryList']:\n log_info(certificate)\n\n if p['DomainName'].lower() == certificate['DomainName']:\n tags = {tag['Key']: tag['Value'] for tag in\n acm.list_tags_for_certificate(**{'CertificateArn': certificate['CertificateArn']})['Tags']}\n\n if (tags.get('cloudformation:' + 'logical-id') == e['LogicalResourceId'] and\n tags.get('cloudformation:' + 'stack-id') == e['StackId'] and\n tags.get('cloudformation:' + 'properties') == hash_func(p)\n ):\n return certificate['CertificateArn']",
"def cert(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cert\")",
"def cert(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cert\")",
"def get_x509_certificate_by_name(certs, key_name):\n for cert in certs['certificates']:\n if cert['key_name'] == key_name:\n return cert['x509_certificate_pem']\n raise CertificateError('Certificate \\'%s\\' not found' % key_name)",
"def getCertificate(self, req):\n return dumpCert(createCert(parseCertReqStr(req), self._cert,\n self._key))",
"def get_certificate(self, cert_id):\r\n return self.ssl.getObject(id=cert_id)",
"def obtain_certificate(self, domains, csr=None):\n if self.auth_handler is None:\n logging.warning(\"Unable to obtain a certificate, because client \"\n \"does not have a valid auth handler.\")\n\n # Request Challenges\n for name in domains:\n self.auth_handler.add_chall_msg(\n name, self.acme_challenge(name), self.authkey)\n\n # Perform Challenges/Get Authorizations\n self.auth_handler.get_authorizations()\n\n # Create CSR from names\n if csr is None:\n csr = init_csr(self.authkey, domains, self.config.cert_dir)\n\n # Retrieve certificate\n certificate_msg = self.acme_certificate(csr.data)\n\n # Save Certificate\n cert_file, chain_file = self.save_certificate(\n certificate_msg, self.config.cert_path, self.config.chain_path)\n\n revoker.Revoker.store_cert_key(\n cert_file, self.authkey.file, self.config)\n\n return cert_file, chain_file",
"def deploy_cert(self, domain, cert_path, key_path, chain_path, fullchain_path):\n if self.config.rsa_key_size > 2048:\n print(\n \"The maximum public key size allowed for Cloudfront is 2048 (\"\n \"https://docs.aws.amazon.com/AmazonCloudFront/latest\"\n \"/DeveloperGuide/cnames-and-https-requirements.html)\\n\"\n \"Please, use --rsa_key_size 2048 or edit your cli.ini\")\n sys.exit(1)\n client = boto3.client('iam')\n cf_client = boto3.client('cloudfront')\n\n name = \"le-%s\" % domain\n body = open(cert_path).read()\n key = open(key_path).read()\n chain = open(chain_path).read()\n\n suffix = \"-%i\" % int(os.path.getmtime(cert_path))\n\n # Check if certificate already exists\n certificates = client.list_server_certificates(\n PathPrefix=\"/cloudfront/letsencrypt/\"\n )\n cert_id = None\n for cert in certificates['ServerCertificateMetadataList']:\n if cert['ServerCertificateName'] == (name + suffix):\n cert_id = cert['ServerCertificateId']\n\n # If certificate doesn't already exists, upload cert to IAM\n if not cert_id:\n response = client.upload_server_certificate(\n Path=\"/cloudfront/letsencrypt/\",\n ServerCertificateName=name + suffix,\n CertificateBody=body,\n PrivateKey=key,\n CertificateChain=chain\n )\n cert_id = response['ServerCertificateMetadata']['ServerCertificateId']\n\n # Update CloudFront config to use the new one\n cf_cfg = cf_client.get_distribution_config(Id=self.conf('cf-distribution-id'))\n cf_cfg['DistributionConfig']['ViewerCertificate']['IAMCertificateId'] = cert_id\n cf_cfg['DistributionConfig']['ViewerCertificate']['Certificate'] = cert_id\n cf_cfg['DistributionConfig']['ViewerCertificate']['CertificateSource'] = 'iam'\n\n # Set the default mode to SNI-only to avoid surprise charges\n if 'SSLSupportMethod' not in cf_cfg['DistributionConfig']['ViewerCertificate']:\n cf_cfg['DistributionConfig']['ViewerCertificate']['SSLSupportMethod'] = 'sni-only'\n cf_cfg['DistributionConfig']['ViewerCertificate']['MinimumProtocolVersion'] = 'TLSv1'\n\n try:\n cf_cfg['DistributionConfig']['ViewerCertificate'].pop('CloudFrontDefaultCertificate')\n except KeyError:\n pass\n try:\n cf_cfg['DistributionConfig']['ViewerCertificate'].pop('ACMCertificateArn')\n except KeyError:\n pass\n response = cf_client.update_distribution(DistributionConfig=cf_cfg['DistributionConfig'],\n Id=self.conf('cf-distribution-id'),\n IfMatch=cf_cfg['ETag'])\n\n # Delete old certs\n certificates = client.list_server_certificates(\n PathPrefix=\"/cloudfront/letsencrypt/\"\n )\n for cert in certificates['ServerCertificateMetadataList']:\n if (cert['ServerCertificateName'].startswith(name) and\n cert['ServerCertificateName'] != name + suffix):\n try:\n client.delete_server_certificate(\n ServerCertificateName=cert['ServerCertificateName']\n )\n except botocore.exceptions.ClientError as e:\n logger.error(e)",
"def certificate(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"certificate\")",
"def certificate(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"certificate\")"
]
| [
"0.71145135",
"0.68516237",
"0.68167555",
"0.6727174",
"0.63520133",
"0.62255585",
"0.6130389",
"0.61267835",
"0.5947188",
"0.5946464",
"0.587936",
"0.5868433",
"0.5864199",
"0.5861729",
"0.5860213",
"0.58348227",
"0.5812201",
"0.5778908",
"0.57471037",
"0.5649767",
"0.56429166",
"0.563426",
"0.563426",
"0.5621229",
"0.56043935",
"0.55965436",
"0.5592255",
"0.55274594",
"0.5526724",
"0.5526724"
]
| 0.7149025 | 0 |