query (string, 9–9.05k chars) | document (string, 10–222k chars) | metadata (dict) | negatives (list of 30) | negative_scores (list of 30) | document_score (string, 4–10 chars) | document_rank (2 classes) |
---|---|---|---|---|---|---|
Draw the network to a file. Only label the candidate nodes; the friend nodes should have no labels (to reduce clutter). | def draw_network(graph, users, filename):
###TODO-- Completed
candidate_names = [user['screen_name'] for user in users]
plt.figure(figsize=(12,12))
candidate_labels = {node: node if node in candidate_names else '' for node in graph.nodes_iter()}
#print(candidate_labels)
nx.draw_networkx(graph, labels=candidate_labels, alpha=0.5, node_color='r', node_size=100, width=0.1)
#plt.show()
plt.axis('off')
plt.savefig(filename)
#pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def draw_network(graph, filename):\n plt.figure(figsize=(12,12))\n nx.draw_networkx(graph, with_labels=False, alpha=.5, width=.1, node_size=100)\n plt.axis(\"off\")\n plt.savefig(filename, format=\"PNG\")",
"def draw_graph(self, out_path):\n # Define layout for network, with increased distance between nodes\n spring_layout = nx.spring_layout(self.graph, k=math.sqrt(self.graph.order()))\n\n # Draw network nodes\n nx.draw_networkx_nodes(self.graph, spring_layout, node_size=10, node_color=\"steelblue\", alpha=0.7)\n # Draw network edges\n nx.draw_networkx_edges(self.graph, spring_layout, width=0.5, alpha=0.3)\n # Draw network labels\n nx.draw_networkx_labels(self.graph, spring_layout, font_size=5, verticalalignment=\"bottom\")\n\n # Save the graph\n plt.savefig(out_path, dpi=300, bbox_inches=\"tight\")",
"def to_net(self, filename):\n if len(self.nodes1)>0:\n h = open(filename, \"w\")\n for n1,n2,s in zip(self.nodes1, self.nodes2, self.signs):\n h.write(\"%s -> %s %s\\n\" % (n1, n2, s))\n h.close()",
"def save_friend_nodes(self):\n print \"Exporting to file tsv ...\"\n count_edge = 0\n count_node = 0\n with open('../data/yelp.tsv','w') as f:\n for user in self.df['user_id']:\n for friends in self.df['friends']:\n count_node += 1\n for friend in friends:\n f.write(\"%s\\t%s\\n\" % (user, friend))\n count_edge += 1\n print \"Graph Summary:\", count_node, \"nodes,\", count_edge, \"edges.\"",
"def print_graph(self, filename='', save=False):\n nx.draw_circular(self.graph, node_color='pink', node_size=1000, with_labels=True)\n if save:\n plt.savefig(filename)\n print(f'Saved graph as {filename!r}')\n else:\n plt.show()",
"def label_users(self):\n record_unit = 1000\n print self.friendship_graph.number_of_nodes()\n print self.friendship_graph.number_of_edges()\n\n for num, node in enumerate(self.friendship_graph.nodes()):\n fake_flag = self.determine_spammer_by_percentage(node)\n self.friendship_graph.node[node]['fake'] = fake_flag\n # print self.friendship_graph.node[node]\n if num % record_unit == 0:\n print num\n print time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))\n nx.write_gpickle(self.friendship_graph, \"graph/firendship_new_label%d.pickle\" % num)\n if num != 0:\n os.remove(\"graph/firendship_new_label%d.pickle\" % (num - record_unit))\n\n nx.write_gpickle(self.friendship_graph, \"graph/firendship_0.8fake_%d.pickle\" % num)",
"def draw(self):\n\t\tnx_graph = self.parse_graph()\n\t\tpos = nx.spring_layout(nx_graph, k=0.15, iterations=20) # to spread out the nodes\n\n\t\tnx.draw(nx_graph, pos, edge_color=\"black\", width=1, linewidths=1, node_size=500, node_color=\"pink\", alpha=0.9, with_labels=True)\n\n\t\tedge_labels = {(edge[0], edge[1]):edge[2] for edge in self.edges}\n\t\tnx.draw_networkx_edge_labels(nx_graph, pos, edge_labels=edge_labels, font_color='red')\n\n\t\tplt.show()",
"def save_network(network, pos, color_nodes, weights, i, temporary_folder):\n\n # Plot a save the network without display it\n pos = nx.spring_layout(network, seed=pos, weight=7)\n plt.figure(1)\n plt.title('Random walk on a network')\n nx.draw(network, pos=pos, with_labels=True, node_color=color_nodes, width=weights)\n plt.savefig(temporary_folder + 'plotgraph' + str(i) + '.png', dpi=300, bbox_inches='tight')\n plt.close(1)",
"def print(self):\n dot = \"digraph G {\\nrankdir = UD\\n\"\n\n for i in range(len(self.allNodes)):\n if self.allNodes[i].left is not None:\n dot += str(self.allNodes[i].key) + \" -> \" + str(self.allNodes[i].left.key) + \"\\n\"\n if self.allNodes[i].right is not None:\n dot += str(self.allNodes[i].key) + \" -> \" + str(self.allNodes[i].right.key) + \"\\n\"\n\n dot += \"}\"\n\n file = open(\"outputfiles/BinTree.dot\", \"w\")\n file.write(dot)\n file.close()\n\n os.system(\"dot outputfiles/BinTree.dot -Tpng -o outputfiles/BinTree.png\")",
"def output(self):\n\t\t# Sort graph nodes by id\n\t\tnodes = list(self.nodes.values())\n\t\tnodes.sort(key=lambda n:n.id)\n\n\t\tfor n in nodes:\n\t\t\t# Get all edges\n\t\t\tedges = []\n\t\t\tfor edge in n.neighbours:\n\t\t\t\tfor neighbour in n.get_neighbours(edge):\n\t\t\t\t\tedges.append((neighbour.id, edge))\n\t\t\tedges.sort()\n\n\t\t\t# Format edges\n\t\t\tformatted = []\n\t\t\tfor edge in edges:\n\t\t\t\tformatted.append(\"%s:%s\" % (edge[0], edge[1] or \"\"))\n\n\t\t\t# Print format\n\t\t\tprint(\"%s [%s]\" % (n, \", \".join(formatted)))",
"def draw_graph(self):\r\n G=nx.Graph()\r\n \r\n list_location1 = []\r\n list_location2 = []\r\n list_location3 = []\r\n list_location4 = []\r\n \r\n for citizen in self.citizens:\r\n G.add_node(citizen.id)\r\n if citizen.location == 1:\r\n list_location1.append(citizen.id)\r\n elif citizen.location == 2:\r\n list_location2.append(citizen.id)\r\n elif citizen.location == 3:\r\n list_location3.append(citizen.id)\r\n else: \r\n list_location4.append(citizen.id)\r\n\r\n for citizen in self.citizens:\r\n for friend in citizen.friends:\r\n G.add_edge(citizen.id,friend.id)\r\n\r\n pos = nx.random_layout(G)\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location1, node_color='r')\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location2, node_color='g')\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location3, node_color='b')\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location4, node_color='y')\r\n nx.draw_networkx_edges(G,pos, width=1)\r\n\r\n plt.show()",
"def save_graph(graph, file_name):\r\n print \"Saving network into \"+file_name\r\n f = open(file_name, 'w')\r\n f.write(str(len(graph))+'\\n')\r\n for citizen in graph:\r\n f.write(str(citizen.id) + ';' + str(citizen.location) + ';' + str(citizen.influence_level) + ';' + \\\r\n str(citizen.proactivity_level) + '\\n')\r\n for op in citizen.opinions.keys():\r\n value = citizen.opinions[op].weight\r\n f.write(str(op)+':'+str(value)+';')\r\n f.write('\\n')\r\n for friend in citizen.friends:\r\n f.write(str(friend.id) + ';')\r\n f.write('\\n')\r\n f.close()",
"def write_labels():\n with open('../data/labels.txt', 'w') as labels_file:\n labels = generate_labels()\n labels_file.write('\\n'.join(labels))",
"def write(self, outfilename):\n\n nx.write_gpickle(self.graph, outfilename)",
"def draw_graph(graph, start, goal, path=[], save_file=None):\n explored = graph.get_explored_nodes()\n node_pos = {n: graph.nodes[n]['pos'] for n in graph.nodes.keys()}\n edge_labels = {}\n for edge in graph.edges():\n edge_labels[edge] = graph[edge[0]][edge[1]]['weight']\n\n labels = {}\n for node in graph:\n labels[node] = node\n\n nx.draw_networkx_nodes(graph, node_pos, node_color='gray') #, nodelist=romania.nodes, node_color='w', node_size=500)\n nx.draw_networkx_edges(graph, node_pos, style='dashed')\n if len(explored) > 0:\n print(\"Explored = \"+str(explored))\n nx.draw_networkx_nodes(graph, node_pos, nodelist=explored, node_color='r')\n\n if len(path) > 0:\n nx.draw_networkx_nodes(graph, node_pos, nodelist= path, node_color='y')\n edgelist = []\n for i in range(1,len(path)):\n edgelist.append((path[i - 1], path[i]))\n nx.draw_networkx_edges(graph, node_pos, edgelist, edge_color='b', width=3)\n nx.draw_networkx_nodes(graph, node_pos, nodelist=[start, goal], node_color='g')\n\n\n\n nx.draw_networkx_labels(graph, node_pos, labels)\n nx.draw_networkx_edge_labels(graph, node_pos, edge_labels, font_size=8)\n\n plt.axis('off')\n plt.show() # display\n if save_file is not None:\n plt.savefig(save_file) # save as png",
"def writeNetwork(self,nodeFile,linkFile):\n f = open(nodeFile,\"wb\")\n f.write(\"nodeId,x,y\" + os.linesep)\n for id,point in self.nodesDict.iteritems():\n f.write(\",\".join(map(str,(point.nodeId,point.x,point.y))) + os.linesep)\n f.close()\n \n f = open(linkFile,\"wb\")\n f.write(\"fromNode,toNode,linkId,oneWay\" + os.linesep)\n for id,link in self.linksDict.iteritems():\n if link.oneWay == \"FT\":\n oneWay = 1\n if link.oneWay == \"TF\":\n oneWay = -1\n else:\n oneWay = 0\n f.write(\",\".join(map(str,(link.fromNode.nodeId,link.toNode.nodeId,link.linkId,oneWay))) + os.linesep)\n f.close()",
"def disp_graph(graph, output_filename):\n dot = Graph(name=\"Graph\", format=\"png\") # instantiate a graph object\n for node in graph.keys(): # add nodes to the graph\n dot.node(str(node))\n for node in graph.keys(): # for every node in the input graph\n # for every other node in the input graph that the first node is connected to\n for other_node in graph[node].keys():\n dot.edge(str(node), str(other_node)) # create the edge\n dot.render(output_filename, view=True) # visualize the graph and save it",
"def print_node_edge_sets(labels, aside, paths, mode, outf):\n\t#print_gams_set(\"hide(node)\", \"hidden nodes\", aside)\n\t#print \"\"\n\n\t# genes without labels\n\tnovel=set.union(labels[\"unknown\"], aside)\n\tprint_gams_set(\"novelGene(node)\", \"unlabeled or hidden genes\", novel, out=outf)\n\toutf.write(\"\\n\")\n\n\t# interface nodes and edges - assume we've taken care of hiding\n\t# them according to the mode by now\n\thits=set()\n\tintNodes=set()\n\tintEdges=set()\t\n\t\n\t# { pathfinder : { pid : { \"nodes\":[], \"edges\":[] } } }\n\tfor pf in paths:\n\t\tfor pid in paths[pf]:\n\t\t\thits.add(paths[pf][pid][\"nodes\"][0])\n\t\t\tintNodes.add(paths[pf][pid][\"nodes\"][-2])\n\t\t\tintEdges.add(paths[pf][pid][\"edges\"][-1])\n\n\tprint_gams_set(\"hit(node)\", \"hits\", hits, out=outf)\n\toutf.write(\"\\n\")\n\tprint_gams_set(\"intNode(node)\", \"interface nodes\", intNodes, out=outf)\n\toutf.write(\"\\n\")\n\tprint_gams_set(\"intEdge(edge)\", \"interface edges\", intEdges, out=outf)\n\toutf.write(\"\\n\")",
"def draw_graph(self, fpath):\n import networkx as nx\n G = self.to_networkx()\n A = nx.nx_agraph.to_agraph(G)\n\n for proc in self.procs.values():\n nbunch = [proc.name]\n nbunch += [iport.absname() for iport in proc.iports.values()]\n nbunch += [oport.absname() for oport in proc.oports.values()]\n A.add_subgraph(\n nbunch, name='cluster_' + proc.name,\n color='lightgray', style='filled', fillcolor='lightgray')\n # color=lightgray;style=filled;fillcolor=lightgray;\n A.layout(prog='dot')\n A.draw(fpath)",
"def draw_network(G, ds, n = 5, label = False):\n\n top_n = top_n_users(ds,5)\n top_n = [int(i[0]) for i in top_n]\n H = G.subgraph(top_n)\n for m in top_n:\n child = ds[m]\n for item in child:\n H.add_edge(m,item)\n\n print \"Drawing figure...\"\n\n fig = plt.figure()\n nx.draw(H,pos=nx.spring_layout(H), node_size = 1, alpha = 0.25,\n width = 0.25, with_labels = label)\n fig.suptitle('Top 5 nodes by 1st degree connection', fontsize=20)\n# plt.savefig(\"images/TopN.png\", format=\"PNG\")\n plt.show()",
"def saveGraph(self, filename):\n nx.write_yaml(self.G,filename)",
"def plot_dag(\n self,\n filename,\n traverser,\n node_size=500,\n label_font_size=12,\n text_angle=0,\n image_width=16,\n image_height=12,\n ):\n # map nodes to a color for their operation type\n # https://stackoverflow.com/questions/27030473/how-to-set-colors-for-nodes-in-networkx-python\n color_map = []\n colors = [\"#fbb4ae\", \"#b3cde3\", \"#ccebc5\", \"#decbe4\", \"#fed9a6\"]\n for node in self.G2:\n if self.node_map[node] == OperationType.reader.value:\n color_map.append(colors[0])\n elif self.node_map[node] == OperationType.pipeline.value:\n color_map.append(colors[1])\n elif self.node_map[node] == OperationType.model.value:\n color_map.append(colors[2])\n elif self.node_map[node] == OperationType.writer.value:\n color_map.append(colors[3])\n else:\n color_map.append(colors[4])\n\n fig = plt.figure(figsize=(image_width, image_height))\n ax = plt.subplot(111)\n ax.set_title(filename, fontsize=10)\n\n try:\n import pydot\n from networkx.drawing.nx_pydot import graphviz_layout\n except ImportError: # pragma: no cover\n raise ImportError(\n \"This example needs Graphviz and pydot.\"\n \"Please refer to the Plotting requirements in the README\"\n )\n\n # pos = nx.spring_layout(G)\n # pos = nx.circular_layout(G)\n # pos = nx.kamada_kawai_layout(G)\n # pos = nx.shell_layout(G)\n # pos = nx.spectral_layout(G)\n pos = graphviz_layout(self.G2, prog=\"dot\") # , prog='twopi', args='')\n\n nx.draw(\n self.G2,\n pos,\n node_size=node_size,\n node_color=color_map,\n edge_color=\"#939393\",\n font_size=8,\n font_weight=\"bold\",\n )\n # nx.draw_networkx_nodes(G, pos, node_color='b', node_size=500, alpha=0.8)\n\n if len(self.conditional_nodes) > 0:\n cnodes = nx.draw_networkx_nodes(\n self.G2,\n pos,\n node_color=\"#e6b655\",\n node_size=1.5 * node_size,\n alpha=0.8,\n node_shape=\"D\",\n nodelist=list(self.conditional_nodes),\n )\n cnodes.set_edgecolor(\"red\")\n\n # nx.draw_networkx_labels(self.G2,pos, font_size=9)\n\n text = nx.draw_networkx_labels(\n self.G2, pos, font_size=label_font_size\n )\n\n if traverser:\n # map node name to sequence number\n sequence = traverser.traversal_list()\n idx = list(range(1, len(sequence) + 1))\n d = dict(zip(sequence, idx))\n\n # let's plot the sequence numner above the node. How far above it?\n ys = [t._y for _, t in text.items()]\n ysrange = max(ys) - min(ys)\n offset = 0.02 * abs(ysrange)\n\n for _, t in text.items():\n t.set_rotation(text_angle)\n\n if traverser:\n plt.text(t._x, t._y + offset, d[t._text], fontsize=24, color=\"red\")\n\n plt.axis(\"off\")\n plt.tight_layout()\n plt.savefig(filename, format=\"PNG\")\n logging.info(\"Graph written to %s\" % filename)",
"def save_graph(self, filename, fileType):\n if fileType == \"GML Format\":\n nx.write_gml(self.graph, filename+\".gml\")\n if fileType == \"Adjacency list\":\n nx.write_adjlist(self.graph, filename+\".adjlist\")\n if fileType == \"YAML\":\n nx.write_yaml(self.graph, filename + \".yaml\")",
"def showGraph(self, file_name = \"\"):\n \n # prepare edges and weights for visualization\n edges = self.graph.edges()\n weights = [self.graph_data[u]['pheromones'][v] for u,v in edges]\n weights_sum = sum(weights)\n weights = [ (w/weights_sum)*50 for w in weights]\n \n # prepare different shades of red to be used to optionally differentiate\n # between edges with different costs\n # to show more informatiion on the same graph\n colors = []\n max_cost = max([self.graph_data[u]['costs'][v] for u,v in edges])\n for u,v in edges:\n if self.graph_data[u]['costs'][v] <= max_cost/32:\n colors.append('#ff7f7f')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost/16:\n colors.append('#ff6666')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost/8:\n colors.append('#ff4c4c')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost/4:\n colors.append('#ff3232')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost/2:\n colors.append('#ff1919')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost:\n colors.append('#ff0000')\n continue\n \n # print the graph \n pos=nx.circular_layout(self.graph)\n nx.draw( self.graph,pos=pos,node_size=200,node_color='#A8A8A8', with_labels=True,edges=edges, edge_color=colors,edge_cmap=plt.cm.Blues, width=weights)\n if file_name != \"\":\n path = \"img/\"+file_name\n plt.savefig(path, format=\"PNG\")\n plt.show()",
"def generate_dot_file(self):\n dot_text = \"digraph blockchain {\"\n frontier = [self.root]\n while frontier != []:\n parent = frontier.pop(0)\n children = parent.children\n for child in children:\n frontier.append(child)\n dot_text += \"\\n\\t{c} -> {p};\".format(p='<' + str(parent.block) + '>',\n c='<' + str(child.block) + '>'\n )\n dot_text += \"\\n}\"\n with open(\"blockchain.gv\", \"w\") as writeFile:\n writeFile.write(dot_text)",
"def draw(self):\n g = self.to_networkx()\n pos = nx.spring_layout(g)\n nx.draw_networkx_edges(g, pos,\n edge_color=EDGE_COLOR,\n width=EDGE_WIDTH)\n obj = nx.draw_networkx_nodes(g, pos, nodelist=self.vs.values(),\n node_size=NODE_SIZE,\n node_color=NODE_COLOR_NORMAL)\n obj.set_linewidth(NODE_BORDER_WIDTH)\n obj.set_edgecolor(NODE_BORDER_COLOR)\n nx.draw_networkx_nodes(g, pos, nodelist=self.fs,\n node_size=FACTOR_NODE_SIZE,\n node_color=FACTOR_NODE_COLOR,\n node_shape=FACTOR_NODE_SHAPE)\n nx.draw_networkx_labels(g, pos, {v: v.name\n for v in self.vs.values()},\n font_color=LABEL_COLOR)",
"def create_graph(dot, filename=\"network\"):\n proc = subprocess.Popen('dot -Tpng > %s.png' % filename,\n shell=True,\n stdin=subprocess.PIPE\n )\n proc.communicate(dot.encode('utf_8'))\n execvp('open', ['open', '%s.png'%filename,])",
"def visualise_graph_on_circle(self, save_to_file, file_name) -> None:\n nodes_number = len(self.adjacency_matrix)\n phi = 2 * math.pi / nodes_number\n # estimate graph radius\n graph_radius = nodes_number * 1.5\n\n nodes = []\n\n for node in range(nodes_number):\n nodes.insert(node, (math.cos(phi * node) * graph_radius, math.sin(phi * node) * graph_radius))\n\n plt.close()\n figure, axes = plt.subplots()\n axes.set_aspect(1)\n figure.set_size_inches(8, 8)\n\n for i in range(len(self.adjacency_matrix)):\n for j in range(len(self.adjacency_matrix[0])):\n if self.adjacency_matrix[i][j] == 1:\n (x, y) = nodes[i]\n (x2, y2) = nodes[j]\n plt.plot([x / 15 + 0.5, x2 / 15 + 0.5], [y / 15 + 0.5, y2 / 15 + 0.5], 'r-', linewidth=2, zorder=1)\n\n i = 0\n for node in nodes:\n (x, y) = node\n i += 1\n circle_border = plt.Circle((x / 15 + 0.5, y / 15 + 0.5), radius=0.07 * nodes_number / 10, color='black',\n zorder=2)\n circle = plt.Circle((x / 15 + 0.5, y / 15 + 0.5), radius=0.06 * nodes_number / 10, color='green', zorder=3)\n axes.add_patch(circle_border)\n axes.add_patch(circle)\n if nodes_number <= 20:\n font_size = 16\n else:\n font_size = 20\n axes.annotate(i, xy=(x / 15 + 0.5, y / 15 + 0.5), fontsize=font_size, color='white',\n verticalalignment='center', horizontalalignment='center')\n\n plt.axis(\"off\")\n axes.set_aspect('equal')\n\n if save_to_file:\n plt.rcParams['savefig.format'] = 'png'\n plt.savefig(\"data/\" + file_name)\n else:\n plt.show()",
"def saveGraph (self, filename) :\n\t\tss = \"digraph {\\n\"\n\t\tfor key, rules in self.production_rules.items() :\n\t\t\tfor rule in rules :\n\t\t\t\tr = [op.val for op in rule]\n\t\t\t\tr = [i.replace (\"-\", \"\") for i in r]\n\t\t\t\tr = [i.replace (\".\", \"\") for i in r]\n\t\t\t\tr = [i.replace (\"\\'\\'\", \"eps\") for i in r]\n\t\t\t\tr = [i.replace (\"\\\"\\\"\", \"eps\") for i in r]\n\t\t\t\tr = [i.replace (\"/\", \"_\") for i in r]\n\t\t\t\tk = key.replace (\"-\", \"\")\n\t\t\t\tk = k.replace (\"/\", \"_\")\n\t\t\t\tk = k.replace (\".\", \"_tok\")\n\t\t\t\tss += \"\\t\" + k + \" -> \" \n\t\t\t\tss += \" -> \".join (r)\n\t\t\t\tss += \" ;\\n\"\n\t\tss += \"}\"\n\t\tfilestream = open (filename + '.dot', 'w') \n\t\tfilestream.write(ss)\n\t\tfilestream.close ()\n\t\tcmd = 'dot -Tpng -o ' + filename + '.png ' + filename + '.dot'\n\t\tos.system (cmd)\n\t\tcmd = 'rm ' + filename + '.dot'\n\t\tos.system (cmd)",
"def export(self, connected_only=True):\r\n export = StringIO()\r\n export.write('digraph G {\\n')\r\n export.write(' graph [rankdir=LR];\\n')\r\n export.write(' node [width=0.75 shape=circle];\\n')\r\n\r\n edges = [\r\n (p, c)\r\n for p, c in self.edges()\r\n if p is not None and c is not None\r\n ]\r\n for parent, child in sorted(edges):\r\n export.write(' \"{}\" -> \"{}\";\\n'.format(parent, child))\r\n\r\n for node in sorted(self.nodes(connected_only=connected_only)):\r\n export.write(' \"{}\" [label=\"{}\"];\\n'.format(node, node.value.capitalize()))\r\n export.write('}')\r\n return export.getvalue()"
] | [
"0.675565",
"0.6560529",
"0.6427169",
"0.6395321",
"0.63243383",
"0.62498444",
"0.6215251",
"0.619651",
"0.6190271",
"0.6129893",
"0.6124176",
"0.60744816",
"0.6064628",
"0.603491",
"0.5996798",
"0.59650636",
"0.5946393",
"0.5939295",
"0.59302545",
"0.592903",
"0.5917938",
"0.59067875",
"0.5870049",
"0.586368",
"0.58498013",
"0.58480716",
"0.58361554",
"0.58314854",
"0.5818725",
"0.576892"
] | 0.7188244 | 0 |
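As the `objective.triplet` entry in the metadata indicates, each row pairs a `query` with one positive `document` and 30 scored `negatives`. Below is a minimal sketch of how rows like the one above could be consumed as contrastive-training triplets; the JSON Lines layout and the file name are assumptions for illustration only, not part of this dataset's tooling.

```python
# Minimal sketch, assuming the rows above are stored as JSON Lines with the
# same field names as the table columns. The file name is hypothetical.
import json

def iter_triplets(path):
    """Yield (query, positive_document, negatives) tuples from a JSONL dump."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            row = json.loads(line)
            # 'document' is the positive passage; 'negatives' holds 30 hard
            # negatives ordered by the similarity scores in 'negative_scores'.
            yield row["query"], row["document"], row["negatives"]

if __name__ == "__main__":
    for query, positive, negatives in iter_triplets("code_retrieval_rows.jsonl"):
        print(len(query), len(positive), len(negatives))
        break
```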
log() if (level <= loglevel), text is appended to logfile with date/time prepended (nothing is ever logged when loglevel is 0). If (level <= adminlevel) then store log in adminlog list (never store anything if adminlevel is 0). | def log(text='', level=1):
if loglevel == 0 and adminlevel == 0:
return 0 # not logged
datetime = time.asctime(time.localtime(time.time()))
threadname = threading.currentThread().getName()
logtext = "%s (%s)[%d]:%s\n" % (datetime,threadname,level,text)
logged = 0 # flag if anything is logged
if level > 0 and level <= loglevel:
# log to logfile
try:
logf = open( logfile, 'a' )
logf.write( logtext )
logf.close()
logged = logged + 1
except IOError:
# Cannot open logfile for writing - save this problem in adminlog
logstr = "<Log>log() - Log warning - cannot write to logfile '%s'" % logfile
print logstr
datetime = time.asctime(time.localtime(time.time()))
logtext = "%s [%d]:%s\n" % (datetime,3,logstr)
adminlog.append( logtext )
if adminlevel > 0 and level <= adminlevel:
# log to adminlog
adminlog.append(logtext)
logged = logged + 1
return logged # 0=not logged, >0=logged | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __log(level, message):\n if level == 1:\n logging.info(\" \" + str(datetime.datetime.now()) + \" \" + message)\n if level == 2:\n logging.error(\" \" + str(datetime.datetime.now()) + \" \" + message)\n if level == 3:\n logging.critical(\" \" + str(datetime.datetime.now()) + \" \" + message)",
"def log(self, loglevel, logstring):\n # filter according to loglevel\n if self.loglevel >= loglevel:\n if loglevel == 0:\n output = \"[ERROR] \" + logstring\n elif loglevel == 1:\n output = \"[INFO] \" + logstring\n elif loglevel == 2:\n output = \"[DEBUG] \" + logstring\n elif loglevel == 3:\n output = \"[WTF] \" + logstring\n\n if self.logfile:\n self.logfile.write(logstring + \"\\n\")\n print(output)",
"def log(level=EVENT_LEVELS.Info, usr=None, msg=''):\n level = level if level in EVENT_LEVELS else EVENT_LEVELS.Info\n usr = None if usr.is_anonymous else usr\n\n if level in log_levels:\n print(f\"{level} Log: {usr} - {msg}\")\n EventLog.objects.create(\n user=usr,\n level=level,\n message=msg\n )",
"def log( loglevel, message ):\n E.log( loglevel, message )",
"def logger(level, log_info):\n log_path = getconfig(\"log\", \"LOG_PATH\")\n log_level = getconfig(\"log\", \"LOG_LEVEL\")\n log_enable = getconfig(\"log\", \"LOG_ENABLE\")\n log_fname = getconfig(\"log\", \"LOG_FNAME\")\n if not os.path.exists(log_path):\n os.makedirs(log_path)\n log_file = os.path.join(log_path, log_fname)\n # base on input string \"DEBUG\",\"ERROR\"... get level number\n lvl = l_type_lst.index(level)\n\n # now, begin to write into log file\n log_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))\n log_pid = os.getpid()\n log_script = sys._getframe().f_back.f_code.co_filename.split('/')[-1]\n log_method = sys._getframe().f_back.f_code.co_name\n log_line = sys._getframe().f_back.f_lineno\n with open(log_file, \"a\") as log:\n if lvl <= int(log_level) and bool(log_enable):\n log.write(\"%s %s %s %s:%s:%s %s\\\n\\n\" % (log_time, log_pid, level, log_script, log_method, log_line, log_info))",
"def l(msg, loglvl=0xFFFFFF):\n global LOG_LEVEL\n if (loglvl & LOG_LEVEL) != 0x0:\n print time.ctime(), ': ' , str(msg)",
"def log_lvl(lvl):\n logs.set_level(logging.getLogger(\"plysp\"), lvl)",
"def handle_log(self, workunit, level, *msg_elements):\r\n if level <= self.settings.log_level:\r\n self.do_handle_log(workunit, level, *msg_elements)",
"def log(self, log_level, text):\n timestamp = '['+str(datetime.datetime.now())+']'\n if log_level == self.ERROR:\n if self.verbose >= self.ERROR:\n print(\"ERROR :\", timestamp, \":\", str(text), file=sys.stderr)\n return\n\n if log_level == self.WARNING:\n if self.verbose >= self.WARNING:\n print(\"WARN :\", timestamp, \":\", str(text), file=sys.stderr)\n return\n\n if log_level == self.INFO:\n if self.verbose >= self.INFO:\n print(\"INFO :\", timestamp, \":\", str(text), file=sys.stderr)\n return\n\n if log_level == self.DEBUG:\n if self.verbose >= self.DEBUG:\n print(\"DEBUG :\", timestamp, \":\", str(text), file=sys.stderr)\n return",
"def write_log(self, level, message): \n \n level = level.lower()\n #print(level, message,str(self.logger))\n if level == 'debug':\n self.logger.debug('%s', message)\n elif level == 'error':\n self.logger.error('%s', message)\n elif level == 'critical':\n self.logger.critical('%s', message)\n elif level == 'warning':\n self.logger.warning('%s', message)\n else:\n self.logger.info('%s', message)",
"def log_msg(level, msg):\n now = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime())\n level = (level+' '*5)[:5]\n msg = msg.replace('\\r', '').replace('\\n', '|')\n\n line = '[{}][{}]: {}\\n'.format(now, level.upper(), msg)\n with open(CONFIG['reportFile'], 'a') as logfp:\n logfp.write(line)",
"def _log2mylog(self, msg):\n time_str = mod_time.strftime(\n \"%Y-%m-%d %H:%M:%S\", mod_time.localtime(mod_time.time())\n )\n msg = str(msg)\n content = \"%s [%s]\\n\" % (time_str, msg)\n fa = open(self.mylogfile, \"a\")\n fa.write(content)\n fa.close()",
"def log(level, message, args=()):\n\tif level >= minimumLogLevel:\n\t\ttry:\n\t\t\tif args:\n\t\t\t\tmessage = message % args\n\t\t\tif level >= screenLogLevel:\n\t\t\t\tlogToScreen(message)\n\t\t\tif level >= fileLogLevel:\n\t\t\t\tLEVEL_PREFIXES = (\n\t\t\t\t\t\"DEBUG: \",\n\t\t\t\t\t\"INFO : \",\n\t\t\t\t\t\"WARN : \",\n\t\t\t\t\t\"ERROR: \",\n\t\t\t\t)\n\t\t\t\tlogToFile(LEVEL_PREFIXES[level] + message)\n\t\texcept UnicodeError:\n\t\t\tpass",
"def _log(self, log, message):\n log_entry = '[%s] %s\\n' % (time.strftime('%Y/%m/%d %H:%M:%S'), message)\n log.write(log_entry)\n if self.verbose:\n print log_entry.rstrip()",
"def logmsg(self, lvl, msg):\n self.logger.log(lvl, msg)",
"def log(level, msg):\n weight = \"?\"\n if level>=LOG_LEVEL:\n if level == 0:\n weight = \"DEBUG\"\n elif level == 1:\n weight = \"INFO\"\n elif level == 2:\n weight = \"WARN\"\n elif level == 3:\n weight = \"ERROR\"\n else:\n log(3, \"Invalid log level: {0}\".format(level))\n print(\"{0}: {1}\".format(weight, msg))",
"def log(self, level, msg, *args, **kwargs):\n pass",
"def set(cls, log_level, log_filename, append):\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n\n # Log to sys.stderr using log level passed through command line\n if log_level != logging.NOTSET:\n log_handler = logging.StreamHandler()\n formatter = logging.Formatter(\"%(levelname)-8s %(message)s\")\n log_handler.setFormatter(formatter)\n log_handler.setLevel(log_level)\n logger.addHandler(log_handler)\n\n # Log to rotating file using DEBUG log level\n log_handler = logging.handlers.RotatingFileHandler(\n log_filename, mode=\"a+\", backupCount=3\n )\n formatter = logging.Formatter(\"%(asctime)s %(levelname)-8s %(message)s\")\n log_handler.setFormatter(formatter)\n log_handler.setLevel(logging.DEBUG)\n logger.addHandler(log_handler)\n\n if not append:\n # Create a new log file on every new\n # (i.e. not scheduled) invocation\n log_handler.doRollover()",
"def write_log(self, msg, level = \"DEBUG\"):\r\n if len(self.parent)> 13:\r\n spacer = \"\\t\"\r\n elif len(self.parent) < 8:\r\n spacer = \"\\t\\t\\t\"\r\n else:\r\n spacer = \"\\t\\t\"\r\n \r\n log = level + \"\\t\" + self.parent +spacer +str(msg)\r\n print(log)",
"def log(self, message, level=None):\n\n if level is None or level.lower() == \"all\":\n level = \"notset\"\n level = getattr(logging, level.upper())\n\n self.logger.log(level, message)",
"def set(cls, log_level, log_filename, append):\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n\n # Log to sys.stderr using log level passed through command line\n if log_level != logging.NOTSET:\n log_handler = logging.StreamHandler(sys.stdout)\n if sys.platform.find('linux') >= 0:\n formatter = ColoredFormatter(cls.COLOR_FORMAT)\n else:\n formatter = ColoredFormatter(cls.NO_COLOR_FORMAT, False)\n log_handler.setFormatter(formatter)\n log_handler.setLevel(log_level)\n logger.addHandler(log_handler)\n\n # Log to rotating file using DEBUG log level\n log_handler = logging.handlers.RotatingFileHandler(log_filename,\n mode='a+',\n backupCount=3)\n formatter = logging.Formatter('%(asctime)s %(levelname)-8s '\n '%(message)s')\n log_handler.setFormatter(formatter)\n log_handler.setLevel(logging.DEBUG)\n logger.addHandler(log_handler)\n\n if not append:\n # Create a new log file on every new\n # (i.e. not scheduled) invocation\n log_handler.doRollover()",
"def add_log(self, log):\n log = str(datetime.datetime.now()) + \": \"+log+\"\\n\"\n print(log)\n self.logs.append(log)\n if len(self.logs) > 10:\n self.append_to_logfile()",
"def admin_log(out=None, error=None, function=\"\"):\n with open(args.out_folder.strip() + \"/log.log\", 'a') as adminlogfile:\n seperation = 60 * \"=\"\n if out:\n# adminlogfile.write(function + \" \\n\" + seperation + \"\\n\" + out + \"\\n\\n\")\n adminlogfile.write(str(function) + \" \\n\" + str(seperation) + \"\\n\" + str(out) + \"\\n\\n\")\n if error:\n# adminlogfile.write(function + \"\\n\" + seperation + \"\\n\" + error + \"\\n\\n\")\n adminlogfile.write(str(function) + \"\\n\" + str(seperation) + \"\\n\" + error.decode() + \"\\n\\n\")",
"def activate_log(log_lvl='DEBUG', logfile=LOG_FILE, logoff=False):\n logger.setLevel(LOG_LVL[log_lvl])\n\n if logfile:\n # Create handlers\n f_handler = logging.FileHandler(logfile, 'w')\n f_handler.setLevel(LOG_LVL['DEBUG'])\n # Create formatters and add it to handlers\n f_format = logging.Formatter(LOG_FILE_FORMAT)\n f_handler.setFormatter(f_format)\n # Add handlers to the logger\n logger.addHandler(f_handler)\n\n if not logoff:\n # Create handlers\n c_handler = logging.StreamHandler()\n c_handler.setLevel(LOG_LVL['INFO'])\n # Create formatters and add it to handlers\n c_format = logging.Formatter(LOG_CONSOLE_FORMAT)\n c_handler.setFormatter(c_format)\n # Add handlers to the logger\n logger.addHandler(c_handler)",
"def add_log(self, logType: int, message: str) -> None:\n\n if logType not in self.__log_levels:\n logType = self.NOTSET\n\n self.__add_log(logType, message)",
"def logs_add_message(self, level, message):\n pass",
"def logbase(cls, loggerObj, lvl, messages):\n if uselogger == 0 or loggerObj is None:\n return\n if lvl == 1:\n loggerObj.debug(messages)\n if lvl == 2:\n loggerObj.info(messages)\n if lvl == 3:\n loggerObj.warning(messages)\n if lvl == 4:\n loggerObj.error(messages)",
"def log(self, msg, logging_level):\n\n # log\n self.logger.log(logging_level, msg)",
"def do_handle_log(self, workunit, level, *msg_elements):\r\n pass",
"def log(text, ltype=0, channel=\"System\"):\n\tfiles=['', ' events', ' errors', channel]\n\tprint(\"{} -- ({}) {}\".format(time.strftime(\"%c\"), channel, text))\n\ttext=\"{} -- {}\".format(time.strftime(\"%c\"), text)\n\tif ltype==0: return\n\tchannel=channel.replace(\"/\", \" \")\n\tchannel=channel.replace(\"\\\\\", \" \")\n\ttext=text.encode('ascii', 'replace')\n\ttry:\n\t\twith open('./logs/{} ({}).log'.format(config.character, channel, files[ltype]), 'a') as io:\n\t\t\tio.write(text+\"\\n\")\n\n\texcept IOError:\n\t\ta = open('./logs/{} ({}).log'.format(config.character, channel, files[ltype]), 'w')\n\t\ta.write(text+\"\\n\")\n\t\ta.close()"
] | [
"0.7120623",
"0.6867098",
"0.68032545",
"0.67844707",
"0.6657922",
"0.659959",
"0.6585036",
"0.6573275",
"0.6534616",
"0.6531102",
"0.6517555",
"0.64893204",
"0.63999486",
"0.63679504",
"0.6358388",
"0.6331364",
"0.62842757",
"0.62459934",
"0.6222694",
"0.6216095",
"0.6211309",
"0.6210026",
"0.6191211",
"0.61769235",
"0.6154293",
"0.61385167",
"0.6101311",
"0.6100829",
"0.6095554",
"0.60892105"
] | 0.82989186 | 0 |
sendadminlog() send adminlog list to adminemail only if there is something in this list. If override==1 then admin_notify times are ignored. | def sendadminlog( override=0 ):
global admin_notify_time
global adminlog
if override == 0:
# if no admin_notify_time set, set one and return
if admin_notify_time == 0:
admin_notify_time = time.time() + admin_notify
return
# if time hasn't reached admin_notify_time then return
if time.time() < admin_notify_time:
return
# time for notify - set new time and send the adminlog
admin_notify_time = time.time() + admin_notify
# if there isn't anything in adminlog don't bother
if len(adminlog) == 0:
return
headers = 'To: %s\n' % adminemail
headers = headers + 'Subject: [%s] Eddie Admin Messages\n' % hostname
body = "Greetings Eddie Admin '%s', the following log messages are\n" % adminemail
body = body + "being delivered to you for your perusal. Enjoy.\n\n"
body = body + "[Host:%s LogLevel=%d AdminLevel=%d AdminNotify=%s secs]\n" % (hostname,loglevel, adminlevel, admin_notify)
body = body + "------------------------------------------------------------------------------\n"
for i in adminlog:
body = body + "%s" % (i)
body = body + "------------------------------------------------------------------------------\n"
r = utils.sendmail( headers, body )
# clear adminlog
adminlog = [] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def emailAdmin(ip, nrLoggedEmails, lastLog):\n \n msg = lastLog[1]\n toEmail = lastLog[2]\n\n msg = \"VARNING! En dator med IP-nummer %s har skickat fler än max-antal e-postmeddelanden under angivet tidsintervall.\\n\\n\" % (ip)\n msg += \"Utdrag från senaste loggade mejlet:\\n\\nIP: %s\\nMottagare: %s\\n\\nFör mer info, kolla loggfilen på: %s\\n\\n\"\\\n % (ip, toEmail, logPath)\n msg += \"----------------------------------------------------------\\n\"\n msg += \"Detta mejl har genererats automatiskt av sMap's email-log\\n\"\n msg += \"----------------------------------------------------------\\n\"\n\n # Add a log summary\n logger = logEmail.EmailLogger(logPath)\n \n logSummary = logger.getLogSummary(secondsBack=None, asText=True)\n msg = msg + \"\\nUtdrag från loggen (visar alla sända mejl uppdelat på IP-nummer):\\n\\n%s\" % (logSummary)\n\n mimeMsg = MIMEText(msg, \"plain\", \"utf-8\")\n mimeMsg['Subject'] = \"Varning från sMaps e-post\"\n mimeMsg['From'] = fromEmail\n mimeMsg['To'] = adminEmails\n\n \"\"\"for debuggning from localhost: sendEmail.sendEmail(\"[email protected]\", \"asdf1234\", adminEmails,\\\n port=port, msg=mimeMsg)\"\"\"\n \n sendEmail.sendEmail(fromEmail, password, adminEmails.split(\",\"),\\\n smtp, port=port, msg=mimeMsg)\n \n # Store data that warning has been sent so that it won't\n # create what it tries to prevent - spamming!!\n blocked = logger.setBlock(ip)",
"def send_admin_report(self, low_use_instances, instances_scheduled_for_deletion):\n return self.send_low_use_email(ADMIN_EMAIL, low_use_instances,\n instances_scheduled_for_deletion,\n TemplateName=self.admin_template_name)",
"def main(connection, info, args) :\r\n connection.msg(info[\"channel\"], _(\"%(sender)s: The current %(botnick)s admin are: %(listofadmins)s\") % dict(sender=info[\"sender\"], botnick=self.nick, listofadmins=\", \".join(self.admin)))",
"def mail_admins(subject, message, fail_silently=False):\n _mail_group(settings.ADMINS, subject, message, fail_silently)",
"def handle_adminlogfilter(bot, event):\n if not event.rest: event.missing(\"<plugname>\") ; return\n if len(event.rest) < 3: event.reply(\"min 3 chars plz\") ; return\n setlogfilter(event.rest)\n event.done()",
"def check_admin(self, update, context):\n\n user = self.User(update)\n output = self.data_base.check_admin(user)\n user.send_message(output)\n self.data_base.log(user, update.message.text, str(output))",
"def do_admins(bot, msg, **kwargs):\n channel = kwargs.get('event').get('channel')\n bot.post_msg(\n text='My admins are: {admins}'.format(\n admins=', '.join([bot.format_user_mention(x) for x in bot.masters.values()])\n ),\n channel_name_or_id=channel\n )\n \n return True",
"def send_admin(msg_type, update, sender=None):\n send(config.get('release_team_address'), msg_type, update, sender)",
"def handle_admindebugon(bot, event):\n event.chan.data.debug = True;\n event.chan.save()\n event.reply(\"debugging is enabled for %s\" % event.channel)",
"def admin_list(message):\n load_users(message._client.users)\n names = list_to_names(user_list.admin_list)\n message.reply('My admins are: {}'.format(\", \".join(names)))",
"async def admin_msg(self, message):\n for channel in self.admin_channels.values():\n if channel:\n await channel.send(message)",
"def handle_adminlogplug(bot, event):\n if not event.rest: event.missing(\"<plugname>\") ; return\n if len(event.rest) < 3: event.reply(\"min 3 chars plz\") ; return\n setlogplug(event.rest)\n event.done()",
"async def sendall(self, ctx):\n if await check_if_logged(server_id=ctx.guild.id):\n c.execute(\"SELECT sendall FROM logging.servers WHERE serverid = %s\", (ctx.guild.id,))\n if fetch_one() == 0:\n c.execute(\"UPDATE logging.servers SET sendall = %s WHERE serverid = %s\", (1, ctx.guild.id))\n await ctx.send(f\"> **All messages will now be sent in the logging channel.**\")\n else:\n c.execute(\"UPDATE logging.servers SET sendall = %s WHERE serverid = %s\", (0, ctx.guild.id))\n await ctx.send(f\"> **Only edited and deleted messages will be sent in the logging channel.**\")\n else:\n await ctx.send(\"> **This server is not being logged.**\")",
"def show_admins(var, wrapper, message):\n cli, nick, chan, rest = wrapper.client, wrapper.source.name, wrapper.target.name, message # FIXME: @cmd\n\n admins = []\n pl = list_players()\n\n if (wrapper.public and var.LAST_ADMINS and var.LAST_ADMINS +\n timedelta(seconds=var.ADMINS_RATE_LIMIT) > datetime.now()):\n cli.notice(nick, messages[\"command_ratelimited\"].format())\n return\n\n if wrapper.public or (var.PHASE in var.GAME_PHASES or nick in pl):\n var.LAST_ADMINS = datetime.now()\n\n if var.ADMIN_PINGING:\n return\n\n var.ADMIN_PINGING = True\n\n def admin_whoreply(event, var, chan, user):\n if not var.ADMIN_PINGING or chan is not channels.Main:\n return\n\n if is_admin(user.nick): # FIXME: Using the old interface for now; user.is_admin() is better\n if user is not users.Bot and not event.params.away:\n admins.append(user.nick) # FIXME\n\n def admin_endwho(event, var, target):\n if not var.ADMIN_PINGING or target is not channels.Main:\n return\n\n admins.sort(key=str.lower)\n\n msg = messages[\"available_admins\"] + \", \".join(admins)\n\n reply(cli, nick, chan, msg)\n\n var.ADMIN_PINGING = False\n\n who_result.remove(\"who_result\")\n who_end.remove(\"who_end\")\n\n who_result = EventListener(admin_whoreply)\n who_result.install(\"who_result\")\n who_end = EventListener(admin_endwho)\n who_end.install(\"who_end\")\n\n channels.Main.who()",
"def test_admin_settings(self):\n\n with mock.patch.dict('os.environ', {\n **REQUIRED_SETTINGS,\n 'MICROMASTERS_ADMIN_EMAIL': ''\n }, clear=True):\n settings_vars = self.reload_settings()\n self.assertFalse(settings_vars.get('ADMINS', False))\n\n test_admin_email = '[email protected]'\n with mock.patch.dict('os.environ', {\n **REQUIRED_SETTINGS,\n 'MICROMASTERS_ADMIN_EMAIL': test_admin_email,\n }, clear=True):\n settings_vars = self.reload_settings()\n self.assertEqual(\n (('Admins', test_admin_email),),\n settings_vars['ADMINS']\n )\n # Manually set ADMIN to our test setting and verify e-mail\n # goes where we expect\n settings.ADMINS = (('Admins', test_admin_email),)\n mail.mail_admins('Test', 'message')\n self.assertIn(test_admin_email, mail.outbox[0].to)",
"def cmd_addadmin_private(self, argument):\n if self.is_admin:\n users = argument.split()\n for user in users:\n self.bot.admins.add(user)\n self.send(self.nick, _(\"User %s added to admins\"), user)\n self.logger.info(\"User %s added %s to admins\" % (self.nick, user))\n else:\n self.logger.warning(\"User %s tried to use '%s' without being admin\" % (self.nick, \"addadmin\"))",
"def send_email_to_admins(self, template_name, subject, **kw):\n \n mailer = self.app.module_map['mail']\n barcamp = self.barcamp\n new_user = self.user # active user\n for admin in self.barcamp.admin_users:\n print admin\n send_tos = [admin.email]\n kwargs = dict(\n new_user = new_user,\n user = admin,\n barcamp = barcamp,\n url = self.handler.url_for(\"barcamps.index\", slug = self.barcamp.slug, _full = True),\n notification_url = self.handler.url_for(\"barcamps.edit\", slug = self.barcamp.slug, _full = True)\n )\n kwargs.update(kw)\n payload = self.handler.render_lang(\"emails/%s.txt\" %template_name, **kwargs)\n mailer.mail(admin.email, subject, payload)",
"def handle_admindebugoff(bot, event):\n event.chan.data.debug = False;\n event.chan.save()\n event.reply(\"debugging is disabled for %s\" % event.channel)",
"def send_admin_notification_callback(sender, **kwargs):\r\n user = kwargs['user']\r\n\r\n studio_request_email = settings.FEATURES.get('STUDIO_REQUEST_EMAIL', '')\r\n context = {'user_name': user.username, 'user_email': user.email}\r\n\r\n subject = render_to_string('emails/course_creator_admin_subject.txt', context)\r\n subject = ''.join(subject.splitlines())\r\n message = render_to_string('emails/course_creator_admin_user_pending.txt', context)\r\n\r\n try:\r\n send_mail(\r\n subject,\r\n message,\r\n studio_request_email,\r\n [studio_request_email],\r\n fail_silently=False\r\n )\r\n except SMTPException:\r\n log.warning(\"Failure sending 'pending state' e-mail for %s to %s\", user.email, studio_request_email)",
"def send_admin_email(message, subject=None):\n send_mail(\n subject=subject if subject else \"Attention Needed\",\n message=message,\n from_email=settings.DEFAULT_FROM_EMAIL,\n recipient_list=[settings.ADMIN_EMAIL],\n fail_silently=True\n )",
"def email_admins(subject, message):\n mail_admins(subject, message=message)",
"def generate_admin_report(**kwargs):\n logger.debug(\"Creating Admin Report...\")\n configs = get_configs()\n\n load_dotenv()\n engine = db.create_engine(os.environ['DATABASE_URL'])\n connection = engine.connect()\n metadata = db.MetaData()\n\n users = db.Table('users', metadata, autoload=True, autoload_with=engine)\n domains = db.Table('domains', metadata, autoload=True, autoload_with=engine)\n dgdomains = db.Table('dg_domains', metadata, autoload=True, autoload_with=engine)\n system_settings = db.Table('system_settings', metadata, autoload=True, autoload_with=engine)\n\n # System settings\n system_raw_dates = get_sys_info(all=True)\n last_logfile_analysis = system_raw_dates['last_logfile_analysis'].strftime('%A %B %d, %Y at %I:%M %p %Z')\n last_ooni_report_generated = system_raw_dates['last_ooni_report_generated'].strftime('%A %B %d, %Y at %I:%M %p %Z')\n last_domain_test = system_raw_dates['last_domain_test'].strftime('%A %B %d, %Y at %I:%M %p %Z')\n\n\n # List admins\n user_query = db.select([users]).where(users.c.admin == True)\n admin_list = connection.execute(user_query).fetchall()\n\n # List Domain Group Owners\n # TODO: Generate reports for domain group owners\n dg_query = \"select * from users, domain_groups where CAST(users.domain_group_id as integer)=domain_groups.id and domain_groups.name != 'None'\"\n dg_list = connection.execute(dg_query).fetchall()\n\n # Get last date\n last_email_report_sent = get_sys_info(request='last_email_report_sent', update=True)\n \n reports = db.Table('reports', metadata, autoload=True, autoload_with=engine)\n report_query = db.select([reports]).where(reports.c.date_reported > last_email_report_sent)\n report_list = connection.execute(report_query).fetchall()\n\n important_reports = \"\"\n number_of_reports = len(report_list)\n number_of_problems = 0\n for report in report_list:\n if ((report['domain_status'] != 200) or (report['mirror_status'] != 200)):\n number_of_problems += 1\n translated_report = translate_reports(report)\n important_reports += translated_report\n\n number_of_ooni_reports, number_of_ooni_problems, ooni_problems = ooni_reports(last_email_report_sent)\n for problem in ooni_problems:\n orept = f\"OONI: URL Accessed: {problem['url_accessed']} Kind of Failure: {problem['failure']} DNS Consistency: {problem['dns_consistency']}\\n\"\n important_reports += orept\n\n if kwargs['mode'] == 'daemon':\n if important_reports:\n message_to_send = f\"\"\" Reporting problematic Domains and/or Alternatives since {last_email_report_sent}: \n There were {number_of_reports} domain testing reports, and {number_of_problems} problems.\n\n There were {number_of_ooni_reports} reports from OONI, with {number_of_ooni_problems} of problems.\n\n The last domain test was {last_domain_test}.\n The last logfile analysis was done on {last_logfile_analysis}.\n and the last OONI report was generated on {last_ooni_report_generated}.\n\n All detailed problem reports are below:\n\n {important_reports}\n \"\"\" \n else:\n message_to_send = f\"\"\"No Problematic Domains or Alternatives since {last_email_report_sent}. 
\n \n The last domain test was {last_domain_test}.\n The last logfile analysis was done on {last_logfile_analysis}.\n and the last OONI report was generated on {last_ooni_report_generated}.\n \n You might want to check the system.\"\"\"\n \n for user in admin_list:\n if user['notifications'] and user['active']:\n email = send_email(\n user['email'],\n \"Report From BC APP\",\n message_to_send\n )\n logger.debug(f\"Message Sent to {user['email']}: {email}\")\n\n else:\n if important_reports:\n print(f\"\"\" Reporting problematic Domains and/or Alternatives for Today: \n There were {number_of_reports} domain testing reports, and {number_of_problems} problems.\n\n There were {number_of_ooni_reports} reports from OONI, with {number_of_ooni_problems} of problems.\n\n The last domain test was {last_domain_test}.\n The last logfile analysis was done on {last_logfile_analysis}.\n The last OONI report was generated on {last_ooni_report_generated}.\n\n All detailed problem reports are below:\n\n {important_reports}\n \"\"\") \n else:\n print(f\"\"\"No problems reported since {last_email_report_sent}.\n \n The last domain test was {last_domain_test}.\n The last logfile analysis was done on {last_logfile_analysis}.\n and the last OONI report was generated on {last_ooni_report_generated}.\n \n You might want to check the system.\"\"\")\n\n return",
"def update_admin_ids():\n admin_emails_config = Registry.get_config_property(\n 'admin_emails')\n if not admin_emails_config:\n return []\n\n admin_ids = []\n for email in admin_emails_config.value:\n user_id = user_services.get_user_id_from_email(email)\n if user_id is not None:\n admin_ids.append(user_id)\n else:\n raise Exception('Bad admin email: %s' % email)\n return admin_ids",
"def execute(self):\n return LOGGER.info(f\"{datetime.datetime.now()} - Sending EMail to the configured email list\")",
"def send_infected_file_list_to_admin():\n admins = User.objects.get_superusers()\n admin_emails = []\n for admin in admins:\n admin_emails.append(admin.email)\n c = {\n 'infected_files': list_of_infected_files,\n }\n send_html_email('Virus Detected',\n 'api3/sysadmin/virus_detected_files.html', c, None, admin_emails)",
"def send_mail_to_admin(email_subject, email_body):\n\n app_id = app_identity_services.get_application_id()\n body = '(Sent from %s)\\n\\n%s' % (app_id, email_body)\n system_name_email = '%s <%s>' % (\n feconf.SYSTEM_EMAIL_NAME, feconf.SYSTEM_EMAIL_ADDRESS)\n email_services.send_mail(\n system_name_email, feconf.ADMIN_EMAIL_ADDRESS, email_subject,\n body, body.replace('\\n', '<br/>'), bcc_admin=False)",
"def _set_advance_syslog(zd, **kwargs):\n xlocs = LOCATOR_CFG_SYSTEM_NETWORKMGMT\n adv_opt = ['zd_facility_name', 'zd_priority_level', 'ap_facility_name', 'ap_priority_level']\n adv_cfg = {'pause': 1}\n adv_cfg.update(kwargs)\n \n if zd.s.is_element_present(xlocs['syslog_advanced_setting_collapse']):\n zd.s.click_and_wait(xlocs['syslog_advanced_setting_click'])\n time.sleep(adv_cfg['pause'])\n \n for key in adv_opt:\n if adv_cfg.get(key) is not None:\n zd.s.select_value(xlocs[key], adv_cfg[key])",
"def admin_post():\n try:\n total_sent = send_bulk_emails()\n except:\n return render_template('bulkfail.html')\n return render_template('bulksent.html', total_sent=total_sent)",
"def log_list(self,list_,level='INFO'):\r\n logger.write('\\n'.join(self._log_list(list_)),level)",
"def _sendAdmin(self, p: admin_pb2.AdminMessage, wantResponse=False,\n onResponse=None,\n adminIndex=0):\n\n if adminIndex == 0: # unless a special channel index was used, we want to use the admin index\n adminIndex = self.iface.localNode._getAdminChannelIndex()\n\n return self.iface.sendData(p, self.nodeNum,\n portNum=portnums_pb2.PortNum.ADMIN_APP,\n wantAck=True,\n wantResponse=wantResponse,\n onResponse=onResponse,\n channelIndex=adminIndex)"
] | [
"0.6240995",
"0.55472285",
"0.55336624",
"0.5529787",
"0.5498411",
"0.5475561",
"0.5449685",
"0.53409684",
"0.5289263",
"0.52011275",
"0.5185836",
"0.5131691",
"0.5124156",
"0.510098",
"0.510057",
"0.50931907",
"0.5059404",
"0.50396657",
"0.50167394",
"0.49878448",
"0.49843037",
"0.49111587",
"0.48969147",
"0.48951682",
"0.48862195",
"0.48856947",
"0.4841142",
"0.48355973",
"0.48099715",
"0.47901848"
] | 0.842474 | 0 |
returns the percentage of false classifications for the given resultsets produced by different models. Only images usable in all sets are considered | def get_percentage_false_class(arr_of_results):
count_success = np.zeros_like(arr_of_results[:,0], dtype=float)
count_correct_prediction = 0
for i in range(len(arr_of_results[0])):
use = True
for result in arr_of_results[:,i]:
if result["image_target"] != result["prediction_image"] or result["std_noise"] == 0:
use = False
if use:
count_correct_prediction += 1
i2 = 0
for result in arr_of_results[:,i]:
if result["success"]:
count_success[i2] += 1
i2 += 1
errors = proportion_confint(count_success, count_correct_prediction)
count_success = count_success/count_correct_prediction
errors = np.array(errors)
errors[0] = np.abs(count_success - errors[0])
errors[1] = np.abs(count_success - errors[1])
return count_success, errors | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_percentage_false_class_for_resultset(results):\n count_success = 0\n count_correct_prediction = 0\n for result in results:\n if result[\"image_target\"] == result[\"prediction_image\"] and result[\"std_noise\"] != 0:\n count_correct_prediction += 1\n if result[\"success\"] == True:\n count_success += 1\n\n error = stats.proportion.proportion_confint(count_success, count_correct_prediction, 0.05)\n\n return np.array([count_success/count_correct_prediction, error])",
"def check_correctness_statistics(classifier_out, mode, image_type):\n labels = image_type.image_data[mode].labels\n num_correct = 0\n total = len(classifier_out)\n for index, label in classifier_out:\n if labels[index] == label:\n num_correct += 1\n return (num_correct / total) * 100",
"def percent_accuracy(self, test_set, predicted_values):\r\n\r\n correct = 0\r\n for i in range(len(test_set)):\r\n if test_set[i].classification == predicted_values[i]:\r\n correct += 1\r\n return correct / len(test_set)",
"def check_correctness(classifier_out, mode, image_type):\n labels = image_type.image_data[mode].labels\n num_correct = 0\n total = len(classifier_out)\n for index, label in classifier_out:\n if labels[index] == label:\n num_correct += 1\n print(f'Got {num_correct} out of {total} correct: {(num_correct / total) * 100}%')",
"def percent_accuracy(self, true_values, predicted_values):\n\n correct = 0\n size = len(true_values)\n for i in range(len(true_values)):\n true_labels = true_values[i]\n predicted_labels = predicted_values[i]\n predicted_index = np.argmax(predicted_labels)\n\n if true_labels[predicted_index] == 1:\n correct += 1",
"def accuracy(outputs, labels):\n predicted = outputs.argmax(dim=1)\n correct = (predicted == labels).sum().item()\n return correct / labels.size(0)",
"def accuracy(outputs, targets):\n\n batch_size = targets.size(0)\n\n _, pred = torch.max(outputs.data, 1)\n correct = (pred == targets).sum().item()\n\n res = 100 * correct / batch_size\n return res",
"def test_model(model: nn.Module, test_set: data.DataLoader, number_of_classes: int) -> Tuple[score.FloatScore, score.DictScore]:\n # model.eval is used for ImageNet models, batchnorm or dropout layers will work in eval mode.\n model.eval()\n\n def test_average() -> score.FloatScore:\n correct = 0\n total = 0\n\n with torch.set_grad_enabled(False):\n for (inputs, yreal) in tqdm(test_set, unit=\"images\", desc=\"Testing model (average)\", leave=True, ascii=True):\n inputs, yreal = inputs.cuda(), yreal.cuda()\n\n ypred = model(inputs)\n _, predicted = torch.max(ypred.data, 1)\n\n total += yreal.size(0)\n correct += (predicted == yreal).sum().item()\n\n accuracy = 100 * correct / total\n log.info(\"Accuracy of the network on the {} test images (average): {}\".format(total, accuracy))\n with open('epoch_logs.txt', 'a+') as file:\n file.write('Test Acc: {}\\n'.format(accuracy))\n return score.FloatScore(accuracy)\n\n def test_per_class() -> score.DictScore:\n class_correct = list(0. for _ in range(number_of_classes))\n class_total = list(0. for _ in range(number_of_classes))\n total = 0\n\n with torch.no_grad():\n for (inputs, yreal) in tqdm(test_set, unit=\"images\", desc=\"Testing model (per class)\", leave=True, ascii=True):\n inputs, yreal = inputs.cuda(), yreal.cuda()\n\n total += yreal.size(0)\n\n ypred = model(inputs)\n _, predicted = torch.max(ypred, 1)\n c = (predicted == yreal).squeeze()\n for i in range(yreal.shape[0]):\n label = yreal[i]\n class_correct[label] += c[i].item()\n class_total[label] += 1\n\n log.info(\"Accuracy of the network on the {} test images (per-class):\".format(total))\n\n per_class_accuracy = {}\n for i in range(number_of_classes):\n accuracy = 100 * class_correct[i] / (class_total[i] + 0.0001)\n per_class_accuracy[i] = accuracy\n print('Accuracy of %5s : %2d %%' % (\n i, accuracy))\n\n return score.DictScore(per_class_accuracy)\n\n return test_average(), test_per_class()",
"def _calculate_fp_confidences(images, test_classes):\n confidences = []\n for (response_json, class_name) in predict(images, desc=f\"[{BACKGROUND}] inference\"):\n if response_json[\"status\"] != \"ok\":\n raise Exception(f\"Not OK response in {class_name}\")\n if response_json[\"response\"] in test_classes:\n confidences.append(response_json[\"confidence\"])\n return confidences",
"def _calculate_tp_confidences(images, test_class):\n confidences = []\n for (response_json, class_name) in predict(images, desc=f\"[{test_class}] inference\"):\n if response_json[\"status\"] != \"ok\":\n raise Exception(f\"Not OK response in {class_name}\")\n if class_name == test_class and response_json[\"response\"] == class_name:\n confidences.append(response_json[\"confidence\"])\n return confidences",
"def evaluate(labels, predictions):\n\n truePositiveCounter = 0\n trueNegativeCounter = 0\n truePositiveCorrect = 0\n trueNegativeCorrect = 0\n \n sensitivity = 0\n specificity = 0\n\n for i in range(len(labels)):\n if labels[i] == 1:\n truePositiveCounter += 1\n if(labels[i] == predictions[i]):\n truePositiveCorrect += 1\n elif labels[i] == 0:\n trueNegativeCounter += 1\n if(labels[i] == predictions[i]):\n trueNegativeCorrect += 1\n\n sensitivity = truePositiveCorrect / truePositiveCounter\n specificity = trueNegativeCorrect / trueNegativeCounter\n\n return sensitivity, specificity",
"def test(self, data_set):\r\n\r\n correct = 0.0\r\n total = 0.0\r\n\r\n for input, target in data_set:\r\n #actual output from neural net\r\n output = self.predict(input)\r\n total += 1.0 #number of total output vectors\r\n\r\n if allclose(output, target, self.converge) == True:\r\n correct += 1.0\r\n\r\n return correct/total",
"def test(self,dataset):\n outputs = self.use(dataset)\n \n costs = np.ones((len(outputs),1))\n # Compute classification error\n for xy,pred,cost in zip(dataset,outputs,costs):\n x,y = xy\n if y == pred[0]:\n cost[0] = 0\n\n return outputs,costs",
"def compute_batch_metrics(y_true, y_pred, num_labels = 4): \n \n # Declarating list to store results\n acc = []\n pre = []\n rec = []\n det = []\n rmse = []\n \n for batch in np.arange(y_true.shape[0]):\n \n # Declarating list to store individual results\n batch_acc = []\n batch_pre = []\n batch_rec = []\n batch_det = []\n batch_rmse = []\n \n for label in np.arange(num_labels):\n \n # Computing and storing metrics for each class\n batch_acc.append(accuracy_score(y_true[batch, label, :], y_pred[batch, label, :]))\n batch_pre.append(precision_score(y_true[batch, label, :], y_pred[batch, label, :], zero_division = 1))\n batch_rec.append(recall_score(y_true[batch, label, :], y_pred[batch, label, :], zero_division = 1))\n batch_det.append(detection_rate(y_true[batch, label, :], y_pred[batch, label, :]))\n batch_rmse.append(sqrt(mse(y_true[batch, label, :], y_pred[batch, label, :])))\n \n # Storing mean results of the instance\n acc.append(np.mean(batch_acc))\n pre.append(np.mean(batch_pre))\n rec.append(np.mean(batch_rec))\n det.append(np.mean(batch_det))\n rmse.append(np.mean(batch_rmse))\n \n # Returning mean of all results\n return np.mean(acc), np.mean(pre), np.mean(rec), np.mean(det), np.mean(rmse)",
"def accuracy(outputs, labels):\r\n outputs = np.argmax(outputs, axis=1)\r\n return np.sum(outputs == labels) / float(labels.size)",
"def classify_images(images_dir, results_dic, model):\n \n # None \n\n first_filename_list = listdir(\"pet_images/\")\n filename_list = []\n for idx in range(0, len(first_filename_list), 1):\n if not first_filename_list[idx].startswith('.'):\n filename_list.append(first_filename_list[idx])\n\n idx = 0\n for key in results_dic:\n # print(\"---------------\")\n\n value=results_dic[key]\n # print(\"\\t-----key={}\".format(key))\n # print(\"\\t-----value={}\".format(value))\n \n path = images_dir + filename_list[idx]\n # print(\"\\t-----path={}\".format(path))\n \n model_label = classifier(path, model)\n model_label = model_label.lower()\n model_label = model_label.strip()\n # print(\"\\t-----model_label={}\".format(model_label))\n \n truth = 0\n if value in model_label:\n truth = 1\n\n results_dic[key] = [ value, model_label, truth ]\n # print(\"\\t-----truth={}\".format(truth))\n idx = idx + 1",
"def make_predictions(model, test_set, val_set):\n \n ## Uses model to predict some amount of images\n predict = model.predict_classes(test_set, batch_size=5, verbose=1)\n \n ## We use the length of these two arrays when we sift through the data to find\n ## the right predictions and wrong predictions\n images = len(test_set)\n\n ## Initialises variables for loop\n correctly_guessed = 0\n\n ## Begins loop to find total correct predictions\n for i in range(images):\n if predict[i] == np.argmax(val_set[i]):\n correctly_guessed += 1\n\n ## Returns amount of predictions were correct\n print('\\nCorrectly guessed = ', correctly_guessed)\n print('Inorrectly guessed = ', (images - correctly_guessed))",
"def compute_metrics(self, results: Sequence[Dict]) -> Dict:\n\n preds = []\n gts = []\n for result in results:\n preds.append(result['pred_labels'])\n gts.append(result['gt_labels'])\n preds = torch.cat(preds)\n gts = torch.cat(gts)\n\n assert preds.max() < self.num_classes\n assert gts.max() < self.num_classes\n\n cared_labels = preds.new_tensor(self.cared_labels, dtype=torch.long)\n\n hits = (preds == gts)[None, :]\n preds_per_label = cared_labels[:, None] == preds[None, :]\n gts_per_label = cared_labels[:, None] == gts[None, :]\n\n tp = (hits * preds_per_label).float()\n fp = (~hits * preds_per_label).float()\n fn = (~hits * gts_per_label).float()\n\n result = {}\n if 'macro' in self.mode:\n result['macro_f1'] = self._compute_f1(\n tp.sum(-1), fp.sum(-1), fn.sum(-1))\n if 'micro' in self.mode:\n result['micro_f1'] = self._compute_f1(tp.sum(), fp.sum(), fn.sum())\n\n return result",
"def accuracy(outputs, labels):\n outputs = np.argmax(outputs, axis=1)\n return np.sum(outputs==labels)/float(labels.size)",
"def accuracy(outputs, labels):\n outputs = np.argmax(outputs, axis=1)\n return np.sum(outputs==labels)/float(labels.size)",
"def accuracy(outputs, labels):\n outputs = np.argmax(outputs, axis=1)\n return np.sum(outputs == labels) / float(labels.size)",
"def evaluate_classifications(self):\n test_labels = open('./digitdata/testlabels', 'r')\n self.init_confusion_matrix()\n i = 0\n class_stats = {0:[0,0], 1:[0,0], 2:[0,0], 3:[0,0], 4:[0,0], 5:[0,0], 6:[0,0], 7:[0,0], 8:[0,0], 9:[0,0]}\n total_correct = 0\n num_labels = 1000\n for label in test_labels:\n int_label = int(label)\n if int_label == self.solutions[i]:\n class_stats[int_label][0] += 1\n self.confusion_matrix[int_label][self.solutions[i]] += 1\n else:\n self.confusion_matrix[int_label][self.solutions[i]] += 1\n class_stats[int_label][1] += 1\n i += 1\n for k in class_stats:\n print \"Class \" + str(k) + \": \" + str(float(class_stats[k][0])/class_stats[k][1])\n total_correct += float(class_stats[k][0])\n print \"Overall Accuracy: \" + str(total_correct/num_labels) \n for l in range(0,10):\n for w in range(0,10):\n self.confusion_matrix[l][w] = float(self.confusion_matrix[l][w]) / class_stats[l][1]\n \n s = [[str(e) for e in row] for row in self.confusion_matrix]\n lens = [len(max(col, key=len)) for col in zip(*s)]\n fmt = '\\t'.join('{{:{}}}'.format(x) for x in lens)\n table = [fmt.format(*row) for row in s]\n print '\\n'.join(table)\n #self.print_confusion_matrix() ",
"def accuracy(outputs, labels):\n outputs = np.argmax(outputs, axis=1)\n return np.sum(outputs == labels)/float(labels.size)",
"def classProbs(observation, tree, classes):\n res = classify(observation, tree) #res = results\n total = sum(res.values())\n probs = []\n for c in classes:\n if c in res.keys():\n probs.append(float(res[c])/total)\n else:\n probs.append(0)\n return probs",
"def score_all(results):\n Y = np.concatenate([results['%dtest'%n] for n in range(10)])\n print score(np.concatenate([results['%dtrain'%n] for n in range(10)]))\n print score(np.concatenate([results['%dtest'%n] for n in range(10)]))\n class_counts = np.asarray([(Y[:,0]==n).sum() for n in range(10)])\n return confusion_matrix(Y[:,0],Y[:,1]), class_counts",
"def evaluate(labels, predictions):\n correct_positive = 0\n correct_negative = 0\n total_positive = 0\n total_negative = 0\n\n for i in range(len(labels)):\n if labels[i] == 1:\n total_positive += 1\n if predictions[i] == 1:\n correct_positive += 1\n else:\n total_negative += 1\n if predictions[i] == 0:\n correct_negative += 1\n\n sensitivity = correct_positive / total_positive\n specificity = correct_negative / total_negative\n\n return sensitivity, specificity",
"def one_zero_loss(self, test_set, predicted_values):\r\n\r\n incorrect=0\r\n for i in range(len(test_set)):\r\n if test_set[i].classification != predicted_values[i]:\r\n incorrect += 1\r\n self.performance += incorrect / len(test_set)\r\n self.num_performances += 1\r\n return incorrect / len(test_set)",
"def test_model(net, data_loader):\n net.eval()\n true_preds, count = 0.0, 0\n for imgs, labels in data_loader:\n imgs, labels = imgs.to(device), labels.to(device)\n with torch.no_grad():\n preds = net(imgs).argmax(dim=-1)\n true_preds += (preds == labels).sum().item()\n count += labels.shape[0]\n test_acc = true_preds / count\n return test_acc",
"def evaluate_classifiers(list_of_models, model_names, data_matrix, actual_values, find_features, \\\n output_file, is_distance_matrix=False):\n test_sets = ml.get_test_sets(actual_values, 10)\n predictions, timers = ml.get_cross_validation_results(list_of_models, model_names, data_matrix, \\\n actual_values, test_sets, find_features, is_distance_matrix)\n ml_eval.evaluate_classification_results(model_names, predictions, list(set(actual_values)), timers, output_file)\n return predictions",
"def accuracy(outputs, labels):\n N, C, H, W = outputs.shape\n# outputs = unnormalize(outputs, mean=[0.51371954, 0.40949144, 0.35572536], std= [0.2926419, 0.26180502, 0.25512055])\n # check if we normalize label images #labels = unnormalize(labels, mean=[0.53459634,0.39673596,0.33788489], std= [0.29101071,0.26140346,0.25485687])\n \n nume = np.max(outputs, axis = (1, 2, 3), keepdims = True) #(N,)\n deno = np.sum((outputs.reshape(-1,3,144,144) - labels.reshape(-1,3,144,144))**2, axis = (1, 2, 3), keepdims = True) / C\n deno *= 255 * 255 / H / W # (N,) range from 0-255, pixel avg\n \n psnr = (nume * 255) ** 2 / deno # (N,)\n psnr = np.log(psnr)\n psnr = 10 * np.sum(psnr) \n psnr /= math.log(10) * N\n #print(outputs.shape)\n #print(psnr)\n \n return psnr"
] | [
"0.7694963",
"0.672696",
"0.65284353",
"0.63181716",
"0.6223898",
"0.6206021",
"0.6175322",
"0.61252534",
"0.60787153",
"0.6074429",
"0.6055192",
"0.60469633",
"0.6035008",
"0.6032113",
"0.6022166",
"0.6010247",
"0.60080314",
"0.60068935",
"0.59958446",
"0.59958446",
"0.59928113",
"0.59906113",
"0.5989518",
"0.5964048",
"0.5960893",
"0.5959642",
"0.5958732",
"0.5944746",
"0.5941172",
"0.5940328"
] | 0.7144237 | 1 |
Map a value v in range [0,1] to discrete ordinal classes | def to_ordinal(v, classes):
k = len(classes)
# Map position to discrete space
n1 = k/(1+exp(-v))
# Add Gaussian noise and round
n2 = round(random.gauss(n1, sigma*(k-1)))
n3 = min(k-1, max(n2, 0))
return classes[n3] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def to_class(numlist,classlist=string.ascii_lowercase):\n\n return np.vectorize(lambda t: classlist[t])(numlist)",
"def to_class(numlist,classlist=string.ascii_lowercase):\n\n return np.vectorize(lambda t: classlist[t])(numlist)",
"def convertclasstoemotion(pred):\n \n label_conversion = {'0': 'neutral',\n '1': 'calm',\n '2': 'happy',\n '3': 'sad',\n '4': 'angry',\n '5': 'fearful',\n '6': 'disgust',\n '7': 'surprised'}\n\n for key, value in label_conversion.items():\n if int(key) == pred:\n label = value\n return label",
"def value_to_class_index(bin_arr, val_arr):\n# return pd.cut(val_arr,bin_arr,labels=False)\n return np.digitize(val_arr,bin_arr,right=True)-1",
"def value_to_class_index(bin_arr, val_arr):\n# return pd.cut(val_arr,bin_arr,labels=False)\n return np.digitize(val_arr,bin_arr,right=True)-1",
"def sentiment_value_to_label_index(self, sentiment_value):\n if 0. <= sentiment_value <= 0.2:\n return 0\n elif 0.2 < sentiment_value <= 0.4:\n return 1\n elif 0.4 < sentiment_value <= 0.6:\n return 2\n elif 0.6 < sentiment_value <= 0.8:\n return 3\n elif 0.8 < sentiment_value <= 1.0:\n return 4\n else:\n raise NotImplementedError",
"def discrete_dataset(data):\n dataset = [0] * 10\n for value in data:\n dataset[value] += 1\n return dataset",
"def to_categorical(index_label, num_classes):\n return index_label, np.eye(num_classes, dtype='uint8')[index_label]",
"def discrete_cmap(N, base_cmap=None):\n base = plt.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)",
"def color_map(val):\n color_code = remap_interval(val, -1, 1, 0, 255)\n return int(color_code)",
"def site_to_category():\n return {\"UNEW\": 1, \"USFD\": 2, \"CAU\": 3, \"TASMC\": 4, \"RBMF\": 5}",
"def to_class_id(y):\n ret_val = []\n for y_id in range(len(y)):\n if y[y_id] > 3: ret_val.append(2)\n if y[y_id] < 3: ret_val.append(0)\n if y[y_id] == 3: ret_val.append(1)\n return ret_val",
"def ordinal_conversion(value):\n last_digit = value.group(0)[-1]\n value_map = {'1': 'st', '2':'nd', '3':'rd'}\n if value_map.get(last_digit, False):\n return value.group(0) + value_map[last_digit]\n else:\n return value.group(0) + 'th'",
"def get_classification(self, idx):\n if idx in self.min_indices:\n return \"minimum\"\n elif idx in self.max_indices:\n return \"maximum\"\n return \"regular\"",
"def __convert_prob_into_class(self, probs):\n probs = T.set_subtensor(probs[probs > 0.5], 1)\n return T.set_subtensor(probs[probs <= 0.5], 0)",
"def discrete_cmap(N, base_cmap=None):\n # see https://gist.github.com/jakevdp/91077b0cae40f8f8244a\n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)",
"def value_to_class_label(bin_arr, val_arr,cla_arr=string.ascii_lowercase):\n\n return [cla_arr[i] for i in value_to_class_index(bin_arr, val_arr)]",
"def value_to_class_label(bin_arr, val_arr,cla_arr=string.ascii_lowercase):\n\n return [cla_arr[i] for i in value_to_class_index(bin_arr, val_arr)]",
"def discrete_cmap(N, base_cmap=None):\n\t# Note that if base_cmap is a string or None, you can simply do\n\t# return plt.cm.get_cmap(base_cmap, N)\n\t# The following works for string, None, or a colormap instance:\n\tbase = plt.cm.get_cmap(base_cmap)\n\tcolor_list = base(np.linspace(0, 1, N))\n\tcmap_name = base.name + str(N)\n\treturn base.from_list(cmap_name, color_list, N)",
"def discrete_cmap(N, base_cmap=None):\n \n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n \n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)",
"def discrete_cmap(N, base_cmap=None):\n \n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n \n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)",
"def discrete_cmap(N, base_cmap=None):\n \n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n \n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)",
"def get_mpg_class(mpg):\n\n if(mpg >= 45):\n return 10\n elif(mpg >= 37 and mpg < 45):\n return 9\n elif(mpg >= 31 and mpg < 37):\n return 8\n elif(mpg >= 27 and mpg < 31):\n return 7\n elif(mpg >= 24 and mpg < 27):\n return 6\n elif(mpg >= 20 and mpg < 24):\n return 5\n elif(mpg >= 17 and mpg < 20):\n return 4\n elif(mpg >= 15 and mpg < 17):\n return 3\n elif(mpg >= 14 and mpg < 15):\n return 2\n else:\n return 1",
"def discrete_cmap(N, base_cmap=None):\n\n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n\n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return matplotlib.colors.LinearSegmentedColormap \\\n .from_list(cmap_name, color_list, N)",
"def change_class_labels(classes):\n u,indices=np.unique(classes,return_inverse=True)\n return u,indices",
"def cvpr2018_labels():\n\n return {\n 0: 'others',\n 33: 'car',\n 34: 'motorcycle',\n 35: 'bicycle',\n 36: 'pedestrian',\n 38: 'truck',\n 39: 'bus',\n 40: 'tricycle'\n }",
"def discrete_cmap(N, base_cmap=None):\n\n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n\n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)",
"def discrete_cmap(N, base_cmap=None):\n\n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n\n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)",
"def discrete_cmap(N, base_cmap=None):\n\n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n\n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)",
"def discrete_cmap(N, base_cmap=None):\n\n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n\n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)"
] | [
"0.5957351",
"0.5957351",
"0.59515435",
"0.57738507",
"0.57738507",
"0.5687176",
"0.5631008",
"0.5593928",
"0.5533812",
"0.5503168",
"0.5502713",
"0.5470743",
"0.54443735",
"0.5438498",
"0.54172623",
"0.5414011",
"0.53954643",
"0.53954643",
"0.53868306",
"0.5372418",
"0.5372418",
"0.5372418",
"0.53664666",
"0.53584486",
"0.5354679",
"0.5339336",
"0.5334156",
"0.5334156",
"0.5334156",
"0.5334156"
] | 0.7464597 | 0 |
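The `to_ordinal` snippet above is not self-contained: `exp`, `random` and `sigma` come from outside the excerpt. A minimal runnable sketch of the same idea, with `sigma` promoted to an explicit parameter (an assumption; the record does not show its real value):

```python
import math
import random

def to_ordinal(v, classes, sigma=0.1):
    """Map v in [0, 1] onto one of the ordered `classes`."""
    k = len(classes)
    # Squash the continuous value onto the index range with a logistic curve.
    n1 = k / (1 + math.exp(-v))
    # Perturb with Gaussian noise (spread scaled by the number of classes) and round.
    n2 = round(random.gauss(n1, sigma * (k - 1)))
    # Clamp to a valid index before looking up the class.
    n3 = min(k - 1, max(n2, 0))
    return classes[n3]

# Example: map 0.3 onto five ordered severity levels.
levels = ["very low", "low", "medium", "high", "very high"]
print(to_ordinal(0.3, levels, sigma=0.05))
```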
Returns True if parameter is one of the continuous parameters defined in continuous_params | def is_continuous(parameter):
return sum([isinstance(parameter, p) for p in continuous_params])>0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def isActiveFitParam(param):\n return isFitParam(param) and param.isActive()",
"def is_bounded_continuous_variable(self):\n for rv in self.unique_variables:\n if not is_bounded_continuous_variable(rv):\n return False\n return True",
"def check(self, parameters):\n if np.any(parameters < self._lower):\n return False\n if np.any(parameters > self._upper):\n return False\n return True",
"def has(self, param):\n\n if param in self.params:\n return True\n\n return False",
"def param_set(params, param):\n if param in params:\n if params[param] is True:\n return True\n return False",
"def is_parametric(self):\n return self._is_parametric",
"def check_parameter(cls, par: str, value):\n\n global dtParameterDesc\n if par not in dtParameterDesc:\n return False\n\n pardata = dtParameterDesc[par]\n\n if isinstance(value, str):\n try:\n value = float(value.replace(',', '.')) * dtg.units[pardata['dunit']]['multiple']\n except ValueError:\n return False\n\n if pardata['type'] is Integral and value != int(value):\n return False\n\n if 'uplim' in pardata and (value > pardata['uplim'] or value < pardata['lowlim']):\n return False\n\n return True",
"def is_continuous(series: List) -> bool:\n\n if series.dtype in [\n np.int16,\n np.int32,\n np.int64,\n np.float16,\n np.float32,\n np.float64,\n int,\n float,\n ]:\n if (\n len(series.astype(int).unique()) / len(series) == 1\n or \"id\" == series.name.lower()\n ):\n return False\n\n elif sorted(series.unique()) == [0, 1]:\n return False\n elif len(series.unique()) == 1:\n return False\n\n else:\n return True\n else:\n\n return False",
"def isSetConstant(self):\n return _libsbml.Parameter_isSetConstant(self)",
"def _check_whether_has_params(self, params) -> bool:\n\n if params:\n return True\n return False",
"def has_parameter(self, name):\n for par in self.params:\n if par.name == name:\n return True\n return False",
"def has_param_with_name(self, param_name):\n return param_name in self.params",
"def isSetConstant(self):\n return _libsbml.LocalParameter_isSetConstant(self)",
"def has_custom_param(plot):\n return Plot.has_custom_param(plot)",
"def isQuantitativeParameter(*args):\n return _libsbml.SBO_isQuantitativeParameter(*args)",
"def is_constant(x):\n x = np.array(x)\n result = np.all(x == x[1])\n return result",
"def no_params(self) -> bool:\n result = True\n # Fixing issue #92\n if self.properties.parameters:\n return False\n else:\n return True\n # for parameter in self.properties.parameters:\n # if parameter == \"effect\":\n # continue\n # else:\n # result = False\n # break\n # return result",
"def is_valid(self,):\r\n return self.g > 0 and self.l > 0 and self.m1 > 0 and self.m2 > 0 and self.m3 > 0 and self.r1 > 0 and self.r2 > 0 and self.tau > 0 and self.theta1 > 0 and self.theta2 > 0 and self.theta3 > 0",
"def isParameter(self):\n return _libsbml.Rule_isParameter(self)",
"def validate_params(self, params: Scenario) -> bool:\n valid = True\n # Make sure all needed parameters were provided\n valid = valid and \"R\" in params\n valid = valid and \"L\" in params\n\n # Make sure all parameters are physically valid\n valid = valid and params[\"R\"] > 0\n valid = valid and params[\"L\"] > 0\n\n return valid",
"def check_state(self):\n if not self.__is_valid:\n raise GmParamError(\"Parameters of the model has not been\"\\\n \"set yet, please set them using self.set_param()\")\n\n # Check condition number for cov matrix\n if self.mode == 'diag':\n tinfo = N.finfo(self.va.dtype)\n if N.any(self.va < tinfo.eps):\n raise GmParamError(\"variances are singular\")\n elif self.mode == 'full':\n try:\n d = self.d\n for i in range(self.k):\n N.linalg.cholesky(self.va[i*d:i*d+d, :])\n except N.linalg.LinAlgError:\n raise GmParamError(\"matrix %d is singular \" % i)\n\n else:\n raise GmParamError(\"Unknown mode\")\n\n return True",
"def __contains__(self, x: ArrayLike) -> bool:\n\n return bool(\n np.all(\n np.where(\n np.logical_and(\n x >= np.min(self._domain), # pyright: ignore\n x <= np.max(self._domain), # pyright: ignore\n ),\n True,\n False,\n )\n )\n )",
"def is_parameter_present(self, obj):\n val_ref = obj.value_reference\n for p in self.parameters:\n if p.value_reference == val_ref:\n # there is already a parameter in the list with the same value_reference\n logger.error(\"There is already a parameter in the list with the same value reference: {0}\".format(val_ref))\n return True\n return False",
"def contains(self, x):\n if isinstance(x, list):\n x = np.array(x) # Promote list to array for contains check\n return ((x == 0) | (x == 1)).all() and self.low_limit <= np.count_nonzero(x) <= self.high_limit",
"def _mixed_precision_enabled_for_params(self) -> bool:\n return self.mixed_precision.param_dtype is not None",
"def check_bounds(x, param_name):\n for i in range(len(x)):\n if ((xmin[param_name][i] is not None and x[i] < xmin[param_name][i]) or\n (xmax[param_name][i] is not None and x[i] > xmax[param_name][i])):\n return False\n return True",
"def is_sparsity_enabled(cls):\n total,sp100,sp50 = 0,0,0\n for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:\n total += 1\n mask_sum = mask.sum()\n mask_numel = mask.numel()\n if mask_sum == mask_numel:\n sp100 += 1\n elif mask_sum*2 == mask_numel:\n sp50 += 1\n\n assert (total == sp100 or total == sp50), \"Inconsistent model sparsity\"\n if total == sp100:\n return False\n elif total == sp50:\n return True",
"def _isvalid(self, x):\n return (x <= self.n) & (x > 0)",
"def isstationary(self):\n if np.all(np.abs(self.arroots) > 1.0):\n return True\n else:\n return False",
"def _check_parameter(self, data):\n return self._pre_process_record(data) is not None"
] | [
"0.65586585",
"0.6500868",
"0.63482386",
"0.61473125",
"0.6105592",
"0.60367113",
"0.6024447",
"0.60074943",
"0.5997103",
"0.5964175",
"0.5911737",
"0.5889686",
"0.5884454",
"0.5884159",
"0.57671905",
"0.5730443",
"0.5730346",
"0.57129663",
"0.5712339",
"0.56427324",
"0.5616344",
"0.5614969",
"0.5603045",
"0.55933326",
"0.557896",
"0.55772394",
"0.557242",
"0.557188",
"0.5555802",
"0.5541047"
] | 0.8774252 | 0 |
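The `is_continuous` snippet sums a list of `isinstance` results; the same membership test reads more directly with `isinstance` against a tuple of classes. A small sketch using placeholder parameter classes (the class names below are hypothetical, not taken from the record):

```python
class FloatParam: ...
class RangeParam: ...
class ChoiceParam: ...

# Stand-in for the module-level continuous_params the record refers to.
continuous_params = (FloatParam, RangeParam)

def is_continuous(parameter):
    # isinstance accepts a tuple of classes, so no loop or sum() is needed.
    return isinstance(parameter, continuous_params)

print(is_continuous(FloatParam()))   # True
print(is_continuous(ChoiceParam()))  # False
```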
Return all envs and groups from all env groups | def all(self):
for group in self.groups():
yield group
for env in self.envs():
yield env | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def envs(self):\n for member in self.members:\n if not isinstance(member, EnvGroup):\n yield member\n continue\n for member in member.envs():\n yield member",
"def get_all_environments():\n return ENVIRONMENTS",
"def get_environments(self):\n environments = list()\n for group in self._ncfile.groups:\n environments.append( str(group) )\n return environments",
"def envs():\n\n # update and grab the envs from the metadata keys\n metadata = _init()\n return list(metadata.keys())",
"def all_envs():\n return all_tasks.keys()",
"def environments(self):\n envs = self.config[\"tox\"][\"envlist\"]\n #result = re.split(\"[^a-zA-Z0-9]\", envs)\n result = re.split(r'\\n| ,|,', envs)\n #print ([string for string in result if string != \"\"])\n result = (([string.strip() for string in result if string != \"\"]))\n print(list(dict.fromkeys(result)))\n return ((list(dict.fromkeys(result))))",
"def get_all_envs(op_root, op_version=None):\n\n if not op_version:\n op_version = get_oarphpy_version(op_root)\n \n envs = []\n dockers_dir = os.path.join(op_root, 'docker')\n for fname in os.listdir(dockers_dir):\n if fname.endswith('.Dockerfile'):\n class Env(DockerEnv):\n DOCKERFILE_PATH = os.path.join(dockers_dir, fname)\n IMAGE_NAME = fname.replace('.Dockerfile', '')\n IMAGE_VERSION = op_version\n SRC_ROOT = op_root\n envs.append(Env)\n return envs",
"def envs(self) -> Optional[Sequence['outputs.CSIPowerMaxSpecDriverCommonEnvs']]:\n return pulumi.get(self, \"envs\")",
"def envs(self) -> Optional[Sequence['outputs.CSIPowerStoreSpecDriverCommonEnvs']]:\n return pulumi.get(self, \"envs\")",
"def environments(self):\n env_txt = self.config[\"tox\"][\"envlist\"]\n env_lst_raw = env_txt.strip().replace(\"\\n\",\",\").split(\",\")\n env_lst = [x.strip() for x in env_lst_raw if x != \"\"]\n return env_lst",
"def envs(self) -> Optional[Sequence['outputs.CSIUnitySpecDriverCommonEnvs']]:\n return pulumi.get(self, \"envs\")",
"def all_env_ids(self) -> np.ndarray:",
"def envs(self) -> Optional[Sequence['outputs.CSIIsilonSpecDriverCommonEnvs']]:\n return pulumi.get(self, \"envs\")",
"def environments_of(groups):\n types = {}\n for group in groups:\n for env in group.environments:\n et = env.environmentType\n envs = types.setdefault((et.id, et.name), set())\n envs.add((env.id, env.name))\n return types",
"def envs(self) -> Optional[Sequence['outputs.CSIVXFlexOSSpecDriverCommonEnvs']]:\n return pulumi.get(self, \"envs\")",
"def get_all_groups(self):\n return self.groups + ['all']",
"def get_environment_vars():\n return {env: os.environ[env] for env in\n params.ENV_DIRS if env in os.environ}",
"def get_all():\n\n return AGE_GROUPS",
"def iter_hosts_and_roles():\n environmentdef = _get_environmentdef()\n\n for host_and_role in environmentdef.all():\n # fabric needs the host if we're calling from main()\n with this_hostname(host_and_role.host):\n yield host_and_role",
"def envs(self) -> Optional[Sequence['outputs.CSIUnitySpecDriverNodeEnvs']]:\n return pulumi.get(self, \"envs\")",
"def envs(self) -> Optional[Sequence['outputs.CSIPowerMaxSpecDriverNodeEnvs']]:\n return pulumi.get(self, \"envs\")",
"def envs(self) -> Optional[Sequence['outputs.CSIPowerStoreSpecDriverNodeEnvs']]:\n return pulumi.get(self, \"envs\")",
"def envs(self) -> Optional[Sequence['outputs.CSIVXFlexOSSpecDriverNodeEnvs']]:\n return pulumi.get(self, \"envs\")",
"def envs(self) -> Optional[Sequence['outputs.CSIIsilonSpecDriverNodeEnvs']]:\n return pulumi.get(self, \"envs\")",
"def property_groups(self) -> pulumi.Input[Sequence[pulumi.Input['ApplicationApplicationConfigurationEnvironmentPropertiesPropertyGroupArgs']]]:\n return pulumi.get(self, \"property_groups\")",
"def getGroups():\r\n return Group.getGroups()",
"def _get_env_list(obj, env):\n # add the [default] env\n env_list = [obj.get(\"DEFAULT_ENV_FOR_DYNACONF\")]\n # compatibility with older versions that still uses [dynaconf] as\n # [default] env\n global_env = obj.get(\"ENVVAR_PREFIX_FOR_DYNACONF\") or \"DYNACONF\"\n if global_env not in env_list:\n env_list.append(global_env)\n # add the current env\n if obj.current_env and obj.current_env not in env_list:\n env_list.append(obj.current_env)\n # add a manually set env\n if env and env not in env_list:\n env_list.append(env)\n # add the [global] env\n env_list.append(\"GLOBAL\")\n return [env.lower() for env in env_list]",
"def conda_list_environments():\n conda = '{0}/bin/conda'.format(utils.home('apps', 'miniconda'))\n\n run('{conda} info --envs'.format(conda=conda))",
"def get_environmentals(self):\n for k, v in utils.slurm_envs(default.SBATCH_VARS_FOR_WORKFLOW).items():\n setattr(self, k, v)",
"def all_groups(self):\n return self._all_groups"
] | [
"0.7548125",
"0.73480403",
"0.7321437",
"0.7285775",
"0.7087299",
"0.68996537",
"0.6339232",
"0.6245589",
"0.6203714",
"0.6185317",
"0.6175391",
"0.61400133",
"0.6139779",
"0.61358243",
"0.6125854",
"0.6088253",
"0.60369086",
"0.5976081",
"0.5975712",
"0.59464973",
"0.59317344",
"0.5917731",
"0.5908551",
"0.58584166",
"0.57948923",
"0.5773641",
"0.57698834",
"0.57525486",
"0.57338434",
"0.57239777"
] | 0.7772515 | 0 |
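The `all()` generator above interleaves two companion generators: `groups()` for nested `EnvGroup` members and `envs()` for leaf environments (an `envs()` of that shape appears as the first negative). A toy, self-contained sketch of the pattern, with the `EnvGroup` structure assumed purely for illustration:

```python
class EnvGroup:
    """Toy stand-in: a group holds plain env names and nested EnvGroups."""

    def __init__(self, name, members):
        self.name = name
        self.members = members

    def groups(self):
        # Direct sub-groups followed by the groups nested inside them.
        for member in self.members:
            if isinstance(member, EnvGroup):
                yield member
                yield from member.groups()

    def envs(self):
        # Leaf envs only, flattening nested groups recursively.
        for member in self.members:
            if isinstance(member, EnvGroup):
                yield from member.envs()
            else:
                yield member

    def all(self):
        # Same shape as the record above: every group, then every env.
        yield from self.groups()
        yield from self.envs()

root = EnvGroup("root", ["dev", EnvGroup("prod", ["prod-eu", "prod-us"])])
print([m.name if isinstance(m, EnvGroup) else m for m in root.all()])
# -> ['prod', 'dev', 'prod-eu', 'prod-us']
```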
Validate config plugins directories: check for existence and build plugins_idx | def _inspect_plugins_dirs(self):
plugins_dirs = getattr(self, 'plugins_dirs')
wrong_dirs = []
for _dir in plugins_dirs:
if not op.isdir(_dir):
wrong_dirs.append(_dir)
else:
# NOTE: if there will be plugins with the same name,
# last founded will be executed
self.plugins_idx.update(self._get_plugins_in_directory(_dir))
if wrong_dirs:
raise IOError('Wrong plugins_dirs: {}'.format(wrong_dirs))
logger.debug('Plugins dirs are inspected. '
'Founded plugins: {}'.format(self.plugins_idx)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def index_plugins(self, system, partial_root):\n for root, dirs, files in os.walk(self.directory.path):\n relroot = os.path.relpath(root, self.directory.path)\n splitrelroot = relroot.split(os.sep)\n\n # Skips hidden directories\n hiddendir = False\n for d in splitrelroot:\n if d.startswith('.'):\n hiddendir = True\n if hiddendir:\n continue\n\n if PythonPluginStubFile.can_load_directory(fileman.new(root)):\n file_pid = None\n try:\n # Generate PluginID for directory\n file_pid = PluginId.parse(\".\".join([partial_root] + splitrelroot))\n\n logger.debug(\"Indexing plugin with file_pid '{}'\".format(file_pid))\n\n # Generate description object\n plugin_description = PythonPluginStubFile(fileman.new(root))\n\n # Make the stub\n stub = PluginStub(system, plugin_description, file_pid)\n\n # Check platform to make sure the plugin is valid for the target:\n if stub.check_platform(config_target):\n # Add stub to directory (and system)\n self._add_plugin_stub(system, stub)\n else:\n logger.debug(\"Plugin failed platform check '{}'\".format(stub.id))\n except:\n # TODO(Mason): More specific exceptions\n tb_string = \"\\n\\t\".join((\"\".join(traceback.format_exception(*sys.exc_info()))).split(\"\\n\"))\n logger.error(\"Plugin failed to load, ID per directory is '{}':\\n\\t{}\".format(file_pid, tb_string))",
"def _discover_plugins(plugins_dir=config['plugins_dir']):\n plugin_paths = glob(join(plugins_dir, '*'))\n for plugin_path in plugin_paths:\n if not os.path.isdir(plugin_path):\n continue\n extra_path = get_plugin_path_extension(plugin_path)\n plugin_name = os.path.basename(plugin_path)\n plugin_name = plugin_name.replace('-', '_')\n with set_aside(), patch_path(plugin_name, *extra_path), clear_module_cache(plugin_name):\n site_packages = get_plugin_site_packages_directory(plugin_path)\n for module_name in filter_distribute_modules(get_modules(site_packages)):\n if not module_name.startswith(plugin_name):\n continue\n try:\n importlib.import_module(module_name)\n except Exception as e:\n handle_exception(e, plugin_name, module_name)\n ensure_plugin_module_loaded(plugin_name)",
"def setup_plugins(self) -> None:\n load_success = 0\n load_error = 0\n load_disabled = 0\n\n LOGGER.info(\"Loading plugins...\")\n usable_plugins = plugins.get_usable_plugins(self.settings)\n for name, info in usable_plugins.items():\n plugin_handler, is_enabled = info\n if not is_enabled:\n load_disabled = load_disabled + 1\n continue\n\n try:\n plugin_handler.load()\n except Exception as e:\n load_error = load_error + 1\n LOGGER.exception(\"Error loading %s: %s\", name, e)\n except SystemExit:\n load_error = load_error + 1\n LOGGER.exception(\n \"Error loading %s (plugin tried to exit)\", name)\n else:\n try:\n if plugin_handler.has_setup():\n plugin_handler.setup(self)\n plugin_handler.register(self)\n except Exception as e:\n load_error = load_error + 1\n LOGGER.exception(\"Error in %s setup: %s\", name, e)\n else:\n load_success = load_success + 1\n LOGGER.info(\"Plugin loaded: %s\", name)\n\n total = sum([load_success, load_error, load_disabled])\n if total and load_success:\n LOGGER.info(\n \"Registered %d plugins, %d failed, %d disabled\",\n (load_success - 1),\n load_error,\n load_disabled)\n else:\n LOGGER.warning(\"Warning: Couldn't load any plugins\")",
"def initialize():\n\n # create plugin locations\n for p in (cache_path, config_path, data_path):\n p.mkdir(parents=False, exist_ok=True)",
"def scan_plugin(self):\n pluginpath=_module_path()\n plugins=[]\n for f in os.listdir(pluginpath):\n if os.path.isfile(os.path.join(pluginpath,f)) and os.path.splitext(os.path.join(pluginpath,f))[-1]=='.py' :\n if 'plugin_' in os.path.basename(f):\n logger.debug(\"found plugin : %s\",f)\n plugins.append(f)\n return plugins",
"async def load_plugins(self):\n for plug in os.listdir('plugins'):\n if plug.startswith('.'):\n continue\n if not os.path.isdir('plugins/%s' % plug) or not os.path.isfile('plugins/%s/hook.py' % plug):\n self.log.error('Problem locating the \"%s\" plugin. Ensure CALDERA was cloned recursively.' % plug)\n exit(0)\n plugin = Plugin(name=plug)\n if await plugin.load():\n await self.get_service('data_svc').store(plugin)\n if plugin.name in self.config['plugins']:\n plugin.enabled = True\n for plugin in self.config['plugins']:\n plug = await self._services.get('data_svc').locate('plugins', match=dict(name=plugin))\n [await p.enable(self.get_services()) for p in plug]\n self.log.debug('Enabling %s plugin' % plugin)\n\n templates = ['plugins/%s/templates' % p.name.lower()\n for p in await self.get_service('data_svc').locate('plugins')]\n templates.append('templates')\n aiohttp_jinja2.setup(self.application, loader=jinja2.FileSystemLoader(templates))",
"def _check_required_directories(self) -> None:\n\n if self._all_stages:\n for stage in self._all_stages:\n stage_cfg = self._app_cfg['stages'][stage]\n processor_cfg = stage_cfg['configuration']\n\n # Populate all the directories requested in the configuration.\n for dir_key, dir_id in processor_cfg['dirs'].items():\n dir_path_value = os.path.join(self._data_dir_path, self._app_cfg['dir-paths'][dir_id])\n # Rebuild the key by replacing 'id' with 'path'\n dir_path_key = dir_key.replace('id', 'path')\n processor_cfg[dir_path_key] = dir_path_value\n\n # Create the directory if it doesn't exist.\n self._validate_path(dir_path_value)\n\n # Add the temporary directory.\n processor_cfg['tmp-dir-path'] = self._tmp_dir_path\n\n del processor_cfg['dirs']",
"def _loadPlugins(self, plugin_repo_path):\n try:\n os.stat(plugin_repo_path)\n except OSError:\n \n pass\n \n sys.path.append(plugin_repo_path)\n\n dir_name_regexp = re.compile(r\"^[\\d\\w\\-\\_]+$\")\n for name in os.listdir(plugin_repo_path):\n if dir_name_regexp.match(name):\n try:\n module_path = os.path.join(plugin_repo_path, name)\n sys.path.append(module_path)\n module_filename = os.path.join(module_path, \"plugin.py\")\n self._plugin_modules[name] = imp.load_source(name, module_filename)\n except Exception:\n msg = \"An error ocurred while loading plugin %s.\\n%s\" % (module_filename, traceback.format_exc())\n getLogger(self).error(msg)\n else:\n pass",
"def initialize(self):\n\n # create plugin locations\n for p in (cache_path, config_path, data_path):\n p.mkdir(parents=False, exist_ok=True)",
"def _iter_plugin_files(dirs):\n for plugin_dir in dirs:\n plugin_dir = Path(plugin_dir).expanduser()\n if not plugin_dir.exists(): # pragma: no cover\n continue\n for subdir, dirs, files in os.walk(plugin_dir, followlinks=True):\n subdir = Path(subdir)\n # Skip test folders.\n base = subdir.name\n if 'test' in base or '__' in base or '.git' in str(subdir): # pragma: no cover\n continue\n logger.debug(\"Scanning `%s`.\", subdir)\n for filename in files:\n if (filename.startswith('__') or not filename.endswith('.py')):\n continue # pragma: no cover\n logger.debug(\"Found plugin module `%s`.\", filename)\n yield subdir / filename",
"def find_plugins():\n for root, dirs, files in os.walk(PLUGINS_DIR):\n for file in files:\n if file.endswith('.py'):\n yield os.path.join(root, file)",
"def discover_plugins(dirs):\n # Scan all subdirectories recursively.\n for path in _iter_plugin_files(dirs):\n subdir = path.parent\n modname = path.stem\n if modname in ('phy_config', 'phycontrib_loader'):\n continue\n file, path, descr = imp.find_module(modname, [subdir])\n if file:\n # Loading the module registers the plugin in\n # IPluginRegistry.\n try:\n mod = imp.load_module(modname, file, path, descr) # noqa\n except Exception as e: # pragma: no cover\n logger.exception(e)\n finally:\n file.close()\n return IPluginRegistry.plugins",
"def load_plugins(self, config):\n # 从文件夹中扫描出plugin文件\n plugins_file = []\n try:\n for f in glob.glob(os.path.join(self.directory, '*.py')):\n f = os.path.basename(f)\n if f not in ('__init__.py', 'base.py'):\n plugins_file.append(f[:-3])\n except OSError:\n print(\"Failed to access: %s\" % dir)\n\n # 将文件装置成类对象\n for name in plugins_file:\n path = os.path.relpath(self.directory, os.path.realpath('.'))\n path = path.replace(os.path.sep, '.')\n module = import_module('.%s' % name, path)\n plugin_class = getattr(module, getattr(module, \"__className__\"))\n if hasattr(module, \"__type__\"):\n plugins[name] = {'type': module.__type__, 'plugin_class': plugin_class}\n\n # 恢复初始值\n hook.plugins = {}\n\n # 根据配置顺序向hook注册\n for (type, plugin_list) in config.items():\n for plugin_name in plugin_list:\n if plugin_name in plugins:\n plugin = plugins[plugin_name]\n if type in plugin['type']:\n hook.plugins.setdefault(type, []).append({\n 'name': plugin_name,\n 'plugin_class': plugin['plugin_class'],\n 'toggle': True\n })\n else:\n raise PluginNoSupportException('{} no support {}'.format(plugin_name, type))\n else:\n raise NoFoundPluginException('{} no found'.format(plugin_name))\n\n # 追加未开启的插件\n for name, plugin in plugins.items():\n for type in plugin['type']:\n if name not in config[type]:\n # 添加未开启的插件信息\n hook.plugins.setdefault(type, []).append({\n 'name': name,\n 'plugin_class': plugin['plugin_class'],\n 'toggle': False\n })",
"def setup(self):\n Utils.check_dir(os.path.join(expanduser('~'), '.drupdates', 'plugins'))",
"def _config_files():\n from .plugin import plugins\n return [p for p in (p.config_file() for p in plugins()) if p is not None]",
"def _install_or_update_or_delete_plugins(cls):\n installed_plugins = {}\n for plugin in PluginCollection.all():\n plugin_adapter = wrap_plugin(plugin)\n installed_plugins[plugin_adapter.path_name] = plugin\n\n for plugin_dir in cls._list_plugins_on_fs():\n if plugin_dir in installed_plugins:\n cls._plugin_update(installed_plugins.pop(plugin_dir))\n else:\n cls._plugin_create(plugin_dir)\n for deleted_plugin in installed_plugins.values():\n cls._plugin_delete(deleted_plugin)",
"def find_modules_locations(plugin_path):\n modulepaths = os.environ[\"MAYA_MODULE_PATH\"].split(os.pathsep)\n modulepaths.reverse()\n for path in modulepaths:\n if not os.path.isdir(path):\n try:\n os.makedirs(p)\n except:\n print(\"Module directory doesn't exist, \"\n \"and cannot create it: %s\" % path)\n continue\n if AzureBatchSetup.create_modfile(path, plugin_path):\n return True\n return False",
"def register_xmpp_plugins(self):\n plugins = self.botconfig.findall('plugins/xmpp/plugin')\n if plugins:\n for plugin in plugins:\n try:\n config = plugin.find('config')\n if config is None:\n self.registerPlugin(plugin.attrib['name'])\n else:\n self.registerPlugin(plugin.attrib['name'], config)\n logging.info(\"Registering XMPP plugin %s OK\" % (plugin.attrib['name']))\n except Exception as e:\n logging.info(\"Registering XMPP plugin %s FAILED: %s\" % (plugin.attrib['name'], e))",
"def __load_plugins(self, directory_to_search, plugin_files):\n\n if os.path.abspath(directory_to_search) not in sys.path:\n sys.path.insert(0, os.path.abspath(directory_to_search))\n\n for next_plugin_file in plugin_files:\n next_plugin_module = next_plugin_file[0:-3]\n plugin_class_name = self.__snake_to_camel(next_plugin_module)\n self.__attempt_to_load_plugin(\n next_plugin_module, plugin_class_name, next_plugin_file\n )",
"def _build_search_index(self):\n logger.info(\"Checking if full sphinx index build required\")\n check_files = [\n '/var/lib/sphinxsearch/data/document.spp',\n ]\n needs_init = False\n for check_f in check_files:\n with hide(*fab_quiet):\n check_result = sudo('ls %s' % check_f)\n if check_result.failed:\n needs_init = True\n break\n\n if not needs_init:\n logger.info(\"Sphinx indexes already exist\")\n return False\n\n logger.info(\"Building full sphinxsearch index\")\n with hide(*fab_output_hides):\n # Chown relevant directories to belong to policystat.\n sudo(\n 'chown -R %s '\n '/var/lib/sphinxsearch /var/log/sphinxsearch'\n '' % F_CHOWN\n )\n\n with hide(*fab_quiet):\n # Stop searchd\n sudo('stop sphinxsearch')\n sudo('killall searchd')\n\n # Build the main index then the delta\n index_result = sudo_bg(\n 'indexer document && indexer document_delta',\n user='policystat',\n )\n if index_result.failed:\n logger.critical(\n \"Error building sphinx indexes. Result: %s\",\n index_result,\n )\n\n return True",
"def get_plugins():\n plugin_folders = []\n plugin_folders.append(os.path.dirname(os.path.realpath(__file__)) + \"/plugins\")\n plugin_folders.append(os.path.join(expanduser('~'), '.drupdates', 'plugins'))\n plugins = {}\n for plugin_folder in plugin_folders:\n if not os.path.isdir(plugin_folder):\n continue\n possibleplugins = os.listdir(plugin_folder)\n for i in possibleplugins:\n location = os.path.join(plugin_folder, i)\n if not os.path.isdir(location) or not \"__init__.py\" in os.listdir(location):\n continue\n info = imp.find_module(\"__init__\", [location])\n plugins[i] = ({\"name\": i, \"info\": info})\n return plugins",
"def handle_plugins(plugins, plugins_dir, installation_dir):\n create_plugins_dir(plugins_dir)\n # Install plugins.\n if not isinstance(plugins, dict):\n raise NonRecoverableError(\n 'The plugins value is not valid: {value} '\n 'If you wish to use custom Terraform providers must provide a '\n 'dictionary in the following format: search.path/provider_name.'\n ''\n 'For example:'\n 'plugins: \\n'\n ' registry.terraform.io/hashicorp/template: '\n 'https://releases.hashicorp.com/terraform-provider-template/'\n '2.1.2/'\n 'terraform-provider-template_2.1.2_linux_amd64.zip\\n'.format(\n value=plugins)\n )\n for plugin_name, plugin_url in plugins.items():\n with tempfile.NamedTemporaryFile(\n suffix=\".zip\",\n delete=False,\n dir=installation_dir) as plugin_zip:\n plugin_zip.close()\n ctx.logger.debug('Downloading Terraform plugin: {url}'.format(\n url=plugin_url))\n download_file(plugin_zip.name, plugin_url)\n unzip_path = os.path.join(plugins_dir, plugin_name)\n mkdir_p(os.path.dirname(unzip_path))\n unzip_and_set_permissions(plugin_zip.name, unzip_path)\n os.remove(plugin_zip.name)",
"def plugins():\n pass",
"def dataComponent_pluginBuild(self, **kwargs):\n str_path = '/plugins'\n self.PT = plugin.Plugin_DS(within = self)\n P = self.PT._pluginTree\n\n for key,val in kwargs.iteritems():\n if key == 'path': str_path = val\n\n s = self.contents\n if s.cd(str_path)['status']:\n s.mknode(['run'])\n s.mkcd('available')\n for d in P.lstr_lsnode('/plugins'):\n s.graft(P, '/plugins/%s' % d)",
"def install_plugins(virtual_env):\n logging.info(\"Installing holland plugins\")\n for plugin_dir in open(join(HOLLAND_ROOT, 'plugins', 'ACTIVE')):\n plugin_dir = plugin_dir.rstrip()\n plugin_path = join(HOLLAND_ROOT, 'plugins', plugin_dir)\n ret = run_setup_develop(cwd=plugin_path, env=virtual_env)\n if ret != 0:\n logging.error(\"Failed to install plugin %s\", plugin_dir)\n else:\n logging.info(\"Installed plugin %s\", plugin_dir)",
"def discover_all_plugins(self):\n for v in pkg_resources.iter_entry_points('dgit.plugins'):\n m = v.load()\n m.setup(self)",
"def loadPlugins():\n sys.path.append(basedefs.DIR_PLUGINS)\n fileList = sorted(os.listdir(basedefs.DIR_PLUGINS), cmp=plugin_compare)\n for item in fileList:\n # Looking for files that end with ###.py, example: a_plugin_100.py\n match = re.search(\"^(.+\\_\\d\\d\\d)\\.py$\", item)\n if match:\n try:\n moduleToLoad = match.group(1)\n logging.debug(\"importing module %s, from file %s\", moduleToLoad, item)\n moduleobj = __import__(moduleToLoad)\n moduleobj.__file__ = os.path.join(basedefs.DIR_PLUGINS, item)\n globals()[moduleToLoad] = moduleobj\n checkPlugin(moduleobj)\n controller.addPlugin(moduleobj)\n except:\n logging.error(\"Failed to load plugin from file %s\", item)\n logging.error(traceback.format_exc())\n raise Exception(\"Failed to load plugin from file %s\" % item)",
"async def _configure_plugins(self) -> None:\n logger.debug('Configuring plugins')\n funcs = [\n info['plugin'].configure(\n config=info['config'],\n session=self._session,\n router=self.app.router\n )\n for info in self._plugins.values()\n ]\n\n if funcs:\n await asyncio.gather(*funcs, loop=self._loop)\n logger.debug('Plugins configured')",
"def _get_plugins(self):\n logger.debug('Gathering plugins')\n\n for plugin in plugins.__all__:\n try:\n module = importlib.import_module('plugins.' + plugin)\n\n for plugin_class in dir(module):\n obj = getattr(module, plugin_class)\n\n if inspect.isclass(obj) and issubclass(obj, threading.Thread): # if plugin is subclass of Thread\n try:\n self.plugins.append(PluginInfo(obj))\n logger.info('Plugin found: \\\"{}\\\" with identifier: \\\"{}\\\"'.format(obj.p_name, obj.p_identifier))\n except (AttributeError, ValueError) as err:\n if isinstance(err, AttributeError):\n logger.exception('Plugin: \\\"{}\\\" missing one or more required properties, ignoring...'.format(plugin_class))\n elif isinstance(err, ValueError):\n logger.exception('Plugin: \\\"{}\\\" contains a space in the identifier, ignoring...'.format(plugin_class))\n\n except ImportError:\n logger.error('Could not load plugin: \\\"{}\\\"'.format(plugin))",
"def build_configs():"
] | [
"0.6157365",
"0.59965396",
"0.58586574",
"0.576763",
"0.5736396",
"0.5676494",
"0.55941546",
"0.55827403",
"0.55580056",
"0.55318683",
"0.5510506",
"0.5484215",
"0.5465382",
"0.54620993",
"0.54562",
"0.54523844",
"0.5444869",
"0.5432267",
"0.5404659",
"0.53671473",
"0.5358415",
"0.5346625",
"0.5343593",
"0.53307724",
"0.5323513",
"0.53103584",
"0.529928",
"0.528813",
"0.5278592",
"0.52483094"
] | 0.6379896 | 0 |
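The `_inspect_plugins_dirs` snippet folds each valid directory into `plugins_idx` through a `_get_plugins_in_directory` helper that the excerpt does not show. One plausible shape for that pattern, sketched under the assumption that plugins are executable files indexed by file name (not the project's actual helper):

```python
import os
import os.path as op

def _get_plugins_in_directory(directory):
    # Assumed helper: map plugin name -> absolute path for executable files.
    plugins = {}
    for name in os.listdir(directory):
        path = op.join(directory, name)
        if op.isfile(path) and os.access(path, os.X_OK):
            plugins[name] = path
    return plugins

def build_plugins_index(plugins_dirs):
    wrong_dirs = [d for d in plugins_dirs if not op.isdir(d)]
    if wrong_dirs:
        raise IOError('Wrong plugins_dirs: {}'.format(wrong_dirs))
    plugins_idx = {}
    for directory in plugins_dirs:
        # Later directories win on name clashes, mirroring the record's note
        # that the last plugin found is the one executed.
        plugins_idx.update(_get_plugins_in_directory(directory))
    return plugins_idx
```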
Get monitoring check objects | def get_monitoring_checks(self):
logger.debug('Getting monitoring checks')
plugins_dirs = getattr(self, 'plugins_dirs')
monitoring_checks = []
for service_desc, cmd in getattr(self, 'commands').items():
try:
plugin_name = cmd.split()[0]
except IndexError:
raise ValueError('Wrong plugin command: {}'.format(cmd))
if plugin_name in self.plugins_idx:
monitoring_checks.append(
MonitoringCheck(
getattr(self, 'hostname'),
service_desc,
self.plugins_idx[plugin_name]
)
)
else:
raise ValueError(
"Plugin '{}' is not found in {}".format(
plugin_name, plugins_dirs)
)
logger.debug(monitoring_checks)
return monitoring_checks | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def checks(self):\r\n return checks.Checks(self)",
"def health_checks(self):\n return [self.check_device_connected, self.check_clear_flags]",
"def get_result(self):\n check_result_list = []\n for check in self.monitoring_checks:\n try:\n result = check.execute()\n except ForbiddenCheckError as err:\n logger.error(err)\n else:\n check_result_list.append(result)\n if check_result_list:\n return check_result_list\n else:\n logger.error(\"Empty check result list\")",
"def get_monitored_changes(self) -> List:\n pass",
"async def items(self):\n response = await self._api.get(\"/v1/agent/checks\")\n return response.body",
"def checklists(self):\r\n return Checklists(self)",
"def getMonitors(self):\n return [self.monitor]",
"def _get_all_checks(self):\n this_class = self.__class__\n\n check_list = [\n getattr(self, func)\n for func in dir(self.__class__)\n if callable(getattr(this_class, func))\n and func.startswith(self.check_prefix)\n ]\n\n return check_list",
"def get_healthchecks(\n self, service_namespace_config: ServiceNamespaceConfig\n ) -> List[HealthcheckDict]:\n\n mode = self.get_healthcheck_mode(service_namespace_config)\n\n graceperiodseconds = self.get_healthcheck_grace_period_seconds()\n intervalseconds = self.get_healthcheck_interval_seconds()\n timeoutseconds = self.get_healthcheck_timeout_seconds()\n maxconsecutivefailures = self.get_healthcheck_max_consecutive_failures()\n\n if mode == \"http\" or mode == \"https\":\n http_path = self.get_healthcheck_uri(service_namespace_config)\n protocol = f\"MESOS_{mode.upper()}\"\n healthchecks = [\n HealthcheckDict(\n {\n \"protocol\": protocol,\n \"path\": http_path,\n \"gracePeriodSeconds\": graceperiodseconds,\n \"intervalSeconds\": intervalseconds,\n \"portIndex\": 0,\n \"timeoutSeconds\": timeoutseconds,\n \"maxConsecutiveFailures\": maxconsecutivefailures,\n }\n )\n ]\n elif mode == \"tcp\":\n healthchecks = [\n HealthcheckDict(\n {\n \"protocol\": \"TCP\",\n \"gracePeriodSeconds\": graceperiodseconds,\n \"intervalSeconds\": intervalseconds,\n \"portIndex\": 0,\n \"timeoutSeconds\": timeoutseconds,\n \"maxConsecutiveFailures\": maxconsecutivefailures,\n }\n )\n ]\n elif mode == \"cmd\":\n healthchecks = [\n HealthcheckDict(\n {\n \"protocol\": \"COMMAND\",\n \"command\": self.get_healthcheck_cmd(),\n \"gracePeriodSeconds\": graceperiodseconds,\n \"intervalSeconds\": intervalseconds,\n \"timeoutSeconds\": timeoutseconds,\n \"maxConsecutiveFailures\": maxconsecutivefailures,\n }\n )\n ]\n elif mode is None:\n healthchecks = []\n else:\n raise InvalidHealthcheckMode(\n \"Unknown mode: %s. Only acceptable healthcheck modes are http/https/tcp/cmd\"\n % mode\n )\n return healthchecks",
"def checks(self, all=False):\n if all:\n warn_states = [\"unknown\", \"passing\", \"warning\", \"critical\"]\n else:\n warn_states = [\"unknown\", \"warning\", \"critical\"]\n checks = {}\n for warn_state in warn_states:\n for state in self.consul.health.state(warn_state):\n if not state['Node'] in checks:\n checks[state['Node']] = dict()\n if not state['ServiceID'] in checks[state['Node']]:\n checks[state['Node']][state['ServiceID']] = {\n 'checks': [],\n 'name': state['ServiceName']\n }\n checks[state['Node']][state['ServiceID']]['checks'].append(\n (state['Name'], state['Status'], state['Output'])\n )\n return checks",
"def getChecks(self):\r\n raise AbstractError\r\n return []",
"def checklists(self):\n return self.pods.all().checklists",
"def perform_checks(self):\n retval = []\n retval.extend(self.check_slick_status())\n retval.extend(self.check_java_processes())\n retval.extend(self.check_firefox_processes())\n retval.extend(self.check_disk_space())\n return retval",
"def get(self, request):\n pool_id = request.GET.get('poolId')\n conn = get_sdk_connection(request)\n health_monitor_list = _sdk_object_to_list(\n conn.load_balancer.health_monitors(\n project_id=request.user.project_id\n )\n )\n\n if pool_id:\n health_monitor_list = self._filter_health_monitors(\n health_monitor_list,\n pool_id)\n return {'items': health_monitor_list}",
"def get_persisted_checklists(self):\r\n modulestore = get_modulestore(self.course.location)\r\n return modulestore.get_item(self.course.location).checklists",
"def test_health_checks_constructed(self):\n\n node = Node(\n {\n 'healthchecks': [\n {\n 'command': '/some/basic/example',\n 'on_failure': None,\n 'on_failure_even_if_security_violation': False\n },\n\n {\n 'command': '/some/basic/example',\n 'on_failure': '/some/rescue-command',\n 'on_failure_even_if_security_violation': True\n },\n\n {\n 'command': '/some/basic/example'\n }\n ]\n },\n {},\n mock.Mock()\n )\n\n self.assertEqual(3, len(node.get_health_checks()))",
"def getMonitoringHosts(self):\r\n return self.monitoringClients.values()",
"def get_greenlets(cls):\n return { obj for obj in gc.get_objects() if isinstance(obj, greenlet) and not obj.dead }",
"def watch_list(self) -> list:\n return []",
"def run_checks(self):\n\n try:\n check_obj = self.metadata.get_described_element()\n except ObjectDoesNotExist:\n pass\n\n if self.metadata.is_service_metadata:\n if self.metadata.is_service_type(OGCServiceEnum.WMS):\n self.check_wms(check_obj)\n elif self.metadata.is_service_type(OGCServiceEnum.WFS):\n self.check_wfs(check_obj)\n\n elif self.metadata.is_layer_metadata:\n self.check_layer(check_obj)\n elif self.metadata.is_featuretype_metadata:\n self.check_featuretype(check_obj)\n elif self.metadata.is_dataset_metadata:\n self.check_dataset()\n\n # all checks are done. Calculate the health state for all monitoring results\n health_state = HealthState.objects.create(monitoring_run=self.monitoring_run, metadata=self.metadata)\n health_state.run_health_state()",
"def create_checkers(config):\n\n checkers = []\n if 'checkers' in config:\n for checker_name, checker_config in config['checkers'].iteritems():\n if checker_name in __checkers:\n configs = None\n if type(checker_config) == list:\n configs = checker_config\n else:\n configs = [checker_config]\n for config in configs:\n ch = __checkers[checker_name]()\n ch.set_config(config)\n if ch:\n checkers.append(ch)\n return checkers",
"def monitoredVars():\n return _monitored",
"def list_health_monitors(self, retrieve_all=True, **_params):\r\n # Pass filters in \"params\" argument to do_request\r\n return self.list('health_monitors', self.health_monitors_path,\r\n retrieve_all, **_params)",
"def monitoring_group(ctx):\n pass",
"def get_checks(self, target_type, group=None, severity=None, tags=None):\n check_files = self._get_check_files(group=group,\n severity=severity)\n groups = {}\n for (group, check_files) in iteritems(check_files):\n checks = []\n for severity, check_file in check_files:\n\n check_classes = load_check_implementation(path=check_file, severity=severity)\n for check_class in check_classes:\n if is_compatible(target_type, check_class, severity, tags):\n checks.append(check_class)\n\n groups[group] = checks\n return groups",
"def monitor_nodes(self) -> List[str]:\n return self._monitor_nodes.copy()",
"def get_health_monitor(self):\n return self.manager.get_health_monitor(self)",
"def get_all(isvgAppliance, check_mode=False, force=False):\n return isvgAppliance.invoke_get(\"Get all snmp objects\",\n \"/rsp_snmp_objs\")",
"def _get_monitor_tasks(self, desired_config):\n create_monitors = list()\n delete_monitors = list()\n update_monitors = list()\n\n for hm_type in ['http', 'https', 'tcp', 'icmp', 'udp']:\n existing = self._bigip.get_monitors(hm_type)\n config_key = \"{}_monitors\".format(hm_type)\n desired = desired_config.get(config_key, dict())\n\n (create_hm, update_hm, delete_hm) = (\n self._get_resource_tasks(existing, desired)[0:3])\n\n create_monitors += create_hm\n update_monitors += update_hm\n delete_monitors += delete_hm\n\n return (create_monitors, update_monitors, delete_monitors)",
"def get_common_monitors(monitor):\n monitor_content_loss = MonitorSeries(\n 'content loss', monitor, interval=20)\n monitor_gen_loss = MonitorSeries(\n 'generator loss', monitor, interval=20)\n monitor_warp_loss = MonitorSeries(\n 'warp loss', monitor, interval=20)\n monitor_lr = MonitorSeries(\n 'learning rate', monitor, interval=20)\n monitor_time = MonitorTimeElapsed(\n \"training time per iteration\", monitor, interval=20)\n Monitor_common = collections.namedtuple('Monitor_common',\n ['monitor_content_loss', 'monitor_gen_loss', 'monitor_warp_loss',\n 'monitor_lr', 'monitor_time'])\n return Monitor_common(monitor_content_loss, monitor_gen_loss, monitor_warp_loss, monitor_lr, monitor_time)"
] | [
"0.65733325",
"0.6363684",
"0.62054473",
"0.60702723",
"0.6051944",
"0.59617114",
"0.5931713",
"0.5846798",
"0.57369745",
"0.5709225",
"0.5654841",
"0.5577679",
"0.55522555",
"0.5534278",
"0.5504183",
"0.5498289",
"0.54719675",
"0.53983575",
"0.5397788",
"0.5354331",
"0.53518724",
"0.5347532",
"0.53339183",
"0.53228307",
"0.5310926",
"0.52939653",
"0.5286839",
"0.5273794",
"0.5272279",
"0.5271906"
] | 0.7412881 | 0 |
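The `get_monitoring_checks` snippet resolves each configured command to a plugin by splitting off the first token and looking it up in `plugins_idx`. The lookup logic in isolation, with plain tuples standing in for the `MonitoringCheck` objects and a hypothetical Nagios-style index in the usage line:

```python
def resolve_checks(hostname, commands, plugins_idx):
    """Map {service_description: command string} to (hostname, service, plugin_path) tuples."""
    checks = []
    for service_desc, cmd in commands.items():
        parts = cmd.split()
        if not parts:
            raise ValueError('Wrong plugin command: {}'.format(cmd))
        plugin_name = parts[0]
        if plugin_name not in plugins_idx:
            raise ValueError("Plugin '{}' is not found".format(plugin_name))
        checks.append((hostname, service_desc, plugins_idx[plugin_name]))
    return checks

# Hypothetical index and config, for illustration only.
idx = {'check_disk': '/usr/lib/nagios/plugins/check_disk'}
print(resolve_checks('web01', {'disk usage': 'check_disk -w 80 -c 90'}, idx))
```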
Get monitoring check results for the list of monitoring checks | def get_result(self):
check_result_list = []
for check in self.monitoring_checks:
try:
result = check.execute()
except ForbiddenCheckError as err:
logger.error(err)
else:
check_result_list.append(result)
if check_result_list:
return check_result_list
else:
logger.error("Empty check result list") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_ResultCheck(self):\n try:\n cmd = self.ExecutionTask.get_param().split(',')\n logging.debug(\"%s-%s-%s-%s-%s\" % ( TestScriptSymbolTable.get_value_from_sym_tab(cmd[0], TestScriptSymbolTable.test_script_sym_tab),cmd[0], cmd[1], cmd[2], cmd[3]))\n\n checkval = cmd[0].split('!') \n \n cval = TestScriptSymbolTable.get_value_from_sym_tab(checkval[1], TestScriptSymbolTable.capi_cmd_ret_sym_tab)\n\n if int(cval) >= int(cmd[1]):\n result = cmd[2]\n else:\n result = cmd[3]\n\n logging.info(\"\\nRESULT CHECK---> %15s\", result) \n self.setTestResult(result)\n \n #if result == 'FAIL':\n if 'FAIL' in result:\n self.setStatus('STOP')\n self.setTestResult('FAIL')\n except OSError:\n logging.info(\"\\nException - ResultCheck\")",
"def get_monitoring_checks(self):\n logger.debug('Getting monitoring checks')\n plugins_dirs = getattr(self, 'plugins_dirs')\n monitoring_checks = []\n for service_desc, cmd in getattr(self, 'commands').items():\n try:\n plugin_name = cmd.split()[0]\n except IndexError:\n raise ValueError('Wrong plugin command: {}'.format(cmd))\n\n if plugin_name in self.plugins_idx:\n monitoring_checks.append(\n MonitoringCheck(\n getattr(self, 'hostname'),\n service_desc,\n self.plugins_idx[plugin_name]\n )\n )\n else:\n raise ValueError(\n \"Plugin '{}' is not found in {}\".format(\n plugin_name, plugins_dirs)\n )\n\n logger.debug(monitoring_checks)\n return monitoring_checks",
"def results(self, checkid):\r\n return results.Results(self, checkid)",
"def check_slick_status(self):\n retval = []\n slick = SlickAsPy(self.environment.slickurl + \"/api\")\n status = slick.get_host_status(self.name)\n if status['currentWork'] is None:\n seconds_since_last_checkin = (int(time.time() * 1000) - status['lastCheckin'])\n if seconds_since_last_checkin < 300000:\n retval.append(CheckStatus(self, CheckStatus.CHECK_SLICK_CHECKIN, CheckStatus.STATUS_PASS))\n else:\n retval.append(CheckStatus(self, CheckStatus.CHECK_SLICK_CHECKIN, CheckStatus.STATUS_FAIL, \"It's been {} minutes since the last checkin.\".format(seconds_since_last_checkin / 60000)))\n retval.append(CheckStatus(self, CheckStatus.CHECK_TEST_RUNTIME, CheckStatus.STATUS_NA))\n else:\n retval.append(CheckStatus(self, CheckStatus.CHECK_TEST_RUNTIME, CheckStatus.STATUS_NA))\n seconds_since_test_started = (int(time.time() * 1000) - status['currentWork']['recorded'])\n if seconds_since_test_started < 900000:\n retval.append(CheckStatus(self, CheckStatus.CHECK_TEST_RUNTIME, CheckStatus.STATUS_PASS))\n else:\n retval.append(CheckStatus(self, CheckStatus.CHECK_TEST_RUNTIME, CheckStatus.STATUS_FAIL, \"It's been {} minutes since the current test started.\".format(seconds_since_test_started / 60000)))\n return retval",
"def perform_checks(self):\n retval = []\n retval.extend(self.check_slick_status())\n retval.extend(self.check_java_processes())\n retval.extend(self.check_firefox_processes())\n retval.extend(self.check_disk_space())\n return retval",
"def health_checks(self):\n return [self.check_device_connected, self.check_clear_flags]",
"async def items(self):\n response = await self._api.get(\"/v1/agent/checks\")\n return response.body",
"def health_checks(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"health_checks\")",
"def get_health_check(self):\n return util.create_response(output=\"OK\")",
"def run_monitor(self):\n data = self.get_site_to_check(self.config_file_path)\n results = self.check_sites_status(data)\n self.notify_failures(results)",
"def get_status(self, list_for_check):\n d = {} # dictionary for return\n if len(list_for_check) > 990:\n return \"Error, too much to check\"\n elif len(list_for_check) == 0:\n logging.warning(\"[Collector] Passed empty list to check\")\n return \"Error, nothing to check\"\n\n vkids = \",\".join((str(i) for i in list_for_check))\n logging.info(\"[Collector] Making getProfiles API request...\")\n request = VK_GETPROFILES_BASE+vkids+\"&fields=online\"\n\n try:\n jsondata = json.loads(urllib2.urlopen(request, None, 25).read())\n except (URLError, HTTPError):\n logging.error(\"[Collector] Some error happaned during getProfiles API request\")\n # if jsondata['error']: logging.error(\"Cannot get correct API response.\")\n\n connection = sqlite3.connect('vk.db')\n cursor = connection.cursor()\n\n for i in jsondata['response']:\n d[i['uid']] = i['online']\n cursor.execute(\"SELECT * from u\" + str(i['uid']) + \" order by time desc limit 1\")\n last_status = cursor.fetchone()\n #print(i['uid'],last_status[1],i['online'])\n if last_status[1] != i['online']:\n cursor.execute(\"INSERT INTO u\" + str(i['uid']) + \"(time, status) VALUES (\" + str(int(time.time())) + \",\" + str(i['online']) + \")\")\n logging.info(\"[Collector] Add record for : \" + str(i['uid']) + \" \")\n logging.info(\"[Collector] Request has been parsed, records: \"+str(len(d))+\" \")\n connection.commit()\n connection.close()\n return d",
"def test_list_monitorings_success(self):\n project_id = util.MOCK_UUID_1\n deployment_id = util.MOCK_UUID_1\n\n rv = TEST_CLIENT.get(\n f\"/projects/{project_id}/deployments/{deployment_id}/monitorings\"\n )\n result = rv.json()\n expected = util.MOCK_MONITORING_LIST\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 200)",
"def amtool_status(self, mess, args):\n self.log.info(\"Current config {0}\".format(self.config))\n self.log.info(\n \"Alertmanager @ {0}\".format(self.config['server_address']))\n helper = AmtoolHelper(\n alertmanager_address=self.config['server_address'])\n result = helper.get_status()\n return result",
"def healthcheck(self):\n url = urljoin(self.url, \"/.well-known/healthcheck.json\")\n r = requests.get(url)\n return r.json()",
"def Bg_get_ping_result():\r\n return BgPing.analyse_result()",
"def check_alerts(self):\n status = self._mcp9600.get('STATUS')\n return status.alert_1, status.alert_2, status.alert_3, status.alert_4",
"def _checker_worker(self):\n results = {}\n for cmd in self.check_cmds:\n res = subprocess.call(cmd.split(), stdout=open('/dev/null', 'w'))\n self.log(\"'%s' finished, result: %s\" % (cmd, res))\n results[cmd] = res\n if rospy.is_shutdown():\n return\n with self._lock:\n # just add results into the data structure\n self._results.add(results)",
"def after_watch_NEMO_hindcast(msg, config, checklist):\n next_workers = {\"crash\": [], \"failure\": [], \"success\": []}\n if msg.type == \"success\":\n next_workers[msg.type].extend(\n [\n NextWorker(\n \"nowcast.workers.download_results\",\n args=[\n msg.payload[\"hindcast\"][\"host\"],\n \"hindcast\",\n \"--run-date\",\n msg.payload[\"hindcast\"][\"run date\"],\n ],\n ),\n NextWorker(\n \"nowcast.workers.watch_NEMO_hindcast\",\n args=[msg.payload[\"hindcast\"][\"host\"]],\n ),\n NextWorker(\n \"nowcast.workers.run_NEMO_hindcast\",\n args=[msg.payload[\"hindcast\"][\"host\"]],\n ),\n ]\n )\n return next_workers[msg.type]",
"def after_watch_NEMO_agrif(msg, config, checklist):\n next_workers = {\"crash\": [], \"failure\": [], \"success\": []}\n if msg.type == \"success\":\n next_workers[msg.type].append(\n NextWorker(\n \"nowcast.workers.download_results\",\n args=[\n msg.payload[\"nowcast-agrif\"][\"host\"],\n \"nowcast-agrif\",\n \"--run-date\",\n msg.payload[\"nowcast-agrif\"][\"run date\"],\n ],\n )\n )\n return next_workers[msg.type]",
"def health_check():\n return dict(api_status='OK')",
"def get_aem_health_check(self, opts):\n self.call_params = {**self.call_params, **opts}\n return self.client.call(self.__class__.__name__, 'get_aem_health_check', self.call_params)",
"def monitor_check(monitor, config, file_list, event_list, display_event):\n global job_sets\n global active_transfers\n global transfer_list\n # if there are already three or more transfers in progress\n # hold off on starting any new ones until they complete\n if active_transfers >= 2:\n return\n event_list = push_event(event_list, \"Running check for remote files\")\n monitor.check()\n new_files = monitor.new_files\n patterns = config.get('global').get('output_patterns')\n for file_info in new_files:\n for folder, file_type in patterns.items():\n if file_type in file_info['filename']:\n file_info['type'] = folder\n break\n\n checked_new_files = []\n\n for new_file in new_files:\n file_type = new_file.get('type')\n if not file_type:\n event_list = push_event(event_list, \"Failed accessing remote directory, do you have access permissions?\")\n continue\n file_key = \"\"\n if file_type in ['ATM', 'MPAS_AM', 'MPAS_CICE', 'MPAS_RST']:\n file_key = filename_to_file_list_key(new_file['filename'])\n elif file_type == 'MPAS_CICE_IN':\n file_key = 'mpas-cice_in'\n elif file_type == 'MPAS_O_IN':\n file_key = 'mpas-o_in'\n elif file_type == 'STREAMS':\n file_key = 'streams.cice' if 'cice' in new_file['filename'] else 'streams.ocean'\n elif file_type == 'RPT':\n if 'ocn' in new_file['filename']:\n file_key = 'rpointer.ocn'\n elif 'atm' in new_file['filename']:\n file_key = 'rpointer.atm'\n else:\n continue\n try:\n status = file_list[file_type][file_key]\n except KeyError:\n continue\n if not status:\n continue\n if status == SetStatus.DATA_READY:\n local_path = os.path.join(\n config.get('global').get('data_cache_path'),\n new_file['type'],\n new_file['filename'].split('/')[-1])\n if not os.path.exists(local_path):\n checked_new_files.append(new_file)\n continue\n if not int(os.path.getsize(local_path)) == int(new_file['size']):\n os.remove(local_path)\n checked_new_files.append(new_file)\n if status == SetStatus.NO_DATA:\n checked_new_files.append(new_file)\n\n # if there are any new files\n if not checked_new_files:\n # print 'no new files'\n return\n else:\n # print pformat(checked_new_files)\n pass\n\n # find which year set the data belongs to\n frequencies = config.get('global').get('set_frequency')\n for file_info in checked_new_files:\n if file_info['type'] != 'ATM':\n continue\n for freq in frequencies:\n year_set = filename_to_year_set(file_info['filename'], freq)\n for job_set in job_sets:\n if job_set.set_number == year_set and job_set.status == SetStatus.NO_DATA:\n job_set.status = SetStatus.PARTIAL_DATA\n # Spawn jobs for that yearset\n job_set = add_jobs(job_set)\n\n t_config = config.get('transfer')\n g_config = config.get('global')\n m_config = config.get('monitor')\n\n transfer_config = {\n 'size': t_config.get('size'),\n 'file_list': checked_new_files,\n 'globus_username': t_config.get('globus_username'),\n 'globus_password': t_config.get('globus_password'),\n 'source_username': m_config.get('compute_username'),\n 'source_password': m_config.get('compute_password'),\n 'destination_username': t_config.get('processing_username'),\n 'destination_password': t_config.get('processing_password'),\n 'source_endpoint': t_config.get('source_endpoint'),\n 'destination_endpoint': t_config.get('destination_endpoint'),\n 'source_path': t_config.get('source_path'),\n 'destination_path': g_config.get('data_cache_path') + '/',\n 'recursive': 'False',\n 'pattern': config.get('global').get('output_patterns'),\n 'ncclimo_path': config.get('ncclimo').get('ncclimo_path')\n }\n\n # Check if the user is 
logged in, and all endpoints are active\n endpoints = [config['transfer']['source_endpoint'], config['transfer']['destination_endpoint']]\n client = get_client()\n for endpoint in endpoints:\n r = client.endpoint_autoactivate(endpoint, if_expires_in=3600)\n if r[\"code\"] == \"AutoActivationFailed\":\n display_event.set()\n sleep(3)\n while not setup_globus(endpoints):\n sleep(1)\n display_event.clear()\n diaplay_thread = threading.Thread(target=start_display, args=(config, display_event))\n diaplay_thread.start()\n \n transfer = Transfer(transfer_config, event_list)\n\n for item in transfer.config.get('file_list'):\n item_name = item['filename'].split('/').pop()\n item_type = item['type']\n if item_type in ['ATM', 'MPAS_AM']:\n file_key = filename_to_file_list_key(item_name)\n elif item_type == 'MPAS_CICE':\n file_key = 'mpas-cice_in'\n elif item_type == 'MPAS_O':\n file_key = 'mpas-o_in'\n elif item_type == 'MPAS_RST':\n file_key = '0002-01-01'\n elif item_type == 'RPT':\n file_key = 'rpointer.ocn' if 'ocn' in item_name else 'rpointer.atm'\n elif item_type == 'STREAMS':\n file_key == 'streams.cice' if 'cice' in item_name else 'streams.ocean'\n file_list[item_type][file_key] = SetStatus.IN_TRANSIT\n\n start_file = transfer.config.get('file_list')[0]['filename']\n end_file = transfer.config.get('file_list')[-1]['filename']\n index = start_file.find('-')\n start_readable = start_file[index - 4: index + 3]\n index = end_file.find('-')\n end_readable = end_file[index - 4: index + 3]\n message = 'Found {0} new remote files, creating transfer job from {1} to {2}'.format(\n len(checked_new_files),\n start_readable,\n end_readable)\n event_list = push_event(event_list, message)\n logging.info('## ' + message)\n\n if not config.get('global').get('dry_run', False):\n while True:\n try:\n thread = threading.Thread(target=handle_transfer, args=(transfer, checked_new_files, thread_kill_event, event_list))\n except:\n sleep(1)\n else:\n thread_list.append(thread)\n thread.start()\n break",
"def after_collect_river_data(msg, config, checklist):\n next_workers = {\"crash\": [], \"failure\": [], \"success\": []}\n return next_workers[msg.type]",
"def check_for_list(check):",
"def all_results(self):\n res = [(True, result) for result in self.successes]\n res.extend([(False, result) for result in self.failures])\n return res",
"def checkResults(self):\n return self.checkResultsAsJson(self._spark_session, self)",
"def do_health_checks(self, list_of_ips):\n # Calculate a decent overall timeout time for a ping attempt: 3/4th of\n # the monitoring interval. That way, we know we're done with this ping\n # attempt before the next monitoring attempt is started.\n ping_timeout = self.get_monitor_interval() * 0.75\n\n # Calculate a decent number of retries. For very short intervals we\n # shouldn't have any retries, for very long ones, we should have\n # several ones. Converting the timeout to an integer gives us what we\n # want: For timeouts less than 1 we have no retry at all.\n num_retries = int(ping_timeout)\n\n try:\n self.ping_count += len(list_of_ips)\n responses, no_responses = multiping.multi_ping(\n list_of_ips, ping_timeout, num_retries)\n self.update_stats(responses, no_responses)\n\n except Exception as e:\n logging.error(\"Exception while trying to monitor servers: %s\" %\n str(e))\n # Need to assume all IPs failed\n no_responses = list_of_ips\n\n return no_responses, [] # return empty list for questionable IPs",
"def after_launch_remote_worker(msg, config, checklist):\n return []",
"async def get_status():",
"def check_result(self, result):\n self.log.info(\"--check_result, result= %s\", result)\n if result[0]['exit_status'] != 0:\n self.fail(\"##Error detected from check_result\")\n else:\n self.log.info(\"--check_result passed\")"
] | [
"0.63706166",
"0.62025386",
"0.6151091",
"0.6064086",
"0.6039532",
"0.5976944",
"0.5972677",
"0.58945674",
"0.58764076",
"0.58144605",
"0.5788281",
"0.57788694",
"0.5717188",
"0.57067704",
"0.56888074",
"0.56824183",
"0.5677777",
"0.5654893",
"0.56247807",
"0.5588808",
"0.5571923",
"0.55541116",
"0.5551551",
"0.5476667",
"0.54681677",
"0.5453099",
"0.54428464",
"0.54389817",
"0.54330987",
"0.5422002"
] | 0.80645454 | 0 |
This URL is a test to be sure that the DaemonServer can handle a request | def index(request):
return requests.get(DaemonServer._mock_url + '/') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_http_request(self):\n\n response = requests.get(self.live_server_url)\n assert response.status_code == 200",
"def test_url():\r\n global provided_url\r\n global verbose_flag\r\n # extracting url\r\n provided_url = urlparse(provided_url).scheme+\"://\"+urlparse(provided_url).netloc\r\n print provided_url \r\n if verbose_flag: print \"\\t[.] Checking if connection can be established...\",# + provided_url\r\n try:\r\n response = urllib2.urlopen(provided_url)\r\n \r\n except HTTPError, e:\r\n if verbose_flag: print \"[!] Failed\"\r\n return 0\r\n except URLError, e:\r\n if verbose_flag: print \"[!] Failed\"\r\n return 0\r\n else:\r\n valid_target = 1\r\n if verbose_flag: print \"Success\"\r\n return 1",
"def test_server_is_up_and_running(self):\n r = requests.get(self.get_server_url())\n self.assertEqual(r.status_code, 200)",
"def test_connection(self):\n req = requests.get(\"http://{}:{}\".format(self.config.options.get(\"Server\", \"ListenAddress\"),\n self.config.options.get(\"Server\", \"Port\")))\n\n self.assertEqual(req.status_code, 200)",
"def test_valid_request(self):\n print(\"Testing valid request...\")\n # create a file with random data in the server folder\n self.__create_test_file()\n valid_path = os.path.join(os.path.dirname(self.client_path), \\\n os.path.basename(self.test_file))\n os.chdir(os.path.dirname(self.client_path))\n subprocess.call([self.client_path, \\\n \"{}:{}\".format(self.args.ip, self.args.port), \\\n os.path.basename(valid_path)])\n self.assertTrue(os.path.isfile(valid_path))\n self.__compare_files(valid_path, self.test_file)\n os.remove(valid_path)",
"def test_health(self):\n self.assert_request('get', '/_health')",
"def test_health_check(self):\n self.url = reverse(\"health-check\")\n response = self.client.get(self.url, **self.auth_headers)\n self.assertEqual(200, response.status_code)",
"def test_live_server(live_server):\n # With pytest-flask we don't need to be in the application context\n # to use `flask.url_for`.\n url = flask.url_for(\"security.login\", _external=True)\n response = urlopen(url)\n assert response\n assert response.code == 200",
"def url_health():\n return \"OK\"",
"def test_server_runnin(self, client):\n\n res = client.get('/')\n assert res.status_code == 200\n assert res.json['message'] == 'Server running'\n assert res.json['status'] == 2000",
"def basicRequest(self):\n endpoint = \"/foo\"\n\n def verify(request):\n o(request.method).equals(\"GET\")(\"Checking basic request method.\")\n o(request.url).equals(endpoint)(\"Checking basic request url.\")\n request.respond(200)\n self.testServer.respondWith(verify)\n\n server.request(endpoint)\n self.testServer.respond()",
"def __check(self):\n status = '200 OK'\n try:\n response = get(self.__url)\n status = '{} {}'.format(\n response.status_code,\n http.client.responses[response.status_code]\n )\n except Exception as e:\n status = e.__class__.__name__\n \n if status[:3] == '200':\n self.__notify_up()\n else:\n if not self.downtime_info:\n self.downtime_info = DowntimeInfo(status)\n self.__notify_down()",
"def test_server_connection():\n response = client.get(\"/\")\n assert response.ok\n assert response.json() == {\"ID\": \"8dbaaa72-ff7a-4f95-887c-e3109e577edd\"}",
"def test_get_host_access(self):\n pass",
"def test_allow(self) -> None:\n response = self.request(\"/\", method=\"HEAD\")\n self.assert_allowed(response, (\"GET\", \"POST\"))",
"def test_orchestrator_http_simple(self):\n pass",
"def test_url_existence(self):\n self.assertEquals(self.response.status_code, 200)",
"def test_download_host(self):\n pass",
"async def _test_url(self, client, url):\n\n with async_timeout.timeout(10):\n websession = async_get_clientsession(self.hass)\n client = Client(websession, base_url=url)\n try:\n await client.exists()\n except NamfError:\n return False\n return True",
"def check(self, target, port):\n pass",
"def test_server_info(self):\n pass",
"async def _test_server_handler(self, request):\n conn = await request.accept()\n with suppress(ConnectionClosed):\n await listen_browser(conn, self.bounds)",
"def test_perform_host_action(self):\n pass",
"def can_connect(test_url):\n try:\n requests.get(test_url)\n except (OSError):#connection error\n logger.warning('couldn\\'t reach server on: {test_url}')\n return False\n return True",
"def do_GET(self):\n sep = self.path.find('?')\n path = self.path if sep == -1 else self.path[:sep]\n if path == '/externalpolicydata':\n http_response, raw_reply = self.HandleExternalPolicyDataRequest()\n elif path == '/configuration/test/exit':\n # This is not part of the standard DM server protocol.\n # This extension is added to make the test server exit gracefully\n # when the test is complete.\n self.server.stop = True\n http_response = 200\n raw_reply = 'OK'\n elif path == '/test/ping':\n # This path and reply are used by the test setup of host-driven tests for\n # Android to determine if the server is up, and are not part of the\n # DM protocol.\n http_response = 200\n raw_reply = 'Policy server is up.'\n else:\n http_response = 404\n raw_reply = 'Invalid path'\n self.send_response(http_response)\n self.end_headers()\n self.wfile.write(raw_reply)",
"def _api_call(self, url, response_checker):\n self.request_compare(url)",
"def test_homepage(self):\n rv = self.app.get('/')\n assert 'Enter your url here' in rv.data",
"def test_GET(self):\n if not self.url:\n return\n response = self.client.get(self.url, {}, format='json')\n self.assertIn(response.status_code, [status.HTTP_405_METHOD_NOT_ALLOWED,\n status.HTTP_401_UNAUTHORIZED])",
"def test_minifiers(self):\n response = self.client.get(\"/\")",
"def test_connection():\n response = echo_client(\"GET webroot/sample.txt HTTP/1.1\")\n print response\n assert \"HTTP/1.1 200 OK\" in response"
] | [
"0.70002085",
"0.66061354",
"0.65519416",
"0.6421083",
"0.63537407",
"0.6349542",
"0.6303565",
"0.62201357",
"0.62109095",
"0.62078255",
"0.6168145",
"0.6095292",
"0.60952",
"0.60858065",
"0.6082953",
"0.60637474",
"0.6036982",
"0.6025642",
"0.6015954",
"0.5986596",
"0.59813386",
"0.59575045",
"0.595232",
"0.5951795",
"0.5947485",
"0.59242743",
"0.5905235",
"0.58814347",
"0.58806425",
"0.5873605"
] | 0.67284894 | 1 |
Get a specific plugin | def get_plugin(request):
res = requests.get(DaemonServer._mock_url + '/plugins/' + request.url_vars['id'])
return res | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_plugin(self, name):",
"def get_plugin(group, name):\n return _get_plugins(group, name)[name]",
"def get_plugin(name):\n for plugin in IPluginRegistry.plugins:\n if name in plugin.__name__:\n return plugin\n raise ValueError(\"The plugin %s cannot be found.\" % name)",
"def getPlugin(self, *args):\n return _libsbml.SBase_getPlugin(self, *args)",
"def get_plug(self, name):\n return self.plug_dict[name]",
"def find_plugin(plugin_type):\n for plugin in get_plugins():\n if plugin.type == plugin_type:\n return plugin\n return None",
"def getPlugin(self, plugin_name):\n\t\tif plugin_name in self.plugins:\n\t\t\treturn self.plugins[plugin_name][\"module\"].getPluginInstance()\n\t\telse:\n\t\t\treturn None",
"def get_plugin_by_type(name):\n\n return plugins_by_name.get(name)",
"def get_plugin(version):\n build_version = get_build_version(current_app, version)\n if build_version:\n pid = request.args.get('id')\n return _get_plugin(current_app, build_version, pid)\n else:\n return invalid_api_version(404)",
"def get(self, plugin_id, _include=None, **kwargs):\n return get_storage_manager().get(\n models.Plugin,\n plugin_id,\n include=_include\n )",
"def get_plugin_by_name(self, name, category):\r\n return self.plugmanc.getPluginByName(name, category)",
"def get(self, integrate_with, uid, default=None):\n item = self._registry[integrate_with].get(uid, default)\n\n if not item:\n err_msg = self.plugin_not_found_error_message.format(\n uid, self.__class__\n )\n if self.fail_on_missing_plugin:\n logger.error(err_msg)\n raise self.plugin_not_found_exception_cls(err_msg)\n else:\n logger.debug(err_msg)\n\n return item",
"def getInstance(config):\n return Plugin(config)",
"def getInstance(config):\n return Plugin(config)",
"def get(plugin_id, logger, client, tenant_name, get_data):\n utils.explicit_tenant_name_message(tenant_name, logger)\n logger.info('Retrieving plugin %s...', plugin_id)\n plugin = client.plugins.get(plugin_id, _get_data=get_data)\n columns = PLUGIN_COLUMNS + GET_DATA_COLUMNS if get_data else PLUGIN_COLUMNS\n plugin['installed on'] = _format_installation_state(plugin)\n\n if get_global_json_output():\n # for json, also include installation_state because it's useful\n print_single(columns + ['installation_state'], plugin, 'Plugin:', 50)\n return\n\n states = {}\n for state in plugin.pop('installation_state', []):\n if state.get('manager'):\n label = 'Manager {0}'.format(state['manager'])\n elif state.get('agent'):\n label = 'Agent {0}'.format(state['agent'])\n states[label] = state['state']\n print_details({\n col: plugin.get(col) for col in columns\n }, 'Plugin:')\n print_details(states, 'Plugin installation state:')",
"def get(self, uid, default=None):\n item = self._registry.get(uid, default)\n\n if not item:\n err_msg = self.plugin_not_found_error_message.format(\n uid, self.__class__\n )\n if self.fail_on_missing_plugin:\n logger.error(err_msg)\n raise self.plugin_not_found_exception_cls(err_msg)\n else:\n logger.debug(err_msg)\n\n return item",
"def get_plugin_interface(self):",
"def _get_plugin_from_registry(self, trans, visualization_name):\n if not trans.app.visualizations_registry:\n raise HTTPNotFound('No visualization registry (possibly disabled in galaxy.ini)')\n return trans.app.visualizations_registry.get_plugin(visualization_name)",
"def load_plugin(self, plugin):\n return imp.load_module(self._main_module, *plugin[\"info\"])",
"def plugins_get_mgr():\n global pluginmgr\n return pluginmgr",
"def getPlugin(self, *args):\n return _libsbml.ASTNode_getPlugin(self, *args)",
"def plugin_instance(self):\n return self.__plugin_instance",
"def get_tool_by_plugin_instance(self, plugin, package_name=None):\n\n if not package_name:\n package_name = plugin.PACKAGE if hasattr(plugins, 'PACKAGE') else None\n if not package_name:\n LOGGER.error('Impossible to retrieve data from plugin with undefined package!')\n return None\n\n if package_name not in self._plugins:\n LOGGER.error(\n 'Impossible to retrieve data from instance: package \"{}\" not registered!'.format(package_name))\n return None\n\n if hasattr(plugin, 'ID'):\n return self.get_tool_by_id(tool_id=plugin.ID, package_name=plugin.PACKAGE)\n\n return None",
"def get_plugin_settings(plugin, directory=None):\n repo = require_repo(directory)\n plugins = get_value(repo, 'plugins')\n return plugins.get(plugin) if isinstance(plugins, dict) else None",
"def get_plugin_widget(registry, plugin_uid, request=None, as_instance=False,\n theme=None):\n if not theme:\n theme = get_theme(request=request, as_instance=True)\n\n return registry.get(\n BasePluginWidgetRegistry.namify(theme.uid, plugin_uid)\n )",
"def load_plugin():\n return HostTestPluginCopyMethod_Shell()",
"def __getitem__(self, key):\n if key in self.plugin:\n return self.plugin[key]\n else:\n log.warning(\"\"\"Plugin \"%s\" is not loaded.\"\"\" % key)\n return False",
"def plugin_one():\n return \"one\"",
"def driver(self):\r\n ext = self.extensions[0]\r\n return ext.obj if ext.obj else ext.plugin",
"def request(self, plugin_identifier, version=None):\n result = super(ComponentFactory, self).request(plugin_identifier, version)\n\n if result:\n return result\n\n for plugin in self.plugins():\n if plugin_identifier in plugin.legacy_identifiers:\n return plugin\n\n return None"
] | [
"0.81705546",
"0.764261",
"0.7561231",
"0.75498444",
"0.7343164",
"0.73150814",
"0.7270925",
"0.7243109",
"0.7181925",
"0.6945842",
"0.69209474",
"0.6799585",
"0.6742429",
"0.6742429",
"0.6724079",
"0.6690348",
"0.6649153",
"0.66339684",
"0.66067374",
"0.65749896",
"0.6569533",
"0.64830667",
"0.6468173",
"0.64584094",
"0.64436257",
"0.64339143",
"0.6432796",
"0.6421422",
"0.6388321",
"0.63379437"
] | 0.7715446 | 1 |
Start the DaemonServer by listening on the specified adress | def run(self, adress='127.0.0.1', port=8001):
self._httpd = HTTPServer((adress, port), HTTPRequestHandler)
self._is_running = True
self._th = Thread(None, self._httpd.serve_forever)
self._th.start()
print('DaemonServer is listening on %s:%d' % (adress, port)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start_server():\n server.bind(constants.ADDRESS)\n server.listen()\n print(\"Server listening on: \" + constants.HOST + \" on port \" + str(constants.PORT) + \"...\")",
"def start(args):\n # Create the controller\n factory = ServerFactory(args)\n \n protocol = dns.DNSDatagramProtocol(controller=factory)\n \n reactor.listenUDP(args.port, protocol, args.addr)\n reactor.listenTCP(args.port, factory, 50, args.addr)\n\n _LOG.info(\"DNS server listening on %s:%d...\", args.addr, args.port)\n reactor.run()",
"def start_server(self):\n server_port = 8800\n incoming_addr = \"\"\n address = (incoming_addr, server_port)\n\n server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.SO_REUSEADDR)\n server_socket.bind(address)\n server_socket.listen(5)\n\n print(\"\\nServer Listening\\n\")\n return server_socket",
"def listen(self, port, address=\"\"):\n sockets = self.bind_udp_sockets(port, address=address)\n self.add_sockets(sockets)",
"async def listen(self, maddr):\n self.server = await asyncio.start_server(\n self.handler,\n maddr.value_for_protocol('ip4'),\n maddr.value_for_protocol('tcp'),\n )\n socket = self.server.sockets[0]\n self.multiaddrs.append(_multiaddr_from_socket(socket))\n\n return True",
"def listen(self,addr=None,port=None):\n\n self.type = 'listen'\n\n if addr == None:\n #addr = gethostbyname(gethostname())\n addr = '' # equivalent to INADDR_ANY\n host_location = (addr,0)\n else:\n\n host_location = (addr,port)\n\n s = socket(AF_INET,SOCK_STREAM)\n s.settimeout(0.0)\n s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n s.bind(host_location)\n s.listen(5)\n self.sock = s\n self.status = 'listening'\n\n return s",
"async def listen(self, maddr: Multiaddr) -> bool:",
"def start(self):\n\n self.socket.bind((self.ip, self.port))\n self.socket.listen(self.listenNumber)\n self.printLine()\n print(\"start for listening \")",
"def listen(self):\n\n msg = _(u'Serving on host %(bind)s:%(port)s')\n msg %= {'bind': self._wsgi_conf.bind, 'port': self._wsgi_conf.port}\n LOG.info(msg)\n\n httpd = simple_server.make_server(self._wsgi_conf.bind,\n self._wsgi_conf.port,\n self.app)\n httpd.serve_forever()",
"def start_server(host, port, handler):\n httpd = socketserver.TCPServer((host, port), handler)\n print_server_message(host, port, handler)\n httpd.serve_forever() # This is stopped by using the handler",
"def start_service(\n listen_address: Optional[str] = None, reload: Optional[bool] = False\n): # pragma: no cover\n\n setup_logging()\n\n if listen_address is None:\n listen_address = \"127.0.0.1:7891\"\n\n host, port = listen_address.split(\":\")\n port = int(port)\n app.server.run(host=host, port=port, debug=reload)",
"def start(self):\n listener = eventlet.listen(cfg.CONF.dhcp_lease_relay_socket,\n family=socket.AF_UNIX)\n eventlet.spawn(eventlet.serve, listener, self._handler)",
"def start_server():\n host = 'localhost'\n port = 8080\n listener = socket.socket(socket.AF_INET)\n listener.bind((host, port))\n print 'Serving on {0}:{1}.'.format(host, port)\n listener.listen(0)\n while 1:\n connection, address = listener.accept()\n print 'Got connection from {}'.format(address)\n threading.Thread(\n target=Proxy, args=(connection, )).run()",
"def start(self):\n self.bind()\n logging.info(\"Statring UDP server\")\n self.lthread = Thread(target=self.listen, args=())\n self.lthread.name = \"UDP listening thread\"\n self.lthread.start()",
"def listen(self):\n self.socket.listen(6)",
"def server(host, port, debug):\n run_server(host, port, debug)",
"def start(self):\n self.port = self.conn.evalInServer(server_code.format(key=self.key))",
"def startservers():\n try:\n dns = subprocess.Popen(['python', FAKE_LOC, '-c', DNS_LOC])\n except IOError:\n sys.exit('>> Unable to locate FakeDns')\n\n try:\n httpd = MyTCPServer(('', 80), MyHandler)\n except socket.error:\n dns.kill()\n sys.exit('>> Port 80 already in use')\n try:\n print '>> Starting HTTP Server...'\n httpd.serve_forever()\n except KeyboardInterrupt:\n httpd.shutdown()\n httpd.server_close()\n dns.kill()\n sys.exit()",
"def server(port, wsgi_app):\n try:\n httpd = wsgiref.simple_server.make_server(self._host, port, wsgi_app)\n except socket.error:\n # Try IPv6\n httpd = wsgiref.simple_server.make_server(\n self._host, port, wsgi_app, server_class=WsgiServerIpv6)\n started.set()\n httpd.timeout = 30\n while not stopping.is_set():\n httpd.handle_request()\n stopped.set()",
"def accept(self):\n possible_addrs = ['127.0.0.1', 'localhost']\n \n if not self.LOCAL_ONLY:\n possible_addrs += self.WHITE_LIST\n \n while True:\n client_socket, (addr, p) = self.socket.accept()\n \n if self.ALLOW_ALL or addr in possible_addrs:\n return DaemonServerSocket(port=p, \n bufferSize=self.BUFFER_SIZE,\n encoding=self.ENCODING,\n altsocket=client_socket)\n # otherwise close\n self.socket.close()",
"async def start(self):\n server = await asyncio.start_server(\n self.handle_request, self.host, self.port)\n\n addr = server.sockets[0].getsockname()\n print(f'Serving on {addr}')\n\n async with server:\n await server.serve_forever()",
"def StartDebugServer(address=net_utils.LOCALHOST, port=5339):\n socketserver.ThreadingTCPServer.allow_reuse_address = True\n server = socketserver.ThreadingTCPServer(\n (address, port), DebugRequestHandler)\n thread = process_utils.StartDaemonThread(target=server.serve_forever,\n name='tcp-debug-server')\n\n logging.info('Debug server started on %s:%d', address, port)\n return server, thread",
"def start():\n\n start_server()",
"def listen(self):\n # first create the server socket\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n server.bind((self.host, self.port))\n while True:\n client_socket, client_addr = server.accept()\n handler_thread = threading.Thread(target=self.handle_node, args=[client_socket])\n handler_thread.start()",
"async def _start_server(self) -> None:\n # First, figure out what address to listen on. Open a connection to\n # the Hubitat hub and see what address it used. This assumes this\n # machine and the Hubitat hub are on the same network.\n with _open_socket(socket.AF_INET, socket.SOCK_DGRAM) as s:\n s.connect((self.host, 80))\n address = s.getsockname()[0]\n\n self._server = server.create_server(\n self._process_event, address, self.port or 0, self.ssl_context\n )\n self._server.start()\n _LOGGER.debug(\n \"Listening on %s:%d with SSL %s\",\n address,\n self._server.port,\n \"disabled\" if self.ssl_context is None else \"enabled\",\n )\n\n await self.set_event_url(self.event_url)",
"def start_srv(self, address, family, proto=socket.IPPROTO_UDP):\n assert address\n assert address[0] # host\n assert address[1] # port\n assert family\n assert proto\n if family == socket.AF_INET6:\n if not socket.has_ipv6:\n raise NotImplementedError(\"[start_srv] IPv6 is not supported by socket {0}\"\n .format(socket))\n elif family != socket.AF_INET:\n raise NotImplementedError(\"[start_srv] unsupported protocol family {0}\".format(family))\n\n if proto == socket.IPPROTO_TCP:\n socktype = socket.SOCK_STREAM\n elif proto == socket.IPPROTO_UDP:\n socktype = socket.SOCK_DGRAM\n else:\n raise NotImplementedError(\"[start_srv] unsupported protocol {0}\".format(proto))\n\n if self.thread is None:\n self.thread = threading.Thread(target=self.query_io)\n self.thread.start()\n with self.condition:\n self.condition.wait()\n\n for srv_sock in self.srv_socks:\n if (srv_sock.family == family\n and srv_sock.getsockname()[:2] == address\n and srv_sock.proto == proto):\n return\n\n sock = socket.socket(family, socktype, proto)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n # Add address to interface when running from Deckard\n if self.if_manager is not None:\n if address[0] not in self.if_manager.added_addresses:\n self.if_manager.add_address(address[0])\n\n # A lot of addresses are added to the interface while runnning from Deckard in\n # the small amount of time which caused ocassional hiccups while binding to them\n # right afterwards in testing. Therefore, we retry a few times.\n final_ex = None\n for i in range(self.RETRIES_ON_BIND):\n try:\n sock.bind(address)\n break\n except OSError as ex:\n # Exponential backoff\n time.sleep((2 ** i) + random.random())\n final_ex = ex\n continue\n else:\n print(final_ex, address)\n raise final_ex\n\n if proto == socket.IPPROTO_TCP:\n sock.listen(5)\n self.srv_socks.append(sock)",
"def start(self):\n self.listener.listen(self.backlog)\n h, p = self.listener.getsockname()\n self.logger.info(\"server started on %s:%s\", h, p)\n self.active = True\n if self.auto_register:\n t = THG.Thread(target = self._bg_register)\n t.setDaemon(True)\n t.start()\n #if sys.platform == \"win32\":\n # hack so we can receive Ctrl+C on windows\n self.listener.settimeout(0.5)\n try:\n try:\n while True:\n self.accept()\n except EOFError:\n pass # server closed by another thread\n except SystemExit:\n self.logger.warn(\"System exit\")\n except KeyboardInterrupt:\n self.logger.warn(\"keyboard interrupt!\")\n finally:\n self.logger.info(\"server has terminated\")\n self.close()",
"def start_socket(ip, port):\n sock = socket.socket(\n socket.AF_INET, socket.SOCK_STREAM)\n # the server binds itself to a certain socket\n sock.bind((ip, port))\n # listening to the socket\n sock.listen(LISTEN)\n return sock",
"def start_socket(ip, port):\n sock = socket.socket(\n socket.AF_INET, socket.SOCK_STREAM)\n # the server binds itself to a certain socket\n sock.bind((ip, port))\n # listening to the socket\n sock.listen(LISTEN)\n return sock",
"def start_server(self):\n app.run(host=str(self.__constants.host),\n port=int(self.__constants.port),\n debug=bool(self.__constants.runindebug))"
] | [
"0.7455192",
"0.7074131",
"0.69037455",
"0.6892851",
"0.6880499",
"0.68164194",
"0.68122846",
"0.6771114",
"0.6757552",
"0.67320347",
"0.67300195",
"0.66931385",
"0.66920185",
"0.6678358",
"0.6541331",
"0.653898",
"0.6517115",
"0.6511139",
"0.6464294",
"0.64501506",
"0.6410228",
"0.6374418",
"0.6348268",
"0.6346287",
"0.6307014",
"0.6303508",
"0.62873256",
"0.6286811",
"0.6286811",
"0.6277052"
] | 0.73815656 | 1 |
Check a packetin message. Build and output a packetout. | def packet_in_handler(self, ev):
msg = ev.msg
datapath = msg.datapath
port = msg.match['in_port']
gateway = self.gateway_get(datapath.id)
if gateway is None:# or gateway.idc_id != CONF.idc_id:
return
pkt = packet.Packet(msg.data)
pkt_ethernet = pkt.get_protocol(ethernet.ethernet)
if not pkt_ethernet:
LOG.info(_LI("drop non-ethernet packet"))
return
pkt_arp = pkt.get_protocol(arp.arp)
pkt_ipv4 = pkt.get_protocol(ipv4.ipv4)
if pkt_arp:
self.packet_arp.run(msg, pkt_ethernet, pkt_arp, gateway)
elif pkt_ipv4:
pkt_tp = pkt.get_protocol(tcp.tcp) or \
pkt.get_protocol(udp.udp) or \
pkt.get_protocol(icmp.icmp)
if pkt.get_protocol(icmp.icmp):
LOG.error("packet-in msg %s %s %s from %s", datapath.id, pkt_ipv4, pkt_tp, port)
LOG.debug("packet-in msg %s %s %s from %s",
datapath.id, pkt_ipv4, pkt_tp, port)
if pkt_tp and port:
self.packet_ipv4.run(msg, pkt_ethernet, pkt_ipv4, pkt_tp, gateway)
else:
LOG.debug(_LI("drop non-arp and non-ip packet")) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _packet_in(self, ev):\n\n dp = ev.msg.datapath\n ofp = dp.ofproto\n parser = dp.ofproto_parser\n match = ev.msg.match\n\n ##SNDCP packet with multiple fragments recieved - print warning, send ICMP fragmentation needed\n ##TODO: Not WOrking correctly\n ## File \"/usr/local/lib/python2.7/dist-packages/ryu/ofproto/ofproto_v1_3_parser.py\", line 746, in __getitem__\n ## return dict(self._fields2)[key]\n ## KeyError: 'udp_dst'\n\n # if (match['eth_type'] == 0x0800 and match['ip_proto'] == inet.IPPROTO_UDP\n # and match['udp_dst'] == VGSN_PORT and match['sndcp_first_segment'] == 1\n # and match['sndcp_more_segments'] == 1):\n # _icmp_send(dp,match['in_port'],match['ipv4_dst'],match['ipv4_src'],match['eth_dst'],match['eth_src'],icmp_type=3,icmp_code=4)\n # LOG.warning('WARNING: Device with IP: '+match['ipv4_src']+' sent fragmented sndcp packet')\n # return\n\n ##ARP request recieved - send 'I'm here' response\n if match['eth_type'] == 0x0806 and match['arp_op'] == 1:\n LOG.debug(\"ARP request accepted\")\n _arp_send(dp=dp, port_out=match['in_port'], arp_code=2, eth_dst=match['eth_src'], eth_target=match['arp_sha'],\n ip_target=match['arp_spa'], ip_sender=match['arp_tpa'])\n LOG.debug('Reply to '+match['arp_spa'] +': Host '+match['arp_tpa']+' is at forwarder '+str(dp.id) + \" with ethX source MAC address\")\n return\n\n ##ARP response with target_ip==DISCOVERY_ARP_IP recieved - we found APN\n #\n # FIXED: All ARP responses are replied, regardless of the target IP\n #\n # TODO : At this point only ARPs belonging to the APNs networks subnet should\n # be answered\n if match['eth_type'] == 0x0806 and match['arp_op'] == 2:\n LOG.debug('TUNNEL MNGR: ARP response with target APN discovery IP recieved at controller, processing for APN extraction')\n pkt = packet.Packet(array.array('B', ev.msg.data))\n arp_pkt=pkt.get_protocol(arp.arp)\n apn_ip = arp_pkt.src_ip\n apn_mac= arp_pkt.src_mac\n port = match['in_port']\n\n ##Search for apn in APN_POOL to add mac addr. and update topology\n for sApn in APN_POOL:\n if sApn.ip_addr == apn_ip:\n LOG.debug('Recieved ARP response was from ' + sApn.name + ' APN')\n sApn.eth_addr = apn_mac\n sApn.port = port\n sApn.dpid = dp.id\n # Links towards APNs will not be measured\n topo.add_link(dp.id,str(sApn.name),port)\n topo.add_link(str(sApn.name),dp.id,0)\n topo.reload_topology()\n LOG.debug('TUNNEL MNGR: APN '+str(sApn.name)+' found at forwarder: '+str(dp.id)+', port: '+str(port) + ' by ARP search')\n\n ##Add special rules to edge forwarder\n self.on_edge_inet_dp_join(dp, port, sApn)\n\n # FIX: We do not handle bss as a special APN\n # For greater extensibility, BSS/UTRAN/LAN APNs (exit/enter) points\n # will be handled in a generic manner\n #\n ##Create MAC-tunnels between APN and all BSSs\n #for bss in BSS_POOL:\n # self.add_tunnel(bss,apn)\n #break\n\n ### WMNC: In this case, we are not making tunnels between\n # two types of ingress/egress point, but actually same type\n\n for dApn in APN_POOL:\n # we are cycling through all possible APNs, looking for different APN tupples\n # with filled HW addresses (already found by APN search)\n if sApn != dApn and dApn.eth_addr != None:\n LOG.debug('TUNNEL MNGR: Different APNs with filled HW address found, lets find out if there is tunnel between them')\n\n paths = False\n try:\n paths = nx.all_simple_paths(topo.DynamicGraph, source=sApn.name, target=dApn.name)\n except:\n LOG.debug('TUNNEL MNGR: No path between: ' + sApn.name + ' and ' + dApn.name + '. 
Retry when next APN discovered.')\n\n LOG.debug('TUNNEL MNGR: These are the paths between them (possible tunnels):')\n if paths:\n for path in paths:\n LOG.debug('TUNNEL MNGR: Calling add_plainMacTunnel for ' + sApn.name + ' and ' + dApn.name + ' with path: ' + str(path))\n self.add_plainMacTunnel(sApn, dApn, path)\n else:\n LOG.debug('TUNNEL MNGR: PATHS == 0 ????????????????')\n\n\n return\n\n ##ICMP echo with dst_ip==DISCOVERY_IP_DST recieved - new link between forwarders is up\n if match['eth_type'] == 0x0800 and match['ipv4_dst'] == DISCOVERY_IP_DST and match['ip_proto'] == 1:\n #LOG.debug('TOPO MNGR: ICMP echo recieved at controller, processing for link extraction or latency measurement')\n\n pkt = packet.Packet(array.array('B', ev.msg.data))\n\n ##Discovery pings carry information about sending datapath in payload of icmp packet\n ##these information are in Dictionary format, we parse the out with _icmp_parse_payload() method\n body = _icmp_parse_payload(pkt)\n neighbourDPID=body['dpid']\n neighbourPort=body['port_out']\n\n ## measurement\n ## currentClock moved way up to improve precision\n receivedClock=float(body['clock'])\n currentClock = time.clock()\n latency = currentClock - receivedClock\n\n currentDate = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n ##Update latency or add new edges to topology.\n if topo.DynamicGraph.has_edge(dp.id, neighbourDPID) and topo.DynamicGraph.has_edge(neighbourDPID, dp.id):\n topo.StaticGraph[neighbourDPID][dp.id]['pdv'] = topo.StaticGraph[neighbourDPID][dp.id]['lat'] - latency\n topo.StaticGraph[neighbourDPID][dp.id]['lat'] = latency\n topo.StaticGraph[neighbourDPID][dp.id]['upt'] = currentDate\n #topo.StaticGraph[neighbourDPID][dp.id]['upt'] = currentDate\n loss = self.loss_update(neighbourDPID, dp.id, currentDate)\n #LOG.debug('TOPO MNGR: Updating latency ' + str(latency) + ' and date ' + str(currentDate) + ' LOSS: ' + str(loss))\n topo.reload_topology()\n else:\n ## latency not correct for both directions when adding links\n ## update occurs on receive of next measurement packet from oposite direction\n topo.add_link(dp.id, neighbourDPID, ev.msg.match['in_port'], latency, currentDate)\n topo.add_link(neighbourDPID, dp.id, neighbourPort , latency, currentDate)\n LOG.debug('TOPO MNGR: Topology changed: New link between forwarder ID '+str(dp.id)+ ' via port ' + str(ev.msg.match['in_port'])\n +' and forwarder ID '+str(neighbourDPID)+ ' via port ' + str(neighbourPort) + ' was discovered.')\n\n topo.reload_topology()\n ## retry to create tunnels\n ## find better paths between APNs\n for sApn in APN_POOL:\n for dApn in APN_POOL:\n if sApn != dApn:\n LOG.debug('TOPO MNGR: Topology changed: trying to re-build inactive tunnel between:' + sApn.name + ' and ' + dApn.name)\n paths = False\n try:\n paths = nx.all_simple_paths(topo.DynamicGraph, source=sApn.name, target=dApn.name)\n except:\n LOG.debug('No path between: ' + sApn.name + ' and ' + dApn.name + '. 
Retry when next fwd connects.')\n\n LOG.debug('TUNNEL MNGR: These are the paths between them (possible tunnels):')\n if paths:\n for path in paths:\n LOG.debug('TUNNEL MNGR: Calling add_plainMacTunnel for ' + sApn.name + ' and ' + dApn.name + ' with path: ' + str(path))\n self.add_plainMacTunnel(sApn, dApn, path)\n else:\n LOG.debug('TUNNEL MNGR: PATHS == 0 ????????????????')\n return\n\n # flow of last resort (process for routing)\n if match['eth_type'] == 0x0800:\n # LOG.debug('*****************Flow of last resort matched(plain IP), process for routing********'\n # + ' match[ipv4_dst]: ' + str(match['ipv4_dst'] + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(match['ip_dscp'])))\n ## Not very proud of myself, but it will do the trick\n ## Turbo lumberjack routing logic\n ## TODO: Implement a longest prefix match routing\n\n candidates = []\n\n for source, destination, ip_dscp in routesList:\n if ((source == match['ipv4_dst'] and destination == match['ipv4_src']) or (source == match['ipv4_src'] and destination == match['ipv4_dst'])) and ip_dscp == match['ip_dscp']:\n # LOG.debug('ROUTING: route source: ' + str(source) + 'destination: ' + str(destination)\n # + ' match[ipv4_dst]: ' + str(match['ipv4_dst'])\n # + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(ip_dscp)\n # + ' already exists, aborting addition of new route')\n return\n\n for tunnel in TUNNELS:\n if (tunnel.sApn.ip_addr == match['ipv4_dst'] and tunnel.dApn.ip_addr == match['ipv4_src']) or (tunnel.sApn.ip_addr == match['ipv4_src'] and tunnel.dApn.ip_addr == match['ipv4_dst']):\n LOG.debug('ROUTING: Tunnel candidate found in list of tunnels. Adding tunnel path: ' + str(tunnel.po_edges) + ' to candidates.')\n candidates.append(tunnel)\n\n trafficClass = self.TC_selection(match['ip_dscp'])\n\n if len(candidates) == 0:\n LOG.debug('ROUTING: match[ipv4_dst]: ' + str(match['ipv4_dst'])\n + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(match['ip_dscp']))\n LOG.debug('ROUTING: ERROR, NO feasible tunnels for such route.')\n return\n\n LOG.debug('Looking for tunnels: DST_IP: ' + match['ipv4_dst'] + ' SRC_IP: ' + match['ipv4_src'] + ' DSCP: ' + str(match['ip_dscp']) + '(traffic class: ' + str(trafficClass) + ')' + ' Incoming from FWD: ' + str(dp.id))\n tunnel = self.tunnel_selection(trafficClass, candidates)\n LOG.debug('TE MNGR: Selected tunnel Path out: ' + str(tunnel.path_out_str) + ' meter_id: ' + str(tunnel.meter_id))\n\n dscp = match['ip_dscp']\n\n ## meter_id\n ## 2,4,6,8,10 = 500kbps, 1,3,5,7,9 = 1000kbps ...\n ## 0 = 100Gbps\n meter_id = tunnel.meter_id\n\n #\n # FIXME: incomplete set of rules installed on LAN Access forwarders\n # TODO : Philosophy of table IDs should be clarified, as now it total mess!!!\n # TODO : this should be done only once, from that moment, all user plane packets\n # should travelse only forwarder and should not be sent to controller\n\n\n\n #WAY OUT\n dp = dpset.get(tunnel.sApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_type=0x0800, ipv4_dst=tunnel.dApn.ip_addr, ip_dscp=dscp)\n actions = [parser.OFPActionSetField(eth_src=tunnel.tid_in), parser.OFPActionSetField(eth_dst=tunnel.tid_out)]\n inst = [parser.OFPInstructionGotoTable(MAC_TUNNEL_TABLE), parser.OFPInstructionMeter(meter_id), parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=100, match=match, instructions=inst, table_id=INGRESS_TABLE)\n dp.send_msg(req)\n\n LOG.debug('ROUTING: Installing flow 
ON WAY OUT to forwarderID: ' + str(dp.id) + ',Table: ' + str(INGRESS_TABLE) + ' DP ID: ' + str(tunnel.dApn.dpid) + ' Tunel dApn IP addr: ' + str(tunnel.dApn.ip_addr) + ' Tunnel ID: ' + str(tunnel.tid_out))\n\n dp = dpset.get(tunnel.dApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_dst=tunnel.tid_out)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.dApn.eth_addr), parser.OFPActionOutput(tunnel.path_out[-1].port_out)]\n inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=300, match=match, instructions=inst, table_id=ACCESS_ADAPTATION_TABLE_OUT)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY OUT to forwarderID: ' + str(dp.id) + ',Table: ' + str(ACCESS_ADAPTATION_TABLE_OUT) + ' DP ID: ' + str(tunnel.dApn.dpid)+ ' Tunel ID: ' + str(tunnel.tid_out)+ ' dApn ETH addr: ' + str(tunnel.dApn.eth_addr))\n\n #WAY IN\n dp = dpset.get(tunnel.dApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_type=0x0800, ipv4_dst=tunnel.sApn.ip_addr, ip_dscp=dscp)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.tid_in), parser.OFPActionSetField(eth_src=tunnel.tid_out)]\n inst = [parser.OFPInstructionGotoTable(MAC_TUNNEL_TABLE), parser.OFPInstructionMeter(meter_id), parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=100, match=match, instructions=inst, table_id = INGRESS_TABLE)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY IN to forwarderID: ' + str(dp.id) + ',Table: ' + str(INGRESS_TABLE) + ' DP ID: ' + str(tunnel.sApn.dpid) + ' Tunel dApn IP addr: ' + str(tunnel.sApn.ip_addr) + ' Tunnel ID: ' + str(tunnel.tid_in))\n\n\n dp = dpset.get(tunnel.sApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_dst=tunnel.tid_in)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.sApn.eth_addr), parser.OFPActionOutput(tunnel.path_in[-1].port_out)]\n inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=300, match=match, instructions=inst, table_id=ACCESS_ADAPTATION_TABLE_OUT)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY IN to forwarderID: ' + str(dp.id) + ',Table: ' + str(ACCESS_ADAPTATION_TABLE_OUT) + ' DP ID: ' + str(tunnel.sApn.dpid)+ ' Tunel ID: ' + str(tunnel.tid_in)+ ' sApn ETH addr: ' + str(tunnel.sApn.eth_addr))\n\n\n LOG.debug('ROUTING: Rules on access edge forwarders installed')\n LOG.debug('ROUTING: Adding route: DST_IP: ' + tunnel.dApn.ip_addr + ' SRC_IP: ' + tunnel.sApn.ip_addr + ' dscp: ' + str(dscp) + ' path out str: ' + tunnel.path_out_str )\n routesList.append( ( tunnel.sApn.ip_addr, tunnel.dApn.ip_addr, dscp) )\n\n parser = dp.ofproto_parser\n\n for dpid in LAN_TYPE_FORWARDERS:\n ## DUNNO why this rule with low priority still hits traffic which is also matched by rules with IP address matches\n ## Here I delete the rule, it is added on FWD when it connects to controoller\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dpid) + ' is a LAN edge forwarder, deleting rules')\n dp = dpset.get(dpid)\n priority = 2\n match = parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.mod_flow(dp, command=dp.ofproto.OFPFC_DELETE_STRICT,\n table_id=0, actions=actions,\n match=match, priority=priority)\n\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dp.id) + ' is a LAN edge forwarder, installing rules again :)')\n match = 
parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.add_flow(dp, 2, match, actions)",
"def _handle_PacketIn (self, event):\n packet = event.parsed # This is the parsed packet data.\n if not packet.parsed:\n log.warning(\"Ignoring incomplete packet\")\n return\n\n packet_in = event.ofp # The actual ofp_packet_in message.\n \n self.do_firewall(packet, packet_in, event)",
"def _handle_PacketIn (self, event):\n packet = event.parsed # This is the parsed packet data.\n if not packet.parsed:\n log.warning(\"Ignoring incomplete packet\")\n return\n\n packet_in = event.ofp # The actual ofp_packet_in message.\n self.do_final(packet, packet_in, event.port, event.dpid)",
"def _handle_PacketIn(self, event):\n\n packet = event.parsed # This is the parsed packet data.\n if not packet.parsed:\n log.warning(\"Ignoring incomplete packet\")\n return\n\n packet_in = event.ofp # The actual ofp_packet_in message.\n\n # Comment out the following line and uncomment the one after\n # when starting the exercise.\n # self.act_like_hub(packet, packet_in)\n # self.act_like_switch(packet, packet_in)\n self.act_like_router(packet, packet_in)",
"def _packet_in_debug(self, ev, in_port):\n #*** Extract parameters:\n msg = ev.msg\n datapath = msg.datapath\n dpid = datapath.id\n pkt = packet.Packet(msg.data)\n eth = pkt.get_protocol(ethernet.ethernet)\n eth_src = eth.src\n eth_dst = eth.dst\n pkt_ip4 = pkt.get_protocol(ipv4.ipv4)\n pkt_ip6 = pkt.get_protocol(ipv6.ipv6)\n pkt_tcp = pkt.get_protocol(tcp.tcp)\n\n #*** Some debug about the Packet In:\n if pkt_ip4 and pkt_tcp:\n self.logger.debug(\"event=pi_ipv4_tcp dpid=%s \"\n \"in_port=%s ip_src=%s ip_dst=%s tcp_src=%s \"\n \"tcp_dst=%s\",\n dpid, in_port, pkt_ip4.src, pkt_ip4.dst,\n pkt_tcp.src_port, pkt_tcp.dst_port)\n elif pkt_ip6 and pkt_tcp:\n self.logger.debug(\"event=pi_ipv6_tcp dpid=%s \"\n \"in_port=%s ip_src=%s ip_dst=%s tcp_src=%s \"\n \"tcp_dst=%s\",\n dpid, in_port, pkt_ip6.src, pkt_ip6.dst,\n pkt_tcp.src_port, pkt_tcp.dst_port)\n elif pkt_ip4:\n self.logger.debug(\"event=pi_ipv4 dpid=\"\n \"%s in_port=%s ip_src=%s ip_dst=%s proto=%s\",\n dpid, in_port,\n pkt_ip4.src, pkt_ip4.dst, pkt_ip4.proto)\n elif pkt_ip6:\n self.logger.debug(\"event=pi_ipv6 dpid=%s \"\n \"in_port=%s ip_src=%s ip_dst=%s\",\n dpid, in_port,\n pkt_ip6.src, pkt_ip6.dst)\n else:\n self.logger.debug(\"event=pi_other dpid=%s \"\n \"in_port=%s eth_src=%s eth_dst=%s eth_type=%s\",\n dpid, in_port, eth_src, eth_dst, eth.ethertype)",
"def packet_in_handler(self, ev):\n msg = ev.msg\n datapath = msg.datapath\n inPort = msg.match['in_port']\n\n packet = Packet(msg.data)\n etherFrame = packet.get_protocol(ethernet)\n\n if etherFrame.ethertype == ether.ETH_TYPE_LLDP:\n # ignore lldp packet\n return\n\n if etherFrame.ethertype == ether.ETH_TYPE_ARP:\n self.receive_arp(datapath, packet, etherFrame, inPort)\n elif etherFrame.ethertype == ether.ETH_TYPE_IP:\n self.receive_ip(datapath, packet, etherFrame, inPort)\n else:\n LOG.debug(\"receive Unknown packet %s => %s (port%d)\"\n % (etherFrame.src, etherFrame.dst, inPort))\n self.print_etherFrame(etherFrame)\n LOG.debug(\"Drop packet\")\n return 1\n return 0",
"def _handle_PacketIn (self, event):\n\n packet = event.parsed\n\n def flood (message = None):\n \"\"\" Floods the packet \"\"\"\n msg = of.ofp_packet_out()\n if time.time() - self.connection.connect_time >= _flood_delay:\n # Only flood if we've been connected for a little while...\n\n if self.hold_down_expired is False:\n # Oh yes it is!\n self.hold_down_expired = True\n #log.info(\"%s: Flood hold-down expired -- flooding\",\n # dpid_to_str(event.dpid))\n\n if message is not None: log.debug(message)\n #log.debug(\"%i: flood %s -> %s\", event.dpid,packet.src,packet.dst)\n # OFPP_FLOOD is optional; on some switches you may need to change\n # this to OFPP_ALL.\n msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))\n else:\n pass\n #log.info(\"Holding down flood for %s\", dpid_to_str(event.dpid))\n msg.data = event.ofp\n msg.in_port = event.port\n self.connection.send(msg)\n\n def drop (duration = None):\n \"\"\"\n Drops this packet and optionally installs a flow to continue\n dropping similar ones for a while\n \"\"\"\n if duration is not None:\n if not isinstance(duration, tuple):\n duration = (duration,duration)\n msg = of.ofp_flow_mod()\n msg.match = of.ofp_match.from_packet(packet)\n msg.idle_timeout = duration[0]\n msg.hard_timeout = duration[1]\n msg.buffer_id = event.ofp.buffer_id\n self.connection.send(msg)\n elif event.ofp.buffer_id is not None:\n msg = of.ofp_packet_out()\n msg.buffer_id = event.ofp.buffer_id\n msg.in_port = event.port\n self.connection.send(msg)\n\n def checkDropList():\n if packet.type == ethpkt.IP_TYPE:\n ip_packet = packet.payload\n if ip_packet.protocol == ippkt.UDP_PROTOCOL or ip_packet.protocol == ippkt.TCP_PROTOCOL:\n if self.client.src_ip == ip_packet.srcip: #2\n self.client.counter += 1\n if str(self.client.counter) in self.client.droplist: #2a\n log.debug(\"Dropping client packet: number %d\" %\n (self.client.counter))\n drop()\n return True\n else:\n return False\n elif self.server.src_ip == ip_packet.srcip: #2\n self.server.counter += 1\n if str(self.server.counter) in self.server.droplist: #2a\n log.debug(\"Dropping server packet: number %d\" %\n (self.server.counter))\n drop()\n return True\n else:\n return False\n else:\n return False\n else:\n return False\n else:\n return False\n self.macToPort[packet.src] = event.port #1\n\n if (checkDropList()):\n return\n\n if not self.transparent: # 3\n if packet.type == packet.LLDP_TYPE or packet.dst.isBridgeFiltered():\n drop() # 3a\n return\n\n if packet.dst.is_multicast:\n flood() # 4a\n else:\n if packet.dst not in self.macToPort: #5\n flood(\"Port for %s unknown -- flooding\" % (packet.dst,)) # 5a\n else:\n port = self.macToPort[packet.dst]\n if port == event.port: #6\n # 6a\n log.warning(\"Same port for packet from %s -> %s on %s.%s. Drop.\"\n % (packet.src, packet.dst, dpid_to_str(event.dpid), port))\n drop(10)\n return\n #7\n #log.debug(\"installing flow for %s.%i -> %s.%i\" %\n # (packet.src, event.port, packet.dst, port))\n msg = of.ofp_packet_out()\n msg.actions.append(of.ofp_action_output(port = port))\n msg.data = event.ofp\n self.connection.send(msg)",
"def _handle_PacketIn(self, event):\r\n\r\n packet = event.parsed # This is the parsed packet data.\r\n if not packet.parsed:\r\n log.warning(\"Ignoring incomplete packet\")\r\n return\r\n\r\n packet_in = event.ofp # The actual ofp_packet_in message.\r\n\r\n # Comment out the following line and uncomment the one after\r\n # when starting the exercise.\r\n #self.act_like_hub(packet, packet_in)\r\n self.act_like_switch(packet, packet_in)\r\n #self.act_like_router(packet, packet_in)\r",
"def _handle_PacketIn(self, event):\n msg = of.ofp_packet_out()\n msg.data = event.ofp\n msg.in_port = event.port\n msg.actions.append(of.ofp_action_output(port=of.OFPP_FLOOD))\n event.connection.send(msg)",
"def process_packet(self, in_port, packet):\n \n buf = bytearray(packet)\n for idx in range((len(packet) + 19)/20):\n logging.debug(hexify(buf[20*idx : 20*(idx+1)], 20))\n\n if self.disabled:\n logging.debug(\"Switch is disabled; discarding packet\")\n return\n\n parsed_packet = ParsedPacket(buf, self.metadata)\n logging.debug(\"Processing packet %d from port %d with %s\" % \n (parsed_packet.id, in_port,\n self.first_processor.name))\n self.first_processor.process(parsed_packet)",
"def _handle_PacketIn (self, event):\n packet_in = event.ofp # The actual ofp_packet_in message.\n \n packet = event.parsed # This is the parsed packet data.\n if not packet.parsed:\n log.warning(\"%i %i ignoring unparsed packet\", dpid, inport)\n return\n \n if packet.type == ethernet.LLDP_TYPE:\n # Ignore LLDP packets\n return\n\n # self.act_like_hub(packet, packet_in)\n\n arpp = packet.find('arp')\n if arpp is not None:\n arp_type = \"unknown\"\n if arpp.opcode == arp.REQUEST:\n arp_type = \"request\"\n # return\n # NOTE: in a triangle topology, forwarding arp requests is not necessary\n # but it may be if topology changes\n elif arpp.opcode == arp.REPLY:\n arp_type = \"reply\"\n log.info(\"ARP %s for dst %s from source %s recieved by switch %d on port %d\", \n arp_type, arpp.protodst, arpp.protosrc, self.dpid, event.port)\n self.act_like_switch(packet, packet_in, event.port)\n # # Learn IP to MAC address mapping.\n # if self._mac_learn(packet.src, arpp.protosrc):\n # log.info(\"switch %s learned %s -> %s by ARP\", self.dpid, arpp.protosrc, packet.src)\n # # dpid = event.connection.dpid\n # inport = event.port \n # if packet.src not in self.mac_to_port:\n # self.mac_to_port[packet.src] = inport\n # # Respond to ARP request if appropriate.\n # if arpp.opcode == arp.REQUEST and ipinfo(arpp.protosrc)[0] == 1:\n # log.info(\"ARP request for dst %s from source %s recieved by switch %d on port %d\", arpp.protodst, arpp.protosrc, self.dpid, inport)\n # e = self._arp_response_pkt(arpp, packet)\n # msg = of.ofp_packet_out()\n # msg.data = e.pack()\n # msg.actions.append(of.ofp_action_output(port = of.OFPP_IN_PORT))\n # msg.in_port = inport\n # event.connection.send(msg)\n\n ipp = packet.find('ipv4')\n if ipp is not None:\n log.info(\"IP packet received by switch %d on port %d. src is %s, dst is %s\",\n self._id, event.port, ipp.srcip, ipp.dstip)\n self.act_like_switch(packet, packet_in, event.port)",
"def process_udp_packet(self, packet_data, packet_source):\n # Add your logic here, after your logic is done,\n # add the packet to be sent to self.packet_buffer\n # feel free to remove this line\n print(f\"Received a packet from {packet_source}\")\n in_packet = self._parse_udp_packet(packet_data)\n out_packet = self._do_some_logic(in_packet)\n\n # This shouldn't change.\n self.packet_buffer.append(out_packet)\n\n return in_packet",
"def _handle_PacketIn(self, event):\n\n packet = event.parsed # Packet is the original L2 packet sent by the switch\n if not packet.parsed:\n log.warning(\"Ignoring incomplete packet\")\n return\n # ignore lldp packets\n if event.parsed.type == ethernet.LLDP_TYPE:\n return\n # act like switch\n packet_in = event.ofp # packet_in is the OpenFlow packet sent by the switch\n self.act_like_switch(packet, packet_in)",
"def _do_some_logic(self, packet):\n\n\n pass",
"def spoof_packet(packet):",
"def process_net_message(message, address):\n if message[0] == '<' and message[-1] == '>':\n message = message[1:-1]\n if \":\" in message:\n command, data = message.split(\":\")\n else:\n command = message\n data = None\n\n if command == \"JOIN\":\n print(\"added player to player list:\", data, address)\n ip_address, port = address\n active_player_dict[str(address)] = Player(ip_address, port, data, random.randint(0, 639),\n random.randint(0, 479))\n elif command == \"QUIT\":\n print(\"player removed from player list:\", address)\n del active_player_dict[str(address)]\n elif command == \"KD\":\n data = chr(int(data))\n if data not in active_player_dict[str(address)].keys_down:\n active_player_dict[str(address)].keys_down.append(data)\n elif command == \"KU\":\n data = chr(int(data))\n if data in active_player_dict[str(address)].keys_down:\n active_player_dict[str(address)].keys_down.remove(data)\n elif command == \"keepAlive\":\n data = int(data)\n if active_player_dict[str(address)].alive > 0: #time for player to be alive is not zero\n active_player_dict[str(address)].alive = data\n currentTime = time.time()\n else:\n print(\"invalid message.\")",
"def on_ofp_message(self, message: IncomingMessage) -> None:\n with message.process():\n log.debug(f\"received [x] {message.routing_key}:{message.body}\")\n (version, msg_type, msg_len, xid) = ofproto_parser.header(message.body)\n log.debug(\n f\"msg {version} {msg_type} {msg_len} {xid} {len(message.body)} {type(message.body)}\"\n )\n msg = ofproto_parser.msg(\n version, msg_type, msg_len, xid, message.body[:msg_len]\n )\n if msg_type == self.ofproto.OFPT_PACKET_IN:\n pkt_in = self.ofparser.OFPPacketIn.parser(msg_len, xid, msg.buf)\n pkt_in.serialize()\n dpid = int(message.routing_key.split(\".\")[-1])\n self.loop.create_task(self.handle_pktin(pkt_in, dpid))",
"def packetCheck(packet):\n info = [packet[i : i + 2] for i in range(0, len(packet), 2)]\n MagicNo = int.from_bytes(info[0], \"big\")\n PacketType = int.from_bytes(info[1], \"big\")\n RequestType = int.from_bytes(info[2], \"big\")\n if MagicNo != 0x497E:\n return False\n if PacketType != 0x0001:\n return False\n if RequestType != 0x0001 and RequestType != 0x0002:\n return False\n return True",
"def process(self, packet):\n pass",
"def test_process_packet_message(self):\n\n pkt = {'type': 'message',\n 'endpoint': '',\n 'data': 'woot'}\n data = self.ns.process_packet(pkt)\n self.assertEqual(data, pkt['data'])\n assert not self.environ['socketio'].error.called\n\n # processing a message packet with id and endpoint\n pkt = {'type': 'message',\n 'id': 5,\n 'ack': True,\n 'endpoint': '/tobi',\n 'data': ''}\n data = self.ns.process_packet(pkt)\n self.assertEqual(data, pkt['data'])\n assert not self.environ['socketio'].error.called",
"def process_incoming_packet(self, data, address):\n try:\n new_message = MessageFactory.create_message(\n packet_data=data,\n origin_address=address,\n destination_node=self.node)\n self._put_new_message_in_queue(new_message)\n self.report()\n if new_message.TYPE_STRING != \"ack\":\n ack_message = MessageFactory.generate_ack_message(new_message)\n self.send_message(ack_message)\n except Exception as e:\n print(e)",
"def pin_check_subroutine(self, pin):\n self.socket.sendall(self.make_packet(\"CMD\", \"PIN CHECK\"))\n \n response = self.socket.recv(4096)\n \n if response:\n response_hdr, response_msg, response_sdr = self.parse_packet(response)\n \n if response_hdr == \"ACK\" and response_msg == \"PIN CHECK\":\n # ready to send PIN to server\n self.socket.sendall(self.make_packet(\"DATA\", pin))\n \n pin_check = self.socket.recv(4096)\n \n if pin_check:\n pin_check_header, pin_check_str, pin_check_sender = self.parse_packet(pin_check) \n\n if pin_check_header == \"DATA\":\n if pin_check_str == \"PIN CHECK FAIL\":\n \n print(\"DEBUG: incorrect PIN\")\n \n if self.debug == 'n':\n # tell Arduino access is denied\n self.arduino.write(\"AD\")\n \n else:\n # PIN was good\n print(\"DEBUG: correct PIN\")\n\n if self.debug == 'n':\n # tell Arduino access is granted\n self.arduino.write(\"AG\" + pin_check_str)",
"def check_packet(packet, recipient_type, pkt_type, exp_src, exp_dst):\n packet_count = 0\n\n # If recipient is AVP, check for simple packet\n if (\"AVP\" in recipient_type):\n for pkt in packet.values():\n # Boolean to store if packet is correct. Set to False initially\n correctly_received = False\n\n # Check 1: Check src and dst MACs match expected\n if ((pkt['Ethernet']['src'] in exp_src) and\n (pkt['Ethernet']['dst'] == exp_dst)):\n\n # Check 2: If packet type is LLDP, check raw string for \"lldp\"\n if (pkt_type == \"lldp\"):\n # If \"lldp\" is present, set correctly_received to True\n if (\"lldp\" in pkt['Ethernet']['Raw']['load']):\n correctly_received = True\n\n # Packet type is not LLDP, so only Check 1 is required\n # Set correctly_received to True\n else:\n correctly_received = True\n\n # Update total packet count with value of correctly_received\n packet_count += correctly_received\n\n # If recipient is NVP, then check for VXLAN encapsulated packet\n elif (\"NVP\" in recipient_type):\n for pkt in packet.values():\n # Boolean to store if packet is correct. Set to False initially\n correctly_received = False\n\n # Check 1: Check if VxLAN fields are correct\n if ((pkt['Ethernet']['IP']['src'] == S1_LO_IP) and\n (pkt['Ethernet']['IP']['dst'] == VTEP_PEER_IP) and\n (pkt['Ethernet']['IP']['UDP']['dport'] == str(UDP_DPORT)) and\n (pkt['Ethernet']['IP']['UDP']['VXLAN']['vni'] == VNI_HEX) and\n (pkt['Ethernet']['IP']['UDP']['VXLAN']['Ethernet']['src'] in exp_src) and\n (pkt['Ethernet']['IP']['UDP']['VXLAN']['Ethernet']['dst'] == exp_dst)):\n\n # Check 2: If packet type is LLDP, check raw string for \"lldp\"\n if (pkt_type == \"lldp\"):\n # If \"lldp\" is present, set correctly_received to True\n if (\"lldp\" in pkt['Ethernet']['IP']['UDP']['VXLAN']['Ethernet']['Raw']['load']):\n correctly_received = True\n # Packet type is not LLDP, so only Check 1 is required\n # Set correctly_received to True\n else:\n correctly_received = True\n\n # Update total packet count with value of correctly_received\n packet_count += correctly_received\n\n return packet_count",
"def processpacket(p):\n\n\tglobal SynSentToTCPService\n\tglobal SynAckSentToTCPClient\n\tglobal LiveTCPService\n\tglobal LiveTCPClient\n\tglobal LiveUDPService\n\tglobal LiveUDPClient\n\tglobal NmapServerDescription\n\tglobal ManualServerDescription\n\tglobal ClientDescription\n\tglobal MacAddr\n\tglobal OSDescription\n\tglobal ServiceFPs\n\tglobal SipPhoneMatch\n\tglobal Devel\n\tglobal IsRouter\n\tglobal DNSRecord\n\tglobal HostIPs\n\n\tif (type(p) == Dot3) and (type(p['LLC']) == LLC):\n\t\tUnhandledPacket(p)\n\t\t#Spanning Tree Protocol\n\t\t#Debug(\"802.3\")\n\t\t#p.show()\n\t\t#print type(p['LLC'])\n\telif (p['Ethernet'] == None):\n\t\tDebug(\"non-ethernet packet\")\t\t#Need more details on how to handle.\n\t\tUnhandledPacket(p)\n\t\t#p.show()\n\t\t#print type(p)\n\t\t#quit()\n\telif p['Ethernet'].type == 0x0806:\t\t#ARP\n\t\t#pull arp data from here instead of tcp/udp packets, as these are all local\n\t\tif (p['ARP'].op == 1):\t\t\t#1 is request (\"who-has\")\n\t\t\tpass\n\t\tif (p['ARP'].op == 2):\t\t\t#2 is reply (\"is-at\")\n\t\t\tif (p['ARP.psrc'] != None) and (p['ARP.hwsrc'] != None):\n\t\t\t\tIPAddr=p['ARP.psrc']\n\t\t\t\tMyMac=p['ARP.hwsrc'].upper()\n\t\t\t\tif (not MacAddr.has_key(IPAddr)) or (MacAddr[IPAddr] != MyMac):\n\t\t\t\t\tReportId(\"MA\", IPAddr, 'Ethernet', MyMac, '')\n\t\t\telse:\n\t\t\t\tUnhandledPacket(p)\n\t\telse:\n\t\t\tUnhandledPacket(p)\n\telif p['Ethernet'].type == 0x0800:\t\t#IP\n\t\tsIP=str(p['IP'].src)\n\t\tdIP=str(p['IP'].dst)\n\t\t#Best to get these from arps instead; if we get them from here, we get router macs for foreign addresses.\n\t\t#if not MacAddr.has_key(sIP):\n\t\t#\tReportId(\"MA\", sIP, \"Ethernet\", p['Ethernet'].src, '')\n\t\t#if not MacAddr.has_key(dIP):\n\t\t#\tReportId(\"MA\", dIP, \"Ethernet\", p['Ethernet'].dst, '')\n\n\t\tif p['IP'].proto == 1:\t\t\t#ICMP\n\t\t\tType = p['ICMP'].type\n\t\t\tCode = p['ICMP'].code\n\n\t\t\tif (Type == 0):\t\t\t\t\t\t#Echo reply\n\t\t\t\tif (not(OSDescription.has_key(sIP))):\n\t\t\t\t\tReportId(\"IP\", sIP, \"IP\", \"live\", 'icmp echo reply')\n\t\t\telif (Type == 3) and (type(p[IPerror]) == IPerror):\t#Unreachable, check that we have an actual embedded packet\n\t\t\t\t#if (type(p[IPerror]) != IPerror):\n\t\t\t\t#\tp.show()\n\t\t\t\t#\tprint type(p[IPerror])\n\t\t\t\t#\tquit()\n\t\t\t\tOrigdIP = p[IPerror].dst\n\t\t\t\tif (Code == 0):\t\t\t\t\t#Net unreachable\n\t\t\t\t\tif (not(OSDescription.has_key(OrigdIP))):\n\t\t\t\t\t\tReportId(\"IP\", OrigdIP, \"IP\", \"dead\", 'net unreachable')\n\t\t\t\t\tif (not(IsRouter.has_key(sIP))):\n\t\t\t\t\t\tReportId(\"RO\", sIP, \"NetUn\", \"router\", \"\")\n\t\t\t\telif (Code == 1):\t\t\t\t#Host unreachable\n\t\t\t\t\tif (not(OSDescription.has_key(OrigdIP))):\n\t\t\t\t\t\tReportId(\"IP\", OrigdIP, \"IP\", \"dead\", 'host unreachable')\n\t\t\t\t\tif (not(IsRouter.has_key(sIP))):\n\t\t\t\t\t\tReportId(\"RO\", sIP, \"HostUn\", \"router\", \"\")\n\t\t\t\telif (Code == 3) and (p[IPerror].proto == 17):\t#Port unreachable and embedded protocol = 17, UDP, as it should be\n\t\t\t\t\tDNSServerLoc = p[IPerror].src + \",UDP_53\"\n\t\t\t\t\tif (p[UDPerror].sport == 53) and (ManualServerDescription.has_key(DNSServerLoc)) and (ManualServerDescription[DNSServerLoc] == \"dns/server\"):\n\t\t\t\t\t\t#If orig packet coming from 53 and coming from a dns server, don't do anything (closed port on client is a common effect)\n\t\t\t\t\t\t#Don't waste time on port unreachables going back to a dns server; too common, and ephemeral anyways.\n\t\t\t\t\t\tpass\n\t\t\t\t\telse:\n\t\t\t\t\t\t#If 
orig packet coming from something other than 53, or coming from 53 and NOT coming from a dns server, log as closed\n\t\t\t\t\t\tOrigDPort = str(p[UDPerror].dport)\n\t\t\t\t\t\tOrigDstService = OrigdIP + \",UDP_\" + OrigDPort\n\t\t\t\t\t\tif ((not LiveUDPService.has_key(OrigDstService)) or (LiveUDPService[OrigDstService] == True)):\n\t\t\t\t\t\t\tLiveUDPService[OrigDstService] = False\n\t\t\t\t\t\t\tReportId(\"US\", OrigdIP, \"UDP_\" + OrigDPort, \"closed\", \"port unreachable\")\n\t\t\t\telif (Code == 3) and (p[IPerror].proto == 6) and (p[TCPerror].dport == 113):\t#Port unreachable and embedded protocol = 6, TCP, which it shouldn't. May be the same firewall providing the TCP FR's\n\t\t\t\t\tpass\n\t\t\t\telif (Code == 6):\t\t\t\t#Net unknown\n\t\t\t\t\tif (not(OSDescription.has_key(OrigdIP))):\n\t\t\t\t\t\tReportId(\"IP\", OrigdIP, \"IP\", \"dead\", 'net unknown')\n\t\t\t\telif (Code == 7):\t\t\t\t#Host unknown\n\t\t\t\t\tif (not(OSDescription.has_key(OrigdIP))):\n\t\t\t\t\t\tReportId(\"IP\", OrigdIP, \"IP\", \"dead\", 'host unknown')\n\t\t\t\telif (Code == 9):\t\t\t\t#Network Administratively Prohibited\n\t\t\t\t\tpass\t\t\t\t\t#Can't tell much from this type of traffic. Possibly list as firewall?\n\t\t\t\telif (Code == 10):\t\t\t\t#Host Administratively Prohibited\n\t\t\t\t\tpass\n\t\t\t\telif (Code == 11):\t\t\t\t#Network unreachable for TOS\n\t\t\t\t\tpass\n\t\t\t\telif (Code == 12):\t\t\t\t#Host unreachable for TOS\n\t\t\t\t\tpass\n\t\t\t\telif (Code == 13):\t\t\t\t#Communication Administratively prohibited\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (Type == 8):\t\t\t\t\t#ping\n\t\t\t\t#FIXME - check payload for ping sender type, perhaps\n\t\t\t\tpass\n\t\t\telif (Type == 11):\t\t\t\t\t#Time exceeded\n\t\t\t\tif (Code == 0):\t\t\t\t\t#TTL exceeded\n\t\t\t\t\tif (not(IsRouter.has_key(sIP))):\n\t\t\t\t\t\t#FIXME - put original target IP as column 5?\n\t\t\t\t\t\tReportId(\"RO\", sIP, \"TTLEx\", \"router\", \"\")\n\t\t\t\telse:\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\telse:\n\t\t\t\tUnhandledPacket(p)\n\t\telif p['IP'].proto == 2:\t\t#IGMP\n\t\t\tUnhandledPacket(p)\n\t\telif p['IP'].proto == 6:\t\t#TCP\n\t\t\tsport=str(p['TCP'].sport)\n\t\t\tdport=str(p['TCP'].dport)\n\t\t\t#print p['IP'].src + \":\" + sport + \" -> \", p['IP'].dst + \":\" + dport,\n\t\t\tif (p['TCP'].flags & 0x17) == 0x12:\t#SYN/ACK (RST and FIN off)\n\t\t\t\tCliService = dIP + \",TCP_\" + sport\n\t\t\t\tif not SynAckSentToTCPClient.has_key(CliService):\n\t\t\t\t\tSynAckSentToTCPClient[CliService] = True\n\n\t\t\t\t#If we've seen a syn sent to this port and have either not seen any SA/R, or we've seen a R in the past:\n\t\t\t\t#The last test is for a service that was previously closed and is now open; report each transition once.\n\t\t\t\tService = sIP + \",TCP_\" + sport\n\t\t\t\tif ( (SynSentToTCPService.has_key(Service)) and ((not LiveTCPService.has_key(Service)) or (LiveTCPService[Service] == False)) ):\n\t\t\t\t\tLiveTCPService[Service] = True\n\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", '')\n\t\t\telif (p['TCP'].flags & 0x17) == 0x02:\t#SYN (ACK, RST, and FIN off)\n\t\t\t\tService = dIP + \",TCP_\" + dport\n\t\t\t\tif not SynSentToTCPService.has_key(Service):\n\t\t\t\t\tSynSentToTCPService[Service] = True\n\t\t\t\t#Debug(\"trying to fingerprint \" + sIP)\n\t\t\t\ttry:\n\t\t\t\t\tp0fdata = p0f(p)\n\t\t\t\t\t#FIXME - reasonably common occurence, don't whine, just fix it.\n\t\t\t\t\t#if (len(p0fdata) >1):\n\t\t\t\t\t#\tDebug(\"More than one OS fingerprint for \" + sIP + 
\", using the first.\")\n\t\t\t\t\tif (len(p0fdata) >=1):\n\t\t\t\t\t\tPDescription = p0fdata[0][0] + \" \" + p0fdata[0][1] + \" (\" + str(int(p0fdata[0][2]) + 1)\t#FIXME - Grabbing just the first candidate, may need to compare correlation values; provided?\n\t\t\t\t\t\tif (p0fdata[0][2] == 0):\n\t\t\t\t\t\t\tPDescription = PDescription + \" hop away)\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tPDescription = PDescription + \" hops away)\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t#[N][2] param appears to be distance away in hops (but add 1 to this to get real hop count?)\n\t\t\t\t\t\tPDescription = PDescription.replace(',', ';')\t\t#Commas are delimiters in output\n\t\t\t\t\t\tif (not(OSDescription.has_key(sIP))) or (OSDescription[sIP] != PDescription):\n\t\t\t\t\t\t\tOSDescription[sIP] = PDescription\n\t\t\t\t\t\t\tReportId(\"IP\", sIP, \"IP\", \"live\", PDescription)\n\t\t\t\texcept:\n\t\t\t\t\tPDescription = 'p0f failure'\n\t\t\t\t\tif (not(OSDescription.has_key(sIP))) or (OSDescription[sIP] != PDescription):\n\t\t\t\t\t\tDebug(\"P0f failure in \" + sIP + \":\" + sport + \" -> \" + dIP + \":\" + dport)\n\t\t\t\t\t\tOSDescription[sIP] = PDescription\n\t\t\t\t\t\tReportId(\"IP\", sIP, \"IP\", \"live\", PDescription)\n\t\t\telif (p['TCP'].flags & 0x07) == 0x01:\t#FIN (SYN/RST off)\n\t\t\t\tCliService = sIP + \",TCP_\" + dport\n\t\t\t\tif ( (SynAckSentToTCPClient.has_key(CliService)) and ((not LiveTCPClient.has_key(CliService)) or (LiveTCPClient[CliService] == False)) ):\n\t\t\t\t\tLiveTCPClient[CliService] = True\n\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", '')\n\t\t\telif (p['TCP'].flags & 0x07) == 0x04:\t#RST (SYN and FIN off)\n\t\t\t\t#FIXME - handle rst going in the other direction?\n\t\t\t\tService = sIP + \",TCP_\" + sport\n\t\t\t\tif ( (SynSentToTCPService.has_key(Service)) and ((not LiveTCPService.has_key(Service)) or (LiveTCPService[Service] == True)) ):\n\t\t\t\t\tLiveTCPService[Service] = False\n\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"closed\", '')\n\t\t\telif ((p['TCP'].flags & 0x3F) == 0x15) and (sport == \"113\"):\t#FIN, RST, ACK (SYN, PSH, URG off)\n\t\t\t\t#This may be a firewall or some other device stepping in for 113 with a FIN/RST.\n\t\t\t\tpass\n\t\t\telif (p['TCP'].flags & 0x17) == 0x10:\t#ACK (RST, SYN, and FIN off)\n\t\t\t\t#FIXME - check for UnhandledPacket placement in ACK\n\t\t\t\tFromPort = sIP + \",TCP_\" + sport\n\t\t\t\tToPort = dIP + \",TCP_\" + dport\n\t\t\t\tPayload = str(p['Raw.load'])\t\t\t#For some reason this doesn't handle p['Raw'].load\n\t\t\t\tif ( (LiveTCPService.has_key(FromPort)) and (LiveTCPService[FromPort] == True) and (LiveTCPService.has_key(ToPort)) and (LiveTCPService[ToPort] == True)):\n\t\t\t\t\tprint \"Logic failure: both \" + FromPort + \" and \" + ToPort + \" are listed as live services.\"\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\telif ((LiveTCPService.has_key(FromPort)) and (LiveTCPService[FromPort] == True)):\t#If the \"From\" side is a known TCP server:\n\t\t\t\t\tif (not NmapServerDescription.has_key(FromPort) ):\t\t#Check nmap fingerprint strings for this server port\n\t\t\t\t\t\tif (ServiceFPs.has_key(int(sport))):\n\t\t\t\t\t\t\tfor OneTuple in ServiceFPs[int(sport)]:\n\t\t\t\t\t\t\t\tMatchObj = OneTuple[0].search(Payload)\n\t\t\t\t\t\t\t\tif (MatchObj != None):\n\t\t\t\t\t\t\t\t\t#Debugging:\n\t\t\t\t\t\t\t\t\t#FIXME - removeme once understood:\n\t\t\t\t\t\t\t\t\t#File \"/home/wstearns/med/programming/python/passer/passer.py\", line 504, in processpacket\n\t\t\t\t\t\t\t\t\t#OutputDescription = OutputDescription.replace('$' + 
str(Index), MatchObj.group(Index))\n\t\t\t\t\t\t\t\t\t#TypeError: expected a character buffer object\n\t\t\t\t\t\t\t\t\tif (OneTuple[1] == None):\n\t\t\t\t\t\t\t\t\t\tDebug(\"Null description for \" + OneTuple[0])\n\t\t\t\t\t\t\t\t\t\t#quit()\n\t\t\t\t\t\t\t\t\tOutputDescription = OneTuple[1]\n\t\t\t\t\t\t\t\t\tif len(MatchObj.groups()) >= 1:\n\t\t\t\t\t\t\t\t\t\t#We have subexpressions matched, these need to be inserted into the description string\n\t\t\t\t\t\t\t\t\t\tfor Index in range(1,len(MatchObj.groups())+1):\n\t\t\t\t\t\t\t\t\t\t\t#Example: Replace \"$1\" with MatchObj.group(1)\n\t\t\t\t\t\t\t\t\t\t\tOutputDescription = OutputDescription.replace('$' + str(Index), str(MatchObj.group(Index)))\n\t\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", OutputDescription)\n\t\t\t\t\t\t\t\t\tNmapServerDescription[sIP + \",TCP_\" + sport] = OutputDescription\n\t\t\t\t\t\t\t\t\tbreak\t\t\t\t\t#Exit for loop, no need to check any more fingerprints now that we've found a match\n\n\t\t\t\t\tif (not NmapServerDescription.has_key(FromPort)):\t\t#If the above loop didn't find a server description\n\t\t\t\t\t\tif (ServiceFPs.has_key('all')):\t\t\t\t#Now recheck against regexes not associated with a specific port (port 'all').\n\t\t\t\t\t\t\tfor OneTuple in ServiceFPs['all']:\n\t\t\t\t\t\t\t\tMatchObj = OneTuple[0].search(Payload)\n\t\t\t\t\t\t\t\tif (MatchObj != None):\n\t\t\t\t\t\t\t\t\tOutputDescription = OneTuple[1]\n\t\t\t\t\t\t\t\t\tif len(MatchObj.groups()) >= 1:\n\t\t\t\t\t\t\t\t\t\t#We have subexpressions matched, these need to be inserted into the description string\n\t\t\t\t\t\t\t\t\t\tfor Index in range(1,len(MatchObj.groups())+1):\n\t\t\t\t\t\t\t\t\t\t\tOutputDescription = OutputDescription.replace('$' + str(Index), MatchObj.group(Index))\n\t\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", OutputDescription)\n\t\t\t\t\t\t\t\t\tNmapServerDescription[sIP + \",TCP_\" + sport] = OutputDescription\n\t\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\tif (not ManualServerDescription.has_key(FromPort) ):\n\t\t\t\t\t\tif (sport == \"22\") and (Payload != None) and (Payload.find('SSH-') > -1):\n\t\t\t\t\t\t\tif ( (Payload.find('SSH-1.99-OpenSSH_') > -1) or (Payload.find('SSH-2.0-OpenSSH_') > -1) ):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"ssh/openssh\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"ssh/openssh\"\n\t\t\t\t\t\t\telif (Payload.find('SSH-1.5-') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"ssh/generic\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"ssh/generic\"\n\t\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\telif (sport == \"25\") and (Payload != None) and (Payload.find(' ESMTP Sendmail ') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"smtp/sendmail\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"smtp/sendmail\"\n\t\t\t\t\t\telif (sport == \"25\") and (Payload != None) and (Payload.find(' - Welcome to our SMTP server ESMTP') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"smtp/generic\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"smtp/generic\"\n\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\t#Check for port 80 and 
search for \"Server: \" once\n\t\t\t\t\t\telif (sport == \"80\") and (Payload != None) and (Payload.find('Server: ') > -1):\n\t\t\t\t\t\t\tif (Payload.find('Server: Apache') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/apache\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/apache\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Embedded HTTP Server') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/embedded\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/embedded\"\n\t\t\t\t\t\t\telif (Payload.find('Server: gws') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/gws\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/gws\"\n\t\t\t\t\t\t\telif (Payload.find('Server: KFWebServer') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/kfwebserver\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/kfwebserver\"\n\t\t\t\t\t\t\telif (Payload.find('Server: micro_httpd') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/micro-httpd\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/micro-httpd\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Microsoft-IIS') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/iis\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/iis\"\n\t\t\t\t\t\t\telif (Payload.find('Server: lighttpd') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/lighttpd\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/lighttpd\"\n\t\t\t\t\t\t\telif (Payload.find('Server: MIIxpc') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/mirrorimage\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/mirrorimage\"\n\t\t\t\t\t\t\telif (Payload.find('Server: mini_httpd') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/mini-httpd\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/mini-httpd\"\n\t\t\t\t\t\t\telif (Payload.find('Server: nc -l -p 80') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/nc\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/nc\"\n\t\t\t\t\t\t\telif (Payload.find('Server: nginx/') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/nginx\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/nginx\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Nucleus') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/nucleus\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/nucleus\"\n\t\t\t\t\t\t\telif (Payload.find('Server: RomPager') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/rompager\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/rompager\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Server') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/server\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/server\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Sun-ONE-Web-Server/') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, 
\"TCP_\" + sport, \"listening\", \"http/sun-one\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/sun-one\"\n\t\t\t\t\t\t\telif (Payload.find('Server: TrustRank Frontend') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/trustrank\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/trustrank\"\n\t\t\t\t\t\t\telif (Payload.find('Server: YTS/') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/yahoo\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/yahoo\"\n\t\t\t\t\t\t\telif (Payload.find('HTTP/1.0 404 Not Found') > -1) or (Payload.find('HTTP/1.1 200 OK') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/generic\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/generic\"\n\t\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\telif (sport == \"110\") and (Payload != None) and (Payload.find('POP3 Server Ready') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"pop3/generic\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"pop3/generic\"\n\t\t\t\t\t\telif (sport == \"143\") and (Payload != None) and (Payload.find('* OK dovecot ready') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"imap/dovecot\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"imap/dovecot\"\n\t\t\t\t\t\telif (sport == \"143\") and (Payload != None) and (Payload.find(' IMAP4rev1 ') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"imap/generic\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"imap/generic\"\n\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\telif (sport == \"783\") and (Payload != None) and (Payload.find('SPAMD/1.1 ') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"spamd/spamd\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"spamd/spamd\"\n\t\t\t\t\t\telif ( (sport == \"3128\") or (sport == \"80\") ) and (Payload != None) and (Payload.find('Via: ') > -1) and (Payload.find(' (squid/') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"proxy/squid\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"proxy/squid\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\telif ((LiveTCPService.has_key(ToPort)) and (LiveTCPService[ToPort] == True)):\t\t#If the \"To\" side is a known TCP server:\n\t\t\t\t\tClientKey = sIP + \",TCP_\" + dport\t#Note: CLIENT ip and SERVER port\n\t\t\t\t\tif (not ClientDescription.has_key(ClientKey)):\n\t\t\t\t\t\tif (dport == \"22\") and (Payload != None) and ( (Payload.find('SSH-2.0-OpenSSH_') > -1) or (Payload.find('SSH-1.5-OpenSSH_') > -1) ):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"ssh/openssh\")\n\t\t\t\t\t\t#As cute as it is to catch this, it miscatches any relay that's carrying a pine-generated mail.\n\t\t\t\t\t\t#elif (dport == \"25\") and (Payload != None) and (Payload.find('Message-ID: <Pine.') > -1):\n\t\t\t\t\t\t#\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", 
\"smtp/pine\")\n\t\t\t\t\t\telif ( (dport == \"80\") or (dport == \"3128\") ) and (Payload != None) and (Payload.find('User-Agent: libwww-perl/') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"http/libwww-perl\")\n\t\t\t\t\t\telif ( (dport == \"80\") or (dport == \"3128\") ) and (Payload != None) and (Payload.find('User-Agent: Lynx') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"http/lynx\")\n\t\t\t\t\t\telif ( (dport == \"80\") or (dport == \"3128\") ) and (Payload != None) and (Payload.find('User-Agent: Mozilla') > -1) and (Payload.find(' Firefox/') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"http/firefox\")\n\t\t\t\t\t\telif ( (dport == \"80\") or (dport == \"3128\") ) and (Payload != None) and (Payload.find('User-Agent: Wget/') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"http/wget\")\n\t\t\t\t\t\telif (dport == \"143\") and (Payload != None) and (Payload.find('A0001 CAPABILITY') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"imap/generic\")\n\t\t\t\t\t\t\t#LogNewPayload(ClientPayloadDir, ClientKey, Payload)\n\t\t\t\t\t\telif (dport == \"783\") and (Payload != None) and (Payload.find('PROCESS SPAMC') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"spamd/spamc\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t#LogNewPayload(ClientPayloadDir, ClientKey, Payload)\n\t\t\t\telse:\t#Neither port pair is known as a server\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t#Following is debugging at best; it should only show up early on as the sniffer listens to conversations for which it didn't hear the SYN/ACK\n\t\t\t\t\t#print \"note: neither \" + FromPort + \" nor \" + ToPort + \" is listed as a live service.\"\n\t\t\telse:\t#Other TCP flag combinations here\n\t\t\t\tUnhandledPacket(p)\n\t\telif p['IP'].proto == 17 and (type(p['UDP']) == UDP):\t\t#UDP. We have to check the object type as well as we do get (corrupted? truncated?) 
packets with type 17 that aren't udp: AttributeError: 'NoneType' object has no attribute 'sport'\n\t\t\t#FIXME - possibly run udp packets through ServiceFPs as well?\n\t\t\tsport=str(p['UDP'].sport)\n\t\t\tdport=str(p['UDP'].dport)\n\t\t\tSrcService = sIP + \",UDP_\" + sport\n\t\t\tDstService = dIP + \",UDP_\" + dport\n\t\t\tSrcClient = sIP + \",UDP_\" + dport\n\t\t\tPayload = p['Raw.load']\n\n\t\t\t#Multicast DNS: http://files.multicastdns.org/draft-cheshire-dnsext-multicastdns.txt\n\t\t\t#- usually sent to 224.0.0.251 (or FF02::FB) (link-local multicast).\n\t\t\t#\t- if \".local.\" in query, these MUST be the target IPs\n\t\t\t#\t- non-local queries may be sent to these or normal dns servers\n\t\t\t#\t- rdns queries for \"254.169.in-addr.arpa.\" MUST be sent to 224.0.0.251\n\t\t\t#\t- rdns queries for \"8.e.f.ip6.arpa.\", \"9.e.f.ip6.arpa.\",\"a.e.f.ip6.arpa.\", and \"b.e.f.ip6.arpa.\" MUST be sent to the IPv6 mDNS link-local multicast address FF02::FB.\n\t\t\t#- sent to udp port 5353\n\t\t\t#- generic clients may use \"single-dns-object.local.\", such as \"sparrow.local.\"\n\t\t\t#- responses have IP TTL = 255 to check that packet originated on-lan\n\n\t\t\t#Multicast DNS, placed next to normal dns, out of numerical order\n\t\t\tif (dport == \"5353\") and ( (p['IP'].ttl == 1) or (p['IP'].ttl == 255) ):\n\t\t\t\tif ((not LiveUDPService.has_key(SrcClient)) or (LiveUDPService[SrcClient] == False)):\n\t\t\t\t\tLiveUDPService[SrcClient] = True\n\t\t\t\t\tif (dIP == \"224.0.0.251\"):\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"mdns/broadcastclient\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"mdns/client\")\n\n\t\t\t\t\t#Extract dns answers like with 53; change elif to if and add 5353 to ports on next if?\n\t\t\t\t\t#At the moment, no; scapy does not appear to parse 5353 as dns.\n\t\t\t\t\t#else:\n\t\t\t\t\t#\tUnhandledPacket(p)\n\t\t\t#FIXME - add check for \"if isinstance(p['DNS'], whatevertype):\there and at all p[] accesses.\n\t\t\telif (sport == \"53\") and (isinstance(p['DNS'], DNS)) and (p['DNS'].qr == 1):\t\t#qr == 1 is a response\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t#FIXME - Also report the TLD from one of the query answers to show what it's willing to answer for?\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"dns/server\")\n\t\t\t\t#Now we extract dns answers. 
First, check that there's no dns error:\n\t\t\t\tif (p['DNS'].rcode == 0):\t\t\t#No error\n\t\t\t\t\tDNSBlocks = [ ]\n\t\t\t\t\tCNAMERecs = [ ]\t\t\t\t#We hold onto all cnames until we've processed all PTR's and A's here\n\t\t\t\t\tif (p['DNS'].ancount > 0):\t\t#If we have at least one answer from the answer block, process it\n\t\t\t\t\t\tDNSBlocks.append(p[DNS].an)\n\t\t\t\t\tif (p['DNS'].arcount > 0):\t\t#Likewise for the \"additional\" block\n\t\t\t\t\t\tDNSBlocks.append(p[DNS].ar)\n\t\t\t\t\tfor OneAn in DNSBlocks:\n\t\t\t\t\t\t#Thanks to Philippe Biondi for showing me how to extract additional records.\n\t\t\t\t\t\t#Debug(\"Start dns extract\" + str(p['DNS'].ancount))\n\t\t\t\t\t\t#OneAn = p[DNS].an\n\t\t\t\t\t\t#while OneAn is not NoPayload:\t\t#This doesn't seem to stop at the end of the list; incorrect syntax.\n\t\t\t\t\t\twhile isinstance(OneAn,DNSRR):\t\t#Somewhat equivalent:\twhile not isinstance(an, NoPayload):\n\t\t\t\t\t\t\n\t\t\t\t\t\t\t#print \"Type: \" + str(type(OneAn))\t\t#All of type scapy.DNSRR\n\t\t\t\t\t\t\tif (OneAn.rclass == 1) and (OneAn.type == 1):\t\t#\"IN\" class and \"A\" type answer\n\t\t\t\t\t\t\t\tDNSIPAddr = OneAn.rdata\n\t\t\t\t\t\t\t\tDNSHostname = OneAn.rrname.lower()\n\t\t\t\t\t\t\t\t#Check new hostname to see if it's in the list.\n\t\t\t\t\t\t\t\tif (not DNSRecord.has_key(DNSIPAddr + \",A\")) or (not(DNSHostname in DNSRecord[DNSIPAddr + \",A\"])):\n\t\t\t\t\t\t\t\t\tReportId(\"DN\", DNSIPAddr, \"A\", DNSHostname, \"\")\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 2):\t\t\t#\"IN\" class and \"NS\" answer\n\t\t\t\t\t\t\t\tpass\t\t\t\t\t\t\t#Perhaps later\n\t\t\t\t\t\t\t\t#Like cnames, this is object -> nameserver hostname, so these would need to be queued like cnames until we're done with A's and PTR's.\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 5):\t\t\t#\"IN\" class and \"CNAME\" answer\n\t\t\t\t\t\t\t\tCNAMERecs.append(OneAn)\t\t\t\t\t#Remember the record; we'll process these after the PTR's and A's\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 6):\t\t\t#\"IN\" class and \"SOA\" answer\n\t\t\t\t\t\t\t\tpass\t\t\t\t\t\t\t#Not immediately useful, perhaps later\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 12):\t\t#\"IN\" class and \"PTR\" type answer\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#For input of '182.111.59.66.in-addr.arpa.' 
:\n\t\t\t\t\t\t\t\tDNSIPAddr = OneAn.rrname.replace(\".in-addr.arpa.\", \"\")\t\t# '182.111.59.66'\n\t\t\t\t\t\t\t\tDNSIPAddr = DNSIPAddr.split('.')\t\t\t\t# ['182', '111', '59', '66']\n\t\t\t\t\t\t\t\tDNSIPAddr.reverse()\t\t\t\t\t\t# ['66', '59', '111', '182']\n\t\t\t\t\t\t\t\tDNSIPAddr = string.join(DNSIPAddr, '.')\t\t\t\t# '66.59.111.182'\n\t\t\t\t\t\t\t\t#Check that we end up with a legal IPv4 address before continuing; we're getting garbage.\n\t\t\t\t\t\t\t\tif (re.search('^[1-9][0-9\\.]*[0-9]$', DNSIPAddr) == None):\n\t\t\t\t\t\t\t\t\tDebug(\"Odd PTR rrname: \" + OneAn.rrname)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tDNSHostname = OneAn.rdata.lower()\n\t\t\t\t\t\t\t\t\tif (not DNSRecord.has_key(DNSIPAddr + \",PTR\")) or (not(DNSHostname in DNSRecord[DNSIPAddr + \",PTR\"])):\n\t\t\t\t\t\t\t\t\t\tReportId(\"DN\", DNSIPAddr, \"PTR\", DNSHostname, \"\")\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 15):\t\t#\"IN\" class and \"MX\" answer\n\t\t\t\t\t\t\t\tpass\t\t\t\t\t\t\t#Possibly later\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 28):\t\t#\"IN\" class and \"AAAA\" answer\n\t\t\t\t\t\t\t\tDNSIPAddr = OneAn.rdata.upper()\n\t\t\t\t\t\t\t\tDNSHostname = OneAn.rrname.lower()\n\t\t\t\t\t\t\t\tif (not DNSRecord.has_key(DNSIPAddr + \",AAAA\")) or (not(DNSHostname in DNSRecord[DNSIPAddr + \",AAAA\"])):\n\t\t\t\t\t\t\t\t\tReportId(\"DN\", DNSIPAddr, \"AAAA\", DNSHostname, \"\")\n\n\t\t\t\t\t\t\t#Move to the next DNS object in the \"an\" block\n\t\t\t\t\t\t\tOneAn = OneAn.payload\n\t\t\t\t\tfor OneCNAME in CNAMERecs:\t\t#Now that we have all A/PTR's, go back and turn cname records into pseudo-A's\n\t\t\t\t\t\tif isinstance(OneCNAME,DNSRR):\n\t\t\t\t\t\t\tAlias = OneCNAME.rrname.lower()\n\t\t\t\t\t\t\tExisting = OneCNAME.rdata.lower()\n\t\t\t\t\t\t\tif isFQDN(Alias) and isFQDN(Existing):\n\t\t\t\t\t\t\t\tif HostIPs.has_key(Existing):\n\t\t\t\t\t\t\t\t\tfor OneIP in HostIPs[Existing]:\t\t\t\t#Loop through each of the IPs for the canonical name, and\n\t\t\t\t\t\t\t\t\t\tif (not DNSRecord.has_key(OneIP + \",CNAME\")) or (not(Alias in DNSRecord[OneIP + \",CNAME\"])):\n\t\t\t\t\t\t\t\t\t\t\tReportId(\"DN\", OneIP, \"CNAME\", Alias, \"\")\t#report them as kind-of A records for the Alias.\n\t\t\t\t\t\t\t\t#If we don't have a A/PTR record for \"Existing\", just ignore it. 
Hopefully we'll get the Existing A/PTR in the next few answers, and will re-ask for the CNAME later, at which point we'll get a full cname record.\n\t\t\t\t\t\t\t\t#else:\n\t\t\t\t\t\t\t\t#\tDebug(\"CNAME \" + Alias + \" -> \" + Existing + \" requested, but no IP's for the latter, skipping.\")\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tDebug(\"One of \" + Alias + \" and \" + Existing + \" isn't an FQDN, skipping cname processing.\")\n\t\t\t\telif (p['DNS'].rcode == 1):\t\t\t#FormErr: server responding to an improperly formatted request\n\t\t\t\t\tpass\n\t\t\t\telif (p['DNS'].rcode == 2):\t\t\t#ServFail: domain exists, root nameservers list authoritative name servers, but authNS's won't answer queries\n\t\t\t\t\tpass\n\t\t\t\telif (p['DNS'].rcode == 3):\t\t\t#NXDOMAIN: root nameservers don't have any listing (domain doesn't exist or is on hold)\n\t\t\t\t\tpass\n\t\t\t\telif (p['DNS'].rcode == 5):\t\t\t#Query refused\n\t\t\t\t\tpass\n\t\t\t\telse:\t#rcode indicates an error\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (dport == \"53\") and (type(p['DNS']) == DNS) and (p['DNS'].qr == 0):\t#dns query\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"dns/client\")\n\t\t\telif (sport == \"67\") and (dport == \"68\"):\t\t#Bootp/dhcp server talking to client\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"bootpordhcp/server\")\n\t\t\telif (sport == \"68\") and (dport == \"67\"):\t\t#Bootp/dhcp client talking to server\n\t\t\t\tif (sIP != \"0.0.0.0\"):\t\t\t\t#If the client is simply renewing an IP, remember it.\n\t\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"bootpordhcp/client\")\n\t\t\t\t#else:\t\t\t\t\t\t#If you want to record which macs are asking for addresses, do it here.\n\t\t\t\t#\tpass\n\t\t\telif (sport == \"123\") and (dport == \"123\") and (p['NTP'].stratum != ''):\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"ntp/generic\")\n\t\t\telif (dport == \"123\") and ( (dIP == \"216.115.23.75\") or (dIP == \"216.115.23.76\") or (dIP == \"69.59.240.75\") ):\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"ntp/vonageclient\")\n\t\t\telif (sport == \"123\") and ( (sIP == \"216.115.23.75\") or (sIP == \"216.115.23.76\") or (sIP == \"69.59.240.75\") ):\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"ntp/vonageserver\")\n\t\t\telif (dport == \"137\"):\t\t\t#netbios-ns\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (p['Ethernet'].dst.upper() == \"FF:FF:FF:FF:FF:FF\"):\t\t\t#broadcast\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"netbios-ns/broadcastclient\")\n\t\t\t\t\telif 
(Payload != None) and (Payload.find('CKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA') > -1):\t#wildcard\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"netbios-ns/wildcardclient\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"netbios-ns/unicastclient\")\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"500\") and (dport == \"500\") and (p['ISAKMP'].init_cookie != ''):\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"isakmp/generic\")\n\t\t\telif (dport == \"512\"):\t\t\t#BIFF\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('@') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"biff/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif ( (dport == \"1026\") or (dport == \"1027\") or (dport == \"1028\") ):\t#winpopup spam client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and ( (Payload.find('Download Registry Update from:') > -1) or (Payload.find('CRITICAL ERROR MESSAGE! - REGISTRY DAMAGED AND CORRUPTED.') > -1) or (Payload.find('Your system registry is corrupted and needs to be cleaned immediately.') > -1) or (Payload.find('CRITICAL SYSTEM ERRORS') > -1) ):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"winpopup/spamclient\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (dport == \"1434\"):\t\t#Probable mssql attack\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('Qh.dll') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"mssql/clientattack\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"1900\") and (dport == \"1900\") and (dIP == \"239.255.255.250\"):\t\t#SSDP\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('NOTIFY') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"ssdp/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (dport == \"3865\") and (dIP == \"255.255.255.255\"):\t\t#XPL, http://wiki.xplproject.org.uk/index.php/Main_Page\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"xpl/client\")\n\t\t\telif (sport == \"5061\") and (dport == \"5061\") and ( (dIP == \"216.115.30.28\") or (dIP == \"69.59.227.77\") or (dIP == \"69.59.232.33\") or (dIP == \"69.59.240.84\") ):\t\t#Vonage SIP client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('.vonage.net:5061 SIP/2.0') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tSipMatch = SipPhoneMatch.search(Payload)\n\t\t\t\t\t\tif (SipMatch != None) and (len(SipMatch.groups()) >= 
1):\n\t\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"sip/vonage_client, phone number: \" + SipMatch.group(1))\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"sip/vonage_client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"5061\") and (dport == \"5061\") and ( (sIP == \"216.115.30.28\") or (sIP == \"69.59.227.77\") or (sIP == \"69.59.232.33\") or (sIP == \"69.59.240.84\") ):\t#Vonage SIP server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('.vonage.net:5061>') > -1):\n\t\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"sip/vonage_server\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"6515\") and (dport == \"6514\") and (dIP == \"255.255.255.255\"):\t\t#mcafee ASaP broadcast, looking for a proxy out. http://www.myasap.de/intl/EN/content/virusscan_asap/faq_new.asp\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('<rumor version=') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"asap/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif ( (sport == \"9052\") or (sport == \"9053\") or (sport == \"9054\") ) and ( (sIP == \"205.188.146.72\") or (sIP == \"205.188.157.241\") or (sIP == \"205.188.157.242\") or (sIP == \"205.188.157.243\") or (sIP == \"205.188.157.244\") or (sIP == \"64.12.51.145\") or (sIP == \"64.12.51.148\") or (sIP == \"149.174.54.131\") ):\t#Possibly AOL dns response\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('dns-01') > -1):\n\t\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"aoldns/server\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"27005\") and ( (dport == \"27016\") or (dport == \"27017\") ):\t\t\t\t#Halflife client live game\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\telif (dport == \"27013\") and (dIP == \"207.173.177.12\"):\t\t\t\t#variable payload, so can't (Payload != None) and (Payload.find('Steam.exe') > -1)\t\t\t\t#Halflife client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\telif (sport == \"27013\") and (sIP == \"207.173.177.12\"):\t\t\t\t\t\t\t#halflife server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"halflife/server\")\n\t\t\telif ( (sport == \"27016\") or (sport == \"27017\") ) and (dport == \"27005\"):\t\t\t\t#halflife server live game\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"halflife/server\")\n\t\t\telif ( (dport == \"27015\") or (dport == 
\"27016\") or (dport == \"27025\") or (dport == \"27026\") ):\t\t#Variable payload, so can't: (Payload != None) and (Payload.find('basic') > -1)\t#Halflife client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\telif (dport == \"27017\") and ( (dIP == \"69.28.148.250\") or (dIP == \"69.28.156.250\") or (dIP == \"72.165.61.161\") or (dIP == \"72.165.61.185\") or (dIP == \"72.165.61.186\") or (dIP == \"72.165.61.188\") or (dIP == \"68.142.64.164\") or (dIP == \"68.142.64.165\") or (dIP == \"68.142.64.166\") ):\t#Steamfriends client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('VS01') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"steamfriends/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"27017\") and ( (sIP == \"69.28.148.250\") or (sIP == \"69.28.156.250\") or (sIP == \"72.165.61.161\") or (sIP == \"72.165.61.185\") or (sIP == \"72.165.61.186\") or (sIP == \"72.165.61.188\") or (sIP == \"68.142.64.164\") or (sIP == \"68.142.64.165\") or (sIP == \"68.142.64.166\") ):\t#Steamfriends server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('VS01') > -1):\n\t\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"steamfriends/server\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif ( (sport == \"21020\") or (sport == \"21250\") or (sport == \"27016\") or (sport == \"27017\") or (sport == \"27018\") or (sport == \"27030\") or (sport == \"27035\") or (sport == \"27040\") or (sport == \"28015\") ):\t\t\t\t\t\t\t#halflife server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('Team Fortress') > -1):\n\t\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"halflife/server\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"27019\"):\t\t\t\t\t\t\t#halflife server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"halflife/server\")\n\t\t\telif ( (dport == \"1265\") or (dport == \"20100\") or (dport == \"21550\") or (dport == \"27000\") or (dport == \"27017\") or (dport == \"27018\") or (dport == \"27019\") or (dport == \"27022\") or (dport == \"27030\") or (dport == \"27035\") or (dport == \"27050\") or (dport == \"27078\") or (dport == \"27080\") or (dport == \"28015\") or (dport == \"28100\") or (dport == \"45081\") ):\t\t#Halflife client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('Source Engine Query') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (dport == \"24441\"):\t\t\t#Pyzor\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif 
(Payload != None) and (Payload.find('User:') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"pyzor/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t#FIXME - interesting issue; the ttl<5 test will catch traceroutes coming into us, but not ones we're creating to go out. Hmmm.\n\t\t\telif ( (dport >= \"33434\") and (dport <= \"33524\") ) and (p['IP'].ttl <= 5):\t#udptraceroute client\n\t\t\t\tif ((not LiveUDPClient.has_key(sIP + \"UDP_33434\")) or (LiveUDPClient[sIP + \"UDP_33434\"] == False)):\n\t\t\t\t\tLiveUDPClient[sIP + \"UDP_33434\"] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_33434\", \"open\", \"udptraceroute/client\")\n\t\t\telif (dport == \"40348\"):\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('HLS') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (p['IP'].frag > 0):\n\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sIP == \"207.46.51.74\") or (sIP == \"65.55.251.10\"):\t\t\t\t#Bigfish.com - dns?\n\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sIP == \"61.215.106.146\"):\t\t\t\t#junk\n\t\t\t\tUnhandledPacket(p)\n\t\t\telse:\n\t\t\t\tUnhandledPacket(p)\n\t\telse:\n\t\t\tDebug(\"Other IP protocol (\" + str(p['IP'].src) + \"->\" + str(p['IP'].dst) + \"): \" + str(p['IP'].proto))\n\t\t\tUnhandledPacket(p)\n\telif p['Ethernet'].type == 0x86DD:\t\t#IPv6\n\t\tUnhandledPacket(p)\n\telse:\n\t\tprint \"Unregistered ethernet type:\", p['Ethernet'].type\n\t\tUnhandledPacket(p)",
"def reader(self):\n while self.alive:\n try:\n data = self.serial.read_until(b'~')[:-1]\n packet = ethernet.Ethernet(data)\n if packet[icmp.ICMP]:\n packet[ethernet.Ethernet].dst_s = \"dc:a6:32:00:a7:8b\"\n packet[ip.IP].dst_s = \"192.168.1.35\"\n packet[icmp.ICMP].sum = b'0x1783'\n print(\"\\n\\n__________________RESPONSE FROM VISIBLE PI__________________\")\n print(packet)\n if data:\n self.write(packet.bin())\n except socket.error as msg:\n break\n self.alive = False",
"def packet_check(argument, lineno):\n \n if argument not in symbol_table.keys() or symbol_table[argument] != 'PACKET':\n print_error(\"\\tError : undefined packet '\"+str(argument)+\"'\", str(lineno))",
"def launch (infile, outfile, in_only=False, out_only = False):\n global _writer, _of_port, _in_only, _out_only\n _in_only = in_only\n _out_only = out_only\n\n data = open(infile, \"r\").read()\n p = pxparse.PCapParser(callback=pi_cb)\n _writer = pxwriter.PCapRawWriter(open(outfile, \"w\"))\n p.feed(data)\n\n log.info(\"%i packet_ins, %i packet_outs\", _pis, _pos)\n\n core.quit()",
"def parse_packet(data):\n ip = IPPacket(data)\n icmp = ICMPPacket(ip.payload)\n print('ICMP message from %s, type %d (%s), code %d, %d byte payload.') % (\n ip.src_addr, icmp.type, ICMP_TYPES[icmp.type], icmp.code,\n len(icmp.payload))\n return len(icmp.payload)",
"def _icmp_send(dp, port_out, ip_src=DISCOVERY_IP_SRC, ip_dst=DISCOVERY_IP_DST,\n eth_src='02:b0:00:00:00:b5', eth_dst='02:bb:bb:bb:bb:bb',\n icmp_type=8, icmp_code=0):\n\n ofp = dp.ofproto\n parser = dp.ofproto_parser\n pkt = packet.Packet()\n pkt.add_protocol(ethernet.ethernet(ethertype=0x0800,\n dst=eth_dst,\n src=eth_src))\n\n pkt.add_protocol(ipv4.ipv4(dst=ip_dst,\n src=ip_src,\n proto=1))\n\n ##Latency measurement\n my_clock = str(time.clock())\n\n ##TODO: Rework payload and codes to properly work with Fragmentation needed\n pkt.add_protocol(icmp.icmp(type_=icmp_type,\n code=icmp_code,\n csum=0,\n data=icmp.echo(1,1,\"{'dpid' : \"+str(dp.id)+\",'port_out' : \"+str(port_out)+\",'clock' : \"+my_clock+\"}\")))\n pkt.serialize()\n data=pkt.data\n actions=[parser.OFPActionOutput(port_out,0)]\n out=parser.OFPPacketOut(datapath=dp, buffer_id=ofp.OFP_NO_BUFFER, in_port=ofp.OFPP_CONTROLLER, actions=actions, data=data)\n ##LOG.debug('***ICMP DEBUG*** Sending ICMP with Payload: ' + \"{'dpid' : \"+str(dp.id)+\",'port_out' : \"+str(port_out)+\",'clock' : \"+my_clock+\"}\" )\n dp.send_msg(out)",
"def to_network_layer(packet):\r\n print(f\"[to_network_layer] packet:{packet}\")"
] | [
"0.6235294",
"0.61506945",
"0.6109243",
"0.601377",
"0.59785503",
"0.5961816",
"0.5922923",
"0.5854635",
"0.5806374",
"0.55882084",
"0.55190057",
"0.5492686",
"0.54621166",
"0.54290426",
"0.5427576",
"0.541186",
"0.5380287",
"0.53389305",
"0.53269714",
"0.5293727",
"0.5293053",
"0.52690774",
"0.51974",
"0.5189775",
"0.51804584",
"0.516548",
"0.5159419",
"0.515365",
"0.5119272",
"0.510829"
] | 0.6294702 | 0 |
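The record closed above centers on passive identification of UDP clients by destination port plus an optional payload marker (Half-Life, Steam, Pyzor, UDP traceroute). A minimal sketch of that pattern, assuming Python 3; the ports and markers come from the snippet, but the signature table, function name, and dedup set are illustrative and not the original tool's API:

```python
# Sketch only: classify a UDP packet by destination port plus an optional
# payload marker, reporting each (source IP, port) pair only once, in the
# spirit of the LiveUDPClient bookkeeping in the record above.
UDP_CLIENT_SIGNATURES = {
    (27016, None): "halflife/client",     # port alone is enough
    (24441, b"User:"): "pyzor/client",    # port plus payload marker
    (40348, b"HLS"): "halflife/client",
}

_reported = set()  # (src_ip, dport) pairs already reported

def classify_udp_client(src_ip, dport, payload):
    """Return a service label the first time a packet matches, else None."""
    key = (src_ip, dport)
    if key in _reported:
        return None
    for (sig_port, marker), label in UDP_CLIENT_SIGNATURES.items():
        if dport == sig_port and (marker is None or (payload and marker in payload)):
            _reported.add(key)
            return label
    return None

print(classify_udp_client("10.0.0.5", 24441, b"User: alice"))  # pyzor/client
print(classify_udp_client("10.0.0.5", 24441, b"User: alice"))  # None (already seen)
```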
Start all remote servers and one local server. | def _start_servers(self):
for user, host, port in self.server_addresses:
remoteHost = "%s@%s" % (user, host)
logger.info("starting remote server %s:%s", host, port)
command = ("cd ~/goaway;" +
"find . -name '*.pyc' -delete ;" +
"DEBUG=true goaway/cmdserver.py %s %s %s >> server.std.log 2>&1" % (
host,
port,
self._config.remote_path,
))
logger.debug("Starting server:%s remoteHost with command:%s" % (remoteHost, command))
## subprocess.call blocks, while subprocess.Popen doesn't block.

sshPopen = subprocess.Popen(["ssh", remoteHost, command],
shell = False, stdout= subprocess.PIPE, stderr = subprocess.PIPE)
self._start_local_server() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start(self):\r\n for srv in self._servers:\r\n srv.start()",
"def start_servers(self, **kwargs):\n self.cleanup()\n\n # Start up the API and default conductor server\n\n # We start the conductor server first, as the API server config\n # depends on the conductor port - this ordering allows for\n # retrying the launch on a port clash\n self.start_with_retry(self.conductor_server, 'conductor_port', 3,\n **kwargs)\n kwargs['conductor_port'] = self.conductor_server.bind_port\n\n self.start_with_retry(self.api_server, 'api_port', 3, **kwargs)",
"def run(self):\n self._list_servers()",
"def start():\n\n start_server()",
"def run_all_servers(options):\r\n settings = getattr(options, 'settings', 'dev')\r\n settings_lms = getattr(options, 'settings_lms', settings)\r\n settings_cms = getattr(options, 'settings_cms', settings)\r\n worker_settings = getattr(options, 'worker_settings', 'dev_with_worker')\r\n fast = getattr(options, 'fast', False)\r\n\r\n if not fast:\r\n args = ['lms', '--settings={}'.format(settings_lms), '--skip-collect']\r\n call_task('pavelib.assets.update_assets', args=args)\r\n\r\n args = ['studio', '--settings={}'.format(settings_cms), '--skip-collect']\r\n call_task('pavelib.assets.update_assets', args=args)\r\n\r\n call_task('pavelib.assets.watch_assets', options={'background': True})\r\n run_multi_processes([\r\n django_cmd('lms', settings_lms, 'runserver', '--traceback', '--pythonpath=.', \"0.0.0.0:{}\".format(DEFAULT_PORT['lms'])),\r\n django_cmd('studio', settings_cms, 'runserver', '--traceback', '--pythonpath=.', \"0.0.0.0:{}\".format(DEFAULT_PORT['studio'])),\r\n django_cmd('lms', worker_settings, 'celery', 'worker', '--loglevel=INFO', '--pythonpath=.')\r\n ])",
"def run():\n server = current_server()\n server._auto_stop = True\n return start()",
"def startservers():\n try:\n dns = subprocess.Popen(['python', FAKE_LOC, '-c', DNS_LOC])\n except IOError:\n sys.exit('>> Unable to locate FakeDns')\n\n try:\n httpd = MyTCPServer(('', 80), MyHandler)\n except socket.error:\n dns.kill()\n sys.exit('>> Port 80 already in use')\n try:\n print '>> Starting HTTP Server...'\n httpd.serve_forever()\n except KeyboardInterrupt:\n httpd.shutdown()\n httpd.server_close()\n dns.kill()\n sys.exit()",
"def start(self, wait_for_port=False):\n for c in self.openstack_endpoints.values():\n c.compute = self.compute\n c.manage = self.manage\n c.server_thread = threading.Thread(target=c._start_flask, args=())\n c.server_thread.daemon = True\n c.server_thread.name = c.__class__.__name__\n c.server_thread.start()\n if wait_for_port:\n self._wait_for_port(c.ip, c.port)",
"def start(self):\n if self.is_alive:\n self.logger.warning('Already started!')\n return\n self._create_tunnels()\n if not self.is_active:\n self._raise(BaseSSHTunnelForwarderError,\n reason='Could not establish session to SSH gateway')\n for _srv in self._server_list:\n thread = threading.Thread(\n target=self._serve_forever_wrapper,\n args=(_srv, ),\n name='Srv-{0}'.format(address_to_str(_srv.local_port))\n )\n thread.daemon = self.daemon_forward_servers\n thread.start()\n self._check_tunnel(_srv)\n self.is_alive = any(self.tunnel_is_up.values())\n if not self.is_alive:\n self._raise(HandlerSSHTunnelForwarderError,\n 'An error occurred while opening tunnels.')",
"def start(self, name=None):\n server = self.cloudman.get_server(name)['id']\n r = self.cloudman.compute.start_server(server)\n return r",
"def connect_to_server(self):\n\t\tself.outside.start()\n\t\tself.outside.register(self.config.server_ip, self.config.server_port)\n\n\t\tself.thin.start()\n\t\tself.thin.register(self.config.server_ip, self.config.server_port)",
"def init():\r\n if not env.hosts:\r\n _init_local()\r\n else:\r\n _init_remote()",
"def __runRemoteListeningSockets(self, address, localHostName):\n ## Get the local machine name and the remote nodes one\n remoteNodesIP = self.__getLocalAndRemoteMachineNames()\n\n ## Strip out the nodes' names\n availableNodes = [node.strip() for node in self.runInfoDict['Nodes']]\n\n ## Get unique nodes\n uniqueNodes = list(set(availableNodes) - set([localHostName]))\n servers = []\n self.remoteServers = {}\n if len(uniqueNodes) > 0:\n ## There are remote nodes that need to be activated\n ## Modify the python path used by the local environment\n localEnv = os.environ.copy()\n pathSeparator = os.pathsep\n if \"PYTHONPATH\" in localEnv and len(localEnv[\"PYTHONPATH\"].strip()) > 0:\n previousPath = localEnv[\"PYTHONPATH\"].strip()+pathSeparator\n else:\n previousPath = \"\"\n localEnv[\"PYTHONPATH\"] = previousPath+pathSeparator.join(sys.path)\n ## Start\n for nodeId in uniqueNodes:\n ## Check how many processors are available in the node\n ntasks = availableNodes.count(nodeId)\n remoteHostName = remoteNodesIP[nodeId]\n\n ## Activate the remote socketing system\n ## let's build the command and then call the os-agnostic version\n if self._parallelLib == ParallelLibEnum.ray:\n self.raiseADebug(\"Setting up RAY server in node: \"+nodeId.strip())\n runScript = os.path.join(self.runInfoDict['FrameworkDir'],\"RemoteNodeScripts\",\"start_remote_servers.sh\")\n command=\" \".join([runScript,\"--remote-node-address\",nodeId, \"--address\",address, \"--num-cpus\",str(ntasks),\" --working-dir \",self.runInfoDict['WorkingDir'],\" --raven-framework-dir\",self.runInfoDict[\"FrameworkDir\"],\"--remote-bash-profile\",self.runInfoDict['RemoteRunCommand']])\n self.raiseADebug(\"command is: \"+command)\n command += \" --python-path \"+localEnv[\"PYTHONPATH\"]\n self.remoteServers[nodeId] = utils.pickleSafeSubprocessPopen([command],shell=True,env=localEnv)\n elif self._parallelLib == ParallelLibEnum.dask:\n remoteServerScript = os.path.join(self.runInfoDict['FrameworkDir'],\n \"RemoteNodeScripts\",\"start_dask.sh\")\n outputFile = os.path.join(self.runInfoDict['WorkingDir'],\"server_debug_\"+nodeId)\n command = ['ssh',nodeId,remoteServerScript,outputFile,\n self.daskSchedulerFile,str(ntasks),\n self.runInfoDict[\"FrameworkDir\"],\n self.runInfoDict['RemoteRunCommand'],\n self.runInfoDict['WorkingDir']]\n self.raiseADebug(\"command is: \"+\" \".join(command))\n command.append(self.__removeLibPythonFromPath(localEnv[\"PYTHONPATH\"]))\n self.remoteServers[nodeId] = utils.pickleSafeSubprocessPopen(command, env=localEnv)\n ## update list of servers\n servers.append(nodeId)\n if self._parallelLib == ParallelLibEnum.ray or self._parallelLib == ParallelLibEnum.dask:\n #wait for the servers to finish starting (prevents zombies)\n for nodeId in uniqueNodes:\n self.remoteServers[nodeId].wait()\n self.raiseADebug(\"server \"+str(nodeId)+\" result: \"+str(self.remoteServers[nodeId]))\n\n return servers",
"def start_peers(self):\n for i in self.nodes:\n i.start()",
"def start(self):\n self.serve_forever()",
"def start(self):\n self.serve_forever()",
"def start_server(self):\n if not self._server:",
"def start_server(self):\n if self.esp_mgr.ap:\n self.server_socket = adafruit_esp32spi_socket.socket()\n self.esp_mgr.esp.start_server(23, self.server_socket.socknum)",
"def local_webserver_start():\n if not _is_webserver_running():\n local(_webserver_command())",
"def learn_local_servers(self):\n assert len(self.mylinks) > 0\n assert len(self.switches) > 0\n assert self.graph != None\n\n localservers = []\n for srv in self.servers:\n neighbor_sw = self.graph.neighbors(srv)\n if len(neighbor_sw) != 1:\n raise NotImplementedError(\"Single server links only\")\n else:\n neighbor_sw = neighbor_sw[0]\n if (neighbor_sw in self.switches):\n localservers.append(srv)\n\n # remove duplicates\n self.localservers = list(set(localservers))",
"def peer_server(self):\n try:\n listener_thread = threading.Thread(target=self.peer_server_listener)\n listener_thread.setDaemon(True)\n\n operations_thread = threading.Thread(target=self.peer_server_host)\n operations_thread.setDaemon(True)\n\n listener_thread.start()\n operations_thread.start()\n\n threads = []\n threads.append(listener_thread)\n threads.append(operations_thread)\n\n for t in threads:\n t.join()\n except Exception as e:\n print \"Peer Server Error, %s\" % e\n sys.exit(1)",
"def open(self):\n self._server = socketserver.ThreadingTCPServer(\n server_address=('localhost', self._requested_local_port),\n RequestHandlerClass=self._create_handler(self._ssh_client, self._remote_host, self._remote_port),\n )\n\n threading.Thread(target=self.serve_forever).start()\n\n print('Forwarding local port {} to remote {}:{}'.format(self.local_port, self.remote_host, self.remote_port))",
"def __wait_for_master_ssh( self ):\n for _ in itertools.count( ):\n s = socket.socket( socket.AF_INET, socket.SOCK_STREAM )\n try:\n s.settimeout( 5 )\n s.connect( ('mesos-master', 22) )\n return\n except socket.error:\n pass\n finally:\n s.close( )",
"def start_server(self):\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n\n # The two services we added in the proto. You can find these functions in\n # jellybeanrobot_pb2_grpc.py.\n jellybeanrobot_pb2_grpc.add_JellyServicer_to_server(Robot(), server)\n\n # Start listening on a port.\n server.add_insecure_port(\"localhost:%d\" % self.port)\n print \"Listening on localhost:%d!\\n\" % self.port\n server.start()\n\n try:\n while True:\n time.sleep(3600) # one hour. \n except KeyboardInterrupt:\n server.stop(0)",
"def start(self):\n threading.Thread(target=self.serve_forever).start()",
"def __run_server(self):\n os.chdir(os.path.dirname(self.server_path))\n self.server_process = subprocess.Popen([self.server_path, \\\n \"{}:{}\".format(self.args.ip, self.args.port)])",
"def remotes():",
"def run(self):\n self._server = self._get_server()\n self._server.serve_forever()",
"def start_server(auth_parms, server_data):\n server_uuid = server_data[0]\n server_state = get_server_state(auth_parms, server_uuid)\n if server_state == 'STOPPED':\n rc = change_server_status(auth_parms=auth_parms, server_uuid=server_uuid, state='RUNNING')\n # change_server_status() waits on the server getting to the requested state, so we don't\n # need to call wait_for_server() here. However, we do (1) need to check the status and (2)\n # wait on the server actually being accessible (as opposed to having a RUNNING state in\n # FCO, which really just means that the underlying kvm process has started).\n #\n # 1. Check rc (0 is good)\n if (rc != 0):\n raise Exception(\"Failed to put server \" + server_uuid + \" in to running state\")\n\n server_resultset = list_resource_by_uuid(auth_parms, uuid=server_uuid, res_type='SERVER')\n print(\"Server result set is:\")\n print server_resultset\n\n server_ip = server_resultset['list'][0]['nics'][0]['ipAddresses'][0]['ipAddress'] # yuk\n print(\"server IP=\" + server_ip)\n\n # Step 2. Wait on it being accessible. It is possible that the server doesn't have ssh installed,\n # or it is firewalled, so don't fail here if we can't connect, just carry on and let\n # the caller deal with any potential issue. The alternative is a hard-coded sleep, or\n # trying a ping (platform specific and/or root privs needed).\n is_ssh_port_open(server_ip, 30)\n\n server_data.append(server_ip)\n return server_data",
"def connect_servers(self):\r\n\r\n for srvr in self.server_list_text:\r\n try:\r\n self.connect_server(srvr[\"serverID\"], srvr[\"serverIP\"], int(srvr[\"serverPort\"]), srvr[\"nickname\"])\r\n except Exception as exp:\r\n print(\"Error occurred.\\nWhy: {0}\".format(exp)) # TOOD: posílat takovéhle errory klientům\r"
] | [
"0.7768984",
"0.7032133",
"0.6721577",
"0.6698385",
"0.65687376",
"0.6535655",
"0.6447551",
"0.6397013",
"0.63623345",
"0.6346815",
"0.6252066",
"0.6190625",
"0.6175915",
"0.61642206",
"0.6139815",
"0.6139815",
"0.6128373",
"0.6106354",
"0.60582894",
"0.60406834",
"0.60241026",
"0.60135597",
"0.5989884",
"0.59897137",
"0.5939332",
"0.5927691",
"0.592745",
"0.590314",
"0.58980227",
"0.58831173"
] | 0.8314969 | 0 |
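As a companion to the `_start_servers` record above: a stripped-down sketch of the same launch pattern, firing one non-blocking ssh command per host with subprocess.Popen before anything is started locally. The host tuples, the remote command template, and the function name are placeholders rather than the original project's API.

```python
import subprocess

def start_remote_servers(addresses, remote_command_template):
    """Launch one remote command per (user, host, port) tuple without blocking."""
    procs = []
    for user, host, port in addresses:
        remote = "%s@%s" % (user, host)
        command = remote_command_template % (host, port)
        # Popen returns immediately; subprocess.call would block until ssh exits.
        procs.append(subprocess.Popen(
            ["ssh", remote, command],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        ))
    return procs

# Hypothetical usage:
# procs = start_remote_servers(
#     [("deploy", "node1.example.com", 9000)],
#     "nohup python3 cmdserver.py %s %s >> server.log 2>&1 &",
# )
```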
Wait for all servers to become alive. | def wait_for_servers(self, timeout):
for user, host, port in self.server_addresses:
if not self.wait_for_server(user, host, port, timeout):
logging.warn("could not start server %s:%s:%s", user, host, port)
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wait_all():\n global alive\n\n try:\n while alive > 0:\n gevent.sleep(1)\n finally: \n signal.setitimer(signal.ITIMER_REAL, 0)",
"def __wait_for_master_ssh( self ):\n for _ in itertools.count( ):\n s = socket.socket( socket.AF_INET, socket.SOCK_STREAM )\n try:\n s.settimeout( 5 )\n s.connect( ('mesos-master', 22) )\n return\n except socket.error:\n pass\n finally:\n s.close( )",
"def wait_for_container():\n for i in xrange(30):\n print(\"Waiting for service to come up\")\n try:\n requests.get(URL).raise_for_status()\n return True\n except Exception as e:\n print e\n sleep(1)\n\n return False",
"def wait(self, retries=30):\n running = check_server(self.adj.host, self.adj.port,\n '/__application__', retries=retries)\n if running:\n return True\n try:\n self.shutdown()\n finally:\n return False",
"def wait_for_termination(self):\n self.server.wait_for_termination()",
"def run(self):\n for req, resp in self.servings:\n resp.check_timeout()",
"def wait_for_termination(self):\n self.server.wait_for_termination()",
"def _wait_for_server():\n start_time = time.time()\n\n while True:\n try:\n urllib.request.urlopen('http://localhost:5000/')\n break\n except Exception:\n time.sleep(.1)\n if time.time() - start_time > 2:\n raise",
"def wait(self):\n AbstractDaemon.wait(self, SCHEDULER_PERIOD)",
"def wait_for_completion(self):\n self.logger.debug(\"Waiting for completion\")\n finished = False\n while not finished:\n if self._all_workers_are_idle():\n self.logger.info(\"Finished\")\n finished = True",
"def wait(self):\n [p.join() for p in self._downloaders]\n self._ckq.join()\n [p.terminate() for p in self._checkers]\n [p.join() for p in self._checkers]",
"def wait(self):\n try:\n self._server.wait()\n except greenlet.GreenletExit:\n LOG.info(_(\"WSGI server has stopped.\"))",
"def WaitUntilServing(self, timeout=30.0):\n assert self._process, 'server was not started'\n finish_time = time.time() + timeout\n while time.time() < finish_time:\n if self._process.poll() is not None:\n raise Error('server has already exited with return: %r',\n self._process.returncode)\n if self._CanConnect():\n return\n time.sleep(0.2)\n raise Error('server did not start after %f seconds', timeout)",
"def wait(self):\n try:\n if self._server is not None:\n self._server.wait()\n except greenlet.GreenletExit:\n LOG.info(_(\"WSGI server has stopped.\"))",
"def wait_for_marathon():\n marathon_service = get_service_connection_string('marathon')\n while True:\n print 'Connecting marathon on %s' % marathon_service\n try:\n response = requests.get('http://%s/ping' % marathon_service, timeout=5)\n except (\n requests.exceptions.ConnectionError,\n requests.exceptions.Timeout,\n ):\n time.sleep(5)\n continue\n if response.status_code == 200:\n print \"Marathon is up and running!\"\n break",
"def wait_for_marathon():\n marathon_service = get_marathon_connection_string()\n while True:\n print('Connecting to marathon on %s' % marathon_service)\n try:\n response = requests.get(\n 'http://%s/ping' % marathon_service, timeout=2)\n except (\n requests.exceptions.ConnectionError,\n requests.exceptions.Timeout,\n ):\n time.sleep(2)\n continue\n if response.status_code == 200:\n print(\"Marathon is up and running!\")\n break",
"def _wait(self):\n conn = None\n try:\n conn = libvirt.open(\"qemu:///system\")\n while True:\n time.sleep(10)\n try:\n state = conn.lookupByName(self.domain).info()[0]\n except (libvirt.libvirtError, TypeError, IndexError):\n break\n if state in [4, 5, 6]: # crashed or shutdown\n break\n finally:\n if conn is not None:\n conn.close()",
"def wait_forever(self):\r\n while True:\r\n time.sleep(0.5)",
"def wait_for_all_cache_tasks(self):\n if self.is_server_process:\n self.update_queue.join()\n self.refresh_queue.join()",
"def serve_forever(servers, poll_interval=0.5):\n while True:\n ready, wait, excep = select.select(servers, [], [], poll_interval)\n for server in servers:\n if server in ready:\n server.handle_request()",
"def healthcheck(self):\n while True:\n time.sleep(NAMENODE_HEALTH_CHECK_INTERVAL)\n self.check_datanodes()",
"def wait_for_cadvisor_up(self):\n ping = None\n while ping is None:\n ping = requests.get(self._url_prefix, timeout=CONNECTION_TIMEOUT)\n if ping is None:\n logger.debug(\"Unable to connect to cadvisor %s. Will sleep for %s sec\",\n self._url_prefix, CHECK_LIVELINESS_INTERVAL)\n time.sleep(CHECK_LIVELINESS_INTERVAL)\n logger.info(\"cAdvisor client is up for endpoint %s\", self._url_prefix)",
"def wait(self):\n for _ in range(15):\n time.sleep(10)\n if self.ready:\n break\n else:\n raise RuntimeError('timeout, lease failed to start')",
"def wait_all_process_done(self) -> None:\n while len(self.process_queue) > 0:\n self.check_process_done()",
"def forget_dead_hosts(self):\r\n for s in self.servers:\r\n s.deaduntil = 0",
"def forget_dead_hosts(self):\n\t\tfor server in self.servers:\n\t\t\tserver.dead_until = 0",
"def wait_for_workers(self, workers_db_key):\n timeout = time.time() + 10\n while True:\n n_workers = self.tempstore.conn.scard(workers_db_key)\n self.logger.info('Got redis scard resp: %s', n_workers)\n if n_workers > 0:\n break\n\n if time.time() > timeout:\n raise Exception('Workers did not come up - please check syslog')\n\n time.sleep(1)\n\n self.logger.info('Workers successfully started')",
"def wait_for_ssh(self):\n self.wait_for_status(16)\n printy(\"The instance is now running ...\")\n # The instance is running, but we give it 60 more seconds for running\n # SSHD\n printy(\"Waiting 60 seconds for SSH server to start ...\")\n time.sleep(60)",
"def forget_dead_hosts(self):\n for s in self.servers:\n s.dead_until = 0",
"def loop_wait(self):\n # Do in reverse so sockets (first) can send anything the other loops\n # produce\n self.log_debug(\"Waiting for loop to finish\")\n loops = reversed(getattr(self, \"_loops\", []))\n for loop in loops:\n loop.loop_wait()\n self.remove_loop(loop)\n loops = None\n self.loop_confirm_stopped()"
] | [
"0.74015445",
"0.718989",
"0.68756485",
"0.67319417",
"0.6638677",
"0.66021174",
"0.6596358",
"0.6572008",
"0.6525765",
"0.64510643",
"0.64422387",
"0.64345455",
"0.64319974",
"0.64055943",
"0.6364728",
"0.6302922",
"0.6285464",
"0.6269691",
"0.62687176",
"0.62614435",
"0.6230971",
"0.62196165",
"0.6204539",
"0.61812276",
"0.61656505",
"0.61437225",
"0.61414737",
"0.6121731",
"0.61114955",
"0.6064694"
] | 0.76050067 | 0 |
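The wait_for_server helper called in the record above is not shown; a plausible sketch, assuming it simply polls the TCP port until something accepts a connection or the timeout expires (the user argument is dropped here, and the real implementation may check more than reachability):

```python
import socket
import time

def wait_for_server(host, port, timeout):
    """Poll a TCP port until it accepts connections or the deadline passes."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            with socket.create_connection((host, port), timeout=1):
                return True              # something is listening
        except OSError:
            time.sleep(0.5)              # not up yet, retry until the deadline
    return False
```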
Wait until this many bytes are available in the serial buffer. | def waitforAndRead(self, size):
while self.device.inWaiting() < size:
pass
else:
return self.device.read(size) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _serial_bytes_available(self):\n return self.serial.in_waiting",
"async def _wait_for_data(self, current_command, number_of_bytes):\n while number_of_bytes:\n next_command_byte = await self.read()\n current_command.append(next_command_byte)\n number_of_bytes -= 1\n return current_command",
"async def _wait_for_data(self, current_command, number_of_bytes):\n while number_of_bytes:\n next_command_byte = await self.read_next_byte()\n current_command.append(next_command_byte)\n number_of_bytes -= 1\n return current_command",
"def wait(self):\n self.stream.read_until(\"\\n\", self._on_read)",
"def wait_till_read_out():\n\n\trespond = send_command('waitreadout')",
"def waitReadable( self, timeoutms=None ):\n if len( self.readbuf ) == 0:\n self.pollOut.poll( timeoutms )",
"def wait_for_data(receiver):\n\n while not receiver.available(pipes[1]):\n time.sleep(0.01)",
"def wait_to_be_ready(self):\n count = 0\n while count < 6:\n try:\n line = self.stdout_reader.get(timeout=10)\n if \"waiting for input\" in line:\n self.cec_logger.info('CEC is ready')\n break\n except Empty:\n self.cec_logger.warning(\"haven't received a line from CEC\")\n count += 3",
"def __wait_for(self, cmd_byte, rx_bytes, timeout_seconds=1.0):\n if not self.is_valid():\n return False\n t = time.time()\n remain = timeout_seconds\n while 1:\n #num_read = self.__usb_if.MPUSBRead(self.__handle_read, rx_bytes, int(remain*1000))\n #if (num_read > 0) and (rx_bytes[0]==cmd_byte):\n # return True\n rx = self.__usb_if.read(int(remain*1000))\n num_read = len(rx)\n if rx:\n rx_bytes[:] = rx\n if (num_read > 0) and (rx_bytes[0]==cmd_byte):\n return True\n remain = timeout_seconds - (time.time()-t)\n if remain <= 0:\n break\n time.sleep(0.001)\n #end 1 loop\n return False\n #end __wait_for()",
"def worker_serial_read(self):\r\n while self.active_flag.is_set():\r\n if not self.data_recieved_flag.is_set() and self.serial_data.in_waiting > 0:\r\n # strtmp=self.serial_data.read_until(b'\\x02\\x01\\x04\\x03\\x06\\x05\\x08\\x07');\r\n strtmp = self.serial_data.read_all()\r\n if (strtmp != b''):\r\n # self.buffer_busy_flag.wait();\r\n self.buffer_busy_flag.clear()\r\n # self.recieved_data=[self.recieved_data,strtmp];\r\n self.recieved_data = strtmp\r\n self.buffer_busy_flag.set()\r\n self.data_recieved_flag.set()\r\n else:\r\n time.sleep(0.001)\r\n\r\n return",
"def _read_nowait(self, n: int) -> bytes:\n ...",
"def waitbuffer(self, ptr, bufsize):\n timeout = ct.c_int(20000)\n self.lib.AT_WaitBuffer(self.AT_H, ct.byref(ptr), ct.byref(bufsize),\n timeout)",
"def wait(self):\n while not self.done:\n self.device._handle_events(1000)",
"def read_until(\n self,\n min_num_bytes: int,\n ending: bytes,\n timeout: float = 10.0,\n data_consumer=None,\n ):\n\n data = b''\n\n # If a miniumum number of bytes is given, wait till at least\n # that number of bytes are received. If the value is 0, then\n # continue, and rely on the terminator and timeout values.\n if min_num_bytes:\n data = self.con.read(min_num_bytes)\n # debug(f'read {data=}')\n if data_consumer:\n data_consumer(data)\n\n timeout_count = 0\n while True:\n if ending and data.endswith(ending):\n break\n else:\n # debug(f\"{ending=} was not found\")\n pass\n\n if self.con.inWaiting() > 0:\n new_data = self.con.read(1)\n # debug(f'read {new_data=}')\n data = data + new_data\n # if len(data) > 80:\n # debug(f'data: len={len(data)} {data[-80:]=}')\n # else:\n # debug(f'data: len={len(data)} {data=}')\n if data_consumer:\n data_consumer(new_data)\n # timeout_count = 0\n else:\n timeout_count += 1\n # debug(f'{timeout_count=}')\n if timeout is not None and timeout_count >= 100 * timeout:\n if not data:\n debug(f\"TIMEOUT - No data received within {timeout} seconds\")\n else:\n debug(f\"TIMEOUT - data {data} did not end with {ending}\")\n break\n time.sleep(0.01)\n debug(f\"read_until returns {data=}\")\n return data",
"def wait(self):\n with self.__lock:\n while not self.__complete:\n self.__lock.wait()",
"def __wait_for_available_bytes(self, overall_deadline):\n # For testing purposes, it is important that we capture the time before we invoke `peek`. That's because\n # all methods that write bytes will advance the clock... so we can tell if there may be new data by seeing\n # if the time has changed since the captured time.\n last_busy_loop_time = self.__time()\n while self._is_running():\n (num_bytes_available, result) = self.__channel.peek()\n if result != 0:\n raise RedirectorError(\n \"Error while waiting for more bytes from redirect server error=%d\"\n % result\n )\n if num_bytes_available > 0:\n return True\n self._sleep_for_busy_loop(\n overall_deadline, last_busy_loop_time, \"more bytes to be read\"\n )\n last_busy_loop_time = self.__time()\n return False",
"def wait(self, signal):\n while True:\n s = self.receive()\n if s == signal:\n break",
"def data_available(self):\n\n self.run = True\n self.serial.reset_input_buffer()\n while self.run:\n if self.serial.in_waiting:\n data: str = self.serial.readline().decode(\"utf-8\")\n data = data.replace(\">>>\", \"\").lstrip()\n\n if len(data) > 0:\n self.output_text.config(state=NORMAL)\n self.output_text.insert(END, data)\n self.output_text.see(END)\n self.output_text.config(state=DISABLED)\n else:\n time.sleep(0.1)",
"def wait(self):\n time.sleep(0.010)",
"def wait(self):\n try:\n buf = os.read(self._fd, 8)\n return struct.unpack(\"Q\", buf)[0]\n except OSError as e:\n if e.errno == errno.EAGAIN:\n return 0\n else:\n raise e",
"def wait(self):\n time.sleep(self.next())",
"def recv_bytes(self):\r\n try:\r\n received = self.socket.recv(4096)\r\n # printf(\"Bytes received: %d\" % (len(received)))\r\n self.recvBuffer += received\r\n # printf(\"recvbuflen=%d\" % (len(recvbuf)))\r\n except BlockingIOError: # no data available\r\n return\r\n except:\r\n # print(\"Socket broken in recv\")\r\n return",
"def wait(self):\n self.drain_call_queue()",
"async def wait_until_chunked(self) -> None:\n await self._finished_chunking.wait()",
"def _wait_ready(self):\n command = self._recv_from_client()\n while command != \"READY\":\n command = self._client.recv_from_client()",
"def wait(self, wait_timeout=10):\n if self._TransferInitiated == 0:\n return\n Error = \"DMA wait timed out.\"\n with timeout(seconds = wait_timeout, error_message = Error):\n while True:\n if libdma.XAxiDma_Busy(self.DMAengine,self.direction) == 0:\n break",
"def waitfor(self):\r\n finished = False\r\n while finished == False:\r\n time.sleep(5)\r\n finished = self.isFinished()",
"def wait_until_empty(self):\n while not self.is_empty():\n self.sleep(10)",
"def waitForReadDone(self):\n data = 0x0\n for i in range(10):\n if i >= 10:\n print \"I2C Failure\"\n break\n time.sleep(0.0005)\n data = self.spam.read(self.cmd_reg)\n if(self.cmdDone(data)):\n break\n\n return data",
"def wait(self):\n self.queue.join()"
] | [
"0.7088853",
"0.6867825",
"0.6855532",
"0.67480516",
"0.67391396",
"0.6630244",
"0.66225225",
"0.6618195",
"0.6595421",
"0.6574406",
"0.6466141",
"0.6452985",
"0.644309",
"0.6389591",
"0.6388041",
"0.6383351",
"0.63777477",
"0.6309587",
"0.6277611",
"0.6259124",
"0.6258118",
"0.62334615",
"0.62214476",
"0.6220866",
"0.61847866",
"0.6145106",
"0.61327463",
"0.6132004",
"0.6108986",
"0.6087791"
] | 0.69288653 | 1 |
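The waitforAndRead method above spins until pyserial reports enough buffered bytes. A variant sketch with a timeout and a short sleep instead of a pure busy-wait; `ser` is assumed to be a pyserial `Serial` instance, and `in_waiting` is the modern spelling of the `inWaiting()` call used in the record:

```python
import time

def read_exactly(ser, size, timeout=5.0):
    """Read exactly `size` bytes from a pyserial port or raise on timeout."""
    deadline = time.time() + timeout
    while ser.in_waiting < size:
        if time.time() > deadline:
            raise TimeoutError("only %d of %d bytes arrived" % (ser.in_waiting, size))
        time.sleep(0.01)                 # yield the CPU instead of busy-waiting
    return ser.read(size)
```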
If the number of wrong answers is greater than 1, the word "faute" takes its plural form. | def singPlur(repFausses):
if repFausses <= 1:
return "faute"
else:
return "fautes" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def nom(self, i):\n pass",
"def editar_repet(self, repet: int):\n comprep(repet)\n self.repet = repet",
"def fim_da_rodada(self, recompensa, m, numero_de_cacadores):\n #print('Jogador 4 {}'.format(self.historico[-1]))\n pass",
"def enchere(self):\n\n i = 0\n while i < 5 and self.annonce < 4:\n paroleJ = self.joueurs[i].parler(self.annonce)\n if paroleJ != 0:\n self.annonce = paroleJ\n self.indiceJoueurQuiPrend = i\n i += 1\n\n print(\"joueur qui prend : \" + str(self.indiceJoueurQuiPrend))\n if self.indiceJoueurQuiPrend != -1:\n print(\"annonce : \" + str(self.annonce))\n if self.annonce == 1 or self.annonce == 2:\n self.joueurs[self.indiceJoueurQuiPrend].possedeChien = True\n self.joueurs[self.indiceJoueurQuiPrend].construireChien()\n self.debuterPartie()\n\n else:\n self.finirPartie()",
"def reais(um, dois, tres):\n print(f'Aposta 1 -> R${um:.2f}, Aposta 2 -> '\n f'R${dois:.2f}, Aposta 3 -> R${tres:.2f}')",
"def suffixe(self, autre):\n if autre.tete is None: # liste vide\n return\n\n partage = False # boolean qui indique si la liste partage au moins une cellule ou pas\n for cel in self.cellules():\n if cel.utilisation > 1: # dès qu'on trouve une cellule partagée\n partage = True\n break\n\n if partage: # si la liste partage des cellules\n vu_premiere = True\n doublage = False\n premiere = True\n prec = self.tete\n for cel in self.cellules():\n if vu_premiere and cel.utilisation > 1: # dès qu'on trouve une cel partagée (1 fois)\n vu_premiere = False\n doublage = True\n cel.utilisation -= 1\n if doublage: # si on commence à doubler la partie partagée\n cel_double = Cellule(cel.valeur)\n if premiere: # cas initial (1 fois)\n premiere = False\n if cel == self.tete: # si c'est la première cellule qu'on supprime\n self.tete = cel_double\n else: # cas général\n prec.suivant = cel_double\n else: # cas itératif\n prec.suivant = cel_double\n prec = cel_double\n else:\n prec = cel\n prec.suivant = autre.tete\n autre.tete.utilisation += 1\n\n else: # sinon, si la liste ne partage aucune cellule\n if partage is False:\n for cel in self.cellules():\n if cel.suivant is None:\n cel.suivant = autre.tete\n autre.tete.utilisation += 1\n\n self.taille += autre.taille",
"def nom(meal, i1):\n # Iterators\n #numOfIts = len(meal)\n #its = [i1 for i1 in range(0,numOfIts)]\n #its = [0,2,1,6, len(meal)]\n #its = createIts(meal)\n numOfIts = len(its)\n \n newnom = 0\n for i2 in range(0,numOfIts):\n newnom += int(meal[its[i2]+i1]) + primes[i2]\n #print(primes[i2])\n #print(\"newnom =\", newnom)\n \n newnom = str(newnom % 10)\n return newnom",
"def NuevaPartida(self,):\n\t\"\"\" Numeros Disponibles \"\"\"\n\tDisponibles[0] = True\n\tDisponibles[1] = True\n\tDisponibles[2] = True\n\tDisponibles[3] = True\n\tDisponibles[4] = True\n\tDisponibles[5] = True\n\t\"\"\" Jugador Uno \"\"\"\n\tJ1[0] = 0\n\tJ1[1] = 0\n\tJ1[2] = 0\n\tJ1[3] = 0\n\tJ1[4] = 0\n\tJ1[5] = 0\n\t\"\"\" Jugador Dos \"\"\"\n\tJ2[0] = 0\n\tJ2[1] = 0\n\tJ2[2] = 0\n\tJ2[3] = 0\n\tJ2[4] = 0\n\tJ2[5] = 0\n\t\"\"\" Jugador Tres \"\"\"\n\tJ3[0] = 0\n\tJ3[1] = 0\n\tJ3[2] = 0\n\tJ3[3] = 0\n\tJ3[4] = 0\n\tJ3[5] = 0\n\t\"\"\" Jugador Cuatro \"\"\"\n\tJ4[0] = 0\n\tJ4[1] = 0\n\tJ4[2] = 0\n\tJ4[3] = 0\n\tJ4[4] = 0\n\tJ4[5] = 0",
"def __init__(self):\n self.nombre_roues = 4\n self.nombre_fauteils = 1\n self.moteur = False\n self.volant = True",
"def cantidad_participantes(self):\n print(\n f\"\"\"\n ================================================\n ======== SE ENCONTRARON {len(self.__disparos)} PARTICIPANTES ========\n ================================================\n \"\"\"\n )",
"def reprime(self):\n self.__primed = 1",
"def on_pushButton_precedent_clicked(self):\n \n if self.lineEdit_temperature.text() !=\"\":\n num_pt =int(self.label_pt.text())\n if num_pt - 1 < 1:\n pass\n else:\n \n #effacement\n for ligne in range(11):\n for colonne in range(8):\n if colonne !=6:\n \n self.tableWidget_mesures.setItem(ligne, colonne, QtGui.QTableWidgetItem(None))\n self.lineEdit_temperature.clear()\n self.lineEdit_stab_max.clear()\n self.lineEdit_u_stab_max.clear() \n self.lineEdit_hom_max_2.clear()\n self.lineEdit_u_hom_max.clear()\n \n else:\n pass\n #reafctation des donnees \n self.reaffectation_table_widget_mesures(str(int(self.label_pt.text())-1)) \n \n #presentation textEdit n°pt de la mesure\n self.label_pt.setText(str(num_pt -1))",
"def repetido_sansanito():\n\tcur.execute(\"\"\"\n\t\t\t\tSELECT * FROM\n\t\t\t\t(SELECT nombre\n\t\t\t\tFROM sansanito\n\t\t\t\tGROUP BY nombre\n\t\t\t\tORDER BY COUNT(*) DESC)\n\t\t\t\tWHERE ROWNUM <= 1\n\t\t\t\t\"\"\")\n\tprint_table(hdrs_sansanito)",
"def _pega_no(self, index):\n ponteiro = self.inicio\n for i in range(index):\n if ponteiro:\n ponteiro = ponteiro.prox\n else:\n raise IndexError(\"list index out of range\")\n return ponteiro",
"def NUMBER_OF_REC_CHOICE():\n return 13",
"def add_neurone(self, couche, nbr=1):\n if self.control == 0:\n if couche >= 0 and couche <= len(self.couche) - 1 and nbr > 0:\n self.couche[couche] += nbr\n else:\n print(\"Le réseau est deja créé, vous en pouvez plus le modifier\")",
"def add_all_neurone(self, tab):\n if self.control == 0:\n if len(tab) == len(self.couche):\n for i in range(0, len(tab)):\n self.add_neurone(i, tab[i])\n else:\n print(\"Le tableau doit etre de taille\" + str(len(self.couche)))\n else:\n print(\"Le réseau est deja créé, vous en pouvez plus le modifier\")",
"def nombre_monomes(self):\n\t\tif self.__tete:\n\t\t\treturn self.__tete.nombre_monomes()\n\t\telse:\n\t\t\treturn 0",
"def repeat_count_m1(self):\n if hasattr(self, '_m_repeat_count_m1'):\n return self._m_repeat_count_m1 if hasattr(self, '_m_repeat_count_m1') else None\n\n self._m_repeat_count_m1 = self.repeat_count_m1_raw.value\n return self._m_repeat_count_m1 if hasattr(self, '_m_repeat_count_m1') else None",
"def affiche_joueur(self, max_taille_nom):\n\n print(\"{0: <{width}} : {1}\".format(self.get_nom(), self.get_score(),\n width=max_taille_nom - len(self.get_nom())))",
"def is_repetition(self):\n return self.id == 1",
"def ngens(self):\n return 1",
"def reprographie():\n nombreDePhotocopie = int(input(\"Entrez le nombre de photocopie a effectuer \"))\n PREMIER_PRIX = 0.10\n DEUXIEME_PRIX = 0.09\n TROISIEME_PRIX = 0.08\n PREMIERE_TRANCHE = 10\n DEUXIEME_TRANCHE = 20\n TROISIEME_TRANCHE = 30\n resultat = 0\n if(nombreDePhotocopie>TROISIEME_TRANCHE):\n resultat = DEUXIEME_TRANCHE*DEUXIEME_PRIX+1+(nombreDePhotocopie-30)*TROISIEME_PRIX\n elif(nombreDePhotocopie<=TROISIEME_TRANCHE):\n if(nombreDePhotocopie/10>1):\n resultat = (nombreDePhotocopie-10)*DEUXIEME_PRIX+(PREMIERE_TRANCHE*PREMIER_PRIX)\n else:\n resultat = nombreDePhotocopie*PREMIER_PRIX\n return resultat",
"def cliquer(self):\n self.nb_clic += 1\n self.message[\"text\"] = \"Vous avez cliqué {} fois.\".format(self.nb_clic)",
"def nao_tem_passageiros(self):\n return self.counter.ja_viajaram == self.counter.num_passageiros",
"def possessif(nom):\n\n CA = nom[1]\n\n\n rand = randint(0,5)\n\n if CA == \"-1\" or CA == \"-3\" or CA == \"-5\" or CA == \"-7\" or CA == \"-8\" or CA == \"-4\" or Premiere_lettre_voyelle(nom[0]):\n if rand == 0:\n return \"mon \"\n elif rand == 1:\n return \"ton \"\n elif rand == 2:\n return \"son \"\n elif rand == 3:\n return \"notre \"\n elif rand == 4:\n return \"votre \"\n elif rand == 5:\n return \"leur \"\n \n elif (CA == \"-2\" or CA == \"-6\" or CA == \"-9\"):\n if rand == 0:\n return \"ma \"\n elif rand == 1:\n return \"ta \"\n elif rand == 2:\n return \"sa \"\n elif rand == 3:\n return \"notre \"\n elif rand == 4:\n return \"votre \"\n elif rand == 5:\n return \"leur \"\n else:\n return False",
"def mostrarBicicletasDisponiveis(self) -> int:\n estoque_atual = Loja().mostrarEstoque()\n print(f'Bicicletas disponíveis: {estoque_atual}')\n return estoque_atual",
"def affichage_creation_tournoi():\n nom = \"\"\n lieu = \"\"\n date = \"\"\n nb_tours = 4\n joueurs = []\n temps = \"\"\n note = \"\"\n\n print(\"\\n---------------------------\")\n while len(nom) == 0:\n try:\n nom = str(input(\"\\nNom : \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi un nom valide.\")\n sl(2)\n continue\n\n print(\"\\n---------------------------\")\n while len(lieu) == 0:\n try:\n lieu = str(input(\"\\nLieu : \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi un lieu valide.\")\n sl(2)\n continue\n\n print(\"\\n---------------------------\")\n while len(date) == 0:\n try:\n date = str(input(\"\\nDate\\nFormat : jj/mm/aaaa : \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi une date valide.\")\n sl(2)\n continue\n test_date = OutilsControleurs.test_date(date)\n if test_date == 0:\n print(\"\\nVous avez saisi une valeur trop grande.\")\n date = \"\"\n if test_date == 1:\n print(\"\\nVous avez saisi une valeur trop petite.\")\n date = \"\"\n if test_date == 2:\n break\n if test_date == 3:\n print(\"\\nVous avez saisi un format de date incorrect.\")\n date = \"\"\n\n print(\"\\n---------------------------\")\n nb_tours_modif = \"\"\n while nb_tours_modif != 2 or nb_tours_modif != 1:\n try:\n print(\"\\nNombre de tours\\nPar default le nombre est de 4\\nVoulez-vous modifier cette valeur ?\")\n nb_tours_modif = int(input(\"\\n1 - Oui\\n2 - Non\\n\\nVotre choix: \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi un nombre valide.\")\n sl(2)\n continue\n if nb_tours_modif == 1:\n while nb_tours == 4:\n try:\n nb_tours = int(input(\"\\nNombre de tours : \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi un nombre valide.\")\n sl(2)\n continue\n if nb_tours == 4:\n break\n break\n if nb_tours_modif == 2:\n break\n\n print(\"\\n---------------------------\\n\\nListe des joueurs :\\n\")\n liste_joueurs_tournois = Joueur.joueurs_tournoi()\n if liste_joueurs_tournois == 0:\n print(\"Il n'y a pas ou pas suffisament de joueurs pour organiser un tounois.\")\n print(\"Veuillez ajouter des joueurs via le menu.\")\n input(\"\\nAppuyer sur entrer pour continuer\")\n return\n\n for arg in liste_joueurs_tournois:\n print(arg)\n x = 8\n while x != 0:\n try:\n joueur = int(input(\"Saisir encore {} indice de joueurs : \".format(x)))\n except ValueError:\n print(\"\\nVous n'avez pas saisi un indice valide.\")\n sl(2)\n continue\n if joueur > 0 and joueur <= len(liste_joueurs_tournois):\n if joueur not in joueurs:\n joueurs.append(joueur)\n else:\n print(\"Vous avez deja saisi ce joueur.\")\n x += 1\n else:\n x += 1\n x -= 1\n\n y = 1\n nom_joueurs = []\n for arg in liste_joueurs_tournois:\n arg = arg[:-15]\n nom_joueurs.append(str(arg).replace(\"Indice joueur : {}\\n \".format(y), \"\").replace(\"\\n \", \"\"))\n y += 1\n joueurs = Joueur.get_joueurs_tournoi(joueurs, nom_joueurs)\n\n print(\"\\n---------------------------\")\n temps_choix = 0\n while temps_choix != 1 or temps_choix != 2 or temps_choix != 3:\n try:\n temps_choix = int(input(\"\\nContrôle de temps\\n1 - Bullet\\\n \\n2 - Blitz\\n3 - Coup rapide\\n\\nVotre choix : \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi une valeur valide.\")\n sl(2)\n continue\n if temps_choix == 1:\n temps = \"Bullet\"\n break\n if temps_choix == 2:\n temps = \"Blitz\"\n break\n if temps_choix == 3:\n temps = \"Coup rapide\"\n break\n\n print(\"\\n---------------------------\")\n while len(note) == 0:\n try:\n note = str(input(\"\\nDescription : \"))\n except ValueError:\n print(\"\\nVous 
n'avez pas saisi une valeur valide.\")\n sl(2)\n continue\n if len(note) == 0:\n break\n return nom, lieu, date, nb_tours, joueurs, temps, note",
"def __mul__(self, autre):\n\t\tif self.__valide and autre.__valide:\n\t\t\tp = polynome()\n\t\t\ta = self.liste_decroissante()\n\t\t\tb = autre.liste_decroissante()\n\t\t\tfor m in a:\n\t\t\t\tfor n in b:\n\t\t\t\t\tp.ajouter_monome(monome.produit(m, n))\n\t\t\treturn p\n\t\telse:\n\t\t\treturn polynome(False)",
"def atender(self):\n\n if self.enfila>0: #Para que atiendan solamente si e que hay alguien en la fila\n\n self.enfila-=1\n self.fila.pop(0) #Saco primer elemento de la fila (Atienden al primer cliente)"
] | [
"0.62046045",
"0.60625553",
"0.60084254",
"0.5907415",
"0.5833081",
"0.5792889",
"0.57927155",
"0.5534922",
"0.55008006",
"0.54808915",
"0.54408365",
"0.54359406",
"0.5429279",
"0.5413033",
"0.5403856",
"0.53419846",
"0.5325032",
"0.53236884",
"0.53136164",
"0.5293597",
"0.5284393",
"0.5265289",
"0.526003",
"0.52320117",
"0.52249384",
"0.5207618",
"0.5206951",
"0.52065897",
"0.5206181",
"0.5186451"
] | 0.63280576 | 0 |
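Usage sketch for the singPlur helper above (the function is restated only so the snippet runs on its own): it picks the singular or plural of the French noun "faute" (mistake) when reporting a score.

```python
def singPlur(repFausses):
    return "faute" if repFausses <= 1 else "fautes"

for wrong in (0, 1, 3):
    print("%d %s" % (wrong, singPlur(wrong)))
# 0 faute
# 1 faute
# 3 fautes
```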
Calculate the fee based on the transaction size and the price per KiB. | def estimate_fee(estimated_size: int, fee_kb: int) -> int:
return int(estimated_size * fee_kb / 1024.0 + 0.5) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fee(self, prices, fee):\n return self.volume(prices) * fee.value / Config.FEE_TOKEN_PRICE",
"def get_fee(self):\n fee = round(self.order_payment.amount * Decimal(0.015), 2)\n return fee",
"def calc_fee(fee_rate, memo=''):\n compiled_memo = compile_memo(memo) if memo else None\n fee = get_fee([], fee_rate, compiled_memo)\n return fee",
"def ebay_fee(sell_price):\r\n\r\n p50 = 0.13 # for amount $50 and lower\r\n p50_to_1000 = 0.05 # for $50.01-$1000\r\n p1000 = 0.02 # for $1000.01 and higher\r\n fee = 0.50 # fee to list item\r\n\r\n if sell_price <= 50:\r\n fee = fee + (sell_price*p50)\r\n elif sell_price <= 1000:\r\n fee = fee + (50*p50) + ((sell_price-50)*p50_to_1000)\r\n else:\r\n fee = fee + (50*p50) + ((1000-50)*p50_to_1000) \\\r\n + ((sell_price-1000)*p1000)\r\n return fee",
"def compute_fee_price(supplier_with_transaction):\n supplier_item = supplier_with_transaction.get('supplier_detail')\n result = 0\n # Check if the session has min threshold and max threshold to get the right value for result\n if supplier_item.get('has_session_fee') and supplier_item.get(\n 'has_minimum_billing_threshold') and supplier_item.get('has_max_session_fee'):\n if supplier_item.get('min_billing_amount', 0) > supplier_item.get('session_fee', 0):\n result = supplier_item.get('min_billing_amount', 0)\n elif supplier_item.get('max_session_fee') > supplier_item['session_fee'] > supplier_item[\n 'min_billing_amount']:\n result = supplier_item.get('session_fee', 0)\n elif supplier_item.get('session_fee', 0) > supplier_item.get('max_session_fee'):\n result = supplier_item.get('max_session_fee')\n # Check for min threshold only to get the min bill\n elif supplier_item.get('has_session_fee') and supplier_item.get('has_minimum_billing_threshold'):\n if supplier_item.get('min_billing_amount') > supplier_item.get('session_fee'):\n result = supplier_item.get('min_billing_amount')\n elif supplier_item.get('session_fee') > supplier_item.get('min_billing_amount'):\n result = supplier_item.get('session_fee')\n return result",
"def _charge_transaction_fee(self,\n context: 'IconScoreContext',\n params: dict,\n status: int,\n step_used: int) -> (int, int):\n version: int = params.get('version', 2)\n from_: 'Address' = params['from']\n\n step_price = context.step_counter.step_price\n\n if version < 3:\n # Support coin transfer based on protocol v2\n # 0.01 icx == 10**16 loop\n # FIXED_FEE(0.01 icx) == step_used(10**6) * step_price(10**10)\n step_used = 10 ** 6\n\n if status == TransactionResult.FAILURE:\n # protocol v2 does not charge a fee for a failed tx\n step_price = 0\n elif context.is_service_flag_on(IconServiceFlag.fee):\n # 0.01 icx == 10**16 loop\n # FIXED_FEE(0.01 icx) == step_used(10**6) * step_price(10**10)\n step_price = 10 ** 10\n\n # Charge a fee to from account\n fee: int = step_used * step_price\n try:\n self._icx_engine.charge_fee(context, from_, fee)\n except BaseException as e:\n if hasattr(e, 'message'):\n message = e.message\n else:\n message = str(e)\n Logger.exception(message, ICON_SERVICE_LOG_TAG)\n step_used = 0\n\n # final step_used and step_price\n return step_used, step_price",
"def get_transfer_fee(value: float) -> float:\n return (value * (0.99 / 100)) + 4.9",
"def calculate_buy_order_size(buy_price: float):\n if CONF.exchange == 'bitmex':\n poi = get_position_info()\n total = get_crypto_balance()['total']\n if CONF.apply_leverage:\n total *= CONF.leverage_default\n if poi is not None:\n pnl = poi['unrealisedGrossPnl'] * CONF.satoshi_factor # negative if loss\n if poi['homeNotional'] < 0:\n size = (total + pnl + abs(poi['homeNotional']) / 0.99) / 1.01\n else:\n size = (total + pnl - (poi['homeNotional']) / 0.99) / 1.01\n else:\n size = total / 1.01\n else:\n size = to_crypto_amount(get_fiat_balance()['total'] / 1.01, buy_price)\n if CONF.exchange == 'kraken':\n # no position and no fiat - so we will buy crypto with crypto\n if size == 0.0:\n size = get_margin_balance()['free'] / 1.01\n # size = get_crypto_balance()['total'] / 1.01\n # kraken fees are a bit higher\n size /= 1.04\n return size if size > MIN_ORDER_SIZE else None",
"def get_fee(inputs: List[UTXO], fee_rate: float, data: Optional[bytes]=None):\n lst_reduce = 0\n if len(inputs) > 0:\n for x in inputs:\n lst_reduce += TX_INPUT_BASE + \\\n (len(x.witness_utxo.script)\n if x.witness_utxo.script else TX_INPUT_PUBKEYHASH)\n\n sum = TX_EMPTY_SIZE + lst_reduce + \\\n len(inputs) + TX_OUTPUT_BASE + TX_OUTPUT_PUBKEYHASH + \\\n TX_OUTPUT_BASE + TX_OUTPUT_PUBKEYHASH\n if data:\n sum = sum + TX_OUTPUT_BASE + len(data)\n fee = sum * fee_rate\n result = fee if fee > MIN_TX_FEE else MIN_TX_FEE\n return result",
"def seller_transaction_fee(self) -> int:\n assert self._transaction_fees is not None, \"Transaction fee not set!\"\n return self._transaction_fees[\"seller_tx_fee\"]",
"def buyer_transaction_fee(self) -> int:\n assert self._transaction_fees is not None, \"Transaction fee not set!\"\n return self._transaction_fees[\"buyer_tx_fee\"]",
"def get_fee_pct(self, contract_type: str) -> Tuple[float, float]:\n if contract_type == 'forex':\n return (0.00002, 0.00002)\n elif contract_type == 'crypto':\n if self.CRYPTO_EXCHANGE == 'binance':\n if self.trade_volume < 50_000:\n return (.001, .001)\n elif self.trade_volume < 100_000:\n return (.0009, .0009)\n elif self.trade_volume < 5000_000:\n return (.0009, .0008)\n elif self.trade_volume < 1_000_000:\n return (.0008, .0007)\n elif self.trade_volume < 5_000_000:\n return (.0007, .0005)\n elif self.trade_volume < 10_000_000:\n return (.0006, .0004)\n elif self.trade_volume < 25_000_000:\n return (.0006, 0)\n elif self.trade_volume < 100_000_000:\n return (.0005, 0)\n elif self.trade_volume < 250_000_000:\n return (.0004, 0)\n elif self.trade_volume < 500_000_000:\n return (.0003, 0)\n else: return (.0002, 0)\n elif self.CRYPTO_EXCHANGE == 'kraken':\n if self.trade_volume < 50_000:\n return (.0026, .0016)\n elif self.trade_volume < 100_000:\n return (.0024, .0014)\n elif self.trade_volume < 250_000:\n return (.0022, .0012)\n elif self.trade_volume < 500_000:\n return (.002, .001)\n elif self.trade_volume < 1_000_000:\n return (.0018, .0008)\n elif self.trade_volume < 2_500_000:\n return (.0016, .0006)\n elif self.trade_volume < 5_000_000:\n return (.0014, .0004)\n elif self.trade_volume < 10_000_000:\n return (.0012, .0002)\n else: return (.001, 0)\n elif self.CRYPTO_EXCHANGE == 'coinbase':\n if self.trade_volume < 10_000:\n return (.005, .005)\n elif self.trade_volume < 50_000:\n return (.0035, .0035)\n elif self.trade_volume < 100_000:\n return (.0025, .0015)\n elif self.trade_volume < 1_000_000:\n return (.002, .001)\n elif self.trade_volume < 10_000_000:\n return (.0018, .0008)\n elif self.trade_volume < 50_000_000:\n return (.0015, .0005)\n elif self.trade_volume < 300_000_000:\n return (.0007, 0)\n elif self.trade_volume < 500_000_000:\n return (.0005, 0)\n else: return (.0004, 0)\n elif self.CRYPTO_EXCHANGE == 'robinhood':\n return (0.0001, 0.0001)\n return (0, 0)",
"def buy_cost(self, buy_price, count):\n fee = 20 if math.floor(count*buy_price*1000*self.fee_count*self.handling_fee) <= 20 else math.ceil(count*buy_price*1000*self.fee_count*self.handling_fee)\n return int(buy_price*1000*count+fee)",
"def calculate_price(self):\n\n cargo_weight = self.cargo.weight\n tax_rate = Decimal(0.18)\n\n untaxed_total = Decimal(cargo_weight) * Decimal(self.price_per_unit_weight)\n\n total_price = (untaxed_total * tax_rate) + untaxed_total\n\n return total_price",
"def do_fee(self,args):\n totalamount,fee,howmanyto,nexttier = bitstamp.fee_schedule()\n print \"Your 30 day volume is: %.5f. Your trade fee is: %.2f%%\" % (totalamount,fee)\n print \"You are $%s away from the next tier of: $%s\" % (howmanyto,nexttier)",
"def calculate_sell_order_size():\n total = get_crypto_balance()['total']\n used = calculate_percentage_used()\n if CONF.apply_leverage:\n total *= CONF.leverage_default\n if CONF.exchange == 'bitmex':\n poi = get_position_info()\n if poi is not None:\n if poi['homeNotional'] > 0:\n pnl = poi['unrealisedGrossPnl'] * CONF.satoshi_factor # negative if loss\n diff = (total - (poi['homeNotional'] * 1.01)) / (100 / CONF.short_in_percent)\n factor = (100 + CONF.short_in_percent) / 100\n size = ((poi['homeNotional'] * factor) + diff) + pnl\n return size if size > MIN_ORDER_SIZE else None\n if used > CONF.short_in_percent:\n return None\n diff = CONF.short_in_percent - used\n if diff <= 0:\n return None\n size = total / (100 / diff)\n size /= 1.01\n # kraken fees are a bit higher\n if CONF.exchange == 'kraken':\n size /= 1.04\n return size if size > MIN_ORDER_SIZE else None",
"def get_fee(self, pair, order_type):\n fees = self.p_state._getvalue()['fees']\n if fees:\n\n return float(fees[self._handler[order_type]][pair]['fee'])\n\n else:\n\n return 0.0",
"def compute_total(price):\n\n quantity = 20\n return price * quantity",
"def calc_fee(self, shares):\n return max(self.commission_min, abs(self.commission_pct * shares))",
"def sell_cost(self, sell_price, count):\n\n g_cost = math.floor(self.g_fee * sell_price * 1000 * count)\n handling_cost = math.ceil(self.handling_fee * self.fee_count * sell_price * 1000 * count)\n new_fee = g_cost + handling_cost\n print(sell_price, self.g_fee, self.handling_fee, self.fee_count, new_fee)\n return int(sell_price*1000*count-new_fee)",
"def _compute_calculate_cost(self):\n for order in self:\n amount_calculate_cost = 0.0\n for line in order.order_line:\n amount_calculate_cost += (line.product_id.standard_price * line.product_uom_qty)\n order.update({\n 'amount_calculate_cost': amount_calculate_cost\n })",
"def total_after_fees(amount, fees=None, is_payer=True):\n if not isinstance(fees, (tuple, list)):\n msg = 'fees must be of type list or tuple'\n raise TypeError(msg)\n if not isinstance(amount, Decimal):\n msg = 'Transaction amount must be of type decimal.Decimal.'\n raise TypeError(msg)\n\n stripe_fee = [f for f in fees if f.name == 'stripe-transaction']\n stripe_fee = stripe_fee[0] if stripe_fee else None\n other_fees = [f for f in fees if f.name != 'stripe-transaction']\n\n ledger = {'charge': {'initial': amount, },\n 'fees': list()}\n\n charge_amount = amount\n multiplier = 1 if is_payer else -1\n for other_fee in other_fees:\n fee_total = other_fee.flat + other_fee.fractional_pct * amount\n fee_total = fee_total\n charge_amount += multiplier * fee_total\n entry = {'name': other_fee.name,\n 'id': other_fee.id,\n 'fee': fee_total.quantize(QUANTIZE_DOLLARS)}\n ledger['fees'].append(entry)\n\n if stripe_fee is not None:\n # Percentages are stored as percentages in the db, convert it to a decimal\n new_charge_amount = (stripe_fee.flat + charge_amount) / (Decimal('1.0000') - stripe_fee.fractional_pct)\n stripe_fee_amount = new_charge_amount - charge_amount\n entry = {'name': stripe_fee.name,\n 'id': stripe_fee.id,\n 'fee': stripe_fee_amount.quantize(QUANTIZE_DOLLARS)}\n ledger['fees'].append(entry)\n charge_amount = new_charge_amount\n\n ledger['charge']['final'] = charge_amount.quantize(QUANTIZE_DOLLARS)\n return ledger",
"def calculate(\n self,\n meat: Meat\n ) -> float:\n result = 0.0\n if meat.size() == '125g':\n result = 2.0\n elif meat.size() == '250g':\n result = 2.5\n elif meat.size() == '380g':\n result = 3.5\n meat.set_price(price=result)\n return result",
"def get_fees(self, ordertype, quantity, price):\n params = {\n 'ordertype': ordertype,\n 'quantity': quantity,\n 'price': price\n }\n ret = self.Request.fetch('calculatefees',params=params)\n print ret\n return 0",
"def calc_feepoints(self, sim, mempoolstate,\n max_wait_delta=60, min_num_pts=20):\n mempool_sizefn = mempoolstate.get_sizefn()\n maxcap = sim.cap.capfn[-1][1]\n minfeepoint = None\n txratepts = list(sim.cap.txbyteratefn)\n txratepts.append((MINRELAYTXFEE, sim.cap.txbyteratefn(MINRELAYTXFEE)))\n txratepts.sort()\n for feerate, txbyterate in txratepts:\n if feerate < sim.stablefeerate:\n continue\n capdelta = maxcap - txbyterate\n assert capdelta > 0\n mempoolsize = mempool_sizefn(feerate)\n if mempoolsize / capdelta < 10800:\n # Roughly 3 hours to clear\n minfeepoint = feerate\n break\n if minfeepoint is None:\n minfeepoint = feerate\n # No need to process transactions with fee rate lower than minfeepoint\n sim.stablefeerate = max(sim.stablefeerate, minfeepoint)\n\n if not self.stats:\n # Use default feepoints - even spacing\n return None\n\n waitfn = self.stats.expectedwaits\n minwait = waitfn._y[-1]\n maxwait = waitfn._y[0]\n wait_delta = min(max_wait_delta,\n (maxwait - minwait) / (min_num_pts - 1))\n wait_delta = max(wait_delta, 1)\n num_pts = 1 + int(round((maxwait - minwait) / wait_delta))\n wait_pts = [minwait + wait_delta*i for i in range(num_pts)]\n feepoints = [int(round(waitfn.inv(wait))) for wait in wait_pts]\n\n maxfeepoint = sim.cap.inv_util(0.05)\n # maxfeepoint must also be at least the 0.95 cap feerate\n for feerate, cap in sim.cap.capfn:\n if cap >= 0.95*maxcap:\n alt_maxfeepoint = feerate\n break\n # maxfeepoint must also be at least so that mempoolsize is \"small\"\n alt_maxfeepoint2 = int(mempool_sizefn.inv(\n 0.1*maxcap*EXPECTED_BLOCK_INTERVAL, use_upper=True))\n maxfeepoint = max(maxfeepoint, alt_maxfeepoint, alt_maxfeepoint2)\n\n minfeepoint = sim.stablefeerate\n\n feepoints.extend([minfeepoint, maxfeepoint])\n feepoints = filter(\n lambda feerate: minfeepoint <= feerate <= maxfeepoint,\n sorted(set(feepoints)))\n return feepoints",
"def convenience_fee_amount(self):\n return self._convenience_fee_amount",
"def calculate_price(self, amount: float, action: str, price:float, contract:str):\n def get_ticked_price(amount, action, price):\n if contract not in self.tick_information:\n return price\n if (action == 'Close'):\n if amount > 0:\n return (price - self.tick_information[contract] * self.tick) * (1-self.fee)\n else:\n return (price + self.tick_information[contract] * self.tick) * (1+self.fee)\n elif action == 'Long':\n return (price + self.tick_information[contract] * self.tick) * (1+self.fee)\n elif action == 'Short':\n return (price - self.tick_information[contract] * self.tick) * (1-self.fee)\n return get_ticked_price(amount, action, price)",
"def budget(self):\n\n budget = (_House.closing_cost*self.vars['after_repair_value']) - self.vars['purchase_price'] - self.vars['profit'] - _House.broker_fee\n return float(round(budget, 2))",
"def total_calculator(description, receipt_input, fees_input, tax_input, tip_input):\n rf = receiptFormat()\n # a dictionary of name(s) and sum of amount\n raw_pairs = [(\n rf.parse_alpha(alpha),\n sum([float(i) for i in rf.parse_numbers(numbers)])\n ) for (alpha, numbers) in re.findall(rf.pattern, receipt_input)]\n # combine all split costs with the people involved\n data = {}\n for (people, amount) in raw_pairs:\n for person in [person.capitalize() for person in people]:\n if not person in data:\n data[person] = round(amount/len(people),2)\n else:\n data[person] += round(amount/len(people),2)\n\n precheck_sum = sum(data.values())\n total_value = round(precheck_sum+tax_input+tip_input+fees_input,2) # prefill the total\n total_input = st.number_input(\"Calculated Total*\",step=10.0,value=total_value) \n return total_input, data",
"def test_calculate_retention_fee():\n assert calculate_retention_fee(2578) == Decimal('128.91')"
] | [
"0.65787834",
"0.6210678",
"0.6102024",
"0.6040983",
"0.60349977",
"0.60058254",
"0.59498155",
"0.59323287",
"0.58386",
"0.5809553",
"0.5750729",
"0.5715536",
"0.5650437",
"0.56102574",
"0.5606875",
"0.5598308",
"0.5562563",
"0.5497193",
"0.5484398",
"0.5471336",
"0.54295206",
"0.54177445",
"0.54174554",
"0.53999186",
"0.53514326",
"0.5341893",
"0.52977484",
"0.5296963",
"0.527277",
"0.5261303"
] | 0.69844055 | 0 |
Guess the transaction size based on the number of inputs and outputs. | def guess_transaction_size(inputs: list, outputs: dict) -> int:
return 11 + 180 * len(inputs) + 34 * len(outputs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def estimateInputSize(scriptSize):\n return (\n 32 + 4 + 1 + 8 + 4 + 4 + wire.varIntSerializeSize(scriptSize) + scriptSize + 4\n )",
"def estimateSerializeSize(scriptSizes, txOuts, changeScriptSize):\n # Generate and sum up the estimated sizes of the inputs.\n txInsSize = 0\n for size in scriptSizes:\n txInsSize += estimateInputSize(size)\n\n inputCount = len(scriptSizes)\n outputCount = len(txOuts)\n changeSize = 0\n if changeScriptSize > 0:\n changeSize = estimateOutputSize(changeScriptSize)\n outputCount += 1\n # 12 additional bytes are for version, locktime and expiry.\n return (\n 12\n + (2 * wire.varIntSerializeSize(inputCount))\n + wire.varIntSerializeSize(outputCount)\n + txInsSize\n + sumOutputSerializeSizes(txOuts)\n + changeSize\n )",
"def estimateOutputSize(scriptSize):\n return 8 + 2 + wire.varIntSerializeSize(scriptSize) + scriptSize",
"def estimateSerializeSizeFromScriptSizes(inputSizes, outputSizes, changeScriptSize):\n # Generate and sum up the estimated sizes of the inputs.\n txInsSize = 0\n for inputSize in inputSizes:\n txInsSize += estimateInputSize(inputSize)\n\n # Generate and sum up the estimated sizes of the outputs.\n txOutsSize = 0\n for outputSize in outputSizes:\n txOutsSize += estimateOutputSize(outputSize)\n\n inputCount = len(inputSizes)\n outputCount = len(outputSizes)\n changeSize = 0\n if changeScriptSize > 0:\n changeSize = estimateOutputSize(changeScriptSize)\n outputCount += 1\n\n # 12 additional bytes are for version, locktime and expiry.\n return (\n 12\n + (2 * varIntSerializeSize(inputCount))\n + varIntSerializeSize(outputCount)\n + txInsSize\n + txOutsSize\n + changeSize\n )",
"def input_size(self):\n raise NotImplementedError()",
"def get_input_size_unet(bottom_size):\n # compute the relation between the input size and the bottom size\n input_size = 4 + 2 * (4 + 2 * (4 + 2 * (4 + 2 * bottom_size)))\n\n return input_size",
"def output_size(self) -> int:\n return self.out_sz",
"def determine_size(self):\n size = np.inf\n while size >= self.n:\n size = np.random.pareto(0.2)\n size = int(math.ceil(size))\n return size",
"def inner_transactions_size(self) -> int:\n return sum(i.catbuffer_size() for i in self.inner_transactions)",
"def _input_size(self):\n return self.embedding_size + self.hidden_size",
"def getSize(self) -> int:\n ...",
"def getSize(self) -> int:\n ...",
"def target_size(self) -> pulumi.Output[float]:\n return pulumi.get(self, \"target_size\")",
"def part1():\n program = read_input()\n root = build_filesystem(program)\n all_sizes = root.make_size_list()\n return sum(size for size in all_sizes if size <= 100000)",
"def calcNumOfWrites(size, seedingFile, currentUnit):\n\tadjust = SeedFiles[seedingFile]\n\tif (currentUnit == 'gb'):\n\t\tsize = size * 1024 # convert to mb\n\tadjust = float(size) / float(adjust)\n\treturn int(math.ceil(adjust))",
"def calc_size(self):\r\n pass",
"def input_size(self):\n return self.env.input_size",
"def sumOutputSerializeSizes(outputs): # outputs []*wire.TxOut) (serializeSize int) {\n serializeSize = 0\n for txOut in outputs:\n serializeSize += txOut.serializeSize()\n return serializeSize",
"def __len__(self) -> int:\n return sum(target.quantity for target in self.target_sizes)",
"def output_mb(self):\n total_output_size = sum([t.shuffle_mb_written for t in self.tasks])\n return total_output_size",
"def outputSize(in_size, kernel_size, stride, padding):\n output = int((in_size - kernel_size + 2 * padding) / stride) + 1\n return output",
"def get_size(self):\n tmpsize = 0\n for variable in self.variables:\n tmpsize += variable.get_size()\n for subchunk in self.subchunks:\n tmpsize += subchunk.get_size()\n return tmpsize",
"def size(self):\n return self.num_inputs, self.num_outputs",
"def calculate_size(\n inputs: DETECTED_INPUT_OUTPUT_TYPES, batch_dim: Optional[int]\n ) -> List[int]:\n\n def nested_list_size(inputs: Sequence[Any]) -> List[int]:\n \"\"\" Flattens nested list size. \"\"\"\n if hasattr(inputs, \"tensors\"):\n return nested_list_size(inputs.tensors) # type: ignore\n if isinstance(inputs[0], dict):\n return nested_list_size(list(inputs[0].items()))\n if hasattr(inputs[0], \"size\") and callable(inputs[0].size):\n return list(inputs[0].size())\n if isinstance(inputs, (list, tuple)):\n return nested_list_size(inputs[0])\n return []\n\n size = []\n # pack_padded_seq and pad_packed_seq store feature into data attribute\n if isinstance(inputs, (list, tuple)) and inputs and hasattr(inputs[0], \"data\"):\n size = list(inputs[0].data.size())\n if batch_dim is not None:\n size = size[:batch_dim] + [-1] + size[batch_dim + 1 :]\n\n elif isinstance(inputs, dict):\n # TODO avoid overwriting the previous size every time?\n for _, output in inputs.items():\n size = list(output.size())\n if batch_dim is not None:\n size = [size[:batch_dim] + [-1] + size[batch_dim + 1 :]]\n\n elif isinstance(inputs, torch.Tensor):\n size = list(inputs.size())\n if batch_dim is not None:\n size[batch_dim] = -1\n\n elif isinstance(inputs, (list, tuple)):\n size = nested_list_size(inputs)\n\n else:\n raise TypeError(\n \"Model contains a layer with an unsupported \"\n f\"input or output type: {inputs}\"\n )\n\n return size",
"def main_chain_size(validator):\n return validator.highest_justified_checkpoint.height + 1",
"def estimate_size(shape):\n total_bytes = reduce(np.multiply, shape) * 8\n return total_bytes / 1E6",
"def SendPacketsSendSize(self) -> int:",
"def get_input_size_with_dependencies(combiner_output_size: int, dependencies: List[str], other_output_features):\n input_size_with_dependencies = combiner_output_size\n for feature_name in dependencies:\n if other_output_features[feature_name].fc_stack.num_layers:\n input_size_with_dependencies += other_output_features[feature_name].fc_stack.output_shape[-1]\n else:\n input_size_with_dependencies += other_output_features[feature_name].input_size\n return input_size_with_dependencies",
"def get_size(self):\n tmpsize = 0\n for variable in self.variables:\n tmpsize += variable.get_size()\n for subchunk in self.subchunks:\n tmpsize += subchunk.get_size()\n self.size.value = tmpsize\n return self.size.value + self.ID.get_size() + self.size.get_size()",
"def calcsize(bc):\r\n\r\n stacksize = 0\r\n for instr in bc:\r\n arg = instr.arg if instr.require_arg() else None\r\n if instr.name != \"NOP\":\r\n stacksize += dis.stack_effect(instr.opcode, arg)\r\n\r\n try:\r\n othersize = bc.compute_stacksize()\r\n except RuntimeError as e:\r\n if hasattr(e, 'stacksize'):\r\n othersize = e.stacksize\r\n else:\r\n othersize = 0\r\n\r\n return max(stacksize, othersize)"
] | [
"0.7029825",
"0.7007875",
"0.6962185",
"0.67556745",
"0.65276957",
"0.64963466",
"0.6445923",
"0.6393347",
"0.6393045",
"0.6308914",
"0.63059366",
"0.63059366",
"0.6296995",
"0.62882775",
"0.6285022",
"0.6279971",
"0.62792987",
"0.62775767",
"0.6254468",
"0.62371427",
"0.62223727",
"0.6183608",
"0.6156725",
"0.6150444",
"0.61333567",
"0.6105557",
"0.60994565",
"0.6088358",
"0.6075951",
"0.6066954"
] | 0.842755 | 0 |
This method handles GET requests to retrieve the status of agents from the Registrar Server. Currently, only the /agents resource is available for GET; all other GET URIs return errors. Requests for a specific agent require a single agent_id parameter identifying the agent to be returned. If the agent_id is not found, a 404 response is returned. | def do_GET(self):
rest_params = common.get_restful_params(self.path)
if rest_params is None:
common.echo_json_response(self, 405, "Not Implemented: Use /agents/ interface")
return
if "agents" not in rest_params:
common.echo_json_response(self, 400, "uri not supported")
logger.warning('GET returning 400 response. uri not supported: ' + self.path)
return
agent_id = rest_params["agents"]
if agent_id is not None:
agent = self.server.db.get_agent(agent_id)
if agent is None:
common.echo_json_response(self, 404, "agent_id not found")
logger.warning('GET returning 404 response. agent_id ' + agent_id + ' not found.')
return
if not agent['active']:
common.echo_json_response(self, 404, "agent_id not yet active")
logger.warning('GET returning 404 response. agent_id ' + agent_id + ' not yet active.')
return
response = {
'aik': agent['aik'],
'ek': agent['ek'],
'ekcert': agent['ekcert'],
'regcount': agent['regcount'],
}
if agent['virtual']:
response['provider_keys']= agent['provider_keys']
common.echo_json_response(self, 200, "Success", response)
logger.info('GET returning 200 response for agent_id:' + agent_id)
else:
# return the available registered uuids from the DB
json_response = self.server.db.get_agent_ids()
common.echo_json_response(self, 200, "Success", {'uuids':json_response})
logger.info('GET returning 200 response for agent_id list')
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get(self):\n rest_params = common.get_restful_params(self.request.uri)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('GET returning 400 response. uri not supported: ' + self.request.path)\n return\n\n agent_id = rest_params[\"agents\"]\n\n if agent_id is not None:\n agent = self.db.get_agent(agent_id)\n if agent is not None:\n response = cloud_verifier_common.process_get_status(agent)\n common.echo_json_response(self, 200, \"Success\", response)\n #logger.info('GET returning 200 response for agent_id: ' + agent_id)\n\n else:\n #logger.info('GET returning 404 response. agent id: ' + agent_id + ' not found.')\n common.echo_json_response(self, 404, \"agent id not found\")\n else:\n # return the available keys in the DB\n json_response = self.db.get_agent_ids()\n common.echo_json_response(self, 200, \"Success\", {'uuids':json_response})\n logger.info('GET returning 200 response for agent_id list')",
"def test_get_agents( self ):\n\n with self.app.app_context():\n url = '/donation/agents'\n\n # Ensure a GET with no saved agents returns 0.\n response = self.test_client.get( url, headers=self.headers )\n self.assertEqual( len( json.loads( response.data.decode( 'utf-8' ) ) ), 0 )\n\n # Create some agents to retrieve.\n agent_models = []\n agent_jsons = get_agent_jsons()\n for agent_json in agent_jsons:\n agent_model = AgentSchema().load( agent_json ).data\n agent_models.append( agent_model )\n database.session.bulk_save_objects( agent_models )\n database.session.commit()\n\n # Ensure GET returns all agents.\n response = self.test_client.get( url, headers=self.headers )\n self.assertEqual( len( json.loads( response.data.decode( 'utf-8' ) ) ), len( agent_jsons ) )",
"def agents_status(self):\n return self._get('agents/status')",
"def get_agent(self, account_id, agent_id, filters=None):\n return self.rest_request.get('accounts/' + str(account_id) +\n '/agents/' + str(agent_id), filters)",
"def show_agent(self, agent, **_params):\r\n return self.get(self.agent_path % (agent), params=_params)",
"def RetrieveAllAgent(**argd):\n flag, ret = CGateway.core.RetrieveAllAgent(argd[\"session\"])\n xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd[\"session\"])\n if xFlag is not None:\n return xFlag\n hmBuilder = []\n for hm in ret:\n hmBuilder.append(hm.ToJsonDict())\n return CGateway._SuccessResponse({'return': hmBuilder})",
"def list_agents(self):\n\n agents = self.vip.rpc.call(CONTROL, \"list_agents\").get(timeout=5)\n versions = self.vip.rpc.call(CONTROL, \"agent_versions\").get(timeout=5)\n status_running = self.status_agents()\n uuid_to_status = {}\n # proc_info has a list of [startproc, endprox]\n for a in agents:\n pinfo = None\n is_running = False\n for uuid, name, proc_info in status_running:\n if a['uuid'] == uuid:\n is_running = proc_info[0] > 0 and proc_info[1] == None\n pinfo = proc_info\n break\n\n uuid_to_status[a['uuid']] = {\n 'is_running': is_running,\n 'version': versions[a['uuid']][1],\n 'process_id': None,\n 'error_code': None,\n 'permissions': {\n 'can_stop': is_running,\n 'can_start': not is_running,\n 'can_restart': True,\n 'can_remove': True\n }\n }\n\n if pinfo:\n uuid_to_status[a['uuid']]['process_id'] = proc_info[0]\n uuid_to_status[a['uuid']]['error_code'] = proc_info[1]\n\n if 'volttroncentral' in a['name'] or \\\n 'vcplatform' in a['name']:\n uuid_to_status[a['uuid']]['permissions']['can_stop'] = False\n uuid_to_status[a['uuid']]['permissions']['can_remove'] = False\n\n # The default agent is stopped health looks like this.\n uuid_to_status[a['uuid']]['health'] = {\n 'status': 'UNKNOWN',\n 'context': None,\n 'last_updated': None\n }\n\n if is_running:\n identity = self.vip.rpc.call(CONTROL, 'agent_vip_identity',\n a['uuid']).get(timeout=30)\n try:\n status = self.vip.rpc.call(identity,\n 'health.get_status').get(\n timeout=5)\n uuid_to_status[a['uuid']]['health'] = status\n except gevent.Timeout:\n _log.error(\"Couldn't get health from {} uuid: {}\".format(\n identity, a['uuid']\n ))\n except Unreachable:\n _log.error(\n \"Couldn't reach agent identity {} uuid: {}\".format(\n identity, a['uuid']\n ))\n for a in agents:\n if a['uuid'] in uuid_to_status.keys():\n _log.debug('UPDATING STATUS OF: {}'.format(a['uuid']))\n a.update(uuid_to_status[a['uuid']])\n return agents",
"def do_PUT(self):\n rest_params = common.get_restful_params(self.path)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('PUT agent returning 400 response. uri not supported: ' + self.path)\n return\n\n agent_id = rest_params[\"agents\"]\n\n if agent_id is None:\n common.echo_json_response(self, 400, \"agent id not found in uri\")\n logger.warning('PUT agent returning 400 response. agent id not found in uri ' + self.path)\n return\n\n try:\n content_length = int(self.headers.get('Content-Length', 0))\n if content_length == 0:\n common.echo_json_response(self, 400, \"Expected non zero content length\")\n logger.warning('PUT for ' + agent_id + ' returning 400 response. Expected non zero content length.')\n return\n\n post_body = self.rfile.read(content_length)\n json_body = json.loads(post_body)\n\n if \"activate\" in rest_params:\n auth_tag=json_body['auth_tag']\n\n agent = self.server.db.get_agent(agent_id)\n if agent is None:\n raise Exception(\"attempting to activate agent before requesting registrar for %s\"%agent_id)\n\n if agent['virtual']:\n raise Exception(\"attempting to activate virtual AIK using physical interface for %s\"%agent_id)\n\n if common.STUB_TPM:\n self.server.db.update_agent(agent_id, 'active',True)\n else:\n ex_mac = crypto.do_hmac(agent['key'],agent_id)\n if ex_mac == auth_tag:\n self.server.db.update_agent(agent_id, 'active',True)\n else:\n raise Exception(\"Auth tag %s does not match expected value %s\"%(auth_tag,ex_mac))\n\n common.echo_json_response(self, 200, \"Success\")\n logger.info('PUT activated: ' + agent_id)\n elif \"vactivate\" in rest_params:\n deepquote = json_body.get('deepquote',None)\n\n agent = self.server.db.get_agent(agent_id)\n if agent is None:\n raise Exception(\"attempting to activate agent before requesting registrar for %s\"%agent_id)\n\n if not agent['virtual']:\n raise Exception(\"attempting to activate physical AIK using virtual interface for %s\"%agent_id)\n\n # get an physical AIK for this host\n registrar_client.init_client_tls(config, 'registrar')\n provider_keys = registrar_client.getKeys(config.get('general', 'provider_registrar_ip'), config.get('general', 'provider_registrar_tls_port'), agent_id)\n # we already have the vaik\n tpm = tpm_obj.getTPM(need_hw_tpm=False,tpm_version=agent['tpm_version'])\n if not tpm.check_deep_quote(hashlib.sha1(agent['key']).hexdigest(),\n agent_id+agent['aik']+agent['ek'],\n deepquote,\n agent['aik'],\n provider_keys['aik']):\n raise Exception(\"Deep quote invalid\")\n\n self.server.db.update_agent(agent_id, 'active',True)\n self.server.db.update_agent(agent_id, 'provider_keys',provider_keys)\n\n common.echo_json_response(self, 200, \"Success\")\n logger.info('PUT activated: ' + agent_id)\n else:\n pass\n except Exception as e:\n common.echo_json_response(self, 400, \"Error: %s\"%e)\n logger.warning(\"PUT for \" + agent_id + \" returning 400 response. Error: %s\"%e)\n logger.exception(e)\n return",
"def test_list_agents(self):\n admin_resource_id = self.agent['id']\n with (self.override_role_and_validate_list(\n admin_resource_id=admin_resource_id)) as ctx:\n ctx.resources = self.agents_client.list_agents(\n id=admin_resource_id)[\"agents\"]",
"def put(self):\n try:\n rest_params = common.get_restful_params(self.request.uri)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('PUT returning 400 response. uri not supported: ' + self.request.path)\n return\n\n agent_id = rest_params[\"agents\"]\n if agent_id is None:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning(\"PUT returning 400 response. uri not supported\")\n\n agent = self.db.get_agent(agent_id)\n\n if agent is not None:\n common.echo_json_response(self, 404, \"agent id not found\")\n logger.info('PUT returning 404 response. agent id: ' + agent_id + ' not found.')\n\n if \"reactivate\" in rest_params:\n agent['operational_state']=cloud_verifier_common.CloudAgent_Operational_State.START\n asyncio.ensure_future(self.process_agent(agent, cloud_verifier_common.CloudAgent_Operational_State.GET_QUOTE))\n common.echo_json_response(self, 200, \"Success\")\n logger.info('PUT returning 200 response for agent id: ' + agent_id)\n elif \"stop\" in rest_params:\n # do stuff for terminate\n logger.debug(\"Stopping polling on %s\"%agent_id)\n self.db.update_agent(agent_id,'operational_state',cloud_verifier_common.CloudAgent_Operational_State.TENANT_FAILED)\n\n common.echo_json_response(self, 200, \"Success\")\n logger.info('PUT returning 200 response for agent id: ' + agent_id)\n else:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning(\"PUT returning 400 response. uri not supported\")\n\n except Exception as e:\n common.echo_json_response(self, 400, \"Exception error: %s\"%e)\n logger.warning(\"PUT returning 400 response. Exception error: %s\"%e)\n logger.exception(e)\n self.finish()",
"def run(self, agent_args=None):\n agent_args = agent_args or {}\n self.neutron.list_agents(**agent_args)",
"def get_agents(self, state=None, agent_id=None):\n query = \"SELECT * FROM agents\"\n \n if state:\n query += \" WHERE state='\" + state + \"'\"\n elif agent_id:\n query += \" WHERE agent_id='\" + str(agent_id) + \"'\"\n \n response = self.execute(query + \";\")\n \n if response:\n if agent_id:\n return response.fetchall()[0]\n else:\n return response.fetchall()\n else:\n return False",
"def server_agent_show(ctx, args):\n for agent_id in args:\n data = ctx.obj.get_agent_by_agent_id(agent_id)\n output_json_data(data)",
"def get_agents(self, account_id, filters=None):\n return self.rest_request.get('accounts/' + str(account_id) +\n '/agents', filters)",
"def get_agent(self, agent_id: str) -> Mapping[str, Any]:\n return self.__get_one_by_id(\"agents\", \"agent_id\", agent_id)",
"def do_DELETE(self):\n rest_params = common.get_restful_params(self.path)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('DELETE agent returning 400 response. uri not supported: ' + self.path)\n return\n\n agent_id = rest_params[\"agents\"]\n\n if agent_id is not None:\n if self.server.db.remove_agent(agent_id):\n #send response\n common.echo_json_response(self, 200, \"Success\")\n return\n else:\n #send response\n common.echo_json_response(self, 404)\n return\n else:\n common.echo_json_response(self, 404)\n return",
"def describe_agents(agentIds=None, filters=None, maxResults=None, nextToken=None):\n pass",
"def do_POST(self):\n rest_params = common.get_restful_params(self.path)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('POST agent returning 400 response. uri not supported: ' + self.path)\n return\n\n agent_id = rest_params[\"agents\"]\n\n if agent_id is None:\n common.echo_json_response(self, 400, \"agent id not found in uri\")\n logger.warning('POST agent returning 400 response. agent id not found in uri ' + self.path)\n return\n\n try:\n content_length = int(self.headers.get('Content-Length', 0))\n if content_length == 0:\n common.echo_json_response(self, 400, \"Expected non zero content length\")\n logger.warning('POST for ' + agent_id + ' returning 400 response. Expected non zero content length.')\n return\n\n post_body = self.rfile.read(content_length)\n json_body = json.loads(post_body)\n\n ek = json_body['ek']\n ek_tpm = json_body['ek_tpm']\n ekcert = json_body['ekcert']\n aik = json_body['aik']\n aik_name = json_body['aik_name']\n tpm_version = int(json_body['tpm_version'])\n\n # try to encrypt the AIK\n tpm = tpm_obj.getTPM(need_hw_tpm=False,tpm_version=tpm_version)\n (blob,key) = tpm.encryptAIK(agent_id,aik,ek,ek_tpm,aik_name)\n # special behavior if we've registered this uuid before\n regcount = 1\n agent = self.server.db.get_agent(agent_id)\n\n if agent is not None:\n\n # keep track of how many ek-ekcerts have registered on this uuid\n regcount = agent['regcount']\n if agent['ek'] != ek or agent['ekcert'] != ekcert:\n logger.warning('WARNING: Overwriting previous registration for this UUID with new ek-ekcert pair!')\n regcount += 1\n\n # force overwrite\n logger.info('Overwriting previous registration for this UUID.')\n # self.server.db.remove_agent(agent_id)\n self.server.db.remove_agent(agent_id)\n # Add values to database\n d={}\n d['ek']=ek\n d['aik']=aik\n d['ekcert']=ekcert\n d['virtual']=int(ekcert=='virtual')\n d['active']=int(False)\n d['key']=key\n d['tpm_version']=tpm_version\n d['provider_keys']={}\n d['regcount']=regcount\n self.server.db.add_agent(agent_id, d)\n response = {\n 'blob': blob,\n }\n common.echo_json_response(self, 200, \"Success\", response)\n\n logger.info('POST returning key blob for agent_id: ' + agent_id)\n return\n except Exception as e:\n common.echo_json_response(self, 400, \"Error: %s\"%e)\n logger.warning(\"POST for \" + agent_id + \" returning 400 response. Error: %s\"%e)\n logger.exception(e)\n return",
"def post(self):\n try:\n rest_params = common.get_restful_params(self.request.uri)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('POST returning 400 response. uri not supported: ' + self.request.path)\n return\n\n agent_id = rest_params[\"agents\"]\n\n if agent_id is not None: # this is for new items\n content_length = len(self.request.body)\n if content_length==0:\n common.echo_json_response(self, 400, \"Expected non zero content length\")\n logger.warning('POST returning 400 response. Expected non zero content length.')\n else:\n json_body = json.loads(self.request.body)\n d = {}\n d['v'] = json_body['v']\n d['ip'] = json_body['cloudagent_ip']\n d['port'] = int(json_body['cloudagent_port'])\n d['operational_state'] = cloud_verifier_common.CloudAgent_Operational_State.START\n d['public_key'] = \"\"\n d['tpm_policy'] = json_body['tpm_policy']\n d['vtpm_policy'] = json_body['vtpm_policy']\n d['metadata'] = json_body['metadata']\n d['ima_whitelist'] = json_body['ima_whitelist']\n d['revocation_key'] = json_body['revocation_key']\n d['tpm_version'] = 0\n d['accept_tpm_hash_algs'] = json_body['accept_tpm_hash_algs']\n d['accept_tpm_encryption_algs'] = json_body['accept_tpm_encryption_algs']\n d['accept_tpm_signing_algs'] = json_body['accept_tpm_signing_algs']\n d['hash_alg'] = \"\"\n d['enc_alg'] = \"\"\n d['sign_alg'] = \"\"\n\n new_agent = self.db.add_agent(agent_id,d)\n\n # don't allow overwriting\n if new_agent is None:\n common.echo_json_response(self, 409, \"Agent of uuid %s already exists\"%(agent_id))\n logger.warning(\"Agent of uuid %s already exists\"%(agent_id))\n else:\n asyncio.ensure_future(self.process_agent(new_agent, cloud_verifier_common.CloudAgent_Operational_State.GET_QUOTE))\n common.echo_json_response(self, 200, \"Success\")\n logger.info('POST returning 200 response for adding agent id: ' + agent_id)\n else:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning(\"POST returning 400 response. uri not supported\")\n except Exception as e:\n common.echo_json_response(self, 400, \"Exception error: %s\"%e)\n logger.warning(\"POST returning 400 response. Exception error: %s\"%e)\n logger.exception(e)\n\n self.finish()",
"def get_agents(self):\n if self.retrieved:\n raise errors.IllegalState('List has already been retrieved.')\n self.retrieved = True\n return objects.AgentList(self._results, runtime=self._runtime)",
"def compute_agents(self):\n path = '/os-agents'\n res = self.compute.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Get openstack compute agents: %s' % truncate(res))\n return res[0]['agents']",
"def doRegistrarList(\n registrar_ip: str, registrar_port: str, tls_context: Optional[ssl.SSLContext]\n) -> Optional[Dict[str, Any]]:\n client = RequestsClient(f\"{registrar_ip}:{registrar_port}\", True, tls_context=tls_context)\n response = client.get(f\"/v{api_version}/agents/\")\n response_body: Dict[str, Any] = response.json()\n\n if response.status_code != 200:\n logger.warning(\"Registrar returned: %s Unexpected response from registrar.\", response.status_code)\n keylime_logging.log_http_response(logger, logging.WARNING, response_body)\n return None\n\n return response_body",
"def test_show_agent(self):\n with self.override_role():\n self.agents_client.show_agent(self.agent['id'])",
"def delete(self):\n rest_params = common.get_restful_params(self.request.uri)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n return\n\n agent_id = rest_params[\"agents\"]\n\n if agent_id is None:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('DELETE returning 400 response. uri not supported: ' + self.request.path)\n\n agent = self.db.get_agent(agent_id)\n\n if agent is None:\n common.echo_json_response(self, 404, \"agent id not found\")\n logger.info('DELETE returning 404 response. agent id: ' + agent_id + ' not found.')\n return\n\n op_state = agent['operational_state']\n if op_state == cloud_verifier_common.CloudAgent_Operational_State.SAVED or \\\n op_state == cloud_verifier_common.CloudAgent_Operational_State.FAILED or \\\n op_state == cloud_verifier_common.CloudAgent_Operational_State.TERMINATED or \\\n op_state == cloud_verifier_common.CloudAgent_Operational_State.TENANT_FAILED or \\\n op_state == cloud_verifier_common.CloudAgent_Operational_State.INVALID_QUOTE:\n self.db.remove_agent(agent_id)\n common.echo_json_response(self, 200, \"Success\")\n logger.info('DELETE returning 200 response for agent id: ' + agent_id)\n else:\n self.db.update_agent(agent_id, 'operational_state',cloud_verifier_common.CloudAgent_Operational_State.TERMINATED)\n common.echo_json_response(self, 202, \"Accepted\")\n logger.info('DELETE returning 202 response for agent id: ' + agent_id)",
"def list(self):\n response = self._client.get('scanners/1/agents')\n return AgentList.from_json(response.text)",
"def init_agent(self, kwargs):\n\n exp_params = [('agents_info', is_string),\n ('ip', is_string)]\n try:\n agents_info, agent_ip = check_arguments(exp_params, kwargs)\n agents_info = simplejson.loads(agents_info)\n except Exception as ex:\n return HttpErrorResponse(\"%s\" % ex)\n\n self.logger.info('Setting agent environment')\n\n target_dir = self.VAR_CACHE\n with open(join(target_dir, 'agents.json'), 'w') as outfile:\n simplejson.dump(agents_info, outfile)\n\n agent_role = [i['role'] for i in agents_info if i['ip'] == agent_ip][0]\n master_ip = [i['ip'] for i in agents_info if i['role'] == 'master'][0]\n\n self.env.update({'MY_IP':agent_ip})\n self.env.update({'MY_ROLE':agent_role})\n self.env.update({'MASTER_IP':master_ip})\n\n self.logger.info('Agent initialized')\n return HttpJsonResponse()",
"def getAgent(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def test_custom_query_response_descriptor_octopus_server_web_api_actions_list_event_agents_responder(self):\n pass",
"def get_agents_stats(self, account_id, filters=None):\n return self.rest_request.get('accounts/' + str(account_id) +\n '/agents/stats', filters)",
"def retrieve(cls: Type[T], agent_id: int, datastore: Datastore) -> T:\n agent = cls.optionally_retrieve(agent_id, datastore)\n if agent is None:\n raise NotFound\n return agent"
] | [
"0.81517375",
"0.65542585",
"0.6447016",
"0.6362145",
"0.6153569",
"0.6149206",
"0.60159767",
"0.59956706",
"0.5976985",
"0.59005296",
"0.58650035",
"0.5843438",
"0.5789323",
"0.5787094",
"0.57232255",
"0.56539154",
"0.5580563",
"0.5537036",
"0.551887",
"0.54844165",
"0.5474555",
"0.54382527",
"0.54333323",
"0.5427865",
"0.53825986",
"0.5322905",
"0.52888507",
"0.52315927",
"0.51682276",
"0.5148543"
] | 0.83628625 | 0 |
This method handles DELETE requests to remove agents from the Registrar Server. Currently, only the /agents resource is available for DELETE; all other DELETE URIs return errors. Requests require a single agent_id parameter identifying the agent to be deleted. | def do_DELETE(self):
rest_params = common.get_restful_params(self.path)
if rest_params is None:
common.echo_json_response(self, 405, "Not Implemented: Use /agents/ interface")
return
if "agents" not in rest_params:
common.echo_json_response(self, 400, "uri not supported")
logger.warning('DELETE agent returning 400 response. uri not supported: ' + self.path)
return
agent_id = rest_params["agents"]
if agent_id is not None:
if self.server.db.remove_agent(agent_id):
#send response
common.echo_json_response(self, 200, "Success")
return
else:
#send response
common.echo_json_response(self, 404)
return
else:
common.echo_json_response(self, 404)
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete(self):\n rest_params = common.get_restful_params(self.request.uri)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n return\n\n agent_id = rest_params[\"agents\"]\n\n if agent_id is None:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('DELETE returning 400 response. uri not supported: ' + self.request.path)\n\n agent = self.db.get_agent(agent_id)\n\n if agent is None:\n common.echo_json_response(self, 404, \"agent id not found\")\n logger.info('DELETE returning 404 response. agent id: ' + agent_id + ' not found.')\n return\n\n op_state = agent['operational_state']\n if op_state == cloud_verifier_common.CloudAgent_Operational_State.SAVED or \\\n op_state == cloud_verifier_common.CloudAgent_Operational_State.FAILED or \\\n op_state == cloud_verifier_common.CloudAgent_Operational_State.TERMINATED or \\\n op_state == cloud_verifier_common.CloudAgent_Operational_State.TENANT_FAILED or \\\n op_state == cloud_verifier_common.CloudAgent_Operational_State.INVALID_QUOTE:\n self.db.remove_agent(agent_id)\n common.echo_json_response(self, 200, \"Success\")\n logger.info('DELETE returning 200 response for agent id: ' + agent_id)\n else:\n self.db.update_agent(agent_id, 'operational_state',cloud_verifier_common.CloudAgent_Operational_State.TERMINATED)\n common.echo_json_response(self, 202, \"Accepted\")\n logger.info('DELETE returning 202 response for agent id: ' + agent_id)",
"def delete_agent(self, agent):\r\n return self.delete(self.agent_path % (agent))",
"async def delete(self):\r\n try:\r\n data = await self.request.json()\r\n agent_uuid = data.get(\"agent_uuid\")\r\n agent_to_delete = Agent.filter(Agent.uuid == agent_uuid).first()\r\n sys_id = (\r\n System.select().where(System.agent_uuid == agent_to_delete).execute()\r\n )\r\n if sys_id:\r\n logger.error(\"Agent not deleted\")\r\n return web.Response(text=\"Agent not deleted.\")\r\n else:\r\n agent_to_delete.delete_instance()\r\n logger.info(\"Agent deleted successfully\")\r\n return web.Response(text=\"Agent deleted successfully.\")\r\n except Exception as ex:\r\n error_message = str(ex)\r\n logger.error(error_message)\r\n return web.Response(text=error_message, status=500)",
"def doRegistrarDelete(\n registrar_ip: str, registrar_port: str, agent_id: str, tls_context: Optional[ssl.SSLContext]\n) -> Dict[str, Any]:\n\n client = RequestsClient(f\"{registrar_ip}:{registrar_port}\", True, tls_context=tls_context)\n response = client.delete(f\"/v{api_version}/agents/{agent_id}\")\n response_body: Dict[str, Any] = response.json()\n\n if response.status_code == 200:\n logger.debug(\"Registrar deleted.\")\n else:\n logger.warning(\"Status command response: %s Unexpected response from registrar.\", response.status_code)\n keylime_logging.log_http_response(logger, logging.WARNING, response_body)\n\n return response_body",
"def delete(self):\n self.model.remove_agents(self)",
"def deleteAgent(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def delete(self, agent_id):\n self._client.delete('scanners/1/agents/%(agent_id)s', path_params={'agent_id': agent_id})\n return True",
"def remove_agents(self, agents):\n for agent in list(make_list(agents)): # Soft copy as list is changed\n self._agents.remove(agent)\n agent.envs.remove(self)",
"def remove_agent(self):\n self.model.grid.remove_agent(self)\n self.model.schedule.remove(self)\n\n if self.agent_type == \"zombie\":\n self.model.infected -= 1\n elif self.agent_type == \"human\":\n self.model.susceptible -= 1\n del self",
"async def stop_agents_controller(self, request):\n coroutines = self.stop_agents()\n await asyncio.gather(*coroutines)\n return {\"status\": \"done\"}",
"def catalog_delete(self, args):\n headers = DEFAULT_HEADERS.copy()\n headers.update(args.headers)\n try:\n catalog = self.server.connect_ermrest(args.id)\n catalog.delete(args.path, headers)\n except HTTPError as e:\n if e.response.status_code == requests.codes.not_found:\n raise ResourceException('Catalog not found', e)\n else:\n raise e",
"def _remove(self, arn, targets):\n # TODO: In the future, add support for the optional Port and\n # AvailabilityZone parameters. For now, keeping this dead simple.\n targets = [{'Id': t} for t in targets]\n\n try:\n yield self.api_call(\n self.elbv2_conn.deregister_targets,\n TargetGroupArn=arn,\n Targets=targets)\n except botocore.exceptions.ClientError as e:\n raise exceptions.UnrecoverableActorFailure(str(e))",
"def test_delete_router_from_l3_agent(self):\n self.agents_client.create_router_on_l3_agent(\n self.agent['id'], router_id=self.router['id'])\n self.addCleanup(\n test_utils.call_and_ignore_notfound_exc,\n self.agents_client.delete_router_from_l3_agent,\n self.agent['id'], router_id=self.router['id'])\n\n with self.override_role():\n self.agents_client.delete_router_from_l3_agent(\n self.agent['id'], router_id=self.router['id'])",
"def remove_router_from_l3_agent(self, l3_agent, router_id):\r\n return self.delete((self.agent_path + self.L3_ROUTERS + \"/%s\") % (\r\n l3_agent, router_id))",
"def delete(self, *args, **kwargs):\n self.request(\"delete\", *args, **kwargs)",
"def delete(self, ids):\r\n params = base.get_params(None, locals())\r\n\r\n request = http.Request('DELETE', self.get_url(), params)\r\n return request, parsers.parse_json",
"def delete(self, ids):\r\n params = base.get_params(None, locals())\r\n\r\n request = http.Request('DELETE', self.get_url(), params)\r\n return request, parsers.parse_json",
"def delete(self, ids):\r\n params = base.get_params(None, locals())\r\n\r\n request = http.Request('DELETE', self.get_url(), params)\r\n return request, parsers.parse_json",
"def delete(self, ids):\r\n params = base.get_params(None, locals())\r\n\r\n request = http.Request('DELETE', self.get_url(), params)\r\n return request, parsers.parse_json",
"def delete(self, ids):\r\n params = base.get_params(None, locals())\r\n\r\n request = http.Request('DELETE', self.get_url(), params)\r\n return request, parsers.parse_json",
"def delete(self, ids):\r\n params = base.get_params(None, locals())\r\n\r\n request = http.Request('DELETE', self.get_url(), params)\r\n return request, parsers.parse_json",
"def delete(self, ids):\r\n params = base.get_params(None, locals())\r\n\r\n request = http.Request('DELETE', self.get_url(), params)\r\n return request, parsers.parse_json",
"def agent_cleanup(self):\n pass",
"def delete(self, ids):\r\n params = base.get_params(None, locals())\r\n request = http.Request('DELETE', self.get_url(), params)\r\n return request, parsers.parse_json",
"def delete(self, ids):\r\n params = base.get_params(None, locals())\r\n request = http.Request('DELETE', self.get_url(), params)\r\n return request, parsers.parse_json",
"def delete(self, ids):\r\n params = base.get_params(None, locals())\r\n request = http.Request('DELETE', self.get_url(), params)\r\n return request, parsers.parse_json",
"def delete(self, ids):\r\n params = base.get_params(None, locals())\r\n request = http.Request('DELETE', self.get_url(), params)\r\n return request, parsers.parse_json",
"def revoke_agent(request):\n from .models import Agent\n\n request.agent = Agent.untrusted_agent(request.user)",
"def delete(ctx):\n delete_listeners(ctx)\n delete_balancer(ctx)\n delete_target_groups(ctx)\n\n ctx.info('Load balancers deletion completed.')",
"def delete(self, *args, **kwargs):\n return self.handle_delete_request()"
] | [
"0.83328235",
"0.717082",
"0.70636654",
"0.6921048",
"0.69137836",
"0.670038",
"0.66952145",
"0.6065606",
"0.5701796",
"0.56838745",
"0.5650526",
"0.5548786",
"0.54742306",
"0.5472352",
"0.54185456",
"0.54131997",
"0.54131997",
"0.54131997",
"0.54131997",
"0.54131997",
"0.54131997",
"0.54131997",
"0.5393426",
"0.53859293",
"0.53859293",
"0.53859293",
"0.53859293",
"0.53402865",
"0.5334774",
"0.530513"
] | 0.8437115 | 0 |
This method handles POST requests to add agents to the Registrar Server. Currently, only the /agents resource is available for POST; all other POST URIs return errors. POST requests require an agent_id identifying the agent to add, and a JSON body containing the agent's registration data (ek, ek_tpm, ekcert, aik, aik_name, tpm_version). | def do_POST(self):
rest_params = common.get_restful_params(self.path)
if rest_params is None:
common.echo_json_response(self, 405, "Not Implemented: Use /agents/ interface")
return
if "agents" not in rest_params:
common.echo_json_response(self, 400, "uri not supported")
logger.warning('POST agent returning 400 response. uri not supported: ' + self.path)
return
agent_id = rest_params["agents"]
if agent_id is None:
common.echo_json_response(self, 400, "agent id not found in uri")
logger.warning('POST agent returning 400 response. agent id not found in uri ' + self.path)
return
try:
content_length = int(self.headers.get('Content-Length', 0))
if content_length == 0:
common.echo_json_response(self, 400, "Expected non zero content length")
logger.warning('POST for ' + agent_id + ' returning 400 response. Expected non zero content length.')
return
post_body = self.rfile.read(content_length)
json_body = json.loads(post_body)
ek = json_body['ek']
ek_tpm = json_body['ek_tpm']
ekcert = json_body['ekcert']
aik = json_body['aik']
aik_name = json_body['aik_name']
tpm_version = int(json_body['tpm_version'])
# try to encrypt the AIK
tpm = tpm_obj.getTPM(need_hw_tpm=False,tpm_version=tpm_version)
(blob,key) = tpm.encryptAIK(agent_id,aik,ek,ek_tpm,aik_name)
# special behavior if we've registered this uuid before
regcount = 1
agent = self.server.db.get_agent(agent_id)
if agent is not None:
# keep track of how many ek-ekcerts have registered on this uuid
regcount = agent['regcount']
if agent['ek'] != ek or agent['ekcert'] != ekcert:
logger.warning('WARNING: Overwriting previous registration for this UUID with new ek-ekcert pair!')
regcount += 1
# force overwrite
logger.info('Overwriting previous registration for this UUID.')
# self.server.db.remove_agent(agent_id)
self.server.db.remove_agent(agent_id)
# Add values to database
d={}
d['ek']=ek
d['aik']=aik
d['ekcert']=ekcert
d['virtual']=int(ekcert=='virtual')
d['active']=int(False)
d['key']=key
d['tpm_version']=tpm_version
d['provider_keys']={}
d['regcount']=regcount
self.server.db.add_agent(agent_id, d)
response = {
'blob': blob,
}
common.echo_json_response(self, 200, "Success", response)
logger.info('POST returning key blob for agent_id: ' + agent_id)
return
except Exception as e:
common.echo_json_response(self, 400, "Error: %s"%e)
logger.warning("POST for " + agent_id + " returning 400 response. Error: %s"%e)
logger.exception(e)
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def post(self):\n try:\n rest_params = common.get_restful_params(self.request.uri)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('POST returning 400 response. uri not supported: ' + self.request.path)\n return\n\n agent_id = rest_params[\"agents\"]\n\n if agent_id is not None: # this is for new items\n content_length = len(self.request.body)\n if content_length==0:\n common.echo_json_response(self, 400, \"Expected non zero content length\")\n logger.warning('POST returning 400 response. Expected non zero content length.')\n else:\n json_body = json.loads(self.request.body)\n d = {}\n d['v'] = json_body['v']\n d['ip'] = json_body['cloudagent_ip']\n d['port'] = int(json_body['cloudagent_port'])\n d['operational_state'] = cloud_verifier_common.CloudAgent_Operational_State.START\n d['public_key'] = \"\"\n d['tpm_policy'] = json_body['tpm_policy']\n d['vtpm_policy'] = json_body['vtpm_policy']\n d['metadata'] = json_body['metadata']\n d['ima_whitelist'] = json_body['ima_whitelist']\n d['revocation_key'] = json_body['revocation_key']\n d['tpm_version'] = 0\n d['accept_tpm_hash_algs'] = json_body['accept_tpm_hash_algs']\n d['accept_tpm_encryption_algs'] = json_body['accept_tpm_encryption_algs']\n d['accept_tpm_signing_algs'] = json_body['accept_tpm_signing_algs']\n d['hash_alg'] = \"\"\n d['enc_alg'] = \"\"\n d['sign_alg'] = \"\"\n\n new_agent = self.db.add_agent(agent_id,d)\n\n # don't allow overwriting\n if new_agent is None:\n common.echo_json_response(self, 409, \"Agent of uuid %s already exists\"%(agent_id))\n logger.warning(\"Agent of uuid %s already exists\"%(agent_id))\n else:\n asyncio.ensure_future(self.process_agent(new_agent, cloud_verifier_common.CloudAgent_Operational_State.GET_QUOTE))\n common.echo_json_response(self, 200, \"Success\")\n logger.info('POST returning 200 response for adding agent id: ' + agent_id)\n else:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning(\"POST returning 400 response. uri not supported\")\n except Exception as e:\n common.echo_json_response(self, 400, \"Exception error: %s\"%e)\n logger.warning(\"POST returning 400 response. Exception error: %s\"%e)\n logger.exception(e)\n\n self.finish()",
"async def post(self):\r\n data = await self.request.json()\r\n register_date = data[\"register_date\"]\r\n ip_address = data[\"ip_address\"]\r\n try:\r\n Agent.create(register_date=register_date, ip_address=ip_address)\r\n response_obj = {\"status\": \"success\"}\r\n return web.Response(text=str(response_obj), status=201)\r\n except Exception as exception:\r\n response_obj = {\"status\": \"failed\", \"reason\": exception}\r\n error_message = str(exception)\r\n logger.error(error_message)\r\n return web.Response(text=str(response_obj), status=500)",
"def do_PUT(self):\n rest_params = common.get_restful_params(self.path)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('PUT agent returning 400 response. uri not supported: ' + self.path)\n return\n\n agent_id = rest_params[\"agents\"]\n\n if agent_id is None:\n common.echo_json_response(self, 400, \"agent id not found in uri\")\n logger.warning('PUT agent returning 400 response. agent id not found in uri ' + self.path)\n return\n\n try:\n content_length = int(self.headers.get('Content-Length', 0))\n if content_length == 0:\n common.echo_json_response(self, 400, \"Expected non zero content length\")\n logger.warning('PUT for ' + agent_id + ' returning 400 response. Expected non zero content length.')\n return\n\n post_body = self.rfile.read(content_length)\n json_body = json.loads(post_body)\n\n if \"activate\" in rest_params:\n auth_tag=json_body['auth_tag']\n\n agent = self.server.db.get_agent(agent_id)\n if agent is None:\n raise Exception(\"attempting to activate agent before requesting registrar for %s\"%agent_id)\n\n if agent['virtual']:\n raise Exception(\"attempting to activate virtual AIK using physical interface for %s\"%agent_id)\n\n if common.STUB_TPM:\n self.server.db.update_agent(agent_id, 'active',True)\n else:\n ex_mac = crypto.do_hmac(agent['key'],agent_id)\n if ex_mac == auth_tag:\n self.server.db.update_agent(agent_id, 'active',True)\n else:\n raise Exception(\"Auth tag %s does not match expected value %s\"%(auth_tag,ex_mac))\n\n common.echo_json_response(self, 200, \"Success\")\n logger.info('PUT activated: ' + agent_id)\n elif \"vactivate\" in rest_params:\n deepquote = json_body.get('deepquote',None)\n\n agent = self.server.db.get_agent(agent_id)\n if agent is None:\n raise Exception(\"attempting to activate agent before requesting registrar for %s\"%agent_id)\n\n if not agent['virtual']:\n raise Exception(\"attempting to activate physical AIK using virtual interface for %s\"%agent_id)\n\n # get an physical AIK for this host\n registrar_client.init_client_tls(config, 'registrar')\n provider_keys = registrar_client.getKeys(config.get('general', 'provider_registrar_ip'), config.get('general', 'provider_registrar_tls_port'), agent_id)\n # we already have the vaik\n tpm = tpm_obj.getTPM(need_hw_tpm=False,tpm_version=agent['tpm_version'])\n if not tpm.check_deep_quote(hashlib.sha1(agent['key']).hexdigest(),\n agent_id+agent['aik']+agent['ek'],\n deepquote,\n agent['aik'],\n provider_keys['aik']):\n raise Exception(\"Deep quote invalid\")\n\n self.server.db.update_agent(agent_id, 'active',True)\n self.server.db.update_agent(agent_id, 'provider_keys',provider_keys)\n\n common.echo_json_response(self, 200, \"Success\")\n logger.info('PUT activated: ' + agent_id)\n else:\n pass\n except Exception as e:\n common.echo_json_response(self, 400, \"Error: %s\"%e)\n logger.warning(\"PUT for \" + agent_id + \" returning 400 response. Error: %s\"%e)\n logger.exception(e)\n return",
"async def post(self):\r\n\r\n data = await self.request.json()\r\n agent_uuid = data.get(\"agent_uuid\")\r\n agent = Agent.get(Agent.uuid == agent_uuid)\r\n if not agent:\r\n response_obj = {\"status\": \"failed\", \"reason\": \"agent not present\"}\r\n logger.info(\"agent not present\")\r\n return web.Response(text=str(response_obj), status=404)\r\n try:\r\n System.create(agent_uuid=agent)\r\n logger.info(\"System created successfully!!!\")\r\n return web.Response(text=\"Successful\", status=201)\r\n except Exception as ex:\r\n response_obj = {\"status\": \"failed\", \"reason\": \"agent not added\"}\r\n error_message = str(ex)\r\n logger.error(error_message)\r\n return web.Response(text=str(response_obj), status=500)",
"def doRegisterAgent(\n registrar_ip: str,\n registrar_port: str,\n agent_id: str,\n ek_tpm: bytes,\n ekcert: Optional[Union[bytes, str]],\n aik_tpm: bytes,\n mtls_cert: Optional[bytes] = None,\n contact_ip: Optional[str] = None,\n contact_port: Optional[str] = None,\n) -> Optional[str]:\n\n data: Dict[str, Any] = {\n \"ekcert\": ekcert,\n \"aik_tpm\": aik_tpm,\n }\n if ekcert is None or ekcert == \"emulator\":\n data[\"ek_tpm\"] = ek_tpm\n\n if mtls_cert is not None:\n data[\"mtls_cert\"] = mtls_cert\n else:\n data[\"mtls_cert\"] = \"disabled\"\n logger.error(\"Most actions require the agent to have mTLS enabled, but no cert was provided!\")\n if contact_ip is not None:\n data[\"ip\"] = contact_ip\n if contact_port is not None:\n data[\"port\"] = contact_port\n\n response = None\n try:\n # The agent accesses the registrar without mTLS, meaning without client\n # certificate\n # TODO the registrar could be accessed using TLS, but without client\n # certificate verification. Currently it is accessed without TLS at all\n client = RequestsClient(f\"{registrar_ip}:{registrar_port}\", False)\n response = client.post(f\"/v{api_version}/agents/{agent_id}\", data=json.dumps(data))\n response_body = response.json()\n\n if response.status_code != 200:\n logger.error(\"Error: unexpected http response code from Registrar Server: %s\", response.status_code)\n keylime_logging.log_http_response(logger, logging.ERROR, response_body)\n return None\n\n logger.info(\"Agent registration requested for %s\", agent_id)\n\n if \"results\" not in response_body:\n logger.critical(\"Error: unexpected http response body from Registrar Server: %s\", response.status_code)\n return None\n\n if \"blob\" not in response_body[\"results\"]:\n logger.critical(\"Error: did not receive blob from Registrar Server: %s\", response.status_code)\n return None\n\n return str(response_body[\"results\"][\"blob\"])\n except Exception as e:\n if response and response.status_code == 503:\n logger.error(\"Agent cannot establish connection to registrar at %s:%s\", registrar_ip, registrar_port)\n sys.exit()\n else:\n logger.exception(e)\n\n return None",
"def put(self):\n try:\n rest_params = common.get_restful_params(self.request.uri)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('PUT returning 400 response. uri not supported: ' + self.request.path)\n return\n\n agent_id = rest_params[\"agents\"]\n if agent_id is None:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning(\"PUT returning 400 response. uri not supported\")\n\n agent = self.db.get_agent(agent_id)\n\n if agent is not None:\n common.echo_json_response(self, 404, \"agent id not found\")\n logger.info('PUT returning 404 response. agent id: ' + agent_id + ' not found.')\n\n if \"reactivate\" in rest_params:\n agent['operational_state']=cloud_verifier_common.CloudAgent_Operational_State.START\n asyncio.ensure_future(self.process_agent(agent, cloud_verifier_common.CloudAgent_Operational_State.GET_QUOTE))\n common.echo_json_response(self, 200, \"Success\")\n logger.info('PUT returning 200 response for agent id: ' + agent_id)\n elif \"stop\" in rest_params:\n # do stuff for terminate\n logger.debug(\"Stopping polling on %s\"%agent_id)\n self.db.update_agent(agent_id,'operational_state',cloud_verifier_common.CloudAgent_Operational_State.TENANT_FAILED)\n\n common.echo_json_response(self, 200, \"Success\")\n logger.info('PUT returning 200 response for agent id: ' + agent_id)\n else:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning(\"PUT returning 400 response. uri not supported\")\n\n except Exception as e:\n common.echo_json_response(self, 400, \"Exception error: %s\"%e)\n logger.warning(\"PUT returning 400 response. Exception error: %s\"%e)\n logger.exception(e)\n self.finish()",
"def _register_agent(self, agent, agent_avatar: AgentBody):\n\n # Random seed for agent between 1 and 10000000, might need to be adjusted still\n agent_seed = self.__rnd_gen.randint(1, 1000000)\n\n # check if the agent can be succesfully placed at that location\n self.__validate_obj_placement(agent_avatar)\n\n # Add agent to registered agents\n self.__registered_agents[agent_avatar.obj_id] = agent_avatar\n\n if self.__verbose:\n print(f\"@{os.path.basename(__file__)}: Created agent with id {agent_avatar.obj_id}.\")\n\n # Get all properties from the agent avatar\n avatar_props = agent_avatar.properties\n\n if agent_avatar.is_human_agent is False:\n agent._factory_initialise(agent_name=agent_avatar.obj_name,\n agent_id=agent_avatar.obj_id,\n action_set=agent_avatar.action_set,\n sense_capability=agent_avatar.sense_capability,\n agent_properties=avatar_props,\n customizable_properties=agent_avatar.customizable_properties,\n callback_is_action_possible=self.__check_action_is_possible,\n rnd_seed=agent_seed)\n else: # if the agent is a human agent, we also assign its user input action map\n agent._factory_initialise(agent_name=agent_avatar.obj_name,\n agent_id=agent_avatar.obj_id,\n action_set=agent_avatar.action_set,\n sense_capability=agent_avatar.sense_capability,\n agent_properties=avatar_props,\n customizable_properties=agent_avatar.customizable_properties,\n callback_is_action_possible=self.__check_action_is_possible,\n rnd_seed=agent_seed,\n key_action_map=agent_avatar.properties[\"key_action_map\"])\n\n return agent_avatar.obj_id",
"def associate_agent_with_registration(\n self, agent_id: str, request_id: str, registration_id: str\n ) -> None:\n channel_id = self.request_id_to_channel_id[request_id]\n self.agent_id_to_channel_id[agent_id] = channel_id\n self.agents_by_registration_id[registration_id] = agent_id",
"def register(self):\n self.logger.info(\"Registering agent %s\", \"/registry/\" + self._configuration[\"identification\"][\"uuid\"])\n self._coordination.update(\"/registry/\" + self._configuration[\"identification\"][\"uuid\"], self._configuration[\"identification\"])",
"def init_agent(self, kwargs):\n\n exp_params = [('agents_info', is_string),\n ('ip', is_string)]\n try:\n agents_info, agent_ip = check_arguments(exp_params, kwargs)\n agents_info = simplejson.loads(agents_info)\n except Exception as ex:\n return HttpErrorResponse(\"%s\" % ex)\n\n self.logger.info('Setting agent environment')\n\n target_dir = self.VAR_CACHE\n with open(join(target_dir, 'agents.json'), 'w') as outfile:\n simplejson.dump(agents_info, outfile)\n\n agent_role = [i['role'] for i in agents_info if i['ip'] == agent_ip][0]\n master_ip = [i['ip'] for i in agents_info if i['role'] == 'master'][0]\n\n self.env.update({'MY_IP':agent_ip})\n self.env.update({'MY_ROLE':agent_role})\n self.env.update({'MASTER_IP':master_ip})\n\n self.logger.info('Agent initialized')\n return HttpJsonResponse()",
"def _add(self, arn, targets):\n\n # TODO: In the future, add support for the optional Port and\n # AvailabilityZone parameters. For now, keeping this dead simple.\n targets = [{'Id': t} for t in targets]\n\n try:\n yield self.api_call(\n self.elbv2_conn.register_targets,\n TargetGroupArn=arn,\n Targets=targets)\n except botocore.exceptions.ClientError as e:\n raise exceptions.UnrecoverableActorFailure(str(e))",
"def make_agent(agent_id, **kwargs):\n return agent_register[agent_id](**kwargs)",
"def request_post_attention(self, agent):\n self.agents_to_settle.add(agent)",
"def add_agents(self, agents=1, agent_class=Agent, **kwargs):\n\n # Check if object is environment or model\n is_env = True if self != self.model else False\n\n # Case 1 - Create new agents\n if isinstance(agents, int):\n agents = AgentList([agent_class(self.model, **kwargs)\n for _ in range(agents)], model=self.model)\n if is_env: # Add agents to master list\n self.model._agents.extend(agents)\n\n # Case 2 - Add existing agents\n else:\n if not isinstance(agents, AgentList):\n agents = AgentList(make_list(agents), model=self.model)\n\n # Add environment to agents\n if is_env:\n for agent in agents:\n agent.envs.append(self)\n\n # Add agents to environment\n self._agents.extend(agents)\n\n return agents",
"def add_router_to_l3_agent(self, l3_agent, body):\r\n return self.post((self.agent_path + self.L3_ROUTERS) % l3_agent,\r\n body=body)",
"def post(self):\n data = request.json\n create_ue(data)\n return None, 201",
"async def post_agent(self, recv_did: str, path_query_fragment: str, msg_json: str) -> None:\n\n pass # TODO: implement",
"def add(self, agent):\n self._agents[agent.unique_id] = agent\n self.logger.add(agent)",
"def post(self, id):\n\n data = json.loads(request.get_data())\n response = add_location(data, id)\n return response",
"def post(self):\n reg = self.request.get('registry')\n region_name = self.request.get('region')\n if reg and len(reg) > 0 and reg.isalnum() and validate_region(region_name):\n region = get_region_id(region_name)\n # Create Registry on IOT Core\n iot = IOT()\n success, message = iot.create_registry(region,reg)\n if success:\n # Add registry to Datastore\n ds = Datastore()\n status = ds.add_registry(reg, region_name)\n self.response.headers['Content-Type'] = 'text/plain'\n if status:\n self.response.write('Registry Added')\n else:\n self.response.write('Registry already exists')\n else:\n self.response.write(message)\n else:\n self.response.write('invalid parameters: ' + reg + \" \" + region_name )",
"def _register_agent(self) -> None:\n strategy = cast(Strategy, self.context.strategy)\n description = strategy.get_location_description()\n self._register(description, \"registering agent on SOEF.\")",
"async def add_inventory_endpoint(request):\n hotel_id = request.args[\"hotel_id\"][0]\n room_type = request.args[\"room_type\"][0]\n room_inventory = request.args[\"room_inventory\"][0]\n model.add_inventory(hotel_id, room_type, room_inventory)\n return json({\"success\": True})",
"async def _post(self,\n method: RpcMethod,\n body: Dict,\n route_args: Optional[Dict] = None,\n **kwargs) -> Dict:\n self._validate_method_type(method)\n payload = self._build_payload(body)\n self._id_count += 1\n async with self._session.post(self._get_complete_url(method, route_args),\n json=payload,\n **kwargs) as response:\n return await self._handle_response(response)",
"def _insert_agent_device(self):\n # Initialize key variables\n idx_agent = 1\n idx_device = 1\n\n # Add agent\n if db_agent.idx_agent_exists(idx_agent) is False:\n # Generate a UID and add a record in the database\n record = Agent(\n id_agent=general.encode(self.reserved),\n name=general.encode(self.reserved))\n database = db.Database()\n database.add(record, 1109)\n\n # Add device\n if db_device.idx_device_exists(idx_device) is False:\n record = Device(\n description=general.encode(self.reserved),\n devicename=general.encode(self.reserved)\n )\n database = db.Database()\n database.add(record, 1106)\n\n # Add to Agent / Device table\n if db_deviceagent.device_agent_exists(idx_device, idx_agent) is False:\n record = DeviceAgent(idx_device=idx_device, idx_agent=idx_agent)\n database = db.Database()\n database.add(record, 1107)",
"def do_POST(self):\n try:\n if self.path.endswith(\"/restaurant/new\"):\n ctype, pdict = cgi.parse_header(self.headers.getheader('Content-type'))\n if ctype == 'multipart/form-data':\n fields = cgi.parse_multipart(self.rfile, pdict)\n restaurantArray = fields.get('restaurant')\n\n # create a new Restaurant\n newRestaurantObject = Restaurant()\n newRestaurantObject.save(restaurantArray[0])\n\n self.send_response(301)\n self.send_header('Content-Type', 'text/html')\n self.send_header('Location', '/restaurants')\n self.end_headers()\n return\n except:\n pass",
"def do_DELETE(self):\n rest_params = common.get_restful_params(self.path)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('DELETE agent returning 400 response. uri not supported: ' + self.path)\n return\n\n agent_id = rest_params[\"agents\"]\n\n if agent_id is not None:\n if self.server.db.remove_agent(agent_id):\n #send response\n common.echo_json_response(self, 200, \"Success\")\n return\n else:\n #send response\n common.echo_json_response(self, 404)\n return\n else:\n common.echo_json_response(self, 404)\n return",
"def test_get_agents( self ):\n\n with self.app.app_context():\n url = '/donation/agents'\n\n # Ensure a GET with no saved agents returns 0.\n response = self.test_client.get( url, headers=self.headers )\n self.assertEqual( len( json.loads( response.data.decode( 'utf-8' ) ) ), 0 )\n\n # Create some agents to retrieve.\n agent_models = []\n agent_jsons = get_agent_jsons()\n for agent_json in agent_jsons:\n agent_model = AgentSchema().load( agent_json ).data\n agent_models.append( agent_model )\n database.session.bulk_save_objects( agent_models )\n database.session.commit()\n\n # Ensure GET returns all agents.\n response = self.test_client.get( url, headers=self.headers )\n self.assertEqual( len( json.loads( response.data.decode( 'utf-8' ) ) ), len( agent_jsons ) )",
"def add_actor():\n if not request.json or 'json_class' not in request.json or 'name' not in request.json or 'age' not in request.json \\\n or 'total_gross' not in request.json or 'movies' not in request.json:\n abort(400)\n\n name = request.json['name']\n\n actors_data[name] = request.json\n\n return make_response(jsonify(actors_data[name]), 201)",
"def append(self, agent):\n self.agents.append(agent)",
"def add_agent(self, agent):\n\t\tif not (agent in self.agents_in_site):\n\t\t\tif (agent.site != None):\n\t\t\t\tagent.site.agents_in_site.remove(agent) \n\t\t\tself.agents_in_site.append(agent)\n\t\t\tagent.site = self"
] | [
"0.7403621",
"0.65014863",
"0.63130796",
"0.616501",
"0.57305866",
"0.56719697",
"0.5531976",
"0.55056393",
"0.5445917",
"0.5390362",
"0.5374244",
"0.5350049",
"0.5341212",
"0.53108996",
"0.5275562",
"0.52541816",
"0.5252334",
"0.52314633",
"0.519691",
"0.51821196",
"0.5157355",
"0.51399714",
"0.51310295",
"0.51138115",
"0.51125795",
"0.509326",
"0.5080117",
"0.5074148",
"0.505919",
"0.5045817"
] | 0.7357061 | 1 |
This method handles PUT requests to add agents to the Registrar Server. Currently, only agent resources are available for PUT, i.e. /agents. All other PUT URIs will return errors. | def do_PUT(self):
rest_params = common.get_restful_params(self.path)
if rest_params is None:
common.echo_json_response(self, 405, "Not Implemented: Use /agents/ interface")
return
if "agents" not in rest_params:
common.echo_json_response(self, 400, "uri not supported")
logger.warning('PUT agent returning 400 response. uri not supported: ' + self.path)
return
agent_id = rest_params["agents"]
if agent_id is None:
common.echo_json_response(self, 400, "agent id not found in uri")
logger.warning('PUT agent returning 400 response. agent id not found in uri ' + self.path)
return
try:
content_length = int(self.headers.get('Content-Length', 0))
if content_length == 0:
common.echo_json_response(self, 400, "Expected non zero content length")
logger.warning('PUT for ' + agent_id + ' returning 400 response. Expected non zero content length.')
return
post_body = self.rfile.read(content_length)
json_body = json.loads(post_body)
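# "activate": physical TPM path - the supplied auth_tag must match an HMAC of the agent id computed with the agent's stored key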
if "activate" in rest_params:
auth_tag=json_body['auth_tag']
agent = self.server.db.get_agent(agent_id)
if agent is None:
raise Exception("attempting to activate agent before requesting registrar for %s"%agent_id)
if agent['virtual']:
raise Exception("attempting to activate virtual AIK using physical interface for %s"%agent_id)
if common.STUB_TPM:
self.server.db.update_agent(agent_id, 'active',True)
else:
ex_mac = crypto.do_hmac(agent['key'],agent_id)
if ex_mac == auth_tag:
self.server.db.update_agent(agent_id, 'active',True)
else:
raise Exception("Auth tag %s does not match expected value %s"%(auth_tag,ex_mac))
common.echo_json_response(self, 200, "Success")
logger.info('PUT activated: ' + agent_id)
elif "vactivate" in rest_params:
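# "vactivate": virtual TPM (vTPM) path - the deep quote is validated against the agent's vAIK and the provider registrar's physical AIK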
deepquote = json_body.get('deepquote',None)
agent = self.server.db.get_agent(agent_id)
if agent is None:
raise Exception("attempting to activate agent before requesting registrar for %s"%agent_id)
if not agent['virtual']:
raise Exception("attempting to activate physical AIK using virtual interface for %s"%agent_id)
# get a physical AIK for this host
registrar_client.init_client_tls(config, 'registrar')
provider_keys = registrar_client.getKeys(config.get('general', 'provider_registrar_ip'), config.get('general', 'provider_registrar_tls_port'), agent_id)
# we already have the vaik
tpm = tpm_obj.getTPM(need_hw_tpm=False,tpm_version=agent['tpm_version'])
if not tpm.check_deep_quote(hashlib.sha1(agent['key']).hexdigest(),
agent_id+agent['aik']+agent['ek'],
deepquote,
agent['aik'],
provider_keys['aik']):
raise Exception("Deep quote invalid")
self.server.db.update_agent(agent_id, 'active',True)
self.server.db.update_agent(agent_id, 'provider_keys',provider_keys)
common.echo_json_response(self, 200, "Success")
logger.info('PUT activated: ' + agent_id)
else:
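# no other PUT sub-resources are handled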
pass
except Exception as e:
common.echo_json_response(self, 400, "Error: %s"%e)
logger.warning("PUT for " + agent_id + " returning 400 response. Error: %s"%e)
logger.exception(e)
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def put(self):\n try:\n rest_params = common.get_restful_params(self.request.uri)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('PUT returning 400 response. uri not supported: ' + self.request.path)\n return\n\n agent_id = rest_params[\"agents\"]\n if agent_id is None:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning(\"PUT returning 400 response. uri not supported\")\n\n agent = self.db.get_agent(agent_id)\n\n if agent is not None:\n common.echo_json_response(self, 404, \"agent id not found\")\n logger.info('PUT returning 404 response. agent id: ' + agent_id + ' not found.')\n\n if \"reactivate\" in rest_params:\n agent['operational_state']=cloud_verifier_common.CloudAgent_Operational_State.START\n asyncio.ensure_future(self.process_agent(agent, cloud_verifier_common.CloudAgent_Operational_State.GET_QUOTE))\n common.echo_json_response(self, 200, \"Success\")\n logger.info('PUT returning 200 response for agent id: ' + agent_id)\n elif \"stop\" in rest_params:\n # do stuff for terminate\n logger.debug(\"Stopping polling on %s\"%agent_id)\n self.db.update_agent(agent_id,'operational_state',cloud_verifier_common.CloudAgent_Operational_State.TENANT_FAILED)\n\n common.echo_json_response(self, 200, \"Success\")\n logger.info('PUT returning 200 response for agent id: ' + agent_id)\n else:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning(\"PUT returning 400 response. uri not supported\")\n\n except Exception as e:\n common.echo_json_response(self, 400, \"Exception error: %s\"%e)\n logger.warning(\"PUT returning 400 response. Exception error: %s\"%e)\n logger.exception(e)\n self.finish()",
"def post(self):\n try:\n rest_params = common.get_restful_params(self.request.uri)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('POST returning 400 response. uri not supported: ' + self.request.path)\n return\n\n agent_id = rest_params[\"agents\"]\n\n if agent_id is not None: # this is for new items\n content_length = len(self.request.body)\n if content_length==0:\n common.echo_json_response(self, 400, \"Expected non zero content length\")\n logger.warning('POST returning 400 response. Expected non zero content length.')\n else:\n json_body = json.loads(self.request.body)\n d = {}\n d['v'] = json_body['v']\n d['ip'] = json_body['cloudagent_ip']\n d['port'] = int(json_body['cloudagent_port'])\n d['operational_state'] = cloud_verifier_common.CloudAgent_Operational_State.START\n d['public_key'] = \"\"\n d['tpm_policy'] = json_body['tpm_policy']\n d['vtpm_policy'] = json_body['vtpm_policy']\n d['metadata'] = json_body['metadata']\n d['ima_whitelist'] = json_body['ima_whitelist']\n d['revocation_key'] = json_body['revocation_key']\n d['tpm_version'] = 0\n d['accept_tpm_hash_algs'] = json_body['accept_tpm_hash_algs']\n d['accept_tpm_encryption_algs'] = json_body['accept_tpm_encryption_algs']\n d['accept_tpm_signing_algs'] = json_body['accept_tpm_signing_algs']\n d['hash_alg'] = \"\"\n d['enc_alg'] = \"\"\n d['sign_alg'] = \"\"\n\n new_agent = self.db.add_agent(agent_id,d)\n\n # don't allow overwriting\n if new_agent is None:\n common.echo_json_response(self, 409, \"Agent of uuid %s already exists\"%(agent_id))\n logger.warning(\"Agent of uuid %s already exists\"%(agent_id))\n else:\n asyncio.ensure_future(self.process_agent(new_agent, cloud_verifier_common.CloudAgent_Operational_State.GET_QUOTE))\n common.echo_json_response(self, 200, \"Success\")\n logger.info('POST returning 200 response for adding agent id: ' + agent_id)\n else:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning(\"POST returning 400 response. uri not supported\")\n except Exception as e:\n common.echo_json_response(self, 400, \"Exception error: %s\"%e)\n logger.warning(\"POST returning 400 response. Exception error: %s\"%e)\n logger.exception(e)\n\n self.finish()",
"def do_POST(self):\n rest_params = common.get_restful_params(self.path)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('POST agent returning 400 response. uri not supported: ' + self.path)\n return\n\n agent_id = rest_params[\"agents\"]\n\n if agent_id is None:\n common.echo_json_response(self, 400, \"agent id not found in uri\")\n logger.warning('POST agent returning 400 response. agent id not found in uri ' + self.path)\n return\n\n try:\n content_length = int(self.headers.get('Content-Length', 0))\n if content_length == 0:\n common.echo_json_response(self, 400, \"Expected non zero content length\")\n logger.warning('POST for ' + agent_id + ' returning 400 response. Expected non zero content length.')\n return\n\n post_body = self.rfile.read(content_length)\n json_body = json.loads(post_body)\n\n ek = json_body['ek']\n ek_tpm = json_body['ek_tpm']\n ekcert = json_body['ekcert']\n aik = json_body['aik']\n aik_name = json_body['aik_name']\n tpm_version = int(json_body['tpm_version'])\n\n # try to encrypt the AIK\n tpm = tpm_obj.getTPM(need_hw_tpm=False,tpm_version=tpm_version)\n (blob,key) = tpm.encryptAIK(agent_id,aik,ek,ek_tpm,aik_name)\n # special behavior if we've registered this uuid before\n regcount = 1\n agent = self.server.db.get_agent(agent_id)\n\n if agent is not None:\n\n # keep track of how many ek-ekcerts have registered on this uuid\n regcount = agent['regcount']\n if agent['ek'] != ek or agent['ekcert'] != ekcert:\n logger.warning('WARNING: Overwriting previous registration for this UUID with new ek-ekcert pair!')\n regcount += 1\n\n # force overwrite\n logger.info('Overwriting previous registration for this UUID.')\n # self.server.db.remove_agent(agent_id)\n self.server.db.remove_agent(agent_id)\n # Add values to database\n d={}\n d['ek']=ek\n d['aik']=aik\n d['ekcert']=ekcert\n d['virtual']=int(ekcert=='virtual')\n d['active']=int(False)\n d['key']=key\n d['tpm_version']=tpm_version\n d['provider_keys']={}\n d['regcount']=regcount\n self.server.db.add_agent(agent_id, d)\n response = {\n 'blob': blob,\n }\n common.echo_json_response(self, 200, \"Success\", response)\n\n logger.info('POST returning key blob for agent_id: ' + agent_id)\n return\n except Exception as e:\n common.echo_json_response(self, 400, \"Error: %s\"%e)\n logger.warning(\"POST for \" + agent_id + \" returning 400 response. Error: %s\"%e)\n logger.exception(e)\n return",
"async def put(self):\r\n data = await self.request.json()\r\n agent_uuid = data[\"agent_uuid\"]\r\n ip_address = data[\"ip_address\"]\r\n agent_obj = Agent.filter(Agent.uuid == agent_uuid).first()\r\n if not agent_obj:\r\n response_obj = {\"status\": \"failed\"}\r\n logger.error(\"No agent found!!!\")\r\n return web.Response(text=str(response_obj), status=500)\r\n try:\r\n Agent.update(ip_address=ip_address).where(Agent.uuid == agent_uuid)\r\n logger.info(\"Agent updated!!!\")\r\n return web.Response(text=\"successful\", status=200)\r\n except Exception as ex:\r\n response_obj = {\"status\": \"failed\"}\r\n error_message = str(ex)\r\n logger.error(error_message)\r\n return web.Response(text=str(response_obj), status=500)",
"def register(self):\n self.logger.info(\"Registering agent %s\", \"/registry/\" + self._configuration[\"identification\"][\"uuid\"])\n self._coordination.update(\"/registry/\" + self._configuration[\"identification\"][\"uuid\"], self._configuration[\"identification\"])",
"def load_agents(self, agents):\n self.agents = agents",
"def add_agents(self, agents=1, agent_class=Agent, **kwargs):\n\n # Check if object is environment or model\n is_env = True if self != self.model else False\n\n # Case 1 - Create new agents\n if isinstance(agents, int):\n agents = AgentList([agent_class(self.model, **kwargs)\n for _ in range(agents)], model=self.model)\n if is_env: # Add agents to master list\n self.model._agents.extend(agents)\n\n # Case 2 - Add existing agents\n else:\n if not isinstance(agents, AgentList):\n agents = AgentList(make_list(agents), model=self.model)\n\n # Add environment to agents\n if is_env:\n for agent in agents:\n agent.envs.append(self)\n\n # Add agents to environment\n self._agents.extend(agents)\n\n return agents",
"def _register_agent(self) -> None:\n strategy = cast(Strategy, self.context.strategy)\n description = strategy.get_location_description()\n self._register(description, \"registering agent on SOEF.\")",
"def set_agents(self, agents):\n if self.single_agent_mode:\n raise ValueError(\n \"Setting agent in single agent mode or human mode is not allowed.\"\n )\n\n self.agents = agents\n # If at least one agent needs raw data, we set self.allow_raw_data = True\n for agent in self.agents:\n if agent.use_raw:\n self.allow_raw_data = True\n break",
"def test_list_agents(self):\n admin_resource_id = self.agent['id']\n with (self.override_role_and_validate_list(\n admin_resource_id=admin_resource_id)) as ctx:\n ctx.resources = self.agents_client.list_agents(\n id=admin_resource_id)[\"agents\"]",
"def init_agent(self, kwargs):\n\n exp_params = [('agents_info', is_string),\n ('ip', is_string)]\n try:\n agents_info, agent_ip = check_arguments(exp_params, kwargs)\n agents_info = simplejson.loads(agents_info)\n except Exception as ex:\n return HttpErrorResponse(\"%s\" % ex)\n\n self.logger.info('Setting agent environment')\n\n target_dir = self.VAR_CACHE\n with open(join(target_dir, 'agents.json'), 'w') as outfile:\n simplejson.dump(agents_info, outfile)\n\n agent_role = [i['role'] for i in agents_info if i['ip'] == agent_ip][0]\n master_ip = [i['ip'] for i in agents_info if i['role'] == 'master'][0]\n\n self.env.update({'MY_IP':agent_ip})\n self.env.update({'MY_ROLE':agent_role})\n self.env.update({'MASTER_IP':master_ip})\n\n self.logger.info('Agent initialized')\n return HttpJsonResponse()",
"def test_get_agents( self ):\n\n with self.app.app_context():\n url = '/donation/agents'\n\n # Ensure a GET with no saved agents returns 0.\n response = self.test_client.get( url, headers=self.headers )\n self.assertEqual( len( json.loads( response.data.decode( 'utf-8' ) ) ), 0 )\n\n # Create some agents to retrieve.\n agent_models = []\n agent_jsons = get_agent_jsons()\n for agent_json in agent_jsons:\n agent_model = AgentSchema().load( agent_json ).data\n agent_models.append( agent_model )\n database.session.bulk_save_objects( agent_models )\n database.session.commit()\n\n # Ensure GET returns all agents.\n response = self.test_client.get( url, headers=self.headers )\n self.assertEqual( len( json.loads( response.data.decode( 'utf-8' ) ) ), len( agent_jsons ) )",
"def associate_agent_with_registration(\n self, agent_id: str, request_id: str, registration_id: str\n ) -> None:\n channel_id = self.request_id_to_channel_id[request_id]\n self.agent_id_to_channel_id[agent_id] = channel_id\n self.agents_by_registration_id[registration_id] = agent_id",
"def do_GET(self):\n rest_params = common.get_restful_params(self.path)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('GET returning 400 response. uri not supported: ' + self.path)\n return\n\n agent_id = rest_params[\"agents\"]\n\n if agent_id is not None:\n agent = self.server.db.get_agent(agent_id)\n\n if agent is None:\n common.echo_json_response(self, 404, \"agent_id not found\")\n logger.warning('GET returning 404 response. agent_id ' + agent_id + ' not found.')\n return\n\n if not agent['active']:\n common.echo_json_response(self, 404, \"agent_id not yet active\")\n logger.warning('GET returning 404 response. agent_id ' + agent_id + ' not yet active.')\n return\n\n response = {\n 'aik': agent['aik'],\n 'ek': agent['ek'],\n 'ekcert': agent['ekcert'],\n 'regcount': agent['regcount'],\n }\n\n if agent['virtual']:\n response['provider_keys']= agent['provider_keys']\n\n common.echo_json_response(self, 200, \"Success\", response)\n logger.info('GET returning 200 response for agent_id:' + agent_id)\n else:\n # return the available registered uuids from the DB\n json_response = self.server.db.get_agent_ids()\n common.echo_json_response(self, 200, \"Success\", {'uuids':json_response})\n logger.info('GET returning 200 response for agent_id list')\n\n return",
"def add_agent(self, agent):\n\t\tif not (agent in self.agents_in_site):\n\t\t\tif (agent.site != None):\n\t\t\t\tagent.site.agents_in_site.remove(agent) \n\t\t\tself.agents_in_site.append(agent)\n\t\t\tagent.site = self",
"def do_DELETE(self):\n rest_params = common.get_restful_params(self.path)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('DELETE agent returning 400 response. uri not supported: ' + self.path)\n return\n\n agent_id = rest_params[\"agents\"]\n\n if agent_id is not None:\n if self.server.db.remove_agent(agent_id):\n #send response\n common.echo_json_response(self, 200, \"Success\")\n return\n else:\n #send response\n common.echo_json_response(self, 404)\n return\n else:\n common.echo_json_response(self, 404)\n return",
"def get(self):\n rest_params = common.get_restful_params(self.request.uri)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('GET returning 400 response. uri not supported: ' + self.request.path)\n return\n\n agent_id = rest_params[\"agents\"]\n\n if agent_id is not None:\n agent = self.db.get_agent(agent_id)\n if agent is not None:\n response = cloud_verifier_common.process_get_status(agent)\n common.echo_json_response(self, 200, \"Success\", response)\n #logger.info('GET returning 200 response for agent_id: ' + agent_id)\n\n else:\n #logger.info('GET returning 404 response. agent id: ' + agent_id + ' not found.')\n common.echo_json_response(self, 404, \"agent id not found\")\n else:\n # return the available keys in the DB\n json_response = self.db.get_agent_ids()\n common.echo_json_response(self, 200, \"Success\", {'uuids':json_response})\n logger.info('GET returning 200 response for agent_id list')",
"def append(self, agent):\n self.agents.append(agent)",
"def add_router_to_l3_agent(self, l3_agent, body):\r\n return self.post((self.agent_path + self.L3_ROUTERS) % l3_agent,\r\n body=body)",
"def registerTeam(self, agentsOnTeam):\n\n self.agentsOnTeam = agentsOnTeam",
"def _register_agent(self, agent, agent_avatar: AgentBody):\n\n # Random seed for agent between 1 and 10000000, might need to be adjusted still\n agent_seed = self.__rnd_gen.randint(1, 1000000)\n\n # check if the agent can be succesfully placed at that location\n self.__validate_obj_placement(agent_avatar)\n\n # Add agent to registered agents\n self.__registered_agents[agent_avatar.obj_id] = agent_avatar\n\n if self.__verbose:\n print(f\"@{os.path.basename(__file__)}: Created agent with id {agent_avatar.obj_id}.\")\n\n # Get all properties from the agent avatar\n avatar_props = agent_avatar.properties\n\n if agent_avatar.is_human_agent is False:\n agent._factory_initialise(agent_name=agent_avatar.obj_name,\n agent_id=agent_avatar.obj_id,\n action_set=agent_avatar.action_set,\n sense_capability=agent_avatar.sense_capability,\n agent_properties=avatar_props,\n customizable_properties=agent_avatar.customizable_properties,\n callback_is_action_possible=self.__check_action_is_possible,\n rnd_seed=agent_seed)\n else: # if the agent is a human agent, we also assign its user input action map\n agent._factory_initialise(agent_name=agent_avatar.obj_name,\n agent_id=agent_avatar.obj_id,\n action_set=agent_avatar.action_set,\n sense_capability=agent_avatar.sense_capability,\n agent_properties=avatar_props,\n customizable_properties=agent_avatar.customizable_properties,\n callback_is_action_possible=self.__check_action_is_possible,\n rnd_seed=agent_seed,\n key_action_map=agent_avatar.properties[\"key_action_map\"])\n\n return agent_avatar.obj_id",
"def updateAgent(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"async def post(self):\r\n\r\n data = await self.request.json()\r\n agent_uuid = data.get(\"agent_uuid\")\r\n agent = Agent.get(Agent.uuid == agent_uuid)\r\n if not agent:\r\n response_obj = {\"status\": \"failed\", \"reason\": \"agent not present\"}\r\n logger.info(\"agent not present\")\r\n return web.Response(text=str(response_obj), status=404)\r\n try:\r\n System.create(agent_uuid=agent)\r\n logger.info(\"System created successfully!!!\")\r\n return web.Response(text=\"Successful\", status=201)\r\n except Exception as ex:\r\n response_obj = {\"status\": \"failed\", \"reason\": \"agent not added\"}\r\n error_message = str(ex)\r\n logger.error(error_message)\r\n return web.Response(text=str(response_obj), status=500)",
"def doRegisterAgent(\n registrar_ip: str,\n registrar_port: str,\n agent_id: str,\n ek_tpm: bytes,\n ekcert: Optional[Union[bytes, str]],\n aik_tpm: bytes,\n mtls_cert: Optional[bytes] = None,\n contact_ip: Optional[str] = None,\n contact_port: Optional[str] = None,\n) -> Optional[str]:\n\n data: Dict[str, Any] = {\n \"ekcert\": ekcert,\n \"aik_tpm\": aik_tpm,\n }\n if ekcert is None or ekcert == \"emulator\":\n data[\"ek_tpm\"] = ek_tpm\n\n if mtls_cert is not None:\n data[\"mtls_cert\"] = mtls_cert\n else:\n data[\"mtls_cert\"] = \"disabled\"\n logger.error(\"Most actions require the agent to have mTLS enabled, but no cert was provided!\")\n if contact_ip is not None:\n data[\"ip\"] = contact_ip\n if contact_port is not None:\n data[\"port\"] = contact_port\n\n response = None\n try:\n # The agent accesses the registrar without mTLS, meaning without client\n # certificate\n # TODO the registrar could be accessed using TLS, but without client\n # certificate verification. Currently it is accessed without TLS at all\n client = RequestsClient(f\"{registrar_ip}:{registrar_port}\", False)\n response = client.post(f\"/v{api_version}/agents/{agent_id}\", data=json.dumps(data))\n response_body = response.json()\n\n if response.status_code != 200:\n logger.error(\"Error: unexpected http response code from Registrar Server: %s\", response.status_code)\n keylime_logging.log_http_response(logger, logging.ERROR, response_body)\n return None\n\n logger.info(\"Agent registration requested for %s\", agent_id)\n\n if \"results\" not in response_body:\n logger.critical(\"Error: unexpected http response body from Registrar Server: %s\", response.status_code)\n return None\n\n if \"blob\" not in response_body[\"results\"]:\n logger.critical(\"Error: did not receive blob from Registrar Server: %s\", response.status_code)\n return None\n\n return str(response_body[\"results\"][\"blob\"])\n except Exception as e:\n if response and response.status_code == 503:\n logger.error(\"Agent cannot establish connection to registrar at %s:%s\", registrar_ip, registrar_port)\n sys.exit()\n else:\n logger.exception(e)\n\n return None",
"def add_vehicle_for_the_route_successfully(self):\n route = self.get_route_object()\n response = self.client.patch(\n api_reverse('route:route', args=[route.id]),\n self.vehicle_id ,\n HTTP_AUTHORIZATION='token {}'.format(self.token_two))\n return response",
"def _add(self, arn, targets):\n\n # TODO: In the future, add support for the optional Port and\n # AvailabilityZone parameters. For now, keeping this dead simple.\n targets = [{'Id': t} for t in targets]\n\n try:\n yield self.api_call(\n self.elbv2_conn.register_targets,\n TargetGroupArn=arn,\n Targets=targets)\n except botocore.exceptions.ClientError as e:\n raise exceptions.UnrecoverableActorFailure(str(e))",
"def useridagents(self, useridagent_id, data, tenant_id=None, api_version=\"v2.0\"):\n\n if tenant_id is None and self._parent_class.tenant_id:\n # Pull tenant_id from parent namespace cache.\n tenant_id = self._parent_class.tenant_id\n elif not tenant_id:\n # No value for tenant_id.\n raise TypeError(\"tenant_id is required but not set or cached.\")\n cur_ctlr = self._parent_class.controller\n\n url = str(cur_ctlr) + \"/{}/api/tenants/{}/useridagents/{}\".format(api_version,\n tenant_id,\n useridagent_id)\n\n api_logger.debug(\"URL = %s\", url)\n return self._parent_class.rest_call(url, \"put\", data=data)",
"def queue_agents(self, agents):\n logger.info('Preparing agents...')\n for agent in tqdm(agents):\n self.data['agent_trip_types'][agent.id] = agent.public\n ev = self.route_agent(agent)\n if ev is not None:\n self.queue(*ev)",
"def delete(self):\n rest_params = common.get_restful_params(self.request.uri)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n return\n\n agent_id = rest_params[\"agents\"]\n\n if agent_id is None:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('DELETE returning 400 response. uri not supported: ' + self.request.path)\n\n agent = self.db.get_agent(agent_id)\n\n if agent is None:\n common.echo_json_response(self, 404, \"agent id not found\")\n logger.info('DELETE returning 404 response. agent id: ' + agent_id + ' not found.')\n return\n\n op_state = agent['operational_state']\n if op_state == cloud_verifier_common.CloudAgent_Operational_State.SAVED or \\\n op_state == cloud_verifier_common.CloudAgent_Operational_State.FAILED or \\\n op_state == cloud_verifier_common.CloudAgent_Operational_State.TERMINATED or \\\n op_state == cloud_verifier_common.CloudAgent_Operational_State.TENANT_FAILED or \\\n op_state == cloud_verifier_common.CloudAgent_Operational_State.INVALID_QUOTE:\n self.db.remove_agent(agent_id)\n common.echo_json_response(self, 200, \"Success\")\n logger.info('DELETE returning 200 response for agent id: ' + agent_id)\n else:\n self.db.update_agent(agent_id, 'operational_state',cloud_verifier_common.CloudAgent_Operational_State.TERMINATED)\n common.echo_json_response(self, 202, \"Accepted\")\n logger.info('DELETE returning 202 response for agent id: ' + agent_id)",
"def test_actors_405(self):\n actor_id = random.choice(Actor.query.all()).id\n\n res = self.client().put(\n '/api/actors/' + str(actor_id),\n json=self.actor_update_request)\n self.assertEqual(res.status_code, 405)"
] | [
"0.7346292",
"0.66388345",
"0.63099504",
"0.5964193",
"0.59574366",
"0.58821076",
"0.56671065",
"0.56290895",
"0.56259966",
"0.5612662",
"0.5565945",
"0.55049163",
"0.5471636",
"0.5446584",
"0.5415216",
"0.5295331",
"0.51881486",
"0.5173341",
"0.5171275",
"0.5155832",
"0.51348644",
"0.50826657",
"0.50792027",
"0.50551337",
"0.5016712",
"0.5016078",
"0.49776983",
"0.49431008",
"0.49326622",
"0.4929978"
] | 0.77525806 | 0 |
Build a task representation like `MyTask(param1=1.5, param2='5')` | def __repr__(self):
params = self.get_params()
param_values = self.get_param_values(params, [], self.param_kwargs)
# Build up task id
repr_parts = []
param_objs = dict(params)
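# only significant parameters with PUBLIC visibility are serialized into the representation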
for param_name, param_value in param_values:
if param_objs[param_name].significant and \
param_objs[param_name].visibility == luigi.parameter.ParameterVisibility.PUBLIC:
repr_parts.append('%s=%s' % (param_name, param_objs[param_name].serialize(param_value)))
task_str = '{}({})'.format(self.get_task_family(), ', '.join(repr_parts))
return task_str | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_task(module_name, args=[], kwargs={}, module_attrs={}):\n kwargs = copy.deepcopy(kwargs) # Copy to avoid argument passed by reference issue\n if args:\n kwargs[\"_raw_params\"] = \" \".join(args)\n\n task_data = {\n \"action\": {\n \"module\": module_name,\n \"args\": kwargs\n },\n }\n if module_attrs:\n task_data.update(module_attrs)\n\n return task_data",
"def build(arg_dict):\n\n task_item = Task()\n\n try:\n task_item.key = arg_dict['key']\n except KeyError:\n task_item.key = None\n\n try:\n task_item.title = arg_dict['title']\n except KeyError:\n task_item.title = None\n\n try:\n task_item.notes = arg_dict['notes']\n except KeyError:\n task_item.notes = None\n\n return task_item",
"def factory(self, taskname, *args, **kwargs):\n import etc\n return str(apply(etc.tasks[taskname], args, kwargs))",
"def generate_tasks(self, task):",
"def make_task(self, data, **kwargs):\n return QlikMetric(name=data.get('name'), task_type=data.get('task_type'),\n source_system=data.get('source_system'),\n source_subsystem=data.get('source_subsystem'),\n app_id=data.get('app_id'),\n dimensions=data.get('dimensions'),\n measures=data.get('measures'),\n selections=data.get('selections'),\n yaml_file=data.get('yaml_file'), env=data.get('env'),\n thread_name=data.get('thread_name'), color=data.get('color')\n )",
"def humanize_task_params(taskparams):\n taskparams_list = []\n jobparams_list = []\n\n for k in taskparams:\n rec = {'name': k, 'value': taskparams[k]}\n taskparams_list.append(rec)\n taskparams_list = sorted(taskparams_list, key=lambda x: x['name'].lower())\n\n jobparams = taskparams['jobParameters']\n if 'log' in taskparams:\n jobparams.append(taskparams['log'])\n\n for p in jobparams:\n if p['type'] == 'constant':\n ptxt = p['value']\n elif p['type'] == 'template':\n ptxt = \"<i>{} template:</i> value='{}' \".format(p['param_type'], p['value'])\n for v in p:\n if v in ['type', 'param_type', 'value']:\n continue\n ptxt += \" {}='{}'\".format(v, p[v])\n else:\n ptxt = '<i>unknown parameter type {}:</i> '.format(p['type'])\n for v in p:\n if v in ['type', ]:\n continue\n ptxt += \" {}='{}'\".format(v, p[v])\n jobparams_list.append(ptxt)\n jobparams_list = sorted(jobparams_list, key=lambda x: x.lower())\n\n return taskparams_list, jobparams_list",
"def create_task(self, name, value):\n pass",
"def get_tasks_params(self):\n params = {}\n tasks = []\n\n for cmdparam in self.cmdline.params:\n if \":\" in cmdparam:\n # task:NAME=VALUE:NAME=VALUE:NAME=VALUE\n parts = cmdparam.split(\":\")\n taskparams = {}\n for taskparam in parts[1:]:\n if \"=\" in taskparam:\n (name, value) = taskparam.split(\"=\", 1)\n if name[:1] == \"_\" or name[-1:] == \"_\":\n raise Error(\"Setting special from command line not allowed\")\n taskparams[name] = value\n\n tasks.append((parts[0], taskparams))\n elif \"=\" in cmdparam:\n # NAME=VALUE\n (name, value) = cmdparam.split(\"=\", 1)\n if name[:1] == \"_\" or name[-1:] == \"_\":\n raise Error(\"Setting special _VARIABLES_ from command line not allowed\")\n params[name] = value\n else:\n # taskname\n tasks.append((cmdparam, {}))\n\n return (tasks, params)",
"def _reconstruct_task(task_record, hints, requirements, inputs, outputs):\n rec = task_record[\"t\"]\n return Task(name=rec[\"name\"], base_command=rec[\"base_command\"], hints=hints,\n requirements=requirements, inputs=inputs, outputs=outputs, stdout=rec[\"stdout\"],\n stderr=rec[\"stderr\"], workflow_id=rec[\"workflow_id\"], task_id=rec[\"id\"])",
"def format_task_call(task: \"Task\", args: Tuple, kwargs: dict) -> str:\n all_args = OrderedDict()\n sig = task.signature\n\n for i, param in enumerate(sig.parameters.values()):\n if i < len(args):\n # Positional argument.\n all_args[param.name] = args[i]\n\n else:\n # Keyword argument.\n all_args[param.name] = kwargs.get(param.name, param.default)\n\n args_text = \", \".join(format_arg(arg_name, value) for arg_name, value in all_args.items())\n return \"{task}({args})\".format(\n task=task.fullname,\n args=args_text,\n )",
"def create_task():",
"def _format_task(task: \"asyncio.Task[Any]\") -> str:\n coro = _format_coroutine(task.get_coro()).partition(\" \")[0]\n return f\"<Task name={task.get_name()} coro={coro}>\"",
"def task(self, **task):\n task[\"name\"] = task[\"name\"].replace(\"=\", \"--\")\n return task",
"def make_task(self):\n return Task()",
"def __init__(self, task_type, task):\n self.task = task\n self.task_type = task_type",
"def __repr__(self):\n\n return '<Task id={id} title={title}>'.format(id=self.id,\n title=self.title,\n )",
"def get_task_parameters_as_string(self):\n\t\treturn call_sdk_function('PrlRunningTask_GetTaskParametersAsString', self.handle)",
"def Params(cls):\n p = super().Params()\n p.Define('train_task', None, 'Underlying task')\n p.Define('decode_task', None, 'Underlying task')\n p.Define('train_dataset_name', None, '')\n p.Define('decode_dataset_name', None, '')\n p.Define('train_steps_per_loop', 0, '')\n p.Define('decode_steps_per_loop', 0, '')\n return p",
"def task_gen(self):\n pass",
"def makeTask(self, parsedCmd=None, args=None):\n if parsedCmd is not None:\n butler = parsedCmd.butler\n elif args is not None:\n dataRefList, kwargs = args\n butler = dataRefList[0].butlerSubset.butler\n else:\n raise RuntimeError(\"parsedCmd or args must be specified\")\n return self.TaskClass(config=self.config, log=self.log, butler=butler)",
"def task_wrapper(serialized_task):\n task = pickle.loads(serialized_task)\n logging.info('Running %s', str(task))\n return cloudpickle.dumps(task())",
"def task(self, *args, **options):\n\n def inner_create_task_cls(**options):\n\n def _create_task_cls(fun):\n options[\"app\"] = self\n options.setdefault(\"accept_magic_kwargs\", False)\n base = options.pop(\"base\", None) or self.Task\n\n @wraps(fun, assigned=(\"__module__\", \"__name__\"))\n def run(self, *args, **kwargs):\n return fun(*args, **kwargs)\n\n # Save the argspec for this task so we can recognize\n # which default task kwargs we're going to pass to it later.\n # (this happens in celery.utils.fun_takes_kwargs)\n run.argspec = getargspec(fun)\n\n cls_dict = dict(options, run=run,\n __module__=fun.__module__,\n __doc__=fun.__doc__)\n T = type(fun.__name__, (base, ), cls_dict)()\n return registry.tasks[T.name] # global instance.\n\n return _create_task_cls\n\n if len(args) == 1 and callable(args[0]):\n return inner_create_task_cls(**options)(*args)\n return inner_create_task_cls(**options)",
"def format_task(oldtask):\n\n newtask = {\n 'name': oldtask['title'],\n 'notes': [],\n 'priority': format_priority(oldtask['priority']),\n 'repeat': format_repeat(oldtask['recurrence'], oldtask['repeat_until']),\n # make a copy so we can modify it\n 'tags': list(oldtask['tags']),\n }\n\n if oldtask['notes']:\n newtask['notes'].append(oldtask['notes'])\n\n # datetime\n for ts in ('due_date',):\n newtask[ts] = format_date(oldtask[ts])\n\n # seconds to minutes\n # RTM doesn't do 'elapsed'.\n for ts in ('estimated',):\n newtask[ts] = format_estimate(oldtask[ts])\n\n # bool (RTM doesn't take dates for these).\n for ts in ('completed', 'deleted'):\n newtask[ts] = bool(oldtask[ts])\n if newtask[ts]:\n newtask['tags'].append('astrid-' + ts)\n\n if newtask['notes']:\n newtask['tags'].append('astrid-notes')\n\n if 'alarms' in oldtask and oldtask['alarms']:\n newtask['tags'].append('astrid-alarms')\n newtask['notes'].append(\"\\n\".join(['astrid-alarms:'] + [\n ts.isoformat() for ts in oldtask['alarms']\n ]))\n\n if not newtask['notes']:\n newtask['notes'] = None\n\n newtask['smart_add'] = smart_add(\n name = newtask['name'],\n due_date = format_date(oldtask['due_date'], local=True),\n priority = newtask['priority'],\n tags = newtask['tags'],\n repeat = format_repeat(oldtask['recurrence'],\n oldtask['repeat_until'], local=True,\n ),\n estimated = newtask['estimated'],\n )\n\n return newtask",
"def gen_task_item(self) -> Dict[str, Any]:\n return {}",
"def __init__(self, task_params):\n self.seq_width = task_params[\"seq_width\"]\n self.min_seq_len = task_params[\"min_seq_len\"]\n self.max_seq_len = task_params[\"max_seq_len\"]\n self.min_repeat = task_params[\"min_repeat\"]\n self.max_repeat = task_params[\"max_repeat\"]\n self.in_dim = task_params['seq_width'] + 2\n self.out_dim = task_params['seq_width'] + 1",
"def for_func(cls, task_definition, task_args, task_kwargs, task_name=None):\n # type: (Type[TrackingTask], TaskDefinition, List[Any], Dict[str,Any], str) -> TrackingTask\n param_values = build_func_parameter_values(\n task_definition, task_args, task_kwargs\n )\n # we need to add RESULT param\n if RESULT_PARAM in task_definition.task_param_defs:\n param = task_definition.task_param_defs[RESULT_PARAM]\n if isinstance(param, FuncResultParameter):\n for param_name in param.names:\n # we want to get the parameter evolved with the task_definition as owner\n inner_param = task_definition.task_param_defs[param_name]\n result_param_value = build_result_param(\n task_definition.task_passport, param_def=inner_param\n )\n\n param_values.append(result_param_value)\n\n result_param_value = build_result_param(\n task_definition.task_passport, param_def=param\n )\n param_values.append(result_param_value)\n\n task_params = Parameters(source=\"tracking_task\", param_values=param_values)\n\n return cls(\n task_name=task_name or task_definition.task_family,\n task_definition=task_definition,\n task_params=task_params,\n )",
"def parse_settings(self, requested_kwargs):\n kwargs = {}\n task_list = []\n for qb in self.qubits:\n task = {}\n task_list_fields = requested_kwargs['task_list_fields']\n\n transition_name_v = task_list_fields.get('transition_name')\n tr_name = self.get_param_value('transition_name',\n qubit=qb.name,\n default=transition_name_v[1])\n task['transition_name'] = tr_name\n\n value_params = {'v_low': None, 'v_high': None, 'pts': None}\n # The information about the custom parameters above could be\n # Saved somewhere else to generalize all wrappers\n\n default = self.get_param_value(f'default_{tr_name}_amp180',\n qubit=qb.name)\n current = qb.parameters[f'{tr_name}_amp180']()\n max = self.get_param_value('max_drive_amp', qubit=qb.name)\n n = self.get_param_value('n', qubit=qb.name)\n\n for name, value in value_params.items():\n value = self.get_param_value(name, qubit=qb.name)\n if isinstance(value, str):\n value = eval(\n value.format(current=current,\n max=max,\n default=default,\n n=n))\n value_params[name] = value\n\n sweep_points_v = task_list_fields.get('sweep_points', None)\n if sweep_points_v is not None:\n # Get first dimension (there is only one)\n # TODO: support for more dimensions?\n sweep_points_kws = next(iter(\n self.kw_for_sweep_points.items()))[1]\n values = np.linspace(value_params['v_low'],\n value_params['v_high'],\n value_params['pts'])\n task['sweep_points'] = SweepPoints()\n task['sweep_points'].add_sweep_parameter(values=values,\n **sweep_points_kws)\n qb_v = task_list_fields.get('qb', None)\n if qb_v is not None:\n task['qb'] = qb.name\n\n for k, v in task_list_fields.items():\n if k not in task:\n task[k] = self.get_param_value(k,\n qubit=qb.name,\n default=v[1])\n\n task_list.append(task)\n\n kwargs['task_list'] = task_list\n\n kwargs_super = super().parse_settings(requested_kwargs)\n kwargs_super.update(kwargs)\n\n return kwargs_super",
"def make_task(task_name, override_kwargs=None, max_code_length=100,\n require_correct_syntax=False,\n do_code_simplification=False,\n correct_bonus=2.0, code_length_bonus=1.0):\n logging.info('Making paper-config task.')\n n = 16 # Number of test cases.\n task_mapping = {\n 'print-hello': (\n PrintTask, dict(base=27, fixed_string=[8, 5, 12, 12, 15])),\n 'print': (PrintIntTask, dict(base=256, fixed_string=[1, 2, 3, 4, 5])),\n 'echo': (EchoTask, dict(base=27, min_length=1, max_length=6)),\n 'remove-char': (\n RemoveCharTask, dict(base=256, n=n, min_len=1, max_len=6)),\n 'reverse': (\n ReverseTask, dict(base=256, n=n, min_len=1, max_len=6)),\n 'reverse-tune': (\n ReverseTaskV2, dict(base=256, reward_type='static-bylen')),\n 'remove-char-tune': (RemoveCharTaskV2, dict(base=27)),\n 'prefix': (CommonPrefixTask, dict(base=27)),\n 'find': (FindSubStrTask, dict(base=27)),\n 'sort3': (SortFixedTaskV2, dict(base=27, n=150, length=3)),\n 'count-char': (CountCharTaskV2, dict(n=n, max_len=6)),\n 'bool-logic': (BooleanLogicTask, dict()),\n 'add': (AddTask, dict(n=9)),\n 'echo-twice': (EchoTwiceTask, dict(n=n)),\n 'echo-thrice': (EchoThriceTask, dict(n=n)),\n 'copy-reverse': (CopyReverseTask, dict(n=n)),\n 'zero-cascade': (EchoZeroCascadeTask, dict(n=n)),\n 'cascade': (EchoCascadeTask, dict(n=n)),\n 'shift-left': (ShiftLeftTask, dict(n=n)),\n 'shift-right': (ShiftRightTask, dict(n=n)),\n 'riffle': (RiffleTask, dict(n=n)),\n 'unriffle': (UnriffleTask, dict(n=n)),\n 'middle-char': (MiddleCharTask, dict(n=n)),\n 'remove-last': (RemoveLastTask, dict(n=n)),\n 'remove-last-two': (RemoveLastTwoTask, dict(n=n)),\n 'echo-alternating': (EchoAlternatingTask, dict(n=n)),\n 'echo-half': (EchoHalfTask, dict(n=n)),\n 'length': (LengthTask, dict(n=n)),\n 'echo-second-seq': (EchoSecondSequenceTask, dict(n=n)),\n 'echo-nth-seq': (EchoNthSequenceTask, dict(n=n)),\n 'substring': (SubstringTask, dict(n=n)),\n 'divide-2': (Divide2Task, dict(n=n)),\n 'dedup': (DedupTask, dict(n=n)),\n 'remove-target-char': (RemoveTargetCharTask, dict(n=n)),\n 'list-index': (ListIndexTask, dict(n=n)),\n 'fib': (FibonacciTask, dict()),\n 'count-down': (BottlesOfBeerTask, dict()),\n 'split': (SplitTask, dict()),\n 'trim-left': (TrimLeftTask, dict()),\n 'circle-route': (\n JudgeRouteCircleTask, dict(n=100, max_len=32)),\n 'multiply': (MultiplyTask, dict(n=100)),\n 'divmod': (DivModTask, dict(n=100)),\n }\n\n if task_name not in task_mapping:\n # Test tasks.\n if task_name == 'test-hill-climb':\n return test_tasks.BasicTaskManager(test_tasks.HillClimbingTask())\n raise ValueError('Unknown task type \"%s\"' % task_name)\n task_cls, kwargs = task_mapping[task_name]\n\n if override_kwargs:\n if not isinstance(override_kwargs, dict):\n raise ValueError(\n 'override_kwargs must be a dict, got: %s', override_kwargs)\n kwargs.update(override_kwargs)\n\n task = task_cls(**kwargs)\n\n reward_fn = r.absolute_distance_reward\n # reward_fn = r.absolute_mod_distance_reward\n # reward_fn = r.absolute_log_distance_reward\n logging.info('Using reward function: %s', reward_fn.__name__)\n\n # We want reward with and without code simplification to be scaled the same\n # way. 
Without code simplification, give the maximum code length bonus\n # every time.\n min_code_length = 0.0 if do_code_simplification else max_code_length\n\n return MultiIOTaskManager(\n task=task, correct_bonus=correct_bonus,\n code_length_bonus=code_length_bonus,\n max_code_length=max_code_length, min_code_length=min_code_length,\n reward_fn=reward_fn, require_correct_syntax=require_correct_syntax)",
"def msg_to_task(msg):\n if not isinstance(msg, dict):\n return None\n t = Task()\n t.args = msg[MessageBuilder.FIELD_DATA]\n t.isFault = msg[MessageBuilder.FIELD_ISF]\n t.seqNum = msg[MessageBuilder.FIELD_SEQNUM]\n t.timestamp = msg[MessageBuilder.FIELD_TIME]\n t.duration = msg[MessageBuilder.FIELD_DUR]\n t.cores = msg[MessageBuilder.FIELD_CORES] if MessageBuilder.FIELD_CORES in msg else None\n return t",
"def _create_task(self, body, *, task_cls=Task):\n return task_cls(self, body)"
] | [
"0.64628965",
"0.6422724",
"0.6353564",
"0.63443846",
"0.625492",
"0.62483037",
"0.61929274",
"0.61775887",
"0.6086991",
"0.6076463",
"0.60502404",
"0.59647053",
"0.5902241",
"0.58599955",
"0.5852859",
"0.5821661",
"0.5809856",
"0.57718843",
"0.57576394",
"0.5703558",
"0.56928945",
"0.56896466",
"0.56602937",
"0.5603942",
"0.5600008",
"0.55974406",
"0.5593321",
"0.5583968",
"0.5583248",
"0.555161"
] | 0.70445263 | 0 |
Find the source candidates (the ones that have not been found infected). Checks the final configurations (from data_["test"]). | def get_source_candidates(all_data_epigen):
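# for each entry of all_data_epigen, keep the indices with non-zero state in the final configuration of every test instance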
candids = {s:
[np.where(np.array(c[1])!=0)[0] for c in mdata["test"] ]
for s, mdata in all_data_epigen.items()}
return candids | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_candidates_list(self):\n pass",
"def test_check_source_2(self):\n self.eval_flags[\"check_id_typo\"] = False\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n self.assertEqual(len(self.src1.evaluations), 3)",
"def test_check_source_6(self):\n self.src1.lab_host = \"\"\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n self.assertEqual(len(self.src1.evaluations), 3)",
"def get_all_candidates(self) -> list:",
"def test_check_source_5(self):\n self.src1.host = \"\"\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n self.assertEqual(len(self.src1.evaluations), 3)",
"def test_check_source_1(self):\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n self.assertEqual(len(self.src1.evaluations), 4)",
"def test_check_source_4(self):\n self.src1.organism = \"\"\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n self.assertEqual(len(self.src1.evaluations), 3)",
"def check_files(self) -> None:\n notfound = False\n give_neuro_data_hint = False\n fullpaths = [f for f, _ in self.input_sources]\n if self.target_sources is not None:\n fullpaths.extend([f for f, _ in self.target_sources])\n for p in fullpaths:\n if not os.path.exists(p):\n print('{} not found.'.format(p))\n notfound = True\n if 'neuro_data_cdhw' in p:\n give_neuro_data_hint = True\n if give_neuro_data_hint:\n print('\\nIt looks like you are referencing the neuro_data_cdhw dataset.\\n'\n 'To install the neuro_data_xzy dataset to the default location, run:\\n'\n ' $ wget https://github.com/ELEKTRONN/elektronn.github.io/releases/download/neuro_data_cdhw/neuro_data_cdhw.zip\\n'\n ' $ unzip neuro_data_cdhw.zip -d ~/neuro_data_cdhw')\n if notfound:\n print('\\nPlease fetch the necessary dataset and/or '\n 'change the relevant file paths in the network config.')\n sys.stdout.flush()\n sys.exit(1)",
"def generateCandidates(self):\n\t\tprint(\"Candidate list:\\n\")\n\t\tkeys = list(self.prune_list.keys())\n\t\ttuple_count = len(keys[0])\n\t\tprune_list = {}\n\t\ttup = []\n\t\tfor v in comb(keys, 2):\n\t\t\ta = set(v[0])\n\t\t\tb = set(v[1])\n\t\t\t\n\t\t\t# If there's as many common element in a & b as one less than tuple_count\n\t\t\tif((len(a & b) == (tuple_count - 1)) and (tuple(a | b) not in tup)):\n\t\t\t\ttup.append(tuple(a | b))\n\t\t\t\tprint(tup[-1])\n\t\t\t\t# Update prune list\n\t\t\t\tcount = self.getSupport(tup[-1])\n\t\t\t\tif(count >= self.support):\n\t\t\t\t\tprune_list[tup[-1]] = count\n\t\treturn prune_list",
"def ComputeCandidatesInner( self, request_data ):\n candidates = []\n\n for i in self.completers:\n candidates.extend(i.ProduceTargets())\n\n print(request_data['query'], sys.stderr)\n\n return candidates",
"def test_candidates_retrieve(self):\n pass",
"def test_check_source_11(self):\n self.src1._organism_host_genus = \"Mycobacterio\"\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n count = count_status(self.src1, \"error\")\n self.assertEqual(count, 0)",
"def test_check_source_7(self):\n self.src1._organism_name = \"Trixie\"\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n count = count_status(self.src1, \"error\", \"warning\")\n self.assertEqual(count, 1)",
"def match_candidates(self):\n for event in self._events:\n event.match_candidates()",
"def _check_sources(self):\n avail_sources = []\n for source in self.__video_sources:\n avail_sources.append(source)\n\n # Try to start each video source known. Naovideo is not in this list,\n # because without an explicit request, no IP and port is known so no\n # connection can be made.\n for source in [\"webcam\", \"kinect\"]:\n if source in avail_sources:\n continue\n\n # Status unknown, try to start and stop\n self.__logger.info(\"Trying to see if source %s is available\" % source)\n try:\n if self._start_video_source(source):\n avail_sources.append(source)\n self._stop_video_source(source)\n except Exception, e:\n self.__logger.info(\"Error starting source %s, (error: %s) must not be available\" % (source, str(e)))\n \n return avail_sources",
"def test_check_source_8(self):\n self.src1._organism_host_genus = \"Gordonia\"\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n count = count_status(self.src1, \"error\", \"warning\")\n self.assertEqual(count, 1)",
"def test_check_source_3(self):\n self.eval_flags[\"check_host_typo\"] = False\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n self.assertEqual(len(self.src1.evaluations), 1)",
"def _generate_base_candidates(self, target_text):\n\n result_list = []\n tagged_text = tag(target_text)\n\n for i in range(1, 5):\n temp = []\n grams = find_ngrams(tagged_text, i)\n\n for gram in grams:\n phrase = \" \".join(list(map(lambda x: x[0], gram)))\n pos = \" \".join(list(map(lambda x: x[1], gram)))\n\n if pos in self.candidate_pattern:\n temp.append(phrase)\n\n result_list.append(temp)\n\n return result_list",
"def getValidTests(sourceTree):\n\n tests = getSections()\n newTests = tests[:]\n\n # [main] is reserved for test suite parameters\n newTests.remove(\"main\")\n\n removeList = []\n \n for test in newTests:\n\n print \" \"\n print \"checking parameters for test %s\" % (test)\n \n # check for the manditory parameters\n if (not (keyIsValid(\"%s.buildDir\" % (test)) and\n keyIsValid(\"%s.inputFile\" % (test)) and\n (sourceTree == \"fParallel\" or \n keyIsValid(\"%s.probinFile\" % (test)) ) and\n keyIsValid(\"%s.needs_helmeos\" % (test)) and\n keyIsValid(\"%s.dim\" % (test)) ) ):\n warning(\" WARNING: manditory runtime parameters for test %s not set\" % (test))\n warning(\" skipping test\")\n removeList.append(test)\n continue\n\n\n # check for optional parameters\n\n # restartTest\n if (not keyIsValid(\"%s.restartTest\" % (test)) ):\n warning(\" Assuming test is not restart run.\\n\")\n globalParams[\"%s.restartTest\" % (test)] = 0\n else:\n\n if (getParam(\"%s.restartTest\" % (test)) ):\n\n # make sure that the file number to restart from is defined\n if (not keyIsValid(\"%s.restartFileNum\" % (test)) ):\n warning(\"WARNING: test %s is a restart test, but is missing the restartFileNum parameter.\\n\" % (test))\n warning(\" skipping test\\n\")\n removeList.append(test)\n continue\n \n\n # compileTest\n if (not keyIsValid(\"%s.compileTest\" % (test)) ):\n warning(\" Assuming test is not compilation test run.\\n\")\n globalParams[\"%s.compileTest\" % (test)] = 0\n\n\n # selfTest\n if (not keyIsValid(\"%s.selfTest\" % (test)) ):\n warning(\" Assuming test is not a self-test.\\n\")\n globalParams[\"%s.selfTest\" % (test)] = 0\n else:\n\n if (getParam(\"%s.selfTest\" % (test)) ):\n \n # make sure that the success string is defined\n if (not keyIsValid(\"%s.stSuccessString\" % (test)) ):\n warning(\"WARNING: test %s is a self-test, but is missing stSuccessString parameter.\\n\" % (test))\n warning(\" skipping test\\n\")\n removeList.append(test)\n continue\n\n\n # useMPI\n if (not keyIsValid(\"%s.useMPI\" % (test)) ):\n warning(\" Assuming normal (not MPI) run.\\n\")\n globalParams[\"%s.useMPI\" % (test)] = 0\n else:\n\n if (getParam(\"%s.useMPI\" % (test)) ):\n\n # make sure that the number of processors is defined\n if (not keyIsValid(\"%s.numprocs\" % (test)) ):\n warning(\"WARNING: test %s is a parallel test, but did not specify the numprocs parameter.\\n\" % (test))\n warning(\" skipping test\\n\")\n removeList.append(test)\n continue\n \n\n # doVis\n if (not keyIsValid(\"%s.doVis\" % (test)) ):\n warning(\" Assuming no visualization.\\n\")\n globalParams[\"%s.doVis\" % (test)] = 0\n else:\n\n if (getParam(\"%s.doVis\" % (test)) ):\n\n # find out what variable to plot\n if (not keyIsValid(\"%s.visVar\" % (test)) ):\n warning(\"WARNING: test %s requested visualization but did not specify the visVar parameter.\\n\" % (test))\n warning(\" skipping test\\n\")\n removeList.append(test)\n continue\n \n\n # remove the invalid tests\n for test in removeList:\n newTests.remove(test)\n \n return newTests",
"def Step1(self):\n import random\n print('get mask for seedpoints NELLIX is used')\n # Check if we can go\n if self._vol is None or self._params is None:\n raise ValueError('Data or params not yet given.')\n \n t0 = time.time()\n \n # Detect points\n th = self._params.seed_threshold\n pp = get_stent_likely_positions(self._vol, th) # call below\n \n # Create nodes object from found points\n nodes = stentgraph.StentGraph()\n for p in pp:\n p_as_tuple = tuple(p.flat) # todo: perhaps seed detector should just yield list of tuples.\n nodes.add_node(p_as_tuple)\n \n t1 = time.time()\n if self._verbose:\n print()\n print('Found %i seed points, which took %1.2f s.' % (len(nodes), t1-t0))\n \n # Store the nodes\n self._nodes1 = nodes\n \n # Draw?\n if self._draw:\n self.Draw(1)\n \n return nodes",
"def ComputeCandidatesInner( self, request_data ):\n LOG.debug(\"compute candidates %d\" % self.complete_target)\n if self.complete_target == self.LABELS:\n return self._FindLabels()\n if self.complete_target == self.CITATIONS:\n return self._FindBibEntries()\n\n self.complete_target = self.NONE\n\n return self._FindLabels() + self._FindBibEntries()",
"def associate(conn, detected_sources, imobj, search_radius, save):\n # Find image resolution class\n for config, res_range in res_dict.items():\n if res_range[0] < imobj.bmin <= res_range[1]:\n res_class = config\n \n # Extract all previously detected sources in the same FOV\n assoc_rows = cone_search(conn, 'assoc_source', imobj.obs_ra,\n imobj.obs_dec, search_radius)\n match_logger.info('Extracted {} sources from assoc_source table '\n 'within {} degrees.'.format(\n len(assoc_rows), search_radius))\n # Limit to sources taken from images of similar resolution\n if len(assoc_rows) > 0:\n filtered_assoc_rows = filter_res(assoc_rows, res_class)\n else:\n filtered_assoc_rows = []\n\n if not filtered_assoc_rows:\n # No previous sources found in that sky region at that resolution\n for src in detected_sources:\n src.res_class = res_class\n src.ndetect = 1\n detected_matched = []\n detected_unmatched = detected_sources\n assoc_matched = []\n assoc_unmatched = []\n else:\n # Translate row dictionaries to DetectedSource objects\n assoc_sources = []\n assoc_ids = []\n for asrc in filtered_assoc_rows:\n assoc_ids.append(asrc['id'])\n assoc_sources.append(dbclasses.DetectedSource())\n dbclasses.dict2attr(assoc_sources[-1], asrc)\n match_logger.info('Attempting to match {} sources from this image to '\n '{} sources previously detected in VLITE images...'.\n format(len(detected_sources), len(assoc_sources)))\n\n detected_matched = []\n detected_unmatched = []\n assoc_matched = []\n assoc_unmatched = []\n\n cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n\n # Print results without saving to database\n if not save:\n # Dump detected_sources into temporary table\n sql = (\n '''\n CREATE TEMP TABLE temp_source (\n src_id INTEGER,\n ra DOUBLE PRECISION,\n dec DOUBLE PRECISION\n );\n ''')\n cur.execute(sql)\n conn.commit()\n for src in detected_sources:\n cur.execute('''INSERT INTO temp_source (\n src_id, ra, dec) VALUES (%s, %s, %s)''', (\n src.src_id, src.ra, src.dec))\n conn.commit()\n # Find nearest neighbor & \"match\" if within half a beam\n sql = '''SELECT a.src_id, bb.id AS assoc_id,\n 3600*q3c_dist(a.ra, a.dec, bb.ra, bb.dec) AS sep,\n 3600*q3c_dist(a.ra, a.dec, bb.ra, bb.dec) < %s AS match\n FROM temp_source AS a, LATERAL (\n SELECT b.* FROM assoc_source AS b WHERE b.id IN %s\n ORDER BY q3c_dist(a.ra, a.dec, b.ra, b.dec) ASC LIMIT 1)\n AS bb'''\n values = (0.5*imobj.bmin, tuple(assoc_ids))\n cur.execute(sql, values)\n rows = cur.fetchall()\n cur.execute('DROP TABLE temp_source')\n conn.commit()\n match_logger.info('-----------------------------------------------'\n '-----------------------------------------------'\n '---------------------------------')\n match_logger.info('src_id match assoc_id\\tra\\t\\te_ra\\t\\t\\tdec\\t\\t'\n 'e_dec\\t\\tseparation (arcsec)\\tndetect')\n match_logger.info('-----------------------------------------------'\n '-----------------------------------------------'\n '---------------------------------')\n # Save association results for database\n else:\n # Find nearest neighbor & \"match\" if within half a beam\n sql = '''SELECT a.src_id, bb.id AS assoc_id,\n 3600*q3c_dist(a.ra, a.dec, bb.ra, bb.dec) AS sep,\n 3600*q3c_dist(a.ra, a.dec, bb.ra, bb.dec) < %s AS match\n FROM detected_source AS a, LATERAL (\n SELECT b.* FROM assoc_source AS b\n WHERE a.image_id = %s AND b.id IN %s ORDER BY\n q3c_dist(a.ra, a.dec, b.ra, b.dec) ASC LIMIT 1) AS bb'''\n values = (0.5*imobj.bmin, imobj.id, tuple(assoc_ids))\n cur.execute(sql, values)\n rows = cur.fetchall()\n\n 
cur.close()\n\n # Create dictionary of src_id keys & associated values\n rowdict = {}\n for row in rows:\n rowdict[row['src_id']] = [row['assoc_id'], row['sep'], row['match']]\n\n for src in detected_sources:\n # Get the associated source object\n asrc = [msrc for msrc in assoc_sources if \\\n msrc.id == rowdict[src.src_id][0]][0]\n if rowdict[src.src_id][2]:\n # It's a match!\n src.assoc_id = asrc.id\n detected_matched.append(src)\n # Compute weighted averages\n cur_sigra_sq = asrc.e_ra * asrc.e_ra\n cur_sigdec_sq = asrc.e_dec * asrc.e_dec\n asrc.e_ra = np.sqrt(1. / (\n (1. / cur_sigra_sq) + (1. / (src.e_ra * src.e_ra))))\n asrc.ra = (asrc.e_ra * asrc.e_ra) * (\n (asrc.ra / cur_sigra_sq) + (src.ra / (\n src.e_ra * src.e_ra)))\n asrc.e_dec = np.sqrt(1. / (\n (1. / cur_sigdec_sq) + (1. / (src.e_dec * src.e_dec))))\n asrc.dec = (asrc.e_dec * asrc.e_dec) * (\n (asrc.dec / cur_sigdec_sq) + (src.dec / (\n src.e_dec * src.e_dec)))\n asrc.ndetect += 1\n assoc_matched.append(asrc)\n else:\n # No match -- new source\n src.res_class = res_class\n src.ndetect = 1\n detected_unmatched.append(src)\n assoc_unmatched.append(asrc)\n if not save:\n match_logger.info('{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}'.format(\n src.src_id, rowdict[src.src_id][2], asrc.id, asrc.ra,\n asrc.e_ra, asrc.dec, asrc.e_dec, rowdict[src.src_id][1],\n asrc.ndetect))\n\n match_logger.info(' -- number of matches: {}'.format(len(detected_matched)))\n match_logger.info(' -- number of new sources to add: {}'.format(\n len(detected_unmatched)))\n\n return detected_matched, detected_unmatched, assoc_matched, assoc_unmatched",
"def test_check_source_10(self):\n self.src1._lab_host_host_genus = \"Gordonia\"\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n count = count_status(self.src1, \"error\", \"warning\")\n self.assertEqual(count, 1)",
"def random_test(self, source):\r\n ret = 1\r\n for seed in range(1, 40):\r\n if source.run(temp_params={\"fitness_function\": (lambda x: -np.sum(x)**2+10),\r\n \"population_size\": 10,\r\n \"time_constraint\": 2,\r\n \"axes\": [(0, 5)],\r\n \"seed\": seed}) != \\\r\n source.run(temp_params={\"fitness_function\": (lambda x: -np.sum(x) ** 2 + 10),\r\n \"population_size\": 10,\r\n \"time_constraint\": 2,\r\n \"axes\": [(0, 5)],\r\n \"seed\": seed}):\r\n ret = 0\r\n if ret == 0:\r\n if self.verbosity > 0:\r\n print(\"ERROR: Random seed non functional, results cannot be replicated.\")\r\n return 0\r\n else:\r\n if self.verbosity > 1:\r\n print(\"Random seed functional, results replicable if a seed is used.\")\r\n return 1",
"def gather_candidates(self, context):\n candidates = []\n\n with open(context['data_file'], 'r') as fp:\n try:\n config = load(fp)\n except JSONDecodeError:\n err_string = 'Decode error for' + context['data_file']\n error(self.vim, err_string)\n config = []\n\n for obj in config:\n candidates.append({\n 'word': obj['option'],\n '__option': obj['option'],\n '__shortname': obj['shortname'],\n '__description': obj['description'],\n 'abbr': f\"{obj['option']:<15}│{obj['shortname']:<10}│{obj['description']:<15}\",\n })\n\n return candidates",
"def test_all_good_recovered(self):\n # Start sampling\n self.driver.start_sampling()\n\n self.create_sample_data_set_dir(\n \"DOS15908_1st7_step1.DAT\",\n RECOV_DIR,\n \"DOS15908.DAT\",\n copy_metadata=False\n )\n self.assert_data(\n (DostadParserRecoveredDataParticle, DostadParserRecoveredMetadataDataParticle),\n 'test_data_1r.txt.result.yml',\n count=2,\n timeout=10\n )\n\n self.create_sample_data_set_dir(\n \"DOS15908_1st7_step2.DAT\",\n RECOV_DIR,\n \"DOS15909.DAT\",\n copy_metadata=False\n )\n self.assert_data(\n DostadParserRecoveredDataParticle,\n 'test_data_2r.txt.result.yml',\n count=1\n )",
"def test_check_cds_2(self):\n self.eval_flags[\"check_locus_tag\"] = False\n import_genome.check_cds(self.cds1, self.eval_flags)\n self.assertEqual(len(self.cds1.evaluations), 11)",
"def test_check_source_9(self):\n self.src1._host_host_genus = \"Gordonia\"\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n count = count_status(self.src1, \"error\", \"warning\")\n self.assertEqual(count, 1)",
"def test_missing_data_sources(self):",
"def collect_confs(self):\n\n sim_confs = []\n failed_sims = []\n solfile = self.gconf['General']['solution_file']\n # Find the data files and instantiate Simulation objects\n base = os.path.expandvars(self.gconf['General']['base_dir'])\n self.log.info(base)\n for root, dirs, files in os.walk(base):\n conf_path = os.path.join(root, 'sim_conf.yml')\n if 'sim_conf.yml' in files and solfile in files:\n self.log.info('Gather sim at %s', root)\n # sim_obj = Simulation(Config(conf_path))\n conf = Config(conf_path)\n # sim_obj.conf.expand_vars()\n sim_confs.append(conf)\n elif 'sim_conf.yml' in files:\n # sim_obj = Simulation(Config(conf_path))\n conf = Config(conf_path)\n self.log.error('Sim %s is missing its data file',\n conf['General']['sim_dir'])\n failed_sims.append(conf)\n self.sim_confs = sim_confs\n self.failed_sims = failed_sims\n if not sim_confs:\n self.log.error('Unable to find any successful simulations')\n raise RuntimeError('Unable to find any successful simulations')\n return sim_confs, failed_sims"
] | [
"0.5934083",
"0.5801863",
"0.57194805",
"0.57038945",
"0.5695275",
"0.5662381",
"0.5629502",
"0.5607914",
"0.55140257",
"0.5501297",
"0.54986674",
"0.5470249",
"0.5362881",
"0.53098935",
"0.52713174",
"0.5263354",
"0.52497125",
"0.52446026",
"0.52430683",
"0.52268773",
"0.5200193",
"0.51946354",
"0.5165078",
"0.51533675",
"0.5150704",
"0.51498944",
"0.5133455",
"0.5120154",
"0.511871",
"0.51169896"
] | 0.7038606 | 0 |
Get the source position (fraction of the number of candidates). Uses the marginal distribution of shape N x T x q | def get_src_posit_obs_margs(margs, msources, candids):
psources = margs[:,0,1][candids]
    idx = candids[psources.argsort()[::-1]]
#print(idx)
pos = np.mean([np.argmax(idx == s) for s in msources])
return pos/len(candids) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def loc(self):\n return self.distribution.loc",
"def getPositionDistribution(self, position):\n dist = util.Counter()\n (x, y) = position\n total = 1.0\n dist[position] = 1.0\n\n if not self.walls[x + 1][y]:\n dist[(x + 1, y)] = 1.0\n total += 1.0\n if not self.walls[x - 1][y]:\n dist[(x - 1, y)] = 1.0\n total += 1.0\n if not self.walls[x][y + 1]:\n dist[(x, y + 1)] = 1.0\n total += 1.0\n if not self.walls[x][y - 1]:\n dist[(x, y - 1)] = 1.0\n total += 1.0\n dist[(x, y)] /= total\n if (x + 1, y) in dist.keys():\n dist[(x + 1, y)] /= total\n if (x - 1, y) in dist.keys():\n dist[(x - 1, y)] /= total\n if (x, y+ 1) in dist.keys():\n dist[(x, y + 1)] /= total\n if (x, y - 1) in dist.keys():\n dist[(x, y - 1)] /= total\n return dist",
"def determine_measure_position(self):\n green_probs = []\n net_size = len(self.net)\n #Belief propagation:\n #Analyzes each position's probability of obtaining\n #green when measuring at a time t+1.\n for i in range(0, net_size):\n accum = 0\n for j in range(0, net_size):\n distance = self.__get_distance(i, j)\n if distance == 0: #Probability of measure green at distance 0 from 'i'.\n accum += self.net[i].value * self.ct[0][0]\n elif distance == 1: #Probability of measure green at distance 1 from 'i'.\n accum += self.net[i].value * self.ct[1][0]\n elif distance == 2: #Probability of measure green at distance 2 from 'i'.\n accum += self.net[i].value * self.ct[2][0]\n elif distance == 3: #Probability of measure green at distance 3 from 'i'.\n accum += self.net[i].value * self.ct[3][0]\n else: #Probability of measure green at a distance >= 4 from 'i'.\n accum += self.net[i].value * self.ct[4][0]\n green_probs.append(accum)\n #Returns the position in which the probability of\n #obtaining green when measuring is the highest.\n return self.net[np.argmax(green_probs)].id",
"def get_marginal(self, target):\n if not self._calibrated:\n self._upward()\n self._downward()\n self._calibrated = True\n\n if self._verbosity > 0:\n print(\"calculating the marginals for {} target variables\".format(len(target)), flush=True)\n\n target_set = set(target)\n p1 = {}\n for c, clique in enumerate(self.cliques):\n cl_var_set = set(clique)\n for v in target_set.intersection(cl_var_set):\n v_ind = clique.index(v)\n summation_inds = list(set(range(len(cl_var_set))).difference({v_ind}))\n mrg = np.sum(self.clique_beliefs[c], axis=tuple(summation_inds))\n\n mrg /= np.sum(mrg)\n p1[v] = mrg[1]\n p1_arr = np.array([p1[v] for v in target])\n return p1_arr",
"def get_marginal(\n target=None\n ):\n pass",
"def mh(N, disttype):\n xs = np.array([])\n ys = np.array([])\n pos_now = (0,0)\n accept = 0\n for i in range(N):\n pos_cand = proposal_pdf(pos_now)\n prob_stay = target_pdf(pos_now, disttype)\n prob_move = target_pdf(pos_cand, disttype)\n if prob_move / prob_stay > np.random.uniform(0,1,1):\n pos_now = pos_cand\n xs = np.append(xs, pos_now[0])\n ys = np.append(ys, pos_now[1])\n accept += 1\n return xs, ys, accept/N",
"def sample_source_position_by_random_coordinate(config, n_spk, room_size, array_center, forbidden_rect=None):\n source_position = np.zeros((3, n_spk))\n\n d_from_wall = config['min_dist_from_wall'] if \"min_dist_from_wall\" in config.keys() else [0.0, 0.0] # minimum distance from wall\n d_from_array = config['min_dist_from_array'] if \"min_dist_from_array\" in config.keys() else 0.1 # minimum distnace from mic array\n d_from_other = config['min_dist_from_other'] if \"min_dist_from_other\" in config.keys() else 0.2 # minimum distance from other sources\n x_distribution = get_distribution_template(\"comment\", min=d_from_wall[0],\n max=room_size[0] - d_from_wall[0])\n y_distribution = get_distribution_template(\"comment\", min=d_from_wall[0],\n max=room_size[1] - d_from_wall[1])\n if \"height\" in config.keys():\n z_distribution = config['height']\n else:\n z_distribution = get_distribution_template(\"comment\", min=0.0, max=room_size[2])\n\n for i in range(n_spk):\n cnt = 0\n while 1:\n cnt += 1\n x = get_sample(x_distribution)[0]\n y = get_sample(y_distribution)[0]\n z = get_sample(z_distribution)[0]\n curr_pos = np.asarray([x, y, z])\n if np.linalg.norm(curr_pos[:2]-array_center[:2]) >= d_from_array:\n if forbidden_rect is None or (np.prod(curr_pos[0] - forbidden_rect[0, :]) > 0 or np.prod(curr_pos[1] - forbidden_rect[1, :]) > 0):\n if i == 0 or (np.linalg.norm(curr_pos[:2,np.newaxis]-source_position[:2, :i], axis=0) >= d_from_other).all():\n source_position[:, i] = curr_pos[:]\n break\n if cnt > 1000:\n raise Exception(\"Maximum number (1000) of trial finished but still not able to find acceptable position for speaker position. \")\n\n return source_position",
"def calc_pos(x):\n a = torch.arange(1, x.shape[1] + 1).unsqueeze(0).to(x.device)\n p = a.expand(x.shape[0], -1)\n mask = (x != 0).long()\n return p * mask",
"def min_entropy_pos(self):\n min_entropy = float(\"inf\")\n for Knot in self.wait_to_collapse:\n noise = random.random() / 1000\n # Add some noise to mix things up a little\n if self[Knot].entropy - noise < min_entropy:\n position = Knot[:]\n min_entropy = self[position].entropy - noise\n return position",
"def pos(self):\n return (self.raw - self.raw_zero) / self.ratio",
"def fit(prob_pos_X):\n prob_pos = [p for p in prob_pos_X]+[2-p for p in prob_pos_X]\n pos_mu, pos_std = dist_model.fit(prob_pos)\n return pos_mu, pos_std",
"def _relative_5prime_pos(self, gRNAHit_objs) -> float:\n return sum((hit.range[0] if hit.target.sense != '-'\n else (hit.target_len - hit.range[1]))\n for hit in gRNAHit_objs)/len(gRNAHit_objs)",
"def marginalDistribution(self, x, variable):\n return self._distribution.marginal(x, variable)",
"def distance_modulus(self):\n return 5*np.log10(self.parallax.to(u.pc, u.parallax())/10*u.pc)",
"def relative_5prime_pos(self) -> float:\n return self._relative_5prime_pos(self.hits)",
"def DCG_p(results, topic, p):\n rel = lambda label: gold_topic_labels[topic][label]\n top_p = results[:p]\n dcg = 0\n for idx, label in enumerate(top_p):\n rank = idx + 1\n if idx == 0:\n dcg += rel(label)\n continue\n dcg += rel(label)/ math.log(rank,2)\n return dcg",
"def _cage_pts(xyz, neighbor_xyzs, sigma, neighbor_diameters, L, M, R):\n pts = rand_sphere(M) * R + xyz\n for nxyz, nsig in zip(neighbor_xyzs, neighbor_diameters):\n dpts = np.remainder(pts - nxyz + L / 2.0, L) - L / 2.0\n dists_sq = np.sum(dpts**2, axis=1)\n goodix = dists_sq >= ((nsig + sigma) / 2.0)**2\n pts = pts[goodix, :]\n return pts",
"def determine_move_position(self):\n green_probs = []\n net_size = len(self.net)\n adjacents = self.net[self.current_pos].adjacents\n #Belief propagation:\n #Analyzes each position's probability of obtaining\n #green when measuring at a time t+1.\n for i in adjacents:\n accum = 0\n for j in range(0, net_size):\n distance = self.__get_distance(i-1, j)\n if distance == 0: #Probability of measure green at distance 0 from 'i'.\n accum += self.enemy_net[i-1].value * self.ct[0][0]\n elif distance == 1: #Probability of measure green at distance 1 from 'i'.\n accum += self.enemy_net[i-1].value * self.ct[1][0]\n elif distance == 2: #Probability of measure green at distance 2 from 'i'.\n accum += self.enemy_net[i-1].value * self.ct[2][0]\n elif distance == 3: #Probability of measure green at distance 3 from 'i'.\n accum += self.enemy_net[i-1].value * self.ct[3][0]\n else: #Probability of measure green at a distance >= 4 from 'i'.\n accum += self.enemy_net[i-1].value * self.ct[4][0]\n green_probs.append((i, accum))\n #Returns the position in which the probability of\n #obtaining green when measuring is the lowest.\n return min(green_probs, key=itemgetter(1))[0]",
"def marginal(self):\n m = np.zeros(len(self.domain))\n for fnode in self.neighbors:\n m += self.received[fnode]\n return np.exp(normalize(m))",
"def target_distribution(q):\n weight = q ** 2 / q.sum(0)\n return (weight.T / weight.sum(1)).T",
"def getPositionDistribution(self, gameState, position):\n dist = util.Counter()\n conf = game.Configuration(position, game.Directions.STOP)\n newState = gameState.deepCopy()\n newState.data.agentStates[self.index] = game.AgentState(conf, False)\n\n for action in getLegalActionsNoStop(newState, self.index):\n successorPosition = newState.getSuccessor(position, action)\n if (action is game.Directions.STOP):\n dist[successorPosition] += .1\n else:\n dist[successorPosition] += 1\n return dist",
"def log_marginal(self):\n #\n # Predictive covariance of x is sum of covariance of phi a and covariance of x|a\n x_Sigma = self.phi @ self.phi.T + np.diag(self.sigma_n**2 * np.ones(self.M))\n #\n # Predictive mean is 0 by symmetry\n # so given that x is distributed as a MVN, the exact marginal is\n lp_exact = st.multivariate_normal.logpdf(self.x, cov=x_Sigma)\n #\n return lp_exact",
"def get_position(self, position):",
"def _sample_pos(self, assign_result, num_expected, **kwargs):\n pos_inds = torch.nonzero(assign_result.gt_inds > 0)\n if pos_inds.numel() != 0:\n pos_inds = pos_inds.squeeze(1)\n if pos_inds.numel() <= num_expected:\n return pos_inds\n else:\n return self.random_choice(pos_inds, num_expected)",
"def calc_nearest_ind(self, robot_pose):\n pass",
"def get_center_of_mass_allies(self,obs):",
"def get_center_of_mass_enemies(self,obs):",
"def node_pos(self, spacing, type, layer, node):\n width = (self.num_hidden_layers + 3) * spacing * 2\n values = [self.num_input_nodes, self.num_hidden_nodes, self.num_output_nodes]\n values.sort(reverse=True)\n height = (values[0] + 1) * spacing\n h_percentile = height - spacing\n w_percentile = (width - (spacing * 2)) / (self.num_hidden_layers + 2)\n if type == 'input':\n pos_y = h_percentile / (self.num_input_nodes + 1)\n pos_y *= (node + 1)\n pos_x = w_percentile\n return (pos_x, pos_y)\n elif type == 'hidden':\n pos_y = h_percentile / (self.num_hidden_nodes + 1)\n pos_y *= (node + 1)\n pos_x = w_percentile * (layer + 2)\n return (pos_x, pos_y)\n elif type == 'output':\n pos_y = h_percentile / (self.num_output_nodes + 1)\n pos_y *= (node + 1)\n pos_x = w_percentile * (self.num_hidden_layers + 2)\n return (pos_x, pos_y)\n else:\n print(\"Invalid argument: type\")\n return 1",
"def _choose_position(self):\n \n \n # function used to find R given M\n def _root_function(r, func, uval, m_tot):\n \n return uval * m_tot - func(r)\n \n # optimization switches\n if self.optimize:\n mass_func = self._interpolate_cumulative_mass\n \n umin = self._mass_umin\n umax = self._mass_umax\n else:\n # use exact profile\n mass_func = self.DF.dprof.cumulative_mass\n \n umin = mass_func(self.DF.dprof.small_r) / self.DF.dprof.M_sys\n umax = mass_func(self.DF.dprof.large_r) / self.DF.dprof.M_sys\n \n \n failed = True\n # root finder may fail occasionally if r is too close to zero\n # keep drawing random number until it works.\n # alternate soln would be to draw from M(small_r)/M_tot to\n # M(large_r) / M_tot instead of 0 to 1... \n #while failed:\n i = 0\n while (failed and i < 100):\n \n try:\n u = np.random.rand()*(umax - umin) + umin\n\n r = opt.brentq(_root_function, self.DF.dprof.small_r, self.DF.dprof.large_r, \n args=(mass_func ,u, self.DF.dprof.M_sys,))\n failed = False\n \n except:\n failed = True\n _my_print('Root finder for position failing for the %004i time. Re-rolling.'%(i))\n \n \n i = i + 1\n # except: \n # failed = True\n\n \n \n return r",
"def sample_source_position(config, room_size, array_center):\n # sample the number of speakers\n n_spk = get_sample(config['num_spk'])\n n_spk = n_spk[0]\n\n if config['position_scheme'] == \"random_coordinate\":\n source_position = sample_source_position_by_random_coordinate(config, n_spk, room_size, array_center)\n else:\n raise Exception(\"Unknown speech source position scheme: %s\" % config['position_scheme'])\n\n return source_position"
] | [
"0.59985954",
"0.5844991",
"0.57710886",
"0.57066786",
"0.56944394",
"0.5637397",
"0.56237406",
"0.55892",
"0.5523165",
"0.5514936",
"0.54958504",
"0.5458601",
"0.5358698",
"0.53497237",
"0.5329066",
"0.53273976",
"0.5306407",
"0.53056574",
"0.52929187",
"0.5288644",
"0.5282052",
"0.5271898",
"0.52583236",
"0.5256828",
"0.52422094",
"0.5231751",
"0.5217389",
"0.52034885",
"0.5192377",
"0.5183241"
] | 0.631222 | 0 |
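
The positive document in the row above, get_src_posit_obs_margs, ranks the candidate nodes by their source marginal margs[:, 0, 1] and reports the mean rank of the true sources as a fraction of the candidate count. Below is a minimal sketch of that behaviour; the 6-node toy margs array, the candidate indices and the source list are invented for illustration and are not part of the dataset row.

import numpy as np

def get_src_posit_obs_margs(margs, msources, candids):
    # Marginal probability that each candidate node is a source (t=0, state 1).
    psources = margs[:, 0, 1][candids]
    # Candidate node ids ordered from most to least likely source.
    idx = candids[psources.argsort()[::-1]]
    # Mean rank of the true sources, normalised by the number of candidates.
    pos = np.mean([np.argmax(idx == s) for s in msources])
    return pos / len(candids)

# Toy marginals for 6 nodes, 1 time step, 2 states (values are made up).
margs = np.zeros((6, 1, 2))
margs[:, 0, 1] = [0.10, 0.05, 0.30, 0.20, 0.90, 0.15]
candids = np.array([0, 2, 4, 5])
# Sources 4 and 2 land at ranks 0 and 1 of 4 candidates -> mean 0.5 -> 0.125.
print(get_src_posit_obs_margs(margs, [4, 2], candids))  # -> 0.125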
Constructs the SAM topic weights file from the rest of the config. | def get_topic_weight_filename(config):
base = os.path.splitext(config['corpus'])[0]
return '%s--%dT.arff' % (base, config['T']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _init_from_file(self,params,weights_dict):\n\n self.name = params[keys._name]\n self.topology = params[keys._topology]\n self.learningRate = params[keys._learning_rate]\n self.momentum = params[keys._momentum]\n #self._outActiv_fun_key = params[keys._output_activation]\n #self._hiddenActiv_fun_key = params[keys._hidden_activation]\n #self.output_activation = self.set_outActivation_fun(func=self._outActiv_fun_key)\n #self.hidden_activation = self.set_hiddenactivation_fun(func=self._hiddenActiv_fun_key)\n\n #unpack weights\n self.weights = [weights_dict[layer_mat] for layer_mat in weights_dict]\n self.size = len(self.weights)\n self.Gradients = [None]*self.size",
"def setup_files(args):\n postfix = 'reinforce'\n has_value_model = False\n if args.baseline:\n postfix = \"reinforce-baseline\"\n has_value_model = True\n elif args.actor_critic:\n postfix = \"actor-critic\"\n has_value_model = True\n elif args.a2c:\n postfix = \"a2c\"\n has_value_model = True\n elif args.random:\n postfix = \"random\"\n\n # create the folder for log files\n try:\n os.mkdir(postfix)\n except FileExistsError:\n print(postfix, \" folder exists\")\n\n fileid = \"%s-%d\" % (postfix, int(time.time()))\n actor_weights = \"actor_weights-%s.h5\" % fileid\n actor_weights = os.path.join(postfix, actor_weights)\n encoder_weights = \"encoder_weights-%s.h5\" % fileid\n encoder_weights = os.path.join(postfix, encoder_weights)\n value_weights = None\n if has_value_model:\n value_weights = \"value_weights-%s.h5\" % fileid\n value_weights = os.path.join(postfix, value_weights)\n\n outdir = \"/tmp/%s\" % postfix\n\n misc = (postfix, fileid, outdir, has_value_model)\n weights = (actor_weights, encoder_weights, value_weights)\n\n return weights, misc",
"def init_weights(self):\n if self.init_cfg:\n super().init_weights()\n else:\n # Use smaller std for better stability and performance. We\n # use 0.1. See more details in \"ESRGAN: Enhanced Super-Resolution\n # Generative Adversarial Networks\"\n for m in [\n self.conv_first, self.conv_body, self.conv_up1,\n self.conv_up2, self.conv_hr, self.conv_last\n ]:\n default_init_weights(m, 0.1)",
"def makeSearchPhaseConfig(seriesName,configTemplate, window,mass, nPseudoExps):\n #----check for file\n if not os.path.isfile(configTemplate):\n print(\" the template input file does not exist: \", configTemplate)\n print(\" ----------Aborting-------------\")\n raise RuntimeError\n workTag = \"sensitivity_\"+seriesName+'_'+str(mass)+\"ww\"+window\n configOutDir=\"submitConfigs/\"+workTag\n if not os.path.isdir(configOutDir):\n os.mkdir(configOutDir)\n configOutName = \"submitConfigs/\"+workTag+\"/Step1_SearchPhase_window{0}.config\".format(window)\n configOut = open(configOutName,'w')\n with open(configTemplate) as configInData :\n for line in configInData :\n if \"nPseudoExp\" in line :\n line = \"nPseudoExp {0}\\n\".format(nPseudoExps)\n if \"swift_nBinsLeft\" in line :\n line = \"swift_nBinsLeft {0}\\n\".format(window)\n elif \"swift_nBinsRight\" in line :\n line = \"swift_nBinsRight {0}\\n\".format(window)\n configOut.write(line)\n configOut.close()\n modcommand = \"chmod 744 {0}\".format(configOut)\n os.system(modcommand)\n return configOutName",
"def make_batman_config(tmin, tmax, tstep, wmin, wmax, wnum, wlog=True, suffix=\"\", path=\".\"):\n params = {}\n params[\"curves_fname\"] = p.join(path, 'batmanCurves{}.csv'.format(suffix))\n params[\"params_fname\"] = p.join(path, 'batmanParams{}.csv'.format(suffix))\n params[\"tmin\"] = tmin\n params[\"tmax\"] = tmax\n params[\"tstep\"] = tstep\n params[\"wmin\"] = wmin\n params[\"wmax\"] = wmax\n params[\"wnum\"] = wnum\n params[\"wlog\"] = wlog\n\n outfile = p.join(path, 'batmanConfig{}.param'.format(suffix))\n with open(outfile, \"w+\") as f:\n json.dump(params, f)\n print(\"Batman config written to {}\".format(outfile))",
"def __init__( self, weights, topics ):\n\n # Number of topics and dictionary size\n self.W, self.K = topics.shape\n assert( self.W > self.K )\n\n self.topics = topics\n MixtureModel.__init__(self, weights, topics)",
"def mkconfig():\n basedir = os.path.join(os.path.expanduser('~'), '.strikepackage')\n\n # Try to populate dirs\n defaultdirs = [os.path.join(basedir, leaf)\n for leaf in ['examples', 'keys', 'templates']]\n\n for dirpath in defaultdirs:\n if not os.path.exists(dirpath):\n try:\n os.makedirs(dirpath, 0755)\n except (os.error, IOError) as ex:\n warn(\"Error while creating default directory: {}\".format(ex))\n\n # Try to place example confs if not present\n exdir = os.path.join(basedir, 'examples')\n exfiles = [(os.path.join(exdir, exfile[0]), exfile[1])\n for exfile in [('config.yaml', config_src),\n ('metadata.jinja2', metadata_src),\n ('userdata.jinja2', userdata_src)]]\n for exfile in exfiles:\n if not os.path.isfile(exfile[0]):\n try:\n with open(exfile[1], 'r') as f:\n src = f.read()\n with open(exfile[0], 'w+') as f:\n f.write(src)\n except IOError as ex:\n warn(\"Error writing example file: {}\".format(ex))",
"def _basic_setup(self):\n\n if not self.label.isalnum():\n raise ValueError(\n f\"Label '{self.label}' is not alphanumeric,\"\n \" which is incompatible with the SFTv3 naming specification\"\n \" ( https://dcc.ligo.org/T040164-v2/public ).\"\n \" Please avoid underscores, hyphens etc.\"\n )\n if len(self.label) > 60:\n raise ValueError(\n f\"Label {self.label} is too long to comply with SFT naming rules\"\n f\" ({len(self.label)}>60).\"\n )\n\n os.makedirs(self.outdir, exist_ok=True)\n self.config_file_name = os.path.join(self.outdir, self.label + \".cff\")\n self.theta = np.array([self.phi, self.F0, self.F1, self.F2])\n\n if self.h0 and np.any(\n [getattr(self, k, None) is None for k in self.required_signal_parameters]\n ):\n raise ValueError(\n \"If h0>0, also need all of ({:s})\".format(\n \",\".join(self.required_signal_parameters)\n )\n )\n\n incompatible_with_TS = [\"tstart\", \"duration\", \"noiseSFTs\"]\n TS_required_options = [\"Tsft\"]\n no_noiseSFTs_options = [\"tstart\", \"duration\", \"Tsft\", \"detectors\"]\n\n if getattr(self, \"timestamps\", None) is not None:\n if np.any(\n [getattr(self, k, None) is not None for k in incompatible_with_TS]\n ):\n raise ValueError(\n \"timestamps option is incompatible with\"\n f\" ({','.join(incompatible_with_TS)}).\"\n )\n if np.any([getattr(self, k, None) is None for k in TS_required_options]):\n raise ValueError(\n \"With timestamps option, need also all of\"\n f\" ({','.join(TS_required_options)}).\"\n )\n self._get_setup_from_timestamps()\n elif self.noiseSFTs is not None:\n logger.info(\n \"noiseSFTs is not None: Inferring tstart, duration, Tsft. \"\n \"Input tstart and duration will be treated as SFT constraints \"\n \"using lalpulsar.SFTConstraints; Tsft will be checked for \"\n \"internal consistency accross input SFTs.\"\n )\n self._get_setup_from_noiseSFTs()\n elif np.any([getattr(self, k, None) is None for k in no_noiseSFTs_options]):\n raise ValueError(\n \"Need either noiseSFTs, timestamps or all of ({:s}).\".format(\n \",\".join(no_noiseSFTs_options)\n )\n )\n else:\n self._get_setup_from_tstart_duration()\n\n self.sftfilenames = [os.path.join(self.outdir, fn) for fn in self.sftfilenames]\n self.sftfilepath = \";\".join(self.sftfilenames)\n\n if self.tref is None:\n self.tref = self.tstart\n\n if getattr(self, \"SFTWindowBeta\", None):\n raise ValueError(\n \"Option 'SFTWindowBeta' is defunct, please use 'SFTWindowParam'.\"\n )\n if getattr(self, \"SFTWindowType\", None):\n try:\n lal.CheckNamedWindow(\n self.SFTWindowType, self.SFTWindowParam is not None\n )\n except RuntimeError:\n raise ValueError(\n \"XLAL error on checking SFT window options.\"\n f\" Likely either SFTWindowType={self.SFTWindowType} is not a recognised window name,\"\n \" or it requires also setting an SFTWindowParam.\"\n )",
"def create_yml(self):\n fid = open(os.path.join(RESOURCE_PATH,\n '11079419_SNA_SNA.txt'),\n MODE_ASCII_READ)\n\n stream_handle = fid\n\n self.create_parser(stream_handle, True)\n\n particles = self.parser.get_records(1000)\n\n self.particle_to_yml(particles, '11079419_SNA_SNA_telem.yml')\n fid.close()",
"def create(self):\n\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n for dir_type in self.dirs[key].keys():\n create_if_not_exists(self.dirs[key][dir_type])\n else:\n create_if_not_exists(self.dirs[key])\n\n # read the file in, sample-by-sample\n # build the dictionary recursively\n # add rle file also to generated cfg files\n # print integrations per job as well!\n # consider more than 1 file per jobs -- the jobs are splitted by MEM integration anyways\n\n rle_filters = self.get_filter() if self.rle_filter_file else {}\n statistics = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"]:\n continue\n\n if not os.path.exists(sample_info['local_paths'][0]['path']):\n logging.warning(\"Skipping sample {sample_name}\".format(sample_name = sample_name))\n continue\n\n process_name = sample_info[\"process_name_specific\"]\n logging.info(\"Creating configuration files to run '%s' for sample %s\" % (self.executable_addMEM, process_name))\n is_mc = (sample_info[\"type\"] == \"mc\")\n if self.rle_filter_file:\n assert(process_name in rle_filters)\n\n inputFileList = generateInputFileList(sample_info, self.max_files_per_job)\n # typically, the analysis ends here and starts looping b/c the smallest unit of work processes\n # at least one file; we need, however, to split the file into event ranges in such a way that\n # each job performs mem_integrations_per_job MEM integrations\n\n # so what we are going to do is to open each set of files in inputFileList, read the variable\n # requestMEM_*l_*tau and try to gather the event ranges such that each event range\n # performs up to mem_integrations_per_job integrations per job\n memEvtRangeDict = self.memJobList(inputFileList, rle_filters[process_name] if self.rle_filter_file else [])\n\n for jobId in memEvtRangeDict.keys():\n\n key_dir = getKey(sample_name)\n key_file = getKey(sample_name, jobId)\n\n self.inputFiles[key_file] = memEvtRangeDict[jobId]['input_fileset']\n\n # there should always be a job\n assert(self.inputFiles[key_file] > 0), \"More than one input file: %s ?? 
!!\" % \\\n ', '.join(self.inputFiles[key_file])\n\n #assert(len(self.inputFiles[key_file]) == 1), \"There is more than one input file!\"\n self.cfgFiles_addMEM_modified[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"addMEM_%s_%s_%i_cfg.py\" % (self.channel, process_name, jobId)\n )\n self.shFiles_addMEM_modified[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"addMEM_%s_%s_%i.sh\" % (self.channel, process_name, jobId)\n )\n self.outputFiles[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_NTUPLES], \"%s_%i.root\" % (process_name, jobId)\n )\n self.logFiles_addMEM[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_LOGS], \"addMEM_%s_%s_%i.log\" % (self.channel, process_name, jobId)\n )\n self.logFiles_addMEM[key_file] = get_log_version((self.logFiles_addMEM[key_file],))[0]\n self.createCfg_addMEM(\n self.inputFiles[key_file],\n memEvtRangeDict[jobId]['event_range'][0],\n memEvtRangeDict[jobId]['event_range'][1],\n self.outputFiles[key_file],\n self.era,\n sample_info[\"sample_category\"],\n is_mc,\n self.cfgFiles_addMEM_modified[key_file],\n memEvtRangeDict[jobId]['whitelist'],\n )\n\n # associate the output file with the fileset_id\n #UDPATE: ONE OUTPUT FILE PER SAMPLE!\n fileset_id = memEvtRangeDict[jobId]['fileset_id']\n hadd_output_dir = os.path.join(\n self.dirs[key_dir][DKEY_FINAL_NTUPLES],\n '%04d' % (fileset_id // 1000)\n )\n if not os.path.exists(hadd_output_dir):\n os.makedirs(hadd_output_dir)\n hadd_output = os.path.join(\n hadd_output_dir, '%s_%i.root' % ('tree', fileset_id) # UDPATE: ADDED\n #hadd_output_dir, \"tree.root\" # UDPATE: REMOVED\n )\n if hadd_output not in self.hadd_records:\n self.hadd_records[hadd_output] = {}\n self.hadd_records[hadd_output]['output_files'] = []\n self.hadd_records[hadd_output]['fileset_id'] = fileset_id\n self.hadd_records[hadd_output]['output_files'].append(self.outputFiles[key_file])\n self.hadd_records[hadd_output]['process_name'] = process_name\n\n # let's sum the number of integration per sample\n nofEntriesMap = {}\n for v in memEvtRangeDict.values():\n if v['fileset_id'] not in nofEntriesMap:\n nofEntriesMap[v['fileset_id']] = {\n 'nof_entries' : v['nof_entries'],\n }\n statistics[process_name] = {\n 'nof_int' : sum([entry['nof_int'] for entry in memEvtRangeDict.values()]),\n 'nof_entries' : sum([entry['nof_entries'] for entry in nofEntriesMap.values()]),\n 'nof_events_pass' : sum([entry['nof_events_pass'] for entry in memEvtRangeDict.values()]),\n 'nof_int_pass' : sum([entry['nof_int_pass'] for entry in memEvtRangeDict.values()]),\n 'nof_zero' : sum([entry['nof_zero'] for entry in memEvtRangeDict.values()]),\n 'nof_jobs' : len(memEvtRangeDict),\n }\n\n if self.is_sbatch:\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable_addMEM)\n self.createScript_sbatch()\n\n logging.info(\"Creating Makefile\")\n lines_makefile = []\n self.addToMakefile_addMEM(lines_makefile)\n self.addToMakefile_hadd(lines_makefile)\n self.createMakefile(lines_makefile)\n\n ws_len = max([len(kk) + 1 for kk in statistics.keys()])\n total_nof_integrations_sum = sum(x['nof_int'] for x in statistics.values())\n total_nof_entires = sum(x['nof_entries'] for x in statistics.values())\n total_nof_zero_int = sum(x['nof_zero'] for x in statistics.values())\n total_nof_jobs = sum(x['nof_jobs'] for x in statistics.values())\n total_nof_pass = sum(x['nof_events_pass'] for x in statistics.values())\n total_nof_int_pass_avg = float(sum(x['nof_int_pass'] for x in statistics.values())) / total_nof_pass\n 
total_nof_integrations_avg = float(total_nof_integrations_sum) / total_nof_entires\n total_nof_int_per_job = float(total_nof_integrations_sum) / total_nof_jobs\n for k, v in statistics.iteritems():\n if v['nof_entries'] == 0:\n int_per_event = 0.\n evt_pass = 0.\n else:\n int_per_event = float(v['nof_int']) / v['nof_entries']\n evt_pass = (100 * float(v['nof_events_pass']) / v['nof_entries'])\n if v['nof_events_pass'] == 0:\n nof_int_pass = 0.\n else:\n nof_int_pass = float(v['nof_int_pass']) / v['nof_events_pass']\n print('%s%s: %d (%d entries; %d jobs; %.2f int/evt; %d (%.2f%%) evt pass; %.2f int/evt pass; %d evt 0int)' %\n (k,\n ' ' * (ws_len - len(k)),\n v['nof_int'],\n v['nof_entries'],\n v['nof_jobs'],\n int_per_event,\n v['nof_events_pass'],\n evt_pass,\n nof_int_pass,\n v['nof_zero'],\n )\n )\n print('%s%s: %d (%d entries; %d jobs; %.2f int/evt; %d evt pass; %.2f int/evt pass; '\n '%.2f int/job pass; %d evt 0int)' %\n ('total',\n ' ' * (ws_len - len('total')),\n total_nof_integrations_sum,\n total_nof_entires,\n total_nof_jobs,\n total_nof_integrations_avg,\n total_nof_pass,\n total_nof_int_pass_avg,\n total_nof_int_per_job,\n total_nof_zero_int,\n )\n )\n\n if self.max_mem_integrations > 0 and total_nof_integrations_sum > self.max_mem_integrations:\n logging.error(\"Will not start the jobs (max nof integrations exceeded)!\")\n return False\n else:\n logging.info(\"Done\")\n return True",
"def create_samfile(self):",
"def fwordweights(self):\n return self.prefix + 'wordweights.txt'",
"def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):\n\n self.params = {}\n self.params['W1'] = weight_init_std * \\\n np.random.randn(input_size, hidden_size)\n self.params['b1'] = np.zeros(hidden_size)\n self.params['W2'] = weight_init_std * \\\n np.random.randn(hidden_size, output_size)\n self.params['b2'] = np.zeros(output_size)",
"def _setupFilename(self):\n try:\n os.mkdir('./.netModel')\n except:\n pass # hope it's already there...\n filenames = os.listdir('./.netModel')\n configNum = 1\n i = 0\n configNumString = '%(c)04d' % {'c':configNum}\n while i < len(filenames):\n configNumString = '%(c)04d' % {'c':configNum}\n if (filenames[i][:4]==configNumString):\n configNum += 1\n i = 0\n else:\n i += 1\n return os.path.realpath('.')+'/.netModel/'+configNumString",
"def file_setup(self):\n #output a .edf file if the input is txt\n if self.args.input_format == 'txt':\n signals = []\n headers = []\n \n #read sample frequency from a .xml file\n if self.args.is_test:\n self.sample_rate = 1024\n else:\n xml_file = open(self.args.input_path + self.args.input_name + '.xml', \"r\")\n xml_content = xml_file.read()\n my_ordered_dict = xmltodict.parse(xml_content)\n dict = json.loads(json.dumps(my_ordered_dict))\n self.sample_rate = eval(dict['RECORD_INFO']['Record']['SamplesFreq'])\n \n #define header, needed for .edf file\n# header = {'label':'ch_name', \n# 'dimension': 'uV',\n# 'sample_rate': self.sample_rate,\n# 'physical_max': 5000,\n# \"physical_min\": -5000,\n# 'digital_max': 5000,\n# 'digital_min': -5000,\n# 'transducer': 'None',\n# 'prefilter': 'None'}\n\n# j = 0\n for i in self.files:\n if i[-3:] != 'xml' and i[-4:] != 'xysw':\n raw = np.loadtxt(self.args.input_path + i)\n self.physical_max.append(np.max(raw))\n self.physical_min.append(np.min(raw))\n \n \n signals.append(raw)\n# new_header = header.copy()\n# new_header['label'] = 'ch' + str(j)\n# new_header['physical_max'] = np.max(raw)\n# new_header['physical_min'] = np.min(raw)\n\n# j = j+1\n# headers.append(new_header)\n self.ch_num = self.ch_num+1\n \n #write edf\n with open(self.output_edf_original, 'w') as output:\n flag = pyedflib.highlevel.write_edf_quick(output.name, signals, self.sample_rate, digital=False)\n if flag == False:\n print('unable to save file into .edf')\n exit()\n else:\n print('txt data loaded into edf, edf saved at ./output_edf as: ' + self.output_edf_original)\n self.raw=mne.io.read_raw_edf(self.output_edf_original,preload=True)\n self.ch_names = self.raw.ch_names\n \n #if already a .edf\n elif self.args.input_format == 'bdf':\n self.raw = mne.io.read_raw_bdf(self.args.input_path + self.files[0], preload = True)\n self.ch_num = len(self.raw.ch_names)\n self.ch_names = self.raw.ch_names\n self.sample_rate = self.raw.info['sfreq']\n elif self.args.input_format == 'edf':\n self.raw = mne.io.read_raw_edf(self.args.input_path + self.files[0], preload = True)\n self.ch_num = len(self.raw.ch_names)\n self.ch_names = self.raw.ch_names\n self.sample_rate = self.raw.info['sfreq']\n elif self.args.input_format =='mne':\n mne_exp = mne.datasets.eegbci.load_data(1, 2, path=None, force_update=False, update_path=None, base_url='https://physionet.org/files/eegmmidb/1.0.0/', verbose=None)[0]\n self.raw = mne.io.read_raw_edf(mne_exp, preload = True)\n self.ch_num = len(self.raw.ch_names)\n self.ch_names = self.raw.ch_names\n self.sample_rate = self.raw.info['sfreq']\n \n \n return self.raw",
"def createSTP(self, stp_filename, parameters):\n wordsize = parameters[\"wordsize\"]\n rounds = parameters[\"rounds\"]\n weight = parameters[\"sweight\"]\n\n with open(stp_filename, 'w') as stp_file:\n stp_file.write(\"% Input File for STP\\n% Salsa w={}\"\n \"rounds={}\\n\\n\\n\".format(wordsize, rounds))\n\n # Setup variables\n a = [\"a{}r{}\".format(j, i) for i in range(rounds + 1) for j in range(16)]\n b = [\"b{}r{}\".format(j, i) for i in range(rounds) for j in range(16)]\n w = [\"w{}r{}\".format(j, i) for i in range(rounds) for j in range(16)]\n\n stpcommands.setupVariables(stp_file, a, wordsize)\n stpcommands.setupVariables(stp_file, b, wordsize)\n stpcommands.setupVariables(stp_file, w, wordsize)\n\n # Ignore MSB\n stpcommands.setupWeightComputation(stp_file, weight, w, wordsize, 1)\n\n for rnd in range(rounds):\n if rnd % 2 != 0:\n #Rowround\n for row in range(4):\n a_in = [a[(i + row) % 4 + 4 * row + 16 * rnd] for i in range(4)]\n a_out = [a[(i + row) % 4 + 4 * row + 16 * (rnd + 1)] for i in range(4)]\n tmp_b = [b[i + 4 * row + 16 * rnd] for i in range(4)]\n tmp_w = [w[i + 4 * row + 16 * rnd] for i in range(4)]\n self.setupQuarterRound(stp_file, a_in, tmp_b, a_out, tmp_w, wordsize)\n else:\n #Columnround\n for col in range(4): \n a_in = [a[(i * 4 + 4 * col + col) % 16 + 16 * rnd] for i in range(4)]\n a_out = [a[(i * 4 + 4 * col + col) % 16 + 16 * (rnd + 1)] for i in range(4)]\n tmp_b = [b[i * 4 + col + 16 * rnd] for i in range(4)]\n tmp_w = [w[i * 4 + col + 16 * rnd] for i in range(4)]\n self.setupQuarterRound(stp_file, a_in, tmp_b, a_out, tmp_w, wordsize)\n\n # No all zero characteristic\n stpcommands.assertNonZero(stp_file, a, wordsize)\n\n for key, value in parameters[\"fixedVariables\"].items():\n stpcommands.assertVariableValue(stp_file, key, value)\n\n for char in parameters[\"blockedCharacteristics\"]:\n stpcommands.blockCharacteristic(stp_file, char, wordsize)\n\n stpcommands.setupQuery(stp_file)\n\n return",
"def _create_weights(self):\n\n self.mu_W = tf.get_variable(\n name=\"mu_W\", initializer=tf.random_normal_initializer(),\n shape=[self.rnn_dim, self.z_dim])\n\n self.mu_b = tf.get_variable(\n name=\"mu_b\", initializer=tf.random_normal_initializer(),\n shape=[self.z_dim])\n\n self.log_sig_sq_W = tf.get_variable(\n name=\"log_sig_sq_W\", initializer=tf.random_normal_initializer(),\n shape=[self.rnn_dim, self.z_dim])\n\n self.log_sig_sq_b = tf.get_variable(\n name=\"log_sig_sq_b\", initializer=tf.random_normal_initializer(),\n shape=[self.z_dim])\n \n self.y_W = tf.get_variable(\n name=\"y_W\", initializer=tf.random_normal_initializer(),\n shape=[self.z_dim, self.rnn_dim])\n\n self.y_b = tf.get_variable(\n name=\"y_b\", initializer=tf.random_normal_initializer(),\n shape=[self.rnn_dim])\n \n self.softmax_W = tf.get_variable(\n name=\"softmax_W\", initializer=tf.random_normal_initializer(),\n shape=[self.rnn_dim, self.vocabulary_size])\n \n self.softmax_b = tf.get_variable(\n name=\"softmax_b\", initializer=tf.random_normal_initializer(),\n shape=[self.vocabulary_size])",
"def setup_config_folder(self):\n\n config_dir = self.config_folder\n \n logging.info(\"using config folder: %s\" % (config_dir))\n if not os.path.isdir(config_dir):\n os.mkdir(config_dir)\n\n try:\n f = open(config_dir + self.wordlist_file,'r')\n except IOError:\n f = open(config_dir + self.wordlist_file,'a+')\n f.close()\n\n try:\n f = open(config_dir + self.score_file,'r')\n except IOError:\n f = open(config_dir + self.score_file,'a+')\n f.close()",
"def init_weights(self):\n\n super().init_weights()\n\n init_type = None if self.init_cfg is None else self.init_cfg.get(\n 'type', None)\n if init_type != 'Pretrained' and self.with_tsa:\n for module in [\n self.fusion.feat_fusion, self.fusion.spatial_attn1,\n self.fusion.spatial_attn2, self.fusion.spatial_attn3,\n self.fusion.spatial_attn4, self.fusion.spatial_attn_l1,\n self.fusion.spatial_attn_l2, self.fusion.spatial_attn_l3,\n self.fusion.spatial_attn_add1\n ]:\n kaiming_init(\n module.conv,\n a=0.1,\n mode='fan_out',\n nonlinearity='leaky_relu',\n bias=0,\n distribution='uniform')",
"def __init__(self):\n self.model = {'mol':[], 'nmol':0}\n self.template = {} \n self.config = {}\n self.config['tfile'] = 'gau-template-bsse.gjf'\n self.config['xyzfile'] = 'model.xyz'\n self.config['jobfile'] = 'gau.gjf'\n self.config['job_prefix'] = self.config['jobfile'].split(\".\")[0]\n self.config['incr'] = 1\n \n self.rd_cmd_stream()\n return",
"def _random_weight_initialization(self, mean=0, stddev=1):\n self.slp_weights = np.random.normal(mean, stddev, size=(self.n_rbfs, self.n_targets))",
"def _read_config(self):\n if not os.path.exists(self.config_file):\n raise Exception(\"Can't read the SGDM config file\")\n config = toml.load(self.config_file)\n\n self.initial_model = config[\"initial_model\"]\n self.alpha = config[\"alpha\"]\n self.beta = config[\"beta\"] # decay factor for first moments\n self.smoothing_timestep = config[\"smoothing_timestep\"]\n\n # Perturbation decay per iteration as a percentage of the relative\n # deviation to the initial model\n self.perturbation_decay = config[\"perturbation_decay\"]\n self.roughness_decay_type = config[\"roughness_decay_type\"]\n if self.roughness_decay_type not in [\"relative_perturbation\", \"absolute\"]:\n raise Exception(\n \"Roughness decay type should be either \"\n \"'relative_perturbation' or 'absolute'\"\n )\n self.update_smoothing_length = config[\"update_smoothing_length\"]\n self.roughness_decay_smoothing_length = config[\n \"roughness_decay_smoothing_length\"\n ]\n\n # Gradient scaling factor to avoid issues with floats, this should be constant throughout the inversion\n self.grad_scaling_fac = config[\"gradient_scaling_factor\"]\n # Regularization parameter to avoid dividing by zero\n if \"max_iterations\" in config.keys():\n self.max_iterations = config[\"max_iterations\"]\n else:\n self.max_iterations = None",
"def read_weights(F_DIST):\n global F_DIST_w1, w, num_features, N_IDEN_PROB, MORE_THAN_1_W\n\n # read weights file first line, get json information into a dict\n with open(WEIGHTS_FNAME, \"r\") as f:\n WEIGHTS_FDATA = json.loads(f.readline())\n # has keys LAYERS, J, NUM_FEATURES\n\n # get and verify data from the weights file\n N_IDEN_PROB = WEIGHTS_FDATA['LAYERS']\n MORE_THAN_1_W = (N_IDEN_PROB > 2) # more than 2 layers === more than 1 weight\n if J > WEIGHTS_FDATA['J']:\n raise RuntimeError(\"J in weights file is less than J provided to the \"\n \"script; former >= latter\")\n if WEIGHTS_FDATA['NUM_FEATURES'] != num_features:\n raise RuntimeError(\"num_features in weights file is not the same as \"\n \"num_features provided to this script\")\n\n # read weights file\n # no. of weights is 1 less than no. of layers in weights file\n list_of_w = ad.read_weights_file(\n WEIGHTS_FNAME, N_IDEN_PROB-1, WEIGHTS_FDATA['J'], J, num_features\n )\n list_of_w[-1] = np.expand_dims(list_of_w[-1], axis=2)\n\n # split w[0]; multiply the F_DIST portion of w[0] with F_DIST\n w1_for_fdist, w1_for_r = np.split(list_of_w[0], [num_features-1], axis=1)\n F_DIST_w1 = F_DIST.bmm(torch.from_numpy(w1_for_fdist))\n\n w['first_for_r'] = torch.from_numpy(w1_for_r)\n w['except_first'] = [torch.from_numpy(wi) for wi in list_of_w[1:]]",
"def generateSexConfig(outFile, configFile,\r\n paramsFile, inputModelsfile, weightImage, sexFile,\r\n matchRadius): \r\n f = open(configFile, 'w')\r\n\r\n \"\"\"\r\n sexConfig = dict(CATALOG_NAME='matched.cat', \r\n CATALOG_TYPE='ASCII_HEAD', \r\n PARAMETERS_NAME='assoc.param', \r\n DETECT_TYPE='CCD',\r\n DETECT_MINAREA='8.78733031674',\r\n DETECT_THRESH='1.25',\r\n ANALYSIS_THRESH='1.25',\r\n FILTER='Y',\r\n FILTER_NAME=SExroot+'tophat_3.0_3x3.conv',\r\n DEBLEND_NTHRESH='64',\r\n DEBLEND_MINCONT='0.0002',\r\n CLEAN='Y',\r\n CLEAN_PARAM='1.',\r\n MASK_TYPE='CORRECT',\r\n PHOT_APERTURES='14.0',\r\n PHOT_AUTOPARAMS='2.5,3.5',\r\n SATUR_LEVEL='50000.0',\r\n MAG_ZEROPOINT='31.2671969775',\r\n MAG_GAMMA='4.0',\r\n GAIN='13.4213208113',\r\n PIXEL_SCALE='0.221',\r\n SEEING_FWHM='1.060202',\r\n STARNNW_NAME=SExroot+'default.nnw',\r\n BACK_SIZE='64',\r\n BACK_FILTERSIZE='3',\r\n BACKPHOTO_TYPE='LOCAL',\r\n BACKPHOTO_THICK='102',\r\n MEMORY_OBJSTACK='15000',\r\n MEMORY_PIXSTACK='2600000',\r\n MEMORY_BUFSIZE='4600',\r\n ASSOC_NAME='input_model.coo',\r\n ASSOC_PARAMS='1,2',\r\n ASSOC_RADIUS='5.0',\r\n ASSOCSELEC_TYPE='MATCHED',\r\n ASSOC_TYPE='NEAREST',\r\n ASSOC_DATA='1,2,3,4,5',\r\n VERBOSE_TYPE='NORMAL',\r\n #WEIGHT_GAIN='Y',\r\n WEIGHT_IMAGE='/home/walter/Dropbox/Completeness/f02p01_F814W_2.swp.weight.fits',\r\n WEIGHT_TYPE='MAP_WEIGHT'\r\n )\r\n \"\"\"\r\n\r\n sexdata = open(sexFile, \"r\")\r\n \r\n sexdict = {l.split()[0] : l.split()[1] for l in sexdata.readlines() if l[0] != \" \"}\r\n sexdata.close()\r\n \r\n sexConfig = sexdict\r\n\r\n #... then change the information with some input, image-specific, info\r\n sexConfig['CATALOG_NAME']=outFile\r\n sexConfig['PARAMETERS_NAME']=paramsFile\r\n sexConfig['ASSOC_RADIUS']=str(matchRadius)\r\n sexConfig['ASSOC_NAME']=inputModelsfile\r\n aperture = float(sexConfig['SEEING_FWHM'])/float(sexConfig['PIXEL_SCALE'])\r\n sexConfig['PHOT_APERTURES']=str(aperture)+','+str(2*aperture)+','+str(3*aperture)\r\n sexConfig['WEIGHT_IMAGE']=str(weightImage)\r\n sexConfig['FILTER_NAME']=SExroot+sexConfig['FILTER_NAME']\r\n sexConfig['STARNNW_NAME']=SExroot+sexConfig['STARNNW_NAME']\r\n sexConfig['ASSOC_PARAMS']='1,2'\r\n sexConfig['ASSOCSELEC_TYPE']='MATCHED'\r\n sexConfig['ASSOC_TYPE']='NEAREST'\r\n sexConfig['ASSOC_DATA']='1,2,3,4,5'\r\n \r\n #Now write them out in a config file\r\n for key, value in sexConfig.iteritems():\r\n f.write(key+' '+value+'\\n')\r\n f.close()",
"def dump_config_and_makefile(_config):\n experiment_dir = Path(_config['trainer']['storage_dir'])\n makefile_path = Path(experiment_dir) / \"Makefile\"\n\n if not makefile_path.exists():\n from padertorch.contrib.examples.source_separation.tasnet.templates import \\\n MAKEFILE_TEMPLATE_TRAIN\n\n config_path = experiment_dir / \"config.json\"\n pt.io.dump_config(_config, config_path)\n\n makefile_path.write_text(\n MAKEFILE_TEMPLATE_TRAIN.format(\n main_python_path=pt.configurable.resolve_main_python_path(),\n experiment_name=experiment_name,\n eval_python_path=('.'.join(\n pt.configurable.resolve_main_python_path().split('.')[:-1]\n ) + '.evaluate')\n )\n )",
"def setup_training(args: argparse.Namespace) -> None:\n # 1. Read hyperparameters from file\n hp = HParams.from_yaml(args.path_config)\n # check if GPU available and add it to parameters\n hp[\"device\"] = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n # 2. Create extension of the architecture of the model and timestamp for this run (use to\n # identify folders and files created for this run)\n # format: f(params_file)_t(n_tiers)_l(n_layers)_hd(hidden_size)_gmm(gmm_size).\n extension_architecture = f\"d{hp.name}_t{hp.network.n_tiers}_\" \\\n f\"l{'.'.join(map(str, hp.network.layers))}_\" \\\n f\"hd{hp.network.hidden_size}_gmm{hp.network.gmm_size}\"\n timestamp = f\"{datetime.now().strftime('%Y%m%d-%H%M%S')}\"\n\n # 3 Create directories for saving logs and model weights if they do not exist\n # 3.1 Create model weights directory for this run (the same directory will be used for different\n # runs of a model with same architecture and the difference will be in the file stored)\n hp[\"training\"][\"dir_chkpt\"] = hp.training.dir_chkpt + extension_architecture\n Path(hp.training.dir_chkpt).mkdir(parents=True, exist_ok=True)\n # 3.2 Create general log directory for this run (the same directory will be used for different\n # runs of a model with same architecture and the difference will be in the file stored)\n hp[\"logging\"][\"dir_log\"] = hp.logging.dir_log + extension_architecture\n Path(hp.logging.dir_log).mkdir(parents=True, exist_ok=True)\n\n # 4. Setup general logging (it will use the folder previously created and the filename will be:\n tier = str(args.tier) if args.tier is not None else 'ALL'\n filename = f\"{hp.logging.dir_log}/tier{tier}_{timestamp}\"\n logging.basicConfig(\n level=logging.INFO,\n format='%(asctime)s - %(levelname)s - %(message)s',\n handlers=[\n logging.FileHandler(filename=filename), # handler to save the log to a file\n logging.StreamHandler() # handler to output the log to the terminal\n ])\n logger = logging.getLogger()\n\n # 5. Show device that will be used for training: CPU or GPU\n logger.info(f\"Device for training: {hp.device}\")\n\n # 6. Start training of the model (or a single tier, depending on args)\n train_model(args, hp, extension_architecture, timestamp, logger)",
"def write_WF(WF, file_name, sample_rate):\n create_directory_for_file_if_needed(file_name)\n sf.write(file_name, WF, sample_rate)",
"def build_from_configuration_parameters(cls, config_filename, args=None):\n\n # XXX: in case the user doesn't specify a name,\n # this will help auto-generate unique ones in a sequence.\n global __scale_client_n_anon_apps_added__\n __scale_client_n_anon_apps_added__ = 0\n\n if config_filename is None and args is None:\n raise ValueError(\"can't build from configuration parameters when both filename and args are None!\")\n\n # Dummy config dict in case no config file\n cfg = {'eventsinks': {}, 'sensors': {}, 'applications': {}, 'networks': {}}\n\n if config_filename is not None:\n try:\n cfg = cls.load_configuration_file(config_filename)\n # log.debug(\"Final configuration: %s\" % cfg)\n except IOError as e:\n log.error(\"Error reading config file: %s\" % e)\n exit(1)\n\n def __make_event_sink(_class, broker, event_reporter, **config):\n res = _class(broker, **config)\n event_reporter.add_sink(res)\n return res\n\n ### BEGIN ACTUAL CONFIG FILE USAGE\n # We call appropriate handlers for each section in the appropriate order,\n # starting by getting any relevant command line parameters to create the client.\n\n client = cls(quit_time=args.quit_time, raise_errors=args.raise_errors)\n\n # TODO: include command line arguments when some are added\n if 'main' in cfg:\n client.setup_broker(cfg['main'])\n client.setup_reporter(cfg['main'])\n else: # use defaults\n client.setup_broker({})\n client.setup_reporter({})\n\n # These components are all handled almost identically.\n\n # EventSinks\n configs = cls.__join_configs_with_args(cfg.get('eventsinks', {}), args.event_sinks \\\n if args is not None and args.event_sinks is not None else [])\n client.setup_components(configs, 'scale_client.event_sinks', __make_event_sink, client.__reporter)\n\n # Set defaults if none were made\n if len(client.__reporter.get_sinks()) == 0:\n log.info(\"No event_sinks loaded: adding default LogEventSink\")\n LogEventSink = None\n try:\n from ..event_sinks.log_event_sink import LogEventSink\n except ValueError:\n # relative import error when this script called directly (isn't a package)\n try:\n from scale_client.event_sinks.log_event_sink import LogEventSink\n except ImportError as e:\n log.error(\"can't import LogEventSink! Error: %s\" % e)\n exit(1)\n default_sink = LogEventSink(client.__broker)\n client.__reporter.add_sink(default_sink)\n\n # Sensors\n log.info(\"Setting up Sensors...\")\n configs = cls.__join_configs_with_args(cfg.get('sensors', {}), args.sensors \\\n if args is not None and args.sensors is not None else [], \"anon_vs\")\n client.setup_sensors(configs)\n\n # Networks\n log.info(\"Setting up Networks...\")\n configs = cls.__join_configs_with_args(cfg.get('networks', {}), args.networks \\\n if args is not None and args.networks is not None else [], \"anon_network_app\")\n client.setup_networks(configs)\n\n # Applications\n log.info(\"Setting up other Applications...\")\n configs = cls.__join_configs_with_args(cfg.get('applications', {}), args.applications \\\n if args is not None and args.applications is not None else [])\n client.setup_applications(configs)\n\n # TODO: set some defaults if no applications, sensors, or networking components are enabled (heartbeat?)\n\n return client",
"def create_config():\n config = configparser.ConfigParser()\n section = 'Settings'\n config.add_section(section)\n config.set(section, 'font', 'Courier')\n config.set(section, 'font_size', '10')\n config.set(section, 'font_style', 'normal')\n # Interpolation\n config.set(section, 'font_info', \"You are using %(font)s at %(font_size)s pt\")\n\n with open(path, 'w') as config_file:\n config.write(config_file)",
"def generate_setups(self,filename=DEFAULT_FILENAME):\n \n self._create_main_shape()\n self._create_margin_shape()\n\n for section, setup in self.setups.iteritems():\n self._generate_section_structures(setup['distance'],\n setup['radius'],\n setup['structure'],\n section)\n self.write(filename)"
] | [
"0.5445772",
"0.52090114",
"0.5197359",
"0.51935154",
"0.51659477",
"0.51528746",
"0.5152159",
"0.50725466",
"0.50518197",
"0.5050541",
"0.5045867",
"0.5042122",
"0.50095546",
"0.500033",
"0.4996364",
"0.49739787",
"0.49723807",
"0.49618277",
"0.49463946",
"0.49391994",
"0.49358803",
"0.49311003",
"0.4925141",
"0.4922999",
"0.49204135",
"0.49162343",
"0.49098",
"0.49094266",
"0.48952347",
"0.4888557"
] | 0.6615563 | 0 |
Constructs the SAM model filename from the rest of the config. | def get_model_filename(config):
base = os.path.splitext(config['corpus'])[0]
return '%s--%dT.model' % (base, config['T']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _setupFilename(self):\n try:\n os.mkdir('./.netModel')\n except:\n pass # hope it's already there...\n filenames = os.listdir('./.netModel')\n configNum = 1\n i = 0\n configNumString = '%(c)04d' % {'c':configNum}\n while i < len(filenames):\n configNumString = '%(c)04d' % {'c':configNum}\n if (filenames[i][:4]==configNumString):\n configNum += 1\n i = 0\n else:\n i += 1\n return os.path.realpath('.')+'/.netModel/'+configNumString",
"def generate_name(config):\n\n name = basename(config.name)\n if config.prepro is not None:\n name += \"_\" + config.prepro\n if config.extract_pos:\n name += \"_pos\"\n return name",
"def filename(self):\n return self.config.get('filename', self.id) + f'_{self.file_suffix}'",
"def generateModelFilename(args, type):\n opt = []\n if args.letters:\n opt.append('l')\n if args.symbols:\n opt.append('s')\n if args.digits:\n opt.append('d')\n opt.sort()\n return \"models/model_{0}_{1}.yml\".format(type, ''.join(opt))",
"def GetModelName(filename, model):\n\n is_srn_model = translator.IsSrnModel(model)\n if(is_srn_model):\n model_name = filename + \"SrnModel\"\n else:\n model_name = filename + \"CellCycleModel\"\n\n return model_name",
"def get_model_filepath(config: configs.Config) -> str:\n return os.path.join(config.model_training.dir_out, models.DEFAULT_FILENAME_MODEL)",
"def modelconfigfile(modelfile):\n return os.path.splitext(modelfile)[0] + '.vars'",
"def extractFileName(fileType, modelName, modelVersion, modelState):\n fileName = '{}_{}_{}'.format(modelName, modelVersion, fileType) if modelState == 'national' else '{}_{}_{}_{}'.format(modelName, modelVersion, modelState, fileType)\n return fileName",
"def configFilename(self):\n return self.name()+'.py'",
"def construct_name_file(size_sample, randomness, pos_equal_neg, kernel):\n if randomness:\n randomness = \"rand\"\n else:\n randomness = \"nrand\"\n\n if pos_equal_neg:\n pos_equal_neg = \"pos-neg-eq\"\n else:\n pos_equal_neg = \"pos-neg-neq\"\n\n return \"{}_{}_{}_{}.json\".format(size_sample, randomness, pos_equal_neg, kernel)",
"def file_name(product, ext='json'):\n return f\"./output/{product}_{datetime.now().strftime('%Y-%m-%d_%H%M%S')}_transformed_{version}.{ext}\"",
"def get_config_filepath(config: configs.Config) -> str:\n return os.path.join(config.model_training.dir_out, configs.DEFAULT_FILENAME_CONFIG)",
"def generate_raw_filename(self, source_name, table_name, environment, seq_number, upload_time, load_type,\n file_format):\n file_date = upload_time.strftime(\n \"%Y-%m-%d-%H-%M-%S-%f\")[:-3] # [:-3] => Removing the 3 last characters as %f is for millis.\n res = f'{source_name}/{source_name}_{table_name}/' \\\n f'{source_name}_{environment}_{table_name}_{str(seq_number).zfill(3)}_' \\\n f'{file_date}_utc_{load_type}.{file_format}'\n res = res.lower()\n\n # Check if no illegal chars were passed\n #test = FileNameStandardConvention(res)\n #test.check_naming_convention()\n return res",
"def generate_filename(self, evla_conf, subband):\n\n node = os.uname()[1]\n node_idx = node.split('-')[-1] # Assumes cbe-node-XX naming\n\n # This is the old pulsar version:\n #self.data_dir = \"/lustre/evla/pulsar/data\"\n #self.outfile_base = \"%s.%s.%s.%s\" % (evla_conf.source,\n # evla_conf.projid, evla_conf.seq, node)\n\n # New version, 'normal' VLA data sets (SDM+BDF) are stored\n # using datasetId as the main folder name. Store here using\n # node-specific subdirs because there are lots of files..\n # Could make a subdir for each datasetId..\n self.data_dir = \"/lustre/evla/pulsar/data/%s\" % node\n #self.outfile_base = \"%s.%d.%s.%s\" % (evla_conf.datasetId,\n # int(evla_conf.seq),evla_conf.source,node_idx)\n #self.outfile_base = \"%s.%d.%s.%s-%02d\" % (evla_conf.datasetId,\n # int(evla_conf.seq), evla_conf.source,\n # subband.IFid, subband.swIndex-1)\n # New-new version, use scan+subscan number rather than seq number, \n # remove source name from filename\n self.outfile_base = \"%s.%d.%d.%s-%02d\" % (evla_conf.datasetId,\n evla_conf.scanNo, evla_conf.subscanNo,\n subband.IFid, subband.swIndex-1)",
"def _get_station_filename():\n output_dir = os.path.join(output, state, station)\n if not os.path.isdir(output_dir):\n logger.debug(\"Creating directory %s\", output_dir)\n os.makedirs(output_dir)\n return os.path.join(output_dir, \"%s.%s\" % (c_time, format))",
"def hap_filename(self, filetype):\n if filetype == 'events':\n return self.folder('events') / 'run_{:07d}_{}_eventlist.fits'.format(self.obs_id, self.hap_config)\n # return self.folder('events') / 'events_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'aeff':\n return self.folder('irfs') / 'aeff_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'edisp':\n return self.folder('irfs') / 'edisp_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'psf_3gauss':\n return self.folder('irfs') / 'psf_3gauss_{:06d}.fits.gz'.format(self.obs_id)\n else:\n raise ValueError('Invalid {} {}'.format(filetype))",
"def hap_filename(self, filetype):\n if filetype == 'events':\n return self.folder('events') / 'run_{:07d}_{}_eventlist.fits'.format(self.obs_id, self.hap_config)\n # return self.folder('events') / 'events_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'aeff':\n return self.folder('irfs') / 'aeff_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'edisp':\n return self.folder('irfs') / 'edisp_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'psf_3gauss':\n return self.folder('irfs') / 'psf_3gauss_{:06d}.fits.gz'.format(self.obs_id)\n else:\n raise ValueError('Invalid {} {}'.format(filetype))",
"def hap_filename(self, filetype):\n if filetype == 'events':\n return self.folder('events') / 'run_{:07d}_{}_eventlist.fits'.format(self.obs_id, self.hap_config)\n # return self.folder('events') / 'events_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'aeff':\n return self.folder('irfs') / 'aeff_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'edisp':\n return self.folder('irfs') / 'edisp_{:06d}.fits.gz'.format(self.obs_id)\n elif filetype == 'psf_3gauss':\n return self.folder('irfs') / 'psf_3gauss_{:06d}.fits.gz'.format(self.obs_id)\n else:\n raise ValueError('Invalid {} {}'.format(filetype))",
"def create_filename (self):\n\t\tassert self.__patient_name and self.__location_name, \"New filename could not be determined, one or more needed arguments is empty!\"\n\t\t_patient_name = self.__patient_name.split(' ')\n\t\t_patient_name.reverse()\n\t\t\n\t\treturn os.path.join(os.path.dirname(self.file._path), \"%s MR %s%s\" % (self.__location_name, ', '.join(_patient_name).upper(), self._file.extension))",
"def build_model_name(cls, name='modelName', output_name='output'):\n obj = cls(name)\n obj.exporter = 'generate_model_name'\n obj.output_name = output_name\n return obj",
"def config_identifier(converter, model_name):\n return model_name.lower().replace('-', '_') + '_' + converter",
"def _gen_basename(param_dict, clargs):\n if param_dict['output_basename'] in ['', 'auto']:\n return clargs.input_fname.lower().split('.json')[0]\n\n else:\n return param_dict['output_basename']",
"def _make_fname(song, ext=None, av=None, subdir=None):\n # pylint: disable=E1103\n # Instance of 'bool' has no 'extension' member (some types not inferable)\n ddir = os.path.join(Config.DDIR.get, subdir) if subdir else Config.DDIR.get\n if not os.path.exists(ddir):\n os.makedirs(ddir)\n\n if ext:\n extension = ext\n\n else:\n stream = streams.select(streams.get(song),\n audio=av == \"audio\", m4a_ok=True)\n extension = stream['ext']\n\n # filename = song.title[:59] + \".\" + extension\n filename = song.title + \".\" + extension\n filename = os.path.join(ddir, mswinfn(filename.replace(\"/\", \"-\")))\n filename = filename.replace('\"', '')\n return filename",
"def model_1_0(*filename):\n return os.path.join(check.MODELS_1_0_DIR, *filename)",
"def filename(self):\n # Just the name of the file\n filename = self.use_name\n if self.extension:\n filename = \"{0}.{1}\".format(self.use_name, self.extension)\n # Architecture sub-folder\n arch_folder_conf = spack.config.get(\"modules:%s:arch_folder\" % self.conf.name, True)\n if arch_folder_conf:\n # include an arch specific folder between root and filename\n arch_folder = str(self.spec.architecture)\n filename = os.path.join(arch_folder, filename)\n # Return the absolute path\n return os.path.join(self.dirname(), filename)",
"def get_model_name(file_path_model):\n\n tmp = parse_file_path(file_path_model)[1]\n model_name = tmp[:len(tmp) - len('.h5')]\n\n return model_name",
"def MakeFilename(filename, filetype, configmapping):\n config_version = config_friendlyname = config_hostname = device_hostname = ''\n\n if 'version' in configmapping:\n config_version = GetVersionStr( int(str(configmapping['version']), 0) )\n if 'friendlyname' in configmapping:\n config_friendlyname = re.sub('_{2,}', '_', re.sub('[^0-9a-zA-Z]','_', str(configmapping['friendlyname'][0])).strip('_'))\n if 'hostname' in configmapping:\n if str(configmapping['hostname']).find('%') < 0:\n config_hostname = re.sub('_{2,}', '_', re.sub('[^0-9a-zA-Z]','_', str(configmapping['hostname'])).strip('_'))\n if filename.find('@H') >= 0 and args.device is not None:\n device_hostname = GetTasmotaHostname(args.device, args.port, username=args.username, password=args.password)\n if device_hostname is None:\n device_hostname = ''\n\n dirname = basename = ext = ''\n\n # split file parts\n dirname = os.path.normpath(os.path.dirname(filename))\n basename = os.path.basename(filename)\n name, ext = os.path.splitext(basename)\n\n # make a valid filename\n try:\n name = name.translate(dict((ord(char), None) for char in r'\\/*?:\"<>|'))\n except:\n pass\n name = name.replace(' ','_')\n\n # append extension based on filetype if not given\n if len(ext) and ext[0]=='.':\n ext = ext[1:]\n if filetype is not None and args.extension and (len(ext)<2 or all(c.isdigit() for c in ext)):\n ext = filetype.lower()\n\n # join filename + extension\n if len(ext):\n name_ext = name+'.'+ext\n else:\n name_ext = name\n\n # join path and filename\n try:\n filename = os.path.join(dirname, name_ext)\n except:\n pass\n\n filename = filename.replace('@v', config_version)\n filename = filename.replace('@f', config_friendlyname )\n filename = filename.replace('@h', config_hostname )\n filename = filename.replace('@H', device_hostname )\n\n return filename",
"def generate_model_name(self):\n now = datetime.datetime.now()\n name = '%s_%s_%s_%s_%s_%s' % (now.day, now.month, 'rnn', self._controller_type, self._num_layers, self._layer_size)\n if self._dropout > 0:\n name += '_dropout'\n\n return name",
"def _make_config_file_name(environment, out=False):\n return os.path.join(PH_HOME_DIR, \"etc/config\", \"%s.conf\" % environment) if out else \\\n os.path.join(PH_HOME_DIR, \"config\", \"%s.conf.in\" % environment)",
"def _get_file_name(name: types.TSeedName) -> str:\n return f\"{name}.yml\""
] | [
"0.7139478",
"0.6578594",
"0.65595764",
"0.65387714",
"0.6311835",
"0.62951124",
"0.6235641",
"0.6173565",
"0.61575484",
"0.61424035",
"0.6047662",
"0.6041903",
"0.602598",
"0.60108405",
"0.6004139",
"0.5995963",
"0.5995963",
"0.5995963",
"0.5995401",
"0.5993016",
"0.5983836",
"0.5945221",
"0.59410566",
"0.59159434",
"0.5898361",
"0.58638996",
"0.58638364",
"0.5850926",
"0.5825285",
"0.5821515"
] | 0.7508373 | 0 |
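A quick way to sanity-check the retrieved `get_model_filename` document above is to run it on a toy config; the corpus path and topic count below are invented purely for illustration and are not taken from the original project.

```python
import os

def get_model_filename(config):
    # Strip the extension from the corpus path and append the topic count T.
    base = os.path.splitext(config['corpus'])[0]
    return '%s--%dT.model' % (base, config['T'])

# Hypothetical config values for demonstration only.
config = {'corpus': 'corpora/newsgroups.txt', 'T': 50}
print(get_model_filename(config))  # corpora/newsgroups--50T.model
```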
walk over files in the provided directory and return a list of files | def walk_directory(self, path):
files = []
for dirpath, dirnames, filenames in os.walk(path):
for filename in filenames:
files.append(os.path.join(dirpath, filename))
return files | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_all_files(in_dir):\n\n for dirname, dirs, files in os.walk(in_dir):\n for filename in files:\n yield op.join(dirname, filename)",
"def get_all_files(directory):\r\n for dirpath, _dirnames, filenames in os.walk(directory):\r\n for filename in filenames:\r\n yield (filename, dirpath)",
"def getFilePaths(directory):\r\n\tfor folder, subs, files in os.walk(directory):\r\n\t\tfor filename in files:\r\n\t\t\tyield os.path.join(folder, filename)",
"def _get_all_files(dir_path):\n for root, _, filenames in os.walk(dir_path):\n for name in filenames:\n target = os.path.join(root, name)\n yield target",
"def listFiles(root):\n for dirpath, dirnames, filenames in os.walk(root):\n for file in filenames:\n yield os.path.join(dirpath, file)",
"def walkdir(self, folder):\n for dirpath, dirs, files in os.walk(folder):\n for filename in files:\n yield os.path.abspath(os.path.join(dirpath, filename))",
"def getFiles(dir):\n # dig looking for files\n a= os.walk(dir)\n b = True\n filenames = []\n \n while (b):\n try:\n (dirpath, dirnames, files) = a.next()\n filenames.append([dirpath, tuple(files)])\n except:\n b = False\n return filenames",
"def list_files_into_directory(directory_path: str) -> [str]:\n for root, directory_names, file_names in walk(directory_path):\n return file_names",
"def getFiles(dir):\n # dig looking for files\n a = os.walk(dir)\n b = True\n filenames = []\n \n while b:\n try:\n (dirpath, dirnames, files) = a.next()\n filenames.append([dirpath, tuple(files)])\n except:\n b = False\n return filenames",
"def get_files(dir: str) -> List[str]:\n ret = []\n for root, dirs, files in os.walk(dir):\n for name in dirs:\n ret.extend(get_files(os.path.join(root, name)))\n for name in files:\n ret.append(os.path.join(root, name))\n return ret",
"def find_files(d):\n for root, dirs, files in os.walk(d):\n for f in files:\n yield path.abspath(path.join(root, f))",
"def getListOfFiles(directory):\n listOfFiles = []\n for path, dirs, files in os.walk(directory):\n for eachFile in files:\n filePath = os.path.join(path, eachFile)\n listOfFiles.append(filePath)\n return listOfFiles",
"def walk(rootdir):\n flist = []\n for root, dirs, files in os.walk(rootdir):\n flist = flist + [os.path.join(root, x) for x in files]\n return flist",
"def walkdir(folder):\n for dirpath, dirs, files in os.walk(folder):\n for filename in files:\n yield os.path.abspath(os.path.join(dirpath, filename))",
"def get_all_files_walk(folder):\n files = []\n for root, dirs, filenames in os.walk(folder):\n files.extend(os.path.join(root, f) for f in filenames)\n return files",
"def get_files_in_dir(path):\n return [os.path.join(dir_name, file)\n for dir_name, subdirs, files in os.walk(path)\n for file in files]",
"def list_all_files(dir):\n\n result = []\n for root, _, filenames in os.walk(dir):\n for name in filenames:\n filename, ext = os.path.splitext(name)\n if ext == '.cs' or ext == '.xaml':\n result.append(os.path.join(root, name))\n return result",
"def files_in_dir(path):\n return os.listdir(path)",
"def all_files_under(path):\r\n for cur_path, dirnames, filenames in os.walk(path):\r\n for filename in filenames:\r\n yield os.path.join(cur_path, filename)",
"def _iter_files_in_dir(directory):\n for filename in os.listdir(directory):\n filepath = os.path.join(directory, filename)\n if os.path.isfile(filepath):\n yield filepath",
"def list_files(startpath):\n for root, _, files in os.walk(startpath):\n for f in files:\n yield os.path.join(root, f)",
"def get_files_in_dir(dir_path: str) -> List[FileInfo]:\n dir_walk_items = os.walk(dir_path)\n\n all_files = []\n for dir_walk_item in dir_walk_items:\n path_to_dir = dir_walk_item[0]\n file_names = dir_walk_item[2]\n for file_name in file_names:\n if file_name not in IGNORED_FILES:\n all_files.append(\n FileInfo.create(path_to_dir, file_name)\n )\n\n return all_files",
"def list_files(dir_pathname, recursive=True, topdown=True, followlinks=False):\n for root, _, file_names in walk(dir_pathname,\n recursive, topdown, followlinks):\n for file_name in file_names:\n yield absolute_path(os.path.join(root, file_name))",
"def gen_recursive_filelist(d):\n \n for root, directories, files in os.walk(d):\n for file in files:\n yield os.path.join(root, file)",
"def get_files_list(tree):\n result = list()\n for (dir_path, _, file_names) in walk(tree):\n if file_names:\n for file in file_names:\n if file.lower().endswith(('.png', '.jpg', '.jpeg')):\n result.append(path.join(dir_path, file))\n\n return result",
"def file_list(start_dir):\n file_list = []\n for root, dirs, files in os.walk(start_dir):\n for f in files:\n if f[0] != '.':\n file_list.append(f)\n return file_list",
"def find_all_files(path):\n for root, dirs, files in os.walk(os.path.join(path)):\n for filename in files:\n yield os.path.join(root, filename)",
"def _list_files(folder, pattern):\n for root, folders, files in os.walk(folder):\n for filename in files:\n if fnmatch.fnmatch(filename, pattern):\n yield os.path.join(root, filename)",
"def _list_files(folder, pattern):\n for root, folders, files in os.walk(folder):\n for filename in files:\n if fnmatch.fnmatch(filename, pattern):\n yield os.path.join(root, filename)",
"def get_files_in_dir(dir: str):\n\n file_list = [f\"{dir}/{file}\" for file in listdir(dir) if isfile(join(dir, file))]\n\n return file_list"
] | [
"0.7925665",
"0.78335404",
"0.7786359",
"0.7776031",
"0.7694388",
"0.76942396",
"0.7658098",
"0.7648747",
"0.75843465",
"0.757489",
"0.75511634",
"0.7523276",
"0.75155026",
"0.75122464",
"0.7501486",
"0.74656945",
"0.74481636",
"0.74200726",
"0.7414121",
"0.74086905",
"0.73910105",
"0.7383427",
"0.7351662",
"0.7347721",
"0.7333475",
"0.7310299",
"0.7285262",
"0.7278769",
"0.7278769",
"0.72738665"
] | 0.7963085 | 0 |
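As a standalone sketch of the retrieved `walk_directory` document (and of many of the `os.walk`-based negatives above), the snippet below collects files recursively and also shows a `pathlib` variant that gathers the same files; the returned path strings can differ cosmetically (e.g. a leading `./`), and the current directory is used only for illustration.

```python
import os
from pathlib import Path

def walk_directory(path):
    # Recursively collect every file path under `path`.
    files = []
    for dirpath, dirnames, filenames in os.walk(path):
        for filename in filenames:
            files.append(os.path.join(dirpath, filename))
    return files

def walk_directory_pathlib(path):
    # pathlib alternative: rglob('*') visits everything, keep only files.
    return [str(p) for p in Path(path).rglob('*') if p.is_file()]

print(walk_directory('.')[:5])
print(walk_directory_pathlib('.')[:5])
```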
Check whether or not a string is a valid Roman numeral. | def is_roman_numeral(s: str) -> bool:
if not isinstance(s, str):
raise TypeError("Only strings may be tested ")
return bool(_romanNumeralPattern.match(s)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fromRoman(s):\n pass",
"def fromRoman(s):\n if not s:\n raise InvalidRomanNumeralError, 'Input can not be blank'\n if not romanNumeralPattern.search(s):\n raise InvalidRomanNumeralError, 'Invalid Roman numeral: %s' % s\n\n result = 0\n index = 0\n for numeral, integer in romanNumeralMap:\n while s[index:index+len(numeral)] == numeral:\n result += integer\n index += len(numeral)\n return result",
"def from_roman(s: str) -> Integral:\n if not isinstance(s, str):\n raise TypeError(\"The argument to from_roman must be a string.\")\n if not _romanNumeralPattern.search(s):\n raise InvalidRomanNumeralError(f\"Invalid Roman numeral: {s}\")\n\n result = 0\n index = 0\n for numeral, integer in _romanNumeralMap:\n while s[index : index + len(numeral)] == numeral:\n result += integer\n index += len(numeral)\n return result",
"def toRoman(n):\n pass",
"def roman_to_int(roman_string):\n\n NUMERALS_SET = set(list(zip(*NUMERAL_MAP))[1])\n roman_string = roman_string.upper()\n if len(set(list(roman_string.upper())) - NUMERALS_SET) != 0:\n raise ValueError('{0} does not seem to be a roman numeral'.format(\n roman_string))\n i = result = 0\n for integer, numeral in NUMERAL_MAP:\n while roman_string[i:i + len(numeral)] == numeral:\n result += integer\n i += len(numeral)\n if result < 1:\n raise ValueError('Can not interpret Roman Numeral {0}'.format(roman_string))\n return result",
"def decToRoman(numStr):\n try:\n n = int(numStr)\n if n >= 4000:\n return 'Error!'\n romans = [\n (1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),\n (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),\n (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'),\n (1, 'I')\n ]\n result = ''\n for value, letters in romans:\n while n >= value:\n result += letters\n n -= value\n return result\n except:\n result = 'Error!'\n return result",
"def roman_numerals(text):\n return re.findall(r\"\\b([IVXLCDM]+)\\b\", text)",
"def validate(input):\n regex = re.compile(r'(UL)?\\d{1,' + re.escape(str(barcode_digit_length)) + '}$', flags=re.IGNORECASE)\n if regex.match(input):\n is_valid = True\n else:\n is_valid = False\n return is_valid",
"def roman_number(value):\n try:\n value = to_roman(value)\n except RomanError as e:\n raise TemplateSyntaxError(\"roman_number error: %s\" % str(e))\n return value",
"def romanify(num):\n result = \"\"\n return result",
"def isDec(string):\n return (True)",
"def verify(s):\n\t# Remove any spurious characters\n\ts = re.sub(r'[^0-9xX]', '', s).upper().strip()\n\n\tl = len(s)\n\n\tif l==10:\n\t\tif verify_10(s):\n\t\t\treturn s\n\telif l==13:\n\t\tif verify_13(s):\n\t\t\treturn s\n\n\t# It's not the right length to be an ISBN\n\treturn False",
"def is_valid(key):\n return key[0:2] == \"MR\" and key[2:].isdigit() and len(key) in [9, 10]",
"def validate_NRIC(nric):\n\tif len(nric) != 9: # invalid length\n\t\treturn \"Invalid length (must be exactly 9 characters, was given %d.)\" % len(\n\t\t nric)\n\n\t# Constants\n\tNRIC_ID = nric[0]\n\tLAST_LETTER = nric[-1]\n\tNUMBERS = nric[1:-1]\n\n\tif not match(r'[STFG]', nric):\n\t\t# First letter is not S, T, F or G\n\t\treturn \"Invalid NRIC ID: %s\" % NRIC_ID\n\n\t# The NRIC first and last letters should be a letter, the middle should\n\t# be all numbers (7 numbers exactly)\n\tif match(r'[STFG][0-9]+[A-Z]', nric) is None:\n\t\treturn \"Invalid format: %s\" % nric\n\n\tchecksum = calculate_checksum(NRIC_ID, NUMBERS)\n\tlast_letter_value = checksum % 11\n\tif last_letter_value == get_value(LAST_LETTER, NRIC_ID):\n\t\treturn \"Okay.\"\n\telse:\n\t\treturn \"Invalid NRIC, last letter must be %s.\" % get_letter(\n\t\t last_letter_value, NRIC_ID)",
"def testFromRomanKnownValues(self):\n for integer, numeral in self.knownValues:\n result = roman.fromRoman(numeral)\n self.assertEqual(integer, result)",
"def validateFormat(barcode):\r\n validatesymbol = 0\r\n delimitedsymbol = 0\r\n if barcode[0] == '' or barcode[-1] == '':\r\n validatesymbol += 1\r\n for i in range(len(barcode)):\r\n try:\r\n int(barcode[i])\r\n except ValueError:\r\n if barcode[i] == '-':\r\n delimitedsymbol += 1\r\n else:\r\n validatesymbol += 1\r\n if delimitedsymbol == 0 and validatesymbol == 0:\r\n if len(barcode) == 12 or len(barcode) == 13:\r\n pass\r\n else:\r\n validatesymbol += 1\r\n if validatesymbol == 0:\r\n return True\r\n else:\r\n return False",
"def valid_barcode(s):\n # implement this function!\n odd_digits = 0\n even_digits = 0\n result = 0\n for i in range(len(s) - 1):\n if i % 2 == 0:\n odd_digits += int(s[i])\n else:\n even_digits += int(s[i])\n result = (3 * odd_digits + even_digits) % 10\n if result != 0:\n result = 10 - result\n\n try:\n if int(s[-1]) == result and len(s) == 12:\n return True\n else:\n return False\n except IndexError:\n return False",
"def isAlphaNumeric(string):\n return (True)",
"def formatRomanNumeral(rn, key):\n # Something of \"I\" and \"I\" of something\n if rn == \"I/I\":\n rn = \"I\"\n return rn",
"def roman2int(s):\n if not s or not isinstance(s, str):\n raise InputError(s, \"Input value must be a non-empty string.\")\n elif __roman_numeral_regex.search(s) is None:\n raise InputError(s, \"Input is not a valid Roman numeral representation of numbers in the 1-4999 range.\")\n\n return sum([__bmap[i] if __bmap[i] >= __bmap[j] else -__bmap[i] for i, j in zip(s, s[1:])]) + __bmap[s[-1]]",
"def ascii_numeric(s: str) -> bool:\n return frozenset(s).issubset(_ascii_n)",
"def roman_numerals_decoder(roman):\n roman_numerals = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}\n result = 0\n for i, c in enumerate(roman):\n if (i + 1) == len(roman) or roman_numerals[c] >= roman_numerals[roman[i + 1]]:\n result += roman_numerals[c]\n else:\n result -= roman_numerals[c]\n return result",
"def romanize(string: str, locale: t.Union[Locale, str]) -> str:\n locale = validate_locale(locale)\n\n if locale not in (Locale.RU, Locale.UK, Locale.KK):\n raise ValueError(f\"Romanization is not available for: {locale}\")\n\n table = _get_translation_table(locale)\n\n return string.translate(table)",
"def isNumber(string):\r\n for char in string:\r\n charNum = ord(char)\r\n if (charNum < 48 or charNum > 57):\r\n return False\r\n return True",
"def is_rna(string):\r\n RNA = ['A','U','G','C']\r\n return False if False in [ str in RNA for str in string] else True",
"def is_numeral(self, symbol: str) -> bool:\n return symbol in self.numerals",
"def is_nine_pandigital(number):\n digits = str(number)\n return bool(len(digits) == len(ALL_NINE) and set(digits) == ALL_NINE)",
"def teststring(self):\n self.assertRaises(palindrome.NotStringError,palindrome.palin, 4)",
"def is_valid_numeric(inString):\r\n return is_int(inString) or is_float(inString)",
"def verify_format(isbn):\n\n return len(isbn) == 10 and (isbn[-1] == \"X\" or isbn[-1].isdigit()) \\\n and all(digit.isdigit() for digit in isbn[:-1])"
] | [
"0.6835023",
"0.64675707",
"0.61907345",
"0.6095126",
"0.607554",
"0.59310186",
"0.58298",
"0.5818829",
"0.58053446",
"0.5773457",
"0.5767107",
"0.5709555",
"0.56931996",
"0.56590647",
"0.56424683",
"0.5632415",
"0.5631892",
"0.56194246",
"0.5570805",
"0.5566121",
"0.55595434",
"0.5546257",
"0.5535388",
"0.54501337",
"0.54325515",
"0.5431252",
"0.5404765",
"0.53814816",
"0.5360989",
"0.5342889"
] | 0.8254919 | 0 |
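The retrieved `is_roman_numeral` document depends on a module-level `_romanNumeralPattern` that is not included in the snippet; the sketch below fills that gap with a commonly used regex for numerals up to 3999, which is an assumption rather than the original module's pattern.

```python
import re

# Assumed stand-in for the missing _romanNumeralPattern (covers I through MMMCMXCIX).
_romanNumeralPattern = re.compile(
    r"^M{0,3}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})$"
)

def is_roman_numeral(s: str) -> bool:
    if not isinstance(s, str):
        raise TypeError("Only strings may be tested")
    return bool(_romanNumeralPattern.match(s))

print(is_roman_numeral("MCMXCIV"))  # True
print(is_roman_numeral("ABC"))      # False
```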
search for a user in the ban list | async def banlist(self, ctx, *, username=None):
bans = await ctx.guild.bans()
list_of_matched_users = []
for ban in bans:
if username is None or username.lower() in ban.user.name.lower():
list_of_matched_users.append(ban)
entries = []
for ban in list_of_matched_users:
entries.append((f"{ban.user.name}#{ban.user.discriminator}", f"<@!{ban.user.id}>: {ban.reason}"))
text_pages = paginator.FieldPages(ctx, entries=entries)
await text_pages.paginate() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_by_user_name(cls,user_name):\n for user in cls.user_list:\n if user.user_name == user_name:\n return user",
"def search_user(message, search):\n found = []\n search = search.lower()\n users = hf.get_users()\n for user in users:\n if search in user['name'].lower():\n found.append('{} ({})'.format(user['name'], user[\"id\"]))\n if len(found) == 0:\n message.reply('No user found by that key: {}.'.format(search))\n return\n message.reply('Users found: {}'.format(', '.join(found)))",
"def search_user(message, search):\n found = []\n search = search.lower()\n for userid, user in iteritems(message._client.users):\n if search in user['name'].lower():\n found.append('{} ({})'.format(user['name'], userid))\n if len(found) == 0:\n message.reply('No user found by that key: {}.'.format(search))\n return\n message.reply('Users found: {}'.format(', '.join(found)))",
"def __searchUser(self, args = []):\n\n try:\n if len(args) == 0:\n self.__cm.send(p.T_QUERY, '')\n else:\n self.__cm.send(p.T_QUERY, args)\n\n reply = self.__cm.receive()\n\n if (reply is not None and reply.type == p.T_RESULT):\n [ self.__parseUserRecord(r) for r in reply.payload.split() ] \n self.__agent.printList(self.__userList)\n else:\n raise Exception, \"An error occured while fetching user data! The user list is outdated.\"\n \n except Exception, e:\n self.__handleError('List', e)",
"def get_user_list(self):\n self.user_list = db.get_user_list()\n for each in self.user_list:\n print each[1] # username\n while(True):\n selection = raw_input(\"Enter username to use\")\n if selection in self.user_list:\n return selection",
"async def bans(self, ctx):\n try:\n bans = await self.bot.get_bans(ctx.message.server)\n except discord.Forbidden:\n await self.bot.say('I do not have the proper permissions')\n except discord.HTTPException:\n await self.bot.say('Getting bans failed')\n else:\n await self.bot.say('\\N{SMALL ORANGE DIAMOND}'.join(user.name for user in bans))",
"def list_users():\n\n search = request.args.get('q')\n \n users_blocking = [block.user_blocking_id for block in Blocks.query.all() if block.user_being_blocked_id == g.user.id]\n\n if not search:\n users = User.query.filter(User.id.notin_(users_blocking)).all()\n else:\n users = (User.query.filter(\n User.username.like(f\"%{search}%\"),\n User.id != g.user.id,\n User.id.notin_(users_blocking))\n .all())\n\n return render_template('users/index.html', users=users)",
"async def get_bans(self) -> 'typing.List[dt_user.User]':\n if not self.me.guild_permissions.ban_members:\n raise PermissionsError(\"ban_members\")\n\n bans = await self._bot.http.get_bans(self.id)\n users = []\n\n for user_data in bans:\n # TODO: Audit log stuff, if it ever comes out.\n user_data = user_data.get(\"user\", None)\n users.append(dt_user.User(self._bot, **user_data))\n\n return users",
"def ban(sock, user):\r\n chat(sock, \"/ban {}\".format(user))",
"def get_banned(self):\n return self.execute(TABELLE['id_users']['select']['banned'])",
"async def add_blacklist(self, ctx, user: discord.Member):\r\n if user.id not in self.settings['blacklist']:\r\n try:\r\n self.settings['blacklist'].append(user.id)\r\n await ctx.send(\"User blacklisted.\")\r\n except:\r\n await ctx.send(\"An error occured.\")\r\n else:\r\n await ctx.send(\"User already blacklisted.\")",
"def ban_user(self, user):\n # salvo l'id dell'utente o del bot\n # print(\"Sto negando l'accesso all'user \" + str(user['id']))\n self.execute(TABELLE['id_users']['insert']['complete_user'],\n (user['id'], False, False, False, False, True))",
"def get_banned_user_obj_list(user_obj):\n banned_user_obj_list = [user_banned_list_obj.banned_user for user_banned_list_obj in user_obj.banned_user_set.all()]\n return banned_user_obj_list",
"def check_username(search_username):\n for find_username in USERS_LIST:\n if find_username[\"username\"] == search_username:\n return True\n return False",
"def find_user_like(self, query):\n # if cache is empty, fill it\n if self.user_cache is None:\n self.user_cache = self.get_users()\n\n # if it's still empty, something's wrong\n if self.user_cache is not None:\n # search the names first\n for user in self.user_cache:\n if query in user[\"name\"]:\n return user\n # then search the emails\n for user in self.user_cache:\n if query in user[\"email\"]:\n return user\n return None",
"async def examine(self, ctx, *args):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n search_string = ' '.join(args).lower()\n for member in ctx.guild.members:\n if member.nick is not None:\n if search_string in member.nick.lower():\n target = User.objects.get(id=member.id)\n break\n if search_string in member.name.lower():\n target = User.objects.get(id=member.id)\n break\n else:\n await ctx.send(f'Could not find {search_string} in server.')\n return\n\n await ctx.send(users.print_account(target))",
"async def ban_users(request: Request, user_list: list[User]) -> ModBan:\n conn: Connection = request.state.db_conn\n users = [user.user_id for user in user_list]\n\n records = await conn.fetch(\"SELECT * FROM users WHERE user_id=any($1::bigint[])\", tuple(users))\n db_users = [record[\"user_id\"] for record in records]\n\n non_db_users = set(users) - set(db_users)\n\n async with conn.transaction():\n # Ref:\n # https://magicstack.github.io/asyncpg/current/faq.html#why-do-i-get-postgressyntaxerror-when-using-expression-in-1\n await conn.execute(\"UPDATE users SET is_banned=TRUE WHERE user_id=any($1::bigint[])\", db_users)\n await conn.execute(\"UPDATE pixel_history SET deleted=TRUE WHERE user_id=any($1::bigint[])\", db_users)\n\n await request.state.canvas.sync_cache(conn, skip_check=True)\n\n return ModBan(banned=db_users, not_found=list(non_db_users))",
"def find_user_by_password(cls,password):\n\n for user in cls.user_list:\n if user.password == password:\n return user",
"async def search(self, ctx, *, item: str):\n has = {}\n for data in [fossilData, diyData]:\n for user in data:\n for i in set(data[user]):\n if item.lower() in i.lower():\n if user not in has:\n has[user] = []\n has[user].append(i.title())\n if has:\n message = \"\"\n members = [m.name + '#' + m.discriminator for m in ctx.message.author.guild.members]\n for u in has:\n if u not in members:\n continue\n message += \"{}:\\n```\".format(u)\n for item in sorted(has[u]):\n message += \"- {}\\n\".format(item)\n message = message[:-1] + \"```\\n\"\n await ctx.send(message)\n else:\n await ctx.send(\"Nobody has anything *close* to a `{}`. Are you sure it exists?\".format(item.title()))",
"def add_to_bannedlist_view(request):\n data = {'success': False, 'msg': ''}\n if request.method == 'GET':\n # Check if the current user has already logged in.\n # If user has not logged in, return an error msg to frontend.\n # If user has logged in, let user add banned user he/she doesn't like, to his/her blacklist\n if not request.session.get('login_flag', None):\n data['msg'] = 'user does not log in'\n return JsonResponse(data)\n # else current use is logged in\n curr_user_name = request.session.get('name', None)\n # return curr_user_obj by curr_user_name from login.models.User database\n try:\n curr_user_obj = login.models.User.objects.get(name=curr_user_name)\n except ObjectDoesNotExist:\n data['msg'] = 'does not have user: ' + str(curr_user_name)\n return JsonResponse(data)\n\n try:\n req = simplejson.loads(request.body)\n banned_user_id = req['banned_user_id'].strip()\n except:\n banned_user_id = request.GET.get('banned_user_id')\n # check if input is empty\n if banned_user_id is None:\n data['msg'] = 'banned_user_id is required'\n return JsonResponse(data)\n\n # else input is not empty\n # check if banned_user_id is a positive integer\n try:\n banned_user_id = int(banned_user_id)\n if not (banned_user_id > 0):\n data['msg'] = 'banned_user_id must be a positive integer'\n return JsonResponse(data)\n except:\n data['msg'] = 'banned_user_id must be a positive integer'\n return JsonResponse(data)\n\n try:\n banned_user_obj = login.models.User.objects.get(uid=banned_user_id)\n except ObjectDoesNotExist:\n data['msg'] = 'does not have user with banned_user_id: ' + str(banned_user_id)\n return JsonResponse(data)\n\n if curr_user_obj.uid == banned_user_obj.uid:\n data['msg'] = 'user cannot add itself to its blacklist'\n return JsonResponse(data)\n\n try:\n models.User_banned_list.objects.create(user=curr_user_obj, banned_user=banned_user_obj)\n except IntegrityError:\n data['msg'] = 'banned_user_id: ' + str(banned_user_id) + ' already in blacklist'\n return JsonResponse(data)\n else:\n data['success'] = True\n data['msg'] = 'successfully insert banned_user_id: ' + str(banned_user_id) + ' into blacklist'\n return JsonResponse(data)\n\n else:\n data['msg'] = 'please use GET'\n return JsonResponse(data)",
"async def unban(self, ctx, *, member): # don't convert to discord.Member as it isn't a server member, just a string\n banned_users = await ctx.guild.bans() # pulls ban list\n member_name, member_discriminator = member.split('#') # split the member name from the numerical discriminator\n for ban_entry in banned_users:\n user = ban_entry.user\n if (user.name, user.discriminator) == (member_name, member_discriminator):\n await ctx.guild.unban(user)\n await ctx.send(f'Unbanned {user.name}#{user.discriminator}')\n return",
"def users(message):\n message.reply(Strings['USERS_FOUND'].format(len(hf.get_users())))",
"async def is_blacklisted(user_id: int) -> bool:\n async with aiosqlite.connect(DATABASE_PATH) as db:\n async with db.execute(\n \"SELECT * FROM blacklist WHERE user_id=?\", (user_id,)\n ) as cursor:\n result = await cursor.fetchone()\n return result is not None",
"async def ban(self, ctx, user: discord.User = None, *, reason: str = None):\r\n mem = await ctx.guild.fetch_member(user.id)\r\n if mem.top_role >= ctx.author.top_role:\r\n return await ctx.send(\"you can't ban that person\")\r\n\r\n try:\r\n if user is None:\r\n return await ctx.reply(\"Please specify a member to ban\", delete_after=3)\r\n \r\n if user.id == self.bot.user.id:\r\n return await ctx.reply(\"I can't ban myself\", delete_after=3)\r\n\r\n if reason is None:\r\n reason = f\"banned by {ctx.author.name}\"\r\n await ctx.guild.ban(discord.Object(id=user.id), reason=f\"banned by {ctx.author.name}\")\r\n return await ctx.reply(f\"{user.mention} has been banned\", mention_author=False)\r\n\r\n else:\r\n await ctx.reply(f\"{user.mention} has been banned for **`{reason}`**\", mention_author=False)\r\n reason += f\" banned by {ctx.author.name}\"\r\n await ctx.guild.ban(discord.Object(id=user.id), reason=reason)\r\n\r\n except Exception as e:\r\n await ctx.reply(e, delete_after=5)\r\n return print(e)",
"async def blacklist_show(self, ctx: commands.Context, target):\r\n table = \"user_blacklist\" if isinstance(target, discord.User) else \"guild_blacklist\"\r\n check = await self.check_user(target.id, table)\r\n\r\n if check[0]:\r\n embed = discord.Embed(color=self.bot.colors.neutral)\r\n if isinstance(target, discord.User):\r\n entry = await self.get_blacklist_entry(target.id, table)\r\n u = discord.utils.get(self.bot.users, id=target.id)\r\n if u:\r\n embed.set_author(name=f\"User {u} ({u.id})\", icon_url=u.avatar_url_as(static_format=\"png\"))\r\n else:\r\n embed.set_author(name=f\"User {u.id}\")\r\n else:\r\n entry = await self.get_blacklist_entry(target, table)\r\n g = discord.utils.get(self.bot.guilds, id=target)\r\n if g:\r\n embed.set_author(name=f\"Guild {g} ({g.id})\", icon_url=g.icon_url_as(static_format=\"png\"))\r\n else:\r\n embed.set_author(name=f\"Guild {g.id}\")\r\n embed.add_field(name=\"Reason:\", value=entry['reason'])\r\n await ctx.send(embed=embed)\r\n else:\r\n await ctx.error(description=f\"{table.split('_')[0].title()} is not blacklisted.\")",
"def __find_matching_user(self, user):\n if not user.id in self.__users.keys():\n return user\n return self.__users[user.id]",
"def ban(sock, chan, user):\n chat(sock, \".ban {}\\r\\n\".format(user))\n console.info(\"banned user {} from channel {}\".format(user, chan))",
"def my_get_user(users_list, user_id):\n for user in users_list:\n if user.get(\"user_id\") == user_id:\n return user\n return None",
"async def ban(self, ctx, user: discord.Member, reason=\"Banned from guild by Talos\"):\n await user.ban(reason=reason)\n await self.bot.mod_log(ctx, \"ban\", user, reason)\n await ctx.send(f\"User {user} banned\")",
"async def bot_check(self, ctx):\n blocked = await self.db.fetchrow(\n \"\"\"\n SELECT *\n FROM blocks\n WHERE user_id=$1\n \"\"\",\n ctx.author.id,\n )\n if blocked is None:\n return True\n raise BlackListed"
] | [
"0.6635181",
"0.65845996",
"0.64267707",
"0.6291678",
"0.6161224",
"0.6127906",
"0.6092408",
"0.60619473",
"0.6052187",
"0.60402244",
"0.6016852",
"0.6006078",
"0.5991084",
"0.5965134",
"0.5931113",
"0.59273094",
"0.5911802",
"0.59044695",
"0.58706963",
"0.5839877",
"0.58294207",
"0.5822346",
"0.5801656",
"0.5772219",
"0.57435",
"0.5710451",
"0.56963766",
"0.5691522",
"0.5685174",
"0.5668378"
] | 0.72738934 | 0 |
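Stripped of the discord.py calls, the heart of the retrieved `banlist` command is a case-insensitive substring filter over ban entries; the discord-free sketch below isolates that step, using made-up (name, reason) pairs in place of real ban objects.

```python
def filter_bans(bans, username=None):
    # Keep entries whose user name contains the search string (case-insensitive);
    # with no search string, every ban is returned, mirroring the command above.
    matched = []
    for name, reason in bans:
        if username is None or username.lower() in name.lower():
            matched.append((name, reason))
    return matched

bans = [("SpamBot", "advertising"), ("Alice", "raiding"), ("spammer42", "scam links")]
print(filter_bans(bans, "spam"))
# [('SpamBot', 'advertising'), ('spammer42', 'scam links')]
```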
removes the last x messages from the channel it was called in (defaults to 10) | async def channel_(self, ctx, number=10):
number = number if number <= 100 else 100
question = await ctx.send(f"this will delete the last {number} messages from ALL users. Continue?")
await question.add_reaction(self.reactions[0])
await question.add_reaction(self.reactions[1])
def check_is_author(reaction, user):
return reaction.message.id == question.id and user.id == ctx.author.id and \
reaction.emoji in self.reactions
try:
reaction, user = await self.bot.wait_for("reaction_add", check=check_is_author, timeout=20)
if reaction.emoji == self.reactions[1]:
await question.delete()
return
except asyncio.TimeoutError:
await question.delete()
return
try:
messages = await ctx.channel.purge(limit=number+1)
await ctx.send(f"deleted the last {len(messages)-1} messages from this channel")
except (discord.ClientException, discord.Forbidden, discord.HTTPException) as e:
await ctx.send(str(e))
except Exception as ex:
import traceback
owner = ctx.guild.get_member(self.bot.owner_id)
        if owner:
            await owner.send(traceback.format_exc())
        self.error_log.error(traceback.format_exc()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def clear(ctx, number):\n \"\"\":param: ctx\"\"\"\n \"\"\":param: number\"\"\"\n \"\"\"return \"\"\"\n number = int(number) \n counter = 0\n async for x in bot.logs_from(ctx.message.channel, limit = number):\n if counter < number:\n await bot.delete_message(x)\n counter += 1\n await asyncio.sleep(1.2) #1.2 second timer so the deleting process can be even",
"async def clear(ctx, amount=10):\n\tawait ctx.channel.purge(limit=amount + 1)",
"async def clear(ctx, amount=100):\r\n channel = ctx.message.channel\r\n messages = []\r\n amount = int(amount) + 1\r\n async for message in cleintt.logs_from(channel, limit=amount):\r\n messages.append(message)\r\n await bot.delete_messages(messages)",
"async def clear(ctx, messages=5):\n await ctx.channel.purge(\n limit=messages + 1\n ) # the clear command counts as a message, so be sure to remove it too",
"async def prune(self, ctx, n=1):\n n = abs(n)\n if n > Commands.prune_cutoff:\n await ctx.channel.send('You can only delete up to 25 messages at a time.')\n return\n log.debug(f'Purging {n + 1} message(s)...') # Includes the invoking command\n await ctx.message.remove_reaction(\"\\U000023F3\", ctx.me) # Hourglass not done\n await ctx.channel.purge(limit=n + 1)\n\n title = f'{ctx.message.author} deleted {n} message'\n title += 's!' if n > 1 else '!'\n embed = discord.Embed(title=title, colour=discord.Colour(0xe7d066))\n await ctx.send(embed=embed)",
"def _cull_oldest(self, n=1):\n for msg_id in self.get_history()[:n]:\n self.log.debug(\"Culling record: %r\", msg_id)\n self._culled_ids.add(msg_id)\n self.drop_record(msg_id)",
"async def clear(self, ctx, amount: int, user: discord.Member = None):\n amount += 1\n\n def clear_x(m):\n return m.author == user\n if not user:\n everyone = True\n else:\n everyone = False\n if amount <= 101:\n if not everyone:\n await ctx.channel.purge(limit=amount, check=clear_x, bulk=True)\n elif everyone:\n await ctx.channel.purge(limit=amount, bulk=True)\n log.console(f\"Pruned {amount} messages from {ctx.channel.id}\")\n if amount >= 102:\n if amount > 1000:\n amount = 1000\n number = (amount // 100)\n await ctx.send(\n f\"> **{amount}** messages will be deleted in 5 seconds and will be split in intervals of 100.\")\n for _ in range(number):\n await asyncio.sleep(0)\n if not everyone:\n await ctx.channel.purge(limit=100, check=clear_x, bulk=True)\n elif everyone:\n await ctx.channel.purge(limit=100, bulk=True)\n log.console(f\"Pruned 100 messages from {ctx.channel.id}\")\n await ctx.send(f\"> **{amount}** messages have been pruned from {ctx.channel.id}.\")",
"async def cleanup(self, ctx):\r\n msgs = await ctx.message.channel.history(limit=100).flatten()\r\n msgs = [msg for msg in msgs if msg.author.id == self.amethyst.user.id]\r\n\r\n if (len(msgs) > 0 and\r\n ctx.me.permissions_in(ctx.channel).manage_messages):\r\n await ctx.channel.delete_messages(msgs)\r\n elif len(msgs) > 0:\r\n for msg in msgs:\r\n await msg.delete()\r\n else:\r\n return\r\n\r\n msg = await ctx.send(\"Cleaned `{}`\".format(len(msgs)))\r\n await asyncio.sleep(2.5)\r\n await msg.delete()",
"def clear(self, page_size=10, vtimeout=10):\r\n n = 0\r\n l = self.get_messages(page_size, vtimeout)\r\n while l:\r\n for m in l:\r\n self.delete_message(m)\r\n n += 1\r\n l = self.get_messages(page_size, vtimeout)\r\n return n",
"async def clear(self, ctx, limit):\n self.log_command_call(\"clear\", ctx.message)\n error = self._validate_clear_args(limit)\n if error is not None:\n error_embed = create_error_embed(description=error)\n await ctx.send(embed=error_embed)\n else:\n limit = int(limit) + 1 # To account for THIS command call\n await ctx.channel.purge(\n limit=limit, check=lambda msg: self._should_delete(msg, ctx)\n )\n # Send some feedback\n auto_destruct_timer = 5\n feedback_embed = create_embed(\n title=\"Purge recap\",\n description=f\"Check the last {limit-1} message for deletion\",\n )\n feedback_embed.set_footer(\n text=f\"This message will auto-destruct in {auto_destruct_timer} seconds\"\n )\n message = await ctx.send(embed=feedback_embed)\n # Then we delete the call and our feedback\n time.sleep(auto_destruct_timer)\n await ctx.message.delete()\n await message.delete()",
"def get_new_messages(self):\n inbox = list(self.reddit.inbox.unread(limit=10))\n inbox.reverse()\n return inbox",
"async def purge(self, ctx, msg_number: int = 10):\n\n if ctx.guild.id == 202724765218242560:\n return\n\n if msg_number > 100:\n await ctx.error(\"No more than 100 messages can be purged at a time.\")\n return\n\n deleted = await ctx.channel.purge(limit=msg_number)\n s = \"s\" if len(deleted) > 1 else \"\"\n result_msg = await ctx.info(f'Deleted {len(deleted)} message{s}.')\n await asyncio.sleep(3)\n await result_msg.delete()",
"async def nuke(ctx, count):\n global STOPNUKE\n launcher = await is_launcher(ctx)\n staff = await is_staff(ctx)\n if not (staff or (launcher and ctx.message.channel.name == \"welcome\")):\n return await ctx.send(\"APOLOGIES. INSUFFICIENT RANK FOR NUKE.\")\n if STOPNUKE:\n return await ctx.send(\"TRANSMISSION FAILED. ALL NUKES ARE CURRENTLY PAUSED. TRY AGAIN LATER.\")\n if int(count) > 100:\n return await ctx.send(\"Chill. No more than deleting 100 messages at a time.\")\n channel = ctx.message.channel\n if int(count) < 0:\n history = await channel.history(limit=105).flatten()\n message_count = len(history)\n print(message_count)\n if message_count > 100:\n count = 100\n else:\n count = message_count + int(count) - 1\n if count <= 0:\n return await ctx.send(\"Sorry, you can not delete a negative amount of messages. This is likely because you are asking to save more messages than there are in the channel.\")\n await ctx.send(\"=====\\nINCOMING TRANSMISSION.\\n=====\")\n await ctx.send(\"PREPARE FOR IMPACT.\")\n for i in range(10, 0, -1):\n await ctx.send(f\"NUKING {count} MESSAGES IN {i}... TYPE `!stopnuke` AT ANY TIME TO STOP ALL TRANSMISSION.\")\n await asyncio.sleep(1)\n if STOPNUKE:\n return await ctx.send(\"A COMMANDER HAS PAUSED ALL NUKES FOR 20 SECONDS. NUKE CANCELLED.\")\n if not STOPNUKE:\n async for m in channel.history(limit=(int(count) + 13)):\n if not m.pinned and not STOPNUKE:\n await m.delete()\n msg = await ctx.send(\"https://media.giphy.com/media/XUFPGrX5Zis6Y/giphy.gif\")\n await asyncio.sleep(5)\n await msg.delete()",
"async def purge(self, ctx, count: int):\n await ctx.channel.purge(limit=count+1)",
"async def channel(self, ctx, limit: int=100, channel: discord.TextChannel=None):\n\n if channel is None:\n channel = ctx.channel\n\n # noinspection PyUnresolvedReferences\n messages = await channel.purge(limit=limit)\n messages = len(messages)\n\n plural = '' if messages == 1 else 's'\n\n await ctx.send('Purged {} message{}.'.format(messages, plural), delete_after=10)",
"def drop_message(self, client, channel, i):\n del self.storage[client][channel][i]",
"async def delete_bot_msg(self, channel):\n await channel.purge(limit=100, check=self.is_me)",
"def clear_messages(self):\n self.redis_client.delete(self.message_list)",
"async def clear(self, ctx, amount: int = 2):\n loading_msg = await ctx.send(content=f\"Deleting {amount} messages.\")\n\n def check(m):\n return m.id != loading_msg.id\n\n await ctx.channel.purge(limit=amount, check=check)\n await loading_msg.edit(content=f\"{amount} messages have been deleted.\")",
"def _popN(self, n):\n for _ in range(n):\n self._buffer.popleft()",
"async def process_prune(\n channel, amount, user_id: int, ctx=None, inter=None, allowed_mentions=None\n):\n user = await User.get(user_id)\n if amount not in range(PRUNE_MIN, PRUNE_MAX):\n return await send_message(\n PRUNE_MIN,\n PRUNE_MAX,\n key=\"not_in_range\",\n user=user,\n ctx=ctx,\n inter=inter,\n allowed_mentions=allowed_mentions,\n )\n\n await channel.purge(limit=amount, bulk=True)\n return await send_message(\n amount if inter else amount - 1,\n key=\"messages_cleared\",\n user=user,\n ctx=ctx,\n inter=inter,\n allowed_mentions=allowed_mentions,\n delete_after=5,\n )",
"def clear_messages(self):\n with self.message_lock:\n self.messages = self.messages[self._processed_messages:]\n self._processed_messages = 0",
"def delete_packets(self, num):\n for i in range(num):\n del self._packets[0]",
"def popMsg(self):\n\n if not self.queue:\n return []\n returned_msgs = []\n for msg, delay in self.queue:\n delay -= 1\n if delay < 1:\n returned_msgs.append(msg)\n else:\n self.pushMsg(msg, delay)\n self.queue = []\n return returned_msgs",
"def pop_messages(self):\n msge = self.received_messages\n self.received_messages = []\n return msge",
"async def purge(self, ctx, target: discord.Member=None, amount: int=10):\n if amount > 100:\n return ctx.send('Maximum messages is 100')\n if target is None:\n target = ctx.me\n await ctx.message.delete()\n delete = []\n async for m in ctx.channel.history(limit=200):\n if m.author == target:\n delete.append(m)\n await ctx.channel.delete_messages(delete[:amount - 1])",
"def _remove_old_items(self):\n if self.size_limit is not None:\n while len(self) > self.size_limit:\n self.popitem(last=False)",
"def get_last_messages(self, count):\n return self.buffer.get_last(count)",
"async def purge(self, ctx, num_msg: int = 100):\n if num_msg > 100:\n return await ctx.error('Number of messages to be deleted must not exceed 100.')\n\n try:\n await ctx.channel.purge(limit=num_msg)\n except Exception as e:\n await ctx.error(f'Failed to delete messages.\\n ```py\\n{e}```')",
"async def chatchart(self, ctx, channel: Optional[discord.TextChannel] = None, messages:int = 5000):\n if channel is None:\n channel = ctx.channel\n\n # --- Early terminations\n if channel.permissions_for(ctx.message.author).read_messages is False:\n return await ctx.send(\"You're not allowed to access that channel.\")\n if channel.permissions_for(ctx.guild.me).read_messages is False:\n return await ctx.send(\"I cannot read the history of that channel.\")\n blacklisted_channels = await self.config.guild(ctx.guild).channel_deny()\n if channel.id in blacklisted_channels:\n return await ctx.send(f\"I am not allowed to create a chatchart of {channel.mention}.\")\n if messages < 5:\n return await ctx.send(\"Don't be silly.\")\n\n message_limit = await self.config.limit()\n if (message_limit != 0) and (messages > message_limit):\n messages = message_limit\n\n embed = discord.Embed(\n title=f\"Fetching messages from #{channel.name}\",\n description=\"This might take a while...\",\n colour=await self.bot.get_embed_colour(location=channel)\n )\n loading_message = await ctx.send(embed=embed)\n try:\n history = await self.fetch_channel_history(channel, loading_message, messages)\n except discord.errors.Forbidden:\n try:\n await loading_message.delete()\n except discord.NotFound:\n pass\n return await ctx.send(\"No permissions to read that channel.\")\n\n msg_data = self.calculate_member_perc(history)\n # If no members are found.\n if len(msg_data[\"users\"]) == 0:\n try:\n await loading_message.delete()\n except discord.NotFound:\n pass\n return await ctx.send(f\"Only bots have sent messages in {channel.mention} or I can't read message history.\")\n\n top_twenty, others = self.calculate_top(msg_data)\n chart = await self.create_chart(top_twenty, others, channel)\n\n try:\n await loading_message.delete()\n except discord.NotFound:\n pass\n await ctx.send(file=discord.File(chart, \"chart.png\"))"
] | [
"0.67335105",
"0.6715486",
"0.66315895",
"0.66250706",
"0.6528937",
"0.6269455",
"0.62468845",
"0.6244145",
"0.62168974",
"0.620329",
"0.6188422",
"0.6178319",
"0.6148279",
"0.6100332",
"0.6079787",
"0.6058955",
"0.59551233",
"0.5944647",
"0.59410363",
"0.5928811",
"0.5881828",
"0.5869317",
"0.5825282",
"0.5802768",
"0.5797338",
"0.57653165",
"0.5711308",
"0.56848377",
"0.56842595",
"0.5663346"
] | 0.682575 | 0 |
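Two small pieces of logic carry the retrieved purge command: the requested count is clamped to 100, and the confirmation reaction is only accepted from the command author on the question message. The sketch below reproduces that predicate with plain namedtuples standing in for discord objects, so it runs without a bot; all names and IDs are hypothetical.

```python
from collections import namedtuple

Reaction = namedtuple("Reaction", "message_id emoji")
User = namedtuple("User", "id")

def make_author_check(question_message_id, author_id, allowed_emojis):
    # Same shape as check_is_author above: right message, right user, known emoji.
    def check(reaction, user):
        return (reaction.message_id == question_message_id
                and user.id == author_id
                and reaction.emoji in allowed_emojis)
    return check

check = make_author_check(question_message_id=111, author_id=42, allowed_emojis=("✅", "❌"))
print(check(Reaction(111, "✅"), User(42)))  # True: the author confirmed on the question
print(check(Reaction(111, "✅"), User(7)))   # False: someone else reacted
```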
use '[.,!]report setup' in the channel that should become the report channel | async def setup(self, ctx):
self.report_channel = ctx.message.channel
with open('data/report_channel.json', 'w') as f:
json.dump({"channel": self.report_channel.id}, f)
await ctx.send('This channel is now the report channel') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def report(self, ctx: commands.Context, report: typing.Optional[str], args: commands.Greedy[typing.Union[discord.User, discord.TextChannel]]):\n author = ctx.message.author\n if report == 'setup':\n if checks.is_owner_or_moderator_check(ctx.message):\n await ctx.invoke(self.setup)\n return\n else:\n await ctx.send(\"You don't have permission to do this\")\n ctx.command.reset_cooldown(ctx)\n return\n if not await self.report_checks(report, ctx):\n return\n embed, file_list_reply, file_list = await self.build_message(ctx.message, report, args)\n user_copy = await ctx.author.send(f\"going to send the following report message:\"\n f\"\\n check with {self.reactions[0]} to send\"\n f\" or {self.reactions[1]} to abort\",\n files=file_list_reply, embed=embed)\n for reaction in self.reactions:\n await user_copy.add_reaction(reaction)\n\n def react_check(reaction, user):\n if user is None or user.id != ctx.author.id:\n return False\n if reaction.message.id != user_copy.id:\n return False\n if reaction.emoji in self.reactions:\n return True\n return False\n try:\n reaction, user = await self.bot.wait_for('reaction_add', check=react_check, timeout=60)\n except asyncio.TimeoutError as tm:\n await user_copy.edit(content=\"You waited too long, use the command again to send a report\")\n await user_copy.remove_reaction(self.reactions[0], self.bot.user)\n await user_copy.remove_reaction(self.reactions[1], self.bot.user)\n ctx.command.reset_cooldown(ctx)\n return\n else:\n if reaction.emoji == self.reactions[0]:\n await self.report_channel.send(embed=embed, files=file_list)\n self.logger.info('User %s#%s(id:%s) reported: \"%s\"', author.name, author.discriminator, author.id, report)\n await author.send(\"successfully sent\")\n else:\n await user_copy.delete()\n ctx.command.reset_cooldown(ctx)",
"def report():\n pass",
"def setup(bot):\n global report\n global opened\n\n update_report(\"\", \"\", \"\")\n \n set_state(CLOSED)\n print(\"Variable reset.\")",
"def report(self, report):\n\n self._report = report",
"def report():\n Robot.report()",
"def init_report(self, report):\n report.text('warning', 'init_report() not implemented for this class.')",
"def report(self, **options):\n pass",
"async def report(self, ctx, user: discord.Member, *, reason):\n\n data = await self.bot.mongo.db.guild.find_one({\"_id\": ctx.guild.id})\n channel = ctx.guild.get_channel(data[\"report_channel_id\"])\n\n await channel.send(\n f\"{ctx.author.mention} reported {user.mention} in {ctx.channel.mention} for:\\n> {reason}\"\n )\n await ctx.send(f\"Reported **{user}**.\")",
"def reports_cli():",
"def report(self, output_dir):",
"def __send_reports__(self,config,mockdb):\n numbers = config.get('Flowcell_reports','numbers').split(',')\n for number in numbers:\n flowcell_report_key = getattr(self,'flowcell_report_' + str(number) + '_key')\n if flowcell_report_key is None:\n continue\n report = mockdb['FlowcellStatisticReport'].objects[flowcell_report_key]\n if report.report_sent is True: #If the report is already sent, next.\n continue\n if not report.__is_complete__(): #If the qsub script is still running, next.\n continue\n if self.sequencing_run_type == 'RapidRun' and str(number) == '16':\n recipients = config.get('Flowcell_reports','last_recipients')\n subject, body = report.__generate_flowcell_report_text__(config,mockdb,report_type=\"last_report\")\n #Add samples to the all sample list\n sample_keys = self.__completed_samples_list__(mockdb)\n write_list_file(sample_keys,config.get('Filenames','all_samples'),original_list_file=config.get('Filenames','all_samples'))\n self.__finish__()\n elif self.sequencing_run_type == 'HighThroughputRun' and str(number) == '64':\n recipients = config.get('Flowcell_reports','last_recipients')\n subject, body = report.__generate_flowcell_report_text__(config,mockdb,report_type=\"last_report\")\n #Add samples to the all sample list\n sample_keys = self.__completed_samples_list__(mockdb)\n write_list_file(sample_keys,config.get('Filenames','all_samples'),original_list_file=config.get('Filenames','all_samples'))\n self.__finish__()\n else:\n recipients = config.get('Flowcell_reports','subset_recipients')\n subject, body = report.__generate_flowcell_report_text__(config,mockdb,report_type=\"subset_report\")\n files = []\n files.append(report.report_pdf)\n files.append(report.full_report)\n files.append(report.current_report)\n send_email(subject,body,recipients=recipients,files=files)\n report.__finish__()\n report.report_sent = True\n return 1",
"def gReport(self, event):\n \n reports.createReports()",
"def report_handler(bot, new_report):\n event_count = report[2]\n \n # Count events and take report & time\n if event_count == 0:\n event_count = new_report.count(\"|\")\n else:\n event_count += new_report.count(\"|\")\n\n timestamp = datetime.now()\n reporttime = timestamp.strftime(\"[%H:%M]\")\n\n #Console log\n print(timestamp.strftime(\"[%d %b, %H:%M]\") + \" -- \" + report)\n\n update_report(new_report, reporttime, event_count)\n \n bot.say(\"Understood.\")\n \n update_topic(bot, new_report, sopel.tools.target.Channel(CHANNEL))",
"def pytest_runtest_makereport(item, call):\n report = (yield).get_result() # pytest.TestReport\n config = item.config\n enabled = config.getvalue('yagot')\n if enabled:\n if report.when == \"call\" and not report.passed:\n import yagot\n tracker = yagot.GarbageTracker.get_tracker()\n tracker.ignore()",
"def setUp(self):\n super().setUp()\n self.report = {\n \"report_uuid\": \"report_uuid\",\n \"title\": \"Report\",\n \"subjects\": {\"subject_uuid\": {\"name\": \"Subject\", \"type\": \"software\", \"metrics\": {}}},\n }",
"def _setReport(self, pReport):\n bytessent = self._dev.ctrl_transfer(bmRequestType = 0x21, # Set_Report Request\n bRequest = 0x09, # SET_REPORT\n wValue = 0x200, # report type = output, report ID = 0\n wIndex = 0, # interface 0\n data_or_wLength = pReport)\n print(f\"Sent {bytessent} over interface\")",
"def initial_reporting(config, run_tracker):\r\n reports_dir = config.get('reporting', 'reports_dir',\r\n default=os.path.join(config.getdefault('pants_workdir'), 'reports'))\r\n link_to_latest = os.path.join(reports_dir, 'latest')\r\n if os.path.exists(link_to_latest):\r\n os.unlink(link_to_latest)\r\n\r\n run_id = run_tracker.run_info.get_info('id')\r\n if run_id is None:\r\n raise ReportingError('No run_id set')\r\n run_dir = os.path.join(reports_dir, run_id)\r\n safe_rmtree(run_dir)\r\n\r\n html_dir = os.path.join(run_dir, 'html')\r\n safe_mkdir(html_dir)\r\n os.symlink(run_dir, link_to_latest)\r\n\r\n report = Report()\r\n\r\n # Capture initial console reporting into a buffer. We'll do something with it once\r\n # we know what the cmd-line flag settings are.\r\n outfile = StringIO()\r\n capturing_reporter_settings = PlainTextReporter.Settings(outfile=outfile, log_level=Report.INFO,\r\n color=False, indent=True, timing=False,\r\n cache_stats=False)\r\n capturing_reporter = PlainTextReporter(run_tracker, capturing_reporter_settings)\r\n report.add_reporter('capturing', capturing_reporter)\r\n\r\n # Set up HTML reporting. We always want that.\r\n template_dir = config.get('reporting', 'reports_template_dir')\r\n html_reporter_settings = HtmlReporter.Settings(log_level=Report.INFO,\r\n html_dir=html_dir,\r\n template_dir=template_dir)\r\n html_reporter = HtmlReporter(run_tracker, html_reporter_settings)\r\n report.add_reporter('html', html_reporter)\r\n\r\n # Add some useful RunInfo.\r\n run_tracker.run_info.add_info('default_report', html_reporter.report_path())\r\n port = ReportingServerManager.get_current_server_port()\r\n if port:\r\n run_tracker.run_info.add_info('report_url', 'http://localhost:%d/run/%s' % (port, run_id))\r\n\r\n return report",
"async def _report(self, ctx: Context, *, msg: str):\n\n report_str = (\n f\"`{datetime.utcnow().replace(microsecond=0)}` {ctx.author}\"\n f\" (`{ctx.author.id}`) reported from `{ctx.guild or 'DM'}`: **{msg}**\"\n )\n\n channel_id = await self.config.report_channel()\n\n channel = None\n if channel_id:\n channel = self.bot.get_channel(channel_id)\n\n if channel:\n await channel.send(report_str)\n else:\n owner = self.bot.get_user(self.bot.owner_id)\n await owner.send(report_str)\n\n await ctx.send(\n \"Thank you for sending a report. Your issue\"\n \" will be resolved as soon as possible.\"\n )",
"async def report(ctx, *args):\n server = bot.get_guild(SERVER_ID)\n reports_channel = discord.utils.get(server.text_channels, name=CHANNEL_REPORTS)\n message = args[0]\n if len(args) > 1:\n message = ' '.join(args)\n poster = str(ctx.message.author)\n embed = assemble_embed(\n title=f\"Report Received (using `!report`)\",\n webcolor=\"red\",\n authorName = poster,\n authorIcon = ctx.message.author.avatar_url_as(format=\"jpg\"),\n fields = [{\n \"name\": \"Message\",\n \"value\": message,\n \"inline\": False\n }]\n )\n message = await reports_channel.send(embed=embed)\n REPORT_IDS.append(message.id)\n await message.add_reaction(\"\\U00002705\")\n await message.add_reaction(\"\\U0000274C\")\n await ctx.send(\"Thanks, report created.\")",
"def __set_report_path(self):\n self.report_path = os.path.join(self.get_report_path(), \"cyclomatic_report\")\n Path(self.report_path).mkdir(parents=True, exist_ok=True)",
"def _generate_report(self):\n raise NotImplementedError",
"def enable_reporting(self):\n self.reporting = True\n msg = chr(REPORT_DIGITAL + self.port_number)\n msg += chr(1)\n self.board.sp.write(msg)\n for pin in self.pins:\n if pin.mode == INPUT:\n pin.reporting = True # TODO Shouldn't this happen at the pin?",
"def buildReports(self):\n pass",
"def pytest_runtest_makereport(item):\n # execute all other hooks to obtain the report object\n outcome = yield\n rep = outcome.get_result()\n\n # set a report attribute for each phase of a call, which can\n # be \"setup\", \"call\", \"teardown\"\n\n setattr(item, \"rep_\" + rep.when, rep)",
"def initialize_reporting(self):\n reporting_params = self.reporting_params\n reporting_params[\"heartbeat_path\"] = self.result_paths[\"current_heartbeat\"]\n reporting_handler = ReportingHandler(**reporting_params)\n\n #################### Make Unified Logging Globally Available ####################\n G.log = reporting_handler.log\n G.debug = reporting_handler.debug\n G.warn = reporting_handler.warn",
"def report(self, report_options=None):\n raise NotImplementedError()",
"def pytest_runtest_makereport(item, call):\n # execute all other hooks to obtain the report object\n outcome = yield\n rep = outcome.get_result()\n\n # set a report attribute for each phase of a call, which can\n # be \"setup\", \"call\", \"teardown\"\n # used to e.g. during browser tests to take screenshot on failure\n setattr(item, \"rep_\" + rep.when, rep)",
"def take_report_tg(bot, trigger):\n new_report = trigger.group(0).split(\"eporting: \")[1]\n\n # Check preliminaries and then call report handler\n if trigger.sender == CHANNEL:\n if get_state():\n if new_report is None:\n # Silly...\n bot.reply(\"Information content zero..\")\n else:\n # Take report and count events in it, if any\n report_handler(bot, new_report)\n else:\n # How hard can it be...\n bot.reply(\"Room is not open\")\n else:\n # Seriously...\n bot.reply(\"You have to do this from \" + CHANNEL)",
"async def report(self, ctx, *, suggestion: str = None):\n\n channel = self.bot.get_channel(485679174309249046)\n\n if suggestion is None:\n msg = await ctx.send('What would you like to report/feedback on?')\n\n def check(message):\n return message.author.id == ctx.author.id\n\n try:\n report = await self.bot.wait_for('message', check=check, timeout=180)\n except asyncio.TimeoutError:\n return await msg.delete()\n\n embed = discord.Embed(title=f'Bug Report/Feedback', colour=discord.Color.dark_blue(),\n description=f'```css\\n{report.content}\\n```')\n embed.add_field(name='User', value=f'**{ctx.author}** ({ctx.author.id})')\n embed.add_field(name='Guild', value=f'**{ctx.guild.name}** ({ctx.guild.id})')\n embed.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url)\n embed.set_footer(text='Received ').timestamp = datetime.datetime.utcnow()\n embed.set_thumbnail(url=ctx.author.avatar_url)\n await channel.send(embed=embed)\n await ctx.send(\"Thanks for the feedback or report!\")\n\n else:\n embed = discord.Embed(title=f'Bug Report/Feedback', colour=discord.Color.dark_blue(),\n description=f'```css\\n{suggestion}\\n```')\n embed.add_field(name='User', value=f'**{ctx.author}** ({ctx.author.id})')\n embed.add_field(name='Guild', value=f'**{ctx.guild.name}** ({ctx.guild.id})')\n embed.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url)\n embed.set_footer(text='Received ').timestamp = datetime.datetime.utcnow()\n embed.set_thumbnail(url=ctx.author.avatar_url)\n await channel.send(embed=embed)\n await ctx.send(\"Thanks for the feedback or report!\")",
"def gen_report(self):\n self.report = '#Report for {0}\\n'.format(self.ip)\n self.report += 'This report was generated by the chameleon pentest bot. We cannot grant 100% accurate results.\\n'\n self.report += '###Services:\\n'\n for service in self.services:\n self.report += '#####{0}:\\n- Port: {1}\\n- Info:{2}'.format(service.name, service.port, service.info)\n self.report += '###Vulnerabilities:\\n'\n for vuln in self.vulns:\n self.report += '- {0}\\n'.format(vuln.name)\n self.report += 'Open an issue for wrong results at github.com/coretool/chameleon.'"
] | [
"0.683029",
"0.6375262",
"0.613417",
"0.59820586",
"0.59680825",
"0.5906083",
"0.57938373",
"0.57611376",
"0.57535285",
"0.57166386",
"0.5662714",
"0.56567514",
"0.56311053",
"0.5629033",
"0.56285334",
"0.5610708",
"0.5606113",
"0.5603995",
"0.5593045",
"0.55677265",
"0.5555029",
"0.55538166",
"0.55468875",
"0.5546586",
"0.5545531",
"0.55375403",
"0.5530332",
"0.5495527",
"0.54893017",
"0.5488836"
] | 0.72447664 | 0 |
selfmute yourself for certain amount of time | async def selfmute(self, ctx, amount:int, time_unit:str):
length, error_msg = self.convert_mute_length(amount, time_unit)
if not length:
await ctx.send(error_msg)
return
if length > 7 * self.units["days"]:
question = await ctx.send(f"Are you sure you want to be muted for {(length/self.units['days']):.2f} days?\n"
f"answer with Y[es] or N[o]")
def msg_check(message):
return message.author.id == ctx.author.id and message.channel.id == ctx.message.channel.id
try:
message = await self.bot.wait_for("message", check=msg_check, timeout=20.0)
if re.match(r"y(es)?", message.content.lower()):
pass
else:
await question.edit(content="self mute aborted")
return
except asyncio.TimeoutError:
await question.edit(content="Timeout: mute aborted")
return
unmute_ts = datetime.datetime.utcnow() + datetime.timedelta(seconds=length)
await ctx.author.add_roles(self.mute_role)
await ctx.send("You have been muted")
await self.add_mute_to_mute_list(ctx.author.id, unmute_ts) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def selfmute(ctx, *args):\n user = ctx.message.author\n if await is_staff(ctx):\n return await ctx.send(\"Staff members can't self mute.\")\n time = \" \".join(args)\n await _mute(ctx, user, time, self=True)",
"async def _mute(ctx, user:discord.Member, time: str, self: bool):\n if user.id in PI_BOT_IDS:\n return await ctx.send(\"Hey! You can't mute me!!\")\n if time == None:\n return await ctx.send(\"You need to specify a length that this used will be muted. Examples are: `1 day`, `2 months, 1 day`, or `indef` (aka, forever).\")\n role = None\n if self:\n role = discord.utils.get(user.guild.roles, name=ROLE_SELFMUTE)\n else:\n role = discord.utils.get(user.guild.roles, name=ROLE_MUTED)\n parsed = \"indef\"\n if time != \"indef\":\n parsed = dateparser.parse(time, settings={\"PREFER_DATES_FROM\": \"future\"})\n if parsed == None:\n return await ctx.send(\"Sorry, but I don't understand that length of time.\")\n CRON_LIST.append({\"date\": parsed, \"do\": f\"unmute {user.id}\"})\n await user.add_roles(role)\n eastern = pytz.timezone(\"US/Eastern\")\n await ctx.send(f\"Successfully muted {user.mention} until `{str(eastern.localize(parsed))} EST`.\")",
"async def tempmute(self, ctx,\n\t\ttarget: discord.Member,\n\t\tduration: DurationConverter,\n\t\t*, reason: str = \"No reason given.\"\n\t):\n\n\t\tself.check_perms(ctx.author, target)\n\n\t\thandler = await Handler.new(self.bot, ctx.guild)\n\t\tawait handler.mute(ctx.author, target, reason, duration)\n\n\t\tawait ctx.success(f\"{target} (`{target.id}`) has been muted for {time_since(seconds=duration)} for:\\n{reason}\")",
"async def mute(self, ctx, member: discord.Member, *, time:TimeConverter = None):\r\n\r\n if member.top_role >= ctx.author.top_role:\r\n return await ctx.send(\"you can't mute that person\")\r\n\r\n role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\r\n await member.add_roles(role)\r\n await ctx.reply((f\"Muted {member} for {time}s\" if time else f\"Muted {member}\"))\r\n\r\n if time:\r\n await asyncio.sleep(time)\r\n await member.remove_roles(role)",
"async def mute(self, ctx, user: discord.Member, amount: int, time_unit: str, *, reason: typing.Optional[str]):\n length, error_msg = self.convert_mute_length(amount, time_unit)\n if not length:\n await ctx.send(error_msg)\n return\n unmute_ts = datetime.datetime.utcnow() + datetime.timedelta(seconds=length)\n mute_message = f\"user {user.mention} was muted\"\n await user.add_roles(self.mute_role)\n await ctx.send(mute_message)\n if reason:\n mute_message = f\"{mute_message} for the following reason:\\n{reason}\"\n await self.add_mute_to_mute_list(user.id, unmute_ts)\n await self.check_channel.send(mute_message)",
"async def mute(self, ctx, member: discord.Member, time='15m'):\n guild_permissions = member.guild_permissions\n wait_time = parse_time(time).total_seconds()\n # Because sometimes members have nicknames with markdown\n escaped_name = escape_markdown(member.display_name)\n\n if guild_permissions.kick_members:\n # do not mute someone who has permissions to kick members\n await ctx.send(f'Cannot mute {escaped_name} due to roles.')\n\n elif member.bot:\n # do not mute bots\n await ctx.send(f'Cannot mute {escaped_name} (is a bot).')\n\n else:\n overwrite = discord.PermissionOverwrite(\n add_reactions=False,\n send_messages=False,\n )\n\n log_str = (f'{ctx.author.display_name} has muted '\n f'member {member} (<@{member.id}>) for {time}.')\n logger.info(log_str)\n\n for channel in ctx.guild.text_channels:\n permissions = channel.permissions_for(member)\n\n if permissions.read_messages:\n await channel.set_permissions(member, overwrite=overwrite)\n\n await asyncio.sleep(wait_time)\n await ctx.invoke(self.unmute, member)",
"def mute(self, msg, args):\n if self.mute:\n self.mute=False\n return \"Yay, I can make noise again!\"\n else:\n self.mute=True\n return \"OK, I'll shut up now!\"",
"async def mute(self, ctx, user: discord.Member, time_and_unit=None, *, reason: str = None):\r\n server = ctx.message.guild\r\n channel = ctx.message.channel\r\n author = ctx.message.author\r\n if channel.permissions_for(user).administrator:\r\n await ctx.send(\"That user has administrator perms, why would I even try :no_entry:\")\r\n return\r\n if user.top_role.position >= author.top_role.position:\r\n if author == server.owner:\r\n pass\r\n else:\r\n await ctx.send(\"You can not mute someone higher than your own role :no_entry:\")\r\n return\r\n if not time_and_unit:\r\n time2 = 600\r\n time = \"10\"\r\n unit = \"minutes\"\r\n else:\r\n try:\r\n unit = time_and_unit[len(time_and_unit) - 1:len(time_and_unit)]\r\n except ValueError:\r\n await ctx.send(\"Invalid time unit :no_entry:\")\r\n return\r\n try:\r\n time = time_and_unit[0:len(time_and_unit) - 1]\r\n except ValueError:\r\n await ctx.send(\"Invalid time unit :no_entry:\")\r\n return\r\n if unit == \"s\":\r\n try:\r\n time2 = int(time)\r\n except ValueError:\r\n await ctx.send(\"Invalid time unit :no_entry:\")\r\n return\r\n if time == \"1\":\r\n unit = \"second\"\r\n else:\r\n unit = \"seconds\"\r\n elif unit == \"m\":\r\n try:\r\n time2 = int(time) * 60\r\n except ValueError:\r\n await ctx.send(\"Invalid time unit :no_entry:\")\r\n return\r\n if time == \"1\":\r\n unit = \"minute\"\r\n else:\r\n unit = \"minutes\"\r\n elif unit == \"h\":\r\n try:\r\n time2 = int(time) * 3600\r\n except ValueError:\r\n await ctx.send(\"Invalid time unit :no_entry:\")\r\n return\r\n if time == \"1\":\r\n unit = \"hour\"\r\n else:\r\n unit = \"hours\"\r\n elif unit == \"d\":\r\n try:\r\n time2 = int(time) * 86400\r\n except ValueError:\r\n await ctx.send(\"Invalid time unit :no_entry:\")\r\n return\r\n if time == \"1\":\r\n unit = \"day\"\r\n else:\r\n unit = \"days\"\r\n else:\r\n await ctx.send(\"Invalid time unit :no_entry:\")\r\n return\r\n action = \"Mute ({} {})\".format(time, unit)\r\n if str(server.id) not in self.d:\r\n self.d[str(server.id)] = {}\r\n dataIO.save_json(self.file, self.d)\r\n if str(user.id) not in self.d[str(server.id)]:\r\n self.d[str(server.id)][str(user.id)] = {}\r\n dataIO.save_json(self.file, self.d)\r\n if \"toggle\" not in self.d[str(server.id)][str(user.id)]:\r\n self.d[str(server.id)][str(user.id)][\"toggle\"] = False\r\n dataIO.save_json(self.file, self.d)\r\n if \"time\" not in self.d[str(server.id)][str(user.id)]:\r\n self.d[str(server.id)][str(user.id)][\"time\"] = None\r\n dataIO.save_json(self.file, self.d)\r\n if \"amount\" not in self.d[str(server.id)][str(user.id)]:\r\n self.d[str(server.id)][str(user.id)][\"amount\"] = None\r\n dataIO.save_json(self.file, self.d)\r\n role = discord.utils.get(server.roles, name=\"Muted - Sensei\")\r\n overwrite = discord.PermissionOverwrite()\r\n overwrite.send_messages = False\r\n perms = discord.PermissionOverwrite()\r\n perms.speak = False\r\n if not role:\r\n role = await server.create_role(name=\"Muted - Sensei\")\r\n for channels in ctx.guild.text_channels:\r\n await channels.set_permissions(role, overwrite=overwrite)\r\n for channels in ctx.guild.voice_channels:\r\n await channels.set_permissions(role, overwrite=perms)\r\n if role in user.roles:\r\n await ctx.send(\"**{}** is already muted :no_entry:\".format(user))\r\n return\r\n try:\r\n await user.add_roles(role)\r\n except:\r\n await ctx.send(\"I cannot add the mute role to the user :no_entry:\")\r\n return\r\n await ctx.send(f\"**{user}** has been muted for {time} {unit} 
{self.bot.get_emoji(470063310386233344)}\")\r\n try:\r\n await self._log(author, server, action, reason, user)\r\n except:\r\n pass\r\n self.d[str(server.id)][str(user.id)][\"toggle\"] = True\r\n self.d[str(server.id)][str(user.id)][\"amount\"] = time2\r\n self.d[str(server.id)][str(user.id)][\"time\"] = ctx.message.created_at.timestamp()\r\n dataIO.save_json(self.file, self.d)\r\n try:\r\n s = discord.Embed(title=\"You have been muted in {} :speak_no_evil:\".format(server.name), colour=0xfff90d,\r\n timestamp=__import__('datetime').datetime.utcnow())\r\n s.add_field(name=\"Moderator\", value=\"{} ({})\".format(author, str(author.id)), inline=False)\r\n s.add_field(name=\"Time\", value=\"{} {}\".format(time, unit), inline=False)\r\n if reason:\r\n s.add_field(name=\"Reason\", value=reason, inline=False)\r\n await user.send(embed=s)\r\n except:\r\n pass",
"async def mute(\n self, ctx, target: discord.Member, *, reason: Union[ModerationUserFriendlyTime, str]\n ):\n\n if target.guild_permissions.kick_members:\n return await ctx.send(\"You can't punish that person!\")\n\n if isinstance(reason, time.UserFriendlyTime):\n expires_at = reason.dt\n reason = reason.arg\n else:\n expires_at = None\n\n action = Mute(\n target=target,\n user=ctx.author,\n reason=reason,\n guild_id=ctx.guild.id,\n created_at=ctx.message.created_at,\n expires_at=expires_at,\n )\n await action.execute(ctx)\n await action.notify()\n if action.duration is None:\n await ctx.send(f\"Muted **{target}**.\")\n else:\n await ctx.send(f\"Muted **{target}** for **{time.human_timedelta(action.duration)}**.\")",
"def handle_mic_mute(_):\n loop.mute()",
"def disable_mute(self):\n self.mute = False",
"def mute():\n request_command(tv_command=TVCommand.mute)",
"async def tradingmute(\n self, ctx, target: discord.Member, *, reason: Union[ModerationUserFriendlyTime, str]\n ):\n\n if target.guild_permissions.kick_members:\n return await ctx.send(\"You can't punish that person!\")\n\n if isinstance(reason, time.UserFriendlyTime):\n expires_at = reason.dt\n reason = reason.arg\n else:\n expires_at = None\n\n action = TradingMute(\n target=target,\n user=ctx.author,\n reason=reason,\n guild_id=ctx.guild.id,\n created_at=ctx.message.created_at,\n expires_at=expires_at,\n )\n await action.execute(ctx)\n await action.notify()\n if action.duration is None:\n await ctx.send(f\"Muted **{target}** in trading channels.\")\n else:\n await ctx.send(\n f\"Muted **{target}** in trading channels for **{time.human_timedelta(action.duration)}**.\"\n )",
"async def mute(self, *args, **kwargs):\n self.muted = not self.muted # toogle\n if self.muted:\n self.just_muted = True\n return \"I've been muted :(\"\n return \"I'm back! :D\"",
"def auto_unmute():\n muted = set(t.mutes.users.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n\n # put user IDs of people you want to remain muted here\n users_keep_muted = set([])\n \n # mute all \n for user_id in muted:\n if user_id not in users_keep_muted:\n t.mutes.users.destroy(user_id=user_id)\n print(\"unmuted %d\" % (user_id))",
"async def poweroff(ctx):\n await ctx.send(\"Bye\")\n await bot.logout()",
"def mute(self, nick, chan, arg):\n if not arg:\n \treturn bot.msg(chan, get_doc())\n self.state.mute(arg)\n self.msg(chan, \"%s: You are temporarily prohibited from using this bot\" % (arg))",
"async def mute(self, ctx, member: discord.Member, expire_after = 10*60):\n # Get the muted role\n tanjo_muted_role = discord.utils.get(ctx.guild.roles, name='Tanjo-Muted')\n\n # Create role\n if tanjo_muted_role is None:\n tanjo_muted_role = await ctx.guild.create_role(name='Tanjo-Muted')\n\n # Ensure they aren't allowed to speak server-wide\n for channel in ctx.guild.channels:\n await channel.set_permissions(tanjo_muted_role, send_messages=False)\n\n # Actually mute the user\n await member.add_roles(tanjo_muted_role, reason=reason)\n\n # Create embed\n em = discord.Embed(title=f'Muted: {member}', color=self.color)\n em.description = f'Reason: {reason}'\n await ctx.send(embed=em)",
"async def muterole(self, ctx, *, role: discord.Role):\n await queries.update_setting(ctx, \"guild_settings\", \"mute_role_id\", role.id)\n await util.send_success(ctx, f\"Muting someone now gives them the role {role.mention}\")",
"def vibrate(self, duration):\n self.wm.rumble = 1\n sleep(duration)\n self.wm.rumble = 0",
"def temporarily_allow_handler(update, _):\n global TEMPORARILY_ALLOWED\n user_id = update.message.chat.id\n if user_id == ADMIN_ID:\n TEMPORARILY_ALLOWED = True\n update.message.reply_text(\"Temprarily allowed!\")",
"async def remove_mute(id: int) -> None:\n\n guild = BOT_GLOBAL.get_guild(BOT_GLOBAL.settings.guild_id)\n if guild is not None:\n mute_role = BOT_GLOBAL.settings.guild().role_mute\n mute_role = guild.get_role(mute_role)\n if mute_role is not None:\n user = guild.get_member(id)\n if user is not None:\n await user.remove_roles(mute_role)\n case = Case(\n _id=BOT_GLOBAL.settings.guild().case_id,\n _type=\"UNMUTE\",\n mod_id=BOT_GLOBAL.user.id,\n mod_tag=str(BOT_GLOBAL.user),\n reason=\"Temporary mute expired.\",\n )\n await BOT_GLOBAL.settings.inc_caseid()\n await BOT_GLOBAL.settings.add_case(user.id, case)\n\n u = await BOT_GLOBAL.settings.user(id=user.id)\n u.is_muted = False\n u.save()\n\n log = await prepare_unmute_log(BOT_GLOBAL.user, user, case)\n\n log.remove_author()\n log.set_thumbnail(url=user.avatar_url)\n\n public_chan = guild.get_channel(\n BOT_GLOBAL.settings.guild().channel_public)\n \n dmed = True\n try:\n await user.send(embed=log)\n except Exception:\n dmed = False\n \n await public_chan.send(user.mention if not dmed else \"\", embed=log)\n\n else:\n case = Case(\n _id=BOT_GLOBAL.settings.guild().case_id,\n _type=\"UNMUTE\",\n mod_id=BOT_GLOBAL.user.id,\n mod_tag=str(BOT_GLOBAL.user),\n reason=\"Temporary mute expired.\",\n )\n await BOT_GLOBAL.settings.inc_caseid()\n await BOT_GLOBAL.settings.add_case(id, case)\n\n u = await BOT_GLOBAL.settings.user(id=id)\n u.is_muted = False\n u.save()",
"def toggle_mute(cls) -> bool:\n raise NotImplementedError",
"async def mute(self, ctx,\n\t\ttarget: discord.Member,\n\t\t*, reason: str = \"No reason given.\"\n\t):\n\n\t\tself.check_perms(ctx.author, target)\n\t\t\n\t\thandler = await Handler.new(self.bot, ctx.guild)\n\t\tawait handler.mute(ctx.author, target, reason)\n\t\t\n\t\tawait ctx.success(f\"{target} (`{target.id}`) has been muted for:\\n{reason}\")",
"async def stealthtorment(self, ctx, *, member = None, times : int = None):\r\n\r\n\t\tchannel = ctx.message.channel\r\n\t\tauthor = ctx.message.author\r\n\t\tserver = ctx.message.guild\r\n\t\tmessage = ctx.message\r\n\r\n\t\t# Only allow owner\r\n\t\tisOwner = self.settings.isOwner(ctx.author)\r\n\t\tif isOwner == None:\r\n\t\t\treturn\r\n\t\telif isOwner == False:\r\n\t\t\treturn\r\n\t\t\t\t\r\n\t\tusage = 'Usage: `{}torment [role/member] [times]`'.format(ctx.prefix)\r\n\r\n\t\tisRole = False\r\n\r\n\t\tif member == None:\r\n\t\t\tawait ctx.channel.send(usage)\r\n\t\t\treturn\r\n\t\t\t\t\r\n\t\t# Check for formatting issues\r\n\t\tif times == None:\r\n\t\t\t# Either xp wasn't set - or it's the last section\r\n\t\t\tif type(member) is str:\r\n\t\t\t\t# It' a string - the hope continues\r\n\t\t\t\troleCheck = DisplayName.checkRoleForInt(member, server)\r\n\t\t\t\tif roleCheck and roleCheck[\"Role\"]:\r\n\t\t\t\t\tisRole = True\r\n\t\t\t\t\tmember = roleCheck[\"Role\"]\r\n\t\t\t\t\ttimes = roleCheck[\"Int\"]\r\n\t\t\t\telse:\r\n\t\t\t\t\t# Role is invalid - check for member instead\r\n\t\t\t\t\tnameCheck = DisplayName.checkNameForInt(member, server)\r\n\t\t\t\t\tif not nameCheck:\r\n\t\t\t\t\t\tawait ctx.channel.send(usage)\r\n\t\t\t\t\t\treturn\r\n\t\t\t\t\tif not nameCheck[\"Member\"]:\r\n\t\t\t\t\t\tmsg = 'I couldn\\'t find that user or role on the server.'.format(member)\r\n\t\t\t\t\t\tawait ctx.channel.send(msg)\r\n\t\t\t\t\t\treturn\r\n\t\t\t\t\tmember = nameCheck[\"Member\"]\r\n\t\t\t\t\ttimes = nameCheck[\"Int\"]\r\n\t\t\t\t\t\r\n\t\t# Set the torment flag\r\n\t\tself.toTorment = True\r\n\r\n\t\tif times == None:\r\n\t\t\t# Still no times - roll back to default\r\n\t\t\ttimes = 25\r\n\t\t\t\r\n\t\tif times > 100:\r\n\t\t\ttimes = 100\r\n\t\t\t\r\n\t\tif times == 0:\r\n\t\t\tawait ctx.channel.send('Oooooh - I bet they feel *sooooo* tormented...')\r\n\t\t\treturn\r\n\t\t\r\n\t\tif times < 0:\r\n\t\t\tawait ctx.channel.send('I just uh... *un-tormented* them. Yeah.')\r\n\t\t\treturn\r\n\r\n\t\t# Delete original torment message\r\n\t\tawait message.delete()\r\n\t\t\r\n\t\tfor i in range(0, times):\r\n\t\t\t# Do this over time\r\n\t\t\ttry:\r\n\t\t\t\tif member.name == \"@everyone\" and type(member) is discord.Role:\r\n\t\t\t\t\ttmessage = await ctx.channel.send(\"{}\".format(member.name),allowed_mentions=discord.AllowedMentions.all())\r\n\t\t\t\telse:\r\n\t\t\t\t\ttmessage = await ctx.channel.send('{}'.format(member.mention),allowed_mentions=discord.AllowedMentions.all())\r\n\t\t\t\tawait tmessage.delete()\r\n\t\t\texcept Exception:\r\n\t\t\t\tpass\r\n\t\t\tfor j in range(0, self.waitBetween):\r\n\t\t\t\t# Wait for 1 second, then check if we should cancel - then wait some more\r\n\t\t\t\tawait asyncio.sleep(1)\r\n\t\t\t\tif not self.toTorment:\r\n\t\t\t\t\treturn",
"def shake(r, num_repeats=1):\n for i in range(num_repeats):\n r.go(25)\n time.sleep(.1)\n r.stop()\n time.sleep(.1)\n r.go(-25)\n time.sleep(.1)\n r.stop()\n time.sleep(.1)",
"def handle_mic_unmute(_):\n loop.unmute()",
"async def resettimer(self, ctx:commands.Context, member: Member = None):\r\n\r\n await self.config.member(member if not member == None else ctx.message.author).currently_fishing.set(False)\r\n await ctx.send('Fishing cooldown reset')",
"async def async_turn_on(self):\n await self.async_mute_volume(False)",
"async def remainder_command(self, ctx, time: TimeConverter, *, reason):\n timers.Timer(\n self.client, \"remainder\", time, args=(ctx.channel.id, ctx.author.id, reason)\n ).start()\n embed = Embed(color=Color.blurple())\n embed.set_author(\n name=f\"Set a remainder for reason - {reason}\",\n icon_url=ctx.author.avatar_url,\n )\n await ctx.send(embed=embed)"
] | [
"0.75939256",
"0.68576396",
"0.6521713",
"0.6515285",
"0.64817613",
"0.64531577",
"0.64433974",
"0.63983494",
"0.6394465",
"0.6356057",
"0.6314334",
"0.6289779",
"0.6265375",
"0.6244864",
"0.6233572",
"0.6076269",
"0.6051343",
"0.5984072",
"0.59546584",
"0.59510046",
"0.59105337",
"0.5908882",
"0.59045416",
"0.58636075",
"0.5841728",
"0.58301806",
"0.58174556",
"0.5814605",
"0.5803383",
"0.5802236"
] | 0.7218142 | 1 |
mutes a user from voice for the whole server | async def voice_mute(self, ctx, member: discord.Member, *,reason: typing.Optional[str]):
await member.edit(mute=True, reason=reason[:512])
await ctx.send(f"User {member.mention} successfully muted from voice")
if reason:
await self.check_channel.send(f"user {member.mention} muted from voice for the following reason:\n"
f"{reason}") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def mute(self, ctx):\n author = ctx.message.author\n channel = author.voice.channel\n members = channel.members\n for member in members:\n user = ctx.guild.get_member(member.id)\n await user.edit(mute=True)\n\n embed = await embeds.generate_embed(ctx, author, members,\n description=\":white_check_mark: Successfully muted the following users:\",\n title=channel.name)\n await ctx.send(embed=embed)",
"async def voice_unmute(self, ctx, member: discord.Member, *, reason: typing.Optional[str]):\n if member.voice and member.voice.mute:\n await member.edit(mute=False, reason=reason[:512])\n await ctx.send(f\"User {member.mention} successfully unmuted from voice\")\n return\n if member.voice and not member.voice.mute:\n await ctx.send(\"User is not muted\")\n return\n self.to_unmute.append(member.id)\n await self.add_to_unmutes(member.id)\n await ctx.send(f\"User {member.mention} added to users that will be unmuted\")",
"def mute():\n request_command(tv_command=TVCommand.mute)",
"async def _mute(ctx, user:discord.Member, time: str, self: bool):\n if user.id in PI_BOT_IDS:\n return await ctx.send(\"Hey! You can't mute me!!\")\n if time == None:\n return await ctx.send(\"You need to specify a length that this used will be muted. Examples are: `1 day`, `2 months, 1 day`, or `indef` (aka, forever).\")\n role = None\n if self:\n role = discord.utils.get(user.guild.roles, name=ROLE_SELFMUTE)\n else:\n role = discord.utils.get(user.guild.roles, name=ROLE_MUTED)\n parsed = \"indef\"\n if time != \"indef\":\n parsed = dateparser.parse(time, settings={\"PREFER_DATES_FROM\": \"future\"})\n if parsed == None:\n return await ctx.send(\"Sorry, but I don't understand that length of time.\")\n CRON_LIST.append({\"date\": parsed, \"do\": f\"unmute {user.id}\"})\n await user.add_roles(role)\n eastern = pytz.timezone(\"US/Eastern\")\n await ctx.send(f\"Successfully muted {user.mention} until `{str(eastern.localize(parsed))} EST`.\")",
"async def mute(self, ctx, user: discord.Member, time_and_unit=None, *, reason: str = None):\r\n server = ctx.message.guild\r\n channel = ctx.message.channel\r\n author = ctx.message.author\r\n if channel.permissions_for(user).administrator:\r\n await ctx.send(\"That user has administrator perms, why would I even try :no_entry:\")\r\n return\r\n if user.top_role.position >= author.top_role.position:\r\n if author == server.owner:\r\n pass\r\n else:\r\n await ctx.send(\"You can not mute someone higher than your own role :no_entry:\")\r\n return\r\n if not time_and_unit:\r\n time2 = 600\r\n time = \"10\"\r\n unit = \"minutes\"\r\n else:\r\n try:\r\n unit = time_and_unit[len(time_and_unit) - 1:len(time_and_unit)]\r\n except ValueError:\r\n await ctx.send(\"Invalid time unit :no_entry:\")\r\n return\r\n try:\r\n time = time_and_unit[0:len(time_and_unit) - 1]\r\n except ValueError:\r\n await ctx.send(\"Invalid time unit :no_entry:\")\r\n return\r\n if unit == \"s\":\r\n try:\r\n time2 = int(time)\r\n except ValueError:\r\n await ctx.send(\"Invalid time unit :no_entry:\")\r\n return\r\n if time == \"1\":\r\n unit = \"second\"\r\n else:\r\n unit = \"seconds\"\r\n elif unit == \"m\":\r\n try:\r\n time2 = int(time) * 60\r\n except ValueError:\r\n await ctx.send(\"Invalid time unit :no_entry:\")\r\n return\r\n if time == \"1\":\r\n unit = \"minute\"\r\n else:\r\n unit = \"minutes\"\r\n elif unit == \"h\":\r\n try:\r\n time2 = int(time) * 3600\r\n except ValueError:\r\n await ctx.send(\"Invalid time unit :no_entry:\")\r\n return\r\n if time == \"1\":\r\n unit = \"hour\"\r\n else:\r\n unit = \"hours\"\r\n elif unit == \"d\":\r\n try:\r\n time2 = int(time) * 86400\r\n except ValueError:\r\n await ctx.send(\"Invalid time unit :no_entry:\")\r\n return\r\n if time == \"1\":\r\n unit = \"day\"\r\n else:\r\n unit = \"days\"\r\n else:\r\n await ctx.send(\"Invalid time unit :no_entry:\")\r\n return\r\n action = \"Mute ({} {})\".format(time, unit)\r\n if str(server.id) not in self.d:\r\n self.d[str(server.id)] = {}\r\n dataIO.save_json(self.file, self.d)\r\n if str(user.id) not in self.d[str(server.id)]:\r\n self.d[str(server.id)][str(user.id)] = {}\r\n dataIO.save_json(self.file, self.d)\r\n if \"toggle\" not in self.d[str(server.id)][str(user.id)]:\r\n self.d[str(server.id)][str(user.id)][\"toggle\"] = False\r\n dataIO.save_json(self.file, self.d)\r\n if \"time\" not in self.d[str(server.id)][str(user.id)]:\r\n self.d[str(server.id)][str(user.id)][\"time\"] = None\r\n dataIO.save_json(self.file, self.d)\r\n if \"amount\" not in self.d[str(server.id)][str(user.id)]:\r\n self.d[str(server.id)][str(user.id)][\"amount\"] = None\r\n dataIO.save_json(self.file, self.d)\r\n role = discord.utils.get(server.roles, name=\"Muted - Sensei\")\r\n overwrite = discord.PermissionOverwrite()\r\n overwrite.send_messages = False\r\n perms = discord.PermissionOverwrite()\r\n perms.speak = False\r\n if not role:\r\n role = await server.create_role(name=\"Muted - Sensei\")\r\n for channels in ctx.guild.text_channels:\r\n await channels.set_permissions(role, overwrite=overwrite)\r\n for channels in ctx.guild.voice_channels:\r\n await channels.set_permissions(role, overwrite=perms)\r\n if role in user.roles:\r\n await ctx.send(\"**{}** is already muted :no_entry:\".format(user))\r\n return\r\n try:\r\n await user.add_roles(role)\r\n except:\r\n await ctx.send(\"I cannot add the mute role to the user :no_entry:\")\r\n return\r\n await ctx.send(f\"**{user}** has been muted for {time} {unit} 
{self.bot.get_emoji(470063310386233344)}\")\r\n try:\r\n await self._log(author, server, action, reason, user)\r\n except:\r\n pass\r\n self.d[str(server.id)][str(user.id)][\"toggle\"] = True\r\n self.d[str(server.id)][str(user.id)][\"amount\"] = time2\r\n self.d[str(server.id)][str(user.id)][\"time\"] = ctx.message.created_at.timestamp()\r\n dataIO.save_json(self.file, self.d)\r\n try:\r\n s = discord.Embed(title=\"You have been muted in {} :speak_no_evil:\".format(server.name), colour=0xfff90d,\r\n timestamp=__import__('datetime').datetime.utcnow())\r\n s.add_field(name=\"Moderator\", value=\"{} ({})\".format(author, str(author.id)), inline=False)\r\n s.add_field(name=\"Time\", value=\"{} {}\".format(time, unit), inline=False)\r\n if reason:\r\n s.add_field(name=\"Reason\", value=reason, inline=False)\r\n await user.send(embed=s)\r\n except:\r\n pass",
"async def unmute(self, ctx, user: discord.Member, *, reason: str = None):\r\n server = ctx.message.guild\r\n channel = ctx.message.channel\r\n author = ctx.message.author\r\n action = \"Unmute\"\r\n role = discord.utils.get(server.roles, name=\"Muted - Sensei\")\r\n if not role:\r\n await ctx.send(\"No-one is muted in this server :no_entry:\")\r\n return\r\n if role not in user.roles:\r\n await ctx.send(\"**{}** is not muted :no_entry:\".format(user))\r\n return\r\n try:\r\n await user.remove_roles(role)\r\n except:\r\n await ctx.send(\"I cannot remove the mute role from the user :no_entry:\")\r\n return\r\n await ctx.send(f\"**{user}** has been unmuted {self.bot.get_emoji(470063310386233344)}\")\r\n try:\r\n await self._log(author, server, action, reason, user)\r\n except:\r\n pass\r\n self.d[str(server.id)][str(user.id)][\"toggle\"] = False\r\n self.d[str(server.id)][str(user.id)][\"time\"] = None\r\n self.d[str(server.id)][str(user.id)][\"amount\"] = None\r\n dataIO.save_json(self.file, self.d)\r\n try:\r\n s = discord.Embed(title=\"You have been unmuted early in {}\".format(server.name), colour=000000,\r\n timestamp=datetime.datetime.utcnow())\r\n s.add_field(name=\"Moderator\", value=\"{} ({})\".format(author, str(author.id)))\r\n await user.send(embed=s)\r\n except:\r\n pass",
"async def mute(self, ctx, user: discord.Member, amount: int, time_unit: str, *, reason: typing.Optional[str]):\n length, error_msg = self.convert_mute_length(amount, time_unit)\n if not length:\n await ctx.send(error_msg)\n return\n unmute_ts = datetime.datetime.utcnow() + datetime.timedelta(seconds=length)\n mute_message = f\"user {user.mention} was muted\"\n await user.add_roles(self.mute_role)\n await ctx.send(mute_message)\n if reason:\n mute_message = f\"{mute_message} for the following reason:\\n{reason}\"\n await self.add_mute_to_mute_list(user.id, unmute_ts)\n await self.check_channel.send(mute_message)",
"async def x5lol(ctx):\n #Shortcut to Author of the Message\n atr = ctx.author\n #Shortcut to Author Current Voice Channel\n currentvc = atr.voice.channel.name\n #Shortcut to Voice Channel Members list\n usrs = atr.voice.channel.members\n #Specify what role will use \n role = ctx.guild.get_role(\"\"\"Insert role id\"\"\")\n \n pot = []\n #await ctx.send(atr.name +' '+currentvc)\n for i in usrs:\n if role in i.roles:\n #await ctx.send('O ' + str(i) + ' É ' + str(role))\n nick = (str(i.nick))\n if nick != 'None':\n pot.append(str(i.nick))\n else:\n pot.append(str(i))\n \n \n #NOTE:The math part of team ballance isn't done yet\n # For now, Bot shuffle the names, then send to Text Channel on Discord \n random.shuffle(pot)\n await ctx.send(pot)",
"async def unmute(self, ctx, user: Redeemed):\n if member == None or member == ctx.message.author:\n await ctx.send(\"You cannot unmute yourself!\")\n return \n await user.remove_roles(discord.utils.get(ctx.guild.roles, name=\"Muted\"))\n await ctx.send(f\"{user.mention} has been unmuted\")",
"async def voice(self, ctx, voice: str):\n global voice_type\n\n voice_dict = {\n 'IN_F': 'en-IN-Wavenet-A',\n 'IN_M': 'en-IN-Wavenet-C',\n 'US_F': 'en-US-Wavenet-G',\n 'US_M': 'en-US-Wavenet-B',\n 'GB_F': 'en-GB-Wavenet-A',\n 'GB_M': 'en-GB-Wavenet-B',\n 'AU_F': 'en-AU-Wavenet-C',\n 'AU_M': 'en-AU-Wavenet-B'\n }\n if voice in voice_dict:\n voice_type = voice_dict[voice]\n else:\n voice_type = voice\n await ctx.send(\"New voice set to: \" + voice_type)\n audioClip = TTSSource(voice_type, \"New voice set.\")\n while not audioClip.done:\n await asyncio.sleep(1)\n source = discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(audioClip.filename, **ffmpeg_options))\n ctx.voice_client.play(source, after=lambda e: print('Player error: %s' % e) if e else None)",
"def handle_mic_mute(_):\n loop.mute()",
"async def omar(self, ctx, user):\n user = user.replace(\"<\",\"\").replace(\">\",\"\").replace(\"@\",\"\").replace(\"!\",\"\")\n print(user)\n user_member = await ctx.guild.fetch_member(user)\n if user_member is not None:\n kick_channel = await ctx.guild.create_voice_channel(\"kicked\")\n await user_member.move_to(kick_channel, reason=\"you have been kicked by Omar.\")\n await kick_channel.delete()\n else:\n print(\"user invalid for omar()\")",
"async def reset(self, ctx, user : str=None):\n cyphon = discord.utils.get(ctx.message.server.members, id=\"186835826699665409\")\n\n if self.check_channel(ctx):\n if self.check_permission(ctx) or ctx.message.author == cyphon:\n userFound = False\n if (user == \"bot\"):\n self.intro_message = None\n else:\n for stream in self.twitch_streams:\n if (user):\n if (stream[\"NAME\"] == user):\n stream[\"MESSAGE\"] = None\n stream[\"ALREADY_ONLINE\"] = False\n stream[\"CHANNEL\"] = self.stream_channel\n userFound = True\n else:\n stream[\"MESSAGE\"] = None\n stream[\"ALREADY_ONLINE\"] = False\n stream[\"CHANNEL\"] = self.stream_channel\n\n if (user):\n if (userFound):\n await self.bot.say(\"Reset complete.\")\n else:\n await self.bot.say(\"User does not exist!\")\n else:\n await self.bot.say(\"Reset complete.\")\n else:\n await self.bot.send_message(ctx.message.author, \"You don't have permission to execute that command.\")",
"def auto_unmute():\n muted = set(t.mutes.users.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n\n # put user IDs of people you want to remain muted here\n users_keep_muted = set([])\n \n # mute all \n for user_id in muted:\n if user_id not in users_keep_muted:\n t.mutes.users.destroy(user_id=user_id)\n print(\"unmuted %d\" % (user_id))",
"async def music_voice(self, user, before, after):\n if after.channel is None and user.id == self.bot.user.id:\n try:\n self.player[user.guild.id]['queue'].clear()\n except KeyError:\n # NOTE: server ID not in bot's local self.player dict\n # Server ID lost or was not in data before disconnecting\n print(f\"Failed to get guild id {user.guild.id}\")",
"async def musicbot(self, ctx, the_state):\r\n is_mod = False\r\n for role in ctx.message.author.roles:\r\n if role.name == \"Moderators\":\r\n is_mod = True\r\n if is_mod:\r\n if the_state == \"1\":\r\n self.music_off = False\r\n await ctx.send(\"Music Bot features now on\")\r\n else:\r\n self.music_off = True\r\n await ctx.send(\"Music Bot features now off\")\r\n else:\r\n await ctx.send(\"**Error:** You are not allowed to use this command!\")",
"def mute(self, msg, args):\n if self.mute:\n self.mute=False\n return \"Yay, I can make noise again!\"\n else:\n self.mute=True\n return \"OK, I'll shut up now!\"",
"async def on_message(message):\n\n async def bad_word_check():\n if any(\n bad_word.search(message.clean_content) is not None for bad_word in BAD_WORDS\n ):\n shame_channel = message.channel\n try:\n for channel in message.guild.text_channels:\n if SHAME_CHANNEL_PATTERN.fullmatch(channel.name):\n shame_channel = channel\n break\n except AttributeError:\n pass # Message has no guild\n await shame_channel.send(\n \"{} SAID A BAD WORD\".format(message.author.display_name.upper())\n )\n\n async def speak_muted():\n if (\n not isinstance(message.channel, discord.DMChannel)\n and \"muted\" in message.channel.name.lower()\n and message.author.voice\n and not message.content.startswith(\"!\")\n ):\n await _joinvoice(message.guild.voice_client, message.author.voice.channel)\n temp_file = tempfile.TemporaryFile()\n tts = gTTS(\n re.split(r\"\\W+\", message.author.display_name, maxsplit=1)[0]\n + \" said: \"\n + message.clean_content\n )\n tts.write_to_fp(temp_file)\n temp_file.seek(0)\n source = discord.FFmpegPCMAudio(temp_file, pipe=True)\n message.guild.voice_client.play(source)\n\n await asyncio.gather(bad_word_check(), speak_muted(), bot.process_commands(message))",
"async def mute(self, ctx, member : discord.Member, *, reason : str):\r\n mutedRole = discord.utils.get(ctx.guild.roles, name = \"Muted\")\r\n if not mutedRole:\r\n channels = 0\r\n mutedRole = await ctx.guild.create_role(name=\"Muted\")\r\n for channel in ctx.guild.text_channels:\r\n await channel.set_permissions(mutedRole, send_messages=False)\r\n channels += 1 \r\n await ctx.send(f\"Successfully applied overwrites for {channels} channels\")\r\n await member.add_roles(mutedRole)\r\n embed = discord.Embed(title=\"Muted\", description = f\"You have been muted in **{ctx.guild.name}** by **{ctx.author}** **indefinetly** for reason **{reason}**\", colour = ctx.author.color, timestamp = datetime.datetime.now())\r\n await member.send(embed=embed)",
"def set_mute(self, track, xclip, ident, value = None):\n if track in self.song().tracks + self.song().return_tracks:\n if value in KEYWORDS:\n track.mute = KEYWORDS[value]\n else:\n track.mute = not(track.mute)",
"async def selfmute(ctx, *args):\n user = ctx.message.author\n if await is_staff(ctx):\n return await ctx.send(\"Staff members can't self mute.\")\n time = \" \".join(args)\n await _mute(ctx, user, time, self=True)",
"async def mute(self, *args, **kwargs):\n self.muted = not self.muted # toogle\n if self.muted:\n self.just_muted = True\n return \"I've been muted :(\"\n return \"I'm back! :D\"",
"async def mute(self, ctx, member: discord.Member, time='15m'):\n guild_permissions = member.guild_permissions\n wait_time = parse_time(time).total_seconds()\n # Because sometimes members have nicknames with markdown\n escaped_name = escape_markdown(member.display_name)\n\n if guild_permissions.kick_members:\n # do not mute someone who has permissions to kick members\n await ctx.send(f'Cannot mute {escaped_name} due to roles.')\n\n elif member.bot:\n # do not mute bots\n await ctx.send(f'Cannot mute {escaped_name} (is a bot).')\n\n else:\n overwrite = discord.PermissionOverwrite(\n add_reactions=False,\n send_messages=False,\n )\n\n log_str = (f'{ctx.author.display_name} has muted '\n f'member {member} (<@{member.id}>) for {time}.')\n logger.info(log_str)\n\n for channel in ctx.guild.text_channels:\n permissions = channel.permissions_for(member)\n\n if permissions.read_messages:\n await channel.set_permissions(member, overwrite=overwrite)\n\n await asyncio.sleep(wait_time)\n await ctx.invoke(self.unmute, member)",
"async def remove_mute(id: int) -> None:\n\n guild = BOT_GLOBAL.get_guild(BOT_GLOBAL.settings.guild_id)\n if guild is not None:\n mute_role = BOT_GLOBAL.settings.guild().role_mute\n mute_role = guild.get_role(mute_role)\n if mute_role is not None:\n user = guild.get_member(id)\n if user is not None:\n await user.remove_roles(mute_role)\n case = Case(\n _id=BOT_GLOBAL.settings.guild().case_id,\n _type=\"UNMUTE\",\n mod_id=BOT_GLOBAL.user.id,\n mod_tag=str(BOT_GLOBAL.user),\n reason=\"Temporary mute expired.\",\n )\n await BOT_GLOBAL.settings.inc_caseid()\n await BOT_GLOBAL.settings.add_case(user.id, case)\n\n u = await BOT_GLOBAL.settings.user(id=user.id)\n u.is_muted = False\n u.save()\n\n log = await prepare_unmute_log(BOT_GLOBAL.user, user, case)\n\n log.remove_author()\n log.set_thumbnail(url=user.avatar_url)\n\n public_chan = guild.get_channel(\n BOT_GLOBAL.settings.guild().channel_public)\n \n dmed = True\n try:\n await user.send(embed=log)\n except Exception:\n dmed = False\n \n await public_chan.send(user.mention if not dmed else \"\", embed=log)\n\n else:\n case = Case(\n _id=BOT_GLOBAL.settings.guild().case_id,\n _type=\"UNMUTE\",\n mod_id=BOT_GLOBAL.user.id,\n mod_tag=str(BOT_GLOBAL.user),\n reason=\"Temporary mute expired.\",\n )\n await BOT_GLOBAL.settings.inc_caseid()\n await BOT_GLOBAL.settings.add_case(id, case)\n\n u = await BOT_GLOBAL.settings.user(id=id)\n u.is_muted = False\n u.save()",
"def voice(phenny, input):\n if not input.admin:\n return phenny.say(\"Not an admin!\")\n if not input.sender.startswith('#'):\n return phenny.say(\"Command must be given in channel!\")\n nick = input.group(2)\n verify = auth_check(phenny, input.nick, nick)\n if verify:\n channel = input.sender\n if not nick:\n nick = input.nick\n phenny.write(['MODE', channel, \"+v\", nick])\n else:\n phenny.say(\"Nick not verified\")",
"async def edit(\n self,\n *,\n nick: Optional[str] = MISSING,\n mute: bool = MISSING,\n deafen: bool = MISSING,\n suppress: bool = MISSING,\n roles: Collection[discord.abc.Snowflake] = MISSING,\n voice_channel: Optional[VocalGuildChannel] = MISSING,\n timed_out_until: Optional[datetime.datetime] = MISSING,\n bypass_verification: bool = MISSING,\n reason: Optional[str] = None,\n ) -> Optional[Member]:\n http = self._state.http\n guild_id = self.guild.id\n me = self._state.self_id == self.id\n payload: Dict[str, Any] = {}\n\n if nick is not MISSING:\n nick = nick or ''\n if me:\n await http.change_my_nickname(guild_id, nick, reason=reason)\n else:\n payload['nick'] = nick\n\n if deafen is not MISSING:\n payload['deaf'] = deafen\n\n if mute is not MISSING:\n payload['mute'] = mute\n\n if suppress is not MISSING:\n voice_state_payload: Dict[str, Any] = {\n 'suppress': suppress,\n }\n\n if self.voice is not None and self.voice.channel is not None:\n voice_state_payload['channel_id'] = self.voice.channel.id\n\n if suppress or self.bot:\n voice_state_payload['request_to_speak_timestamp'] = None\n\n if me:\n await http.edit_my_voice_state(guild_id, voice_state_payload)\n else:\n if not suppress:\n voice_state_payload['request_to_speak_timestamp'] = datetime.datetime.utcnow().isoformat()\n await http.edit_voice_state(guild_id, self.id, voice_state_payload)\n\n if voice_channel is not MISSING:\n payload['channel_id'] = voice_channel and voice_channel.id\n\n if roles is not MISSING:\n payload['roles'] = tuple(r.id for r in roles)\n\n if timed_out_until is not MISSING:\n if timed_out_until is None:\n payload['communication_disabled_until'] = None\n else:\n if timed_out_until.tzinfo is None:\n raise TypeError(\n 'timed_out_until must be an aware datetime. Consider using discord.utils.utcnow() or datetime.datetime.now().astimezone() for local time.'\n )\n payload['communication_disabled_until'] = timed_out_until.isoformat()\n\n if bypass_verification is not MISSING:\n flags = MemberFlags._from_value(self._flags)\n flags.bypasses_verification = bypass_verification\n payload['flags'] = flags.value\n\n if payload:\n data = await http.edit_member(guild_id, self.id, reason=reason, **payload)\n return Member(data=data, guild=self.guild, state=self._state)",
"def mute(self, nick, chan, arg):\n if not arg:\n \treturn bot.msg(chan, get_doc())\n self.state.mute(arg)\n self.msg(chan, \"%s: You are temporarily prohibited from using this bot\" % (arg))",
"async def on(self, ctx, *, nickname=\"\"):\n nickname = nickname.strip()\n mention_here = True\n mention_everyone = True\n if nickname == \"\":\n nickname = \"Dank Bot |Music on voice!\"\n try:\n await self.bot.change_nickname(ctx.message.server.me, nickname)\n await self.bot.say(\"Hey, music is playing on voice channel come! @here\")\n await self.bot.delete_message(ctx.message)\n except discord.Forbidden:\n await self.bot.say(\"I cannot do that, I miss the `Change Nickname` or `Manage Messages` permission\")",
"async def shush(self, ctx):\n author = ctx.message.author\n channel = author.voice.channel\n members = channel.members\n for member in members:\n user = ctx.guild.get_member(member.id)\n await user.edit(mute=True, deafen=True)\n\n embed = await embeds.generate_embed(ctx, author, members,\n description=\":white_check_mark: Successfully shushed the following users:\",\n title=channel.name)\n await ctx.send(embed=embed)",
"async def set_mute(self, value: bool):\n await self._pytheos.api.player.set_mute(self.id, value)"
] | [
"0.70429116",
"0.6485882",
"0.64345485",
"0.6267976",
"0.6264996",
"0.6263961",
"0.62500525",
"0.61775255",
"0.61728716",
"0.61710095",
"0.61693317",
"0.61162615",
"0.6115093",
"0.6084224",
"0.60821134",
"0.60459745",
"0.60407513",
"0.60330546",
"0.60249496",
"0.6010342",
"0.6005473",
"0.59675175",
"0.5966744",
"0.59596044",
"0.5954227",
"0.5953477",
"0.59311247",
"0.59277135",
"0.5926123",
"0.5918206"
] | 0.68234724 | 1 |
Delete an award. This is used on the person edit page. | def award_delete(request, award_id, person_id=None):
award = get_object_or_404(Award, pk=award_id)
badge_name = award.badge.name
award.delete()
messages.success(request, 'Award was deleted successfully.',
extra_tags='awards')
if person_id:
# if a second form of URL, then return back to person edit page
return redirect(person_edit, person_id)
return redirect(reverse(badge_details, args=[badge_name])) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def award_delete(request, slug,id):\n \n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n\n if request.method == 'POST':\n return HttpResponseRedirect('/company/'+str(slug))\n else: \n #verifies if the company exists if not returns a 404 page\n award_reference = get_object_or_404(Award, id=id,company=company)\n\n #deletes the view and redirects to the page.\n award_reference.delete()\n return HttpResponseRedirect('/company/'+str(slug))",
"def del_awcomment(request, pk):\n comment = get_object_or_404(AwardComment, pk=pk)\n comment.delete()\n award = comment.award\n url = '../../' + str(comment.award.pk)\n return redirect(url)",
"def delete(self, id=None):\n\n if not id:\n return {'msg':'Missing achievement id.'}, 400\n\n try:\n ach = AcademicAchievement.query.get(id)\n\n if not ach:\n return {'msg':'Academic achievement not found'}, 404\n\n ach.remove()\n return {'msg':'Academic achievement deleted.'}, 200\n\n except Exception as e:\n print(e)\n return {'msg':'Could not delete academic achievement.'}, 500",
"def delete(self):\n # gid must be specified for deletion\n gid = self.get_query_argument('gid')\n self.write(self._rpc.aria2.remove(self._token, gid))",
"def delete(self, _id):",
"def delete(request, reward_id):\n \n # check if the reward_id passed in is invalid and raise a 404 if so.\n # reward ids are integers\n try:\n reward_id = int(str(reward_id))\n except ValueError:\n raise Http404\n \n account = request.session['account']\n store = SESSION.get_store(request.session)\n rewards = store.get('rewards')\n rewards_map = { reward['reward_id']:reward for reward in rewards }\n\n # reward cannot be found for deletion. Redirect user to the rewards\n # page with a success message (maybe this should be an error message instead?).\n try: \n reward = rewards_map[reward_id]\n except KeyError:\n return redirect(reverse('rewards_index')+\\\n \"?%s\" % urllib.urlencode({'success':\\\n 'Reward has been removed.'}))\n \n # notify other dashboards of this change\n payload = {\n COMET_RECEIVE_KEY_NAME: COMET_RECEIVE_KEY,\n \"deletedReward\": {\"reward_id\":reward[\"reward_id\"]}\n }\n comet_receive(store.objectId, payload)\n \n # update session cache\n store.array_remove('rewards', [reward])\n store.rewards = None\n store.get('rewards')\n request.session['store'] = store\n \n return redirect(reverse('rewards_index')+\\\n \"?%s\" % urllib.urlencode({'success':\\\n 'Reward has been removed.'}))",
"def del_accomment(request, pk):\n\n comment = get_object_or_404(ActorComment, pk=pk)\n comment.delete()\n actor = comment.actor\n url = '../../' + str(comment.actor.pk)\n return redirect(url)",
"def acquisition_delete(request, slug,id):\n \n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n\n if request.method == 'POST':\n return HttpResponseRedirect('/company/'+str(slug))\n else: \n #verifies if the company exists if not returns a 404 page\n acquisition_reference = get_object_or_404(Acquisition, id=id,company=company)\n\n #deletes the view and redirects to the page.\n acquisition_reference.delete()\n return HttpResponseRedirect('/company/'+str(slug))",
"def delete(self):\n self.request().delete()",
"def delete(self, *args, **kwargs):\n self.request(\"delete\", *args, **kwargs)",
"def delete(self, *args, **kwargs):\n pass",
"def delete(self, *args, **kwargs):\n pass",
"def delete_exam(request, exam_id):\n\n\temp = models.Employee.objects.get(user=request.user)\n\tif not emp.exam_permit:\n\t\traise Http404\n\texam = models.ExamName.objects.filter(\n\t\tpk=exam_id, soft_delete=False\n\t).first()\n\tif not exam:\n\t\traise Http404\n\texam.soft_delete = True\n\tactivity = 'Deleted Exam' + str(exam) + '.\\n'\n\texam.save(update_fields=['soft_delete'])\n\thistory = models.History(\n\t\t\t\tuser=emp,\n\t\t\t\tactivity=activity,\n\t\t\t\tactivity_type=\"delete exam\"\n\t\t\t)\n\thistory.save()\n\treturn HttpResponseRedirect('/view-exams')",
"def delete(self, request, *args, **kwargs):\n self.object = self.get_object()\n user = request.user\n success_url = reverse_lazy('muxic:user', kwargs={'username': user.username})\n self.object.delete()\n return HttpResponseRedirect(success_url)",
"def delete(request, slug, username):\n delete_album_contributor(slug, username)\n \n response = HttpResponse(status=204)\n response['Cache-Control'] = 'no-cache'\n return response",
"def delete(self, request, *args, **kwargs):\r\n self.object = self.get_object()\r\n success_url = self.get_success_url()\r\n self.object.delete()\r\n messages.success(self.request, self.success_message)\r\n return HttpResponseRedirect(success_url)",
"def delete(anime_viewed_id):\n if isinstance(anime_viewed_id, int):\n anime_viewed = AnimeViewed.query.filter_by(id=anime_viewed_id).first()\n\n if not anime_viewed:\n abort(Response(f'The anime viewed with the ID {anime_viewed_id} was not found.', 404))\n\n anime_viewed.delete()\n\n return make_response(jsonify({}), 200)\n else:\n abort(Response(f'The specified anime viewed ID is invalid. Is not a number.', 400))",
"def funding_delete(request, slug,id):\n \n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n\n if request.method == 'POST':\n return HttpResponseRedirect('/company/'+str(slug))\n else: \n #verifies if the company exists if not returns a 404 page\n funding_reference = get_object_or_404(Funding, id=id,company=company)\n\n #deletes the view and redirects to the page.\n funding_reference.delete()\n return HttpResponseRedirect('/company/'+str(slug))",
"def delete(damage_id):\n logged_in_user = g.user\n damage_id = str(damage_id)\n\n deleted_damage = libdamage.delete_damage(damage_id=damage_id, \n logged_in_user=logged_in_user)\n\n rci_id = deleted_damage['rci_id']\n\n return redirect(url_for('rci.edit', rci_id=rci_id))",
"def delete_view(self, request, object_id):\r\n obj = self.get_object(unquote(object_id))\r\n obj.delete()\r\n return HttpResponse(\"Deleted\")",
"def delete_answer(request, answer_id):\n raise NotImplementedError",
"def delete(self, *args, **kwargs):\n self.portrait.delete()\n super(Giza, self).delete(*args, **kwargs)",
"def delete_object(self, id):\n self.request(id, post_args={\"method\": \"delete\"})",
"def delete(self):\n pass",
"def delete(self):\n pass",
"def delete(self):\n pass",
"def delete(self):\n pass",
"def delete():\n # Must be logged in to perform any delete commands.\n auth_required()\n pass",
"def delete_record(self, record_id):\r\n self.record.deleteObject(id=record_id)",
"def delete():"
] | [
"0.7830026",
"0.7739207",
"0.68277454",
"0.65321404",
"0.64024395",
"0.6380566",
"0.6345336",
"0.63208514",
"0.62701005",
"0.62636906",
"0.62052655",
"0.62052655",
"0.6161522",
"0.6154171",
"0.6137985",
"0.6120464",
"0.61112374",
"0.6087378",
"0.60695326",
"0.6067862",
"0.60647255",
"0.6033762",
"0.60269403",
"0.6026541",
"0.6026541",
"0.6026541",
"0.6026541",
"0.6018903",
"0.60011995",
"0.59853345"
] | 0.85680723 | 0 |
Discard EventRequest, i.e. set it to inactive. | def eventrequest_discard(request, request_id):
eventrequest = get_object_or_404(EventRequest, active=True, pk=request_id)
eventrequest.active = False
eventrequest.save()
messages.success(request,
'Workshop request was discarded successfully.')
return redirect(reverse('all_eventrequests')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Discard(self, request, global_params=None):\n config = self.GetMethodConfig('Discard')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Discard(self, request, global_params=None):\n config = self.GetMethodConfig('Discard')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def test_request_discarded(self):\n # add a minimal request\n er = EventRequest.objects.create(\n name='Harry Potter', email='[email protected]',\n affiliation='Hogwarts', location='United Kingdom',\n country='GB', workshop_type='dc',\n )\n rv = self.client.get(reverse('eventrequest_set_state',\n args=[er.pk, 'discarded']))\n self.assertEqual(rv.status_code, 302)\n er.refresh_from_db()\n self.assertEqual(er.state, 'd')",
"def test_request_discarded(self):\n # add a minimal request\n er = EventRequest.objects.create(\n name='Harry Potter', email='[email protected]',\n affiliation='Hogwarts', location='United Kingdom',\n country='GB', workshop_type='swc',\n )\n rv = self.client.get(reverse('eventrequest_set_state',\n args=[er.pk, 'discarded']))\n self.assertEqual(rv.status_code, 302)\n er.refresh_from_db()\n self.assertEqual(er.state, 'd')",
"def ignore(self, event):\n return not self.active",
"def Reject(self, request, global_params=None):\n config = self.GetMethodConfig('Reject')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def cancel(self, request):\n self.clear(request)",
"def ignore(self):\n self.accepted = False",
"def ignore(self):\n self.accepted = False",
"def rejectEvent(self, status):\n\t\t# set all meta data to -1\n\t\t[ self._setRejectMetadata(mdHead) for mdHead in self.__dict__.keys() if mdHead.startswith('md')==True ]\n\t\t# set processing status to status\n\t\tself.mdProcessingStatus=status",
"def Disable(self, request, global_params=None):\n config = self.GetMethodConfig('Disable')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Disable(self, request, global_params=None):\n config = self.GetMethodConfig('Disable')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Disable(self, request, global_params=None):\n config = self.GetMethodConfig('Disable')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def requestCancelled(builder, request):",
"def profileupdaterequest_discard(request, request_id):\n profileupdate = get_object_or_404(ProfileUpdateRequest, active=True,\n pk=request_id)\n profileupdate.active = False\n profileupdate.save()\n\n messages.success(request,\n 'Profile update request was discarded successfully.')\n return redirect(reverse('all_profileupdaterequests'))",
"def on_reject(self):\n self.state = REJECTED\n self._reject()",
"def hideEvent(self, event):\n self.stop_threading()\n event.accept()",
"def DismissApprovalRequest(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def canceled(self):\n self.reject()",
"def cancel(self):\n self.__canceled = True",
"def unsave(self, event):\n self.saved_events.remove(event)",
"def on_cancel(self):\n self.state = CANCELED\n self._reject()",
"def reqGlobalCancel(self):\r\n self.ib.reqGlobalCancel()\r\n logging.info('reqGlobalCancel')",
"def consider_deactivation(self):\n pass",
"def clearRequest():\n setRequest(marker)",
"def cancel(self):",
"def cancel(self):",
"def cancel(self):",
"def discard(self):\n return self._discard",
"async def reject_event(self, envelope: Envelope, requeue: bool = False) -> None:\n await self.channel.basic_reject(\n delivery_tag=envelope.delivery_tag, requeue=requeue\n )"
] | [
"0.6764851",
"0.6764851",
"0.62319267",
"0.62042",
"0.6156134",
"0.61215484",
"0.6118604",
"0.60858923",
"0.60858923",
"0.599714",
"0.59970295",
"0.59970295",
"0.59970295",
"0.596022",
"0.5953863",
"0.59002227",
"0.58789396",
"0.58766323",
"0.5814083",
"0.56593174",
"0.56283796",
"0.5626455",
"0.56147176",
"0.5611576",
"0.5605472",
"0.559551",
"0.559551",
"0.559551",
"0.5583771",
"0.5539452"
] | 0.76252353 | 0 |
Discard ProfileUpdateRequest, i.e. set it to inactive. | def profileupdaterequest_discard(request, request_id):
profileupdate = get_object_or_404(ProfileUpdateRequest, active=True,
pk=request_id)
profileupdate.active = False
profileupdate.save()
messages.success(request,
'Profile update request was discarded successfully.')
return redirect(reverse('all_profileupdaterequests')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unblock_profile(self, request, *args, **kwargs):\n context = {\n 'conversation': self.get_object(),\n 'request': request\n }\n serializer = UnblockProfileSerializer(data=request.data, context=context)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(serializer.data, status=status.HTTP_202_ACCEPTED)",
"def remove_profile(self, request, *args, **kwargs):\n context = {\n 'conversation': self.get_object(),\n 'request': request\n }\n serializer = RemoveProfileSerializer(data=request.data, context=context)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(serializer.data, status=status.HTTP_202_ACCEPTED)",
"def eventrequest_discard(request, request_id):\n eventrequest = get_object_or_404(EventRequest, active=True, pk=request_id)\n eventrequest.active = False\n eventrequest.save()\n\n messages.success(request,\n 'Workshop request was discarded successfully.')\n return redirect(reverse('all_eventrequests'))",
"def block_profile(self, request, *args, **kwargs):\n context = {\n 'conversation': self.get_object(),\n 'request': request\n }\n serializer = BlockProfileSerializer(data=request.data, context=context)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(serializer.data, status=status.HTTP_202_ACCEPTED)",
"def unfollow_profile(self):\n self.find_clickable_element(self.ISFOLLOWED_BTN).click()",
"def Discard(self, request, global_params=None):\n config = self.GetMethodConfig('Discard')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Discard(self, request, global_params=None):\n config = self.GetMethodConfig('Discard')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def make_inactive(self, request, queryset):\n queryset.update(is_active=False)",
"def cancel(self, update: Update, context: CallbackContext) -> None:\n update.message.reply_text(\"취소 되었습니다.\")\n context.user_data.clear()",
"def Disable(self, request, global_params=None):\n config = self.GetMethodConfig('Disable')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Disable(self, request, global_params=None):\n config = self.GetMethodConfig('Disable')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Disable(self, request, global_params=None):\n config = self.GetMethodConfig('Disable')\n return self._RunMethod(\n config, request, global_params=global_params)",
"async def test_not_update_with_account_token(self):\n provisioning_client = ProvisioningProfileClient(httpClient, 'token')\n try:\n await provisioning_client.update_provisioning_profile('id', {'name': 'new name'})\n except Exception as err:\n assert err.__str__() == 'You can not invoke update_provisioning_profile method, because you ' + \\\n 'have connected with account access token. Please use API access token from ' + \\\n 'https://app.metaapi.cloud/token page to invoke this method.'",
"def account_api_password_disable(request):\n if request.method != 'POST':\n return render(request, 'agda/account/api_password_disable.html')\n profile = request.user\n profile.set_api_password(None)\n profile.save()\n profile.log_change(request.user, \"Deleted own api password.\")\n messages.success(request, \"Your api password has been disabled.\")\n return redirect(account_edit)",
"def reject(self):\n self.skype.conn(\"PUT\", \"{0}/users/{1}/invites/8:{2}/decline\"\n .format(SkypeConnection.API_CONTACTS, self.skype.userId, self.userId),\n auth=SkypeConnection.Auth.SkypeToken)",
"def unrequest_changes(self):\n self._check_if_open()\n return super(BitbucketCloudBase, self).delete(\"request-changes\")",
"def disable(self,\n profile_id=None):\n if profile_id is None:\n self._enabled = False\n else:\n self._profiles[profile_id] = False",
"def unblock(self):\n data = {'container': self._reddit.user.me().fullname,\n 'name': str(self), 'type': 'enemy'}\n url = API_PATH['unfriend'].format(subreddit='all')\n # PRAW5 REMOVE (return statement)\n return self._reddit.post(url, data=data)",
"def profile_unlogged():\n cookie = {'session_id': None}\n response = requests.get(f'{URL}/profile', cookies=cookie)\n assert response.status_code == 403",
"def cancelRequest(self, json):\n uID = json.get('uID')\n print(RequestsDAO().getRequestByuID(uID))\n if not RequestsDAO().getRequestByuID(uID):\n return jsonify(Error=\"No request found\"), 404\n else:\n\n if uID:\n RequestsDAO().deleteRequest(uID)\n return jsonify(User=\"User deleted\"), 200\n else:\n return jsonify(Error=\"Unexpected attributes in update request\"), 400",
"def set_discarded(self, review_request_id):\r\n self.api_call('api/review-requests/%s/' % review_request_id, {\r\n 'status': 'discarded',\r\n }, method='PUT')",
"def cancel(self):\n self.is_active = False\n self.save()",
"def cancel(self):\n self.is_active = False\n self.save()",
"def disable(self):\n # Check for new results and cache a copy in Django model\n self.update(do_update_assignments=True)\n self.connection.dispose_hit(self.mturk_id)",
"def DismissApprovalRequest(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def reject(self, feedback=None):\n self.hit.generate_connection()\n self.hit.connection.reject_assignment(self.mturk_id, feedback=feedback)\n self.update()",
"def _unregister(self):\n try:\n self._profilemgr_proxy.proxy.UnregisterProfile(\n HFP_DBUS_PROFILE_ENDPOINT)\n logger.debug(\"Unregistered HFP profile.\")\n except Exception:\n logger.exception(\"Error unregistering profile endpoint.\")\n\n self._profile = None",
"def update_assign_unassign(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"update_assign_unassign\"), kwargs)",
"def is_update_active_no_pause(self):\n return self._update_action_without_pause",
"def denyRequest(self, json):\n uID = json.get('uID')\n if not RequestsDAO().getRequestByuID(uID):\n return jsonify(Error=\"User speak request not found\"), 404\n else:\n approval = RequestsDAO().denyTurn(uID)\n mapped_result = self.buildGrantDenyToDict(uID, approval[0])\n return jsonify(TURN=mapped_result), 200"
] | [
"0.7594404",
"0.58762586",
"0.57745874",
"0.5689538",
"0.56809133",
"0.5572069",
"0.5572069",
"0.54156774",
"0.5396515",
"0.53649014",
"0.53649014",
"0.53649014",
"0.5338151",
"0.533734",
"0.53143615",
"0.5303479",
"0.5261646",
"0.5238159",
"0.5198251",
"0.5191427",
"0.514967",
"0.51336145",
"0.51336145",
"0.51042414",
"0.5044797",
"0.5042982",
"0.50348276",
"0.50331444",
"0.4998443",
"0.49614212"
] | 0.8068111 | 0 |
Delete a TodoItem. This is used on the event details page. | def todo_delete(request, todo_id):
todo = get_object_or_404(TodoItem, pk=todo_id)
event_ident = todo.event.get_ident()
todo.delete()
messages.success(request, 'TODO was deleted successfully.',
extra_tags='todos')
return redirect(event_details, event_ident) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete(self, item):\n self._createAction(item, \"delete\")",
"def delete(self, todo_id):\n todo = self.get_todo_by_user_id(todo_id)\n todo.delete()\n return '', 204",
"def delete_item(self, list_name: str, item_name: str) -> None:\n todo_list = self.get_list(list_name)\n for index, item in enumerate(todo_list.items):\n if item.name == item_name:\n todo_list.delete_item(index)",
"def delete(self):\n\n if not self.context.model.is_editable():\n raise Unauthorized(\"Editing is not allowed\")\n\n # the agenda_item is ad hoc if it has a document but no proposal\n if self.agenda_item.has_document and not self.agenda_item.has_proposal:\n document = self.agenda_item.resolve_document()\n trasher = ITrashable(document)\n trasher.trash()\n\n self.agenda_item.remove()\n\n return JSONResponse(self.request).info(\n _(u'agenda_item_deleted',\n default=u'Agenda Item Successfully deleted')).dump()",
"def delete(request, todo_id):\n\n todo = get_object_or_404(Todo, pk=todo_id)\n todo.delete()\n\n return redirect('index')",
"def delete(self, itemId):\n\n table = self.__getTable()\n table.delete_item(itemId = itemId)",
"def remove_todo(self, todo):\n self.deleted_items.append(todo)\n print 'removed \"%s\"' % todo.text",
"def delete_item(item_id: uuid.UUID):\n coll_items = data_access.get_items_collection()\n\n item = coll_items.find_one({\"item_id\": item_id})\n if item is None:\n raise HTTPException(status.HTTP_404_NOT_FOUND,\n f\"Could not find the item with id {item_id}\")\n\n coll_items.delete_one({\"item_id\": item_id})",
"def task_delete(request, tasklist_id):\n tasklist = get_object_or_404(Todo, pk=tasklist_id)\n tasklist.delete()\n print(tasklist)\n messages.success(request, \"Successfully deleted\")\n return redirect('lists:alllist')",
"def delete_item(self):\n\n\t\tdb.session.delete(self)\n\t\tdb.session.commit()",
"def delete_item(item_id):\n\n item = Item.query.filter(\n Item.id == item_id,\n Item.user_id == current_user.id\n ).first()\n\n if not item:\n flash(\"Couldn't find the item\", category='warning')\n return redirect(request.referrer)\n\n item_name = item.name\n db.session.delete(item)\n db.session.commit()\n flash(\n \"Successfully deleted item '{}'\".format(item_name),\n \"success\")\n\n return redirect(url_for('url.index'))",
"def delete(self, request: HttpRequest, *args, **kwargs) -> HttpResponse:\n self.object = self.get_object()\n success_url = self.get_success_url()\n success_message = _(f'Successfully deleted todo list: {self.object}')\n\n self.object.delete()\n\n messages.success(request=request, message=success_message)\n\n return redirect(success_url)",
"def do_del_item(self, arg):\n try:\n del_item = arg[\"<list_name>\"]\n choice = arg[\"--choice\"]\n if choice == \"name\":\n del_item_str = \" \".join(del_item)\n print(del_item_str)\n elif choice == \"id\":\n del_item_str = int(\" \".join(del_item))\n print (del_item_str)\n app.ToDoApp.to_delete_item(del_item_str)\n print (\"Item deleted\")\n\n\n \n except ValueError as e:\n cprint((e), 'red')",
"def delete(self):\n response = settings.database.delete_item(Key={'id': str(self.id)})\n raise_for_response(response)",
"def remove_items(item_title):\n\n user_id = session.get(\"user_id\")\n\n if not user_id:\n raise Exception(\"No user logged in.\")\n\n to_do_item = ToDoItem.query.filter_by(item_title=item_title).first()\n\n db.session.delete(to_do_item)\n db.session.commit()\n\n return \"OK\"",
"def delete_item(self, item_id):\n # open a cursor\n cur = self.get_cursor()\n\n delete_item_statement = \"DELETE FROM transaction_items \" + \\\n \"WHERE transaction_item_id={0}\".format(item_id)\n\n cur.execute(delete_item_statement)\n\n # close the cursor\n self.close_cursor()",
"def delete_item_details(item_id):\n item = is_user_the_creator(item_id)\n item_name = item.Item.name\n if request.method == 'GET':\n return render_template('item_delete_confirm.html', item_name=item_name, item_id=item_id,\n login_session=login_session,\n csrf_token=generate_csrf_token())\n else:\n session.delete(item.Item)\n session.commit()\n flash(item_name + \" deleted\")\n return redirect(url_for('show_homepage'))",
"def delete(self):\r\n self.domain.delete_item(self)",
"def delete_item(id: str):\n db.delete(id, kind=endpoint_model)\n return {\"result\": \"ok\"}",
"def delete_item(request, shoppinglist_id, item_id):\n Item.objects.filter(pk=item_id,\n shoppinglist__pantry__owner=request.user).delete()\n return redirect('shoppinglists.views.detail', shoppinglist_id)",
"def SendDeleteEvent(self, item):\r\n\r\n event = TreeEvent(wxEVT_TREE_DELETE_ITEM, self.GetId())\r\n event._item = item\r\n event.SetEventObject(self)\r\n self.GetEventHandler().ProcessEvent(event)",
"def delete(self, item_id, **params):\n\n self.queue('delete', item_id=item_id, **params)",
"def delete(self):\n return self.items.delete(item_id=self.id)",
"def delete_item(self, id: str, user: User) -> bool:",
"def taskdetail_delete(td):\n return IMPL.taskdetail_delete(td)",
"def test_deleted(self):\n event = Event.objects.all()[0]\n\n todo = TodoItem.objects.create(\n event=event, completed=False, title=\"Test TODO3\",\n due=datetime.date.today(), additional=\"\",\n )\n\n assert todo in event.todoitem_set.all()\n\n self.client.get(reverse('todo_delete', args=[todo.pk]))\n\n assert event.todoitem_set.all().count() == 0",
"def delete_item(id):\n return '', 201",
"def remove_item(self, item_id):\n\t\tself.todolist.remove(item_id) \n\t\tstore = self.store\n\t\tfor row in store:\n\t\t\tif row[0] == item_id:\n\t\t\t\tstore.remove(row.iter)\n\t\t\t\tbreak",
"def delete_item(item_name):\n item = Item.query.filter_by(name=item_name).first()\n\n # If the URL contains a bad item name, send a 404\n if not item:\n abort(404)\n\n # If the current user is not authorized to delete the item because\n # the item was created by a different user, send a 403\n elif current_user != item.user:\n abort(403)\n\n form = DeleteItemForm()\n\n # If the form is submitted, delete the item from the database,\n # send a flash message, and redirect home\n if form.validate_on_submit():\n db.session.delete(item)\n db.session.commit()\n flash(f'\"{item.name}\" has been deleted.', 'good')\n return redirect(url_for('main.home'))\n\n return render_template('delete_item.html', item=item, form=form)",
"def remove_item(self):\n\n self.todo_scroll_cell.remove_selected_item()"
] | [
"0.75566703",
"0.7229479",
"0.7206156",
"0.71743983",
"0.7149615",
"0.71283984",
"0.71145236",
"0.7077117",
"0.700297",
"0.69892603",
"0.6944391",
"0.69187385",
"0.69040143",
"0.6893845",
"0.68695515",
"0.68588835",
"0.683616",
"0.6788384",
"0.6778612",
"0.6743664",
"0.6702072",
"0.669464",
"0.66812867",
"0.6578887",
"0.65720785",
"0.6563319",
"0.6555159",
"0.654488",
"0.65244424",
"0.65142345"
] | 0.7717475 | 0 |
Set obj.assigned_to. This view helper works with both POST and GET | def _assign(request, obj, person_id):
try:
if request.method == "POST":
person_id = request.POST.get('person_1', None)
if person_id is None:
obj.assigned_to = None
else:
person = Person.objects.get(pk=person_id)
obj.assigned_to = person
obj.save()
except Person.DoesNotExist:
raise Http404("No person found matching the query.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def assigned_to(self) -> Optional[str]:\n return pulumi.get(self, \"assigned_to\")",
"def assigned_to(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"assigned_to\")",
"def assigned_to_changed(self, ar):\n # self.add_change_watcher(self.assigned_to)\n\n if (self.assigned_to is not None and\n self.assigned_to != ar.user and\n dd.is_installed('notify')):\n ctx = dict(user=ar.user, what=ar.obj2memo(self))\n def msg(user, mm):\n subject = _(\"{user} has assigned you to ticket: {what}\").format(**ctx)\n return (subject , tostring(E.span(subject)))\n\n mt = rt.models.notify.MessageTypes.tickets\n\n rt.models.notify.Message.emit_notification(\n ar, self, mt, msg,\n [(self.assigned_to, self.assigned_to.mail_mode)]\n )",
"def assigned_by_user(self, assigned_by_user):\n\n self._assigned_by_user = assigned_by_user",
"def assigned_user(self, assigned_user):\n self._assigned_user = assigned_user",
"def send_referral_assigned(cls, referral, assignment, assigned_by):\n\n template_id = settings.SENDINBLUE[\"REFERRAL_ASSIGNED_TEMPLATE_ID\"]\n\n # Get the path to the referral detail view from the unit inbox\n link_path = FrontendLink.unit_referral_detail(\n unit=assignment.unit.id, referral=referral.id\n )\n\n data = {\n \"params\": {\n \"assigned_by\": assigned_by.get_full_name(),\n \"case_number\": referral.id,\n \"link_to_referral\": f\"{cls.location}{link_path}\",\n \"referral_users\": referral.get_users_text_list(),\n \"title\": referral.title or referral.object,\n \"topic\": referral.topic.name,\n \"unit_name\": assignment.unit.name,\n \"urgency\": referral.urgency_level.name,\n },\n \"replyTo\": cls.reply_to,\n \"templateId\": template_id,\n \"to\": [{\"email\": assignment.assignee.email}],\n }\n\n cls.send(data)",
"def assign(self, assignee, created_by, unit):\n assignment = ReferralAssignment.objects.create(\n assignee=assignee,\n created_by=created_by,\n referral=self,\n unit=unit,\n )\n ReferralActivity.objects.create(\n actor=created_by,\n verb=ReferralActivityVerb.ASSIGNED,\n referral=self,\n item_content_object=assignee,\n )\n # Notify the assignee by sending them an email\n Mailer.send_referral_assigned(\n referral=self,\n assignment=assignment,\n assigned_by=created_by,\n )\n\n if self.state in [ReferralState.IN_VALIDATION, ReferralState.PROCESSING]:\n return self.state\n\n return ReferralState.ASSIGNED",
"def assigned_user(self):\n return self._assigned_user",
"def __init__(__self__, *,\n assigned_to: Optional[pulumi.Input[str]] = None,\n email: Optional[pulumi.Input[str]] = None,\n object_id: Optional[pulumi.Input[str]] = None,\n user_principal_name: Optional[pulumi.Input[str]] = None):\n if assigned_to is not None:\n pulumi.set(__self__, \"assigned_to\", assigned_to)\n if email is not None:\n pulumi.set(__self__, \"email\", email)\n if object_id is not None:\n pulumi.set(__self__, \"object_id\", object_id)\n if user_principal_name is not None:\n pulumi.set(__self__, \"user_principal_name\", user_principal_name)",
"def set_assignment(self, updates, original=None):\n if not original:\n original = {}\n\n self.set_type(updates, original)\n\n if not updates.get('assigned_to'):\n if updates.get('priority'):\n # Priority was edited - nothing to set here\n return\n else:\n updates['assigned_to'] = {}\n\n assigned_to = updates.get('assigned_to') or {}\n if (assigned_to.get('user') or assigned_to.get('contact')) and not assigned_to.get('desk'):\n raise SuperdeskApiError.badRequestError(message=\"Assignment should have a desk.\")\n\n # set the assignment information\n user = get_user()\n if original.get('assigned_to', {}).get('desk') != assigned_to.get('desk'):\n if original.get('assigned_to', {}).get('state') in \\\n [ASSIGNMENT_WORKFLOW_STATE.IN_PROGRESS, ASSIGNMENT_WORKFLOW_STATE.SUBMITTED]:\n raise SuperdeskApiError.forbiddenError(\n message=\"Assignment linked to content. Desk reassignment not allowed.\")\n\n assigned_to['assigned_date_desk'] = utcnow()\n\n if user and user.get(config.ID_FIELD):\n assigned_to['assignor_desk'] = user.get(config.ID_FIELD)\n\n if assigned_to.get('user') and original.get('assigned_to', {}).get('user') != assigned_to.get('user'):\n assigned_to['assigned_date_user'] = utcnow()\n\n if user and user.get(config.ID_FIELD):\n assigned_to['assignor_user'] = user.get(config.ID_FIELD)\n\n if not original.get(config.ID_FIELD):\n updates['original_creator'] = str(user.get(config.ID_FIELD)) if user else None\n updates['assigned_to'][\n ITEM_STATE] = get_next_assignment_status(updates, updates['assigned_to'].get(ITEM_STATE) or\n ASSIGNMENT_WORKFLOW_STATE.ASSIGNED)\n else:\n # In case user was removed\n if not assigned_to.get('user'):\n assigned_to['user'] = None\n else:\n # Moving from submitted to assigned after user assigned after desk submission\n if original.get('assigned_to')['state'] == ASSIGNMENT_WORKFLOW_STATE.SUBMITTED:\n updates['assigned_to']['state'] = get_next_assignment_status(updates,\n ASSIGNMENT_WORKFLOW_STATE.IN_PROGRESS)\n\n updates['version_creator'] = str(user.get(config.ID_FIELD)) if user else None",
"def render_assigned_user(self, value):\n return value.get_full_name() or value",
"def sync_assignee_outbound(self, external_issue, user, assign=True, **kwargs):\n raise NotImplementedError",
"def sync_assignee_outbound(self, external_issue, user, assign=True, **kwargs):\n raise NotImplementedError",
"def case_detail_assign_view(request, pk):\n issue = _get_issue(request, pk)\n serializer = IssueAssignmentSerializer(data=request.data, instance=issue)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response({\"issue\": IssueDetailSerializer(issue).data})",
"def _assignment(info):\n\n return info.ui.context['object']",
"def assure_tender_assigned_to_user(self, tender_new_id, assigned_user):\n tenders_from_admin = ToDoTenders(division_admin_login, universal_password) # only admin see all chains\n\n all_tender_id_responsibles_chains = tenders_from_admin.get_all_assigned_users_for_tenders(\n tenders_from_admin.get_tenders_with_responsibles('in_work'))\n\n for chain in all_tender_id_responsibles_chains:\n if chain['tender_new_id'] == tender_new_id:\n for res in chain['responsibles']:\n if res['emailAddress'] == assigned_user:\n return True\n else:\n pass",
"def task_assignment(request, task_id, task_assignment_id):\n try:\n task = Task.objects.get(id=task_id)\n except ObjectDoesNotExist:\n messages.error(request, 'Cannot find Task with ID {}'.format(task_id))\n return redirect(index)\n try:\n task_assignment = TaskAssignment.objects.get(id=task_assignment_id)\n except ObjectDoesNotExist:\n messages.error(request,\n 'Cannot find Task Assignment with ID {}'.format(task_assignment_id))\n return redirect(index)\n\n if request.user.is_authenticated:\n if request.user != task_assignment.assigned_to:\n messages.error(\n request,\n 'You do not have permission to work on the Task Assignment with ID {}'.\n format(task_assignment.id))\n return redirect(index)\n else:\n if task_assignment.assigned_to is not None:\n messages.error(\n request,\n 'You do not have permission to work on the Task Assignment with ID {}'.\n format(task_assignment.id))\n return redirect(index)\n\n auto_accept_status = request.session.get('auto_accept_status', False)\n\n if request.method == 'GET':\n http_get_params = \"?assignmentId={}&hitId={}&workerId={}&urlSubmitTo={}\".format(\n task_assignment.id,\n task.id,\n request.user.id,\n urllib.parse.quote(\n reverse('task_assignment', kwargs={\n 'task_id': task.id, 'task_assignment_id': task_assignment.id}),\n safe=''))\n return render(\n request,\n 'turkle/task_assignment.html',\n {\n 'auto_accept_status': auto_accept_status,\n 'http_get_params': http_get_params,\n 'task': task,\n 'task_assignment': task_assignment,\n },\n )\n else:\n task_assignment.answers = dict(request.POST.items())\n task_assignment.completed = True\n task_assignment.save()\n if request.user.is_authenticated:\n logger.info('User(%i) submitted Task(%i)', request.user.id, task.id)\n else:\n logger.info('Anonymous user submitted Task(%i)', task.id)\n\n if request.session.get('auto_accept_status'):\n return redirect(accept_next_task, task.batch.id)\n else:\n return redirect(index)",
"def send_referral_assigned_unit(\n cls, referral, assignment, assignunit_explanation, assigned_by\n ):\n template_id = settings.SENDINBLUE[\"REFERRAL_ASSIGNED_UNIT_TEMPLATE_ID\"]\n\n # Get the path to the referral detail view from the unit inbox\n link_path = FrontendLink.unit_referral_detail(\n unit=assignment.unit.id, referral=referral.id\n )\n\n for owner in assignment.unit.members.filter(\n unitmembership__role=UnitMembershipRole.OWNER\n ):\n data = {\n \"params\": {\n \"assigned_by\": assigned_by.get_full_name(),\n \"case_number\": referral.id,\n \"link_to_referral\": f\"{cls.location}{link_path}\",\n \"referral_users\": referral.get_users_text_list(),\n \"title\": referral.title or referral.object,\n \"topic\": referral.topic.name,\n \"unit_name\": assignment.unit.name,\n \"urgency\": referral.urgency_level.name,\n \"message\": assignunit_explanation,\n },\n \"replyTo\": cls.reply_to,\n \"templateId\": template_id,\n \"to\": [{\"email\": owner.email}],\n }\n\n cls.send(data)",
"def send_assignment_notification(self, updates, original=None, force=False):\n # No notifications for 'draft' assignments\n if self.is_assignment_draft(updates, original):\n return\n\n # No assignment notification sent on start work\n if original.get('assigned_to', {}).get('state') == ASSIGNMENT_WORKFLOW_STATE.ASSIGNED and \\\n updates.get('assigned_to', {}).get('state') == ASSIGNMENT_WORKFLOW_STATE.IN_PROGRESS:\n return\n\n assigned_to = updates.get('assigned_to', {})\n assignment_id = (updates.get('_id') or assigned_to.get('assignment_id', 'Unknown'))\n if not original:\n original = {}\n else:\n assignment_id = original.get('_id')\n\n if not force and not self.is_assignment_modified(updates, original):\n return\n\n user = get_user()\n\n # Determine the name of the desk that the assigment has been allocated to\n assigned_to_desk = get_resource_service('desks').find_one(req=None, _id=assigned_to.get('desk'))\n desk_name = assigned_to_desk.get('name') if assigned_to_desk else 'Unknown'\n\n # Determine the display name of the assignee\n assignee = None\n if assigned_to.get('contact'):\n assigned_to_contact = get_resource_service('contacts').find_one(\n req=None,\n _id=assigned_to.get('contact')\n )\n if assigned_to_contact and len(assigned_to_contact.get('contact_email') or []):\n assignee = '{} {} ({})'.format(\n assigned_to_contact.get('first_name') or '',\n assigned_to_contact.get('last_name') or '',\n assigned_to_contact['contact_email'][0]\n )\n\n if assignee is None and assigned_to.get('user'):\n assigned_to_user = get_resource_service('users').find_one(\n req=None,\n _id=assigned_to.get('user')\n )\n if assigned_to_user and assigned_to_user.get('slack_username'):\n assignee = '@' + assigned_to_user.get('slack_username')\n else:\n assignee = assigned_to_user.get('display_name') if assigned_to_user else 'Unknown'\n\n coverage_type = updates.get('planning', original.get('planning', {})).get('g2_content_type', '')\n slugline = updates.get('planning', original.get('planning', {})).get('slugline', 'with no slugline')\n\n client_url = app.config['CLIENT_URL']\n\n assignment = deepcopy(original)\n assignment.update(updates)\n planning_id = assignment.get('planning_item', -1)\n planning_item = get_resource_service('planning').find_one(req=None, _id=planning_id)\n if planning_item and planning_item.get('event_item'):\n event_item = get_resource_service('events').find_one(req=None, _id=planning_item.get('event_item'))\n contacts = []\n for contact_id in event_item.get('event_contact_info', []):\n contact_details = get_resource_service('contacts').find_one(req=None, _id=contact_id)\n if contact_details:\n contacts.append(contact_details)\n if len(contacts):\n event_item['event_contact_info'] = contacts\n else:\n event_item = None\n\n # The assignment is to an external contact or a user\n if assigned_to.get('contact') or assigned_to.get('user'):\n # If it is a reassignment\n meta_msg = 'assignment_details_internal_email' if assigned_to.get('user') else 'assignment_details_email'\n if original.get('assigned_to'):\n # it is being reassigned by the original assignee, notify the new assignee\n if original.get('assigned_to', {}).get('user', '') == str(user.get(config.ID_FIELD, None)):\n PlanningNotifications().notify_assignment(target_user=assigned_to.get('user'),\n message='assignment_reassigned_1_msg',\n meta_message=meta_msg,\n coverage_type=get_coverage_type_name(coverage_type),\n slugline=slugline,\n desk=desk_name,\n client_url=client_url,\n assignment_id=assignment_id,\n 
assignment=assignment,\n event=event_item,\n is_link=True,\n contact_id=assigned_to.get('contact'))\n # notify the desk\n if assigned_to.get('desk'):\n PlanningNotifications().notify_assignment(target_desk=assigned_to.get('desk'),\n message='assignment_reassigned_3_msg',\n meta_message=meta_msg,\n assignee=assignee,\n client_url=client_url,\n assignment_id=assignment_id,\n desk=desk_name,\n assignor=user.get('display_name'),\n assignment=assignment,\n event=event_item,\n omit_user=True,\n is_link=True)\n\n else:\n # if it was assigned to a desk before, test if there has been a change of desk\n if original.get('assigned_to') and original.get('assigned_to').get('desk') != updates.get(\n 'assigned_to').get('desk'):\n # Determine the name of the desk that the assigment was allocated to\n assigned_from_desk = get_resource_service('desks').find_one(req=None,\n _id=original.get('assigned_to').get(\n 'desk'))\n desk_from_name = assigned_from_desk.get('name') if assigned_from_desk else 'Unknown'\n assigned_from = original.get('assigned_to')\n assigned_from_user = get_resource_service('users').find_one(req=None,\n _id=assigned_from.get('user'))\n old_assignee = assigned_from_user.get('display_name') if assigned_from_user else ''\n PlanningNotifications().notify_assignment(target_desk=assigned_to.get('desk'),\n target_desk2=original.get('assigned_to').get('desk'),\n message='assignment_reassigned_2_msg',\n meta_message=meta_msg,\n coverage_type=get_coverage_type_name(coverage_type),\n slugline=slugline,\n assignee=assignee,\n desk=desk_name,\n old_assignee=old_assignee,\n client_url=client_url,\n assignment_id=assignment_id,\n old_desk=desk_from_name,\n assignor=user.get('display_name'),\n assignment=assignment,\n event=event_item,\n omit_user=True,\n is_link=True,\n contact_id=assigned_to.get('contact'))\n else:\n # it is being reassigned by someone else so notify both the new assignee and the old\n PlanningNotifications().notify_assignment(target_user=original.get('assigned_to').get('user'),\n target_desk=original.get('assigned_to').get(\n 'desk') if original.get('assigned_to').get(\n 'user') is None else None,\n message='assignment_reassigned_3_msg',\n meta_message=meta_msg,\n coverage_type=get_coverage_type_name(coverage_type),\n slugline=slugline,\n assignee=assignee,\n client_url=client_url,\n assignment_id=assignment_id,\n desk=desk_name,\n assignor=user.get('display_name'),\n assignment=assignment,\n event=event_item,\n omit_user=True,\n is_link=True,\n contact_id=original.get('assigned_to').get('contact'))\n # notify the assignee\n assigned_from = original.get('assigned_to')\n assigned_from_user = get_resource_service('users').find_one(req=None,\n _id=assigned_from.get('user'))\n old_assignee = assigned_from_user.get('display_name') if assigned_from_user else None\n PlanningNotifications().notify_assignment(target_user=assigned_to.get('user'),\n message='assignment_reassigned_4_msg',\n meta_message=meta_msg,\n coverage_type=get_coverage_type_name(coverage_type),\n slugline=slugline,\n assignor=user.get('display_name', ''),\n old_assignee=' from ' + old_assignee\n if old_assignee else '',\n client_url=client_url,\n assignment_id=assignment_id,\n desk=desk_name,\n event=event_item,\n assignment=assignment,\n omit_user=True,\n is_link=True,\n contact_id=assigned_to.get('contact'))\n else: # A new assignment\n # Notify the user the assignment has been made to unless assigning to your self\n if str(user.get(config.ID_FIELD, None)) != assigned_to.get('user', ''):\n 
PlanningNotifications().notify_assignment(target_user=assigned_to.get('user'),\n message='assignment_assigned_msg',\n meta_message=meta_msg,\n coverage_type=get_coverage_type_name(coverage_type),\n slugline=slugline,\n client_url=client_url,\n assignment_id=assignment_id,\n assignor='by ' + user.get('display_name', '')\n if str(\n user.get(config.ID_FIELD, None)) != assigned_to.get(\n 'user', '') else 'to yourself',\n assignment=assignment,\n event=event_item,\n omit_user=True,\n is_link=True,\n contact_id=assigned_to.get('contact'))\n else: # Assigned/Reassigned to a desk, notify all desk members\n # if it was assigned to a desk before, test if there has been a change of desk\n if original.get('assigned_to') and original.get('assigned_to').get('desk') != updates.get(\n 'assigned_to', {}).get('desk'):\n # Determine the name of the desk that the assigment was allocated to\n assigned_from_desk = get_resource_service('desks').find_one(req=None,\n _id=original.get('assigned_to').get('desk'))\n desk_from_name = assigned_from_desk.get('name') if assigned_from_desk else 'Unknown'\n if original.get('assigned_to', {}).get('user', '') == str(user.get(config.ID_FIELD, None)):\n PlanningNotifications().notify_assignment(target_desk=assigned_to.get('desk'),\n message='assignment_to_desk_msg',\n meta_message='assignment_details_email',\n coverage_type=get_coverage_type_name(coverage_type),\n slugline=slugline,\n assign_type='reassigned',\n client_url=client_url,\n assignment_id=assignment_id,\n desk=desk_name,\n assignor=user.get('display_name'),\n assignment=assignment,\n event=event_item,\n omit_user=True,\n is_link=True,\n contact_id=assigned_to.get('contact'))\n else:\n PlanningNotifications().notify_assignment(target_desk=assigned_to.get('desk'),\n target_desk2=original.get('assigned_to').get('desk'),\n message='assignment_submitted_msg',\n meta_message='assignment_details_email',\n coverage_type=get_coverage_type_name(coverage_type),\n slugline=slugline,\n desk=desk_name,\n client_url=client_url,\n assignment_id=assignment_id,\n from_desk=desk_from_name,\n assignment=assignment,\n event=event_item,\n is_link=True,\n contact_id=assigned_to.get('contact'))\n else:\n assign_type = 'reassigned' if original.get('assigned_to') else 'assigned'\n PlanningNotifications().notify_assignment(target_desk=assigned_to.get('desk'),\n message='assignment_to_desk_msg',\n meta_message='assignment_details_email',\n coverage_type=get_coverage_type_name(coverage_type),\n slugline=slugline,\n assign_type=assign_type,\n client_url=client_url,\n assignment_id=assignment_id,\n desk=desk_name,\n assignor=user.get('display_name'),\n assignment=assignment,\n event=event_item,\n omit_user=True,\n is_link=True,\n contact_id=assigned_to.get('contact'))",
"def get_assign(self):\n return self.assign",
"def is_assigned(self):\n if \"isAssigned\" in self._prop_dict:\n return self._prop_dict[\"isAssigned\"]\n else:\n return None",
"def send(self, **kwargs):\n if hasattr(self.object, 'member'):\n self.add_to(self.object.member.user.email)\n elif hasattr(self.object, 'membership'):\n self.add_to(self.object.created_by.email)\n return super(GrantedAccessMailer, self).send(**kwargs)",
"def status_assignee_reset(self):\n self.assigned_to = None\n self.status = 'new'\n self.primary_statute = None",
"def project_assign(request, project_code):\n if request.user.is_authenticated:\n projects = Project.objects.all()\n context = {'projects': projects}\n selected_project = get_object_or_404(Project, code=project_code)\n try:\n selected_project.status = 2 # project is assigned\n selected_project.save()\n\n # getting the head\n assigned_head = User.objects.get(department=selected_project.department,\n role__name__iexact=role_department_head)\n # create new task history object\n task_history = TaskHistory()\n task_history.project = selected_project\n task_history.description = (model_to_dict(selected_project))\n task_history.status = 'New Project'\n task_history.user = assigned_head\n task_history.save()\n\n \"\"\" Setting notification as project is assigned to a head \"\"\"\n assigned_head.notification_count += 1\n assigned_head.save()\n selected_project.assigned_at = datetime.now() # setting the assigned time\n selected_project.save()\n # print(assigned_head, '------------------------------------------*********************',\n # assigned_head.notification_count)\n messages.success(request, f\"Project '{selected_project.name}' is assigned to the department head.\")\n return redirect('project-list')\n except Exception as e:\n # print('error at assign project ====', e)\n messages.error(request, f\"Error: {e}\")\n return render(request, 'projectmanager/project_list.html', context)",
"def is_assigned(self):\n if self.status == \"ASSIGNED\":\n return True\n else:\n return False",
"def handle_added(self):\n if 'assigned' not in self.__dict__:\n self.assigned = date.today()\n\n if 'assigned_by' not in self.__dict__:\n self.assigned_by = default_assigned_by()",
"def save(self, user, project, commit=True):\n task = super(TaskForm, self).save(commit=False)\n task.project = project\n task.editor = user\n if not task.id:\n task.author = user\n task.created_at = datetime.now()\n if commit:\n task.save()\n\n def assign_resource(\n resource): return task.assigned_resources.add(resource)\n map(assign_resource, self.cleaned_data['assigned_resources'])\n return task",
"def assign_user_to_issue(self, issue, JIRAUsername):\r\n # TODO: Review docs\r\n self.jira.assign_issue(issue=issue, assignee=JIRAUsername)",
"def assigned_todos(self):\r\n return AssignedTodos(self)",
"def get_user_assigned_identity_object_id(self, user_assigned_identity=None) -> str:\n assigned_identity = user_assigned_identity if user_assigned_identity else self.get_assign_identity()\n if assigned_identity is None or assigned_identity == \"\":\n raise RequiredArgumentMissingError(\"No assigned identity provided.\")\n return self.get_identity_by_msi_client(assigned_identity).principal_id"
] | [
"0.7043154",
"0.68540734",
"0.6465051",
"0.6450169",
"0.6339605",
"0.6332004",
"0.622697",
"0.59485155",
"0.58828086",
"0.5880116",
"0.5737545",
"0.5694664",
"0.5694664",
"0.5586917",
"0.5578563",
"0.55712336",
"0.5555433",
"0.54242676",
"0.53915334",
"0.5375355",
"0.5294603",
"0.5292526",
"0.5277717",
"0.52749324",
"0.52220887",
"0.52163863",
"0.52117944",
"0.52039737",
"0.51994556",
"0.5161732"
] | 0.75293905 | 0 |
Use the TCIA client to retrieve a zip of DICOMS associated with a uid | def download_dicom_series(uid, output_folder):
filename_zipped = os.path.join(output_folder, uid + ".zip")
filename = re.sub(".zip", "", filename_zipped)
if not (os.path.exists(filename_zipped) or os.path.isdir(filename)):
client.get_image(
seriesInstanceUid=uid, downloadPath=output_folder, zipFileName=uid + ".zip"
)
return filename | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_sample_zip(self, sha256):\n return self.__make_api_call('get/sample/{}/zip'.format(sha256))",
"def unzip_citibike_data(zip_dir):\n# zip_dir = \"data/citibike-tripdata-nyc/\"\n# csv_dir = \"data/citibike-tripdata-nyc/csv\"\n extension = \".zip\"\n\n # for each zip file in zip_dir extract data\n for item in os.listdir(zip_dir):\n if item.endswith(extension):\n\n # create zipfile object and extract\n file_name = zip_dir + item\n with zipfile.ZipFile(file_name, \"r\") as zip_ref:\n zip_ref.extractall(zip_dir)\n print(item + \" done\")",
"def fetch(data_dir):\n file_path = os.path.join(data_dir, DESTINATION, ZIP_NAME)\n result_path = os.path.join(data_dir, DESTINATION, NAME)\n return utils.fetch(URL, file_path, result_path)",
"def fetch_the_data():\n subprocess.run([\"wget\", \"https://storage.googleapis.com/recipe-box/recipes_raw.zip\"])\n subprocess.run([\"unzip\", \"recipes_raw.zip\", \"-d\", RECIPES_DIRPATH])\n subprocess.run([\"rm\", \"recipes_raw.zip\"])",
"def download_dataset():\n \n ID = \"1-3_oB5iSF-c_V65-uSdUlo024NzlgSYZ\"\n script1 = f\"\"\"\n wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id='{ID} -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=\"{ID} -O Data.zip && rm -rf /tmp/cookies.txt\n \"\"\"\n script2 = \"\"\"unzip Data.zip\"\"\"\n\n os.system(script1)\n os.system(script2)",
"def getROZip(self, rouri):\n (status, reason, headers, uri, data) = self.doRequestFollowRedirect(rouri,\n method=\"GET\", accept=\"application/zip\")\n if status in [200, 404]:\n return (status, reason, headers, URIRef(uri), data)\n raise self.error(\"Error retrieving RO as ZIP file\",\n \"%03d %s\"%(status, reason))",
"def unzip() -> None:\n logger = logging.getLogger(__name__)\n logger.info('Download the dataset')\n\n # define the destination\n destination = project_dir / 'data' / 'raw'\n\n # extract zip\n zip_file = ZipFile(destination / \"original.zip\")\n zip_file.extractall(destination)",
"def GetArchive(self, userName = \"\", yr = 2007, mon = 0, offset = 0, limit = 0):\n\n u = self.userName\n if (userName != \"\"):\n u = userName\n \n d = \"\"\n\n if (yr > 2007 and mon > 0):\n d = \"/\"+str(yr)+\"/\"+str(mon)\n\n url = \"/users/\"+u+\"/archives\"+d\n q = []\n \n if (offset > 0):\n q.append(\"offset=\"+str(offset))\n \n if (limit > 0):\n q.append(\"limit=\"+str(limit))\n\n if (len(q)):\n url += \"?\"+\"&\".join(q)\n\n# print url\n return self.__GetJson(url, False)",
"def get_my_zip_code(\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = GetMyZipCode.create(\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)",
"async def get_my_zip_code_async(\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = GetMyZipCode.create(\n namespace=namespace,\n )\n return await run_request_async(\n request, additional_headers=x_additional_headers, **kwargs\n )",
"def unzip_data(zip_f,data_folder_path): \n\n with zipfile.ZipFile(zip_f,\"r\") as zip_ref:\n zip_ref.extractall(data_folder_path)",
"def download(date_array, tag, inst_id, data_path=None, user=None, password=None,\n compression_type='o'):\n\n if tag not in tags:\n raise ValueError('Uknown CHAIN tag')\n elif (user is None) or (password is None):\n raise ValueError('CHAIN user account information must be provided.')\n\n top_dir = os.path.join(data_path)\n\n for date in date_array:\n logger.info('Downloading COSMIC data for ' + date.strftime('%D'))\n sys.stdout.flush()\n yr = date.strftime('%Y')\n doy = date.strftime('%j')\n\n # try download\n try:\n # ftplib uses a hostname not a url, so the 'ftp://' is not here\n # connect to ftp server and change to desired directory\n hostname = ''.join(('chain.physics.unb.ca'))\n ftp = ftplib.FTP(hostname)\n ftp.login(user, password)\n ftp_dir = ''.join(('/gps/data/', tag, '/', yr, '/', doy, '/',\n yr[-2:], compression_type, '/'))\n ftp.cwd(ftp_dir)\n\n # setup list of station files to iterate through\n files = []\n ftp.retrlines('LIST', files.append)\n files = [file.split(None)[-1] for file in files]\n\n # iterate through files and download each one\n for file in files:\n if inst_id:\n print(inst_id)\n if file[0:3] != inst_id:\n continue\n save_dir = os.path.join(top_dir)\n print(save_dir)\n # make directory if it doesn't already exist\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n save_file = os.path.join(save_dir, file)\n with open(save_file, 'wb') as f:\n print('Downloading: ' + file + ', and saving to ' +\n save_file)\n ftp.retrbinary(\"RETR \" + file, f.write)\n\n except ftplib.error_perm as err:\n # pass error message through and log it\n estr = ''.join((str(err)))\n print(estr)\n logger.info(estr)\n\n ftp.close()\n return",
"def extract(*args):\r\n bank_rut= args[0]\r\n bank_id= args[1]\r\n\r\n while True:\r\n try:\r\n print(\"Downloading file for...\" + str(args[0]),end=\"\\n\")\r\n myfile = requests.get(\"https://www.sbif.cl/sbifweb/internet/bancos/balances/\"+str(YEAR)+\"/\"+bank_id+\".zip\", allow_redirects=True)\r\n time.sleep(rd.randint(4,7))\r\n break\r\n except:\r\n print(\"request failed\")\r\n pass\r\n \r\n open(str(PATH.joinpath(\"./data_banks/\"+bank_id+\".zip\")), 'wb').write(myfile.content)\r\n time.sleep(rd.randint(1,2))\r\n \r\n yield (bank_rut,bank_id)",
"def unzip_data():\n zip_ref = zipfile.ZipFile(data_zip, 'r')\n zip_ref.extractall('')\n zip_ref.close()",
"def download_extract_zip(url):\n response = requests.get(url)\n with ZipFile(BytesIO(response.content)) as thezip:\n for zipinfo in thezip.infolist():\n with thezip.open(zipinfo) as thefile:\n df = pd.read_csv(thefile)\n return (df)",
"def unzip(zfile, md=False):\n\tbasedir = ''\n\tcount = -1\n\tif md:\n\t\tbasedir = prepareBaseDir(zfile)\n\t\n\tzfile = zipfile.ZipFile(zfile, 'r')\n\tfor name in zfile.namelist():\n\t\tcount+=1\n\t\tuname = name.decode('gbk')\n\t\tif uname.endswith('.DS_Store'):\n\t\t\tcontinue\n\t\t\n\t\t#prepare directory\n\t\tdirs = os.path.dirname(uname)\n\t\tif basedir:\n\t\t\tdirs = os.path.join(basedir, dirs)\n\t\tprint 'Extracting: ' + uname\n\t\tif dirs and not os.path.exists(dirs):\n\t\t\tprint 'Prepare directories: ', dirs\n\t\t\tos.makedirs(dirs)\n\t\tif (count == 0):\n\t\t\thomeDir = uname[:-1]\n\t\t#ready to unzip file\n\t\tdata = zfile.read(name)\n\t\tif basedir:\n\t\t\tuname = os.path.join(basedir, uname)\n\t\tif not os.path.exists(uname):\n\t\t\tfo = open(uname, 'w')\n\t\t\tfo.write(data)\n\t\t\tfo.close()\n\tzfile.close()\n\treturn homeDir",
"def retrieve_citibike_data(start_year=2013, end_year=2021, target=\"data/citibike_trips_nyc/\"):\n\n for year in range(start_year, end_year):\n for month in range(1, 13):\n\n date_format = str(year) + '{:02d}'.format(month)\n print(date_format)\n # retrieve data from citibike's s3 buckets and store in zip directory\n # weird change in zip naming convention before 2017\n if year < 2017:\n urllib.request.urlretrieve(\"https://s3.amazonaws.com/tripdata/\" + date_format +\n \"-citibike-tripdata.zip\", target + date_format + \"-citibike-tripdata.zip\")\n else:\n urllib.request.urlretrieve(\"https://s3.amazonaws.com/tripdata/\" + date_format +\n \"-citibike-tripdata.csv.zip\", target + date_format + \"-citibike-tripdata.zip\")\n print(str(year) + \"-\" + str(month) + \" done\")",
"def getZipCode(dbpath) -> (int, float, float):\n conn = sqlite3.connect(str(dbpath))\n c = conn.cursor()\n c.execute(\"select zipcode, lat, long from user where id=1\")\n conn.commit()\n zipcode = c.fetchone()\n conn.close()\n return zipcode[0], zipcode[1], zipcode[2]",
"def unzipper(data_address, target_directory):\n import zipfile\n data = \"/home/sharoonsaxena/Datasets/dogs-vs-cats.zip\"\n zip_ref = zipfile.ZipFile(data, \"r\")\n zip_ref.extractall(\"/home/sharoonsaxena/Datasets/extracted/\")\n zip_ref.close()",
"def extract_zip(dataset_path, target_path):\n dataset_path = os.path.join(dataset_path,'covidx-cxr2.zip')\n print(f'Extracting zip file: {dataset_path}')\n with ZipFile(file=dataset_path) as zip_file:\n for file in tqdm(iterable=zip_file.namelist(), total=len(zip_file.namelist())):\n zip_file.extract(member=file, path=os.path.join(target_path, 'xray'))\n os.remove(dataset_path)",
"def fetch_wdi() -> None:\n\n log.info(\"Started fetching WDI.\")\n url = \"http://databank.worldbank.org/data/download/WDI_csv.zip\"\n common.fetch_source_simply(name=\"wdi\", url=url)\n log.info(\"Finished fetchign WDI.\")",
"def quickScanZip(args, fh):\n # 100 bytes is the smallest .zip possible\n\n fh.seek(0, 2)\n fsize = fh.tell()\n if fsize==0:\n print(\"Empty file\")\n return\n if fsize<100:\n print(\"Zip too small: %d bytes, minimum zip is 100 bytes\" % fsize)\n return\n fh.seek(-100, 2)\n\n eoddata = fh.read()\n iEND = eoddata.find(b'PK\\x05\\x06')\n if iEND==-1:\n # try with larger chunk\n ofs = max(fh.tell()-0x10100, 0)\n fh.seek(ofs, 0)\n eoddata = fh.read()\n iEND = eoddata.find(b'PK\\x05\\x06')\n if iEND==-1:\n print(\"expected PK0506 - probably not a PKZIP file\")\n return\n else:\n ofs = fh.tell()-0x100\n eod = EndOfCentralDir(ofs, eoddata, iEND+4)\n yield eod\n\n dirofs = eod.dirOffset\n for _ in range(eod.thisEntries):\n fh.seek(dirofs)\n dirdata = fh.read(46)\n if dirdata[:4] != b'PK\\x01\\x02':\n print(\"expected PK0102\")\n return\n dirent = CentralDirEntry(dirofs, dirdata, 4)\n\n yield dirent\n dirofs = dirent.endOffset",
"def unzipArchives(zip_file, password):\n with ZipFile(zip_file) as archive:\n archive.extractall(pwd=bytes(password, \"utf8\"))",
"def download_data():\n url = 'https://www.dropbox.com/s/h9ubx22ftdkyvd5/ml-latest-small.zip?dl=1'\n urllib.request.urlretrieve(url, 'ml-latest-small.zip')\n zfile = zipfile.ZipFile('ml-latest-small.zip')\n zfile.extractall()\n zfile.close()",
"def collect_nrcan_dem(nts, tmp_dir=None, ftp_address='ftp.geogratis.gc.ca',\n ftp_dir='/pub/nrcan_rncan/elevation/cdem_mnec'):\n # Create temporary directory\n if tmp_dir is None:\n tmp_dir = tempfile.gettempdir()\n tmp_dir = os.path.join(tmp_dir, 'nrcan_dem')\n if not os.path.isdir(tmp_dir):\n os.mkdir(tmp_dir)\n\n # Iterate NTS and collect rasters\n for num, lets in nts.items():\n num = str(num)\n if len(num) == 1:\n num = '00{}'.format(num)\n elif len(num) == 2:\n num = '0{}'.format(num)\n\n # Open the ftp connection\n ftp = FTP(ftp_address)\n ftp.login()\n ftp.cwd(ftp_dir)\n\n try:\n ftp.cwd(num)\n except:\n print(\"Warning: cannot access {}\".format(num))\n ftp.quit()\n continue\n\n dirs = ftp.nlst()\n ftp.quit()\n if lets == 'all':\n # Include all\n inc = dirs\n else:\n inc = [d for d in dirs if\n any([let.lower() in d.replace('cdem_dem_', '').replace('_tif', '').replace('.zip', '').lower()\n for let in lets])]\n if len(inc) == 0:\n print(\"Warning: None of the desired letters found in {}\".format(num))\n for d in inc:\n print(\"Collecting {}\".format(d))\n tmpfile = os.path.join(tmp_dir, d)\n\n with open(tmpfile, 'wb') as zipf:\n # Reopen the ftp connection for the download\n ftp = FTP(ftp_address)\n ftp.login()\n ftp.cwd(ftp_dir)\n ftp.cwd(num)\n print(\" Downloading...\")\n ftp.retrbinary('RETR ' + d, zipf.write)\n ftp.quit()\n z = zf.ZipFile(tmpfile, 'r')\n print(\" Extracting...\")\n z.extractall(tmp_dir)\n del z\n os.remove(tmpfile)\n\n # Merge\n print(\"Merging rasters\")\n files = [os.path.join(tmp_dir, f) for f in os.listdir(tmp_dir) if f.split('.')[-1] == 'tif']\n outpath = os.path.join(tmp_dir, 'nrcan_dem.tif')\n\n # Pre-check files to avoid errors at this stage\n _files = []\n for f in files:\n try:\n r = Raster(f)\n _files.append(f)\n except:\n print(\"Warning: cannot read file {}\".format(f))\n\n if len(_files) > 0:\n command = 'gdalwarp -r cubic -overwrite \"{}\" \"{}\"'.format('\" \"'.join(_files), outpath)\n os.system(command)\n else:\n raise Exception(\"No files available for DEM merge operation\")\n\n return Raster(outpath)",
"def getTenantByUid(self,uid):\n\n logger.debug(\"Call to getTenantByUid - uid: {}\".format(uid))\n\n try:\n response = self.httpHandler.sendHttpRequest(CIC_TENANT_ENDPOINT+\"?uuid=\"+uid)\n except urllib2.HTTPError as e:\n\n logger.debug(traceback.format_exc())\n\n if e.code == 404:\n\n body = e.read()\n logger.debug(\"Response code: {}, response body: {}\".format(e.code, body))\n raise KeyError(\"Tenant with uuid {} could not be found in TMS.\".format(uid),\"CIC_TENANT_UID_NOT_FOUND_ERR\")\n\n elif e.code == 403:\n\n body = e.read()\n logger.debug(\"Response code: {}, response body: {}\".format(e.code, body))\n raise RuntimeError(\n \"User {} has no permission to look up 'tenants' in {} {}\".format(self.cicUser, self.cicUrl, body),\n \"CIC_NO_ACCESS\"\n )\n\n else:\n raise\n else:\n responseString = response.read()\n return json.loads(responseString)",
"def download_LIDC(output_folder, debug=False):\n\n # Creating config file with path to dataset\n _, config_file = create_config(output_folder, debug, \"fed_lidc_idri\")\n\n # Get patient X study\n patientXstudy = pd.read_json(\n client.get_patient_study(collection=\"LIDC-IDRI\").read().decode(\"utf-8\")\n )\n\n # Get study X series\n series = pd.read_json(\n client.get_series(modality=\"CT\", collection=\"LIDC-IDRI\").read().decode(\"utf-8\")\n )\n\n # Join both of them\n patientXseries = patientXstudy.merge(series).iloc[:]\n\n # there are some images with missing slices. We remove them\n # for reference their loc: 385, 471, 890, 129, 110, 245, 80, 618, 524\n bad_patientID = [\n \"LIDC-IDRI-0418\",\n \"LIDC-IDRI-0514\",\n \"LIDC-IDRI-0672\",\n \"LIDC-IDRI-0146\",\n \"LIDC-IDRI-0123\",\n \"LIDC-IDRI-0267\",\n \"LIDC-IDRI-0085\",\n \"LIDC-IDRI-0979\",\n \"LIDC-IDRI-0572\",\n ]\n patientXseries = patientXseries[~patientXseries[\"PatientID\"].isin(bad_patientID)]\n\n if debug:\n patientXseries = patientXseries[:10]\n\n # Download associated DICOMs\n pool = multiprocessing.Pool(processes=n_cpus)\n downloaded_paths = pool.starmap(\n download_dicom_series,\n zip(patientXseries.SeriesInstanceUID.tolist(), itertools.repeat(output_folder)),\n )\n\n # Download XML annotations\n annotations_path = download_zip_from_url(ANNOTATION_URL, output_folder)\n\n # Unzip everything and remove archives\n zipped_folders = [\n str(p) for p in Path(output_folder).glob(\"./*/\") if str(p).endswith(\".zip\")\n ]\n\n # Check zip integrity, and download corrupted files again\n for zipped_f in zipped_folders:\n try:\n while zipfile.ZipFile(zipped_f).testzip() is not None:\n os.remove(zipped_f)\n download_dicom_series(os.path.splitext(zipped_f)[0], output_folder)\n except zipfile.BadZipFile:\n os.remove(zipped_f)\n download_dicom_series(os.path.splitext(zipped_f)[0], output_folder)\n print(f\"Bad zip file: {zipped_f}\")\n\n for zipped_f in zipped_folders:\n with zipfile.ZipFile(zipped_f, \"r\") as zip_ref:\n zip_file_name = re.sub(\".zip\", \"\", zipped_f)\n # extract only if it does not exist or it is empty\n if not os.path.isdir(zip_file_name) or len(os.listdir(zip_file_name)) == 0:\n os.makedirs(re.sub(\".zip\", \"\", zipped_f), exist_ok=True)\n zip_ref.extractall(zip_file_name)\n os.remove(zipped_f)\n\n # For each patient we record the location of its DICOM\n patientXseries[\"extraction_location\"] = downloaded_paths\n\n # We tie back annotations to the original DICOMS\n xmlfiles = glob.glob(os.path.join(annotations_path, \"tcia-lidc-xml\", \"*\", \"*.xml\"))\n df = pd.DataFrame()\n df[\"annotation_file\"] = xmlfiles\n # We initialize a dask dataframe to speed up computations\n ddf = dd.from_pandas(df, npartitions=8)\n df[\"SeriesInstanceUID\"] = ddf.map_partitions(\n lambda d: d[\"annotation_file\"].apply(get_SeriesUID_from_xml)\n ).compute(scheduler=\"processes\")\n df = df[df.SeriesInstanceUID != \"not found\"]\n df = df[df.SeriesInstanceUID != \"notfound\"]\n # there are several xml files which have the same seriesInstanceUID\n # but the same content, therefore here df has len of 1026.\n # Next, we are removing the duplicates. The correct number of files will be now 1018\n df = df.drop_duplicates(subset=[\"SeriesInstanceUID\"], keep=\"first\")\n patientXseries = df.merge(patientXseries, on=\"SeriesInstanceUID\")\n # Update yaml file\n write_value_in_config(config_file, \"download_complete\", True)\n return patientXseries",
"def generate_attachments_zip_export(\n export_type, username, id_string, export_id=None, options=None, xform=None\n):\n export_type = options.get(\"extension\", export_type)\n filter_query = options.get(\"query\")\n sort = options.get(\"sort\")\n\n if xform is None:\n xform = XForm.objects.get(user__username=username, id_string=id_string)\n\n if options.get(\"dataview_pk\"):\n dataview = DataView.objects.get(pk=options.get(\"dataview_pk\"))\n attachments = Attachment.objects.filter(\n instance_id__in=[\n rec.get(\"_id\")\n for rec in dataview.query_data(\n dataview,\n all_data=True,\n filter_query=filter_query,\n sort=sort,\n )\n ],\n instance__deleted_at__isnull=True,\n )\n else:\n instance_ids = query_fields_data(\n xform, fields=[\"_id\"], query=filter_query, sort=sort\n )\n attachments = Attachment.objects.filter(instance__deleted_at__isnull=True)\n if xform.is_merged_dataset:\n attachments = attachments.filter(\n instance__xform_id__in=list(\n xform.mergedxform.xforms.filter(\n deleted_at__isnull=True\n ).values_list(\"id\", flat=True)\n )\n ).filter(instance_id__in=[i_id[\"_id\"] for i_id in instance_ids])\n else:\n attachments = attachments.filter(instance__xform_id=xform.pk).filter(\n instance_id__in=[i_id[\"_id\"] for i_id in instance_ids]\n )\n\n filename = (\n f'{id_string}_{datetime.now().strftime(\"%Y_%m_%d_%H_%M_%S\")}'\n f\".{export_type.lower()}\"\n )\n file_path = os.path.join(username, \"exports\", id_string, export_type, filename)\n zip_file = None\n\n with NamedTemporaryFile() as zip_file:\n create_attachments_zipfile(attachments, zip_file)\n with open(zip_file.name, \"rb\") as temp_file:\n filename = default_storage.save(file_path, File(temp_file, file_path))\n\n export = get_or_create_export(export_id, xform, export_type, options)\n export.filedir, export.filename = os.path.split(filename)\n export.internal_status = Export.SUCCESSFUL\n export.save()\n\n return export",
"def download_data():\n url = 'https://www.dropbox.com/s/8oehplrobcgi9cq/imdb.tgz?dl=1'\n urllib.request.urlretrieve(url, 'imdb.tgz')\n tar = tarfile.open(\"imdb.tgz\")\n tar.extractall()\n tar.close()",
"def unzip(filename,destination=None,force=False):\n if not destination:\n destination=os.path.splitext(os.path.basename(filename))[0]\n destination = os.path.join(tmpdir,destination)\n if os.path.exists(destination):\n if force:\n shutil.rmtree(destination)\n else:\n zipname = filename.split('/')[-1]\n zip_url = \"https://geo.colorado.edu/apps/geolibrary/datasets/{0}\".format(zipname)\n if not os.path.isfile(\"/data/static/geolibrary/datasets/{0}\".format(zipname)):\n shutil.copy(filename,\"/data/static/geolibrary/datasets/{0}\".format(zipname))\n os.remove(filename)\n return {\"folder\": destination,\"zipdata\":False,\"zipurl\":zip_url}\n zip_ref = zipfile.ZipFile(filename,'r')\n zip_ref.extractall(destination)\n zipname = filename.split('/')[-1]\n shutil.copy(filename,\"/data/static/geolibrary/datasets/{0}\".format(zipname))\n zip_url = \"https://geo.colorado.edu/apps/geolibrary/datasets/{0}\".format(zipname)\n os.remove(filename)\n return {\"folder\": destination,\"zipdata\":True,\"zipurl\":zip_url}"
] | [
"0.55942416",
"0.5435304",
"0.51368004",
"0.5116804",
"0.50765246",
"0.5062138",
"0.5018735",
"0.5002108",
"0.4990074",
"0.49663532",
"0.49403724",
"0.49035048",
"0.4894721",
"0.48720554",
"0.48528156",
"0.48371184",
"0.47926164",
"0.4776363",
"0.47761512",
"0.47272655",
"0.47243908",
"0.4723241",
"0.47163117",
"0.47119933",
"0.4690371",
"0.4688244",
"0.46852607",
"0.46676406",
"0.4638339",
"0.462901"
] | 0.56900346 | 0 |
Downloads a zip file from a link into the download_dir folder | def download_zip_from_url(url, download_dir="."):
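    # Derive the local archive name from the URL (basename without the query string)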
filename = url.split("/")[-1].split("?")[0].strip("\\")
os.makedirs(download_dir, exist_ok=True)
filename_zipped = os.path.join(download_dir, filename)
filename = re.sub(".zip", "", filename_zipped)
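    # Only download if neither the archive nor its already-extracted folder exists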
if not (os.path.exists(filename_zipped) or os.path.isdir(filename)):
print("downloading: ", url)
r = requests.get(url, stream=True)
if r.status_code == requests.codes.ok:
with open(filename_zipped, "wb") as f:
for data in r:
f.write(data)
return filename | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _download(url, outpath=None, dirname=None, branch='master', release=None):\n six.print_('downloading...')\n outfolder = outpath or os.getcwd()\n file, archive_url = get_archive_url(url, branch, release)\n six.print_(archive_url)\n if dirname:\n outfolder = \"{}/{}.zip\".format(outfolder, dirname)\n return file, wget.download(archive_url, out=outfolder)",
"def download_zip(url, folder=None):\n\n # get this file folder name and save the file name\n if not folder:\n folder = os.path.dirname(os.path.abspath(__file__))\n file_name = os.path.split(url)[1]\n\n # Download the file from \"url\" and save it locally under \"file_name\":\n try:\n with urllib.request.urlopen(url) as response, open(folder + \"/\" + file_name, 'wb') as out_file:\n shutil.copyfileobj(response, out_file)\n except urllib.error.URLError as e:\n print('urllib.error.URLError')\n raise Exception(e)\n except Exception as e:\n raise Exception(e)\n else:\n return folder,file_name",
"def _download_from_url(self, url):\n target_file_name = self.dir + \"/\" + url.split('/')[-1].split('?')[0]\n urllib.urlretrieve (url, target_file_name)",
"def download(url, to):\n filename = url.rstrip('/').split('/')[-1] + '.zip'\n r = requests.get(url, stream=True)\n\n outpath = os.path.join(to, filename)\n\n with open(outpath, 'wb') as fd:\n for chunk in r.iter_content(1024 * 1024):\n fd.write(chunk)\n\n return outpath",
"def download_file(driver, link, filename):\n download_path = os.path.join(os.environ['HOME'], \"Downloads\", filename)\n # TODO: copy cookies, user agent, ect to session\n s = requests.session()\n r = s.get(link, stream=True)\n with open(download_path, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk:\n f.write(chunk)\n return download_path",
"def download():\n response = requests.get(URL, stream=True)\n\n file = open(FILE_NAME, 'wb')\n file.write(response.content)\n\n with zipfile.ZipFile(FILE_NAME, 'r') as zip_ref:\n zip_ref.extractall()\n\n file.close()\n os.remove(FILE_NAME)",
"def download_zip(self, path: Path) -> Path:\n if not self.url:\n raise ValueError(\"Release must have a valid url to download the zip.\")\n\n with requests.get(self.url, stream=True) as response:\n with open(path, \"wb\") as download_file:\n shutil.copyfileobj(response.raw, download_file)\n\n return path",
"def download_file(download_url, save_path):\n url = \"https://www.encodeproject.org/\" + download_url\n urllib.request.urlretrieve(url, save_path)",
"def download_file(url, path):\n file_name = path + url.split(\"/\")[-1]\n req = requests.get(url)\n zipped_info = req.content\n print(file_name)\n if not os.path.isfile(file_name):\n print(\"file doesnt exist, writing\", file_name)\n with open(file_name, 'wb') as f:\n f.write(zipped_info)\n else:\n print(\"file exists\", file_name)",
"def download_file(url, download_path):\n\n # Extract the filename from the URL\n parsed = urlparse(url)\n filename = basename(parsed.path)\n\n # Ensure the output directory exists\n if not os.path.exists(download_path):\n os.makedirs(download_path)\n\n # Get a temporary file path for the compressed file download\n downloaded_file = os.path.join(tempfile.gettempdir(), filename)\n\n # Download the file\n urlretrieve(url, downloaded_file)\n\n # Move the file to the destination folder\n destination_path = os.path.join(download_path, filename)\n os.rename(downloaded_file, destination_path)",
"def _download(data_folder): # pragma: no cover\n\n logger.info(f\"Downloading {SOURCE_URL}.\")\n\n with urlopen(SOURCE_URL) as zipresp:\n with zipfile.ZipFile(io.BytesIO(zipresp.read())) as zfile:\n zfile.extractall(data_folder)",
"def _maybe_download_and_extract(self, filename):\n if not os.path.exists(self.work_dir):\n os.mkdir(self.work_dir)\n filepath = os.path.join(self.work_dir, filename)\n if not os.path.exists(filepath):\n filepath, _ = urllib.urlretrieve(self.url + filename, filepath)\n statinfo = os.stat(filepath)\n log.info('Successfully downloaded', filename, statinfo.st_size,\n 'bytes.')\n log.info('Extracting zip file ... ')\n f = zipfile.ZipFile(filepath)\n f.extractall(path=self.work_dir)\n log.info('Extraction finished ... ')",
"def main(file_url, file_path):\n\n # extract file from the link\n\n if not os.path.exists(file_path):\n os.makedirs(file_path, exist_ok=True)\n \n r = requests.get(str(file_url))\n\n #unzip the zip file\n z = zipfile.ZipFile(io.BytesIO(r.content))\n z.extractall(path = file_path)",
"def download_file(dwn_url, dwn_folder):\n # Prepare path\n _, dwn_fil = split(dwn_url)\n dwn_dir = join(dwn_folder, dwn_fil)\n\n # download_tile = requests.get(dwn_url)\n open(dwn_dir, 'wb').write(requests.get(dwn_url).content)\n\n # Message for successful download\n status_msg = dwn_fil + ' succsesfully downloaded'\n\n return status_msg, dwn_fil",
"def download(url, path):\n response = requests.get(url)\n\n if response.ok:\n print(\"response is ok file is downloading ... \")\n # start to download file from url.\n with open(path, \"wb\") as f:\n f.write(response.content)\n else:\n print(\"Error!\", response.status_code)\n return False\n\n print(\"File downloaded succusfully.\")\n return True",
"def download_url(url, path=None, name=None):\n r = requests.get(url, allow_redirects=True)\n if path:\n paths = []\n paths.append(path)\n make_dir_from_list(paths)\n open(os.path.join(paths[0], name), 'wb').write(r.content)\n return r.content.decode('utf-8')",
"def download_url(filename, url):\n latest_package_url = request.urlopen(url).read().decode(\"utf-8\")\n print(\"Downloading latest package:\\n{}\".format(latest_package_url))\n request.urlretrieve(latest_package_url, filename, reporthook=download_progress_callback)",
"def _download_zip(self, zip_url, dest_dir):\n # TODO(jsirois): Wrap with workunits, progress meters, checksums.\n self.context.log.info('Downloading {}...'.format(zip_url))\n sess = requests.session()\n sess.mount('file://', self.LocalFileAdapter())\n res = sess.get(zip_url)\n if not res.status_code == requests.codes.ok:\n raise TaskError('Failed to download {} ({} error)'.format(zip_url, res.status_code))\n\n with open_zip(BytesIO(res.content)) as zfile:\n safe_mkdir(dest_dir)\n for info in zfile.infolist():\n if info.filename.endswith('/'):\n # Skip directories.\n continue\n # Strip zip directory name from files.\n filename = os.path.relpath(info.filename, get_basedir(info.filename))\n f = safe_open(os.path.join(dest_dir, filename), 'w')\n f.write(zfile.read(info))\n f.close()",
"def download(self, download_path):\n return",
"def download_zip(url, save_path, chunk_size=128):\n r = requests.get(url, stream=True)\n with open(save_path, 'wb') as fd:\n for chunk in r.iter_content(chunk_size=chunk_size):\n fd.write(chunk)",
"def download(self, url):\n try:\n webFile = urllib.urlopen(url)\n localFile = open(self.workdir + \"/\" + url.split('/')[-1], 'w')\n localFile.write(webFile.read())\n webFile.close()\n localFile.close()\n except IOError:\n print(\"could not get url \" + url)",
"def download_file(directory, file_name, output_dir):\n endpoint_url = BASE_URL + \"/\" + directory\n final_file = \"lib/\" + output_dir + \"/\" + file_name\n if not os.path.exists(\"lib/\" + output_dir):\n os.makedirs(\"lib/\" + output_dir)\n print('Downloading ' + endpoint_url + \"/\" + file_name + ' ...')\n opener = urllib.URLopener()\n opener.retrieve(endpoint_url + \"/\" + file_name, final_file)\n os.chmod(final_file, 0o755)",
"def download_addon(self, url, target_path):\n try:\n filename = url.split('?')[0].rstrip('/').rsplit('/', 1)[-1]\n target_path = os.path.join(target_path, filename)\n\n print \"Downloading %s to %s\" % (url, target_path)\n urllib.urlretrieve(url, target_path)\n\n return target_path\n except Exception, e:\n print e",
"def download_file(url, target_pkg_dir, filename):\n abs_file_path = \"/\".join([target_pkg_dir, filename])\n try:\n urllib.request.urlretrieve(url, abs_file_path)\n except Exception as ex:\n raise Exception(\"HTTP error for url: {url}\\nError message: {msg}\\nHTTP code: {code}\".format(\n url=ex.url, msg=ex.msg, code=ex.code))",
"def download_file_from_url(url, PATH, file_name):\n with requests.get(url) as r:\n with open(PATH+'/'+file_name, 'wb') as f:\n f.write(r.content)",
"def maybe_download(directory, filename, url):\n if not os.path.exists(directory):\n print(\"Creating directory %s\" % directory)\n os.mkdir(directory)\n filepath = os.path.join(directory, filename)\n if not os.path.exists(filepath):\n print(\"Downloading %s to %s\" % (url, filepath))\n filepath, _ = urllib.request.urlretrieve(url, filepath)\n statinfo = os.stat(filepath)\n print(\"Succesfully downloaded\", filename, statinfo.st_size, \"bytes\")\n return filepath",
"def download_file(url_path):\n local_filename = url_path.split('/')[-3] + \"-\" + url_path.split('/')[-1]\n local_filename = OUT_DIR + local_filename\n print local_filename\n url = \"https://commoncrawl.s3.amazonaws.com/\" + url_path\n # NOTE the stream=True parameter\n req = requests.get(url, stream=True)\n with open(local_filename, 'wb') as write_f:\n for chunk in req.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n write_f.write(chunk)\n write_f.close()\n return local_filename",
"def download_one_zip(data_url, data_dir):\r\n\r\n zipfile_path, unzip_dir = zip_file_name_from_url(data_url, data_dir)\r\n if not is_there_file(zipfile_path, unzip_dir):\r\n if not os.path.isdir(unzip_dir):\r\n os.makedirs(unzip_dir)\r\n r = requests.get(data_url, stream=True)\r\n with open(zipfile_path, \"wb\") as py_file:\r\n for chunk in r.iter_content(chunk_size=1024): # 1024 bytes\r\n if chunk:\r\n py_file.write(chunk)\r\n unzip_nested_zip(zipfile_path, unzip_dir), download_small_file",
"def maybe_download(directory, filename, url):\n if not os.path.exists(directory):\n print(\"Creating directory %s\" % directory)\n os.mkdir(directory)\n filepath = os.path.join(directory, filename)\n if not os.path.exists(filepath):\n print(\"Downloading %s to %s\" % (url, filepath))\n filepath, _ = urllib.request.urlretrieve(url, filepath)\n statinfo = os.stat(filepath)\n print(\"Succesfully downloaded\", filename, statinfo.st_size, \"bytes\")\n return filepath",
"def ftp_download(url, dir):\n filename = url.split('/')[-1]\n with closing(request.urlopen(url)) as r:\n with open(dir + filename, 'wb+') as f:\n shutil.copyfileobj(r, f)\n return dir + filename"
] | [
"0.7673379",
"0.7671466",
"0.7451294",
"0.74270606",
"0.73973536",
"0.73620456",
"0.73142874",
"0.7294725",
"0.7197982",
"0.7170576",
"0.70669204",
"0.7042603",
"0.7016436",
"0.7009908",
"0.7009856",
"0.6993355",
"0.69703954",
"0.6968875",
"0.6919285",
"0.6912825",
"0.6911805",
"0.68894136",
"0.6874046",
"0.6857154",
"0.683316",
"0.68283904",
"0.6826224",
"0.68048704",
"0.67928994",
"0.679109"
] | 0.7997585 | 0 |
Retrieves SeriesUID from the xml under scrutiny. | def get_SeriesUID_from_xml(path):
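    # The annotation XML uses the http://www.nih.gov namespace; "notfound" is returned on any parsing failure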
try:
return [
e.text
for e in ET.parse(path).getroot().iter()
if e.tag == "{http://www.nih.gov}SeriesInstanceUid"
][0]
except Exception:
return "notfound" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def id(self):\n return self._fetch_element('uid')",
"def uid(self):\n return safeInt(self.tag(\"uid\"))",
"def series_instance_uid(self) -> Optional[str]:\n return self._series_instance_uid",
"def identifier(self):\n return self.element.xpath('./@Id')",
"def uid(self) -> str:\n return self.get_main_information()['MainDicomTags']['SOPInstanceUID']",
"def uid(self):\n return self.primary_header['WOD unique cast number']",
"def uid(self) -> str:\n return pulumi.get(self, \"uid\")",
"def uid(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"uid\")",
"def uid(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"uid\")",
"def uid(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"uid\")",
"def read_uid(self):\n self.check_validity()\n\n return self.ipcon.send_request(self, BrickletIndustrialDualAnalogInV2.FUNCTION_READ_UID, (), '', 12, 'I')",
"def uid(self):\n return self._serial_number",
"def GetStudyUIDToSeriesUIDMap(has_study_uid=None):\n return _GetStudyUIDMaps(has_study_uid)[0]",
"def sid(self):\n return self.data[''].sid",
"def read_uid(self):\n return self.ipcon.send_request(self, BrickletBarometerV2.FUNCTION_READ_UID, (), '', 'I')",
"def get_sid(self):\n resdat = self.req().read() #phew, that was easy :)\n print resdat\n resdat = self.parse_response(resdat)\n if (resdat[0][1][0] != \"c\"):\n return None\n sid = resdat[0][1][1]\n return sid",
"def get_userid(node_name):\n url = XCATUrl().lsdef_node(''.join(['/', node_name]))\n info = xcat_request('GET', url)['info']\n\n with expect_invalid_xcat_resp_data():\n for s in info[0]:\n if s.__contains__('userid='):\n return s.strip().rpartition('=')[2]",
"def series_id(self) -> str:\n return self.get_main_information()['ParentSeries']",
"def get_doc_id(element_tree):\n id_element = element_tree.xpath('labels[@name=\"id\"]')[0]\n return id_element.attrib['valueString']",
"def get_tsuid_from_fid(self, fid):\n check_is_fid_valid(fid=fid, raise_exception=True)\n\n # empty result => throws IkatsNotFoundError\n res = self.search_functional_identifiers(criterion_type='funcIds', criteria_list=[fid])\n\n assert (isinstance(res, list)), \"get_tsuid_from_func_id: failed to retrieve json result as list\"\n assert (isinstance(res[0], dict)), \"get_tsuid_from_func_id: failed to retrieve first item from result list\"\n return res[0]['tsuid']",
"def unique_id(self) -> str:\n return get_frigate_entity_unique_id(\n self._config_entry.entry_id, \"sensor_temp\", self._name\n )",
"def get_user_id(self, details, response):\n return response['uid']",
"def get_series_info(self, series_id):\n url = \"http://api.stlouisfed.org/fred/series?series_id=%s&api_key=%s\" % (series_id, self.api_key)\n root = self.__fetch_data(url)\n if root is None:\n raise ValueError('No info exists for series id: ' + series_id)\n from pandas import Series\n info = Series(root.getchildren()[0].attrib)\n return info",
"def uid(self) -> str:\n return self._uid",
"def known_uid():\n return '48ee71d9-20f0-41fc-a99f-c518121a880e'",
"def uid (self):\n return self.__uid",
"def get_uuid(self):\n\t\treturn call_sdk_function('PrlUsrInfo_GetUuid', self.handle)",
"def uid(self):\n\n return self._uid",
"def study_instance_uid(self) -> Optional[str]:\n return self._study_instance_uid",
"def get_semeval_id(element):\n translation = {'OrgQuestion': 'ORGQ_ID',\n 'RelQuestion': 'RELQ_ID',\n 'RelComment': 'RELC_ID',\n 'Thread': 'THREAD_SEQUENCE'}\n\n if element.tag in translation.keys():\n return element.attrib[translation[element.tag]]\n return None"
] | [
"0.6043538",
"0.579238",
"0.57903546",
"0.5624048",
"0.5610217",
"0.56094086",
"0.55660325",
"0.5546598",
"0.5546598",
"0.5546598",
"0.55310714",
"0.5504808",
"0.54589754",
"0.54261786",
"0.5418715",
"0.5418603",
"0.53924775",
"0.53796464",
"0.5324746",
"0.5287385",
"0.52031845",
"0.51816523",
"0.51597494",
"0.51254934",
"0.5124649",
"0.51234037",
"0.5101957",
"0.50992024",
"0.50893897",
"0.5088682"
] | 0.74645054 | 0 |
Download the LIDC dataset into the output_folder directory and link the downloaded DICOMs with their annotation files. | def download_LIDC(output_folder, debug=False):
# Creating config file with path to dataset
_, config_file = create_config(output_folder, debug, "fed_lidc_idri")
# Get patient X study
patientXstudy = pd.read_json(
client.get_patient_study(collection="LIDC-IDRI").read().decode("utf-8")
)
# Get study X series
series = pd.read_json(
client.get_series(modality="CT", collection="LIDC-IDRI").read().decode("utf-8")
)
# Join both of them
patientXseries = patientXstudy.merge(series).iloc[:]
    # Some images have missing slices, so we remove the affected patients.
    # For reference, their index locations were: 385, 471, 890, 129, 110, 245, 80, 618, 524
bad_patientID = [
"LIDC-IDRI-0418",
"LIDC-IDRI-0514",
"LIDC-IDRI-0672",
"LIDC-IDRI-0146",
"LIDC-IDRI-0123",
"LIDC-IDRI-0267",
"LIDC-IDRI-0085",
"LIDC-IDRI-0979",
"LIDC-IDRI-0572",
]
patientXseries = patientXseries[~patientXseries["PatientID"].isin(bad_patientID)]
if debug:
patientXseries = patientXseries[:10]
# Download associated DICOMs
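    # One request per series, parallelised across n_cpus worker processes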
pool = multiprocessing.Pool(processes=n_cpus)
downloaded_paths = pool.starmap(
download_dicom_series,
zip(patientXseries.SeriesInstanceUID.tolist(), itertools.repeat(output_folder)),
)
# Download XML annotations
annotations_path = download_zip_from_url(ANNOTATION_URL, output_folder)
# Unzip everything and remove archives
zipped_folders = [
str(p) for p in Path(output_folder).glob("./*/") if str(p).endswith(".zip")
]
# Check zip integrity, and download corrupted files again
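    # ZipFile.testzip() returns the name of the first corrupt member, or None if the archive is intact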
for zipped_f in zipped_folders:
try:
while zipfile.ZipFile(zipped_f).testzip() is not None:
os.remove(zipped_f)
download_dicom_series(os.path.splitext(zipped_f)[0], output_folder)
except zipfile.BadZipFile:
os.remove(zipped_f)
download_dicom_series(os.path.splitext(zipped_f)[0], output_folder)
print(f"Bad zip file: {zipped_f}")
for zipped_f in zipped_folders:
with zipfile.ZipFile(zipped_f, "r") as zip_ref:
zip_file_name = re.sub(".zip", "", zipped_f)
# extract only if it does not exist or it is empty
if not os.path.isdir(zip_file_name) or len(os.listdir(zip_file_name)) == 0:
                os.makedirs(zip_file_name, exist_ok=True)
zip_ref.extractall(zip_file_name)
os.remove(zipped_f)
# For each patient we record the location of its DICOM
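    # (pool.starmap preserves input order, so the paths line up with the rows of patientXseries)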
patientXseries["extraction_location"] = downloaded_paths
# We tie back annotations to the original DICOMS
xmlfiles = glob.glob(os.path.join(annotations_path, "tcia-lidc-xml", "*", "*.xml"))
df = pd.DataFrame()
df["annotation_file"] = xmlfiles
# We initialize a dask dataframe to speed up computations
ddf = dd.from_pandas(df, npartitions=8)
df["SeriesInstanceUID"] = ddf.map_partitions(
lambda d: d["annotation_file"].apply(get_SeriesUID_from_xml)
).compute(scheduler="processes")
df = df[df.SeriesInstanceUID != "not found"]
df = df[df.SeriesInstanceUID != "notfound"]
    # Several xml files share the same SeriesInstanceUID and have the same content,
    # so at this point df has a length of 1026.
    # Next, we remove the duplicates; the correct number of files is then 1018.
df = df.drop_duplicates(subset=["SeriesInstanceUID"], keep="first")
patientXseries = df.merge(patientXseries, on="SeriesInstanceUID")
# Update yaml file
write_value_in_config(config_file, "download_complete", True)
return patientXseries | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fetch(data_dir, dest=\"aida\"):\n\n # Get CoNLL03\n conll_dir = conll03.fetch(data_dir)\n\n # Create folder\n aida_dir = os.path.join(data_dir, dest)\n utils.create_folder(aida_dir)\n\n # Download AIDA\n aida_file = os.path.join(aida_dir, AIDA_FILE)\n if not os.path.exists(aida_file):\n utils.urlretrieve(AIDA_URL, aida_file)\n\n # Extract annotations\n final_dir = os.path.join(aida_dir, AIDA_NAME)\n if not os.path.exists(final_dir):\n with zipfile.ZipFile(aida_file, \"r\") as aida:\n aida.extractall(aida_dir)\n\n # Run AIDA script\n final_file = os.path.join(final_dir, AIDA_FINAL_FILE)\n if not os.path.exists(final_file):\n os.chdir(final_dir)\n subprocess.call(AIDA_SCRIPT.format(conll_dir), shell=True)\n\n return final_dir",
"def auto_download(self, dataDir, dataType, dataYear):\n\n # Setup paths and file names\n if dataType == \"minival\" or dataType == \"valminusminival\":\n imgDir = \"{}/{}{}\".format(dataDir, \"val\", dataYear)\n imgZipFile = \"{}/{}{}.zip\".format(dataDir, \"val\", dataYear)\n imgURL = \"http://images.cocodataset.org/zips/{}{}.zip\".format(\"val\", dataYear)\n else:\n imgDir = \"{}/{}{}\".format(dataDir, dataType, dataYear)\n imgZipFile = \"{}/{}{}.zip\".format(dataDir, dataType, dataYear)\n imgURL = \"http://images.cocodataset.org/zips/{}{}.zip\".format(dataType, dataYear)\n # print(\"Image paths:\"); print(imgDir); print(imgZipFile); print(imgURL)\n\n # Create main folder if it doesn't exist yet\n if not os.path.exists(dataDir):\n os.makedirs(dataDir)\n\n # Download images if not available locally\n if not os.path.exists(imgDir):\n os.makedirs(imgDir)\n print(\"Downloading images to \" + imgZipFile + \" ...\")\n with urllib.request.urlopen(imgURL) as resp, open(imgZipFile, 'wb') as out:\n shutil.copyfileobj(resp, out)\n print(\"... done downloading.\")\n print(\"Unzipping \" + imgZipFile)\n with zipfile.ZipFile(imgZipFile, \"r\") as zip_ref:\n zip_ref.extractall(dataDir)\n print(\"... done unzipping\")\n print(\"Will use images in \" + imgDir)\n\n # Setup annotations data paths\n annDir = \"{}/annotations\".format(dataDir)\n if dataType == \"minival\":\n annZipFile = \"{}/instances_minival2014.json.zip\".format(dataDir)\n annFile = \"{}/instances_minival2014.json\".format(annDir)\n annURL = \"https://dl.dropboxusercontent.com/s/o43o90bna78omob/instances_minival2014.json.zip?dl=0\"\n unZipDir = annDir\n elif dataType == \"valminusminival\":\n annZipFile = \"{}/instances_valminusminival2014.json.zip\".format(dataDir)\n annFile = \"{}/instances_valminusminival2014.json\".format(annDir)\n annURL = \"https://dl.dropboxusercontent.com/s/s3tw5zcg7395368/instances_valminusminival2014.json.zip?dl=0\"\n unZipDir = annDir\n else:\n annZipFile = \"{}/annotations_trainval{}.zip\".format(dataDir, dataYear)\n annFile = \"{}/instances_{}{}.json\".format(annDir, dataType, dataYear)\n annURL = \"http://images.cocodataset.org/annotations/annotations_trainval{}.zip\".format(dataYear)\n unZipDir = dataDir\n # print(\"Annotations paths:\"); print(annDir); print(annFile); print(annZipFile); print(annURL)\n\n # Download annotations if not available locally\n if not os.path.exists(annDir):\n os.makedirs(annDir)\n if not os.path.exists(annFile):\n if not os.path.exists(annZipFile):\n print(\"Downloading zipped annotations to \" + annZipFile + \" ...\")\n with urllib.request.urlopen(annURL) as resp, open(annZipFile, 'wb') as out:\n shutil.copyfileobj(resp, out)\n print(\"... done downloading.\")\n print(\"Unzipping \" + annZipFile)\n with zipfile.ZipFile(annZipFile, \"r\") as zip_ref:\n zip_ref.extractall(unZipDir)\n print(\"... done unzipping\")\n print(\"Will use annotations in \" + annFile)",
"def auto_download(dataDir, dataType, dataYear):\n\n # Setup paths and file names\n if dataType == \"minival\" or dataType == \"valminusminival\":\n imgDir = \"{}/{}{}\".format(dataDir, \"val\", dataYear)\n imgZipFile = \"{}/{}{}.zip\".format(dataDir, \"val\", dataYear)\n imgURL = \"http://images.cocodataset.org/zips/{}{}.zip\".format(\"val\", dataYear)\n else:\n imgDir = \"{}/{}{}\".format(dataDir, dataType, dataYear)\n imgZipFile = \"{}/{}{}.zip\".format(dataDir, dataType, dataYear)\n imgURL = \"http://images.cocodataset.org/zips/{}{}.zip\".format(dataType, dataYear)\n # print(\"Image paths:\"); print(imgDir); print(imgZipFile); print(imgURL)\n\n # Create main folder if it doesn't exist yet\n if not os.path.exists(dataDir):\n os.makedirs(dataDir)\n\n # Download images if not available locally\n if not os.path.exists(imgDir):\n os.makedirs(imgDir)\n print(\"Downloading images to \" + imgZipFile + \" ...\")\n with urllib.request.urlopen(imgURL) as resp, open(imgZipFile, 'wb') as out:\n shutil.copyfileobj(resp, out)\n print(\"... done downloading.\")\n print(\"Unzipping \" + imgZipFile)\n with zipfile.ZipFile(imgZipFile, \"r\") as zip_ref:\n zip_ref.extractall(dataDir)\n print(\"... done unzipping\")\n print(\"Will use images in \" + imgDir)\n\n # Setup annotations data paths\n annDir = \"{}/annotations\".format(dataDir)\n if dataType == \"minival\":\n annZipFile = \"{}/instances_minival2014.json.zip\".format(dataDir)\n annFile = \"{}/instances_minival2014.json\".format(annDir)\n annURL = \"https://dl.dropboxusercontent.com/s/o43o90bna78omob/instances_minival2014.json.zip?dl=0\"\n unZipDir = annDir\n elif dataType == \"valminusminival\":\n annZipFile = \"{}/instances_valminusminival2014.json.zip\".format(dataDir)\n annFile = \"{}/instances_valminusminival2014.json\".format(annDir)\n annURL = \"https://dl.dropboxusercontent.com/s/s3tw5zcg7395368/instances_valminusminival2014.json.zip?dl=0\"\n unZipDir = annDir\n else:\n annZipFile = \"{}/annotations_trainval{}.zip\".format(dataDir, dataYear)\n annFile = \"{}/instances_{}{}.json\".format(annDir, dataType, dataYear)\n annURL = \"http://images.cocodataset.org/annotations/annotations_trainval{}.zip\".format(dataYear)\n unZipDir = dataDir\n # print(\"Annotations paths:\"); print(annDir); print(annFile); print(annZipFile); print(annURL)\n\n # Download annotations if not available locally\n if not os.path.exists(annDir):\n os.makedirs(annDir)\n if not os.path.exists(annFile):\n if not os.path.exists(annZipFile):\n print(\"Downloading zipped annotations to \" + annZipFile + \" ...\")\n with urllib.request.urlopen(annURL) as resp, open(annZipFile, 'wb') as out:\n shutil.copyfileobj(resp, out)\n print(\"... done downloading.\")\n print(\"Unzipping \" + annZipFile)\n with zipfile.ZipFile(annZipFile, \"r\") as zip_ref:\n zip_ref.extractall(unZipDir)\n print(\"... done unzipping\")\n print(\"Will use annotations in \" + annFile)",
"def download_coco(): \n file_type = '.zip'\n img_to_download = ['val','test','train']\n ann_to_download = ['annotations_trainval','image_info_test']\n base_url_images = 'http://images.cocodataset.org/zips/'\n base_url_ann = 'http://images.cocodataset.org/annotations/'\n\n\n click.echo(click.style(f\"\\n DOWNLOAD ANNOTATIONS \\n\", bg='green', bold=True, fg='white'))\n for ann in ann_to_download:\n\n ## build Urls\n ann_url = base_url_ann + ann + str(cfg.COCO_YEARS) + file_type\n \n click.echo(click.style(f'\\nDownloading of {ann} ...\\n', bg='blue', bold=True, fg='white'))\n click.echo(f'{ann} will be downloaded')\n\n zip_filename_location = save_zip_from_url(ann_url,cfg.PATH_ANNOTATIONS)\n #zip_filename_location = \"/home/kamgo-gpu/Schreibtisch/stuff_annotations_trainval2017.zip\"\n click.echo(f\"the downloaded zip file was saved in to {zip_filename_location}\")\n\n click.echo(click.style(f'\\n Extraction of {ann} ...\\n', bg='blue', bold=True, fg='white'))\n click.echo(f'{ann} will be extracted and the zip-file will be deleted')\n\n # Extract zip to annotation directory\n Extract_zip_file(zip_filename_location,cfg.PATH_ANNOTATIONS)\n\n click.echo(click.style(f\"\\n DOWNLOAD IMAGES \\n\", bg='green', bold=True, fg='white'))\n for dataset in img_to_download:\n ## build Urls\n dataset_img_url = base_url_images + dataset + str(cfg.COCO_YEARS) + file_type\n \n click.echo(click.style(f'\\n Downloading of {dataset} ...\\n', bg='blue', bold=True, fg='white'))\n click.echo(f'{dataset} will be downloaded')\n\n zip_filename_location = save_zip_from_url(dataset_img_url,cfg.PATH_IMAGES)\n click.echo(f\"the downloaded zip file was saved in to {zip_filename_location}\")\n click.echo(click.style(f'\\n Extraction of {dataset} ...\\n', bg='blue', bold=True, fg='white'))\n click.echo(f'{dataset} will be extracted and the zip-File will be deleted')\n\n # set complet Path to save images\n Extract_zip_file(zip_filename_location,cfg.PATH_IMAGES)\n\n click.echo(click.style(f'\\n Download and extraction termined successfull {dataset} ...\\n', bg='green', bold=True, fg='white'))",
"def download(directory: str) -> None:\n path = f'{directory}/m5/datasets'\n if not os.path.exists(path):\n download_file(directory=path,\n source_url=M5.source_url,\n decompress=True)",
"def download():\n\n trainset = torchvision.datasets.CIFAR10(root=paths.raw_cifar10_dir(), train=True, download=True)\n testset = torchvision.datasets.CIFAR10(root=paths.raw_cifar10_dir(), train=False, download=True)\n train_images = numpy.array(trainset.data)\n train_labels = numpy.array(trainset.targets)\n test_images = numpy.array(testset.data)\n test_labels = numpy.array(testset.targets)\n\n assert numpy.max(train_images) == 255\n\n train_images = train_images/255.\n test_images = test_images/255.\n\n utils.write_hdf5(paths.cifar10_train_images_file(), train_images.astype(numpy.float32))\n log('wrote %s' % paths.cifar10_train_images_file())\n utils.write_hdf5(paths.cifar10_test_images_file(), test_images.astype(numpy.float32))\n log('wrote %s' % paths.cifar10_test_images_file())\n utils.write_hdf5(paths.cifar10_train_labels_file(), train_labels.reshape(-1, 1).astype(numpy.int))\n log('wrote %s' % paths.cifar10_train_labels_file())\n utils.write_hdf5(paths.cifar10_test_labels_file(), test_labels.reshape(-1, 1).astype(numpy.int))\n log('wrote %s' % paths.cifar10_test_labels_file())",
"def download(self):\n\n with open(self.dataset_path) as dataset_file:\n dataset = json.load(dataset_file)\n\n path = \"\".join([POST_HIT_PATH, dataset[\"dataset\"][\"data_path\"]])\n if not os.path.exists(path):\n os.makedirs(path)\n\n protocole = dataset[\"dataset\"][\"protocole\"]\n\n download_links = []\n\n for resource in dataset[\"dataset\"][\"resources\"]:\n file_path = \"\".join([path, resource[\"filename\"]])\n\n #Check if the the download link has not been used before (One download link for all)\n if resource[\"download_link\"] not in download_links:\n \n print(\"DOWNLOADING : {}\".format(resource[\"filename\"]))\n f = urllib.request.urlopen(resource[\"download_link\"])\n data = f.read()\n with open(file_path, \"wb\") as donwload_file:\n donwload_file.write(data)\n\n download_links.append(resource[\"download_link\"])\n\n \n #Extract all files from the tar archives if necessary\n if tarfile.is_tarfile(file_path):\n tf = tarfile.open(file_path)\n tf.exractall()",
"def download_datasets():\n if not os.path.exists(\"__data__/cornell/movie_conversations.txt\") \\\n or not os.path.exists(\"__data__/cornell/movie_lines.txt\"):\n subprocess.call(['scripts/download_cornell.sh'])\n if not os.path.isdir('__data__/opensubs'):\n subprocess.call(['scripts/download_opensubs.sh'])",
"def main(cdl_ws, cdl_year='', overwrite_flag=False):\r\n logging.info('\\nDownload and extract CONUS CDL rasters')\r\n site_url = 'ftp://ftp.nass.usda.gov/download/res'\r\n\r\n cdl_format = '{}_30m_cdls.{}'\r\n\r\n for cdl_year in list(util.parse_int_set(cdl_year)):\r\n logging.info('Year: {}'.format(cdl_year))\r\n zip_name = cdl_format.format(cdl_year, 'zip')\r\n zip_url = site_url + '/' + zip_name\r\n zip_path = os.path.join(cdl_ws, zip_name)\r\n\r\n cdl_path = os.path.join(cdl_ws, cdl_format.format(cdl_year, 'img'))\r\n if not os.path.isdir(cdl_ws):\r\n os.makedirs(cdl_ws)\r\n\r\n if os.path.isfile(zip_path) and overwrite_flag:\r\n os.remove(zip_path)\r\n if not os.path.isfile(zip_path):\r\n logging.info(' Download CDL files')\r\n logging.debug(' {}'.format(zip_url))\r\n logging.debug(' {}'.format(zip_path))\r\n try:\r\n urllib.urlretrieve(zip_url, zip_path)\r\n except IOError as e:\r\n logging.error(' IOError, skipping')\r\n logging.error(e)\r\n\r\n if os.path.isfile(cdl_path) and overwrite_flag:\r\n util.remove_file(cdl_path)\r\n if os.path.isfile(zip_path) and not os.path.isfile(cdl_path):\r\n logging.info(' Extracting CDL files')\r\n with zipfile.ZipFile(zip_path) as zf:\r\n zf.extractall(cdl_ws)",
"def build_lidc_dataset(self,lidc_dir, output_dir, version='v9'):\r\n if version == 'v9':\r\n order = 'Location'\r\n elif version == 'v15':\r\n order = 'InstanceNumber'\r\n else:\r\n raise ValueError('Unknown version')\r\n \r\n if not os.path.isdir(output_dir):\r\n os.makedirs(output_dir)\r\n \r\n uid2lidc = np.load(r'C:\\Users\\csce\\Desktop\\nova\\lung_tumer\\data\\LIDC-IDRI\\uid2lidc.pkl')\r\n lidc2img = dict()\r\n label_dict = dict()\r\n vol_dataset = h5py.File(os.path.join(output_dir, 'vol.hdf5'), 'x')\r\n \r\n # main\r\n patient_dirs = os.listdir(lidc_dir)\r\n patient_dirs.sort()\r\n for patient_dir in patient_dirs:\r\n for study_dir in glob('%s/%s/*' % (lidc_dir, patient_dir)):\r\n for series_name in os.listdir(study_dir):\r\n series_dir = os.path.join(study_dir, series_name)\r\n if not self.dicom_parser.check_dcm_dir(series_dir):\r\n continue\r\n \r\n scan_name = uid2lidc.get(series_name) # scans of interest\r\n if scan_name is None:\r\n continue\r\n else:\r\n print(scan_name)\r\n \r\n vol,sop, loc, spacing, slice_thickness, series_uid = self.dicom_parser.parse_dicom(series_dir, order=order)\r\n #parse xml file to get roi info\r\n xml_files = glob(\"%s/*.xml\" % series_dir)\r\n #assert (len(xml_files)==1),\"One dicom directory must only have one xml file %s\" % series_dir\r\n xml_file = xml_files[0]\r\n nodule_slices = self.ann_parser.parse(xml_file)\r\n vols = self.build_vol(nodule_slices,sop)\r\n #assert(len(nodule_slices)==len(vols)), \"num of nodules and num of vols dont match %d vs %d\" %(len(nodule_slices),len(vols))\r\n label_dict[scan_name] = vols\r\n \r\n lidc2img[scan_name] = sop\r\n error_info = self.dicom_parser.check_location(loc, spacing[-1])\r\n if error_info is not None:\r\n logging.warning(error_info)\r\n \r\n vol_data = vol_dataset.create_dataset(scan_name, data=vol, dtype=np.int16,\r\n chunks=(512, 512, 1), compression='gzip')\r\n vol_data.attrs['spacing'] = spacing\r\n if slice_thickness is not None:\r\n vol_data.attrs['slice_thickness'] = slice_thickness\r\n \r\n vol_dataset.close()\r\n with open(r'C:\\Users\\csce\\Desktop\\nova\\lung_tumer\\data\\LIDC-IDRI\\label_dict.pkl','a') as f:\r\n cPickle.dump(label_dict,f)",
"def __download(self):\n\n if self.__check_exists():\n return\n\n print(\"Downloading AudioMNIST dataset\")\n\n # download files\n try:\n os.makedirs(self.__path)\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass\n else:\n raise\n\n if not os.path.exists(os.path.join(self.__path, 'AudioMNIST-master.zip')):\n url = 'https://github.com/soerenab/AudioMNIST/archive/master.zip'\n wget_data = wget.download(url, out=self.__path)\n\n archive = zipfile.ZipFile(wget_data)\n\n for file in archive.namelist():\n if file.startswith('AudioMNIST-master/data/'):\n archive.extract(file, self.__path)\n\n print(\"Download successful\")\n\n audio_mnist_src = os.path.join(self.__path, 'AudioMNIST-master/data/')\n data = np.array(glob.glob(os.path.join(audio_mnist_src, \"**/*.wav\")))\n\n train_images = []\n train_labels = []\n test_images = []\n test_labels = []\n\n # first 5-cross-validation set from https://github.com/soerenab/AudioMNIST/blob/master/preprocess_data.py\n train_folders = [28, 56, 7, 19, 35, 1, 6, 16, 23, 34, 46, 53, 36, 57, 9, 24, 37, 2,\n 8, 17, 29, 39, 48, 54, 43, 58, 14, 25, 38, 3, 10, 20, 30, 40, 49, 55,\n 12, 47, 59, 15, 27, 41, 4, 11, 21, 31, 44, 50]\n test_folders = [26, 52, 60, 18, 32, 42, 5, 13, 22, 33, 45, 51]\n\n print(\"Converting audio to images\")\n # create train and test folders and save audios as images\n for filepath in tqdm(data):\n # the last one is just a counter for repeat of each digit, e.g. say zero once, twice, third time..\n\n dig, vp, rep = filepath.rstrip(\".wav\").split(\"/\")[-1].split(\"_\")\n\n # according to https://github.com/soerenab/AudioMNIST/blob/master/preprocess_data.py\n fs, data = wavf.read(filepath)\n\n # resample\n data = librosa.core.resample(y=data.astype(np.float32), orig_sr=fs, target_sr=8000, res_type=\"scipy\")\n # zero padding\n if len(data) > 8000:\n raise ValueError(\"data length cannot exceed padding length.\")\n elif len(data) < 8000:\n embedded_data = np.zeros(8000)\n offset = np.random.randint(low=0, high=8000 - len(data))\n embedded_data[offset:offset + len(data)] = data\n elif len(data) == 8000:\n # nothing to do here\n embedded_data = data\n pass\n\n # 1. 
fourier transform\n # stft, with selected parameters, spectrogram will have shape (228, 230)\n f, t, zxx = scipy.signal.stft(embedded_data, 8000, nperseg=455, noverlap=420, window='hann')\n # get amplitude\n zxx = np.abs(zxx[0:227, 2:-1])\n\n # if not 2, then convert to decibel\n zxx = librosa.amplitude_to_db(zxx, ref=np.max)\n\n # normalize from range -80,0 to 0,1\n zxx = (zxx - zxx.min()) / (zxx.max() - zxx.min())\n\n zxx = zxx[::-1] # reverse the order of frequencies to fit the images in the paper\n zxx = np.atleast_3d(zxx).transpose(2, 0, 1) # reshape to (1, img_dim_h, img_dim_w)\n\n # decide to which list to add (train or test)\n if int(vp) in train_folders:\n train_images.append(zxx)\n train_labels.append(int(dig))\n elif int(vp) in test_folders:\n test_images.append(zxx)\n test_labels.append(int(dig))\n else:\n raise Exception('Person neither in train nor in test set!')\n\n train_images = torch.Tensor(train_images).float()\n train_labels = torch.Tensor(train_labels).long()\n test_images = torch.Tensor(test_images).float()\n test_labels = torch.Tensor(test_labels).long()\n\n torch.save(train_images, os.path.join(self.__path, 'train_images_tensor.pt'))\n torch.save(train_labels, os.path.join(self.__path, 'train_labels_tensor.pt'))\n torch.save(test_images, os.path.join(self.__path, 'test_images_tensor.pt'))\n torch.save(test_labels, os.path.join(self.__path, 'test_labels_tensor.pt'))\n\n print('Done!')",
"def download_and_prepare_dmipy_example_dataset(self):\r\n subject_ID = 100307\r\n self.download_subject(subject_ID)\r\n self.prepare_example_slice(subject_ID)",
"def download_dataset(dataset, destination):\n\n # Get images belonging to the requested dataset from cache\n cache_df = pd.read_csv(cache_file)\n df = cache_df.loc[cache_df['dataset.name'] == dataset]\n assert (df.shape[0] > 0), \"Dataset {0} does not exist\".format(dataset)\n\n # Create metadata for dataset that includes the file image paths\n print(\"Preprocessing metadata.\")\n files = []\n for _, row in df.iterrows():\n\n if type(row[\"meta.clinical.diagnosis\"]) == str:\n path = os.path.join(row[\"dataset.name\"], slugify(row[\"meta.clinical.diagnosis\"]))\n elif type(row[\"meta.clinical.diagnosis\"]) == str:\n path = os.path.join(row[\"dataset.name\"], slugify(row[\"meta.clinical.benign_malignant\"]))\n else:\n path = os.path.join(row[\"dataset.name\"], \"unknown\")\n\n files.append(os.path.join(path, \"{}.jpg\".format(row[\"_id\"])))\n df[\"file\"] = files\n df.to_csv(os.path.join(destination, \"{0}.csv\".format(dataset)), index=False)\n\n # Download images\n print(\"Downloading images from dataset: {}\".format(dataset))\n for _, row in tqdm(df.iterrows(), total=df.shape[0], desc=\"Downloading images\", unit=\"img\"):\n isic.download_image(row[\"_id\"], os.path.join(destination,row[\"file\"]))",
"def download():\n base_loc = DATA_DIR + '/raw/human_activity'\n loc = base_loc + '/human_activity.zip'\n if os.path.exists(loc):\n print('Path already exists at {}. If you wish to re-download you must delete this folder.'.format(loc))\n return\n if not os.path.exists(base_loc):\n os.mkdir(base_loc)\n\n url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00341/HAPT%20Data%20Set.zip'\n urllib.request.urlretrieve(url, loc)\n\n with zipfile.ZipFile(loc, 'r') as zip_ref:\n zip_ref.extractall(base_loc)",
"def download(self, verbose):\n\n # Download datasets\n if verbose:\n print(\"Retrieving datasets from COVID-19 Open Data by Google Cloud Platform https://github.com/GoogleCloudPlatform/covid-19-open-data\")\n # Index\n i_cols = [\"location_key\", \"country_name\", \"subregion1_name\", \"subregion2_name\", \"iso_3166_1_alpha_3\"]\n i_df = pd.read_csv(self.URL_I, usecols=i_cols)\n # Mobility\n m_df = pd.read_csv(self.URL_M)\n m_df = (m_df.set_index([\"date\", \"location_key\"]) + 100).reset_index()\n # Combine data\n df = m_df.merge(i_df, how=\"left\", on=\"location_key\")\n # Location (country/province)\n df = df.loc[df[\"subregion2_name\"].isna()]\n df[self.PROVINCE] = df[\"subregion1_name\"].fillna(self.UNKNOWN).apply(unidecode)\n df[\"country_name\"] = df[\"country_name\"].replace(\n {\n # CIV\n \"Ivory Coast\": \"Cote d'Ivoire\",\n }\n )\n return df",
"def download_data_files(self, dest_directory):\n\t\tif not os.path.exists(dest_directory):\n\t\t\tos.makedirs(dest_directory)\n\t\tfilename = DATA_URL.split('/')[-1]\n\t\tfilepath = os.path.join(dest_directory, filename)\n\t\tif not os.path.exists(filepath):\n\t\t\tdef _progress(count, block_size, total_size):\n\t\t\t\tsys.stdout.write('\\r>> Downloading %s %.1f%%' % (filename,\n\t\t\t\t\t\tfloat(count * block_size) / float(total_size) * 100.0))\n\t\t\t\tsys.stdout.flush()\n\t\t\tfilepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n\t\t\tprint()\n\t\t\tstatinfo = os.stat(filepath)\n\t\t\tprint('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n\t\textracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')\n\t\tif not os.path.exists(extracted_dir_path):\n\t\t\ttarfile.open(filepath, 'r:gz').extractall(dest_directory)",
"def download_coco_dataset():\n # Create file structure\n os.makedirs(os.path.join(\"data\", \"coco\", \"train\"), exist_ok=True)\n os.makedirs(os.path.join(\"data\", \"coco\", \"dev\"), exist_ok=True)\n os.makedirs(os.path.join(\"data\", \"coco\", \"test\"), exist_ok=True)\n # Download the train, dev and test datasets\n print(\"Downloading COCO dataset.\")\n url = \"http://images.cocodataset.org/zips/train2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"train2014.zip\"))\n url = \"http://images.cocodataset.org/zips/val2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"val2014.zip\"))\n url = \"http://images.cocodataset.org/zips/test2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"test2014.zip\"))\n print(\"Done downloading COCO dataset.\")\n # Unzip the files\n print(\"Extracting COCO dataset.\")\n # Extract Train dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"train2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"train2014\"),\n os.path.join(\"data\", \"coco\", \"train\", \"dummy\"),\n )\n # Extract Validation dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"val2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"val2014\"),\n os.path.join(\"data\", \"coco\", \"dev\", \"dummy\"),\n )\n # Extract Test dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"test2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"test2014\"),\n os.path.join(\"data\", \"coco\", \"test\", \"dummy\"),\n )\n print(\"Done extracting COCO dataset.\")",
"def download(self, verbose):\n # Download datasets\n if verbose:\n print(\"Retrieving datasets from Our World In Data https://github.com/owid/covid-19-data/\")\n # Vaccinations\n v_rec_cols = [\n \"date\", \"location\", \"iso_code\", \"total_vaccinations\", \"people_vaccinated\", \"people_fully_vaccinated\"]\n v_rec_df = pd.read_csv(self.URL_V_REC, usecols=v_rec_cols)\n v_loc_df = pd.read_csv(self.URL_V_LOC, usecols=[\"location\", \"vaccines\"])\n v_df = v_rec_df.merge(v_loc_df, how=\"left\", on=\"location\")\n # Tests\n pcr_rec_cols = [\"ISO code\", \"Date\", \"Daily change in cumulative total\", \"Cumulative total\"]\n pcr_df = pd.read_csv(self.URL_P_REC, usecols=pcr_rec_cols)\n pcr_df = pcr_df.rename(columns={\"ISO code\": \"iso_code\", \"Date\": \"date\"})\n pcr_df[\"cumsum\"] = pcr_df.groupby(\"iso_code\")[\"Daily change in cumulative total\"].cumsum()\n pcr_df = pcr_df.assign(tests=lambda x: x[\"Cumulative total\"].fillna(x[\"cumsum\"]))\n # Combine data (vaccinations/tests)\n df = v_df.set_index([\"iso_code\", \"date\"])\n df = df.combine_first(pcr_df.set_index([\"iso_code\", \"date\"]).loc[:, [\"tests\"]])\n df = df.reset_index()\n # Location (country/province)\n df[\"location\"] = df[\"location\"].replace(\n {\n # COG\n \"Congo\": \"Republic of the Congo\",\n }\n )\n df = df.loc[~df[\"iso_code\"].str.contains(\"OWID_\")]\n df[\"location\"] = df.groupby(\"iso_code\")[\"location\"].bfill()\n df.loc[df[\"location\"] == df[\"iso_code\"], \"location\"] = None\n df.loc[df[\"location\"].isna(), \"location\"] = df.loc[df[\"location\"].isna(), \"iso_code\"].apply(\n lambda x: coco.convert(x, to=\"name_short\", not_found=None))\n df[self.PROVINCE] = self.UNKNOWN\n return df",
"def downloadImages(self):\n\t\ti = 0\n\t\tfor im in self.images:\n\t\t\t# Let's get the file extension and file name and make the final file path. \n\t\t\t# We need to do this to slugify the file name and avoid errors when loading images\n\t\t\tfile_name, file_extension = os.path.splitext(im['url'])\n\t\t\tfile_name = file_name.split(\"/\")[-1]\n\n\t\t\tfile_path = self.data_path + self.dataset + \"/\" + im['slug'] + '/' + str(im['id']) + '_' + slugify(file_name) + file_extension\n\n\t\t\t# If file is not in the file path, then download from the url\n\t\t\tif not os.path.exists(file_path):\n\t\t\t\ttry:\n\t\t\t\t\turllib.urlretrieve(im['url'], file_path )\n\t\t\t\t\tprint \"i:{} url:{}\".format(i,im['url'])\n\t\t\t\texcept Exception, e:\n\t\t\t\t\tprint e\n\t\t\ti += 1",
"def download(self):\n\n if self._check_exists():\n return\n\n os.makedirs(self.raw_folder, exist_ok=True)\n os.makedirs(self.processed_folder, exist_ok=True)\n\n # download files\n for url in self.resources:\n filename = url.rpartition('/')[2]\n download_and_extract_archive(url, download_root=self.raw_folder, filename=filename, md5=None)\n\n print('Processing...')\n\n training_set = (\n self.read_image_label_file(os.path.join(self.raw_folder, 'mnist_all_rotation_normalized_float_train_valid.amat'))\n )\n test_set = (\n self.read_image_label_file(os.path.join(self.raw_folder, 'mnist_all_rotation_normalized_float_test.amat'))\n )\n\n with open(os.path.join(self.processed_folder, self.training_file), 'wb') as f:\n torch.save(training_set, f)\n with open(os.path.join(self.processed_folder, self.test_file), 'wb') as f:\n torch.save(test_set, f)\n\n print('Done!')",
"def download(root: str) -> None:\n for ix in [1, 2]:\n fn = f\"lizard_images{ix}.zip\"\n url = f\"https://warwick.ac.uk/fac/cross_fac/tia/data/lizard/{fn}\"\n SimpleDownloader.download(url, root)\n\n url = \"https://warwick.ac.uk/fac/cross_fac/tia/data/lizard/lizard_labels.zip\"\n SimpleDownloader.download(url, root)\n LizardDataModule.extract_zips(root, rm=True)",
"def download(self):\n cloud_path = f\"gs://{const.GCS_BUCKET}/{self.GCS_PATH}\"\n # download label file\n label_zip = download_file_from_gcs(\n cloud_path, self.root, self.LABEL_ZIP\n )\n with zipfile.ZipFile(label_zip, \"r\") as zip_dir:\n zip_dir.extractall(self.root)\n\n # download tfexamples for a dataset split\n tfexamples_zip = download_file_from_gcs(\n cloud_path, self.root, self.SPLITS_ZIP.get(self.split)\n )\n with zipfile.ZipFile(tfexamples_zip, \"r\") as zip_dir:\n zip_dir.extractall(self.root)",
"def download_all_datasets():\n print(\"Downloading all datasets ...\")\n for dataset in get_available_datasets():\n download_dataset(dataset)",
"def fetch_scil_b0():\n zipname = 'datasets_multi-site_all_companies'\n url = 'http://scil.dinf.usherbrooke.ca/wp-content/data/'\n uraw = url + zipname + '.zip'\n dipy_home = pjoin(os.path.expanduser('~'), '.dipy')\n folder = pjoin(dipy_home, zipname)\n\n if not os.path.exists(folder):\n print('Creating new directory %s' % folder)\n os.makedirs(folder)\n print('Downloading SCIL b=0 datasets from multiple sites and multiple companies (9.2MB)...')\n opener = urlopen(uraw)\n open(folder+'.zip', 'wb').write(opener.read())\n\n print('Unziping '+folder+'.zip ...')\n zip = zipfile.ZipFile(folder+'.zip', 'r')\n zip.extractall(dipy_home)\n\n print('Done.')\n print('Files copied in folder %s' % dipy_home)\n else:\n print('Dataset already in place. If you want to fetch again please first remove folder %s ' % dipy_home)",
"def main(src_dir, dst_dir='pleiades', print_cfg_ipol=False):\n for dataset in os.listdir(src_dir):\n dataset_abspath = os.path.join(src_dir, dataset)\n if os.path.isdir(dataset_abspath):\n if 'dataset_1' in os.listdir(dataset_abspath): # the dataset has subdatasets (multidate)\n for subdataset in os.listdir(dataset_abspath):\n if os.path.isdir(os.path.join(dataset_abspath, subdataset)):\n l = list_images_in_dataset(os.path.join(dataset_abspath, subdataset))\n mkdir_p(os.path.join(dst_dir, dataset, subdataset))\n create_links(l, os.path.join(dst_dir, dataset, subdataset), print_cfg_ipol)\n else: # the dataset doesn't have subdatasets (monodate)\n l = list_images_in_dataset(dataset_abspath)\n mkdir_p(os.path.join(dst_dir, dataset))\n create_links(l, os.path.join(dst_dir, dataset), print_cfg_ipol)",
"def download_dataset(self):\n raise NotImplementedError",
"def _download(self, path):\n self.logger.info('Getting Million Song Dataset...')\n self.logger.info('Downloading Echo Nest Taste Subprofile train data...')\n base_url = 'http://millionsongdataset.com/sites/default/files/challenge/'\n\n download_dataset(\n base_url + 'train_triplets.txt.zip',\n join(self.data_folder, 'train.zip')\n )\n rename(join(self.data_folder, 'train'), path)\n\n self.logger.info('Downloading evaluation data for MSD Challenge...')\n download_dataset(\n base_url + 'EvalDataYear1MSDWebsite.zip',\n join(path, 'eval.zip')\n )\n rename(\n join(path, 'EvalDataYear1MSDWebsite'),\n join(path, 'evaluation')\n )\n\n self.logger.info('Downloading list of matching errors...')\n url = 'http://millionsongdataset.com/sites/default/files/tasteprofile/sid_mismatches.txt'\n download_url(url, join(path, 'sid_mismatches.txt'))",
"def download(self):\n file_url = posixpath.join(self.mirrors, self.resources)\n _urlretrieve(file_url, os.path.join(self.root, self.resources))",
"def download(self):\n file_url = posixpath.join(self.mirrors, self.resources)\n _urlretrieve(file_url, os.path.join(self.root, self.resources))",
"def data_fetch_netcdf(self):\n self.client = boto3.client('s3', aws_access_key_id=self.creds_data['key_id'],\n aws_secret_access_key=self.creds_data['key_access'])\n year = self.month_year[0]\n month = self.month_year[1]\n # change output folder to desired location from TRMM website\n # folder structure to partitioned the data year_month\n output_temp = self.output_folder + year + '_' + month\n url_data = \"http://trmm.atmos.washington.edu/{}interp_data/{}/{}\".format(self.output_folder, year, month)\n print(url_data)\n start_time_year_month = time.time()\n r = requests.get(url_data, auth=self.auth_data)\n # check if url exists then extract netcdf links to download and upload to s3.\n if r.status_code == 200:\n soup = BeautifulSoup(r.text, features='lxml')\n for link in soup.findAll('a'):\n link_url = link.get('href')\n write_path = os.path.join(output_temp, link_url)\n if link_url.endswith('.nc4'):\n file_url = url_data + '/' + link_url\n r = requests.get(file_url, auth=self.auth_data, stream=True)\n if r.status_code == 200:\n self.client.put_object(Body=r.content, Bucket='himatdata', Key='Trmm/' + write_path)\n logging.info(\"Done with Year Month: %s\", month_year)\n print(\"--- %s seconds ---\" % (time.time() - start_time_year_month))\n\n else:\n print('No data/authentication for'.format(month_year))"
] | [
"0.6643622",
"0.64993733",
"0.64365935",
"0.6259419",
"0.61793226",
"0.60971385",
"0.60954",
"0.6052573",
"0.5992943",
"0.5989011",
"0.59568894",
"0.59420604",
"0.58430636",
"0.58426553",
"0.58276373",
"0.5802974",
"0.57967013",
"0.57699454",
"0.5739426",
"0.5739216",
"0.57308984",
"0.5728884",
"0.5725241",
"0.56844157",
"0.56781423",
"0.56669134",
"0.5625988",
"0.5615101",
"0.5615101",
"0.561278"
] | 0.79258645 | 0 |
Save DataFrame to PostgreSQL via JDBC postgresql driver | def psql_saver(spark, df, tbname, savemode='error'):
df.createOrReplaceTempView("view")
spark.sql('''SELECT * FROM view''').write \
.format('jdbc') \
.option('url', 'jdbc:postgresql://%s' % __credential__.jdbc_accessible_host_psql) \
.option('dbtable', tbname) \
.option('user', __credential__.user_psql) \
.option('password', __credential__.password_psql) \
.mode(savemode) \
.save() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def writeToJDBC(df,tableName,spark):\n #df.table(tableName).write.jdbc(config.jdbcUrl,tableName,config.connectionProperties)\n #df = df.na.fill(0)\n mode= \"overwrite\"\n #print(\"jdbcURL: \",config.jdbcUrl,\"\\ntable Name :\",tableName,\"\\nmode:\",mode,\"\\nconnection property\",config.connectionProperties,\"\\n\")\n try:\n \n df.write.jdbc(url=config.jdbcUrl, table=tableName, mode=mode, properties=config.connectionProperties)\n print(\"Inserting data into PostgreSQL...\", \"\\n\")\n except Exception as e:\n print(e)",
"def save_data(df: pd.DataFrame, database_filename: str) -> None:\n engine = create_engine(f\"sqlite:///{database_filename}\")\n df.to_sql(Path(database_filename).stem, engine, index=False, if_exists=\"replace\")",
"def save_data(df, database_filename):\n engine = create_engine(f\"sqlite:///{database_filename}\")\n df.to_sql(\"YourTableName\", engine, index=False, if_exists=\"replace\")",
"def save_data(df, database_filename):\n engine = create_engine('sqlite:///' +database_filename)\n df.to_sql('Project2', engine, index=False)",
"def save_data(df, database_filename): \n engine = create_engine('sqlite:///'+database_filename)\n df.to_sql('messages', engine, index=False, if_exists='replace')",
"def write_df_to_db(df, db_path):\n print \"Writing to 'results' table in db: \", db_path\n conn = sqlite3.connect(db_path)\n df.to_sql(\"results\", con=conn,if_exists='replace')",
"def save_data(df, database_filename):\n engine = create_engine('sqlite:///'+database_filename)\n df.to_sql('disasterdata', engine, index=False)",
"def write_to_database(day, data_to_write):\n\n \"\"\" PSQL commands to create the database:\n # CREATE TABLE phonedata (id serial primary key, date DATE, data JSONB);\n # GRANT ALL ON phonedata TO user;\n # GRANT ALL ON SEQUENCE phonedata_id_seq TO user;\n \"\"\"\n\n # Parameters used to connect to the database \n params = {\"host\":\"localhost\",\n \"port\":\"5432\",\n \"database\":\"postgres\", \n \"user\":\"###\", \n \"password\":\"###\"}\n\n # Connect to the database with the parameters above\n conn = psycopg2.connect(**params)\n print(\"PostgreSQL connection is open\")\n\n # enable json handling\n psycopg2.extras.register_json(conn)\n\n # Connect the cursor\n cursor = conn.cursor()\n \n # INSERT the Google Analytics data with date into the database\n print(\"INSERTING into the database now\")\n\n query = \"INSERT INTO phonedata ( date, data ) VALUES ( (CURRENT_DATE - %s)::DATE, %s)\"\n\n cursor.execute(query, (day, psycopg2.extras.Json(data_to_write)))\n \n # Make the changes to the database persistent\n conn.commit()\n\n # Close the connection with the database\n cursor.close()\n conn.close()\n print(\"PostgreSQL connection is closed\")",
"def save_data(df, database_filepath):\n # create a database connect\n conn = sqlite3.connect(database_filepath)\n # replace .db with empty space for new table name\n table_name = database_filepath.replace('.db', '')\n \n return df.to_sql(table_name, con=conn, if_exists='replace', index=False)",
"def save_data(dataframe, database_filename):\n # Creating sqlite engine and save the dataframe with the name message\n engine_process = create_engine('sqlite:///Messages.db')\n dataframe.to_sql('messaging', engine_process, index=False,if_exists='replace')",
"def save_data(df, database_filename):\n \n # check if the database already exists and delete\n if os.path.exists(database_filename):\n os.remove(database_filename)\n \n # instance of engine to database\n engine = create_engine('sqlite:///{}'.format(database_filename))\n df.to_sql('message', engine, index = False, if_exists='replace')",
"def save_data(df,database):\r\n \r\n # creating a connection to database\r\n engine = create_engine(f'sqlite:///{database}')\r\n \r\n drop_command = 'DROP TABLE IF EXISTS disaster_messages'\r\n \r\n # drop table if exists\r\n engine.execute(drop_command)\r\n \r\n # save the data\r\n df.to_sql(name='disaster_messages', con=engine, index=False)",
"def save_data(df, database_filename):\n engine = create_engine(\"sqlite:///\" + database_filename)\n df.to_sql('messages', engine, index=False)",
"def save_data(df, database_filename):\n engine = create_engine(database_filename)\n df.to_sql('message_cat', engine, index = False)",
"def save_data(df, database_filepath):\n engine = create_engine('sqlite:///' + database_filepath)\n df.to_sql('cleanData', con=engine, if_exists='replace', index=False)",
"def save_to_database(filename,key, df, metadata = {}):\n\t# Opening the dataframe\n\tstore = pd.HDFStore(filename)\n\t# Feeding the dataframe, 't' means table format (slightly slower but can be modified)\n\tstore.put(key, df, format=\"t\")\n\t# feeding the metadata\n\tstore.get_storer(key).attrs.metadata = metadata\n\t# /!\\ Important to properly close the file\n\tstore.close()",
"def save_data(df, database_filename):\n engine = create_engine('sqlite:///{}'.format(database_filename))\n df.to_sql('disaster_messages', engine, index=False, if_exists='replace')",
"def save_data(df, database_filename):\n # Create a database connection \n engine = create_engine('sqlite:///' + database_filename)\n \n # Insert df into DisasterCategories table\n df.to_sql('DisasterCategories', engine, index=False)",
"def df2db(self, df: pd.DataFrame, tab_name):\n\n self.execute(\"set hive.execution.engine = tez\")\n self.execute(\"set tez.queue.name = sephora_internal\")\n self.execute(\"drop table if exists {table_name}\".format(table_name=tab_name))\n df.to_sql(tab_name, self.engine, method='multi', index=False)",
"def df2sql(df, table_name, database_url):\r\n conn = sqlite3.connect(database_url)\r\n df.to_sql(table_name, conn, if_exists='replace', index = False)\r\n conn.commit()",
"def load_sql(df):\n engine = create_engine(f'postgres://{user}:{user}@{host}:{port}/{db}')\n df = df.to_sql(table, engine, if_exists='append')\n logging.debug(str(df)) # logging\n # return df\n #print(df.shape)",
"def save_data(df, database_filename):\n engine = create_engine('sqlite:///{}'.format(database_filename))\n # get a cursor\n #cur = engine.cursor()\n # drop the test table in case it already exists\n result = engine.execute(\"DROP TABLE IF EXISTS messages\")\n \n df.to_sql('messages', engine, index=False)\n result.close()",
"def save_data(df, database_filename): \n engine = create_engine('sqlite:///{}'.format(database_filename)) \n engine.execute(\"DROP TABLE IF EXISTS messages\")\n df.to_sql('messages', engine, index=False)",
"def save(file, table):\n pq.write_table(pa.Table.from_pandas(table), file)",
"def write_sensor_data_to_db(self, sensor_data):\n df = pd.DataFrame(sensor_data)\n df.to_sql('sensor_data', self.conn, if_exists='append', index = False)",
"def df_to_db(dataframe, tablename, engine,\n index=False, index_label=None, if_exists='append',\n chunksize=100000):\n dataframe.to_sql(tablename,\n con=engine,\n index=index,\n index_label=index_label,\n if_exists=if_exists,\n chunksize=chunksize\n )",
"def copy_into_postgres(df, conn, fname):\n print(f\"`{fname}`: Loading data...\")\n\n null_cols = [\n \"col_a\", \"col_b\", \"col_c\", \"col_d\", \"col_e\", \"col_f\", \"col_g\", \"col_h\", \"col_i\", \"col_j\", \"col_k\", \"col_l\", \"col_m\", \n \"col_n\", \"col_o\", \"col_p\", \"col_q\", \"col_r\", \"col_s\", \"col_t\", \"col_u\", \"col_v\", \"col_w\", \"col_x\", \"col_y\", \"col_z\",\n ]\n parsed_cols = [\"file_index\", \"file_name\"] + null_cols[:df.shape[1]]\n insert_cols = \", \".join(parsed_cols)\n\n df.insert(loc=0, column=\"file_name\", value=fname)\n\n with conn.cursor() as curs:\n with io.StringIO() as csv_buffer:\n df.to_csv(csv_buffer, sep=\",\", header=False, index=True)\n csv_buffer.seek(0)\n curs.copy_expert(f\"COPY extract_loader_landing_zone ({insert_cols}) FROM STDIN (FORMAT csv, DELIMITER ',', HEADER FALSE);\", file=csv_buffer)\n conn.commit()\n\n print(f\"`{fname}`: Loaded data!\")\n\n return None",
"def store_to_psql(self, engine, name=None, if_exists='fail'):\n self.sort_df()\n self.table_name(name=name)\n self.dataframe.to_sql(name=self.table, con=engine, if_exists=if_exists)\n return self.table",
"def create_db_dataframe(self, df, table_name):\n try:\n print(\"-I- Writing \" + table_name + \" with DataFrame\")\n df.to_sql(name=table_name, con=self.engine, if_exists='replace', index=True)\n print(\"-I- Write complete.\")\n except Exception as e:\n print(\"-W- \" + str(e))",
"def write_to_db(df, table_name):\n df = df.assign(_xerum_import_ts=pd.Timestamp.now())\n df.columns = map(str.lower, df.columns)\n df.to_sql(table_name, con=engine, if_exists='replace', index=False, method='multi')\n return queries.row_cnt()[\"row_cnt\"]"
] | [
"0.7538958",
"0.7315046",
"0.7035893",
"0.69290936",
"0.6893589",
"0.67505896",
"0.6718586",
"0.6711653",
"0.6706281",
"0.6678071",
"0.66444737",
"0.66421574",
"0.6595042",
"0.65944266",
"0.650788",
"0.6489796",
"0.64839333",
"0.6483777",
"0.6443881",
"0.6425489",
"0.64215523",
"0.6386963",
"0.6366148",
"0.63647294",
"0.6318147",
"0.6307674",
"0.630277",
"0.6285659",
"0.62821895",
"0.6281403"
] | 0.73793316 | 1 |
Load the file list from PostgreSQL and return the readable filepath. | def psql_file_loader(spark, tbname):
filelist_rdd = psql_loader(spark, tbname) \
.rdd.map(lambda x: Row(caseid=x.case_id, filepath=x.path + '/' + x.filename))
return filelist_rdd | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def psql_loader(spark, tbname):\n print(\"Loading files from PostgreSQL table: %s\" % tbname)\n filelist = spark.read \\\n .format('jdbc') \\\n .option('url', 'jdbc:postgresql://%s' % __credential__.jdbc_accessible_host_psql) \\\n .option('dbtable', tbname) \\\n .option('user', __credential__.user_psql) \\\n .option('password', __credential__.password_psql) \\\n .load()\n\n return filelist",
"def pg(file):\n global_config = get_config(file)\n config = global_config.get(\"postgres\")\n databases = config.get(\"databases\")\n for db in databases:\n result = postgres.load(config, db)\n print_result(db, result)",
"def _get_parsed_files(self):\n\n parsed = []\n with Historical_ROAs_Parsed_Table() as t:\n for row in t.execute(f'SELECT * FROM {t.name}'):\n parsed.append(row['file'])\n return parsed",
"def get_database_filepath(functional_unit):\n dbs = set.union(\n *[Database(key[0]).find_graph_dependents() for key in functional_unit]\n )\n return [Database(obj).filepath_processed() for obj in dbs]",
"def load_file():\n global list_of_table, data_base, new_data\n open_name = askopenfilename()\n\n if Path(open_name).suffix == '.db':\n data_base = open_name\n data_base = str(data_base)\n new_data_base = parse(data_base)\n new_data = update_list_tables(new_data_base)\n new_data.clear()\n\n else:\n mistake_db_file()",
"def read_relations(db, openfile):\n pass",
"def sqlfile(path, **kw):\n sql = path.read_text()\n return sql.format(**kw)",
"def load_source(self, path, alias = None):\n\t\ttry:\n\t\t\treturn list(open(path, \"r\"))\n\t\texcept OSError:\n\t\t\treturn None",
"def get_broker_load_sql(self, db_name, table_name, job_file_list, file_columns,\n columns_from_path):\n label = \"%s_%d_%d\" % (table_name, int(time.time()*1000), int(random.random()*100))\n job_file_list = [\"\\\"%s\\\"\" % (job_file) for job_file in job_file_list]\n cmd = \"LOAD LABEL %s.%s (DATA INFILE (%s) INTO TABLE %s\"\\\n % (db_name, label, \",\".join(job_file_list), table_name)\n if conf_parser.column_separator:\n cmd = cmd + \" COLUMNS TERMINATED BY \\\"%s\\\"\" % (conf_parser.column_separator)\n if conf_parser.file_format:\n cmd = cmd + \" FORMAT AS \\\"%s\\\"\" % (conf_parser.file_format)\n if file_columns:\n cmd = cmd + \" (%s)\" % (\",\".join(file_columns))\n if columns_from_path:\n cmd = cmd + \" COLUMNS FROM PATH AS (%s)\" % (\",\".join(columns_from_path))\n cmd = cmd + \") WITH BROKER \\\"%s\\\"\" % (conf_parser.broker_name)\n if conf_parser.broker_username:\n cmd = cmd + \" (\\\"username\\\"=\\\"%s\\\"\" % (conf_parser.broker_username)\n if conf_parser.broker_password:\n cmd = cmd + \", \\\"password\\\"=\\\"%s\\\"\" % (conf_parser.broker_password)\n cmd = cmd + \")\"\n cmd = cmd + \" PROPERTIES(\\\"max_filter_ratio\\\"=\\\"%s\\\", \\\"timeout\\\"=\\\"%s\\\");\"\\\n % (conf_parser.max_filter_ratio, conf_parser.timeout)\n return cmd",
"def pdbfile_list():\n \n import glob, os\n os.chdir(\"../Data\")\n file_list = []\n for file in glob.glob(\"*.pdb\"):\n file_list.append(file)\n return file_list",
"def loadSQL_beddays(filepath='O:\\Administration\\\\02 - Økonomi og PDK\\Medarbejdermapper\\Kasper\\Focus1 - Ad hoc opgaver\\Lungemed sengedage og visitationer\\Lungemed.sql'):\n content = open(filepath, 'r').read()\n return content",
"def pdbfile_list():\n import glob, os\n os.chdir(\"../Data\")\n file_list = []\n for file in glob.glob(\"*.pdb\"):\n file_list.append(file)\n return file_list",
"def loading_data_to_sqlite(list_files):\n engine = connecting_database()\n if engine is None:\n return False\n\n print()\n print(\"-\".rjust(60, \"-\"))\n print(\"Loading data\".center(60))\n print(\"-\".rjust(60, \"-\"))\n\n for filename in list_files:\n name, ext = os.path.splitext(filename)\n if ext != '.csv':\n print(\">> WARNING: CSV file invalid!\")\n return False\n\n print(f\">> Populating the table: stg_{name}\")\n df = pd.read_csv(path + inputfile + filename, sep=',', header=0)\n df.to_sql(f\"stg_{name}\", con=engine, index=False, if_exists='replace')\n print(\"-\".rjust(60, \"-\"))\n\n return True",
"def read_locations(db, openfile):\n pass",
"def loadDatabase ():\n database = []\n # Open a file\n path = \"lyd/\"\n dirs = os.listdir( path )\n \n # This prints all of the files and directories\n for file in dirs:\n if file == \".DS_Store\": #Mac file\n continue\n songdict = {}\n print (file)\n Zxx = STFTsignal.getSTFTofFile(path + file) #STFT of the file\n #mean, eigen and weights are stored in dictionary songdict\n songdict[\"mean\"], songdict[\"eigen\"], songdict[\"weights\"] = PCA(Zxx)\n songdict[\"name\"] = file\n database.append (songdict) \n return database",
"def get_file(self, attached_db=False):\n if attached_db:\n files = [self._sql_file]\n att = self.get_attached_database_list(True)\n for alias, file in att:\n files.append(\"%s,%s\" % (alias, file))\n temp = \";\".join(files)\n if self._engine != \"SQLite\":\n if self._host is None:\n temp = \"%s:::%s\" % (self._engine, temp)\n else:\n temp = \"%s:::%s###%s\" % (self._engine, self._host, temp)\n return temp\n else:\n return self._sql_file",
"def load_list_file():\n\n with open(path_list, \"r\") as list_file:\n return json.load(list_file)",
"def get_sql_path(file_path: str) -> str:\n dag_dir = configuration.get('core', 'dags_folder')\n return os.path.join(dag_dir, file_path)",
"def load_directory_as_db(self, dir_path, db_name):\n load_dir = os.path.join(self.data_dir, dir_path)\n data_files = glob.glob(os.path.join(load_dir, '*.txt'))\n file_groups = defaultdict(list)\n for path in data_files:\n path_noext, _ = os.path.splitext(path)\n filename_noext = os.path.basename(path_noext)\n i = filename_noext.find('-')\n if i == -1:\n table_name = filename_noext\n else:\n table_name = filename_noext[:i]\n file_groups[table_name].append(path)\n\n for table_name in sorted(file_groups.keys()):\n register_name = '{}_{}'.format(db_name, table_name)\n data_files = file_groups[table_name]\n logger.info('REGISTERING {}:{}'.format(register_name, data_files))\n data_files = filter(lambda x: os.path.getsize(x) > 0, data_files)\n if self.load_tables and register_name not in self.load_tables:\n continue\n jdb = self.sql_context.read.json(data_files)\n jdb.printSchema()\n jdb.registerTempTable(register_name)",
"def db_file():\n return abspath('vmchecker.db')",
"def loadDatabase(database):\n for file_name in os.listdir(\"Users\"):\n chemin = os.path.join(\"Users\", file_name)\n key = file_name.lower()\n database[key]=pickle.load(open(chemin,\"rb\"))",
"def load_postgresql_lib(finder, module):\n fileName = os.path.join(module.path[0], \"libsys.sql\")\n finder.IncludeFiles(fileName, os.path.basename(fileName))",
"def get_attached_database_list(self, file=False):\n if self.isMSSQL():\n return [] # pragma: no cover\n else:\n cur = self._connection.cursor()\n cur.execute(\"PRAGMA database_list;\")\n res = cur.fetchall()\n cur.close()\n res = [r for r in res if r[1] != \"temp\" and r[1] != \"main\"]\n if file:\n return [(r[1], r[2]) for r in res]\n else:\n return [r[1] for r in res]",
"def import_list(ctx, list_path):\n with open(list_path, 'r') as fobj:\n migrator.import_list(ctx.obj[\"sceptre_dir\"], ctx.obj[\"options\"], fobj)",
"def _get_files_in_db(self):\r\n query = 'SELECT DISTINCT file_name FROM {0};'.format(\r\n self.tables['measurements'])\r\n self.cursor.execute(query)\r\n result = self.cursor.fetchall()\r\n files = [ele[0] for ele in result if ele[0] is not None]\r\n return files",
"def parse(database_path: str) -> str:\n new_path = database_path.split(\"/\")\n database_file_name = './' + new_path[-1]\n return database_file_name",
"def read_database(db_path, db_file, *args):\n\n db_filepath = os.path.join(db_path, db_file)\n\n # list to store loaded data\n data_imported = []\n conn = sqlite3.connect(db_filepath)\n\n for data_name in args:\n\n\n info = f'Reading {data_name} from database................'\n print(info, end=\"\")\n data_name_in_db = conn.execute(\n f\"\"\"SELECT name FROM sqlite_master WHERE type='table' \n AND name='{data_name}'; \"\"\").fetchall()\n if data_name_in_db:\n df = pd.read_sql(f\"select * from {data_name}\", con=conn)\n substitute_names(df)\n # revert single column DataFrame to Series\n if 'index' in df.columns:\n df.set_index('index', inplace=True)\n df = df.squeeze('columns')\n data_imported.append(df)\n print('ok')\n else:\n data_imported.append(None)\n print('no data')\n conn.close()\n return data_imported #if len(data_imported)>1 else data_imported[0]",
"def loadSQL_visitations(filepath='O:\\Administration\\\\02 - Økonomi og PDK\\Medarbejdermapper\\Kasper\\Focus1 - Ad hoc opgaver\\Lungemed sengedage og visitationer\\Lungemed_visitationsoprindelse_nogroup.sql'):\n content = open(filepath, 'r').read()\n return content",
"def read_sql_from_file(path, conn):\n with open(path, 'r', encoding='utf-8') as f:\n qu = f.read()\n \n df = read_sql(qu, conn)\n \n return df",
"def load_dbc_files(dbc_paths):\n import can_decoder\n from pathlib import Path\n\n db_list = []\n for dbc in dbc_paths:\n db = can_decoder.load_dbc(Path(__file__).parent / dbc)\n db_list.append(db)\n\n return db_list"
] | [
"0.69596756",
"0.5759256",
"0.5735681",
"0.5731232",
"0.5665434",
"0.56590945",
"0.56081575",
"0.54501665",
"0.54350275",
"0.5431418",
"0.53963053",
"0.53911364",
"0.53735834",
"0.5373231",
"0.53518295",
"0.53414434",
"0.53365165",
"0.53235567",
"0.53212494",
"0.5320971",
"0.53167874",
"0.5306412",
"0.5301479",
"0.5269347",
"0.5261779",
"0.5260346",
"0.52586323",
"0.52282524",
"0.52269375",
"0.5201557"
] | 0.66681415 | 1 |
Save DataFrame to Redshift via JDBC redshift driver | def redshift_saver(spark, df, tbname, tmpdir, savemode='error'):
df.createOrReplaceTempView("view")
spark.sql('''SELECT * FROM view''') \
.write.format("com.databricks.spark.redshift") \
.option("url", __credential__.jdbc_accessible_host_redshift) \
.option("dbtable", tbname) \
.option("forward_spark_s3_credentials", True) \
.option("tempdir", "s3n://gdcdata/%s" % tmpdir) \
.mode(savemode) \
.save() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def writeToJDBC(df,tableName,spark):\n #df.table(tableName).write.jdbc(config.jdbcUrl,tableName,config.connectionProperties)\n #df = df.na.fill(0)\n mode= \"overwrite\"\n #print(\"jdbcURL: \",config.jdbcUrl,\"\\ntable Name :\",tableName,\"\\nmode:\",mode,\"\\nconnection property\",config.connectionProperties,\"\\n\")\n try:\n \n df.write.jdbc(url=config.jdbcUrl, table=tableName, mode=mode, properties=config.connectionProperties)\n print(\"Inserting data into PostgreSQL...\", \"\\n\")\n except Exception as e:\n print(e)",
"def psql_saver(spark, df, tbname, savemode='error'):\n df.createOrReplaceTempView(\"view\")\n spark.sql('''SELECT * FROM view''').write \\\n .format('jdbc') \\\n .option('url', 'jdbc:postgresql://%s' % __credential__.jdbc_accessible_host_psql) \\\n .option('dbtable', tbname) \\\n .option('user', __credential__.user_psql) \\\n .option('password', __credential__.password_psql) \\\n .mode(savemode) \\\n .save()",
"def write(database, table, df, jdbc_conf, cut_off_percentage, repartition=False, s3path='', format='csv'):\n sc = SparkContext.getOrCreate()\n glueContext = GlueContext(sc)\n jvm = sc._jvm\n jsc = sc._jsc\n jdf = df._jdf\n\n if s3path is not \"\":\n df = DataFrame(jvm.com.slf.CustomJDBCUtils.write(\n database, table, jdf, jdbc_conf, cut_off_percentage), jsc)\n try:\n df.repartition(1).write.format(format).save(s3path)\n return df\n except:\n # Spark Writing Failed. Reverting to GlueContext\n glueContext.write_dynamic_frame_from_options(\n frame=DynamicFrame.fromDF(df, glueContext, 'dynamic_frame'),\n connection_type='s3',\n connection_options={'path': s3path},\n format=format)\n else:\n return DataFrame(jvm.com.slf.CustomJDBCUtils.write(database, table, jdf, jdbc_conf, cut_off_percentage), jsc)",
"def write(\n self, feature_set: FeatureSet, dataframe: DataFrame, spark_client: SparkClient,\n ) -> Any:",
"def save(client: Redis, identifier: str, df: pd.DataFrame) -> None:\n pipe = client.pipeline()\n for c in df:\n pipe.rpush(f'{identifier}:{c}', *_encode(df[c].values))\n pipe.rpush(f'{identifier}:_index', *_encode(df.index.values))\n pipe.execute()",
"def write_to_db(df, table_name):\n df = df.assign(_xerum_import_ts=pd.Timestamp.now())\n df.columns = map(str.lower, df.columns)\n df.to_sql(table_name, con=engine, if_exists='replace', index=False, method='multi')\n return queries.row_cnt()[\"row_cnt\"]",
"def save_data(df: pd.DataFrame, database_filename: str) -> None:\n engine = create_engine(f\"sqlite:///{database_filename}\")\n df.to_sql(Path(database_filename).stem, engine, index=False, if_exists=\"replace\")",
"def log_model_scores_to_mysql(df, schema, db_conn):\n db.write_dataframe_to_database(df, schema, 'model_score', db_conn)",
"def save_data(df, database_filename):\n engine = create_engine(f\"sqlite:///{database_filename}\")\n df.to_sql(\"YourTableName\", engine, index=False, if_exists=\"replace\")",
"def write_df_to_db(df, db_path):\n print \"Writing to 'results' table in db: \", db_path\n conn = sqlite3.connect(db_path)\n df.to_sql(\"results\", con=conn,if_exists='replace')",
"def df2db(self, df: pd.DataFrame, tab_name):\n\n self.execute(\"set hive.execution.engine = tez\")\n self.execute(\"set tez.queue.name = sephora_internal\")\n self.execute(\"drop table if exists {table_name}\".format(table_name=tab_name))\n df.to_sql(tab_name, self.engine, method='multi', index=False)",
"def write_sensor_data_to_db(self, sensor_data):\n df = pd.DataFrame(sensor_data)\n df.to_sql('sensor_data', self.conn, if_exists='append', index = False)",
"def add_to_database(self, df):\n \n from sqlalchemy import create_engine\n \n engine = create_engine(\"mysql://dublinbus:somepaawsord/researchpracticum\")\n con = engine.connect()\n df.to_sql(con=con, name='TimeTables', if_exists='append')\n con.close()",
"def save_dataframe(state: State):\n\n try:\n state.games.df.to_csv(ROOT_PATH + \"/results/data/raw_data.csv\")\n LOGGER.debug(\"Successfully saved data in ../results/data/\")\n\n except Exception as e:\n LOGGER.error(f\"Could not save dataframe file - {e}\")",
"def save_data(df, database_filename):\n # Create a database connection \n engine = create_engine('sqlite:///' + database_filename)\n \n # Insert df into DisasterCategories table\n df.to_sql('DisasterCategories', engine, index=False)",
"def save_data(df, database_filename):\n engine = create_engine('sqlite:///'+database_filename)\n df.to_sql('disasterdata', engine, index=False)",
"def save_data(df, database_filename):\n engine = create_engine('sqlite:///' +database_filename)\n df.to_sql('Project2', engine, index=False)",
"def save_data(df,database):\r\n \r\n # creating a connection to database\r\n engine = create_engine(f'sqlite:///{database}')\r\n \r\n drop_command = 'DROP TABLE IF EXISTS disaster_messages'\r\n \r\n # drop table if exists\r\n engine.execute(drop_command)\r\n \r\n # save the data\r\n df.to_sql(name='disaster_messages', con=engine, index=False)",
"def execute(self, context): \n aws_hook = AwsHook(self.aws_credentials)\n credentials = aws_hook.get_credentials()\n redshift = PostgresHook(self.redshift_conn_id)\n execution_date = context['execution_date']\n \n self.log.info(f\"Truncating {self.table}\")\n redshift.run(f\"TRUNCATE TABLE {self.table}\")\n \n \n self.log.info(f\"Inserting data into {self.table}\")\n s3_path = f\"s3://{self.s3_bucket}/{self.s3_key}\"\n\n if self.s3_key == \"log_data\":\n year = execution_date.year\n month = execution_date.month\n \n s3_path = '/'.join([s3_path, str(year), str(month)])\n \n formatted_sql = StageToRedshiftOperator.copy_sql.format(\n self.table,\n s3_path,\n credentials.access_key,\n credentials.secret_key,\n self.file_format,\n self.format_path\n )\n \n redshift.run(formatted_sql)",
"def submit_to_queue(queue_df, conn, table_name):\n queue_df.to_sql(con=conn, name=table_name, if_exists='replace', index=False)\n print 'Inserted ' + str(len(queue_df)) + ' records to the task_queue'",
"def to_sql( # pylint: disable=too-many-locals\n df: pd.DataFrame,\n con: redshift_connector.Connection,\n table: str,\n schema: str,\n mode: str = \"append\",\n overwrite_method: str = \"drop\",\n index: bool = False,\n dtype: Optional[Dict[str, str]] = None,\n diststyle: str = \"AUTO\",\n distkey: Optional[str] = None,\n sortstyle: str = \"COMPOUND\",\n sortkey: Optional[List[str]] = None,\n primary_keys: Optional[List[str]] = None,\n varchar_lengths_default: int = 256,\n varchar_lengths: Optional[Dict[str, int]] = None,\n use_column_names: bool = False,\n lock: bool = False,\n chunksize: int = 200,\n commit_transaction: bool = True,\n) -> None:\n if df.empty is True:\n raise exceptions.EmptyDataFrame(\"DataFrame cannot be empty.\")\n _validate_connection(con=con)\n autocommit_temp: bool = con.autocommit\n con.autocommit = False\n try:\n with con.cursor() as cursor:\n created_table, created_schema = _create_table(\n df=df,\n path=None,\n con=con,\n cursor=cursor,\n table=table,\n schema=schema,\n mode=mode,\n overwrite_method=overwrite_method,\n index=index,\n dtype=dtype,\n diststyle=diststyle,\n sortstyle=sortstyle,\n distkey=distkey,\n sortkey=sortkey,\n primary_keys=primary_keys,\n varchar_lengths_default=varchar_lengths_default,\n varchar_lengths=varchar_lengths,\n )\n if index:\n df.reset_index(level=df.index.names, inplace=True)\n column_placeholders: str = \", \".join([\"%s\"] * len(df.columns))\n schema_str = f'\"{created_schema}\".' if created_schema else \"\"\n insertion_columns = \"\"\n if use_column_names:\n insertion_columns = f\"({', '.join(df.columns)})\"\n placeholder_parameter_pair_generator = _db_utils.generate_placeholder_parameter_pairs(\n df=df, column_placeholders=column_placeholders, chunksize=chunksize\n )\n for placeholders, parameters in placeholder_parameter_pair_generator:\n sql: str = f'INSERT INTO {schema_str}\"{created_table}\" {insertion_columns} VALUES {placeholders}'\n _logger.debug(\"sql: %s\", sql)\n cursor.executemany(sql, (parameters,))\n if table != created_table: # upsert\n if lock:\n _lock(cursor, [table], schema=schema)\n _upsert(cursor=cursor, schema=schema, table=table, temp_table=created_table, primary_keys=primary_keys)\n if commit_transaction:\n con.commit()\n except Exception as ex:\n con.rollback()\n _logger.error(ex)\n raise\n finally:\n con.autocommit = autocommit_temp",
"def write(self, dataframe: DataFrame) -> None:\n parameters = self.default\n options = self.specification.get('options', {})\n parameters.update(options)\n\n connection = self.engine(parameters)\n\n self.formatter.format(dataframe=dataframe,\n path_or_buffer=connection,\n method='direct',\n schema=parameters['schema'])\n return \"Inserted with success\"",
"def save_to_db(\n df: pd.DataFrame,\n collection: pymongo.collection.Collection,\n replace: bool\n ):\n records = df.to_dict(\"records\")\n if replace:\n collection.drop()\n collection.insert_many(records)",
"def save_csv(connection, query, columns, name):\n\n try:\n df = pd.read_sql(query, connection, columns=columns)\n df.to_csv(name, index=False)\n except Exception as ex:\n print(type(ex))\n print(ex)",
"def save_to_gcs(df, file, bucket=settings.ASSETS.BUCKET):\n output_file = NamedTemporaryFile().name\n df.to_csv(output_file, compression=\"gzip\", index=False)\n upload_blob(bucket, output_file, file)",
"def df2sql(df, table_name, database_url):\r\n conn = sqlite3.connect(database_url)\r\n df.to_sql(table_name, conn, if_exists='replace', index = False)\r\n conn.commit()",
"def save_timestamped_df(input_df, results_dir, output_file_name):\n file_name = kn.create_timestamped_filename(output_file_name, \"df\")\n kn.save_df(input_df, results_dir, file_name)",
"def _write(self):\n\n output_path = os.path.join(config.S3_OUTPUT, config.DATAFRAME_ARTISTS)\n dataframe = self._cache.get_source(config.DATAFRAME_ARTISTS)\n\n print('Writing dataframe to {}'.format(output_path))\n\n dataframe.write.parquet(\n output_path,\n mode='overwrite'\n )",
"def df_to_db(dataframe, tablename, engine,\n index=False, index_label=None, if_exists='append',\n chunksize=100000):\n dataframe.to_sql(tablename,\n con=engine,\n index=index,\n index_label=index_label,\n if_exists=if_exists,\n chunksize=chunksize\n )",
"def save_data(df, database_filename):\n engine = create_engine(database_filename)\n df.to_sql('message_cat', engine, index = False)"
] | [
"0.7210167",
"0.6551748",
"0.6547626",
"0.6318852",
"0.6138051",
"0.60281956",
"0.599131",
"0.59745264",
"0.5963789",
"0.59333736",
"0.593165",
"0.583602",
"0.5832216",
"0.57807076",
"0.57741284",
"0.57587796",
"0.5732389",
"0.57164097",
"0.5714424",
"0.5713656",
"0.57106453",
"0.5694501",
"0.56510943",
"0.56455487",
"0.5638743",
"0.561655",
"0.5614291",
"0.5611785",
"0.5610931",
"0.56077975"
] | 0.6924096 | 1 |
Load the file list from Redshift and return the readable filepath. | def redshift_file_loader(spark, tbname, tmpdir):
filelist_rdd = redshift_loader(spark, tbname, tmpdir) \
.rdd.map(lambda x: Row(caseid=x.case_id, filepath=x.path + '/' + x.filename))
return filelist_rdd | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def redshift_loader(spark, tbname, tmpdir):\n print(\"Loading files from Redshift table: %s\" % tbname)\n filelist = spark.read \\\n .format(\"com.databricks.spark.redshift\") \\\n .option(\"url\", __credential__.jdbc_accessible_host_redshift) \\\n .option(\"forward_spark_s3_credentials\", True) \\\n .option(\"dbtable\", tbname) \\\n .option(\"tempdir\", \"s3n://gdcdata/%s\" % tmpdir) \\\n .load()\n return filelist",
"def psql_file_loader(spark, tbname):\n filelist_rdd = psql_loader(spark, tbname) \\\n .rdd.map(lambda x: Row(caseid=x.case_id, filepath=x.path + '/' + x.filename))\n return filelist_rdd",
"def load_list_file():\n\n with open(path_list, \"r\") as list_file:\n return json.load(list_file)",
"def load_source(self, path, alias = None):\n\t\ttry:\n\t\t\treturn list(open(path, \"r\"))\n\t\texcept OSError:\n\t\t\treturn None",
"def get_url_list_file(self):\n return self.URL_LIST_FILE",
"def read_string_list(fn, prefix=\"\"):\n\n if ( False == os.path.isfile( fn ) ):\n raise Exception(\"%s does not exist.\" % (fn))\n \n with open(fn, \"r\") as fp:\n lines = fp.read().splitlines()\n\n n = len(lines)\n\n if ( \"\" == prefix ):\n for i in range(n):\n lines[i] = lines[i].strip()\n else:\n for i in range(n):\n lines[i] = \"%s/%s\" % ( prefix, lines[i].strip() )\n\n return lines",
"def read(self, path, ext=None, start=None, stop=None, recursive=False, npartitions=None):\n from .utils import connection_with_anon, connection_with_gs\n\n path = addextension(path, ext)\n scheme, bucket_name, keylist = self.getfiles(\n path, start=start, stop=stop, recursive=recursive)\n\n if not keylist:\n raise FileNotFoundError(\"No objects found for '%s'\" % path)\n\n credentials = self.credentials\n\n self.nfiles = len(keylist)\n\n if spark and isinstance(self.engine, spark):\n\n def getsplit(kvIter):\n if scheme == 's3' or scheme == 's3n':\n conn = connection_with_anon(credentials)\n bucket = conn.get_bucket(bucket_name)\n elif scheme == 'gs':\n conn = boto.storage_uri(bucket_name, 'gs')\n bucket = conn.get_bucket()\n else:\n raise NotImplementedError(\"No file reader implementation for URL scheme \" + scheme)\n\n for kv in kvIter:\n idx, keyName = kv\n key = bucket.get_key(keyName)\n buf = key.get_contents_as_string()\n yield idx, buf\n\n npartitions = min(npartitions, self.nfiles) if npartitions else self.nfiles\n rdd = self.engine.parallelize(enumerate(keylist), npartitions)\n return rdd.mapPartitions(getsplit)\n\n else:\n\n if scheme == 's3' or scheme == 's3n':\n conn = connection_with_anon(credentials)\n bucket = conn.get_bucket(bucket_name)\n elif scheme == 'gs':\n conn = connection_with_gs(bucket_name)\n bucket = conn.get_bucket()\n else:\n raise NotImplementedError(\"No file reader implementation for URL scheme \" + scheme)\n\n def getsplit(kv):\n idx, keyName = kv\n key = bucket.get_key(keyName)\n buf = key.get_contents_as_string()\n return idx, buf\n\n return [getsplit(kv) for kv in enumerate(keylist)]",
"def load_captured_urls_local(filename):\n if os.path.isfile(filename) == False:\n return []\n \n with open(filename, 'r') as f:\n return f.read().splitlines()",
"def import_list(ctx, list_path):\n with open(list_path, 'r') as fobj:\n migrator.import_list(ctx.obj[\"sceptre_dir\"], ctx.obj[\"options\"], fobj)",
"def read(self, path, ext=None, start=None, stop=None, recursive=False, npartitions=None):\n path = uri_to_path(path)\n files = self.list(path, ext=ext, start=start, stop=stop, recursive=recursive)\n\n nfiles = len(files)\n self.nfiles = nfiles\n\n if spark and isinstance(self.engine, spark):\n npartitions = min(npartitions, nfiles) if npartitions else nfiles\n rdd = self.engine.parallelize(enumerate(files), npartitions)\n return rdd.map(lambda kv: (kv[0], readlocal(kv[1])))\n else:\n return [(k, readlocal(v)) for k, v in enumerate(files)]",
"def _ReadImageList(list_path):\n with tf.io.gfile.GFile(list_path, 'r') as f:\n image_paths = f.readlines()\n image_paths = [entry.rstrip() for entry in image_paths]\n return image_paths",
"def get_file_list(input_list):\n if not isinstance(input_list, Iterable) or isinstance(input_list, str):\n raise ArgumentTypeError('input_list must be iterable (and not string)')\n file_list = []\n for item in input_list:\n if os.path.isfile(item):\n file_list.append(os.path.abspath(item))\n elif os.path.isdir(item):\n for fname in os.listdir(item):\n path = os.path.join(item, fname)\n if os.path.isfile(path):\n file_list.append(path)\n else:\n raise OpenL3Error('Could not find {}'.format(item))\n\n return file_list",
"def psql_loader(spark, tbname):\n print(\"Loading files from PostgreSQL table: %s\" % tbname)\n filelist = spark.read \\\n .format('jdbc') \\\n .option('url', 'jdbc:postgresql://%s' % __credential__.jdbc_accessible_host_psql) \\\n .option('dbtable', tbname) \\\n .option('user', __credential__.user_psql) \\\n .option('password', __credential__.password_psql) \\\n .load()\n\n return filelist",
"def loadFromFile(self, filename):\n\t\treturn []",
"def file_list(load):\n if \"env\" in load:\n # \"env\" is not supported; Use \"saltenv\".\n load.pop(\"env\")\n\n ret = []\n\n if \"saltenv\" not in load:\n return ret\n\n saltenv = load[\"saltenv\"]\n metadata = _init()\n\n if not metadata or saltenv not in metadata:\n return ret\n for bucket in _find_files(metadata[saltenv]):\n for buckets in bucket.values():\n files = [f for f in buckets if not fs.is_file_ignored(__opts__, f)]\n ret += _trim_env_off_path(files, saltenv)\n\n return ret",
"def _get_parsed_files(self):\n\n parsed = []\n with Historical_ROAs_Parsed_Table() as t:\n for row in t.execute(f'SELECT * FROM {t.name}'):\n parsed.append(row['file'])\n return parsed",
"def load_list(name, path, offset, chunk_length=0):\n ret = []\n with open(os.path.join(path, name) + '.pkl', 'rb') as f:\n f.seek(offset)\n if chunk_length == 0:\n while True:\n try:\n ret.append(pickle.load(f))\n except:\n return ret\n for i in range(chunk_length):\n try:\n ret.append(pickle.load(f))\n except:\n return ret, f.tell()\n return ret, f.tell()",
"def get_svc_list_file(self):\n\n return paths.SVC_LIST_FILE",
"def LoadListFile(file):\n\tlst = []\n\ttry:\n\t\twith open(file,'r') as f:\n\t\t\tfor line in f:\n\t\t\t\tline = line.rstrip()\n\t\t\t\tlst.append(line)\n\texcept:\n\t\treturn []\n\treturn lst",
"def get_listfile(self, datadir):\n return []",
"def load_lsh(filename):\n with open(filename, 'rb') as handle:\n return pickle.load(handle)",
"def fs_ls(self, src):\n cmd = (\n \"import uos\\nfor f in uos.ilistdir(%s):\\n\"\n \" print('{:12} {}{}'.format(f[3]if len(f)>3 else 0,f[0],'/'if f[1]&0x4000 else ''))\"\n % ((\"'%s'\" % src) if src else \"\")\n )\n self.exec_(cmd, data_consumer=stdout_write_bytes)",
"def orig_filepath_list(filename_list, src_path):\n orig_filepaths = list([])\n i = 0\n for filename in filename_list:\n orig_filepaths.append(src_path + filename_list[i])\n i += 1\n return orig_filepaths",
"def loadList(file_name):\n with open(file_name) as f:\n l = [line.strip() for line in f]\n return l",
"def load_server_list(filename):\n if not os.path.isfile(filename):\n return #ignore this error for now\n fo=open(filename,\"r\")\n rd=fo.read()\n fo.close()\n __load_server_list(rd)",
"def read_file_list(filename):\n\n # hint: when you read lines of files, there will be a \"newline\"\n # (end-of-line character) at the end of each line, and you want to\n # strip that off before you print it. Do some research on that!\n\n # with open(filename, 'r') as file:\n # print(file.read())\n #cwd = os.getcwd() # This gets the visual studio code opened location\n cwd = os.path.dirname(os.path.realpath(__file__))\n print(cwd)\n try:\n file_contents = Path(cwd + \"\\\\\" + filename).read_text()\n except:\n return \"File not found\"\n return file_contents",
"def GetWorkloadFileList() -> list[str]:\n return [data.ResourcePath(workload) for workload in FLAGS.ycsb_workload_files]",
"def load_data_str(rel_path):\r\n full_path = path(__file__).abspath().dirname() / \"data\" / rel_path # pylint: disable=E1120\r\n with open(full_path) as data_file:\r\n return data_file.read()",
"def __read_file(file_path):\n assert os.path.exists(file_path), 'FILE \"{}\" NOT FOUND,' \\\n ' PLEASE GIVE THE CORRECT FILE PATH.'.format(file_path)\n url_list = []\n if file_path == '':\n return url_list\n else:\n my_file = open(file_path, 'r')\n for line in my_file.readlines():\n url_list.append(''.join(line.split('\\n')))\n return url_list",
"def get_file(file_to_edit):\n events = []\n file_path = lrs_path + file_to_edit\n with open(file_path, \"r\") as the_file:\n filereader = csv.reader(the_file)\n for row in filereader:\n events.append(row)\n the_file.close()\n return events"
] | [
"0.6710121",
"0.59790194",
"0.56483924",
"0.5509965",
"0.5405186",
"0.5352408",
"0.53398067",
"0.5331406",
"0.5290376",
"0.52780545",
"0.5258845",
"0.5241987",
"0.52318573",
"0.52213573",
"0.5219097",
"0.52087283",
"0.5166526",
"0.51605594",
"0.5156012",
"0.51536196",
"0.51154655",
"0.5097816",
"0.5091168",
"0.50734353",
"0.5072464",
"0.50722355",
"0.5068708",
"0.506643",
"0.50524",
"0.5051944"
] | 0.68664795 | 0 |
Finds bootloader properties for the device using offline inspection. | def inspect_boot_loader(g, device) -> inspect_pb2.InspectionResults:
bios_bootable = False
uefi_bootable = False
root_fs = ""
try:
stream = os.popen('gdisk -l {}'.format(device))
output = stream.read()
print(output)
if _inspect_for_hybrid_mbr(output):
bios_bootable = True
part_list = g.part_list('/dev/sda')
for part in part_list:
try:
guid = g.part_get_gpt_type('/dev/sda', part['part_num'])
# It covers both GPT "EFI System" and BIOS "EFI (FAT-12/16/32)".
if guid == 'C12A7328-F81F-11D2-BA4B-00A0C93EC93B':
uefi_bootable = True
# TODO: detect root_fs (b/169245755)
# It covers "BIOS boot", which make a protective-MBR bios-bootable.
if guid == '21686148-6449-6E6F-744E-656564454649':
bios_bootable = True
except Exception:
continue
except Exception as e:
print("Failed to inspect disk partition: ", e)
return inspect_pb2.InspectionResults(
bios_bootable=bios_bootable,
uefi_bootable=uefi_bootable,
root_fs=root_fs,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_device_file_dict():\n cmd = 'lshw -class disk'\n desc = \"description\"\n log_name = \"logical name\"\n serial = \"serial\"\n\n dev = []\n dev_list = []\n\n ret, output, err = run_gluster_command(cmd)\n output = output.decode('ASCII')\n dev_info = output.split('\\n')\n for line in dev_info:\n if re.search(desc, line):\n if dev:\n dev_list.append(dev)\n\n dev = []\n if re.search(log_name, line) or re.search(serial, line):\n temp = line.split(':')\n temp[1] = temp[1].strip(' ')\n dev.append(temp[1])\n dev_list.append(dev)\n for line in dev_list:\n print(line)",
"def _get_bios_boot_resource(self, data):\n try:\n boot_uri = data['links']['Boot']['href']\n except KeyError:\n msg = ('Boot resource not found.')\n raise exception.IloCommandNotSupportedError(msg)\n\n status, headers, boot_settings = self._rest_get(boot_uri)\n\n if status != 200:\n msg = self._get_extended_error(boot_settings)\n raise exception.IloError(msg)\n\n return boot_settings",
"def test_get_bios_boot_mode_list(self):\n pass",
"def getbootinfo(self):\n self.mount()\n kernel = None\n inits = []\n for line in self.xlist(\"get-bootinfo\", IBASE)[1]:\n if line.startswith('+++'):\n kernel = line.split()[1]\n else:\n inits.append(line)\n self.unmount()\n if not inits:\n run_error(_(\"No initramfs found\"))\n return None\n if not kernel:\n run_error(_(\"GRUB problem:\\n\") + inits[0])\n return None\n return (kernel, inits)",
"def get_info():\n\n global DISKINFO\n DISKINFO = {}\n\n #Run diskutil list to get disk names.\n runcmd = subprocess.Popen(\"diskutil list -plist\", stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)\n\n #Get the output.\n stdout = runcmd.communicate()[0]\n\n #Parse the plist (Property List).\n global PLIST\n\n PLIST = plistlib.loads(stdout)\n\n #Find the disks.\n for disk in PLIST[\"AllDisks\"]:\n #Run diskutil info to get disk info.\n runcmd = subprocess.Popen(\"diskutil info -plist \"+disk, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)\n stdout = runcmd.communicate()[0]\n\n #Parse the plist (Property List).\n PLIST = plistlib.loads(stdout)\n\n #Check if the disk is a partition.\n disk_is_partition = is_partition(disk)\n\n if not disk_is_partition:\n #These are devices.\n get_device_info(disk)\n\n else:\n #These are Partitions. Fix for disks w/ more than 9 partitions.\n host_disk = \"/dev/\"+disk.split(\"s\")[0]+\"s\"+disk.split(\"s\")[1]\n get_partition_info(disk, host_disk)\n\n #Check we found some disks.\n if not DISKINFO:\n raise RuntimeError(\"No Disks found!\")",
"def _get_persistent_boot_devices(self):\n # Check if the BIOS resource if exists.\n headers_bios, bios_uri, bios_settings = self._check_bios_resource()\n\n # Get the Boot resource.\n boot_settings = self._get_bios_boot_resource(bios_settings)\n\n # Get the BootSources resource\n try:\n boot_sources = boot_settings['BootSources']\n except KeyError:\n msg = (\"BootSources resource not found.\")\n raise exception.IloError(msg)\n\n try:\n boot_order = boot_settings['PersistentBootConfigOrder']\n except KeyError:\n msg = (\"PersistentBootConfigOrder resource not found.\")\n raise exception.IloCommandNotSupportedError(msg)\n\n return boot_sources, boot_order",
"def find_stick():\n out = subprocess.check_output(\n \"gdbus introspect --system --dest org.freedesktop.UDisks \"\n \"--object-path /org/freedesktop/UDisks/devices --recurse \"\n \"--only-properties\".split())\n devs = zip(*((re.match(r\".* = '?(.*?)'?;\", x).group(1)\n for x in out.splitlines()\n if \"DriveConnectionInterface =\" in x\n or \"DeviceIsPartition =\" in x\n or \"DeviceFile = \" in x),)*3)\n try:\n return next(dev[2] for dev in devs if dev[0] == 'usb'\n and dev[1] == 'true')\n except StopIteration:\n return None",
"def discover(self):\n\n # Get the Huge Page configuration\n self.get_hugepages()\n\n # Get the device configuration\n self.get_devices_per_node()\n\n # Get the CPU configuration\n self.get_cpu()\n\n # Get the current grub cmdline\n self.get_grub()",
"def bdev_nvme_get_discovery_info(client):\n return client.call('bdev_nvme_get_discovery_info')",
"def loopdev(diskimg):\n result = subprocess.run(\n ['losetup', '--all', '--list', '--json'], check=True, capture_output=True)\n for ld in json.loads(result.stdout.decode())['loopdevices']:\n if ld['back-file'] == diskimg:\n return ld['name']\n return None",
"def get_bootvar(self):\n module = 'bootimage/oper'\n method = 'GET'\n response = self.axapi_call(module, method)\n bootdefault = response.json()['bootimage']['oper']['hd-default']\n print(self.device + ' The device is set to boot from: ' + bootdefault + ' in the future')\n return bootdefault",
"def test_get_drives_drive_firmware(self):\n pass",
"def get_boot_dev_count(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetBootDevCount', self.handle)",
"def boot_config():\n # quick check to grab a config file from /boot partition.\n # this function helps users who cannot SSH/access the Pi,\n # but can access the microSD card\n if os.path.exists(BOOT_CONFIG_PATH):\n print(\"Configuration loaded from /boot directory.\")\n with open(BOOT_CONFIG_PATH) as boot_file:\n with open(CONFIG_FILE_PATH, 'w+') as config_file:\n for line in boot_file:\n config_file.write(line)",
"def _try_to_get_drive_info_from_dbus(device):\n\n # ensure the device is under /dev, and extract the block device name\n path_components = os.path.split(device)\n if path_components[0] != \"/dev\":\n return {}\n block_device_name = path_components[-1]\n\n # try importing dbus, and exit gracefully if it fails\n try:\n import dbus\n except ImportError:\n return {}\n\n try:\n\n bus = dbus.SystemBus()\n\n # get the block object based on the block device name\n block_obj = bus.get_object(\n \"org.freedesktop.UDisks2\",\n \"/org/freedesktop/UDisks2/block_devices/\" + block_device_name,\n )\n block_iface = dbus.Interface(block_obj, \"org.freedesktop.DBus.Properties\")\n block = block_iface.GetAll(\"org.freedesktop.UDisks2.Block\")\n\n # get the drive object, based on the drive identifier from the block object\n drive_path = block.get(\"Drive\")\n drive_obj = bus.get_object(\"org.freedesktop.UDisks2\", drive_path)\n drive_iface = dbus.Interface(drive_obj, \"org.freedesktop.DBus.Properties\")\n drive = drive_iface.GetAll(\"org.freedesktop.UDisks2.Drive\")\n\n # extract the name and guid from the block and drive properties, calculate drivetype, and return\n return {\n \"name\": str(\n block.get(\"IdLabel\")\n or \" \".join([drive.get(\"Vendor\"), drive.get(\"Model\")]).strip()\n ),\n \"guid\": str(block.get(\"IdUUID\") or drive.get(\"Serial\") or drive.get(\"Id\")),\n \"drivetype\": _get_drivetype_from_dbus_drive_properties(drive),\n }\n\n except ValueError:\n return {}\n except dbus.exceptions.DBusException:\n return {}",
"def sunpower_fetch(sunpower_monitor):\n try:\n sunpower_data = sunpower_monitor.device_list()\n _LOGGER.debug(\"got data %s\", sunpower_data)\n data = {}\n # Convert data into indexable format data[device_type][serial]\n for device in sunpower_data[\"devices\"]:\n if device[\"DEVICE_TYPE\"] not in data:\n data[device[\"DEVICE_TYPE\"]] = {device[\"SERIAL\"]: device}\n else:\n data[device[\"DEVICE_TYPE\"]][device[\"SERIAL\"]] = device\n return data\n except ConnectionException as error:\n raise UpdateFailed from error",
"def __fill_boot_settings_fields(profile, profile_elements):\n result = True\n selenium2lib = ui_lib.get_s2l()\n # Validate the profile in XML file\n __validate_boot_settings_properties_in_xml_file(profile)\n # If XML is fine, go ahead filling Boot Setting UI fields\n result &= ui_lib.wait_for_element_and_click(profile_elements.ID_COMBO_MENU_VIEW)\n result &= ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_LINK_BOOTSETTINGS,\n PerfConstants.DEFAULT_SYNC_TIME)\n ui_lib.wait_for_element_visible(profile_elements.ID_CHKBOX_MANAGE_BOOT)\n if profile.has_property(XML_MANAGE_BOOT_MODE_ATTRIBUTE) and profile.manageBoot == \"false\":\n result &= ui_lib.wait_for_checkbox_and_unselect(profile_elements.ID_CHKBOX_MANAGE_BOOT)\n elif profile.has_property(XML_BOOT_MODE_ATTRIBUTE):\n boot_mode_option = profile.bootMode\n logger._log_to_console_and_log_file(\" --> Selecting Boot Mode..\")\n __select_value_from_a_profile_combo_box(profile_elements.ID_COMBO_PROFILE_BOOT_MODE, profile_elements.ID_COMBO_PROFILE_BOOT_MODE_LIST % boot_mode_option)\n if boot_mode_option == CONSTANT_UEFI or boot_mode_option == CONSTANT_UEFI_OPTIMIZED:\n if profile.has_property(XML_BOOT_POLICY_ATTRIBUTE):\n boot_policy_option = profile.bootPolicy\n result &= __select_value_from_a_profile_combo_box(profile_elements.ID_COMBO_PROFILE_PXE_BOOT_POLICY, profile_elements.ID_COMBO_PROFILE_PXE_BOOT_POLICY_LIST % boot_policy_option)\n result &= ui_lib.wait_for_element_visible(profile_elements.ID_CHKBOX_PROFILE_BOOT_ORDER)\n if profile.has_property(XML_MANAGE_BOOT_ORDER_ATTRIBUTE) and profile.manageBootOrder == \"false\":\n selenium2lib.unselect_checkbox(profile_elements.ID_CHKBOX_PROFILE_BOOT_ORDER)\n else:\n selenium2lib.select_checkbox(profile_elements.ID_CHKBOX_PROFILE_BOOT_ORDER)\n # Set primary boot device\n if profile.has_property(XML_PRIMARY_BOOT_DEVICE):\n primary_boot_device = profile.primaryBootDevice\n result &= __select_value_from_a_profile_combo_box(profile_elements.ID_COMBO_PROFILE_PRIMARY_BOOT_DEVICE, profile_elements.ID_COMBO_PROFILE_PRIMARY_BOOT_DEVICE_LIST % primary_boot_device)\n elif boot_mode_option == CONSTANT_LEGACY_BIOS:\n __fill_boot_order(profile, profile_elements)\n else:\n __fill_boot_order(profile, profile_elements)\n return result",
"def findDeviceDescriptor(self, string: str) -> cern.japc.core.DeviceDescriptor:\n ...",
"def bdev_nvme_get_mdns_discovery_info(client):\n return client.call('bdev_nvme_get_mdns_discovery_info')",
"def get_block_device_list(vars = {}, log = sys.stderr):\n\n # make sure we can access to the files/directories in /proc\n if not os.access(PROC_PARTITIONS_PATH, os.F_OK):\n return None\n\n # table with valid scsi/sata/ide/raid block device names\n valid_blk_names = {}\n # add in valid sd and hd block device names\n for blk_prefix in ('sd','hd'):\n for blk_num in map (\\\n lambda x: chr(x), range(ord('a'),ord('z')+1)):\n devicename=\"%s%c\" % (blk_prefix, blk_num)\n valid_blk_names[devicename]=None\n\n # add in valid scsi raid block device names\n for M in range(0,1+1):\n for N in range(0,7+1):\n devicename = \"cciss/c%dd%d\" % (M,N)\n valid_blk_names[devicename]=None\n\n for devicename in valid_blk_names.keys():\n # devfs under 2.4 (old boot cds) used to list partitions\n # in a format such as scsi/host0/bus0/target0/lun0/disc\n # and /dev/sda, etc. were just symlinks\n try:\n devfsname= os.readlink( \"/dev/%s\" % devicename )\n valid_blk_names[devfsname]=None\n except OSError:\n pass\n\n # only do this once every system boot\n if not os.access(DEVICES_SCANNED_FLAG, os.R_OK):\n\n # this is ugly. under devfs, device\n # entries in /dev/scsi/.. and /dev/ide/...\n # don't show up until you attempt to read\n # from the associated device at /dev (/dev/sda).\n # so, lets run sfdisk -l (list partitions) against\n # most possible block devices, that way they show\n # up when it comes time to do the install.\n devicenames = valid_blk_names.keys()\n devicenames.sort()\n for devicename in devicenames:\n os.system( \"sfdisk -l /dev/%s > /dev/null 2>&1\" % devicename )\n\n # touch file\n fb = open(DEVICES_SCANNED_FLAG,\"w\")\n fb.close()\n\n devicelist= {}\n\n partitions_file= file(PROC_PARTITIONS_PATH,\"r\")\n line_count= 0\n for line in partitions_file:\n line_count= line_count + 1\n\n # skip the first two lines always\n if line_count < 2:\n continue\n\n parts= string.split(line)\n\n if len(parts) < 4:\n continue\n\n device= parts[3]\n\n # skip and ignore any partitions\n if not valid_blk_names.has_key(device):\n continue\n\n try:\n major= int(parts[0])\n minor= int(parts[1])\n blocks= int(parts[2])\n except ValueError, err:\n continue\n\n gb_size= blocks/BLOCKS_PER_GB\n\n # check to see if the blk device is readonly\n try:\n # can we write to it?\n dev_name= \"/dev/%s\" % device\n fb = open(dev_name,\"w\")\n fb.close()\n readonly=False\n except IOError, e:\n # check if EROFS errno\n if errno.errorcode.get(e.errno,None) == 'EROFS':\n readonly=True\n else:\n # got some other errno, pretend device is readonly\n readonly=True\n\n devicelist[dev_name]= {'major': major,'minor': minor,'blocks': blocks, 'size': gb_size, 'readonly': readonly}\n return devicelist",
"def create_boot_dev(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVmCfg_CreateBootDev', self.handle))",
"def get_boot_record(disk):\n\n #TODO\n return \"Unknown\", \"Unknown\"",
"def get_device_properties(device):\n results = devices.show(device)\n return jsonify(results)",
"def load_devices():",
"def find(ctx, name):\n conf = settings.devices.get(name, dict())\n if conf.get('type') == 'command':\n return conf, name, name\n\n uuids = ctx.obj['uuids']\n context = Context()\n for dev in iter(context.list_devices()):\n if 'ID_FS_TYPE' in dev:\n if name == uuids.get(dev.get('ID_FS_UUID')):\n return (settings.devices[name], dev['DEVNAME'],\n settings.devices[name].get('label',\n dev.get('ID_FS_LABEL')))\n\n print('Device \"%s\" not found.' % name)\n sys.exit(1)",
"def step6(self):\n if len(self.mrs) == 3:\n count = 3\n elif len(self.mrs) == 2:\n count = 2\n else:\n count = 1\n for mr in self.mrs[0:count]:\n self.log.info(\"Display boot drive on controller:%d\"\n % (mr.ctrl_id))\n vd_id = mr.cli.bootdrive_vd_get()\n if (int(vd_id) == -1): # -1 : No boot VD.\n self.log.info(\"No boot VD found on controller: %d\"\n % (mr.ctrl_id))\n else:\n self.log.info(\"VD ID of the boot VD: %d\"\n % int((vd_id)))",
"def vdisk_in_flashcopy(self, diskname):\n LOG.debug(\"Entering\")\n cmd = ''.join([\"svcinfo lsfcmap -filtervalue \",\n \"target_vdisk_name=%s -delim :\" % diskname])\n output = self._svc_command(cmd)[0]\n\n if len(output) != 2:\n return(100, None)\n\n header = output[0].split(':')\n values = output[1].split(':')\n index = header.index('progress')\n progress = values[index]\n index = header.index('id')\n map_id = values[index]\n\n LOG.debug(\"Exiting (progress = %s, map_id = %s)\" % (progress, map_id))\n return progress, map_id",
"def get_mbed_devices(self):\n upper_ven = [ven.upper() for ven in self.usb_vendor_list]\n mounts_key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, 'SYSTEM\\MountedDevices')\n for point, label, _ in self.iter_vals(mounts_key):\n printable_label = label.decode('utf-16le', 'ignore')\n if ('DosDevices' in point and\n any(v in printable_label.upper() for v in upper_ven)):\n logger.debug(\"Found Mount point %s with usb ID %s\",point,\n printable_label)\n yield (point, printable_label)\n else:\n logger.debug(\"Skipping Mount point %r label %r\", point, label)",
"def bootloader() -> NoReturn:",
"def get_devices():\n global managed_objects\n global devices_by_adr\n \n devices_by_adr = {}\n \n r = re.compile(\"\\/org\\/bluez\\/hci\\d*\\/dev\\_(.*)\")\n # e.g., match a string like this:\n # /org/bluez/hci0/dev_58_C9_35_2F_A1_EF\n \n for key, value in managed_objects.items():\n # print(\"key=\", key)\n m = r.match(key)\n if m is not None:\n dev_str = m.group(1) # we have a device string!\n # print(\"dev_str=\", dev_str)\n # let's flatten that dict a bit\n devices_by_adr[dev_str] = value[\"org.bluez.Device1\"]"
] | [
"0.5718715",
"0.55690753",
"0.5462103",
"0.54236096",
"0.53736985",
"0.5368723",
"0.53373444",
"0.53061146",
"0.5274291",
"0.52600014",
"0.52261484",
"0.5221996",
"0.51640713",
"0.5162854",
"0.51505417",
"0.5148141",
"0.51273084",
"0.5101032",
"0.50776225",
"0.50754064",
"0.5074345",
"0.50732803",
"0.50169736",
"0.49803507",
"0.49568427",
"0.49545583",
"0.49416268",
"0.49398404",
"0.49299243",
"0.49253574"
] | 0.61695313 | 0 |
Finds hybrid MBR, which potentially is BIOS bootable even without a BIOS boot partition. | def _inspect_for_hybrid_mbr(gdisk_output) -> bool:
is_hybrid_mbr = False
mbr_bios_bootable_re = re.compile(r'(.*)MBR:[\s]*hybrid(.*)', re.DOTALL)
if mbr_bios_bootable_re.match(gdisk_output):
is_hybrid_mbr = True
return is_hybrid_mbr | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_bios_boot_mode_by_moid(self):\n pass",
"def test_get_bios_boot_mode_list(self):\n pass",
"def detect_mbr(self, filename, offset, fs_id):\n self.logger.debug('Detecting MBR partition type')\n\n if fs_id not in self.__mbr_plugins:\n return None\n else:\n plugins = self.__mbr_plugins.get(fs_id)\n for plugin in plugins:\n if plugin.detect(filename, offset):\n return plugin.get_volume_object()\n return None",
"def test_patch_bios_boot_mode(self):\n pass",
"def inspect_boot_loader(g, device) -> inspect_pb2.InspectionResults:\n\n bios_bootable = False\n uefi_bootable = False\n root_fs = \"\"\n\n try:\n stream = os.popen('gdisk -l {}'.format(device))\n output = stream.read()\n print(output)\n if _inspect_for_hybrid_mbr(output):\n bios_bootable = True\n\n part_list = g.part_list('/dev/sda')\n for part in part_list:\n try:\n guid = g.part_get_gpt_type('/dev/sda', part['part_num'])\n # It covers both GPT \"EFI System\" and BIOS \"EFI (FAT-12/16/32)\".\n if guid == 'C12A7328-F81F-11D2-BA4B-00A0C93EC93B':\n uefi_bootable = True\n # TODO: detect root_fs (b/169245755)\n # It covers \"BIOS boot\", which make a protective-MBR bios-bootable.\n if guid == '21686148-6449-6E6F-744E-656564454649':\n bios_bootable = True\n except Exception:\n continue\n\n except Exception as e:\n print(\"Failed to inspect disk partition: \", e)\n\n return inspect_pb2.InspectionResults(\n bios_bootable=bios_bootable,\n uefi_bootable=uefi_bootable,\n root_fs=root_fs,\n )",
"def _get_bios_boot_resource(self, data):\n try:\n boot_uri = data['links']['Boot']['href']\n except KeyError:\n msg = ('Boot resource not found.')\n raise exception.IloCommandNotSupportedError(msg)\n\n status, headers, boot_settings = self._rest_get(boot_uri)\n\n if status != 200:\n msg = self._get_extended_error(boot_settings)\n raise exception.IloError(msg)\n\n return boot_settings",
"def test_update_bios_boot_mode(self):\n pass",
"def _locate_bootloader():\n pkg_path = os.path.dirname(__file__)\n blpath = os.path.abspath(os.path.join(pkg_path, 'bootloader'))\n if not os.path.isfile(blpath):\n raise InternalError(\"bootloader not found at {}\".format(blpath))\n return blpath",
"def search_lxc_bridges():\n bridges = __context__.get(\"lxc.bridges\", None)\n # either match not yet called or no bridges were found\n # to handle the case where lxc was not installed on the first\n # call\n if not bridges:\n bridges = set()\n running_bridges = set()\n bridges.add(DEFAULT_BR)\n try:\n output = __salt__[\"cmd.run_all\"](\"brctl show\")\n for line in output[\"stdout\"].splitlines()[1:]:\n if not line.startswith(\" \"):\n running_bridges.add(line.split()[0].strip())\n except (SaltInvocationError, CommandExecutionError):\n pass\n for ifc, ip in __grains__.get(\"ip_interfaces\", {}).items():\n if ifc in running_bridges:\n bridges.add(ifc)\n elif os.path.exists(f\"/sys/devices/virtual/net/{ifc}/bridge\"):\n bridges.add(ifc)\n bridges = list(bridges)\n # if we found interfaces that have lxc in their names\n # we filter them as being the potential lxc bridges\n # we also try to default on br0 on other cases\n\n def sort_bridges(a):\n pref = \"z\"\n if \"lxc\" in a:\n pref = \"a\"\n elif \"br0\" == a:\n pref = \"c\"\n return f\"{pref}_{a}\"\n\n bridges.sort(key=sort_bridges)\n __context__[\"lxc.bridges\"] = bridges\n return bridges",
"def get_pending_boot_mode(self):\n headers, uri, bios_settings = self._check_bios_resource(['BootMode'])\n _, _, settings = self._get_bios_settings_resource(bios_settings)\n boot_mode = settings.get('BootMode')\n if boot_mode == 'LegacyBios':\n boot_mode = 'legacy'\n return boot_mode.upper()",
"def _get_next_wb(self) -> Optional[Package]:\n for dist in self.distributions:\n for arch in self.architectures:\n response = self._query_wannabuild(arch, dist,\n '--list=needs-build')\n pending = response.split('\\n')\n if not pending[0]:\n continue\n result = self._take(pending[0])\n if result:\n return result\n return None",
"def get_supported_boot_devices(self, task):\n if (getattr(task.node, 'power_interface') == 'ipmitool'\n or task.node.driver_internal_info.get('irmc_ipmi_succeed')):\n return super(IRMCManagement, self).get_supported_boot_devices(task)\n else:\n return super(ipmitool.IPMIManagement,\n self).get_supported_boot_devices(task)",
"def get_one_time_boot(self):\n system = self._get_host_details()\n try:\n if system['Boot']['BootSourceOverrideEnabled'] == 'Once':\n device = system['Boot']['BootSourceOverrideTarget']\n if device in DEVICE_RIS_TO_COMMON:\n return DEVICE_RIS_TO_COMMON[device]\n return device\n else:\n # value returned by RIBCL if one-time boot setting are absent\n return 'Normal'\n\n except KeyError as e:\n msg = \"get_one_time_boot failed with the KeyError:%s\"\n raise exception.IloError((msg) % e)",
"def bootpart(disks):\n return path_to_partition(disks, '/boot/foo')",
"def get_boot_driver(self):\n return self._boot_driver",
"def getbootinfo(self):\n self.mount()\n kernel = None\n inits = []\n for line in self.xlist(\"get-bootinfo\", IBASE)[1]:\n if line.startswith('+++'):\n kernel = line.split()[1]\n else:\n inits.append(line)\n self.unmount()\n if not inits:\n run_error(_(\"No initramfs found\"))\n return None\n if not kernel:\n run_error(_(\"GRUB problem:\\n\") + inits[0])\n return None\n return (kernel, inits)",
"def get_boot_device(self):\n root_vol = None\n boot_vol = None\n for volume in self.volumes:\n if not volume.partitions:\n continue\n for partition in volume.partitions:\n if partition.mount_point == \"/\":\n root_vol = volume\n elif partition.mount_point == '/boot':\n boot_vol = volume\n\n if not boot_vol:\n return root_vol\n return boot_vol",
"def bootloader() -> NoReturn:",
"def get_boot_mode(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)",
"def get_boot_mode():\n boot_mode = 'Legacy'\n try:\n reg_key = winreg.OpenKey(\n winreg.HKEY_LOCAL_MACHINE, r'System\\CurrentControlSet\\Control')\n reg_value = winreg.QueryValueEx(reg_key, 'PEFirmwareType')[0]\n if reg_value == 2:\n boot_mode = 'UEFI'\n except:\n boot_mode = 'Unknown'\n\n return boot_mode",
"def get_boot_mode(self, task):\n raise exception.UnsupportedDriverExtension(\n driver=task.node.driver, extension='get_boot_mode')",
"def search_lxc_bridge():\n return search_lxc_bridges()[0]",
"def find_bridge(i, j, chain) :\n B = chain.bridges_dict\n br = None\n for b in B.keys() :\n if (B[b].lumen1 == i and B[b].lumen2 == j) or (B[b].lumen1 == j and B[b].lumen2 == i) :\n br = b\n if br == None :\n print('No bridge found to connect these lumens ('+str(i)+', '+str(j)+') !')\n return br",
"def supported_boot_interfaces(self):\n return [fake.FakeBoot] + super().supported_boot_interfaces",
"def is_bootable(self):\n return self.bootable_flag == 0x80",
"def mba_supported():\n return common.MBA_CAP in SYSTEM_CAPS",
"def get_bootarch(self):\n return self._bootarch",
"def boot_configuration(self):\n bootconfs = self.get_logical_configuration(gdef.BOOT_LOG_CONF)\n if not bootconfs:\n return bootconfs\n assert len(bootconfs) == 1 # Only one boot configuration can exist for each device instance.\n return bootconfs[0]",
"def UseExistingBootDisk(disks):\n return any(disk.get('boot', False) for disk in disks)",
"def get_bootvar(self):\n module = 'bootimage/oper'\n method = 'GET'\n response = self.axapi_call(module, method)\n bootdefault = response.json()['bootimage']['oper']['hd-default']\n print(self.device + ' The device is set to boot from: ' + bootdefault + ' in the future')\n return bootdefault"
] | [
"0.6185693",
"0.6110025",
"0.59356195",
"0.56100863",
"0.55986625",
"0.55217767",
"0.5459415",
"0.53488076",
"0.527089",
"0.52338624",
"0.51531035",
"0.5113562",
"0.50975084",
"0.50700265",
"0.5068751",
"0.5063921",
"0.5033384",
"0.50226516",
"0.49860033",
"0.49854615",
"0.4954317",
"0.493543",
"0.493463",
"0.49108374",
"0.49038294",
"0.49031234",
"0.48924258",
"0.48739812",
"0.4855018",
"0.48434755"
] | 0.7008793 | 0 |
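A minimal standalone sketch of the regex check performed by _inspect_for_hybrid_mbr above; the sample gdisk -l text below is an assumption written for illustration, not output captured from a real disk:

import re

# Assumed excerpt of the "Partition table scan" block that gdisk -l prints.
sample_gdisk_output = (
    "Partition table scan:\n"
    "  MBR: hybrid\n"
    "  BSD: not present\n"
    "  GPT: present\n"
)

# Same pattern as in the function: re.DOTALL lets the leading (.*) span newlines.
mbr_bios_bootable_re = re.compile(r'(.*)MBR:[\s]*hybrid(.*)', re.DOTALL)
print(bool(mbr_bios_bootable_re.match(sample_gdisk_output)))  # prints True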
Returns a linux.Inspector that is configured with all detectable Linux distros. | def _linux_inspector(
fs: boot_inspect.system.filesystems.Filesystem) -> linux.Inspector:
return linux.Inspector(fs, _LINUX) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def known_os_type():\n return 'Linux'",
"def platform():\n return ['linux']",
"def linux_os_config(self) -> Optional[pulumi.Input['LinuxOSConfigArgs']]:\n return pulumi.get(self, \"linux_os_config\")",
"def __distro(self):\n\n if hpccm.config.g_linux_distro == linux_distro.UBUNTU:\n if not self.__ospackages:\n self.__ospackages = ['binutils-dev', 'file', 'libnuma-dev',\n 'make', 'wget']\n\n if self.repository:\n self.__ospackages.extend(['autoconf', 'automake',\n 'ca-certificates', 'git',\n 'libtool'])\n\n if hpccm.config.g_linux_version >= StrictVersion('18.0'):\n self.__runtime_ospackages = ['libbinutils']\n else:\n self.__runtime_ospackages = ['binutils']\n elif hpccm.config.g_linux_distro == linux_distro.CENTOS:\n if not self.__ospackages:\n self.__ospackages = ['binutils-devel', 'file', 'make',\n 'numactl-devel', 'wget']\n\n if self.repository:\n self.__ospackages.extend(['autoconf', 'automake',\n 'ca-certificates', 'git',\n 'libtool'])\n\n self.__runtime_ospackages = ['binutils']\n else: # pragma: no cover\n raise RuntimeError('Unknown Linux distribution')",
"def __distro(self):\n\n if hpccm.config.g_linux_distro == linux_distro.UBUNTU:\n if not self.__ospackages:\n self.__ospackages = ['bzip2', 'file', 'hwloc', 'libnuma-dev',\n 'make', 'openssh-client', 'perl',\n 'tar', 'wget']\n self.__runtime_ospackages = ['hwloc', 'openssh-client']\n elif hpccm.config.g_linux_distro == linux_distro.CENTOS:\n if not self.__ospackages:\n self.__ospackages = ['bzip2', 'file', 'hwloc', 'make',\n 'numactl-devel', 'openssh-clients',\n 'perl', 'tar', 'wget']\n self.__runtime_ospackages = ['hwloc', 'openssh-clients']\n else: # pragma: no cover\n raise RuntimeError('Unknown Linux distribution')",
"def linux_configurations(self) -> Sequence['outputs.GetComputeMachineOsProfileLinuxConfigurationResult']:\n return pulumi.get(self, \"linux_configurations\")",
"def is_linux():\n (sysname, nodename, release, version, machine) = os.uname()\n return sysname == 'Linux'",
"def osname_is_linux():\n return (\"Linux\" == g_osname)",
"def __distro(self):\n\n if hpccm.config.g_linux_distro == linux_distro.UBUNTU:\n if not self.__ospackages:\n if self.__runfile:\n self.__ospackages = ['perl', 'wget']\n else:\n self.__ospackages = ['apt-transport-https',\n 'ca-certificates', 'gnupg', 'wget']\n\n if hpccm.config.g_linux_version >= StrictVersion('22.04'):\n self.__distro_label = 'ubuntu2204'\n elif hpccm.config.g_linux_version >= StrictVersion('20.04'):\n self.__distro_label = 'ubuntu2004'\n elif hpccm.config.g_linux_version >= StrictVersion('18.0'):\n self.__distro_label = 'ubuntu1804'\n else:\n self.__distro_label = 'ubuntu1604'\n\n elif hpccm.config.g_linux_distro == linux_distro.CENTOS:\n if not self.__ospackages:\n if self.__runfile:\n self.__ospackages = ['perl', 'perl-Env', 'wget']\n\n if hpccm.config.g_linux_version >= StrictVersion('8.0'):\n self.__distro_label = 'rhel8'\n else:\n self.__distro_label = 'rhel7'\n\n else: # pragma: no cover\n raise RuntimeError('Unknown Linux distribution')",
"def test_plugin_macosx_pretend_linux(mocker, pretend_macos):\n\n # When patching something which has a side effect on the module-level code\n # of a plugin, make sure to reload it.\n mocker.patch(\"platform.system\", return_value=\"Linux\")\n reload_plugin(\"NotifyMacOSX\")\n\n # Our object is disabled.\n obj = apprise.Apprise.instantiate('macosx://', suppress_exceptions=False)\n assert obj is None",
"def is_linux():\n return sys.platform[:5] == \"linux\"",
"def _detect_os():\n # TODO: Add pillar support for the apachectl location\n os_family = __grains__[\"os_family\"]\n if os_family == \"RedHat\":\n return \"apachectl\"\n elif os_family == \"Debian\" or os_family == \"Suse\":\n return \"apache2ctl\"\n else:\n return \"apachectl\"",
"def os_is_linux():\n return platform.system() == \"Linux\" and \"raspberrypi\" not in platform.uname()",
"def IsLinux():\n return os.name == 'posix' and os.uname()[0] == 'Linux'",
"def get_system_spec():\n import pkg_resources\n import platform\n\n if sys.platform == 'darwin':\n system_info = 'macOS {} {}'.format(\n platform.mac_ver()[0],\n platform.architecture()[0],\n )\n else:\n system_info = '{} {} {} {}'.format(\n platform.system(),\n '_'.join(platform.architecture()),\n platform.release(),\n platform.machine(),\n )\n\n system_spec = dict(\n raiden=pkg_resources.require(raiden.__name__)[0].version,\n python_implementation=platform.python_implementation(),\n python_version=platform.python_version(),\n system=system_info,\n )\n return system_spec",
"def auto_detect_os(distro):\n if is_debian(distro):\n return \"Debian\"\n\n if is_redhat(distro):\n return \"Redhat\"\n\n return \"Unknown\"",
"def is_linux():\r\n return sys.platform.startswith('linux')",
"def describe_operating_systems():\n pass",
"def get_os_type(cls):\n return {\n 'Darwin': cls.MAC,\n 'Linux': cls.LINUX,\n 'Windows': cls.WINDOWS\n }.get(platform.system(), cls.LINUX)",
"def se_linux(self) -> Optional[pulumi.Input['SELinuxStrategyOptionsPatchArgs']]:\n return pulumi.get(self, \"se_linux\")",
"def get_virtual_node_addon_os_type(self) -> str:\n return \"Linux\"",
"def __distro(self):\n\n if hpccm.config.g_linux_distro == linux_distro.UBUNTU:\n if not self.__ospackages:\n self.__ospackages = ['build-essential', 'cpio']\n self.__bashrc = '/etc/bash.bashrc'\n elif hpccm.config.g_linux_distro == linux_distro.CENTOS:\n if not self.__ospackages:\n self.__ospackages = ['gcc', 'gcc-c++', 'make', 'which']\n self.__bashrc = '/etc/bashrc'\n else: # pragma: no cover\n raise RuntimeError('Unknown Linux distribution')",
"def is_linux() -> bool:\n\n return sys.platform.startswith('linux')",
"def _get_osname():\n osname = sys.platform.lower()\n if osname == \"linux2\":\n osname = \"linux\"\n return osname",
"def __distro(self):\n\n if hpccm.config.g_linux_distro == linux_distro.UBUNTU:\n self.__directory_string = 'Ubuntu-16.04'\n # By specifying an install prefix, the installer bypasses\n # the deb package manager (debs are not relocatable like\n # rpms). Therefore, remove the files directly rather than\n # removing packages.\n self.__generic_aarch64_only = 'find /opt/arm -maxdepth 1 -type d -name \"armpl-*\" -not -name \"*Generic-AArch64*\" -print0 | xargs -0 rm -rf'\n self.__installer_template = 'arm-compiler-for-hpc-{}_Generic-AArch64_Ubuntu-16.04_aarch64-linux-deb.sh'\n self.__package_string = 'Ubuntu_16.04'\n self.__url_string = 'Ubuntu16.04'\n\n if not self.__ospackages:\n self.__ospackages = ['libc6-dev', 'python', 'tar', 'wget']\n\n elif hpccm.config.g_linux_distro == linux_distro.CENTOS:\n self.__directory_string = 'RHEL-7'\n self.__generic_aarch64_only = 'rpm --erase $(rpm -qa | grep armpl | grep -v Generic-AArch64)'\n self.__installer_template = 'arm-compiler-for-hpc-{}_Generic-AArch64_RHEL-7_aarch64-linux-rpm.sh'\n self.__package_string = 'RHEL_7'\n self.__url_string = 'RHEL7'\n\n if not self.__ospackages:\n self.__ospackages = ['glibc-devel', 'tar', 'wget']\n\n else: # pragma: no cover\n raise RuntimeError('Unknown Linux distribution')",
"def build_linux(self, **kargs):\n self.linux_files = [\"%s/%s/wombat/vmlinux\" % (self.builddir, self.name)]\n LIB_DEPENDS = [self.libs[\"mutex\"][1]]\n LIB_DEPENDS += [self.libs[\"iguana\"][1]]\n LIB_DEPENDS += [self.libs[\"l4\"][1]]\n LIB_DEPENDS += [self.libs[\"timer\"][1]]\n LIB_DEPENDS += [self.libs[\"l4e\"][1]]\n LIB_DEPENDS += [self.libs[\"c\"][1]]\n LIB_DEPENDS += [self.libs[\"circular_buffer\"][1]]\n LIB_DEPENDS += [self.libs[\"ll\"][1]]\n LIB_DEPENDS += [self.libs[\"range_fl\"][1]]\n LIB_DEPENDS += [self.libs[\"naming\"][1]]\n\n if \"pxa\" in self.machine.drivers:\n LIB_DEPENDS += [self.libs[\"pxa\"][1]]\n \n l4linux = self.Command(self.linux_files, LIB_DEPENDS, buildlinux)\n l4linux = Flatten([l4linux])[0]\n Precious(self.linux_files)\n\n\twombat_cflags = \"-DENDIAN_%s \" % self.machine.endian.upper()\n\n\tif machine.pidreloc == True:\n wombat_cflags += \" -DARM_PID_RELOC \"\n\n if restrict_vm == True:\n wombat_cflags += \" -DCONFIG_RESTRICTED_VM=1 \"\n\n\tif (hasattr(machine, \"c_flags\")):\n\t wombat_cflags += ' '.join(machine.c_flags)\n\n # This is horrible :(\n mutex_include = os.getcwd() + os.sep + self.libs[\"mutex\"][0][0][1:]\n ig_include = os.getcwd() + os.sep + self.libs[\"iguana\"][0][0][1:]\n ig_idl4_include = self.libs[\"iguana\"][0][-1]\n l4_include = os.getcwd() + os.sep + self.libs[\"l4\"][0][0][1:]\n timer_include = os.getcwd() + os.sep + self.libs[\"timer\"][0][0][1:]\n cb_include = os.getcwd() + os.sep + self.libs[\"circular_buffer\"][0][0][1:]\n idl4_include = os.getcwd() + os.sep + self.libs[\"idl4\"][0][1:] + os.sep\n naming_include = os.getcwd() + os.sep + self.libs[\"naming\"][0][0][1:] + os.sep\n \n mutex_lib = os.getcwd() + os.sep + self.libs[\"mutex\"][2][1:] + os.sep\n ig_lib = os.getcwd() + os.sep + self.libs[\"iguana\"][2][1:] + os.sep\n l4_lib = os.getcwd() + os.sep + self.libs[\"l4\"][2][1:] + os.sep\n timer_lib = os.getcwd() + os.sep + self.libs[\"timer\"][2][1:] + os.sep\n l4e_lib = os.getcwd() + os.sep + self.libs[\"l4e\"][2][1:] + os.sep\n c_lib = os.getcwd() + os.sep + self.libs[\"c\"][2][1:] + os.sep\n cb_lib = os.getcwd() + os.sep + self.libs[\"circular_buffer\"][2][1:] + os.sep\n ll_lib = os.getcwd() + os.sep + self.libs[\"ll\"][2][1:] + os.sep\n rfl_lib = os.getcwd() + os.sep + self.libs[\"range_fl\"][2][1:] + os.sep\n naming_lib = os.getcwd() + os.sep + self.libs[\"naming\"][2][1:] + os.sep\n\n LIB_ARGS = \"\"\n LIB_ARGS += \" LIBL4_INCLUDE=%s\" % l4_include\n LIB_ARGS += \" LIBTIMER_INCLUDE=%s\" % timer_include\n LIB_ARGS += \" LIBCB_INCLUDE=%s\" % cb_include\n LIB_ARGS += \" IGUANA_INCLUDE=%s\" % ig_include\n LIB_ARGS += \" IGUANA_IDL_INCLUDE=%s\" % ig_idl4_include\n LIB_ARGS += \" IDL4_INCLUDE=%s\" % idl4_include\n LIB_ARGS += \" NAMING_INCLUDE=%s\" % naming_include\n LIB_ARGS += \" MUTEX_INCLUDE=%s\" % mutex_include\n if \"pxa\" in self.machine.drivers:\n pxa_include = os.getcwd() + os.sep + self.libs[\"pxa\"][0][0][1:] + os.sep\n LIB_ARGS += \" LIBPXA_INCLUDE=%s\" % pxa_include\n\n LIB_ARGS += \" LIBCDIR=%s\" % c_lib\n LIB_ARGS += \" LIBIGUANADIR=%s\" % ig_lib\n LIB_ARGS += \" LIBL4DIR=%s\" % l4_lib\n LIB_ARGS += \" LIBTIMERDIR=%s\" % timer_lib\n LIB_ARGS += \" LIBL4EDIR=%s\" % l4e_lib\n LIB_ARGS += \" LIBCBDIR=%s\" % cb_lib\n LIB_ARGS += \" LIBLLDIR=%s\" % ll_lib\n LIB_ARGS += \" LIBRANGE_FLDIR=%s\" % rfl_lib\n LIB_ARGS += \" LIBNAMINGDIR=%s\" % naming_lib\n LIB_ARGS += \" LIBMUTEXDIR=%s\" % mutex_lib\n if \"pxa\" in self.machine.drivers:\n pxa_lib = os.getcwd() + os.sep + self.libs[\"pxa\"][2][1:] + os.sep\n LIB_ARGS += \" 
LIBPXADIR=%s\" % pxa_lib\n\n l4linux.linux_build_cmd = \"make -C wombat O=%s/%s/wombat WOMBAT_CFLAGS=\\'%s\\' V=0 %s \" \\\n \"CROSS_COMPILE=%s \" % \\\n (self.builddir, self.name, wombat_cflags, LIB_ARGS, self.toolchain)\n\n if cleaning and os.path.exists(\"%s/%s/wombat\" % (self.builddir, self.name)):\n shutil.rmtree(\"%s/%s/wombat\" % (self.builddir, self.name))\n\n # As for pistachio we don't track the L4Linux dependencies so the\n # use needs to explicitly specify scons build_linux= to get L4Linux\n # rebuilt\n add_arg(\"build_linux\", \"Set this option if you want to rebuild Wombat on this build\", 0)\n if build_linux != 0:\n AlwaysBuild(l4linux)\n\n\tenv['EXPECT_TEST_DATA'] = [(\"Iguana init starting\", None),\n (\"Loading linux\", None),\n (\"Memory: \\d+k/\\d+k available\", None),\n (\"Please press Enter to activate this console.\", None)]\n\n return l4linux",
"def se_linux(self) -> pulumi.Input['SELinuxStrategyOptionsArgs']:\n return pulumi.get(self, \"se_linux\")",
"def init_linuxArch(self):\n archDic = {'i386': 'i386', 'i686': 'i386', 'i586': 'i386', 'amd64': 'x86_64', 'x86_64': 'x86_64',\n 'i86pc': 'x86_64'}\n result, resultErr = self.ksp_ssh.ssh_execute_command('uname -m')\n #self.logger.info(\"arch info %s\" % (result.strip()))\n self.realArch = result.strip()\n linuxArch = archDic.get(result.strip(), \"unknownArch\") # 判断计算机是多少位\n #self.logger.info(\"linux arch info %s\" % linuxArch)\n return linuxArch",
"def linux():\n command = \"cat /etc/NetworkManager/system-connections/*\"\n networks = subprocess.check_output(command, shell=True).decode(\"utf-8\")\n return networks",
"def detect_os(self, env=None):\n if env is None:\n env = os.environ\n if 'ROS_OS_OVERRIDE' in env:\n splits = env[\"ROS_OS_OVERRIDE\"].split(':')\n self._os_name = splits[0]\n if len(splits) > 1:\n self._os_version = splits[1]\n if len(splits) > 2:\n self._os_codename = splits[2]\n else:\n self._os_codename = ''\n else:\n self._os_version = self._os_codename = ''\n self._override = True\n else:\n for os_name, os_detector in self._os_list:\n if os_detector.is_os():\n self._os_name = os_name\n self._os_version = os_detector.get_version()\n self._os_codename = os_detector.get_codename()\n self._os_detector = os_detector\n break\n\n if self._os_name:\n return self._os_name, self._os_version, self._os_codename\n else: # No solution found\n attempted = [x[0] for x in self._os_list]\n raise OsNotDetected(\"Could not detect OS, tried %s\" % attempted)"
] | [
"0.5891477",
"0.5780679",
"0.5668247",
"0.5573606",
"0.5561961",
"0.54512626",
"0.5450596",
"0.537677",
"0.5364674",
"0.53325284",
"0.53206855",
"0.53015876",
"0.5254666",
"0.522435",
"0.52176446",
"0.51821816",
"0.51740766",
"0.5099447",
"0.5091836",
"0.5078843",
"0.5066999",
"0.504743",
"0.50383174",
"0.5027224",
"0.5002847",
"0.4970096",
"0.4940217",
"0.4935953",
"0.4924386",
"0.49150896"
] | 0.78530514 | 0 |
| This void function saves all files and directories whose names contain the given word to the search_list list. return_equals(directory, word[, result=search_list]) | def return_equals(self, directory, word, result=search_list):
try:
directories = listdir(self.directory)
except WindowsError:
directories = []
if "$Recycle.Bin" in directories:
directories.remove("$Recycle.Bin")
if "*\\*" in directories:
directories.remove("*\\*")
# print directories
for element in directories:
element = element.lower()
word = word.lower()
word_index = element.find(word)
if not element:
continue
elif self.is_pattern :
if self.search_s(element) :
result.append(directory + "\\" + element)
elif element.split('.')[-1] == "txt":
print element.split('.')[-1]
try:
text_file = open(self.directory + "\\" + element, 'r')
line = text_file.readline()
while line:
if search_s(line):
result.append([self.directory + "\\" + element, ])
break
line = text_file.readline()
text_file.close()
except IOError:
print 'kir'
print
elif element == self.word:
result.append(directory + "\\" + element)
elif word_index + 1:
# print directory + "\\" + element
result.append([self.directory + "\\" + element, word_index, word_index + len(word)])
elif element.split('.')[-1] == "txt":
print element.split('.')[-1]
try:
text_file = open(self.directory + "\\" + element, 'r')
line = text_file.readline()
while line:
if word in line:
result.append([self.directory + "\\" + element, word_index, word_index + len(word)])
break
line = text_file.readline()
text_file.close()
except IOError:
print 'kir'
print
elif isdir(directory + "\\" + element):
thread_obj = CompleteSearch(directory + "\\" + element, self.word)
threads_list.append(thread_obj)
thread_obj.start()
thread_obj.join() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def search(word, current_directory, search_result_list=search_list):\n if search_result_list:\n for counter in range(len(search_result_list)):\n search_result_list.pop()\n if current_directory:\n searcher_object = CompleteSearch(current_directory, word)\n searcher_object.start()\n searcher_object.join()\n return remove_equals(search_result_list)\n\n else:\n for cleaner in range(len(search_result_list)):\n search_result_list.pop()\n for driver in drivers():\n searcher_object = CompleteSearch(driver, word)\n searcher_object.start()\n return remove_equals(search_result_list)",
"def filesearch(word=\"\"):\n logger.info('Starting filesearch')\n file = []\n for f in glob.glob(\"*\"):\n if word[0] == \".\":\n if f.endswith(word):\n file.append(f)\n\n elif word in f:\n file.append(f)\n #return file\n logger.debug(file)\n return file",
"def search_entries(search):\n _, filenames = default_storage.listdir(\"entries\")\n result = []\n for filename in filenames: \n if filename.endswith(\".md\"):\n nameonly = re.sub(r\"\\.md$\", \"\", filename)\n \n if nameonly.lower() == search.lower():\n #print(\"name only :\", nameonly)\n #print(\"search :\", search)\n return (nameonly)\n elif search.lower() in nameonly.lower():\n result.append(nameonly)\n return(result)",
"def search(self,path,key_words):\t#key_words must be tuple\n\t\ttry:\n\t\t\tall=os.walk(path,False)\t#os.walk() is a generator , the return is a tuple which is (dirpath,dirnames,filenames)\n\t\texcept:\n\t\t\tpass\n\t\telse:\n\t\t\tfor item in all:\n\t\t\t\tfilepath=item[0]\n\t\t\t\tfor filename in item[2]:\n\t\t\t\t\tfor key_word in key_words:\t#find all key_word\n\t\t\t\t\t\tif key_word in filename.lower():\t#ignore case of word , and only search filename\n\t\t\t\t\t\t\tself.result.append(os.path.join(filepath,filename))",
"def get_filepaths(keyword, directory):\n \n matches = []\n filenames_total = []\n \n for root, dirnames, filenames in os.walk(directory):\n for filename in filenames:\n if keyword in filename:\n matches.append(root + '/' + filename)\n filenames_total.append(filename)\n return matches, filenames_total",
"def get_set_from_search(self, word):\n found_set = set()\n found_count = 0\n # con1 = services.connect(user='sysdba', password='masterkey')\n # print(\"Security file for database is: \", con1.get_security_database_path() + \"\\n\")\n\n con = fdb.connect(\n database=self.db_filepath,\n # dsn='localhost:~/test/CGI.vvv', #localhost:3050\n user='sysdba', password='masterkey'\n #charset='UTF8' # specify a character set for the connection\n )\n\n # Create a Cursor object that operates in the context of Connection con:\n cur = con.cursor()\n\n if \"'\" in word: # we need to add an extra for SQL statements\n word = word.replace(\"'\", \"''\")\n\n SELECT = \"select * from FILES WHERE FILE_NAME LIKE '%\" + word + \".%'\" # adding period to include start of extension\n\n try:\n cur.execute(SELECT)\n for row in cur:\n print(\"found: \", row[1])\n found_set.add(row[1])\n found_count += 1\n\n print(\"found_count:\", found_count)\n con.close()\n return found_set, found_count\n\n except Exception as identifier:\n errormesg = \"Error while looking up: \" + word + \"\\n\" + str(identifier)\n print(BColors.FAIL + errormesg + BColors.ENDC)\n return found_set, found_count",
"def search_all(search_tree, word_list):\n\n to_write = \"\"\n for w in word_list:\n comp = pocet_porovnani(search_tree, w.value, print_results=False)\n to_write += w.value + \":\" + str(comp) + \"\\n\"\n\n with open(\"search.txt\", \"w\") as f:\n f.write(to_write)\n print(colored(\" ulozene do suboru search.txt\", color=\"green\"))",
"def search(self, word):",
"def directory_find(root, word):\n for path, dirs, files in os.walk(root):\n if word in dirs:\n return os.path.join(path, word)",
"def test_directory_search(self, test_directory, search_terms, expected):\n test_file_directory = dirname(abspath(getfile(currentframe())))\n test_directory = join(test_file_directory, test_directory)\n search = FileSearch()\n results = search.search_files_in_directory(\n test_directory, search_terms)\n self.assertEquals(len(results), expected)\n results = search.search_files_in_directory(EMPTY, EMPTY)\n self.assertIsNone(results)",
"def get_files_match(matches, dname, Lshow, Ldir=False):\n matched_files=[]\n ### two for-loop is converted\n for fname in os.listdir(dname):\n #print(f\"{fname}\")\n for match in matches:\n if re.search(match, fname):\n ### if it is dir skip\n if os.path.isdir(fname) and not Ldir:\n continue\n if Lshow:\n print(f\"detect {fname}\") # in {match} {matches}\")\n matched_files.append(fname)\n return matched_files",
"def search(self, lookupword):\n with sqlite3.connect(self.dbpath) as conn:\n cursor = conn.cursor()\n cursor.execute('SELECT word, videofile FROM translation WHERE \\\n lower(word)=lower(?)', (lookupword,))\n find = cursor.fetchall()\n\n if find != []:\n # the word was found\n find = self.addSuffixes(find)\n return (True, find)\n\n else:\n # the word was not found\n # search the database for similar words\n altoptions = self._findAltOpts(lookupword)\n return (False, altoptions)",
"def _FindFileNamesInDirectory(input_api, dir_path, search_file_names):\n matches = []\n for _, _, file_names in input_api.os_walk(dir_path):\n for file_name in file_names:\n if file_name in search_file_names:\n matches.append(file_name)\n return matches",
"def createFileList(my_dir, term):\n \n for subdir, dirs, files in os.walk(my_dir):\n for file in files:\n if re.search(re.escape(term), file):\n result = os.path.join(subdir, file)\n fileList.append(result)\n return fileList",
"def _findFilesInDir(self, directory, extension=\".py\", foundFiles=None):\n\n #mutable default arguments in Python are evaluated once when the function is defined, not each time the function is called.\n if foundFiles == None:\n foundFiles = []\n \n filenames = os.listdir(directory)\n for filename in filenames:\n #need to verify that the entity is a file (this avoids problems when directory names have file extensions)\n if filename[-len(extension):] == extension and filename[0:1] != '.' and os.path.isfile(directory + '/' + filename):\n foundFiles.append(directory + '/' + filename)\n print ('===>' + filename)\n return foundFiles",
"def find_files(directory, extension, magictext):\n global files_logged\n global found_text\n dir_path = os.path.abspath(directory)\n dir_files = os.listdir(dir_path)\n for file in dir_files:\n if file.endswith(extension) and file not in files_logged:\n logger.info('New file found: {}'.format(file))\n files_logged.append(file)\n if file.endswith(extension):\n file_path = os.path.join(dir_path, file)\n if find_string_in_files(file_path, magictext):\n break\n for file in files_logged:\n if file not in dir_files:\n logger.info('File deleted: {}'.format(file))\n files_logged.remove(file)\n found_text[file] = 0",
"def match_words_to_search(chunks, searchresult, compare_func, join=True):\n wordlist = [hebstrip(w)[1] for w in word_bound.split(searchresult)]\n wordset = set(wordlist)\n genlist = [\n m\n for m in [\n match_one(rlist, wordset)\n for rlist in chunks.linked_heb\n if rlist.data\n ]\n if m\n ]\n ours = [i[0] for i in genlist]\n theirs = [i[1] for i in genlist]\n if join:\n return compare_func(\" \".join(ours), \" \".join(wordlist)), theirs\n else:\n return compare_func(ours, wordlist), theirs",
"def match_finder(word_list):\n dupe_check = []\n match_list = []\n for word in word_list:\n if word in match_list:\n continue\n elif word in dupe_check:\n match_list.append(word)\n else:\n dupe_check.append(word)\n return match_list",
"def search(self, word: str) -> bool:\n node = self.root\n res = [False]\n self.dfs(node, word, res)\n \n return res[0]",
"def search(self, values: dict):\n self.results.clear()\n self.matches, self.records = 0, 0\n # Extensions to be ignored.\n if values[\"-EXT-\"].endswith(\";\"):\n values[\"-EXT-\"] = values[\"-EXT-\"][:-1]\n if values[\"-DIR-\"].endswith(\";\"):\n values[\"-DIR-\"] = values[\"-DIR-\"][:-1]\n ignore_extensions = tuple(values[\"-EXT-\"].split(\";\")) \\\n if values[\"-EXT-\"] else ()\n # Folders to be ignored.\n ignore_folders = tuple(\"/\" + folder + \"/\"\n for folder in values[\"-DIR-\"].split(\";\")\n if values[\"-DIR-\"])\n \n # Check whether to ignore or search dot files/folders\n if values[\"-DOT-\"]:\n ignore_folders = (\"/.\",) + ignore_folders\n \n if values[\"CONTAINS\"]:\n function = self.contains\n elif values[\"STARTSWITH\"]:\n function = self.startswith\n else:\n function = self.endswith\n \n search_term = values[\"TERM\"].lower()\n for path, files in self.file_index:\n if any(ignored_folder in path + \"/\"\n for ignored_folder in ignore_folders):\n continue\n for file in files:\n if file.endswith(ignore_extensions) or \\\n values[\"-DOT-\"] and file.startswith(\".\"):\n continue\n self.records += 1\n if function(file.lower(), search_term):\n result = os.path.join(path, file)\n self.results.append(result)\n self.matches += 1\n \n with open(\"search_results.txt\", \"w\") as f:\n f.writelines(self.results)",
"def search(self, word):\n return self.dfsSearch(self.root, word, 0, False)",
"def search(self, word):\n return self.helper(word, self.root)",
"def search(self, word):\n return self.helper(word, self.root)",
"def search(self, word):\n return self.find(self.root,word)",
"def search_file(filename, term):\n try:\n filepath = root + filename\n with open(filepath) as file:\n return term in file.read().splitlines()\n except FileNotFoundError:\n with open(filepath,\"w+\") as file:\n return (search_file(filename, term))",
"def mergeSimilarDocuments(root, searchTerm, extension=\"txt\"):\n searchTerm = str(searchTerm).lower()\n\n document = []\n\n for root, directories, files in os.walk(root):\n for name in files:\n # Skip non-extension matching files.\n if not name[-len(extension):] == extension:\n continue\n filepath = os.path.join(root, name)\n if searchTerm in filepath.lower():\n with codecs.open(filepath, 'r', encoding=\"utf-8\") as f:\n document.append(f.read())\n\n return ''.join(document)",
"def search(self, word):\n return self.searchRecursive(self.root, word)",
"def buildMatchingWordList(searchWord, fileList, addCurrentBuffer=0):\n\t# build list of words that match from all imports that have .py source\n\t# speed this up by using a map?\n\twordList = []\n\tregex = re.compile(\"(\\W|\\A)\"+searchWord)\n\tregexWord = re.compile(\"\\A\"+searchWord+\"[a-zA-Z0-9_]+\")\n\t_addMatchingWords( vim.current.buffer, regex, regexWord, wordList)\n\tfor f in fileList:\n\t\tlines = open(f).readlines()\n\t\t_addMatchingWords( lines, regex, regexWord, wordList)\n\treturn wordList",
"def search_for_word(self, word, depth=\"shallow\"):\n\n # self._get_search_response(word)\n self._extract_html(uri_for_search(word))\n\n results = self.html.select(\".concept_light.clearfix\")\n # print(results)\n fmtd_results = []\n\n if depth == \"shallow\":\n for r in results:\n fmtd_results.append(self._extract_dictionary_information(r))\n\n elif depth == \"deep\":\n\n for r in results:\n fmtd_results.append(self._extract_dictionary_information(r))\n\n # If there are more than 20 results on the page, there is no \"More Words\" link\n more = self.html.select_one(\".more\")\n\n while more:\n link = more.get(\"href\")\n response = requests.get(r\"http:\" + link, timeout=5)\n html = BeautifulSoup(response.content, \"html.parser\")\n results = html.select(\".concept_light.clearfix\")\n\n for r in results:\n fmtd_results.append(self._extract_dictionary_information(r))\n\n more = html.select_one(\".more\")\n\n return fmtd_results",
"def search(self):\n apk_files = self.apk.get_files_types()\n search_results = []\n for file_path, file_type in apk_files.iteritems():\n file_ext = os.path.splitext(os.path.basename(file_path))[1]\n\n #if file type filter on, and this file is not that type, then skip\n if self.file_types and not any(interested_type in file_type.lower() or interested_type in file_ext for interested_type in self.file_types):\n continue\n\n search_result = None\n file_data = self.apk.get_file(file_path)\n\n if self.search_strings:\n for pattern in self.patterns:\n match = pattern.search(file_data)\n if match:\n search_result = {'file_path': file_path,\n 'file_type': file_type,\n 'search_string': pattern.pattern}\n search_results.append(search_result)\n else:\n search_result = {'file_path': file_path,\n 'file_type': file_type,\n 'search_string': None}\n search_results.append(search_result)\n\n #write individual files\n if search_result and self.save_matched_files_dir:\n #save original structure to avoid duplicate filename collisions\n save_file_path = os.path.join(self.save_matched_files_dir, file_path)\n if not os.path.exists(os.path.dirname(save_file_path)):\n os.makedirs(os.path.dirname(save_file_path))\n\n with open(save_file_path,'wb') as f:\n f.write(file_data)\n\n if 'Android binary XML' in file_type:\n with open(save_file_path,'r+') as axml_f:\n decoded_axml = AXMLPrinter(axml_f.read()).buff\n axml_f.seek(0)\n axml_f.write(decoded_axml)\n axml_f.truncate()\n\n return search_results"
] | [
"0.70012105",
"0.6431989",
"0.607879",
"0.602893",
"0.59404826",
"0.5890252",
"0.582532",
"0.5747598",
"0.5707703",
"0.567852",
"0.56751424",
"0.5637301",
"0.56358767",
"0.5613221",
"0.5607855",
"0.55870503",
"0.5574656",
"0.5474942",
"0.5450693",
"0.54445446",
"0.54160345",
"0.5345033",
"0.5345033",
"0.53430593",
"0.53329885",
"0.53137046",
"0.52908134",
"0.52528745",
"0.52367556",
"0.52363306"
] | 0.8496839 | 0 |
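A small sketch of the case-insensitive match bookkeeping used by return_equals above, where a matching entry is stored together with the start and end index of the word within its name; the file name and search word here are hypothetical:

element = "HolidayPhotos.txt"             # hypothetical entry from listdir()
word = "photo"                            # hypothetical search word
idx = element.lower().find(word.lower())
if idx != -1:
    hit = [element, idx, idx + len(word)]
    print(hit)                            # ['HolidayPhotos.txt', 7, 12]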
| This function returns search results for files and directories whose names match the given word. | First the current directory is searched and, if there are any results, they are returned as a list | If the user searches from the home page, all drives are searched and the results are returned as a list; search(word, current_directory[, search_result_list=search_list]) | def search(word, current_directory, search_result_list=search_list):
if search_result_list:
for counter in range(len(search_result_list)):
search_result_list.pop()
if current_directory:
searcher_object = CompleteSearch(current_directory, word)
searcher_object.start()
searcher_object.join()
return remove_equals(search_result_list)
else:
for cleaner in range(len(search_result_list)):
search_result_list.pop()
for driver in drivers():
searcher_object = CompleteSearch(driver, word)
searcher_object.start()
return remove_equals(search_result_list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def return_equals(self, directory, word, result=search_list):\n try:\n directories = listdir(self.directory)\n except WindowsError:\n directories = []\n if \"$Recycle.Bin\" in directories:\n directories.remove(\"$Recycle.Bin\")\n if \"*\\\\*\" in directories:\n directories.remove(\"*\\\\*\")\n # print directories\n for element in directories:\n element = element.lower()\n word = word.lower()\n word_index = element.find(word)\n if not element:\n continue\n\n elif self.is_pattern :\n if self.search_s(element) :\n result.append(directory + \"\\\\\" + element)\n\n elif element.split('.')[-1] == \"txt\":\n print element.split('.')[-1]\n try:\n text_file = open(self.directory + \"\\\\\" + element, 'r')\n line = text_file.readline()\n while line:\n if search_s(line):\n result.append([self.directory + \"\\\\\" + element, ])\n break\n line = text_file.readline()\n text_file.close()\n except IOError:\n print 'kir'\n print\n\n elif element == self.word:\n result.append(directory + \"\\\\\" + element)\n elif word_index + 1:\n # print directory + \"\\\\\" + element\n result.append([self.directory + \"\\\\\" + element, word_index, word_index + len(word)])\n elif element.split('.')[-1] == \"txt\":\n print element.split('.')[-1]\n try:\n text_file = open(self.directory + \"\\\\\" + element, 'r')\n line = text_file.readline()\n while line:\n if word in line:\n result.append([self.directory + \"\\\\\" + element, word_index, word_index + len(word)])\n break\n line = text_file.readline()\n text_file.close()\n except IOError:\n print 'kir'\n print\n elif isdir(directory + \"\\\\\" + element):\n thread_obj = CompleteSearch(directory + \"\\\\\" + element, self.word)\n threads_list.append(thread_obj)\n thread_obj.start()\n thread_obj.join()",
"def search(self,path,key_words):\t#key_words must be tuple\n\t\ttry:\n\t\t\tall=os.walk(path,False)\t#os.walk() is a generator , the return is a tuple which is (dirpath,dirnames,filenames)\n\t\texcept:\n\t\t\tpass\n\t\telse:\n\t\t\tfor item in all:\n\t\t\t\tfilepath=item[0]\n\t\t\t\tfor filename in item[2]:\n\t\t\t\t\tfor key_word in key_words:\t#find all key_word\n\t\t\t\t\t\tif key_word in filename.lower():\t#ignore case of word , and only search filename\n\t\t\t\t\t\t\tself.result.append(os.path.join(filepath,filename))",
"def filesearch(word=\"\"):\n logger.info('Starting filesearch')\n file = []\n for f in glob.glob(\"*\"):\n if word[0] == \".\":\n if f.endswith(word):\n file.append(f)\n\n elif word in f:\n file.append(f)\n #return file\n logger.debug(file)\n return file",
"def search(self, word):\n return self.dfsSearch(self.root, word, 0, False)",
"def search_for_word(self, word, depth=\"shallow\"):\n\n # self._get_search_response(word)\n self._extract_html(uri_for_search(word))\n\n results = self.html.select(\".concept_light.clearfix\")\n # print(results)\n fmtd_results = []\n\n if depth == \"shallow\":\n for r in results:\n fmtd_results.append(self._extract_dictionary_information(r))\n\n elif depth == \"deep\":\n\n for r in results:\n fmtd_results.append(self._extract_dictionary_information(r))\n\n # If there are more than 20 results on the page, there is no \"More Words\" link\n more = self.html.select_one(\".more\")\n\n while more:\n link = more.get(\"href\")\n response = requests.get(r\"http:\" + link, timeout=5)\n html = BeautifulSoup(response.content, \"html.parser\")\n results = html.select(\".concept_light.clearfix\")\n\n for r in results:\n fmtd_results.append(self._extract_dictionary_information(r))\n\n more = html.select_one(\".more\")\n\n return fmtd_results",
"def search(self, word):\n return self._dfs(word, 0, self.trie)",
"def search_entries(search):\n _, filenames = default_storage.listdir(\"entries\")\n result = []\n for filename in filenames: \n if filename.endswith(\".md\"):\n nameonly = re.sub(r\"\\.md$\", \"\", filename)\n \n if nameonly.lower() == search.lower():\n #print(\"name only :\", nameonly)\n #print(\"search :\", search)\n return (nameonly)\n elif search.lower() in nameonly.lower():\n result.append(nameonly)\n return(result)",
"def search(self, word):\n return self.find(self.root,word)",
"def search(self, values: dict):\n self.results.clear()\n self.matches, self.records = 0, 0\n # Extensions to be ignored.\n if values[\"-EXT-\"].endswith(\";\"):\n values[\"-EXT-\"] = values[\"-EXT-\"][:-1]\n if values[\"-DIR-\"].endswith(\";\"):\n values[\"-DIR-\"] = values[\"-DIR-\"][:-1]\n ignore_extensions = tuple(values[\"-EXT-\"].split(\";\")) \\\n if values[\"-EXT-\"] else ()\n # Folders to be ignored.\n ignore_folders = tuple(\"/\" + folder + \"/\"\n for folder in values[\"-DIR-\"].split(\";\")\n if values[\"-DIR-\"])\n \n # Check whether to ignore or search dot files/folders\n if values[\"-DOT-\"]:\n ignore_folders = (\"/.\",) + ignore_folders\n \n if values[\"CONTAINS\"]:\n function = self.contains\n elif values[\"STARTSWITH\"]:\n function = self.startswith\n else:\n function = self.endswith\n \n search_term = values[\"TERM\"].lower()\n for path, files in self.file_index:\n if any(ignored_folder in path + \"/\"\n for ignored_folder in ignore_folders):\n continue\n for file in files:\n if file.endswith(ignore_extensions) or \\\n values[\"-DOT-\"] and file.startswith(\".\"):\n continue\n self.records += 1\n if function(file.lower(), search_term):\n result = os.path.join(path, file)\n self.results.append(result)\n self.matches += 1\n \n with open(\"search_results.txt\", \"w\") as f:\n f.writelines(self.results)",
"def _FindFileNamesInDirectory(input_api, dir_path, search_file_names):\n matches = []\n for _, _, file_names in input_api.os_walk(dir_path):\n for file_name in file_names:\n if file_name in search_file_names:\n matches.append(file_name)\n return matches",
"def search(self, word):\n return self.searchRecursive(self.root, word)",
"def get_filepaths(keyword, directory):\n \n matches = []\n filenames_total = []\n \n for root, dirnames, filenames in os.walk(directory):\n for filename in filenames:\n if keyword in filename:\n matches.append(root + '/' + filename)\n filenames_total.append(filename)\n return matches, filenames_total",
"def directory_find(root, word):\n for path, dirs, files in os.walk(root):\n if word in dirs:\n return os.path.join(path, word)",
"def search(self, word):\n return self.helper(word, self.root)",
"def search(self, word):\n return self.helper(word, self.root)",
"def search(self, word):\n return self.__search(self.__root, word,0)",
"def search(self, word):\n node = self.root\n return self.searchHelper(node, word)",
"def _search(dork): \n retVal = [] \n paths = [] \n\n if not dork: \n return None \n\n headers = {} \n\n headers[HTTP_HEADER.USER_AGENT] = dict(conf.httpHeaders).get(HTTP_HEADER.USER_AGENT, DUMMY_SEARCH_USER_AGENT) \n headers[HTTP_HEADER.ACCEPT_ENCODING] = HTTP_ACCEPT_ENCODING_HEADER_VALUE \n\n gpage = conf.googlePage if conf.googlePage > 1 else 1 \n\n#polluted by xi4okv QQ£º48011203 \n\n for gpage in xrange(1,10): \n logger.info(\"using search result page #%d\" % gpage) \n\n url = \"https://m.baidu.com/s?\" \n url += \"word=%s&\" % urlencode(dork, convall=True) \n url += \"&pn=%d\" % ((gpage - 1) * 10) \n\n try: \n req = urllib2.Request(url, headers=headers) \n conn = urllib2.urlopen(req) \n\n requestMsg = \"HTTP request:\\nGET %s\" % url \n requestMsg += \" %s\" % httplib.HTTPConnection._http_vsn_str \n logger.log(CUSTOM_LOGGING.TRAFFIC_OUT, requestMsg) \n\n page = conn.read() \n code = conn.code \n status = conn.msg \n\n responseHeaders = conn.info() \n page = decodePage(page, responseHeaders.get(\"Content-Encoding\"), responseHeaders.get(\"Content-Type\")) \n #print page \n\n responseMsg = \"HTTP response (%s - %d):\\n\" % (status, code) \n\n if conf.verbose <= 4: \n responseMsg += getUnicode(responseHeaders, UNICODE_ENCODING) \n elif conf.verbose > 4: \n responseMsg += \"%s\\n%s\\n\" % (responseHeaders, page) \n\n logger.log(CUSTOM_LOGGING.TRAFFIC_IN, responseMsg) \n except urllib2.HTTPError, e: \n pass \n\n urls = [urllib.unquote(match.group(0) or match.group(1)) for match in re.finditer(GOOGLE_REGEX, page, re.I)] \n #retVal = re.findall(GOOGLE_REGEX, page, re.I) \n\n import urlparse \n\n for url in urls: \n urls_pat = re.compile(r\"http://(.*)[^']\") \n aurl = re.findall(urls_pat, url) \n if \"?\" in url and \"baidu\" not in url: \n xpath = urlparse.urlparse(url).path \n if xpath not in paths: \n paths.append(xpath) \n retVal.append(aurl[0]) \n\n #print retVal \n\n return retVal",
"def search(self, word):",
"def search(self, word):\n return self.subsearch(self.root, word)",
"def search(self, word):\n return self.find(word, 0, self.root)",
"def get_set_from_search(self, word):\n found_set = set()\n found_count = 0\n # con1 = services.connect(user='sysdba', password='masterkey')\n # print(\"Security file for database is: \", con1.get_security_database_path() + \"\\n\")\n\n con = fdb.connect(\n database=self.db_filepath,\n # dsn='localhost:~/test/CGI.vvv', #localhost:3050\n user='sysdba', password='masterkey'\n #charset='UTF8' # specify a character set for the connection\n )\n\n # Create a Cursor object that operates in the context of Connection con:\n cur = con.cursor()\n\n if \"'\" in word: # we need to add an extra for SQL statements\n word = word.replace(\"'\", \"''\")\n\n SELECT = \"select * from FILES WHERE FILE_NAME LIKE '%\" + word + \".%'\" # adding period to include start of extension\n\n try:\n cur.execute(SELECT)\n for row in cur:\n print(\"found: \", row[1])\n found_set.add(row[1])\n found_count += 1\n\n print(\"found_count:\", found_count)\n con.close()\n return found_set, found_count\n\n except Exception as identifier:\n errormesg = \"Error while looking up: \" + word + \"\\n\" + str(identifier)\n print(BColors.FAIL + errormesg + BColors.ENDC)\n return found_set, found_count",
"def _find_files(directory, dirs_to_look_in, files_to_search_for, \n current_dir, see_files):\n full_name = True\n if see_files:\n full_name = False\n files_to_load = search_directory(directory, \n look_in=dirs_to_look_in,\n search_for=files_to_search_for,\n file_type='files',\n current_dir=current_dir,\n full_name=full_name)\n if not files_to_load:\n raise UserWarning('No files were found matching the search for %s'\\\n ' in the directory(s) %s%s' \\\n % (files_to_search_for, directory, \n dirs_to_look_in))\n return files_to_load",
"def test_directory_search(self, test_directory, search_terms, expected):\n test_file_directory = dirname(abspath(getfile(currentframe())))\n test_directory = join(test_file_directory, test_directory)\n search = FileSearch()\n results = search.search_files_in_directory(\n test_directory, search_terms)\n self.assertEquals(len(results), expected)\n results = search.search_files_in_directory(EMPTY, EMPTY)\n self.assertIsNone(results)",
"def search(self, word: str) -> bool:\n node = self.root\n res = [False]\n self.dfs(node, word, res)\n \n return res[0]",
"def full_search(pw, *arg, **kw):\n return pw.search(*arg, **kw)",
"def search(text):\n s = Search()\n result = _search(s, text)\n _print_results(result)\n return result",
"def search(self, word):\r\n return self.DFS(word, 0, 0, self.trie.root)",
"def search():\n pass",
"def search(self, search_word: str, base_url: str, depth_limit: int=0):\n self.__search(search_word,base_url,depth_limit)\n print(json.dumps(self.__result, ensure_ascii=False))"
] | [
"0.7444881",
"0.70426524",
"0.6684862",
"0.6606855",
"0.6576921",
"0.6373673",
"0.63490796",
"0.6346005",
"0.6337016",
"0.632932",
"0.62723446",
"0.62628967",
"0.6246423",
"0.6217556",
"0.6217556",
"0.6211351",
"0.6206448",
"0.62025374",
"0.61939514",
"0.61712366",
"0.61656684",
"0.6132813",
"0.6095609",
"0.60500526",
"0.6046157",
"0.60252947",
"0.6022719",
"0.6000209",
"0.59874177",
"0.596022"
] | 0.8495745 | 0 |
Fn that Initializes the App. Prints some Fancy Stuff to the Streamlit page, Displays demo Image and train data stats | def _init_app() -> None:
# set title
st.title("Detecting Pet Faces 👁 🐶 🐱",)
st.markdown(
"This application detects the faces of some common Pet Breeds using a **RetinaNet**."
)
st.write("## How does it work?")
st.write(
"Upload an image of a pet (cat or dog) and the app will draw the dounding box where it detects the objects:"
)
# Display demo Image
st.image(
Image.open("images/res_3.png"), caption="Example", use_column_width=True,
)
st.write("## Upload your own image")
st.write(
"**Note:** The model has been trained on pets breeds given in the `The Oxford-IIIT Pet Dataset`"
" and therefore will only with those kind of images."
)
st.markdown("**To be more precise the model has been trained on these breeds:**")
# Show Train data Statistics
st.image(Image.open("images/breed_count.jpg"), use_column_width=True,) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\n\n\tst.title(\"Iris EDA App with streamlit\")\n\tst.subheader(\"Streamlit is Cool\")",
"def load_homepage() -> None:\n st.image(\"iwakka.png\",\n use_column_width=True)\n \n st.header(\"Hello! This dashboard will help you to analize data from iWakka device\")\n st.write(\"Here are some step to process data: \")\n st.header(\" II. Download data\")\n st.write(\"Here you can download data\")\n \n st.header(\" III. Statistic Data\")\n st.write(\"You can judge patient condition accroding to provided data\") \n \n st.header(\"IV. AGF Indices\")\n st.write(\"Here you can analyse each chart\") \n \n st.header(\" V. Notes\")\n st.write(\"It can be useful for you to collect notes concerning your patient\") \n\n st.header(\" VI. Rank of patient\")\n st.write(\"You can compare results for selected patients\" )",
"def app():\n # Add title to the page\n st.title(\"Welcome to the Data Info page\")\n\n # Add subheader for the section\n st.subheader(\"View Data\")\n\n # Load the dataset\n X, y = load_data()\n df = pd.concat([X, y], axis=1)\n\n # Create an expansion option to check the data\n with st.expander(\"View data\"):\n st.dataframe(df)\n\n # Create a section to columns values\n # Give subheader\n st.subheader(\"Columns Summary:\")\n\n # Create a checkbox to get the summary.\n if st.checkbox(\"View Summary\"):\n st.dataframe(df.describe())\n\n # Create multiple check box in row\n col_name, col_dtype, col_data = st.columns(3)\n\n # Show name of all dataframe\n with col_name:\n if st.checkbox(\"Column Names\"):\n st.dataframe(df.columns)\n\n # Show datatype of all columns \n with col_dtype:\n if st.checkbox(\"Columns data types\"):\n dtypes = df.dtypes.apply(lambda x: x.name)\n st.dataframe(dtypes)\n \n # Show data for each columns\n with col_data: \n if st.checkbox(\"Columns Data\"):\n col = st.selectbox(\"Column Name\", list(df.columns))\n st.dataframe(df[col])\n \n # Add image for your data describtion.\n #st.image(\"./images/iris_classification_model.jpg\")\n\n # Add info about your dataset\\\n # st.write(\"Data Info\")\n\n # Add the link to you dataset\n # st.markdown(\"\"\"\n # <p style=\"font-size:24px\">\n # <a \n # href=\"https://github.com/ShishirShekhar/car-price-prediction/blob/main/about.py\"\n # target=_blank\n # style=\"text-decoration:none; color:red\"\n # >Dataset\n # </a> \n # </p>\n # \"\"\", unsafe_allow_html=True\n # )",
"def app():\n # Add balloons animation when page opens\n st.balloons()\n\n # Add title\n st.title(\"Welcome to the about me page\")\n \n # Add Contact Details\n st.header('Contact Us')\n\n # Add email\n st.markdown('''### Name:\n Khushal Mehrotra''')\n\n # Add name\n st.markdown('''### Email:\n [email protected]''')\n\n # Add github\n st.markdown('''### GitHub: [Khushal Mehrotra](https://github.com/km-07)''')\n\n # Add linkedin\n st.markdown('''### Linkedin: [Khushal Mehrotra](https://www.linkedin.com/in/khushal-mehrotra-5276091a9/)''')",
"def _define_app(self):\n # Define each component of the page\n\n # First the graph element that will plot the pose and velocity of the\n # robot\n pose_graph_layout = html.Div(dcc.Graph(id='pose', style={ 'width': '100%' }), className='row')\n\n # Then the section that will update the parameters for the shape that\n # the turtle will trace in the turtle sim\n shape_params_layout = html.Div(\n [\n dcc.Input(id=\"shape-edges\", type='number', placeholder='Num Edges', className='col mx-2'),\n dcc.Input(id=\"shape-radius\", type='number', placeholder='Radius', className='col mx-2'),\n html.Button(\"Trace Shape\", id='trace-button', n_clicks=0, className='btn btn-large btn-primary col-3'),\n ],\n className='row'\n )\n\n # Then the section that will display the status of the shape server\n server_status_layout = html.Div(\n dcc.Markdown(id='server-status', className='col'),\n className='row my-2'\n )\n\n # String them all together in a single page\n self._app.layout = html.Div(\n [\n # Hidden button for JS polling\n html.Button(id='refresh-status', n_clicks=0, style={ 'display': 'none' }),\n\n # The params for tracing the shape\n html.Div(html.H3('Shape Tracing:', className='col'), className='row mt-4'),\n shape_params_layout,\n server_status_layout,\n\n # The section showing the action status\n html.Div(html.H3('Pose History:', className='col'), className='row my-2'),\n pose_graph_layout,\n\n # The interval component to update the plots\n dcc.Interval(id='interval-component',\n n_intervals=0,\n interval=(Dashboard.POSE_UPDATE_INTERVAL * 1000)),\n ],\n className=\"container\"\n )\n\n # Define callbacks to update the elements on the page\n self._app.callback(\n dash.dependencies.Output('pose', 'figure'),\n [dash.dependencies.Input('interval-component', 'n_intervals')]\n )(self._define_pose_history_callback())\n\n # Define a callback to send the goal to the server when the 'Trace'\n # button is clicked. Wait until the client is done executing\n self._app.callback(\n dash.dependencies.Output('trace-button', 'autoFocus'),\n [dash.dependencies.Input('trace-button', 'n_clicks')],\n [dash.dependencies.State('shape-edges', 'value'),\n dash.dependencies.State('shape-radius', 'value')]\n )(self._define_trace_shape_callback())\n\n # Define a callback to show the status of the server\n self._app.callback(\n dash.dependencies.Output('server-status', 'children'),\n [dash.dependencies.Input('refresh-status', 'n_clicks')]\n )(self._define_server_status_callback())\n\n # Add the flask API endpoints\n self._flask_server.add_url_rule(\n Dashboard.APP_STATUS_URL,\n Dashboard.APP_STATUS_ENDPOINT,\n self._flask_status_endpoint\n )",
"def main():\n\tst.title(\"NLP App made with ❤ by Shubhanshu using Streamlit\")\n\tst.subheader(\"Natural Language Processing on the Go\")\n\n\t# SIDEBARS\n\tst.sidebar.subheader(\"About the App\")\n\tst.sidebar.text(\"NLP App made using Streamlit Framework\")\n\tst.sidebar.info(\"Streamlit is Awesome !!...Kudos to the Streamlit Team :)\")\n\n\t# Tokenization\n\tif st.checkbox(\"Show Tokens and Lemma\"):\n\t\tst.subheader(\"Tokenize Your Text\")\n\t\tmessage = st.text_area(\"Enter Your Text\",\"Type Here\",key=0)\n\t\tif st.button(\"Analyze\",key=1):\n\t\t\tnlp_result = text_analyzer(message)\n\t\t\tst.json(nlp_result)\n\n\n\t# Named Entity\n\tif st.checkbox(\"Show Named Entities\"):\n\t\tst.subheader(\"Extract Entities From Your Text\")\n\t\tmessage = st.text_area(\"Enter Your Text\",\"Type Here\",key=2)\n\t\tif st.button(\"Extract\",key=3):\n\t\t\tnlp_result = entity_analyzer(message)\n\t\t\tst.json(nlp_result)\n\n\t# Sentiment Analysis\n\tif st.checkbox(\"Show Sentiment Analysis\"):\n\t\tst.subheader(\"Sentiment of Your Text\")\n\t\tmessage = st.text_area(\"Enter Your Text\",\"Type Here\",key=4)\n\t\tif st.button(\"Analyze\",key=5):\n\t\t\tblob = TextBlob(message)\n\t\t\tresult_sentiment = blob.sentiment\n\t\t\tst.success(result_sentiment)\n\n\t# Text Summarization\n\tif st.checkbox(\"Show Text Summarization\"):\n\t\tst.subheader(\"Summarize your text\")\n\t\tmessage = st.text_area(\"Enter Your Text\",\"Type Here\",key=6)\n\t\tsummary_options = st.selectbox(\"Choose Your Summarizer\",(\"gensim\",\"sumy\"))\n\t\tif st.button(\"Summarize\",key=7):\n\t\t\tif summary_options == 'gensim':\n\t\t\t\tst.text(\"Using Gensim..\")\n\t\t\t\tsummary_result = summarize(message)\n\t\t\telif summary_options == 'sumy':\n\t\t\t\tst.text(\"Using Sumy..\")\n\t\t\t\tsummary_result = sumy_summarizer(message)\n\t\t\telse:\n\t\t\t\tst.warning(\"Using Default Summarizer\")\n\t\t\t\tst.text(\"Using Gensim\")\n\t\t\t\tsummary_result = summarize(message)\n\t\t\tst.success(summary_result)",
"def main():\r\n\r\n\tst.title(\"The Medicinal Use Detector App\")\r\n\tst.subheader(\"Built By Harshit And Gautam\")\r\n\thtml_temp = \"\"\"\r\n\t\t\t\t\t\t\t<div style=\"background-color:#00FF00 ;padding:10px\">\r\n\t\t\t\t\t\t\t<h1 style=\"color:white;text-align:center;\">Lyrics Generator App</h1>\r\n\t\t\t\t\t\t\t</div>\r\n\t\t\t\t\t\t\t\"\"\"\r\n\tst.markdown(\"<span style=“background-color:#121922”>\",unsafe_allow_html=True)\r\n\r\n\tfrom PIL import Image\r\n\timg = Image.open(\"medicine.jpg\")\r\n\tst.image(img, width=400)\r\n\tmenu = [\"Home\",\"About\",\"Login\",\"SignUp\"]\r\n\tchoice = st.sidebar.selectbox(\"Menu\",menu)\r\n \r\n\tif choice == \"Home\":\r\n\t\tst.subheader(\"Home\")\r\n\t\tst.info(\"This is a medicinal use detection app aiming to tell the functions of the given medicine\")\r\n\t\tst.success(\"Please Sign Up For Accessing the App\")\r\n\t\r\n\telif choice == \"About\":\r\n\t\tst.subheader(\"About\")\r\n\t\tst.text(\"This is a Medicinal Use Detector App which helps in knowing the fuctiions and uses of the medicines by generating APIs from google. As many of the medicines are left unused at home due to lack of knowledge of their purpose. This app helps in making use of these medicines\")\r\n\r\n\telif choice == \"Login\":\r\n\t\tst.subheader(\"Login Section\")\r\n\r\n\t\tusername = st.sidebar.text_input(\"User Name\")\r\n\t\tpassword = st.sidebar.text_input(\"Password\",type='password')\r\n\t\tif st.sidebar.checkbox(\"Login\"):\r\n\t\t\t# if password == '12345':\r\n\t\t\tcreate_usertable()\r\n\t\t\thashed_pswd = make_hashes(password)\r\n\r\n\t\t\tresult = login_user(username,check_hashes(password,hashed_pswd))\r\n\t\t\tif result:\r\n\r\n\t\t\t\tst.success(\"Logged In as {}\".format(username))\r\n\t\t\t\tst.balloons()\r\n\r\n\t\t\t\ttask = st.selectbox(\"Task\",[\"Add Post\",\"Student\",\"Faculty\"])\r\n\t\t\t\tif task == \"Add Post\":\r\n\t\t\t\t\tst.subheader(\"Add Your Post\")\r\n\r\n\t\t\t\telif task == \"Student\":\r\n\t\t\t\t\tst.subheader(\"Student\")\r\n\t\t\t\telif task == \"Faculty\":\r\n\t\t\t\t\tst.subheader(\"Faculty\")\r\n\t\t\t\t\tuser_result = view_all_users()\r\n\t\t\t\t\tclean_db = pd.DataFrame(user_result,columns=[\"Username\",\"Password\"])\r\n\t\t\t\t\tst.dataframe(clean_db)\r\n\t\t\telse:\r\n\t\t\t\tst.warning(\"Incorrect Username/Password\")\r\n\r\n\r\n\r\n\r\n\r\n\telif choice == \"SignUp\":\r\n\t\tst.subheader(\"Create New Account\")\r\n\t\tnew_user = st.text_input(\"Username\")\r\n\t\tnew_password = st.text_input(\"Password\",type='password')\r\n\t\tst.selectbox(\"Your Gender\", [\"Male\", \"Female\", \"Others\"])\r\n\t\tAge=st.text_input(\"Age\")\r\n\r\n\t\tif st.button(\"Signup\"):\r\n\t\t\tcreate_usertable()\r\n\t\t\tadd_userdata(new_user,make_hashes(new_password))\r\n\t\t\tst.success(\"You have successfully created a valid Account\")\r\n\t\t\tst.info(\"Go to Login Menu to login\")\r\n\t\t\tst.balloons()",
"def init_app(state):\n app = state.app\n\n app.config.setdefault('SPLIT_ALLOW_MULTIPLE_EXPERIMENTS', False)\n app.config.setdefault('SPLIT_DB_FAILOVER', False)\n app.config.setdefault('SPLIT_IGNORE_IP_ADDRESSES', [])\n app.config.setdefault('SPLIT_ROBOT_REGEX', r\"\"\"\n (?i)\\b(\n Baidu|\n Gigabot|\n Googlebot|\n libwww-perl|\n lwp-trivial|\n msnbot|\n SiteUptime|\n Slurp|\n WordPress|\n ZIBB|\n ZyBorg\n )\\b\n \"\"\")\n\n app.jinja_env.globals.update({\n 'ab_test': ab_test,\n 'finished': finished\n })\n\n @app.template_filter()\n def percentage(number):\n number *= 100\n if abs(number) < 10:\n return \"%.1f%%\" % round(number, 1)\n else:\n return \"%d%%\" % round(number)",
"def train_main(cls):\n launcher = cls()\n launcher.launch()",
"def main(self):\r\n\r\n #Train the GEN and DISC\r\n self.modelTrain.main()\r\n self.disp.show()",
"async def init(self):\n self.base_tamplates = {}\n self.preparing_task = None\n self.app = aioweb.Application()\n self.runner = aioweb.AppRunner(self.app)",
"def viewer_setup(self):\n pass",
"def viewer_setup(self):\n pass",
"def init(self):\n sg.theme(gui.app_theme)\n self.window = sg.Window(\n gui.app_title,\n gui.create_layout(),\n **gui.window_config,\n )\n gui.after_window_init(self.window)",
"def main():\n\n app = QApplication(sys.argv)\n win = TestWindow(TrainingGoalCalibrationTakePhotoScreen())\n win.show()\n sys.exit(app.exec_())",
"def main():\n app = App()\n app.run()",
"def main():\n \n\n\t# Creates a main title and subheader on your page -\n\t# these are static across all pages\n\t#st.title(\"Tweet Classifer\")\n\n\n #image= Image.open('resources/imgs/global_warming_img.jpg')\n\n \n\n\n\t# Creating sidebar with selection box -\n\t# you can create multiple pages this way\n\toptions = [\"Background\",\"Prediction\",\"Data visualization\", \"Information\",\"Project Team\"]\n\tselection = st.sidebar.radio(\"Choose page:\",options )\n\t#selection = st.sidebar.selectbox(\"Choose Option\", options)\n\n\t# Builing out the \"Background\" page\n\tif selection == \"Background\":\n\t\tst.info('This works');\t\n\t\tst.write('Explorers !!!')\t\n\t\timage= Image.open('resources/imgs/global_warming_img.jpg')\n\t\tst.image(image, caption='https://www.azocleantech.com/article.aspx?ArticleID=898', use_column_width=True)\n\t\t\n\n\n\tif selection == \"Project Team\" :\n\n\n\t\t#First row of pictures\n\n\t\tcol1, col2,col3 = st.beta_columns(3)\n\t\tRic_Pic =Image.open('resources/imgs/Rickie_pic.png') \n\t\tcol1.image(Ric_Pic,caption=\"Rickie Mogale Mohale\", width=150)\n\t\t\n \n\t\tCot_Pic =Image.open('resources/imgs/courtney_pic.png') \n\t\tcol2.image(Cot_Pic,caption=\"Courtney Murugan\", width=150)\n\t\t\n\n\t\tCot_Pic =Image.open('resources/imgs/jacques_pic.png') \n\t\tcol3.image(Cot_Pic,caption=\"Jacques Stander\", width=150)\n\t\t\n\n #Second row of pictures\n\t\tcol4, col5,col6 = st.beta_columns(3)\n\t\tvas_Pic =Image.open('resources/imgs/Rickie_pic.png') \n\t\tcol4.image(Ric_Pic,caption=\"Veshen Naidoo\", width=150)\n\t\t\n \n\t\tPhiw_Pic =Image.open('resources/imgs/Rickie_pic.png') \n\t\tcol5.image(Phiw_Pic,caption=\"Phiweka Mthini\", width=150)\n\n\t\tnor_Pic =Image.open('resources/imgs/Rickie_pic.png') \n\t\tcol6.image(nor_Pic,caption=\"Nourhan ALfalous\", width=150)\n\n\t\t#Third row of picture \n\t\tcol7, col8,col9 = st.beta_columns(3)\n\n\t\tzin_Pic =Image.open('resources/imgs/zintle_pic.png') \n\t\tcol8.image(zin_Pic,caption='Zintle Faltein-Maqubela', width=150)\n\t\tcol8.header(\"Role : Team Supervisor\")\n\t\t\n\t# Building out the \"Information\" page\n\tif selection == \"Information\":\n\n\t\tst.info(\"General Information\");\n\t\t# You can read a markdown file from supporting resources folder\n\t\tst.markdown(\"Some information here\");\n\n\t\tst.subheader(\"Raw Twitter data and label\");\n\t\tif st.checkbox('Show raw data'): # data is hidden if box is unchecked\n\t\t\tst.write(raw[['sentiment', 'message']]) # will write the df to the page\n\n\t# Building out the predication page\n\tif selection == \"Prediction\":\n\t\n\n\t\t\n\t\t\n\t\tst.markdown(\"\")\n\t\tst.info('This page will help you predict an individual\\'s position on global warming base on their tweet')\n\t\tst.subheader('To make predictions, please follow the three steps below')\n\t\t\n\t\t#selecting input text\n\t\ttext_type_selection = ['Single tweet','multiple tweets'] \n\t\ttext_selection = st.selectbox('Step 1 ) : Text input', text_type_selection)\n\n\t\t\n\t\t#C\n\t\tdef get_keys(val,my_dict):\n\t\t\tfor key,value in my_dict.items():\n\t\t\t\tif val == value:\n\t\t\t\t\t return key\n\t\t#C\n\n\n\t\t# User selecting prediction model\n\t\t#Models = [\"Logistic regression\",\"Decision tree\",\"Random Forest Classifier\",\"Naive Bayes\",\"XGboost\",\"Linear SVC\"]\n\t\t#selected_model =st.selectbox(\"Step 3 ) : Choose prediction model \",Models )\n \n\n\t\tif text_selection== 'Single tweet':\n ### SINGLE TWEET CLASSIFICATION ###\n\t\t\t\n # Creating a text box for user 
input\n\t\t\tinput_text = st.text_area(\"Step 2 ) : Enter Your Single Text Below :\") ##user entering a single text to classify and predict\n\t\t\tModels = [\"Logistic regression\",\"Decision tree\",\"Random Forest\",\"Naive Bayes\",\"XGboost\",\"Linear SVC\" ]\n\t\t\tselected_model = st.selectbox(\"Step 3 ) : Choose prediction model \",Models)\n\t\t\tdef load_prediction_models(model_file):\n\n\t\t\t\tloaded_models = joblib.load(open(os.path.join(model_file),\"rb\"))\n\t\t\t\treturn loaded_models\n\n\t\t\tprediction_labels = {'Negative':-1,'Neutral':0,'Positive':1}\n\t\t\tif st.button(\"Classify\"):\n\t\t\t\t## showing the user original text\n\t\t\t\tst.text(\"Input tweet is :\\n{}\".format(input_text))\n\n\t\t\t\t## Calling a function to process the text\n\t\t\t\t#tweet_text = cleaner(input_text) \n\n\t\t\t\t# Transforming user input with vectorizer\n\t\t\t\tvect_text = tweet_cv.transform([input_text]).toarray()\n\t\t\t\t# Load your .pkl file with the model of your choice + make predictions\n\t\t\t\t# Try loading in multiple models to give the user a choice\n\t\t\t\tpredictor = joblib.load(open(os.path.join(\"resources/Logistic_regression.pkl\"),\"rb\"))\n\t\t\t\tprediction = predictor.predict(vect_text)\n\n \t#M Model_ Selection\n\t\t\t\tif selected_model == \"Logistic regression\":\n\n\t\t\t\t\tpredictor = load_prediction_models(\"resources/Logistic_regression.pkl\")\n\t\t\t\t\tprediction = predictor.predict(vect_text)\n \t # st.write(prediction)\n\t\t\t\telif selected_model == \"Decision tree\":\n\n\t\t\t\t\tpredictor = load_prediction_models(\"resources/Logistic_regression.pkl\")\n\t\t\t\t\tprediction = predictor.predict(vect_text)\n # st.write(prediction)\n\t\t\t\telif selected_model == \"Random Forest Classifier\":\n\t\t\t\t\tpredictor = load_prediction_models(\"resources/Logistic_regression.pkl\")\n\t\t\t\t\tprediction = predictor.predict(vect_text)\n # st.write(prediction)\n\t\t\t\telif selected_model == \"Naive Bayes\":\n\t\t\t\t\tpredictor = load_prediction_models(\"resources/Logistic_regression.pkl\")\n\t\t\t\t\tprediction = predictor.predict(vect_text)\n\t\t\t\telif selected_model ==\"XGboost\" :\n\t\t\t\t\t predictor = load_prediction_models(\"resources/Logistic_regression.pkl\")\n\t\t\t\t\t prediction = predictor.predict(vect_text)\n\t\t\t\telif selected_model == \"Linear SVC\" :\n\t\t\t\t\tpredictor = load_prediction_models(\"resources/Logistic_regression.pkl\")\n\t\t\t\t\tprediction = predictor.predict(vect_text)\n\t\t\t\t# st.write(prediction)\n\t\t\t # When model has successfully run, will print prediction\n\t\t\t # You can use a dictionary or similar structure to make this output\n\t\t\t # more human interpretable.\n\t\t\t # st.write(prediction)\n\t\t\t\tfinal_result = get_keys(prediction,prediction_labels)\n\t\t\t\tst.success(\"Tweet Categorized as : {}\".format(final_result))",
"def __init__(self, root):\n self.app=root\n self.app.geometry('800x500')\n self.app.title(\"Bryce Streeper: Asset Allocation Visual \")\n self.makeTitle()\n self.makeGraph()\n self.makeSliders()\n self.update()",
"def page_home(state):\n\n st.title(\":house: Welcome to Stock Swing Predictor (SSP)\")\n\n image = Image.open(\"ssp.png\") # load logo\n st.image(image, use_column_width=True)\n st.markdown(\"*Note:* This is a conceptual tool and should not be used to make real/serious trading decisions.\")\n st.markdown(\"## Tool Overview:\")\n st.markdown(\n \"The Stock Swing Predictor makes future stock price swing predictions for any stock for the next day. Price swings are simply whether or not a price goes up or down, so with this the tool predicts which way a stocks price will move or swing for the upcoming day.\\nPredictions are made using seven different models and with the user's choice of dataset size. The models are trained using the stock price data of previous days.\"\n )\n\n st.markdown(\"## Using the Tool:\")\n st.markdown(\n \"Using the tool is simple once you are in the Web Interface! To run the tool, go to the `Run Settings` page.\"\n )\n st.markdown(\n \"After filling out the data fields for your chosen option, you can than click the button below to run the tool. After this, wait until the tool prompts you to `Go to the Prediction Dashboard to view your data`. Once prompted, you can then go to the Prediction Dashboard page and view your data.\"\n )\n\n st.markdown(\"## Experimental Results and Optimal Settings:\")\n st.markdown(\n \"Extensive experimentation was completed on the tool, the results of which are detailed in the README.\"\n )\n st.markdown(\"### Settings Recommendations:\")\n st.markdown(\n \"- It is recommended that one runs the tool with as much data as possible, as results are generally more accurate for all models. 1 or 2 years is the optimal amount of training data it seems, any more of that and you will be waiting for your results for a while.\"\n )\n st.markdown(\n \"- With this, the most accurate model seems to be the SVR-POLY model (Support Vector Regression with a Polynomial kernel), especially when trained with 1 year of data. Experimental results show future prediction accuracy results of almost 80%. The SVR-RBF model is also quite accurate, when trained with one month of data.\"\n )\n\n st.markdown(\"### Some Experimental Results:\")\n image2 = Image.open(\"results.png\") # load logo\n st.image(image2, use_column_width=True)\n\n st.markdown(\n \"This shows how accurate models are and which amount of training data they are most accuate with. \\n This table displays the predictions on 9 different stocks over 5 different days for each time period of data. This was done from 3/30/2021-4/6/2021. With this, the percentage represents the number of predictions that were correct, out of a total 45 predictions that were made for each time period of data.\"\n )\n st.markdown(\"## Get in Touch & Learn More:\")\n st.markdown(\"- View source code on the [Project GitHub](https://github.com/lussierc/StockSwingPredictor). Consider contributing.\")\n st.markdown(\"- View my personal website and get contact information [here](https://christianlussier.com).\")\n st.markdown(\"## Disclaimer:\")\n st.markdown(\"We are not responsible for any investment losses incurred by users. This tool is by no means, a be-all-end all for stock prediction and while it offers promise it should not be used to make serious trading decisons. It is a conceptual tool that is somewhat accurate and is meant give users insights into the potential uses of ML for stock prediction.\")",
"def main():\n\n viewer = Viewer(1900, 1200)\n viewer.add((init_BaracuddaFish()))\n viewer.add(init_BlueStarFish())\n viewer.add_movable(init_SeaSnake())\n init_groupeOfFishs(viewer)\n\n under_water = [\n 'res/skybox/underwater/uw_lf.jpg',\n 'res/skybox/underwater/uw_rt.jpg',\n 'res/skybox/underwater/uw_up.jpg',\n 'res/skybox/underwater/uw_dn.jpg',\n 'res/skybox/underwater/uw_ft.jpg',\n 'res/skybox/underwater/uw_bk.jpg']\n viewer.add(Skybox(under_water))\n\n viewer.run()",
"def OnInit(self):\n frame = App_Frame(title='PyDraw')\n frame.Show()\n return True",
"def main():\n\n root = tk.Tk()\n root.title(\"Exploring US Bikeshare Data\")\n app = Application(master=root)\n print(\"Application loaded! Please use the GUI window to continue...\")\n app.mainloop()",
"def run_datalab_fbs():\n\n appctxt = ApplicationContext()\n win = DataLab()\n # debug_setup(win)\n win.show()\n exit_code = appctxt.app.exec_()\n sys.exit(exit_code)",
"def init():\n ########################\n # OPTIONS\n ########################\n # Debugging tools\n global TIMER # displays time of every major step\n TIMER = True\n global MONITOR # displays monitoring infos\n MONITOR = False\n \n global directories\n directories = {'1Face': 'data/1Face/',\n '2Faces': 'data/2Faces/',\n '3Faces': 'data/3Faces/',\n 'test': 'data/test/'}\n \n # Opt. swicthes\n global maxfinder # to find the max dim. amongst the pictures\n maxfinder = False\n global ML_mode\n ML_mode = {'CNN_Train': False,\n 'CNN_Pred' : True,\n 'Sampler': True}\n \n # Global variables\n global num_pics\n num_pics = {'1Face': 0,\n '2Faces': 0,\n '3Faces': 0}\n global labels\n labels = {'1Face': 0,\n '2Faces': 1,\n '3Faces': 2}\n global num_data\n num_data = 0\n global splitsize # Fraction of data to build the training set\n splitsize = 0.7 \n global maxheight # Resize the pictures to a power of 2 for CNN (2^8 here)\n maxheight = 128\n global maxwidth\n maxwidth = 128\n global TreshEdge # Number of consecutive black pixels to define an edge\n TreshEdge = 2\n global TreshFace # Number of white pixels to define a face (or large edge)\n TreshFace = maxheight/16",
"def main():\r\n st.sidebar.title(\"MỤC LỤC\")\r\n selection = st.sidebar.radio(\"Go to\", list(PAGES.keys()))\r\n\r\n page = PAGES[selection]\r\n\r\n with st.spinner(f\"Loading {selection} ...\"):\r\n ast.shared.components.write_page(page)\r\n st.sidebar.title(\"Intro\")\r\n st.sidebar.info(\r\n \"App này được làm ra nhằm mục đích phục vụ cho việc **nghiên cứu và học tập** \"\r\n \"[source code](https://github.com/BienQuangTruong/streamlit_app). \"\r\n )\r\n st.sidebar.title(\"About\")\r\n st.sidebar.info(\r\n \"\"\"\r\n App này được thực hiện bởi Biện Quang Trường - Nguyễn Văn Phước.\r\n\"\"\"\r\n )",
"def main():\r\n\r\n st.title(\"Summarizer App\")\r\n menu = ['Home', 'About']\r\n choice = st.sidebar.selectbox(\"Menu\", menu)\r\n\r\n if choice == \"Home\":\r\n st.subheader(\"Summarization\")\r\n raw_text = st.text_area(\"Enter Text Here\")\r\n if st.button(\"Summarize\"):\r\n \r\n with st.expander(\"original Text\"):\r\n st.write(raw_text)\r\n\r\n # Layout\r\n c1, c2 = st.columns(2)\r\n with c1:\r\n with st.expander(\"LexRank Summary\"):\r\n my_summary = sumy_summarizer(raw_text)\r\n document_len = {\"Original\":len(raw_text), \r\n \"Summary\":len(my_summary)}\r\n st.write(document_len)\r\n st.write(my_summary) \r\n st.info(\"Rouge Score\")\r\n eval_df = evaluate_summary(my_summary, raw_text)\r\n st.dataframe(eval_df.T)\r\n eval_df['metrics'] = eval_df.index\r\n c = alt.Chart(eval_df).mark_bar().encode(\r\n x = 'metrics', y = 'rouge-1')\r\n st.altair_chart(c)\r\n\r\n with c2:\r\n with st.expander(\"LexRank Summary\"):\r\n my_summary = summarize(raw_text)\r\n document_len = {\"Original\":len(raw_text), \r\n \"Summary\":len(my_summary)}\r\n st.write(document_len)\r\n st.write(my_summary)\r\n\r\n st.info(\"Rouge Score\")\r\n eval_df = evaluate_summary(my_summary, raw_text)\r\n st.dataframe(eval_df.T)\r\n eval_df['metrics'] = eval_df.index\r\n c = alt.Chart(eval_df).mark_bar().encode(\r\n x = 'metrics', y = 'rouge-1')\r\n st.altair_chart(c)\r\n\r\n else:\r\n st.subheader(\"About\")\r\n st.write(\"Webpage for Summarization using NLP\")\r\n st.write(\"***\")\r\n st.write(\"### Packages Used\")\r\n st.write(\"pip install streamlit gensim==3.8.3 sumy gensim_sum_ext pandas altair seaborn rouge\")\r\n st.write(\"***\")\r\n st.write(\"This webpage is maintained by Ramakrishnan\")",
"def init_app(self):\n\n self.setAttribute(PyQt5.QtCore.Qt.WA_DeleteOnClose)\n self.setGeometry(300, 300, 500, 550)\n self.setWindowTitle(\"DICOM Viewer\")",
"def setup(self):\n \n # Define ui file to be used as a graphical interface\n # This file can be edited graphically with Qt Creator\n # sibling_path function allows python to find a file in the same folder\n # as this python module\n self.ui_filename = sibling_path(__file__, \"lick_training_plot.ui\")\n \n #Load ui file and convert it to a live QWidget of the user interface\n self.ui = load_qt_ui_file(self.ui_filename)\n\n # Measurement Specific Settings\n # This setting allows the option to save data to an h5 data file during a run\n # All settings are automatically added to the Microscope user interface\n self.settings.New('save_h5', dtype=bool, initial=False)\n self.settings.New('tdelay', dtype=int, initial=0,ro=True)\n self.settings.New('trial_time',dtype=int,initial=10,ro=False)\n self.settings.New('lick_interval', dtype=int, initial=1,ro=False)\n self.settings.New('water_reward', dtype=bool, initial=False,ro=False)\n self.settings.New('total_drops', dtype=int, initial=0,ro=False)\n self.settings.New('save_movie', dtype=bool, initial=False,ro=False)\n self.settings.New('movie_on', dtype=bool, initial=False,ro=True)\n #self.settings.New('sampling_period', dtype=float, unit='s', initial=0.005)\n \n # Create empty numpy array to serve as a buffer for the acquired data\n #self.buffer = np.zeros(10000, dtype=float)\n \n # Define how often to update display during a run\n self.display_update_period = 0.04 \n \n # Convenient reference to the hardware used in the measurement\n self.daq_ai = self.app.hardware['daq_ai']\n self.arduino_sol = self.app.hardware['arduino_sol']\n self.water=self.app.hardware['arduino_water']\n self.camera=self.app.hardware['camera']",
"def home():\n return render_template(\n 'index.html',\n title= \"Pi-Lapse\",\n year=datetime.now().year,\n image = Cam.do.GetLastImage(),\n \n )",
"def set_app_defaults(self):\n self.curve_render = 0\n self.image_render = 0\n self.image_height = 200\n self.image_data = []\n self.auto_scale = True\n\n self.create_actions()\n self.setup_signals()\n self.reset_graph()\n\n self.fps = utils.SimpleFPS()\n\n # Click the live button\n self.ui.actionContinue_Live_Updates.trigger()"
] | [
"0.6979405",
"0.66623354",
"0.64542246",
"0.6356499",
"0.6246196",
"0.62062424",
"0.61641407",
"0.6148707",
"0.6089208",
"0.6088656",
"0.60550535",
"0.6050303",
"0.6050303",
"0.60261196",
"0.60190606",
"0.5988488",
"0.5968736",
"0.59630483",
"0.59583074",
"0.5930924",
"0.590887",
"0.5907054",
"0.5904834",
"0.5902993",
"0.59014857",
"0.5872159",
"0.5855561",
"0.5855296",
"0.58535963",
"0.5790332"
] | 0.7796994 | 0 |
Moroccan Mosaic using Python Turtle | import turtle

def draw():
myPen = turtle.Turtle()
myPen.shape("arrow")
myPen.speed(1000) # Set the speed of the turtle
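    # (turtle treats speed values outside 0.5-10, such as 1000, as 0: fastest, no animation delay)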
# A Procedue to draw a mosaic by repeating and rotating a polygon shape.
def drawMosaic(color, numberOfSides, size, numberOfIterations):
myPen.color(color)
for i in range(0, numberOfIterations):
for j in range(0, numberOfSides):
myPen.forward(size)
myPen.left(360 / numberOfSides)
myPen.left(360 / numberOfIterations)
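    # mt selects which of the four mosaic variants below is drawn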
mt = 2
if (mt == 1):
# Mosaic #1
drawMosaic("#0B5CCB", 6, 100, 20)
elif (mt == 2):
# Mosaic #2
drawMosaic("#CB0B3F",5,100,10)
elif (mt == 3):
# Mosaic #3
drawMosaic("#0BCB9D",6,100,6)
elif (mt == 4):
# Mosaic #4
drawMosaic("#FF5733",6,100,15)
myPen.hideturtle() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def example_from_m3():\n # ------------------------------------------------------------------\n # Next two lines after this comment set up a TurtleWindow object\n # for animation. The definition of a TurtleWindow is in the\n # rg (shorthand for rosegraphics) module.\n # ------------------------------------------------------------------\n window = rg.TurtleWindow()\n window.delay(1) # Bigger numbers mean slower animation.\n\n # ------------------------------------------------------------------\n # Next two lines make (construct) two SimpleTurtle objects.\n # ------------------------------------------------------------------\n nadia = rg.SimpleTurtle()\n akil = rg.SimpleTurtle('turtle')\n\n # ------------------------------------------------------------------\n # Next lines ask the SimpleTurtle objects to do things:\n # ------------------------------------------------------------------\n nadia.forward(100)\n nadia.left(90)\n nadia.forward(200)\n\n akil.right(45)\n akil.backward(50)\n akil.right(60)\n\n nadia.forward(50)\n nadia.left(135)\n\n # ------------------------------------------------------------------\n # Next lines set the pen and speed characteristics of the\n # SimpleTurtle objects. The pen characteristic is itself\n # an object that is constructed, of type Pen.\n # ------------------------------------------------------------------\n nadia.pen = rg.Pen('blue', 10) # The 10 is the Pen's thickness\n nadia.speed = 10 # 1 is slowest, big is faster, maxes out about 100\n\n akil.pen = rg.Pen('red', 30)\n akil.speed = 1\n\n akil.backward(100)\n nadia.forward(100)\n\n nadia.left(60)\n nadia.forward(500)\n nadia.speed = 1 # was 10, so much slower now\n nadia.right(120)\n nadia.forward(200)\n\n window.close_on_mouse_click()",
"def setTurtle(t):\r\n t.pu()\r\n t.goto(initialCoordinates())",
"def morphology(seed=425, th=120):\n \n # impact parameters\n M = 1e8*u.Msun\n B = 19.85*u.kpc\n V = 220*u.km/u.s\n phi = coord.Angle(0*u.deg)\n theta = coord.Angle(th*u.deg)\n Tenc = 0.01*u.Gyr\n T = 0.5*u.Gyr\n dt = 0.1*u.Myr\n rs = 0*u.pc\n \n # potential parameters\n potential = 3\n Vh = 220*u.km/u.s\n q = 1*u.Unit(1)\n rhalo = 20*u.pc\n par_pot = np.array([Vh.si.value, q.value, rhalo.si.value])\n \n # setup tube\n Nstar = 3000\n wx = 30*u.kpc\n wy = 0*u.pc\n wz = 0*u.pc\n sx = 0*u.km/u.s\n \n np.random.seed(seed)\n \n xphi = np.linspace(-0.3*np.pi,0.3*np.pi, Nstar)\n xr = 20*u.kpc + np.random.randn(Nstar)*0.02*u.kpc\n x = np.sin(xphi) * xr\n y = np.cos(xphi) * xr\n z = x * 0\n vx = -np.cos(xphi) * Vh\n vy = np.sin(xphi) * Vh\n vz = vx * 0\n \n potential_perturb = 1\n par_perturb = np.array([M.si.value, 0., 0., 0.])\n \n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream = {}\n stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n\n plt.close()\n fig, ax = plt.subplots(3,1,figsize=(12,8), sharex=True)\n \n c_init = mpl.cm.Blues_r(1)\n c_fin0 = mpl.cm.Blues_r(0.5)\n c_fin = mpl.cm.Blues_r(0.2)\n \n eta = coord.Angle(np.arctan2(np.sqrt(stream['x'][0].to(u.kpc).value**2 + stream['x'][1].to(u.kpc).value**2),xr.to(u.kpc).value)*u.rad)\n xi = np.arctan2(stream['x'][1].to(u.kpc).value, stream['x'][0].to(u.kpc).value)\n xi = coord.Angle((xi - np.median(xi))*u.rad)\n \n vlabel = ['x', 'y', 'z']\n \n for i in range(3):\n plt.sca(ax[i])\n im = plt.scatter(xi.deg, eta.deg, c=stream['v'][i].value, s=20)\n \n plt.xlim(-60, 50)\n plt.ylim(55, 35)\n plt.gca().set_aspect('equal')\n \n if i==2:\n plt.xlabel('$\\phi_1$ [deg]')\n plt.ylabel('$\\phi_2$ [deg]')\n \n divider = make_axes_locatable(plt.gca())\n cax = divider.append_axes(\"right\", size=\"3%\", pad=0.1)\n plt.colorbar(im, cax=cax)\n plt.ylabel('$V_{{{}}}$ [km s$^{{-1}}$]'.format(vlabel[i]))\n \n plt.tight_layout()",
"def spirala(t):\n t.penup()\n t.setx(random.randrange(-200,200))\n t.sety(random.randrange(-200,200))\n t.pencolor(random.randrange(0,255),random.randrange(0,255),200)\n t.width(random.randrange(2,13))\n t.pendown()\n\n for i in range(120):\n \tt.forward(20+i)\n \tt.left(30 - i/1.5)",
"def draw_housing():\n tess.pensize(3)\n tess.color(\"black\", \"darkgrey\")\n tess.begin_fill()\n tess.forward(80)\n tess.left(90)\n tess.forward(200)\n tess.circle(40, 180)\n tess.forward(200)\n tess.left(90)\n tess.end_fill()",
"def shapes2():\r\n turtle.up()\r\n turtle.backward(100)\r\n turtle.left(270)\r\n turtle.forward(100)\r\n turtle.left(90)\r\n turtle.backward(700)\r\n shapes()",
"def draw_housing():\n tess.pensize(3)\n tess.color(\"black\", \"darkgrey\")\n tess.begin_fill()\n tess.forward(80)\n tess.left(90)\n tess.forward(200)\n tess.circle(40, 180)\n tess.forward(200)\n tess.left(90)\n tess.end_fill()\n tess.hideturtle()",
"def draw_housing_2():\r\n tom.pensize(3)\r\n tom.color(\"black\", \"darkgrey\")\r\n tom.begin_fill()\r\n tom.forward(80)\r\n tom.left(90)\r\n tom.forward(200)\r\n tom.circle(40, 180)\r\n tom.forward(200)\r\n tom.left(90)\r\n tom.end_fill()\r\n tom.hideturtle()",
"def shapes():\r\n turtle.up()\r\n turtle.forward(500)\r\n turtle.down()\r\n draw_hexagon()\r\n draw_square()\r\n draw_triangle()",
"def background(self):\n sun = graphics.Circle(graphics.Point(200, 310), 50)\n sun.setFill('yellow')\n sun.draw(self.win)\n \n earth = graphics.Circle(graphics.Point(40, 250), 30)\n earth.setFill('blue')\n earth.draw(self.win)\n continent = graphics.Circle(graphics.Point(30, 265), 10)\n continent.setFill('green')\n continent.draw(self.win)\n cont_2 = graphics.Circle(graphics.Point(30, 235), 10)\n cont_2.setFill('green')\n cont_2.draw(self.win)\n cont_3 = graphics.Circle(graphics.Point(55, 245), 10)\n cont_3.setFill('green')\n cont_3.draw(self.win)\n \n stars = graphics.Circle(graphics.Point(250, 250), 5)\n stars.setFill('white')\n stars.draw(self.win)\n star1 = graphics.Circle(graphics.Point(100, 250), 5)\n star1.setFill('white')\n star1.draw(self.win)\n star2 = graphics.Circle(graphics.Point(150, 150), 5)\n star2.setFill('white')\n star2.draw(self.win)\n star3 = graphics.Circle(graphics.Point(50, 100), 5)\n star3.setFill('white')\n star3.draw(self.win)\n star3 = graphics.Circle(graphics.Point(100, 50), 5)\n star3.setFill('white')\n star3.draw(self.win)\n star4 = graphics.Circle(graphics.Point(250, 80), 5)\n star4.setFill('white')\n star4.draw(self.win)\n star4 = graphics.Circle(graphics.Point(200, 60), 5)\n star4.setFill('white')\n star4.draw(self.win)",
"def main():\n tortue_1 = turtle.Turtle()\n tortue_1.shape(\"turtle\")\n tortue_1.color(\"aquamarine4\")\n longueur = 200\n largeur = 200\n nbre_carres = 3\n angle_entre_carres = 15\n for i in range(nbre_carres):\n trace_rectangle(tortue_1, longueur, largeur)\n tortue_1.left(angle_entre_carres * (i + 1))\n\n turtle.exitonclick() # Empêche la fenêtre de se fermer automatiquement à la fin du tracé",
"def animation (t,mode = \"cercle\",taille = 40):\n\tx,y = primitives.get_position ()\n\t\n\t# En fonction du nombre de « cycles » \n\t# on peut définir des couleurs différentes\n\t# qui sont représentatives d'une progression\n\tif t % 5 == 0:\n\t\tliste = [\"rouge\",\"carmin\",\"or\",\"vert\",\"chartreuse\"]\n\telif t % 3 == 0:\n\t\tliste = [\"carmin\",\"or\",\"chartreuse\"]\n\telif t % 2 == 0:\n\t\tliste = [\"carmin\",\"chartreuse\"]\n\telse: # Un nombre indéterminé \n\t\tliste = [\"zinzolin\",\"indigo\"]\n\n\t# speed (0) est déjà activé normalement \n\tfor i in range (t):\n\t\t# Définit la couleur de ce tour de boucle \n\t\tcurrent_color = couleurs.string_to_hexa (liste[i % len (liste)])\n\n\t\tif mode == \"cercle\":\n\t\t\t# Fait un cercle ... mouhaha\n\t\t\tprimitives.cercle (6,taille * 2 + 20,generer_couleurs (current_color,6, taille))\n\t\telif mode == \"arc\":\n\t\t\tprimitives.arc (20,taille + 10,generer_couleurs (current_color,5, taille))\n\t\telse: # mode == \"ligne\"\n\t\t\tprimitives.colonnes (1,taille + 10, taille + 10,generer_couleurs (current_color,4,taille))",
"def cool_turtle():\n # Make the TurtleWindow.\n window = rg.TurtleWindow()\n\n # Make the SimpleTurtle.\n cool_turtle = rg.SimpleTurtle('turtle')\n cool_turtle.pen = rg.Pen('forest green', 1) # Try thickness 5 too\n cool_turtle.speed = 1 # Slow\n\n # Move the SimpleTurtle to her starting position.\n start_at = rg.Point(100, -50)\n cool_turtle.pen_up()\n cool_turtle.go_to(start_at)\n cool_turtle.pen_down()\n\n # Set up some parameters that control the nature of the shape drawn.\n size = 100 # Try 150 too\n angle = 1 # Try 20 too\n iterations = 360 # Try 90 too\n\n # Store the animation speed (to reset it later).\n tracer_n, tracer_d = window.tracer(), window.delay()\n\n # Make the animation go much faster.\n # First number: bigger means faster.\n # Second number: bigger means slower.\n window.tracer(5, 5)\n\n for _ in range(iterations):\n cool_turtle.right(angle)\n cool_turtle.draw_square(size)\n\n # Reset the animation to its original speed.\n window.tracer(tracer_n, tracer_d)\n\n window.close_on_mouse_click()",
"def turtle_movement(turtle_shape, bg_color, turtle_color, turtle_speed):\n turtle_name = initialize(turtle_shape, bg_color,\n turtle_color, turtle_speed)\n\n for i in range(36):\n for i in range(4):\n turtle_name.forward(200)\n turtle_name.right(90)\n turtle_name.right(10)",
"def draw_flower_advanced():\n draw_flower()\n turtle.left(90)\n turtle.up()\n turtle.forward(150)\n turtle.left(90)\n turtle.forward(150)\n turtle.right(90)\n turtle.down()",
"def turtle_setup():\n # ___ ___ _ _ ___ _____ __ __ ___ ___ ___ _____ __\n # | \\ / _ \\ | \\| |/ _ \\_ _| | \\/ |/ _ \\| \\_ _| __\\ \\ / /\n # | |) | (_) | | .` | (_) || | | |\\/| | (_) | |) | || _| \\ V /\n # |___/ \\___/ |_|\\_|\\___/ |_| |_| |_|\\___/|___/___|_| |_|\n # _____ _ _ ___ ___ ___ _ _ _ _ ___ _____ ___ ___ _ _\n # |_ _| || |_ _/ __| | __| | | | \\| |/ __|_ _|_ _/ _ \\| \\| |\n # | | | __ || |\\__ \\ | _|| |_| | .` | (__ | | | | (_) | .` |\n # |_| |_||_|___|___/ |_| \\___/|_|\\_|\\___| |_| |___\\___/|_|\\_|\n #\n # Create the turtle graphics screen and set a few basic properties.\n screen = turtle.Screen()\n screen.setup( WIDTH, HEIGHT, MARGIN, MARGIN )\n screen.bgcolor( \"SkyBlue\" )\n\n # Create two turtles, one for drawing and one for writing.\n artist = turtle.Turtle()\n writer = turtle.Turtle()\n\n # Change the artist turtle's shape so the artist and writer are distinguishable.\n artist.shape( \"turtle\" )\n\n # Make the animation as fast as possible and hide the turtles.\n if DRAW_FAST:\n screen.delay( 0 )\n artist.hideturtle()\n artist.speed( \"fastest\" )\n writer.hideturtle()\n writer.speed( \"fastest\" )\n\n # Set a few properties of the writing turtle useful since it will only be writing.\n writer.setheading( 90 ) # Straight up, which makes it look sort of like a cursor.\n writer.penup() # A turtle's pen does not have to be down to write text.\n writer.setposition( 0, HEIGHT // 2 - FONT_SIZE * 2 ) # Centered at top of the screen.\n\n return screen, artist, writer",
"def draw_graphic(self):\r\n\r\n t = Turtle()\r\n text = Turtle()\r\n s = t.getscreen()\r\n s.bgcolor(\"orange\")\r\n count = 0\r\n while count < 1:\r\n text.penup()\r\n text.setposition(-100, -100)\r\n text.pencolor(\"purple\")\r\n text.write(\"{}, area: {:.2f}, perimeter: {:.2f}\".format(self.name, self.area(), self.perimeter()), align=\"left\",\r\n font=(\"Arial\", 20, \"bold\"))\r\n t.goto(0, 0)\r\n t.pen(pencolor=\"purple\", fillcolor=\"green\", pensize=6, speed=20)\r\n t.fillcolor(\"red\")\r\n t.begin_fill()\r\n t.pendown()\r\n t.circle(self.__radius)\r\n t.end_fill()\r\n delay(30)\r\n t.clear()\r\n t.reset()\r\n text.clear()\r\n text.reset()\r\n count += 1",
"def ksh(i,t,htanses):\n for (zs,ys,zx,yx) in htanses[i]:\n alex.penup()\n alex.goto((zs%m)*20-10*m,(zs//m)*20-10*n)\n alex.pendown()\n alex.goto((ys%m+1)*20-10*m,(ys//m)*20-10*n)\n alex.goto((yx%m+1)*20-10*m,(yx//m+1)*20-10*n)\n alex.goto((zx%m)*20-10*m,(zx//m+1)*20-10*n)\n alex.goto((zs%m)*20-10*m,(zs//m)*20-10*n)\n alex.hideturtle()",
"def add_mosaics(self):\n for tree in self.mosaictrees:\n self.add_mosaic(tree, -1)",
"def draw_flower_bed():\n turtle.up()\n turtle.left(180)\n turtle.forward(200)\n turtle.right(180)\n turtle.down()\n for x in range(3):\n draw_flower_advanced()",
"def draw_flower_bed():\n turtle.up()\n turtle.left(180)\n turtle.forward(200)\n turtle.right(180)\n turtle.down()\n for flower in range(3):\n draw_flower_advanced()",
"def main():\n draw_sun()\n draw_pavement()\n draw_building()\n martin.goto(12, 40) # lines 171, 173, and 175 move the turtle down to space out the windows on the building.\n draw_windows()\n martin.goto(12, 0)\n draw_windows()\n martin.goto(12, -40)\n draw_windows()\n draw_door()\n draw_doorknob()",
"def moons(moon_list):\n full_moon = \"\"\"\n _..._ \n .:::::::. \n::::::::::: FULL MOON\n::::::::::: \n`:::::::::' \n `':::''\n\"\"\"\n wain_gibbous = \"\"\"\n _..._ \n .::::. `. \n:::::::. : WAINING GIBBOUS\n:::::::: : \n`::::::' .' \n `'::'-' \n\"\"\"\n second_quarter = \"\"\"\n ..._ \n .:::: `. \n:::::: : SECOND QUARTER\n:::::: : \n`::::: .' \n `'::.-' \n\"\"\"\n wain_crescent = \"\"\"\n _..._ \n .::' `. \n::: : WAINING CRESCENT\n::: : \n`::. .' \n `':..-' \n\"\"\"\n new_moon = \"\"\"\n _..._ \n .' `. \n: : NEW MOON\n: : \n`. .' \n `-...-' \n\"\"\"\n wax_crescent = \"\"\"\n _..._ \n .' `::. \n: ::: WAXING CRESCENT\n: ::: \n`. .::' \n `-..:'' \n\"\"\"\n first_quarter = \"\"\"\n _..._ \n .' ::::. \n: :::::: FIRST QUARTER\n: :::::: \n`. :::::' \n `-.::'' \n\"\"\"\n wax_gibbous = \"\"\"\n _..._ \n .' .::::. \n: :::::::: WAXING GIBBOUS\n: :::::::: \n`. '::::::' \n `-.::'' \n\"\"\"\n phase = moon_list[0]\n moon_age = moon_list[1]\n print(Fore.LIGHTYELLOW_EX)\n if phase == \"NE\":\n print(new_moon)\n print(Style.RESET_ALL)\n print(\"---------------------------\")\n if phase == \"Q1\":\n print(first_quarter)\n print(Style.RESET_ALL)\n print(\"---------------------------\")\n if phase == \"FU\":\n print(full_moon)\n print(Style.RESET_ALL)\n print(\"---------------------------\")\n if phase == \"Q3\":\n print(second_quarter)\n print(Style.RESET_ALL)\n print(\"---------------------------\")\n elif 1 <= moon_age < 8:\n print(wax_crescent)\n print(Style.RESET_ALL)\n print(\"---------------------------\")\n elif 7 < moon_age < 15:\n print(wax_gibbous)\n print(Style.RESET_ALL)\n print(\"---------------------------\")\n elif 15 <= moon_age < 22:\n print(wain_gibbous)\n print(Style.RESET_ALL)\n print(\"---------------------------\")\n elif 22 <= moon_age <= 29:\n print(wain_crescent)\n print(Style.RESET_ALL)\n print(\"---------------------------\")\n else:\n print(Style.RESET_ALL)\n print(\"MOON NOT FOUND.\")\n rerun()",
"def drawhalfmonster(mirror, generikmon):\r\n # place turtle at top middle facing up\r\n back_to_start()\r\n\r\n turtle.right(mirror*90) #requires turtle to be facing upwards\r\n turtle.right(mirror*generikmon['headA'])\r\n turtle.forward(generikmon['headL'])\r\n turtle.right(mirror*generikmon['cheekA'])\r\n turtle.forward(generikmon['cheekL'])\r\n turtle.right(mirror*generikmon['chinA'])\r\n turtle.forward(generikmon['chinL'])\r\n\r\n # get back to centerline\r\n turtle.setx(0)\r\n #note y cord of bottom of chin to figure out how far down to go to draw mouth\r\n y_chin = turtle.ycor()\r\n\r\n ### draw mouth\r\n back_to_start()\r\n turtle.penup() # unless you want to draw a cat's nose\r\n turtle.forward(y_chin*generikmon['mouthratio'])\r\n turtle.right(mirror*90)\r\n turtle.pendown()\r\n turtle.forward(generikmon['mouthL'])\r\n\r\n ### draw eyes\r\n back_to_start()\r\n turtle.penup() # unless you want to draw a cat's nose\r\n turtle.forward(y_chin*generikmon['eyeratio'])\r\n turtle.right(mirror*-90)\r\n turtle.forward(generikmon['eyeL'])\r\n turtle.shape(\"circle\")\r\n turtle.stamp()\r\n\r\n turtle.penup()\r\n turtle.home()",
"def draw_flower():\n turtle.right(45)\n draw_petal()\n turtle.right(90)\n draw_petal()\n turtle.right(90)\n draw_petal()\n turtle.right(90)\n draw_petal()\n turtle.right(135)\n turtle.forward(150) # draws the stem",
"def draw_sun():\n lisandro.penup()\n lisandro.goto(40, 90)\n lisandro.begin_fill()\n lisandro.circle(150) # draws out a circle with a radius of 150 for the sun.\n lisandro.end_fill()\n lisandro.hideturtle()",
"def fermat_spiral(a, t):\n global scale\n t.ht()\n t.pd()\n tt = Turtle()\n tt.ht()\n tt.pd()\n tt.color('red')\n tt.seth(180)\n for i in range(300):\n theata = i * get_radian()\n r = sqrt(a**2*theata)\n x = r * cos(theata)\n y = r * sin(theata)\n t.goto(x, y)\n tt.goto(-x, -y)\n t.up()\n t.home()\n t.fd(a + 10)\n t.seth(90)\n t.pensize(2)\n t.color('purple')\n t.pd()\n t.circle(30)",
"def draw_petal():\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)",
"def draw_petal():\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)",
"def cover_button(self):\r\n t = turtle.Turtle()\r\n t.hideturtle()\r\n t.speed(20)\r\n t.penup()\r\n t.goto(190,-260)\r\n t.setheading(0)\r\n t.color(\"#696969\")\r\n t.pendown()\r\n t.begin_fill()\r\n for y in range(2):\r\n t.forward(150)\r\n t.left(90)\r\n t.forward(70)\r\n t.left(90)\r\n t.end_fill()\r\n t.goto(0,0)"
] | [
"0.65632975",
"0.6095558",
"0.6092665",
"0.60891646",
"0.60118824",
"0.5902748",
"0.5898425",
"0.58668756",
"0.5748317",
"0.57450193",
"0.5698768",
"0.5697413",
"0.5694109",
"0.5622034",
"0.5611353",
"0.5568879",
"0.5532535",
"0.55252886",
"0.55061954",
"0.54686457",
"0.54614514",
"0.5431588",
"0.5431483",
"0.54189837",
"0.54026484",
"0.54009145",
"0.5398913",
"0.5391544",
"0.5391544",
"0.53651124"
] | 0.6980053 | 0 |
Load an ELMo embedder | from allennlp.commands.elmo import ElmoEmbedder

def load_elmo(cuda_device: int) -> ElmoEmbedder:
return ElmoEmbedder(cuda_device=cuda_device) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def embed():",
"def load(app, verbose, replay, exp_config=None):\n if replay:\n exp_config = exp_config or {}\n exp_config[\"replay\"] = True\n log(header, chevrons=False)\n loader = LoaderDeployment(app, Output(), verbose, exp_config)\n loader.run()",
"def pretrained(name=\"elmo\", lang=\"en\", remote_loc=None):\n from sparknlp.pretrained import ResourceDownloader\n return ResourceDownloader.downloadModel(ElmoEmbeddings, name, lang, remote_loc)",
"def _import_elmo():\n\n elmo = hub.Module('https://storage.googleapis.com/az-nlp/elmo_ru-news_wmt11-16_1.5M_steps.tar.gz',\n trainable=False) # news\n # elmo = hub.Module('https://storage.googleapis.com/az-nlp/elmo_ru-twitter_2013-01_2018-04_600k_steps.tar.gz',\n # trainable=False) # twitter\n print('❤️ ❤️ ❤️ DONE (re)importing Tensorflow hub.Module ')\n print('Tensorflow version is', tf.__version__)\n\n return elmo",
"def test_wordpiece_embedder(self, resource_loader):\n config = {\n \"model_type\": \"tagger\",\n \"example_type\": ENTITY_EXAMPLE_TYPE,\n \"label_type\": ENTITIES_LABEL_TYPE,\n \"model_settings\": {\"classifier_type\": \"embedder\"},\n \"params\": {\n \"emb_dim\": 30, \"tokenizer_type\": \"wordpiece-tokenizer\", \"add_terminals\": True\n },\n }\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n examples = self.labeled_data.queries()\n labels = self.labeled_data.entities()\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"use_crf_layer\": False}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)",
"def load(ctx):\n if not is_owner(ctx.update):\n return\n global cmds\n cmds.load_ext(ctx.args[0], ctx.update)",
"def test_default_embedder(self, resource_loader):\n config = {\n \"model_type\": \"tagger\",\n \"example_type\": ENTITY_EXAMPLE_TYPE,\n \"label_type\": ENTITIES_LABEL_TYPE,\n \"model_settings\": {\"classifier_type\": \"embedder\"},\n \"params\": {\"emb_dim\": 5},\n }\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n examples = self.labeled_data.queries()\n labels = self.labeled_data.entities()\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"use_crf_layer\": False}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)",
"def test_glove_embedder(self, resource_loader):\n config = {\n \"model_type\": \"tagger\",\n \"example_type\": ENTITY_EXAMPLE_TYPE,\n \"label_type\": ENTITIES_LABEL_TYPE,\n \"model_settings\": {\"classifier_type\": \"embedder\"},\n \"params\": {\"embedder_type\": \"glove\", \"emb_dim\": 5},\n }\n\n examples = self.labeled_data.queries()\n labels = self.labeled_data.entities()\n\n with pytest.raises(ValueError):\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n\n config = {**config, \"params\": {\"embedder_type\": \"glove\"}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"use_crf_layer\": False}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)",
"def load_enemies(self):\n\n self.enemies_layer = EnemyLayer()\n self.enemies_layer.set_enemy_waves(self.enemy_waves())\n self.enemies_layer.push_handlers(self)\n self.add(self.enemies_layer)",
"def test_load(elf, expected):\n elf_filename = os.path.join(elf_dir, elf)\n epiphany = Epiphany()\n with open(elf_filename, 'rb') as elf:\n epiphany.init_state(elf, elf_filename, '', [], False, is_test=True)\n epiphany.state.mem.write(0x00100004, 4, 0xFFFFFFFF)\n epiphany.max_insts = 10000\n epiphany.run()\n expected.check(epiphany.state)",
"def main():\r\n # create the EdenLudo sample\r\n EdenEvolves = EdenLudo()\r\n # ru-n the scene\r\n run()",
"def _load_emane(self) -> None:\n # check for emane\n path = utils.which(\"emane\", required=False)\n self.has_emane = path is not None\n if not self.has_emane:\n logger.info(\"emane is not installed, emane functionality disabled\")\n return\n # get version\n emane_version = utils.cmd(\"emane --version\")\n logger.info(\"using emane: %s\", emane_version)\n emane_prefix = self.config.get(\"emane_prefix\", DEFAULT_EMANE_PREFIX)\n emane_prefix = Path(emane_prefix)\n EmaneModelManager.load_locals(emane_prefix)\n # load custom models\n custom_path = self.config.get(\"emane_models_dir\")\n if custom_path is not None:\n logger.info(\"loading custom emane models: %s\", custom_path)\n custom_path = Path(custom_path)\n EmaneModelManager.load(custom_path, emane_prefix)",
"def loadEmbModel(embFile, logger):\n logger.info(\"Loading Embedding Model\")\n f = open(embFile,'r')\n model = {}\n v = []\n for line in f:\n splitLine = line.split(' ')\n word = splitLine[0]\n try:\n embedding = np.array([float(val) for val in splitLine[1:]])\n except:\n logger.info(len(v), line)\n model[word] = embedding\n v.append(embedding)\n mean = np.array(v).mean(0)\n logger.info(mean.shape)\n model['<unk>'] = torch.tensor(mean)\n model['<pad>'] = torch.zeros(embedding.shape)\n model['<start>'] = torch.zeros(embedding.shape)\n model['<end>'] = torch.zeros(embedding.shape)\n logger.info(\"Done.\",len(model),\" words loaded!\")\n return model",
"def load_embedding(src: str, embedding_type, layers) -> TransformerEmbedding:\n emb = TransformerEmbedding(src, embedding_type=embedding_type, layers=layers)\n return emb",
"def oembed(self, url):\r\n _url = '{0}/oembed'.format(self.get_url())\r\n return http.Request('GET', _url, {'url': url}), parsers.parse_json",
"def load(self, eng):\n eng.eval(\"load_system('simulink_househeat')\", nargout=0)",
"def __init__(\n self,\n emb_dim: int = 1024,\n dropout_value: float = 0.0,\n layer_aggregation: str = \"sum\",\n cuda_device_id: int = -1,\n ):\n super(BowElmoEmbedder, self).__init__()\n self.emb_dim = emb_dim\n self.dropout_value = dropout_value\n self.layer_aggregation_type = layer_aggregation\n self.allowed_layer_aggregation_types = [\"sum\", \"average\", \"last\", \"first\"]\n self.cuda_device_id = cuda_device_id\n self.device = (\n torch.device(\"cpu\")\n if cuda_device_id < 0\n else torch.device(f\"cuda:{cuda_device_id}\")\n )\n self.msg_printer = wasabi.Printer()\n\n assert (\n self.layer_aggregation_type in self.allowed_layer_aggregation_types\n ), self.msg_printer.fail(\n f\"For bag of words elmo encoder, the allowable aggregation \"\n f\"types are {self.allowed_layer_aggregation_types}. You passed {self.layer_aggregation_type}\"\n )\n\n # load the elmo embedders\n with self.msg_printer.loading(\"Creating Elmo object\"):\n self.elmo = ElmoEmbedder(cuda_device=self.cuda_device_id)\n self.msg_printer.good(\"Finished Loading Elmo object\")",
"def oembed(self, url):\n _url = '{0}/oembed'.format(self.get_url())\n return http.Request('GET', _url, {'url': url}), parsers.parse_json",
"def load_model(self, name: str):\n\n # Loading config\n self.cM = ConfigManager(name + \".cfg\")\n\n # Loading Vocabs\n out_voc = pickle.load(open(name + \".out_voc\", \"rb\"))\n in_voc = pickle.load(open(name + \".in_voc\", \"rb\"))\n\n self.output_field.vocab = out_voc\n self.input_field.vocab = in_voc\n\n num_classes = len(self.output_field.vocab)\n embed = nn.Embedding.from_pretrained(self.input_field.vocab.vectors)\n self.network = FrameIDNetwork(self.cM, embed, num_classes)\n\n self.network.load_model(name + \".ph\")",
"def example_WormExperimentFile():\n \n worm_file_path = os.path.join(user_config.DROPBOX_PATH, \n user_config.WORM_FILE_PATH) \n\n w = wormpy.WormExperimentFile()\n w.load_HDF5_data(worm_file_path)\n\n return w",
"def exe():\n e = entry()\n if e:\n return load(e)",
"def load_enroller(self, enroller_file):\n self._test(enroller_file)",
"async def olá(self):\r\n\t\tawait self.client.say('© Maddie 2017')\r\n\t\te = Embed()\r\n\t\te.set_image(url='https://cdn.discovery.pgsitecore.com/en-us/-/media/Olay_PathFinder/Images/a/OLAY%20TE%207IN1%20DEEP%20PENETRATING%20MOISTURE%20BODY%20WASH_Front.png?w=460&v=1-201705260605')\r\n\t\tawait self.client.say(embed=e)",
"async def HalflingMasterCheerleaders(self, ctx):\n\n data = getattr(special_play, inspect.currentframe().f_code.co_name)()\n await self.send_embed(data, ctx)",
"def __init__(self):\n\n # Load embeddings index\n self.embeddings = self.load()\n self.console = Console()",
"def load(self):",
"def load_text_embed(filepath: Union[str, os.PathLike], load_dir: str = 'model') \\\n -> Tuple[TransformerEmbedding, Callable]:\n model_dir = Path(filepath).joinpath(load_dir)\n tokenizer = AutoTokenizer.from_pretrained(str(model_dir.resolve()))\n args = dill.load(open(model_dir.joinpath('embedding.dill'), 'rb'))\n emb = TransformerEmbedding(\n str(model_dir.resolve()), embedding_type=args['embedding_type'], layers=args['layers']\n )\n return emb, tokenizer",
"def test_load_experiment(self):\n exp = Experiment(self.epath,\n normalization='ch0',\n auto_alignment=False)\n self.assertTrue(isinstance(exp, Experiment))",
"def __init__(self):\r\n # Webhook URL\r\n self.WEBHOOK = DiscordWebhook(url=\"https://discord.com/api/webhooks/806990374996410368/QqilGNrBo652oBEnsuX-BMkU1e8_PGIO4ENiyiQF_V6qtuQLkT6Z_1-lFmzmatp9M8Mz\")\r\n \r\n # Executing Function\r\n # self.ExecuteEmbed()\r",
"def load_embedding(path=PROJECT_DIR / \"outputs/models/embedding.pkl\"):\n try:\n with open(path, \"rb\") as inp:\n embedding = pickle.load(inp)\n return embedding\n\n except FileNotFoundError:\n logger.error(f\"There is no embedding to load at {path}\")"
] | [
"0.62175786",
"0.590029",
"0.58213216",
"0.5748839",
"0.5716236",
"0.565742",
"0.5587765",
"0.5479743",
"0.5476409",
"0.54455894",
"0.54414856",
"0.53916883",
"0.53201646",
"0.5269377",
"0.5220537",
"0.52032304",
"0.5201442",
"0.51874834",
"0.5172268",
"0.51608175",
"0.5155096",
"0.5146104",
"0.51433015",
"0.5123223",
"0.51214546",
"0.51156014",
"0.5096173",
"0.50949186",
"0.50868803",
"0.5085329"
] | 0.72271544 | 0 |
Preprocess bureau.csv and bureau_balance.csv. | import gc

import pandas as pd

# one_hot_encoder is a helper defined elsewhere in this script: it one-hot
# encodes the dataframe and returns it along with the list of new column names
def bureau_and_balance(num_rows=None, nan_as_category=True):
bureau = pd.read_csv('bureau.csv', nrows=num_rows)
bb = pd.read_csv('bureau_balance.csv', nrows=num_rows)
bb, bb_cat = one_hot_encoder(bb, nan_as_category)
bureau, bureau_cat = one_hot_encoder(bureau, nan_as_category)
# Bureau balance: Perform aggregations and merge with bureau.csv
bb_aggregations = {'MONTHS_BALANCE': ['min', 'max', 'size']}
    # categorical (one-hot) columns get a 'mean' aggregation, since min, max
    # and size don't make sense for them
    for col in bb_cat:
        bb_aggregations[col] = ['mean']  # the mean of a one-hot column is its
        # normalized share within each SK_ID_BUREAU group, so an ID with 100
        # entries in the bb table doesn't count for more than an ID with only 10
    # aggregated by SK_ID_BUREAU here; aggregated by SK_ID_CURR later on
bb_agg = bb.groupby('SK_ID_BUREAU').agg(bb_aggregations)
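    # flatten the resulting (column, statistic) MultiIndex into single names
    # such as MONTHS_BALANCE_MIN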
bb_agg.columns = pd.Index([e[0] + "_" + e[1].upper() for e in
bb_agg.columns.tolist()])
bureau = bureau.join(bb_agg, how='left', on='SK_ID_BUREAU')
bureau.drop(['SK_ID_BUREAU'], axis=1, inplace=True)
del bb, bb_agg
gc.collect()
    # the join above added, for each STATUS category (e.g. 0, 1, 2, 3, C, X),
    # a column holding its average share per bureau record
print(bureau.columns)
    # Bureau and bureau_balance numeric features; the choice of aggregations
    # reflects some domain knowledge: 11 numeric columns from bureau, plus the
    # 3 MONTHS_BALANCE_* columns (min, max, size) computed from bb above
num_aggregations = {
'DAYS_CREDIT': ['min', 'max', 'mean', 'var'],
'DAYS_CREDIT_ENDDATE': ['min', 'max', 'mean'],
'DAYS_CREDIT_UPDATE': ['mean'],
'CREDIT_DAY_OVERDUE': ['max', 'mean'],
'AMT_CREDIT_MAX_OVERDUE': ['mean'],
'AMT_CREDIT_SUM': ['max', 'mean', 'sum'],
'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'],
'AMT_CREDIT_SUM_OVERDUE': ['mean'],
'AMT_CREDIT_SUM_LIMIT': ['mean', 'sum'],
'AMT_ANNUITY': ['max', 'mean'],
'CNT_CREDIT_PROLONG': ['sum'],
'MONTHS_BALANCE_MIN': ['min'],
'MONTHS_BALANCE_MAX': ['max'],
'MONTHS_BALANCE_SIZE': ['mean', 'sum']
}
# Bureau and bureau_balance categorical features
cat_aggregations = {}
for cat in bureau_cat:
cat_aggregations[cat] = ['mean']
for cat in bb_cat:
cat_aggregations[cat + "_MEAN"] = ['mean'] # STATUS categories
bureau_agg = bureau.groupby('SK_ID_CURR').agg({**num_aggregations,
**cat_aggregations})
# this flattens the two-deep index
bureau_agg.columns = pd.Index(['BURO_' + e[0] + "_" + e[1].upper()
for e in bureau_agg.columns.tolist()])
# Bureau: Active credits - using only numerical aggregations
active = bureau[bureau['CREDIT_ACTIVE_Active'] == 1]
active_agg = active.groupby('SK_ID_CURR').agg(num_aggregations)
active_agg.columns = pd.Index(['ACTIVE_' + e[0] + "_" + e[1].upper()
for e in active_agg.columns.tolist()])
bureau_agg = bureau_agg.join(active_agg, how='left', on='SK_ID_CURR')
del active, active_agg
gc.collect()
# Bureau: Closed credits - using only numerical aggregations
closed = bureau[bureau['CREDIT_ACTIVE_Closed'] == 1]
closed_agg = closed.groupby('SK_ID_CURR').agg(num_aggregations)
closed_agg.columns = pd.Index(['CLOSED_' + e[0] + "_" + e[1].upper()
for e in closed_agg.columns.tolist()])
bureau_agg = bureau_agg.join(closed_agg, how='left', on='SK_ID_CURR')
del closed, closed_agg, bureau
gc.collect()
return bureau_agg | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_bureau_data(in_dir, nan_as_category=True):\n logger = logging.getLogger(__name__)\n logger.debug('Loading bureau data...')\n\n bureau = pd.read_csv(in_dir + '/bureau.csv')\n\n bb = pd.read_csv(in_dir + '/bureau_balance.csv')\n bb, bb_cat = one_hot_encoder(bb, nan_as_category)\n bureau, bureau_cat = one_hot_encoder(bureau, nan_as_category)\n\n # Bureau balance: Perform aggregations and merge with bureau.csv\n bb_aggregations = {'MONTHS_BALANCE': ['min', 'max', 'size']}\n for col in bb_cat:\n bb_aggregations[col] = ['mean']\n bb_agg = bb.groupby('SK_ID_BUREAU').agg(bb_aggregations)\n bb_agg.columns = pd.Index([e[0] + \"_\" + e[1].upper() for e in bb_agg.columns.tolist()])\n bureau = bureau.join(bb_agg, how='left', on='SK_ID_BUREAU')\n bureau.drop(['SK_ID_BUREAU'], axis=1, inplace=True)\n del bb, bb_agg\n gc.collect()\n\n # Bureau and bureau_balance numeric features\n num_aggregations = {\n 'DAYS_CREDIT': ['min', 'max', 'mean', 'var'],\n 'DAYS_CREDIT_ENDDATE': ['min', 'max', 'mean'],\n 'DAYS_CREDIT_UPDATE': ['mean'],\n 'CREDIT_DAY_OVERDUE': ['max', 'mean'],\n 'AMT_CREDIT_MAX_OVERDUE': ['mean'],\n 'AMT_CREDIT_SUM': ['max', 'mean', 'sum'],\n 'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'],\n 'AMT_CREDIT_SUM_OVERDUE': ['mean'],\n 'AMT_CREDIT_SUM_LIMIT': ['mean', 'sum'],\n 'AMT_ANNUITY': ['max', 'mean'],\n 'CNT_CREDIT_PROLONG': ['sum'],\n 'MONTHS_BALANCE_MIN': ['min'],\n 'MONTHS_BALANCE_MAX': ['max'],\n 'MONTHS_BALANCE_SIZE': ['mean', 'sum']\n }\n # Bureau and bureau_balance categorical features\n cat_aggregations = {}\n for cat in bureau_cat:\n cat_aggregations[cat] = ['mean']\n for cat in bb_cat:\n cat_aggregations[cat + \"_MEAN\"] = ['mean']\n\n bureau_agg = bureau.groupby('SK_ID_CURR').agg({**num_aggregations, **cat_aggregations})\n bureau_agg.columns = pd.Index(['BURO_' + e[0] + \"_\" + e[1].upper() for e in bureau_agg.columns.tolist()])\n # Bureau: Active credits - using only numerical aggregations\n active = bureau[bureau['CREDIT_ACTIVE_Active'] == 1]\n active_agg = active.groupby('SK_ID_CURR').agg(num_aggregations)\n active_agg.columns = pd.Index(['ACTIVE_' + e[0] + \"_\" + e[1].upper() for e in active_agg.columns.tolist()])\n bureau_agg = bureau_agg.join(active_agg, how='left', on='SK_ID_CURR')\n del active, active_agg\n gc.collect()\n # Bureau: Closed credits - using only numerical aggregations\n closed = bureau[bureau['CREDIT_ACTIVE_Closed'] == 1]\n closed_agg = closed.groupby('SK_ID_CURR').agg(num_aggregations)\n closed_agg.columns = pd.Index(['CLOSED_' + e[0] + \"_\" + e[1].upper() for e in closed_agg.columns.tolist()])\n bureau_agg = bureau_agg.join(closed_agg, how='left', on='SK_ID_CURR')\n del closed, closed_agg, bureau\n gc.collect()\n logger.info('Loaded bureau data')\n return bureau_agg.fillna(0)",
"def pre_process_books(csv, outputname):\n df = pd.read_csv(csv, error_bad_lines=False, encoding = \"ISO-8859-1\")\n df.to_csv(outputname, index=False)",
"def init(fileName):\r\n global grand_prod_cost, grand_album_sales\r\n infile = ''\r\n try:\r\n with open(fileName, mode='r') as infile:\r\n reader = csv.reader(infile)\r\n sniffer = csv.Sniffer()\r\n has_header = sniffer.has_header(infile.read(2048))\r\n infile.seek(0)\r\n if (has_header):\r\n next(reader) # move curser to next row so the header is not included\r\n initBands(reader)\r\n # Reset the curser to start based on presence of header\r\n if(has_header):\r\n infile.seek(0)\r\n # avoid header\r\n next(reader)\r\n else:\r\n infile.seek(0)\r\n splitByBand(reader)\r\n except Exception as e:\r\n print('Exception in init')\r\n raise e",
"def load_breuer_csv():\n import csv\n with open('Hill_Re_10595_Breuer.csv', 'r') as fh:\n reader = csv.reader(fh)\n reader.next() # Eat header\n raw = np.array([[float(i) for i in l[:-1]] for l in reader if len(l) > 0])\n return analyse_breuer(raw)",
"def test_load_coinbasettr(self):\n with open(self.filename) as f:\n coinbasettr.CoinbaseTTRParser(csv_content=f)\n parser = coinbasettr.CoinbaseTTRParser(filename=self.filename)\n parser.cleanup()",
"def pre_process_reviews(csv, outputname):\n df = pd.read_csv(csv)\n df = df.drop(\"Unnamed: 0\", axis='columns')\n df.to_csv(outputname, index=False)",
"def process_csv_data(file_for_processing: FileForProcessing):\n \n if file_for_processing.file_to_process.os_type == ANDROID_API:\n # Do fixes for Android\n if file_for_processing.data_type == ANDROID_LOG_FILE:\n file_for_processing.file_contents = fix_app_log_file(\n file_for_processing.file_contents, file_for_processing.file_to_process.s3_file_path\n )\n \n header, csv_rows_list = csv_to_list(file_for_processing.file_contents)\n if file_for_processing.data_type != ACCELEROMETER:\n # If the data is not accelerometer data, convert the generator to a list.\n # For accelerometer data, the data is massive and so we don't want it all\n # in memory at once.\n csv_rows_list = list(csv_rows_list)\n \n if file_for_processing.data_type == CALL_LOG:\n header = fix_call_log_csv(header, csv_rows_list)\n if file_for_processing.data_type == WIFI:\n header = fix_wifi_csv(header, csv_rows_list, file_for_processing.file_to_process.s3_file_path)\n else:\n # Do fixes for iOS\n header, csv_rows_list = csv_to_list(file_for_processing.file_contents)\n \n if file_for_processing.data_type != ACCELEROMETER:\n csv_rows_list = list(csv_rows_list)\n \n # Memory saving measure: this data is now stored in its entirety in csv_rows_list\n file_for_processing.clear_file_content()\n \n # Do these fixes for data whether from Android or iOS\n if file_for_processing.data_type == IDENTIFIERS:\n header = fix_identifier_csv(header, csv_rows_list, file_for_processing.file_to_process.s3_file_path)\n if file_for_processing.data_type == SURVEY_TIMINGS:\n header = fix_survey_timings(header, csv_rows_list, file_for_processing.file_to_process.s3_file_path)\n \n header = b\",\".join([column_name.strip() for column_name in header.split(b\",\")])\n if csv_rows_list:\n return (\n # return item 1: the data as a defaultdict\n binify_csv_rows(\n csv_rows_list,\n file_for_processing.file_to_process.study.object_id,\n file_for_processing.file_to_process.participant.patient_id,\n file_for_processing.data_type,\n header\n ),\n # return item 2: the tuple that we use as a key for the defaultdict\n (\n file_for_processing.file_to_process.study.object_id,\n file_for_processing.file_to_process.participant.patient_id,\n file_for_processing.data_type,\n header\n )\n )\n else:\n return None, None",
"def process_csv(self, user: User, csv_file):\n self.db_session.rollback()\n csv = pandas.read_csv(StringIO(csv_file.read().decode('utf-8')))\n missing_cols = [col_name for col_name in CSV_SENSOR_MAP.values() if col_name not in csv.columns.values]\n if missing_cols:\n raise OBDControllerError(f'CSV is missing the following columns: {\", \".join(missing_cols)}')\n\n csv = csv[CSV_SENSOR_MAP.values()]\n start_datetime = self._resolve_date_from_csv_row(csv.iloc[0])\n gen_session_id = str(start_datetime.timestamp()).replace('.', '')[:12]\n\n if self.db_session.query(OBDSession).filter(OBDSession.id == gen_session_id).first():\n return\n\n session = OBDSession.create(self.db_session, id=gen_session_id, user_id=user.id, date=start_datetime)\n _ = CarState.create_from_csv(self.db_session, session, csv)\n self.db_session.commit()",
"def pre_process_data():\n data_list, header_list = Parser.__parse_csv_data(Parser.training_data_file)\n table = pandas.DataFrame(data_list, columns=header_list)\n table.drop(['date', 'employee id'], axis=1, inplace=True)\n unique_categories = table['category'].unique()\n unique_expense_desc = table['expense description'].unique()\n unique_tax_name = table['tax name'].unique()\n\n column_index = {\n 'input': {},\n 'output': {}\n }\n\n column_index['input']['pre-tax amount'] = {\n 'column_index': 0,\n 'type': 'int'\n }\n\n column_index['input']['tax amount'] = {\n 'column_index': 1,\n 'type': 'int'\n }\n\n index = 2\n\n for i in range(len(unique_expense_desc)):\n column_index['input'][unique_expense_desc[i]] = {\n 'column_index': i + index,\n 'type': 'str'\n }\n\n index += len(unique_expense_desc)\n\n for i in range(len(unique_tax_name)):\n column_index['input'][unique_tax_name[i]] = {\n 'column_index': i + index,\n 'type': 'str'\n }\n\n for i in range(len(unique_categories)):\n column_index['output'][unique_categories[i]] = {'value': i}\n\n Parser.__save_column_index(column_index)",
"def _process_csv_data(csv_file, user_data_map):\n with open(csv_file, 'r') as csvfile:\n rows = csv.reader(csvfile)\n for row in rows:\n if len(row) < 2:\n print('The CSV file is not in expected format.')\n raise Exception\n user_data_map[row[1].lower()] = row[0]",
"def prepare_CSV(self):\n self.drop_columns()\n self.rename_columns()\n self.spilt_columns()\n self.add_vehicle_id_column()\n self.add_source_column()\n self.add_timestamp_columns()\n self.get_colour_columns()\n self.clean_column_formats()\n\n # print(self.data.info())\n # print(self.data.sample(10))\n\n return self.data",
"def preprocessing(name_file):\n\n db_data = pd.read_csv(name_file).dropna()\n db_data['Timestamp'] = pd.to_datetime(db_data['Timestamp'], unit='s')\n db_data = db_data[db_data['Timestamp'].dt.year >= 2017]\n db_data.reset_index(inplace=True, drop=True)\n db_data = db_data.drop(['Timestamp'], axis=1)\n db_data = db_data[0::60]\n\n n = len(db_data)\n\n # Split data\n train = db_data[0:int(n * 0.7)]\n validation = db_data[int(n * 0.7):int(n * 0.9)]\n test = db_data[int(n * 0.9):]\n\n # Normalize data\n train_mean = train.mean()\n train_std = train.std()\n train = (train - train_mean) / train_std\n validation = (validation - train_mean) / train_std\n test = (test - train_mean) / train_std\n\n return train, validation, test",
"def loadCSV(input_file):",
"def pre_process_multispace(filepath, delimiter=\" \"):\n newpath = filepath+\".rev.csv\"\n with open(filepath, \"r\") as src_csv_file:\n with open(newpath, \"w\") as dst_csv_file:\n for src_line in src_csv_file:\n dst_csv_file.write(delimiter.join(src_line.split())+\"\\n\")",
"def post_init(cr, registry):\n import_csv_data(cr, registry)",
"def main():\n\n bank_lookup = {\n 'Spending': bank_main,\n 'Income': bank_main,\n 'Saving': bank_savings,\n 'Credit card': bank_credit,\n }\n\n sheet_data = petl.fromxlsx('sample-data.xlsx', sheet='Data')\n data = sheet_data.cut(*range(5))\n early_data = data.select('Date', lambda r: r.month <= 2)\n\n for account, table in split_table(early_data, 'Account'):\n modified_table = bank_lookup[account](table)\n # modified_table.tocsv(table['Account'][0]+'.csv')\n print(modified_table)",
"def post_process_output_file():\n parsed_data = []\n unparseable_data = []\n\n with open('../output/part-00000', 'r') as input_file:\n for line in input_file:\n line = line.strip()\n try:\n csv_splits = line.split(',')\n csv_splits[0] = int(csv_splits[0])\n # parsed_data is a list of lists\n parsed_data.append(csv_splits)\n except ValueError:\n unparseable_data.append(line)\n parsed_data.sort()\n\n with open('../output/titanic_test_data.csv', 'w') as output_file:\n # start with lines that couldn't be parsed\n # hopefully this will only be the original header\n for line in unparseable_data:\n output_file.write(\"%s\\n\" % line)\n for line in parsed_data:\n output_file.write(\"%d,%s\\n\" % (line[0], line[1]))",
"def process_csv(filepath):\n suburb = get_suburb(filepath)\n read_file = pd.read_csv(filepath,\n infer_datetime_format=True,\n parse_dates=[\"SALE DATE\"],\n dayfirst=True)\n read_file[\"SUBURB\"] = suburb\n separate_date(read_file)\n return read_file",
"def _process_data_file(self, manifest_row):\n # get the file object for the data\n csv_reader = DataReader(meta=self.meta,\n manifest_row=manifest_row,\n load_from=\"file\")\n\n # get file path for storing clean PSV files\n temp_filepath = self._get_temp_filepath(manifest_row=manifest_row)\n\n # validate and clean\n self._load_single_file(table_name=manifest_row['destination_table'],\n manifest_row=manifest_row,\n csv_reader=csv_reader,\n temp_filepath=temp_filepath)",
"def from_csv_to_database():\r\n for year, path in FileNamePath.items():\r\n # load csv files\r\n with open(path, encoding='cp1251') as dataset:\r\n print(f\"Download {year} data\")\r\n get_curr_data(dataset, year)",
"def preprocess(self):\n df = pd.read_csv(self.input, sep=self.dataSeparator, index_col = 0)\n #ATTENTION: this processing assumes that the data is formatted in a way that header and index are automatically recognized. remove trailing commas/separators at first line of the file for this to be achieved\n if self.transposeMatrix:\n df = df.T\n\n filePrefix = self.input.split(\"/\")[-1].split(\".\")[\n 0] # split path by / to receive filename, split filename by . to receive filename without ending\n filename = self.output + filePrefix + \"_transposed.csv\"\n\n df.to_csv(filename)\n return filename",
"def main():\n\n # Ensure the output directory exists\n if not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n\n process_csv()",
"def process_rows(csv_reader, budget_model):\n # Metadata\n date_column = 0\n revenue_column = 1\n\n i = 0 # Unit Tesing\n\n for row in csv_reader:\n\n # Unit Testing\n # i = i + 1\n # if i > 4:\n # break\n\n date_string = str(row[date_column])\n\n # Bypass header.\n if date_string == \"Date\":\n continue\n \n # Format period key.\n month_name = date_string[:3].lower()\n if month_name == \"jan\":\n period_month = \"01\"\n elif month_name == \"feb\":\n period_month = \"02\"\n elif month_name == \"mar\":\n period_month = \"03\"\n elif month_name == \"apr\":\n period_month = \"04\"\n elif month_name == \"may\":\n period_month = \"05\"\n elif month_name == \"jun\":\n period_month = \"06\"\n elif month_name == \"jul\":\n period_month = \"07\"\n elif month_name == \"aug\":\n period_month = \"08\"\n elif month_name == \"sep\":\n period_month = \"09\"\n elif month_name == \"oct\":\n period_month = \"10\"\n elif month_name == \"nov\":\n period_month = \"11\"\n elif month_name == \"dec\":\n period_month = \"12\"\n else:\n raise ValueError('Month String was invalid: ' + date_string)\n period_year = date_string[-2:]\n period_key = period_year + period_month\n\n # Format period name.\n period_name = month_name + \"-\" + period_year\n period_name = period_name.capitalize()\n\n # Get revenue.\n revenue = float(row[revenue_column])\n\n # If the period already exists, add the revenue to the period.\n # If the period does not exist, add the period with its revenue.\n periods = budget_model[\"periods\"]\n\n if period_key in periods:\n period = periods[period_key]\n period[\"revenue\"] = period[\"revenue\"] + revenue\n else:\n period = {}\n period[\"name\"] = period_name\n period[\"revenue\"] = revenue\n periods[period_key] = period",
"def process_wounds():\n\n wounds = pd.read_csv(\n f\"{raw_data}\\\\wounds.csv\", parse_dates=[\"Date Time Occurred\", \"Date Healed\"]\n )\n cols_to_drop = [\"Participant\"]\n wounds.drop(cols_to_drop, axis=1, inplace=True)\n wounds.columns = clean_table_columns(wounds.columns)\n wounds.dropna(subset=[\"member_id\"], inplace=True)\n wounds[\"member_id\"] = wounds[\"member_id\"].astype(int)\n wounds.to_csv(f\"{processed_data}\\\\wounds.csv\", index=False)\n return wounds",
"def test_data_preprocessing(raw_data): \r\n\r\n # get data output\r\n data_output = raw_data[['Submitby Date Time', 'Challenge Manager', 'Challenge Copilot', 'Posting Date Date', 'Track',\r\n 'Technology List', 'First Place Prize', 'Num Registrations', 'Total Prize']]\r\n with open('cache/extended_columns.pkl', 'rb') as f:\r\n extended_columns = pickle.load(f)\r\n with open('cache/num_date_columns.pkl', 'rb') as f:\r\n max_date_columns = pickle.load(f)\r\n \r\n data_output = class_binaryzation_for_test(data_output, extended_columns)\r\n try:\r\n data_output = date_separation1(data_output, max_num_columns=NUM_DATE_COLUMNS)\r\n except:\r\n data_output = date_separation2(data_output)\r\n data_output = money_digitalization(data_output)\r\n data_output = get_date_in_days(data_output)\r\n data_output['Days from Posting to Submit'] = data_output['Submitby Date Time Days from 2016'] \\\r\n - data_output['Posting Date Date Days from 2016'] \r\n\r\n return data_output",
"def prepare_data(\n self,\n csv_file_path='../data/train-orig.csv',\n tiff_folder_path='../data/train/', # for validation: '../data/valid'\n mixup: bool = False,\n for_training=True,\n data_ratio_mixup=2,\n alpha_mixup=0.2,\n # if True, standardization parameters will be computed, if False then apply parameters computed from training data\n ):\n self.data = read_data(\n csv_file_path=csv_file_path,\n tiff_folder_path=tiff_folder_path,\n )\n # Mixup\n if mixup:\n self.mixup_data(data_ratio_produce=data_ratio_mixup, alpha=alpha_mixup)\n\n ### augment, do whatever you want (distinguish between train and validation setting!)",
"def _setprior_csv(self, csv_file):\n\n #-- number of time-points\n npts = self.get_npts()\n\n #-- read state from CSV file\n fmt = self._guess_time_format(csv_file)\n state_inst = sv.get_state_csv(fname=csv_file, fmt=fmt)\n\n #-- LAI,Canopy-Height,Soil-Moisture\n self.prstate = np.empty((3,npts), dtype=np.float64)\n\n for i,date_utc in enumerate(self.schedule_dct['date_utc']):\n idx, timedelt = sv.find_nearest_date_idx(state_inst.date_utc, date_utc)\n if timedelt.days>=1:\n msg = \"for scheduled date ---{}--- \".format(date_utc.strftime('%Y-%m-%dT%H%M'))\n msg += \"time nearest state differs by at least one day!\"\n FileLogger.warn(msg)\n #-- LAI\n self.prstate[0,i] = state_inst.lai[idx]\n #-- canopy-height\n self.prstate[1,i] = state_inst.can_height[idx]\n #-- SM\n self.prstate[2,i] = state_inst.soil_moisture[idx]",
"def import_clean_process():\n # loading the co2 emissions data for the Earth, I'm only interested in the\n # total emissions and the year\n global_co2 = pd.read_csv(\n \"datasets/Global CO2 Emissions.csv\",\n usecols=[\n \"Year\",\n \"Total\"\n ],\n parse_dates=[\"Year\"],\n index_col=\"Year\"\n )\n # creating the global temperature dataframe\n global_temp_data = open(\n \"datasets/CRUTEM.4.6.0.0.global_n+s\",\n \"r\"\n )\n global_temp = pd.DataFrame(\n {\n \"global_temp\": [],\n }\n )\n for line in global_temp_data:\n # each line in the file is an observation for the year, the first\n # column being the year, the second being the temperature measurement\n data = line.split()\n global_temp.at[pd.to_datetime(data[0]), \"global_temp\"] = float(data[1])\n global_temp_data.close()\n # loading the co2 emissions data for the UK\n uk_co2 = pd.read_csv(\n \"datasets/UK carbon dioxide emissions between 1858 to 2017 .csv\",\n parse_dates=[\"Date\"],\n index_col=\"Date\"\n )\n # creating the dataframe for the UK temperature data\n uk_temp = pd.DataFrame(\n {\n \"uk_temp\": [],\n }\n )\n # this file consists of monthly and seasonal averages for the UK surface\n # temperature\n uk_tmean = open(\n \"datasets/UK Mean Temperature (Degrees C)\",\n \"r\"\n )\n for index, line in enumerate(uk_tmean):\n # the data begins on the eigth line in the file\n if index > 7:\n data = line.split()\n # the monthly temperatures are from the 2nd and 13th columns\n month_temps = np.array(data[1:13]).astype(float)\n # the first reading is the year, I've taken the average of all the\n # months to get an annual average\n uk_temp.at[pd.to_datetime(data[0]), \"uk_temp\"] = month_temps.mean()\n uk_tmean.close()\n # removing the temperature reading for 2019 as it isn't averaged over the\n # whole year (this program was written in 06/2019)\n uk_temp = uk_temp[:-1]\n # merging the temperature and co2 emissions dataframes for the Earth\n global_data = pd.merge(\n global_temp,\n global_co2,\n left_index=True,\n right_index=True,\n how=\"outer\"\n )\n # merging the temperature and co2 emissions dataframes for the UK\n uk_data = pd.merge(\n uk_temp,\n uk_co2,\n left_index=True,\n right_index=True,\n how=\"outer\"\n )\n # merging the global and UK dataframes\n df_data = pd.merge(\n global_data,\n uk_data,\n left_index=True,\n right_index=True,\n how=\"outer\"\n )\n # rename some of the columns to make them more clear\n df_data = df_data.rename(\n columns={\n \"Total\": \"global_co2\",\n \"CO2 Emissions\": \"uk_co2\"\n }\n )\n return df_data",
"def preprocess(self):\n filtered_data = pd.read_csv(self.input)\n\n if self.config.getboolean(\"filterMissingsInGenes\"):\n # first filter out the genes that have more missings than threshold\n filtered_data = self.filterMissings(self.config[\"threshold\"], filtered_data)\n if self.config.getboolean(\"filterMissingsInSamples\"):\n # second transpose matrix and filter out samples that have more missings than threshold\n filtered_samples = self.filterMissings(self.config[\"threshold\"], filtered_data.T)\n filtered_data = filtered_samples.T\n\n # transpose back into original orientation and save\n filePrefix = self.input.split(\"/\")[-1].split(\".\")[\n 0] # split path by / to receive filename, split filename by . to receive filename without ending\n filename = self.output + filePrefix + \"_filtered.csv\"\n filtered_data.to_csv(filename, index=False)\n return filename",
"def process_file(input_file, output_good, output_bad):\n\t# Lists containing good and bad rows\n\tlist_good = []\n\tlist_bad = []\n\t# Open csv file \n\twith open(input_file, 'r') as f:\n\t\t# Create reader from csv and store header row\n\t\treader = csv.DictReader(f)\n\t\theader = reader.fieldnames\n\t\t# Store useful keys\n\t\tyear = 'productionStartYear'\n\t\turi = 'URI'\n\t\t# Loop through all rows\n\t\tfor row in reader:\n\t\t\t# Discard rows with a URI not from DBpedia\n\t\t\tif not row[uri].startswith('http://dbpedia.org'):\n\t\t\t\tcontinue\n\t\t\t# Extract year from datetime\n\t\t\tyear_value = row[year][:4]\n\t\t\t# Change row datetime value to its year\n\t\t\trow[year] = year_value\n\t\t\t# Check if year actually contains a year\n\t\t\tif not row[year].isdigit():\n\t\t\t\t# Add to list_bad\n\t\t\t\tlist_bad.append(row)\n\t\t\t\tcontinue\n\t\t\t# Check if year falls within expected range\n\t\t\tif int(row[year]) < 1886 or int(row[year]) > 2014:\n\t\t\t\t# Add list to bad\n\t\t\t\tlist_bad.append(row)\n\t\t\t\tcontinue\n\t\t\t# Row is proper, add to list_good\n\t\t\tlist_good.append(row)\n\t\t\n\t\t# Open good ouput file, write the good rows to it\n\t\twith open(output_good, 'w') as csvfile:\n\t\t\twriter = csv.DictWriter(csvfile, delimiter = ',', fieldnames = header)\n\t\t\twriter.writeheader()\n\t\t\tfor item in list_good:\n\t\t\t\twriter.writerow(item)\n\n\t\t# Open bad ouput file, write the nad rows to it\n\t\twith open(output_bad, 'w') as csvfile:\n\t\t\twriter = csv.DictWriter(csvfile, delimiter = ',', fieldnames = header)\n\t\t\twriter.writeheader()\n\t\t\tfor item in list_bad:\n\t\t\t\twriter.writerow(item)"
] | [
"0.64036053",
"0.62629867",
"0.58125216",
"0.5807141",
"0.57303107",
"0.57129735",
"0.57014513",
"0.567897",
"0.56665885",
"0.5632176",
"0.5606566",
"0.5606245",
"0.55926096",
"0.55781627",
"0.55719185",
"0.5529289",
"0.55151397",
"0.54760665",
"0.5462434",
"0.543665",
"0.5422039",
"0.5417394",
"0.5415675",
"0.54111975",
"0.54081",
"0.53872144",
"0.53764874",
"0.53716725",
"0.5355504",
"0.5349873"
] | 0.65498394 | 0 |
LightGBM GBDT with KFold or Stratified KFold. | def kfold_lightgbm(df, num_rows, num_folds, stratified=False, debug=False):
train_df = df[df['TARGET'].notnull()]
test_df = df[df['TARGET'].isnull()]
text = "Starting LightGBM. Train shape: {}, test shape: {}"
print(text.format(train_df.shape, test_df.shape))
del df
gc.collect()
# Cross validation model
if stratified:
folds = StratifiedKFold(n_splits=num_folds, shuffle=True,
random_state=1001)
else:
folds = KFold(n_splits=num_folds, shuffle=True, random_state=1001)
# Create arrays and dataframes to store results
# oof = 'out of fold'
oof_preds = np.zeros(train_df.shape[0])
# sub_preds = submission predictions (i.e., will be submitted to kaggle)
sub_preds = np.zeros(test_df.shape[0])
feature_importance_df = pd.DataFrame()
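    # use every column except the target and the ID / index columns as features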
feats = [f for f in train_df.columns
if f not in ['TARGET', 'SK_ID_CURR', 'SK_ID_BUREAU',
'SK_ID_PREV', 'index']]
    # materialise the fold indices in a list so they can be re-used after the
    # training loop (folds.split returns a generator, which would otherwise be
    # exhausted by the loop before folds_idx is built below)
    xx = list(folds.split(train_df[feats], train_df['TARGET']))
for n_fold, (train_idx, valid_idx) in enumerate(xx):
train_x, train_y = train_df[feats].iloc[train_idx], train_df['TARGET'].iloc[train_idx]
valid_x, valid_y = train_df[feats].iloc[valid_idx], train_df['TARGET'].iloc[valid_idx]
# LightGBM parameters found by Bayesian optimization
clf = LGBMClassifier(
nthread=4,
n_estimators=10000, # quite a few trees!
learning_rate=0.02,
num_leaves=34,
colsample_bytree=0.9497036,
subsample=0.8715623,
max_depth=8,
reg_alpha=0.041545473,
reg_lambda=0.0735294,
min_split_gain=0.0222415,
min_child_weight=39.3259775,
silent=-1,
verbose=-1, )
clf.fit(train_x, train_y,
eval_set=[(train_x, train_y), (valid_x, valid_y)],
eval_metric='auc', verbose=200, early_stopping_rounds=200)
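        # out-of-fold prediction for this fold, made with the best iteration
        # found by early stopping on the validation split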
        oof_preds[valid_idx] = clf.predict_proba(
            valid_x, num_iteration=clf.best_iteration_)[:, 1]
        # the submission predictions average the fold probabilities: each fold
        # finds a different optimal fit (.best_iteration_) on its own data, so
        # each fold's probabilities are weighted by 1/n_splits
        sub_preds += clf.predict_proba(
            test_df[feats],
            num_iteration=clf.best_iteration_)[:, 1] / folds.n_splits
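        # collect this fold's feature importances; they are concatenated below
        # and averaged later for the importance plot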
fold_importance_df = pd.DataFrame()
fold_importance_df["feature"] = feats
fold_importance_df["importance"] = clf.feature_importances_
fold_importance_df["fold"] = n_fold + 1
feature_importance_df = pd.concat([feature_importance_df,
fold_importance_df], axis=0)
print('---------\nFold %2d AUC : %.6f' %
(n_fold + 1, roc_auc_score(valid_y, oof_preds[valid_idx])))
print('Feature importance df shape:', feature_importance_df.shape)
del clf, train_x, train_y, valid_x, valid_y
gc.collect()
print('Full AUC score %.6f\n=======' % roc_auc_score(train_df['TARGET'],
oof_preds))
    # Write submission file, then plot feature importance, the ROC curve and
    # the precision-recall curve
if not debug:
test_df['TARGET'] = sub_preds
test_df[['SK_ID_CURR', 'TARGET']].to_csv(submission_file_name,
index=False)
folds_idx = [(trn_idx, val_idx) for trn_idx, val_idx in xx]
display_importances(feature_importance_df, num_rows)
display_roc_curve(train_df['TARGET'], oof_preds, folds_idx)
display_precision_recall(train_df['TARGET'], oof_preds, folds_idx)
return feature_importance_df, oof_preds, train_df['TARGET'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def train_cv(X_train, Y_train, nfold = 5, early_stopping_rounds = 20):\n # model params\n params = { \"objective\" : \"multiclass\",\n \"num_class\" : 6,\n \"verbosity\" : -1 }\n\n # create dataset for lightgbm\n lgb_train = lgb.Dataset(X_train, Y_train)\n \n # cross validate to find optimal no of iterations\n r = lgb.cv(params, \n lgb_train, \n 10000,\n early_stopping_rounds = early_stopping_rounds,\n nfold = nfold,\n feval = accuracy_error,\n metrics = 'None',\n verbose_eval = True,\n seed = 42)\n\n # Highest score\n r_best = np.max(r['accuracy-mean'])\n\n # best number of estimators\n best_estimators = np.argmax(r['accuracy-mean']) + 1\n print(best_estimators)\n\n print(f'The maxium accuracy on the validation set was {r_best:.5f}')\n print(f'The ideal number of iterations was {best_estimators}.')\n\n # Fit on all of the training data using the ideal number of iterations\n model = lgb.LGBMClassifier(n_estimators=best_estimators, n_jobs = -1,\n **params, random_state = 42) \n model.fit(X_train, Y_train)\n\n return model",
"def lgb_kfolds_scikitlearn(traindata, ytrain, valdata, yval, num_round):\n increment = round(len(traindata) * 0.29)\n data_idx = increment\n split_rounds = params['training']['split-rounds']\n modelkwargs = params['training']['lgbt-model-kwargs']\n evaldictslist = []\n\n model = lgb.LGBMRegressor(\n n_estimators=num_round,\n **modelkwargs\n )\n\n for roundno in range(split_rounds):\n evaldict = {}\n evalcallback = lgb.record_evaluation(evaldict)\n start = data_idx % len(traindata)\n data_idx += increment\n end = data_idx % len(traindata)\n if end < start:\n xsubset = np.vstack((traindata.iloc[start:, :], traindata.iloc[:end, :]))\n ysubset = np.concatenate((ytrain[start:], ytrain[:end]))\n else:\n xsubset = traindata.iloc[start:end, :]\n ysubset = ytrain[start:end]\n\n model.fit(\n xsubset, ysubset,\n eval_set=[(valdata, yval), (traindata, ytrain)],\n eval_names=['validation', 'train'],\n verbose=False,\n callbacks=[evalcallback],\n )\n\n evaldictslist.append(evaldict)\n\n model = model.booster_\n\n return model, evaldictslist",
"def xgb(x_train, y_train, x_test):\n\n model = XGBClassifier()\n # y_train = np.reshape(y_train, (len(y_train), 1))\n # data = np.concatenate((x_train, y_train), axis=1)\n # for train, test in kfold.split(data):\n # # print(\"reached here\")\n # x_tr = data[train, :-1]\n # y_tr = data[train, -1]\n # x_va = data[test, :-1]\n # y_va = data[test, -1]\n\n # model.fit(x_tr, y_tr)\n # y_pred = model.predict(x_va)\n # predictions = [round(value) for value in y_pred]\n # f1 = f1_score(y_va, predictions)\n # print(f1)\n model.fit(x_train, y_train)\n y_predict = model.predict(x_test)\n y_predict = [round(value) for value in y_predict]\n return y_predict",
"def train_lightgbm(x_train, train_labels, x_test, orig_test):\n train_labels = train_labels['Col2']\n num_lgbm_ensemble = 17\n lgb_forests = []\n for i in range(num_lgbm_ensemble):\n print(\"training LGBC model {}\".format(i))\n params = {\n 'n_estimators': 17,\n 'max_depth': 7,\n 'learning_rate': 0.01,\n 'random_state': i,\n 'colsample_bytree': 0.1,\n 'reg_lambda': 15,\n 'reg_alpha': 10\n }\n\n lgbc = models.make_model(params=params, model_name='light_gbm')\n lgbc.fit(x_train, train_labels)\n lgb_forests.append(lgbc)\n\n model_file_path = os.path.join(MODEL_PATH, \"lgb\", \"lgb_forest.pkl\")\n pickle.dump(lgb_forests, open(model_file_path, 'wb'))\n\n evaluation.submission_lgbm(model_file_path,\n x_test,\n orig_test,\n submission_name='submission_lgb.csv')",
"def lgb_hyperopt(data, labels, num_evals=1000, n_folds=5, diagnostic=False):\r\n LGBM_MAX_LEAVES = 2**11 #maximum number of leaves per tree for LightGBM\r\n LGBM_MAX_DEPTH = 25 #maximum tree depth for LightGBM \r\n EVAL_METRIC_LGBM_CLASS = 'f1'\r\n\r\n def lgb_f1_score(y_hat, data):\r\n y_true = data.get_label()\r\n y_hat = np.round(y_hat)\r\n return 'f1', f1_score(y_true, y_hat), True\r\n\r\n print('Running {} rounds of LightGBM parameter optimisation:'.format(num_evals))\r\n #clear space\r\n \r\n integer_params = ['max_depth',\r\n 'num_leaves',\r\n 'max_bin',\r\n 'min_data_in_leaf',\r\n 'min_data_in_bin']\r\n \r\n def objective(space_params):\r\n \r\n #cast integer params from float to int\r\n for param in integer_params:\r\n space_params[param] = int(space_params[param])\r\n \r\n #extract nested conditional parameters\r\n if space_params['boosting']['boosting'] == 'goss':\r\n top_rate = space_params['boosting'].get('top_rate')\r\n other_rate = space_params['boosting'].get('other_rate')\r\n #0 <= top_rate + other_rate <= 1\r\n top_rate = max(top_rate, 0)\r\n top_rate = min(top_rate, 0.5)\r\n other_rate = max(other_rate, 0)\r\n other_rate = min(other_rate, 0.5)\r\n space_params['top_rate'] = top_rate\r\n space_params['other_rate'] = other_rate\r\n \r\n subsample = space_params['boosting'].get('subsample', 1.0)\r\n space_params['boosting'] = space_params['boosting']['boosting']\r\n space_params['subsample'] = subsample\r\n \r\n cv_results = lgb.cv(space_params, train, nfold = n_folds, stratified=True,\r\n early_stopping_rounds=100, seed=42, feval=lgb_f1_score)\r\n \r\n best_loss = -cv_results['f1-mean'][-1]\r\n\r\n return{'loss':best_loss, 'status': STATUS_OK }\r\n \r\n train = lgb.Dataset(data, labels)\r\n \r\n #integer and string parameters, used with hp.choice()\r\n boosting_list = [{'boosting': 'gbdt',\r\n 'subsample': hp.uniform('subsample', 0.5, 1)},\r\n {'boosting': 'goss',\r\n 'subsample': 1.0,\r\n 'top_rate': hp.uniform('top_rate', 0, 0.5),\r\n 'other_rate': hp.uniform('other_rate', 0, 0.5)}] #if including 'dart', make sure to set 'n_estimators'\r\n\r\n objective_list_reg = ['huber', 'gamma', 'fair', 'tweedie']\r\n objective_list_class = ['binary', 'cross_entropy']\r\n objective_list = objective_list_class\r\n is_unbalance_list = [True]\r\n\r\n space ={'boosting' : hp.choice('boosting', boosting_list),\r\n 'num_leaves' : hp.quniform('num_leaves', 2, LGBM_MAX_LEAVES, 1),\r\n 'max_depth': hp.quniform('max_depth', 2, LGBM_MAX_DEPTH, 1),\r\n 'max_bin': hp.quniform('max_bin', 32, 255, 1),\r\n 'min_data_in_leaf': hp.quniform('min_data_in_leaf', 1, 256, 1),\r\n 'min_data_in_bin': hp.quniform('min_data_in_bin', 1, 256, 1),\r\n 'min_gain_to_split' : hp.quniform('min_gain_to_split', 0.1, 5, 0.01),\r\n 'lambda_l1' : hp.uniform('lambda_l1', 0, 5),\r\n 'lambda_l2' : hp.uniform('lambda_l2', 0, 5),\r\n 'learning_rate' : hp.loguniform('learning_rate', np.log(0.005), np.log(0.2)),\r\n 'metric' : None, \r\n 'objective' : hp.choice('objective', objective_list),\r\n 'feature_fraction' : hp.quniform('feature_fraction', 0.5, 1, 0.01),\r\n 'bagging_fraction' : hp.quniform('bagging_fraction', 0.5, 1, 0.01),\r\n 'is_unbalance' : hp.choice('is_unbalance', is_unbalance_list)\r\n }\r\n\r\n trials = Trials()\r\n best = fmin(fn=objective,\r\n space=space,\r\n algo=tpe.suggest,\r\n max_evals=num_evals, \r\n trials=trials)\r\n \r\n #fmin() will return the index of values chosen from the lists/arrays in 'space'\r\n #to obtain actual values, index values are used to subset the original lists/arrays\r\n #extract 
nested conditional parameters\r\n try:\r\n if best['boosting']['boosting'] == 'goss':\r\n top_rate = best['boosting'].get('top_rate')\r\n other_rate = best['boosting'].get('other_rate')\r\n #0 <= top_rate + other_rate <= 1\r\n top_rate = max(top_rate, 0)\r\n top_rate = min(top_rate, 0.5)\r\n other_rate = max(other_rate, 0)\r\n other_rate = min(other_rate, 0.5)\r\n best['top_rate'] = top_rate\r\n best['other_rate'] = other_rate\r\n except:\r\n if boosting_list[best['boosting']]['boosting'] == 'goss':\r\n top_rate = best['top_rate']\r\n other_rate = best['other_rate']\r\n #0 <= top_rate + other_rate <= 1\r\n top_rate = max(top_rate, 0)\r\n top_rate = min(top_rate, 0.5)\r\n other_rate = max(other_rate, 0)\r\n other_rate = min(other_rate, 0.5)\r\n best['top_rate'] = top_rate\r\n best['other_rate'] = other_rate\r\n best['boosting'] = boosting_list[best['boosting']]['boosting']#nested dict, index twice\r\n best['metric'] = metric_list[best['metric']]\r\n best['objective'] = objective_list[best['objective']]\r\n best['is_unbalance'] = is_unbalance_list[best['is_unbalance']]\r\n \r\n #cast floats of integer params to int\r\n for param in integer_params:\r\n best[param] = int(best[param])\r\n \r\n print('{' + '\\n'.join('{}: {}'.format(k, v) for k, v in best.items()) + '}')\r\n if diagnostic:\r\n return(best, trials)\r\n else:\r\n return(best)",
"def make_prediction_classification(logger, run_id, df_train_X, df_train_Y, df_test_X, kf, features=None,\n params=None, n_estimators=10000,\n early_stopping_rounds=100, model_type='lgb',\n is_test=False, seed=42, model=None,\n plot_feature_importance=False, cat_features=None):\n yoof = np.zeros(len(df_train_X))\n yhat = np.zeros(len(df_test_X))\n cv_scores = []\n result_dict = {}\n feature_importance = pd.DataFrame()\n best_iterations = []\n\n # kf = KFold(n_splits=n_splits, random_state=SEED, shuffle=False)\n # kf = StratifiedKFold(n_splits=n_splits, random_state=seed, shuffle=True)\n\n fold = 0\n for in_index, oof_index in kf.split(df_train_X[features], df_train_Y):\n # Start a counter describing number of folds\n fold += 1\n # Number of splits defined as a part of KFold/StratifiedKFold\n n_splits = kf.get_n_splits()\n logger.info(f'fold {fold} of {n_splits}')\n X_in, X_oof = df_train_X.iloc[in_index].values, df_train_X.iloc[oof_index].values\n y_in, y_oof = df_train_Y.iloc[in_index].values, df_train_Y.iloc[oof_index].values\n\n if model_type == 'lgb':\n lgb_train = lgb.Dataset(X_in, y_in)\n lgb_eval = lgb.Dataset(X_oof, y_oof, reference=lgb_train)\n\n model = lgb.train(\n params,\n lgb_train,\n valid_sets=[lgb_train, lgb_eval],\n verbose_eval=50,\n early_stopping_rounds=early_stopping_rounds,\n feature_name=features,\n categorical_feature=cat_features\n )\n\n del lgb_train, lgb_eval, in_index, X_in, y_in\n gc.collect()\n\n yoof[oof_index] = model.predict(X_oof, num_iteration=model.best_iteration)\n if is_test is False:\n yhat += model.predict(df_test_X.values, num_iteration=model.best_iteration)\n\n logger.info(f'Best number of iterations for fold {fold} is: {model.best_iteration}')\n best_iteration = model.best_iteration\n\n elif model_type == 'xgb':\n xgb_train = xgb.DMatrix(data=X_in, label=y_in, feature_names=features)\n xgb_eval = xgb.DMatrix(data=X_oof, label=y_oof, feature_names=features)\n\n watchlist = [(xgb_train, 'train'), (xgb_eval, 'valid_data')]\n model = xgb.train(dtrain=xgb_train,\n num_boost_round=n_estimators,\n evals=watchlist,\n early_stopping_rounds=early_stopping_rounds,\n params=params,\n verbose_eval=50)\n\n del xgb_train, xgb_eval, in_index, X_in, y_in\n gc.collect()\n\n yoof[oof_index] = model.predict(xgb.DMatrix(X_oof, feature_names=features), ntree_limit=model.best_ntree_limit)\n if is_test is False:\n yhat += model.predict(xgb.DMatrix(\n df_test_X.values, feature_names=features),\n ntree_limit=model.best_ntree_limit)\n\n logger.info(f'Best number of iterations for fold {fold} is: {model.best_ntree_limit}')\n best_iteration = model.best_ntree_limit\n\n elif model_type == 'cat':\n # feature_names accepts only list\n cat_train = Pool(data=X_in, label=y_in, feature_names=features.tolist(), cat_features=cat_features)\n cat_eval = Pool(data=X_oof, label=y_oof, feature_names=features.tolist(), cat_features=cat_features)\n cat_test = Pool(data=df_test_X, feature_names=features.tolist(), cat_features=cat_features)\n\n model = CatBoost(params=params)\n model.fit(cat_train, eval_set=cat_eval, use_best_model=True)\n\n del in_index, X_in, y_in, cat_train\n gc.collect()\n\n yoof[oof_index] = model.predict(cat_eval)\n if is_test is False:\n # yhat += model.predict(df_test_X.values)\n yhat += model.predict(cat_test)\n\n del cat_eval, cat_test\n best_iteration = model.best_iteration_\n logger.info(f'Best number of iterations for fold {fold} is: {best_iteration}')\n\n elif model_type == 'sklearn':\n model = model\n model.fit(X_in, y_in)\n\n yoof[oof_index] = 
model.predict_proba(X_oof)[:, 1]\n if is_test is False:\n yhat += model.predict_proba(df_test_X.values)[:, 1]\n\n # Calculate feature importance per fold\n # TODO : Bolier plate code\n if model_type == 'lgb':\n fold_importance = pd.DataFrame()\n fold_importance[\"feature\"] = features\n fold_importance[\"importance\"] = model.feature_importance()\n fold_importance[\"fold\"] = fold\n feature_importance = pd.concat([feature_importance, fold_importance], axis=0)\n feature_importance.sort_values(by=['importance'], inplace=True)\n elif model_type == 'xgb':\n # Calculate feature importance per fold\n fold_importance = pd.DataFrame()\n fold_importance[\"feature\"] = model.get_score().keys()\n fold_importance[\"importance\"] = model.get_score().values()\n fold_importance[\"fold\"] = fold\n feature_importance = pd.concat([feature_importance, fold_importance], axis=0)\n feature_importance.sort_values(by=['importance'], inplace=True)\n elif model_type == 'cat':\n fold_importance = pd.DataFrame()\n fold_importance[\"feature\"] = model.feature_names_\n fold_importance[\"importance\"] = model.get_feature_importance()\n fold_importance[\"fold\"] = fold\n feature_importance = pd.concat([feature_importance, fold_importance], axis=0)\n feature_importance.sort_values(by=['importance'], inplace=True)\n\n cv_oof_score = roc_auc_score(y_oof, yoof[oof_index])\n logger.info(f'CV OOF Score for fold {fold} is {cv_oof_score}')\n cv_scores.append(cv_oof_score)\n best_iterations.append(best_iteration)\n\n del oof_index, X_oof, y_oof\n gc.collect()\n\n util.update_tracking(run_id, \"metric_fold_{}\".format(fold), cv_oof_score, is_integer=False)\n\n yhat /= n_splits\n\n oof_score = round(roc_auc_score(df_train_Y, yoof), 5)\n avg_cv_scores = round(sum(cv_scores)/len(cv_scores), 5)\n std_cv_scores = round(np.array(cv_scores).std(), 5)\n\n logger.info(f'Combined OOF score : {oof_score}')\n logger.info(f'Average of {fold} folds OOF score {avg_cv_scores}')\n logger.info(f'std of {fold} folds OOF score {std_cv_scores}')\n\n result_dict['yoof'] = yoof\n result_dict['prediction'] = yhat\n result_dict['oof_score'] = oof_score\n result_dict['cv_scores'] = cv_scores\n result_dict['avg_cv_scores'] = avg_cv_scores\n result_dict['std_cv_scores'] = std_cv_scores\n\n util.update_tracking(run_id, \"oof_score\", oof_score, is_integer=False)\n util.update_tracking(run_id, \"cv_avg_score\", avg_cv_scores, is_integer=False)\n util.update_tracking(run_id, \"cv_std_score\", std_cv_scores, is_integer=False)\n # Best Iteration\n util.update_tracking(run_id, 'avg_best_iteration', np.mean(best_iterations), is_integer=False)\n util.update_tracking(run_id, 'std_best_iteration', np.std(best_iterations), is_integer=False)\n\n del yoof, yhat\n gc.collect()\n\n # Plot feature importance\n if (model_type == 'lgb') | (model_type == 'xgb') | (model_type == 'cat'):\n # Not sure why it was necessary. Hence commenting\n # feature_importance[\"importance\"] /= n_splits\n cols = feature_importance[[\"feature\", \"importance\"]].groupby(\"feature\").mean().sort_values(\n by=\"importance\", ascending=False)[:50].index\n\n best_features = feature_importance.loc[feature_importance.feature.isin(cols)]\n\n result_dict['feature_importance'] = feature_importance\n result_dict['best_features'] = best_features\n\n logger.info('Training/Prediction completed!')\n return result_dict",
"def train_default(X_train, Y_train):\n model = lgb.LGBMClassifier(objective = 'multiclass', num_class = \"6\", random_state = 42)\n model.fit(X_train, Y_train)\n return model",
"def train_model(X_train, y_train, X_valid, y_valid, params=None, model_type='lgb', \r\n model_path_name='lgb', plot_feature_importance=False, model=None):\r\n def lgb_f1_score(y_true, y_pred):\r\n y_pred = np.round(y_pred)\r\n return 'f1', f1_score(y_true, y_pred), True\r\n\r\n scores = []\r\n feature_importance = pd.DataFrame()\r\n print('Started at', time.ctime())\r\n \r\n \r\n if model_type == 'lgb':\r\n \r\n model = lgb.LGBMClassifier(**params, n_estimators=50000, n_jobs=-1)\r\n model.fit(X_train, y_train, eval_set=(X_valid, y_valid), \r\n eval_metric=lgb_f1_score, early_stopping_rounds=300)\r\n \r\n y_pred_valid = model.predict(X_valid)\r\n \r\n if model_type == 'cat':\r\n model = cb.CatBoost(iterations=20000, **params)\r\n model.fit(X_train, y_train, eval_set=(X_valid, y_valid), cat_features=[], use_best_model=True, verbose=False)\r\n y_pred_valid = model.predict(X_valid)\r\n\r\n #save the model\r\n joblib.dump(model, model_path_name)\r\n \r\n scores.append(f1_score(y_valid, y_pred_valid)) \r\n \r\n if model_type == 'lgb':\r\n # feature importance\r\n fold_importance = pd.DataFrame()\r\n fold_importance[\"feature\"] = X_train.columns\r\n fold_importance[\"importance\"] = model.feature_importances_\r\n feature_importance = pd.concat([feature_importance, fold_importance], axis=0)\r\n \r\n print('score: {0:.4f}.'.format(np.mean(scores)))\r\n\r\n if model_type == 'lgb':\r\n feature_importance[\"importance\"]\r\n if plot_feature_importance:\r\n cols = feature_importance[[\"feature\", \"importance\"]].groupby(\"feature\").mean().sort_values(\r\n by=\"importance\", ascending=False)[:50].index\r\n\r\n best_features = feature_importance.loc[feature_importance.feature.isin(cols)]\r\n\r\n #sns.barplot(x=\"importance\", y=\"feature\", data=best_features.sort_values(by=\"importance\", ascending=False));\r\n \r\n return feature_importance, np.mean(scores)\r\n return np.mean(scores)\r\n \r\n else:\r\n return np.mean(scores)",
"def lgbm_insight_wr():\n steps = [('scaler', t.MyScaler(dont_scale='for_profit')),\n ('knn', t.KNNKeepDf())]\n pipe = Pipeline(steps)\n pipe.fit(X_raw)\n X = pipe.transform(X_raw)\n\n # Run once to get ideal parameters\n # params = {\n # 'max_bin': [10, 20, 50, 100, 255],\n # 'num_leaves': [5, 10, 31, 50],\n # 'bagging_fraction': [.1, .3, .5, .7, 1]\n # }\n\n # lgb_q = LGBMRegressor(objective='quantile')\n #\n # gs = RandomizedSearchCV(lgb_q, params,\n # scoring=['r2', 'neg_mean_squared_error',\n # 'neg_mean_absolute_error'],\n # refit='neg_mean_squared_error'\n # )\n # gs.fit(X.to_numpy(), y)\n lgbm = LGBMRegressor(num_leaves=50,\n max_bin=100,\n bagging_fraction=0.1,\n objective='quantile')\n\n cv_results = cross_validate(lgbm, X.to_numpy(), y,\n scoring=['r2', 'neg_mean_squared_error',\n 'neg_mean_absolute_error'],\n return_train_score=True)\n\n output = pd.DataFrame(\n {'train_r2': [cv_results['train_r2'].mean()],\n 'train_rmse': [np.mean(\n [np.sqrt(abs(i))\n for i in cv_results['train_neg_mean_squared_error']])],\n 'train_mae': [abs(cv_results['train_neg_mean_absolute_error'].mean())],\n 'test_r2': [cv_results['test_r2'].mean()],\n 'test_rmse': [np.mean(\n [np.sqrt(abs(i))\n for i in cv_results['test_neg_mean_squared_error']])],\n 'test_mae': [abs(cv_results['test_neg_mean_absolute_error'].mean())]\n },\n index=['LGBM']\n )\n return output",
"def lgbm_insight_er():\n steps = [('scaler', t.MyScaler(dont_scale='for_profit')),\n ('knn', t.KNNKeepDf())]\n pipe = Pipeline(steps)\n pipe.fit(X_raw_er)\n X = pipe.transform(X_raw_er)\n\n # Run once to get ideal parameters\n # params = {\n # 'max_bin': [10, 20, 50, 100, 255],\n # 'num_leaves': [5, 10, 31, 50],\n # 'min_data_in_leaf': [10, 20, 30],\n # 'bagging_fraction': [.1, .3, .5, .7, 1]\n # }\n\n # lgb_q = LGBMRegressor(objective='quantile')\n\n # gs = RandomizedSearchCV(lgb_q, params,\n # scoring=['r2', 'neg_mean_squared_error',\n # 'neg_mean_absolute_error'],\n # refit='neg_mean_squared_error'\n # )\n # gs.fit(X, y_er)\n\n lgbm = LGBMRegressor(num_leaves=50,\n max_bin=100,\n bagging_fraction=0.1,\n objective='quantile')\n\n cv_results = cross_validate(lgbm, X.to_numpy(), y_er,\n scoring=['r2', 'neg_mean_squared_error',\n 'neg_mean_absolute_error'],\n return_train_score=True)\n\n output = pd.DataFrame(\n {'train_r2': [cv_results['train_r2'].mean()],\n 'train_rmse': [np.mean(\n [np.sqrt(abs(i))\n for i in cv_results['train_neg_mean_squared_error']])],\n 'train_mae': [abs(cv_results['train_neg_mean_absolute_error'].mean())],\n 'test_r2': [cv_results['test_r2'].mean()],\n 'test_rmse': [np.mean(\n [np.sqrt(abs(i))\n for i in cv_results['test_neg_mean_squared_error']])],\n 'test_mae': [abs(cv_results['test_neg_mean_absolute_error'].mean())]\n },\n index=['LGBM']\n )\n return output",
"def train_model(data_dir, rows):\n X, y = read_vectorized_features(data_dir, rows)\n\n # Set params\n # Scores ~0.784 (without tuning and early stopping)\n params = {'boosting_type': 'gbdt',\n 'max_depth' : -1,\n 'objective': 'binary',\n 'nthread': 3, # Updated from nthread\n 'num_leaves': 64,\n 'learning_rate': 0.05,\n 'max_bin': 512,\n 'subsample_for_bin': 200,\n 'subsample': 1,\n 'subsample_freq': 1,\n 'colsample_bytree': 0.8,\n 'reg_alpha': 5,\n 'reg_lambda': 10,\n 'min_split_gain': 0.5,\n 'min_child_weight': 1,\n 'min_child_samples': 5,\n 'scale_pos_weight': 1,\n 'num_class' : 1,\n 'metric' : 'binary_error'}\n\n # Create parameters to search\n gridParams = {\n 'learning_rate': [0.15, 0.2, 0.25, 0.3], #default = 0.1\n 'n_estimators': [40],\n 'num_leaves': [6,8,12,16],\n 'boosting_type' : ['gbdt'],\n 'objective' : ['binary'],\n 'random_state' : [501], # Updated from 'seed'\n 'colsample_bytree' : [0.65, 0.66],\n 'subsample' : [0.7,0.75],\n 'reg_alpha' : [1,1.2],\n 'reg_lambda' : [1,1.2,1.4],\n }\n\n # Create classifier to use. Note that parameters have to be input manually\n # not as a dict!\n mdl = lgb.LGBMClassifier(boosting_type= 'gbdt',\n objective = 'binary',\n n_jobs = 3, # Updated from 'nthread'\n silent = True,\n max_depth = params['max_depth'],\n max_bin = params['max_bin'],\n subsample_for_bin = params['subsample_for_bin'],\n subsample = params['subsample'],\n subsample_freq = params['subsample_freq'],\n min_split_gain = params['min_split_gain'],\n min_child_weight = params['min_child_weight'],\n min_child_samples = params['min_child_samples'],\n scale_pos_weight = params['scale_pos_weight'])\n\n # Create the grid\n grid = GridSearchCV(mdl, gridParams,\n verbose=0,\n cv=4,\n n_jobs=2)\n # train\n grid.fit(X, y)\n print(grid.best_params_)\n print(grid.best_score_)\n\n\n # train\n lgbm_dataset = lgb.Dataset(X, y)\n lgbm_model = lgb.train({\"application\": \"binary\"}, lgbm_dataset)\n\n return lgbm_model",
"def lgb_train_method(traindata, ytrain, valdata, yval, num_round):\n traindataset = lgb.Dataset(traindata, label=ytrain)\n valdataset = lgb.Dataset(valdata, label=yval)\n\n model_kwargs = params['training']['lgbt-model-kwargs']\n\n # train model\n evaldict = {}\n model = lgb.train(model_kwargs,\n traindataset,\n num_round,\n valid_sets=[valdataset, traindataset],\n valid_names=['validation', 'train'],\n early_stopping_rounds=10,\n evals_result=evaldict,\n verbose_eval=False\n )\n\n return model, evaldict",
"def main(debug=False):\n num_rows = 10000 if debug else None\n df = application_train_test(num_rows)\n print('df shape:', df.shape, '- After app process')\n with timer(\"Process bureau and bureau_balance\"):\n bureau = bureau_and_balance(num_rows)\n print('---------')\n print(\"df shape:\", bureau.shape, '- just bureau')\n df = df.join(bureau, how='left', on='SK_ID_CURR')\n print('df shape:', df.shape, '- After bureau')\n del bureau\n gc.collect()\n\n with timer(\"Process previous_applications\"):\n prev = previous_applications(num_rows)\n print('---------')\n print(\"df shape:\", prev.shape, '- Previous applications')\n df = df.join(prev, how='left', on='SK_ID_CURR')\n print(\"df shape:\", df.shape, '- Joined Previous applications')\n del prev\n gc.collect()\n\n with timer(\"Process POS-CASH balance\"):\n pos = pos_cash(num_rows)\n print('---------')\n print(\"df shape:\", pos.shape, \"- just Pos-cash balance\")\n df = df.join(pos, how='left', on='SK_ID_CURR')\n print(\"df shape:\", df.shape, '- Joined pos-cash')\n del pos\n gc.collect()\n\n with timer(\"Process installments payments\"):\n ins = installments_payments(num_rows)\n print(\"df shape:\", ins.shape, \"just Installments payments\")\n df = df.join(ins, how='left', on='SK_ID_CURR')\n print(\"df shape:\", df.shape, '- Joined Installments')\n del ins\n gc.collect()\n with timer(\"Process credit card balance\"):\n cc = credit_card_balance(num_rows)\n print(\"df shape:\", cc.shape, \"- just Credit card balance \")\n df = df.join(cc, how='left', on='SK_ID_CURR')\n print(\"df shape:\", df.shape, '- Joined cc balance')\n del cc\n gc.collect()\n\n with timer(\"Run LightGBM with kfold\"):\n feature_importance_df, oof_preds, y = kfold_lightgbm(df, num_rows, num_folds=10, stratified=False, debug=debug)\n\n return feature_importance_df, oof_preds, y",
"def k_fold_BoW(data, vectorizer, features, k=10, reg=1):\n \n shuffle(data) # random shuffle data before making folds\n \n folds = chunks(data, k)\n k_fold_acc = []\n \n for fold in folds:\n # CountVectorizer: convert a collection of text documents \n # to a matrix of token counts.\n count_vectorizer = vectorizer(tokenizer=identity_tokenizer, lowercase=False,\n max_features=features) \n \n # Matrix of shape len(subj_train) x #words.\n train_data = (fold[0][k][0] for k in range(len(fold[0]))) # text for the training data\n train_features = count_vectorizer.fit_transform(train_data)\n\n ### Logistic regression classifier.\n logreg = linear_model.LogisticRegression(C=reg, n_jobs=1, solver='liblinear', multi_class='ovr')\n train_tag = [fold[0][k][1] for k in range(len(fold[0]))] # labels for the trainig data\n logreg = logreg.fit(train_features, train_tag)\n \n test_data = fold[1] # Both text and labels\n acc = evaluate_prediction_BoW(count_vectorizer, logreg, test_data)\n k_fold_acc.append(acc)\n \n return k_fold_acc",
"def train_model_regression(X, X_test, y, params, folds, model_type='lgb', eval_metric='mae', columns=None,\r\n plot_feature_importance=False, model=None,\r\n verbose=10000, early_stopping_rounds=200, n_estimators=50000):\r\n columns = X.columns if columns is None else columns\r\n X_test = X_test[columns]\r\n\r\n # to set up scoring parameters\r\n metrics_dict = {'mae': {'lgb_metric_name': 'mae',\r\n 'catboost_metric_name': 'MAE',\r\n 'sklearn_scoring_function': metrics.mean_absolute_error},\r\n 'group_mae': {'lgb_metric_name': 'mae',\r\n 'catboost_metric_name': 'MAE',\r\n 'scoring_function': group_mean_log_mae},\r\n 'mse': {'lgb_metric_name': 'mse',\r\n 'catboost_metric_name': 'MSE',\r\n 'sklearn_scoring_function': metrics.mean_squared_error}\r\n }\r\n\r\n result_dict = {}\r\n\r\n # out-of-fold predictions on train data\r\n oof = np.zeros(len(X))\r\n\r\n # averaged predictions on train data\r\n prediction = np.zeros(len(X_test))\r\n\r\n # list of scores on folds\r\n scores = []\r\n feature_importance = pd.DataFrame()\r\n\r\n # split and train on folds\r\n for fold_n, (train_index, valid_index) in enumerate(folds.split(X)):\r\n print(f'Fold {fold_n + 1} started at {time.ctime()}')\r\n if type(X) == np.ndarray:\r\n X_train, X_valid = X[columns][train_index], X[columns][valid_index]\r\n y_train, y_valid = y[train_index], y[valid_index]\r\n else:\r\n X_train, X_valid = X[columns].iloc[train_index], X[columns].iloc[valid_index]\r\n y_train, y_valid = y.iloc[train_index], y.iloc[valid_index]\r\n\r\n if model_type == 'lgb':\r\n model = lgb.LGBMRegressor(**params, n_estimators=n_estimators, n_jobs=-1)\r\n model.fit(X_train, y_train,\r\n eval_set=[(X_train, y_train), (X_valid, y_valid)],\r\n eval_metric=metrics_dict[eval_metric]['lgb_metric_name'],\r\n verbose=verbose, early_stopping_rounds=early_stopping_rounds)\r\n\r\n y_pred_valid = model.predict(X_valid)\r\n y_pred = model.predict(X_test, num_iteration=model.best_iteration_)\r\n\r\n if model_type == 'xgb':\r\n train_data = xgb.DMatrix(data=X_train, label=y_train, feature_names=X.columns)\r\n valid_data = xgb.DMatrix(data=X_valid, label=y_valid, feature_names=X.columns)\r\n\r\n watchlist = [(train_data, 'train'), (valid_data, 'valid_data')]\r\n model = xgb.train(dtrain=train_data, num_boost_round=20000, evals=watchlist, early_stopping_rounds=200,\r\n verbose_eval=verbose, params=params)\r\n y_pred_valid = model.predict(xgb.DMatrix(X_valid, feature_names=X.columns),\r\n ntree_limit=model.best_ntree_limit)\r\n y_pred = model.predict(xgb.DMatrix(X_test, feature_names=X.columns), ntree_limit=model.best_ntree_limit)\r\n\r\n if model_type == 'sklearn':\r\n model = model\r\n model.fit(X_train, y_train)\r\n\r\n y_pred_valid = model.predict(X_valid).reshape(-1, )\r\n score = metrics_dict[eval_metric]['sklearn_scoring_function'](y_valid, y_pred_valid)\r\n print(f'Fold {fold_n}. 
{eval_metric}: {score:.4f}.')\r\n print('')\r\n\r\n y_pred = model.predict(X_test).reshape(-1, )\r\n\r\n if model_type == 'cat':\r\n model = CatBoostRegressor(iterations=20000, eval_metric=metrics_dict[eval_metric]['catboost_metric_name'],\r\n **params,\r\n loss_function=metrics_dict[eval_metric]['catboost_metric_name'])\r\n model.fit(X_train, y_train, eval_set=(X_valid, y_valid), cat_features=[], use_best_model=True,\r\n verbose=False)\r\n\r\n y_pred_valid = model.predict(X_valid)\r\n y_pred = model.predict(X_test)\r\n\r\n oof[valid_index] = y_pred_valid.reshape(-1, )\r\n if eval_metric != 'group_mae':\r\n scores.append(metrics_dict[eval_metric]['sklearn_scoring_function'](y_valid, y_pred_valid))\r\n else:\r\n scores.append(metrics_dict[eval_metric]['scoring_function'](y_valid, y_pred_valid, X_valid['type']))\r\n\r\n prediction += y_pred\r\n\r\n if model_type == 'lgb' and plot_feature_importance:\r\n # feature importance\r\n fold_importance = pd.DataFrame()\r\n fold_importance[\"feature\"] = columns\r\n fold_importance[\"importance\"] = model.feature_importances_\r\n fold_importance[\"fold\"] = fold_n + 1\r\n feature_importance = pd.concat([feature_importance, fold_importance], axis=0)\r\n\r\n prediction /= folds.n_splits\r\n\r\n print('CV mean score: {0:.4f}, std: {1:.4f}.'.format(np.mean(scores), np.std(scores)))\r\n\r\n result_dict['oof'] = oof\r\n result_dict['prediction'] = prediction\r\n result_dict['scores'] = scores\r\n\r\n # if model_type == 'lgb':\r\n # if plot_feature_importance:\r\n # feature_importance[\"importance\"] /= folds.n_splits\r\n # cols = feature_importance[[\"feature\", \"importance\"]].groupby(\"feature\").mean().sort_values(\r\n # by=\"importance\", ascending=False)[:50].index\r\n #\r\n # best_features = feature_importance.loc[feature_importance.feature.isin(cols)]\r\n #\r\n # plt.figure(figsize=(16, 12));\r\n # sns.barplot(x=\"importance\", y=\"feature\", data=best_features.sort_values(by=\"importance\", ascending=False));\r\n # plt.title('LGB Features (avg over folds)');\r\n #\r\n # result_dict['feature_importance'] = feature_importance\r\n\r\n return result_dict",
"def bagxgb_train(X_train, y_train, size=10, write=False):\n list_models = []\n #d_train = lgb.Dataset(X_train, label=y_train)\n with tqdm(total=size) as pbar:\n for nb in range(size):\n model = xgb.XGBClassifier(max_depth=7, min_child_weight=1, learning_rate=0.01, n_estimators=5000, gamma=0.8, subsample=0.95, colsample_bytree=0.6, reg_alpha=0.0025, objective='binary:logistic', nthread=4, scale_pos_weight=1,\n seed=nb+1)\n model.fit(X_train, y_train)\n list_models.append(model)\n pbar.update()\n return list_models",
"def gc_kfold_cv(data, group, begin, end):\n # Get group data\n data['group'] = group\n \n # Filter test and train based on begin and end\n test = data[data['group'].isin(range(begin, end))]\n train = data[~data['group'].isin(range(begin, end))]\n \n # Return train and test \n dfs = {} \n tsets = [train, test]\n \n # Combine train and test to return dfs\n for i, val in enumerate([1, 2]):\n dfs[val] = tsets[i]\n \n return dfs",
"def xgb_experiment(X, y):\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n dtrain = xgb.DMatrix(X_train, label=y_train)\n dtest = xgb.DMatrix(X_test, label=y_test)\n\n param = {'optimizer': 'dart', 'max_depth': 5, 'eta': 0.001,\n 'silent': 1, 'objective': 'multi:softmax', 'num_class': 10}\n watchlist = [(dtest, 'eval'), (dtrain, 'train')]\n num_round = 1000\n bst = xgb.train(param, dtrain, num_round, watchlist, verbose_eval=False)\n preds = bst.predict(dtest)\n labels = dtest.get_label()\n logging.info('error=%f' % (sum(1 for i in range(len(preds)) if int(preds[i] > 0.5) != labels[i]) / float(len(preds))))\n return bst",
"def train_kfold(self, blank_model, model_params, dataset, batch_size, drop_last_batch=True):\n kfold = StratifiedKFold(n_splits=self.cv, shuffle=True)\n best_val_loss = 999999\n best_cv = 0\n best_model = None\n for i, (train_idx, test_idx) in enumerate(kfold.split(dataset, dataset.targets)):\n # Initialize model\n #model = copy.deepcopy(blank_model)\n model = blank_model(**model_params).to(model_params['device'])\n print(model.dev)\n #print(model_params['device'])\n print('Training fold %d'%(i), flush=True)\n train_ds = torch.utils.data.Subset(dataset, train_idx)\n test_ds = torch.utils.data.Subset(dataset, test_idx)\n train_loader = torch.utils.data.DataLoader(train_ds, batch_size=batch_size, shuffle=True, drop_last=drop_last_batch, num_workers=self.num_work)\n test_loader = torch.utils.data.DataLoader(test_ds, batch_size=batch_size, shuffle=True, drop_last=drop_last_batch, num_workers=self.num_work)\n # Train model\n epoch_hist = model.train_model(train_loader, test_loader=test_loader, n_epochs=self.n_epochs, learning_rate=self.lr, train_patience=self.train_p, test_patience=self.test_p, save_model=False)\n # Save attributes\n self.cv_res_dict[i]['history'] = epoch_hist\n if 'valid_loss' in epoch_hist.keys():\n self.cv_res_dict[i]['best_valid_loss'] = np.min(epoch_hist['valid_loss'])\n #if self.return_model:\n #self.cv_res_dict[i]['model'] = model\n if best_val_loss > np.min(epoch_hist['valid_loss']):\n best_val_loss = np.min(epoch_hist['valid_loss'])\n best_cv = i\n best_model = model\n # Save if save all\n if self.save_all:\n full_path = self.path_dir+self.model_prefix+'fold_'+str(i)+'.pt'\n print('Saving model at %s'%(full_path), flush=True)\n torch.save(model.state_dict(), full_path)\n\n # delete model from memory ?\n del model\n if model_params['device'] == torch.device('cuda'):\n torch.cuda.empty_cache()\n\n self.best_cv = best_cv\n print('Best Fold: %d'%(self.best_cv), flush=True)\n # Save best model to path\n if self.save_best and not self.save_all:\n path_best = self.path_dir+self.model_prefix+'best_fold.pt'\n print('Saving best model at %s'%(path_best), flush=True)\n torch.save(best_model.state_dict(), path_best)\n elif self.save_best and self.save_all:\n print('Best model already saved for fold %d'%(self.best_cv), flush=True)\n\n return",
"def xgb_scikit_random_train(train_X, train_Y, test_X, test_Y):\n\n x_train, x_val, y_train, y_val = train_test_split(train_X, train_Y, test_size=0.1)\n logger.info(f\"Train set size: {len(x_train)}, validation set(for early stopping) size: {len(x_val)}\")\n objective = 'binary:logistic'\n eval_metric = 'logloss'\n early_stopping_rounds = 7\n n_iter = 100 # number of iterations for RandomizedSearchCV\n param_dist = {\n 'n_estimators': stats.randint(100, 300), # default 100, try 100-300\n 'max_depth': stats.randint(5, 10), # default 6, try 5-10\n 'gamma': stats.uniform(0, 10), # default 0, try 0-10\n 'subsample': stats.uniform(0.8, 0.2), # default 1, try 0.8-1\n 'colsample_bytree': stats.uniform(0.7, 0.3), # default 1, try 0.7-1\n 'learning_rate': stats.loguniform(1e-3, 10), # default 0.3, try 0.001-10\n }\n clf = xgb.XGBClassifier(objective=objective, eval_metric=eval_metric, early_stopping_rounds=early_stopping_rounds)\n xgb_search = RandomizedSearchCV(clf, param_distributions=param_dist, n_iter=n_iter,\n return_train_score=True, n_jobs=-1, pre_dispatch=64)\n xgb_search.fit(x_train, y_train, eval_set=[(x_val, y_val)], verbose=False)\n model = xgb_search.best_estimator_\n test_score, train_score, val_score = report_model(model, test_X, test_Y, x_train, y_train, x_val, y_val)\n\n return model, f\"spread_{test_score}_XGB_{datetime.datetime.now():%Y%m%d_%H%M}\"",
"def k_fold_linear(data: pd.DataFrame, algorithm, folds: int = 5) -> (float, float):\n d = data.sample(frac=1)\n segments = np.array_split(d, folds)\n acc_test = []\n\n acc_train = []\n for i in range(folds):\n temp = segments.copy()\n\n test = temp.pop(i)\n train = pd.concat(temp)\n test_labels = list(test['Labels'])\n train_labels = list(train['Labels'])\n\n model = algorithm(train)\n test_predictions = [round(x, 1) for x in predict_linear_regression(test.drop(['Labels'], axis=1), model)]\n train_predictions = [round(x, 1) for x in predict_linear_regression(train.drop(['Labels'], axis=1), model)]\n\n Confusion_Matrix(test_predictions, test_labels)\n\n acc_test.append(accuracy(test_predictions, test_labels))\n acc_train.append(accuracy(train_predictions, train_labels))\n\n return avg(acc_train), avg(acc_test)",
"def train_xgb(X,y):\n\t\n\txgb_handle = xgb.XGBClassifier()\n\n\tone_to_left = st.beta(10, 1) \n\tfrom_zero_positive = st.expon(0, 50)\n\t\n\t#Define distributions to sample from for hyper parameter optimization\n\tparam_dist = { \n\t \"n_estimators\": st.randint(3, 40),\n\t \"max_depth\": st.randint(3, 40),\n\t \"learning_rate\": st.uniform(0.05, 0.4),\n\t \"colsample_bytree\": one_to_left,\n\t \"subsample\": one_to_left,\n\t \"gamma\": st.uniform(0, 10),\n\t \"reg_alpha\": from_zero_positive,\n\t \"min_child_weight\": from_zero_positive,\n\t}\n\n\tn_iter_search = 20\n\trandom_search = RandomizedSearchCV(xgb_handle, param_distributions=param_dist,\n\t n_iter=n_iter_search,verbose=10,scoring=\"roc_auc\",\n\t n_jobs=1,cv=5)\n\n\trandom_search_res_xgb = random_search.fit(X, y)\n\t\n\t#Get the best model that was retrained on all data\n\txgb_model = random_search_res_xgb.best_estimator_\n\n\treturn(xgb_model,random_search_res_xgb)",
"def train_xgb(params, X_train, y_train, cv, scorer='neg_mean_squared_error', seed=42):\n\n n_estimators = int(params[\"n_estimators\"])\n max_depth= int(params[\"max_depth\"])\n\n try:\n model = xgb.XGBRegressor(n_estimators=n_estimators,\n max_depth=max_depth,\n learning_rate=params[\"learning_rate\"],\n subsample=params[\"subsample\"], \n seed=seed)\n\n \n #result = model.fit(X_train,\n # y_train.values.ravel(),\n # eval_set=[(X_train, y_train.values.ravel())],\n # early_stopping_rounds=50,\n # verbose=False)\n\n fit_params = {\n 'eval_set': [(X_train, y_train.values.ravel())],\n 'early_stopping_rounds': 50,\n 'verbose': False\n }\n\n return_estimator = False\n cv_score = cross_validate(\n model,\n X_train, y_train.values.ravel(),\n cv=cv,\n scoring=scorer,\n return_estimator=return_estimator,\n fit_params=fit_params\n )\n\n scores = np.abs(np.array(cv_score['test_score']))\n avg_score = np.mean(scores)\n return {\n \"loss\": avg_score,\n \"scores\": scores,\n \"status\": STATUS_OK,\n #\"models\": cv_score['estimator']\n }\n\n except ValueError as ex:\n return {\n \"error\": ex,\n \"status\": STATUS_FAIL\n }",
"def __init__(self, splits):\n\t\tself.kfold = KFold(splits)",
"def nested_cv(X, y, model, n_splits, n_folds, unique_id):\n \n cv = StratifiedKFold(n_splits=n_splits,\n shuffle=True,\n random_state=42) # Outer CV\n \n i_start = 0\n i_list = []\n results_df = None\n cv_path = unique_id + '_NestedCV.pkl'\n \n if os.path.isfile(cv_path) == True: # If CV is incomplete, resume\n results_df = pd.read_pickle(cv_path)\n i_start = results_df.Outer_fold.max() + 1\n print('Resuming cross-validation from fold ' + str(i_start + 1))\n \n # Generate indices to split data by StratifiedKFold\n # Append indices for each fold to list \n for tr_i, te_i in cv.split(X,y):\n i_list.append([tr_i, te_i])\n \n # For each fold...\n for i in range(i_start, len(i_list)):\n results_list = []\n print('Beginning fold ' + str(i+1) + ' of ' + str(len(i_list)))\n \n # Split data into training and test tests\n X_train = X.loc[X.index.intersection(i_list[i][0])]\n y_train = y.loc[y.index.intersection(i_list[i][0])]\n X_test = X.loc[X.index.intersection(i_list[i][1])]\n y_test = y.loc[y.index.intersection(i_list[i][1])]\n\n start = time.time()\n \n # Fit the HyperoptEstimator to training data (optimise model)\n model.fit(X_train,\n y_train,\n n_folds=n_folds, # Inner stratified k-fold CV\n cv_shuffle=True)\n \n end = time.time()\n duration = end - start\n\n # Use optimised model to predict labels for test data\n y_pred = model.predict(X_test)\n score = f1_score(y_test, y_pred, average='weighted') # Evaluate\n \n # Everything below: formats and/or calculates results for output file\n sorted_labels = np.sort(y_test.unique())\n unweighted_score = f1_score(y_test, y_pred,\n average=None,\n labels=sorted_labels)\n c_matrix = confusion_matrix(y_test, y_pred,\n labels=sorted_labels)\n\n for trial in range(len(model.trials.trials)):\n if model.trials.trials[trial].get('result').get('status') == 'ok':\n trial_loss = model.trials.trials[trial].get('result').get('loss')\n trial_duration = model.trials.trials[trial].get('result').get('duration')\n else:\n trial_loss = np.nan\n trial_duration = np.nan\n \n results_list.append([i,\n score,\n unweighted_score,\n le.inverse_transform(sorted_labels),\n c_matrix,\n duration,\n trial,\n trial_loss,\n trial_duration])\n \n append_df = pd.DataFrame(results_list,\n columns=['Outer_fold',\n 'Outer_score',\n 'Outer_unweighted_scores',\n 'Outer_unweighted_score_labels',\n 'Outer_confusion_matrix',\n 'Outer_training_duration',\n 'Trial',\n 'Trial_loss',\n 'Trial_duration'])\n if i == i_start:\n if results_df is not None:\n final_df = pd.concat([results_df,\n append_df],\n ignore_index=True)\n else:\n final_df = append_df\n final_df.to_pickle(cv_path)\n \n else:\n results_df = pd.read_pickle(cv_path)\n final_df = pd.concat([results_df,\n append_df],\n ignore_index=True)\n final_df.to_pickle(cv_path)",
"def crossValidationKfold(automodel, \r\n X, y,\r\n params_automl : dict = {},\r\n score_function = accuracy_score,\r\n cv : int = 3,\r\n shuffle: bool = True,\r\n verbose : bool = True,\r\n allmetrics: bool = False):\r\n if(isinstance(X, pd.DataFrame) or isinstance(y, pd.DataFrame)):\r\n X = X.values\r\n y = y.values\r\n skf = StratifiedKFold(n_splits = cv, \r\n shuffle = shuffle, \r\n random_state = 42)\r\n if(allmetrics):\r\n train_scores = {'accuracy' : [], \r\n 'roc_auc': [], \r\n 'f1' : [], \r\n 'recall' : [], \r\n 'precision': []}\r\n test_scores = {'accuracy' : [], \r\n 'roc_auc': [], \r\n 'f1' : [], \r\n 'recall' : [], \r\n 'precision': []}\r\n else:\r\n train_scores = np.empty((cv, ))\r\n test_scores = np.empty((cv, ))\r\n for idx, (idx_tr, idx_ts) in enumerate(skf.split(X, y)):\r\n X_tr, X_ts = X[idx_tr], X[idx_ts]\r\n y_tr, y_ts = y[idx_tr], y[idx_ts] \r\n am = automodel(**params_automl)\r\n am.fit(X_tr, y_tr)\r\n if(not allmetrics):\r\n \r\n train_scores[idx] = score_function(am.predict(X_tr), y_tr)\r\n test_scores[idx] = score_function(am.predict(X_ts), y_ts)\r\n if(verbose):\r\n print('it: {} train score: {:.3f}, val score: {:.3f}'.format(idx, \r\n train_scores[idx],\r\n test_scores[idx]))\r\n else:\r\n train_current = {}\r\n test_current = {}\r\n for name, metric in all_metrics_classifications.items():\r\n train_current[name] = metric(am.predict(X_tr), y_tr)\r\n test_current[name] = metric(am.predict(X_ts), y_ts)\r\n train_scores[name].append(train_current[name])\r\n test_scores[name].append(test_current[name])\r\n \r\n if(verbose):\r\n print('it: {} train scores: {}, val scores: {}'.format(idx, train_current,\r\n test_current))\r\n\r\n if(not allmetrics):\r\n return test_scores.mean(), test_scores.std()\r\n else:\r\n # -- calculate means of all metrics-- #\r\n return dict(map(lambda kv: (kv[0], np.asarray(kv[1]).mean()), test_scores.items()))",
"def cv_fit_xgb_model(model,\n X_train, y_train,\n X_valid, y_valid,\n cv_nfold=5,\n early_stopping_rounds=50,\n missing=np.nan,\n eval_metric='auc',\n scoring=None,\n verbose=True):\n # Train cv\n xgb_param = model.get_xgb_params()\n dtrain = xgb.DMatrix(X_train.values, label=y_train.values, missing=missing)\n cv_result = xgb.cv(\n xgb_param,\n dtrain,\n num_boost_round=model.get_params()['n_estimators'],\n nfold=cv_nfold,\n metrics=[eval_metric],\n early_stopping_rounds=early_stopping_rounds,\n show_progress=False)\n best_n_estimators = cv_result.shape[0]\n model.set_params(n_estimators=best_n_estimators)\n\n # Train model\n model.fit(X_train, y_train, eval_metric=eval_metric)\n\n scorer = get_scorer(scoring)\n # Predict and score training data\n train_score = scorer(model, X_train, y_train)\n # Predict and score validation data\n valid_score = scorer(model, X_valid, y_valid)\n\n # Print model report:\n if verbose:\n print(\"\\nModel Report\")\n print(\"best n_estimators: {}\".format(best_n_estimators))\n print(\"Score (Train): %f\" % train_score)\n print(\"Score (Validation) : %f\" % valid_score)\n\n return best_n_estimators, train_score, valid_score",
"def xgboost_cv(self, nsplits: int = 5) -> (float, float, float):\r\n x_train, x_test, y_train, y_test = train_test_split(self.x, self.y, test_size=0.2)\r\n params = {\r\n \"max_depth\": [2, 3, 5, 8],\r\n \"eta\": [0.01, 0.05, 0.1, 0.15, 0.2],\r\n \"objective\": ['binary:logistic'],\r\n \"sumsample\": [0.5, 0.7, 1],\r\n \"colsample_bytree\": [0.5, 0.7, 1],\r\n \"n_estimators\": [50, 100, 200, 500],\r\n }\r\n \"\"\"\r\n fit_params = {\r\n \"early_stopping_rounds\": 20,\r\n \"eval_metric\": \"error\",\r\n \"eval_set\": [(x_test, y_test)]\r\n }\r\n \"\"\"\r\n model = xgb.XGBClassifier()\r\n gridcv = GridSearchCV(model, params, cv=nsplits)\r\n gridcv.fit(x_train, y_train) # , **fit_params)\r\n best_params = gridcv.best_params_\r\n cv = KFold(n_splits=nsplits)\r\n acc_result = []\r\n for train, test in cv.split(self.x):\r\n x_train = self.x[train, :]\r\n x_test = self.x[test, :]\r\n y_train = self.y[train]\r\n y_test = self.y[test]\r\n model = xgb.XGBClassifier(**best_params).fit(x_train, y_train)\r\n \"\"\"\r\n x_t, x_v, y_t, y_v = train_test_split(x_train, y_train, test_size=0.2)\r\n model = xgb.XGBClassifier(**best_params).fit(x_t, y_t, eval_metric=\"error\", eval_set=[(x_v, y_v)],\r\n early_stopping_rounds=20)\r\n \"\"\"\r\n y_predict = model.predict(x_test)\r\n acc_result.append(binary_acc(y_test, y_predict))\r\n return np.mean(acc_result), np.std(acc_result), best_params",
"def __init__(self, splits):\n\t\tself.kfold = StratifiedKFold(splits)",
"def k_fold_FVGMM(data, wv_model, n_comp=15, k=10, reg=1):\n \n ## Prepare the corpus.\n tokenized_data_text = [data[k][0] for k in range(len(data))] # data\n \n # Initialize a GMM with K components.\n gmm_neu = mixture.GaussianMixture(n_components=n_comp, covariance_type='diag', \n max_iter=300, n_init=10, reg_covar=1e-05)\n \n # Fit the word embedding data with the GMM model.\n gmm_neu.fit(wv_model.vectors)\n \n ## Create train/test sets.\n data_tags = [data[k][1] for k in range(len(data))] # tags\n comb_data = list(zip(tokenized_data_text, data_tags))\n random.shuffle(comb_data)\n folds = chunks(comb_data, k)\n \n k_fold_acc = []\n \n for fold in folds:\n # Training data\n X_train = [fold[0][k][0] for k in range(len(fold[0]))] # text \n y_train = [fold[0][k][1] for k in range(len(fold[0]))] # labels\n \n # Test data\n X_test = [fold[1][k][0] for k in range(len(fold[1]))] # text \n y_test = [fold[1][k][1] for k in range(len(fold[1]))] # labels\n \n # Get sentence embedding by using the FVs.\n X_train_FV = [FV_GMM(BoWE_doc(wv_model, X_train[k]), gmm_neu) for k in range(len(X_train))]\n X_test_FV = [FV_GMM(BoWE_doc(wv_model, X_test[k]), gmm_neu) for k in range(len(X_test))]\n \n ## Logistic regression classifier.\n\n # Use the elements in train_vecs as feature vectors.\n logreg = linear_model.LogisticRegression(C=reg, n_jobs=1, solver='liblinear', multi_class='ovr')\n logreg = logreg.fit(X_train_FV, y_train)\n\n ## Evaluation.\n acc = evaluate_prediction(logreg, X_test_FV, y_test)\n k_fold_acc.append(acc)\n \n return k_fold_acc"
] | [
"0.73550516",
"0.6758229",
"0.6660438",
"0.6614328",
"0.6598201",
"0.621922",
"0.6148235",
"0.6131735",
"0.612636",
"0.6091015",
"0.60582244",
"0.60028034",
"0.59997773",
"0.5892357",
"0.58801293",
"0.58663964",
"0.5835386",
"0.58261806",
"0.5813785",
"0.5792207",
"0.5786084",
"0.57149756",
"0.5703422",
"0.57025105",
"0.5699269",
"0.5696957",
"0.56803894",
"0.5652597",
"0.5630858",
"0.5594309"
] | 0.79730666 | 0 |
Convert variable to integer or string depending on the case. | def to_int(variable):
try:
return int(variable)
except ValueError:
return variable | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def to_str(variable):\n try:\n int(variable)\n return str(variable)\n except ValueError:\n return variable",
"def convertInt(s):\n try:\n int(s)\n return \"INT\"\n except:\n return s",
"def _as_int(self, name):\n org_type = self._get_type(name)\n if org_type == 'int': return None\n valid = ['single']\n is_num_str = self.is_like_numeric(name) if org_type == 'string' else False\n is_all_ints = self._all_str_are_int(self._data[name])\n is_convertable = is_num_str and is_all_ints\n if not (org_type in valid or is_convertable):\n msg = 'Cannot convert variable {} of type {} to int!'\n raise TypeError(msg.format(name, org_type))\n if self._has_categorical_data(name):\n self._meta['columns'][name].pop('values')\n self._meta['columns'][name]['type'] = 'int'\n if org_type == 'string':\n if is_all_ints:\n self._data[name] = self._data[name].apply(lambda x: int(x))\n else:\n self._data[name] = self._data[name].apply(lambda x: float(x))\n return None",
"def makeinputstring(variabel):\r\n if type(variabel) == int:\r\n return str(variabel)\r\n elif type(variabel) == float:\r\n return str(int(float(variabel)))\r\n else:\r\n return str(variabel)",
"def int_or_str(text):\n try:\n return int(text)\n except ValueError:\n return text",
"def int_or_str(text):\n try:\n return int(text)\n except ValueError:\n return text",
"def int_or_str(text):\n try:\n return int(text)\n except ValueError:\n return text",
"def int_or_str(text):\n try:\n return int(text)\n except ValueError:\n return text",
"def int_or_str(text):\n try:\n return int(text)\n except ValueError:\n return text",
"def to_int(name, default=0):\n try:\n return int(get(name))\n except (TypeError, ValueError):\n return default",
"def Int(val):\n try:\n return int(val)\n except ValueError:\n return ''",
"def tryCastToInt(number):\n try:\n return int(number)\n except:\n print(\"Error! Impossible to parse this variable\")\n return 0",
"def check_type(var):\n if isinstance(var, float):\n return float(var)\n else:\n return str(var)",
"def cast(val, regs):\n try:\n return int(val)\n except ValueError as ve:\n return regs[val]",
"def try_to_convert(value):\n try:\n return int(value)\n except:\n return value",
"def process_int(integer: int) -> str:\n\n return str(integer) if integer else Presenter.DEFAULT",
"def as_int_or_float(val):\n if type(val) == str:\n ret_val = float(val) if '.' in val else int(val)\n return ret_val\n return val",
"def possible_int(arg):\n try:\n return int(arg)\n except ValueError:\n logging.info(f'failed to parse {arg} as an int, treating it as a string')\n return arg",
"def str2type(raw_val):\n try:\n return int(raw_val)\n except ValueError:\n pass\n\n try:\n return float(raw_val)\n except ValueError:\n return raw_val",
"def try_int_cast(value):\n try: \n return int(value)\n except:\n return value",
"def value_from_str(self, s):\n try:\n return int(s)\n except ValueError:\n return super().value_from_str(s)",
"def try_to_convert (id):\n converted = id\n try:\n converted = int(id)\n except ValueError:\n pass\n return converted",
"def convert(s):\n\n try:\n\n return int(s)\n except (ValueError, TypeError) as e:\n print(\"conversion error {}\".format(str(e)), file=sys.stderr)\n pass\n return -1",
"def to_int(param, in_str):\n try:\n return int(in_str)\n except ValueError:\n return exit_msg(f\"Bad Request: Wrong type, expected 'int' for parameter '{param}'\")",
"def str_to_int(inp, default=None):\n try:\n return int(inp)\n except ValueError:\n return default",
"def to_int(value):\n\n if isinstance(value, int):\n return value\n\n elif isinstance(value, string_types):\n return int(value) if value.isdigit() else None",
"def to_int(str_val: str) -> int:\n\n return int(str_val) if is_int(str_val) else None",
"def value_from_str(self, s):\n try:\n return int(s)\n except ValueError:\n try:\n return bool(s)\n except ValueError:\n return self.default",
"def try_int(data):\n try:\n return int(data)\n except (ValueError, TypeError ):\n return data",
"def cast_int(v):\n try:\n return int(v)\n except ValueError:\n return v"
] | [
"0.7063601",
"0.6914241",
"0.6573752",
"0.641254",
"0.6336709",
"0.6336709",
"0.6336709",
"0.6336709",
"0.6336709",
"0.6302409",
"0.6298774",
"0.62717533",
"0.6248804",
"0.6206999",
"0.6136418",
"0.6114751",
"0.61015326",
"0.6098964",
"0.6075144",
"0.6054744",
"0.60094166",
"0.6009355",
"0.5998343",
"0.59892535",
"0.59710026",
"0.59327817",
"0.59311366",
"0.589952",
"0.588663",
"0.58562416"
] | 0.71815616 | 0 |
Convert variable to integer or string depending on the case. | def to_str(variable):
try:
int(variable)
return str(variable)
except ValueError:
return variable | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def to_int(variable):\n try:\n return int(variable)\n except ValueError:\n return variable",
"def convertInt(s):\n try:\n int(s)\n return \"INT\"\n except:\n return s",
"def _as_int(self, name):\n org_type = self._get_type(name)\n if org_type == 'int': return None\n valid = ['single']\n is_num_str = self.is_like_numeric(name) if org_type == 'string' else False\n is_all_ints = self._all_str_are_int(self._data[name])\n is_convertable = is_num_str and is_all_ints\n if not (org_type in valid or is_convertable):\n msg = 'Cannot convert variable {} of type {} to int!'\n raise TypeError(msg.format(name, org_type))\n if self._has_categorical_data(name):\n self._meta['columns'][name].pop('values')\n self._meta['columns'][name]['type'] = 'int'\n if org_type == 'string':\n if is_all_ints:\n self._data[name] = self._data[name].apply(lambda x: int(x))\n else:\n self._data[name] = self._data[name].apply(lambda x: float(x))\n return None",
"def makeinputstring(variabel):\r\n if type(variabel) == int:\r\n return str(variabel)\r\n elif type(variabel) == float:\r\n return str(int(float(variabel)))\r\n else:\r\n return str(variabel)",
"def int_or_str(text):\n try:\n return int(text)\n except ValueError:\n return text",
"def int_or_str(text):\n try:\n return int(text)\n except ValueError:\n return text",
"def int_or_str(text):\n try:\n return int(text)\n except ValueError:\n return text",
"def int_or_str(text):\n try:\n return int(text)\n except ValueError:\n return text",
"def int_or_str(text):\n try:\n return int(text)\n except ValueError:\n return text",
"def to_int(name, default=0):\n try:\n return int(get(name))\n except (TypeError, ValueError):\n return default",
"def Int(val):\n try:\n return int(val)\n except ValueError:\n return ''",
"def tryCastToInt(number):\n try:\n return int(number)\n except:\n print(\"Error! Impossible to parse this variable\")\n return 0",
"def check_type(var):\n if isinstance(var, float):\n return float(var)\n else:\n return str(var)",
"def cast(val, regs):\n try:\n return int(val)\n except ValueError as ve:\n return regs[val]",
"def try_to_convert(value):\n try:\n return int(value)\n except:\n return value",
"def process_int(integer: int) -> str:\n\n return str(integer) if integer else Presenter.DEFAULT",
"def as_int_or_float(val):\n if type(val) == str:\n ret_val = float(val) if '.' in val else int(val)\n return ret_val\n return val",
"def possible_int(arg):\n try:\n return int(arg)\n except ValueError:\n logging.info(f'failed to parse {arg} as an int, treating it as a string')\n return arg",
"def str2type(raw_val):\n try:\n return int(raw_val)\n except ValueError:\n pass\n\n try:\n return float(raw_val)\n except ValueError:\n return raw_val",
"def try_int_cast(value):\n try: \n return int(value)\n except:\n return value",
"def value_from_str(self, s):\n try:\n return int(s)\n except ValueError:\n return super().value_from_str(s)",
"def try_to_convert (id):\n converted = id\n try:\n converted = int(id)\n except ValueError:\n pass\n return converted",
"def convert(s):\n\n try:\n\n return int(s)\n except (ValueError, TypeError) as e:\n print(\"conversion error {}\".format(str(e)), file=sys.stderr)\n pass\n return -1",
"def to_int(param, in_str):\n try:\n return int(in_str)\n except ValueError:\n return exit_msg(f\"Bad Request: Wrong type, expected 'int' for parameter '{param}'\")",
"def str_to_int(inp, default=None):\n try:\n return int(inp)\n except ValueError:\n return default",
"def to_int(value):\n\n if isinstance(value, int):\n return value\n\n elif isinstance(value, string_types):\n return int(value) if value.isdigit() else None",
"def to_int(str_val: str) -> int:\n\n return int(str_val) if is_int(str_val) else None",
"def value_from_str(self, s):\n try:\n return int(s)\n except ValueError:\n try:\n return bool(s)\n except ValueError:\n return self.default",
"def try_int(data):\n try:\n return int(data)\n except (ValueError, TypeError ):\n return data",
"def cast_int(v):\n try:\n return int(v)\n except ValueError:\n return v"
] | [
"0.7184124",
"0.6915447",
"0.6576159",
"0.6408534",
"0.6338109",
"0.6338109",
"0.6338109",
"0.6338109",
"0.6338109",
"0.6305314",
"0.6301793",
"0.6274125",
"0.62451327",
"0.62085027",
"0.6137398",
"0.6113615",
"0.61019415",
"0.61016726",
"0.60746306",
"0.6056998",
"0.60118085",
"0.60095185",
"0.6000852",
"0.59933335",
"0.5974142",
"0.5936906",
"0.59352916",
"0.5899698",
"0.5889349",
"0.58584917"
] | 0.7059871 | 1 |
Checks if the jsonstat version attribute exists and is equal to or greater than 2.0 for a given dataset. | def check_version_2(dataset):
    if dataset.get('version') and float(dataset.get('version')) >= 2.0:
        return True
    else:
        return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_schema_version(context, version):\n data = context.response.json()\n check_and_get_attribute(data, version)",
"def check_version(self, node):\n assert \"version\" in node, \"Version node does not contain attribute 'version'\"\n assert len(node[\"version\"]) >= 1, \"Expecting at least one 'version' value\"\n # TODO: add more thorough checks",
"def is_version_2_6() -> bool:\n v = get_version()\n if v[1] != \"singularity\" and v[1] != \"singularity-ce\":\n return False\n return v[0][0] == 2 and v[0][1] == 6",
"def check_recommended_versions_result(context, version):\n json_data = context.response.json()\n result = json_data[\"recommended_versions\"]\n assert result == version, \"different version found {} != {}\".format(version, result)",
"def check_version(ctx, _, value):\n if not value or ctx.resilient_parsing:\n return\n\n click.echo(f\"geocube v{importlib.metadata.version('geocube')}\")\n\n ctx.exit()",
"def test_schema_version(self):\n\n self.validator.adata.uns[\"schema_version\"] = \"1.0.0\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. \"\n \"Validation cannot be performed.\"\n ],\n )",
"def test_package_version(self, package, version):\n with self._conn.begin():\n return bool(self._conn.scalar(\n \"VALUES (test_package_version(%s, %s))\", (package, version)\n ))",
"def test_status(self):\n status_resp = http.get(urljoin(self.uri, '/api/status'))\n for k in status_resp.json().keys():\n if k.endswith('_version'):\n self.assertEqual(status_resp[k].count('.'), 2)",
"def test_above_24_latest_version(self):\n self.data['version'] = ''\n self.data['appVersion'] = '28.0'\n\n up = self.get(self.data)\n rdf = up.get_rdf()\n assert rdf.find('20202020.01') > -1",
"def test_package_version():\n coverage_version = package_version('coverage')\n pytest_version = package_version('pytest')\n\n assert coverage_version is not None\n assert coverage_version < (1000, 0, 0)\n assert pytest_version is not None\n assert pytest_version > (5, 0)",
"def is_valid_version(self):\n pass",
"def test_version():\n with open(\"pyproject.toml\") as f:\n tomllines = f.read().splitlines()\n tomlversion = set([l for l in tomllines if \"version =\" in l])\n initversion = set([f'version = \"{mei2volpiano.__version__}\"'])\n # set is there to catch any duplicate/additional entries\n assert initversion == tomlversion",
"def test_minor_property_ro(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n try:\n v1.minor = 33\n except AttributeError:\n passed = True\n else:\n passed = False\n\n self.assertTrue(passed)",
"def check_image_version(duthost):\n skip_version(duthost, [\"201811\", \"201911\", \"202012\"])",
"def check_conventions_version_number(ds, attr, conv_type, min_ver, max_ver):\n\n if attr not in ds.ncattrs():\n return 0\n global_attr = getattr(ds, attr)\n\n version = None\n global_attr_split = global_attr.split(' ')\n for conv in global_attr_split:\n if conv_type in conv:\n version = float(re.findall(r\"[+]?\\d*\\.\\d+|\\d+\", conv)[0])\n\n if not version:\n return 1\n\n range_check = None\n if conv_type == 'CF':\n range_check = min_ver <= version <= max_ver\n elif conv_type == 'ATMODAT':\n range_check = (version == min_ver) and (version == max_ver)\n\n if not range_check:\n return 2\n else:\n return 3",
"def check_versions(context, num=0, versions='', ecosystem='', package=''):\n versions = split_comma_separated_list(versions)\n vrsns = context.response.json()['items']\n assert len(vrsns) == num\n for v in vrsns:\n assert v['ecosystem'] == ecosystem\n assert v['package'] == package\n assert v['version'] in versions",
"def testStratisVersion(self):\n version = Manager.Properties.Version.Get(get_object(TOP_OBJECT))\n (major, _, _) = version.split(\".\")\n self.assertEqual(major, \"0\")",
"def test1_version(self):\n lVersion = rdbhdb.__version__.split('.')\n nVersion = need_version.split('.')\n self.assert_(lVersion >= nVersion, rdbhdb.__version__)",
"def version_check():\n try:\n with open('git.json', 'r') as fp:\n git_md = json.loads(fp.read())\n except IOError:\n # In the event that there is no git metadata, just print null values\n # twice.\n print \"null\"\n print \"null\"\n return\n\n if git_md['GitHub']:\n if git_md['GitHubUser'] is not None and git_md[\n 'GitHubRepo'] is not None:\n latest_release = json.loads(\n urllib2.urlopen(\n \"https://api.github.com/repos/%s/%s/releases/latest\" % (\n git_md['GitHubUser'], git_md['GitHubRepo'])).read())\n latest_tag = latest_release['tag_name']\n\n # Go through all of the tags to see if this commit matches a tag.\n tags = json.loads(\n urllib2.urlopen(\n \"https://api.github.com/repos/%s/%s/git/refs/tags\" % (\n git_md['GitHubUser'], git_md['GitHubRepo'])).read())\n\n current_tag = \"Unreleased\"\n for tag in tags:\n if tag['object']['sha'] == git_md['GitSHA']:\n current_tag = tag['ref'].split('/')[-1]\n\n print current_tag\n print latest_tag\n else:\n print \"MissingGitHubDetails\"\n print \"MissingGitHubDetails\"\n else:\n # In the event that there is a git file, but it doesn't indicate GitHub\n # then just print some stuff indicating that.\n print \"NonGitHub\"\n print \"NonGitHub\"",
"def check_image_version(duthost):\n pytest_require(parse_version(duthost.kernel_version) > parse_version(\"4.9.0\"),\n \"Test was not supported for 201911 and older image versions!\")",
"def is_version_data_existed(self):\n # if exists, skip\n # return \n\n return True",
"def test_version(self):\n version_instance = get_version('kolibri', __file__)\n self.assertIn(version_instance.major_version, kolibri.__version__)",
"def _is_valid_result(result):\n return result.get(\"version\", \"\") != \"\"",
"def test_version_exists():\n assert ztm.__version__",
"def test_drycc_version_header_good(self):\n response = self.client.get('/v2/apps')\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.has_header('DRYCC_API_VERSION'), True)\n self.assertEqual(response['DRYCC_API_VERSION'], __version__.rsplit('.', 1)[0])",
"def test_version(server):\n\n assert isinstance(server.version(), six.string_types)",
"def check_schema_existence(context, schema, version, selector=None):\n data = context.response.json()\n if selector is not None:\n api_schemas = check_and_get_attribute(data, selector)\n schema = check_and_get_attribute(api_schemas, schema)\n else:\n schema = check_and_get_attribute(data, schema)\n check_and_get_attribute(schema, version)",
"def version(self):\n r = requests.get(\"http://%s/api/version\" %(self.url), headers=self.headers)\n if r.status_code == 200:\n return True, r.content\n else:\n return False, {}",
"def compatible_version(self):\n\n cursor = self.disk_connection.cursor()\n try:\n row = cursor.execute(\"\"\"\n SELECT COUNT(schema_version_hash) FROM version WHERE schema_version_hash=(?);\n \"\"\", (self.schema_version_hash,)).fetchone()\n return row[0] > 0\n except sqlite3.Error: # pylint: disable=broad-except\n return False",
"def check_package(package_info, server, inventory_entry, inventory_attributes):\n for package_ent, pack_ent_attributes in package_info:\n package_name = \n package_version = pack_ent_attributes['DAMswPackageVersion'][0]\n \n if StrictVersion(package_version) >= from_version and StrictVersion(package_version) <= to_version:\n return True"
] | [
"0.6801831",
"0.59882814",
"0.58080256",
"0.56571555",
"0.56525904",
"0.56482166",
"0.5614213",
"0.5611383",
"0.5608659",
"0.55231154",
"0.55203396",
"0.55158633",
"0.5412891",
"0.54117423",
"0.5407262",
"0.5394365",
"0.5364731",
"0.53334713",
"0.5327647",
"0.5326494",
"0.5310656",
"0.5293525",
"0.52834296",
"0.52786267",
"0.52719426",
"0.5271164",
"0.5256251",
"0.52517456",
"0.5245582",
"0.5239222"
] | 0.7722066 | 0 |
Unnest a collection structure, extracting all its datasets and converting them to pandas DataFrames. | def unnest_collection(collection, df_list):
for item in collection['link']['item']:
if item['class'] == 'dataset':
df_list.append(Dataset.read(item['href']).write('dataframe'))
elif item['class'] == 'collection':
nested_collection = request(item['href'])
unnest_collection(nested_collection, df_list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_data(self):\n structure_data = self.parse_root(self.root)\n\n dict_data = {}\n for d in structure_data:\n dict_data = {**dict_data, **d}\n df = pd.DataFrame(data=list(dict_data.values()), index=dict_data.keys()).T\n\n return df",
"def __object_demapper(self, data: list) -> pd.DataFrame:\n data = pd.DataFrame.from_records([s.to_dict() for s in data])\n\n return data",
"def _gather_deep_data(self):\n\n cleaned_data_from_website = list()\n\n for i, search_result in self.data_from_website.iterrows():\n cleaned_data_from_website.append(self._deep_data(search_result.url))\n\n cleaned_data_from_website = pd.DataFrame(cleaned_data_from_website)\n if len(cleaned_data_from_website) == 0:\n cleaned_data_from_website['@id'] = '0'\n cleaned_data_from_website.set_index('@id', inplace=True)\n self.data_from_website = cleaned_data_from_website",
"def extract_data():\n client = MongoClient(HOST, PORT)\n collection = client[DB][COLLECTION]\n df = pd.DataFrame(collection.find().limit(10))\n return df",
"def from_dict(data):\n dfs = []\n decomposer = SOAPDecomposer(**data[\"decomposer\"])\n for dfdata in data[\"dfs\"]:\n dfs.append(DF.from_dict(dfdata, decomposer, data[\"x\"]))\n\n if len(dfs) > 0:\n dtype = dfs[0].dtype\n else:# pragma: no cover\n #We default to radial distributions.\n dtype == \"R\"\n\n if dtype == \"R\":\n result = RDFCollection(dfs, data[\"counts\"])\n else:\n result = ADFCollection(dfs, data[\"counts\"])\n result.label = data[\"label\"]\n result.tags = data[\"tags\"]\n return result",
"def to_dataframe(self, include_metadata: bool = True) -> pd.DataFrame:\n # Get all our data first with async\n # Note that all our pandas work will tax CPU so we wouldn't expect any\n # performance gains from doing the data parsing as a callback\n records = self.to_dict()\n data = []\n for series in records:\n df = pd.DataFrame(series.pop(\"data\"), columns=[\"period\", \"value\"])\n if include_metadata:\n df = df.assign(**series)\n data.append(df)\n return pd.concat(data, ignore_index=True)",
"def from_multi(eltype, data, kdims, vdims):\n from spatialpandas import GeoDataFrame\n\n xname, yname = (kd.name for kd in kdims[:2])\n\n new_data, types, geom_types = [], [], []\n for d in data:\n types.append(type(d))\n new_dict = to_geom_dict(eltype, d, kdims, vdims, SpatialPandasInterface)\n if 'geom_type' in new_dict and new_dict['geom_type'] not in geom_types:\n geom_types.append(new_dict['geom_type'])\n new_data.append(new_dict)\n if not isinstance(new_data[-1], dict):\n types[-1] = type(new_data[-1])\n if len(set(types)) > 1:\n raise DataError('Mixed types not supported')\n if new_data and types[0] is GeoDataFrame:\n data = pd.concat(new_data)\n else:\n columns = [d.name for d in kdims+vdims if d not in (xname, yname)]\n if len(geom_types) == 1:\n geom = geom_types[0]\n else:\n geom = SpatialPandasInterface.geom_type(eltype)\n data = to_spatialpandas(new_data, xname, yname, columns, geom)\n return data",
"def parser(self) -> 'Builder':\n\n # loop over datasets\n df_parts = []\n entries = defaultdict(dict)\n # for dataset in input_yaml.keys():\n for dataset in self.input_yaml['catalog']:\n # get a list of keys that are common to all files in the dataset\n entries['global'] = {}\n for g in dataset.keys():\n if 'data_sources' not in g and 'ensemble' not in g:\n entries['global'][g] = dataset[g]\n # loop over ensemble members, if they exist\n if 'ensemble' in dataset.keys():\n for member in dataset['ensemble']:\n glob_string = member.pop('glob_string')\n filelist = glob.glob(glob_string)\n for f in filelist:\n entries[f].update(member)\n # loop over all of the data_sources for the dataset, create a dataframe\n # for each data_source, append that dataframe to a list that will contain\n # the full dataframe (or catalog) based on everything in the yaml file.\n for stream_info in dataset['data_sources']:\n filelist = glob.glob(stream_info['glob_string'])\n stream_info.pop('glob_string')\n for f in filelist:\n entries[f].update(stream_info)\n\n partial_parser_netcdf = functools.partial(self._parser_netcdf, local_attrs=entries)\n self.builder = Builder(None, parser=partial_parser_netcdf, lazy=False)\n self.builder.filelist = [x for x in entries.keys() if x != 'global']\n df_parts.append(\n self.builder.build('path', 'variable')\n .df.set_index('path')\n .apply(lambda x: x.apply(pd.Series).stack())\n .reset_index()\n .drop('level_1', 1)\n )\n # create the combined dataframe from all of the data_sources and datasets from\n # the yaml file\n df = pd.concat(df_parts, sort=False)\n\n self.builder.df = df.sort_values(by=['path'])\n return self.builder",
"def glass_pandas(self):\n # pandas.set_option('display.width', 120)\n # TODO timeit (git_implementation) vs (my_implementation)\n # * df = pd.DataFrame(json.loads(r.text))\n # * df = df.set_index('t')\n # * df.index = pd.to_datetime(df.index, unit='s')\n # * df = df.sort_index()\n # * s = df.v\n # * s.name = '_'.join(url.split('/')[-2:])\n # * return s\n # for elem in self.loaded:\n # _metric, _data = elem[1]['_metrics'], elem[1]['_data']\n # try:\n # frame_keys = ['t'] + list(_data[0]['o'].keys())\n # framed = pandas.DataFrame(\n # data=[{k: (_data[iters]['t'] if k in 't' else _data[iters]['o'][k])\n # for k in frame_keys} for iters in range(len(_data))],\n # columns=frame_keys)\n # except KeyError:\n # framed = pandas.DataFrame(_data)\n # framed.set_index('t', inplace=True)\n # framed.index = pandas.to_datetime(\n # framed.index.to_flat_index(), unit='s', infer_datetime_format=True)\n # framed.sort_index(inplace=True)\n # framed.name = _metric\n # print(framed.name)\n # print(framed)",
"def from_pandas(\n dfs: Union[\"pandas.DataFrame\", List[\"pandas.DataFrame\"]]\n) -> Dataset[ArrowRow]:\n import pandas as pd\n\n if isinstance(dfs, pd.DataFrame):\n dfs = [dfs]\n return from_pandas_refs([ray.put(df) for df in dfs])",
"def parse_jsons(self):\n # store all data in a pandas DataFrame\n pandas_df = pandas.DataFrame(self.__input_jsons)\n return pandas_df",
"def get_data(paths, df_names, categorical_feats, groupby=None, exclude_classes=[], rel_cols=None, sep=\",\"):\n\n def _load_data(path, sep=sep):\n \"\"\"small function to load according to the dataformat. (excel or csv)\"\"\"\n filename, file_extension = os.path.splitext(path)\n\n if file_extension in [\".csv\", \".tsv\"]:\n df = pd.read_csv(path, index_col=0, sep=sep)\n else:\n df = pd.read_excel(path, index_col=0)\n\n return df\n\n # initialize list to store dataframes in\n dfs = []\n\n # Handle single path input\n if groupby and (len(paths) == 1 or isinstance(paths, str)):\n\n # load data depending on if the single path is given in a list of as string\n if isinstance(paths, str):\n data = _load_data(paths, sep)\n elif isinstance(paths, list):\n data = _load_data(*paths, sep)\n else:\n raise ValueError(\"It seems like the input was a single path. Please input path as string or inside a list.\")\n\n grouping = data.groupby(groupby)\n\n # split dataframe groups and create a list with all dataframes\n for name, grp in grouping:\n # skip class if it should be excluded\n if name in exclude_classes:\n continue\n\n df = grouping.get_group(name)[::]\n\n # consider all columns as relevant is no rel_cols given.\n if rel_cols is None:\n rel_cols = list(df)\n\n # consider the relevant columns\n dfs.append(df[rel_cols])\n\n # Handle multiple paths input\n elif len(paths) > 1:\n for path in paths:\n df = _load_data(path)\n dfs.append(df)\n\n return DataCollection(dfs, df_names, categorical_feats)",
"def pandas_convert(self):\n data = {}\n\n for names in self.data[0]:\n col_values = []\n\n if names in objects:\n for items in self.data[0][names]:\n col_values = []\n\n col_name = names + \"_\" + items\n\n for i in range(len(self.data)):\n col_values.append(self.data[i][names][items])\n\n data[col_name] = col_values\n else:\n for i in range(len(self.data)):\n col_values.append(self.data[i][names])\n \n data[names] = col_values\n\n self.pandas_df = pd.DataFrame(data=data)\n self.__clean_df()\n\n return self.pandas_df",
"def flatten(self):\n return DataArray([s for s in self.unstructured()])",
"def _get_data_as_flattened_dataframe(self, json_lines):\n if isinstance(json_lines, pd.DataFrame):\n return json_lines\n payload_data = None\n if isinstance(json_lines, dict):\n # Glean Payload Data\n found_payload_key = None\n payloads = {}\n for payload_key in self._payload_keys:\n if payload_key in json_lines.keys():\n payload_data = json_lines[payload_key]\n if isinstance(payload_data, dict):\n payload_data = self._find_data(payload_data)\n payload_data = self._coalesce_dicts(payload_data)\n payload_data, original_df_dtypes = data_utils.json_to_dataframe(\n json_lines=payload_data,\n selected_columns=self.selected_keys,\n read_in_string=False\n )\n for column in payload_data.columns:\n payload_data.rename(\n columns={column: payload_key + self._key_separator + str(column)},\n inplace=True)\n payloads[payload_key] = payload_data\n \n max_payload_length = 0\n for payload in payloads:\n if len(payloads[payload]) > max_payload_length:\n payload_data = payloads[payload]\n max_payload_length = len(payloads[payload])\n found_payload_key = payload\n \n # Get the non-payload data\n flattened_json = []\n for key in json_lines:\n if key != found_payload_key:\n flattened_json = flattened_json + self._find_data(json_lines[key], path=key)\n\n # Coalesce the data together\n json_lines = self._coalesce_dicts(flattened_json)\n\n data, original_df_dtypes = data_utils.json_to_dataframe(\n json_lines=json_lines,\n selected_columns=self.selected_keys,\n read_in_string=False\n )\n self._original_df_dtypes = original_df_dtypes\n\n if payload_data is not None:\n self._metadata = data\n data = payload_data\n\n return data",
"def load_pandas():\n data = _get_data()\n return du.process_pandas(data, endog_idx=5, exog_idx=[10, 2, 6, 7, 8])",
"def arts_to_dataframe( arts, photos_df=None ):\n\n # ordered list of types of art. these will be categories in our\n # DataFrame.\n art_types = [\"tag\",\n \"sticker\",\n \"stencil\",\n \"text\",\n \"other\",\n \"throwup\",\n \"piece\",\n \"mural\"]\n\n # ordered list of sizes of art. these will be categories in our\n # DataFrame.\n art_sizes = [\"tiny\",\n \"small\",\n \"medium\",\n \"large\",\n \"huge\"]\n\n # ordered list of qualities of art. these will be categories in our\n # DataFrame.\n art_qualities = [\"bad\",\n \"poor\",\n \"fair\",\n \"good\",\n \"excellent\"]\n\n # ordered list of processing states of art. these will be categories in\n # our DataFrame.\n art_states = [\"unreviewed\",\n \"needs_review\",\n \"reviewed\"]\n\n # columns in the constructed DataFrame.\n art_columns = [\"id\",\n \"photo_id\",\n \"type\",\n \"size\",\n \"quality\",\n \"state\",\n \"region\",\n \"tags\",\n \"created_time\",\n \"modified_time\",\n \"artists\",\n \"associates\",\n \"vandals\",\n \"photo_series\",\n \"record\"]\n\n # create a list of tuples containing the contents of the PhotoRecords.\n art_tuples = []\n for art in arts:\n # we can't provide Series information if we weren't handed a DataFrame.\n if photos_df is None:\n photo_series = None\n else:\n photo_series = photos_df.loc[art[\"photo_id\"]]\n\n art_tuples.append( (art[\"id\"],\n art[\"photo_id\"],\n art[\"type\"],\n art[\"size\"],\n art[\"quality\"],\n art[\"state\"],\n art[\"region\"],\n art[\"tags\"],\n art[\"created_time\"],\n art[\"modified_time\"],\n art[\"artists\"],\n art[\"associates\"],\n art[\"vandals\"],\n photo_series,\n art) )\n\n # create our base DataFrame that we'll doctor up.\n arts_df = pd.DataFrame.from_records( art_tuples,\n index=\"id\",\n columns=art_columns )\n\n # convert several columns to ordered categorical data so that we can rely\n # on its structure during analysis.\n arts_df[\"type\"] = pd.Categorical( arts_df[\"type\"],\n categories=art_types,\n ordered=True )\n arts_df[\"size\"] = pd.Categorical( arts_df[\"size\"],\n categories=art_sizes,\n ordered=True )\n arts_df[\"quality\"] = pd.Categorical( arts_df[\"quality\"],\n categories=art_qualities,\n ordered=True )\n arts_df[\"state\"] = pd.Categorical( arts_df[\"state\"],\n categories=art_states,\n ordered=True )\n\n return arts_df",
"def data_pandas(detections):\n return DataWrapperPandas(detections, duplicates_radius=1)",
"def df(self) -> \"pandas.DataFrame\":\n titles = []\n comments = []\n alternative_codes = []\n children = []\n for cat in self.values():\n titles.append(cat.title)\n comments.append(cat.comment)\n alternative_codes.append(cat.codes[1:])\n children.append(\n tuple(tuple(sorted(c.codes[0] for c in cs)) for cs in cat.children)\n )\n return pandas.DataFrame(\n index=self.keys(),\n data={\n \"title\": titles,\n \"comment\": comments,\n \"alternative_codes\": alternative_codes,\n \"children\": children,\n },\n )",
"def DataFrame(dat):\n keys = dat.keys()\n l = []\n for key in keys:\n v = dat[key]\n assert type(v) is np.ndarray and v.ndim <= 2 and v.ndim >= 1, \\\n '%s must be np.ndarray with 1 <= ndim <= 2 !' % key\n\n if v.ndim == 1:\n ix = pd.MultiIndex.from_product([[key]] + [[0]])\n l.append(pd.DataFrame(v[:,np.newaxis], columns=ix))\n else:\n ix = pd.MultiIndex.from_product([[key]] + [\n np.arange(s) for s in v.shape[1:]\n ])\n l.append(pd.DataFrame(v, columns=ix))\n return pd.concat(l, axis=1)",
"def load_pandas():\n data = _get_data()\n return du.process_pandas(data, endog_idx=0)",
"def load_pandas():\n data = _get_data()\n return du.process_pandas(data, endog_idx=0)",
"def load(datasets, treemakers='Basics', force_reload=False):\n if isinstance(datasets, str):\n datasets = [datasets]\n if isinstance(treemakers, (type, str)):\n treemakers = [treemakers]\n\n combined_dataframes = []\n\n for treemaker in treemakers:\n\n dataframes = []\n for dataset in datasets:\n minitree_path = get(dataset, treemaker, force_reload=force_reload)\n new_df = pd.DataFrame.from_records(root_numpy.root2array(minitree_path).view(np.recarray)) \n dataframes.append(new_df)\n\n # Concatenate mini-trees of this type for all datasets\n combined_dataframes.append(pd.concat(dataframes))\n\n # Concatenate mini-trees of all types\n if not len(combined_dataframes):\n raise RuntimeError(\"No data was extracted? What's going on??\")\n return pd.concat(combined_dataframes, axis=1)",
"def make_exploded_df(dfs_dict: Dict[str, pd.DataFrame], drop_original: bool = True,\n row_explode_by: str = None,\n col_explode_by: str = None, keep_all_cols: bool = False,\n ) -> Tuple[pd.DataFrame, list, list]:\n\n body = dfs_dict[\"body_cells\"]\n if col_explode_by is None:\n if (not dfs_dict[\"col_headers\"] is None) and len(dfs_dict[\"col_headers\"]) != 0:\n col_explode_by = \"title\"\n else:\n col_explode_by = \"index\"\n\n if row_explode_by is None:\n if (not dfs_dict[\"row_headers\"] is None) and len(dfs_dict[\"row_headers\"]) != 0:\n row_explode_by = \"title\"\n else:\n row_explode_by = \"index\"\n\n if col_explode_by == \"title\":\n exploded, col_header_names = _horiz_explode(body, \"column_header_texts\", drop_original=drop_original)\n elif col_explode_by == \"title_id\":\n # prevent from crashing if no column headers exist\n if (dfs_dict[\"col_headers\"] is None) or len(dfs_dict[\"col_headers\"]) == 0:\n exploded, col_header_names = _explode_indexes(body, \"column\", drop_original=drop_original)\n else:\n exploded, col_header_names = _horiz_explode(body, \"column_header_ids\", drop_original=drop_original)\n elif col_explode_by == \"index\":\n exploded, col_header_names = _explode_indexes(body, \"column\", drop_original=drop_original)\n elif col_explode_by == \"concat\":\n if (dfs_dict[\"col_headers\"] is None) or len(dfs_dict[\"col_headers\"]) == 0:\n exploded, col_header_names = _explode_indexes(body, \"column\", drop_original=drop_original)\n else:\n exploded, col_header_names = _explode_by_concat(body, \"column_header_texts\")\n\n else:\n exploded = body\n col_header_names = []\n\n if row_explode_by == \"title\":\n exploded, row_header_names = _horiz_explode(exploded, \"row_header_texts\", drop_original=drop_original)\n elif row_explode_by == \"title_id\":\n # prevent from crashing if no column headers exist\n if (dfs_dict[\"row_headers\"] is None) or len(dfs_dict[\"row_headers\"]) == 0:\n exploded, row_header_names = _explode_indexes(exploded, \"row\", drop_original=drop_original)\n else:\n exploded, row_header_names = _horiz_explode(exploded, \"row_header_ids\", drop_original=drop_original)\n elif row_explode_by == \"index\":\n exploded, row_header_names = _explode_indexes(exploded, \"row\", drop_original=drop_original)\n elif row_explode_by == \"concat\":\n if (dfs_dict[\"row_headers\"] is None) or len(dfs_dict[\"row_headers\"]) == 0:\n exploded, row_header_names = _explode_indexes(exploded, \"row\", drop_original=drop_original)\n else:\n exploded, row_header_names = _explode_by_concat(exploded, \"row_header_texts\")\n\n\n else:\n exploded = exploded\n row_header_names = []\n\n if drop_original and not keep_all_cols:\n cols_to_keep = [\"text\"] + row_header_names + col_header_names + [\"attributes.type\"]\n exploded = exploded[cols_to_keep]\n\n return exploded, row_header_names, col_header_names",
"def _datasets(self):\n return self._flat_data._datasets",
"def dataset_grabber(sess, link):\n json_dict = sess.get(link).json()\n if '.geojson' in link:\n dataset = gpd.GeoDataFrame.from_features(json_dict['features'])\n else:\n dataset = pd.DataFrame(json_dict)\n return dataset",
"def to_dataframe(self, attrs_as_columns=False):\n\n # Set up empty dict for dataframe\n ds = {}\n\n # Add every key containing a list into the dict\n keys = [k for k in self.dict.keys()]\n for key in keys:\n if isinstance(self.dict[key], list):\n ds[key] = self.dict[key]\n else:\n if attrs_as_columns:\n ds[key] = self.dict[key]\n\n # Convert entire dict to a DataFrame\n ds = pd.DataFrame(ds)\n\n # Return dataset\n return ds",
"def return_data_as_pandas_df(self):\n if not self.response:\n return None\n\n data = self.response['data'][self.data_type.value]\n\n # flatten data dictionary by joining property and subproperty names\n data_flat = {}\n for i, entry in enumerate(data):\n id = self.id[i]\n curr_dict = {}\n for key, values in entry.items():\n if isinstance(values, list):\n v = values[0]\n else:\n v = values\n if isinstance(v, str):\n new_key = f\"{key}\"\n curr_dict[new_key] = v\n else:\n for subprop, val in v.items():\n new_key = f\"{key}.{subprop}\"\n curr_dict[new_key] = val\n data_flat[id] = curr_dict\n\n return pd.DataFrame.from_dict(data_flat, orient='index')",
"def read(self, collection_name):\n return pd.DataFrame.from_dict(list(self.database[collection_name].find()))",
"def prepare_dataset() -> Tuple[pd.DataFrame, Dict]:\n\n data_dir = Path.cwd()/\"freiburg_grocery_images\"\n labels = [directory.name for directory in data_dir.iterdir()]\n label_map = {label: i for i, label in enumerate(labels)}\n\n all_items = [str(file) for label in labels for file in (data_dir/label).iterdir()]\n labels_of_items = [label for label in labels for file in (data_dir/label).iterdir()]\n\n df = pd.DataFrame({\"Image\": all_items, \"Label\": labels_of_items})\n return df, label_map"
] | [
"0.6218987",
"0.577961",
"0.56963843",
"0.5687765",
"0.55922854",
"0.5588951",
"0.55812603",
"0.55670446",
"0.5566674",
"0.5553015",
"0.54693437",
"0.546879",
"0.5448559",
"0.5372168",
"0.53552485",
"0.5334211",
"0.53155845",
"0.53145075",
"0.5292812",
"0.5292152",
"0.5270108",
"0.5270108",
"0.526779",
"0.52508944",
"0.52210724",
"0.521473",
"0.5212593",
"0.520761",
"0.5207169",
"0.52050066"
] | 0.7781752 | 0 |
Get label from a given dimension. | def get_dim_label(js_dict, dim, input="dataset"):
if input == 'dataset':
input = js_dict['dimension'][dim]
label_col = 'label'
elif input == 'dimension':
label_col = js_dict['label']
input = js_dict
else:
raise ValueError
try:
dim_label = input['category']['label']
except KeyError:
dim_index = get_dim_index(js_dict, dim)
dim_label = pd.concat([dim_index['id'],
dim_index['id']],
axis=1)
dim_label.columns = ['id', 'label']
else:
dim_label = pd.DataFrame(list(zip(dim_label.keys(),
dim_label.values())),
index=dim_label.keys(),
columns=['id', label_col])
# index must be added to dim label so that it can be sorted
try:
dim_index = input['category']['index']
except KeyError:
dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),
index=[0],
columns=['id', 'index'])
else:
if type(dim_index) is list:
dim_index = pd.DataFrame(list(zip(dim_index,
range(0, len(dim_index)))),
index=dim_index, columns=['id', 'index'])
else:
dim_index = pd.DataFrame(list(zip(dim_index.keys(),
dim_index.values())),
index=dim_index.keys(),
columns=['id', 'index'])
    dim_label = pd.merge(dim_label, dim_index, on='id').sort_values(by='index')
return dim_label | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getAxisLabel(self, dim=0):\n return self.__axis_labels__[dim]",
"def test_get_dim_label_with_label(self):\n\n dim = self.oecd_datasets['oecd']['dimension']['id'][0]\n dims_df = pyjstat.get_dim_label(self.oecd_datasets['oecd'], dim)\n self.assertTrue(dims_df.iloc[0]['id'] == 'UNR')\n self.assertTrue(dims_df.iloc[-1]['label'] == 'Unemployment rate')",
"def get_label(self, label):\n\n return torch.from_numpy(np.array(label)).long()",
"def fromLabel(name):\n return Data.labels.index(name)",
"def get_label_name(label):\n\tindex = np.argmax(label)\n\tlabels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\n\treturn labels[int(index)]",
"def get_label(self, name):\n label_list = self.wls_board.get_labels()\n for label in label_list:\n if name in label.name: \n return label",
"def label_to_name(self, label):\n return self.labels[label]",
"def get_matching_dimname(self, dimname):\n return dimname",
"def _get_label(self):\n return self.label",
"def get_label(self):\n return self.job[self.label_key]",
"def label_to_name(self, label):\n\t\treturn self.labels[label]",
"def label_to_name(self, label):\n\t\t\treturn self.labels[label]",
"def get_label(self, key):\n return self.labels.get(key, None)",
"def get_label(id):\n return if_found(dao.get_label(id))",
"def get_label(self):\n oshape = (ctypes.c_uint * 2)()\n ostride = ctypes.c_uint()\n ret = cxnlib.CXNIOGetLabel(self.handle,\n oshape, ctypes.byref(ostride))\n return ctypes2numpyT(ret, [x for x in oshape], 'float32', ostride.value)",
"def test_get_dim_label_with_index(self):\n\n dim = self.oecd_datasets['oecd']['dimension']['id'][2]\n dims_df = pyjstat.get_dim_label(self.oecd_datasets['oecd'], dim)\n self.assertTrue(dims_df.iloc[0]['id'] == '2003')\n self.assertTrue(dims_df.iloc[-1]['label'] == '2014')",
"def get_label_name(label_id):\n if self._int_to_label == {}:\n print(\"ERROR\")\n print(\"Need to import data first\")\n else:\n label_name = self._int_to_label[label_id]\n\n return label_name",
"def get_label(image, model):\n x = Variable(image, volatile=True)\n label = model(x).data.max(1)[1].numpy()[0]\n # We have string labels for ImageNet\n if isinstance(model, torchvision.models.inception.Inception3):\n label_string = labels.get(label)\n return label_string\n return label",
"def _get_label ( self ):\n if self._label is not None:\n return self._label\n return self.name",
"def get_label(self, ):\n return self.attrs.get(self.AttributeNames.LABEL, None)",
"def get_label_by_id(document_id):\n document = Documents.query.filter_by(id=document_id).first()\n if document:\n return document.label\n return document",
"def _get_label(obj):\n # NOTE: BarContainer and StemContainer are instances of tuple\n while not hasattr(obj, 'get_label') and isinstance(obj, tuple) and len(obj) > 1:\n obj = obj[-1]\n label = getattr(obj, 'get_label', lambda: None)()\n return label if label and label[:1] != '_' else None",
"def labelit(self, varname):\n \n if not varname:\n return \"\"\n return self.vardict[varname].VariableLabel or varname",
"def get_label(client, label):\n image_name = get_image_name()\n image = client.images.get(image_name)\n try:\n return image.labels[label]\n except KeyError:\n raise Exception(f\"Image should have a label '{label}'\")",
"def get_label(self):\n return self.label",
"def get_label(self):\n return self.label",
"def get_label(self):\n return self.label",
"def get_label(self):\n return self.label",
"def get_label ( self ):\n return self.label",
"def test_get_dim_index_with_label(self):\n\n dim = self.oecd_datasets['oecd']['dimension']['id'][0]\n dims_df = pyjstat.get_dim_index(self.oecd_datasets['oecd'], dim)\n self.assertTrue(dims_df.iloc[0]['id'] == 'UNR')\n self.assertTrue(dims_df.iloc[-1]['index'] == 0)"
] | [
"0.7131004",
"0.7038563",
"0.6737117",
"0.6715745",
"0.6470384",
"0.6414515",
"0.6387479",
"0.6375916",
"0.6359965",
"0.63417023",
"0.6326266",
"0.6309062",
"0.6306092",
"0.63019234",
"0.6247734",
"0.619815",
"0.6156095",
"0.6134617",
"0.61015564",
"0.609251",
"0.60764146",
"0.6065842",
"0.60592455",
"0.60566753",
"0.60481966",
"0.60481966",
"0.60481966",
"0.60481966",
"0.6036658",
"0.6025799"
] | 0.7249571 | 0 |
Reads data from a URL, DataFrame, JSON string, JSON file or OrderedDict. | def read(cls, data):
if isinstance(data, pd.DataFrame):
return cls((json.loads(
to_json_stat(data, output='dict', version='2.0'),
object_pairs_hook=OrderedDict)))
elif isinstance(data, OrderedDict):
return cls(data)
elif (isinstance(data, basestring)
and data.startswith(("http://", "https://",
"ftp://", "ftps://"))):
# requests will do the rest...
return cls(request(data))
elif isinstance(data, basestring):
try:
json_dict = json.loads(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
else:
try:
json_dict = json.load(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _fetch_data(url: str, d: datetime) -> pd.DataFrame:\n return pd.read_json(url)",
"def read_url(full_url = None, \n table_format = 'json'):\n \n if table_format == 'json':\n data = requests.get(full_url)\n df = pyjstat.from_json_stat(data.json(object_pairs_hook=OrderedDict))\n df = df[0]\n \n elif table_format == 'csv':\n df = pd.read_csv(full_url)\n else:\n print(\"\"\"Table_format is incorrectly specified. \n It must be 'json-stat' or 'csv'\"\"\")\n df = None\n return df",
"def _request_data(self, url):\n connection = httplib.HTTPConnection(self.url)\n connection.request(\"GET\", url)\n response = connection.getresponse()\n\n if response.status != 200:\n raise Exception(response.reason)\n\n data = response.read()\n response.close()\n\n return json.loads(data)",
"def getData(url):\n data = ''\n request = urllib2.Request(url, headers={\"Accept\": \"application/json\"})\n try:\n data = json.loads(urllib2.urlopen(request).read())\n except urllib2.HTTPError, e:\n raise Exception(\"HTTP error: %d\" % e.code)\n except urllib2.URLError, e:\n raise Exception(\"Network error: %s\" % e.reason.args[1])\n\n return data",
"def read_json(path_or_buf=None, *args, **kwargs):\n\n return _read_multi(\n func=pd.read_json,\n path_or_buf=path_or_buf,\n *args,\n **kwargs\n )",
"def get_input(url):\n if '.csv' in url:\n return pd.read_csv(url)\n elif '.json' in url:\n return pd.read_json(url, lines=True)\n elif '.parquet' in url:\n return pd.read_parquet(url)\n else:\n raise NotImplementedError(\"File type not supported\")",
"def read(cls, data):\n if isinstance(data, OrderedDict):\n return cls(data)\n elif isinstance(data, basestring) and data.startswith((\"http://\",\n \"https://\",\n \"ftp://\",\n \"ftps://\")):\n return cls(request(data))\n elif isinstance(data, basestring):\n try:\n json_dict = json.loads(data, object_pairs_hook=OrderedDict)\n return cls(json_dict)\n except ValueError:\n raise\n else:\n try:\n json_dict = json.load(data, object_pairs_hook=OrderedDict)\n return cls(json_dict)\n except ValueError:\n raise",
"def load_data(self, data):\n\n if type(data) is \"json\":\n dataF = pd.read_json(data)\n elif type(data) is list:\n dataF = pd.DataFrame(data)\n else:\n print(\"Other types not implemented\")\n return\n dataF = self.__process_data(dataF)\n self.__dataF = dataF",
"def get_data(url):\n response = get(url, timeout=10)\n \n if response.status_code >= 400:\n raise RuntimeError(f'Request failed: { response.text }')\n \n return response.json()",
"def _load_data_from_file(self, input_file_path):\n with FileOrBufferHandler(input_file_path, 'r', \n encoding=self.file_encoding) as input_file:\n try:\n data = json.load(input_file)\n except (json.JSONDecodeError, UnicodeDecodeError):\n input_file.seek(0)\n data = data_utils.read_json(\n data_generator=input_file,\n selected_columns=self.selected_keys,\n read_in_string=False\n )\n return data",
"def _load_data(self, url, options=None, location=None):\n # Set API key in query parameters\n params = { \"api-key\": self.key }\n\n # Add options to query parameters\n if options is not None:\n params.update(options)\n\n # Load the data from the API, raise error if there's an invalid status code\n res = self.session.get(self.protocol + url, params=params, timeout=(4, 10))\n if res.status_code == 401:\n raise ValueError(\"Invalid API Key\")\n elif res.status_code == 404:\n raise RuntimeError(\"Error 404: This page is not available\")\n res.raise_for_status()\n\n if orjson is None:\n parsed_res = res.json()\n else:\n parsed_res = orjson.loads(res.content)\n\n # Get the data from the usual results location\n if location is None:\n results = parsed_res.get(\"results\")\n\n # Sometimes the results are in a different location, this location can be defined in a list\n # Load the data from that location\n else:\n results = parsed_res\n for loc in location:\n results = results.get(loc)\n\n return results",
"def read(cls, data):\n if isinstance(data, pd.DataFrame):\n output = OrderedDict({})\n output['version'] = '2.0'\n output['class'] = 'dimension'\n [label] = [x for x in list(data.columns.values) if\n x not in ['id', 'index']]\n output['label'] = label\n output['category'] = OrderedDict({})\n output['category']['index'] = data.id.tolist()\n output['category']['label'] = OrderedDict(\n zip(data.id.values, data[label].values))\n return cls(output)\n elif isinstance(data, OrderedDict):\n return cls(data)\n elif isinstance(data, basestring) and data.startswith((\"http://\",\n \"https://\",\n \"ftp://\",\n \"ftps://\")):\n return cls(request(data))\n elif isinstance(data, basestring):\n try:\n json_dict = json.loads(data, object_pairs_hook=OrderedDict)\n return cls(json_dict)\n except ValueError:\n raise\n else:\n try:\n json_dict = json.load(data, object_pairs_hook=OrderedDict)\n return cls(json_dict)\n except ValueError:\n raise",
"def fetchJson(url):",
"def _load_json(self, kind, source, **kwargs):\n if source is None:\n raise exceptions.invalid_json_map[kind](f\"Cannot load {kind} - no data source specified.\")\n\n # Decode the json string and deserialize to objects.\n try:\n data = load_json(source, **kwargs)\n except FileNotFoundError as e:\n raise exceptions.file_not_found_map[kind](e)\n\n except jsonlib.decoder.JSONDecodeError as e:\n raise exceptions.invalid_json_map[kind](e)\n\n return data",
"def load_json(url):\n with urllib.request.urlopen(url) as u:\n data = json.loads(u.read().decode())\n return data",
"def _get_data(self, url: str)->dict:\n data = None\n resp = self._get(url)\n if resp:\n data = resp.json()['data']\n return data",
"def read_csv_data(url):\n\n csv_data = pd.read_csv(url)\n\n return csv_data",
"def get_json_data(url):\n\n r = requests.get(url)\n try:\n return r.json()\n except json.JSONDecodeError:\n # Catch the Unexpected UTF-8 BOM error\n r.encoding='utf-8-sig'\n return r.json()",
"def fetch_dataset(url, pandas_impl=pandas):\n\n print(f'fetching dataset at {url}')\n return pandas_impl.read_csv(url)",
"def get_data():\n try:\n response = requests.get(uri)\n json_data = response.json()\n except ValueError as e:\n print(e)\n except TypeError as e:\n print(e)\n return json_data",
"def read_data(params):\n data_path = params['path']\n interaction_path = os.path.join(data_path,\n params['interactions'])\n get_logger().debug(f'Read {params[\"name\"]} data from {interaction_path}')\n # read interaction data from json file and convert to dataframe\n data = []\n with open(interaction_path) as f:\n # for each line in the json file\n for line in f:\n # store the line in the array for manipulation\n record = json.loads(line)\n data.append((record['user_id'], record['business_id'], record['stars']))\n data = pd.DataFrame(data, columns=['org_user', 'org_item', 'rating'])\n return data",
"def read_data(self, loc):\n pass",
"def read_url(path, name, *args, version=None, driver_kwargs=None, scheme=None, **kwargs):\n\tif driver_kwargs is None:\n\t\tdriver_kwargs = {}\n\tfd = open(path, **driver_kwargs) if scheme is None else DRIVERS[scheme](path, **driver_kwargs)\n\tdata = read(fd, name, *args, version=version, **kwargs)\n\tfd.close()\n\treturn data",
"def _get_raw_data(self, url, series):\n url = self._get_url(url, series)\n try:\n response = self.http.request(url, headers=self._reqheaders)\n except httplib2.ServerNotFoundError as e:\n raise TVDBConnectError(e.message), None, sys.exc_info()[2]\n rep = response[0]\n log.debug(\n 'http-status:%s,content:%s', \n rep['status'], \n rep['content-type']\n )\n if int(rep['status']) >= 400:\n raise TVDBConnectError(\n 'Failed to get \"%s\" from thetvdb. errno:%s' % (\n series, rep['status']),\n rep['status']\n ) \n return response[1]",
"def data_loader(self, url, type_of):\n\n data_loader = None\n if type_of == \"csv\":\n data_loader = self.csv\n elif type_of == \"json\":\n data_loader = self.json\n elif type_of == \"parquet\":\n data_loader = self.parquet\n elif type_of == \"avro\":\n data_loader = self.avro\n else:\n RaiseIt.type_error(data_loader, [\"csv\", \"json\", \"parquet\", \"avro\", ])\n\n i = url.rfind('/')\n data_name = url[(i + 1):]\n data_def = {\n \"displayName\": data_name,\n \"url\": url\n }\n return Downloader(data_def).download(data_loader, type_of)",
"def readData(url, data=[], code='gbk'):\n tmp = urlopen(url).read().decode(code)\n data.append(tmp)\n return tmp",
"def get_jsonparsed_data(url):\n response = urlopen(url)\n data = response.read().decode(\"utf-8\")\n return json.loads(data)",
"def get_jsonparsed_data(url):\n response = urlopen(url)\n data = response.read().decode(\"utf-8\")\n return json.loads(data)",
"def get_jsonparsed_data(url):\n response = urlopen(url)\n data = response.read().decode(\"utf-8\")\n return json.loads(data)",
"def get_jsonparsed_data(url):\n response = urlopen(url)\n data = response.read().decode(\"utf-8\")\n return json.loads(data)"
] | [
"0.6491046",
"0.64579487",
"0.6190409",
"0.6136968",
"0.6119346",
"0.61163783",
"0.606284",
"0.6042426",
"0.5994613",
"0.5884721",
"0.58762944",
"0.58694124",
"0.58575994",
"0.5831916",
"0.58314955",
"0.58214885",
"0.5820414",
"0.57760435",
"0.57522964",
"0.5729711",
"0.56720954",
"0.5631377",
"0.5626862",
"0.55977076",
"0.55863637",
"0.55824864",
"0.55814534",
"0.55814534",
"0.55814534",
"0.55814534"
] | 0.6827572 | 0 |
Reads data from URL or OrderedDict. | def read(cls, data):
if isinstance(data, OrderedDict):
return cls(data)
elif isinstance(data, basestring) and data.startswith(("http://",
"https://",
"ftp://",
"ftps://")):
return cls(request(data))
elif isinstance(data, basestring):
try:
json_dict = json.loads(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
else:
try:
json_dict = json.load(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _request_data(self, url):\n connection = httplib.HTTPConnection(self.url)\n connection.request(\"GET\", url)\n response = connection.getresponse()\n\n if response.status != 200:\n raise Exception(response.reason)\n\n data = response.read()\n response.close()\n\n return json.loads(data)",
"def readData(url, data=[], code='gbk'):\n tmp = urlopen(url).read().decode(code)\n data.append(tmp)\n return tmp",
"def _get_data(self, url: str)->dict:\n data = None\n resp = self._get(url)\n if resp:\n data = resp.json()['data']\n return data",
"def readUrl(self, url):\n if self._ignoreUrlIncludes:\n return\n try:\n f = self._openUrl(url)\n self.readObject(url, f)\n except CfgEnvironmentError:\n if not self._ignoreErrors:\n raise",
"def read(url, encoding=None, cache=None, mode=\"rb\"):\n with read_handle(url, cache, mode=mode) as handle:\n data = handle.read()\n\n if encoding:\n data = data.decode(encoding)\n\n return data",
"def _load_data(self, url, options=None, location=None):\n # Set API key in query parameters\n params = { \"api-key\": self.key }\n\n # Add options to query parameters\n if options is not None:\n params.update(options)\n\n # Load the data from the API, raise error if there's an invalid status code\n res = self.session.get(self.protocol + url, params=params, timeout=(4, 10))\n if res.status_code == 401:\n raise ValueError(\"Invalid API Key\")\n elif res.status_code == 404:\n raise RuntimeError(\"Error 404: This page is not available\")\n res.raise_for_status()\n\n if orjson is None:\n parsed_res = res.json()\n else:\n parsed_res = orjson.loads(res.content)\n\n # Get the data from the usual results location\n if location is None:\n results = parsed_res.get(\"results\")\n\n # Sometimes the results are in a different location, this location can be defined in a list\n # Load the data from that location\n else:\n results = parsed_res\n for loc in location:\n results = results.get(loc)\n\n return results",
"def get_data(self, url):\n return self.get(url).get('data', [])",
"async def get_data(self, path: str) -> Dict:\n # function accepts paths that start with / and also path that do not start with /\n if path.startswith(\"/\"):\n path = path[1:]\n try:\n async with aiohttp.ClientSession() as session:\n async with session.get(f\"{self._opa_url}/data/{path}\") as opa_response:\n return await opa_response.json()\n except aiohttp.ClientError as e:\n logger.warning(\"Opa connection error: {err}\", err=e)\n raise",
"def getData(url):\n data = ''\n request = urllib2.Request(url, headers={\"Accept\": \"application/json\"})\n try:\n data = json.loads(urllib2.urlopen(request).read())\n except urllib2.HTTPError, e:\n raise Exception(\"HTTP error: %d\" % e.code)\n except urllib2.URLError, e:\n raise Exception(\"Network error: %s\" % e.reason.args[1])\n\n return data",
"def load_json(url):\n with urllib.request.urlopen(url) as u:\n data = json.loads(u.read().decode())\n return data",
"def read_data(self, loc):\n pass",
"async def fetch_data(self, url: str) -> dict:\n async with self.bot.http_session.get(url) as r:\n return await r.json()",
"def get_url_data(self, url):\n # print \"opening: \" + url\n request = urllib2.Request(url)\n base64string = '%s:%s' % (self.username, self.key)\n request.add_header(\"Authorization\", \"ApiKey %s\" % base64string)\n response = urllib2.urlopen(request)\n data = json.loads(response.read())\n return data",
"def readData(self, dataDict):\n pass",
"def getData(eUrl, eToken):\n if eUrl == '' or eToken == '':\n print(\"Enviroment is not setup\")\n exit(1)\n\n with urllib.request.urlopen(eUrl + eToken) as url:\n data = json.loads(url.read().decode())\n\n return(data)",
"def read(cls, data):\n if isinstance(data, pd.DataFrame):\n return cls((json.loads(\n to_json_stat(data, output='dict', version='2.0'),\n object_pairs_hook=OrderedDict)))\n elif isinstance(data, OrderedDict):\n return cls(data)\n elif (isinstance(data, basestring)\n and data.startswith((\"http://\", \"https://\",\n \"ftp://\", \"ftps://\"))):\n # requests will do the rest...\n return cls(request(data))\n elif isinstance(data, basestring):\n try:\n json_dict = json.loads(data, object_pairs_hook=OrderedDict)\n return cls(json_dict)\n except ValueError:\n raise\n else:\n try:\n json_dict = json.load(data, object_pairs_hook=OrderedDict)\n return cls(json_dict)\n except ValueError:\n raise",
"def fetchJson(url):",
"def read(data):\n return Link(**data)",
"def get_jsonparsed_data(url):\n response = urlopen(url)\n data = response.read().decode(\"utf-8\")\n return json.loads(data)",
"def get_jsonparsed_data(url):\n response = urlopen(url)\n data = response.read().decode(\"utf-8\")\n return json.loads(data)",
"def get_jsonparsed_data(url):\n response = urlopen(url)\n data = response.read().decode(\"utf-8\")\n return json.loads(data)",
"def get_jsonparsed_data(url):\n response = urlopen(url)\n data = response.read().decode(\"utf-8\")\n return json.loads(data)",
"def fetch_data(data_url):\n return requests.get(data_url).content",
"def get(self, url):\n return json.loads(self.as_source.urlopen(url).read())",
"def _read_data(self):",
"def get(url):\n http = urllib3.PoolManager()\n r = http.request(\"GET\", url)\n data = xmltodict.parse(r.data)\n return data",
"def _fetch_http(self, url, params):\n params['format'] = 'json'\n # urllib.urlencode expects str objects, not unicode\n fixed = dict([(to_bytes(b[0]), to_bytes(b[1]))\n for b in params.items()])\n request = urllib2.Request(url, urllib.urlencode(fixed))\n request.add_header('Accept-encoding', 'gzip')\n response = self._opener.open(request)\n if isinstance(self._cj, cookielib.MozillaCookieJar):\n self._cj.save()\n if response.headers.get('Content-Encoding') == 'gzip':\n compressed = StringIO(response.read())\n gzipper = gzip.GzipFile(fileobj=compressed)\n data = gzipper.read()\n else:\n data = response.read()\n return data",
"def get_data(url):\n response = get(url, timeout=10)\n \n if response.status_code >= 400:\n raise RuntimeError(f'Request failed: { response.text }')\n \n return response.json()",
"def read_url(path, name, *args, version=None, driver_kwargs=None, scheme=None, **kwargs):\n\tif driver_kwargs is None:\n\t\tdriver_kwargs = {}\n\tfd = open(path, **driver_kwargs) if scheme is None else DRIVERS[scheme](path, **driver_kwargs)\n\tdata = read(fd, name, *args, version=version, **kwargs)\n\tfd.close()\n\treturn data",
"def read_data():\r\n\r\n if os.path.isfile(os.getcwd() + \"/www/access_list.txt\") and os.stat(os.getcwd() + \"/www/access_list.txt\").st_size != 0:\r\n data = json.load(open(os.getcwd() + \"/www/access_list.txt\"))\r\n return collections.defaultdict(dict, data)\r\n else:\r\n return collections.defaultdict(dict)"
] | [
"0.61915475",
"0.595929",
"0.581939",
"0.57758147",
"0.5734308",
"0.5706303",
"0.5693144",
"0.56683004",
"0.56371516",
"0.56260544",
"0.5621542",
"0.5556245",
"0.55431974",
"0.55298895",
"0.5499914",
"0.54831415",
"0.54318327",
"0.54240686",
"0.5399689",
"0.5399689",
"0.5399689",
"0.5399689",
"0.5393692",
"0.53905267",
"0.538503",
"0.5382411",
"0.5344191",
"0.5343763",
"0.52905345",
"0.5245763"
] | 0.62716776 | 0 |
Extract the action from a command (get, insert, update, delete) | def get_action(command):
return command.split(" ")[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_command(self,command):\n\t\treturn self.command_handlers[command]",
"def get_value(command):\n if is_get(command) or is_delete(command):\n return None\n elif is_insert(command) or is_update(command):\n return command.split(\" \")[2]",
"def _get_action(self):\n return self.__action",
"def command(self):\n if self.model is self.model_action:\n return self.command_action\n else:\n return self.command_candidate",
"def get_action_command(self):\n if self.action.value == \"start\":\n self.action_command = self.ServerStartSubCommand()\n else:\n self.action_command = None",
"def obtain_action(self):\r\n\t\treturn",
"def get_cmd(self, command):\n return self.commands[command][\"cmd\"]",
"def _getCommand(self, cmd):\n try:\n cmd_str = cmd.decode('utf-8')\n return getattr(self, 'do_' + cmd_str, None)\n except:\n return None",
"def get_action(self):\n raise NotImplementedError",
"def get_action(self):\n return self.__action",
"def __commandparser(self, data):\n # zum bearbeiten einen String daraus machen\n cmdstr = data.decode('utf-8')\n self.log.debug(\"cmd: %s\" % cmdstr)\n # json parsen und dictonary Objekt daraus machen\n cmd = json.loads(cmdstr)\n #\n # ist es ein GET Kommando?\n #\n if 'get' in cmd:\n self.log.debug(\"get cmd recognized...\")\n return self.__get_cmd_parse(cmd['get'])\n elif 'set' in cmd:\n self.log.debug(\"set cmd recognized...\")\n return self.__set_cmd_parse(cmd['set'])\n elif 'delete' in cmd:\n self.log.debug(\"DELETE cmd recognized...\")\n return self.__delete_cmd_parse(cmd['delete'])\n else:\n self.log.warning(\"unknown command recived! Data: <{}>\".format(cmdstr))\n return json.dumps({'error': 'unknown command or not implemented yet'}).encode(encoding='utf-8')\n # ENDE __commandparser",
"def get_command(self):\n return self.c_dict['COMMAND']",
"def _receive_command(self, command):\n if command.startswith('RET '):\n print(command[4:]) # Return value\n elif command.startswith('ERROR '):\n logger.error('JS - ' + command[6:].strip())\n elif command.startswith('WARN '):\n logger.warn('JS - ' + command[5:].strip())\n elif command.startswith('PRINT '):\n print(command[5:].strip())\n elif command.startswith('INFO '):\n logger.info('JS - ' + command[5:].strip())\n elif command.startswith('SET_PROP '):\n # todo: seems weird to deal with here. implement by registring some handler?\n # Should be better when we implement a more formal protocol\n _, id, name, txt = command.split(' ', 3)\n ob = Model._instances.get(id, None)\n if ob is not None:\n ob._set_prop_from_js(name, txt)\n elif command.startswith('SET_EVENT_TYPES '):\n _, id, txt = command.split(' ', 3)\n ob = Model._instances.get(id, None)\n if ob is not None:\n ob._set_event_types_js(txt)\n elif command.startswith('EVENT '):\n _, id, name, txt = command.split(' ', 3)\n ob = Model._instances.get(id, None)\n if ob is not None:\n ob._emit_from_js(name, txt)\n else:\n logger.warn('Unknown command received from JS:\\n%s' % command)",
"def _execute_op(self, op):\n if op.op_type == Operation.Type.GET:\n if op.key in self._store:\n return Result.OK, self._store[op.key]\n else:\n return Result.NOT_FOUND, \"\"\n elif op.op_type == Operation.Type.PUT:\n self._store[op.key] = op.value\n return Result.OK, \"\"\n elif op.op_type == Operation.Type.DEL:\n self._store.pop(op.key, None)\n return Result.OK, \"\"\n else:\n raise ValueError(\"Invalid operation type\")",
"def _execute_op(self, op):\n if op.op_type == Operation.Type.GET:\n if op.key in self._store:\n return Result.OK, self._store[op.key]\n else:\n return Result.NOT_FOUND, \"\"\n elif op.op_type == Operation.Type.PUT:\n self._store[op.key] = op.value\n return Result.OK, \"\"\n elif op.op_type == Operation.Type.DEL:\n self._store.pop(op.key, None)\n return Result.OK, \"\"\n else:\n raise ValueError(\"Invalid operation type\")",
"def cmd(self, cmd):\n return cmd",
"def _command(self, *cmd, handler=None):",
"def action(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"action\")",
"def set_action_cmd(self, action):\n if self.args.snapcheck is True:\n action = \"snapcheck\"\n if self.args.check is True:\n action = \"check\"\n if self.args.snap is True:\n action = \"snap\"\n if self.args.diff is True:\n action = \"diff\"\n return action",
"def action(self):\n return self._get_field(\"action\")",
"def get_action(self, context):\n pass",
"def get_action(self, action):\n actions = {\n self.GO_ACTION: self.go,\n self.CLICK_ACTION: self.click,\n self.CHECK_ACTION: self.check,\n self.WAIT_ACTION: self.wait,\n self.FILL_FORM_ACTION: self.fill,\n self.SELECT_FORM_ACTION: self.select\n }\n try:\n return actions[action]\n except KeyError:\n raise Exception('{0} is not a valid action, the valid actions are: {1}'.format(action,\n \", \".join(actions.keys())))",
"def get_command(self):\n req_type = type(self.req)\n\n if req_type == ureq.CreateEntryRequest:\n return commands.CreateCommand(self.req.results)\n elif req_type == ureq.ReadEntryRequest:\n return commands.ReadCommand(self.req.results)\n elif req_type == ureq.UpdateEntryRequest:\n return commands.UpdateCommand(self.req.results)\n elif req_type == ureq.DeleteEntryRequest:\n return commands.DeleteCommand(self.req.results)",
"def get_command(self):\n return self.command",
"def get_result(self, action):\n return self.__cmd_handler.handle_cmd(action)",
"def get_command(self, module_name, command_name):\r\n actions = self.plugins.get(module_name) or {}\r\n if command_name in actions:\r\n return actions[command_name]\r\n if None in actions:\r\n return actions[None]\r\n raise InvalidCommand(module_name, command_name)",
"def processCommand(self, command, args):\n\n commandMap = { \n \"new\" : self.createNewList,\n \"view\" : self.trelloView,\n \"add\" : self.trelloAddCard, \n \"remove\" : self.trelloDeleteCard, \n }\n\n if command not in commandMap: return \">> Command not found\" \n \n return commandMap[command](args)",
"def action(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"action\")",
"def action(self):\n return self._action",
"def action(self):\n return self._action"
] | [
"0.6848488",
"0.6668799",
"0.66313916",
"0.6625008",
"0.65247416",
"0.6460432",
"0.64350355",
"0.63146126",
"0.6298981",
"0.6275523",
"0.6194686",
"0.6180713",
"0.61756164",
"0.614495",
"0.614495",
"0.6138919",
"0.6136538",
"0.6116887",
"0.60967165",
"0.60889083",
"0.60766923",
"0.6064063",
"0.6058359",
"0.6019852",
"0.6007114",
"0.5975362",
"0.59693944",
"0.5921651",
"0.59207094",
"0.59207094"
] | 0.77882355 | 0 |
Extract the key from a command | def get_key(command):
return command.split(" ")[1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_bin_key(self, command):\n\t\treturn self.remote.encode_button(command)",
"def get_cmd(self, command):\n return self.commands[command][\"cmd\"]",
"def get_command(self):\n return self.c_dict['COMMAND']",
"def _get_command_lookup(self, command_dict):",
"def key(key):\n return key",
"def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")",
"def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")",
"def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")",
"def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")",
"def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")",
"def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")",
"def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")",
"def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")",
"def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")",
"def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")",
"def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")",
"def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")",
"def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")",
"def key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key\")",
"def _get_command_config(self, i):\n key_pair = list(self._run_list[i].items())[0]\n return key_pair",
"def key(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"key\")",
"def key(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"key\")",
"def get(self, key):\n return self.execute_command(self.GET_CMD, key)",
"def key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key\")",
"def key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key\")",
"def key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key\")",
"def key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key\")",
"def key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key\")",
"def key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key\")",
"def key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key\")"
] | [
"0.7064575",
"0.69919556",
"0.6910567",
"0.6785822",
"0.674809",
"0.67085004",
"0.67085004",
"0.67085004",
"0.67085004",
"0.67085004",
"0.67085004",
"0.67085004",
"0.67085004",
"0.67085004",
"0.67085004",
"0.67085004",
"0.67085004",
"0.67085004",
"0.67085004",
"0.6574106",
"0.6511884",
"0.6511884",
"0.65019923",
"0.6458599",
"0.6458599",
"0.6458599",
"0.6458599",
"0.6458599",
"0.6458599",
"0.6458599"
] | 0.90267634 | 0 |
Extract the 'value' from a command | def get_value(command):
if is_get(command) or is_delete(command):
return None
elif is_insert(command) or is_update(command):
return command.split(" ")[2] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _query_state_value(command):\n _LOGGER.info('Running state command: %s', command)\n\n try:\n return_value = subprocess.check_output(command, shell=True)\n return return_value.strip().decode('utf-8')\n except subprocess.CalledProcessError:\n _LOGGER.error('Command failed: %s', command)",
"def get_command():\n\tS = raw_input().split()\n\n\ttry:\n\t\t# input contains a command and value\n\t\treturn [S[0], int(S[1])]\n\texcept:\n\t\t# input contains a command and no value\n\t\treturn S",
"def eventually_call_command(value):\n if value.startswith(u'`') and value.endswith(u'`'):\n cmd = value[1:-1]\n try:\n processed_value = subprocess.check_output(cmd, shell=True)\n except subprocess.CalledProcessError as e:\n raise ValueError(u'The call to the external tool failed: %s' % e)\n processed_value = processed_value.decode('utf-8')\n processed_value = processed_value.split('\\n')[0].strip('\\r\\n\\t')\n return processed_value\n return value",
"def cmd_get(self):\n return self.text",
"def get(self):\n return self.match.group(\"value\")",
"def get_cmd(self, command):\n return self.commands[command][\"cmd\"]",
"def get_command(self):\n return self.c_dict['COMMAND']",
"def command(self):\n with open(self.x, 'rt') as fi:\n line = next(fi) # the first line\n\n version, cmd_line = line.strip().split(';')\n version = version.split(' ')[2]\n cmd_line = re.sub('\"', '', cmd_line.strip())\n\n return [version, cmd_line]",
"def parse_line(self, line):\n command, _, arg = line.strip().partition(\" \")\n return command, arg.strip()",
"def cmd(self):\n value = None\n for insndesc in self.structure:\n if insndesc['instruction'] == 'FROM': # new stage, reset\n value = None\n elif insndesc['instruction'] == 'CMD':\n value = insndesc['value']\n return value",
"def getoutput(cmd):\n return getstatusoutput(cmd)[1]",
"def cmd_parse(self, cmd):\n chan = (cmd & 0x200) >> 8\n val = cmd & 0xff\n return 'w'+str(chan)+'.v'+str(value)",
"def value(self) -> str:\n _args: list[Arg] = []\n _ctx = self._select(\"value\", _args)\n return _ctx.execute_sync(str)",
"def arg1(self):\n if self.command_type() == Parser.C_PUSH or self.command_type() == Parser.C_POP:\n arg1 = self._line.split()[1]\n elif self.command_type() == Parser.C_ARITHMETIC:\n arg1 = self._line.split()[0]\n else:\n raise Exception(\"Not to be invoked with any other command\")\n return arg1",
"def values(self, command, **kwargs):\n return self.instrument.values(\":INP%d:%s\" % (\n self.number, command), **kwargs)",
"def values(self, command, **kwargs):\n return self.instrument.values(\":INP%d:%s\" % (\n self.number, command), **kwargs)",
"def result_of(cmd):\n cmd_list_arr = cmd.split(\" \")\n result = check_output(cmd_list_arr).decode(\"utf-8\")\n return result",
"def GetCommand(name, database):\n value = database.GetValue(name)\n if(value == None):\n return \"Name not found\"\n else:\n return value",
"def get_value(value):\n if value:\n return value.split('\\n')[0]\n else:\n return None",
"def arg1(self):\n t = self.command_type()\n if t == 'C_RETURN':\n return\n\n args = self.current_command.split(' ')\n\n if t == 'C_ARITHMETIC':\n # Return the command itself.\n return args[0]\n\n return args[1]",
"def cli(ctx):\n return ctx.gi.cannedvalues.get_values()",
"def command_output(cmd):\n import subprocess\n return subprocess.Popen(\n cmd.split(\";\"), stdout=subprocess.PIPE).communicate()[0]",
"def get_command(pid):",
"def extract_command(text):\n return text.split()[0].split('@')[0][1:] if is_command(text) else None",
"def extract_command(text):\n return text.split()[0].split('@')[0][1:] if is_command(text) else None",
"def get_command(self, offset):\n cmd = struct.unpack_from('!I', self.string, offset=offset)[0]\n # if cmd not in [1,2,3,4,9]:\n # raise ValueError('Command not recognised')\n offset += struct.calcsize('!I')\n return cmd, offset",
"def value(self) -> str:\n if hasattr(self, \"_value\"):\n return self._value\n _args: list[Arg] = []\n _ctx = self._select(\"value\", _args)\n return _ctx.execute_sync(str)",
"def value(self) -> str:\n if hasattr(self, \"_value\"):\n return self._value\n _args: list[Arg] = []\n _ctx = self._select(\"value\", _args)\n return _ctx.execute_sync(str)",
"def get_key(command):\n return command.split(\" \")[1]",
"def getvalue(self):\n return self.out.getvalue()"
] | [
"0.666239",
"0.66055787",
"0.659243",
"0.6578315",
"0.65387666",
"0.64170784",
"0.63077176",
"0.6285835",
"0.62800246",
"0.62364197",
"0.6215651",
"0.6152268",
"0.6127448",
"0.609683",
"0.60843295",
"0.60843295",
"0.60530883",
"0.60434",
"0.6038694",
"0.6008705",
"0.59813315",
"0.5974339",
"0.5967071",
"0.5951356",
"0.5951356",
"0.59179384",
"0.58934957",
"0.58934957",
"0.5887182",
"0.5871803"
] | 0.75534874 | 0 |
Spawns tasks for each GSoCProject in the given Program. | def spawnRemindersForProjectSurvey(self, request, *args, **kwargs):
post_dict = request.POST
# retrieve the program_key and survey_key from POST data
program_key = post_dict.get('program_key')
survey_key = post_dict.get('survey_key')
survey_type = post_dict.get('survey_type')
if not (program_key and survey_key and survey_type):
# invalid task data, log and return OK
return error_handler.logErrorAndReturnOK(
'Invalid spawnRemindersForProjectSurvey data: %s' % post_dict)
program_entity = GSoCProgram.get_by_key_name(program_key)
if not program_entity:
# invalid program specified, log and return OK
return error_handler.logErrorAndReturnOK(
'Invalid program specified: %s' % program_key)
q = GSoCProject.all()
q.filter('status', 'accepted')
q.filter('program', program_entity)
if 'cursor' in post_dict:
q.with_cursor(post_dict['cursor'])
projects = q.fetch(self.BATCH_SIZE)
if not projects:
# we are done, return OK
return http.HttpResponse()
for project in projects:
task_params = {
'survey_key': survey_key,
'survey_type': survey_type,
'project_key': str(project.key())
}
task_url = '/tasks/gsoc/surveys/send_reminder/send'
new_task = taskqueue.Task(params=task_params, url=task_url)
new_task.add('mail')
# pass along these params as POST to the new task
task_params = {
'program_key': program_key,
'survey_key': survey_key,
'survey_type': survey_type,
'cursor': q.cursor()
}
task_url = request.path
new_task = taskqueue.Task(params=task_params, url=task_url)
new_task.add()
# return OK
return http.HttpResponse() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def do_all_projects(args):\n man = load_manifest()\n\n if args[0] == '-p':\n parallel = True\n del args[0]\n else:\n parallel = False\n\n towait = []\n\n for (name, project) in man.projects.iteritems():\n repo = GitRepo(workdir_for_project(project))\n print >>sys.stderr, \"In project: \", name, \" running \", \" \".join(args)\n p = repo.command_process(args)\n if not parallel:\n p.Wait()\n print >>sys.stderr\n else:\n towait.append(p)\n\n for p in towait:\n p.Wait()",
"def new_tasks(self, extra):\n\n tasks = []\n\n try:\n fd = open(self.params.command_file)\n \n self.result_dir = os.path.dirname(self.params.output)\n \n for line in fd:\n command = line.strip()\n\n if not command:\n # ignore black lines\n continue\n\n cmd_args = _parse_command(command)\n \n # setting jobname\n jobname = \"gc_gps-%s%s%s%s%s\" % (cmd_args['pos'],\n cmd_args['realizations'],\n cmd_args['snr'],\n cmd_args['mast.h'],\n cmd_args['sd.mast.o'])\n\n extra_args = extra.copy()\n extra_args['jobname'] = jobname\n # FIXME: ignore SessionBasedScript feature of customizing \n # output folder\n extra_args['output_dir'] = self.params.output\n\n extra_args['output_dir'] = extra_args['output_dir'].replace('NAME', os.path.join('.computation',jobname))\n extra_args['output_dir'] = extra_args['output_dir'].replace('SESSION', os.path.join('.computation',jobname))\n extra_args['output_dir'] = extra_args['output_dir'].replace('DATE', os.path.join('.computation',jobname))\n extra_args['output_dir'] = extra_args['output_dir'].replace('TIME', os.path.join('.computation',jobname))\n\n self.log.debug(\"Creating Task for command: %s\" % command)\n\n tasks.append(GcgpsTask(\n command,\n self.params.R_source_folder,\n self.result_dir,\n self.params.input_dir,\n **extra_args))\n\n except IOError, ioe:\n self.log.error(\"Error while reading command file \" +\n \"%s.\" % self.params.command_file +\n \"Message: %s\" % ioe.message)\n except Exception, ex:\n self.log.error(\"Unexpected error. Error type: %s, Message: %s\" % (type(ex),str(ex)))\n\n finally:\n fd.close()\n\n return tasks",
"def new_tasks(self, extra):\n tasks = []\n\n for parameter in self._enumerate_csv(self.params.csv_input_file):\n parameter_str = '.'.join(str(x) for x in parameter)\n jobname = \"gpfi-%s\" % parameter_str\n\n extra_args = extra.copy()\n\n extra_args['jobname'] = jobname\n \n extra_args['output_dir'] = self.params.output\n extra_args['output_dir'] = extra_args['output_dir'].replace('NAME', jobname)\n extra_args['output_dir'] = extra_args['output_dir'].replace('SESSION', jobname)\n extra_args['output_dir'] = extra_args['output_dir'].replace('DATE', jobname)\n extra_args['output_dir'] = extra_args['output_dir'].replace('TIME', jobname)\n\n self.log.debug(\"Creating Application for parameter : %s\" %\n (parameter_str))\n\n tasks.append(GpfiApplication(\n parameter,\n self.param.model,\n **extra_args))\n\n return tasks",
"def main():\n for task in range(1, 6):\n # get map object for the current task\n map_obj = MapObj(task=task)\n # display map\n map_obj.show_map()\n # find cost optimal path using a-star\n node = search(\n map_obj=map_obj,\n heuristic=euclidian_distance,\n moving_goal=(task == 5)\n )\n # draw optimal path on map\n map_obj.draw_path(node)\n # display the map\n map_obj.show_map()",
"def tasks():",
"def do_pp(self, arg):\n self.do_projects(arg)",
"def create_projects(self):\n if self.gl is None or self.config is None:\n print(\"No config/Gitlab found, please run connect first.\")\n exit(1)\n else:\n print(\"Starting Project creation.\")\n gl = self.gl\n config = self.config\n for project in config[\"projects\"]:\n # get the import url\n imp_url = config[\"projects\"][project][\"import_url\"]\n\n # Set rights/members/protected master\n if config[\"projects\"][project][\"owner_conf\"][\"owner\"] == \"all_users\":\n for user in self.users:\n print(\"Importing \\'\" + imp_url + \"\\' for user \\'\" + user.username + \"\\'\")\n pj = user.projects.create({'name': project,\n 'user_id': user.id,\n 'access_level': gitlab.OWNER_ACCESS,\n 'import_url': imp_url})\n elif config[\"projects\"][project][\"owner_conf\"][\"owner\"] == \"user\":\n for user in self.users:\n if user.username == config[\"projects\"][project][\"owner_conf\"][\"name\"]:\n print(\"Importing \\'\" + imp_url + \"\\' for user \\'\" + user.username + \"\\'\")\n pj = user.projects.create({'name': project,\n 'user_id': user.id,\n 'Access_level': gitlab.OWNER_ACCESS,\n 'import_url': imp_url})\n elif config[\"projects\"][project][\"owner_conf\"][\"owner\"] == \"group\":\n for group in self.groups:\n if group.name == config[\"projects\"][project][\"owner_conf\"][\"name\"]:\n print(\"Importing \\'\" + imp_url + \"\\' for group \\'\" + group.name + \"\\'\")\n pj = group.projects.create({'name': project,\n 'namespace_id': group.id,\n 'import_url': imp_url})\n else:\n print(\"Project owner Config is wrong, aborting\")\n exit(1)\n # Delete protected Master Branch\n if config[\"projects\"][project][\"protect_master_branch\"] == \"False\":\n print(\"Removing Project master Branch protection\")\n pj.protectedbranches.delete('master')",
"def run_tasks(self, task_index=0):\n for generator in self.task_tree[task_index]():\n next(generator)\n # self.device_control.wait_for_device()\n \n next_index = task_index + 1\n if next_index < len(self.task_tree):\n self.run_tasks(next_index)",
"def define_tasks(self, imgcollections,\r\n description, dimensions, folder):\r\n n = imgcollections.size().getInfo()\r\n collections = imgcollections.toList(n) # this is server-object; not iterable\r\n tasks = []\r\n itr = np.arange(n).tolist()\r\n random.shuffle(itr)\r\n pbar = tqdm.tqdm(itr)\r\n for i in pbar:\r\n image = collections.get(i)\r\n task = self.define_task(ee.Image(image).float(),\r\n \"{0:05d}\".format(i),\r\n description,\r\n dimensions,\r\n folder)\r\n tasks.append(task)\r\n pbar.set_description(\"defining tasks {0:05d}/{1:05d}\".format(i, n)) \r\n return tasks",
"def spawnBulkCreateTasks(data, org_admin):\n data = StringIO.StringIO(data.encode('UTF-8'))\n tasks = csv.DictReader(data, fieldnames=DATA_HEADERS, restval=\"\")\n\n task_list = []\n for task in tasks:\n # pop any extra columns\n task.pop(None,None)\n task_list.append(db.Text(simplejson.dumps(task)))\n\n bulk_data = bulk_create_model.GCIBulkCreateData(\n tasks=task_list, created_by=org_admin, total_tasks=len(task_list))\n bulk_data.put()\n\n task_params = {\n 'bulk_create_key': bulk_data.key()\n }\n\n logging.info('Enqueued bulk_create with: %s' %task_params)\n new_task = taskqueue.Task(params=task_params,\n url=BULK_CREATE_URL)\n # add to the gci queue\n new_task.add(queue_name='gci-update')",
"def make(c, force=None, s3_bucket=None, wp_site=None, groups=[], tags=[]):\n\n for sp_ns in ns_foreach_task_subdir(c):\n try:\n sp_ns.tasks.make(c, force=force, s3_bucket=s3_bucket, wp_site=wp_site,\n groups=groups, tags=tags)\n except UnexpectedExit:\n pass",
"def project():",
"def project():",
"def project():",
"def launch_coms(self):\n # Go through list of comunication.\n for com in self.coms:\n com_name = com.partition(' ')[0]\n subpub_name = com_name.lower()+'_com'\n args = str(self.vehicle_id)+' '+com\n node = Node('sml_world', 'communication.py',\n namespace=self.namespace, args=args,\n name=com_name.lower())\n self.launcher.launch(node)\n # Register subscriptions for each of them.\n rospy.Subscriber(self.namespace + subpub_name,\n getattr(msgs, com_name+'Com'),\n getattr(self, 'process_'+subpub_name))",
"def generate_tasks(self, task):",
"def project_grp():\n pass",
"def group_tasks(*tasks):\n\n class TaskGroup(*tasks):\n # TODO: figure out how to make the setup work at the moment it just picks the first in MRO\n # def setup(self, x): pass\n\n def process(self, x):\n for t in tasks:\n self.log.debug(\"Calling process for subtask %s\", t.__name__)\n x = t.process(self, x)\n\n return x\n\n return TaskGroup",
"def main(type, project, author, email):\n while os.path.exists(project):\n click.echo('The project has been exists. Would you want to rebuild the project?\\n')\n click.echo('> {:<12}\\tfor\\tcontinue'.format('YES'))\n click.echo('> {:<12}\\tfor\\tbreak'.format('NO'))\n click.echo('> {:<12}\\tfor\\tbuilding another project\\n'.format('PROJECT NAME'))\n confirm_info = input('> ').strip().lower()\n if confirm_info == 'yes':\n shutil.rmtree(project)\n elif confirm_info == 'no':\n return\n else:\n project = confirm_info\n my_project = CreateNewProject(type, project, author, email)\n my_project.run()",
"def atlas_projects():\n pass",
"def project():\n\n return M(c=\"project\", f=\"task\")(\n M(\"Tasks\", f=\"task\")(\n M(\"Create\", m=\"create\"),\n M(\"My Open Tasks\", vars={\"mine\":1}),\n ),\n )",
"def genesippr_task(self, proj_id):\n\n project_obj = Project.objects.get(id=proj_id)\n basepath = os.path.dirname(__file__).replace('/SilentD', '')\n\n description = project_obj.description.replace(' ', '') # remove any spaces in the project name\n partialpath = os.path.join(str(project_obj.date.date()), description)\n execute_genesipper = 'GeneSippr/run_genesippr.sh'\n\n # run the GeneSippr docker container from an outside script\n p = Popen([execute_genesipper, basepath, partialpath, str(project_obj.id)])\n print(\"GeneSippr is creating reports for the project.\")\n p.communicate() # wait until the script completes before resuming the code\n\n # path for all reports created from the docker run, check to ensure they are all present\n results_16spath = get_resultdir(project_obj, result_folder_names.folder_16s)\n results_GDCSpath = get_resultdir(project_obj, result_folder_names.folder_GDCS)\n results_genesippr = get_resultdir(project_obj, result_folder_names.folder_genesippr)\n\n if file_exists(results_16spath) and file_exists(results_GDCSpath) and file_exists(results_genesippr):\n project_obj.genesippr_results = \"Done\"\n project_obj.save()\n print(\"The GeneSippr task was successful\")\n else:\n project_obj.genesippr_results = \"Error\"\n project_obj.save()\n print(\"An error occurred when running the GeneSippr task.\")",
"def get_all_projects_tasks(dump: Optional[Union[bool, str]] = None,\n get_predictions_instead: bool = False):\n\n @ray.remote\n def _iter_projects(proj_id, get_preds_instead=get_predictions_instead):\n if get_preds_instead:\n _tasks = get_tasks_from_mongodb(proj_id,\n dump=dump,\n get_predictions=True)\n else:\n _tasks = get_tasks_from_mongodb(proj_id)\n for task in _tasks:\n task.pop('_id')\n return _tasks\n\n project_ids = get_project_ids_str().split(',')\n\n futures = []\n for project_id in project_ids:\n futures.append(_iter_projects.remote(project_id))\n\n tasks = []\n for future in tqdm(futures):\n tasks.append(ray.get(future))\n\n if dump:\n with open(dump, 'w') as j:\n json.dump(sum(tasks, []), j)\n\n return sum(tasks, [])",
"def build(c, force=None):\n for sp_ns in ns_foreach_task_subdir(c):\n print(\"-- running build in \", os.getcwd())\n\n # sp_ns.tasks.build(c, force)\n c.run('invoke build')",
"def main(*, instance_connection_info: Dict[str, str], project_id: str) -> List[Operation]:\n\n # Create the tamr client\n tamr_client = tbox.utils.client.create(**instance_connection_info)\n\n project = tamr_client.projects.by_resource_id(project_id)\n\n # Retrieve upstream projects\n LOGGER.info(f\"Retrieving upstream projects for project: {project}\")\n\n upstream_projects = tbox.utils.upstream.projects(project)\n\n operations = []\n if upstream_projects:\n LOGGER.info(\n f\"The following upstream projects were retrieved successfully {upstream_projects}\"\n )\n\n # Update all projects in a chained workflow and return the operations that were carried out\n operations = tbox.workflow.jobs.run(upstream_projects)\n else:\n LOGGER.info(f\"No upstream projects found for project {project}\")\n\n # Now that all upstream project have been run, run the target project\n # and add it's operations to the list\n operations.extend(tbox.workflow.jobs.run([project]))\n\n return operations",
"def makeprojects(working_directory=None, args=None):\n from .__main__ import main\n if args is None:\n args = []\n return main(working_directory, args)",
"def comm_times_group(ns, hosts):\n\n return run_on_hosts(hosts,\n '''python %sape/timings/communication/mpi_run_group.py \"%s\" %s'''%(\n ape_dir, ns, ' '.join(hosts)))",
"def doproj(projfile, password=None, startup=\"asis\"):\n # debugging\n # makes debug apply only to the current thread\n #try:\n #import wingdbstub\n #if wingdbstub.debugger != None:\n #import time\n #wingdbstub.debugger.StopDebug()\n #time.sleep(1)\n #wingdbstub.debugger.StartDebug()\n #import thread\n #wingdbstub.debugger.SetDebugThreads({thread.get_ident(): 1}, default_policy=0)\n ## for V19 use\n ### ###SpssClient._heartBeat(False)\n #except:\n #pass \n fh = FileHandles()\n projfile = fh.resolve(projfile)\n setstartup(projfile, startup, password)\n print(\"**** Opening project %s\" % projfile)\n state = None\n lines = []\n ###with open(projfile) as f:\n with codecs.open(projfile, encoding=\"utf_8_sig\") as f:\n for line in f.readlines():\n line = line.rstrip() # strip newline\n # lines starting with \"<whitespace>;\" are comments and are just printed\n if line.lstrip().startswith(\";\"):\n print(line)\n continue\n # if section header, process previous section; otherwise accumulate lines\n if line in list(dispatch.keys()):\n if state == \"[PROJECT]\":\n for item in lines:\n doproj(item, password) # Will never set child as startup script\n else:\n dispatch[state](lines, password)\n lines = []\n state = line\n else:\n lines.append(line)\n \n # on end of file\n if state == \"[PROJECT]\":\n for item in lines:\n doproj(item, password)\n else:\n dispatch[state](lines, password)",
"def main():\n\n parser = argparse.ArgumentParser(description=\"generateTestStubs\")\n\n parser.add_argument(\"taskFile\",\n help=\"Path for assignment file.\")\n\n args = parser.parse_args()\n\n if not os.path.exists(args.taskFile):\n print(\"Task file does not exist.\")\n sys.exit(1)\n\n taskMgr = EEWebLPProject()\n taskMgr.initLP()\n\n #taskMgr.listProjects()\n #taskMgr.loadTree([\"project_id=8008922\"])\n tasks = taskMgr.getTasks([\"project_id=6890048\"],parent_id=8008922)\n\n fileByAssignee = taskMgr.getTaskOwners(args.taskFile)\n taskMgr.updateTaskOwners(fileByAssignee,tasks)",
"def gen_examples_worker(program):\n print(\"\\rGenerating examples... %d\\\\%d (remaining programs: %d)\" %\n (progress_counter.value, num_programs, valid_counter.value), end=\"\")\n\n input_output_examples = constraint.get_input_output_examples(program, num_examples=num_examples,\n num_tries=num_example_tries)\n\n progress_counter.value += 1\n if input_output_examples:\n return input_output_examples\n else:\n valid_counter.value -= 1\n return None"
] | [
"0.6236974",
"0.5777459",
"0.5777358",
"0.56144845",
"0.5541909",
"0.5535827",
"0.5474582",
"0.5409724",
"0.5370587",
"0.53408545",
"0.533208",
"0.53173953",
"0.53173953",
"0.53173953",
"0.5280981",
"0.5270837",
"0.52411216",
"0.5234688",
"0.5231515",
"0.5197715",
"0.51743555",
"0.51579106",
"0.5148389",
"0.5128708",
"0.51230335",
"0.51169795",
"0.5110705",
"0.51074725",
"0.51049525",
"0.51022345"
] | 0.57984895 | 1 |
Sends a reminder mail for a given GSoCProject and Survey. A reminder is only sent if no record is on file for the given Survey and GSoCProject. | def sendSurveyReminderForProject(self, request, *args, **kwargs):
post_dict = request.POST
project_key = post_dict.get('project_key')
survey_key = post_dict.get('survey_key')
survey_type = post_dict.get('survey_type')
if not (project_key and survey_key and survey_type):
# invalid task data, log and return OK
return error_handler.logErrorAndReturnOK(
'Invalid sendSurveyReminderForProject data: %s' % post_dict)
# set model depending on survey type specified in POST
if survey_type == 'project':
survey_model = ProjectSurvey
record_model = GSoCProjectSurveyRecord
elif survey_type == 'grading':
survey_model = GradingProjectSurvey
record_model = GSoCGradingProjectSurveyRecord
else:
return error_handler.logErrorAndReturnOK(
'%s is an invalid survey_type' %survey_type)
# retrieve the project and survey
project_key = db.Key(project_key)
project = GSoCProject.get(project_key)
if not project:
# no existing project found, log and return OK
return error_handler.logErrorAndReturnOK(
'Invalid project specified %s:' % project_key)
survey = survey_model.get_by_key_name(survey_key)
if not survey:
# no existing survey found, log and return OK
return error_handler.logErrorAndReturnOK(
'Invalid survey specified %s:' % survey_key)
# try to retrieve an existing record
q = record_model.all()
q.filter('project', project)
q.filter('survey', survey)
record = q.get()
if not record:
# send reminder email because we found no record
student = ndb.Key.from_old_key(project.parent_key()).get()
site_entity = site.singleton()
if survey_type == 'project':
url_name = 'gsoc_take_student_evaluation'
to_name = student.public_name
to_address = student.contact.email
mail_template = 'modules/gsoc/reminder/student_eval_reminder.html'
elif survey_type == 'grading':
url_name = 'gsoc_take_mentor_evaluation'
mentors = ndb.get_multi(map(ndb.Key.from_old_key, project.mentors))
to_address = [mentor.contact.email for mentor in mentors]
to_name = 'mentor(s) for project "%s"' % (project.title)
mail_template = (
'modules/gsoc/reminder/mentor_eval_reminder.html')
program = project.program
hostname = site.getHostname()
url_kwargs = {
'sponsor': program_logic.getSponsorKey(program).name(),
'program': program.link_id,
'survey': survey.link_id,
'user': student.profile_id,
'id': str(project.key().id()),
}
url_path_and_query = reverse(url_name, kwargs=url_kwargs)
survey_url = '%s://%s%s' % ('http', hostname, url_path_and_query)
# set the context for the mail template
mail_context = {
'student_name': student.public_name,
'project_title': project.title,
'survey_url': survey_url,
'survey_end': survey.survey_end,
'to_name': to_name,
'site_name': site_entity.site_name,
'sender_name': "The %s Team" % site_entity.site_name,
}
# set the sender
_, sender_address = mail_dispatcher.getDefaultMailSender()
mail_context['sender'] = sender_address
# set the receiver and subject
mail_context['to'] = to_address
mail_context['subject'] = (
'Evaluation "%s" Reminder' % survey.title)
# find all org admins for the project's organization
org_key = ndb.Key.from_old_key(
GSoCProject.org.get_value_for_datastore(project))
org_admins = profile_logic.getOrgAdmins(org_key)
# collect email addresses for all found org admins
org_admin_addresses = []
for org_admin in org_admins:
org_admin_addresses.append(org_admin.contact.email)
if org_admin_addresses:
mail_context['cc'] = org_admin_addresses
# send out the email
mail_dispatcher.sendMailFromTemplate(mail_template, mail_context)
# return OK
return http.HttpResponse() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def spawnRemindersForProjectSurvey(self, request, *args, **kwargs):\n post_dict = request.POST\n\n # retrieve the program_key and survey_key from POST data\n program_key = post_dict.get('program_key')\n survey_key = post_dict.get('survey_key')\n survey_type = post_dict.get('survey_type')\n\n if not (program_key and survey_key and survey_type):\n # invalid task data, log and return OK\n return error_handler.logErrorAndReturnOK(\n 'Invalid spawnRemindersForProjectSurvey data: %s' % post_dict)\n\n program_entity = GSoCProgram.get_by_key_name(program_key)\n\n if not program_entity:\n # invalid program specified, log and return OK\n return error_handler.logErrorAndReturnOK(\n 'Invalid program specified: %s' % program_key)\n\n q = GSoCProject.all()\n q.filter('status', 'accepted')\n q.filter('program', program_entity)\n\n if 'cursor' in post_dict:\n q.with_cursor(post_dict['cursor'])\n\n projects = q.fetch(self.BATCH_SIZE)\n\n if not projects:\n # we are done, return OK\n return http.HttpResponse()\n\n for project in projects:\n task_params = {\n 'survey_key': survey_key,\n 'survey_type': survey_type,\n 'project_key': str(project.key())\n }\n task_url = '/tasks/gsoc/surveys/send_reminder/send'\n\n new_task = taskqueue.Task(params=task_params, url=task_url)\n new_task.add('mail')\n\n # pass along these params as POST to the new task\n task_params = {\n 'program_key': program_key,\n 'survey_key': survey_key,\n 'survey_type': survey_type,\n 'cursor': q.cursor()\n }\n task_url = request.path\n new_task = taskqueue.Task(params=task_params, url=task_url)\n new_task.add()\n\n # return OK\n return http.HttpResponse()",
"def send_reminder():\n\n name = config[\"email\"][\"name\"]\n user = config[\"email\"][\"user\"]\n subject = \"REMINDER: %s\" % sys.argv[1]\n body = sys.argv[2] if len(sys.argv) > 2 else \"\"\n email_helper.send(user, name, user, subject, body)",
"def task_send_reminder_email():\n send_reminder_email()\n logger.info(\"Sent reminder email\")",
"def send_reminder(self):\n message_contents = \"This is a reminder that your event: \" + self.event_title + \" takes place on \" + self.event_date + \" in \" + self.event_location\n subject = \"Event Reminder\"\n attendees = self.gameplanuser_set.all()\n for attendee in attendees:\n remindermessage = Message.objects.create(sender=self.event_manager, recipient=attendee, contents=message_contents)\n remindermessage.save()",
"def copyedit_complete_notify(request, project, copyedit_log, reminder=False):\n subject = 'Your approval needed to publish: {0}'.format(project.title)\n # Prepend reminder to the subject if needed\n if reminder:\n subject = \"Reminder - {}\".format(subject)\n\n for person in project.author_list():\n if not person.approval_datetime:\n body = loader.render_to_string(\n 'notification/email/copyedit_complete_notify.html', {\n 'name': person.get_full_name(),\n 'project': project,\n 'copyedit_log': copyedit_log,\n 'domain': get_current_site(request),\n 'url_prefix': get_url_prefix(request),\n 'signature': settings.EMAIL_SIGNATURE,\n 'project_info': email_project_info(project),\n 'footer': email_footer(),\n 'SITE_NAME': settings.SITE_NAME,\n })\n send_mail(subject, body, settings.DEFAULT_FROM_EMAIL,\n [person.user.email], fail_silently=False)",
"def resubmit_notify(project, comments):\n subject = 'Resubmission of project: {}'.format(project.title)\n email_context = {\n 'project': project,\n 'signature': settings.EMAIL_SIGNATURE,\n 'project_info': email_project_info(project),\n 'footer': email_footer(),\n 'SITE_NAME': settings.SITE_NAME,\n }\n\n for email, name in project.author_contact_info():\n email_context['name'] = name\n body = loader.render_to_string(\n 'notification/email/resubmit_notify.html', email_context)\n\n send_mail(subject, body, settings.DEFAULT_FROM_EMAIL,\n [email], fail_silently=False)\n\n # notify editorial team\n email_context['name'] = project.editor.get_full_name()\n email_context['author_comments'] = comments\n body = loader.render_to_string(\n 'notification/email/resubmit_notify_editor.html', email_context)\n\n send_mail(subject, body, settings.DEFAULT_FROM_EMAIL,\n [project.editor.email], fail_silently=False)",
"def send_survey_if_necessary(event):\n now = timezone.now()\n if now < event.datetime_end or event.datetime_end < (now - datetime.timedelta(days=30)) or not event.approved \\\n or event.cancelled or event.survey_sent or not event.send_survey or event.contact is None:\n return\n email = SurveyEmailGenerator(event=event, subject='Post-event survey for {}'.format(event.event_name),\n to_emails=event.contact.email)\n email.send()\n # set_revision_comment('Post-event survey sent.')\n event.survey_sent = True\n event.save()",
"def send_reminder(self):\n pass",
"def test_sending_mail(self):\n\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n reminders.Patient.objects.filter(\n pk__in=[self.test_patient.pk, self.other_patient.pk]\n ).update(next_visit=appt_date)\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n self.startRouter()\n self.router.logger.setLevel(logging.DEBUG)\n\n # run email job\n from afrims.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertTrue(self.test_contact.email in message.to)\n self.stopRouter()",
"def test_skip_blank_emails(self):\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n blank_contact = self.create_contact(data={'email': ''})\n self.group.contacts.add(blank_contact)\n\n # run email job\n from aremind.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertEqual(len(message.to), 1)",
"def edit_decision_notify(request, project, edit_log, reminder=False):\n # Reject\n if edit_log.decision == 0:\n subject = 'Submission rejected for project {}'.format(project.title)\n template = 'notification/email/reject_submission_notify.html'\n # Resubmit with revisions\n elif edit_log.decision == 1:\n subject = 'Revisions requested for project {}'.format(project.title)\n template = 'notification/email/revise_submission_notify.html'\n # Accept\n else:\n subject = 'Submission accepted for project: {}'.format(project.title)\n template = 'notification/email/accept_submission_notify.html'\n # Prepend reminder to the subject if needed\n if reminder:\n subject = \"Reminder - {}\".format(subject)\n author_list = [project.author_contact_info(only_submitting=True)]\n else:\n author_list = project.author_contact_info()\n\n for email, name in author_list:\n body = loader.render_to_string(template, {\n 'name': name,\n 'project': project,\n 'edit_log': edit_log,\n 'domain': get_current_site(request),\n 'url_prefix': get_url_prefix(request),\n 'signature': settings.EMAIL_SIGNATURE,\n 'project_info': email_project_info(project),\n 'footer': email_footer(), 'SITE_NAME': settings.SITE_NAME\n })\n\n send_mail(subject, body, settings.DEFAULT_FROM_EMAIL,\n [email], fail_silently=False)",
"def send_reminder(self, url):\n variables = {\"url\": url, \"username\": self.contact.user.alias}\n send_template_email(recipients=[self.identifier],\n subject=\"Reminder from Rmnd.in!\",\n from_address=\"[email protected]\",\n variables=variables,\n template=\"email/reminder_email\")",
"def test_skip_blank_emails(self):\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n reminders.Patient.objects.filter(\n pk__in=[self.test_patient.pk, self.other_patient.pk]\n ).update(next_visit=appt_date)\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n blank_contact = self.create_contact(data={'email': ''})\n null_contact = self.create_contact(data={'email': None})\n self.group.contacts.add(blank_contact)\n self.group.contacts.add(null_contact)\n\n self.startRouter()\n self.router.logger.setLevel(logging.DEBUG)\n # run email job\n from afrims.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertEqual(len(message.to), 1)\n self.stopRouter()",
"def send_feedback_email_task(subject, message, sender, reciever):\n logger.info(\"Reminder email\")\n return send_reminder_mail(subject, message, sender, reciever)",
"def sendEmail(householdID):\n contactID = mdb.getContact(householdID)\n sqlq = \"\"\"\n SELECT Name, Surname, Address1, Address2, Town, Postcode, email, status\n FROM Contact\n WHERE idContact = '{}';\n \"\"\".format(contactID)\n result = mdb.getSQL(sqlq)[0]\n\n # thisName = (\"%s %s\" % (result['Name'], result['Surname']))\n thisName = (\"%s\" % (result['Name']))\n thisAddress = (\"%s</br>%s</br>%s %s\" % (result['Address1'], result['Address2'], result['Town'], result['Postcode']))\n thisAddress = thisAddress.replace(\"None </br>\", \"\")\n thisEmail = (\"%s\" % (result['email']))\n thisStatus = (\"%s\" % (result['status']))\n thisAddress = thisAddress.replace(\"None</br>\", \"\")\n participantCount = (\"%s\" % mdb.getParticipantCount(str(householdID)))\n # prepare the custom email\n\n thisPath = os.path.dirname(os.path.abspath(__file__))\n if (thisStatus == 'de'):\n # DOESN'T happen yet - de is excluded from query for now\n # emailPath = os.path.join(thisPath, \"emails/email_confirm_de.html\")\n locale.setlocale(locale.LC_ALL, 'de_DE.utf8')\n else:\n emailPath = os.path.join(thisPath, \"emails/email_automated_date.html\")\n\n templateFile = open(emailPath, \"r\")\n templateText = templateFile.read()\n templateFile.close()\n templateText = templateText.replace(\"[householdID]\", householdID)\n templateText = templateText.replace(\"[contactID]\", contactID)\n templateText = templateText.replace(\"[name]\", thisName)\n templateText = templateText.replace(\"[address]\", thisAddress)\n templateText = templateText.replace(\"[securityCode]\", mdb.getSecurityCode(householdID))\n templateText = templateText.replace(\"[participantCount]\", participantCount)\n\n # Subject\n subjectLine = templateText.splitlines()[0]\n templateText = templateText[templateText.find('\\n') + 1:] # find line break and return all from there - i.e. remove first line\n \n # email file\n emailFilePath = os.path.join(thisPath, \"tempEmail.htmail\")\n\n emailFile = open(emailFilePath, \"w+\")\n emailFile.write(templateText)\n emailFile.close()\n\n # call('mutt -e \"set content_type=text/html\" -s \"[TESTING]' + subjectLine + '\" [email protected] < ' + emailFilePath, shell=True)\n call('mutt -e \"set content_type=text/html\" -s \"' + subjectLine + '\" ' + thisEmail + ' -b [email protected] < ' + emailFilePath, shell=True)",
"def test_sending_mail(self):\n\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n # run email job\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertTrue(self.test_contact.email in message.to)",
"def submit_notify(project):\n subject = 'Submission of project: {}'.format(project.title)\n email_context = {\n 'project': project,\n 'signature': settings.EMAIL_SIGNATURE,\n 'project_info': email_project_info(project),\n 'footer': email_footer(),\n 'SITE_NAME': settings.SITE_NAME,\n }\n\n for email, name in project.author_contact_info():\n email_context['name'] = name\n body = loader.render_to_string(\n 'notification/email/submit_notify.html', email_context)\n\n send_mail(subject, body, settings.DEFAULT_FROM_EMAIL,\n [email], fail_silently=False)\n\n # notify editorial team\n if project.core_project.publishedprojects.exists():\n subject = 'A new version has been submitted: {0}'.format(project.title)\n else:\n subject = 'A new project has been submitted: {0}'.format(project.title)\n email_context['name'] = \"Colleague\"\n body = loader.render_to_string(\n 'notification/email/submit_notify_team.html', email_context)\n\n send_mail(subject, body, settings.DEFAULT_FROM_EMAIL,\n [settings.CONTACT_EMAIL], fail_silently=False)",
"def mail_send():\n report_file_path = (\n f'{os.path.abspath(\".\")}/{Common.get_config_value(\"report_location\")}'\n )\n with open(f\"{report_file_path}/subject\", \"rb\") as subject_handler:\n subject = pickle.load(subject_handler)\n with open(f\"{report_file_path}/{'recipient'}\", \"rb\") as recipient_handler:\n recipient = pickle.load(recipient_handler)\n report_file_path = (\n f\"{os.path.abspath('.')}/{Common.get_config_value('report_location')}\"\n )\n try:\n if os.path.isfile(f\"{report_file_path}/mail_report.html\"):\n os.popen(\n f\"ssh -i {Common.get_config_value('build_server_pemfile')} \"\n f\"-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no\"\n f\" root@{Common.get_config_value('build_server_hostname')}\"\n f\" {Common.get_config_value('mail_script_location')}/\"\n f\"{Common.get_config_value('mail_script_name')} \"\n f\"{subject} {recipient}\"\n )\n Common.logger.info(\"Mail send successfully\")\n except Exception as ex:\n Common.logger.warning(f\"Mail sent failed due to exception: {ex}\")",
"def recs():\n click.echo(\"Emailing recommendations to destination...\")\n dio_dir: DioDir = DioDir()\n sched: ScheduleABC = DefaultSchedule()\n today: datetime.date = datetime.datetime.now().date()\n res: Optional[List[Person]] = get_recs(dio_dir, sched, today)\n next_day: datetime.date = sched.next_emailing_day(today)\n message: str = recs_to_message(res, next_day)\n settings: Optional[Settings] = dio_dir.get_settings()\n assert settings is not None, \"Have to setup diogenes to get emails. Run `dio setupemail`\"\n send_message(message, today, settings)\n click.echo(\"Recommendations emailed!\")",
"def test_send_weekly_update_email(self, email_mock, summary_mock,\n contacts_mock):\n self._load_fixtures()\n rapidpro_contact = self._get_rapidpro_contact()\n district_list = self._district_summary_data()\n totals = self._district_summary_totals(district_list)\n contacts_mock.return_value = [rapidpro_contact]\n summary_mock.return_value = district_list, totals\n send_weekly_update_email()\n self.assertTrue(contacts_mock.called)\n self.assertTrue(summary_mock.called)\n self.assertTrue(email_mock.called)\n contact_args, contact_kwargs = contacts_mock.call_args_list[0]\n self.assertEqual(contact_args[0],\n settings.RAPIDPRO_WEEKLY_UPDATE_CONTACT_GROUP)\n email_args, email_kwargs = email_mock.call_args_list[0]\n self.assertEqual(email_args[0][0], 'Mosh <[email protected]>')\n self.assertEqual(email_args[1], district_list)\n self.assertEqual(email_args[2], totals)",
"def send_created_email(self):\n if settings.NOTIFY_NEW_REG:\n to = settings.NOTIFY_NEW_REG\n message = \"\"\"\\\nGreetings,<br><br>\n\nA new vehicle registration has been submitted by %s.<br><br>\n\nGo here to view or edit the request: <br>\n<a href=\"%s\">%s</a>\n<br><br>\nSincerely,<br><br>\nThe Janelia Parking Permit Program\n \"\"\" % (self.user_display_name(), self.get_edit_url(True), self.get_edit_url(True))\n subject = 'A new parking permit request has been entered'\n from_email = '[email protected]'\n text_content = re.sub(r'<[^>]+>','',message)\n html_content = message\n msg = EmailMultiAlternatives(subject, text_content, from_email, to)\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()",
"def test_send_notification_with_reports_filled(self, fake_requests_obj):\n # act like it's March 2012\n fake_date = datetime.datetime(year=2012, month=3, day=1)\n (fake_requests_obj.expects_call().returns(fake_date))\n\n management.call_command('send_third_report_notification', [], {})\n eq_(len(mail.outbox), 3)",
"def notify_students():\n time_now = datetime.datetime.now(get_localzone())\n emails_to_send = Email.objects.all()\n for email in emails_to_send:\n if email.assignment.date_assigned <= time_now:\n send_mail(subject=email.subject,\n message=email.message,\n recipient_list=Student.objects.filter(assignments=email.assignment),\n from_email=None,\n fail_silently=False)\n email.delete()",
"def test_send_notification_with_reports_filled(self, fake_requests_obj):\n # act like it's March 2012\n fake_date = datetime.datetime(year=2012, month=3, day=1)\n (fake_requests_obj.expects_call().returns(fake_date))\n\n management.call_command('send_mentor_report_notification', [], {})\n eq_(len(mail.outbox), 3)",
"def notify_grd_operator_documents(reminder_indicator):\n filters = {'docstatus': 0,'workflow_state':'Booked','preparing_documents':'No','date_and_time_confirmation':['=',today()],'reminded_grd_operator_documents': 0, 'reminded_grd_operator_documents_again':0}\n if reminder_indicator == 'red':\n filters['reminded_grd_operator_documents'] = 1\n filters['reminded_grd_operator_documents_again'] = 0 \n fp_list = frappe.db.get_list('Fingerprint Appointment', filters, ['name', 'grd_operator', 'grd_supervisor'])\n \n cc = [fp_list[0].grd_supervisor] if reminder_indicator == 'red' else []\n email_notification_to_grd_user('grd_operator', fp_list, reminder_indicator, 'Prepare Documents', cc)\n \n if reminder_indicator == 'red':\n field = 'reminded_grd_operator_documents_again'\n elif reminder_indicator == 'yellow':\n field = 'reminded_grd_operator_documents'\n frappe.db.set_value(\"Fingerprint Appointment\", filters, field, 1)",
"def sendEmail(householdID):\n contactID = mdb.getContact(householdID)\n sqlq = \"\"\"\n SELECT Name, Surname, Address1, Address2, Town, Postcode, email, status\n FROM Contact\n WHERE idContact = '{}';\n \"\"\".format(contactID)\n result = mdb.getSQL(sqlq)[0]\n\n thisName = (\"%s\" % (result['Name']))\n thisEmail = (\"%s\" % (result['email']))\n thisStatus = (\"%s\" % (result['status']))\n\n # prepare the custom email\n thisPath = os.path.dirname(os.path.abspath(__file__))\n if (thisStatus == 'de'):\n emailPath = os.path.join(thisPath, \"emails/email_graph_de.html\")\n locale.setlocale(locale.LC_ALL, 'de_DE.utf8')\n else:\n emailPath = os.path.join(thisPath, \"emails/email_graph.html\")\n dtChoice = mdb.getHHdtChoice(householdID)\n thisDate = dtChoice.strftime(\"%A, %-d %B\")\n\n templateFile = open(emailPath, \"r\")\n templateText = templateFile.read()\n templateFile.close()\n templateText = templateText.replace(\"[householdID]\", householdID)\n templateText = templateText.replace(\"[contactID]\", contactID)\n templateText = templateText.replace(\"[name]\", thisName)\n templateText = templateText.replace(\"[date]\", thisDate)\n templateText = templateText.replace(\"[securityCode]\", mdb.getSecurityCode(householdID))\n\n # Subject\n subjectLine = templateText.splitlines()[0]\n templateText = templateText[templateText.find('\\n') + 1:] # find line break and return all from there - i.e. remove first line\n \n # email file\n emailFilePath = os.path.join(thisPath, \"tempEmail.htmail\")\n emailFile = open(emailFilePath, \"w+\")\n emailFile.write(templateText)\n emailFile.close()\n\n # call('mutt -e \"set content_type=text/html\" -s \"[TESTING]' + subjectLine + '\" [email protected] < ' + emailFilePath, shell=True)\n call('mutt -e \"set content_type=text/html\" -s \"' + subjectLine + '\" ' + thisEmail + ' -b [email protected] < ' + emailFilePath, shell=True)",
"def send_warning(self):\n\n # Check whether all the necessary parameters for SMS are present\n if self.your_phone != '' and self.twilio_phone != '' and self.account_sid != '' and self.auth_token != '':\n client = Client(self.account_sid, self.auth_token)\n\n try:\n sms = client.messages.create(\n body=\"\"\"Last will: It was at least 30 days since your last check in. \n This is a reminder to check in in the next 24 hours.\"\"\",\n from_=self.twilio_phone,\n to=self.your_phone)\n sms\n print(\"\\nSMS sent\")\n except Exception as e:\n print(f\"An error occurred while trying to send the SMS. Error: {e}\")\n\n else:\n print(\"\\nMissing SMS parameters. SMS not sent\")\n\n # Check whether all the necessary parameters for email are present\n if self.sender_name != '' and self.recipient_email != '' and self.email != '' and self.email_pwd != '':\n message = f\"\"\"It has been at least 30 days since you last checked in. \nYou need to check in in the next 24 hours.\\n\nOtherwise at {self.deadline} the email with the important info will be sent to the designated recipient.\\n\nIn order to reset simply go to the working directory and run python3 last_will.py\"\"\"\n\n # send_email will return 0 if everything went ok, otherwise it will return an error message\n status = send_email(self.sender_name, self.your_email,\n self.email, self.email_pwd,\n subject='Last will: Reminder to check in', unencrypted_message=message)\n\n if status != 0:\n print(status)\n exit(1)\n else:\n print(\"Email sent\\n\")\n\n print(f\"You have until {self.deadline} to check in. \"\n f\"In order to do that simply go to the working directory and run ./last_will.sh\\n\")\n else:\n print(\"Missing email parameters. Email not sent.\\n\")\n exit(1)",
"def send_mail():\n msg = MIMEMultipart()\n msg[\"From\"] = \"[email protected]\"\n msg[\"To\"] = SENT_TO\n msg[\"Subject\"] = \"The Hive Case Metrics\"\n msg.attach(MIMEText(\"Attached are the requested case metrics in .XLSX format.\"))\n part = MIMEBase(\"application\", \"octet-stream\")\n part.set_payload(open(\"Hive Metrics.xlsx\", \"rb\").read())\n encoders.encode_base64(part)\n part.add_header(\"Content-Disposition\", 'attachment; filename=\"Hive Metrics.xlsx\"')\n msg.attach(part)\n smtp = smtplib.SMTP(SMTP_SERVER)\n smtp.starttls()\n smtp.sendmail(msg[\"From\"], msg[\"To\"].split(\",\"), msg.as_string())\n smtp.quit()",
"def handle(self, *args, **options):\n\n candidates_with_email = [candidate for candidate in Candidate.objects.all()\n if candidate.contact_address and candidate.participating]\n\n\n print 'sending e-mails'\n conn = get_connection()\n for c in candidates_with_email:\n if c.should_send_reminder():\n\n print 'emailing', c\n # store timestamp for reminder email so that they don't get another one for <REMINDER_TIME_PERIOD> days\n c.last_reminder_sent = timezone.now()\n c.save()\n msg = make_email(c)\n conn.send_messages([msg])\n conn.close()",
"def test_send_notification_with_reports_filled(self, fake_requests_obj):\n # act like it's March 2012\n fake_date = datetime.datetime(year=2012, month=3, day=1)\n (fake_requests_obj.expects_call().returns(fake_date))\n\n management.call_command('send_second_report_notification', [], {})\n eq_(len(mail.outbox), 3)"
] | [
"0.6965906",
"0.65784955",
"0.6296216",
"0.62702554",
"0.6187918",
"0.6152014",
"0.61322725",
"0.61321557",
"0.6081931",
"0.6029177",
"0.59823436",
"0.5896652",
"0.58888614",
"0.58611333",
"0.5771516",
"0.5752663",
"0.5662646",
"0.56331736",
"0.56252635",
"0.5573855",
"0.55688256",
"0.55227566",
"0.5515175",
"0.54937905",
"0.5491273",
"0.54633933",
"0.54509914",
"0.5450612",
"0.54455256",
"0.5444627"
] | 0.77263916 | 0 |
This method is called when the viewer is initialized. Optionally implement this method, if you need to tinker with camera position and so forth. | def viewer_setup(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _initialize(self):\n if not self._is_initialized:\n self.connect(retries=Camera.CONNECTION_RETRIES)\n self.cam.resolution = (self.resolution['x'], self.resolution['y'])\n self.cam.start_preview()\n time.sleep(2)\n self._is_initialized = True",
"def initialize(self):\n self.Update()\n ViewportManager.updateAll()\n self.wxStep()\n ViewportManager.initializeAll()\n # Position the camera\n if base.trackball is not None:\n base.trackball.node().setPos(0, 30, 0)\n base.trackball.node().setHpr(0, 15, 0)\n\n # to make persp view as default\n self.perspViewMenuItem.Check()\n self.onViewChange(None, 3)\n\n # initializing direct\n if self.fStartDirect:\n base.startDirect(fWantTk = 0, fWantWx = 0)\n\n base.direct.disableMouseEvents()\n newMouseEvents = [\"_le_per_%s\"%x for x in base.direct.mouseEvents] +\\\n [\"_le_fro_%s\"%x for x in base.direct.mouseEvents] +\\\n [\"_le_lef_%s\"%x for x in base.direct.mouseEvents] +\\\n [\"_le_top_%s\"%x for x in base.direct.mouseEvents]\n base.direct.mouseEvents = newMouseEvents\n base.direct.enableMouseEvents()\n\n base.direct.disableKeyEvents()\n keyEvents = [\"_le_per_%s\"%x for x in base.direct.keyEvents] +\\\n [\"_le_fro_%s\"%x for x in base.direct.keyEvents] +\\\n [\"_le_lef_%s\"%x for x in base.direct.keyEvents] +\\\n [\"_le_top_%s\"%x for x in base.direct.keyEvents]\n base.direct.keyEvents = keyEvents\n base.direct.enableKeyEvents()\n\n base.direct.disableModifierEvents()\n modifierEvents = [\"_le_per_%s\"%x for x in base.direct.modifierEvents] +\\\n [\"_le_fro_%s\"%x for x in base.direct.modifierEvents] +\\\n [\"_le_lef_%s\"%x for x in base.direct.modifierEvents] +\\\n [\"_le_top_%s\"%x for x in base.direct.modifierEvents]\n base.direct.modifierEvents = modifierEvents\n base.direct.enableModifierEvents()\n\n base.direct.cameraControl.lockRoll = True\n base.direct.setFScaleWidgetByCam(1)\n\n unpickables = [\n \"z-guide\",\n \"y-guide\",\n \"x-guide\",\n \"x-disc-geom\",\n \"x-ring-line\",\n \"x-post-line\",\n \"y-disc-geom\",\n \"y-ring-line\",\n \"y-post-line\",\n \"z-disc-geom\",\n \"z-ring-line\",\n \"z-post-line\",\n \"centerLines\",\n \"majorLines\",\n \"minorLines\",\n \"Sphere\",]\n\n for unpickable in unpickables:\n base.direct.addUnpickable(unpickable)\n\n base.direct.manipulationControl.optionalSkipFlags |= SKIP_UNPICKABLE\n base.direct.manipulationControl.fAllowMarquee = 1\n base.direct.manipulationControl.supportMultiView()\n base.direct.cameraControl.useMayaCamControls = 1\n base.direct.cameraControl.perspCollPlane = self.perspView.collPlane\n base.direct.cameraControl.perspCollPlane2 = self.perspView.collPlane2\n\n for widget in base.direct.manipulationControl.widgetList:\n widget.setBin('gui-popup', 0)\n widget.setDepthTest(0)\n\n # [gjeon] to intercept messages here\n base.direct.ignore('DIRECT-delete')\n base.direct.ignore('DIRECT-select')\n base.direct.ignore('DIRECT-preDeselectAll')\n base.direct.ignore('DIRECT-toggleWidgetVis')\n base.direct.fIgnoreDirectOnlyKeyMap = 1\n\n # [gjeon] do not use the old way of finding current DR\n base.direct.drList.tryToGetCurrentDr = False\n\n else:\n base.direct=None\n #base.closeWindow(base.win)\n base.win = base.winList[3]",
"def setup_camera(self) -> None:\n self.world.camera.update(\n cam_base_pos=(0, -3, 0),\n cam_dist=1.2*self.world.env_dim,\n cam_yaw=0,\n cam_pitch=-60\n )",
"def initViewer(self, viewer=None, open=False, loadModel=False):\n\n import meshcat\n\n self.viewer = meshcat.Visualizer() if viewer is None else viewer\n\n if open:\n self.viewer.open()\n\n if loadModel:\n self.loadViewerModel()",
"def setupCamera(self) :\n\t\tbase.disableMouse()\n\t\tbase.camera.setPos(self.avatarNP.getPos())\n\t\tbase.camera.setZ(self.avatarNP.getZ()+1.5)\n\t\tbase.camera.setHpr(self.avatarNP.getHpr()[0],0,0)\t\t\n\t\tself.fieldAngle = 46.8\t# similar to human eye;\n\t\t\t\t\t# change this to zoom in/out\n\t\tbase.camLens.setFov(self.fieldAngle)",
"def setup_camera(self) -> None:\n self.world.camera.update(\n cam_base_pos=(3., 0, 2),\n cam_dist=2.5,\n cam_yaw=90,\n cam_pitch=-50\n )",
"def setupCamera(self):\n\t\tself.eye = self.vr.newEye(\"test_cam\")\n\t\tself.eye.reposition(0.0, 1.0, 0.5, 0.0, 0.0, 0.0)\n\t\tself.eye.setFOV(self.config.camFOV)\n\t\n\t\tself.video.clear(\"black\")\n\t\tself.video.show(self.eye, 0, 0)",
"def initialCamera(self, cmd):\n\n pass",
"def initialize_camera(self, distance, yaw, pitch, x=0, y=0, z=0):\n lookat = [x, y, z]\n\n self._env.unwrapped._p.resetDebugVisualizerCamera(\n distance, yaw, pitch, lookat)",
"def onInit(self):\n pass",
"def on_show_view(self):\n self.setup()",
"def on_show_view(self):\n self.setup()",
"def on_show_view(self):\n self.setup()",
"def make_main(self):\n\t\tself.scene.camera = self.main_camera",
"def appInit(self):\n glMatrixMode( GL_PROJECTION )\n glLoadIdentity()\n glMatrixMode( GL_MODELVIEW )\n glLoadIdentity()\n\n glClearColor(0.0, 0.0, 0.0, 0.0)\n glClearDepth(1.0)\n glEnable( GL_DEPTH_TEST )\n glShadeModel( GL_SMOOTH )\n glEnable( GL_NORMALIZE )\n glEnable( GL_COLOR_MATERIAL )\n\n self.set_lighting()\n glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB | GLUT_DEPTH)\n self.make_simple_scenes()\n self.make_multi_object_scene()",
"def _set_init_pose(self):\n raise NotImplementedError()",
"def _set_init_pose(self):\n raise NotImplementedError()",
"def _set_init_pose(self):\n raise NotImplementedError()",
"def on_initialize(self) -> None:\n pass",
"def __init__(self, viewer: geoviewer.GeoGraphViewer) -> None:\n super().__init__()\n self.viewer = viewer\n\n # Setting log with handler, that allows access to log\n # via self.log_handler.show_logs()\n self.logger = logging.getLogger(type(self).__name__)\n self.logger.setLevel(self.viewer.logger.level)\n self.log_handler = self.viewer.log_handler\n self.logger.addHandler(self.log_handler)\n\n self.logger.info(\"BaseControlWidget initialised.\")",
"def initialize_default(self):\n self.initialize_navigation()\n self.initialize_viewport()",
"def initialize_visualization(self) -> None:\n pass",
"def setCamera(self, viewX=0, viewY=0):\n self.viewX = viewX\n self.viewY = viewY",
"def setup(self):\n self.fname = None\n self.remote = self.camera.get('remote', None)\n self.picture_directory = self.camera.get('directory', Bawt.DEFAULT_DIRECTORY)\n self.resolution = self.camera.get('resolution', Bawt.DEFAULT_RESOLUTION)\n LOG.info(\"Picture directory set to: %s\" % self.picture_directory)\n LOG.info(\"Resolution set to %s\" % self.resolution)\n self.timelapse = self.camera.get('timelapse', None)\n self._is_initialized = False",
"def init_view(self):\n self.view_map = self.ctx.clientmap",
"def initializeGL(self):\n self._graphicsInitialized = True\n if self._context:\n self._createSceneviewer()\n self.graphicsInitialized.emit()\n # initializeGL end",
"def initialize_scene(self):\n if Time.now() - self.initial_time > 0.45 and self.should_initialize:\n self.should_initialize = False\n self.background_particle_controller = BackgroundParticlesController()\n self.player_controller = PlayerController()\n self.obstacle_controller_wrapper = ObstacleControllerWrapper()\n self.items_controller = ItemsControllerWrapper()\n self.score_controller = ScoreController()",
"def _update_camera(self, render=False):\n self._renderer.set_camera(\n # needs fix, distance moves when focal point updates\n distance=self._renderer.plotter.camera.distance * 0.9,\n focalpoint=tuple(self._ras),\n reset_camera=False)",
"def on_init(self):\n self.controller = gameController.Controller()",
"def view_init(self, elev=None, azim=None):\n\n self.dist = 10\n\n if elev is None:\n self.elev = self.initial_elev\n else:\n self.elev = elev\n\n if azim is None:\n self.azim = self.initial_azim\n else:\n self.azim = azim"
] | [
"0.7294715",
"0.707441",
"0.7071216",
"0.7047371",
"0.7039038",
"0.697051",
"0.6797928",
"0.67072433",
"0.66976625",
"0.6676851",
"0.6588336",
"0.6588336",
"0.6588336",
"0.6553483",
"0.6510361",
"0.65016794",
"0.65016794",
"0.65016794",
"0.6449363",
"0.64364845",
"0.63936776",
"0.6388893",
"0.6388701",
"0.6370243",
"0.63702327",
"0.63482827",
"0.63360363",
"0.6306012",
"0.6305086",
"0.6303333"
] | 0.804509 | 0 |
Count number of paths by constructing Pascal's triangle, which is dynamic programming. | def npaths_dp(x,y):
## We'll fill in each position in the grid with the number of ways
## to get from the start to that position.
grid = [[None for j in range(y+1)] for i in range(x+1)]
## The grid will look something like this:
## 1-1-1-1- ...
## | | | |
## 1-2-3-4- ...
## | | | |
## 1-3-6-10-...
## ...which is just Pascal's Triangle.
## along the edges, there's only 1 path
for i in range(x+1):
grid[i][0] = 1
for j in range(y+1):
grid[0][j] = 1
## any position in the grid is the sum of the two positions
## to the left and up from the current one.
for i in range(1, x+1):
for j in range(1, y+1):
grid[i][j] = grid[i-1][j] + grid[i][j-1]
## print out the grid, just for laughs
# for r in grid:
# print(r)
## return the resulting count
return grid[x][y] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_triangle_count_08(self):\n body = {\"direction\": \"IN\", \"degree\": 1}\n code, res = Algorithm().post_triangle_count(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'edges_in': 13, 'vertices_in': 9, 'triangles': 0}\n else:\n assert 0",
"def findTriangles(p):\n triangleCount = 0\n for a in range(3, p//3 + 1):\n for b in range(a+1, p//2):\n c = p - (a+b)\n if (a**2 + b**2) == c**2:\n triangleCount += 1\n return triangleCount",
"def ss_triangle_count(graph: ScipyGraph) -> int:\n props = ScipyGraph.Type.compute_abstract_properties(graph, {\"edge_type\"})\n if props[\"edge_type\"] == \"map\":\n # Drop weights before performing triangle count\n m = graph.value.copy()\n m.data = np.ones_like(m.data)\n elif props[\"edge_type\"] == \"set\":\n m = graph.value\n L = ss.tril(m, k=-1).tocsr()\n U = ss.triu(m, k=1).tocsc()\n return int((L @ U.T).multiply(L).sum())",
"def test_triangle_count_07(self):\n body = {\"direction\": \"OUT\", \"degree\": 1}\n code, res = Algorithm().post_triangle_count(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'edges_out': 13, 'vertices_out': 7, 'triangles': 0}\n else:\n assert 0",
"def dynamic_programming_path_counter(grid_size):\n G = [1] * grid_size\n for i in range(grid_size):\n for j in range(i):\n G[j] = G[j] + G[j-1]\n G[i] = 2 * G[i - 1]\n return G[grid_size - 1]",
"def test_triangle_count_06(self):\n body = {\"direction\": \"OUT\", \"degree\": -1}\n code, res = Algorithm().post_triangle_count(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'edges_out': 13, 'vertices_out': 7, 'triangles': 2}\n else:\n assert 0",
"def test_triangle_count_04(self):\n body = {\"direction\": \"OUT\"}\n code, res = Algorithm().post_triangle_count(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'edges_out': 13, 'vertices_out': 7, 'triangles': 2}\n else:\n assert 0",
"def test_triangle_count_05(self):\n body = {\"direction\": \"IN\", \"degree\": -1}\n code, res = Algorithm().post_triangle_count(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'edges_in': 13, 'vertices_in': 9, 'triangles': 2}\n else:\n assert 0",
"def count_paths_staircase(length_staircase, possible_actions):\n path = [0] * length_staircase\n # First we add our possible_actions to our path count\n for i in possible_actions:\n path[i - 1] = 1\n # Compute number of path combinations to every step\n for i in range(length_staircase):\n for j in possible_actions:\n k = i + j\n if k >= length_staircase:\n continue\n path[k] += path[i]\n return path",
"def count_pathologies(graph):\n return Counter(_pathology_iterator(graph))",
"def numPaths(self):\n if self.numpaths > -1:\n return self.numpaths\n\n if self.jolt == 0:\n return 1\n\n paths = 0\n for parent in self.parents:\n paths += parent.numPaths()\n \n return paths",
"def pascal_triangle(n: int, r: int):\n if n==0\n return 1\n elif r ==0:\n return 1\n elif n==r:\n return 1\n else\n return pascal_triangle(n-1,r) + pascal_triangle(n-1, r-1)",
"def num_paths(A, r, c, target):\n\tif target == \"\":\n\t\treturn 1\n\tM = len(A) # Number of rows\n\tN = M and len(A[0]) # Number of columns, 0 if A is empty\n\tS = len(target)\n\n\tmemo = [[[None for i in range(S)] for j in range(N)] for k in range(M)] # REPLACE\n\tdef count(r, c, k):\n\t\t\"\"\"The number of paths through A starting at R, C that match\n\t\tTARGET[k:].\"\"\"\n\t\tif 0 <= r < M and 0 <= c < N:\n\t\t\tif A[r][c] == target[k]:\n\t\t\t\t\"*** YOUR CODE HERE ***\"\n\t\t\t\treturn memoized_count(r, c, k)\n\t\t\telse:\n\t\t\t\treturn 0\n\t\telse:\n\t\t\treturn 0\n\n\tdef memoized_count(r1, c1, k1):\n\t\t\"*** YOUR CODE HERE ***\"\n\t\tif k1 == S-1 and A[r1][c1] == target[k1]:\n\t\t\tmemo[r1][c1][k1] = 1\n\t\tif memo[r1][c1][k1] is not None:\n\t\t\treturn memo[r1][c1][k1]\n\t\telse:\n\t\t\tif A[r1][c1] == target[k1]:\n\t\t\t\tmemo[r1][c1][k1] = 0\n\t\t\t\tfor i in [-1, 0, 1]:\n\t\t\t\t\tfor j in [-1, 0, 1]:\n\t\t\t\t\t\tif 0 <= r1+i < M and 0 <= c1+j < N and not (i == 0 and j == 0):\n\t\t\t\t\t\t\tmemo[r1][c1][k1] += memoized_count(r1+i, c1+j, k1+1)\n\t\t\telse:\n\t\t\t\tmemo[r1][c1][k1] = 0\n\t\t\treturn memo[r1][c1][k1]\n\n\tr = count(r, c, 0)\n\treturn r",
"def solve(n: int) -> None:\n count_triangles = 3 * n * n\n for x in range(1, n+1):\n for y in range(1, x+1):\n xy_gcd = gcd(x, y)\n move_x, move_y = x // xy_gcd, y // xy_gcd\n i = 1\n while y + i * move_x <= n and x - i * move_y >= 0:\n count_triangles += 1 + int(x != y)\n i += 1\n i = 1\n while y - i * move_x >= 0 and x + i * move_y <= n:\n count_triangles += 1 + int(x != y)\n i += 1\n print(count_triangles)",
"def get_num_vertices(triangles):\n return numpy.amax(numpy.reshape(triangles, -1)) + 1",
"def calculate_paths(shape: Tuple[int, int], point: Tuple[int, int]) -> int:\n\tn, m = map(int, input().split())\n\tf = [[0] * (m+1) for i in range(n+1)]\n\tf[1][1] = 1\n\tfor i in range(2, n+1):\n\t\tfor j in range(2, m + 1):\n\t\t\tf[i][j] = f[i-2][j-2] + f[i-2][j-1]\n\treturn n + m",
"def triadic_census(G):\n\t\n\t# this algorithm requires the nodes be integers from 0 to n, so we use the node's indexes in G.nodes()\n\t\n\t# initialze the count to zero\n\tcount = dict((n,0) for n in TRIAD_NAMES)\n\tfor vi,v in enumerate(G):\n\t\tfor u in set(itertools.chain(G.predecessors_iter(v),G.successors_iter(v))):\n\t\t\tui = G.nodes().index(u)\n\t\t\tif ui<=vi : continue\n\t\t\tneighbors = set(itertools.chain(G.successors_iter(v),G.successors_iter(u),G.predecessors_iter(u),G.predecessors_iter(v)))\n\t\t\tneighbors.remove(u)\n\t\t\tneighbors.remove(v)\n\t\t\t\n\t\t\t# calculate dyadic triads instead of counting them\n\t\t\tif G.has_edge(u,v) and G.has_edge(v,u):\n\t\t\t\tcount[\"102\"] += len(G) - len(neighbors) - 2 \n\t\t\telse:\n\t\t\t\tcount[\"012\"] += len(G) - len(neighbors) - 2\t\n\t\t\t\n\t\t\t# count connected triads\n\t\t\tfor w in neighbors:\n\t\t\t\twi = G.nodes().index(w)\n\t\t\t\tif ui<wi or(vi<wi and wi<ui and not v in G.predecessors(w) and not v in G.successors(w)):\n\t\t\t\t\tcode = _tricode(G,v,u,w)\n\t\t\t\t\tcount[TRICODE_TO_NAME[code]] +=1\n\t\n\t# null triads = total number of possible triads - all found triads\t\t\n\tn = len(G)\n\tcount[\"003\"] = ((n * (n-1) * (n-2)) / 6) - sum(count.values())\n\treturn count",
"def pascal_triangle(n):\n if n > 0:\n triangle_rows = [[1]]\n for i in range(1, n):\n middle_numbers_list = []\n for pair in zip(triangle_rows[-1], triangle_rows[-1][1:]):\n middle_numbers_list.append(sum(pair))\n triangle_rows.append([1] + middle_numbers_list + [1])\n return print(triangle_rows)\n else:\n return print([])",
"def triangle(self, freq: int, /) -> None:",
"def triangle(n):\n return n*(n+1)/2",
"def path_cost(path):\n return len(path)",
"def triangleNumber(n):\n return sum(range(n+1))",
"def count_ways(n):\n if n < 0:\n return 0\n elif n == 0:\n return 1\n else:\n total = 0\n for i in range(1, min(n, 3) + 1):\n total += count_ways(n - i)\n return total",
"def pascal_triangle(n):\n\n if n <= 0:\n return []\n\n l = [[0 for x in range(i + 1)] for i in range(n)]\n l[0] = [1]\n\n for i in range(1, n):\n l[i][0] = 1\n for j in range(1, i + 1):\n if j < len(l[i - 1]):\n l[i][j] = l[i - 1][j - 1] + l[i - 1][j]\n else:\n l[i][j] = l[i - 1][0]\n return l",
"def triangle(n: int) -> int:\n return int(n * (n + 1) / 2)",
"def pascal_triangle(n):\n triangle = []\n begin = 1\n for y in range(0, n):\n row = []\n for x in range(0, y + 1):\n if y == 0 or x == 0 or (y > 0 and x == y):\n row.append(begin)\n else:\n row.append(triangle[y - 1][x] + triangle[y - 1][x - 1])\n triangle.append(row)\n return triangle",
"def pascal_triangle(n):\n ans = []\n if n <= 0:\n return ans\n ans.append([1])\n if n == 1:\n return ans\n ans.append([1, 1])\n if n == 2:\n return ans\n for i in range(2, n):\n newlist = []\n newlist.append(1)\n fill = [ans[i-1][k] + ans[i-1][k+1] for k in range(len(ans) - 1)]\n newlist += fill\n newlist.append(1)\n ans.append(newlist)\n return ans",
"def triangle(n):\n\n accumulator = 0\n\n for i in range(1,n+1):\n accumulator += i\n\n return accumulator",
"def Hashtables__Triplets():\n # URL: https://www.hackerrank.com/challenges/count-triplets-1/problem\n ## Passes all tests\n # O(n) ish.\n # dae9ccff5aea4a8ca6e087a7c16bd70d Notability notes\n from collections import defaultdict\n from dataclasses import dataclass\n\n @dataclass\n class I:\n idx: int\n cnt: int\n\n\n def countTriplets(arr, r):\n d = defaultdict(list)\n prev_count = defaultdict(int) #\n triple_count = 0\n for i, v in enumerate(arr):\n prev = v / r # (!) Integer division can be wrong. 17 // 3 -> 5. This builds incorrect previous (5, 17)\n prev_prev = (prev / r, prev)\n\n if prev_prev in d:\n # cnt = sum([i.cnt for i in d[prev_prev]]) # Counting the whole chain can be O(n) ish. Tests 6,11 fail.\n cnt = prev_count[(prev / r, prev, \"sum\")] # Optimization, keep rolling sum. -> O(1)\n triple_count += cnt\n if prev in d:\n prev_c = len(d[prev]) # O(1)\n d[(prev, v)].append(I(i, prev_c))\n prev_count[(prev, v, \"sum\")] += prev_c # Keep rolling su.\n d[v].append(i)\n\n return triple_count\n\n _, r = [int(i) for i in input().split()]\n arr = [float(i) for i in input().split()]\n print(countTriplets(arr, r))\n\n #### wip entries\n # T (Submission 6) -> (integer devision issue.\n # 100000 3\n # 1 17 80 68 5 5 58 17 38 81 26 44 38 6 12 ...\n # expr: 2325652489\n # Act : 667065187 << wrong, under count.\n # ac2 : 19107507001 << wrong, over count. (integer devision issue.\n # ac3: 2325652489",
"def triangle(n):\n return (n * (n + 1)) / 2"
] | [
"0.6798037",
"0.670736",
"0.66936386",
"0.6689137",
"0.6681198",
"0.6662002",
"0.6618841",
"0.65798163",
"0.64765126",
"0.6342883",
"0.630705",
"0.6297395",
"0.6195524",
"0.61252505",
"0.6113715",
"0.6110095",
"0.60757464",
"0.60736287",
"0.60501665",
"0.6022417",
"0.6019725",
"0.6017441",
"0.601049",
"0.60096693",
"0.5962062",
"0.5941856",
"0.5939903",
"0.5916239",
"0.588503",
"0.5880059"
] | 0.70024705 | 0 |
return the product of a sequence of factors | def prod(factors):
return reduce(operator.mul, factors, 1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def product(factors):\n product = 1\n for i in factors:\n product *= i\n return product",
"def rf_prod(prime_factors: [int, ]):\n return 1 if not prime_factors else reduce(mul, prime_factors, 1)",
"def mul_factor(factors: List[Tuple[int, int]]) -> int:\n n = 1\n for f in factors:\n n *= f[0] ** f[1]\n return n",
"def _prod(seq):\n return reduce(lambda x, y: x*y, seq, 1)",
"def inline_product(factors, seed):\n for r in factors:\n seed *= r\n return seed",
"def prod(seq):\n p = 1\n for a in seq:\n p *= a\n return p",
"def product(it):\n prod = 1\n for x in it:\n prod *= x\n return prod",
"def _compute_factors(roots, multiplicity, include_powers=False):\n current = cupy.array([1])\n suffixes = [current]\n for pole, mult in zip(roots[-1:0:-1], multiplicity[-1:0:-1]):\n monomial = cupy.r_[1, -pole]\n for _ in range(int(mult)):\n current = cupy.polymul(current, monomial)\n suffixes.append(current)\n suffixes = suffixes[::-1]\n\n factors = []\n current = cupy.array([1])\n for pole, mult, suffix in zip(roots, multiplicity, suffixes):\n monomial = cupy.r_[1, -pole]\n block = []\n for i in range(int(mult)):\n if i == 0 or include_powers:\n block.append(cupy.polymul(current, suffix))\n current = cupy.polymul(current, monomial)\n factors.extend(reversed(block))\n\n return factors, current",
"def product(*nums):\n\treturn reduce((lambda x, y: x * y), nums)",
"def product(numbers):\n p = 1\n for x in numbers:\n p *= x\n return p",
"def product(numbers):\n p = 1\n for x in numbers:\n p *= x\n return p",
"def get_factors():",
"def __mul__(self, factor):\n if type(factor) == int or type(factor) == float:\n return Vector([c * factor for c in self.components])\n else:\n raise NotImplementedError\n raise Exception(\"Type \" + str(type(factor)) + \" is not valid. Expected float or int types.\")",
"def primefactors_with_multiplicity(n):\n factors = []\n primes = prime_sieve(n)\n\n for p in primes:\n while n % p == 0:\n factors.append(p)\n n /= p\n if n == 1:\n return(factors)\n return([n])",
"def product( iterable ):\n p= 1\n for n in iterable:\n p *= n\n return p",
"def mult_numbers(numbers):\n product = 1\n for number in numbers:\n product = product * number\n\n return product",
"def prod(arg):\n ret = 1\n for i in range(0, len(arg)):\n ret = ret * arg[i]\n return ret",
"def multiply(numbers):\n prod = 1\n for i in numbers:\n prod = prod*i\n return prod",
"def prod(n):\n product = S.One\n for i in n:\n product = product * i\n return product",
"def product1(a, b, c) :\n return a * b * c",
"def _prod(s):\n return reduce(lambda x, y: x * y, s, 1)",
"def prod(iterable):\n return reduce(operator.mul, iterable, 1)",
"def prod(iterable):\n return reduce(operator.mul, iterable, 1)",
"def prod(iterable):\n return reduce(operator.mul, iterable, 1)",
"def factors(self, X):\r\n return (lambda fd: [X] if not fd else fd + self.factors(X // fd[0])) (self.firstdiv(X))",
"def prod(l):\n return reduce(lambda a, b: a*b, l)",
"def _make_product(terms):\n if terms:\n product = terms[0]\n for term in terms[1:]:\n product = Mul((product, term))\n return product \n else:\n return Const(1)",
"def multiplied(*values):\n values = [_normalize(v) for v in values]\n def _product(it):\n p = 1\n for n in it:\n p *= n\n return p\n for v in zip(*values):\n yield _product(v)",
"def product(iterable):\n prod = 1\n for i in iterable:\n prod *= i\n return prod",
"def prod(iterable):\n \n return reduce(operator.mul, iterable, 1)"
] | [
"0.82882315",
"0.7311317",
"0.72265387",
"0.71801776",
"0.7172264",
"0.6903313",
"0.6506647",
"0.6431132",
"0.636319",
"0.63479465",
"0.63479465",
"0.6306992",
"0.63020563",
"0.62482494",
"0.6170946",
"0.6158294",
"0.6128926",
"0.61053824",
"0.60901135",
"0.6059942",
"0.6057559",
"0.6054435",
"0.6054435",
"0.6054435",
"0.60538465",
"0.6052791",
"0.6042126",
"0.604172",
"0.6028663",
"0.60230684"
] | 0.80307823 | 1 |
Yields (_id, text_body) for all docs with a concatenated text body field. | def fetch_doc_text_body(self, document_level, find_query_mixin={}):
find_query = {'subreddit': self.subreddit, 'postwise.text':{'$exists':True}}
find_query.update(find_query_mixin)
if document_level != 'postwise':
raise NotImplementedError('document_level:%s' % document_level)
print 'found %i matching the query for text body docs' % self.posts_read.find(find_query).count()
for doc in self.posts_read.find(find_query):
yield doc['_id'], doc[document_level]['text'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def iter_texts():\n dirs = 'comm_use_subset noncomm_use_subset pmc_custom_license biorxiv_medrxiv'.split()\n for dir in dirs:\n fnames = (DATA_PATH / dir / dir).glob('*')\n for fname in fnames:\n with fname.open() as f:\n content = json.load(f)\n \n for key in 'abstract body_text'.split():\n for row in content[key]:\n yield row['text']",
"def text_to_docs(text_id):\n\n row = Text.get(Text.id==text_id)\n\n\n doc_ids = set()\n for tokens in row.queries:\n\n # Execute the query.\n results = config.es.search(\n\n index='document',\n request_timeout=90,\n\n body={\n 'fields': [],\n 'size': 1000000,\n 'filter': {\n 'query': {\n 'match_phrase': {\n 'body': {\n 'query': ' '.join(tokens),\n 'slop': 5,\n }\n }\n }\n }\n }\n\n )\n\n # Fail the job if the result is incomplete.\n if results['timed_out']:\n raise TimeoutError()\n\n # Register the doc ids.\n if results['hits']['total'] > 0:\n for hit in results['hits']['hits']:\n doc_ids.add(int(hit['_id']))\n\n\n # Build doc -> text links.\n citations = []\n for doc_id in doc_ids:\n\n citations.append({\n 'document': doc_id,\n 'text': row.id,\n 'tokens': row.hash_tokens,\n })\n\n # Bulk-insert the results.\n if citations:\n Citation.insert_many(citations).execute()",
"def __iter__(self):\r\n for text in self.get_texts():\r\n yield self.dictionary.doc2bow(text, allow_update=False)",
"def format_text(text: TTextType) -> typing.Iterator[TViewLine]:\n for line in text.splitlines():\n yield [(\"text\", line)]",
"def query_texts():\n alloweds = {'author', 'is_prose', 'language', 'title'}\n filters = {}\n for allowed in alloweds:\n grabbed = flask.request.args.get(allowed, None)\n if grabbed:\n filters[allowed] = grabbed\n before_val = flask.request.args.get('before', None)\n after_val = flask.request.args.get('after', None)\n try:\n if before_val is not None:\n before_val = int(before_val)\n if after_val is not None:\n after_val = int(after_val)\n except ValueError:\n return tv5api.errors.error(\n 400,\n message='If used, \"before\" and \"after\" must have integer values.')\n\n if before_val is not None and after_val is not None:\n results = flask.g.db.find(\n tesserae.db.entities.Text.collection,\n year_not=(before_val, after_val),\n **filters)\n elif before_val is not None and after_val is None:\n results = flask.g.db.find(\n tesserae.db.entities.Text.collection,\n # Assuming that lower limit pre-dates all texts in database\n year=(-999999999999, before_val),\n **filters)\n elif not before_val is None and after_val is not None:\n results = flask.g.db.find(\n tesserae.db.entities.Text.collection,\n # Assuming that upper limit post-dates all texts in database\n year=(after_val, 999999999999),\n **filters)\n else:\n results = flask.g.db.find(\n tesserae.db.entities.Text.collection,\n **filters)\n return flask.jsonify(texts=[fix_id(r.json_encode()) for r in results])",
"def pipe(self, texts):\n for text in texts:\n yield self(text)",
"def processed_bulk(self, pipeline):\n docs = [Document([], text=t) for t in EN_DOCS]\n return pipeline(docs)",
"def text_contents_from_document_body(\n content: str, granularity=\"document\"\n) -> List[TextContent]:\n\n return text_content_split_functions[granularity](content)",
"def get_complete_text(doc_list):\n complete_text = \"\"\n for doc in doc_list:\n text = read_file( doc, mode='rb', ignore_comments= False).decode('utf-8')\n complete_text = complete_text + text\n return complete_text",
"def allText(node):\n return \"\".join(allTextGenerator(node))",
"def multiple_document_processing(self) -> List:\n batch_list = []\n for doc, idx in self.__documents:\n entities_idx = {'idx': idx}\n entities_result = self.create_entity(document=doc)\n word_cleaned = self.clean_words(doc)\n entities_idx[self.key_spacy_text] = str(word_cleaned)\n entities_idx.update(entities_result)\n batch_list.append(entities_idx)\n return batch_list",
"def merge_docs(self):",
"def Hydrate(df, txtPath, excludes=True):\n\n def HydrateText(docId, startOffset, endOffset, properties, txtPath, excludes):\n \n # Read in the text for the document (want to think about ways for improving performance)\n docText = ''\n text = ''\n \n # Check if file already has been read (written to tmp space)\n if os.path.exists('/tmp/' + docId):\n with io.open('/tmp/' + docId,'r',encoding='utf-8') as f:\n docText = f.read()\n else:\n try:\n with io.open(txtPath + docId,'r',encoding='utf-8') as f:\n docText = f.read()\n with io.open('/tmp/' + docId,'w',encoding='utf-8') as f:\n f.write(docText)\n except Exception as ex:\n print(ex)\n docText=\"\"\n \n # Return properties if docText was empty or 'text' is already defined in the properties\n if (docText == '') or ((properties != None) and ('text' in properties)):\n return properties\n else:\n if (excludes) and (properties != None) and ('excludes' in properties) and (len(properties['excludes']) > 0):\n excludes = []\n exToks = []\n for excludesEntry in properties['excludes'].split(\"|\"):\n toks = excludesEntry.split(\",\") \n excludes.append((int(toks[0]),toks[1],toks[2],int(toks[3]),int(toks[4])))\n excludes = list(set(excludes))\n for exclude in excludes:\n exToks.append((exclude[3],exclude[4]))\n exToks = list(set(exToks))\n exToks.sort(key=lambda tup: (tup[0], tup[1]))\n curOffset = startOffset\n for exTok in exToks:\n if exTok[0] <= curOffset:\n curOffset = exTok[1]\n else:\n text = text + docText[curOffset:exTok[0]]\n curOffset = exTok[1]\n if curOffset < endOffset:\n text = text + docText[curOffset:endOffset]\n \n else:\n text = docText[startOffset:endOffset]\n \n if properties != None:\n properties['text'] = text\n else:\n properties = {}\n properties['text'] = text\n return properties\n \n HydrateTextUDF = udf(HydrateText,MapType(StringType(),StringType()))\n\n hydratedf = df.sortWithinPartitions('docId') \\\n .withColumn('properties', HydrateTextUDF(col('docId'),col('startOffset'),col('endOffset'),col('properties'),lit(txtPath),lit(excludes)))\n return hydratedf",
"def get_text(api, obj_type):\n\n if obj_type == \"comments\":\n search_function = api.search_comments\n filters = [\"author\", \"body\"]\n check_attr = \"body\"\n else: # type == \"submissions\"\n search_function = api.search_submissions\n filters = [\"author\", \"title\", \"selftext\"]\n check_attr = \"selftext\"\n\n text = list(\n search_function(\n after=YESTERDAY, before=TODAY, subreddit=\"wallstreetbets\", filter=filters\n )\n )\n\n # prevent errors caused by removed/deleted posts\n text[:] = [\n x for x in text if getattr(x, check_attr) not in (\"[removed]\", \"[deleted]\")\n ]\n\n return text",
"def processed_doc(self, pipeline):\n return [pipeline(text) for text in EN_DOCS]",
"def process(self, text: str = None, text_key: Any = None) -> Dict:\n if not text and not text_key:\n raise TypeError(\" user must provide text or tuple_text_key \")\n\n self.set_parameters(text, text_key)\n if not self.__pipe_mode:\n word_processed = self.single_document_processing()\n else:\n word_processed = self.multiple_document_processing()\n return word_processed",
"def doc(self):\n return {'_id': self._id,\n 'text': self.text}",
"def add_texts(\n self,\n texts: Iterable[str],\n metadatas: Optional[List[dict]] = None,\n refresh_indices: bool = True,\n **kwargs: Any,\n ) -> List[str]:\n try:\n from elasticsearch.exceptions import NotFoundError\n from elasticsearch.helpers import bulk\n except ImportError:\n raise ValueError(\n \"Could not import elasticsearch python package. \"\n \"Please install it with `pip install elasticsearch`.\"\n )\n requests = []\n ids = []\n embeddings = self.embedding.embed_documents(list(texts))\n dim = len(embeddings[0])\n mapping = _default_text_mapping(dim)\n\n # check to see if the index already exists\n try:\n self.client.indices.get(index=self.index_name)\n except NotFoundError:\n # TODO would be nice to create index before embedding,\n # just to save expensive steps for last\n self.client.indices.create(index=self.index_name, mappings=mapping)\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n _id = str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": self.index_name,\n \"vector\": embeddings[i],\n \"text\": text,\n \"metadata\": metadata,\n \"_id\": _id,\n }\n ids.append(_id)\n requests.append(request)\n bulk(self.client, requests)\n\n if refresh_indices:\n self.client.indices.refresh(index=self.index_name)\n return ids",
"def load_texts(self):\n\n db = mongo(self.text_dbname)\n\n print(\"Loading texts:\")\n # For each other, query the text database and add their works to texts to compare\n for author in self.authors:\n for text in db.texts.find({'author':author, 'language':\"latin\"}, no_cursor_timeout=True):\n self.texts_to_compare.append(text)\n print(\" -- loaded texts for\", author)\n\n return",
"def textfrombodies(self) -> str:\n type_priority = [\"plain\", \"html\", \"other\"] # TODO: Make configurable\n\n for texttype in type_priority:\n if texttype == \"plain\" and texttype in self.textbodies:\n \"\"\"Text is plain, so it can be used verbatim\"\"\"\n return self.textbodies[texttype]\n if texttype == \"html\" and texttype in self.textbodies:\n \"\"\"HTML text. Convert to markup with html2text and remove extra spaces\"\"\"\n text = html2text.html2text(self.textbodies[texttype])\n # Remove every second newline which is added to distinguish between paragraphs in Markdown, but makes\n # the jira ticket hard to read.\n return re.sub(\"(\\n.*?)\\n\", \"\\g<1>\", text)\n if texttype == \"other\" and len(self.textbodies):\n # If no other text is found, return the first available body if any.\n return self.textbodies[list(self.textbodies.keys())[0]]\n return \"The email contained no text bodies.\"",
"def body_words(self):\n\n if self._body_words == []:\n for s in self.body():\n for w in s.split():\n self._body_words.append(w)\n\n return self._body_words",
"def allTextGenerator(node):\n if node.nodeType == node.TEXT_NODE:\n yield node.data\n for child in node.childNodes:\n for text in allTextGenerator(child):\n yield text",
"def linked_text_paragraphs(self):\n for par in self._main_paragraphs_raw():\n par_links = par.find_all('a')\n if len(par_links) == 0:\n self.main_count += len(par.text)\n yield par.text\n else:\n for el in par.contents:\n if el.name is None:\n #this is plain text\n self.main_count += len(str(el))\n yield str(el)\n elif el.name == \"a\" and \"href\" in el.attrs:\n id = el[\"href\"].lstrip('#')\n try:\n foot_par = self._get_footnote_par(id)\n except NoFootnoteError:\n self.log(f\"Could not find footnote for {id}, skipping.\")\n self.footnote_count += len(foot_par.text)\n yield foot_par.text",
"def get_snippets(self, constraints):\n for ind, text in enumerate(self.__mongo_db.get({\"title\": \"Chapter 1\"})):\n self.__postgre_db.insert(\"texts\", {\"title\": text['title']})\n self.find_text_window(text['text'], text['id'], constraints)\n print(\"Chapter \" + str(text['id']) + \" done.\")",
"def text(self):\n return self.query.query",
"def iter_documents(self):\n raise NotImplementedError",
"def document(self, **kw):\r\n \r\n for p in self.documents(**kw):\r\n return p",
"def paragraph(self, text):\n return [text]",
"def iter_from(self, fieldname, text):\n\t\tfields = 'fieldname', 'text', 'docfreq', 'indexfreq'\n\t\tcur = self.index.collection.find(fields=fields).sort('fieldname')\n\t\treturn (tuple(rec[field] for field in fields) for rec in cur\n\t\t\tif rec['fieldname'] >= fieldname)",
"def __iter__(self):\n for document in self.query:\n yield self._to_document(document)"
] | [
"0.6241602",
"0.5947075",
"0.584004",
"0.55060875",
"0.5495156",
"0.5454279",
"0.5435753",
"0.5326532",
"0.5322977",
"0.53185534",
"0.52696717",
"0.5266342",
"0.52630454",
"0.52428854",
"0.52328455",
"0.522977",
"0.52256966",
"0.5223575",
"0.5191079",
"0.5173535",
"0.5165849",
"0.51622593",
"0.51539785",
"0.5110467",
"0.51045716",
"0.5085792",
"0.506921",
"0.5048615",
"0.50437856",
"0.50384974"
] | 0.6546281 | 0 |
Merges 2 or more topics into a single new topic. Just reassigns all the docs in those topics to new topic. Eg merging the topics ["1","2","3"] will go into a new topic named "(1+2+3)". You don't need to classify anything! | def merge_topics(self, topic_ids):
new_topic_id = '(' + '+'.join(topic_ids) + ')'
arbitrary_prob = 1
result = self.posts_write.update(
{'subreddit':self.subreddit, 'postwise.topic_assignment.topic':{'$in':topic_ids}},
{'$set':{'postwise.topic_assignment.topic':new_topic_id,
'postwise.topic_assignment.prob':arbitrary_prob},
}, multi=True)
print 'merged %i documents into "%s"' % (result['nModified'], new_topic_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def merge(ss: List[Stream[Any]], topics: List[Any] = None) -> Stream[Any]:\n\n def g(deps, this, src, value):\n if topics is not None:\n return (topics[ss.index(src)], value)\n return value\n\n return combine(g, ss)",
"def add_subscription_topics(self, topics: List[str]) -> None:\n self.log.debug(f\"Adding {topics} to {self.topics}\")\n self.topics.extend(topics)",
"def add_topic ( topics , stream = -1 ) :\n return Ostap.Utils.AddTopic ( topics , level , stream )",
"def test_topic_reduction(reduced_topics):\n base_bertopic = BERTopic(bert_model='distilbert-base-nli-mean-tokens', verbose=False)\n nr_topics = reduced_topics + 2\n base_bertopic.nr_topics = reduced_topics\n old_documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n base_bertopic._update_topic_size(old_documents)\n c_tf_idf = base_bertopic._extract_topics(old_documents.copy(), topic_reduction=True)\n old_freq = base_bertopic.get_topics_freq()\n\n new_documents = base_bertopic._reduce_topics(old_documents.copy(), c_tf_idf)\n new_freq = base_bertopic.get_topics_freq()\n\n assert old_freq.Count.sum() == new_freq.Count.sum()\n assert len(old_freq.Topic.unique()) == len(old_freq)\n assert len(new_freq.Topic.unique()) == len(new_freq)\n assert isinstance(base_bertopic.mapped_topics, dict)\n assert not set(base_bertopic.get_topics_freq().Topic).difference(set(new_documents.Topic))\n assert base_bertopic.mapped_topics",
"def add_topics(self, project: str, *topics: str):\n assert self.exists(project), f'Project {project} inesistente'\n\n return self.collection.find_one_and_update(\n {'url': project},\n {\n '$addToSet': {\n 'topics': {\n '$each': topics,\n }\n }\n }\n )",
"def test_topic_reduction(reduced_topics):\n model = BERTopic()\n nr_topics = reduced_topics + 2\n model.nr_topics = reduced_topics\n old_documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n model._update_topic_size(old_documents)\n model._extract_topics(old_documents.copy())\n old_freq = model.get_topic_freq()\n\n new_documents = model._reduce_topics(old_documents.copy())\n new_freq = model.get_topic_freq()\n\n assert old_freq.Count.sum() == new_freq.Count.sum()\n assert len(old_freq.Topic.unique()) == len(old_freq)\n assert len(new_freq.Topic.unique()) == len(new_freq)\n assert isinstance(model.mapped_topics, dict)\n assert not set(model.get_topic_freq().Topic).difference(set(new_documents.Topic))\n assert model.mapped_topics",
"def save_doc_topics(self, topic_modeler, find_query_mixin={}, topic_id_namer=str):\n nmf = topic_modeler.nmf\n vectorizer = topic_modeler.vectorizer\n # only update docs that are the current subreddit,\n # and have tokens (via process_text.py)\n find_query = {'subreddit': self.subreddit, 'postwise.tokens':{'$exists':True}}\n find_query.update(find_query_mixin)\n\n doc_count = 0\n for doc in self.posts_read.find(find_query):\n text_body = doc['postwise']['text']\n\n # Make a dict topic_id:topic_prob, where topic_id is determined by topic_id_namer function\n topic_distros = nmf.transform(vectorizer.transform([text_body]))[0]\n topic_dict = {topic_id_namer(topic_id):prob for topic_id, prob in enumerate(topic_distros)}\n\n # assign each doc to its most probable topic. In case of a tie, chose a random one.\n # buffer_val allows small variation between two topics to count as a tie,\n # eg 0.0323423 is close enough to 0.0323487\n #\n # individal prob values will shrink as number of topics grow,\n # so the buffer should also shrink as number of topics grow.\n buffer_val = 0.001/(len(topic_distros))\n strongest_topics = [topic_id for topic_id, prob in topic_dict.items()\n if (max(topic_distros) - prob < buffer_val)]\n\n topic_assignment = random.choice(strongest_topics)\n\n # DEBUG: introspect the docs with multiple topics\n # if len(strongest_topics) != 1:\n # print 'DEBUG: Got ambigious topic, choosing randomly.'\n # print buffer_val\n # print topic_distros\n # print strongest_topics\n # print topic_assignment\n\n # self.posts_write.update({'_id':doc['_id']},{'$set':{'postwise.topic_distro':topic_dict, 'postwise.topic_assignment':{'topic':topic_assignment,'prob':topic_dict[topic_assignment]}}}, upsert=True)\n #\n # ^ actually, don't persist the topic_distro. Just the assignment.\n self.posts_write.update({'_id':doc['_id']},{'$set':{\n 'postwise.topic_assignment':{\n 'topic':topic_assignment,'prob':topic_dict[topic_assignment]}}}, upsert=True)\n\n doc_count += 1\n print 'Saved topic distros for %i documents' % doc_count",
"def msg_topic ( *topics ) :\n topic = 0\n for i in topics : \n if isinstance ( i , integer_types ) : topic |= i\n elif isinstance ( i , string_types ) :\n ii = i.lower() \n if ii == 'generation' : topic |= ROOT.RooFit.Generation \n elif ii == 'minimization' : topic |= ROOT.RooFit.Minimization\n elif ii == 'minization' : topic |= ROOT.RooFit.Minimization\n elif ii == 'plotting' : topic |= ROOT.RooFit.Plotting\n elif ii == 'fitting' : topic |= ROOT.RooFit.Fitting \n elif ii == 'integration' : topic |= ROOT.RooFit.Integration \n elif ii == 'linkstatemgmt' : topic |= ROOT.RooFit.LinkStateMgmt\n elif ii == 'eval' : topic |= ROOT.RooFit.Eval\n elif ii == 'caching' : topic |= ROOT.RooFit.Caching\n elif ii == 'optimization' : topic |= ROOT.RooFit.Optimization\n elif ii == 'optimisation' : topic |= ROOT.RooFit.Optimization\n elif ii == 'objecthandling' : topic |= ROOT.RooFit.ObjectHandling\n elif ii == 'inputarguments' : topic |= ROOT.RooFit.InputArguments\n elif ii == 'tracing' : topic |= ROOT.RooFit.Tracing\n elif ii == 'contents' : topic |= ROOT.RooFit.Contents\n elif ii == 'datahandling' : topic |= ROOT.RooFit.DataHandling\n elif ii == 'numintegration' : topic |= ROOT.RooFit.NumIntegration\n elif ii == 'numericintegration' : topic |= ROOT.RooFit.NumIntegration\n elif ii == 'numericalintegration' : topic |= ROOT.RooFit.NumIntegration\n elif ii == 'fastevaluations' : topic |= ROOT.RooFit.FastEvaluations\n else : logger.error ( 'MsgTopic/1: unknown topic %s, skip' % i )\n else : logger.error ( 'MsgTopic/2: unknown topic %s/%s, skip' % ( i , type ( i ) ) )\n \n return topic",
"def test_topic_merge_other_forum(topic_normal):\n forum_other = Forum(title=\"Test Forum 2\", category_id=1)\n forum_other.save()\n\n topic_other = Topic(title=\"Test Topic 2\")\n post_other = Post(content=\"Test Content 2\")\n topic_other.save(user=topic_normal.user, forum=forum_other, post=post_other)\n\n assert not topic_normal.merge(topic_other)",
"def test_topic_reduction_edge_cases():\n model = BERTopic()\n nr_topics = 5\n model.nr_topics = 100\n old_documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n model._update_topic_size(old_documents)\n model._extract_topics(old_documents)\n old_freq = model.get_topic_freq()\n\n new_documents = model._reduce_topics(old_documents)\n new_freq = model.get_topic_freq()\n\n assert not set(old_documents.Topic).difference(set(new_documents.Topic))\n pd.testing.assert_frame_equal(old_documents, new_documents)\n pd.testing.assert_frame_equal(old_freq, new_freq)",
"def process_topics(self):\n self._init_lda()\n f = open(self.OUTPUT_PATH, \"w\")\n for link in self.electrical_links:\n try:\n self.logger.info(\"processing: {0}\".format(link))\n page = wikipedia.page(link)\n title = gensim.parsing.preprocess_string(page.title)\n content = gensim.parsing.preprocess_string(page.content)\n\n title_bow = self.dictionary.doc2bow(title)\n content_bow = self.dictionary.doc2bow(content)\n\n new_bag_of_words = title_bow + content_bow\n self.lda.update([content_bow])\n topics = self.get_sorted_topics(new_bag_of_words)\n f.write(\"{0}:: {1}\\n\".format(link, topics))\n except UnicodeError:\n self.logger.info(\"PROCESSING FAILED!\")\n continue\n\n f.close()\n self.lda.save(self.MODEL_PATH)\n return True",
"def model(self, doc_list=None):\r\n\r\n # eta => prior for the per-topic word distribution\r\n eta = torch.ones(self.V)\r\n\r\n with pyro.plate(\"topics\", self.K):\r\n\r\n # Beta => per topic word distribution\r\n Beta = pyro.sample(f\"beta\", dist.Dirichlet(eta))\r\n\r\n # alpha => prior for the per-doc topic vector\r\n alpha = torch.ones(self.K) / self.K\r\n\r\n X_List, Theta = [], []\r\n for d in pyro.plate(\"documents\", self.D, subsample_size=self.S):\r\n\r\n # theta => per-doc topic vector\r\n theta = pyro.sample(f\"theta_{d}\", dist.Dirichlet(alpha))\r\n\r\n doc = None if doc_list is None else doc_list[d]\r\n\r\n with pyro.plate(f\"words_{d}\", self.N[d]):\r\n\r\n # assign a topic\r\n z_assignment = pyro.sample(\r\n f\"z_assignment_{d}\",\r\n dist.Categorical(theta)\r\n )\r\n\r\n # from that topic vec, select a word\r\n X = pyro.sample(\r\n f\"w_{d}\",\r\n dist.Categorical(Beta[z_assignment]),\r\n obs=doc\r\n )\r\n\r\n X_List.append(X)\r\n Theta.append(theta)\r\n\r\n Theta = torch.stack(Theta)\r\n\r\n return X_List, Beta, Theta",
"def topics(ctx):\n pass",
"def update_topics(mongo_collection, name, topics):\n query_name = {'name': name}\n new_topics = {'$set': {'topics': topics}}\n if mongo_collection:\n return mongo_collection.update_many(query_name, new_topics)",
"def main_topic_doc(ldamodel, corpus=corpus): \n \n doc_topics = pd.DataFrame()\n\n for i, row in enumerate(ldamodel[corpus]):\n row = sorted(row, key=lambda x: (x[1]), reverse=True)\n\n for j, (topic_num, prop_topic) in enumerate(row):\n if j == 0:\n wp = ldamodel.show_topic(topic_num)\n topic_keywords = \"' \".join([word for word, prop in wp])\n doc_topics = doc_topics.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True)\n else:\n break\n doc_topics.columns = ['Dominant_Topic', 'Percent_Contrib', 'Topic_keywords']\n return doc_topics",
"def assign_topics_to_sentences(self):\n \n\n # 10 topics\n topic_dict = {0: 'academics'\n , 1: 'career'\n , 2: 'commute'\n , 3: 'diversity'\n , 4: 'community'\n , 5: 'extracurricular'\n , 6: 'facilities'\n , 7: 'finance'\n , 8: 'housing'\n , 9: 'wellness'\n }\n\n # Some important words that should be included under each topic\n topics = [['academic', 'exam', 'study', 'learn', 'education', 'class', 'course', 'grade', 'assignment'\n , 'degree', 'research', 'elective'\n , 'professor', 'project', 'scholarship', 'knowledge']\n , ['career', 'job', 'coop', 'employment']\n , ['commute', 'skytrain', 'transport', 'commuter']\n , ['diversity', 'diverse', 'background']\n , ['community', 'welcome', 'support', 'social', 'friend', 'fun', 'network', 'home']\n , ['extracurricular', 'club', 'sport', 'activity']\n , ['facility', 'infrastructure', 'food', 'building', 'gym']\n , ['finance', 'tuition', 'expensive']\n , ['housing', 'live', 'residence']\n , ['wellness', 'health', 'stress', 'depression', 'anxiety']]\n\n # Read the data - id and reponse column\n dt = pd.read_csv(self.path_data_col\n , encoding = \"ISO-8859-1\"\n , usecols = [self.id_col_name, self.col_name])\n\n\n \n # Remove rows with NA values\n dt = self.removeEmptyData(dt)\n \n # Split into sentences\n dt['sentences'] = self.getSentences(dt[self.col_name])\n \n \n \n\n \n \n\n # Store number of sentences in each response as a column\n dt['num_sent'] = dt['sentences'].apply(lambda x: len(x))\n\n # Split each row into multiple rows - one row for each sentence\n dt = (dt\n .set_index([self.id_col_name, self.col_name, 'num_sent'])['sentences']\n .apply(pd.Series)\n .stack()\n .reset_index()\n .drop('level_3', axis = 1)\n .rename(columns = {0:'sentences'}))\n\n\n # Clean the sentences\n dt['sentences_cleaned'] = self.cln.clean(dt['sentences'], typo = self.typo_ind)\n\n # Remove useless sentences\n dt['sentences_cleaned'] = self.getValid(dt['sentences_cleaned'])\n\n # Remove rows with NA values\n dt = self.removeEmptyData(dt)\n\n # Tokenize words in the cleaned sentences\n responses = list(self.sent_to_words(dt['sentences_cleaned'].values.tolist()))\n\n\n # Call the lexicon function\n topic_lexicons = self.prepare_lexicons()\n\n # Lists to store results\n count_topic_all = []\n actual_topic_all = []\n\n # Tag each response into a topic\n for response in responses:\n\n count_topic = []\n actual_topic = []\n\n for topic in topic_lexicons:\n\n # Count occurance of each word in word stock in the response\n temp = sum(dict((x, response.count(x)) for x in topic).values())\n count_topic.append(temp)\n\n\n for index, value in enumerate(count_topic):\n\n # Consider the topic if atleast one(?) word from its word-stock occurs in the response\n if value > 0:\n actual_topic.append(topic_dict[index])\n\n\n # If more than 3 topics are tagged for single sentence, refine by increasing\n # cutoff to at least 2 words instead of 1\n if len(actual_topic) > 3:\n\n actual_topic = []\n for index, value in enumerate(count_topic):\n\n if value > 1: # Increase cutoff\n actual_topic.append(topic_dict[index])\n\n count_topic_all.append(count_topic)\n actual_topic_all.append(actual_topic)\n\n\n dt['tags'] = actual_topic_all\n dt['num_tags'] = count_topic_all\n\n\n # Select only the most important columns\n dt_less = dt[[self.id_col_name, 'sentences', 'tags']]\n\n return dt, dt_less",
"def infertopics(self):\n\n # Iterate over nodes missing topic attribute (only occurs for new nodes)\n for uid in self.scan(attribute=\"updated\"):\n # Remove updated attribute\n self.removeattribute(uid, \"updated\")\n\n # Get list of neighboring nodes\n ids = self.edges(uid)\n\n # Infer topic\n topic = Counter(self.attribute(x, \"topic\") for x in ids).most_common(1)[0][0] if ids else None\n if topic:\n # Add id to topic list and set topic attribute\n self.topics[topic].append(uid)\n self.addattribute(uid, \"topic\", topic)\n\n # Set topic rank\n self.addattribute(uid, \"topicrank\", len(self.topics[topic]) - 1)\n\n # Infer category\n category = Counter(self.attribute(x, \"category\") for x in ids).most_common(1)[0][0]\n self.addattribute(uid, \"category\", category)",
"def full_summarizer_word_comparison(sentences, topic_sentences, number_topics):\n\n word_counts = []\n\n for sentence in sentences:\n document_1_words = sentence.split()\n document_2_words = ''.join(topic_sentences).split()\n\n common_words = set(document_1_words).intersection(set(document_2_words))\n word_counts.append(len(common_words))\n\n return [j for i, j in sorted(list(zip(word_counts, sentences)), reverse=True)][0:number_topics]",
"def test_wiki_topics(self):\n t1 = TopicFactory(slug='doesnotexist')\n t2 = TopicFactory(slug='extant')\n t3 = TopicFactory(slug='tagged')\n\n doc = DocumentFactory(locale=u'en-US', category=10)\n doc.topics.add(t2)\n RevisionFactory(document=doc, is_approved=True)\n\n doc = DocumentFactory(locale=u'en-US', category=10)\n doc.topics.add(t2)\n doc.topics.add(t3)\n RevisionFactory(document=doc, is_approved=True)\n\n self.refresh()\n\n topic_vals = (\n (t1.slug, 0),\n (t2.slug, 2),\n (t3.slug, 1),\n ([t2.slug, t3.slug], 1),\n )\n\n qs = {'a': 1, 'w': 1, 'format': 'json'}\n for topics, number in topic_vals:\n qs.update({'topics': topics})\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(number, json.loads(response.content)['total'])",
"def suppress_topics ( *topics ) :\n if topics and 1 == len( topics ) :\n t = str ( topics [ 0 ] ).lower()\n if 'config' == t : return suppress_topics() \n\n if not topics :\n newtopics = [] \n import ostap.core.config as CONFIG\n if 'RooFit' in CONFIG.config :\n import string\n ws = string.whitespace \n node = CONFIG.config [ 'RooFit' ]\n data = node.get('RemoveTopics','(,)' )\n topics = tuple ( i.strip ( ws ) for i in data.split ( ',' ) if i.strip ( ws ) ) \n \n if topics : \n svc = ROOT.RooMsgService.instance()\n svc.saveState () \n topic = msg_topic ( *topics ) \n num = svc.numStreams()\n for i in range ( num ) : ok = Ostap.Utils.remove_topic ( i , topic )",
"def train_lda_topic_model_with_mallet(texts, path_mallet,\n terms_to_remove=[], num_topics=50,\n no_below=10, no_above=0.9,\n scoring=False, start=2, step=3):\n preprocessed_corpus = []\n print ('training of gensim corpus began')\n for i, text in enumerate(texts):\n if i == 0:\n # todo filter here\n text = text.split()\n\n # Additional filtering steps #\n \"\"\"\n filtered_text = [word for word in text if (word[0] in\n string.ascii_uppercase + string.ascii_lowercase)]\n\n filtered_text = [word for word in filtered_text if\n (word not in set(stopwords.words('english')))]\n preprocessed_corpus.append(filtered_text)\n \"\"\"\n\n dct = initialize_gensim_dictionary([text])\n else:\n text = text.split()\n # Additional filtering steps\n\n \"\"\"\n filtered_text = [word for word in text if (word[0] in\n string.ascii_uppercase + string.ascii_lowercase)]\n\n filtered_text = [word for word in filtered_text if\n (word not in set(stopwords.words('english')))]\n preprocessed_corpus.append(filtered_text)\n \"\"\"\n add_documents_to_gensim_dictionary(dct, [text])\n # todo:this is to be integrated to the building process\n\n if len(terms_to_remove) > 0:\n for term in terms_to_remove:\n dct.filter_tokens(bad_ids=[dct.token2id[term]])\n\n dct.filter_extremes(no_below=no_below, no_above=no_above)\n\n gensim_corpus = [dct.doc2bow(bag_of_word.split()) for bag_of_word in texts]\n print ('gensim corpus done')\n if scoring:\n\n coherence_values = []\n\n for n in range(start, num_topics, step):\n\n lda = LdaMallet(constants.PATH_TO_MALLET,\n gensim_corpus, id2word=dct,\n num_topics=n)\n coherencemodel = CoherenceModel(model=lda,\n texts=preprocessed_corpus,\n dictionary=dct, coherence='c_v')\n coherence_values.append(coherencemodel.get_coherence())\n\n return coherence_values\n\n else:\n lda = LdaMallet(constants.PATH_TO_MALLET, gensim_corpus,\n id2word=dct, num_topics=num_topics)\n # Visualize LDA results, poor results obtained.\n # from gensim.models.wrappers import ldamallet\n # lda_model = ldamallet.malletmodel2ldamodel(lda)\n # vis = pyLDAvis.gensim.prepare(lda_model, gensim_corpus, dct)\n # pyLDAvis.save_html(vis , 'test.html')\n return {'model': lda, 'corpus': gensim_corpus}",
"def merge_docs(self):",
"def __init__(self,corpus,topic_number=10,iteration_number=1000,burn_in=500,update_cycle=100,alpha=None,beta=None):\n # documents, key: id of document, value: list of word in an specific document.\n self.documents = corpus.documents\n # number of iteration when using Gibbs Sampling.\n self.iteration_number = iteration_number\n self.topic_number = topic_number\n self.burn_in = burn_in\n self.update_cycle = update_cycle\n # number of terms.\n self.term_number = len(corpus.word_id)\n # number of documents.\n self.document_number = len(self.documents)\n # if alpha and beta is None, then assign values to them.\n if alpha == None:\n self.alpha = [2.0] * self.topic_number\n else:\n self.alpha = alpha\n if beta == None:\n self.beta = [0.5] * self.term_number\n else:\n self.beta = beta\n # The sum of elements in beta.\n self.sum_beta = sum(self.beta)\n # The sum of elements in alpha.\n self.sum_alpha = sum(self.alpha)\n # counter, [m][k] refers to the number of times that topic k has been observed with a word in document m.\n self.document_topic_count_matrix = {}\n # counter, [k][t] refers to the number of times that term t has been observed with topic k.\n self.topic_term_count_matrix = {}\n # distribution matrix, [m][k] refers the probability that assigning topic k to document m.\n self.document_distribution_over_topic = {}\n # distribution matrix, [k][t] refers the probability that assigning topic k to term t.\n self.topic_distribution_over_term = {}\n # counter, [m] refers the number of times that all topics have been observed with a word in document m.\n # also, [m] equals to the number of words in document m.\n self.sum_document_by_topic_count = {}\n # counter, [k] refers the number of times that all terms have been observed with topic k.\n self.sum_topic_by_term_count = {}\n # topic assigned to an word in a document. [m][n] refers to the topic that assigned to the n th word in document\n # m.\n self.word_topic_assignment = {}\n # the number of times that the distribution has been updated.\n self.update_number = 0.0",
"def merge_with(self, topic):\n with transaction.atomic():\n if self == topic:\n return self\n if (\n self.branched_from\n and topic.branched_from\n and self.branched_from != topic.branched_from\n ):\n raise ValueError(\"Cannot merge topics with different branched_from topics.\")\n if self.most_recent.semester >= topic.most_recent.semester:\n Course.objects.filter(topic=topic).update(topic=self)\n if topic.branched_from and not self.branched_from:\n self.branched_from = topic.branched_from\n self.save()\n topic.delete()\n return self\n else:\n Course.objects.filter(topic=self).update(topic=topic)\n if self.branched_from and not topic.branched_from:\n topic.branched_from = self.branched_from\n topic.save()\n self.delete()\n return topic",
"def guide(self, doc_list=None):\r\n\r\n with pyro.plate(\"topics\", self.K) as k_vec:\r\n\r\n # Lambda => latent variable for the per-topic word q distribution\r\n Lamda = torch.stack([\r\n pyro.param(\r\n f\"lamda_q_{k}\",\r\n (1 + 0.01*(2*torch.rand(self.V)-1)),\r\n constraint=constraints.positive)\r\n for k in k_vec\r\n ])\r\n\r\n # Beta_q => per-topic word q distribtion\r\n Beta_q = pyro.sample(f\"beta\", dist.Dirichlet(Lamda))\r\n\r\n Theta_q = []\r\n for d in pyro.plate(\"documents\", self.D, subsample_size=self.S):\r\n\r\n # gamma => q for the per-doc topic vector\r\n gamma = pyro.param(f\"gamma_q_{d}\",\r\n (1+0.01*(2*torch.rand(self.K)-1))/self.K,\r\n constraint=constraints.positive)\r\n\r\n # theta_q => posterior per-doc topic vector\r\n theta_q = pyro.sample(f\"theta_{d}\", dist.Dirichlet(gamma))\r\n\r\n phi = pyro.param(\r\n f\"phi_q_{d}\",\r\n (1+0.01*(2*torch.rand(self.K)-1))/self.K,\r\n constraint=constraints.positive\r\n )\r\n\r\n with pyro.plate(f\"words_{d}\", self.N[d]) as w_vec:\r\n\r\n phi = torch.stack([\r\n pyro.param(\r\n f\"phi_q_{d}_{w}\",\r\n (1+0.01*(2*torch.rand(self.K)-1))/self.K,\r\n constraint=constraints.positive)\r\n for w in w_vec\r\n ])\r\n\r\n # assign a topic\r\n pyro.sample(f\"z_assignment_{d}\", dist.Categorical(phi))\r\n\r\n Theta_q.append(theta_q)\r\n\r\n Theta_q = torch.stack(Theta_q)\r\n\r\n return Beta_q, Theta_q",
"def format_topics(topics):\n return '|'.join([topic.get('title', '') for topic in topics])",
"def relabel_topic(user, map_id, old_topic_text, new_topic_text):\n the_map = get_map(user, map_id)\n topic = the_map.subtopics.pop(old_topic_text)\n topic.text = new_topic_text\n the_map.subtopics[new_topic_text] = topic\n save_map(user, map_id, the_map)",
"def test_topic_reduction_edge_cases(base_bertopic):\n\n nr_topics = 5\n base_bertopic.nr_topics = 100\n old_documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n base_bertopic._update_topic_size(old_documents)\n c_tf_idf = base_bertopic._extract_topics(old_documents, topic_reduction=True)\n old_freq = base_bertopic.get_topics_freq()\n\n new_documents = base_bertopic._reduce_topics(old_documents, c_tf_idf)\n new_freq = base_bertopic.get_topics_freq()\n\n assert not set(old_documents.Topic).difference(set(new_documents.Topic))\n pd.testing.assert_frame_equal(old_documents, new_documents)\n pd.testing.assert_frame_equal(old_freq, new_freq)",
"def prepare(self,docs,topics):\n \n self.docs, self.dictionary, self.corpus = self.clean_docs(docs)\n \n # Create keyword map\n self.set_keyword_map()\n \n # Create keyword map with their relatives\n self.set_keyword_map_rel()\n \n self.topic_map = {topic: set(self.get_related_keywords(topic,self.keyword_map_rel,_score=False)) \n for topic in topics}",
"def __init__(self, topics=None):\n self.topics = topics or []"
] | [
"0.6516039",
"0.6307437",
"0.62713104",
"0.60813874",
"0.6040409",
"0.59586054",
"0.59270495",
"0.581572",
"0.58087695",
"0.56809825",
"0.56502634",
"0.56499624",
"0.56362545",
"0.55953455",
"0.55468065",
"0.5537097",
"0.5506792",
"0.5490293",
"0.5485791",
"0.54724145",
"0.5464588",
"0.5461026",
"0.5434221",
"0.5431372",
"0.5412329",
"0.5335671",
"0.5334946",
"0.53305125",
"0.52960116",
"0.52958786"
] | 0.74834514 | 0 |
Uses the trained TopicModeler to assign all docs in the find query to their "strongest" single topic. | def save_doc_topics(self, topic_modeler, find_query_mixin={}, topic_id_namer=str):
nmf = topic_modeler.nmf
vectorizer = topic_modeler.vectorizer
# only update docs that are the current subreddit,
# and have tokens (via process_text.py)
find_query = {'subreddit': self.subreddit, 'postwise.tokens':{'$exists':True}}
find_query.update(find_query_mixin)
doc_count = 0
for doc in self.posts_read.find(find_query):
text_body = doc['postwise']['text']
# Make a dict topic_id:topic_prob, where topic_id is determined by topic_id_namer function
topic_distros = nmf.transform(vectorizer.transform([text_body]))[0]
topic_dict = {topic_id_namer(topic_id):prob for topic_id, prob in enumerate(topic_distros)}
            # assign each doc to its most probable topic. In case of a tie, choose a random one.
# buffer_val allows small variation between two topics to count as a tie,
# eg 0.0323423 is close enough to 0.0323487
#
            # individual prob values will shrink as the number of topics grows,
            # so the buffer should also shrink as the number of topics grows.
buffer_val = 0.001/(len(topic_distros))
strongest_topics = [topic_id for topic_id, prob in topic_dict.items()
if (max(topic_distros) - prob < buffer_val)]
topic_assignment = random.choice(strongest_topics)
# DEBUG: introspect the docs with multiple topics
# if len(strongest_topics) != 1:
            # print 'DEBUG: Got ambiguous topic, choosing randomly.'
# print buffer_val
# print topic_distros
# print strongest_topics
# print topic_assignment
# self.posts_write.update({'_id':doc['_id']},{'$set':{'postwise.topic_distro':topic_dict, 'postwise.topic_assignment':{'topic':topic_assignment,'prob':topic_dict[topic_assignment]}}}, upsert=True)
#
# ^ actually, don't persist the topic_distro. Just the assignment.
self.posts_write.update({'_id':doc['_id']},{'$set':{
'postwise.topic_assignment':{
'topic':topic_assignment,'prob':topic_dict[topic_assignment]}}}, upsert=True)
doc_count += 1
print 'Saved topic distros for %i documents' % doc_count | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def semanticSearch(model, topics, index, idx_to_docid, k=1000):\r\n run = {}\r\n topic_nums = [topic for topic in topics]\r\n queries = [topics[topic]['title'] for topic in topics]\r\n encoded_queries = model.encode(queries)\r\n labels, distances = index.knn_query(encoded_queries, k=k)\r\n for i,topic in enumerate(topic_nums):\r\n run[topic] = []\r\n # considers highest passage match only for a document\r\n added_docids = []\r\n sim = [1-x for x in distances[i]]\r\n scored_run = zip(labels[i], sim)\r\n for i, (passageidx, dist) in enumerate(scored_run):\r\n docid = idx_to_docid[passageidx]\r\n \r\n if docid not in added_docids:\r\n run[topic].append((docid, dist))\r\n added_docids.append(docid)\r\n run[topic] = run[topic][:1000]\r\n return run",
"def connect_topic_id_to_topics_old(self, model):\n #t = model.get_topics()\n for key, value in self.representants.items():\n connection_results = {}\n for article in value:\n try:\n #get most possible index\n topic_index = max(model.analyse_text(article[1]), key=lambda item: item[1])[0]\n except ValueError:\n print(\"No topic index returned continuing\")#TODO replace with if\n continue\n #add most possible index for this article to counter\n if topic_index not in connection_results:\n connection_results[topic_index] = 1\n else:\n connection_results[topic_index] += 1\n #find index that occured mostly\n best_candidates = max(connection_results.items(), key=operator.itemgetter(1))\n print(best_candidates)\n self.log_writer.add_log(\"Best candidate with index {} is connected to topic {} with {}% accuracy\".format(best_candidates[0], key, (connection_results[best_candidates[0]]/len(value))*100))\n #create connection between topic id and model topic index\n self.topic_indexes[key] = best_candidates[0]\n #creat connection in opposite direction if there already is some connection add found index to that connection (some model topic index can represent more than one real topic)\n if best_candidates[0] not in self.topics_of_index:\n self.topics_of_index[best_candidates[0]] = [key]\n else:\n self.topics_of_index[best_candidates[0]].append(key)\n\n self.log_writer.add_log(\"Out of {} real topics only {} were learned\".format(len(self.representants), len(self.topics_of_index)))",
"def connect_topic_id_to_topics(self, model):\n confidence = []\n for key, value in self.representants.items():\n connection_results = {}\n for article in value:\n try:\n # get most possible index\n topic_index = max(model.analyse_text(article[1]), key=lambda item: item[1])[0]\n except ValueError:\n print(\"No topic index returned continuing\") # TODO replace with if\n continue\n # add most possible index for this article to counter\n if topic_index not in connection_results:\n connection_results[topic_index] = 1\n else:\n connection_results[topic_index] += 1\n # find index that occured mostly\n print(connection_results)\n for tp_num, val in connection_results.items():\n confidence.append([key, tp_num, val / len(value)])\n confidence = sorted(confidence, key=operator.itemgetter(2), reverse=True)\n associated_indexes = []\n associated_topics = []\n for conf in confidence:\n if conf[1] in associated_indexes or conf[0] in associated_topics:\n continue\n associated_indexes.append(conf[1])\n associated_topics.append(conf[0])\n self.log_writer.add_log(\n 'Connecting topic {} to model index {} based on highest unused confidence of {}'.format(conf[0],\n conf[1],\n conf[2]))\n self.topic_indexes[conf[0]] = conf[1]\n\n for key, value in self.topic_indexes.items():\n self.topics_of_index[value] = [key]",
"def model(self, doc_list=None):\r\n\r\n # eta => prior for the per-topic word distribution\r\n eta = torch.ones(self.V)\r\n\r\n with pyro.plate(\"topics\", self.K):\r\n\r\n # Beta => per topic word distribution\r\n Beta = pyro.sample(f\"beta\", dist.Dirichlet(eta))\r\n\r\n # alpha => prior for the per-doc topic vector\r\n alpha = torch.ones(self.K) / self.K\r\n\r\n X_List, Theta = [], []\r\n for d in pyro.plate(\"documents\", self.D, subsample_size=self.S):\r\n\r\n # theta => per-doc topic vector\r\n theta = pyro.sample(f\"theta_{d}\", dist.Dirichlet(alpha))\r\n\r\n doc = None if doc_list is None else doc_list[d]\r\n\r\n with pyro.plate(f\"words_{d}\", self.N[d]):\r\n\r\n # assign a topic\r\n z_assignment = pyro.sample(\r\n f\"z_assignment_{d}\",\r\n dist.Categorical(theta)\r\n )\r\n\r\n # from that topic vec, select a word\r\n X = pyro.sample(\r\n f\"w_{d}\",\r\n dist.Categorical(Beta[z_assignment]),\r\n obs=doc\r\n )\r\n\r\n X_List.append(X)\r\n Theta.append(theta)\r\n\r\n Theta = torch.stack(Theta)\r\n\r\n return X_List, Beta, Theta",
"def connect_topic_id_to_topics_old(model, representants, log_writer):\n # t = model.get_topics()\n topic_indexes = {}\n topics_of_index = {}\n for key, value in representants.items():\n connection_results = {}\n for article in value:\n try:\n # get most possible index\n topic_index = max(model.analyse_text(article), key=lambda item: item[1])[0]\n except ValueError:\n print(\"No topic index returned continuing\") # TODO replace with if\n continue\n # add most possible index for this article to counter\n if topic_index not in connection_results:\n connection_results[topic_index] = 1\n else:\n connection_results[topic_index] += 1\n # find index that occured mostly\n print(connection_results)\n best_candidates = max(connection_results.items(), key=operator.itemgetter(1))\n print(best_candidates)\n log_writer.add_log(\n \"Best candidate with index {} is connected to topic {} with {}% accuracy\".format(best_candidates[0],\n key, (\n connection_results[\n best_candidates[\n 0]] / len(\n value)) * 100))\n # create connection between topic id and model topic index\n topic_indexes[key] = best_candidates[0]\n # creat connection in opposite direction if there already is some connection add found index to that connection (some model topic index can represent more than one real topic)\n if best_candidates[0] not in topics_of_index:\n topics_of_index[best_candidates[0]] = [key]\n else:\n topics_of_index[best_candidates[0]].append(key)\n return topic_indexes, topics_of_index",
"def main_topic_doc(ldamodel, corpus=corpus): \n \n doc_topics = pd.DataFrame()\n\n for i, row in enumerate(ldamodel[corpus]):\n row = sorted(row, key=lambda x: (x[1]), reverse=True)\n\n for j, (topic_num, prop_topic) in enumerate(row):\n if j == 0:\n wp = ldamodel.show_topic(topic_num)\n topic_keywords = \"' \".join([word for word, prop in wp])\n doc_topics = doc_topics.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True)\n else:\n break\n doc_topics.columns = ['Dominant_Topic', 'Percent_Contrib', 'Topic_keywords']\n return doc_topics",
"def connect_topic_id_to_topics(model, representants, log_writer):\n # t = model.get_topics()\n topic_indexes = {}\n topics_of_index = {}\n confidence = []\n for key, value in representants.items():\n connection_results = {}\n for article in value:\n try:\n # get most possible index\n topic_index = max(model.analyse_text(article), key=lambda item: item[1])[0]\n except ValueError:\n print(\"No topic index returned continuing\") # TODO replace with if\n continue\n # add most possible index for this article to counter\n if topic_index not in connection_results:\n connection_results[topic_index] = 1\n else:\n connection_results[topic_index] += 1\n # find index that occured mostly\n print(connection_results)\n for tp_num, val in connection_results.items():\n confidence.append([key,tp_num,val/len(value)])\n confidence = sorted(confidence, key=operator.itemgetter(2),reverse=True)\n associated_indexes = []\n associated_topics = []\n for conf in confidence:\n if conf[1] in associated_indexes or conf[0] in associated_topics:\n continue\n associated_indexes.append(conf[1])\n associated_topics.append(conf[0])\n log_writer.add_log('Connecting topic {} to model index {} based on highest unused confidence of {}'.format(conf[0],conf[1],conf[2]))\n topic_indexes[conf[0]] = conf[1]\n\n for key, value in topic_indexes.items():\n topics_of_index[value] = [key]\n print(topic_indexes)\n print(topics_of_index)\n return topic_indexes, topics_of_index",
"def try_latent_topics_intro_model(k):\n highest_f1 = 0\n print \"start time: {}\".format(datetime.now())\n print \"using {} latent topics\".format(k)\n prep = DataPrep(filepath='/home/ubuntu/ca_bills_project/data/extra/intro_data_w_content_5_22.csv')\n prep.prepare(n_components=k, use_cached_tfidf='/home/ubuntu/ca_bills_project/data/extra/cached_tfidf_real_05-23-17-05-28.pkl')\n topic_features = [\"topic_\"+str(x) for x in range(k)]\n features = topic_features\n X_train, y_train = prep.subset(features)\n print \"regular data prep complete\"\n print topic_features\n\n\n rf = RandomForestClassifier()\n gb = GradientBoostingClassifier()\n\n mc = ModelChooser([rf, gb])\n mc.fit_predict(X_train, y_train)\n mc.print_results()\n\n for i, score in enumerate(mc.f1_scores):\n if score > highest_f1:\n highest_f1 = score\n best_n_latent_features = k\n if i == 0:\n best_model_type = \"Random Forest\"\n else:\n best_model_type = \"Gradient Booster\"\n\n\n print \"end time: {}\".format(datetime.now())\n print \"-\"*10\n results = \"f1 score was {} with {} latent features on {} model\".format(highest_f1, best_n_latent_features, best_model_type)\n print results\n return results",
"def __getitem__(self, doc):\n lda_model = ldamodel.LdaModel(\n num_topics=self.num_topics, alpha=self.alphas, id2word=self.id2word, dtype=np.float64)\n lda_model.topics = np.zeros((self.vocab_len, self.num_topics))\n ldapost = LdaPost(num_topics=self.num_topics, max_doc_len=len(doc), lda=lda_model, doc=doc)\n\n time_lhoods = []\n for time in range(self.num_time_slices):\n lda_model = self.make_lda_seq_slice(lda_model, time) # create lda_seq slice\n lhood = LdaPost.fit_lda_post(ldapost, 0, time, self)\n time_lhoods.append(lhood)\n\n doc_topic = ldapost.gamma / ldapost.gamma.sum()\n # should even the likelihoods be returned?\n return doc_topic",
"def post_process_result_of_lda_topic_model(lda_model, gensim_corpus,\n document_collection,\n document_collection_filtered,\n n_closest=25):\n # Prepare containers to store results\n # Container to keep the document topic matrix\n n_closest = - n_closest\n document_topic_matrix = []\n # Container to keep topics and the closest texts to each topic\n topic_closest_doc_with_topics_words = []\n # Container to keep topics\n all_topics = lda_model.show_topics(50)\n\n # Create an LDA corpus from the original gensim corpus\n lda_corpus = lda_model[gensim_corpus]\n\n # Iterate through the lda corpus and create the document topic matrix\n for i, documents in enumerate(lda_corpus):\n # Data returned is not proper numpy matrix\n document_topic_matrix.append(\n np.array([elements[1]for elements in documents]))\n\n # Create the proper numpy matrix\n document_topic_matrix = np.vstack(document_topic_matrix)\n\n # Find the closest texts to a given topic\n # Iterate through the transpose of the document topic matrix\n for i, element in enumerate(document_topic_matrix.T):\n # Identify the id of 15 closest texts of each topic\n closest = element.argsort(axis=0)[n_closest:][::-1]\n # Create a container to keep each text with the id above\n texts = []\n for element in closest:\n texts.append({'matched_text':\n document_collection_filtered[element],\n 'matched_text_words':\n document_collection[element]['match_word'],\n 'testimony_id': document_collection[element]\n ['testimony_id']})\n\n # Append them to container\n topic_closest_doc_with_topics_words.append({'texts': texts,\n 'topic_words':\n all_topics[i]})\n\n return {'topic_documents': topic_closest_doc_with_topics_words,\n 'document_topic_matrix': document_topic_matrix}",
"def test_topic_reduction(reduced_topics):\n model = BERTopic()\n nr_topics = reduced_topics + 2\n model.nr_topics = reduced_topics\n old_documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n model._update_topic_size(old_documents)\n model._extract_topics(old_documents.copy())\n old_freq = model.get_topic_freq()\n\n new_documents = model._reduce_topics(old_documents.copy())\n new_freq = model.get_topic_freq()\n\n assert old_freq.Count.sum() == new_freq.Count.sum()\n assert len(old_freq.Topic.unique()) == len(old_freq)\n assert len(new_freq.Topic.unique()) == len(new_freq)\n assert isinstance(model.mapped_topics, dict)\n assert not set(model.get_topic_freq().Topic).difference(set(new_documents.Topic))\n assert model.mapped_topics",
"def guess_topic(lda, query, features_vec, irrelevant, verbose=True):\n query_doc = []\n doc_topic = []\n topic_most_pr = None\n if isinstance(query,str):\n query = clean(query)\n query = n_grammize(query)\n for term in query:\n weight = set_weight(term, irrelevant)\n if term in features_vec:\n query_doc.append(weight * array(features_vec[term]))\n elif isinstance(query,tuple):\n if query in features_vec:\n weight = set_weight(query, irrelevant)\n query_doc.append(weight * array(features_vec[query]))\n elif isinstance(query,list):\n for term in query:\n weight = set_weight(term, irrelevant)\n if term in features_vec:\n query_doc.append(weight * array(features_vec[term]))\n X = array(query_doc)\n if len(X)==1:\n X = X.reshape(1,-1)\n if len(X)==0:\n return topic_most_pr\n doc_topic = lda.transform(X)\n sum_topics = numpy.zeros(len(doc_topic[0]))\n for i in range(len(doc_topic)):\n sum_topics = sum_topics + doc_topic[i]\n topic_most_pr = sum_topics.argmax()\n if verbose == True:\n if topic_most_pr in legend:\n return legend[topic_most_pr]\n else:\n return topic_most_pr\n else:\n return topic_most_pr",
"def grid_search_intro_model_with_latent_topics(k):\n if k == 100: # there exists a saved file already if using 100 latent topics\n prep = DataPrep(filepath='/home/ubuntu/ca_bills_project/data/extra/topic_intro_data_05-23-17-08-23.csv')\n prep.prepare()\n else:\n prep = DataPrep(filepath='/home/ubuntu/ca_bills_project/data/extra/intro_data_w_content_5_22.csv')\n prep.prepare(n_components=k, use_cached_tfidf='/home/ubuntu/ca_bills_project/data/extra/cached_tfidf_real_05-23-17-05-28.pkl', save=True)\n\n topic_features = [\"topic_\"+str(x) for x in range(k)]\n features = [u'days_since_start', u'session_type', u'party_ALL_DEM', u'party_ALL_REP',\n u'party_BOTH', u'party_COM', u'urgency_No', u'urgency_Yes',\n u'taxlevy_No', u'taxlevy_Yes']\n features += topic_features\n X_train, y_train = prep.subset(features)\n\n rf = RandomForestClassifier()\n gb = GradientBoostingClassifier()\n ada = AdaBoostClassifier()\n\n mc = ModelChooser([rf, gb, ada])\n\n tuning_params = [ {'max_features': [.1, .5, .7], 'max_depth': [5, 8, 10], 'n_estimators': [100000]},\n {'learning_rate': [.1, .05], 'max_depth': [2, 4], 'n_estimators': [100, 500]},\n {'learning_rate': [.1, .05], 'n_estimators': [100, 500]}]\n\n mc.grid_search(X_train, y_train, tuning_params)",
"def model_topics(df):\n\n data = df.text.values.tolist()\n data_words = list(sent_to_words(data))\n\n # Build the bigram and trigram models\n bigram = gensim.models.Phrases(data_words, min_count=5, threshold=100)\n trigram = gensim.models.Phrases(bigram[data_words], threshold=100) \n\n # Faster way to get a sentence clubbed as a trigram/bigram\n bigram_mod = gensim.models.phrases.Phraser(bigram)\n trigram_mod = gensim.models.phrases.Phraser(trigram)\n\n # Remove Stop Words\n data_words_nostops = remove_stopwords(data_words)\n\n # Form Bigrams\n data_words_bigrams = make_bigrams(data_words_nostops,bigram_mod)\n\n # Initialize spacy 'en' model, keeping only tagger component (for efficiency)\n nlp = spacy.load('en', disable=['parser', 'ner'])\n\n # Do lemmatization keeping only noun, adj, vb, adv\n data_lemmatized = lemmatization(data_words_bigrams, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])\n\n # Create Dictionary\n id2word = corpora.Dictionary(data_lemmatized)\n\n # Create Corpus\n texts = data_lemmatized\n\n # Term Document Frequency\n corpus = [id2word.doc2bow(text) for text in texts]\n\n # Perform Topic Modeling for number of topics ranging from 5 to 50 in steps of 5\n model_list, coherence_values = compute_coherence_values(dictionary=id2word, corpus=corpus, texts=data_lemmatized, start=5, limit=50, step=5)\n\n return model_list,coherence_values,corpus,id2word",
"def infertopics(self):\n\n # Iterate over nodes missing topic attribute (only occurs for new nodes)\n for uid in self.scan(attribute=\"updated\"):\n # Remove updated attribute\n self.removeattribute(uid, \"updated\")\n\n # Get list of neighboring nodes\n ids = self.edges(uid)\n\n # Infer topic\n topic = Counter(self.attribute(x, \"topic\") for x in ids).most_common(1)[0][0] if ids else None\n if topic:\n # Add id to topic list and set topic attribute\n self.topics[topic].append(uid)\n self.addattribute(uid, \"topic\", topic)\n\n # Set topic rank\n self.addattribute(uid, \"topicrank\", len(self.topics[topic]) - 1)\n\n # Infer category\n category = Counter(self.attribute(x, \"category\") for x in ids).most_common(1)[0][0]\n self.addattribute(uid, \"category\", category)",
"def show_topic_model_textually(seed_gensim_topic_model, seed_gensim_corpus,\n texts_to_analyze, num_topics):\n print(\"alpha =\", seed_gensim_topic_model.alpha)\n print(seed_gensim_topic_model)\n print(seed_gensim_topic_model.print_topics(num_topics))\n print()",
"def test_full_model(model, documents, request):\n topic_model = copy.deepcopy(request.getfixturevalue(model))\n if model == \"base_topic_model\":\n topic_model.save(\"model_dir\", serialization=\"pytorch\", save_ctfidf=True, save_embedding_model=\"sentence-transformers/all-MiniLM-L6-v2\")\n topic_model = BERTopic.load(\"model_dir\")\n topics = topic_model.topics_\n\n for topic in set(topics):\n words = topic_model.get_topic(topic)[:10]\n assert len(words) == 10\n\n for topic in topic_model.get_topic_freq().Topic:\n words = topic_model.get_topic(topic)[:10]\n assert len(words) == 10\n\n assert len(topic_model.get_topic_freq()) > 2\n assert len(topic_model.get_topics()) == len(topic_model.get_topic_freq())\n\n # Test extraction of document info\n document_info = topic_model.get_document_info(documents)\n assert len(document_info) == len(documents)\n\n # Test transform\n doc = \"This is a new document to predict.\"\n topics_test, probs_test = topic_model.transform([doc, doc])\n\n assert len(topics_test) == 2\n\n # Test topics over time\n timestamps = [i % 10 for i in range(len(documents))]\n topics_over_time = topic_model.topics_over_time(documents, timestamps)\n\n assert topics_over_time.Frequency.sum() == len(documents)\n assert len(topics_over_time.Topic.unique()) == len(set(topics))\n\n # Test hierarchical topics\n hier_topics = topic_model.hierarchical_topics(documents)\n\n assert len(hier_topics) > 0\n assert hier_topics.Parent_ID.astype(int).min() > max(topics)\n\n # Test creation of topic tree\n tree = topic_model.get_topic_tree(hier_topics, tight_layout=False)\n assert isinstance(tree, str)\n assert len(tree) > 10\n\n # Test find topic\n similar_topics, similarity = topic_model.find_topics(\"query\", top_n=2)\n assert len(similar_topics) == 2\n assert len(similarity) == 2\n assert max(similarity) <= 1\n\n # Test topic reduction\n nr_topics = len(set(topics))\n nr_topics = 2 if nr_topics < 2 else nr_topics - 1\n topic_model.reduce_topics(documents, nr_topics=nr_topics)\n\n assert len(topic_model.get_topic_freq()) == nr_topics\n assert len(topic_model.topics_) == len(topics)\n\n # Test update topics\n topic = topic_model.get_topic(1)[:10]\n vectorizer_model = topic_model.vectorizer_model\n topic_model.update_topics(documents, n_gram_range=(2, 2))\n\n updated_topic = topic_model.get_topic(1)[:10]\n\n topic_model.update_topics(documents, vectorizer_model=vectorizer_model)\n original_topic = topic_model.get_topic(1)[:10]\n\n assert topic != updated_topic\n if topic_model.representation_model is not None:\n assert topic != original_topic\n\n # Test updating topic labels\n topic_labels = topic_model.generate_topic_labels(nr_words=3, topic_prefix=False, word_length=10, separator=\", \")\n assert len(topic_labels) == len(set(topic_model.topics_))\n\n # Test setting topic labels\n topic_model.set_topic_labels(topic_labels)\n assert topic_model.custom_labels_ == topic_labels\n\n # Test merging topics\n freq = topic_model.get_topic_freq(0)\n topics_to_merge = [0, 1]\n topic_model.merge_topics(documents, topics_to_merge)\n assert freq < topic_model.get_topic_freq(0)\n\n # Test reduction of outliers\n if -1 in topics:\n new_topics = topic_model.reduce_outliers(documents, topics, threshold=0.0)\n nr_outliers_topic_model = sum([1 for topic in topic_model.topics_ if topic == -1])\n nr_outliers_new_topics = sum([1 for topic in new_topics if topic == -1])\n\n if topic_model._outliers == 1:\n assert nr_outliers_topic_model > nr_outliers_new_topics\n\n # # Save and load model\n # if 
topic_model.topic_embeddings_ is not None:\n # topic_model.save(\"model_dir\", serialization=\"pytorch\", save_ctfidf=True)\n # loaded_model = BERTopic.load(\"model_dir\")",
"def explore_topic(topic_number, topn=25, model=10):\n #\n if model==25:\n lda = LdaMulticore.load(joinp(pilot_path, 'lda_model_25'))\n topicname=topic_names_25[topic_number]\n gensimSTR=''\n elif model==15:\n lda = LdaMulticore.load(joinp(pilot_path, 'lda_model_15'))\n topicname=topic_names_15[topic_number]\n gensimSTR=''\n elif model==10:\n lda = LdaMulticore.load(joinp(pilot_path, 'lda_model_10'))\n topicname=topic_names_10[topic_number]\n gensimdic={0:9,1:8,2:6,3:7,4:3,5:10,6:5,7:1,8:2,9:4}\n gensimSTR=str(gensimdic[topic_number])\n \n \n # \n print(u'{:20} {}'.format(u'term', u'frequency') + u'\\n')\n \n dic={}\n j=0\n \n print('top 5 terms')\n for term, frequency in lda.show_topic(topic_number, topn):\n j=j+1\n if j<6:\n print (u'{:20} {:.3f}'.format(term, round(frequency, 3)))\n dic[term]=frequency\n dff=pd.DataFrame.from_dict(dic,orient='index')\n dff.columns=[''.join(['topic:',topicname,' (gensim topic:',gensimSTR,')'])] \n return(dff)\n ##",
"def fit(self, corpus, **kwargs):\n if not len(corpus.dictionary):\n return None\n self.reset_model(corpus)\n self.running = True\n self.update(corpus.ngrams_corpus, **kwargs)\n self.topic_names = ['Topic{} ({})'.format(i, ', '.join(words))\n for i, words in enumerate(self._topics_words(3), 1)]\n self.running = False",
"def __find_topics(self, concepts):\n\n # Set up\n found_topics = dict() # to store the matched topics\n explanation = dict()\n\n # finding matches\n for concept in concepts:\n evgrams = everygrams(concept.split(), 1, 3) # list of unigrams, bigrams, trigrams\n for grams in evgrams:\n gram = \"_\".join(grams)\n gram_without_underscore = \" \".join(grams)\n #### Finding similar words contained in the model\n\n list_of_matched_topics = []\n\n if self.fast_classification:\n list_of_matched_topics = self.__get_similar_words_from_cached_model(gram,grams)\n else:\n list_of_matched_topics = self.__get_similar_words_from_full_model(gram, grams)\n\n\n for topic_item in list_of_matched_topics:\n\n topic = topic_item[\"topic\"]\n str_sim = topic_item[\"sim_t\"]\n wet = topic_item[\"wet\"]\n sim = topic_item[\"sim_w\"]\n\n\n if str_sim >= self.min_similarity and topic in self.cso.topics_wu:\n\n\n if topic in found_topics:\n #tracking this match\n found_topics[topic][\"times\"] += 1\n\n found_topics[topic][\"gram_similarity\"].append(sim)\n\n #tracking the matched gram\n if gram in found_topics[topic][\"grams\"]:\n found_topics[topic][\"grams\"][gram] += 1\n else:\n found_topics[topic][\"grams\"][gram] = 1\n\n #tracking the most similar gram to the topic\n if str_sim > found_topics[topic][\"embedding_similarity\"]:\n found_topics[topic][\"embedding_similarity\"] = str_sim\n found_topics[topic][\"embedding_matched\"] = wet\n\n else:\n #creating new topic in the result set\n found_topics[topic] = {'grams': {gram:1},\n 'embedding_matched': wet,\n 'embedding_similarity': str_sim,\n 'gram_similarity':[sim],\n 'times': 1,\n 'topic':topic}\n\n\n\n if sim == 1:\n found_topics[topic][\"syntactic\"] = True\n\n\n\n primary_label_topic = self.cso.get_primary_label_wu(topic)\n if primary_label_topic not in explanation:\n explanation[primary_label_topic] = set()\n\n explanation[primary_label_topic].add(gram_without_underscore)\n\n return found_topics, explanation",
"def main(self, words_docs, cleaned_sentences, lang, model_dir, number_of_clusters, embedding_model, model_id):\n\t\ttry:\n\t\t\tif embedding_model == \"tfidf\": text_vector = self.create_tfidf_vectors(cleaned_sentences)\n\t\t\telif embedding_model == \"word2vec\": text_vector = self.create_w2v_vectors(words_docs)\n\t\t\tmodel, pred_dict = self.train_model(cleaned_sentences, text_vector, number_of_clusters, lang, model_id, model_dir)\n\t\t\tdf_dominant_topic = self.evaulate_clusters(pred_dict, model_dir)\n\n\t\texcept Exception as e:\n\t\t\tprint(\"\\n Error in main : \",e)\n\t\t\tprint(\"\\n Error details : \", traceback.format_exc())\n\n\t\treturn df_dominant_topic",
"def scrape_topic(self, topic, min_len=0, max_len=9999):\n search = sc.search_keyword(topic)\n keyword = next(search).fill()\n with open(\n 'loadings\\\\topic_papers\\\\{}.txt'.format(topic),\n 'a',\n encoding='utf-8'\n ) as file:\n for counter, pubblication in enumerate(keyword.publications):\n\n if len(pubblication.bib['title']) < min_len \\\n or len(pubblication.bib['title']) > max_len:\n continue\n file.write(pubblication.bib['title'])\n file.write('\\n')\n counter += 1\n if counter > self.hard_limit:\n break",
"def getRecommendedTopics(request, limit):\n if request.method == 'GET':\n user = request.user;\n scores = {};\n for topic in Topic.objects.all():\n\n neighbor_visits = Visit.objects.filter(user=user, topic__relates_to__topic_to=topic)\n\n neighbor_visits_count = len(neighbor_visits);\n if neighbor_visits_count > 0:\n last_neighbor_visit = neighbor_visits.order_by('-visit_date')[0].visit_date;\n else:\n last_neighbor_visit = topic.created_at\n\n relevance_score = 5*neighbor_visits_count - (timezone.now()-last_neighbor_visit).total_seconds()/3600\n recommendation = relevance_score + topic.hotness\n\n scores[topic] = recommendation;\n\n sorted_scores = sorted(scores.items(), key=operator.itemgetter(1), reverse=True)[:int(limit)]\n recommended_topics = [key for key, value in sorted_scores]\n #print(recommended_topics)\n serializer = TopicNestedSerializer(recommended_topics, many=True);\n return Response(serializer.data)",
"def compute_topic_model(year_from=1900, year_to=2020, venues_filter=None, n_topics=100, use_lemmer=True,\n min_df=2, max_df=0.8):\n start = time.time()\n out_fileprefix = get_output_fileprefix(year_from, year_to, venues_filter, n_topics)\n\n corpus, tf_features_names = get_corpus_gensim_for_learning(year_from, year_to, venues_filter, use_lemmer, min_df, max_df)\n execute_lda_gensim(corpus, tf_features_names, n_topics, out_fileprefix)\n\n end = time.time()\n return year_from, year_to, n_topics, (end - start)",
"def label(self, input_doc=None):\n if input_doc == None:\n input_doc = self.stemmed_corpus\n X = self.vect.transform(input_doc)\n new_corpus = gensim.matutils.Sparse2Corpus(X, documents_columns=False)\n topics = self.ldamodel.get_document_topics(new_corpus)\n max_topic = []\n for tpc in list(topics):\n # get most relevant topic (tuple: 0 = topic, 1 = relevance distribution)\n max_topic.append(max(tpc,key=lambda item:item[1])[0]) \n return max_topic",
"def __rank_topics(self, found_topics, explanation):\n max_value = 0\n scores = []\n for _,topic in found_topics.items():\n topic[\"score\"] = topic[\"times\"] * len(topic['grams'].keys())\n scores.append(topic[\"score\"])\n if topic[\"score\"] > max_value:\n max_value = topic[\"score\"]\n\n for _,topic in found_topics.items():\n if \"syntactic\" in topic:\n topic[\"score\"] = max_value\n\n\n\n\n # Selection of unique topics\n unique_topics = {}\n for t_p,topic in found_topics.items():\n prim_label = self.cso.get_primary_label_wu(t_p)\n if prim_label in unique_topics:\n if unique_topics[prim_label] < topic[\"score\"]:\n unique_topics[prim_label] = topic[\"score\"]\n else:\n unique_topics[prim_label] = topic[\"score\"]\n\n # ranking topics by their score. High-scored topics go on top\n sort_t = sorted(unique_topics.items(), key=lambda v: v[1], reverse=True)\n #sort_t = sorted(found_topics.items(), key=lambda k: k[1]['score'], reverse=True)\n\n\n # perform\n vals = []\n for t_p in sort_t:\n vals.append(t_p[1]) #in 0, there is the topic, in 1 there is the info\n\n\n #### suppressing some warnings that can be raised by the kneed library\n warnings.filterwarnings(\"ignore\")\n try:\n x_vals = range(1,len(vals)+1)\n t_kn = KneeLocator(x_vals, vals, direction='decreasing')\n if t_kn.knee is None:\n #print(\"I performed a different identification of knee\")\n t_kn = KneeLocator(x_vals, vals, curve='convex', direction='decreasing')\n except ValueError:\n pass\n\n ##################### Pruning\n\n try:\n knee = int(t_kn.knee)\n except TypeError:\n knee = 0\n except UnboundLocalError:\n knee = 0\n\n if knee > 5:\n try:\n knee += 0\n except TypeError:\n print(\"ERROR: \",t_kn.knee,\" \",knee, \" \", len(sort_t))\n\n else:\n try:\n if sort_t[0][1] == sort_t[4][1]:\n top = sort_t[0][1]\n test_topics = [item[1] for item in sort_t if item[1]==top]\n knee = len(test_topics)\n\n else:\n knee = 5\n except IndexError:\n knee = len(sort_t)\n\n final_topics = []\n final_topics = [self.cso.get_topic_wu(sort_t[i][0]) for i in range(0,knee)]\n self.reset_explanation()\n self.explanation = {self.cso.topics_wu[sort_t[i][0]]: explanation[sort_t[i][0]] for i in range(0,knee)}\n\n return final_topics",
"def get_last_topic(self):\r\n try:\r\n session = self.persistence.get_session()\r\n topic = session.query(Topic).order_by(desc(Topic.id)).limit(1).one()\r\n return topic\r\n except NoResultFound:\r\n raise TopicNotFound",
"def get_document_distribution_over_topic(self):\n return self.document_distribution_over_topic",
"def __getitem__(self, bow, iterations=100):\n is_corpus, corpus = utils.is_corpus(bow)\n if not is_corpus:\n # query is a single document => make a corpus out of it\n bow = [bow]\n\n self.convert_input(bow, infer=True)\n cmd = \\\n self.mallet_path + ' infer-topics --input %s --inferencer %s ' \\\n '--output-doc-topics %s --num-iterations %s --doc-topics-threshold %s --random-seed %s'\n cmd = cmd % (\n self.fcorpusmallet() + '.infer', self.finferencer(),\n self.fdoctopics() + '.infer', iterations, self.topic_threshold, str(self.random_seed)\n )\n logger.info(\"inferring topics with MALLET LDA '%s'\", cmd)\n check_output(args=cmd, shell=True)\n result = list(self.read_doctopics(self.fdoctopics() + '.infer'))\n return result if is_corpus else result[0]",
"def test_get_topics(self):\n\n for m in self.models:\n\n topics = m.topics\n self.assertTrue(isinstance(topics, turicreate.SFrame))\n self.assertEqual(topics.num_rows(), 25)\n self.assertEqual(topics.num_columns(), 2)\n z = m.topics[\"topic_probabilities\"]\n for k in range(m.num_topics):\n self.assertTrue(\n abs(sum(z.vector_slice(k)) - 1) < DELTA,\n \"Returned probabilities do not sum to 1.\",\n )\n\n # Make sure returned object is an SFrame of the right size\n topics = m.get_topics()\n self.assertTrue(isinstance(topics, turicreate.SFrame))\n self.assertTrue(\n topics.num_columns() == 3,\n \"Returned SFrame should have a topic, word, and probs.\",\n )\n\n # Make sure that requesting a single topic returns only that topic\n num_words = 8\n topics = m.get_topics([5], num_words=num_words)\n self.assertTrue(\n all(topics[\"topic\"] == 5), \"Returned topics do not have the right id.\"\n )\n self.assertEqual(topics.num_rows(), num_words)\n topics = m.get_topics([2, 4], num_words=num_words)\n self.assertEqual(set(list(topics[\"topic\"])), set([2, 4]))\n self.assertEqual(topics.num_rows(), num_words + num_words)\n\n # Make sure the cumulative probability of the returned words is\n # is less than the cutoff we provided.\n # A cutoff of 1.0 should return num_words for every topic.\n cutoff = 1.0\n topics = m.get_topics(cdf_cutoff=cutoff, num_words=len(m.vocabulary))\n totals = topics.groupby(\n \"topic\", {\"total_score\": turicreate.aggregate.SUM(\"score\")}\n )\n self.assertTrue(\n all(totals[\"total_score\"] <= (cutoff + DELTA)),\n \"More words were returned than expected for this cutoff.\",\n )\n\n # Make sure we raise errors for bad input\n with self.assertRaises(ValueError):\n m.get_topics([-1])\n with self.assertRaises(ValueError):\n m.get_topics([10000])\n with self.assertRaises(ToolkitError):\n topics = m.get_topics(output_type=\"other\")\n\n # Test getting topic_words\n topic_words = m.get_topics(output_type=\"topic_words\", num_words=5)\n self.assertEqual(type(topic_words), turicreate.SFrame)\n\n # Test words are sorted correctly for the first topic\n # TODO: Make this more deterministic.\n\n # topic_probs = m.get_topics(num_words=5)\n # expected = [w for w in topic_probs['word'][:5]]\n # observed = topic_words['words'][0]\n # self.assertEqual(observed[0], expected[0])"
] | [
"0.6346439",
"0.6285733",
"0.6115459",
"0.59040916",
"0.5758389",
"0.5743669",
"0.5686132",
"0.56583214",
"0.56441104",
"0.5585149",
"0.55538833",
"0.5521366",
"0.55110216",
"0.55009013",
"0.54710156",
"0.5399704",
"0.5399425",
"0.5389501",
"0.5375041",
"0.53640926",
"0.53452903",
"0.530153",
"0.52948546",
"0.5284834",
"0.5272713",
"0.5270984",
"0.5266709",
"0.5252943",
"0.52102405",
"0.5202953"
] | 0.6996216 | 0 |
Remove the postwise.topic_distro and postwise.topic_assignment fields from all documents in the subreddit | def wipe_all_topics(self):
# doc_count = self.posts_read.find({'subreddit':self.subreddit, 'postwise.topic_assignment':{'$exists':True}}).count()
doc_count = self.posts_write.update({'subreddit':self.subreddit, 'postwise.topic_assignment':{'$exists':True}},
{'$unset':{'postwise.topic_distro':True,'postwise.topic_assignment':True}}, multi=True)
print 'wiped topics from %i documents' % doc_count['nModified'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _clear_document(self, docid):\n doc = self.get_document(docid)\n for term, count in doc.get_terms():\n term_entry = self.sql_session.query(Term).get(term)\n term_entry.count -= abs(count)\n term_entry.distinct_docs -= 1\n any_term = self.sql_session.query(Term).get(ANY)\n any_term.distinct_docs -= 1\n doc.delete()",
"def __delete__(self, instance):\n instance.doc.pop(self.slug, None)",
"def purge_posts(app, env, docname):\n\n if hasattr(env, \"ablog_posts\"):\n env.ablog_posts.pop(docname, None)\n filename = os.path.split(docname)[1]\n env.domains[\"std\"].data[\"labels\"].pop(filename, None)\n env.domains[\"std\"].data[\"anonlabels\"].pop(filename, None)",
"def remove_redundant(post_tree: dict) -> dict:\n\n for comment in post_tree[\"comments\"]:\n if \"body\" not in comment:\n post_tree[\"comments\"].remove(comment)\n\n for k in list(post_tree.keys()):\n if k not in [\"title\", \"selftext\", \"id\", \"comments\", \"author\"]:\n post_tree.pop(k)\n\n for comment in post_tree[\"comments\"]:\n\n if (\"replies\" in comment and comment[\"replies\"] != \"\"\n and comment[\"replies\"] is not None):\n comment[\"replies\"] = comment[\"replies\"][\"data\"][\"children\"]\n else:\n comment[\"replies\"] = []\n\n for k in list(comment.keys()):\n if k not in [\"replies\", \"body\", \"id\", \"parent_id\", \"author\"]:\n comment.pop(k)\n\n post_tree[\"comments\"] = {\n comment[\"id\"]: comment\n for comment in post_tree[\"comments\"]\n }\n\n return post_tree",
"def clear_subjects(db):\n\t\n\tfor p_hash, p in db.all_papers.items():\n\t\tif p.subject:\n\t\t\tp.subject = None",
"def clean_duplicate_documents(self):\n title_k = lambda x: x.title\n for k, g in groupby(sorted(self.annotation_documents, key=title_k), title_k):\n g = list(g)\n if len(g) > 1:\n # check first if one is in test set\n to_remove = [x for x in g if x not in self.test]\n if (\n len(to_remove) > 1\n ): # if test is not matched, make subselection based on annotation unit count\n select_k = lambda x: (\n len(x.events) + len(x.sentiment_expressions),\n x.annotator_id != \"gilles\",\n )\n to_remove.sort(key=select_k, reverse=True)\n to_remove = to_remove[1:]\n for docrm in to_remove:\n self.annotation_documents.remove(docrm)\n if docrm in self.dev:\n self.dev.remove(docrm)\n elif docrm in self.test:\n self.test.remove(docrm)\n print(f\"Duplicate doc removed: {docrm}\")",
"def remove_words_and_ngrams(self, document):\n for w in self.words_and_ngrams_exceptions:\n document = re.sub(w, '', document)\n return document",
"async def remove_doc(self, *args, **kwargs):\n pass",
"def dup_remove(doc):\n paragraphs_his = {}\n del_ids = []\n para_id = None\n if 'most_related_para' in doc:\n para_id = doc['most_related_para']\n doc['paragraphs_length'] = []\n for p_idx, (segmented_paragraph, paragraph_score) in \\\n enumerate(zip(doc[\"segmented_paragraphs\"], doc[\"segmented_paragraphs_scores\"])):\n doc['paragraphs_length'].append(len(segmented_paragraph))\n paragraph = ''.join(segmented_paragraph)\n if paragraph in paragraphs_his:\n del_ids.append(p_idx)\n if p_idx == para_id:\n para_id = paragraphs_his[paragraph]\n continue\n paragraphs_his[paragraph] = p_idx\n # delete\n prev_del_num = 0\n del_num = 0\n for p_idx in del_ids:\n if p_idx < para_id: \n prev_del_num += 1\n del doc[\"segmented_paragraphs\"][p_idx - del_num]\n del doc[\"segmented_paragraphs_scores\"][p_idx - del_num]\n del doc['paragraphs_length'][p_idx - del_num]\n del_num += 1\n if len(del_ids) != 0:\n if 'most_related_para' in doc:\n doc['most_related_para'] = para_id - prev_del_num\n doc['paragraphs'] = []\n for segmented_para in doc[\"segmented_paragraphs\"]:\n paragraph = ''.join(segmented_para)\n doc['paragraphs'].append(paragraph)\n return True\n else:\n return False",
"def _clean_doc(doc: Dict) -> Dict:\n doc['label'] = doc['labels'][config.LANG]['value']\n aliases = doc['aliases'][config.LANG] if config.LANG in doc['aliases'] else []\n\n doc['aliases'] = [alias['value'] for alias in aliases]\n\n for key in DOC_CLEAN_KEYS:\n try:\n del doc[key]\n except:\n continue\n return doc",
"def userproject_post_delete(sender, instance, **kwargs):\n instance.document.delete(False)",
"def delete_topic():\n return dict()",
"def deleteDistortionKeywords(hdr):\n # We need to use '.pop' to guard against the possibility, however remote,\n # that the keyword has already been removed before calling this function.\n for kw in DIST_KWS:\n hdr.pop(kw, None)\n\n # This can use 'del' since it will work even if the keywords\n # are missing altogether since the multi_kw uses wild-cards\n for multi_kw in DIST_MULTI_KWS:\n del hdr[multi_kw]",
"async def reddit_remove(self, ctx, subreddit : SubredditConverter, post_type : EnumConverter(Subreddit.PostType) = Subreddit.PostType.hot):\n Subreddit.delete().where(Subreddit.channel_id == ctx.channel.id).where(Subreddit.subreddit == subreddit).execute()\n await ctx.success()",
"def clear_doc():\n for obj in DOC.Objects:\n DOC.removeObject(obj.Name)",
"def full_reset(self):\n for docid in self.iter_docids():\n self.delete(docid)\n self.client.delete(self.dbprefix + 'schema')\n self.client.delete(self.dbprefix + 'docs')\n self.client.delete(self.dbprefix + 'nextid')",
"def cleartopics(self):\n\n # Clear previous topics, if any\n if self.topics:\n for uid in self.scan():\n self.removeattribute(uid, \"topic\")\n self.removeattribute(uid, \"topicrank\")\n\n if self.categories:\n self.removeattribute(uid, \"category\")\n\n self.topics, self.categories = None, None",
"def delete_relatives(self):\n category_ratings = list(self.category_ratings.all())\n self.category_ratings.clear()\n for category_rating in category_ratings:\n if category_rating.isOrphaned():\n category_rating.delete()\n\n word_counts = list(self.word_counts.all())\n self.word_counts.clear()\n for word_count in word_counts:\n if word_count.isOrphaned():\n word_count.delete()",
"def pop_non_relevant_vuln_fields(data: Dict):\n keys_to_keep = [\n \"title\",\n \"description\",\n \"content_type\",\n \"published_at\",\n \"references\",\n \"severity\",\n \"solutions\",\n \"alternate_ids\",\n ]\n for key in list(data):\n if key not in keys_to_keep:\n data.pop(key)",
"def clean_docs(self, docs):\n cleaned = [self.cleaning(doc) for doc in docs]\n print(cleaned[0])\n return cleaned",
"def save_doc_topics(self, topic_modeler, find_query_mixin={}, topic_id_namer=str):\n nmf = topic_modeler.nmf\n vectorizer = topic_modeler.vectorizer\n # only update docs that are the current subreddit,\n # and have tokens (via process_text.py)\n find_query = {'subreddit': self.subreddit, 'postwise.tokens':{'$exists':True}}\n find_query.update(find_query_mixin)\n\n doc_count = 0\n for doc in self.posts_read.find(find_query):\n text_body = doc['postwise']['text']\n\n # Make a dict topic_id:topic_prob, where topic_id is determined by topic_id_namer function\n topic_distros = nmf.transform(vectorizer.transform([text_body]))[0]\n topic_dict = {topic_id_namer(topic_id):prob for topic_id, prob in enumerate(topic_distros)}\n\n # assign each doc to its most probable topic. In case of a tie, chose a random one.\n # buffer_val allows small variation between two topics to count as a tie,\n # eg 0.0323423 is close enough to 0.0323487\n #\n # individal prob values will shrink as number of topics grow,\n # so the buffer should also shrink as number of topics grow.\n buffer_val = 0.001/(len(topic_distros))\n strongest_topics = [topic_id for topic_id, prob in topic_dict.items()\n if (max(topic_distros) - prob < buffer_val)]\n\n topic_assignment = random.choice(strongest_topics)\n\n # DEBUG: introspect the docs with multiple topics\n # if len(strongest_topics) != 1:\n # print 'DEBUG: Got ambigious topic, choosing randomly.'\n # print buffer_val\n # print topic_distros\n # print strongest_topics\n # print topic_assignment\n\n # self.posts_write.update({'_id':doc['_id']},{'$set':{'postwise.topic_distro':topic_dict, 'postwise.topic_assignment':{'topic':topic_assignment,'prob':topic_dict[topic_assignment]}}}, upsert=True)\n #\n # ^ actually, don't persist the topic_distro. Just the assignment.\n self.posts_write.update({'_id':doc['_id']},{'$set':{\n 'postwise.topic_assignment':{\n 'topic':topic_assignment,'prob':topic_dict[topic_assignment]}}}, upsert=True)\n\n doc_count += 1\n print 'Saved topic distros for %i documents' % doc_count",
"def remove_topics(self, project: str, *topics: str):\n assert self.exists(project), f'Project {project} inesistente'\n\n return self.collection.find_one_and_update(\n {\n 'url': project\n },\n {\n '$pull': {\n 'topics': {\n '$in': topics,\n }\n }\n }\n )",
"def cleanup(self):\n # Removing the ROS system wide advert about which topic are interfaced with this process\n # TODO : lock this for concurrent access\n if_topics = rospy.get_param('~' + TopicBack.IF_TOPIC_PARAM, [])\n if_topics.remove(self.fullname)\n rospy.set_param('~' + TopicBack.IF_TOPIC_PARAM, if_topics)\n\n # cleanup pub and sub, so we can go through another create / remove cycle properly\n self._remove_pub(self.pub)\n self._remove_sub(self.sub)",
"def clean_subreddit(filename):\n\n # Get name for processed file\n regex = r\"([^\\/]+)(?=\\-all)\"\n matches = re.search(regex, subreddit_folder)\n new_file = matches.group(1)\n\n # Create list of columns to keep\n keep_cols = ['id', 'created_utc', 'author', 'title',\\\n 'score', 'num_comments', 'subreddit', 'link_flair_text']\n\n keep_cols_text = ['id', 'created_utc', 'author', 'selftext']\n\n # Create file name\n processedfile_csv = \"data/processed/submissions/\" + new_file + \\\n \"-metadata\" + \".csv\"\n\n processed_textfile_csv = \"data/processed/submissions/\" + new_file + \\\n \"-text\" + \".csv\"\n\n # Create empty data frame\n df_keep = pd.DataFrame()\n df_keep_text = pd.DataFrame()\n\n # Read in json file\n try:\n data = pd.read_json(filename)\n\n # ValueError: Trailing data thrown if file is pretty indented\n except ValueError:\n data = pd.read_json(filename, lines = True)\n\n try:\n df_keep = df_keep.append(data[keep_cols])\n except KeyError:\n keep_cols = ['id', 'created_utc', 'author', 'title',\\\n 'score', 'num_comments', 'subreddit']\n df_keep = df_keep.append(data[keep_cols])\n\n try:\n df_keep_text = df_keep_text.append(data[keep_cols_text])\n except KeyError:\n keep_cols_text = ['id', 'created_utc', 'author']\n df_keep_text = df_keep_text.append(data[keep_cols_text])\n\n\n # Change date format\n ## For metadata\n df_keep['datetime_dv'] = pd.to_datetime(df_keep['created_utc'], unit = 's')# dv = derived\n df_keep['date_dv'] = df_keep['datetime_dv'].dt.date\n\n # For text\n df_keep_text['datetime_dv'] = pd.to_datetime(df_keep_text['created_utc'], unit = 's')# dv = derived\n df_keep_text['date_dv'] = df_keep_text['datetime_dv'].dt.date\n\n\n ##### Delimit by date #####\n # Create mask of time slot\n mask = (df_keep['date_dv'] >= start) & (df_keep['date_dv'] <= end) # inclusive on either end\n mask_text = (df_keep_text['date_dv'] >= start) & (df_keep_text['date_dv'] <= end)\n\n # Only keep data within date frame\n df_keep = df_keep.loc[mask]\n df_keep_text = df_keep_text.loc[mask_text]\n ############################\n\n\n # Save to json\n df_keep_text.to_csv(processed_textfile_csv, mode = \"w\")\n df_keep.to_csv(processedfile_csv, mode = \"w\") # mode= w will overwrite previous file\n print(len(df_keep_text.index))\n print(processed_textfile_csv)\n\n\n data = [] # force empty",
"def _delete_volatile_keys(self, solr_dict):\n\n def delete(del_solr_dict, path_list):\n k = path_list[0]\n if k in del_solr_dict:\n if len(path_list) > 1:\n delete(del_solr_dict[k], path_list[1:])\n else:\n del del_solr_dict[k]\n\n delete(solr_dict, ['response', 'maxScore'])\n delete(solr_dict, ['responseHeader', 'QTime'])",
"def clean_docs(self,docs):\n\n # Remove numbers, but not words that contain numbers.\n docs = [[token for token in doc if not token.isnumeric()] for doc in docs]\n\n # Remove words that are only one character.\n docs = [[token for token in doc if len(token) > 1 and token not in stop_words] for doc in docs]\n\n # lemmatizer = WordNetLemmatizer()\n # docs = [[lemmatizer.lemmatize(token) for token in doc] for doc in docs]\n\n # Add bigrams and trigrams to docs (only ones that appear 20 times or more).\n bigram = Phrases(docs, min_count=20)\n for idx in range(len(docs)):\n for token in bigram[docs[idx]]:\n if '_' in token:\n # Token is a bigram, add to document.\n docs[idx].append(token)\n\n # Create a dictionary representation of the documents.\n dictionary = Dictionary(docs)\n\n # Filter out words that occur less than 20 documents, or more than 50% of the documents.\n dictionary.filter_extremes(no_below=20, no_above=0.5)\n\n # Bag-of-words representation of the documents.\n corpus = [dictionary.doc2bow(doc) for doc in docs]\n\n return docs,dictionary,corpus",
"def test_reverse_delete_rule_pull(self):\n\n class BlogPost(Document):\n content = StringField()\n authors = ListField(ReferenceField(self.Person, reverse_delete_rule=PULL))\n\n BlogPost.drop_collection()\n self.Person.drop_collection()\n\n me = self.Person(name=\"Test User\")\n me.save()\n\n someoneelse = self.Person(name=\"Some-one Else\")\n someoneelse.save()\n\n post = BlogPost(content=\"Watching TV\", authors=[me, someoneelse])\n post.save()\n\n another = BlogPost(content=\"Chilling Out\", authors=[someoneelse])\n another.save()\n\n someoneelse.delete()\n post.reload()\n another.reload()\n\n assert post.authors == [me]\n assert another.authors == []",
"def delete_identifying_fields(self, view):\n\t\tassert view.upper()=='TRAIN' or view.upper()=='TEST' # ensures we perform this only if view is train or test\n\t\t\n\t\tself.prDoc['about']['problemName']='NULL'\n\t\ttry:\n\t\t\tdel self.prDoc['about']['problemDescription']\n\t\texcept KeyError:\n\t\t\tpass\n\t\t\n\t\t# save datasetDoc.json file\n\t\twith open(os.path.join(self.prHome, 'problemDoc.json'), 'w') as fp:\n\t\t\tjson.dump(self.prDoc, fp, indent=2, sort_keys=False)",
"def remove_person(self, document):\n del self.__people[document]",
"def clean_docs(c):\n c.run(f\"rm -fr {DOCS_BUILD_DIR}\")"
] | [
"0.64760447",
"0.5975757",
"0.58964",
"0.5851747",
"0.5772015",
"0.56319493",
"0.55570906",
"0.5484582",
"0.5437361",
"0.54371744",
"0.5370441",
"0.5353809",
"0.53488874",
"0.5298896",
"0.5295336",
"0.5265782",
"0.5254592",
"0.52453",
"0.52275765",
"0.5221391",
"0.5215771",
"0.5212126",
"0.5206381",
"0.519359",
"0.5178462",
"0.5147991",
"0.5138986",
"0.5134768",
"0.5117547",
"0.51073444"
] | 0.6893495 | 0 |
Yields each individual comment in a post. | def each_comment_from_post(post):
# first yield the post text body, if any
if post['text']:
yield post['text']
# then yield each comment
for comment in post['comments']:
yield comment['text'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def iterateComments(db, post_id):\n c=db.cursor()\n c.execute(\"\"\"SELECT * FROM comments WHERE post_id=%d\"\"\" % post_id)\n for comment in c.fetchall():\n yield Comment(answer)\n c.close()",
"def comments(\n self, **stream_options: Any\n ) -> Generator[praw.models.Comment, None, None]:\n return stream_generator(self.subreddit.comments, **stream_options)",
"def get_comments(self):\n\t\tself.comments = graph.get_connections(post['id'], 'comments')",
"def get_post_comments(post, user_agent=default_user_agent):\n post_permalink = post['permalink']\n\n response_data = requests.get(post_permalink, headers = {'User-agent': user_agent})\n post_data = response_data.json()[1]\n\n # right now this gets the title, eventually convert to unique id for each title\n post_id = post['post_id']\n\n return get_post_comments_recur(post_data, [], -1, post_id)",
"def comments(self, q=None, sort=None):\n params = {}\n if sort is not None:\n params[\"sort\"] = sort\n if q is not None:\n params[\"q\"] = q\n for comment in self._get_paged(\"comments\", params=params):\n yield Comment(comment, **self._new_session_args)",
"def make_comments(post, comments):\n for comment in comments:\n try:\n com = RedditComment(reddit_post=post, **comment)\n com.save()\n except Exception as ex:\n print 'comment could not be created'\n print ex",
"def render_comments(self, post, comment_to_edit=None):\n rendered_comments = \"\"\n for comment in post.comments:\n if (comment_to_edit and\n comment.get_id() == comment_to_edit.get_id()):\n rendered_comments += self.render_str(\n \"blog/editcomment.html\", comment=comment_to_edit)\n else:\n rendered_comments += self.render_str(\n \"blog/singlecomment.html\", p=post, comment=comment)\n return rendered_comments",
"def post_process_post(self, post):\r\n post.article = self.rewrite_ob_urls(post.article)\r\n post._commit()\r\n \r\n comments = Comment._query(Comment.c.link_id == post._id, data = True)\r\n for comment in comments:\r\n comment.body = self.rewrite_ob_urls(comment.body)\r\n comment._commit()",
"def get_comments(id_post):\n return Comms.objects.filter(post__id=id_post)",
"def commentList(post):\n comments = Comment.objects.all().filter(post=post).order_by('-published')\n remote_comments = RemoteComment.objects.all().filter(post=post).order_by('published')\n comment_list = list()\n\n if comments:\n for comment in comments:\n comment_dict = dict()\n comment_dict['author'] = addAuthor(comment.author)\n comment_dict['comment'] = comment.comment\n comment_dict['contentType'] = comment.contentType\n comment_dict['published'] = comment.published\n comment_dict['id'] = comment.id\n comment_list.append(comment_dict)\n if remote_comments:\n for remote in remote_comments:\n remote_dict = dict()\n server = remote.server\n r = requests.get(remote.author, auth=(server.username, server.password))\n if r.status_code == 200:\n author = remoteAddAuthor(r.json())\n remote_dict['author'] = author\n remote_dict['comment'] = remote.comment\n remote_dict['contentType'] = remote.contentType\n remote_dict['published'] = remote.published\n remote_dict['id'] = remote.id\n comment_list.append(remote_dict)\n else:\n continue\n\n comment_list = sorted(comment_list, key=lambda k: k['published'], reverse=True)\n\n return comment_list",
"def comments(self, limit=100, all=False):\n source, edge = self.id, \"comments\"\n return lazygen(Comment, source, edge,\n limit=limit, get_all=all)",
"def do_comment(self, data={}):\n\n try:\n comment = data['comment'] if 'comment' in data else ''\n post_type = data['post_type'] if 'post_type' in data else ''\n post_id = int(data['post_id']) if 'post_id' in data else ''\n\n if not comment or not post_type or not post_id:\n raise Exception('Invalid parameter')\n\n submit_comment_url = BASE_URL + 'post_comments/'\n response = self.request('POST', submit_comment_url, params={\n 'comment': comment, 'post_type': post_type, 'post_id': post_id\n })\n response = response.json()\n output = []\n for item in response:\n output.append(self._convert_comment(item))\n return output\n except Exception as e:\n Utils.log(traceback.format_exc())\n Utils.error(e.args[0])",
"def handle_comments(self):\r\n comments = Comment.objects.all()\r\n for c in comments:\r\n new = ThreadedComment(\r\n content_type = c.content_type,\r\n object_id = c.object_id,\r\n comment = c.comment,\r\n user = c.user,\r\n date_submitted = c.submit_date,\r\n date_modified = c.submit_date,\r\n date_approved = c.submit_date,\r\n is_public = c.is_public,\r\n ip_address = c.ip_address,\r\n is_approved = not c.is_removed\r\n )\r\n new.save()",
"def get_post_comments_recur(comment, comments, parent_comment_id, parent_post_id):\n if 'data' in comment:\n comment_data = comment['data']\n\n new_comment = None\n\n # a new comment exists at this layer, add it to the total list of comments\n if 'body' in comment_data:\n new_comment = {\n \"score\": comment_data['score'],\n \"body\": comment_data['body'],\n \"subreddit\": comment_data['subreddit'],\n \"author\": comment_data['author'],\n \"parent_comment_id\": parent_comment_id,\n \"parent_post_id\": parent_post_id,\n \"created\": comment_data['created'],\n \"comment_id\": comment_data['id']\n }\n comments.append(new_comment)\n\n next_parent_comment_id = parent_comment_id if new_comment is None else new_comment['comment_id']\n\n # recurse on children\n if 'children' in comment_data:\n for child in comment_data['children']:\n comments = get_post_comments_recur(child, comments, next_parent_comment_id, parent_post_id)\n\n # recurse on replies\n if 'replies' in comment_data:\n comments = get_post_comments_recur(comment_data['replies'], comments, next_parent_comment_id, parent_post_id)\n\n return comments",
"def parse_comment(comment, postid):\n urls = get_links_from_body(comment.body)\n if urls:\n # Only insert comment into DB if it contains a link\n comid_db = db.insert('Comments',\n (None,\n postid,\n comment.id,\n comment.author,\n comment.body,\n comment.upvotes,\n comment.downvotes,\n comment.created_utc))\n for url in urls:\n parse_url(url, postid=postid, commentid=comid_db)\n # Recurse over child comments\n for child in comment.children:\n parse_comment(child, postid)",
"def comments(self, request, pk=None):\n post = self.get_object()\n comments = Comment.objects.filter(post=post).order_by('created_at')\n serializer = PostCommentsSerializer(comments, many=True)\n return Response(serializer.data, status.HTTP_200_OK)",
"def comments(post_id: str, max: int = None):\n for comment in client.comments(post_id=post_id, max=max):\n print(json.dumps(comment))",
"def test_print_comments():\n flat_comments, tree_comments = get_comments_from_submission_id('jrjn70')\n print(len(flat_comments))\n print(len(tree_comments))\n\n print('flat comments')\n for c in flat_comments[0:5]:\n comment_instance = REDDIT.comment(c)\n print(comment_instance.body)\n\n print()\n print('tree comments')\n for c in tree_comments[0:5]:\n comment_instance = REDDIT.comment(c)\n print(comment_instance.body)",
"async def scrape_comments(self):\n\n subreddit_origin = await self.reddit.subreddit(self.subreddit)\n\n comment_count = 0\n async for comment in subreddit_origin.comments(limit=self.limit):\n if self.memory.contains(comment.id):\n continue\n\n self.memory.add(comment.id)\n\n # Parse Comment\n comment = self.parse_comment(comment)\n\n # Save in Pub/Sub\n if self.enable_publish:\n self.publish(comment)\n\n comment_count += 1\n\n return comment_count",
"def all_comments_from_post(post, prepend_title=True):\n if 'comments' in post:\n comments = [comment['text'] for comment in post['comments']]\n post_text = post['text']\n title_text = post['title']\n if post_text:\n # preprend post text body if it exists\n comments = [post_text + '\\n'] + comments\n if prepend_title and title_text:\n # optionally prepend title\n comments = [title_text] + comments\n return '\\n'.join(comments).strip()\n else:\n return ''",
"def get_comments(self, post_id, post_type):\n\n try:\n if not post_id or not post_type:\n raise Exception('Invalid parameter')\n if post_type != 'question' and post_type != 'answer':\n raise Exception('Invalid parameter')\n\n url = BASE_URL + 'post_comments/'\n url += '?' + urllib.parse.urlencode({'post_id': int(post_id), 'post_type': post_type})\n response = self.request('GET', url, is_ajax=True)\n response = response.json()\n output = []\n for item in response:\n output.append(self._convert_comment(item))\n return output\n except Exception as e:\n Utils.log(traceback.format_exc())\n Utils.error(e.args[0])",
"def parse_comment(self, node):\n\n data = []\n\n if node is not None:\n comment_id_pattern = re.compile('comment-(\\d+)')\n for comment_node in node.find_all('div', class_='comment'):\n item = {}\n item['is_deletable'] = False\n item['is_editable'] = False\n \n comment_id_result = comment_id_pattern.search(comment_node.get('id'))\n if comment_id_result:\n item['id'] = int(comment_id_result.group(1))\n \n comment_body_node = comment_node.find('div', class_='comment-body')\n if comment_body_node is not None:\n item['content'] = ''\n for p in comment_body_node.find_all(recursive=False):\n if 'class' in p.attrs and 'author' in p['class']:\n item['author'] = p.get_text()\n item['profile_url'] = self.get_link(p.get('href'))\n author_id = self._parse_user_id_from_url(item['profile_url'])\n if self.userId == author_id:\n item['is_deletable'] = True\n item['is_editable'] = True\n elif 'class' in p.attrs and 'age' in p['class']:\n item['date'] = p.abbr['title']\n item['date_ago'] = timeago.format(self._parse_datetime(item['date']), datetime.now(TIMEZONE))\n elif 'class' in p.attrs and 'edit' in p['class']:\n continue\n elif p.name == 'form':\n continue\n else:\n item['content'] += str(p)\n\n data.append(item)\n\n return data",
"def test_get_comments_by_post(self):\n\n CommentFactory(author=self.user, body='Test comment 2', post=self.post)\n data = {\n 'post': self.post.id,\n }\n response = self.client.get(reverse('api:comments-list'), data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data.get('results')), 2)",
"def get_comments(self, sort, time):\r\n from r2.models import Comment\r\n return self.get_links(sort, time, Comment)",
"def do_comments(self, line):\n for comment in self.review.comments():\n print(comment)",
"def comments(self):\r\n return Comments(self)",
"def comments(self):\r\n return Comments(self)",
"def comments(self):\r\n return Comments(self)",
"def get_comments(comments):\n API_KEY = secrets.YT_KEY\n youtube = build('youtube', 'v3', developerKey=API_KEY)\n request = youtube.commentThreads().list(\n part='replies',\n videoId=comments,\n textFormat=\"plainText\"\n )\n\n response = request.execute()\n\n video = response['items'][0]['replies']['comments']\n\n\n for i in video:\n print('\\n')\n print(i['snippet']['textDisplay'])\n # print(response['items'][0].keys())",
"def comment_issue(self, msg, issue_id, comment):\n self._asset_bind(msg)\n client = self._github_operator(msg)\n comment_obj = client.issue_comment(task_repository_name(), issue_id, comment)\n yield comment_obj.html_url"
] | [
"0.8102602",
"0.71330816",
"0.71256965",
"0.70209515",
"0.6956547",
"0.6635815",
"0.6539074",
"0.64607584",
"0.6452519",
"0.64320844",
"0.6412035",
"0.6329041",
"0.6324704",
"0.63164395",
"0.6309868",
"0.6283187",
"0.62656665",
"0.62221324",
"0.62046283",
"0.6180858",
"0.61560875",
"0.615461",
"0.6117714",
"0.6099215",
"0.60691285",
"0.6064075",
"0.6064075",
"0.6064075",
"0.60392666",
"0.60156405"
] | 0.8596781 | 0 |
Concatenates all of a post's comments together and returns the result | def all_comments_from_post(post, prepend_title=True):
if 'comments' in post:
comments = [comment['text'] for comment in post['comments']]
post_text = post['text']
title_text = post['title']
if post_text:
# preprend post text body if it exists
comments = [post_text + '\n'] + comments
if prepend_title and title_text:
# optionally prepend title
comments = [title_text] + comments
return '\n'.join(comments).strip()
else:
return '' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def commentList(post):\n comments = Comment.objects.all().filter(post=post).order_by('-published')\n remote_comments = RemoteComment.objects.all().filter(post=post).order_by('published')\n comment_list = list()\n\n if comments:\n for comment in comments:\n comment_dict = dict()\n comment_dict['author'] = addAuthor(comment.author)\n comment_dict['comment'] = comment.comment\n comment_dict['contentType'] = comment.contentType\n comment_dict['published'] = comment.published\n comment_dict['id'] = comment.id\n comment_list.append(comment_dict)\n if remote_comments:\n for remote in remote_comments:\n remote_dict = dict()\n server = remote.server\n r = requests.get(remote.author, auth=(server.username, server.password))\n if r.status_code == 200:\n author = remoteAddAuthor(r.json())\n remote_dict['author'] = author\n remote_dict['comment'] = remote.comment\n remote_dict['contentType'] = remote.contentType\n remote_dict['published'] = remote.published\n remote_dict['id'] = remote.id\n comment_list.append(remote_dict)\n else:\n continue\n\n comment_list = sorted(comment_list, key=lambda k: k['published'], reverse=True)\n\n return comment_list",
"def get_comments(self):\n\t\tself.comments = graph.get_connections(post['id'], 'comments')",
"def render_comments(self, post, comment_to_edit=None):\n rendered_comments = \"\"\n for comment in post.comments:\n if (comment_to_edit and\n comment.get_id() == comment_to_edit.get_id()):\n rendered_comments += self.render_str(\n \"blog/editcomment.html\", comment=comment_to_edit)\n else:\n rendered_comments += self.render_str(\n \"blog/singlecomment.html\", p=post, comment=comment)\n return rendered_comments",
"def each_comment_from_post(post):\n # first yield the post text body, if any\n if post['text']:\n yield post['text']\n # then yield each comment\n for comment in post['comments']:\n yield comment['text']",
"def combine_comments(short_comments_df, long_comments_df):\n long_comments_df.set_index('perfume_id', inplace=True)\n long_comments_df['long_comments'] = long_comments_df['comments'].apply(','.join)\n all_comments = pd.merge(short_comments_df, long_comments_df, how='left', left_index=True, right_index=True)\n all_comments = all_comments.fillna('.')\n all_comments['all_comments'] = all_comments['short_comments'] + all_comments['long_comments']\n all_comments.drop(['comments', 'short_comments', 'long_comments', 'url'], axis=1, inplace=True)\n all_comments = all_comments.reset_index().rename(columns={'index':'perfume_id'})\n return all_comments",
"def task_fetch_posts_and_comments(\n author_id,\n count=28,\n posts_out='data/posts_data.xlsx',\n comments_out='data/comments_data.xlsx'):\n\n # Create query instances for posts and comments\n post_query = Query(PostParser)\n comment_query = Query(CommentParser)\n\n # Query posts data\n post_data = post_query.query_all(POSTS_QUERY_HASH_PARAM, {\n \"id\": author_id,\n \"first\": 50,\n }, count)\n logger.info(\"Count of posts data: %d\" % len(post_data))\n\n # Save the posts data\n post_data_df = pd.DataFrame(post_data)\n post_data_df.to_excel(posts_out, encoding='utf-8', index=False)\n logger.info(\"Save the posts data to %s.\" % posts_out)\n\n # Query comments data of posts\n comment_data = []\n for i, post in enumerate(post_data):\n logger.info(\"Get comment of %d %s\" % (i, post['short_code']))\n comment_data_of_one_post = comment_query.query_all(COMMENTS_QUERY_HASH_PARAM, {\n \"shortcode\": post['short_code'],\n \"first\": 50,\n }, None)\n for comment in comment_data_of_one_post:\n comment['post_short_code'] = post['short_code']\n comment_data.extend(comment_data_of_one_post)\n logger.info(\"Count of comment_data: %d\" % len(comment_data))\n\n # Save the comments data\n comment_data_df = pd.DataFrame(comment_data)\n comment_data_df.to_excel(comments_out, encoding='utf-8', index=False)\n logger.info(\"Save the comments data to %s.\" % comments_out)",
"def get_comments(id_post):\n return Comms.objects.filter(post__id=id_post)",
"def get_post_comments(post, user_agent=default_user_agent):\n post_permalink = post['permalink']\n\n response_data = requests.get(post_permalink, headers = {'User-agent': user_agent})\n post_data = response_data.json()[1]\n\n # right now this gets the title, eventually convert to unique id for each title\n post_id = post['post_id']\n\n return get_post_comments_recur(post_data, [], -1, post_id)",
"def get_post_comments():\n\n print('\\nLoading post comments from MongoDB..')\n\n cursor = collection.aggregate([\n {'$group': {'_id': '$category',\n 'comments': {'$push': '$Posts.All Comments.comment_no_stopwords'}}}\n ])\n\n comments_df = pd.DataFrame(list(cursor))\n return comments_df",
"def get_post_comments():\n\n print('\\nLoading post comments from MongoDB..')\n\n cursor = collection.aggregate([\n {'$group': {'_id': '$category',\n 'comments': {'$push': '$Posts.All Comments.comment_no_stopwords'}}}\n ])\n\n comments_df = pd.DataFrame(list(cursor))\n return comments_df",
"def post_process_post(self, post):\r\n post.article = self.rewrite_ob_urls(post.article)\r\n post._commit()\r\n \r\n comments = Comment._query(Comment.c.link_id == post._id, data = True)\r\n for comment in comments:\r\n comment.body = self.rewrite_ob_urls(comment.body)\r\n comment._commit()",
"def get_discussion_comments(self, post_id):\n query = \"SELECT users.Username , discussionreplies.* FROM discussionreplies INNER JOIN users ON (discussionreplies.Users_idUsers=users.idUsers) WHERE Discussions_idDiscussions={}\".format(post_id)\n cursor = DB.instance.connection.cursor()\n cursor.execute(query)\n return cursor.fetchall()",
"def get_post_comments_recur(comment, comments, parent_comment_id, parent_post_id):\n if 'data' in comment:\n comment_data = comment['data']\n\n new_comment = None\n\n # a new comment exists at this layer, add it to the total list of comments\n if 'body' in comment_data:\n new_comment = {\n \"score\": comment_data['score'],\n \"body\": comment_data['body'],\n \"subreddit\": comment_data['subreddit'],\n \"author\": comment_data['author'],\n \"parent_comment_id\": parent_comment_id,\n \"parent_post_id\": parent_post_id,\n \"created\": comment_data['created'],\n \"comment_id\": comment_data['id']\n }\n comments.append(new_comment)\n\n next_parent_comment_id = parent_comment_id if new_comment is None else new_comment['comment_id']\n\n # recurse on children\n if 'children' in comment_data:\n for child in comment_data['children']:\n comments = get_post_comments_recur(child, comments, next_parent_comment_id, parent_post_id)\n\n # recurse on replies\n if 'replies' in comment_data:\n comments = get_post_comments_recur(comment_data['replies'], comments, next_parent_comment_id, parent_post_id)\n\n return comments",
"def get_comments(self,comments):\n all_comments = []\n for comment in comments:\n try :\n all_comments.append({\n 'comment':comment['data']['body'],\n 'score':comment['data']['score']\n })\n except: pass\n return all_comments",
"def comments(post_id: str, max: int = None):\n for comment in client.comments(post_id=post_id, max=max):\n print(json.dumps(comment))",
"def comments(self, request, pk=None):\n post = self.get_object()\n comments = Comment.objects.filter(post=post).order_by('created_at')\n serializer = PostCommentsSerializer(comments, many=True)\n return Response(serializer.data, status.HTTP_200_OK)",
"def update_comments(comments, account_name, post_url):\n inc_number = 0\n for index, comment in comments.iterrows():\n # increment + 1\n inc_number = inc_number + 1\n # get preprocessed comment\n comment_spaces, comment_no_stopwords = preprocess_comment(comment['comment'])\n # get sentiment score from comment\n sentiment_score = get_sentiment(comment_no_stopwords)\n # update collection with comments\n collection.update_one(\n {\n 'Codename': account_name,\n 'Posts.URL': post_url\n },\n {\n '$push': {\n 'Posts.$.All Comments': {'comment_id': inc_number,\n 'user': comment['user'],\n 'comment': comment['comment'],\n 'comment_no_stopwords': comment_no_stopwords,\n 'comment_spaces': comment_spaces,\n 'like': comment['like'],\n 'sentiment_score': sentiment_score\n }\n }\n }\n )",
"def _get_comments(**kwargs):\r\n\r\n # Log in to get cookies.\r\n cookies = _login(**kwargs)\r\n\r\n if 'r' not in kwargs:\r\n # This is the first comments request.\r\n # Make the comments request and set an empty list.\r\n kwargs['r'] = requests.get('https://news.ycombinator.com/threads?id=%s' % kwargs['args'].username,\r\n cookies=cookies)\r\n\r\n # Check to make sure we have a good response.\r\n if not _good_response(**kwargs):\r\n kwargs.pop('r')\r\n return _get_comments(**kwargs)\r\n\r\n kwargs['comments'] = []\r\n\r\n # Grab the comments.\r\n J = pq(kwargs['r'].content)\r\n comments = J('table table td.default')\r\n\r\n for c in comments:\r\n\r\n comment = _sanitize_comment(J, c)\r\n\r\n if kwargs['args'].no_owner and comment['user'] == kwargs['args'].username:\r\n continue\r\n\r\n # Add the comment to the saved list.\r\n kwargs['comments'].append({\r\n 'user': comment['user'],\r\n 'comment': comment['comment'],\r\n 'reply': comment['reply'],\r\n 'points': comment['points'],\r\n 'link': comment['link'],\r\n 'parent': comment['parent'],\r\n 'story': comment['story'],\r\n 'date': comment['date'],\r\n })\r\n\r\n # If we're getting all comments.\r\n if kwargs['args'].all:\r\n\r\n # Find the 'More' link and load it.\r\n last = J('a', J('table table tr td.title:last'))\r\n if last.text() == 'More':\r\n kwargs['r'] = requests.get('https://news.ycombinator.com%s' % last.attr('href'),\r\n cookies=cookies)\r\n\r\n # Check to make sure we have a good response.\r\n if not _good_response(**kwargs):\r\n kwargs.pop('r')\r\n return _get_comments(**kwargs)\r\n\r\n # Call this function again, this time with the new list.\r\n return _get_comments(**kwargs)\r\n\r\n return kwargs['comments']",
"def separate_comments(self):\n if not hasattr(self, 'cleaned_html'):\n self.cleaned_html = self.clean_html()\n \n self.separated_comments = self.cleaned_html.split(self.post_splitter)\n return self.separated_comments",
"def getRemoteComments(post_id):\n servers = Server.objects.all()\n for server in servers:\n if server.username and server.password:\n host = server.hostname\n if not host.endswith(\"/\"):\n host = host + \"/\"\n server_api = \"{}posts/{}/comments\".format(host, post_id)\n print('Request:')\n print(server_api)\n try:\n r = requests.get(server_api, auth=(server.username, server.password))\n print(r)\n if r.status_code in [200, 201]:\n comments = r.json()\n return remoteCommentList(comments)\n except Exception as e:\n print(e)\n return None",
"def comments(self):\n return self.get_queryset().filter(content_type__model='comment').order_by('-comments__createdAt')",
"def task_fetch_tag_posts_and_comments(\n tag_name,\n count=100,\n posts_out='data/tag_posts_data.xlsx',\n comments_out='data/tag_comments_data.xlsx'):\n\n # Create query instances for posts and comments\n post_query = Query(TagPostParser)\n comment_query = Query(CommentParser)\n\n # Query posts data\n post_data = post_query.query_all(TAG_POSTS_QUERY_HASH_PARAM, {\n \"tag_name\": tag_name,\n \"first\": 50,\n }, count)\n logger.info(\"Count of posts data: %d\" % len(post_data))\n\n # Save the posts data\n post_data_df = pd.DataFrame(post_data)\n post_data_df.to_excel(posts_out, encoding='utf-8', index=False)\n logger.info(\"Save the posts data to %s.\" % posts_out)\n\n # Query comments data of posts\n comment_data = []\n for i, post in enumerate(post_data):\n logger.info(\"Get comment of %d %s\" % (i, post['short_code']))\n comment_data_of_one_post = comment_query.query_all(COMMENTS_QUERY_HASH_PARAM, {\n \"shortcode\": post['short_code'],\n \"first\": 50,\n }, 100)\n for comment in comment_data_of_one_post:\n comment['post_short_code'] = post['short_code']\n comment_data.extend(comment_data_of_one_post)\n logger.info(\"Count of comment_data: %d\" % len(comment_data))\n\n # Save the comments data\n comment_data_df = pd.DataFrame(comment_data)\n comment_data_df.to_excel(comments_out, encoding='utf-8', index=False)\n logger.info(\"Save the comments data to %s.\" % comments_out)",
"def iterateComments(db, post_id):\n c=db.cursor()\n c.execute(\"\"\"SELECT * FROM comments WHERE post_id=%d\"\"\" % post_id)\n for comment in c.fetchall():\n yield Comment(answer)\n c.close()",
"def make_comments(post, comments):\n for comment in comments:\n try:\n com = RedditComment(reddit_post=post, **comment)\n com.save()\n except Exception as ex:\n print 'comment could not be created'\n print ex",
"def comments(self, limit=100, all=False):\n source, edge = self.id, \"comments\"\n return lazygen(Comment, source, edge,\n limit=limit, get_all=all)",
"def handle_comments(self):\r\n comments = Comment.objects.all()\r\n for c in comments:\r\n new = ThreadedComment(\r\n content_type = c.content_type,\r\n object_id = c.object_id,\r\n comment = c.comment,\r\n user = c.user,\r\n date_submitted = c.submit_date,\r\n date_modified = c.submit_date,\r\n date_approved = c.submit_date,\r\n is_public = c.is_public,\r\n ip_address = c.ip_address,\r\n is_approved = not c.is_removed\r\n )\r\n new.save()",
"def make_parsed_comments(self):\n if not hasattr(self, 'separated_comments'):\n self.separated_comments = self.separate_comments()\n \n # build comments list of dictionaries, one dictionary for each article\n self.comments = []\n for self.separated_comment in self.separated_comments:\n try:\n comment_data = self.get_comment_data(self.separated_comment)\n self.comments.append(comment_data)\n except Exception as e:\n pass\n return self.comments",
"def get_comments_from_submission_id(submission_id):\n flat_comments = []\n tree_comments = []\n\n submission = (REDDIT.submission(id=submission_id))\n print(submission.num_comments)\n print(submission.shortlink)\n\n # sort comments by best and get the flattened list\n submission.comment_sort = 'confidence'\n\n # tree comments traversal\n submission.comments.replace_more(limit=1)\n for comm in submission.comments.list():\n tree_comments.append(comm)\n\n flat_comments = list(submission.comments)\n\n return flat_comments, tree_comments",
"def postList(posts):\n post_list = list()\n for post in posts:\n visible_to = list()\n visible = post.visibleTo.all()\n if visible:\n for author in visible:\n auth = \"{}/api/author/{}\".format(DOMAIN, author.id)\n visible_to.append(auth)\n\n comments = commentList(post)\n comment_url = \"{}/api/posts/{}/comments\".format(DOMAIN, post.id)\n post_dict = {'author': addAuthor(post.author), 'title': post.title, 'description': post.description,\n 'contentType': post.contentType, 'content': post.content, 'published': post.published,\n 'visibility': post.visibility, 'visibleTo': visible_to, 'unlisted': post.unlisted, 'id': post.id,\n 'comments': comments[:5], 'next': comment_url, 'count': len(comments),\n 'origin': \"{}/api/posts/{}\".format(DOMAIN, post.id),\n 'source': \"{}/api/posts/{}\".format(DOMAIN, post.id)}\n post_list.append(post_dict)\n return post_list",
"def get_comments(self, post_id, post_type):\n\n try:\n if not post_id or not post_type:\n raise Exception('Invalid parameter')\n if post_type != 'question' and post_type != 'answer':\n raise Exception('Invalid parameter')\n\n url = BASE_URL + 'post_comments/'\n url += '?' + urllib.parse.urlencode({'post_id': int(post_id), 'post_type': post_type})\n response = self.request('GET', url, is_ajax=True)\n response = response.json()\n output = []\n for item in response:\n output.append(self._convert_comment(item))\n return output\n except Exception as e:\n Utils.log(traceback.format_exc())\n Utils.error(e.args[0])"
] | [
"0.6669341",
"0.6629759",
"0.6567346",
"0.65568393",
"0.645952",
"0.6353189",
"0.6242865",
"0.62129784",
"0.6153404",
"0.6153404",
"0.60899085",
"0.60534865",
"0.6011431",
"0.59397805",
"0.5903908",
"0.58740234",
"0.5855156",
"0.58104396",
"0.5805067",
"0.5801013",
"0.57634443",
"0.57427084",
"0.5712492",
"0.5698225",
"0.5691264",
"0.5635477",
"0.5633774",
"0.56302077",
"0.5624839",
"0.559004"
] | 0.7301012 | 0 |
Lowercase word and remove junk characters using self.filter_pattern | def clean_word(self, word):
return self.filter_pattern.sub(u'', word.lower()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filter_ignoring_case(self, pattern):\n return self.filter(re.compile(pattern, re.I))",
"def clean_cases(text):\n return text.lower()",
"def preprocess(self, s):\n stripped = re.sub(\"[^\\w\\s]\", \"\", s)\n stripped = re.sub(\"_\", \"\", stripped)\n\n stripped = re.sub(\"\\s+\", \" \", stripped)\n\n stripped = stripped.strip()\n\n return stripped.lower()",
"def cleanWord(word):\r\n newWord = [letter.lower() for letter in word if letter.isalpha()]\r\n return \"\".join(newWord)",
"def preprocess(text):\n return text.lower()",
"def good_word(self, word):\r\n return word.strip().lower()",
"def filter_lowercase(self, string):\n newstring = string.lower()\n return newstring",
"def standardize(text):\n # FIXME regex restricts us to only ascii\n # FIXME move regex compilation outside\n p = re.compile('[^a-zA-Z]')\n retval = p.sub('', text)\n retval = retval.lower()\n return retval",
"def no_caps_and_ponctuation(text):\n return re.sub(r'[^\\w\\s]', '', text).lower()",
"def process_word(word: str) -> str:\r\n return re.compile(r\"[\\W_]+\").sub(\"\", word.lower())",
"def clean(word):\n word = word.lower()\n stopwords = ['of', 'and','to', 'at', 'in', '@']\n word = re.sub(r'[\\&/\\-\\(\\)\\|\\@,\\]\\[]+', ' ', word)\n for stopword in stopwords:\n pattern = r'\\b' + stopword + r'\\b'\n pattern = re.compile(pattern)\n word = re.sub(pattern, '', word)\n word = re.sub(r'\\s\\s+', ' ', word)\n return word",
"def clean_data(s):\n s = s.strip()\n s = s.lower()\n return s",
"def _clean(self, text):\n if len(self.alph) == 26:\n text = sub('[\\n\\t ' + string.punctuation + ']+?', '', text)\n else:\n text = sub('[\\n\\t]+?', '', text)\n\n text = text.lower()\n text = text.encode('ascii', 'ignore').decode()\n return text",
"def clean_word(word):\n return \"\".join([c for c in word.lower() if ord(c) < 128])",
"def preprocess_text(self):\n self.text_received = self.text_received.replace(\" \", \"\").lower()",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def _clean_words(self, title, filter_stopwords=False):\n chars = '\"[]():;?!,\\'-'\n translation = dict((ord(c), u' ') for c in chars)\n def translate(text):\n if isinstance(text, unicode):\n translated = text.translate(translation)\n else:\n translated = text.translate(None, chars)\n return translated\n strips = '.'\n words = [\n x.strip(strips)\n for x in translate(title).split()\n ]\n for word in words:\n if len(word) >= self.min_word_length:\n if filter_stopwords and word.lower() not in STOPWORDS:\n continue\n # if the word contains non-ascii characters, try to convert\n # it to a ascii equivalent so that it's possible to type\n # \"naive\" when you don't even know how to type \"naïve\"\n try:\n word.encode('ascii')\n except UnicodeEncodeError:\n # it contains non-ascii characters\n ascii_word = unidecode(word)\n yield unicode(ascii_word).lower()\n yield word.lower()\n # yield ''.join(c for c in word if c.isalnum())",
"def _cleanse(text):\n return ''.join([character for character in text\n if character.isalnum()]).lower()",
"def processword(word):\n word = word.lower()\n word = word.strip('()?,!`.-:\\\"\\n \\'')\n return word",
"def _filter_string(cls, string, extra_chars=\"\"):\n char_white_list = ascii_letters + digits + extra_chars\n return \"\".join([char for char in string if char in char_white_list])",
"def _filter_string(cls, string, extra_chars=\"\"):\n char_white_list = ascii_letters + digits + extra_chars\n return \"\".join([char for char in string if char in char_white_list])",
"def filter(string):\n # remove all unwanted characters\n return regex2.sub(' ', string)",
"def only_lowercase(text):\n\tnot_allowed = string.punctuation + string.whitespace + string.digits\n\ttext2 = [each for each in text if each not in not_allowed]\n\ttext2 = ''.join(text2)\n\treturn text2.lower()",
"def clean_text(text):\n new_text = \"\"\n text = text.lower()\n for character in text:\n if character.isalpha():\n new_text = new_text + character\n return new_text",
"def preprocess(string):\n cleaned = regex.sub(\" \", string)\n return cleaned.lower()"
] | [
"0.73268974",
"0.731889",
"0.7099577",
"0.7067469",
"0.7044841",
"0.7043152",
"0.6991179",
"0.67993104",
"0.67970735",
"0.6778562",
"0.6728453",
"0.6704564",
"0.66781956",
"0.6671974",
"0.66304576",
"0.66165805",
"0.66165805",
"0.66165805",
"0.66165805",
"0.66165805",
"0.66165805",
"0.6600788",
"0.6588069",
"0.6587885",
"0.6515236",
"0.6515236",
"0.6507701",
"0.6499907",
"0.6489981",
"0.6488137"
] | 0.82216036 | 0 |
Delete existing corpus (set of unique words) and make a new one. | def persist_corpus(self):
subreddit = self.postman.subreddit
corpus_coll = self.postman.corpus_write
subreddit_query = {'subreddit':subreddit}
preexisting_corpora = corpus_coll.find(subreddit_query).count()
print 'deleting %i existing corpora for subreddit' % preexisting_corpora
corpus_coll.delete_many(subreddit_query)
result = corpus_coll.insert_one({'subreddit':subreddit, 'corpus':list(self.corpus)})
print 'persisted corpus of length %i' % (len(self.corpus))
# chaining
return self | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_delete_corpus():\n corpus = set() # set<list<action, status, sentence>>\n with open(os.path.join(BASE, \"data/corpus.csv\")) as fp:\n for line in fp:\n corpus.add(line.split(\",\"))\n return corpus",
"def clean(corpus):\n # Initiate clean_corpus\n clean_corpus = [] \n \n for speech in corpus:\n \n # Removes meaningless intro \n speech = speech[5:] \n\n for i in range(len(speech)):\n # Removes 'meaningless text hear (min:sec)\\n' at the beginning of each paragraph\n speech[i] = speech[i][speech[i].find('\\n') + 1:] \n # Replaces brackets with paranthesis\n speech[i] = speech[i].replace('[', '(') \n speech[i] = speech[i].replace(']', ')')\n # Removes meaningless text in parantheses\n speech[i] = re.sub(r'\\([^)]*\\)', '', speech[i]) \n\n # Join all of the paragraphs into one speech\n speech = ','.join(speech) \n\n clean_corpus.append(speech)\n \n # Combined all of the speeches into one document\n \n if len(clean_corpus) == 1:\n clean_corpus = clean_corpus[0]\n if len(clean_corpus) == 2:\n clean_corpus = clean_corpus[0] + clean_corpus[1]\n if len(clean_corpus) == 3:\n clean_corpus = clean_corpus[0] + clean_corpus[1] + clean_corpus[2]\n if len(clean_corpus) == 8:\n clean_corpus = clean_corpus[0] + clean_corpus[1] + clean_corpus[2] + clean_corpus[3] + clean_corpus[4] + \\\n clean_corpus[5] + clean_corpus[6] + clean_corpus[7]\n \n return clean_corpus",
"def delete_words(self, words=None):\n\n if words is None:\n words = self.stopwords\n\n self.__corpora = [\n sub(r' ({0}) '.format('|'.join(words)), ' ', string) for string in self.__corpora\n ]",
"def clean(raw_file,clean_path,results_path):\n clean_file = clean_path + 'clean.pkl'\n stats_file = results_path + 'corpus_stats.pkl' \n raw_text = load_raw_text(raw_file) \n clean_docs = list()\n nlp = spacy.load('en')\n i = 0\n print('Cleaning documents...')\n for text in raw_text: \n words = nlp(text)\n raw_count = len(words)\n words = [w for w in words if not w.is_stop]\n words = [w for w in words if w.ent_type_ != 'PERSON' and w.pos_ != 'PROPN']\n words = [w for w in words if w.is_alpha and not w.is_digit]\n words = [w.lemma_ for w in words if w.text != '-PRON-']\n word_string = ' '.join(words)\n word_string = word_string.lower()\n doc = Document(word_string)\n doc.clean_count = len(words)\n doc.raw_count = raw_count\n clean_docs.append(doc)\n if i%10 == 0:\n print('Document: ' + str(i))\n i += 1\n clean_corpus = Corpus(clean_docs)\n clean_corpus.save_corpus_text(clean_file)\n clean_corpus.save_corpus_stats(stats_file)\n return clean_corpus",
"def test_unlabeled_corpus_saving(self):\n\n original_corpus = [[\"Yo\", \"soy\", \"una\", \"oración\", \"gramatical\", \",\",\n \"regocíjense\", \"en\", \"mi\", \"glória\", \".\"],\n [\"Yo\", \"ungrammatical\", \"es\", \"oración\", \",\"\n \"tú\", \"presumido\", \"elitista\", \".\"]]\n with tempfile.TemporaryDirectory() as temp_dir:\n fileName = temp_dir + \"testfile\"\n corpus_tools.save_tokenized_corpus(fileName, original_corpus)\n loaded_corpus = corpus_tools.load_tokenized_corpus(fileName)\n assert len(original_corpus) == len(loaded_corpus)\n for original_sent, loaded_sent in zip(original_corpus,\n loaded_corpus):\n self.assertEqual(original_sent, loaded_sent)",
"def create_corpus(df):\r\n corpus=[]\r\n for tweet in tqdm(df['text']):\r\n words=[word.lower() for word in word_tokenize(tweet) if((word.isalpha()==1))]\r\n corpus.append(words)\r\n return corpus",
"def make_corpus(self, t, v=None):\n v = self.vectorizer\n\n try:\n corpus = v.transform(t)\n except ValueError, e:\n return None, None\n \n vocab = {y:x for x,y in v.vocabulary_.iteritems()}\n corpus = gensim.matutils.Sparse2Corpus(corpus, documents_columns=False)\n return corpus, vocab",
"def update_corpus(sentences):\n \n corNeg = None\n corPos = None\n corNeu = None\n try:\n corNeg = open('corpus\\\\neg.txt', 'ab')\n corPos = open('corpus\\\\pos.txt', 'ab')\n corNeu = open('corpus\\\\neu.txt', 'ab')\n except:\n print(\"Error: Loading Corpus\")\n return\n for sent_d in sentences:\n sent = sent_d[\"sentence_txt\"]\n tagged = sent_d[\"tag_id\"]\n # update corpus\n if tagged == tag.neg:\n corNeg.write('\\n'+sent)\n if tagged == tag.pos:\n corPos.write('\\n'+sent)\n if tagged == tag.neu:\n corNeu.write('\\n'+sent)\n corNeg.close()\n corPos.close()\n corNeu.close()",
"def build_corpus(self):\n # #############################\n\n doc = metapy.index.Document()\n tok = metapy.analyzers.ICUTokenizer(suppress_tags=True)\n tok = metapy.analyzers.LowercaseFilter(tok)\n tok = metapy.analyzers.LengthFilter(tok, min=3, max=1000)\n tok = metapy.analyzers.Porter2Filter(tok)\n tok = metapy.analyzers.ListFilter(tok, \"lemur-stopwords.txt\", metapy.analyzers.ListFilter.Type.Reject)\n collection = -1\n\n with open(self.documents_path) as file:\n for num, line in enumerate(file):\n l = line.strip()\n c = int(l[0])\n l = l[2:]\n doc.content(l)\n tok.set_content(doc.content())\n if c != collection:\n self.documents.append([])\n collection = c\n self.documents[c].append([token for token in tok])\n self.number_of_collections = len(self.documents)\n self.number_of_documents = len(self.documents[0])\n #print(self.number_of_collections)\n #print(self.number_of_documents)\n #print(self.documents[0])",
"def distinct_words(corpus):\n\n # ------------------\n # Write your implementation here.\n corpus_words = sorted(list(set(word for word_list in corpus for word in word_list)))\n num_corpus_words = len(corpus_words)\n\n # ------------------\n\n return corpus_words, num_corpus_words",
"def create_from_text(cls, text):\n duplicate_words = text.lower().replace('.', ' .').split(' ')\n words = list(set(duplicate_words))\n corpus = [words.index(word) for word in duplicate_words]\n return cls(words, corpus)",
"def clean_lyrics(lyrics_file):\n data_corpus = []\n with open(lyrics_file) as csvfile:\n reader = csv.reader(csvfile, delimiter=\"\\t\")\n for row in reader:\n sentences = row[2].strip().split(u\"<BR>\")\n for sentence in sentences:\n sentence = unicode(sentence)\n sentence = translate_non_alphanumerics(sentence)\n sentence = space_pat.sub(u' ', sentence)\n\n # delete English\n # sentence = eng_words_pat.sub(u'', sentence).split(u\"\\s\")\n\n # sentence = sentence.split(u'')\n # sentence.append(u\".\")\n # sentence += u'.'\n\n if len(sentence) > 1:\n data_corpus.append(sentence)\n\n logger.info(\" Done cleaning crawled data! \")\n\n # saving the corpus\n with codecs.open(\"data/cleaned_lyrics.txt\", \"w\", 'UTF-8') as f:\n f.write(\"\\n\".join(data_corpus))",
"def purge_words(self):\n\n\t\tword_list = self.transcript_string.encode('utf-8').split()\n\t\tpurged_word_list = {}\n\t\tfor word in word_list:\n\t\t\tif word.isalpha():\n\t\t\t\tif word.islower():\n\t\t\t\t\tpurged_word_list.setdefault(word, []).append(word)\n\t\t\t\telse:\n\t\t\t\t\tlower_word = word.lower()\n\t\t\t\t\tpurged_word_list.setdefault(lower_word, []).append(word) \n\t\t\telse:\n\t\t\t\tcontinue \n\t\t\n\t\tself.word_list = purged_word_list",
"def test_duplicate_word_removal(self):\n data = [{\"Header\": \"This is a Header, and this is a Header\", \"Paragraph\": \"This is a Paragraph, and this is a \"\n \"Paragraph\",\n \"Header_keywords\": [\"header\", \"header\"],\n \"Paragraph_keywords\": [\"paragraph\", \"paragraph\"], \"slide\": 10}]\n remove_duplicates = duplicate_word_removal(data)\n data[0][\"Header_keywords\"] = [\"header\"]\n data[0][\"Paragraph_keywords\"] = [\"paragraph\"]\n self.assertEqual(data, remove_duplicates)",
"def generate_corpus():\n data = load_data()\n questions = [s.split(' ', 1)[1].lower() for s in data]\n return questions",
"def vectorcorpus(model, wcl):\r\n corpus = np.array([model.word_vec(word) for word, _ in wcl])\r\n print('Created corpus with {} elements'.format(len(corpus)))\r\n return corpus",
"def _clear_document(self, docid):\n doc = self.get_document(docid)\n for term, count in doc.get_terms():\n term_entry = self.sql_session.query(Term).get(term)\n term_entry.count -= abs(count)\n term_entry.distinct_docs -= 1\n any_term = self.sql_session.query(Term).get(ANY)\n any_term.distinct_docs -= 1\n doc.delete()",
"def testAddWords(self):\n\n\t\t\t\twords = ['mac', 'tips', 'tricks', 'macintosh', 'help', 'hack']\n\t\t\t\tspinner.Word.objects.add(words)\n\t\t\t\tfor word in words:\n\t\t\t\t\t\tword = spinner.Word.objects.get(name=word)\n\t\t\t\t\t\tword.delete()",
"def build_corpus(self):\n print(\"Inside the build_corpus >>>>>\")\n documentsCount = 0\n documents = self.documents\n\t\t\n with open(self.documents_path) as file:\n for documents in file.readlines():\n documents = documents.rstrip('}\\n ').strip('0\\t').strip('1\\t').split(' ')\n documentsCount = documentsCount +1\n self.documents.append(documents)\n\t\t\t\n self.number_of_documents = documentsCount",
"def create_corpus(source):\n\treturn \" \".join([file.read() for file in source])",
"def distinct_words(corpus):\n corpus_words = []\n num_corpus_words = -1\n \n # YOUR CODE HERE\n \n for i in corpus:\n for j in i:\n corpus_words.append(j)\n \n corpus_words = set(corpus_words)\n \n corpus_words = sorted(list(corpus_words))\n \n num_corpus_words = len(corpus_words)\n \n #raise NotImplementedError()\n\n return corpus_words, num_corpus_words",
"def build_vocab(self, corpus):\n if self.vocabulary_counts != None:\n logger.debug(\"building vocabulary from provided frequency map\")\n vocab = self.vocabulary_counts\n else:\n logger.debug(\"default vocabulary building\")\n super(Skipgram, self).build_vocab(corpus)\n return\n\n # assign a unique index to each word\n self.vocab, self.index2word = {}, []\n\n for word, count in vocab.iteritems():\n v = Vocab()\n v.count = count\n if v.count >= self.min_count:\n v.index = len(self.vocab)\n self.index2word.append(word)\n self.vocab[word] = v\n\n logger.debug(\"total %i word types after removing those with count<%s\" % (len(self.vocab), self.min_count))\n\n if self.hs:\n # add info about each word's Huffman encoding\n self.create_binary_tree()\n if self.negative:\n # build the table for drawing random words (for negative sampling)\n self.make_table()\n # precalculate downsampling thresholds\n self.precalc_sampling()\n self.reset_weights()",
"def clean_then_embed(self, corpus):\n\n return self.embed_corpus(self.clean_corpus(corpus=corpus))",
"def train(self, corpus): \n # TODO your code here\n # Tip: To get words from the corpus, try\n # for sentence in corpus.corpus:\n # for datum in sentence.data: \n # word = datum.word\n for sentence in corpus:\n prevWord = \"\"\n prevPrevWord = \"\"\n for word in sentence:\n word = word.strip(STRIP_CHARS)\n word = word.lower()\n currentWord = word\n self.unigramCounts[currentWord] += 1\n self.total += 1\n if prevWord != \"\":\n if prevPrevWord != \"\":\n trigram = (prevPrevWord, prevWord, currentWord)\n if trigram not in self.trigramCounts:\n self.continuationCounts[currentWord] += 1\n self.followingCounts[(prevPrevWord, prevWord)] += 1\n self.trigramCounts[trigram] += 1\n self.bigramCounts[(prevWord, currentWord)] += 1\n self.totalBigramCounts += 1\n else:\n self.bigramCounts[(prevWord, currentWord)] += 1\n self.totalBigramCounts += 1\n prevPrevWord = prevWord\n prevWord = currentWord\n else:\n prevWord = currentWord\n self.total += len(self.unigramCounts)",
"def clean_text(corpus, model):\n \n new_doc = []\n doc = model(corpus)\n for word in doc:\n if not word.is_stop and word.is_alpha:\n new_doc.append(word.lemma_.lower())\n \n cleaned_string = \", \".join(new_doc) # putting the strings back into one string\n return cleaned_string",
"def load_corpus(self, fn):\n corpus = load_corpus(fn)\n self.corpus = corpus\n self.has_corpus = True",
"def clean_docs(self,docs):\n\n # Remove numbers, but not words that contain numbers.\n docs = [[token for token in doc if not token.isnumeric()] for doc in docs]\n\n # Remove words that are only one character.\n docs = [[token for token in doc if len(token) > 1 and token not in stop_words] for doc in docs]\n\n # lemmatizer = WordNetLemmatizer()\n # docs = [[lemmatizer.lemmatize(token) for token in doc] for doc in docs]\n\n # Add bigrams and trigrams to docs (only ones that appear 20 times or more).\n bigram = Phrases(docs, min_count=20)\n for idx in range(len(docs)):\n for token in bigram[docs[idx]]:\n if '_' in token:\n # Token is a bigram, add to document.\n docs[idx].append(token)\n\n # Create a dictionary representation of the documents.\n dictionary = Dictionary(docs)\n\n # Filter out words that occur less than 20 documents, or more than 50% of the documents.\n dictionary.filter_extremes(no_below=20, no_above=0.5)\n\n # Bag-of-words representation of the documents.\n corpus = [dictionary.doc2bow(doc) for doc in docs]\n\n return docs,dictionary,corpus",
"def get_clean_corpus(FILENAME):\n text = open(FILENAME)\n corpus = text.readlines()\n clean_corpus = []\n doc_no = 0\n prev_doc_no = 0\n doctype_flag = 0\n doctype_end_flag = 0\n doc_list = []\n punct = set(string.punctuation)\n tag = 0\n for i in range(len(corpus)):\n stmt = \"\"\n flag = 1\n sz = len(corpus[i])\n\n\n for j in range(sz-1):\n if(corpus[i][j] == '<'):\n tag = 1\n if(corpus[i][j] == '<' and j+1 < sz and corpus[i][j+1] == 'd'):\n doctype_flag = 1\n if(corpus[i][j] == 'c' and j+1 < sz and corpus[i][j+1] == '>'):\n doctype_end_flag = 1\n if(doctype_flag == 0 and doctype_end_flag == 0 and tag == 0):\n stmt = stmt + (corpus[i][j])\n if(corpus[i][j] == '>'):\n tag = 0\n if(doctype_flag == 1 and corpus[i][j] == '>'):\n doctype_flag = 0\n if(doctype_end_flag == 1 and corpus[i][j] == '>'):\n doctype_end_flag = 0\n doc_no = doc_no + 1\n break\n\n if(stmt != \"\"):\n if(doc_no != prev_doc_no):\n if(len(doc_list) != 0):\n clean_corpus.append(doc_list)\n doc_list = []\n prev_doc_no = doc_no\n stmt = stmt.lower()\n doc_list.append(stmt)\n return clean_corpus",
"def clean_text(corpus, model): \n new_doc = []\n doc = model(corpus)\n for word in doc:\n if not word.is_stop and word.is_alpha:\n new_doc.append(word.lemma_.lower()) \n final = \", \".join(map(str,new_doc)) \n return final",
"def testWords(self):\n\n\t\t\t\twords = ['mac', 'tips', 'tricks', 'macintosh', 'help', 'hack']\n\n\t\t\t\tfor word in words:\n\t\t\t\t\t\tref = spinner.Word.objects.get_single(word, True)\n\t\t\t\t\t\tassert isinstance(ref, spinner.Word)\n\t\t\t\t\t\tref.delete()"
] | [
"0.6868972",
"0.62707067",
"0.6066833",
"0.6028967",
"0.59399575",
"0.5909655",
"0.5907406",
"0.5866781",
"0.5856297",
"0.58376837",
"0.58010125",
"0.57789904",
"0.5776771",
"0.5760552",
"0.5755673",
"0.57234013",
"0.5722739",
"0.57213384",
"0.5716799",
"0.5640827",
"0.56308705",
"0.561919",
"0.56051636",
"0.56006026",
"0.56000865",
"0.5598288",
"0.5577471",
"0.55739117",
"0.55660003",
"0.5563765"
] | 0.65945077 | 1 |
Master function for preprocessing documents. Reads from postman.posts_read and outputs to postman.posts_write | def process(self):
# tokenize, then filter & otherwise process words in each document
# using steps in preprocess_doc()
all_posts_count = self.postman.posts_read.find({'subreddit': self.postman.subreddit}).count()
for post_idx, post in enumerate(self.postman.posts_read.find({'subreddit': self.postman.subreddit})):
# preprocess the post and add the new words to the corpus
new_words = self.preprocess_post(post)
self.corpus.update(new_words)
# print on every Nth post so you know it's alive
if post_idx % 100 == 0:
print 'done post %i out of %i' % (post_idx, all_posts_count)
#TODO:
print 'word count and other corpus-level filters not implemented, skipping...'
# corpus-level filtering
# get rid of invalid documents (based on word count)
# self.corpus = [doc for doc in self.corpus if self.doc_has_valid_wc(doc)]
# print 'filtered out %i out of %i documents' % (pre_corpus_len - len(self.corpus), pre_corpus_len)
# stem or lemmatize
# if self.stem_or_lemma_callback:
# self.corpus = [self.perform_stem_or_lem(doc) for doc in self.corpus]
# for chaining
#######################################################
return self | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pre_process(self, documents):\n\n return documents",
"def _preprocess(self):\n self.data['sentences'] = self.data['text'].apply(self._tokenize_sent)\n self.data['nouns'] = self.data['sentences'].apply(self._get_nouns)\n # self._get_frequent_features()\n # self._compactness_pruning()\n # self._redundancy_pruning()\n # self._get_features()\n self._extract_opinions()",
"def preprocess_post(self, post):\n # tokenize, clean, & tag part-of-speech for all words\n if self.document_level == 'postwise':\n\n doc_text = all_comments_from_post(post)\n # leave early if there's nothing there\n if doc_text == '':\n return []\n\n tokens = nltk.word_tokenize(doc_text)\n # TODO: skip this if there's no POS filtering args!\n tagged = nltk.pos_tag(tokens)\n\n # filter out most invalid words with valid_word()\n processed_document = []\n for word, pos_tag in tagged:\n if self.valid_word(word, pos_tag):\n cleaned_word = self.clean_word(word)\n # things like digits and other junk become empty string,\n # so exclude them from final document\n if cleaned_word:\n processed_document.append(cleaned_word)\n # finally, update the post\n post['postwise'] = {'tokens': processed_document, 'text': doc_text}\n self.postman.posts_write.update_one({'_id':post['_id']}, {'$set':post}, upsert=True)\n else:\n raise NotImplementedError('document_level: \"%s\"' % self.document_level)\n\n return processed_document",
"def preproc_doc(document):\n\n # Each document is a list of lines\n tokenizer = tokenization.FullTokenizer(\n vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)\n\n # set a random seed for reproducability\n # since this function is run in parallel, if we hardcode a seed, all\n # documents will have the same permutations. Instead we use the hash of the\n # first sentence as the seed so it is different for each document and it\n # is still reproducible.\n hash_object = hashlib.md5(document[0])\n rng = random.Random(int(hash_object.hexdigest(), 16) % (10**8))\n\n # Each document is composed of a list of sentences. We create paragraphs\n # by keeping together sentences on the same line and adding adjacent sentences\n # if there are fewer than 5 to form the paragraph.\n # The utility functions below expect the document to be split by paragraphs.\n list_of_paragraphs = []\n paragraph = []\n for line in document:\n line = tokenization.convert_to_unicode(line)\n line = line.replace(u\"\\u2018\", \"'\").replace(u\"\\u2019\", \"'\")\n sents = split_line_by_sentences(line)\n for sent in sents:\n tokens = tokenizer.tokenize(sent)\n if tokens:\n paragraph.append(tokens)\n if len(paragraph) > 5:\n list_of_paragraphs.append(paragraph)\n paragraph = []\n\n # In case of any empty paragraphs, remove them.\n list_of_paragraphs = [x for x in list_of_paragraphs if x]\n\n # Convert the list of paragraphs into TrainingInstance object\n # See preprocessing_utils.py for definition\n if FLAGS.format == FORMAT_BINARY:\n instances = create_instances_from_document(list_of_paragraphs,\n FLAGS.max_seq_length, rng)\n elif FLAGS.format == FORMAT_PARAGRAPH:\n instances = create_paragraph_order_from_document(list_of_paragraphs,\n FLAGS.max_seq_length, rng)\n\n # Convert token lists into ids and add any needed tokens and padding for BERT\n tf_examples = [\n convert_instance_to_tf_example(tokenizer, instance,\n FLAGS.max_seq_length)[0]\n for instance in instances\n ]\n\n # Serialize TFExample for writing to file.\n tf_examples = [example.SerializeToString() for example in tf_examples]\n\n return tf_examples",
"def preprocess(self):",
"def preprocess(doc_in, doc_out):\n def output(text, doc_id):\n doc_out.write(doc_id + \"\\n\")\n doc_out.write(text.replace(\"\\n\", \" \") + \"\\n\\n\")\n\n def filter_text(t):\n filtered_out = [\"<P>\", \"</P>\"]\n r = t\n for f in filtered_out:\n r = r.replace(f, \" \")\n return r\n\n\n doc_id = None\n reading_text = False\n text = \"\"\n for line in doc_in:\n if(str_text_start in line):\n if(reading_text):\n warning(\"Found \" + str_text_start + \" in text\")\n if(not doc_id):\n warning(\"Reading text without knowing id\")\n continue\n reading_text = True\n continue\n if((str_text_stop in line) and reading_text):\n output(text, doc_id)\n text = \"\"\n reading_text = False\n doc_id = None\n doc_id_match = pat_doc_no.match(line)\n if(doc_id_match):\n doc_id = doc_id_match.group(1)\n if(reading_text):\n warning(\"Found doc id in text\")\n continue\n if(reading_text):\n text = text + filter_text(line)",
"def preprocess(self, documents):\n\n # Store the total number of documents\n num_docs = np.float(len(documents))\n\n # A dict storing the frequency of each word across all documents\n total_word_freq = {}\n\n # A dict storing the number of documents that word appears in\n doc_word_freq = {}\n\n # Iterate over all documents\n for doc in documents:\n # Split the string into a list of words\n words = extract_words(doc)\n\n # Update the 'total_word_freq' dict using all words in 'words'\n for w in words:\n ''' YOUR CODE HERE '''\n if w not in total_word_freq.keys():\n total_word_freq[w] = 1\n else:\n total_word_freq[w] += 1\n\n ''' END CODE FOR THIS LOOP '''\n\n # Update the 'doc_word_freq' dict. Remember to only add '1' corresponding to\n # each word in a document. In case a word appears twice in a document, then\n # it should be ignored. We use the set() data structure to achieve this.\n for w in set(words):\n ''' YOUR CODE HERE '''\n if w not in doc_word_freq:\n doc_word_freq[w] = 1\n else:\n doc_word_freq[w] += 1\n\n ''' END CODE FOR THIS LOOP '''\n\n # A set of words with total frequency less than 'self.min_freq'\n remove_words = set()\n\n ''' YOUR CODE HERE '''\n\n # Check frequency of each word and add to 'remove_words'\n for w in total_word_freq.keys():\n if total_word_freq[w] < self.min_freq:\n remove_words.add(w)\n\n # Delete the words in 'remove_words' from 'total_word_freq' and\n # 'doc_word_freq'.\n for w in remove_words:\n del total_word_freq[w]\n del doc_word_freq[w]\n\n # Create a numpy array to store frequencies from which\n # we can create the 'self.idf' preprocessed numpy array.\n word_freq_tensor = np.zeros(len(doc_word_freq))\n\n # For each word in 'doc_word_freq' dict, update\n # 'self.word_to_idx' and 'self.idx_to_word' and\n # 'word_freq_tensor'.\n i = 0\n for w in doc_word_freq.keys():\n self.word_to_idx[w] = i \n self.idx_to_word[i] = w\n word_freq_tensor[i] = doc_word_freq[w]\n i+=1\n \n #print(word_freq_tensor.shape)\n #print(word_freq_tensor)\n # Calculate 'self.idf' (see hint.pdf for formula)\n self.idf = -1*np.log(word_freq_tensor/(len(documents)))\n ''' END YOUR CODE HERE '''",
"def preprocess(self):\n pass",
"def preprocess(self):\n pass",
"def preprocess(self):\n pass",
"def get_preprocessed_docs(formatted_docs):\r\n preprocessed_docs = {}\r\n for idx in formatted_docs.keys():\r\n preprocessed_docs[idx] = preprocess(formatted_docs[idx])\r\n return preprocessed_docs",
"def preprocess_docs():\n\n print(\"Getting started!\")\n stopwords.populate_stopwords(NLP, STOPWORD_URL)\n\n print(str.format(\"Using data dir:{}\", DATA_DIR))\n\n csv_file = open(os.path.join(DATA_DIR, 'PDFs.csv'))\n reader = csv.reader(csv_file, 'excel')\n rows = list(reader)\n\n filenames = [_get_filename(row) for row in rows]\n\n pool = Pool(multiprocessing.cpu_count())\n\n try:\n pool.map(_get_item, rows)\n pool.map(pdf.extract_text, filenames)\n docs = pool.map(_extract_questions, rows)\n docs = [d for d in docs if d is not None]\n\n _find_similar(docs, simdoc=compare.compare_doc_keywords)\n\n for doc in docs:\n if doc is None:\n continue\n doc.save_json()\n\n except KeyboardInterrupt:\n pool.terminate()\n print(\"You cancelled the program!\")\n sys.exit(1)\n\n print(\"Done\")",
"def preprocess(self, documents):\n\n # A dict storing the frequency of each word\n word_freq = {}\n\n # Iterate for each document\n for doc in documents:\n # Split the document into a list of words and iterate on it\n for w in extract_words(doc):\n # Update word frequencies\n '''YOUR CODE HERE'''\n if w not in word_freq.keys():\n word_freq[w] = 1\n else:\n word_freq[w] += 1\n\n ''' END CODE FOR THIS LOOP '''\n\n\n # A set of words with frequency less than 'self.min_freq'\n remove_words = set()\n\n # Check frequency of each word and add to 'remove_words'\n # if it's frequency is below self.min_freq\n\n ''' YOUR CODE HERE '''\n for w in word_freq.keys():\n if word_freq[w] < self.min_freq:\n remove_words.add(w)\n\n # Delete the words in 'remove_words' from 'word_freq'\n for w in remove_words:\n del word_freq[w]\n\n # Fill 'self.word_to_idx' and 'self.idx_to_word' for\n # each word in 'word_freq' (dicts are explained above)\n\n i = 0\n for w in word_freq.keys():\n self.word_to_idx[w] = i\n self.idx_to_word[i] = w \n i += 1\n\n ''' END YOUR CODE HERE '''",
"def do_single_file_preprocess(pdf_file):",
"def preprocess_main():",
"def post_processor(self):",
"def process(text, output_dir, file_name, json_output):\n\t\n\t# Process HTML\n\tprocessed_text_html = process_html(text)\n\t# Write processed HTML output \n\t#pre_proc.create_text_file(output_dir + \"/html_\" + file_name + \"_pre.html\", processed_text_html)\n\n\t# Convert HMTL to MD\n\ttext_md = pre_proc.extract_text_md(processed_text_html)\n\n\t# Process MD\n\tprocessed_text_md = process_md(text_md)\n\t\n\tif(json_output):\n\t\t# Convert MD to JSON\n\t\tprocessed_json = pre_proc.convert_md_to_json(processed_text_md, file_name)\n\t\t# Write processed JSON output \n\t\tpre_proc.create_binary_file(output_dir + \"/\" + file_name + \".json\", processed_json)\n\telse:\n\t\t# Write processed MD output \n\t\tpre_proc.create_text_file(output_dir + \"/\" + file_name + \".md\", processed_text_md)",
"def file_preprocessing(input_file, output_file):\n # print(\"processing file \" + input_file)z\n # replace the punctuations with space\n replace_punctuation = str.maketrans(string.punctuation, ' '*len(string.punctuation))\n # stemming\n stemmer = PorterStemmer()\n\n with open(input_file, 'r', encoding='utf-8', errors='replace') as inFile, open(output_file,'w') as outFile:\n for line in inFile:\n # replace punctuations\n # convert camel case into space separated\n # convert snake case into space separated\n # remove language keywords\n custom_stopwords = [\"ENDCOND\",\"PVSCL\", \"IFCOND\", \"EVAL\", \"ENDCOND\", \"ELSECOND\", \"ELSEIFCOND\", \"WINDOW\", \"FUNCTION\",\n \"CALLBACK\", \"ABWA\", \"ERROR\", \"TODO\", \"RESOLVE\", \"DOCUMENT\", \"CLASS\", \"LINE\", \"ELEMENT\", \"UTILS\",\n \"NEW\", \"IS\", \"EMPTY\",\"ANNOTATIONS\",\"ANNOTATION\",\"UTILS\",\"CURRENT\",\"TEXT\",\"GET\",\"NAME\",\"LISTERNER\",\n \"ADD\", \"EVENT\", \"CREATE\",\"FOR\", \"FIND\", \"LENGTH\", \"USER\", \"VALUE\", \"ALERT\", \"ALERTS\", \"ID\", \"HANDLER\",\n \"MESSAGE\", \"GROUP\", \"RETRIEVE\", \"MANAGER\", \"LANGUAGE\", \"CONTENT\", \"INIT\"]\n line_witout_puncs = ' '.join([snake_to_spaces(camel_to_spaces(word))\n for word in line.translate(replace_punctuation).split()\n if len(word) >=4 and word not in stopwords.words('english') #and #word.upper() not in (name.upper() for name in custom_stopwords)\n and word not in all_keywords])\n\n\n # stemming\n # singles = []\n # for plural in line_witout_puncs.split():\n # try:\n # singles.append(stemmer.stem(plural))\n # except UnicodeDecodeError:\n # print(plural)\n\n # line_stemmed = ' '.join(singles)\n # print(line_stemmed, file=outFile)\n print(line_witout_puncs.encode(\"utf-8\"), file=outFile)",
"def _preprocess(self, body):\n return body",
"def pre_process(self):\n pass",
"def pre_process(self):\n pass",
"def pre_process(self):\n pass",
"def pre_process(self):\n pass",
"def pre_process(self):\n pass",
"def process_corpus(args):\n\n fs = open(args.input,'r')\n out = list()\n for line in fs:\n blob = TextBlob(line.strip())\n result_info = dict()\n result_info\n result_info['correct'] = str(blob.correct())\n if args.parse :\n result_info['parse'] = get_parsed_text(blob)\n if args.tokenize:\n result_info['tokenize'] = get_tokenizer_result(blob)\n if args.sentiment:\n result_info['sentiment'] = analyze_sentiment(blob)\n if args.sentence_sentiment:\n result_info['sentence_sentiment'] = analyze_sentence_sentiment(blob)\n if args.noun_phrase:\n result_info['noun_phrase'] = get_noun_phrases(blob)\n if args.pos:\n result_info['pos'] = get_pos_tags(blob)\n\n out.append(result_info)\n print out\n json.dump(out,open('out.json','w'))\n fs.close()\n print '******************************* Execution completed *********************************'",
"def createPostingList():\n try :\n ##### peut etre à mettre dans une fonction\n file_reader_last_read_list = initialize_file_readers()\n for idx, file_reader_and_last_read in enumerate(file_reader_last_read_list):\n file_reader_last_read_list[idx]=read_line_and_update(file_reader_and_last_read=file_reader_and_last_read)\n current_word = min_top_word(file_reader_last_read_list=file_reader_last_read_list)\n final_file = open(POSTING_LIST_PATH, \"w\")\n ######\n\n doc_dict = get_doc_dict(DOC_LIST_PATH)\n nb_doc = len(doc_dict)\n\n ### autre function\n i = 0 \n while current_word != \"|||\": \n current_PL = current_word_PL(current_word=current_word, file_reader_last_read_list=file_reader_last_read_list,\\\n doc_dict=doc_dict, nb_doc=nb_doc ) \n curent_string = \"\"\n for key, value in current_PL.items():\n curent_string = \" \" + str(key) + \" \" + str(value) + curent_string\n curent_string = current_word + curent_string\n final_file.write(curent_string + \"\\n\")\n current_word = min_top_word(file_reader_last_read_list=file_reader_last_read_list)\n #if i %1000 == 0:\n #print(i/1000)\n i +=1\n ####\n \n final_file.close()\n close_file_readers(file_reader_last_read_list=file_reader_last_read_list)\n \n except Exception as ex:\n print(ex)\n final_file.close()\n close_file_readers(file_reader_last_read_list=file_reader_last_read_list)",
"def process(self):\n self.extract()\n self.transform()\n self.load()",
"def preprocess(self):\n self.word_to_id, self.unk_word_list = self.build_vocab(mode=\"word\")\n self.word_vocab_size = len(self.word_to_id)\n self.max_word_len = self.get_max_word_length(self.word_to_id)\n # Do not write the same file again\n if not os.path.exists(self.words_vocab_file):\n with open(self.words_vocab_file, 'wb') as f:\n pickle.dump((self.word_to_id, self.unk_word_list), f)\n if self.unit != \"word\":\n self.preprocess_sub_units()",
"def preprocess(\n self,\n manifest_dir,\n filename,\n content_property,\n kwargs=None,\n add_properties=None,\n remove_properties=None,\n ppversion=\"0.1\",\n ):\n\n # Start doc timer\n doc_start = time.time()\n\n # Initialise the Document object\n try:\n doc = Document(\n manifest_dir,\n filename,\n content_property=content_property,\n model=self.nlp,\n kwargs=kwargs,\n )\n except UnicodeDecodeError as err:\n print(\"Document failed:\", filename, \":\", err)\n return False\n\n # short-circuit and skip if JSON was already processed by version\n try:\n if doc.manifest_dict[\"ppversion\"] == ppversion:\n return True\n except KeyError:\n doc.manifest_dict[\"ppversion\"] = ppversion\n\n # export the wikifier document if the directory is set\n if self.wikifier_output_dir:\n doc.export_content(output_dir=self.wikifier_output_dir)\n\n # Remove manifest properties if the remove_properties list is submitted\n if remove_properties is not None:\n doc.remove_property(remove_properties, save=False)\n\n # Sort and serialise the features table\n features = doc.get_features()\n features.sort_values(by=[\"TOKEN\"], inplace=True)\n features_list = json.loads(pd.DataFrame.to_json(features, orient=\"values\"))\n features_list.insert(0, list(features.columns))\n doc.manifest_dict[\"features\"] = features_list\n\n # Bagify the normed tokens (skipping punctuation and line breaks)\n # Attempt to remove stray punctuation\n punct = re.compile(r\"\\.\\W|\\W\\.|^[\\!\\?\\(\\),;:\\[\\]\\{\\}]|[\\!\\?\\(\\),;:\\[\\]\\{\\}]$\")\n filtered = [\n re.sub(punct, \"\", token.norm_)\n for token in doc.content\n if token.norm_ != \"_\"\n and token.is_punct != True\n and token.is_space != True\n and token.is_digit != True\n ]\n filtered = sorted(filtered)\n doc.manifest_dict[\"bag_of_words\"] = dict(Counter(filtered))\n\n # Add any additional properties to the manifest:\n if add_properties is not None:\n for property in add_properties:\n if property == \"lemmas\":\n doc.manifest_dict[\"lemmas\"] = doc.lemmas(as_list=True)\n if property == \"punctuation\":\n doc.manifest_dict[\"punctuation\"] = doc.punctuation(as_list=True)\n if property == \"pos\":\n doc.manifest_dict[\"pos\"] = doc.pos(as_list=True)\n if property == \"tags\":\n doc.manifest_dict[\"tags\"] = doc.tags(as_list=True)\n if property.startswith(\"stems\"):\n options = property.split(\":\")\n doc.manifest_dict[\"stems\"] = doc.stems(\n stemmer=options[1], as_list=True\n )\n if property.startswith(\"ngrams\"):\n doc.manifest_dict[\"ngrams\"] = doc.ngrams(n=options[1], as_list=True)\n\n # Add the readability scores to the manifest\n doc.manifest_dict[\"readability_scores\"] = doc.readability_scores(as_list=True)[\n 0\n ]\n\n # Add the total word count (skipping punctuation and line breaks) to the manifest\n doc.manifest_dict[\"word_count\"] = len(\n doc.filter(\n column=\"TOKEN\",\n skip_punct=True,\n skip_stopwords=False,\n skip_linebreaks=True,\n )\n )\n\n # Add the country in which the document was published\n if self.sources:\n doc.manifest_dict[\"country\"] = [\n x for x in self.sources if x[\"source_title\"] == doc.manifest_dict[\"pub\"]\n ][0][\"country\"]\n\n # Add language model metadata\n doc.manifest_dict[\"language_model\"] = self.nlp.meta\n custom = {\n \"linebreak_regex\": str(regex.LINEBREAK_REGEX),\n \"nonbreak_regex\": str(regex.NONBREAKING_SPACE_REGEX),\n \"prefix_re\": str(regex.PREFIX_RE),\n \"suffix_re\": str(regex.SUFFIX_RE),\n \"infix_re\": str(regex.INFIX_RE),\n \"simple_url_re\": str(regex.SIMPLE_URL_RE),\n \"add_stopwords\": 
self.add_stopwords,\n \"remove_stopwords\": self.remove_stopwords,\n \"lemmatization_cases\": self.lemmatization_cases,\n \"skip_entities\": self.skip_entities,\n }\n doc.manifest_dict[\"language_model\"][\"custom\"] = custom\n\n # Save the changes to the manifest\n with open(doc.manifest_filepath, \"w\", encoding=\"utf-8\") as f:\n f.write(json.dumps(doc.manifest_dict))\n\n # Print time to completion\n doc_end = time.time()\n doc_t = doc_end - doc_start\n # print('Processed ' + doc.manifest_filepath + ' in ' + str(doc_t) + ' seconds.')",
"def _transform(self, document):\n pass"
] | [
"0.6777922",
"0.63840467",
"0.63757926",
"0.6265362",
"0.624771",
"0.61495733",
"0.61032873",
"0.6045339",
"0.6045339",
"0.6045339",
"0.60180175",
"0.6013464",
"0.6000915",
"0.59672505",
"0.5880899",
"0.5841607",
"0.580233",
"0.5737133",
"0.5736675",
"0.573466",
"0.573466",
"0.573466",
"0.573466",
"0.573466",
"0.57291335",
"0.57263917",
"0.5713223",
"0.5705813",
"0.56944513",
"0.5666831"
] | 0.67262375 | 1 |
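For reference, the `process` method in the row above loops over the stored posts, cleans each one through `preprocess_post`, and folds the resulting tokens into a corpus-level counter, with the corpus-level filtering still marked as TODO. Below is a minimal, self-contained Python 3 sketch of only that postwise pattern, under stated assumptions: there is no MongoDB/`postman` layer, a plain regex tokenizer stands in for `nltk.word_tokenize`, and the stopword set is purely illustrative.

# Minimal sketch of the postwise preprocessing loop: clean each document,
# then update a corpus-level Counter. Not the original class from the row above.
import re
from collections import Counter

STOPWORDS = {"the", "a", "an", "and", "or", "of", "to", "is"}  # illustrative only

def preprocess_post(text):
    """Keep lowercase alphabetic, non-stopword tokens from one document."""
    tokens = re.findall(r"[a-z]+", text.lower())
    return [t for t in tokens if t not in STOPWORDS]

def process(posts):
    """Fold per-document tokens into a corpus-level word count."""
    corpus = Counter()
    for idx, text in enumerate(posts):
        corpus.update(preprocess_post(text))
        if idx % 100 == 0:  # progress print, echoing the row's loop
            print("done post %i out of %i" % (idx, len(posts)))
    return corpus

if __name__ == "__main__":
    demo = ["The cat sat on the mat.", "A dog and a cat."]
    print(process(demo).most_common(3))
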
Add TimeBased Trimming Input Stream | def create(self, encoding_id, time_based_trimming_input_stream, **kwargs):
# type: (string_types, TimeBasedTrimmingInputStream, dict) -> TimeBasedTrimmingInputStream
return self.api_client.post(
'/encoding/encodings/{encoding_id}/input-streams/trimming/time-based',
time_based_trimming_input_stream,
path_params={'encoding_id': encoding_id},
type=TimeBasedTrimmingInputStream,
**kwargs
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _trimTime(time,data,tStart,tStop):\t\n\tif tStart is None:\n\t\tiStart=0;\n\t\tiStop=len(time);\n\telse:\n\t\t# determine indices of cutoff regions\n\t\tiStart=_process.findNearest(time,tStart); # index of lower cutoff\n\t\tiStop=_process.findNearest(time,tStop);\t # index of higher cutoff\n\t\t\n\t# trim time\n\ttime=time[iStart:iStop];\n\t\n\t# trim data\n\tif type(data) is not list:\n\t\tdata=[data];\n\tfor i in range(0,len(data)):\n\t\tdata[i]=data[i][iStart:iStop];\n\t\t\n\treturn time, data",
"def get(self, encoding_id, input_stream_id, **kwargs):\n # type: (string_types, string_types, dict) -> TimeBasedTrimmingInputStream\n\n return self.api_client.get(\n '/encoding/encodings/{encoding_id}/input-streams/trimming/time-based/{input_stream_id}',\n path_params={'encoding_id': encoding_id, 'input_stream_id': input_stream_id},\n type=TimeBasedTrimmingInputStream,\n **kwargs\n )",
"def trim(self, s, min_freq=0, max_freq=float('Inf'), min_time=0,\n max_time=float('Inf'), save_metadata=True):\n # Regard default parameters\n if max_freq > s.metadata.max_freq:\n max_freq = s.metadata.max_freq\n\n if max_time > s.metadata.max_time:\n max_time = s.metadata.max_time\n\n # Finds frequency and time bounds\n maxK = s.freq_bin(max_freq)\n minK = s.freq_bin(min_freq)\n maxT = s.time_bin(max_time)\n minT = s.time_bin(min_time)\n\n #print min_time, max_time, min_freq, max_freq\n\n new_s = spectrogram.Spectrogram()\n new_s.data = s.data[minK:maxK+1, minT:maxT+1]\n new_s.metadata.min_freq = s.freq_range(minK)[0]\n new_s.metadata.min_time = s.time_range(minT)[0]\n new_s.metadata.max_freq = s.freq_range(maxK)[0]\n new_s.metadata.max_time = s.time_range(maxT)[0]\n new_s.metadata.sampling_configuration = \\\n s.metadata.sampling_configuration\n new_s.metadata.input_metadata = copy.deepcopy(s.metadata)\n\n new_s.metadata.method = md.Metadata(original_input=s.metadata.input,\n original_method=s.metadata.method,\n name='trim',\n min_freq=min_freq,\n max_freq=max_freq,\n min_time=min_time,\n max_time=max_time)\n if save_metadata:\n new_s.metadata.input = md.ObjectMetadata(s)\n\n return new_s",
"def trim2(self, starttime=None, endtime=None, reftime=None, check_npts=True, **kwargs):\n # same as in rf package + mid possibility\n for tr in self.traces:\n st = tr.stats\n ref = (st.starttime + 0.5 * (st.endtime - st.starttime)\n if reftime in ('mid', 'middle') else reftime)\n t1 = _seconds2utc(tr, starttime, reftime=ref)\n t2 = _seconds2utc(tr, endtime, reftime=ref)\n tr.trim(t1, t2, **kwargs)\n if check_npts:\n npts = int(round(np.median([len(tr) for tr in self.traces])))\n self.traces = [tr for tr in self.traces if len(tr) >= npts]\n for tr in self.traces:\n tr.data = tr.data[:npts]\n return self",
"def trim_silence(T, hz, signal):\n N = T * hz\n extra = len(signal) - N\n c = np.abs(signal).cumsum()\n c = c[-extra:] - c[:extra]\n i = np.argmax(c)\n print(f'Keeping {T:.2g} of {len(signal)/hz:.2g} seconds'\n f' starting at +{i/hz:.2f} seconds')\n return signal[i:i+N]",
"def filterTimeCorr(corrPath, keepTimes, linesPerTime):\n\n # The file has stanzas beginning with \n # ---\n # JobID: 287307\n # Followed by metadata, ending with\n # ...\n # ---\n # correlator: P5-P5\n # Followed by further metadata and correlator values\n # Followeid by an EOF or a new stanza\n\n try:\n cfp = open(corrPath,'r')\n except:\n print(\"ERROR opening\", corrPath, \"for reading.\")\n return 1\n\n remainingTimes = set(keepTimes)\n \n # Read correlator stanzas, one at a time. Write the stanzas we want to keep\n inCorr = False\n linesStanza = \"\"\n linesCorrFile = \"\"\n t0 = \"\"\n linect = 0\n for line in cfp:\n a = line.split()\n if a[0] == '---':\n if inCorr:\n # Flush previous stanza unless we are removing it\n if len(linesStanza) > 0 and t0 in remainingTimes:\n linesCorrFile += linesStanza\n linesStanza = \"\"\n inCorr = False\n elif a[0] == \"correlator:\":\n inCorr = True\n elif a[0] == \"JobID:\":\n inCorr = False\n remainingTimes.discard(t0)\n linesStanza = '---\\n'\n linect = 1\n elif a[0] == \"antiquark_source_origin:\":\n # Format is\n # antiquark_source_origin: [ 0, 0, 0, 78 ]\n t0 = a[5]\n # Drop excess lines in stanza\n if linect < linesPerTime:\n linesStanza += line\n linect += 1\n\n # Flush previous stanza unless we are removing it\n if inCorr and len(linesStanza) > 0 and t0 in remainingTimes:\n linesCorrFile += linesStanza\n remainingTimes.discard(t0)\n \n if len(remainingTimes) > 0:\n print(\"ERROR: the following times were unexpectedly missing from\", corrPath)\n print(remainingTimes)\n return 1\n \n cfp.close()\n\n try:\n cfp = open(corrPath,'w')\n except:\n print(\"ERROR opening\", corrPath,\"for writing\")\n \n cfp.write(linesCorrFile)\n cfp.close()\n\n return 0",
"def truncate_data(self, width):\n times_from_mid = self.time - self.midtime\n idxs = np.abs(times_from_mid) < 0.5 * width * self.duration\n self.time = self.time[idxs]\n self.flux = self.flux[idxs]",
"def single_time_stream(self, stream_time=120, do_plot=False, fmin=5, fmax=50):\n self.time_stream_data.take_ts(stream_time=stream_time)\n if do_plot:\n self.time_stream_data.plot_ts()\n self.time_stream_data.get_median_bias_wl(fmin=fmin, fmax=fmax)\n if self.verbose:\n print(f'wl_median {self.time_stream_data.wl_median}')",
"def stream_formatter(streamified):\n trimmed, exactstart,exactend = stream_trimmer(streamified['stream'],\n streamified['starttime'], streamified['endtime'])\n streamified['timeseries'] = trimmed\n streamified['exactstart'] = exactstart\n streamified['exactend']= exactend\n return streamified",
"def trim_audio(data, rate=44100, start_trim=0, end_trim=0, log=False):\n chop = np.copy(data[start_trim*rate : len(data)-end_trim*rate])\n if log:\n m, s = divmod(float(len(data))/rate, 60)\n h, m = divmod(m, 60)\n logging.info(\"Original recording length: %d h %d m %d s\" % (h, m, s))\n logging.info(\"Removed [%d s, %d s] from [start, end] of recording.\" %\n (start_trim, end_trim))\n return chop",
"def trim_sub_sample(file: str,\r\n start_time: str,\r\n end_time: str,\r\n sample_start_time: str,\r\n sample_end_time: str,\r\n timestamp_format: str = '%H:%M:%S') -> str:\r\n trim_duration = calculate_duration(sample_start_time, sample_end_time)\r\n _start_time = datetime.strptime(start_time, timestamp_format)\r\n _start_time = int(_start_time.strftime('%s'))\r\n _sample_start_time = datetime.strptime(sample_start_time, timestamp_format)\r\n _sample_start_time = int(_sample_start_time.strftime('%s'))\r\n _end_time = datetime.strptime(end_time, timestamp_format)\r\n _end_time = int(_end_time.strftime('%s'))\r\n _sample_end_time = datetime.strptime(sample_end_time, timestamp_format)\r\n _sample_end_time = int(_sample_end_time.strftime('%s'))\r\n idx = 1\r\n if duration(file) < trim_duration:\r\n trim_duration = duration(file)\r\n if _sample_start_time < _start_time:\r\n start = 0\r\n else:\r\n start = int(_sample_start_time - _start_time)\r\n if _sample_end_time < _end_time:\r\n end = int(start + trim_duration)\r\n else:\r\n end = duration(file)\r\n trim_video(file, filename(file, idx), start, end)\r\n return filename(file, idx)",
"def make_masked_time_stream(Blocks, ntime=None, window=None, \n return_means=False, subtract_slope=False) :\n\n # Shape of all axes except the time axis.\n back_shape = Blocks[0].dims[1:]\n # Get the time sample spacing.\n Blocks[0].calc_time()\n dt = abs(sp.mean(sp.diff(Blocks[0].time)))\n # Find the beginning and the end of the time axis by looping through\n # blocks.\n # Also get the time axis and the mask\n # for calculating basis polynomials.\n unmask = sp.zeros((0,) + back_shape, dtype=bool)\n time = sp.zeros((0,), dtype=float)\n start_ind = []\n min_time = float('inf')\n max_time = 0.0\n #mean_time = 0.0\n #n_data_times = 0\n for Data in Blocks :\n Data.calc_time()\n start_ind.append(len(time))\n time = sp.concatenate((time, Data.time))\n this_unmask = sp.logical_not(ma.getmaskarray(Data.data))\n unmask = sp.concatenate((unmask, this_unmask), 0)\n # Often the start or the end of a scan is completly masked. Make sure\n # we don't start till the first unmasked time and end at the last\n # unmasked time.\n time_unmask = sp.alltrue(ma.getmaskarray(Data.data), -1)\n time_unmask = sp.alltrue(time_unmask, -1)\n time_unmask = sp.alltrue(time_unmask, -1)\n if sp.alltrue(time_unmask):\n continue\n time_unmask = sp.logical_not(time_unmask)\n min_time = min(min_time, min(Data.time[time_unmask]))\n max_time = max(min_time, max(Data.time[time_unmask]))\n #mean_time += sp.sum(Data.time[time_unmask])\n #n_data_times += len(Data.time[time_unmask])\n # Ensure that the time sampling is uniform.\n if not (sp.allclose(abs(sp.diff(Data.time)), dt, rtol=0.1)\n and sp.allclose(abs(sp.mean(sp.diff(Data.time))), dt,\n rtol=0.001)) :\n msg = (\"Time sampling not uniformly spaced or Data Blocks don't \"\n \"agree on sampling.\")\n raise ce.DataError(msg)\n # Ensure the shapes are right.\n if Data.dims[1:] != back_shape :\n msg = (\"All data blocks must have the same shape except the time \"\n \"axis.\")\n raise ce.DataError(msg)\n # Now calculate basis polynomials for the mean mode and the slope mode.\n polys = misc.ortho_poly(time[:,None,None,None], 2, unmask, 0)\n #mean_time /= n_data_times\n #if n_data_times == 0:\n # n_data_times = 1\n # Very important to subtract the mean out of the signal, otherwise the\n # window coupling to the mean (0) mode will dominate everything. Can also\n # optionally take out a slope.\n # Old algorithm.\n #total_sum = 0.0\n #total_counts = 0\n #total_slope = 0.0\n #time_norm = 0.0\n #for Data in Blocks:\n # total_sum += sp.sum(Data.data.filled(0), 0)\n # total_counts += ma.count(Data.data, 0)\n # total_slope += sp.sum(Data.data.filled(0) \n # * (Data.time[:,None,None,None] - mean_time), 0)\n # time_norm += sp.sum(sp.logical_not(ma.getmaskarray(Data.data))\n # * (Data.time[:,None,None,None] - mean_time)**2, 0)\n #total_counts[total_counts == 0] = 1\n #time_norm[time_norm == 0.0] = 1\n #total_mean = total_sum / total_counts\n #total_slope /= time_norm\n # New algorithm.\n mean_amp = 0\n slope_amp = 0\n for ii, Data in enumerate(Blocks):\n si = start_ind[ii]\n this_nt = Data.dims[0]\n data = Data.data.filled(0)\n mean_amp += sp.sum(data * unmask[si:si + this_nt,...]\n * polys[0,si:si + this_nt,...], 0)\n slope_amp += sp.sum(data * unmask[si:si + this_nt,...]\n * polys[1,si:si + this_nt,...], 0)\n polys[0,...] *= mean_amp\n polys[1,...] 
*= slope_amp\n # Calculate the time axis.\n if min_time > max_time:\n min_time = 0\n max_time = 6 * dt\n if not ntime :\n ntime = (max_time - min_time) // dt + 1\n elif ntime < 0:\n # 0 pad by a factor of at least -ntime, but at most 10% more than this.\n time_min = -ntime * (max_time - min_time) / dt\n n_block = 1\n while n_block < time_min/20.0:\n n_block *= 2\n ntime = (time_min//n_block + 1) * n_block\n\n time = sp.arange(ntime)*dt + min_time\n # Allowcate memory for the outputs.\n time_stream = sp.zeros((ntime,) + back_shape, dtype=float)\n mask = sp.zeros((ntime,) + back_shape, dtype=sp.float32)\n # Loop over all times and fill in the arrays.\n for ii, Data in enumerate(Blocks):\n this_nt = Data.dims[0]\n si = start_ind[ii]\n # Subtract the mean calculated above.\n this_data = Data.data.copy()\n this_data -= polys[0,si:si + this_nt,...]\n # If desired, subtract of the linear function of time.\n if subtract_slope:\n #this_data -= (total_slope \n # * (Data.time[:,None,None,None] - mean_time))\n this_data -= polys[1,si:si + this_nt,...]\n # Find the first and last unmasked times.\n time_unmask = sp.alltrue(ma.getmaskarray(this_data), -1)\n time_unmask = sp.alltrue(time_unmask, -1)\n time_unmask = sp.alltrue(time_unmask, -1)\n if sp.alltrue(time_unmask):\n continue\n time_unmask = sp.logical_not(time_unmask)\n unmasked_ind, = sp.where(time_unmask)\n first_ind = min(unmasked_ind)\n last_ind = max(unmasked_ind)\n # Ensure that the time sampling is uniform.\n if not (sp.allclose(abs(sp.diff(Data.time)), dt, rtol=0.1)\n and sp.allclose(abs(sp.mean(sp.diff(Data.time))), dt,\n rtol=0.001)) :\n msg = (\"Time sampling not uniformly spaced or Data Blocks don't \"\n \"agree on sampling.\")\n raise ce.DataError(msg)\n # Ensure the shapes are right.\n if Data.dims[1:] != back_shape :\n msg = (\"All data blocks must have the same shape except the time \"\n \"axis.\")\n # Apply an offset to the time in case the start of the Data Block\n # doesn't line up with the time array perfectly.\n offset = (time[sp.argmin(abs(time - Data.time[first_ind]))]\n - Data.time[first_ind])\n # Generate window function.\n if window:\n window_function = sig.get_window(window, last_ind - first_ind + 1)\n for ii in range(first_ind, last_ind + 1) :\n ind = sp.argmin(abs(time - (Data.time[ii] + offset)))\n if abs(time[ind] - (Data.time[ii])) < 0.5*dt :\n if sp.any(mask[ind, ...]) :\n msg = \"Overlapping times in Data Blocks.\"\n raise ce.DataError(msg)\n if window:\n window_value = window_function[ii - first_ind]\n else :\n window_value = 1.0\n time_stream[ind, ...] = (window_value \n * this_data[ii, ...].filled(0.0))\n mask[ind, ...] = window_value * sp.logical_not(ma.getmaskarray(\n this_data)[ii, ...])\n if return_means:\n return time_stream, mask, dt, polys[0,0,...]\n else :\n return time_stream, mask, dt",
"def test_trim_filter(self):\n expected_filter = (\n \"{inspec} {trim}=start={pi}:duration={d},{setpts}=PTS-STARTPTS \"\n \"{outspec}\".format(\n inspec=self.segment.input_stream_specifier(),\n trim=self.segment._TRIM, setpts=self.segment._SETPTS,\n pi=self.EXPECTED_PUNCH_IN.total_seconds(),\n d=self.EXPECTED_DURATION,\n outspec=self.segment.output_stream_specifier()))\n self.assertEqual(self.segment.trim_filter(), expected_filter)",
"def trim(self, start_time, end_time):\n\n # find indices of the times in self.times closest to min_t and max_t\n lowest_index = np.abs(self.times - start_time).argmin()\n highest_index = np.abs(self.times - end_time).argmin()\n\n # take slices of the spectrogram and spec_freq that fall within desired range\n return self.__class__(\n self.spectrogram[:, lowest_index : highest_index + 1],\n frequencies=self.frequencies,\n times=self.times[lowest_index : highest_index + 1],\n decibel_limits=self.decibel_limits,\n window_samples=self.window_samples,\n overlap_samples=self.overlap_samples,\n window_type=self.window_type,\n audio_sample_rate=self.audio_sample_rate,\n scaling=self.scaling,\n )",
"def list(self, encoding_id, query_params=None, **kwargs):\n # type: (string_types, TimeBasedTrimmingInputStreamListQueryParams, dict) -> TimeBasedTrimmingInputStream\n\n return self.api_client.get(\n '/encoding/encodings/{encoding_id}/input-streams/trimming/time-based',\n path_params={'encoding_id': encoding_id},\n query_params=query_params,\n pagination_response=True,\n type=TimeBasedTrimmingInputStream,\n **kwargs\n )",
"def delete(self, encoding_id, input_stream_id, **kwargs):\n # type: (string_types, string_types, dict) -> BitmovinResponse\n\n return self.api_client.delete(\n '/encoding/encodings/{encoding_id}/input-streams/trimming/time-based/{input_stream_id}',\n path_params={'encoding_id': encoding_id, 'input_stream_id': input_stream_id},\n type=BitmovinResponse,\n **kwargs\n )",
"def read_timed(self, buf: AnyWritableBuf, timer: Timer | int, /) -> None:",
"def trim_timings(phrase_length, timings):\n extra_hits = np.argwhere(np.cumsum(timings) > int(phrase_length)).ravel()\n\n if len(extra_hits) != 0:\n all_to_end = np.min(extra_hits)\n del timings[all_to_end:]\n\n return timings",
"def get_preprocessed_from_raw(sess_no, raw_path, align_on, from_time, to_time) :\n \n #params\n sess = '01'\n \n trial_length = abs(from_time - to_time)\n\n # Paths\n #raw_path = base_path + 'data/raw/' + sess_no + '/session' + sess + '/'\n rinfo_path = raw_path + 'recording_info.mat'\n tinfo_path = raw_path + 'trial_info.mat'\n\n # Define and loop over intervals\n \n srate = io.get_sfreq(rinfo_path) # = 1 000\n n_trials = io.get_number_of_trials(tinfo_path) \n last_trial = int(max(io.get_trial_ids(raw_path)))\n n_chans = io.get_number_of_channels(rinfo_path)\n channels = [ch for ch in range(n_chans)]\n\n # Pre-process data\n filtered = np.empty([n_trials,\n len(channels),\n int(trial_length * srate/1000)])\n\n trial_counter = 0; counter = 0\n while trial_counter < last_trial:\n n_zeros = 4-len(str(trial_counter+1))\n trial_str = '0' * n_zeros + str(trial_counter+1) # fills leading 0s\n if sess == '01' :\n file_in = sess_no + '01.' + trial_str + '.mat'\n else :\n file_in = sess_no + '02.' + trial_str + '.mat'\n \n if align_on == 'sample' : \n onset = io.get_sample_on(tinfo_path)[trial_counter].item()\n elif align_on == 'match' :\n onset = io.get_match_on(tinfo_path)[trial_counter].item()\n else :\n print(\"Petit problème avec align_on : 'sample' ou 'match' \")\n \n\n \n if np.isnan(onset): # drop trials for which there is no onset info\n print('No onset for ' + file_in)\n trial_counter += 1\n if trial_counter == last_trial:\n break\n else:\n counter += 1\n continue\n print(file_in)\n try:\n raw = io.get_data(raw_path + file_in)\n temp = pp.strip_data(raw,\n rinfo_path,\n onset,\n start=from_time,\n length=trial_length)\n \n if temp.shape[1] == trial_length: # drop trials shorter than length\n filtered[counter] = temp\n counter += 1\n except IOError:\n print('No file ' + file_in)\n trial_counter += 1\n\n # Return data\n\n filtered = np.array(filtered)\n return(filtered)",
"def win_slide(stream, start_time, win_size, step_size, max_windows):\n stream_list=[]\n for i in range(max_windows):\n ts = start_time + (i*step_size)\n st = stream.slice(ts, ts+win_size)\n # skip missing data\n if len(st)!=3: continue\n if not st[0].stats.starttime == st[1].stats.starttime == st[2].stats.starttime: continue\n if not st[0].stats.endtime == st[1].stats.endtime == st[2].stats.endtime: continue\n if len(st[0])!=int(win_size*100+1): continue\n if st.max()[0]==0.0 or st.max()[1]==0.0 or st.max()[2]==0.0: continue\n # add preprocessed time window\n st = preprocess_stream(st)\n stream_list.append(st)\n return stream_list",
"def trim(self, trim_samples):\n n = len(self.timestamps)\n self.timestamps = self.timestamps[:n - trim_samples]\n self.labels = self.labels[:n - trim_samples]\n self.emg = [x[:n - trim_samples] for x in self.emg]\n self.accel = [x[:n - trim_samples] for x in self.accel]\n self.gyro = [x[:n - trim_samples] for x in self.gyro]\n self.orient = [x[:n - trim_samples] for x in self.orient]",
"def trim_time(crush, lead_time):\n lead_time = pd.Timedelta(lead_time)\n crush = crush[crush.index >= (contact_time(crush) - lead_time)]\n crush = crush[crush.index < release_time(crush)]\n return crush",
"def trim(args) :\n from trimmer import trim_reads\n trim_reads(args)",
"def trim_myogram(raw_data, path, slicing_index='Stim'):\n\t# Collect data\n\tvolt_data = []\n\tstim_data = []\n\tslices_begin_time = []\n\tglobal title\n\n\t# data processing\n\ttitle_stim = 'Stim'\n\ttitle_rmg = 'RMG'\n\ttitle_rta = 'RTA'\n\tfor index, data_title in enumerate(raw_data['titles']):\n\t\tdata_start = int(raw_data['datastart'][index]) - 1\n\t\tdata_end = int(raw_data['dataend'][index])\n\t\tfloat_data = [round(float(x), 3) for x in raw_data['data'][0][data_start:data_end]]\n\t\tif title_rmg in data_title:\n\t\t\tvolt_data = float_data\n\t\t# if title_rta in data_title:\n\t\t# \tvolt_data = float_data\n\t\tif title_stim in data_title:\n\t\t\tstim_data = float_data\n\n\t# convert_bio_to_hdf5(volt_data, stim_data, path)\n\n\timport h5py as hdf5\n\t# with hdf5.File(path + \".hdf5\") as file:\n\t\t# for k,v in file.items():\n\t\t\t# print(k, v[:])\n\n\t# find peaks in stimulations data\n\tms_pause = 0\n\tbio_step = 0.25\n\t# print(\"stim_data = \", stim_data)\n\tfor index in range(1, len(stim_data) - 1):\n\t\tif stim_data[index - 1] < stim_data[index] > stim_data[index + 1] and ms_pause <= 0 and\\\n\t\t\t\tstim_data[index] > 0.5:\n\t\t\tslices_begin_time.append(index) # * real_data_step # division by 4 gives us the normal 1 ms step size\n\t\t\tms_pause = int(3 / bio_step)\n\t\tms_pause -= 1\n\t# print(\"slices_begin_time = \", slices_begin_time)\n\t# remove unnecessary data, use only from first stim, and last stim\n\tvolt_data = volt_data[slices_begin_time[0]:slices_begin_time[-1]]\n\n\t# move times to the begin (start from 0 ms)\n\tslices_begin_time = [t - slices_begin_time[0] for t in slices_begin_time]\n\t# print(\"len(volt_data) = \", len(volt_data))\n\treturn volt_data, slices_begin_time",
"def trim_by_points(file: str,\r\n start_time: int,\r\n end_time: int,\r\n factor: str = 's') -> str:\r\n idx = 1\r\n start_time = int(start_time)\r\n end_time = int(end_time)\r\n\r\n _factor = 1 if factor == 's' else 60\r\n total_limit = int(duration(file) / _factor)\r\n\r\n if factor == 'p':\r\n start_time = int((start_time / 100) * total_limit)\r\n end_time = int((end_time / 100) * total_limit)\r\n total_limit = 100\r\n\r\n if end_time < start_time:\r\n raise Exception('Ending time is less than starting time.')\r\n else:\r\n if end_time >= total_limit:\r\n if factor == 'p':\r\n print('Video doesn\\'t have frame to process.')\r\n else:\r\n print('Video doesn\\'t have frames to process and will max out.')\r\n end_time = total_limit\r\n elif start_time < 0:\r\n print('Start should be greater than 0.')\r\n start_time = 0\r\n trim_video(file, filename(file, idx), start_time * _factor,\r\n end_time * _factor)\r\n return filename(file, idx)",
"def non_causal_timecrop(self, length):\n assert length < self.time_length\n\n cut = (self.time_length - length) / 2\n\n _, i_start = _find_nearest(self.times, cut)\n _, i_end = _find_nearest(self.times, self.time_length - cut)\n\n h = np.fft.ifftshift(np.fft.fftshift(self.in_time)[..., i_start:i_end])\n\n new_response = self.from_time(self.fs, h)\n\n if new_response.time_length != length:\n w = f\"Could not precisely shrink to {length}s with fs = {self.fs}\"\n warnings.warn(w)\n\n return new_response",
"def split_ms(msin, msout, start_out, end_out):\n t = pt.table(msin, ack=False)\n\n starttime = t[0]['TIME']\n t1 = t.query('TIME > ' + str(starttime+start_out*3600) + ' && '\n 'TIME < ' + str(starttime+end_out*3600), sortlist='TIME,ANTENNA1,ANTENNA2')\n\n t1.copy(msout, True)\n t1.close()\n t.close()",
"def cull(self):\n now = time.time()\n self.lines = [line for line in self.lines if line.timestamp + self.timeout > now]",
"def process_timecards(self):\n timecard = open('timecards.txt','r')\n time_temp = []\n time = []\n for line in timecard:\n time_temp.append(line)\n for i in time_temp:\n time.append(i.split(','))\n for i in time:\n for q in range(len(i)):\n if q == 0:\n pass\n else:\n i[q] = float(i[q])\n for i in time:\n for q in range(len(i)):\n self.timecard[i[0]] = i[1:]\n #print(self.timecard)\n return self.timecard",
"def trimtimes(time, elmbeg, elmend, preft = 0.0, suft = 0.0):\n valididx = np.zeros(len(time),dtype='bool')\n \n elmbeg = elmbeg - preft\n elmend = elmend + suft\n for i in range(len(time)):\n t = time[i]\n boolbeg = t>=elmbeg\n boolend = t<=elmend\n boolelm = boolbeg & boolend\n valididx[i] = np.sum(boolelm)\n \n #To use only data outside of ELMs\n valididx = np.invert(valididx)\n return time[valididx], valididx"
] | [
"0.596018",
"0.5785316",
"0.57113206",
"0.56665367",
"0.5622098",
"0.55184543",
"0.5498387",
"0.54264224",
"0.5351055",
"0.5325785",
"0.52934694",
"0.5283096",
"0.5221919",
"0.5202626",
"0.5173048",
"0.5157478",
"0.5127592",
"0.51219743",
"0.5113893",
"0.50642955",
"0.5056635",
"0.5025312",
"0.5015724",
"0.50099564",
"0.5000739",
"0.49882153",
"0.4967988",
"0.49664083",
"0.49623442",
"0.49591202"
] | 0.58582246 | 1 |
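For reference, the create call in the row above and the delete call in the row that follows both go through an API client wrapper around the paths `/encoding/encodings/{encoding_id}/input-streams/trimming/time-based` and `.../{input_stream_id}`. Below is a hedged sketch of hitting those two paths directly with the `requests` library rather than the SDK client shown in the rows; the base URL and credential are placeholders, the `X-Api-Key` header name is an assumption, and the request body is whatever dict the caller supplies, since the service's real field names are not shown in the rows.

# Hypothetical sketch: exercise the two REST paths from the neighbouring rows
# with the `requests` library. Host, credential, and header name are assumptions.
import requests

BASE_URL = "https://api.example.com/v1"   # placeholder host
API_KEY = "YOUR-API-KEY"                  # placeholder credential
HEADERS = {"X-Api-Key": API_KEY}          # assumed auth header name

def create_time_based_trim(encoding_id, body):
    """POST a caller-supplied trimming payload for one encoding."""
    url = f"{BASE_URL}/encoding/encodings/{encoding_id}/input-streams/trimming/time-based"
    resp = requests.post(url, json=body, headers=HEADERS, timeout=30)
    resp.raise_for_status()
    return resp.json()

def delete_time_based_trim(encoding_id, input_stream_id):
    """DELETE one previously created time-based trimming input stream."""
    url = (f"{BASE_URL}/encoding/encodings/{encoding_id}"
           f"/input-streams/trimming/time-based/{input_stream_id}")
    resp = requests.delete(url, headers=HEADERS, timeout=30)
    resp.raise_for_status()
    return resp.json()
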
Delete TimeBased Trimming Input Stream | def delete(self, encoding_id, input_stream_id, **kwargs):
# type: (string_types, string_types, dict) -> BitmovinResponse
return self.api_client.delete(
'/encoding/encodings/{encoding_id}/input-streams/trimming/time-based/{input_stream_id}',
path_params={'encoding_id': encoding_id, 'input_stream_id': input_stream_id},
type=BitmovinResponse,
**kwargs
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _drop_old_data(self, current_time):\n for k in self._buf.keys():\n timelimit = current_time - self._lifetime\n if (k < timelimit):\n del self._buf[k]",
"def filterTimeCorr(corrPath, keepTimes, linesPerTime):\n\n # The file has stanzas beginning with \n # ---\n # JobID: 287307\n # Followed by metadata, ending with\n # ...\n # ---\n # correlator: P5-P5\n # Followed by further metadata and correlator values\n # Followeid by an EOF or a new stanza\n\n try:\n cfp = open(corrPath,'r')\n except:\n print(\"ERROR opening\", corrPath, \"for reading.\")\n return 1\n\n remainingTimes = set(keepTimes)\n \n # Read correlator stanzas, one at a time. Write the stanzas we want to keep\n inCorr = False\n linesStanza = \"\"\n linesCorrFile = \"\"\n t0 = \"\"\n linect = 0\n for line in cfp:\n a = line.split()\n if a[0] == '---':\n if inCorr:\n # Flush previous stanza unless we are removing it\n if len(linesStanza) > 0 and t0 in remainingTimes:\n linesCorrFile += linesStanza\n linesStanza = \"\"\n inCorr = False\n elif a[0] == \"correlator:\":\n inCorr = True\n elif a[0] == \"JobID:\":\n inCorr = False\n remainingTimes.discard(t0)\n linesStanza = '---\\n'\n linect = 1\n elif a[0] == \"antiquark_source_origin:\":\n # Format is\n # antiquark_source_origin: [ 0, 0, 0, 78 ]\n t0 = a[5]\n # Drop excess lines in stanza\n if linect < linesPerTime:\n linesStanza += line\n linect += 1\n\n # Flush previous stanza unless we are removing it\n if inCorr and len(linesStanza) > 0 and t0 in remainingTimes:\n linesCorrFile += linesStanza\n remainingTimes.discard(t0)\n \n if len(remainingTimes) > 0:\n print(\"ERROR: the following times were unexpectedly missing from\", corrPath)\n print(remainingTimes)\n return 1\n \n cfp.close()\n\n try:\n cfp = open(corrPath,'w')\n except:\n print(\"ERROR opening\", corrPath,\"for writing\")\n \n cfp.write(linesCorrFile)\n cfp.close()\n\n return 0",
"def trim(self, s, min_freq=0, max_freq=float('Inf'), min_time=0,\n max_time=float('Inf'), save_metadata=True):\n # Regard default parameters\n if max_freq > s.metadata.max_freq:\n max_freq = s.metadata.max_freq\n\n if max_time > s.metadata.max_time:\n max_time = s.metadata.max_time\n\n # Finds frequency and time bounds\n maxK = s.freq_bin(max_freq)\n minK = s.freq_bin(min_freq)\n maxT = s.time_bin(max_time)\n minT = s.time_bin(min_time)\n\n #print min_time, max_time, min_freq, max_freq\n\n new_s = spectrogram.Spectrogram()\n new_s.data = s.data[minK:maxK+1, minT:maxT+1]\n new_s.metadata.min_freq = s.freq_range(minK)[0]\n new_s.metadata.min_time = s.time_range(minT)[0]\n new_s.metadata.max_freq = s.freq_range(maxK)[0]\n new_s.metadata.max_time = s.time_range(maxT)[0]\n new_s.metadata.sampling_configuration = \\\n s.metadata.sampling_configuration\n new_s.metadata.input_metadata = copy.deepcopy(s.metadata)\n\n new_s.metadata.method = md.Metadata(original_input=s.metadata.input,\n original_method=s.metadata.method,\n name='trim',\n min_freq=min_freq,\n max_freq=max_freq,\n min_time=min_time,\n max_time=max_time)\n if save_metadata:\n new_s.metadata.input = md.ObjectMetadata(s)\n\n return new_s",
"def _trimTime(time,data,tStart,tStop):\t\n\tif tStart is None:\n\t\tiStart=0;\n\t\tiStop=len(time);\n\telse:\n\t\t# determine indices of cutoff regions\n\t\tiStart=_process.findNearest(time,tStart); # index of lower cutoff\n\t\tiStop=_process.findNearest(time,tStop);\t # index of higher cutoff\n\t\t\n\t# trim time\n\ttime=time[iStart:iStop];\n\t\n\t# trim data\n\tif type(data) is not list:\n\t\tdata=[data];\n\tfor i in range(0,len(data)):\n\t\tdata[i]=data[i][iStart:iStop];\n\t\t\n\treturn time, data",
"def clean_timelog():",
"def trim_silence(T, hz, signal):\n N = T * hz\n extra = len(signal) - N\n c = np.abs(signal).cumsum()\n c = c[-extra:] - c[:extra]\n i = np.argmax(c)\n print(f'Keeping {T:.2g} of {len(signal)/hz:.2g} seconds'\n f' starting at +{i/hz:.2f} seconds')\n return signal[i:i+N]",
"def trim2(self, starttime=None, endtime=None, reftime=None, check_npts=True, **kwargs):\n # same as in rf package + mid possibility\n for tr in self.traces:\n st = tr.stats\n ref = (st.starttime + 0.5 * (st.endtime - st.starttime)\n if reftime in ('mid', 'middle') else reftime)\n t1 = _seconds2utc(tr, starttime, reftime=ref)\n t2 = _seconds2utc(tr, endtime, reftime=ref)\n tr.trim(t1, t2, **kwargs)\n if check_npts:\n npts = int(round(np.median([len(tr) for tr in self.traces])))\n self.traces = [tr for tr in self.traces if len(tr) >= npts]\n for tr in self.traces:\n tr.data = tr.data[:npts]\n return self",
"def truncate_data(self, width):\n times_from_mid = self.time - self.midtime\n idxs = np.abs(times_from_mid) < 0.5 * width * self.duration\n self.time = self.time[idxs]\n self.flux = self.flux[idxs]",
"def clear_stream(s):\n s.truncate(0)\n s.seek(0)\n return s",
"def trim(self, start, end):",
"def trim_timings(phrase_length, timings):\n extra_hits = np.argwhere(np.cumsum(timings) > int(phrase_length)).ravel()\n\n if len(extra_hits) != 0:\n all_to_end = np.min(extra_hits)\n del timings[all_to_end:]\n\n return timings",
"def trim_time(crush, lead_time):\n lead_time = pd.Timedelta(lead_time)\n crush = crush[crush.index >= (contact_time(crush) - lead_time)]\n crush = crush[crush.index < release_time(crush)]\n return crush",
"def cull(self):\n now = time.time()\n self.lines = [line for line in self.lines if line.timestamp + self.timeout > now]",
"async def _truncate_tick_data(self, pair: str):\n\n truncate = len(self.close_times[pair]) - self.min_tick_length\n if truncate > 60:\n del self.base_24hr_volumes[pair][0][:truncate]\n del self.close_values[pair][:truncate]\n del self.close_times[pair][:truncate]",
"def trim_audio(data, rate=44100, start_trim=0, end_trim=0, log=False):\n chop = np.copy(data[start_trim*rate : len(data)-end_trim*rate])\n if log:\n m, s = divmod(float(len(data))/rate, 60)\n h, m = divmod(m, 60)\n logging.info(\"Original recording length: %d h %d m %d s\" % (h, m, s))\n logging.info(\"Removed [%d s, %d s] from [start, end] of recording.\" %\n (start_trim, end_trim))\n return chop",
"def trim(args) :\n from trimmer import trim_reads\n trim_reads(args)",
"def remove_timestamp(file):\n delete = open(file, 'w')\n delete.write('')\n delete.close()",
"def _prune_empty_time_series(\n request: tensorboard_service.WriteTensorboardRunDataRequest,\n):\n for (time_series_idx, time_series_data) in reversed(\n list(enumerate(request.time_series_data))\n ):\n if not time_series_data.values:\n del request.time_series_data[time_series_idx]",
"def remove_streaming(self):\n self.streaming = None",
"def test_trim_filter(self):\n expected_filter = (\n \"{inspec} {trim}=start={pi}:duration={d},{setpts}=PTS-STARTPTS \"\n \"{outspec}\".format(\n inspec=self.segment.input_stream_specifier(),\n trim=self.segment._TRIM, setpts=self.segment._SETPTS,\n pi=self.EXPECTED_PUNCH_IN.total_seconds(),\n d=self.EXPECTED_DURATION,\n outspec=self.segment.output_stream_specifier()))\n self.assertEqual(self.segment.trim_filter(), expected_filter)",
"def trim_sub_sample(file: str,\r\n start_time: str,\r\n end_time: str,\r\n sample_start_time: str,\r\n sample_end_time: str,\r\n timestamp_format: str = '%H:%M:%S') -> str:\r\n trim_duration = calculate_duration(sample_start_time, sample_end_time)\r\n _start_time = datetime.strptime(start_time, timestamp_format)\r\n _start_time = int(_start_time.strftime('%s'))\r\n _sample_start_time = datetime.strptime(sample_start_time, timestamp_format)\r\n _sample_start_time = int(_sample_start_time.strftime('%s'))\r\n _end_time = datetime.strptime(end_time, timestamp_format)\r\n _end_time = int(_end_time.strftime('%s'))\r\n _sample_end_time = datetime.strptime(sample_end_time, timestamp_format)\r\n _sample_end_time = int(_sample_end_time.strftime('%s'))\r\n idx = 1\r\n if duration(file) < trim_duration:\r\n trim_duration = duration(file)\r\n if _sample_start_time < _start_time:\r\n start = 0\r\n else:\r\n start = int(_sample_start_time - _start_time)\r\n if _sample_end_time < _end_time:\r\n end = int(start + trim_duration)\r\n else:\r\n end = duration(file)\r\n trim_video(file, filename(file, idx), start, end)\r\n return filename(file, idx)",
"def trim_myogram(raw_data, path, slicing_index='Stim'):\n\t# Collect data\n\tvolt_data = []\n\tstim_data = []\n\tslices_begin_time = []\n\tglobal title\n\n\t# data processing\n\ttitle_stim = 'Stim'\n\ttitle_rmg = 'RMG'\n\ttitle_rta = 'RTA'\n\tfor index, data_title in enumerate(raw_data['titles']):\n\t\tdata_start = int(raw_data['datastart'][index]) - 1\n\t\tdata_end = int(raw_data['dataend'][index])\n\t\tfloat_data = [round(float(x), 3) for x in raw_data['data'][0][data_start:data_end]]\n\t\tif title_rmg in data_title:\n\t\t\tvolt_data = float_data\n\t\t# if title_rta in data_title:\n\t\t# \tvolt_data = float_data\n\t\tif title_stim in data_title:\n\t\t\tstim_data = float_data\n\n\t# convert_bio_to_hdf5(volt_data, stim_data, path)\n\n\timport h5py as hdf5\n\t# with hdf5.File(path + \".hdf5\") as file:\n\t\t# for k,v in file.items():\n\t\t\t# print(k, v[:])\n\n\t# find peaks in stimulations data\n\tms_pause = 0\n\tbio_step = 0.25\n\t# print(\"stim_data = \", stim_data)\n\tfor index in range(1, len(stim_data) - 1):\n\t\tif stim_data[index - 1] < stim_data[index] > stim_data[index + 1] and ms_pause <= 0 and\\\n\t\t\t\tstim_data[index] > 0.5:\n\t\t\tslices_begin_time.append(index) # * real_data_step # division by 4 gives us the normal 1 ms step size\n\t\t\tms_pause = int(3 / bio_step)\n\t\tms_pause -= 1\n\t# print(\"slices_begin_time = \", slices_begin_time)\n\t# remove unnecessary data, use only from first stim, and last stim\n\tvolt_data = volt_data[slices_begin_time[0]:slices_begin_time[-1]]\n\n\t# move times to the begin (start from 0 ms)\n\tslices_begin_time = [t - slices_begin_time[0] for t in slices_begin_time]\n\t# print(\"len(volt_data) = \", len(volt_data))\n\treturn volt_data, slices_begin_time",
"def clean_catalogue(\n source,\n dest=f\"{pd.Timestamp.now().strftime('%y%m%d%H%M%S')}.csv\",\n window=None\n ):\n catalogue = pd.read_csv(source, parse_dates=[3,4,5])\n catalogue.drop_duplicates(ignore_index=True, inplace=True)\n catalogue.sort_values(by=[\"dt_on\"], ignore_index=True, inplace=True)\n if window is not None: # 430367\n count = 0\n rm_idx = []\n for station in catalogue.station.unique():\n subset = catalogue.loc[catalogue[\"station\"] == station]\n remove = []\n dt_on_ = subset[\"dt_on\"].iloc[0]\n for i, dt_on in enumerate(subset[\"dt_on\"]):\n if i == 0:\n continue\n elif dt_on < dt_on_ + pd.Timedelta(window, unit=\"sec\"):\n remove.append(i)\n else:\n dt_on_ = dt_on\n rm_idx.append(subset.index[remove])\n count += len(remove)\n print(f\"Removing {count} entries...\")\n rm_idx = [item for sub_idx in rm_idx for item in sub_idx]\n catalogue.drop(catalogue.index[rm_idx], inplace=True)\n catalogue.reset_index(drop=True, inplace=True)\n catalogue.to_csv(dest)\n print(f\"Catalogue saved to {dest}.\")",
"def trim(self, trim_samples):\n n = len(self.timestamps)\n self.timestamps = self.timestamps[:n - trim_samples]\n self.labels = self.labels[:n - trim_samples]\n self.emg = [x[:n - trim_samples] for x in self.emg]\n self.accel = [x[:n - trim_samples] for x in self.accel]\n self.gyro = [x[:n - trim_samples] for x in self.gyro]\n self.orient = [x[:n - trim_samples] for x in self.orient]",
"def trim(input_file: str, output_path: str, trim_interval: int=5,\n num_workers: int=None, verbose_level=0):\n prefix = input_file.split(os.sep)[-1][:-4] # Get name and remove extension\n duration = float(syscommand.system('soxi -D ' + input_file))\n if duration == 0:\n # For some reason, the soxi command failed with some large files\n # tested. This is an attempt to get the duration in that case.\n import wave\n import contextlib\n with contextlib.closing(wave.open(input_file, 'r')) as f:\n frames = f.getnframes()\n rate = f.getframerate()\n duration = frames / float(rate)\n trims = list(np.arange(0, duration, trim_interval))[:-1]\n if str(verbose_level) == '2' and workers == 1:\n # This code is duplicated for debugging purposes\n for t in trims:\n trim_audio(audio_path=input_file, output_path=output_path,\n name=prefix + '_' + str(t) + '_' + str(duration),\n position=t, duration=trim_interval,\n verbose_level=verbose_level)\n else:\n # Make parallel calls to trim the audio file\n with concurrent.futures.ProcessPoolExecutor(max_workers=num_workers) \\\n as executor:\n futures = [\n executor.submit(fn=trim_audio,\n audio_path=input_file,\n output_path=output_path,\n name=prefix + '_' + str(t) + '_' +\n str(duration),\n position=t,\n duration=trim_interval,\n verbose_level=verbose_level)\n for t in trims]\n\n kwargs = {\n 'total': len(futures),\n 'unit': 'files',\n 'unit_scale': True,\n 'leave': True\n }\n for f in tqdm(concurrent.futures.as_completed(futures), **kwargs):\n pass",
"async def _truncate_adjusted_tick_data(self, pair: str):\n\n truncate = len(self.close_times[pair]) - self.min_tick_length\n if truncate > 60:\n del self.base_24hr_volumes[pair][1][:truncate]\n del self.adjusted_close_values[pair][:truncate]",
"def wipe():\n try:\n inputlist *= 0\n except NameError:\n pass\n try:# is not None and len(outputlist) > 0:\n outputlist *= 0\n except NameError:\n pass\n try:# is not None and len(partlist) > 0:\n partlist *= 0\n except NameError:\n pass\n tagged = 0\n try:# is not None and len(datedlist) > 0:\n datedlist *= 0\n except NameError:\n pass\n date = \"\"\n ignore = \"\"",
"def ddremoveELMData(shotnr, time, preft=0.0, suft=0.0, elm_exper=\"AUGD\", elm_edition=0):\n \n ELM = dd.shotfile(\"ELM\", shotnr, experiment=elm_exper)\n t_endELM = ELM(\"t_endELM\")\n elmend = t_endELM.data\n elmbeg = t_endELM.time\n ELM.close()\n\n outtime, validmask = trimtimes(time, elmbeg, elmend, preft=preft, suft=suft)\n\n return validmask",
"def prune_data(self, ts):\n sql = \"delete from %s where dateTime < %d\" % (self.dbm.table_name, ts)\n self.dbm.getSql(sql)\n try:\n # sqlite databases need some help to stay small\n self.dbm.getSql('vacuum')\n except Exception as e:\n pass",
"def trim(self, start_time, end_time):\n\n # find indices of the times in self.times closest to min_t and max_t\n lowest_index = np.abs(self.times - start_time).argmin()\n highest_index = np.abs(self.times - end_time).argmin()\n\n # take slices of the spectrogram and spec_freq that fall within desired range\n return self.__class__(\n self.spectrogram[:, lowest_index : highest_index + 1],\n frequencies=self.frequencies,\n times=self.times[lowest_index : highest_index + 1],\n decibel_limits=self.decibel_limits,\n window_samples=self.window_samples,\n overlap_samples=self.overlap_samples,\n window_type=self.window_type,\n audio_sample_rate=self.audio_sample_rate,\n scaling=self.scaling,\n )"
] | [
"0.60808086",
"0.6057969",
"0.6052673",
"0.59732354",
"0.5900076",
"0.5839115",
"0.57906055",
"0.5750173",
"0.5706388",
"0.55499846",
"0.5537241",
"0.5536599",
"0.55201197",
"0.5512166",
"0.5504158",
"0.54612994",
"0.5454063",
"0.5421012",
"0.53685355",
"0.5366544",
"0.53506696",
"0.53012025",
"0.5261686",
"0.5242635",
"0.52399355",
"0.5226866",
"0.52208275",
"0.52193314",
"0.51865554",
"0.51574093"
] | 0.60823244 | 0 |
List TimeBased Trimming Input Streams | def list(self, encoding_id, query_params=None, **kwargs):
# type: (string_types, TimeBasedTrimmingInputStreamListQueryParams, dict) -> TimeBasedTrimmingInputStream
return self.api_client.get(
'/encoding/encodings/{encoding_id}/input-streams/trimming/time-based',
path_params={'encoding_id': encoding_id},
query_params=query_params,
pagination_response=True,
type=TimeBasedTrimmingInputStream,
**kwargs
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def win_slide(stream, start_time, win_size, step_size, max_windows):\n stream_list=[]\n for i in range(max_windows):\n ts = start_time + (i*step_size)\n st = stream.slice(ts, ts+win_size)\n # skip missing data\n if len(st)!=3: continue\n if not st[0].stats.starttime == st[1].stats.starttime == st[2].stats.starttime: continue\n if not st[0].stats.endtime == st[1].stats.endtime == st[2].stats.endtime: continue\n if len(st[0])!=int(win_size*100+1): continue\n if st.max()[0]==0.0 or st.max()[1]==0.0 or st.max()[2]==0.0: continue\n # add preprocessed time window\n st = preprocess_stream(st)\n stream_list.append(st)\n return stream_list",
"def task_get_time_slices(\n self, timestamp: datetime = None\n ) -> List[Tuple[datetime, datetime]]:\n total_streams: int = self._config[\"graph_streams\"]\n\n t_now: datetime = (\n timestamp.replace(microsecond=0)\n if timestamp is not None\n else datetime.utcnow().replace(microsecond=0)\n )\n\n t_lag: timedelta = timedelta(seconds=self._config[\"graph_timelag\"])\n t_sec: timedelta = timedelta(seconds=1)\n t_delta: timedelta = timedelta(seconds=self._config[\"graph_stream_frame\"])\n\n frame_end: datetime = t_now - t_lag - t_sec\n frame_start: datetime = frame_end + t_sec - t_delta * total_streams\n\n self._logger.info(\n \"Split [%s - %s] into %s slices\",\n frame_start.isoformat(),\n frame_end.isoformat(),\n total_streams,\n )\n\n result: List[Tuple[datetime, datetime]] = []\n\n for i in range(total_streams):\n slice_start: datetime = frame_end + t_sec - t_delta * (i + 1)\n slice_end: datetime = frame_end - t_delta * i\n\n result.append((slice_start, slice_end))\n\n return result",
"def main():\n\n f = open(eventsfile, 'r')\n lines = f.readlines()\n numcounter = 0\n counter = 0\n fullcounter = 0\n movielist = []\n movielists =[]\n timestamp_list = []\n filteredlist = [] \n startdate = \"2020-02-26\"\n \n for line in lines:\n TAPES = line.split('\\t')\n if int(TAPES[2]) == 1 or int(TAPES[2]) == 2:\n filteredlist.append(line)\n \n for newline in filteredlist:\n TAPES = newline.split('\\t')\n fullcounter +=1\n if int(TAPES[2]) == 2:\n timestamp_list.append(0)\n continue\n startdate2 = startdate.split(\"-\")[1] + \"/\" + startdate.split(\"-\")[2] + \"/\" + startdate.split(\"-\")[0]\n dateplustime = startdate2 + TAPES[0][0:len(TAPES[0])]\n thistime = faststrptime(dateplustime)\n unixtimestamp = datetime.datetime.timestamp(thistime)\n timestamp_list.append(int(unixtimestamp))\n\n i = 0 \n for element in timestamp_list:\n\n if i < (len(timestamp_list)-1) and timestamp_list[i+(counter-i)]-timestamp_list[i] >= 3600:\n counter += 1\n i = counter\n movielist.append(counter)\n \n if len(movielist) <= 15:\n numcounter = 0\n j = 0\n for step in movielist:\n movielists[len(movielists)-1].append(movielist[j])\n j += 1\n movielist = []\n continue \n else:\n movielists.append(movielist)\n movielist = []\n numcounter = 0\n continue\n\n if i < (len(timestamp_list)-1) and timestamp_list[i+1]-timestamp_list[i] >= 3600:\n counter += 1\n i = counter\n movielist.append(counter)\n\n if len(movielist) <= 15:\n numcounter = 0\n j = 0\n for step in movielist:\n movielists[len(movielists)-1].append(movielist[j])\n j += 1\n movielist = []\n continue\n else:\n movielists.append(movielist)\n movielist = []\n numcounter = 0\n continue\n\n counter += 1\n numcounter += 1\n if element != 0:\n movielist.append(counter)\n i += 1\n \n if numcounter == 30:\n numcounter = 0\n movielists.append(movielist)\n movielist = []\n \n if i > (len(timestamp_list)-1):\n movielists.append(movielist)\n movielist = []\n numcounter = 0\n \n numendlists = counter - fullcounter\n first = len(movielists)-numendlists\n last = len(movielists)\n del movielists[first:last]\n \n for x in movielists:\n for y in x:\n if int(filenumber) == y:\n movielist = x\n\n modename = str(movielist[0]) + \"to\" + str(movielist[len(movielist)-1])\n modefilename = \"mode_\" + modename + \".png\"\n try:\n imread(modefilename)\n except:\n imageMode(modename,movielist)\n\n e = loadmodeImage(modefilename)\n \n roimask = np.zeros((ydim,xdim))\n f = open(roisfile, 'r')\n lines = f.readlines()\n i = 1\n i2 = 0\n for line in lines:\n try:\n print(int(line.split(' ')[0]))\n except ValueError:\n i2 += 1\n continue\n minx = int(line.split(' ')[0])\n miny = int(line.split(' ')[1])\n maxx = int(line.split(' ')[2])\n maxy = int(line.split(' ')[3])\n roimask[int(miny):int(maxy),int(minx):int(maxx)] = i\n i += 1\n numberofwells = i-1\n numberofcols = int(i2/2)\n numberofrows = int(numberofwells/numberofcols)\n roimaskweights = convertMaskToWeights(roimask)\n\n cap = cv2.VideoCapture(videoStream)\n\n cap.set(3,roimask.shape[1])\n cap.set(4,roimask.shape[0])\n \n ret,frame = cap.read()\n storedImage = np.array(e * 255, dtype = np.uint8)\n storedMode = Blur(storedImage)\n storedFrame = grayBlur(frame)\n cenData = np.zeros([ int(saveFreq), len(np.unique(roimaskweights))*2 -2])\n pixData = np.zeros([ int(saveFreq), len(np.unique(roimaskweights))])\n i = 0;\n totalFrames = 0\n while(cap.isOpened()):\n ret,frame = cap.read()\n if ret == False:\n break\n currentFrame = grayBlur(frame)\n diffpix = diffImage(storedFrame,currentFrame,pixThreshold)\n diff = 
trackdiffImage(storedMode,currentFrame,pixThreshold)\n diff.dtype = np.uint8\n contours,hierarchy = cv2.findContours(diff, cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)\n MIN_THRESH = 20.0\n MIN_THRESH_P = 20.0\n roi_dict = {}\n for r in range(0,numberofwells):\n roi_dict[r+1] = []\n for cs in range(0,len(contours)):\n if cv2.contourArea(contours[cs]) < 1.0:\n continue\n if cv2.arcLength(contours[cs],True) < 1.0:\n continue\n if cv2.contourArea(contours[cs]) > MIN_THRESH or cv2.arcLength(contours[cs],True) > MIN_THRESH_P:\n M = cv2.moments(contours[cs])\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n area = cv2.contourArea(contours[cs])\n perim = cv2.arcLength(contours[cs],True)\n if int(roimask[cY,cX]) == 0:\n continue\n if not roi_dict[int(roimask[cY,cX])]:\n roi_dict[int(roimask[cY,cX])].append((area*perim,cX,cY))\n else:\n if roi_dict[int(roimask[cY,cX])][0][0] < area*perim:\n roi_dict[int(roimask[cY,cX])][0] = (area*perim,cX,cY)\n\n pixcounts = []\n pixcounts = np.bincount(roimaskweights, weights=diffpix.ravel())\n pixData[i,:] = np.hstack((pixcounts))\n counts = []\n keys = roi_dict.keys()\n keys = sorted(keys)\n for k in keys:\n x = -10000\n y = -10000\n if roi_dict[k]:\n x = roi_dict[k][0][1]\n y = roi_dict[k][0][2]\n counts.append(x)\n counts.append(y)\n cv2.line(storedImage,(x,y),(x,y),(255,255,255),2)\n if i == 284:\n cv2.imwrite(videoStream + '_trackedimagewithlines_' + str(i) + \".png\", storedImage)\n cenData[i,:] = np.asarray(counts)\n totalFrames += 1\n storedFrame = currentFrame\n i += 1\n\n file = open(videoStream + \".centroid2\",'w')\n for x in range(0,frameRate):\n for y in range(0,numberofwells*2):\n file.write(str(int(cenData[x,:][y])) + '\\n')\n pixData = pixData[:i,:]\n pixData = pixData[:,1:] \n file = open(videoStream + \".motion2\",'w')\n for x in range(0,frameRate):\n for y in range(0,numberofwells):\n file.write(str(int(pixData[x,:][y])) + '\\n')\n\n cap.release()\n cv2.destroyAllWindows()\n \n try:\n image = Image.open('lastframe.png')\n except:\n makenumROIsimage()",
"def _trimTime(time,data,tStart,tStop):\t\n\tif tStart is None:\n\t\tiStart=0;\n\t\tiStop=len(time);\n\telse:\n\t\t# determine indices of cutoff regions\n\t\tiStart=_process.findNearest(time,tStart); # index of lower cutoff\n\t\tiStop=_process.findNearest(time,tStop);\t # index of higher cutoff\n\t\t\n\t# trim time\n\ttime=time[iStart:iStop];\n\t\n\t# trim data\n\tif type(data) is not list:\n\t\tdata=[data];\n\tfor i in range(0,len(data)):\n\t\tdata[i]=data[i][iStart:iStop];\n\t\t\n\treturn time, data",
"def parseEvents(data, times, eventTimes):\n striped = []\n remaining = range(len(times))\n stripedEvents = []\n\n for t in eventTimes:\n tmpEvent = t.date()\n for j in range(len(times)):\n tmpTime = times[j].date()\n\n if tmpEvent == tmpTime:\n striped.append(tmpEvent)\n stripedEvents.append(data[j, :])\n remaining.remove(j)\n break\n\n stripedEvents = np.array(stripedEvents)\n remainingTimes = np.array(remaining)\n stripedTimes = np.array(striped)\n remainingEvents = data[remaining]\n\n return stripedTimes, remainingTimes, stripedEvents, remainingEvents",
"def thermals(self) -> Iterator[\"Flight\"]:\n self = cast(\"Flight\", self)\n all_segments = (\n self.unwrap()\n .diff(\"track_unwrapped\")\n .agg_time(\"1T\", vertical_rate=\"max\", track_unwrapped_diff=\"median\")\n .abs(track_unwrapped_diff_median=\"track_unwrapped_diff_median\")\n .query(\"vertical_rate_max > 2 and track_unwrapped_diff_median > 5\")\n )\n if all_segments is not None:\n yield from all_segments.split(\"1T\")",
"def filter_lower_datetime(time, list_time):\n return [t for t in list_time if t <= time]",
"def _FilterMMarks(self):\n\n to_remove = []\n tplus1 = datetime.datetime.now() - datetime.timedelta(hours=1)\n\n for (i, (m1, m2)) in enumerate(self._mmarks):\n if (m1.starttime < tplus1):\n to_remove.append(i)\n\n to_remove.reverse()\n for i in to_remove:\n self._mmarks.pop(i)",
"def readShouts(self, start=-1):\n return []",
"def filterTimeCorr(corrPath, keepTimes, linesPerTime):\n\n # The file has stanzas beginning with \n # ---\n # JobID: 287307\n # Followed by metadata, ending with\n # ...\n # ---\n # correlator: P5-P5\n # Followed by further metadata and correlator values\n # Followeid by an EOF or a new stanza\n\n try:\n cfp = open(corrPath,'r')\n except:\n print(\"ERROR opening\", corrPath, \"for reading.\")\n return 1\n\n remainingTimes = set(keepTimes)\n \n # Read correlator stanzas, one at a time. Write the stanzas we want to keep\n inCorr = False\n linesStanza = \"\"\n linesCorrFile = \"\"\n t0 = \"\"\n linect = 0\n for line in cfp:\n a = line.split()\n if a[0] == '---':\n if inCorr:\n # Flush previous stanza unless we are removing it\n if len(linesStanza) > 0 and t0 in remainingTimes:\n linesCorrFile += linesStanza\n linesStanza = \"\"\n inCorr = False\n elif a[0] == \"correlator:\":\n inCorr = True\n elif a[0] == \"JobID:\":\n inCorr = False\n remainingTimes.discard(t0)\n linesStanza = '---\\n'\n linect = 1\n elif a[0] == \"antiquark_source_origin:\":\n # Format is\n # antiquark_source_origin: [ 0, 0, 0, 78 ]\n t0 = a[5]\n # Drop excess lines in stanza\n if linect < linesPerTime:\n linesStanza += line\n linect += 1\n\n # Flush previous stanza unless we are removing it\n if inCorr and len(linesStanza) > 0 and t0 in remainingTimes:\n linesCorrFile += linesStanza\n remainingTimes.discard(t0)\n \n if len(remainingTimes) > 0:\n print(\"ERROR: the following times were unexpectedly missing from\", corrPath)\n print(remainingTimes)\n return 1\n \n cfp.close()\n\n try:\n cfp = open(corrPath,'w')\n except:\n print(\"ERROR opening\", corrPath,\"for writing\")\n \n cfp.write(linesCorrFile)\n cfp.close()\n\n return 0",
"def make_masked_time_stream(Blocks, ntime=None, window=None, \n return_means=False, subtract_slope=False) :\n\n # Shape of all axes except the time axis.\n back_shape = Blocks[0].dims[1:]\n # Get the time sample spacing.\n Blocks[0].calc_time()\n dt = abs(sp.mean(sp.diff(Blocks[0].time)))\n # Find the beginning and the end of the time axis by looping through\n # blocks.\n # Also get the time axis and the mask\n # for calculating basis polynomials.\n unmask = sp.zeros((0,) + back_shape, dtype=bool)\n time = sp.zeros((0,), dtype=float)\n start_ind = []\n min_time = float('inf')\n max_time = 0.0\n #mean_time = 0.0\n #n_data_times = 0\n for Data in Blocks :\n Data.calc_time()\n start_ind.append(len(time))\n time = sp.concatenate((time, Data.time))\n this_unmask = sp.logical_not(ma.getmaskarray(Data.data))\n unmask = sp.concatenate((unmask, this_unmask), 0)\n # Often the start or the end of a scan is completly masked. Make sure\n # we don't start till the first unmasked time and end at the last\n # unmasked time.\n time_unmask = sp.alltrue(ma.getmaskarray(Data.data), -1)\n time_unmask = sp.alltrue(time_unmask, -1)\n time_unmask = sp.alltrue(time_unmask, -1)\n if sp.alltrue(time_unmask):\n continue\n time_unmask = sp.logical_not(time_unmask)\n min_time = min(min_time, min(Data.time[time_unmask]))\n max_time = max(min_time, max(Data.time[time_unmask]))\n #mean_time += sp.sum(Data.time[time_unmask])\n #n_data_times += len(Data.time[time_unmask])\n # Ensure that the time sampling is uniform.\n if not (sp.allclose(abs(sp.diff(Data.time)), dt, rtol=0.1)\n and sp.allclose(abs(sp.mean(sp.diff(Data.time))), dt,\n rtol=0.001)) :\n msg = (\"Time sampling not uniformly spaced or Data Blocks don't \"\n \"agree on sampling.\")\n raise ce.DataError(msg)\n # Ensure the shapes are right.\n if Data.dims[1:] != back_shape :\n msg = (\"All data blocks must have the same shape except the time \"\n \"axis.\")\n raise ce.DataError(msg)\n # Now calculate basis polynomials for the mean mode and the slope mode.\n polys = misc.ortho_poly(time[:,None,None,None], 2, unmask, 0)\n #mean_time /= n_data_times\n #if n_data_times == 0:\n # n_data_times = 1\n # Very important to subtract the mean out of the signal, otherwise the\n # window coupling to the mean (0) mode will dominate everything. Can also\n # optionally take out a slope.\n # Old algorithm.\n #total_sum = 0.0\n #total_counts = 0\n #total_slope = 0.0\n #time_norm = 0.0\n #for Data in Blocks:\n # total_sum += sp.sum(Data.data.filled(0), 0)\n # total_counts += ma.count(Data.data, 0)\n # total_slope += sp.sum(Data.data.filled(0) \n # * (Data.time[:,None,None,None] - mean_time), 0)\n # time_norm += sp.sum(sp.logical_not(ma.getmaskarray(Data.data))\n # * (Data.time[:,None,None,None] - mean_time)**2, 0)\n #total_counts[total_counts == 0] = 1\n #time_norm[time_norm == 0.0] = 1\n #total_mean = total_sum / total_counts\n #total_slope /= time_norm\n # New algorithm.\n mean_amp = 0\n slope_amp = 0\n for ii, Data in enumerate(Blocks):\n si = start_ind[ii]\n this_nt = Data.dims[0]\n data = Data.data.filled(0)\n mean_amp += sp.sum(data * unmask[si:si + this_nt,...]\n * polys[0,si:si + this_nt,...], 0)\n slope_amp += sp.sum(data * unmask[si:si + this_nt,...]\n * polys[1,si:si + this_nt,...], 0)\n polys[0,...] *= mean_amp\n polys[1,...] 
*= slope_amp\n # Calculate the time axis.\n if min_time > max_time:\n min_time = 0\n max_time = 6 * dt\n if not ntime :\n ntime = (max_time - min_time) // dt + 1\n elif ntime < 0:\n # 0 pad by a factor of at least -ntime, but at most 10% more than this.\n time_min = -ntime * (max_time - min_time) / dt\n n_block = 1\n while n_block < time_min/20.0:\n n_block *= 2\n ntime = (time_min//n_block + 1) * n_block\n\n time = sp.arange(ntime)*dt + min_time\n # Allowcate memory for the outputs.\n time_stream = sp.zeros((ntime,) + back_shape, dtype=float)\n mask = sp.zeros((ntime,) + back_shape, dtype=sp.float32)\n # Loop over all times and fill in the arrays.\n for ii, Data in enumerate(Blocks):\n this_nt = Data.dims[0]\n si = start_ind[ii]\n # Subtract the mean calculated above.\n this_data = Data.data.copy()\n this_data -= polys[0,si:si + this_nt,...]\n # If desired, subtract of the linear function of time.\n if subtract_slope:\n #this_data -= (total_slope \n # * (Data.time[:,None,None,None] - mean_time))\n this_data -= polys[1,si:si + this_nt,...]\n # Find the first and last unmasked times.\n time_unmask = sp.alltrue(ma.getmaskarray(this_data), -1)\n time_unmask = sp.alltrue(time_unmask, -1)\n time_unmask = sp.alltrue(time_unmask, -1)\n if sp.alltrue(time_unmask):\n continue\n time_unmask = sp.logical_not(time_unmask)\n unmasked_ind, = sp.where(time_unmask)\n first_ind = min(unmasked_ind)\n last_ind = max(unmasked_ind)\n # Ensure that the time sampling is uniform.\n if not (sp.allclose(abs(sp.diff(Data.time)), dt, rtol=0.1)\n and sp.allclose(abs(sp.mean(sp.diff(Data.time))), dt,\n rtol=0.001)) :\n msg = (\"Time sampling not uniformly spaced or Data Blocks don't \"\n \"agree on sampling.\")\n raise ce.DataError(msg)\n # Ensure the shapes are right.\n if Data.dims[1:] != back_shape :\n msg = (\"All data blocks must have the same shape except the time \"\n \"axis.\")\n # Apply an offset to the time in case the start of the Data Block\n # doesn't line up with the time array perfectly.\n offset = (time[sp.argmin(abs(time - Data.time[first_ind]))]\n - Data.time[first_ind])\n # Generate window function.\n if window:\n window_function = sig.get_window(window, last_ind - first_ind + 1)\n for ii in range(first_ind, last_ind + 1) :\n ind = sp.argmin(abs(time - (Data.time[ii] + offset)))\n if abs(time[ind] - (Data.time[ii])) < 0.5*dt :\n if sp.any(mask[ind, ...]) :\n msg = \"Overlapping times in Data Blocks.\"\n raise ce.DataError(msg)\n if window:\n window_value = window_function[ii - first_ind]\n else :\n window_value = 1.0\n time_stream[ind, ...] = (window_value \n * this_data[ii, ...].filled(0.0))\n mask[ind, ...] = window_value * sp.logical_not(ma.getmaskarray(\n this_data)[ii, ...])\n if return_means:\n return time_stream, mask, dt, polys[0,0,...]\n else :\n return time_stream, mask, dt",
"def chunk_entries(parsed_entries):\n parsed_entries = iter(parsed_entries)\n\n run = [parsed_entries.next()]\n\n for entry in parsed_entries:\n if same_minute(run[-1]['time_received_utc_datetimeobj'],\n entry['time_received_utc_datetimeobj']):\n run.append(entry)\n else:\n yield run\n run = [entry]\n yield run",
"def _hist_filter_ts(commands, start_time, end_time):\n for cmd in commands:\n if start_time <= cmd[1] < end_time:\n yield cmd",
"def trim2(self, starttime=None, endtime=None, reftime=None, check_npts=True, **kwargs):\n # same as in rf package + mid possibility\n for tr in self.traces:\n st = tr.stats\n ref = (st.starttime + 0.5 * (st.endtime - st.starttime)\n if reftime in ('mid', 'middle') else reftime)\n t1 = _seconds2utc(tr, starttime, reftime=ref)\n t2 = _seconds2utc(tr, endtime, reftime=ref)\n tr.trim(t1, t2, **kwargs)\n if check_npts:\n npts = int(round(np.median([len(tr) for tr in self.traces])))\n self.traces = [tr for tr in self.traces if len(tr) >= npts]\n for tr in self.traces:\n tr.data = tr.data[:npts]\n return self",
"def _input_intervals():\n last_timestamp = self._trace.interval.start\n for ir_event in filter_by_task(all_tasks, 'name', 'InputReader', 'any'):\n if last_timestamp <= ir_event.interval.end:\n yield Interval(last_timestamp, ir_event.interval.end)\n last_timestamp = ir_event.interval.end",
"def trim_timings(phrase_length, timings):\n extra_hits = np.argwhere(np.cumsum(timings) > int(phrase_length)).ravel()\n\n if len(extra_hits) != 0:\n all_to_end = np.min(extra_hits)\n del timings[all_to_end:]\n\n return timings",
"def get(self, encoding_id, input_stream_id, **kwargs):\n # type: (string_types, string_types, dict) -> TimeBasedTrimmingInputStream\n\n return self.api_client.get(\n '/encoding/encodings/{encoding_id}/input-streams/trimming/time-based/{input_stream_id}',\n path_params={'encoding_id': encoding_id, 'input_stream_id': input_stream_id},\n type=TimeBasedTrimmingInputStream,\n **kwargs\n )",
"def trim(self, s, min_freq=0, max_freq=float('Inf'), min_time=0,\n max_time=float('Inf'), save_metadata=True):\n # Regard default parameters\n if max_freq > s.metadata.max_freq:\n max_freq = s.metadata.max_freq\n\n if max_time > s.metadata.max_time:\n max_time = s.metadata.max_time\n\n # Finds frequency and time bounds\n maxK = s.freq_bin(max_freq)\n minK = s.freq_bin(min_freq)\n maxT = s.time_bin(max_time)\n minT = s.time_bin(min_time)\n\n #print min_time, max_time, min_freq, max_freq\n\n new_s = spectrogram.Spectrogram()\n new_s.data = s.data[minK:maxK+1, minT:maxT+1]\n new_s.metadata.min_freq = s.freq_range(minK)[0]\n new_s.metadata.min_time = s.time_range(minT)[0]\n new_s.metadata.max_freq = s.freq_range(maxK)[0]\n new_s.metadata.max_time = s.time_range(maxT)[0]\n new_s.metadata.sampling_configuration = \\\n s.metadata.sampling_configuration\n new_s.metadata.input_metadata = copy.deepcopy(s.metadata)\n\n new_s.metadata.method = md.Metadata(original_input=s.metadata.input,\n original_method=s.metadata.method,\n name='trim',\n min_freq=min_freq,\n max_freq=max_freq,\n min_time=min_time,\n max_time=max_time)\n if save_metadata:\n new_s.metadata.input = md.ObjectMetadata(s)\n\n return new_s",
"def split_ms(msin, msout, start_out, end_out):\n t = pt.table(msin, ack=False)\n\n starttime = t[0]['TIME']\n t1 = t.query('TIME > ' + str(starttime+start_out*3600) + ' && '\n 'TIME < ' + str(starttime+end_out*3600), sortlist='TIME,ANTENNA1,ANTENNA2')\n\n t1.copy(msout, True)\n t1.close()\n t.close()",
"def cull(self):\n now = time.time()\n self.lines = [line for line in self.lines if line.timestamp + self.timeout > now]",
"def trim(self, trim_samples):\n n = len(self.timestamps)\n self.timestamps = self.timestamps[:n - trim_samples]\n self.labels = self.labels[:n - trim_samples]\n self.emg = [x[:n - trim_samples] for x in self.emg]\n self.accel = [x[:n - trim_samples] for x in self.accel]\n self.gyro = [x[:n - trim_samples] for x in self.gyro]\n self.orient = [x[:n - trim_samples] for x in self.orient]",
"def single_time_stream(self, stream_time=120, do_plot=False, fmin=5, fmax=50):\n self.time_stream_data.take_ts(stream_time=stream_time)\n if do_plot:\n self.time_stream_data.plot_ts()\n self.time_stream_data.get_median_bias_wl(fmin=fmin, fmax=fmax)\n if self.verbose:\n print(f'wl_median {self.time_stream_data.wl_median}')",
"def wake_trim(pairs, wake_trim_min, period_length_sec):\r\n trim = int((60/period_length_sec) * wake_trim_min)\r\n trimmed_pairs = []\r\n for true, pred in pairs:\r\n inds = np.where(true != 0)[0]\r\n start = max(0, inds[0]-trim)\r\n end = inds[-1]+trim\r\n trimmed_pairs.append([\r\n true[start:end], pred[start:end]\r\n ])\r\n return trimmed_pairs",
"def filter_time_range(start: int, end: int, time_slots: Optional[Container[int]] = None) -> Iterator[int]:\n if time_slots is None:\n time_slots = range(24)\n for time in range(start, end):\n if time in time_slots:\n yield time",
"def read_timestamps(self, tasks):\n from reframe.core.deferrable import evaluate\n\n self.begin_stamps = []\n self.end_stamps = []\n for t in tasks:\n with open(evaluate(t.check.stdout), 'r') as f:\n self.begin_stamps.append(float(f.readline().strip()))\n self.end_stamps.append(float(f.readline().strip()))\n\n self.begin_stamps.sort()\n self.end_stamps.sort()",
"def trimtimes(time, elmbeg, elmend, preft = 0.0, suft = 0.0):\n valididx = np.zeros(len(time),dtype='bool')\n \n elmbeg = elmbeg - preft\n elmend = elmend + suft\n for i in range(len(time)):\n t = time[i]\n boolbeg = t>=elmbeg\n boolend = t<=elmend\n boolelm = boolbeg & boolend\n valididx[i] = np.sum(boolelm)\n \n #To use only data outside of ELMs\n valididx = np.invert(valididx)\n return time[valididx], valididx",
"def read_to_list(folder, start, stop):\n measurements = []\n for i in range(start, stop+1):\n measurements.append(Measurement(folder + str(i) + \".tdms\"))\n return measurements",
"def clip_motif_time_series(freq_preprocessed_data, all_offsets, all_bin_widths, motif_start_times, motif_length: int):\n # [Freq]->(Instances, Frequency, Channels, Time-Steps, Bin Width)\n # Only need to get the times around the first syllable\n\n motif_events_series = []\n for pred_data, offset, bin_width in zip(freq_preprocessed_data, all_offsets, all_bin_widths):\n # Grab the Neural Activity Centered on Each event\n set_window = (offset - bin_width, offset + motif_length)\n chunk_events = fet.get_event_related_nd_chunk(chunk_data=pred_data, chunk_indices=motif_start_times, fs=1000,\n window=set_window) # clip the data at the start times\n\n corrected_chunk_events = []\n for chunk in chunk_events:\n corrected_chunk_events.append(np.squeeze(chunk))\n\n chunk_events = fet.event_shape_correction(chunk_events=corrected_chunk_events,\n original_dim=2) # Reformat to be array-like\n\n chunk_events_series = get_time_series(data=chunk_events, bin_width=bin_width) # clip samples based on bin_width\n\n motif_events_series.append(np.squeeze(chunk_events_series)) # Remove Single axis and append to list\n\n return motif_events_series",
"def stream_formatter(streamified):\n trimmed, exactstart,exactend = stream_trimmer(streamified['stream'],\n streamified['starttime'], streamified['endtime'])\n streamified['timeseries'] = trimmed\n streamified['exactstart'] = exactstart\n streamified['exactend']= exactend\n return streamified",
"def trim(self, start_time, end_time):\n\n # find indices of the times in self.times closest to min_t and max_t\n lowest_index = np.abs(self.times - start_time).argmin()\n highest_index = np.abs(self.times - end_time).argmin()\n\n # take slices of the spectrogram and spec_freq that fall within desired range\n return self.__class__(\n self.spectrogram[:, lowest_index : highest_index + 1],\n frequencies=self.frequencies,\n times=self.times[lowest_index : highest_index + 1],\n decibel_limits=self.decibel_limits,\n window_samples=self.window_samples,\n overlap_samples=self.overlap_samples,\n window_type=self.window_type,\n audio_sample_rate=self.audio_sample_rate,\n scaling=self.scaling,\n )"
] | [
"0.5895306",
"0.57021743",
"0.5695429",
"0.5674139",
"0.55933565",
"0.5469195",
"0.5462523",
"0.5439689",
"0.5400392",
"0.53992",
"0.5375235",
"0.53676647",
"0.53236187",
"0.53185964",
"0.5308876",
"0.53067726",
"0.5305648",
"0.5305554",
"0.529495",
"0.5290115",
"0.52687687",
"0.5260498",
"0.52584374",
"0.52490723",
"0.52203566",
"0.52070874",
"0.5202511",
"0.5191903",
"0.5181044",
"0.51705414"
] | 0.5919266 | 0 |
Given the parsed Version of Pants, return its release notes file path. | def notes_file_for_version(self, version: Version) -> str:
branch_name = self._branch_name(version)
notes_file = self._release_notes.get(branch_name)
if notes_file is None:
raise ValueError(
f"Version {version} lives in branch {branch_name}, which is not configured in "
f"{self._release_notes}."
)
return notes_file | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_release_notes(self):\n\n notes = self.output.get_header('RELEASE NOTES')\n notes += 'https://{}/{}/{}/releases'.format(HOST_GITHUB, \\\n self.repo, self.product) + '\\n'\n\n notes += self.output.get_sub_header('COMPARISONS')\n notes += self.get_comparison(self.latest_tags[0][VERS],\n self.latest_tags[1][VERS])\n\n if len(self.latest_tags) >= (MAX_COMPARISONS_TO_SHOW - 1):\n notes += self.get_comparison(self.latest_tags[1][VERS],\n self.latest_tags[2][VERS])\n\n if len(self.latest_tags) >= MAX_COMPARISONS_TO_SHOW:\n notes += self.get_comparison(self.latest_tags[2][VERS],\n self.latest_tags[3][VERS])\n\n tag_data = self.get_tag(self.latest_tags[3][SHA])\n\n notes += self.output.get_sub_header('TAGS')\n notes += self.get_url_tag_release(self.latest_tags[3][VERS]) + '\\n'\n notes += self.get_url_tag_commit(tag_data[\"object\"][\"sha\"]) + '\\n'\n\n changelog = self.get_changelog(tag_data[\"object\"][\"sha\"])\n if changelog:\n notes += self.output.get_sub_header('CHANGELOG')\n notes += changelog\n return notes",
"def set_note_version(cls, version):\n #Change current working directory to root sdk directory\n Utility.pushd(Settings.rootSdkPath)\n cls.init()\n notes_file = 'releases.txt'\n note = cls.get_note(notes_file)\n if note is not False:\n new_note = '---------------------------------------------------------------------\\n' + \\\n 'Version: ' + version + '\\n' + \\\n '---------------------------------------------------------------------\\n'\n if os.path.isfile(notes_file):\n with open(notes_file,\"r\") as src:\n all_notes=src.readlines()\n if '--------------------------------------------' not in all_notes[0]:\n all_notes.insert(0,new_note)\n else:\n all_notes = new_note\n\n with open(notes_file, 'w') as release_notes:\n release_notes.writelines(all_notes)\n cls.logger.info(\"Release notes vesion set: \" + version)\n # return to the base directory\n Utility.popd()",
"def notespath(self):\n return os.path.join(self.relnotesdir, self.notesdir)",
"def release_notes(version, author, git_ref_target, git_ref_source, build_type):\n print('generating release notes')\n if git_ref_source:\n if git_ref_source != 'HEAD':\n git_ref_source = 'origin/{}'.format(git_ref_source)\n changelog = run('git log origin/{}..{}'.format(git_ref_target,\n git_ref_source))\n else:\n git_ref_source = 'origin/master'\n changelog = run('git log {}..origin/{}'.format(git_ref_source, git_ref_target))\n notes = {\n 'version': version,\n 'author': author,\n 'build_type': build_type,\n 'date': datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\n 'changelog': changelog.stdout\n }\n return notes",
"def getSetupNotesFilePath(self, **properties):\n mode = Mode(properties.get('show', None), properties.get('sequence', None))\n filePath = mode.get(\"[recipeNotesFile]\", properties)\n return filePath",
"def version_path(version):\n try:\n version_path = CFG.get(\"Versions\", version)\n except KeyError:\n version_path = version\n return version_path",
"def get_release_note(comments):\n release_note = \"\"\n i = 0\n for comment in comments:\n #pprint.pprint(comment)\n #print \"**** Comment-{0}: {1}\".format(i, comment['body'])\n #print \"**** Comment-{index}: {body}\".format(\n # index=i,\n # body=comment['body']\n # )\n #print \"\\tURL: {0}\".format(comment['html_url'])\n #print \"\\tURL: {url}\".format(url=comment['html_url'])\n #comment['body'].index('Changed make')\n if comment['body'].lower().find('changed make') >= 0:\n #print \"Found 'Release Note'\"\n release_note = comment['body']\n #else:\n #print \"No 'Release Note' found\"\n\n i += 1\n # print \"----------------------------------------------------------\\n\"\n return release_note",
"def set_note_version_server(cls):\n #Change current working directory to root sdk directory\n Utility.pushd(Settings.rootSdkPath)\n cls.init()\n notes_file = 'releases.txt'\n #Get the list of WebRtc nuget pakcages with prereleases\n packages = NugetUtility.nuget_cli('list', 'Id:WebRtc', '-PreRelease')\n packages = packages.split('\\r\\n')\n webrtcRegex = r\"^WebRtc+\\s\"\n #Search the list of the packages for a WebRtc package and set the version\n for package in packages:\n if re.match(webrtcRegex, package, flags=0):\n version = package\n\n note = cls.get_note(notes_file)\n if note is not False:\n new_note = '---------------------------------------------------------------------\\n' + \\\n 'Version: ' + version + '\\n' + \\\n '---------------------------------------------------------------------\\n'\n if os.path.isfile(notes_file):\n with open(notes_file,\"r\") as src:\n all_notes=src.readlines()\n if '--------------------------------------------' not in all_notes[0]:\n all_notes.insert(0,new_note)\n else:\n all_notes = new_note\n\n with open(notes_file, 'w') as release_notes:\n release_notes.writelines(all_notes)\n cls.logger.info(\"Release notes vesion set: \" + version) \n \n # return to the base directory\n Utility.popd()",
"def rtd_build_path(self, version=\"latest\"):\n return os.path.join(self.doc_path, 'rtd-builds', version)",
"def get_download_path(self, version=\"latest\"):\n raise NotImplementedError",
"def _get_releaseinfo_str(version):\n opts = {}\n f = StringIO.StringIO()\n opts['version'] = version\n opts['date'] = get_git_log_info(\"%ci\")\n opts['comments'] = get_git_log_info(\"%b%+s%+N\")\n opts['commit'] = get_git_log_info(\"%H\")\n f.write(relfile_template % opts)\n return f.getvalue()",
"def makeReleaseFileName(cls, version: str) -> str:\n\n from peek_platform import PeekPlatformConfig\n\n return os.path.join(\n PeekPlatformConfig.config.platformSoftwarePath,\n 'peek-release-%s.tar.gz' % version)",
"def read_release_version():\n with open(\"RELEASE-VERSION\", \"r\") as f:\n return f.readline().strip()",
"def get_version() -> str:\n version = read(\"pdf_utils/__version__.py\")\n return re.search(r\"__version__ = \\\"(.*?)\\\"\", version).group(1)",
"def get_version():\n found = None\n with open(os.path.join(PATH, \"pyproject.toml\"), \"rt\") as setup_file:\n for line in setup_file:\n line = line.strip()\n if line.startswith(\"version\"):\n found = line\n break\n\n if found is None:\n raise ValueError(\"Unable to detect version\")\n\n return found.split(\"=\")[-1].replace('\"', \"\").strip()",
"def _get_package_version():\n file = join(get_root(), 'VERSION')\n\n if exists(file):\n with open(file) as file:\n return file.read()\n\n return ''",
"def full_doc_path(self, version='latest'):\n doc_base = self.checkout_path(version)\n for possible_path in ['docs', 'doc', 'Doc']:\n if os.path.exists(os.path.join(doc_base, '%s' % possible_path)):\n return os.path.join(doc_base, '%s' % possible_path)\n #No docs directory, docs are at top-level.\n return doc_base",
"def release_notes(self):\n return self._release_notes",
"def getVersion(self):\n try:\n filepath = f\"{EXTERNAL_DIRECTORY}/VERSION\"\n with open(filepath, \"r\") as file:\n lines = file.readlines()\n for line in lines:\n if line != \"\\n\":\n return line.replace(\"\\n\", \"\")\n\n\n except FileNotFoundError as e:\n _LOGGER.error(\"Could not find VERSION File.\")\n return None\n except Exception as e:\n _LOGGER.debug(\"Could not read program version file. Error message: %s\", e)\n return None",
"def version() -> str:\n with open(join(dirname(__file__), 'resources', 'VERSION')) as f:\n return f.read().strip()",
"def full_build_path(self, version='latest'):\n return os.path.join(self.conf_dir(version), \"_build\", \"html\")",
"def notes(self) -> Optional[str]:\n return pulumi.get(self, \"notes\")",
"def get_for_release_version_path(self):\n return self.__cICommon.get_for_release_version_path()",
"def get_release_info(version='v1.1-dev', date='2021-07-22'):\n # go to the repository directory\n dir_orig = os.getcwd()\n os.chdir(os.path.dirname(os.path.dirname(__file__)))\n\n # grab git info into string\n try:\n cmd = \"git describe --tags\"\n version = subprocess.check_output(cmd.split(), stderr=subprocess.DEVNULL)\n version = version.decode('utf-8').strip()\n\n # if there are new commits after the latest release\n if '-' in version:\n version, num_commit = version.split('-')[:2]\n version += '-{}'.format(num_commit)\n\n cmd = \"git log -1 --date=short --format=%cd\"\n date = subprocess.check_output(cmd.split(), stderr=subprocess.DEVNULL)\n date = date.decode('utf-8').strip()\n except:\n pass\n\n # go back to the original directory\n os.chdir(dir_orig)\n return version, date",
"def get_version():\n return \"0.0.1 (prerelease prototype)\"",
"def get_prerelease_package_version(self, production: bool = False) -> str:\n rc = 1\n if describe := get_git_describe(CONFIG.mpy_path.as_posix()):\n ver, rc, _ = describe.split(\"-\")\n base = bump_version(Version(ver), minor_bump=True)\n rc = int(rc)\n return str(bump_version(base, rc=rc))\n else:\n raise ValueError(\"cannot determine next version number micropython\")",
"def get_version(version=VERSION, date=DATE):\n return \"JoMRS v{} Modular Rigging System | last update {}\".format(\n \".\".join([i for i in version]), \"/\".join([x for x in date])\n )",
"def _get_version(self, identifier: Identifier,\n version: Optional[int] = None) -> DocMetadata:\n parent_path = self._get_parent_path(identifier=identifier,\n version=version)\n path = os.path.join(parent_path,\n (f'{identifier.filename}.abs' if not version\n else f'{identifier.filename}v{version}.abs'))\n return self.parse_abs_file(filename=path)",
"def get_release_date ():\n fname = os.path.join(\"doc\", \"changelog.txt\")\n release_date = \"unknown\"\n with open(fname) as fd:\n # the release date is on the first line\n line = fd.readline()\n mo = release_ro.search(line)\n if mo:\n release_date = mo.groups(1)\n return release_date",
"def svn_info_t_prejfile_get(svn_info_t_self): # real signature unknown; restored from __doc__\n return \"\""
] | [
"0.64292353",
"0.6268032",
"0.6163238",
"0.61492467",
"0.60980326",
"0.6056897",
"0.59942985",
"0.5937546",
"0.56882155",
"0.5619412",
"0.5613099",
"0.55908245",
"0.55586183",
"0.5537965",
"0.54783875",
"0.5476067",
"0.5475499",
"0.544245",
"0.5414592",
"0.53955483",
"0.5368579",
"0.53563184",
"0.5345628",
"0.5345276",
"0.5333117",
"0.5313615",
"0.53033906",
"0.52999866",
"0.52724534",
"0.5267549"
] | 0.74077183 | 0 |
Tries to find the client with the specified mac address. Returns None if it hasn't been active yet | def get_client(self, mac_address: str) -> Union[Any, None]:
if mac_address in self._clients:
return self._clients[mac_address] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def lookup_client(self, ip_addr: str):\n try:\n conn_obj = self.client_list[ip_addr]\n except KeyError:\n raise Networking.Host.ClientNotFoundException\n\n if conn_obj is not None:\n return conn_obj\n else:\n raise Networking.Host.ClientNotFoundException",
"def has_client(self, ip):\n for cli in self.clients:\n if cli.ip == ip:\n return cli\n return None",
"def get_client(self, ip_address):\n\n self.cur.execute(\n 'select * from authenticated_clients where ip_address=%s',\n (ip_address, )\n )\n return self.cur.fetchone()",
"def find_device(cls, mac=None, timeout_sec=TIMEOUT_SEC):\n return get_provider().find_device(service_uuids=cls.ADVERTISED, mac=mac, timeout_sec=timeout_sec, services_uuid_callbacks=cls.ADVERTISED_CALLBACK)",
"def get_device_by_mac_or_None(mac):\n try:\n d = Device.objects.get(mac=mac)\n return d\n except Device.DoesNotExist:\n return None",
"def get_device(mac, devices=None):\n print('get_device:', get_device)\n if not mac:\n return None\n if not devices:\n devices = discover_wemo()\n\n normal_mac = normalize_mac_address(mac)\n\n for dev in devices:\n if dev.mac and normalize_mac_address(dev.mac) == normal_mac:\n print('get_device dev:', dev)\n return dev\n\n return None",
"def sx_router_neigh_get_mac(handle, rif, addr): \n try:\n neigh_entry_cnt_p = new_uint32_t_p()\n neigh_entry_list_p = new_sx_neigh_get_entry_t_arr(1)\n\n filter_p = new_sx_neigh_filter_t_p()\n neigh_filter = sx_neigh_filter_t()\n neigh_filter.filter_by_rif = SX_KEY_FILTER_FIELD_NOT_VALID\n neigh_filter.rif = 0\n sx_neigh_filter_t_p_assign(filter_p, neigh_filter)\n \n rc = sx_api_router_neigh_get(handle, SX_ACCESS_CMD_GET, rif, addr, filter_p, neigh_entry_list_p, neigh_entry_cnt_p)\n if rc == SX_STATUS_ENTRY_NOT_FOUND:\n return None \n sx_check_rc(rc)\n\n neighbor_entry = sx_neigh_get_entry_t_arr_getitem(neigh_entry_list_p, 0)\n \n return neighbor_entry.neigh_data.mac_addr.to_str()\n \n finally:\n delete_sx_neigh_filter_t_p(filter_p)\n delete_sx_neigh_get_entry_t_arr(neigh_entry_list_p) \n delete_uint32_t_p(neigh_entry_cnt_p)",
"def lookup_socket(self, address): # TODO: optimize me\n\n net_tuple = self.read_nodestate(0)\n for item in net_tuple:\n discovered_address = item[1]\n if address == discovered_address:\n return item[0]",
"def get_dhcp_client_iaid(mac_address):\n hwaddr = list(int(byte, 16) for byte in mac_address.split(':'))\n return hwaddr[2] << 24 | hwaddr[3] << 16 | hwaddr[4] << 8 | hwaddr[5]",
"def get(self, mac_address):\n response = ise.get_session_details_by_attr(mac_address=mac_address, fmt='json')\n return response",
"def get_adapter_mac(cls, client_object):\n parsed_data = DefaultCRUDImpl.get_adapter_info(\n client_object)\n for record in parsed_data['table']:\n if record['dev'] == client_object.name:\n return record['mac']\n pylogger.warning('Did not find a MAC address for adapter %r on %r' %\n (client_object.name, client_object.ip))",
"def get_client_by_handle(self, handle):\n candidate_client_objects = [client for client in self if client.handle == handle]\n assert len( candidate_client_objects) < 2, \"?? socket %s appears in list of client objects multiple times\" % handle\n if candidate_client_objects:\n return candidate_client_objects[0]\n return None",
"def get_mac_address(self):\n\t\treturn call_sdk_function('PrlSrvCfgNet_GetMacAddress', self.handle)",
"def get_my_mac():\n\n mac_set = get_my_mac_set(iface_filter=get_default_route()[1])\n return mac_set.pop()",
"def get_device_info(self, mac_address):\n try:\n out = self.get_output(\"info \" + mac_address)\n except BluetoothctlError, e:\n print(e)\n return None\n else:\n return out",
"def connect(self, mac_address):\n try:\n out = self.get_output(\"connect \" + mac_address, 2)\n except BluetoothctlError, e:\n print(e)\n return None\n else:\n res = self.child.expect([\"Failed to connect\", \"Connection successful\", pexpect.EOF])\n success = True if res == 1 else False\n return success",
"def find_by_port(self, port):\n for client in self.clients.values():\n if client.port == port:\n return client",
"def get_client_location(self, conn, macaddr: str, offset=0, limit=100, units=\"FEET\"):\n path = urlJoin(urls.CLIENT_LOCATION[\"GET_CLIENT_LOC\"], macaddr)\n params = {\n \"offset\": offset,\n \"limit\": limit,\n \"units\": units\n }\n resp = conn.command(apiMethod=\"GET\", apiPath=path, apiParams=params)\n return resp",
"def GetMacVendor(macAddress):\n\turlMac = \"https://macvendors.co/api/%s/pipe\" % macAddress\n\tif macAddress in [\"\",\"FF-FF-FF-FF-FF-FF\"]:\n\t\treturn None\n\n\ttry:\n\t\t#sys.stderr.write(\"urlMac=%s\\n\"%urlMac)\n\n\t\timport urllib2\n\t\treq = urllib2.Request(urlMac)\n\t\treq.add_header('User-Agent', \"API Browser\")\n\t\tresp = urllib2.urlopen(req)\n\t\tcontent = resp.readlines()[0]\n\n\t\t#sys.stderr.write(\"content=%s\\n\"%content)\n\t\t#sys.stderr.write(\"content=%s\\n\"%str(type(content)))\n\t\tsplitMac = content.split(\"|\")\n\t\t#sys.stderr.write(\"splitMac[0]=%s\\n\"%splitMac[0])\n\t\treturn splitMac[0]\n\texcept:\n\t\texc = sys.exc_info()[1]\n\t\t#sys.stderr.write(\"Caught %s\\n\"%str(exc))\n\t\t# Any error returns a none strng: Thisinformation is not that important.\n\t\treturn \"Cannot determine vendor\"",
"def getMac(self):\n # Import netifaces here to prevent error importing this module in setup.py\n import netifaces\n interfaces = ['eth0', 'wlan0']\n try:\n interfaces.append(netifaces.gateways()['default'][netifaces.AF_INET][1])\n except:\n pass\n for interface in interfaces:\n try:\n return netifaces.ifaddresses(interface)[netifaces.AF_LINK][0]['addr']\n except ValueError:\n pass\n except:\n exception('Error getting MAC address')\n return None",
"def sx_fdb_uc_mac_addr_get(handle, vlan_id, mac_addr): \n try:\n key = sx_fdb_uc_mac_addr_params_t()\n key.fid_vid = vlan_id\n key.mac_addr = ether_addr(mac_addr)\n key.action = SX_FDB_ACTION_FORWARD\n key_p = copy_sx_fdb_uc_mac_addr_params_t_p(key)\n \n key_filter = sx_fdb_uc_key_filter_t()\n key_filter.filter_by_fid = SX_FDB_KEY_FILTER_FIELD_VALID\n key_filter.filter_by_mac_addr = SX_FDB_KEY_FILTER_FIELD_VALID\n key_filter.filter_by_log_port = SX_FDB_KEY_FILTER_FIELD_NOT_VALID\n key_filter.fid = vlan_id\n key_filter.mac_addr = ether_addr(mac_addr)\n key_filter_p = copy_sx_fdb_uc_key_filter_t_p(key_filter)\n \n data_cnt_p = copy_uint32_t_p(SX_FDB_MAX_GET_ENTRIES)\n mac_list_p = new_sx_fdb_uc_mac_addr_params_t_arr(SX_FDB_MAX_GET_ENTRIES)\n \n rc = sx_api_fdb_uc_mac_addr_get(handle, 0, SX_ACCESS_CMD_GET_FIRST, SX_FDB_UC_ALL, key_p, key_filter_p, mac_list_p, data_cnt_p)\n if rc == SX_STATUS_ENTRY_NOT_FOUND:\n return None\n sx_check_rc(rc)\n\n data_cnt = uint32_t_p_value(data_cnt_p)\n if data_cnt == 0:\n return None\n\n assert data_cnt == 1, \"Got unexpected macs amount, mac {} vlan {} data_cnt {}\".format(mac_addr, vlan_id, data_cnt)\n \n mac_entry = sx_fdb_uc_mac_addr_params_t_arr_getitem(mac_list_p, 0)\n assert mac_entry.dest_type == SX_FDB_UC_MAC_ADDR_DEST_TYPE_LOGICAL_PORT, \"Got unexpected mac entry type {}\".format(mac_entry.dest_type)\n\n return mac_entry\n\n finally:\n delete_sx_fdb_uc_mac_addr_params_t_arr(mac_list_p)\n delete_uint32_t_p(data_cnt_p) \n delete_sx_fdb_uc_key_filter_t_p(key_filter_p)\n delete_sx_fdb_uc_mac_addr_params_t_p(key_p)",
"def LookupByStateKey(self, state_key):\n self.ReadClientStateFile()\n for client in self._registered_tokens.values():\n if state_key.encode('hex') in client.get('state_keys', []):\n return client\n\n return None",
"def get_client(self, clientname):\n client = self.dbsession.query(Client).filter_by(clientname=clientname).all()\n if not client:\n return self.create_client({'clientname': clientname})\n else:\n return client[0]",
"def _get_interface_ip(mac_addr):\n interfaces = netifaces.interfaces()\n for iface in interfaces:\n addresses = netifaces.ifaddresses(iface)\n link_addresses = addresses.get(netifaces.AF_LINK, [])\n for link_addr in link_addresses:\n if link_addr.get('addr') == mac_addr:\n ip_addresses = addresses.get(netifaces.AF_INET)\n if ip_addresses:\n # NOTE: return first address, ironic API does not\n # support multiple\n return ip_addresses[0].get('addr')\n else:\n break",
"def _get_interface_ip(mac_addr):\n interfaces = netifaces.interfaces()\n for iface in interfaces:\n addresses = netifaces.ifaddresses(iface)\n link_addresses = addresses.get(netifaces.AF_LINK, [])\n for link_addr in link_addresses:\n if link_addr.get('addr') == mac_addr:\n ip_addresses = addresses.get(netifaces.AF_INET)\n if ip_addresses:\n # NOTE: return first address, ironic API does not\n # support multiple\n return ip_addresses[0].get('addr')\n else:\n break",
"def get_vendor(mac_addr: str) -> str:\n\n parse_wireshark_oui_database()\n\n mac_addr = mac_addr.lower().replace(':', '').replace('-', '').replace('.', '')\n\n # Split the MAC address in different ways and check against the oui_dict\n for split_length in _oui_length_split_list:\n oui = mac_addr[:split_length]\n if oui in _oui_dict:\n return _oui_dict[oui]\n\n return ''",
"def _get_mac(self):\n return self.__mac",
"def get_device(self):\n addr = self.address\n servers = [server for server in pyrax.cloudservers.list()\n if addr in server.networks.get(\"private\", \"\")]\n try:\n return servers[0]\n except IndexError:\n return None",
"def get_device_name(self, device):\n if not self.last_results:\n return None\n for client in self.last_results:\n if client.mac == device:\n return client.name\n return None",
"def get_mac_address(self, result, host):\n if \"mac\" in result['scan'][host][\"addresses\"]:\n return result['scan'][host][\"addresses\"][\"mac\"]\n else:\n return \"\""
] | [
"0.6797491",
"0.6305357",
"0.62252337",
"0.618177",
"0.59434694",
"0.5883319",
"0.5820577",
"0.58077675",
"0.580335",
"0.57866395",
"0.57657874",
"0.5587245",
"0.5584584",
"0.55807036",
"0.55640537",
"0.5543741",
"0.5538617",
"0.55057526",
"0.55036765",
"0.54920053",
"0.5488901",
"0.5482273",
"0.54581803",
"0.541997",
"0.541997",
"0.5414793",
"0.5412437",
"0.5376435",
"0.53750104",
"0.5281578"
] | 0.796704 | 0 |
Returns true if the model is built for training mode. | def is_training(self):
return self.mode == "train" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_training(self):\n return (\n self.detector.training\n # and self.recognizer.training\n and self.shared_conv.training\n )",
"def trainable(self):\n return True",
"def is_trainable(self):\n return False",
"def has_training_docs(self):\n pass",
"def is_training(self):\n return self._labels_one_hot is not None",
"def training(self):\n self.training = True",
"def train(self):\n self.training = True",
"def is_trainable(self, rel_name):\n return self._declaration[rel_name].trainable",
"def is_trained(self) -> bool:\r\n return not getattr(self._lda, \"classes_\", None) is None",
"def is_trained(self) -> bool:",
"def is_trained(self):\n return len(self.indicator_words) > 0",
"def is_training_completed(self) -> bool:\n return (self.model_dir/self.TRAINING_COMPLETED_FILE_NAME).is_file()",
"def is_trained(self) -> bool:\r\n return not getattr(self._qda, \"classes_\", None) is None",
"def _set_train(self):\n\n if not self.model.__dict__['training']:\n self.model.train()",
"def training_target(training_data):\n return training_data.status == \"DEFAULT\"",
"def start_training(self):\n self.training = True",
"def has_train(args):\n return (args.training_set or args.source or args.dataset or\n args.datasets or args.source_file or args.dataset_file or\n args.train_stdin or args.source_file)",
"def has_next_train(self):\n return (self.train_next < len(self.train_list))",
"def test_trainable_property(self):\n scalar_weighted_addition_model = ScalarWeightedAddition(10)\n np.testing.assert_equal(scalar_weighted_addition_model.is_trainable, True)",
"def train_digits(self):\n try:\n # TODO: Make decision taking validation into account validation\n metrics_result = self.model.train()\n logging.info(\"model performance is {}\".format(metrics_result))\n return metrics_result is not None\n # TODO: Apply specific exceptions and log,\n except:\n logging.error(\"Prediction Error:\", sys.exc_info()[0])\n raise ValueError()",
"def train():\n \n ## check for request data\n if not request.json:\n print(\"ERROR: API (train): did not receive request data\")\n return jsonify(False)\n\n ## set the test flag\n test = False\n if 'mode' in request.json and request.json['mode'] == 'test':\n test = True\n\n print(\"... training model\")\n model = model_train(test=test)\n print(\"... training complete\")\n\n return(jsonify(True))",
"def train(self, batch_training=False):\n raise NotImplementedError",
"def train(self, mode: bool = True):\n if self.nn_module.training != mode:\n self.nn_module.train(mode)",
"def _is_train(self):\n return tf.placeholder(dtype=tf.bool,\n name='is_train')",
"def check_norm_state(modules, train_state):\n for mod in modules:\n if isinstance(mod, _BatchNorm):\n if mod.training != train_state:\n return False\n return True",
"def is_ready(model):\n return hasattr(model, '_pywarm_forward_pre_hook')",
"def is_train(self, X):\n if not self._check_shape(X):\n return False\n\n idx = self.sample_idx_\n\n try:\n # Grab sample from `X`\n sample = X[ix_(idx[0], idx[1])]\n\n return array_equal(sample, self.sample_)\n\n except IndexError:\n # If index is out of bounds, X.shape < training_set.shape\n # -> X is not the training set\n return False",
"def assert_train_augmented(self) -> bool:\n dalet = Path(os.environ[\"DATA_PATH\"]) / \"characters\" / \"train\" / \"Dalet\"\n truth_value = False\n try:\n if len(list(dalet.iterdir())) != 72: # downloaded number of chars\n truth_value = True\n except FileNotFoundError:\n pass # this is ok because we handle the truth_value\n return truth_value",
"def test(self):\n self.training = False",
"def check_norm_state(modules, train_state):\n for mod in modules:\n if isinstance(mod, _BatchNorm):\n if mod.training != train_state:\n return False\n return True"
] | [
"0.77450645",
"0.75195754",
"0.7401473",
"0.6901232",
"0.68979996",
"0.6851016",
"0.6818478",
"0.6815334",
"0.67600894",
"0.67210084",
"0.66106576",
"0.6578971",
"0.65409017",
"0.64601964",
"0.6446735",
"0.6420486",
"0.64169925",
"0.63686174",
"0.6364961",
"0.6309912",
"0.6305526",
"0.62735236",
"0.625527",
"0.6170445",
"0.6061617",
"0.6031629",
"0.60141647",
"0.5988689",
"0.59827965",
"0.59759724"
] | 0.8236415 | 0 |
Distort a batch of images. (Processing a batch allows us to easily switch between TPU and CPU execution). | def distort_images(self, images, seed):
if self.mode == "train":
images = image_processing.distort_image(images, seed)
# Rescale to [-1,1] instead of [0, 1]
images = tf.subtract(images, 0.5)
images = tf.multiply(images, 2.0)
return images | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def image_undistort():\n # read test images\n all_test_images = os.listdir('test_images')\n test_images = []\n for name in all_test_images:\n if name.endswith(\".jpg\"):\n test_images.append(name)\n # apply distortion correction on test images\n undistort_images(test_images, './camera_calib_dist_pickle.p')\n print(\"DONE: undistorted test-images saved\")",
"def distorted_inputs():\n if not FLAGS.data_dir:\n raise ValueError('Please supply a data_dir')\n data_dir = FLAGS.data_dir\n if FLAGS.batches_dir.strip():\n print(\"putting on the batches\")\n data_dir = os.path.join(FLAGS.data_dir, FLAGS.batches_dir)\n\n print(\"The data dir is {} in distorted_inputs\".format(data_dir))\n images, labels = qpNet_input.distorted_inputs(data_dir=data_dir, batch_size=FLAGS.batch_size, distort=False)\n\n if FLAGS.use_fp16:\n images = tf.cast(images, tf.float16)\n labels = tf.cast(labels, tf.float16)\n\n # binaries the labels if necessary:\n print(\"Binarising here\")\n labels = binariseTheLabels(labels)\n\n return images, labels",
"def distorted_inputs( batch_size):\n\t#read_labels_dict(r\"D:\\PythonWorksp\\TensorFlow\\furniture\\bed\\tf-labels.txt\")\n\timage_list, label_list = read_labeled_image_list(LABEL_FILE)\n\tfor f in image_list:\n\t\tif not tf.gfile.Exists(f):\n\t\t\traise ValueError('Failed to find file: ' + f)\n\n\t#print(label_list)\n\n\timages = tf.convert_to_tensor(image_list, dtype=tf.string)\n\tlabels = tf.convert_to_tensor(label_list, dtype=tf.int64)\n\t\n\tprint(labels)\n\t# Makes an input queue\n\tinput_queue = tf.train.slice_input_producer([images, labels],\n\t\t\t\t\t\t\t\t\t\t\t\t#num_epochs=num_epochs,\n\t\t\t\t\t\t\t\t\t\t\t\tshuffle=True)\n\n\timage, label = read_images_from_disk(input_queue)\n\n\tprint(label)\n\t# Create a queue that produces the filenames to read.\n\t#filename_queue = tf.train.string_input_producer(filenames)\n\n\t# Read examples from files in the filename queue.\n\t#read_input = read_image(filename_queue)\n\treshaped_image = tf.cast(image, tf.float32)\n\n\theight = IMAGE_SIZE\n\twidth = IMAGE_SIZE\n\n\t# Image processing for training the network. Note the many random\n\t# distortions applied to the image.\n\n\t# Randomly crop a [height, width] section of the image.\n\t# distorted_image = tf.random_crop(reshaped_image, [height, width, 3])\n\tdistorted_image = tf.image.resize_images(reshaped_image, [height, width])\n\n\t# Randomly flip the image horizontally.\n\tdistorted_image = tf.image.random_flip_left_right(distorted_image)\n\n\t# Because these operations are not commutative, consider randomizing\n\t# the order their operation.\n\t# NOTE: since per_image_standardization zeros the mean and makes\n\t# the stddev unit, this likely has no effect see tensorflow#1458.\n\tdistorted_image = tf.image.random_brightness(distorted_image,\n\t\t\t\t\t\t\t\t\t\t\t max_delta=63)\n\tdistorted_image = tf.image.random_contrast(distorted_image,\n\t\t\t\t\t\t\t\t\t\t\t lower=0.2, upper=1.8)\n\n\t# Subtract off the mean and divide by the variance of the pixels.\n\tfloat_image = tf.image.per_image_standardization(distorted_image)\n\n\t# Set the shapes of tensors.\n\tfloat_image.set_shape([height, width, 3])\n\t#label.set_shape([1])#todo\n\n\t# Ensure that the random shuffling has good mixing properties.\n\tmin_fraction_of_examples_in_queue = 0.4\n\tmin_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *\n\t\t\t\t\t\t min_fraction_of_examples_in_queue)\n\tprint ('Filling queue with %d images before starting to train. '\n\t\t 'This will take a few minutes.' % min_queue_examples)\n\n\t# Generate a batch of images and labels by building up a queue of examples.\n\treturn _generate_image_and_label_batch(float_image, label,\n\t\t\t\t\t\t\t\t\t\t min_queue_examples, batch_size,\n\t\t\t\t\t\t\t\t\t\t shuffle=True)",
"def move_and_process_input(batch):\n x, y = batch\n x = x.to(device).float()\n y = torch.as_tensor(y).to(device)\n x = x.permute(0, -1, 1, 2, 3)\n return x, y",
"def distorted_inputs():\n if not FLAGS.dir_data:\n raise ValueError('Please supply a dir_data')\n\n dir_data = os.path.join(FLAGS.dir_data, 'batches', 'train_batch')\n images, labels = Unet_input.distorted_inputs(dir_data=dir_data, batch_size=FLAGS.batch_size)\n\n if FLAGS.use_fp16:\n images = tf.cast(images, tf.float16)\n labels = tf.cast(labels, tf.float16)\n\n return images, labels",
"def prepare_batch(batch, device=None, non_blocking=False):\n\timages, target = batch\n\treturn [convert_tensor(image, device=device, non_blocking=non_blocking) for image in images], \\\n\t convert_tensor(target, device=device, non_blocking=non_blocking)",
"def transform_batch(images,\n max_rot_deg,\n max_shear_deg,\n max_zoom_diff_pct,\n max_shift_pct,\n experimental_tpu_efficiency=True):\n\n def clipped_random():\n rand = tf.random.normal([1], dtype=tf.float32)\n rand = tf.clip_by_value(rand, -2., 2.) / 2.\n return rand\n\n batch_size = images.shape[0]\n tf.debugging.assert_equal(\n images.shape[1],\n images.shape[2],\n \"Images should be square\")\n DIM = images.shape[1]\n channels = images.shape[3]\n XDIM = DIM % 2\n\n rot = max_rot_deg * clipped_random()\n shr = max_shear_deg * clipped_random() \n h_zoom = 1.0 + clipped_random()*max_zoom_diff_pct\n w_zoom = 1.0 + clipped_random()*max_zoom_diff_pct\n h_shift = clipped_random()*(DIM*max_shift_pct)\n w_shift = clipped_random()*(DIM*max_shift_pct)\n\n # GET TRANSFORMATION MATRIX\n m = get_mat(rot,shr,h_zoom,w_zoom,h_shift,w_shift) \n\n # LIST DESTINATION PIXEL INDICES\n x = tf.repeat(tf.range(DIM//2,-DIM//2,-1), DIM) # 10000,\n y = tf.tile(tf.range(-DIM//2,DIM//2),[DIM])\n z = tf.ones([DIM*DIM],tf.int32)\n idx = tf.stack( [x,y,z] ) # [3, 10000]\n\n # ROTATE DESTINATION PIXELS ONTO ORIGIN PIXELS\n idx2 = tf.matmul(m,tf.cast(idx,tf.float32))\n idx2 = tf.cast(idx2,tf.int32)\n idx2 = tf.clip_by_value(idx2,-DIM//2+XDIM+1,DIM//2)\n\n # FIND ORIGIN PIXEL VALUES \n idx3 = tf.stack( [DIM//2-idx2[0,], DIM//2-1+idx2[1,]] )\n idx3 = tf.transpose(idx3)\n batched_idx3 = tf.tile(idx3[tf.newaxis], [batch_size, 1, 1])\n\n if experimental_tpu_efficiency:\n # This reduces excessive padding in the original tf.gather_nd op\n idx4 = idx3[:, 0] * DIM + idx3[:, 1]\n images = tf.reshape(images, [batch_size, DIM * DIM, channels])\n d = tf.gather(images, idx4, axis=1)\n return tf.reshape(d, [batch_size,DIM,DIM,channels])\n else:\n d = tf.gather_nd(images, batched_idx3, batch_dims=1)\n return tf.reshape(d,[batch_size,DIM,DIM,channels])",
"def preprocess_for_train(\n image, height, width, bbox,\n do_distort_color=True,\n do_flip_horizontal=True,\n do_flip_vertical=False,\n area_range=(0.2, 1.0),\n aspect_ratio_range=(0.75, 1.333),\n normalize_method='inception',\n fast_mode=True,\n scope=None,\n add_image_summaries=True):\n with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]):\n if bbox is None:\n bbox = tf.constant(\n [0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])\n if image.dtype != tf.float32:\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n\n if add_image_summaries:\n # Each bounding box has shape [1, num_boxes, box coords] and\n # the coordinates are ordered [ymin, xmin, ymax, xmax].\n image_with_box = tf.image.draw_bounding_boxes(\n tf.expand_dims(image, 0), bbox)\n tf.summary.image('image_with_bounding_boxes', image_with_box)\n\n distorted_image, distorted_bbox = distorted_bounding_box_crop(\n image,\n bbox,\n area_range=area_range,\n aspect_ratio_range=aspect_ratio_range)\n\n # Restore the shape since the dynamic slice based upon the bbox_size loses\n # the third dimension.\n distorted_image.set_shape([None, None, 3])\n\n if add_image_summaries:\n image_with_distorted_box = tf.image.draw_bounding_boxes(\n tf.expand_dims(image, 0), distorted_bbox)\n tf.summary.image(\n 'images_with_distorted_bounding_box', image_with_distorted_box)\n\n # This resizing operation may distort the images because the aspect\n # ratio is not respected. We select a resize method in a round robin\n # fashion based on the thread number.\n # Note that ResizeMethod contains 4 enumerated resizing methods.\n\n # We select only 1 case for fast_mode bilinear.\n num_resize_cases = 1 if fast_mode else 4\n distorted_image = apply_with_random_selector(\n distorted_image,\n lambda x, method: tf.image.resize_images(x, [height, width], method),\n num_cases=num_resize_cases)\n\n if add_image_summaries:\n tf.summary.image(\n 'cropped_resized_image', tf.expand_dims(distorted_image, 0))\n\n # Randomly flip the image horizontally.\n if do_flip_horizontal:\n distorted_image = tf.image.random_flip_left_right(distorted_image)\n\n # Randomly flip the image horizontally.\n if do_flip_vertical:\n distorted_image = tf.image.random_flip_up_down(distorted_image)\n\n if do_distort_color:\n # Randomly distort the colors. There are 4 ways to do it.\n distorted_image = apply_with_random_selector(\n distorted_image,\n lambda x, ordering: distort_color(x, ordering, fast_mode),\n num_cases=4)\n\n if add_image_summaries:\n tf.summary.image(\n 'final_distorted_image', tf.expand_dims(distorted_image, 0))\n\n distorted_image = image_normalize(distorted_image, method=normalize_method)\n return distorted_image",
"def distorted_inputs():\n if not FLAGS.data_dir:\n raise ValueError('Please supply a data_dir')\n data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')\n images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,\n batch_size=FLAGS.batch_size)\n return images, labels",
"def undistort(basedir, img_extension, output_dir, output_prefix, calibration, distortion, output_image_shape=(640, 480), scaling_param=1):\n search = os.path.join(basedir, '*'+img_extension)\n img_paths = glob.glob(search)\n img_paths.sort()\n print(\"Number of Images: \", len(img_paths))\n maxlen = len(img_paths)\n if maxlen == 0:\n raise IOError(\n 'No images were found (maybe wrong \\'image extension\\' parameter?)')\n\n if not os.path.exists(os.path.dirname(output_dir)):\n os.makedirs(os.path.dirname(output_dir))\n\n for img_idx, img_path in enumerate(img_paths):\n img = cv2.imread(img_path, 1)\n height, width, _ = img.shape\n new_camera_matrix = calibration\n\n # scaling parameter between 0 (when all the pixels in the undistorted image are valid)\n # and 1 (when all the source image pixels are retained in the undistorted image)\n new_camera_mtx, roi = cv2.getOptimalNewCameraMatrix(\n calibration, distortion, (width, height), scaling_param, output_image_shape)\n print(\"calibration\", calibration)\n print(\"new_camera_matrix\", new_camera_matrix)\n\n # undistort\n mapx, mapy = cv2.initUndistortRectifyMap(\n calibration, distortion, None, new_camera_mtx, output_image_shape, 5)\n dst = cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR)\n\n # crop the image\n x, y, w, h = roi\n dst = dst[y:y+h, x:x+w]\n\n output_path = output_dir+output_prefix+'_%d' % img_idx+img_extension\n print(output_path)\n cv2.imwrite(output_path, dst)\n return True",
"def preprocess_batch(images_batch, preproc_func=None):\n if preproc_func is None:\n return images_batch\n\n with tf.variable_scope('preprocess'):\n images_list = tf.split(images_batch, int(images_batch.shape[0]))\n result_list = []\n for img in images_list:\n reshaped_img = tf.reshape(img, img.shape[1:])\n processed_img = preproc_func(reshaped_img)\n result_list.append(tf.expand_dims(processed_img, axis=0))\n result_images = tf.concat(result_list, axis=0)\n return result_images",
"def collate_fn(self, batch):\n images = list()\n targets = list()\n\n for b in batch:\n images.append(b[0])\n targets.append(b[1])\n\n # images = torch.stack(images, dim=0)\n\n return images, targets # tensor (N, 3, 300, 300), 3 lists of N tensors each",
"def distorted_inputs():\n if not FLAGS.data_dir:\n raise ValueError('Please supply a data_dir')\n data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')\n images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,\n batch_size=FLAGS.batch_size)\n if FLAGS.use_fp16:\n images = tf.cast(images, tf.float16)\n labels = tf.cast(labels, tf.float16)\n return images, labels",
"def collate(self, batch):\n \n images = []\n indices = []\n roi_size = 5 if self.Train else 4\n rois = torch.zeros((len(batch), 20, roi_size), dtype=torch.float32)\n rois = rois.to(batch[0][1].device)\n \n for _b in range(len(batch)):\n # Accumulate patches:\n images.append(batch[_b][0].to(torch.float32))\n indices.append(batch[_b][2])\n \n # Accumulate ROI:\n \"\"\"\n image_num = torch.Tensor([_b]).expand(batch[_b][1].size(0))\n image_num = image_num.type(batch[_b][1].dtype).view(-1,1)\n image_num = image_num.to(batch[_b][1].device)\n _roi = torch.cat([image_num, batch[_b][1]], dim=1)\n rois = torch.cat([rois, _roi], dim=0)\n \"\"\"\n num_boxes = batch[_b][1].size(0)\n rois[_b,:num_boxes,:] = batch[_b][1]\n \n \n # Stack outputs and return\n batch = [torch.stack(images, dim=0), rois, torch.Tensor(indices)]\n return batch",
"def detection_collate(batch):\n targets = []\n imgs = []\n for sample in batch:\n imgs.append(sample[0])\n targets.append(torch.FloatTensor(sample[1]))\n return torch.stack(imgs, 0), targets",
"def batch_collate_fn(batch):\n images = []\n masks = []\n \n for (image, trimap, mask) in batch:\n mask = mask.unsqueeze(0)\n trimap = trimap.unsqueeze(0)\n image = torch.cat([image, trimap], 0).unsqueeze(0)\n \n images.append(image)\n masks.append(mask)\n\n images = torch.cat(images, 0)\n masks = torch.cat(masks, 0)\n\n return (images, masks)",
"def transform_images(img1,img2):",
"def _batch_unshuffle_ddp(self, x, idx_unshuffle):\n # gather from all gpus\n batch_size_this = x.shape[0]\n x_gather = concat_all_gather(x)\n batch_size_all = x_gather.shape[0]\n\n num_gpus = batch_size_all // batch_size_this\n\n # restored index for this gpu\n gpu_idx = torch.distributed.get_rank()\n idx_this = idx_unshuffle.view(num_gpus, -1)[gpu_idx]\n\n return x_gather[idx_this]",
"def process_images(image, label):\n # Normalize images to have a mean of 0 and standard deviation of 1\n # per_image_standardization is preferred, which normalize the entire image to mean zero and std 1.\n # It also make learning fast.\n image = tf.image.per_image_standardization(image)\n # Resize images from 32x32 to 277x277\n image = tf.image.resize(image, (227,227))\n return image, label",
"def undistort_image(mtx_, dist_, img_):\n dst = cv2.undistort(img_, mtx_, dist_, None, mtx_)\n return dst",
"def undistort(img, mtx, dist):\n return cv2.undistort(img, mtx, dist, None, mtx)",
"def batch(img_path, gt_path,img_list, batch, total_size, label_list):\r\n\r\n image_list = [os.path.join(img_path, i) for i in img_list]\r\n gt_list = [os.path.join(gt_path,i) for i in img_list]\r\n\r\n \r\n for i in range(0, total_size, batch):\r\n yield image_load_resize(image_list[i:i+batch]), make_label_map(gt_list[i:i+batch], label_list)",
"def preprocess_for_train(image, height, width, bbox,\n fast_mode=True,\n scope=None,\n add_image_summaries=True):\n with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]):\n if bbox is None:\n bbox = tf.constant([0.0, 0.0, 1.0, 1.0],\n dtype=tf.float32,\n shape=[1, 1, 4])\n if image.dtype != tf.float32:\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n # Each bounding box has shape [1, num_boxes, box coords] and\n # the coordinates are ordered [ymin, xmin, ymax, xmax].\n image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),\n bbox)\n if add_image_summaries:\n tf.summary.image('image_with_bounding_boxes', image_with_box)\n \n distorted_image, distorted_bbox = distorted_bounding_box_crop(image, bbox)\n # Restore the shape since the dynamic slice based upon the bbox_size loses\n # the third dimension.\n distorted_image.set_shape([None, None, 3])\n image_with_distorted_box = tf.image.draw_bounding_boxes(\n tf.expand_dims(image, 0), distorted_bbox)\n if add_image_summaries:\n tf.summary.image('images_with_distorted_bounding_box',\n image_with_distorted_box)\n \n # This resizing operation may distort the images because the aspect\n # ratio is not respected. We select a resize method in a round robin\n # fashion based on the thread number.\n # Note that ResizeMethod contains 4 enumerated resizing methods.\n \n # We select only 1 case for fast_mode bilinear.\n num_resize_cases = 1 if fast_mode else 4\n distorted_image = apply_with_random_selector(\n distorted_image,\n lambda x, method: tf.image.resize_images(x, [height, width], method),\n num_cases=num_resize_cases)\n \n if add_image_summaries:\n tf.summary.image('cropped_resized_image',\n tf.expand_dims(distorted_image, 0))\n \n # Randomly flip the image horizontally.\n distorted_image = tf.image.random_flip_left_right(distorted_image)\n \n if add_image_summaries:\n tf.summary.image('final_distorted_image',\n tf.expand_dims(distorted_image, 0))\n \n return distorted_image",
"def copy_tpx3_files_batch(initial_image_dir, target_dir):\n\tinitial_image_dir = initial_image_dir.rstrip('/') + '/'\n\targ_list = [\"--include=*\", \"--exclude=*\", initial_image_dir, target_dir]\n\trun_rsync(arg_list)",
"def _batchify(batch):\n im_name, im0, im1, im2, im3, im4, im5, im6, im7, im8, im9 = zip(*batch)\n im0 = nd.stack(*im0)\n im1 = nd.stack(*im1)\n im2 = nd.stack(*im2)\n im3 = nd.stack(*im3)\n im4 = nd.stack(*im4)\n im5 = nd.stack(*im5)\n im6 = nd.stack(*im6)\n im7 = nd.stack(*im7)\n im8 = nd.stack(*im8)\n im9 = nd.stack(*im9)\n return im_name, im0, im1, im2, im3, im4, im5, im6, im7, im8, im9",
"def distort_image(image):\n # Randomly flip horizontally.\n with tf.name_scope(\"flip_horizontal\", values=[image]):\n image = tf.image.random_flip_left_right(image)\n\n # Randomly distort the colors based on thread id.\n with tf.name_scope(\"distort_color\", values=[image]):\n image = tf.image.random_brightness(image, max_delta=32. / 255.)\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n image = tf.image.random_hue(image, max_delta=0.032)\n image = tf.image.random_contrast(image, lower=0.5, upper=1.5)\n\n # The random_* ops do not necessarily clamp.\n image = tf.clip_by_value(image, 0.0, 1.0)\n\n return image",
"def adapt_batch(batch):\n image_arrays, labellings = batch\n\n current_batch_size = len(labellings)\n\n images = np.array(image_arrays).reshape(current_batch_size, *image_arrays[0].shape)\n\n padded_labellings = pad_labellings(labellings)\n\n labels = np.array(padded_labellings, dtype=np.int32).reshape(current_batch_size, -1)\n\n input_lengths = compute_input_lengths(image_arrays)\n\n label_lengths = np.array([len(labelling) for labelling in labellings],\n dtype=np.int32).reshape(current_batch_size, 1)\n\n return [images, labels, input_lengths, label_lengths], labels",
"def flow(self, batch_size=32):\n nb_batches = int(len(self.image_ids_in_subset) / batch_size) + 1\n while True:\n # Before each epoch we shuffle the images' ids\n random.shuffle(self.image_ids_in_subset)\n\n for i in range(nb_batches):\n # We first get all the image ids for the next batch\n current_bach = self.image_ids_in_subset[i*batch_size:(i+1)*batch_size]\n X_batch = []\n Y_batch = []\n\n for image_id in current_bach:\n # Load the image and resize it. We get a PIL Image object\n img = image.load_img(self.get_img_path(int(image_id)), grayscale=False, target_size=(cfg.IMAGE.IMG_SIZE, cfg.IMAGE.IMG_SIZE))\n # Cast the Image object to a numpy array and put the channel has the last dimension\n img_arr = image.img_to_array(img, data_format='channels_last')\n X_batch.append(img_arr)\n # Y_batch.append(self.id_to_label[image_id])\n Y_batch.append(self.get_labels(image_id))\n\n # resize X_batch in (batch_size, IMG_HEIGHT, IMG_WIDTH, 3)\n X_batch = np.reshape(X_batch, (-1, cfg.IMAGE.IMG_SIZE, cfg.IMAGE.IMG_SIZE, 3))\n # resize Y_batch in (None, nb_classes)\n Y_batch = np.reshape(Y_batch, (-1, self.nb_classes))\n\n # substract mean values from imagenet\n X_batch = preprocess_input(X_batch, data_format='channels_last')\n yield(X_batch, Y_batch)",
"def _collate_fn(batch):\n # imgs = [b[0] for b in batch]\n # labels = [b[1] for b in batch]\n # imgs = torch.stack(imgs, dim=0)\n # return [imgs, labels]\n imgs = [b[0] for b in batch]\n labels = [b[1] for b in batch]\n imgs = torch.cat(imgs, dim=0)\n labels = [l for sublist in labels for l in sublist]\n return [imgs, labels]",
"def preprocess(images, target_height, target_width):\n # Scale to [0, 1].\n images = tf.image.convert_image_dtype(images, dtype=tf.float32)\n\n # Stack images channel-wise.\n batch_size = tf.shape(images)[0]\n images = stack_images_channelwise(images, batch_size)\n\n # Resize to target height and width.\n images = tf.image.resize(images, [target_height, target_width])\n return images"
] | [
"0.61262804",
"0.60912216",
"0.598788",
"0.59358394",
"0.5922533",
"0.5875004",
"0.5843194",
"0.57700866",
"0.57675856",
"0.57097095",
"0.5691411",
"0.5681225",
"0.5649462",
"0.5595402",
"0.55895615",
"0.5576115",
"0.55393803",
"0.54922485",
"0.54661673",
"0.54537296",
"0.5453321",
"0.5451674",
"0.54466933",
"0.54462093",
"0.54433966",
"0.543044",
"0.5417488",
"0.540165",
"0.5393477",
"0.53791326"
] | 0.7145252 | 0 |
Builds the input sequence embeddings. | def build_seq_embeddings(self, input_seqs):
with tf.variable_scope("seq_embedding"), tf.device("/cpu:0"):
embedding_map = tf.get_variable(
name="map",
shape=[self.config.vocab_size, self.config.embedding_size],
initializer=self.initializer)
seq_embeddings = tf.nn.embedding_lookup(embedding_map, input_seqs)
return seq_embeddings | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_seq_embeddings(self):\n with tf.variable_scope(\"seq_embedding\"), tf.device(\"/cpu:0\"):\n embedding_map = tf.get_variable(\n name=\"map\",\n shape=[self.config.vocab_size, self.config.word_embedding_size],\n initializer=self.initializer)\n \n # We need to store the normalized lookup table for efficient mapping of embedding vectors to closest words\n self.normed_embedding_map = tf.nn.l2_normalize(embedding_map, dim=1)\n \n seq_embeddings = tf.nn.embedding_lookup(embedding_map, self.input_seqs) \n # seq_embeddings has the shape (batch_size, sequence_length, sentence_length, embedding_size)\n # meaning, for each index in input_seqs (with shape (batch_size, sequence_length, sentence_length)) it stores an embedding vector\n\n #print('Shape seq_embeddings: ' + str(seq_embeddings.get_shape()))\n\n self.seq_embeddings = seq_embeddings",
"def build(self):\n self.build_inputs()\n self.image_embeddings = self.build_image_embeddings(self.images)\n self.seq_embeddings = self.build_seq_embeddings(self.input_seqs)\n self.build_model()\n self.setup_inception_initializer()\n self.setup_global_step()",
"def build(self):\n self.build_inputs()\n self.build_word_embeddings()\n self.build_encoder()\n self.build_fc()\n self.build_loss()\n self.build_global_step()",
"def build(self, input_shapes):\n (word_embeddings_shape, _) = input_shapes\n width = word_embeddings_shape.as_list()[-1]\n self.type_embeddings = None\n if self.use_type_embeddings:\n self.type_embeddings = self.add_weight(\n \"type_embeddings\",\n shape=[self.token_type_vocab_size, width],\n initializer=tf.keras.initializers.TruncatedNormal(\n stddev=self.initializer_range),\n dtype=self.dtype)\n\n self.position_embeddings = None\n if self.use_position_embeddings:\n self.position_embeddings = self.add_weight(\n \"position_embeddings\",\n shape=[self.max_position_embeddings, width],\n initializer=tf.keras.initializers.TruncatedNormal(\n stddev=self.initializer_range),\n dtype=self.dtype)\n\n self.output_layer_norm = tf.keras.layers.LayerNormalization(\n name=\"layer_norm\", axis=-1, epsilon=1e-12, dtype=tf.float32)\n self.output_dropout = tf.keras.layers.Dropout(\n rate=self.dropout_prob, dtype=tf.float32)\n super(EmbeddingPostprocessor, self).build(input_shapes)",
"def create_embedding(self):\n self.embedding = []\n\n for index in range(1,self.args.window_size+1):\n print(\"\\nOptimization round: \" +str(index)+\"/\"+str(self.args.window_size)+\".\")\n print(\"Creating documents.\")\n clean_documents = self.walk_extracts(index)\n print(\"Fitting model.\")\n model = Word2Vec(clean_documents,\n size = self.args.dimensions,\n window = 1,\n min_count = self.args.min_count,\n sg = 1,\n workers = self.args.workers)\n\n new_embedding = self.get_embedding(model)\n self.embedding = self.embedding +[new_embedding]\n self.embedding = np.concatenate(self.embedding, axis = 1)",
"def build(self):\n self.build_inputs()\n self.build_image_embeddings()\n self.build_seq_embeddings()\n self.build_encoder()\n self.build_prediction_model()\n self.setup_encoder_initializer()\n self.setup_global_step()\n self.list_trainable_variables()",
"def build(self):\n sequence_input = Input(shape=(self.max_sequence_length, ), dtype='int32')\n embedded_sequences = self.embedding_layer(sequence_input)\n x = Conv1D(128, 5, activation='relu')(embedded_sequences)\n x = MaxPooling1D(5)(x)\n x = Conv1D(128, 5, activation='relu')(x)\n x = MaxPooling1D(5)(x)\n x = Flatten()(x)\n x = Dense(128, activation='relu')(x)\n\n y = Bidirectional(LSTM(50, dropout=0.2, recurrent_dropout=0.2))(embedded_sequences)\n z = concatenate([x, y])\n preds = Dense(6, activation='softmax')(z)\n self.model = Model(sequence_input, preds)",
"def build_sentence_encoder(self, raw_encoder_input, input_seq_len):\n with tf.variable_scope('text_encoder'):\n self.embedding = \\\n tf.get_variable(\n \"embedding\", initializer=tf.random_uniform(\n [self.config.word_voc_size,\n self.config.word_embedding_space_size],\n -self.config.TRAIN.SENCODER.none_rnn_para_initial_max,\n self.config.TRAIN.SENCODER.none_rnn_para_initial_max))\n inputs = tf.nn.embedding_lookup(self.embedding, raw_encoder_input)\n\n # now it is [MAX_SEQ_LENGTH, batch_size, embedding_length]\n input_batch_order = tf.transpose(inputs, [1, 0, 2])\n\n # now it is [MAX_SEQ_LENGTH * batch_size, embedding_length]\n input_batch_order = tf.reshape(\n input_batch_order, [-1, self.config.word_embedding_space_size])\n\n # now it is LIST OF [BATCH_SIZE, embedding_length]\n encoder_input = tf.split(0, self.config.seq_max_len,\n input_batch_order)\n\n # the encoder part\n encode_gru_cell = tf.nn.rnn_cell.GRUCell(\n self.config.encoder_dimension)\n # big news: The state is final state, output is a list of tensor.\n # We don't to do that\n _, sentence_rep = tf.nn.rnn(encode_gru_cell, encoder_input,\n dtype=tf.float32,\n sequence_length=input_seq_len)\n self.sentence_rep = sentence_rep\n self.sentence_rep = tf.nn.l2_normalize(self.sentence_rep, 1)\n return",
"def build_bilstm(self, verbose=True):\r\n word_ids = Input(batch_shape=(None, None), dtype='int32', name='word_input')\r\n inputs = [word_ids]\r\n\r\n if self._params.use_pretrain_embedding:\r\n if verbose: logging.info(\"initial word embedding with pretrained embeddings\")\r\n if self._params.word_embedding_dim == 100:\r\n glove_file = self._params.data_dir + '/glove.6B.100d.txt'\r\n elif self._params.word_embedding_dim == 300:\r\n glove_file = self._params.data_dir + '/glove.42B.300d.txt'\r\n else:\r\n logging.error(\"we only support glove embedding with dimension 100 or 300\")\r\n raise ValueError(\"unmatch word dimension, we only support glove embedding with dimension 100 or 300\")\r\n glove_embedding_index = load_glove(glove_file, self._params.word_embedding_dim)\r\n word_vocab = self.input_processor.word_vocab.vocab\r\n glove_embeddings_matrix = np.zeros([len(word_vocab), self._params.word_embedding_dim])\r\n for word, i in word_vocab.items():\r\n vector = glove_embedding_index.get(word)\r\n if vector is not None:\r\n glove_embeddings_matrix[i] = vector\r\n \r\n word_embeddings = Embedding(input_dim=glove_embeddings_matrix.shape[0],\r\n output_dim=glove_embeddings_matrix.shape[1],\r\n trainable=False,\r\n mask_zero=True,\r\n weights=[glove_embeddings_matrix],\r\n name='word_embedding')(word_ids)\r\n else:\r\n word_embeddings = Embedding(input_dim=self._params.word_vocab_size,\r\n output_dim=self._params.word_embedding_dim,\r\n mask_zero=True,\r\n name='word_embedding')(word_ids)\r\n\r\n input_embeddings = [word_embeddings]\r\n if self._params.use_char:\r\n char_ids = Input(batch_shape=(None, None, None), dtype='int32', name='char_input')\r\n inputs.append(char_ids)\r\n if self._params.char_feature == \"lstm\":\r\n char_embeddings = Embedding(input_dim=self._params.char_vocab_size,\r\n output_dim=self._params.char_embedding_dim,\r\n mask_zero=True,\r\n name='char_embedding')(char_ids)\r\n if verbose: logging.info(\"using charcter level lstm features\")\r\n char_feas = TimeDistributed(Bidirectional(LSTM(self._params.char_lstm_size)), name=\"char_lstm\")(char_embeddings)\r\n elif self._params.char_feature == \"cnn\":\r\n # cnn do not support mask\r\n char_embeddings = Embedding(input_dim=self._params.char_vocab_size,\r\n output_dim=self._params.char_embedding_dim,\r\n name='char_embedding')(char_ids)\r\n if verbose: logging.info(\"using charcter level cnn features\")\r\n char_feas = char_cnn_encode(char_embeddings, self._params.n_gram_filter_sizes, self._params.n_gram_filter_nums)\r\n else:\r\n raise ValueError('char feature must be lstm or cnn')\r\n\r\n input_embeddings.append(char_feas)\r\n\r\n if self._params.use_pos:\r\n if verbose: logging.info(\"use pos tag features\")\r\n pos_ids = Input(batch_shape=(None, None), dtype='int32', name='pos_input')\r\n inputs.append(pos_ids)\r\n\r\n\r\n pos_embeddings = Embedding(input_dim=self._params.pos_vocab_size,\r\n output_dim=self._params.pos_embedding_dim,\r\n mask_zero=True,\r\n name='pos_embedding')(pos_ids)\r\n input_embeddings.append(pos_embeddings)\r\n\r\n if self._params.use_dict:\r\n if verbose: logging.info(\"use user dict features\")\r\n dict_ids = Input(batch_shape=(None, None), dtype='int32', name='dict_input')\r\n inputs.append(dict_ids)\r\n\r\n dict_embeddings = Embedding(input_dim=self._params.dict_vocab_size,\r\n output_dim=self._params.dict_embedding_dim,\r\n mask_zero=True,\r\n name='dict_embedding')(dict_ids)\r\n input_embeddings.append(dict_embeddings)\r\n\r\n input_embedding = 
Concatenate(name=\"input_embedding\")(input_embeddings) if len(input_embeddings)>1 else input_embeddings[0]\r\n input_embedding_ln = LayerNormalization(name='input_layer_normalization')(input_embedding)\r\n #input_embedding_bn = BatchNormalization()(input_embedding_ln)\r\n input_embedding_drop = Dropout(self._params.dropout, name=\"input_embedding_dropout\")(input_embedding_ln)\r\n\r\n z = Bidirectional(LSTM(units=self._params.main_lstm_size, return_sequences=True, dropout=0.2, recurrent_dropout=0.2),\r\n name=\"main_bilstm\")(input_embedding_drop)\r\n z = Dense(self._params.fc_dim, activation='tanh', name=\"fc_dense\")(z)\r\n\r\n if self._params.use_crf:\r\n if verbose: logging.info('use crf decode layer')\r\n crf = CRF(self._params.num_labels, sparse_target=False,\r\n learn_mode='marginal', test_mode='marginal', name='crf_out')\r\n loss = crf.loss_function\r\n pred = crf(z)\r\n else:\r\n loss = 'categorical_crossentropy'\r\n pred = Dense(self._params.num_labels, activation='softmax', name='softmax_out')(z)\r\n\r\n model = Model(inputs=inputs, outputs=pred)\r\n model.summary(print_fn=lambda x: logging.info(x + '\\n'))\r\n model.compile(loss=loss, optimizer=self._params.optimizer)\r\n\r\n self.model = model",
"def build_word_embeddings(self):\n if self.mode == \"encode\":\n # Word embeddings are fed from an external vocabulary which has possibly\n # been expanded (see vocabulary_expansion.py).\n encode_emb1 = tf.placeholder(tf.float32, (\n None, None, self.config.word_embedding_dim), \"encode_emb1\")\n # No sequences to decode.\n encode_emb2 = tf.placeholder(tf.float32, (\n None, None, self.config.word_embedding_dim), \"encode_emb2\")\n elif self.mode == \"test\":\n encode_emb1 = tf.placeholder(tf.float32, (\n None, None, self.config.word_embedding_dim), \"encode_emb1\")\n # No sequences to decode.\n encode_emb2 = tf.placeholder(tf.float32, (\n None, None, self.config.word_embedding_dim), \"encode_emb2\")\n else:\n word_emb = tf.get_variable(\n name=\"word_embedding\",\n shape=[self.config.vocab_size, self.config.word_embedding_dim],\n initializer=self.uniform_initializer)\n\n encode_emb1 = tf.nn.embedding_lookup(word_emb, self.encode_ids1)\n encode_emb2 = tf.nn.embedding_lookup(word_emb, self.encode_ids2)\n\n\n self.encode_emb1 = encode_emb1\n self.encode_emb2 = encode_emb2",
"def _generate_embeddings(self, config): \n tr_parts = []\n te_parts = []\n all_columns = []\n for comp in self.components:\n tr_tmp, te_tmp, cols = comp.generate(config)\n if cols != None:\n print(tr_tmp.shape,te_tmp.shape)\n tr_parts.append(tr_tmp)\n te_parts.append(te_tmp)\n all_columns += cols\n X_train = np.concatenate(tr_parts, axis=1)\n X_test = np.concatenate(te_parts, axis=1)\n print(\"Concatenated size:\", X_train.shape, X_test.shape)\n self.feature_columns = all_columns\n return X_train, X_test",
"def build(self, input_shape):\n dimension_list = input_shape.as_list()\n\n if len(dimension_list) != 3:\n raise ValueError( # pragma: no cover\n \"PositionEmbedding expects a 3-dimensional input tensor \"\n \"of shape [batch, sequence, width]\"\n )\n seq_length = dimension_list[1]\n width = dimension_list[2]\n\n # If we are not using dynamic slicing, we must assume that the sequence\n # length is fixed and max_sequence_length should not be specified.\n if not self._use_dynamic_slicing:\n if seq_length is None: # pragma: no cover\n raise ValueError( # pragma: no cover\n \"PositionEmbedding must have `use_dynamic_slicing` set \"\n \"to True (and max_sequence_length set) when the \"\n \"sequence (1st) dimension of the input is None.\"\n )\n if self._max_sequence_length is not None: # pragma: no cover\n raise ValueError( # pragma: no cover\n \"When `use_dynamic_slicing` is False, \"\n \"max_sequence_length should \"\n \"not be specified and we ought to use seq_length to get the \"\n \"variable shape.\"\n )\n\n if self._max_sequence_length is not None:\n weight_sequence_length = self._max_sequence_length\n else:\n weight_sequence_length = seq_length # pragma: no cover\n\n self._position_embeddings = self.add_weight(\n \"embeddings\",\n shape=[weight_sequence_length, width],\n initializer=self._initializer,\n )\n\n super(PositionEmbedding, self).build(input_shape)",
"def build_input_embed(self, n_input, t_input):\n n_embed_matrix = tf.Variable(tf.random_uniform(\n [self.num_ntoken, self.n_embed_dim], minval=-0.05, maxval=0.05), name='n_embed_matrix')\n t_embed_matrix = tf.Variable(tf.random_uniform(\n [self.num_ttoken, self.t_embed_dim], minval=-0.05, maxval=0.05), name='t_embed_matrix')\n n_input_embedding = tf.nn.embedding_lookup(n_embed_matrix, n_input)\n t_input_embedding = tf.nn.embedding_lookup(t_embed_matrix, t_input)\n return n_input_embedding, t_input_embedding",
"def build_model(allidx,MAX_LENGTH,onlyArg):\n wordidx, labelidx, featuresidx, extraidx=allidx\n posidx, neridx, depidx, distanceidx, chnkidx, wikineridx, dbpedianeridx, subneridx = featuresidx\n\n main_input = Input(shape=(MAX_LENGTH,), name='main_input', dtype='int32')\n inputNodes=[main_input]\n\n w2vmodel=\"../embeddings/Domain-Word2vec.model\"\n\n embedding_matrix,EMBEDDING_DIM,vocabulary_size=prepare.wv_embedded(wordidx,w2vmodel)\n \n x = Embedding(output_dim=EMBEDDING_DIM, weights=[embedding_matrix],input_dim=vocabulary_size, input_length=MAX_LENGTH, mask_zero=False)(main_input)\n numnode=int(EMBEDDING_DIM/2)\n\n # pos embedding\n inputNodes,pos_layer=layers.embedlayer(inputNodes,\"pos_input\",posidx,MAX_LENGTH)\n x=Concatenate()([x,pos_layer])\n numnode+=int(len(posidx)/2)\n\n # ner embedding\n inputNodes,ner_layer=layers.embedlayer(inputNodes,\"ner_input\",neridx,MAX_LENGTH)\n x=Concatenate()([x,ner_layer])\n numnode+=int(len(neridx)/2)\n\n inputNodes,wikiner_layer=layers.embedlayer(inputNodes,\"wikiner_input\",wikineridx,MAX_LENGTH)\n x=Concatenate()([x,wikiner_layer])\n numnode+=int(len(wikineridx)/2)\n\n inputNodes,dbpedianer_layer=layers.embedlayer(inputNodes,\"dbpedianer_input\",dbpedianeridx,MAX_LENGTH)\n x=Concatenate()([x,dbpedianer_layer])\n numnode+=int(len(dbpedianeridx)/2)\n\n # dep embedding\n inputNodes,dep0_layer=layers.embedlayer(inputNodes,\"dep0_input\",depidx,MAX_LENGTH)\n x=Concatenate()([x,dep0_layer])\n numnode+=int(len(depidx)/2)\n\n inputNodes,dep1_layer=layers.embedlayer(inputNodes,\"dep1_input\",depidx,MAX_LENGTH)\n x=Concatenate()([x,dep1_layer])\n numnode+=int(len(depidx)/2)\n\n inputNodes,dep2_layer=layers.embedlayer(inputNodes,\"dep2_input\",depidx,MAX_LENGTH)\n x=Concatenate()([x,dep2_layer])\n numnode+=int(len(depidx)/2)\n\n # chnk embedding\n inputNodes,lvl_layer=layers.embedlayer(inputNodes,\"lvl_input\",distanceidx,MAX_LENGTH)\n x=Concatenate()([x,lvl_layer])\n numnode+=int(len(distanceidx)/2)\n\n inputNodes,chnk_layer=layers.embedlayer(inputNodes,\"chnk_input\",chnkidx,MAX_LENGTH)\n x=Concatenate()([x,chnk_layer])\n numnode+=int(len(chnkidx)/2)\n\n # wikiclass embedding\n inputNodes,subner_layer=layers.embedlayer(inputNodes,\"subner_input\",subneridx,MAX_LENGTH)\n x=Concatenate()([x,subner_layer])\n numnode+=int(len(subneridx)/2)\n\n if onlyArg:\n neartrigger_input = Input(shape=(MAX_LENGTH,), name='neartrigger_input', dtype='int32')\n inputNodes.append(neartrigger_input)\n neartrigger_layer = Embedding(output_dim=EMBEDDING_DIM, weights=[embedding_matrix],input_dim=vocabulary_size, \\\n input_length=MAX_LENGTH, mask_zero=False)(neartrigger_input)\n x=Concatenate()([x,neartrigger_layer])\n numnode+=50\n inputNodes,x,numnode=layers.extralayer(inputNodes,x,numnode,extraidx,featuresidx,MAX_LENGTH)\n\n lstm_out = Bidirectional(LSTM(numnode, dropout=0.2,return_sequences=True))(x)\n numnode=int((numnode+len(labelidx))*2/3)\n\n if onlyArg:\n lstm_out = SeqSelfAttention(attention_activation='tanh', attention_width=5)(lstm_out)\n\n lstm_out = Dropout(0.2)(lstm_out)\n out = Dense(numnode)(lstm_out)\n\n crf = CRF(len(labelidx), sparse_target=False) # CRF layer\n main_output=crf(out)\n loss=crf_loss #crf.loss_function\n acc=[crf_accuracy]\n\n model = Model(inputs=inputNodes, outputs=main_output) \n model.compile(loss=loss,optimizer=Adam(0.001),metrics=acc)\n model.summary()\n\n return model",
"def construct_embedding(self):\n i = 0\n self.load_dicts()\n embedding_shape = (max(self.word2idx.values()) + 1,\n self.embedding_size)\n self.embedding = np.zeros(embedding_shape)\n\n with open(self.config.word_vec_fi_glove, 'r') as fi:\n for line in fi:\n word_vec = line.split(\" \")[1:]\n self.embedding[i, :] = np.array(word_vec, dtype=np.float32)\n i += 1\n\n self.write_embedding()",
"def build_inputs(self):\n if self.mode == \"encode\":\n # Word embeddings are fed from an external vocabulary which has possibly\n # been expanded (see vocabulary_expansion.py).\n encode_ids1 = None\n encode_ids2 = None\n encode_mask1 = tf.placeholder(tf.int8, (None, None), name=\"encode_mask1\")\n encode_mask2 = tf.placeholder(tf.int8, (None, None), name=\"encode_mask2\")\n label = None\n\n elif self.mode == \"test\":\n encode_ids1 = None\n encode_ids2 = None\n encode_mask1 = tf.placeholder(tf.int8, (None, None), name=\"encode_mask1\")\n encode_mask2 = tf.placeholder(tf.int8, (None, None), name=\"encode_mask2\")\n label = None\n \n else:\n # Prefetch serialized tf.Example protos.\n input_queue = input_ops.prefetch_input_data(\n self.reader,\n self.config.input_file_pattern,\n shuffle=self.config.shuffle_input_data,\n capacity=self.config.input_queue_capacity,\n num_reader_threads=self.config.num_input_reader_threads)\n\n # Deserialize a batch.\n serialized = input_queue.dequeue_many(self.config.batch_size)\n s1, s2, label = input_ops.parse_example_batch(\n serialized)\n\n encode_ids1 = s1.ids\n encode_ids2 = s2.ids\n\n encode_mask1 = s1.mask\n encode_mask2 = s2.mask\n \n\n\n self.encode_ids1 = encode_ids1\n self.encode_ids2 = encode_ids2\n\n self.encode_mask1 = encode_mask1\n self.encode_mask2 = encode_mask2\n\n self.label = label",
"def test_build_with_embeddings(self):\n # Train a very small model\n dataset = KDDCupDataset()\n dataset.create_fixed_samples(\n *self.data, samples_num=1, partition_sizes=self.partition_sizes)\n dataset.set_current_sample(0)\n sentences = [[str(x) for x in numpy.arange(random.randint(3, 20))]\n for _ in range(25)]\n embedding_model = Word2Vec(\n sentences=sentences, size=self.model_arguments['hidden_layer_size'],\n iter=5)\n dataset = KDDCupDataset(embedding_model=embedding_model)\n dataset.create_fixed_samples(\n *self.data, samples_num=1, partition_sizes=self.partition_sizes)\n dataset.set_current_sample(0)\n # Check build does not raise errors\n model = self.MODEL(\n dataset, embedding_model=embedding_model,\n **self.model_arguments)\n model.build_all()\n resulting_embeddings = model.sess.run(model.embedding_var)\n numpy.testing.assert_array_equal(resulting_embeddings[1:-1],\n embedding_model.wv.syn0)\n model.fit(training_epochs=50)",
"def build(self,unused):\n # (word_embeddings_shape, _) = input_shapes\n # width = word_embeddings_shape.as_list()[-1]\n if self.use_type_embeddings:\n self.type_embeddings = self.add_weight(\n \"token_type_embeddings\",\n shape=[self.token_type_vocab_size, self.word_embedding_width],\n initializer=get_initializer(self.initializer_range),\n dtype=self.dtype)\n\n self.position_embeddings = None\n if self.use_position_embeddings:\n self.position_embeddings = self.add_weight(\n \"position_embeddings\",\n shape=[self.max_position_embeddings, self.word_embedding_width],\n initializer=get_initializer(self.initializer_range),\n dtype=self.dtype)\n\n self.output_layer_norm = tf.keras.layers.LayerNormalization(\n name=LAYER_NORM_NAME, axis=-1, epsilon=1e-12, dtype=tf.float32)\n self.output_dropout = tf.keras.layers.Dropout(\n rate=self.dropout_prob, dtype=tf.float32)\n super().build(unused)",
"def build_embeddings(opt, word_dict, for_encoder='src'):\n if for_encoder=='src':\n embedding_dim = opt.src_word_vec_size #512\n elif for_encoder=='tgt':\n embedding_dim = opt.tgt_word_vec_size\n elif for_encoder=='structure':\n embedding_dim = 64\n\n word_padding_idx = word_dict.stoi[Constants.PAD_WORD]\n num_word_embeddings = len(word_dict)\n \n if for_encoder=='src' or for_encoder=='tgt':\n\n return Embeddings(word_vec_size=embedding_dim,\n position_encoding=opt.position_encoding,\n dropout=opt.dropout,\n word_padding_idx=word_padding_idx,\n word_vocab_size=num_word_embeddings,\n sparse=opt.optim == \"sparseadam\")\n elif for_encoder=='structure':\n return Embeddings(word_vec_size=embedding_dim,\n position_encoding=False,\n dropout=opt.dropout,\n word_padding_idx=word_padding_idx,\n word_vocab_size=num_word_embeddings,\n sparse=opt.optim == \"sparseadam\")",
"def generate_sentence_embeddings():\n generate_embeddings_sentence(\"Data/en-train.json\", \"Data_Sent_Embds/en_sent.pkl\")\n generate_embeddings_sentence(\"Data/es-train.json\", \"Data_Sent_Embds/es_sent.pkl\")\n generate_embeddings_sentence(\"Data/pr-train.json\", \"Data_Sent_Embds/pr_sent.pkl\")",
"def build_model_from_inputs(self):\n if self.term_list is None:\n # no supplied token list -- use vocabulary of the training dataset\n # self.term_list = self.vocabulary\n # info(\"Setting bag dimension to {} from input vocabulary.\".format(len(self.term_list)))\n # will generate the vocabulary from the input\n pass\n info(f\"Building {self.name} model\")\n bagger = None\n if self.config.max_terms is not None:\n bagger = Bag(vocabulary=self.term_list, weighting=self.base_name, ngram_range=self.ngram_range, max_terms=self.config.max_terms)\n else:\n bagger = Bag(vocabulary=self.term_list, weighting=self.base_name, ngram_range=self.ngram_range)\n\n train_idx = self.indices.get_train_instances()\n texts = Text.get_strings(self.text.data.get_slice(train_idx))\n bagger.map_collection(texts, fit=True, transform=False)\n self.term_list = bagger.get_vocabulary()\n\n self.dimension = len(self.term_list)\n self.config.dimension = self.dimension",
"def _build(self, ids):\n # Construct embeddings.\n if self._existing_vocab is None:\n if self.EMBEDDINGS not in self._initializers:\n self._initializers[self.EMBEDDINGS] = basic.create_linear_initializer(\n self._vocab_size)\n self._embeddings = tf.get_variable(\n \"embeddings\",\n shape=[self._vocab_size, self._embed_dim],\n dtype=tf.float32,\n initializer=self._initializers[self.EMBEDDINGS],\n partitioner=self._partitioners.get(self.EMBEDDINGS, None),\n regularizer=self._regularizers.get(self.EMBEDDINGS, None),\n trainable=self._trainable)\n else:\n self._embeddings = tf.get_variable(\n \"embeddings\",\n dtype=tf.float32,\n initializer=self._existing_vocab,\n regularizer=self._regularizers.get(self.EMBEDDINGS, None),\n trainable=self._trainable)\n\n # Lookup embeddings\n return tf.nn.embedding_lookup(\n self._embeddings, ids, name=\"embedding_lookup\")",
"def build_sentence_encoders(tparams, options):\n opt_ret = dict()\n trng = RandomStreams(1234)\n\n #xs, masks, sents_all = [], [], []\n in_outs = []\n\n langs = options['langs']\n for lang in langs:\n # description string: #words x #samples\n # forward\n x = tensor.matrix('x_%s'%lang, dtype='int64')\n mask = tensor.matrix('x_mask_%s'%lang, dtype='float32')\n\n n_timesteps = x.shape[0]\n n_samples = x.shape[1]\n\n # Word embedding (forward)\n emb = tparams['Wemb_%s'%lang][x.flatten()].reshape([n_timesteps, n_samples, options['dim_word']])\n\n if options['bidirectional_enc']:\n # backward RNN\n x_r = x[::-1]\n mask_r = mask[::-1]\n emb_r = tparams['Wemb_%s'%lang][x_r.flatten()].reshape([n_timesteps, n_samples, options['dim_word']])\n\n if options['use_dropout']:\n retain_probability_emb = 1-options['dropout_embedding']\n retain_probability_hidden = 1-options['dropout_hidden']\n retain_probability_source = 1-options['dropout_source']\n rec_dropout = theano.shared(numpy.array([retain_probability_hidden]*2, dtype='float32'))\n rec_dropout_r = theano.shared(numpy.array([retain_probability_hidden]*2, dtype='float32'))\n emb_dropout = theano.shared(numpy.array([retain_probability_emb]*2, dtype='float32'))\n emb_dropout_r = theano.shared(numpy.array([retain_probability_emb]*2, dtype='float32'))\n source_dropout = theano.shared(numpy.float32(retain_probability_source))\n emb *= source_dropout\n if options['bidirectional_enc']:\n embr *= source_dropout\n else:\n rec_dropout = theano.shared(numpy.array([1.]*2, dtype='float32'))\n rec_dropout_r = theano.shared(numpy.array([1.]*2, dtype='float32'))\n emb_dropout = theano.shared(numpy.array([1.]*2, dtype='float32'))\n emb_dropout_r = theano.shared(numpy.array([1.]*2, dtype='float32'))\n\n # Encode sentences\n if options['encoder_%s'%lang] == 'bow':\n sents = (emb * mask[:,:,None]).sum(0)\n else:\n # iteratively push input from first hidden layer until the last\n for i in range(int(options['n_enc_hidden_layers'])):\n layer_name_prefix='encoder_%s_%i'%(lang,i)\n # if first layer input are wembs, otherwise input will be output of last hidden layer\n layer_below=emb if i==0 else layer_below[0]\n layer_below=get_layer(options['encoder_%s'%lang])[1](tparams,\n layer_below, options, None, prefix=layer_name_prefix, mask=mask,\n emb_dropout=emb_dropout, rec_dropout=rec_dropout)\n\n if i==int(options['n_enc_hidden_layers'])-1:\n # sentence embeddings (projections) are the output of the last hidden layer\n proj = layer_below\n\n if options['bidirectional_enc']:\n for i in range(int(options['n_enc_hidden_layers'])):\n layer_name_prefix='encoder_%s_r_%i'%(lang,i)\n # if first layer input are wembs, otherwise input will be output of last hidden layer\n layer_below=emb_r if i==0 else layer_below[0]\n layer_below=get_layer(options['encoder_%s'%lang])[1](tparams,\n layer_below, options, None, prefix=layer_name_prefix, mask=mask_r,\n emb_dropout=emb_dropout_r, rec_dropout=rec_dropout_r)\n\n if i==int(options['n_enc_hidden_layers'])-1:\n # sentence embeddings (projections) are the output of the last hidden layer\n proj_r = layer_below\n\n # use last hidden state of forward and backward RNNs\n sents = concatenate([proj[0][-1],proj_r[0][-1]], axis=proj[0].ndim-2)\n else:\n sents = proj[0][-1]\n\n if options['use_dropout']:\n sents *= shared_dropout_layer((n_samples, options['dim']), use_noise, trng, retain_probability_hidden)\n\n # project sentences into multimodal space\n sents_mm = get_layer('ff')[1](tparams, sents, options, prefix='ff_sentence_mm', activ='linear')\n if 
not 'attention_type' in options or options['attention_type'] == 'dot':\n sents_mm = l2norm(sents_mm)\n\n if options['use_dropout']:\n sents_mm *= shared_dropout_layer((n_samples, options['dim_multimodal']), use_noise, trng, retain_probability_hidden)\n\n # outputs per language\n in_outs.append(([x, mask], sents_mm))\n\n return trng, in_outs",
"def produce_outputs(self):\n # if self.loaded_aggregated:\n # debug(\"Skippping {} mapping due to preloading\".format(self.base_name))\n # return\n # need to calc term numeric index for aggregation\n\n\n # if self.loaded_preprocessed:\n # debug(\"Skippping {} mapping due to preloading\".format(self.base_name))\n # return\n\n bagger = Bag(vocabulary=self.term_list, weighting=self.base_name, ngram_range=self.ngram_range)\n\n self.embeddings = np.ndarray((0, len(self.term_list)), dtype=np.int32)\n for idx in self.indices.get_train_test():\n texts = Text.get_strings(self.text.data.get_slice(idx))\n vecs = bagger.map_collection(texts, fit=False, transform=True)\n self.embeddings = np.append(self.embeddings, vecs, axis=0)\n del texts\n\n # texts = Text.get_strings(self.text.data.get_slice(test_idx))\n # vec_test = bagger.map_collection(texts, fit=do_fit)\n # del texts\n\n # self.embeddings = np.vstack((vec_train, vec_test))\n\n # self.embeddings = np.append(vec_train, vec_test)\n # self.vector_indices = (np.arange(len(train)), np.arange(len(test)))\n\n # set misc required variables\n self.set_constant_elements_per_instance()",
"def make_embeddings(self):\n\t\tprint(\"Presetting embedding weights\")\n\t\t\t\n\t\tnp.random.seed(0)\n\t\tweights = np.random.uniform(low = -0.05, high = 0.05, size = (self.FREQCAP, self.EMB_SIZE))\n\t\t\n\t\tcounter = 0\n\n\t\twords = []\n\t\tweights_tmp = []\n\n\t\twith open(self.embeddingpath) as handle:\n\t\t\tfor i, line in enumerate(handle):\n\t\t\t\ttmp = line.strip()\n\t\t\t\tif len(tmp) > 0:\n\t\t\t\t\tsplit = tmp.split(\" \")\n\t\t\t\t\tif split[0] in self.worddict and len(split[1:]) == 300:\n\t\t\t\t\t\twords.append(split[0])\n\t\t\t\t\t\tweights_tmp.append([float(a) for a in split[1:]])\n\t\t\n\t\tweights_tmp = np.array(weights_tmp)\n\n\t\tfor word, column in zip(words, weights_tmp):\n\t\t\tif self.worddict[word] < self.FREQCAP:\n\t\t\t\tcounter += 1\n\t\t\t\tweights[self.worddict[word],:] = column\n\t\t\n\t\tprint(\"Set\", counter, \"of\", weights.shape[0], \"columns\")\n\t\t\n\t\tif self.EMB_SIZE < weights.shape[-1]:\n\t\t\tprint(\"Reducing dimensionality to\", self.EMB_SIZE)\n\t\t\tpca = PCA(self.EMB_SIZE)\n\t\t\tweights = pca.fit_transform(weights)\n\t\t\n\t\tself.embeddings = [weights]",
"def embedding(self, seqs):\n batch_size, seqlen = seqs.shape\n seqs = np.reshape(seqs, (-1)) # convert to 1-d indexes [(batch_sz*seqlen)]\n embs = self.word2vec[seqs] # lookup [(batch_sz*seqlen) x emb_sz]\n embs = np.reshape(embs, (batch_size, seqlen, -1)) # recover the shape [batch_sz x seqlen x emb_sz]\n return embs",
"def build_image_embeddings(self):\n inception_output = image_embedding.inception_v3(\n self.images,\n trainable=self.train_inception,\n is_training=self.is_training())\n\n # Map inception output onto embedding space.\n with tf.variable_scope(\"image_embedding\") as scope:\n image_embeddings = tf.contrib.layers.fully_connected(\n inputs=inception_output,\n num_outputs=self.config.sentence_embedding_size,\n activation_fn=None,\n weights_initializer=self.initializer,\n biases_initializer=None,\n scope=scope)\n \n if self.mode == \"train\":\n # to avoid overfitting we use dropout for all fully connected layers\n image_embeddings = tf.nn.dropout(image_embeddings, self.config.dropout_keep_prob_encoder)\n\n # Save the embedding size in the graph.\n tf.constant(self.config.sentence_embedding_size, name=\"image_embedding_size\")\n\n self.image_embeddings = image_embeddings",
"def build_cnn_input(config):\n print('Building input to cnn layer')\n input_layer = tf.keras.layers.Input(\n shape=(config['sequence_length'],), name='input_sequence')\n embedding_layer = tf.keras.layers.Embedding(\n input_dim=len(config['vocab']), output_dim=config['emb_size'],\n input_length=config['sequence_length'],\n name='embedding_layer')(input_layer)\n\n return input_layer, embedding_layer",
"def get_embeddings(self, in_data):\n context, da = in_data\n if self.fixed_divide:\n da_emb = super(PersonageContextDAEmbeddingSeq2SeqExtract, self).get_embeddings(da, pad=True)\n else:\n da_emb = super(PersonageContextDAEmbeddingSeq2SeqExtract, self).get_embeddings(da, pad=False)\n\n # Shubhangi: what this step essentially does is it replaces the context words by their token, with UNK as default.\n # again , we don't need this since our context data is essentially vectors therefore commenting this out\n # similary we don't need context embedding , that's exactly what context is already .\n\n # context_emb = []\n context_emb = [float(parameter[0]) for parameter in context]\n\n # for tok in context[-max_context_len:]:\n # context_emb.append(self.dict_token.get(tok, self.UNK_TOKEN))\n\n # Shubhangi: padding is needed because each context sentence could be of different length ,\n # we don't need to include context in padding as we're going to have a fixed size\n # (max_context_len - len(context)) = 0\n\n\n # padding = [self.UNK_TOKEN] * (max_context_len - len(context))\n\n # Shubhangi: padding might be harmless for now therefore not removing ,\n # essentially what this is doing is concatenating the arrays and sending\n if self.use_div_token:\n return context_emb + [self.DIV_TOKEN] + da_emb\n # return padding + context_emb + [self.DIV_TOKEN] + da_emb\n # return padding + context_emb + da_emb\n return context_emb + da_emb",
"def encode_inputs(self, sents):\n word_inputs = []\n pred_inputs = []\n pos_inputs = []\n\n # Preproc to get all preds per run_id\n # Sanity check - make sure that all sents agree on run_id\n assert(all([len(set(sent.run_id.values)) == 1\n for sent in sents]))\n run_id_to_pred = dict([(int(sent.run_id.values[0]),\n self.get_head_pred_word(sent))\n for sent in sents])\n\n # Construct a mapping from running word index to pos\n word_id_to_pos = {}\n for sent in sents:\n indices = sent.index.values\n words = sent.word.values\n\n for index, word in zip(indices,\n nlpir_ws(\"\".join(words))):\n word_id_to_pos[index] = word['tag']\n\n fixed_size_sents = self.get_fixed_size(sents)\n\n\n for sent in fixed_size_sents:\n\n assert(len(set(sent.run_id.values)) == 1)\n\n word_indices = sent.index.values\n sent_words = sent.word.values\n\n sent_str = \"\".join(sent_words)\n\n\n\n pos_tags_encodings = [(CN_POS_TAGS.index(word_id_to_pos[word_ind]) \\\n if word_id_to_pos[word_ind] in CN_POS_TAGS \\\n else 0)\n for word_ind\n in word_indices]\n\n word_encodings = [self.emb.get_word_index(w)\n for w in sent_words]\n\n # Same pred word encodings for all words in the sentence\n pred_word = run_id_to_pred[int(sent.run_id.values[0])]\n pred_word_encodings = [self.emb.get_word_index(pred_word)\n for _ in sent_words]\n\n word_inputs.append([Sample(w) for w in word_encodings])\n pred_inputs.append([Sample(w) for w in pred_word_encodings])\n pos_inputs.append([Sample(pos) for pos in pos_tags_encodings])\n\n # Pad / truncate to desired maximum length\n ret = defaultdict(lambda: [])\n\n for name, sequence in zip([\"word_inputs\", \"predicate_inputs\", \"postags_inputs\"],\n [word_inputs, pred_inputs, pos_inputs]):\n for samples in pad_sequences(sequence,\n pad_func = lambda : Pad_sample(),\n maxlen = self.sent_maxlen):\n ret[name].append([sample.encode() for sample in samples])\n\n return {k: np.array(v) for k, v in ret.iteritems()}"
] | [
"0.77851015",
"0.7130673",
"0.6865367",
"0.67336845",
"0.67199683",
"0.66785735",
"0.6605034",
"0.6559223",
"0.6552313",
"0.65228623",
"0.65226203",
"0.65208817",
"0.6496347",
"0.64477164",
"0.6435911",
"0.6419806",
"0.6393158",
"0.63673705",
"0.63666874",
"0.63221425",
"0.6312769",
"0.631217",
"0.63116777",
"0.629722",
"0.62901664",
"0.62876433",
"0.6287107",
"0.6239012",
"0.62267745",
"0.6216878"
] | 0.71874005 | 1 |
Sets up the function to restore inception variables from checkpoint. | def setup_inception_initializer(self):
if self.mode != "inference":
# Restore inception variables only.
saver = tf.train.Saver(self.inception_variables)
def restore_fn(sess):
tf.logging.info("Restoring Inception variables from checkpoint file %s",
self.config.inception_checkpoint_file)
saver.restore(sess, self.config.inception_checkpoint_file)
self.init_fn = restore_fn | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_init_fn():\n checkpoint_exclude_scopes = [\"InceptionV1/Logits\", \"InceptionV1/AuxLogits\"]\n\n exclusions = [scope.strip() for scope in checkpoint_exclude_scopes]\n\n variables_to_restore = []\n for var in slim.get_model_variables():\n excluded = False\n for exclusion in exclusions:\n if var.op.name.startswith(exclusion):\n excluded = True\n break\n if not excluded:\n variables_to_restore.append(var)\n\n return slim.assign_from_checkpoint_fn(\n os.path.join(checkpoints_dir, 'inception_v1.ckpt'),\n variables_to_restore)",
"def _restore_variables(self, checkpoint):\n checkpoint_variables_map = list_variables(checkpoint)\n valid_variable = lambda name: name.startswith('model/encoder') or \\\n name.startswith('model/decoder')\n checkpoint_variable_names = [name for (name, _) in checkpoint_variables_map\n if valid_variable(name)]\n\n variables = get_variables_to_restore()\n variable_names = [v.name.split(':')[0] for v in variables]\n assignment_map = {}\n for var in checkpoint_variable_names:\n if var in variable_names:\n assignment_map[var] = var\n\n init_from_checkpoint(checkpoint, assignment_map)",
"def restore(self, checkpoint):\n raise NotImplementedError",
"def restore_fn(flags):\n # if flags.tf_initial_checkpoint is None:\n # return None\n\n # Warn the user if a checkpoint exists in the train_dir. Then ignore.\n # if tf.train.latest_checkpoint(flags.train_dir):\n # tf.logging.info(\n # 'Ignoring --checkpoint_path because a checkpoint already exists in %s'\n # % flags.train_dir)\n # return None\n\n exclusions = []\n if flags.checkpoint_exclude_scopes:\n exclusions = [scope.strip()\n for scope in flags.checkpoint_exclude_scopes.split(',')]\n\n variables_to_restore = []\n for var in slim.get_model_variables():\n excluded = False\n for exclusion in exclusions:\n if var.op.name.startswith(exclusion):\n excluded = True\n break\n if not excluded:\n variables_to_restore.append(var)\n # Change model scope if necessary.\n if flags.checkpoint_model_scope is not None:\n variables_to_restore = \\\n {var.op.name.replace(flags.model_name,\n flags.checkpoint_model_scope): var\n for var in variables_to_restore}\n\n tf.compat.v1.logging.info('++++++++++++++++++++')\n tf.compat.v1.logging.info('Fine-tuning from %s. Ignoring missing vars: %s' %\n (flags.pre_trained_checkpoint, flags.ignore_missing_vars))\n slim.assign_from_checkpoint_fn(flags.pre_trained_checkpoint,\n variables_to_restore,\n ignore_missing_vars=flags.ignore_missing_vars)",
"def _init_checkpoint_and_variables(pretrain_checkpoint_path,\n pretrain_checkpoint_exclude_scopes):\n checkpoint_reader = tf.contrib.framework.load_checkpoint(\n pretrain_checkpoint_path)\n return get_variables_to_restore_from_pretrain_checkpoint(\n pretrain_checkpoint_exclude_scopes,\n checkpoint_reader.get_variable_to_shape_map())",
"def get_init_fn():\n\n variables_to_restore = []\n for var in slim.get_model_variables():\n variables_to_restore.append(var)\n\n checkpoint_path = tf.train.latest_checkpoint(\"./base_checkpoint\")\n\n tf.logging.info('Fine-tuning from %s' % checkpoint_path)\n\n return slim.assign_from_checkpoint_fn(\n checkpoint_path,\n variables_to_restore,\n ignore_missing_vars=False)",
"def _get_init_fn():\n exclusions = []\n if FLAGS.checkpoint_exclude_scopes:\n exclusions = [scope.strip()\n for scope in FLAGS.checkpoint_exclude_scopes.split(',')]\n\n variables_to_restore = []\n for var in slim.get_model_variables():\n excluded = False\n for exclusion in exclusions:\n if var.op.name.startswith(exclusion):\n excluded = True\n break\n if not excluded:\n variables_to_restore.append(var)\n\n if tf.gfile.IsDirectory(FLAGS.checkpoint_path):\n checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)\n else:\n checkpoint_path = FLAGS.checkpoint_path\n\n tf.logging.info('Fine-tuning from {}'.format(checkpoint_path))\n\n return slim.assign_from_checkpoint_fn(checkpoint_path, variables_to_restore)",
"def _initialize_variables(self, finetune: str=None, **kwargs) -> None:\n if finetune is None:\n super()._initialize_variables(**kwargs) # default initialization\n else:\n self._saver = tf.train.Saver(max_to_keep=100000000)\n logging.info('Restoring variables from `%s`', finetune)\n self._saver.restore(self.session, finetune)",
"def restore(self, checkpoint_data):\n\n self.iteration = checkpoint_data['iteration']\n self.action_requests = checkpoint_data['action_requests']\n\n self.experience = deque()\n for episode in checkpoint_data['experience']:\n ep = []\n for transition in episode:\n ep.append(tuple(transition))\n\n self.experience.append(ep)",
"def __init__(self, checkpoint_path_dict, var_scopes_dict=None, \n checkpoint_exclude_scopes_dict=None):\n \n tf.logging.info(\"Create RestoreCheckpointHook.\")\n #super(IteratorInitializerHook, self).__init__()\n self.checkpoint_path_dict = checkpoint_path_dict\n \n self.var_scopes_dict=var_scopes_dict\n self.checkpoint_exclude_scopes_dict=checkpoint_exclude_scopes_dict\n\n print(self.checkpoint_path_dict, self.var_scopes_dict, self.checkpoint_exclude_scopes_dict)",
"def restore(self):\n\n self.brain.restore_checkpoint()",
"def resume(self, checkpoint):\n model_dict = paddle.load(checkpoint)\n self.model.set_state_dict(model_dict)",
"def _restore_checkpoint(self, checkpoint_path):\n self.logger.info(\"Loading checkpoint: {} ...\".format(checkpoint_path))\n checkpoint = torch.load(checkpoint_path)\n pretrained_dict = checkpoint['state_dict'] # 预训练模型的state_dict\n model_dict = self.model.state_dict() # 当前用来训练的模型的state_dict\n \n if pretrained_dict.keys() != model_dict.keys(): # 需要进行参数的适配\n print('Parameters are inconsistant, adapting model parameters ...')\n # 在合并前(update),需要去除pretrained_dict一些不需要的参数\n # 只含有识别分支的预训练模型参数字典中键'0', '1'对应全模型参数字典中键'2', '3'\n pretrained_dict['2'] = transfer_state_dict(pretrained_dict['0'], model_dict['2'])\n pretrained_dict['3'] = transfer_state_dict(pretrained_dict['1'], model_dict['3'])\n del pretrained_dict['0'] # 把原本预训练模型中的键值对删掉,以免错误地更新当前模型中的键值对\n del pretrained_dict['1']\n model_dict.update(pretrained_dict) # 更新(合并)模型的参数\n self.model.load_state_dict(model_dict)\n else:\n print('Parameters are consistant, load state dict directly ...')\n self.model.load_state_dict(checkpoint['state_dict'])\n # self.optimizer.load_state_dict(checkpoint['optimizer'])\n # if self.with_cuda:\n # for state in self.optimizer.state.values():\n # for k, v in state.items():\n # if isinstance(v, torch.Tensor):\n # state[k] = v.cuda(self.device)",
"def _restore(self, checkpoint):\n checkpoint_path = os.path.join(checkpoint, \"model_weights\")\n self.model.load_weights(checkpoint_path)",
"def init_game_setting(self):\n np.random.seed(1) \n self.s_prev = np.zeros((80, 80, 1))\n print('loading trained model from {}'.format(self.model_path))\n self.sess = tf.InteractiveSession(graph=self.model)\n self.saver.restore(self.sess, self.model_path)",
"def _restore_training_state(self, restore_state):\n self.load_state_dict(restore_state[\"model\"])\n self.optimizer.load_state_dict(restore_state[\"optimizer\"])\n self.lr_scheduler.load_state_dict(restore_state[\"lr_scheduler\"])\n start_iteration = restore_state[\"iteration\"] + 1\n if self.config[\"verbose\"]:\n print(f\"Restored checkpoint to iteration {start_iteration}.\")\n\n if restore_state[\"best_model_found\"]:\n # Update checkpointer with appropriate information about best model\n # Note that the best model found so far may not be the model in the\n # checkpoint that is currently being loaded.\n self.checkpointer.best_model_found = True\n self.checkpointer.best_iteration = restore_state[\"best_iteration\"]\n self.checkpointer.best_score = restore_state[\"best_score\"]\n if self.config[\"verbose\"]:\n print(\n f\"Updated checkpointer: \"\n f\"best_score={self.checkpointer.best_score:.3f}, \"\n f\"best_iteration={self.checkpointer.best_iteration}\"\n )\n return start_iteration",
"def setup_encoder_initializer(self):\n if self.mode != \"inference\":\n # Restore inception variables only.\n saver = tf.train.Saver(self.autoencoder_variables)\n\n def restore_fn(sess):\n tf.logging.info(\"Restoring Autoencoder variables from checkpoint dir %s\",\n self.config.autoencoder_checkpoint_dir)\n saver.restore(sess, tf.train.latest_checkpoint(\n self.config.autoencoder_checkpoint_dir))\n\n if self.use_pretrained_ae:\n self.init_fn = restore_fn\n else:\n self.init_fn = None",
"def restore_checkpoint(restore_dir):\n restored_train_state = checkpoints.restore_checkpoint(restore_dir, None)\n variables = {'params': restored_train_state['optimizer']['target']}\n model_state = restored_train_state['model_state']\n variables.update(model_state)\n return variables",
"def restore(self, checkpoint_path: str):\r\n raise NotImplementedError",
"def get_init_fn():\n\tif train_config['checkpoint_path'] is None:\n\t\treturn None\n\t# Warn the user if a checkpoint exists in the train_dir. Then we'll be\n\t# ignoring the checkpoint anyway.\n\tif tf.train.latest_checkpoint(train_config['checkpoint_path']):\n\t\ttf.logging.info(\n\t\t\t'Ignoring --checkpoint_path because a checkpoint already exists in %s'\n\t\t\t% train_config['checkpoint_path'])\n\t\treturn None\n\n\texclusions = []\n\tif train_config['checkpoint_exclude_scopes']:\n\t\texclusions = [scope.strip()\n\t\t for scope in train_config['checkpoint_exclude_scopes'].split(',')]\n\n\t\tvariable_to_restore = []\n\t\tfor var in slim.get_model_variables():\n\t\t\texcluded = False\n\t\t\tfor exclusion in exclusions:\n\t\t\t\tif var.op.name.startswith(exclusion):\n\t\t\t\t\texcluded = True\n\t\t\t\t\tbreak\n\t\t\tif not excluded:\n\t\t\t\tvariable_to_restore.append(var)\n\n\t\tif tf.gfile.IsDirectory(train_config['checkpoint_path']):\n\t\t\tcheckpoint_path = tf.train.latest_checkpoint(train_config['checkpoint_path'])\n\t\telse:\n\t\t\tcheckpoint_path = train_config['checkpoint_path']\n\n\t\ttf.logging.info('Fune-tuning from %s' % checkpoint_path)\n\n\t\treturn slim.assign_from_checkpoint_fn(\n\t\t\tcheckpoint_path,\n\t\t\tvariable_to_restore,\n\t\t\tignore_missing_vars=train_config['ignore_missing_vars'])",
"def _resume_checkpoint(self, resume_path):\n self.logger.info(\"Loading checkpoint: {} ...\".format(resume_path))\n checkpoint = torch.load(resume_path)\n self.start_epoch = checkpoint['epoch'] + 1\n self.mnt_best = checkpoint['monitor_best']\n\n # load model params from checkpoint.\n if checkpoint['config']['name'] != self.config['name']:\n self.logger.warning(\n 'Warning: Architecture configuration given in config file is different from that of checkpoint. ' + \\\n 'This may yield an exception while state_dict is being loaded.')\n self.model.load_state_dict(checkpoint['model_state_dict'])\n\n # load optimizer state from checkpoint only when optimizer type is not changed. \n if checkpoint['config']['optimizer']['type'] != self.config['optimizer']['type']:\n self.logger.warning('Warning: Optimizer type given in config file is different from that of checkpoint. ' + \\\n 'Optimizer parameters not being resumed.')\n self.optimizer.load_state_dict(checkpoint['model_optimizer'])\n\n # load scheduler state from checkpoint only when scheduler type is not changed\n if checkpoint['config']['scheduler']['type'] != self.config['scheduler']['type']:\n self.logger.warning('Warning: Scheduler type given in config file is different from that of checkpoint. ' + \\\n 'Scheduler parameters not being resumed.')\n self.scheduler.load_state_dict(checkpoint['model_scheduler'])\n\n self.train_logger = checkpoint['logger']\n self.logger.info(\"Checkpoint '{}' (epoch {}) loaded\".format(resume_path, self.start_epoch))",
"def _restore(self):\n self._logger = LOGGER\n self._param_store = pyro.get_param_store()\n self.set_state(self.best_params)\n self._alpha_guide_prior_params = dict(\n self._param_store.named_parameters()\n )",
"def restore(self):\n if os.path.isfile( \\\n os.path.join(self.network_path,'net_parameters.nnprm.index')):\n self.load_network_parameters(\n file_name='net_parameters', file_path=self.network_path)\n else:\n self.log(\"Could not load previous network parameters from:\\n{}\".format(\\\n os.path.join(self.network_path,'net_parameters.nnprm') ))\n self.log(\"Starting with untrained parameters\")",
"def load_variables_from_checkpoint(sess, start_checkpoint):\n saver = tf.train.Saver(tf.global_variables())\n saver.restore(sess, start_checkpoint)",
"def load_variables_from_checkpoint(sess, start_checkpoint):\n saver = tf.train.Saver(tf.global_variables())\n saver.restore(sess, start_checkpoint)",
"def _resume_checkpoint(self, resume_path):\n self.logger.info(\"Loading checkpoint: {} ...\".format(resume_path))\n checkpoint = torch.load(resume_path)\n self.start_epoch = checkpoint['epoch'] + 1\n self.monitor_best = checkpoint['monitor_best']\n self.model.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n # 将参数全部放入GPU\n if self.with_cuda:\n for state in self.optimizer.state.values():\n for k, v in state.items():\n if isinstance(v, torch.Tensor):\n state[k] = v.cuda(self.device)\n self.train_logger = checkpoint['logger']\n self.config = checkpoint['config']\n self.logger.info(\"Checkpoint '{}' (epoch {}) loaded\".format(resume_path, self.start_epoch))",
"def _check_restore_parameters(sess, saver):\r\n ckpt = tf.train.get_checkpoint_state(os.path.dirname( 'final_model/'))\r\n if ckpt and ckpt.model_checkpoint_path:\r\n print(\"Loading parameters for the Chatbot -> {}\".format(ckpt.model_checkpoint_path))\r\n saver.restore(sess, ckpt.model_checkpoint_path)\r\n\r\n else:\r\n print(\"Initializing fresh parameters for the Chatbot\")",
"def _resume_checkpoint(self, resume_path):\n resume_path = str(resume_path)\n self.logger.info(\"Loading checkpoint: {} ...\".format(resume_path))\n checkpoint = torch.load(resume_path)\n self.start_epoch = checkpoint['epoch'] + 1\n\n # load architecture params from checkpoint.\n if checkpoint['config']['model'] != self.config['model']:\n self.logger.warning(\"Warning: Architecture configuration given in config file is different from that of \"\n \"checkpoint. This may yield an exception while state_dict is being loaded.\")\n self.model.load_state_dict(checkpoint['state_dict'])\n\n # load optimizer state from checkpoint only when optimizer type is not changed.\n if checkpoint['config']['trainer']['optimizer']['type'] != self.config['trainer']['optimizer']['type']:\n self.logger.warning(\"Warning: Optimizer type given in config file is different from that of checkpoint. \"\n \"Optimizer parameters not being resumed.\")\n else:\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n self.logger.info(\"Checkpoint loaded. Resume from epoch {}\".format(self.start_epoch))",
"def restore_env():\n\n def restore(key, value):\n if value is None:\n if key in os.environ:\n del os.environ[key]\n else:\n os.environ[key] = value\n\n restore(\"TF_XLA_FLAGS\", ORIGINAL_TF_XLA_FLAGS)\n restore(\"TF_FORCE_GPU_ALLOW_GROWTH\", ORIGINAL_TF_FORCE_GPU_ALLOW_GROWTH)",
"def checkpoint():"
] | [
"0.71575403",
"0.7000785",
"0.68606037",
"0.67431253",
"0.66421974",
"0.65625894",
"0.6548759",
"0.654029",
"0.63851607",
"0.63566697",
"0.6351481",
"0.63231075",
"0.63209087",
"0.63020325",
"0.6232951",
"0.6192776",
"0.6189582",
"0.6169525",
"0.61546874",
"0.61290634",
"0.6127255",
"0.6104785",
"0.6064348",
"0.6048607",
"0.6048607",
"0.6041255",
"0.6039926",
"0.6032424",
"0.6001804",
"0.6001417"
] | 0.7782891 | 0 |
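An illustrative aside on the row above: the document's init_fn follows the standard TensorFlow 1.x partial-restore idiom, i.e. a tf.train.Saver built over a restricted variable list and invoked inside a restore callback. Below is a minimal, self-contained sketch of that pattern under the assumption of TF 1.x; the "InceptionV3" scope, the toy variable and the checkpoint path are stand-ins for illustration and are not taken from this dataset row.

import tensorflow as tf  # assumes TensorFlow 1.x (graph/session API), as in the row above

# Toy stand-in for the Inception variables: one variable under a named scope.
with tf.variable_scope("InceptionV3"):
    w = tf.get_variable("w", shape=[3], initializer=tf.zeros_initializer())

# Restore only the variables in that scope, mirroring self.inception_variables above.
inception_variables = tf.get_collection(
    tf.GraphKeys.GLOBAL_VARIABLES, scope="InceptionV3")
saver = tf.train.Saver(inception_variables)
checkpoint_file = "/tmp/toy_inception.ckpt"  # illustrative path, not from the row

def restore_fn(sess):
    # Only the listed variables are overwritten; all others keep their initializers.
    saver.restore(sess, checkpoint_file)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.save(sess, checkpoint_file)  # write a checkpoint so the restore below can run
    restore_fn(sess)                   # what the model's init_fn would do at startup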
3D plot of shell elements coloured by material property. | def shell_properties_3d(
shells: List[Shell],
prop_f: Callable[[Material], float],
prop_units: str,
cmap: matplotlib.colors.Colormap = default_cmap,
colorbar: bool = False,
label: bool = False,
outline: bool = True,
new_fig: bool = True,
):
# Coordinates for rotating the plot perspective.
xs, ys, zs = [], [], []
# Vertices of nodes for each shell.
verts = []
# Min and max values for colour normalization.
prop_min, prop_max = np.inf, -np.inf
for shell in shells:
verts.append([])
for node in shell.nodes():
xs.append(node.x)
ys.append(node.y)
zs.append(node.z)
verts[-1].append([node.x, node.z, node.y])
shell_prop = prop_f(shell.section)
if shell_prop < prop_min:
prop_min = shell_prop
if shell_prop > prop_max:
prop_max = shell_prop
xs, ys, zs = np.array(xs), np.array(ys), np.array(zs)
norm = matplotlib.colors.Normalize(vmin=prop_min, vmax=prop_max)
# Setup a new 3D landscape figure.
if new_fig:
fig, ax = ax_3d(xs=xs, ys=zs, zs=ys)
else:
fig = plt.gcf()
ax = plt.gca()
# Keep track of all values used for colours.
# This is so we don't add duplicate labels.
values = set()
for i, verts_ in enumerate(verts):
value = prop_f(shells[i].section)
colour = cmap(norm(value))
label_str = None
if label and value not in values:
values.add(value)
label_str = f"{value}{prop_units}"
poly = Poly3DCollection(
[verts_],
facecolors=colour,
edgecolors="black" if outline else "none",
linewidths=0.01 if outline else 0,
label=label_str,
)
poly._facecolors2d = poly._facecolors3d
poly._edgecolors2d = poly._edgecolors3d
ax.add_collection3d(poly)
if label:
plt.legend()
# Add a colorbar if requested.
if colorbar:
mappable = matplotlib.cm.ScalarMappable(cmap=cmap, norm=norm)
clb = fig.colorbar(mappable, shrink=0.7)
clb.ax.set_title(prop_units) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot3d(self):\n plot_rupture_wire3d(self)",
"def get_3d_plot(three_d_matrix, ax, title, length):\r\n x, y, z = np.where(three_d_matrix != 0)\r\n ax.scatter(x, y, z, c='blue')\r\n ax.set_xlabel('x')\r\n ax.set_ylabel('y')\r\n ax.set_xlim(0, length)\r\n ax.set_ylim(0, length)\r\n ax.set_title(title)",
"def plot_3d_object(object_):\n \n # Initialize renderer instance\n r = Renderer()\n\n # Add surfaces and goal regions to the renderer instance\n for surf in object_:\n r.add((object_[surf][0],'b',1))\n if len(object_[surf])>2:\n r.add((object_[surf][2],'r',1))\n r.add((gPoint(-15,-15,-15),'k',1))\n r.show()",
"def shell_properties_top_view(\n shells: List[Shell],\n prop_f: Optional[Callable[[Material], float]] = None,\n prop_units: Optional[str] = None,\n cmap: matplotlib.colors.Colormap = default_cmap,\n colorbar: bool = False,\n label: bool = False,\n outline: bool = True,\n):\n # Vertices of nodes for each shell.\n verts = []\n # Min and max values for colour normalization.\n prop_min, prop_max = np.inf, -np.inf\n for shell in shells:\n verts.append([])\n for node in shell.nodes():\n verts[-1].append([node.x, node.z])\n shell_prop = prop_f(shell.section) if prop_f is not None else 0\n if shell_prop < prop_min:\n prop_min = shell_prop\n if shell_prop > prop_max:\n prop_max = shell_prop\n if prop_f is not None:\n norm = matplotlib.colors.Normalize(vmin=prop_min, vmax=prop_max)\n\n # Keep track of all values used for colours.\n # This is so we don't add duplicate labels.\n values = set()\n\n ax = plt.gca()\n for shell, shell_verts in zip(shells, verts):\n colour, label_str = \"none\", None\n if prop_f is not None:\n value = prop_f(shell.section)\n colour = cmap(norm(value))\n if label and value not in values:\n values.add(value)\n label_str = f\"{value} {prop_units}\"\n ax.add_collection(\n matplotlib.collections.PolyCollection(\n [shell_verts],\n facecolors=colour,\n edgecolors=\"black\" if outline else \"none\",\n linewidths=0.01 if outline else 0,\n label=label_str,\n )\n )\n\n if prop_f is not None:\n if label:\n plt.legend()\n if colorbar:\n mappable = matplotlib.cm.ScalarMappable(cmap=cmap, norm=norm)\n clb = plt.gcf().colorbar(mappable, shrink=0.7)\n clb.ax.set_title(prop_units)",
"def plot_environment_and_mean(self):\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.set_title('Environment, Mean and Data on a 3d Plot')\n ax.set_xlabel('Action')\n ax.set_ylabel('Context')\n ax.set_zlabel('Reward')\n\n # plot self.mu on a wire frame.\n ax.plot_wireframe(self.input_mesh[0], self.input_mesh[1],\n self.mu.reshape(self.input_mesh[0].shape), alpha=0.6, color='g', label='mean')\n # now plot sample environment at all values of the input space and plot.\n ax.plot_wireframe(self.input_mesh[0], self.input_mesh[1],\n np.array(self.sample_from_environment(self.input_mesh)).reshape(self.input_mesh[0].shape),\n alpha=0.5, color='b', label='environment')\n # scatter plot data on top.\n ax.scatter([x[0] for x in self.X], [x[1] for x in self.X], self.Y, c='r',\n marker='o', alpha=1.0, label='data')\n ax.legend()\n return",
"def plot3d(data_x, data_y, data_z, vol):\n fig = go.Figure(\n data = [\n go.Mesh3d(\n x = data_x,\n y = data_y,\n z = data_z,\n i = [7, 0, 0, 0, 4, 4, 6, 6, 4, 0, 3, 2], # These are needed, numbers from documentation\n j = [3, 4, 1, 2, 5, 6, 5, 2, 0, 1, 6, 3],\n k = [0, 7, 2, 3, 6, 7, 1, 1, 5, 5, 7, 6],\n colorscale=[[0, 'darkblue'],\n [0.5, 'lightskyblue'],\n [1, 'darkblue']],\n intensity = np.linspace(0, 1, 8, endpoint=True),\n showscale=False,\n opacity = 0.6\n )\n ],\n layout = go.Layout(\n title = \"Le volume est: \" + str(vol),\n autosize = True\n )\n )\n\n # This prints it\n pyo.iplot(fig, filename='Determinant-Volume')",
"def show(self):\n from matplotlib import pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n\n fig = plt.figure()\n ax = Axes3D(fig)\n pos = self.cluster.get_positions()\n from itertools import combinations\n for tri in self.mesh.simplices:\n for comb in combinations(tri, 2):\n x1 = pos[comb[0], 0]\n x2 = pos[comb[1], 0]\n y1 = pos[comb[0], 1]\n y2 = pos[comb[1], 1]\n z1 = pos[comb[0], 2]\n z2 = pos[comb[1], 2]\n ax.plot([x1, x2], [y1, y2], zs=[z1, z2], color=\"black\")\n plt.show()",
"def plot_results_3d(p_x, p_y, p_z, h_exp = 0.5):\n plt.figure(figsize = (10, 10))\n ax3d = plt.axes(projection = '3d') \n\n color=iter(cm.rainbow(np.linspace(0,1,p_x.shape[0]))) # (1)\n labels = ['Particle ' + str(pl+1) for pl in np.arange(0, p_x.shape[0], step = 1)]\n \n for p in np.arange(0, p_x.shape[0], step = 1): \n c = next(color) # (1)\n for t in np.arange(0, p_x.shape[1], step = 1): \n ax3d.plot3D(p_x[p, t], p_y[p, t], p_z[p, t], 'x', c = c, label = labels[p]) \n legend_without_duplicate_labels(ax3d)\n ax3d.set_xlabel('X (pixels)') \n ax3d.set_ylabel('Y (pixels') \n ax3d.set_zlabel('Z (pixels)') \n ax3d.set_xlim([origin-150,origin+150])\n ax3d.set_ylim([origin-150,origin+150])\n ax3d.set_zlim([origin-150,origin+150])\n ax3d.set_title('3D particle trajectories - H = ' + str(h_exp))",
"def plot3D(x):\n cycol = cycle('bgrcmk')\n fig = plt.figure()\n ax = Axes3D(fig)\n for i in range(5):\n ax.scatter(x[:, i, 0], x[:, i, 1], x[:, i, 2], c=next(cycol),\n marker='.')\n plt.show()",
"def plot_3D_compare(true_lab, pred_lab):\n ref_shape = [true_lab.shape[1], true_lab.shape[2], true_lab.shape[3]]\n true_loc = np.where(true_lab == 1)\n pred_loc = np.where(pred_lab == 1)\n fig = plt.figure()\n ax = plt.axes(projection=\"3d\")\n axl = plt.gca()\n axl.set_xlim3d([0, ref_shape[0]])\n axl.set_ylim3d([0, ref_shape[1]])\n axl.set_zlim3d([0, ref_shape[2]])\n ax.scatter3D(true_loc[0], true_loc[1], true_loc[2], marker=\".\", alpha=0.9)\n ax.scatter3D(pred_loc[0], pred_loc[1], pred_loc[2], marker=\".\", alpha=0.05)\n\n fig.set_facecolor('black')\n ax.set_facecolor('black')\n ax.grid(False)\n ax.w_xaxis.pane.fill = False\n ax.w_yaxis.pane.fill = False\n ax.w_zaxis.pane.fill = False\n\n ax.set_xlabel('Width', c='white')\n ax.set_ylabel('Depth', c='white')\n ax.set_zlabel('Height', c='white')\n\n plt.show()",
"def plot_color_distribution(img, trimap):\n\n f = img[trimap == 255]\n b = img[trimap == 0]\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.set_xlabel('Red')\n ax.set_ylabel('Blue')\n ax.set_zlabel('Green')\n ax.set_title(\"3D RGB Scatter\")\n\n for source in [f, b]:\n X, Y, Z = source[:, 0], source[:, 1], source[:, 2]\n ax.scatter3D(X, Y, Z)",
"def viewer(\n self, units='nm', \n draw_edges=True, draw_vertices=True,\n color_by='radius'\n ):\n try:\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D \n from matplotlib import cm\n except ImportError:\n print(\"Skeleton.viewer requires matplotlib. Try: pip install matplotlib --upgrade\")\n return\n\n RADII_KEYWORDS = ('radius', 'radii', 'r')\n COMPONENT_KEYWORDS = ('component', 'components', 'c')\n\n fig = plt.figure(figsize=(10,10))\n ax = Axes3D(fig)\n ax.set_xlabel(units)\n ax.set_ylabel(units)\n ax.set_zlabel(units)\n\n # Set plot axes equal. Matplotlib doesn't have an easier way to\n # do this for 3d plots.\n X = self.vertices[:,0]\n Y = self.vertices[:,1]\n Z = self.vertices[:,2]\n\n max_range = np.array([X.max()-X.min(), Y.max()-Y.min(), Z.max()-Z.min()]).max() / 2.0\n\n mid_x = (X.max()+X.min()) * 0.5\n mid_y = (Y.max()+Y.min()) * 0.5\n mid_z = (Z.max()+Z.min()) * 0.5\n ax.set_xlim(mid_x - max_range, mid_x + max_range)\n ax.set_ylim(mid_y - max_range, mid_y + max_range)\n ax.set_zlim(mid_z - max_range, mid_z + max_range)\n ### END EQUALIZATION CODE ###\n\n component_colors = ['k', 'deeppink', 'dodgerblue', 'mediumaquamarine', 'gold' ]\n\n def draw_component(i, skel):\n component_color = component_colors[ i % len(component_colors) ]\n\n if draw_vertices:\n xs = skel.vertices[:,0]\n ys = skel.vertices[:,1]\n zs = skel.vertices[:,2]\n\n if color_by in RADII_KEYWORDS:\n colmap = cm.ScalarMappable(cmap=cm.get_cmap('rainbow'))\n colmap.set_array(skel.radii)\n\n normed_radii = skel.radii / np.max(skel.radii)\n yg = ax.scatter(xs, ys, zs, c=cm.rainbow(normed_radii), marker='o')\n cbar = fig.colorbar(colmap)\n cbar.set_label('radius (' + units + ')', rotation=270)\n elif color_by in COMPONENT_KEYWORDS:\n yg = ax.scatter(xs, ys, zs, color=component_color, marker='.')\n else:\n yg = ax.scatter(xs, ys, zs, color='k', marker='.')\n\n if draw_edges:\n for e1, e2 in skel.edges:\n pt1, pt2 = skel.vertices[e1], skel.vertices[e2]\n ax.plot( \n [ pt1[0], pt2[0] ],\n [ pt1[1], pt2[1] ],\n zs=[ pt1[2], pt2[2] ],\n color=(component_color if not draw_vertices else 'silver'),\n linewidth=1,\n )\n\n if color_by in COMPONENT_KEYWORDS:\n for i, skel in enumerate(self.components()):\n draw_component(i, skel)\n else:\n draw_component(0, self)\n\n plt.show()",
"def force_show(sub_Idx):\n force_path = './dataset/subj_' + f'{sub_Idx:02d}'+ '/forces/force_' + f'{sub_Idx:02d}' + '.txt'\n image_path = './dataset/subj_' + f'{sub_Idx:02d}'+ '/images/'\n force_num = len(glob.glob(image_path + '*.jpg'))\n force_list = load_force_txt(force_path,force_num)\n print('showing '+f'{force_num:03d}'+ ' raw forces for subject ' + f'{sub_Idx:02d}')\n\n fig = plt.figure(figsize = (10, 7)) \n ax = plt.axes(projection =\"3d\") \n\n for x, y, z in force_list:\n ax.scatter3D(x, y, z, color = \"green\")\n ax.set_xlabel('X-axis', fontweight ='bold') \n ax.set_ylabel('Y-axis', fontweight ='bold') \n ax.set_zlabel('Z-axis', fontweight ='bold')\n plt.title(\"3D force data\") \n plt.show()",
"def show_vertex_colors():\n if bpy.app.version > (2, 80, 0):\n for area in bpy.context.screen.areas:\n if area.type == 'VIEW_3D':\n for space in area.spaces:\n if space.type == 'VIEW_3D':\n space.shading.type = 'SOLID'\n space.shading.color_type = 'VERTEX'",
"def heatmap3d(xL, yL ,zL, valueL, grid=True, color='cool',\n size=100, marker='o',alpha=0.8,save=False, savepath='./'):\n from mpl_toolkits.mplot3d import Axes3D\n #Normalize valueL into 0 to 1\n normalizedValueL = list( (valueL - min(valueL)) / (max(valueL) - min(valueL)) )\n\n if color=='hot':\n colors = plt.cm.hot_r(normalizedValueL)\n # For color bar display\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.hot_r)\n elif color=='cool':\n colors = plt.cm.cool_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.cool_r)\n elif color=='hsv':\n colors = plt.cm.hsv_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.hsv_r)\n elif color=='jet':\n colors = plt.cm.jet_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.jet_r)\n elif color=='gray':\n colors = plt.cm.gray_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.gray_r)\n elif color=='spring':\n colors = plt.cm.spring_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.spring_r)\n elif color=='summer':\n colors = plt.cm.summer_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.summer_r)\n elif color=='autumn':\n colors = plt.cm.autumn_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.autumn_r)\n elif color=='winter':\n colors = plt.cm.winter_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.winter_r)\n else:\n print('Since there is no color, it will be the default cool')\n colors = plt.cm.cool_r(normalizedValueL)\n colmap = plt.cm.ScalarMappable(cmap=plt.cm.cool_r)\n\n colmap.set_array(valueL)\n\n fig = plt.figure()\n ax = Axes3D(fig)\n\n # Set the grid on of off\n if not grid:\n ax.grid(False)\n\n ax.scatter(xL,yL,zL, s =size, c=colors, marker=marker, alpha=alpha)\n # For color bar display\n cb = fig.colorbar(colmap)\n\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n if save==True:\n date = datetime.datetime.now()\n plt.savefig(savepath+'3Dheatmap_'+str(date.year)+'_'+ str(date.month)+ \\\n '_'+str(date.day)+'_'+str(date.hour)+'_'+ \\\n str(date.minute)+'_'+str(date.second), dpi=150)\n plt.show()",
"def plot3surface( pot, **kwargs ): \n \n fig = plt.figure( figsize = (8., 8.) ) \n gs = matplotlib.gridspec.GridSpec( 3,2, wspace=0.2) \n \n # Make a list with three perpendicular directions which \n # will define the three surface cuts \n perp = [(np.pi/2., 0.), (np.pi/2., -np.pi/2.), (0., -1.*np.pi/2.) ]\n \n # Iterate to plot the three surface cuts\n yMin = 1e16\n yMax = -1e16 \n Ims = []\n for i in range(3):\n ax0 = fig.add_subplot( gs[i,0], projection='3d')\n ax1 = fig.add_subplot( gs[i,1]) \n \n T0, T1, X, Y, Z = surfcut_points( normal = perp[i], \\\n ax0=ax0, **kwargs ) \n \n EVAL = pot.evalpotential(X,Y,Z)\n im = ax1.pcolormesh( T0, T1, EVAL, \\\n cmap=plt.get_cmap('jet') ) \n plt.axes( ax1 ) \n cbar = plt.colorbar(im)\n cbar.set_label( pot.unitlabel, rotation=0) \n \n ymin = EVAL.min()\n ymax = EVAL.max()\n \n Ims.append(im) \n if ymin < yMin : yMin = ymin\n if ymax > yMax : yMax = ymax \n \n for im in Ims:\n im.set_clim( vmin=yMin, vmax=yMax)",
"def render(self, mode = 'human'):\n if mode == 'human':\n\n if self.is_2d:\n fig = plt.figure()\n for index, value in np.ndenumerate(self.board):\n if value == 1:\n plt.scatter(*index, c='red', s=1000, alpha=0.2)\n elif value == -1:\n plt.scatter(*index, c='blue', s=1000, alpha=0.2)\n plt.xlim(-1, self.dim[0])\n plt.ylim(-1, self.dim[1])\n plt.xticks([])\n plt.yticks([])\n plt.grid(True)\n\n if self.is_3d:\n fig = plt.figure()\n ax = Axes3D(fig)\n for index, value in np.ndenumerate(self.board):\n if value == 1:\n ax.scatter(*index, c='red', s=1000, alpha=0.2)\n elif value == -1:\n ax.scatter(*index, c='blue', s=1000, alpha=0.2)\n ax.set_xlim(0, self.dim[0] - 1)\n ax.set_ylim(0, self.dim[1] - 1)\n ax.set_zlim(0, self.dim[2] - 1)\n\n else:\n fig = plt.figure()\n ax = Axes3D(fig)\n for index, value in np.ndenumerate(self.board):\n if value == 1:\n ax.scatter(*index, c='red', s=1000, alpha=0.2)\n elif value == -1:\n ax.scatter(*index, c='blue', s=1000, alpha=0.2)\n\n ax.set_xlim(0, self.dim[0] - 1)\n ax.set_ylim(0, self.dim[1] - 1)\n ax.set_zlim(0, self.dim[2] - 1)\n ax.set_title('Nr of steps: ' + str(self.steps))\n\n plt.show()\n return fig",
"def full_3d(self, quantity):\n # The data just tells you what integer grid point you are on. Not what actual x,y coordinate you\n # are at\n x = np.arange(0, self.period, self.dx)\n y = np.arange(0, self.period, self.dy)\n z = np.arange(0, self.height + self.dz, self.dz)\n points = np.array(list(itertools.product(z, x, y)))\n # Get the scalar\n scalar = self.get_scalar_quantity(quantity)\n labels = ('X [um]', 'Y [um]', 'Z [um]', quantity)\n # Now plot!\n self.scatter3d(points[:, 1], points[:, 2], points[\n :, 0], scalar.flatten(), labels, 'full_3d')",
"def plot(self, plotEdges=False, emphaseEdges=[], col=('b', 'k', 'r'), lims=None, ort=False):\n ax = a3.Axes3D(plt.figure())\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n ax.dist = 30\n ax.azim = -140\n if lims is None:\n lims = [0, 0, 0]\n lims[0] = [min(v.x for v in self.vertices),\n max(v.x for v in self.vertices)]\n lims[1] = [min(v.y for v in self.vertices),\n max(v.y for v in self.vertices)]\n lims[2] = [min(v.z for v in self.vertices),\n max(v.z for v in self.vertices)]\n if ort:\n ma = max(lims[i][1] for i in range(3))\n mi = min(lims[i][0] for i in range(3))\n lims = [[mi, ma]] * 3\n ax.set_xlim(lims[0])\n ax.set_ylim(lims[1])\n ax.set_zlim(lims[2])\n for f in self.faces:\n face = a3.art3d.Poly3DCollection([[v.coords()\n for v in f.vertices]])\n ax.add_collection3d(face)\n face.set_facecolor(col[0])\n face.set_edgecolor(col[1])\n if plotEdges or len(emphaseEdges)>0:\n for e in self.edges:\n edge = a3.art3d.Poly3DCollection([[e.nvt.coords(),\n e.pvt.coords()]])\n ax.add_collection3d(edge)\n if e in emphaseEdges:\n edge.set_edgecolor(col[2])\n else:\n edge.set_edgecolor(col[1])\n plt.show()",
"def plot_gt3D(parent_dir, env, title='GT Cost Value over 3D Reachable Set'):\n\traw_waypts, gt_cost = get_coords_gt_cost(env, parent_dir)\n\tfig = px.scatter_3d(x=raw_waypts[:,88], y=raw_waypts[:,89], z=raw_waypts[:,90], color=gt_cost)\n\tfig.update_layout(title=title)\n\tfig.show()",
"def DisplayMesh():\r\n \r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, Vertices, Triangles = CreateMatrixVTK(VTKString)\r\n \r\n fig = plt.figure()\r\n ax1 = fig.add_subplot(111,projection = '3d')\r\n ax1.plot_trisurf(Vertices[:,0],Vertices[:,1],Vertices[:,2],triangles= Triangles[:,1:])\r\n ax1.set_zlabel('z')\r\n ax1.set_ylabel('y')\r\n ax1.set_xlabel('x')\r\n plt.show()",
"def visualise(cut_list): \r\n\tcutlist = json.load(cut_list)\r\n\tmodified_list =[]\r\n\tz_set = 0\r\n\tc_set = 0\r\n\ta_set = 0\r\n\tcut_num = 0\r\n\tfor a in cutlist:\r\n\t\tif a[0] == \"jump\" or a[0] == \"mark\":\r\n\t\t\ta.pop(0)\r\n\t\t\ta = list(map(float,a)) + [z_set]\r\n\t\t\t\r\n\t\t\tif a_set != 0 or c_set != 0:\r\n\t\t\t\ta = rotate_a(a_set,a)\r\n\t\t\t\ta = rotate_c(c_set,a_set,a)\r\n\r\n\t\t\ta = a +[f\"a_set {a_set} c_set {c_set} z_set {z_set:.1f} cut_num {cut_num}\"]\r\n\t\t\tmodified_list.append(a)\r\n\r\n\t\telif a[0] == \"z_abs\":\r\n\t\t\tz_set = float(a[1])\r\n\t\t\tcut_num += 1\r\n\t\telif a[0] == \"c_abs\":\r\n\t\t\tc_set = float(a[1])\r\n\t\telif a[0] == \"a_abs\":\r\n\t\t\ta_set = float(a[1])\r\n\r\n\t\telif a[0] == \"z_rel\" or a[0] == \"z_step\":\r\n\t\t\tz_set = z_set + float(a[1])\r\n\t\telif a[0] == \"c_rel\" or a[0] == \"c_step\":\r\n\t\t\tc_set = c_set + float(a[1])\r\n\t\telif a[0] == \"a_rel\" or a[0] == \"a_step\":\r\n\t\t\ta_set = a_set + float(a[1])\r\n\t\telse:\r\n\t\t\tpass\r\n\tdf = pd.DataFrame(modified_list, columns = [\"x\",\"y\",\"z\",\"layer\"])\r\n\tfig = px.line_3d(df,\"x\",\"y\",\"z\",color=\"layer\")\r\n\t#fig.update_layout(scene_aspectmode = \"data\")\r\n\tfig.show()",
"def plot_learned3D(parent_dir, feature_function, env, feat='table', title='Learned function over 3D Reachable Set'):\n\tdata_file = parent_dir + '/data/gtdata/data_{}.npz'.format(feat)\n\tnpzfile = np.load(data_file)\n\ttrain = npzfile['x'][:,:7]\n\ttrain_raw = np.empty((0, 97), float)\n\tfor dp in train:\n\t\ttrain_raw = np.vstack((train_raw, env.raw_features(dp)))\n\tlabels = feature_function(train_raw)\n\teuclidean = angles_to_coords(train, feat, env)\n\tfig = px.scatter_3d(x=euclidean[:, 0], y=euclidean[:, 1], z=euclidean[:, 2], color=labels)\n\tfig.update_layout(title=title)\n\tfig.show()",
"def init_plot():\n fig = plt.figure(constrained_layout=True, figsize=(7,9), dpi=130)\n gs = fig.add_gridspec(5, 1)\n ax2 = fig.add_subplot(gs[:1, :])\n ax1 = fig.add_subplot(gs[1:, :], projection='3d')\n\n tick_color = (0.2, 0.2, 0.2, 1.0)\n pane_color = (0.12, 0.12, 0.12, 1.0)\n ax1.w_xaxis.set_pane_color(pane_color)\n ax1.w_yaxis.set_pane_color(pane_color)\n ax1.w_zaxis.set_pane_color(pane_color)\n\n ax1.tick_params(axis='x', colors=tick_color)\n ax1.tick_params(axis='y', colors=tick_color)\n ax1.tick_params(axis='z', colors=tick_color)\n ax1.view_init(elev=90, azim=180)\n\n ax1.set_xlim3d(0, 80)\n ax1.set_zlim3d(-2, 5)\n \n return (ax1, ax2)",
"def plotTsneE_3D(datadf,level,descname,v,path_output,Efilter,own_cmap,clustermethod=\"kmeans\",onlyShow=False,selected=False):\n\n if not selected:\n datadf = datadf[datadf[\"{}Energy\".format(level)]<=Efilter].sort_values(by=\"{}_{}_klabel\".format(level,descname))\n\n klabels = datadf[\"{}_{}_klabel\".format(level,descname)].astype(str)\n fig = px.scatter_3d(data_frame=datadf,\n z=\"{}Energy\".format(level),\n x=\"{}_{}_tsne1\".format(level,descname),\n y=\"{}_{}_tsne2\".format(level,descname),\n color=klabels,\n color_discrete_sequence=own_cmap,\n size=\"GyRadius\",\n opacity=0.9,\n #symbol=\"symbol\", # Use if needed in Jupyter\n hover_name=datadf.index,\n title=\"{}'s' t-SNE + {}Energy\".format(descname,level),\n #range_z=[-36,-20],\n width= 1200,\n height= 900)\n\n\n if onlyShow:\n fig.show()\n elif selected:\n fig.write_html(\"{}/{}_{}_EtSNE_selected.html\".format(path_output,level,descname))\n else:\n fig.write_html(\"{}/{}_{}_EtSNE.html\".format(path_output,level,descname))",
"def plot_config_3d(view, trace, marker_names):\n\n if view == \"3D Plot\":\n with st.expander(\"3D Plot Configuration\", expanded=True):\n col_plot_type, col_grid_res, col_fill, col_interp = st.columns(4)\n col_col_type, col_choice, col_preview, col_overlay = st.columns(4)\n trace[\"Chart_Type\"] = col_plot_type.selectbox(\"Plot Type\", [\"Contour\",\"3D Scatter\",\"Surface\",\"Heatmap\"], key = \"Chart_Type\")\n col_col_type.selectbox('Color Map Type', ['Sequential','Diverging'], key=\"Color_Set_Type\")\n\n if st.session_state[\"Color_Set_Type\"] == 'Sequential':\n color_map = list(sequential_color_dict().keys())\n else:\n color_map = list(diverging_color_dict().keys())\n\n color_set = col_choice.selectbox(\"Color Map\", color_map) \n if st.session_state[\"Color_Set_Type\"] == 'Sequential':\n st.session_state['Color_Palette'] = sequential_color_dict().get(color_set)\n else:\n st.session_state['Color_Palette'] = diverging_color_dict().get(color_set)\n\n colormap_preview = plot_color_set(st.session_state['Color_Palette'], color_set, view)\n col_preview.image(colormap_preview, use_column_width = True)\n\n if trace[\"Chart_Type\"] != '3D Scatter':\n trace[\"Grid_Res\"] = col_grid_res.number_input(\"Grid Resolution\", min_value=0.0, max_value=100000.0, value=50.0, step=0.5, key=\"Grid_Res\")\n trace[\"Fill_Value\"] = col_fill.selectbox(\"Fill Value\", [\"nan\",0], help=\"fill missing data with the selected value\", key = \"Fill_Value\")\n trace[\"Interp_Method\"] = col_interp.selectbox(\"Interpolation Method\", [\"linear\",\"nearest\",\"cubic\"], key = \"Interp_Method\")\n\n else:\n trace[\"Fill_Value\"] = None\n trace[\"Interp_Method\"] = None\n trace[\"Grid_Res\"] = None\n \n st.session_state[\"Overlay\"] = col_overlay.checkbox(\"Overlay Original Data\", help=\"Display scatter of original data overlayed on chart\")\n \n if st.session_state[\"Overlay\"] == True:\n st.subheader(\"Overlay\")\n col_overlay_alpha, col_overlay_marker, col_overlay_color = st.columns(3)\n overlay_alpha = col_overlay_alpha.slider(\"Opacity\",value=0.5,min_value=0.0, max_value=1.0, step=0.01, key = \"Overlay_Alpha\")\n overlay_marker = col_overlay_marker.selectbox(\"Style\", marker_names, help=\"https://plotly.com/python/marker-style/\", key = \"Overlay Marker\")\n overlay_color = col_overlay_color.color_picker('Pick a color ', '#000000', key = \"Overlay Color\")\n else:\n overlay_alpha = None\n overlay_marker = None\n overlay_color = None\n else:\n trace[\"Chart_Type\"] = None\n st.session_state['Color_Palette'] = None\n trace[\"Fill_Value\"] = None\n trace[\"Interp_Method\"] = None\n trace[\"Grid_Res\"] = None\n\n\n\n return trace[\"Chart_Type\"], trace[\"Fill_Value\"], trace[\"Interp_Method\"], trace[\"Grid_Res\"], st.session_state['Color_Palette'], st.session_state[\"Overlay\"], overlay_alpha, overlay_marker, overlay_color",
"def imshow_mesh_3d(img, vertices, faces, camera_center, focal_length, colors=(76, 76, 204)):\n H, W, C = img.shape\n if not has_pyrender:\n warnings.warn('pyrender package is not installed.')\n return img\n if not has_trimesh:\n warnings.warn('trimesh package is not installed.')\n return img\n try:\n renderer = pyrender.OffscreenRenderer(viewport_width=W, viewport_height=H)\n except (ImportError, RuntimeError):\n warnings.warn('pyrender package is not installed correctly.')\n return img\n if not isinstance(colors, list):\n colors = [colors for _ in range(len(vertices))]\n colors = [color_val(c) for c in colors]\n depth_map = np.ones([H, W]) * np.inf\n output_img = img\n for idx in range(len(vertices)):\n color = colors[idx]\n color = [(c / 255.0) for c in color]\n color.append(1.0)\n vert = vertices[idx]\n face = faces[idx]\n material = pyrender.MetallicRoughnessMaterial(metallicFactor=0.2, alphaMode='OPAQUE', baseColorFactor=color)\n mesh = trimesh.Trimesh(vert, face)\n rot = trimesh.transformations.rotation_matrix(np.radians(180), [1, 0, 0])\n mesh.apply_transform(rot)\n mesh = pyrender.Mesh.from_trimesh(mesh, material=material)\n scene = pyrender.Scene(ambient_light=(0.5, 0.5, 0.5))\n scene.add(mesh, 'mesh')\n camera_pose = np.eye(4)\n camera = pyrender.IntrinsicsCamera(fx=focal_length[0], fy=focal_length[1], cx=camera_center[0], cy=camera_center[1], zfar=100000.0)\n scene.add(camera, pose=camera_pose)\n light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=1)\n light_pose = np.eye(4)\n light_pose[:3, 3] = np.array([0, -1, 1])\n scene.add(light, pose=light_pose)\n light_pose[:3, 3] = np.array([0, 1, 1])\n scene.add(light, pose=light_pose)\n light_pose[:3, 3] = np.array([1, 1, 2])\n scene.add(light, pose=light_pose)\n color, rend_depth = renderer.render(scene, flags=pyrender.RenderFlags.RGBA)\n valid_mask = (rend_depth < depth_map) * (rend_depth > 0)\n depth_map[valid_mask] = rend_depth[valid_mask]\n valid_mask = valid_mask[:, :, None]\n output_img = valid_mask * color[:, :, :3] + (1 - valid_mask) * output_img\n return output_img",
"def plot_3d(self, ax_3d: Axes3D, lims_x: array_like = (-1, 1), lims_y: array_like = (-1, 1), **kwargs) -> None:\n X, Y, Z = self.to_mesh(lims_x, lims_y)\n\n ax_3d.plot_surface(X, Y, Z, **kwargs)",
"def visual_callback_3d(fig=None, plot_each=1):\r\n\r\n from mpl_toolkits.mplot3d import Axes3D\r\n # PyMCubes package is required for `visual_callback_3d`\r\n try:\r\n import mcubes\r\n except ImportError:\r\n raise ImportError(\"PyMCubes is required for 3D `visual_callback_3d`\")\r\n\r\n # Prepare the visual environment.\r\n if fig is None:\r\n fig = plt.figure()\r\n fig.clf()\r\n ax = fig.add_subplot(111, projection='3d')\r\n plt.pause(0.001)\r\n\r\n counter = [-1]\r\n\r\n def callback(levelset):\r\n\r\n counter[0] += 1\r\n if (counter[0] % plot_each) != 0:\r\n return\r\n\r\n if ax.collections:\r\n del ax.collections[0]\r\n\r\n coords, triangles = mcubes.marching_cubes(levelset, 0.5)\r\n ax.plot_trisurf(coords[:, 0], coords[:, 1], coords[:, 2],\r\n triangles=triangles)\r\n plt.pause(0.1)\r\n\r\n return callback",
"def plot_3D_compare_voxels(Y_data_test, Y_pred_data, X_data_test, ref_shape):\n sample_len = Y_data_test.shape[0]\n for i in np.arange(0, sample_len):\n true_lab = Y_data_test[i, ]\n true_loc = np.where(true_lab == 1)\n pred_lab = Y_pred_data[i, ]\n pred_loc = np.where(pred_lab == 1)\n volume = X_data_test[i, ]\n voxels = ~(volume==0)\n fig = plt.figure(i)\n ax = plt.axes(projection=\"3d\")\n axl = plt.gca()\n axl.set_xlim3d([0, ref_shape[0]])\n axl.set_ylim3d([0, ref_shape[1]])\n axl.set_zlim3d([0, ref_shape[2]])\n vx = fig.gca(projection='3d')\n vx.voxels(voxels, facecolors=volume, edgecolor='k')\n ax.scatter3D(true_loc[0], true_loc[1], true_loc[2], marker=\".\", alpha=0.9)\n ax.scatter3D(pred_loc[0], pred_loc[1], pred_loc[2], marker=\".\", alpha=0.05)\n\n fig.set_facecolor('black')\n ax.set_facecolor('black')\n ax.grid(False)\n ax.w_xaxis.pane.fill = False\n ax.w_yaxis.pane.fill = False\n ax.w_zaxis.pane.fill = False\n\n ax.set_xlabel('Width', c='white')\n ax.set_ylabel('Depth', c='white')\n ax.set_zlabel('Height', c='white')\n\n plt.show()"
] | [
"0.65396684",
"0.64592177",
"0.6369605",
"0.6002458",
"0.59309155",
"0.5929109",
"0.59126806",
"0.58908397",
"0.5875076",
"0.5787602",
"0.57868934",
"0.57833034",
"0.57310754",
"0.5703645",
"0.5703076",
"0.5677506",
"0.5642147",
"0.5626659",
"0.55626005",
"0.5488953",
"0.5450323",
"0.54383725",
"0.5434093",
"0.5432297",
"0.5430345",
"0.5424369",
"0.5411702",
"0.5408915",
"0.5402053",
"0.5400443"
] | 0.71667206 | 0 |
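An illustrative aside on the row above: shell_properties_3d colours each Poly3DCollection face by passing a Normalize-scaled property value through a colormap, and builds the shared colorbar from a ScalarMappable over the same cmap/norm pair. Below is a minimal, self-contained matplotlib sketch of that pattern; the quad vertices and property values are made up for illustration and do not come from the Shell/Material types used in the row.

import matplotlib.pyplot as plt
from matplotlib import cm, colors
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 (registers the 3d projection on older matplotlib)
from mpl_toolkits.mplot3d.art3d import Poly3DCollection

# Two quadrilateral "shell" faces and a made-up scalar property per face.
quads = [
    [(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0)],
    [(1, 0, 0), (2, 0, 0), (2, 1, 0), (1, 1, 0)],
]
values = [0.2, 0.8]

cmap = plt.get_cmap("viridis")
norm = colors.Normalize(vmin=min(values), vmax=max(values))

fig = plt.figure()
ax = fig.add_subplot(projection="3d")
for verts, value in zip(quads, values):
    # One face per collection, coloured by the normalized property value.
    poly = Poly3DCollection([verts], facecolors=cmap(norm(value)),
                            edgecolors="black", linewidths=0.1)
    ax.add_collection3d(poly)
ax.set_xlim(0, 2); ax.set_ylim(0, 1); ax.set_zlim(-1, 1)

# Shared colorbar built from the same cmap/norm pair, as in the row above.
mappable = cm.ScalarMappable(cmap=cmap, norm=norm)
mappable.set_array(values)
fig.colorbar(mappable, ax=ax, shrink=0.7)
plt.show()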
Top view of shell elements optionally coloured by material property. | def shell_properties_top_view(
shells: List[Shell],
prop_f: Optional[Callable[[Material], float]] = None,
prop_units: Optional[str] = None,
cmap: matplotlib.colors.Colormap = default_cmap,
colorbar: bool = False,
label: bool = False,
outline: bool = True,
):
# Vertices of nodes for each shell.
verts = []
# Min and max values for colour normalization.
prop_min, prop_max = np.inf, -np.inf
for shell in shells:
verts.append([])
for node in shell.nodes():
verts[-1].append([node.x, node.z])
shell_prop = prop_f(shell.section) if prop_f is not None else 0
if shell_prop < prop_min:
prop_min = shell_prop
if shell_prop > prop_max:
prop_max = shell_prop
if prop_f is not None:
norm = matplotlib.colors.Normalize(vmin=prop_min, vmax=prop_max)
# Keep track of all values used for colours.
# This is so we don't add duplicate labels.
values = set()
ax = plt.gca()
for shell, shell_verts in zip(shells, verts):
colour, label_str = "none", None
if prop_f is not None:
value = prop_f(shell.section)
colour = cmap(norm(value))
if label and value not in values:
values.add(value)
label_str = f"{value} {prop_units}"
ax.add_collection(
matplotlib.collections.PolyCollection(
[shell_verts],
facecolors=colour,
edgecolors="black" if outline else "none",
linewidths=0.01 if outline else 0,
label=label_str,
)
)
if prop_f is not None:
if label:
plt.legend()
if colorbar:
mappable = matplotlib.cm.ScalarMappable(cmap=cmap, norm=norm)
clb = plt.gcf().colorbar(mappable, shrink=0.7)
clb.ax.set_title(prop_units) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def draw_top(self):\n return group()",
"def __init__(self, controller, parent=None):\n super(EraseTool, self).__init__(controller, parent)\n self.hide()\n self.setZValue(styles.ZPATHTOOL)",
"def top_option():\n active = get_active_window()\n Width=get_middle_Width(active)\n Height=get_top_Height()\n PosX = get_middle_PosX(active,Width)\n PosY=get_top_PosY()\n move_window(active,PosX,PosY,Width,Height)\n raise_window(active)",
"def showTopView(self):\r\n if(self.dataController.fileLoaded == True): \r\n self.dataController.showTopView()\r\n self.midsagittalView = True\r\n self.frontView = False\r\n self.topView = False\r\n self.bottomView = False",
"def main():\n root = Node(1)\n root.left = Node(2)\n root.right = Node(3)\n root.left.left = Node(4)\n root.left.right = Node(5)\n root.right.left = Node(6)\n root.right.right = Node(7)\n\n v = View()\n v.top_view(root)",
"def _TopCLIElement(self):\n if self.IsRoot():\n return self\n # pylint: disable=protected-access\n return self._parent_group._TopCLIElement()",
"def GripperTop(self, attop=True):\r\n \r\n return self.SetFlag(self.optionGripperTop, attop)",
"def always_top(self, value: bool):\n self.tk_ref.wm_attributes('-topmost', int(value))",
"def do_standalone_display(self):\n stage = clutter.Stage()\n stage.connect('destroy', clutter.main_quit)\n stage.connect('key-press-event', lambda x,y: clutter.main_quit())\n stage.set_fullscreen(True)\n stage.set_color(clutter.color_from_string('black'))\n stage.add(self.group)\n stage.show_all()\n clutter.main()",
"def on_show_view(self):\n self.window.background_color = arcade.color.WHITE",
"def testPsychOnTop(self):\n attr = self.session.create_visit_attr()\n\n self.util.intTypeTest(self, attr, \"on_top\")\n\n self.util.intPropertyTest(self, attr, \"on_top\")",
"def set_view(self, s):\n #s.scene.reset_zoom()\n s.scene.z_plus_view()\n c = s.scene.camera\n c.azimuth(-30)\n c.elevation(20)\n s.render()",
"def EnvironmentTab():\n\n mainTab = cmds.columnLayout(adjustableColumn=True, columnAttach=('both', 20))\n \n ### Physical Light\n cmds.separator(height=10, style=\"none\")\n cmds.text(label=\"Physical Light:\", align=\"left\")\n # Slider to select the elevation\n elevationSlider = cmds.floatSliderGrp(label=\"Elevation\", field=True, value=45, min=0, max=90, \n dragCommand=lambda value:EC.elevationChange(value),\n changeCommand=lambda value:EC.elevationChange(value))\n # Slider to select the azimuth (simulat to the height, kinda)\n azimuthSlider = cmds.floatSliderGrp(label=\"Azimuth\", field=True, value=90, min=0, max=360, \n dragCommand=lambda value:EC.azimuthChange(value),\n changeCommand=lambda value:EC.azimuthChange(value))\n # Slider for the light's intensity\n intensitySlider = cmds.floatSliderGrp(label=\"Intensity\", field=True, value=1, min=.1, max=10, \n dragCommand=lambda value:EC.intensityChange(value),\n changeCommand=lambda value:EC.intensityChange(value))\n\n cmds.separator(height=5, style=\"none\")\n cmds.button(label='Create Sky Dome', command=lambda x: EC.createSkyLight(elevationSlider, azimuthSlider, intensitySlider))\n\n ### Environment fog\n cmds.separator(height=20)\n cmds.text(label=\"Environment Fog:\", align=\"left\")\n # Slider for the color\n colorSlider = cmds.colorSliderGrp(label=\"Color\", rgb=(1,1,1), \n dragCommand=lambda value:EC.colorChange(colorSlider),\n changeCommand=lambda value:EC.colorChange(colorSlider))\n # Slider for fog's distance\n distanceSlider = cmds.floatSliderGrp(label=\"Distance\", field=True, value=.02, min=0, max=1000, step=.01, \n dragCommand=lambda value:EC.distanceChange(value), \n changeCommand=lambda value:EC.distanceChange(value))\n # Slider for fog's height\n heightSlider = cmds.floatSliderGrp(label=\"Height\", field=True, value=5, min=0, max=1000, step=.1, \n dragCommand=lambda value:EC.heightChange(value),\n changeCommand=lambda value:EC.heightChange(value))\n\n cmds.separator(height=5, style=\"none\")\n cmds.button(label='Create Environment Fog', command=lambda x: EC.createAiFog(colorSlider, distanceSlider, heightSlider))\n\n cmds.setParent('..')\n\n return mainTab",
"def change_color_on_topview(pair):\n cv2.circle(bird_view_img, (pair[0][0], pair[0][1]), BIG_CIRCLE, COLOR_RED, 2)\n cv2.circle(bird_view_img, (pair[0][0], pair[0][1]), SMALL_CIRCLE, COLOR_RED, -1)\n cv2.circle(bird_view_img, (pair[1][0], pair[1][1]), BIG_CIRCLE, COLOR_RED, 2)\n cv2.circle(bird_view_img, (pair[1][0], pair[1][1]), SMALL_CIRCLE, COLOR_RED, -1)",
"def __init__(self, last_color):\n self._color = last_color\n self._window = tk.Toplevel(width = 200, height = 132)\n self._window.title(\"Color\")\n \n self._initialize()",
"def top(self):",
"def on_show_view(self):\n self.window.background_color = arcade.color.BLACK",
"def top_layer(self):\n return self._top",
"def always_top(self) -> bool:\n return bool(self.tk_ref.wm_attributes('-topmost'))",
"def scene(pack, cmap=None, rot=0, camera_height=0.7, camera_dist=1.5, angle=None,\n lightstrength=1.1, orthographic=False, pad=None, floater_color=(.6, .6, .6),\n bgcolor=(1, 1, 1), box_color=(.5, .5, .5),\n group_indexes=None, clip=False):\n import vapory\n import numpy as np\n\n try:\n import matplotlib as mpl\n import matplotlib.cm as mcm\n vmin, vmax = min(pack.diameters), max(pack.diameters)\n sm = mcm.ScalarMappable(\n norm=mpl.colors.Normalize(vmin, vmax), cmap=cmap)\n cols = [sm.to_rgba(s) for s in pack.diameters]\n except ImportError:\n if not isinstance(cmap, list):\n raise ValueError(\n \"matplotlib could not be imported, and cmap not recognizeable as a list\")\n cols = list(cmap)\n except TypeError:\n if not isinstance(cmap, list):\n raise ValueError(\"matplotlib could not convert cmap to a colormap,\" +\n \" and cmap not recognizeable as a list\")\n cols = list(cmap)\n\n if floater_color is not None:\n ix, _ = pack.backbone()\n ns, = np.nonzero(~ix)\n for n in ns:\n cols[n] = floater_color\n\n mod_add = .5 if not clip else 0.\n rs = np.remainder(pack.rs + mod_add, 1) - mod_add\n if group_indexes is not None:\n for ix in group_indexes:\n xs = pack.rs[ix, :]\n com = np.mean(xs, axis=0)\n comdiff = (np.remainder(com + mod_add, 1) - mod_add) - com\n rs[ix, :] = xs + comdiff\n\n if clip:\n spheres = []\n cube = vapory.Box((-.5, -.5, -.5), (.5, .5, .5))\n dxs = [-1., 0.]\n drs = np.array([(dx, dy, dz)\n for dx in dxs for dy in dxs for dz in dxs])\n maxr = 0\n\n for xyz, s, col in zip(rs, pack.diameters, cols):\n for dr in drs:\n r = dr + xyz\n if np.any(abs(r) - s / 2. > .5):\n # not in the box\n continue\n sphere = vapory.Sphere(r, s / 2.)\n cutsphere = vapory.Intersection(\n cube, sphere,\n vapory.Texture(vapory.Pigment('color', col[:3]))\n )\n spheres.append(cutsphere)\n if np.amax(r) > maxr:\n maxr = np.amax(r)\n else:\n spheres = [\n vapory.Sphere(\n xyz, s / 2., vapory.Texture(vapory.Pigment('color', col[:3])))\n for xyz, s, col in zip(rs, pack.diameters, cols)\n ]\n maxr = np.amax(np.amax(np.abs(rs), axis=1) + pack.diameters / 2.)\n\n extent = (-.5, .5)\n corners = [np.array((x, y, z))\n for x in extent for y in extent for z in extent]\n pairs = [(c1, c2)\n for c1 in corners\n for c2 in corners\n if np.allclose(np.sum((c1 - c2)**2), 1) and sum(c1 - c2) > 0]\n\n radius = 0.01\n cyls, caps = [], []\n if box_color is not None:\n col = vapory.Texture(vapory.Pigment('color', box_color))\n cyls = [vapory.Cylinder(c1, c2, 0.01, col) for c1, c2 in pairs]\n caps = [vapory.Sphere(c, radius, col) for c in corners]\n\n light_locs = [\n [8., 5., -3.],\n [-6., 6., -5.],\n [-6., -7., -4.],\n [10., -5., 7.]\n ]\n\n rotlocs = [[x * np.cos(rot) - z * np.sin(rot), y, z * np.cos(rot) + x * np.sin(rot)]\n for x, y, z in light_locs]\n lights = [\n # vapory.LightSource( [2,3,5], 'color', [1,1,1] ),\n vapory.LightSource(loc, 'color', [lightstrength] * 3) for loc in rotlocs\n ]\n cloc = [np.cos(rot) * camera_dist, camera_dist *\n camera_height, np.sin(rot) * camera_dist]\n # mag = sqrt(sum([d**2 for d in cloc]))\n # direction = [-v*2/mag for v in cloc]\n\n if angle is None:\n if pad is None:\n pad = max(pack.diameters)\n w = sqrt(2) * maxr + pad\n angle = float(np.arctan2(w, 2 * camera_dist)) * 2 * 180 / np.pi\n camera = vapory.Camera(\n 'location', cloc, 'look_at', [0, 0, 0], 'angle', angle)\n # vapory.Camera('orthographic', 'location', cloc, 'direction',\n # direction, 'up', [0,2,0], 'right', [2,0,0])\n\n return vapory.Scene(camera, objects=(lights + spheres + cyls + caps +\n 
[vapory.Background(\"color\", bgcolor)]))",
"def set_view(self, s):\n #s.scene.reset_zoom()\n s.scene.z_plus_view()\n c = s.scene.camera\n c.azimuth(30)\n c.elevation(30)\n s.render()",
"def Top(self):\r\n\r\n self.dock_direction = AUI_DOCK_TOP\r\n return self",
"def Top(self):\r\n\r\n self.dock_direction = AUI_DOCK_TOP\r\n return self",
"def show(self):\n import Helpers\n for p in self.parts:\n color = (p[1][0]*255, p[1][1]*255, p[1][2]*255, 0)\n Helpers.show(p[0], color)",
"def main():\n mainWin = gtk.Window()\n \n #initialize additional widgets\n mainLayout = gtk.Layout()\n artMenuItem = radialmenuitem.RadItem()\n #Connect particular events to particular widgets\n #Add above widgets to window\n mainWin.add(artMenuItem)\n\n mainWin.fullscreen()\n mainWin.set_decorated(0)\n mainWin.show_all()\n\n gtk.main() #main loop",
"def top(self):\n # Sets our Z value to one.\n self.setZValue(1)\n # Set every colliding items Z value to 0\n for sibling in self.collidingItems():\n sibling.setZValue(0)",
"def setSurfaceColors(topcolor=-1,bottomcolor=-1):\n dislin.surclr(topcolor, bottomcolor)",
"def set_view(self):\n self.scene.mlab.view(azimuth=90.0, elevation=-90.0)",
"def reveal_top_card(self):\n if self.get_length() != 0:\n if not self.get_topmost_card().get_exposed():\n self.get_topmost_card().flip_card()",
"def setDisplayShaded():\n for node in nuke.allNodes():\n print node.name()\n goodGeo = [\"Group\", \"ReadGeo\",\"ReadGeo2\",\"Sphere\",\"Cube\",\"Cylinder\",\"Card\", \"Card2\"]\n if node.Class() in goodGeo:\n if node.Class() == \"Group\":\n node.begin()\n for child in nuke.allNodes():\n if child.Class() in goodGeo:\n child['display'].setValue(2)\n node.end()\n else:\n node['display'].setValue(2)"
] | [
"0.56002295",
"0.53599805",
"0.53412706",
"0.5248069",
"0.5060203",
"0.5033952",
"0.49754825",
"0.49591467",
"0.48879454",
"0.48508808",
"0.48502374",
"0.48479536",
"0.4833125",
"0.48296246",
"0.4826038",
"0.48259652",
"0.48203567",
"0.4814789",
"0.48111784",
"0.47930458",
"0.47917375",
"0.4768414",
"0.4768414",
"0.47567257",
"0.47516745",
"0.47393125",
"0.47266236",
"0.47177142",
"0.47126946",
"0.47089738"
] | 0.6281429 | 0 |
Ensure `value` is of type T and return it. | def _check_type(cls, value: Any) -> T:
if not isinstance(value, cls.type):
raise ValueError(
f"{cls!r} accepts only values of type {cls.type!r}, "
f"got {type(value)!r}"
)
return cast(T, value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert(cls, value: Any) -> Optional[T]:\n pass",
"def _check_value_type(cls, value: Any) -> V:\n if not isinstance(value, cls.valuetype):\n raise ValueError(\n f\"{cls!r} accepts only values of type \"\n \"{cls.valuetype!r}, got {type(value)!r}\"\n )\n return cast(V, value)",
"def get_type(value):\n return type(value)",
"def gettype(value):\n\n # Return the type\n return type(value)",
"def cast(self, value):\n if value is None:\n return None\n return self.type(value)",
"async def resolved(value: T) -> T:\n return value",
"def from_json(cls, value: Any) -> Optional[T]:\n return cls.convert(value)",
"def type(self, value):\n return value",
"def convert_or_none(value, type_):\n try:\n return type_(value)\n except Exception:\n return None",
"def validate(self, value):\n value = super(Type,self).validate(value)\n if self.type is None:\n return value\n if value is not None and not isinstance(value,self.type):\n try:\n if isinstance(value, list) or isinstance(value, tuple): value = self.type(*value)\n elif isinstance(value, dict): value = self.type(**value)\n else: value = self.type(value)\n except: \n raise BadValueError(\"Cannot coerce: %s to %s\"% (value, self.type))\n return value",
"def __type_checker(value: object) -> str:\n if type(value) == str:\n return value\n if type(value) == list:\n return value[0]",
"def castType(self, valueType, value):\n try:\n return valueType(value)\n except (ValueError, TypeError):\n self.error('incorrect type \"{}\"'.format(value))",
"def cast(self, value: Any) -> Any:\n for val in self.values:\n if val['value'] == value:\n return value\n raise err.InvalidArgumentError(\"unknown value '{}'\".format(value))",
"def convert_value(self, v, t):\n if (isinstance(t, (abstract.AbstractError, abstract.AbstractType))\n or v is abstract.DEAD):\n return None\n elif isinstance(t, abstract.AbstractScalar):\n if issubclass(t.values[abstract.TYPE],\n (dtype.Number, dtype.Bool, dtype.Nil)):\n return self.from_scalar(v, t.values[abstract.TYPE])\n elif issubclass(t.values[abstract.TYPE], dtype.EnvType):\n assert len(v._contents) == 0\n return self.empty_env()\n else:\n raise NotImplementedError(f'convert_value for {t}')\n elif isinstance(t, abstract.AbstractTuple):\n return tuple(self.convert_value(v, t)\n for v, t in zip(v, t.elements))\n else:\n raise NotImplementedError(f'convert_value for {t}')",
"def with_value(self, value):\n return type(self)(self.name, self.type, value, self.metadata or None)",
"def value(self) -> Optional[T]:\n return self._value",
"def checkValue(self, value):\n if self.converter and value:\n return self.converter(value)\n return value",
"def _deduceValueType(value):\n\n\tif value.lower() == 'null':\n\t\treturn None\n\n\tif value.startswith(\"0x\"):\n\t\treturn bytes.fromhex(value[2:])\n\n\t# If value can be an int, float() will not raise\n\t# exception too\n\tresult = value\n\ttry:\n\t\tresult = float(value)\n\t\tresult = int(value)\n\texcept:\n\t\tpass\n\n\treturn result",
"def _get_value_type(self, value):\n\n value = value.strip()\n\n if value == 'True':\n return True\n elif value == 'False':\n return False\n else:\n try:\n return_value = int(value)\n except ValueError:\n try:\n return_value = float(value)\n except ValueError:\n return value\n\n return return_value",
"def _get_value_type(cls, value):\n #TODO: Fix Args: documentation once the Python classes handling has changed\n type_ = cls.typeDict.get(type(value))\n if type_ is None:\n type_ = 'CLASS' if inspect.isclass(value) else None\n if type_ is None and value is None:\n type_ = 'STRING'\n return type_",
"def __call__(self, value: SupportsSubstitute[T]) -> T:\n try:\n return value._substitute(self.mapping)\n except AttributeError:\n raise TypeError(\n f'object of type {type(value).__name__} does not support '\n 'substitution'\n )",
"def _convert_value_type_phantom(value: str) -> Any:\n float_regexes = [r'\\d*\\.\\d*[Ee][-+]\\d*', r'-*\\d*\\.\\d*']\n timedelta_regexes = [r'\\d\\d\\d:\\d\\d']\n int_regexes = [r'-*\\d+']\n\n if value == 'T':\n return True\n if value == 'F':\n return False\n\n for regex in float_regexes:\n if re.fullmatch(regex, value):\n return float(value)\n\n for regex in timedelta_regexes:\n if re.fullmatch(regex, value):\n hours, minutes = value.split(':')\n return datetime.timedelta(hours=int(hours), minutes=int(minutes))\n\n for regex in int_regexes:\n if re.fullmatch(regex, value):\n return int(value)\n\n return value",
"def _get(self) -> T:\n ...",
"def get_type(value):\n\n if isinstance(value, str) or value is None:\n return Type.STRING\n elif isinstance(value, bool):\n return Type.BOOLEAN\n elif isinstance(value, (int, float)):\n return Type.NUMBER\n\n raise exceptions.Error(\"Value of unknown type: {value}\".format(value=value))",
"def preprocess(self, value):\n for val_type in self._value_types:\n try:\n value = val_type(value)\n break\n except ValueError:\n pass\n return value",
"def smart_coerce(value: str) -> ValueType:\n try:\n return int(value)\n except ValueError:\n pass\n try:\n return float(value)\n except ValueError:\n pass\n if value.lower() in ('null', 'none', ):\n return None\n elif value.lower() in ('true', ):\n return True\n elif value.lower() in ('false', ):\n return False\n else:\n return value",
"def _get_value(o):\n return value(o, exception=False)",
"def process(self, value):\n return six.text_type(value) if value is not None else None",
"def _fix_value(self, value):\n try:\n return self._castfunc(value)\n except:\n error = \"Can't put '{0}' ({1}) into a {2}. Expected a {3} object.\"\n error = error.format(\n value, # Input value\n type(value), # Type of input value\n type(self), # Type of collection\n self._type # Expected type of input value\n )\n six.reraise(TypeError, TypeError(error), sys.exc_info()[-1])",
"def get_value(self, *args, **kwargs) -> Optional[ValueType]: # pragma: no cover\n raise NotImplementedError"
] | [
"0.7167879",
"0.7088845",
"0.6617856",
"0.65999",
"0.65962976",
"0.633142",
"0.63054323",
"0.6303286",
"0.6302568",
"0.6286788",
"0.6285597",
"0.6257876",
"0.62238514",
"0.6135721",
"0.6133391",
"0.613098",
"0.6101893",
"0.599462",
"0.59832716",
"0.5910546",
"0.5896536",
"0.58754224",
"0.5873106",
"0.58647823",
"0.582474",
"0.5812497",
"0.58019537",
"0.57980543",
"0.5791921",
"0.5785895"
] | 0.7335373 | 1 |
Ensure `value` is of type T and return it. | def _check_type(cls, value: Any) -> T:
if not isinstance(value, cls.type):
raise ValueError(
f"{cls!r} accepts only values of type {cls.type!r}, "
f"got {type(value)!r}"
)
return cast(T, value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert(cls, value: Any) -> Optional[T]:\n pass",
"def _check_value_type(cls, value: Any) -> V:\n if not isinstance(value, cls.valuetype):\n raise ValueError(\n f\"{cls!r} accepts only values of type \"\n \"{cls.valuetype!r}, got {type(value)!r}\"\n )\n return cast(V, value)",
"def get_type(value):\n return type(value)",
"def gettype(value):\n\n # Return the type\n return type(value)",
"def cast(self, value):\n if value is None:\n return None\n return self.type(value)",
"async def resolved(value: T) -> T:\n return value",
"def from_json(cls, value: Any) -> Optional[T]:\n return cls.convert(value)",
"def type(self, value):\n return value",
"def convert_or_none(value, type_):\n try:\n return type_(value)\n except Exception:\n return None",
"def validate(self, value):\n value = super(Type,self).validate(value)\n if self.type is None:\n return value\n if value is not None and not isinstance(value,self.type):\n try:\n if isinstance(value, list) or isinstance(value, tuple): value = self.type(*value)\n elif isinstance(value, dict): value = self.type(**value)\n else: value = self.type(value)\n except: \n raise BadValueError(\"Cannot coerce: %s to %s\"% (value, self.type))\n return value",
"def __type_checker(value: object) -> str:\n if type(value) == str:\n return value\n if type(value) == list:\n return value[0]",
"def castType(self, valueType, value):\n try:\n return valueType(value)\n except (ValueError, TypeError):\n self.error('incorrect type \"{}\"'.format(value))",
"def cast(self, value: Any) -> Any:\n for val in self.values:\n if val['value'] == value:\n return value\n raise err.InvalidArgumentError(\"unknown value '{}'\".format(value))",
"def convert_value(self, v, t):\n if (isinstance(t, (abstract.AbstractError, abstract.AbstractType))\n or v is abstract.DEAD):\n return None\n elif isinstance(t, abstract.AbstractScalar):\n if issubclass(t.values[abstract.TYPE],\n (dtype.Number, dtype.Bool, dtype.Nil)):\n return self.from_scalar(v, t.values[abstract.TYPE])\n elif issubclass(t.values[abstract.TYPE], dtype.EnvType):\n assert len(v._contents) == 0\n return self.empty_env()\n else:\n raise NotImplementedError(f'convert_value for {t}')\n elif isinstance(t, abstract.AbstractTuple):\n return tuple(self.convert_value(v, t)\n for v, t in zip(v, t.elements))\n else:\n raise NotImplementedError(f'convert_value for {t}')",
"def value(self) -> Optional[T]:\n return self._value",
"def with_value(self, value):\n return type(self)(self.name, self.type, value, self.metadata or None)",
"def checkValue(self, value):\n if self.converter and value:\n return self.converter(value)\n return value",
"def _deduceValueType(value):\n\n\tif value.lower() == 'null':\n\t\treturn None\n\n\tif value.startswith(\"0x\"):\n\t\treturn bytes.fromhex(value[2:])\n\n\t# If value can be an int, float() will not raise\n\t# exception too\n\tresult = value\n\ttry:\n\t\tresult = float(value)\n\t\tresult = int(value)\n\texcept:\n\t\tpass\n\n\treturn result",
"def _get_value_type(self, value):\n\n value = value.strip()\n\n if value == 'True':\n return True\n elif value == 'False':\n return False\n else:\n try:\n return_value = int(value)\n except ValueError:\n try:\n return_value = float(value)\n except ValueError:\n return value\n\n return return_value",
"def _get_value_type(cls, value):\n #TODO: Fix Args: documentation once the Python classes handling has changed\n type_ = cls.typeDict.get(type(value))\n if type_ is None:\n type_ = 'CLASS' if inspect.isclass(value) else None\n if type_ is None and value is None:\n type_ = 'STRING'\n return type_",
"def __call__(self, value: SupportsSubstitute[T]) -> T:\n try:\n return value._substitute(self.mapping)\n except AttributeError:\n raise TypeError(\n f'object of type {type(value).__name__} does not support '\n 'substitution'\n )",
"def _convert_value_type_phantom(value: str) -> Any:\n float_regexes = [r'\\d*\\.\\d*[Ee][-+]\\d*', r'-*\\d*\\.\\d*']\n timedelta_regexes = [r'\\d\\d\\d:\\d\\d']\n int_regexes = [r'-*\\d+']\n\n if value == 'T':\n return True\n if value == 'F':\n return False\n\n for regex in float_regexes:\n if re.fullmatch(regex, value):\n return float(value)\n\n for regex in timedelta_regexes:\n if re.fullmatch(regex, value):\n hours, minutes = value.split(':')\n return datetime.timedelta(hours=int(hours), minutes=int(minutes))\n\n for regex in int_regexes:\n if re.fullmatch(regex, value):\n return int(value)\n\n return value",
"def _get(self) -> T:\n ...",
"def get_type(value):\n\n if isinstance(value, str) or value is None:\n return Type.STRING\n elif isinstance(value, bool):\n return Type.BOOLEAN\n elif isinstance(value, (int, float)):\n return Type.NUMBER\n\n raise exceptions.Error(\"Value of unknown type: {value}\".format(value=value))",
"def preprocess(self, value):\n for val_type in self._value_types:\n try:\n value = val_type(value)\n break\n except ValueError:\n pass\n return value",
"def smart_coerce(value: str) -> ValueType:\n try:\n return int(value)\n except ValueError:\n pass\n try:\n return float(value)\n except ValueError:\n pass\n if value.lower() in ('null', 'none', ):\n return None\n elif value.lower() in ('true', ):\n return True\n elif value.lower() in ('false', ):\n return False\n else:\n return value",
"def _get_value(o):\n return value(o, exception=False)",
"def process(self, value):\n return six.text_type(value) if value is not None else None",
"def _fix_value(self, value):\n try:\n return self._castfunc(value)\n except:\n error = \"Can't put '{0}' ({1}) into a {2}. Expected a {3} object.\"\n error = error.format(\n value, # Input value\n type(value), # Type of input value\n type(self), # Type of collection\n self._type # Expected type of input value\n )\n six.reraise(TypeError, TypeError(error), sys.exc_info()[-1])",
"def get_value(self, *args, **kwargs) -> Optional[ValueType]: # pragma: no cover\n raise NotImplementedError"
] | [
"0.7168732",
"0.70905447",
"0.6620016",
"0.6601747",
"0.65981066",
"0.6333085",
"0.63064355",
"0.6306179",
"0.6302738",
"0.6287991",
"0.62865114",
"0.6258626",
"0.6225704",
"0.61366874",
"0.6134749",
"0.6134575",
"0.6103191",
"0.59965014",
"0.5985597",
"0.5912719",
"0.5897219",
"0.58760345",
"0.58735204",
"0.58645856",
"0.58264464",
"0.581415",
"0.5805035",
"0.58005345",
"0.5793171",
"0.5788876"
] | 0.73346376 | 0 |
Ensure `key` is of type K and return it. | def _check_key_type(cls, key: Any) -> K:
if not isinstance(key, cls.keytype):
raise KeyError(
f"{cls!r} accepts only keys of type {cls.keytype!r}, "
f"got {type(key)!r}"
)
return cast(K, key) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get(self, key, key_type=None):\n pass",
"def get_key(self, key):\n ret = None\n qkey = key.__qualname__\n ret = self.get(qkey)\n if not ret:\n # check all entries if qualname match\n for k in self:\n if k.__qualname__ == qkey:\n return self.get(k)\n return",
"def __getitem__(self, key: Union[str, Tuple[str, T]]) -> Union[str, T]:\n default: Union[str, T]\n if isinstance(key, tuple):\n key, default = key\n else:\n default = ''\n\n key = key.casefold()\n for k in self._keys:\n if k.casefold() == key:\n return self._keys[k]\n else:\n return default",
"def get(self, key: K) -> Optional[V]:\n return self.mget([key])[0]",
"def getK(self):\n return self.getOrDefault(self.k)",
"def opt_key(k: T) -> OptionalKey[T]:\n return OptionalKey(k)",
"def get(self, key: str, default: Union[str, T] = '') -> Union[str, T]:\n key = key.casefold()\n for k in self._keys:\n if k.casefold() == key:\n return self._keys[k]\n else:\n return default",
"def get(self, key: Hashable) -> Any: # type: ignore\n try:\n return[key]\n except (KeyError, TypeError):\n if self.default_factory is None:\n raise KeyError(f'{key} is not in {self.__class__}')\n else:\n try:\n return self.default_factory()\n except TypeError:\n return self.default_factory",
"def _map___getitem__(self, key):\n if not isinstance(key, self.keytype):\n raise KeyError('type of key should be ' + repr(self.keytype) + ' but got ' + repr(type(key)))\n if key not in self:\n raise KeyError('key not found')\n return self.second(self.find(key))",
"def get(self, key: Union[Any, int]) -> Union[Any, Sequence[Any]]:\n try:\n return[key]\n except KeyError:\n return self.default_factory",
"def get_custom_key_from_key(key):\n key_custom = CustomIterator.key_from_protobuf(key.to_protobuf())\n key_custom._type = SubclassMap.get()[key_custom.kind]\n return key_custom",
"def get(self, key: T) -> Optional[U]:\n return self._store.get(key)",
"def __getitem__(self, key: T) -> T:\n return self.lookup(key)",
"def _get_key(key_or_id, key_cls):\n return (\n key_cls.from_string(key_or_id)\n if isinstance(key_or_id, str)\n else key_or_id\n )",
"def get(self, key):\n raise NotImplementedError",
"def get(self, key: K)-> Optional[V]:\n return self._func(key)",
"def get(self, key: str):\r\n\r\n if key in self._inner_dict:\r\n return self._inner_dict[key]\r\n else:\r\n raise KeyError(f\"key '{key}' is invalid\")",
"def GetKeyByPath(self, key_path):",
"def _get_key(args):\n\n input_key = args.input_key\n key = None\n if input_key:\n from pathlib import Path\n key_file = Path(input_key)\n if key_file.is_file():\n key = load_key(key_file)\n\n if not key:\n key = key_handler(args)\n\n return key",
"def get(obj, key, required_type=None):\n if key not in obj:\n return None\n if required_type is not None and not isinstance(obj[key], required_type):\n raise TypeError(f'{key} is not a {required_type}')\n return obj[key]",
"def get(self, key):\n try:\n return self.resource.Object(self.bucketname, key).get()\n except botocore.exceptions.ClientError as error:\n if error.response['Error']['Code'] == 'NoSuchKey':\n # key does not exists\n raise KeyError(error.response['Error'])\n else:\n # unexpected error\n raise error",
"def get(self, key):\n if key is None:\n return None # None is not a valid key\n return get_from_subtree(self.root, key)",
"def key(key):\n return key",
"def get(self, key):\n\t\treturn self.__get(key, key[1:])",
"def get_key(self, key, default=_MISSING):\n if '.' in key:\n # Nested key of form \"foo.bar\"\n key, remainder = key.split('.', 1)\n if default != _MISSING:\n try:\n value = self[key].get_key(remainder, default)\n except KeyError:\n # subdict exists, but doesn't contain key\n return default\n except AttributeError:\n # key points to non-dict thing, so no get_key attribute\n return default\n else:\n value = self[key].get_key(remainder)\n else:\n # Single, non-nested key of form \"foo\"\n if default != _MISSING:\n return self.get(key, default)\n else:\n return self[key]\n return value",
"def get_key(dict, key):\n return dict.get(key, None)",
"def get(self, key: t.Hashable) -> t.Any:",
"def _GetCompleteKeyOrError(arg):\n if isinstance(arg, Key):\n key = arg\n elif isinstance(arg, basestring):\n key = Key(arg)\n elif isinstance(arg, Entity):\n key = arg.key()\n elif not isinstance(arg, Key):\n raise datastore_errors.BadArgumentError(\n 'Expects argument to be an Entity or Key; received %s (a %s).' %\n (arg, typename(arg)))\n assert isinstance(key, Key)\n\n if not key.has_id_or_name():\n raise datastore_errors.BadKeyError('Key %r is not complete.' % key)\n\n return key",
"def __getitem__(self, key):\n return type(self)(self.origin, typeof(key))",
"def require(obj, key, required_type=None):\n if key not in obj:\n raise KeyError(f'{key} not found')\n if required_type is not None and not isinstance(obj[key], required_type):\n raise TypeError(f'{key} is not a {required_type}')\n return obj[key]"
] | [
"0.68074906",
"0.66398764",
"0.6606521",
"0.6545303",
"0.6482864",
"0.645621",
"0.6378684",
"0.63389575",
"0.62857664",
"0.62763745",
"0.6253712",
"0.6230783",
"0.6164547",
"0.61435175",
"0.6126763",
"0.6098707",
"0.60892385",
"0.59927523",
"0.59880143",
"0.59860724",
"0.5982684",
"0.59694654",
"0.59385127",
"0.5920631",
"0.5920062",
"0.59018487",
"0.59007597",
"0.58972627",
"0.58688",
"0.5868659"
] | 0.7774557 | 0 |
Ensure `value` is of type V and return it. | def _check_value_type(cls, value: Any) -> V:
if not isinstance(value, cls.valuetype):
raise ValueError(
f"{cls!r} accepts only values of type "
"{cls.valuetype!r}, got {type(value)!r}"
)
return cast(V, value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_value(self, v, t):\n if (isinstance(t, (abstract.AbstractError, abstract.AbstractType))\n or v is abstract.DEAD):\n return None\n elif isinstance(t, abstract.AbstractScalar):\n if issubclass(t.values[abstract.TYPE],\n (dtype.Number, dtype.Bool, dtype.Nil)):\n return self.from_scalar(v, t.values[abstract.TYPE])\n elif issubclass(t.values[abstract.TYPE], dtype.EnvType):\n assert len(v._contents) == 0\n return self.empty_env()\n else:\n raise NotImplementedError(f'convert_value for {t}')\n elif isinstance(t, abstract.AbstractTuple):\n return tuple(self.convert_value(v, t)\n for v, t in zip(v, t.elements))\n else:\n raise NotImplementedError(f'convert_value for {t}')",
"def get_value(self, *args, **kwargs) -> Optional[ValueType]: # pragma: no cover\n raise NotImplementedError",
"def cast(self, value: Any) -> Any:\n for val in self.values:\n if val['value'] == value:\n return value\n raise err.InvalidArgumentError(\"unknown value '{}'\".format(value))",
"def convert(cls, value: Any) -> Optional[T]:\n pass",
"def _deduceValueType(value):\n\n\tif value.lower() == 'null':\n\t\treturn None\n\n\tif value.startswith(\"0x\"):\n\t\treturn bytes.fromhex(value[2:])\n\n\t# If value can be an int, float() will not raise\n\t# exception too\n\tresult = value\n\ttry:\n\t\tresult = float(value)\n\t\tresult = int(value)\n\texcept:\n\t\tpass\n\n\treturn result",
"def _get_value(o):\n return value(o, exception=False)",
"def _get_test_value(cls, v):\r\n # avoid circular import\r\n from theano.compile.sharedvalue import SharedVariable\r\n\r\n if isinstance(v, graph.Constant):\r\n return v.value\r\n elif isinstance(v, SharedVariable):\r\n return v.get_value(borrow=True, return_internal_type=True)\r\n elif isinstance(v, graph.Variable) and hasattr(v.tag, 'test_value'):\r\n # ensure that the test value is correct\r\n return v.type.filter(v.tag.test_value)\r\n\r\n raise AttributeError('%s has no test value' % v)",
"def from_value(v, broaden=False):\n a = to_abstract(v, None, None)\n if broaden:\n a = _broaden(a, None)\n return a",
"def value(x):\n\tif isNumber(x): return x\n\telse: return x.value()",
"def value(x):\n if isNumber(x):\n return x\n else:\n return x.value()",
"def deserialize_value(self) -> Callable[[InternalType], ValueType]:\n return self.typecasts[ # pragma: no cover\n self.internal_type,\n self.value_type,\n ]",
"def cast(self, value):\n if value is None:\n return None\n return self.type(value)",
"def valueOrDefault(x):\n\tif isNumber(x): return x\n\telse: return x.valueOrDefault()",
"def value(self) -> Optional[T]:\n return self._value",
"def validate(cls, v):\n return v",
"def _proper_type_return(val):\n if len(val) == 0:\n return None\n elif len(val) == 1:\n return list(val.values())[0]\n else:\n return val",
"def _value_for(self, object, name, value):\n try:\n return self.value_for(value)\n except TraitError:\n self.error(object, name, value)",
"def _value_for(self, object, name, value):\n try:\n return self.value_for(value)\n except TraitError:\n self.error(object, name, value)",
"def from_value(v, broaden=False, **kwargs):\n a = to_abstract(v, **kwargs)\n if broaden:\n a = _broaden(a)\n return a",
"def to_python(value):\n return value.values[0]",
"def valueOrDefault(x):\n if isNumber(x):\n return x\n else:\n return x.valueOrDefault()",
"def checkValue(self, value):\n if self.converter and value:\n return self.converter(value)\n return value",
"def _check_type(cls, value: Any) -> T:\n if not isinstance(value, cls.type):\n raise ValueError(\n f\"{cls!r} accepts only values of type {cls.type!r}, \"\n f\"got {type(value)!r}\"\n )\n return cast(T, value)",
"def _check_type(cls, value: Any) -> T:\n if not isinstance(value, cls.type):\n raise ValueError(\n f\"{cls!r} accepts only values of type {cls.type!r}, \"\n f\"got {type(value)!r}\"\n )\n return cast(T, value)",
"def _fix_value(self, value):\n try:\n return self._castfunc(value)\n except:\n error = \"Can't put '{0}' ({1}) into a {2}. Expected a {3} object.\"\n error = error.format(\n value, # Input value\n type(value), # Type of input value\n type(self), # Type of collection\n self._type # Expected type of input value\n )\n six.reraise(TypeError, TypeError(error), sys.exc_info()[-1])",
"async def resolved(value: T) -> T:\n return value",
"def vvalue(self) -> Qval:\n return self.get(self.greedy())",
"def convert(self, value):\n return value",
"def convert_or_none(value, type_):\n try:\n return type_(value)\n except Exception:\n return None",
"def convert(self, value):\r\n return value"
] | [
"0.707001",
"0.6538014",
"0.6471687",
"0.6431161",
"0.63861257",
"0.6296982",
"0.6222986",
"0.6171691",
"0.6100444",
"0.6065048",
"0.60494566",
"0.6046325",
"0.59974855",
"0.59508926",
"0.59442955",
"0.5941331",
"0.5929134",
"0.5929134",
"0.5926267",
"0.59260654",
"0.59233695",
"0.5906915",
"0.5861412",
"0.5861412",
"0.58466035",
"0.583279",
"0.58280665",
"0.582377",
"0.5806961",
"0.5800886"
] | 0.8052303 | 0 |
Create an attribute at each size. | def testsize(self):
for size in range(5):
AttributeAbility(size=size + 1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def createAttribute(nid, label, primary, list, x, y):\n attribute = Attribute(nid, label, primary, x, y)\n list.append(attribute)",
"def _assign_sizes(self):",
"def _build_attributes(self):\n\n # We might rebuild the program because of snippets but we must\n # keep already bound attributes\n\n dtype = []\n for (name,gtype) in self.all_attributes:\n if name not in self._attributes.keys():\n attribute = Attribute(self, name, gtype)\n else:\n attribute = self._attributes[name]\n\n self._attributes[name] = attribute\n dtype.append(attribute.dtype)",
"def __init__ (self, size, name):\n\n self.size = size\n self.name = name\n self.units = [1 for x in range(size)]",
"def __init__(self, **attributes):\n for key, value in attributes.items():\n setattr(self, key, value)",
"def add_attributes(self, x):\n for k, v in x.items():\n setattr(self, k, v)",
"def create_data_set(num_attributes):\n data_set = {}\n for index in range(num_attributes):\n size = random.randint(1, 10) # nosec\n key = str(index).encode(\"utf-8\")\n data_set[key] = get_random_bytes(size)\n return data_set",
"def add_attributes(self, attrs):\n for attr in attrs:\n self.add_attribute(attr)",
"def apply(self, name, size, type):\n self.properties['name'] = name\n self.properties['size'] = size\n self.properties['type'] = type",
"def set_attr(self):\n\n # Create a new array\n self.fileh.create_array('/', 'array', self.a1)\n for i in range(self.nobjects):\n # Set an attribute\n setattr(self.fileh.root.array.attrs, \"attr\" + str(i), str(self.a1))\n # Put a mark\n self.fileh.mark()\n # Unwind all marks sequentially\n for i in range(self.niter):\n t1 = clock()\n for i in range(self.nobjects):\n self.fileh.undo()\n if verbose:\n print(\"u\", end=' ')\n if verbose:\n print()\n undo = clock() - t1\n # Rewind all marks sequentially\n t1 = clock()\n for i in range(self.nobjects):\n self.fileh.redo()\n if verbose:\n print(\"r\", end=' ')\n if verbose:\n print()\n redo = clock() - t1\n\n print(\"Time for Undo, Redo (set_attr):\", undo, \"s, \", redo, \"s\")",
"def get_attributes(cls):\r\n return [Attribute('size', '20'),\r\n Attribute('label', ''), ]",
"def __init__(self, **initial_attributes):\n\n for attribute_name, attribute_value in initial_attributes.items():\n setattr(self, attribute_name, attribute_value)",
"def __init__(self, size):\n self.size = size",
"def __init__(self, size):\n self.size = size",
"def set_element_dimensions(self, size_x, size_y, size_z):\n size_x = 1.0 * size_x\n size_y = 1.0 * size_y\n size_z = 1.0 * size_z\n x = np.repeat(size_x, self.numelements)\n y = np.repeat(size_y, self.numelements)\n z = np.repeat(size_z, self.numelements)\n self.dimensions = g.Points.from_xyz(x, y, z)\n return self",
"def to_attributes(array, *dimensions):\n dims = [d for d in array.dim_names if d not in dimensions]\n atts = list(array.att_names) + [d for d in dimensions if d in array.dim_names]\n return redimension(array, dims, atts)",
"def generate_attributes(self):\n for group in self.dict:\n for param in self.dict[group]:\n if group in self.group_douplicate and param in self.name_douplicate:\n setattr(self, group+'_'+param, self(group, param))\n else:\n setattr(self, param, self(group, param))",
"def get_attributes(cls):\r\n return [\r\n Attribute('size', '20'),\r\n Attribute('inline', False),\r\n Attribute('label', ''),\r\n ]",
"def define_attribute(self, name, atype, data=None):\n self.attributes.append(name)\n self.attribute_types[name] = atype\n self.attribute_data[name] = data",
"def add_attributes(self, attrs):\n self.attrs.add_container(attrs)",
"def store_attribute_array(self, attributes):\n\t\tself.attributes = attributes\n\n\t\t# Combine all of the attribute data into one data array\n\t\tdata = bytearray()\n\t\toffset = 0\n\t\tfor attr in attributes:\n\t\t\tattr.set_offset(offset)\n\t\t\toffset += attr.data.nbytes\n\t\t\tdata.extend(attr.data.tobytes())\n\n\t\t# Store the count for the vertices if we are not using indexing\n\t\tif (self.IBO is None):\n\t\t\tself.count = attributes[0].count\n\n\t\tglBindBuffer(GL_ARRAY_BUFFER, self.VBO)\n\n\t\tglBufferData(\n\t\t\t\t\tGL_ARRAY_BUFFER,\n\t\t\t\t\toffset,\n\t\t\t\t\tbytes(data),\n\t\t\t\t\tGL_STATIC_DRAW\n\t\t\t\t\t)",
"def test_define_attributes(self):\n\n class Test(pyperry.Base): pass\n\n self.assertEqual(len(Test.defined_attributes), 0)\n\n Test.define_attributes(['id', 'name', 'name'])\n self.assertEqual(len(Test.defined_attributes), 2)\n\n Test.define_attributes(['foo_id', 'foo_id', 'id'])\n self.assertEqual(len(Test.defined_attributes), 3)",
"def randomize(self):\n self.size = randint(1,5)\n self.resource = randint(1,3)\n self.temperature = randint(20, 1000)\n self.gravity = randint(0, 10)\n for key in self.get_atmosphere().keys():\n setattr(self, key, randint(0, 5))\n for attribute_count in range(randint(0, 3)):\n pa = PlanetaryAttribute.objects.order_by('?')[0]\n self.attributes.add(pa)",
"def __init__(self, width, length):\n self.width = width\n self.length = length",
"def add_attribute(self, attr_type, name, components):\n self.attributes[attr_type] = {\"name\": name, \"components\": components}",
"def create_attribute(owner_name, att_name, context=ast.Load(), line=0, column=0):\n attribute = ast.Attribute()\n attribute.attr = att_name\n attribute.ctx = context\n attribute.lineno = line\n attribute.col_offset = column\n\n if isinstance(owner_name, str):\n attribute_name = ast.Name()\n attribute_name.ctx = ast.Load()\n attribute_name.id = owner_name\n attribute_name.lineno = line\n attribute_name.col_offset = column\n\n attribute.value = attribute_name\n else:\n attribute.value = owner_name\n\n return attribute",
"def update(self, *args, **kwargs):\n attributes = [\"id\", \"size\", \"x\", \"y\"]\n if len(args) > 0:\n for i in range(len(args)):\n setattr(self, attributes[i], args[i])\n else:\n self.id = kwargs.get(\"id\", self.id)\n self.size = kwargs.get(\"size\", self.size)\n self.x = kwargs.get(\"x\", self.x)\n self.y = kwargs.get(\"y\", self.y)",
"def _set_attributes(self):",
"def size_attr(attr):\n suffix = attr[-1]\n multiple = 1\n\n # Detect and strip off the suffix.\n if suffix == 'K' or suffix == 'k':\n multiple = 1024L\n attr_num = attr[:-1]\n elif suffix == 'M' or suffix == 'm':\n multiple = 1024L * 1024L\n attr_num = attr[:-1]\n elif suffix == 'G' or suffix == 'g':\n multiple = 1024L * 1024L * 1024L\n attr_num = attr[:-1]\n else:\n attr_num = attr\n\n try:\n val = long(attr_num, 0) * multiple\n except ValueError:\n raise EzXMLError('\"%s\" did not parse as a size value.' % attr)\n return val",
"def set_attributes(self):\n for i, battery in enumerate(sorted(self.batteries.values(),\n key=operator.attrgetter(\"weight\"))):\n setattr(battery, \"cap\", self.caps[self.big_iterations][i])\n if self.caps[self.big_iterations][i] is 450:\n cost = 900\n elif self.caps[self.big_iterations][i] is 900:\n cost = 1350\n else:\n cost = 1800\n setattr(battery, \"cost\", cost)\n battery.capacity = self.caps[self.big_iterations][i]"
] | [
"0.6165269",
"0.6130667",
"0.5805776",
"0.5680951",
"0.5595299",
"0.55945766",
"0.5581527",
"0.5567885",
"0.55510604",
"0.5543194",
"0.55387217",
"0.5535529",
"0.5517087",
"0.5517087",
"0.54875994",
"0.5471983",
"0.54380286",
"0.54255795",
"0.5416302",
"0.5413304",
"0.54108626",
"0.5404962",
"0.5404574",
"0.53687865",
"0.53467345",
"0.5334691",
"0.5317587",
"0.5308837",
"0.52918845",
"0.5290834"
] | 0.70325506 | 0 |
Create each attribute ability and verify the Ability Cost. | def testAC(self):
for size in range(5):
for attr in ('ST', 'DX'):
a = AttributeAbility([attr,], size + 1)
self.assertEqual(a.AC, (2000, 4000, 7000, 15000, 25000)[size])
for attr in ('IQ', 'Dam'):
a = AttributeAbility([attr,], size + 1)
self.assertEqual(a.AC, (1000, 2000, 3500, 7000, 15000)[size])
a = AttributeAbility(['MA',], size + 1)
self.assertEqual(a.AC, (1000, 2000, 3000, 6000, 12000)[size])
a = AttributeAbility(['Hit',], size + 1)
self.assertEqual(a.AC, (1000, 2500, 5000, 10000, 18000)[size]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testattributes(self):\n for attr in ('ST', 'DX', 'IQ', 'MA', 'Dam', 'Hit'):\n AttributeAbility([attr,])",
"def testsize(self):\n for size in range(5):\n AttributeAbility(size=size + 1)",
"def testattributes(self):\n for attr in AmuletAbility.attributes:\n a = AmuletAbility('Attribute', attr=attr)\n self.assert_(attr in str(a))\n self.assertEqual(a.attribute, attr)\n self.assertTrue(isinstance(a.AC, int))\n self.assertTrue(isinstance(a.description(), str))",
"def testabilities(self):\n for ability in AmuletAbility.typelist:\n a = AmuletAbility(ability)\n self.assertEqual(a.type, ability)\n if ability != 'Attribute':\n self.assert_(ability in str(a))\n self.assertTrue(isinstance(a.AC, int))\n self.assertTrue(isinstance(a.description(), str))",
"def testMA(self):\n for size in range(5):\n a = AttributeAbility(['MA',], size + 1)\n self.assert_(str((size + 1) * 2) in str(a))\n self.assert_('MA' in str(a))",
"def testabilities(self):\n for ability in WeaponAbility.typelist:\n a = WeaponAbility(ability)\n self.assert_(ability in str(a))\n self.assertTrue(isinstance(a.AC, int))\n self.assertTrue(isinstance(a.description(), str))",
"def create(self):\n\n self._calculate_hp()\n self.race.alterAbilities()\n self.race.racialAbilities()",
"def test_attractor_list(self):\n assert len(get_attractor_list()) > 130",
"def _validability(self, ability):\n return isinstance(ability, AttributeAbility)",
"def testrandom(self):\n for i in range(100):\n AmuletAbility()",
"def _numabilities(self):\n rolls_remaining = 1\n numabilities = 0\n numspecials = 0\n while rolls_remaining > 0:\n roll = random.randint(1, 12)\n if roll == 1: # roll twice more\n rolls_remaining += 1\n elif roll == 2: # special & roll again\n numspecials += 1\n numabilities += 1\n else: # attribute\n numabilities += 1\n rolls_remaining -= 1\n if numabilities >= self.maxabilities: # limit total abilities\n rolls_remaining = 0\n numspecials = max(numspecials, # limit attr abilities\n numabilities - len(self.attributes))\n self.specials = numspecials\n return numabilities",
"def testenhancements(self):\n list = [MentalAbility('Fireball', 3),]\n a = WeaponAbility('Enhanced', abilities=list)\n self.assertEqual(a.abilities, list)\n self.assertEqual(a.AC, list[0].AC)\n list *= 5\n a = WeaponAbility('Enhanced', abilities=list)\n self.assertEqual(a.abilities, list)\n self.assertEqual(a.AC, list[0].AC * (1 + 2 + 4 + 8 + 16))",
"def testrandom(self):\n for i in range(100):\n WeaponAbility()",
"def test_create_with_permissions(self):\n permissions = Permission.objects.filter(name__in=('Can add course mode', 'Can change course mode'))\n for permission in permissions:\n self.user.user_permissions.add(permission)\n\n self.assert_can_create_course()",
"def testsize(self):\n for size in range(1, 5):\n a = WeaponAbility('Defender', size=size)\n self.assert_(str(size) in str(a))",
"def allocateAllAttributePoints(self):\n\t\turl = \"https://habitica.com/api/v3/user/allocate-now\"\n\t\treturn(postUrl(url, self.credentials))",
"def testinvalidattribute(self):\n self.assertRaises(AbilityError, AttributeAbility, 'Invalid')\n self.assertRaises(AbilityError, AttributeAbility, '', 3)",
"def testinvalidsize(self):\n self.assertRaises(AbilityError, AttributeAbility, size=0)\n self.assertRaises(AbilityError, AttributeAbility, size=6)",
"def add_attributes(self, attribute_list, simulate=False):\n with LayerEditingManager(self.layer, 'Add attributes', DEBUG):\n # add attributes\n layer_pr = self.layer.dataProvider()\n proposed_attribute_dict = {}\n proposed_attribute_list = []\n for input_attribute in attribute_list:\n input_attribute_name = input_attribute.name()[:10]\n proposed_attribute_name = input_attribute_name\n i = 1\n while True:\n current_attribute_names = \\\n [attribute.name() for attribute in layer_pr.fields()]\n if proposed_attribute_name in current_attribute_names:\n # If the attribute is already assigned, change the\n # proposed_attribute_name\n i_num_digits = len(str(i))\n # 10 = shapefile limit\n # 1 = underscore\n max_name_len = 10 - i_num_digits - 1\n proposed_attribute_name = '%s_%d' % (\n input_attribute_name[:max_name_len], i)\n i += 1\n else:\n # If the attribute name is not already assigned,\n # add it to the proposed_attribute_dict\n proposed_attribute_dict[input_attribute_name] = \\\n proposed_attribute_name\n input_attribute.setName(proposed_attribute_name)\n proposed_attribute_list.append(input_attribute)\n break\n if not simulate:\n added_ok = layer_pr.addAttributes(proposed_attribute_list)\n if not added_ok:\n raise AttributeError(\n 'Unable to add attributes %s' %\n proposed_attribute_list)\n return proposed_attribute_dict",
"def generate_ca(valid_attributes):\n attr_list = valid_attributes.split(',')\n nb_attributes = len(attr_list)\n\n gen_g1 = G1.generator()\n gen_g2 = G2.generator()\n exp = [G1.order().random() for _ in range(nb_attributes + 1)]\n\n pk = [gen_g1] + [gen_g1 ** i for i in exp[1:]] + [gen_g2] + [gen_g2 ** i for i in exp]\n sk = gen_g1 ** exp[0]\n\n sk = [sk, pk, attr_list]\n pk = [pk, attr_list]\n\n\n return (jsonpickle.encode(pk).encode(), jsonpickle.encode(sk).encode())",
"def _abilities_all_units(self) -> Counter:\n abilities_amount = Counter()\n for unit in self.units + self.structures: # type: Unit\n for order in unit.orders:\n abilities_amount[order.ability] += 1\n if not unit.is_ready:\n if self.race != Race.Terran or not unit.is_structure:\n # If an SCV is constructing a building, already_pending would count this structure twice\n # (once from the SCV order, and once from \"not structure.is_ready\")\n abilities_amount[self._game_data.units[unit.type_id.value].creation_ability] += 1\n\n return abilities_amount",
"def testinvalidattribute(self):\n self.assertRaises(AbilityError, AmuletAbility, 'Attribute', attr='Dam')\n self.assertRaises(AbilityError, AmuletAbility, 'Attribute', attr='')\n self.assertRaises(AbilityError, \n AmuletAbility, 'Control NPC', attr='ST')",
"def testequality(self):\n for i in range(len(AmuletAbility.typelist[:8])): # no extra params\n type1 = AmuletAbility.typelist[i]\n self.assertEqual(AmuletAbility(type1), \n AmuletAbility(type1))\n for type2 in AmuletAbility.typelist[i+1:8]:\n self.assertNotEqual(AmuletAbility(type1), \n AmuletAbility(type2))\n\n for i in range(len(AmuletAbility.elements)):\n e1 = AmuletAbility.elements[i]\n self.assertEqual(AmuletAbility('Proof', element=e1), \n AmuletAbility('Proof', element=e1))\n for e2 in AmuletAbility.elements[i+1:]:\n self.assertNotEqual(AmuletAbility('Proof', element=e1),\n AmuletAbility('Proof', element=e2))\n\n for i in range(len(AmuletAbility.attributes)):\n a1 = AmuletAbility.attributes[i]\n self.assertEqual(AmuletAbility('Attribute', attr=a1), \n AmuletAbility('Attribute', attr=a1))\n for a2 in AmuletAbility.attributes[i+1:]:\n self.assertNotEqual(AmuletAbility('Attribute', attr=a1), \n AmuletAbility('Attribute', attr=a2))\n\n for s1 in range(1, 5):\n self.assertEqual(AmuletAbility('Skepticism', size=s1), \n AmuletAbility('Skepticism', size=s1))\n for s2 in range(s1 + 1, 5):\n self.assertNotEqual(AmuletAbility('Skepticism', size=s1), \n AmuletAbility('Skepticism', size=s2))",
"def _validability(self, ability):\n return (isinstance(ability, AttributeAbility) or\n isinstance(ability, WeaponAbility))",
"def __add_expanded_abilities(self, name):\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n abilities = loop.run_until_complete(self.api.process_requests(\n \"ability\", name))\n ability_list = []\n factory = PokemonAbilityFactory(abilities, True)\n for ability in factory.create():\n ability_list.append(ability)\n return ability_list",
"def test_models_organization_get_abilities_member_user(self):\n access = factories.UserOrganizationAccessFactory(role=\"member\")\n\n with self.assertNumQueries(1):\n abilities = access.organization.get_abilities(access.user)\n\n self.assertEqual(\n abilities,\n {\n \"delete\": False,\n \"get\": True,\n \"patch\": False,\n \"put\": False,\n \"manage_accesses\": False,\n },\n )",
"def insert_skill_abilities(cursor):\n # Get the skill of every ability\n abilities_skills = dict()\n with open(SKILL_ABILITIES_PATH, encoding='UTF-8') as skills_file:\n skills_dict = ujson.load(skills_file)\n for skill_id, skill_abilities in skills_dict.items():\n for skill_ability in skill_abilities:\n abilities_skills[skill_ability.lower()] = skill_id.lower()\n\n # Get info from HTML\n abilities_html_dict = dict()\n with open(ABILITIES_HTML_PATH, encoding='UTF-8') as abilities_html_file:\n soup = BeautifulSoup(abilities_html_file, 'html.parser')\n for ability in soup.findAll('div'):\n # Remove clutter from attribute ID\n ability_id = ability.attrs['id'][18:-8]\n ability_name = ability.b.text\n ability_type = ''\n ability_max_level = 0\n ability_req_skill_level = 0\n ability_desc = ability.contents[-1].strip()\n # Parse all except the name and desc that we already got\n for i in range(2, len(ability.contents)-2):\n if isinstance(ability.contents[i], Tag):\n if ability.contents[i].text == \"Type:\":\n ability_type = ability.contents[i+1].strip()\n elif ability.contents[i].text == \"Max Level:\":\n ability_max_level = int(ability.contents[i+1].strip())\n elif ability.contents[i].text == \"Required Skill Level:\":\n ability_req_skill_level = int(ability.contents[i+1].strip())\n elif ability.contents[i].text == \"Circle:\":\n pass\n else:\n if ability.contents[i].name != 'br':\n l.warning(\"There is a non handled tag {} in ability: {}\".format(ability.contents[i].text,\n ability))\n abilities_html_dict[ability_id.lower()] = {\n 'name': ability_name,\n 'type': ability_type,\n 'max_level': ability_max_level,\n 'req_skill_level': ability_req_skill_level,\n 'desc': ability_desc\n }\n\n with open(ABILITIES_JSON_PATH, encoding='UTF-8') as abilities_file:\n abilities_dict = ujson.load(abilities_file)\n abilities = list()\n # Get list of sorted abilities\n sorted_abilities_ids = list()\n for ability_id, ability in abilities_dict.items():\n if ability_id:\n sorted_abilities_ids.append((ability_id, int(ability.get(\"ClassID\", 0))))\n else:\n sorted_abilities_ids.append((ability_id, 0))\n sorted_abilities_ids.sort(key=lambda tup: tup[1])\n # Start processing them\n for ability_id, _ in sorted_abilities_ids:\n ability = abilities_dict[ability_id]\n html_ability = abilities_html_dict.get(ability.get(\"ClassName\", \"\").lower(), dict())\n ability_info = list()\n # Get Ability Id\n ability_info.append(int(get_value(ability, \"Ability\", \"ClassID\", str)))\n # Get Ability Name\n ability_info.append(get_value(html_ability, \"Ability\", \"name\", str))\n # Get Ability Type\n ability_info.append(get_value(html_ability, \"Ability\", \"type\", str))\n # Get Ability Required Circle\n ability_info.append(int(get_value(ability, \"Ability\", \"ReqCircle\", int)))\n # Get Ability Max Level\n ability_info.append(get_value(html_ability, \"Ability\", \"max_level\", int))\n # Get Ability Desc\n ability_info.append(get_value(html_ability, \"Ability\", \"desc\", str))\n # Get Ability Icon\n ability_info.append(format_icon(get_value(ability, \"Ability\", \"Icon\", str)))\n # Get Skill Class\n ability_info.append(get_ability_skill(cursor, abilities_skills.get(ability_id.lower(), '')))\n # Get Ability Required Skill Level\n ability_info.append(get_value(html_ability, \"Ability\", \"req_skill_level\", int))\n\n abilities.append(tuple(ability_info))\n\n abilities = tuple(abilities)\n\n cursor.executemany(\"INSERT INTO skill_abilities (id, name, type, circle, max_level, desc, icon, skill_id, \"\n \"req_skill_level) VALUES (?, 
?, ?, ?, ?, ?, ?, ?, ?)\", abilities)",
"def testrange(self):\n for range_ in range(1, 5):\n a = WeaponAbility('Animated', range=range_)\n self.assert_(str(range_) in str(a))",
"def add_ability(self, ability):\n self.abilities.append(ability)",
"def test_create_att_object():\n from .scripts.initializedb import create_att_object\n att_object = create_att_object(\"a\", \"b\", \"c\", \"d\", \"c\")\n assert isinstance(att_object, Attribute)"
] | [
"0.73843056",
"0.66870636",
"0.6642298",
"0.66236144",
"0.6408896",
"0.6198641",
"0.5768418",
"0.5674704",
"0.5548058",
"0.55243206",
"0.54773396",
"0.541924",
"0.5391959",
"0.53720325",
"0.5355543",
"0.5314864",
"0.5313",
"0.5310781",
"0.5279219",
"0.5240787",
"0.5218387",
"0.52121425",
"0.5203369",
"0.5192635",
"0.51760703",
"0.5166614",
"0.51562095",
"0.51139313",
"0.51116914",
"0.510406"
] | 0.6941932 | 1 |