query (string, lengths 9-9.05k) | document (string, lengths 10-222k) | metadata (dict) | negatives (list, length 30) | negative_scores (list, length 30) | document_score (string, lengths 4-10) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Retrieves from the server the list of servers associated with the queried guild | def get_list_servers(p_id_guilda):
server_list = select_data.get_guild_servers(p_id_guilda)
#css_mensagem = '```css\n####### SERVERS ################'
list_server = []
for server in server_list:
if server['description'] != None:
description_server = server['description']
else:
description_server = ''
return_data = '\n### Id Server: ' + str(server['id_server_sk']) + ' - ' + server['name_guild'] + '\n### Map: ' + server['map_name'] + '\n### Modo: ' + server['mode_server'] + '\n### Patreon: ' + server['map_patreon'] + '\n### Description: ' + description_server + '\n -----------------------------------------------------------------------------------'
list_server.append(return_data)
#css_mensagem = css_mensagem + return_data
#css_mensagem = css_mensagem + '\n##############################```'
return list_server #css_mensagem
| {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def redis_client_list(self):\n def func(server):\n return server.server.client_list()\n self.__run_redis_cmd(func)",
"def load_servers_from_db(self):\r\n db = self.getDB()\r\n cursor = db.cursor()\r\n\r\n res = cursor.execute(\"\"\"SELECT * FROM `IRC_servers` WHERE `Registred_users_userID` = %s;\"\"\", (self.userID,))\r\n\r\n result = cursor.fetchall()\r\n db.close()\r\n print(\"RESULT\", result)\r\n servers = list()\r\n\r\n for res in result:\r\n server_dict_temp = {\"serverID\": res[0],\r\n \"serverSessionID\": res[1],\r\n \"nickname\": res[2],\r\n \"isAway\": res[3],\r\n \"isConnected\": res[4],\r\n \"Registred_users_userID\": res[5],\r\n \"serverName\": res[6],\r\n \"serverIP\": res[7],\r\n \"serverPort\": res[8],\r\n \"useSSL\": res[9]}\r\n servers.append(server_dict_temp)\r\n self.server_list_text = servers",
"async def listplayers(self, ctx, *, server_name=None):\n if server_name:\n server_name = server_name.replace('_', ' ').title()\n msg = await ctx.send(f'**Getting Data for the {server_name} server**')\n await ctx.channel.trigger_typing()\n resp = await self.bot.aio_session.get(\n f'{self.bot.api_base}/rcon/{ctx.guild.id}/{server_name}/listplayers/',\n headers=self.bot.auth_header\n )\n if resp.status == 200:\n message = '\\n'.join(await resp.json())\n await ctx.channel.trigger_typing()\n await msg.delete()\n await ctx.send(f'**Players currently on the {server_name} server:**\\n{message}')\n return\n elif resp.status < 500:\n message = (await resp.json()).get('details', 'There was a problem. Please try again')\n else:\n message = \"There was an error on my server. I have notified the maintainers.\"\n await ctx.send(message)\n else:\n futures = []\n resp = await self.bot.aio_session.get(\n f'{self.bot.api_base}/rcon/{ctx.guild.id}/',\n headers=self.bot.auth_header\n )\n if resp.status != 200:\n await ctx.send('There was a problem getting the servers for this guild.')\n return\n guild_servers = await resp.json()\n for server in guild_servers:\n msg = await ctx.send(f'**Getting Data for the {server[\"name\"]} server**')\n\n # noinspection PyShadowingNames\n async def _listplayers(server_name: str, msg: discord.Message):\n resp = await self.bot.aio_session.get(\n f'{self.bot.api_base}/rcon/{ctx.guild.id}/{server_name}/listplayers/',\n headers=self.bot.auth_header\n )\n if resp.status == 200:\n message = '\\n'.join(await resp.json())\n await ctx.channel.trigger_typing()\n await msg.delete()\n await ctx.send(f'**Players currently on the {server_name} server:**\\n{message}')\n return\n elif resp.status < 500:\n message = f'Error getting data for {server_name}' + \\\n (await resp.json()).get('details', 'Please try again')\n else:\n message = \"There was an error on my server. I have notified the maintainers.\"\n await ctx.send(message)\n\n futures.append(_listplayers(msg=msg, server_name=server['name']))\n if futures:\n asyncio.ensure_future(asyncio.gather(*futures))\n else:\n await ctx.send('There are no available servers for this guild.')",
"def retrieveGuildsInfo(self):\n serverInfo = self.con.getGuildsInfo()\n\n for server in serverInfo:\n serverData = server.split(', ')\n self.serverSettings[serverData[0]] = serverData[1]",
"async def listservers_command(self, ctx):\n serverlist = \"```py\\n\"\n serverlist += str([\n member.server.name for member in\n self.bot.get_all_members() if\n member.id == ctx.message.author.id])\n serverlist += \"\\n```\"\n try:\n await self.bot.send_message(\n ctx.message.channel,\n content=serverlist)\n except discord.Forbidden:\n await self.bot.BotPMError.resolve_send_message_error(\n self.bot, ctx)",
"async def list(self, ctx):\n server = ctx.message.server\n if server.id not in self.db:\n self.db[server.id] = {}\n dataIO.save_json(\"data/lootbox/servers.json\", self.db)\n if len(self.db[server.id]) < 1:\n await self.bot.say(\"No boxes have been created for this server yet, please create some using [p]box create\"\n \" first, thanks\")\n return\n boxes = self.db[server.id].keys()\n await self.bot.say(\"Here are this server's boxes:\\n{}\".format(\"\\n\".join(boxes)))",
"def Servers(self, server=None):\n if server:\n self.current = server\n return \"successful\"\n\n servers = []\n for x in XbmcServers.select():\n servers.append({'name': x.name, 'id': x.id})\n if len(servers) < 1:\n return\n return {'current': self.current, 'servers': servers}",
"def get_all_servers(self) -> List[Server]:\n pass",
"def get_client_list(self):\r\n cursor = self.conn.cursor()\r\n cursor.execute(\"\"\"SELECT * FROM CLIENT\"\"\")\r\n return cursor.fetchall()",
"def clients(self, server):\n servers = coordinator.get_job_servers(self.job)\n for sid, name in servers.iteritems():\n if name == server:\n # assuming sid is zero-based and all sid < number of servers:\n clpsv = (self.numclients + len(servers) - 1) / len(servers)\n clients = range(self.numclients)[sid * clpsv:(sid + 1) * clpsv]\n return [str(n) for n in clients]\n return []",
"def get_servers(self):\n\t\treturn self.__servers",
"def clients(self, r):\r\n return r.clients",
"def list_servers(self, request):\n token = request.form.get('token')\n if token is None:\n token = request.args.get('token')\n\n rest_client = RestClient.instance()\n if (not rest_client.validate_token(token)):\n return (401, 'Unauthorized')\n\n game_servers = GameServers.instance().get_servers()\n out = []\n for game_server in game_servers.values():\n out.append({\n 'name': game_server.get_name(),\n 'host': game_server.get_host(),\n 'port': game_server.get_port(),\n 'owner': game_server.get_owner()\n })\n return (200, json.dumps(out))",
"def get_all(self):\n\n servers = self._scoped_servers()\n servers = [{u'id': x.id, u'name': x.name} for x in servers]\n return self.format_collection(servers)",
"def get_servers(self) -> dict:\n uri = f\"{self.uri}/servers\"\n\n response = self.request(uri=uri)\n return response.json()",
"def get_srv_list():\n srv_list = [splitext(basename(sock))[0] \\\n for sock in glob.glob(CEPH_SOCKET_PATH + \"*.asok\")]\n return srv_list",
"def list_clients(): # Listar clientes\n global clients\n\n for idx, client in enumerate(clients):\n print('{}: {}'.format(idx, client))",
"def get_server_info(p_id_guilda, p_id_server):\r\n server_list = select_data.get_guild_servers(p_id_guilda)\r\n \r\n for server in server_list:\r\n if server['id_server_sk'] == p_id_server:\r\n return_data = server\r\n break\r\n return return_data",
"def do_clients_by_server(self, arg):\n\n args = arg.split()\n if len(args) != 0:\n common.error('`clients_by_server` doesn\\'t expect any arguments.')\n else:\n servers = self.central_server.download_service.get_servers()\n\n if not servers.is_empty():\n print(\"%i server(s):\" % len(\n servers.servers))\n print('')\n for server in servers.servers:\n print('server:')\n print(str(server))\n print('clients:')\n if server.clients:\n clients = [client[0] for client in server.clients]\n print(tabulate(\n [[c.username, c.host, str(c.port)] for c\n in clients],\n headers=['Username', 'Host', 'Port'],\n tablefmt=\"psql\"))\n print('')\n else:\n print('No clients\\n')\n else:\n print('There\\'s no available servers')",
"def run(self):\n self._list_servers()",
"def user_list(server_object, client, address, command_args):\n\n\tmsg = \"\"\n\n\t#: Create a formatted string of all the users.\n\tfor usr in server_object.usrs.values():\n\t\tmsg += usr + '\\n'\n\n\tclient.send(msg.encode())",
"def get_status_servers(p_id_guilda):\r\n json_file = select_data.get_guild_servers(p_id_guilda)\r\n css_mensagem = '```css\\n'\r\n mensagem = ''\r\n\r\n for x in json_file:\r\n\r\n rcon_host = x['ip_server']\r\n rcon_port = int(x['rcon_port'])\r\n rcon_pwd = x['rcon_password']\r\n\r\n mensagem = mensagem + '\\n### Id Server: ' + str(x['id_server_sk']) + ' - ' + x['name_guild'] + '\\n### Mapa: ' + x['map_name'] + '\\n### Modo: ' + x['mode_server'] + '\\n### Patreon: ' + x['map_patreon'] \r\n\r\n try:\r\n rcon.RCONClient(rcon_host, rcon_port, rcon_pwd)\r\n mensagem = mensagem + '\\n### Status: Online' + '\\n### IP Server: ' + x['ip_server'] + '\\n -----------------------------------------------------------------------------------' + '\\n'\r\n \r\n except:\r\n mensagem = mensagem + '\\n### Status: ::Offline' + '\\n### IP Server: ' + x['ip_server'] + '\\n -----------------------------------------------------------------------------------' + '\\n' \r\n pass\r\n \r\n \r\n css_mensagem = css_mensagem + mensagem + '\\n ```'\r\n return css_mensagem",
"def list_servers():\n (code, message) = rest_api.list_servers(request)\n if (code == 200):\n return message\n else:\n abort(code)",
"def list_mc_servers(self, by_name=False, all_data=False):\n status, data, errors, messages = self._make_get_request(MCAPIRoutes.LIST)\n \n if status == 200:\n if by_name:\n y = 0\n returnData = dict()\n for items in data['servers']:\n returnData[y] = items.get(\"id\", 0)\n y += 1\n returnData[y] = items.get(\"name\", 0)\n return returnData\n if all_data:\n y = 0\n returnData = dict()\n for items in data['servers']:\n returnData[y] = items.get(\"id\", 0)\n y += 1\n returnData[y] = items.get(\"name\", 0)\n y += 1\n returnData[y] = items.get(\"running\", 0)\n y = y + 1\n returnData[y] = items.get(\"auto_start\", 0)\n return returnData\n del returnData\n else:\n return data['servers']\n elif status == 500:\n self._check_errors(errors, messages)",
"def list_hosts():\n db = sqlite3.connect('/home/tropius/TROPIUS/TROPIUS.db')\n res = hosts.get_all(db)\n res = {'list': res}\n return jsonify(res)",
"def list():\n rino.remote.list()",
"def get_all_index_servers(self):\n try:\n conn = psycopg2.connect(\"dbname='{0}'\".format(DATABASE))\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n cur.execute(\"SELECT * FROM host WHERE type = 'Index Server';\")\n results = cur.fetchall()\n cur.close()\n return results\n except Exception as e:\n print(e)",
"def get_servers():\n all_servers = []\n start = 0\n size = 100\n\n while True:\n params = {\n 'start': start,\n 'size': size,\n 'names': 1,\n 'cdata': 1\n }\n\n xml_content = _call(\n servers_base_url + 'get_server_list.php',\n parser='xml',\n params=params\n )\n\n servers = [Server.load(server_node) for server_node in xml_content.xpath('/result/server')]\n\n if not servers:\n break\n\n all_servers.extend(servers)\n\n if servers[-1].is_last:\n break\n\n start += size\n\n _set_servers_location(all_servers)\n _set_server_event(all_servers)\n\n all_servers.sort(\n key=lambda s: s.players.current,\n reverse=True\n )\n\n return all_servers",
"async def servers(self, ctx):\n # [p]servers\n\n owner = ctx.message.author\n servers = list(self.bot.servers)\n server_list = {}\n msg = \"\"\n for i in range(0, len(servers)):\n server_list[str(i)] = servers[i]\n msg += \"{}: {}\\n\".format(str(i), servers[i].name)\n msg += \"\\nTo leave a server just type its number.\"\n for page in pagify(msg, ['\\n']):\n await self.bot.say(page)\n while msg is not None:\n msg = await self.bot.wait_for_message(author=owner, timeout=15)\n if msg is not None:\n msg = msg.content.strip()\n if msg in server_list.keys():\n await self.leave_confirmation(server_list[msg], owner, ctx)\n else:\n break\n else:\n break",
"def clients():\n pass"
]
| [
"0.68262804",
"0.66768247",
"0.6599744",
"0.65755504",
"0.64908904",
"0.6244407",
"0.6241114",
"0.61838216",
"0.61600584",
"0.60755295",
"0.6030671",
"0.59916914",
"0.5982203",
"0.59614354",
"0.5943407",
"0.5913406",
"0.58924514",
"0.58590823",
"0.5850152",
"0.5847823",
"0.5825379",
"0.5817215",
"0.5808849",
"0.5803274",
"0.5742569",
"0.5740125",
"0.57371986",
"0.5700078",
"0.5696217",
"0.56901693"
]
| 0.6723718 | 1 |
Given a graph G and a source and sink (background and foreground), FF uses Ford-Fulkerson to find the max flow of the graph (= min cut) | def FF(G):
# create index for background and foreground
back = len(G)-2
fore = len(G)-1
# create empty path to start (will be filled by one iteration of BFS)
# BFS will check all nodes (filling the path completely) and will update path
path = [-1]*len(G)
max_flow = 0
# while a path exists between the foreground and background on the image
while(find_path(G, path)):
path_flow = float("inf")
s = back
# go until reaching the foreground
while(s != fore):
# take the min route to traverse the image
path_flow = min(path_flow, G[path[s],s])
s = path[s]
# Add path flow to overall flow
max_flow += path_flow
# update residual capacities of the edges and reverse edges along path
v = back
while(v != fore):
u = path[v]
G[u][v] -= path_flow
G[v][u] += path_flow
v = path[v]
return path | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ford_fulkerson_algorithm(graph: np.ndarray, source: int, sink: int) -> np.ndarray:\r\n\r\n residual_graph = copy.deepcopy(graph)\r\n row = len(residual_graph)\r\n parent = [-1] * row\r\n max_flow = 0\r\n\r\n if source == sink or sink < 0 or source < 0 or source >= row or sink >= row:\r\n raise WrongInputException('Wrong input source/sink vertice(s)')\r\n\r\n while bfs(residual_graph, row, source, sink, parent):\r\n\r\n path_flow = float(\"Inf\")\r\n s = sink\r\n while s != source:\r\n path_flow = min(path_flow, residual_graph[parent[s]][s])\r\n s = parent[s]\r\n\r\n max_flow += path_flow\r\n\r\n v = sink\r\n while v != source:\r\n u = parent[v]\r\n residual_graph[u][v] -= path_flow\r\n residual_graph[v][u] += path_flow\r\n v = parent[v]\r\n print(\"Max flow: %d\" % max_flow)\r\n\r\n return residual_graph",
"def max_flow(self, source, sink):\n\n max_flow = 0\n\n for i in range(self.V):\n for j in range(self.V):\n self.flow[i, j] = 0\n\n while self.bfs(source, sink):\n increment = maxint\n\n u = sink\n\n while self.pred[u] != -1:\n increment = min(\n increment, self.capacity[self.pred[u], u] - self.flow[self.pred[u], u])\n u = self.pred[u]\n\n u = sink\n\n while self.pred[u] != -1:\n self.flow[self.pred[u], u] += increment\n self.flow[u, self.pred[u]] -= increment\n u = self.pred[u]\n\n max_flow += increment\n\n return max_flow",
"def find_max_flow(graph, start, end):\n try:\n flow_value, _ = nx.maximum_flow(graph.graph, start, end, capacity=\"weight\")\n return flow_value\n except Exception as e:\n print(\"Error in Max flow computation \", e)\n return \"NaN\"",
"def update_flow(self):\n N = len(self.vertices)\n _vertices = self.vertices+['_source', '_sink']\n s, t = _vertices.index('_source'), _vertices.index('_sink')\n cost, capacity = dok_matrix((N+2, N+2)), dok_matrix((N+2, N+2))\n\n cost[:N, :N] = self.cost\n capacity[:N, :N] = self.upper_bound-self.lower_bound\n # _source to main vertices\n l_in = self.lower_bound.toarray().sum(axis=0)\n us, = l_in.nonzero()\n for u in us:\n capacity[s, u] = l_in[u]\n # main vertices to _sink\n l_out = self.lower_bound.toarray().sum(axis=1)\n us, = l_out.nonzero()\n for u in us:\n capacity[u, t] = l_out[u]\n # sink to source\n infinite_flow = self.upper_bound.toarray().sum()\n capacity[_vertices.index('sink'), _vertices.index('source')] = infinite_flow\n\n # get a feasible flow on original graph by finding the max flow on\n # auxiliary graph\n aux_fg = FlowGraph(_vertices, cost, capacity, True)\n aux_fg.FordFulkerson()\n assert aux_fg.residual[s].toarray().sum()==0, 'feasible flow within bounds not found'\n\n self.residual = aux_fg.residual[:N, :N]\n s, t = self.vertices.index('source'), self.vertices.index('sink')\n self.residual[s, t] = 0\n self.residual[t, s] = 0\n\n self.FordFulkerson()",
"def maximum_flow(graph, start, end):\n if not isinstance(graph, BasicGraph):\n raise TypeError(f\"Expected subclass of BasicGraph, not {type(graph)}\")\n Graph = type(graph)\n \n if start not in graph:\n raise ValueError(f\"{start} not in graph\")\n if end not in graph:\n raise ValueError(f\"{end} not in graph\")\n\n inflow = sum(d for s, e, d in graph.edges(from_node=start))\n outflow = sum(d for s, e, d in graph.edges(to_node=end))\n unassigned_flow = min(inflow, outflow) # search in excess of this 'flow' is a waste of time.\n total_flow = 0\n # -----------------------------------------------------------------------\n # The algorithm\n # I reviewed a number of algorithms, such as Ford-fulkerson algorithm,\n # Edmonson-Karp and Dinic, but I didn't like them due to their naive usage\n # of DFS, which leads to a lot of node visits.\n #\n # I therefore choose to invert the capacities of the graph so that the\n # capacity any G[u][v] = c becomes 1/c in G_inverted.\n # This allows me to use the shortest path method to find the path with\n # most capacity in the first attempt, resulting in a significant reduction\n # of unassigned flow.\n #\n # By updating G_inverted, with the residual capacity, I can keep using the\n # shortest path, until the capacity is zero, whereby I remove the links\n # When the shortest path method returns 'No path' or when unassigned flow\n # is zero, I exit the algorithm.\n #\n # Even on small graphs, this method is very efficient, despite the overhead\n # of using shortest path. For very large graphs, this method outperforms\n # all other algorithms by orders of magnitude.\n # -----------------------------------------------------------------------\n\n edges = [(n1, n2, 1 / d) for n1, n2, d in graph.edges() if d > 0]\n inverted_graph = Graph(from_list=edges) # create G_inverted.\n capacity_graph = Graph() # Create structure to record capacity left.\n flow_graph = Graph() # Create structure to record flows.\n\n while unassigned_flow:\n # 1. find the best path\n d, path = shortest_path(inverted_graph, start, end)\n if d == float(\"inf\"): # then there is no path, and we must exit.\n return total_flow, flow_graph\n # else: use the path and lookup the actual flow from the capacity graph.\n\n path_flow = min([min(d, capacity_graph.edge(s, e, default=float(\"inf\"))) for s, e, d in graph.edges(path=path)])\n\n # 2. update the unassigned flow.\n unassigned_flow -= path_flow\n total_flow += path_flow\n\n # 3. record the flows and update the inverted graph, so that it is\n # ready for the next iteration.\n edges = graph.edges(path)\n for n1, n2, d in edges:\n # 3.a. recording:\n v = flow_graph.edge(n1, n2, default=None)\n if v is None:\n flow_graph.add_edge(n1, n2, path_flow)\n c = graph.edge(n1, n2) - path_flow\n else:\n flow_graph.add_edge(n1, n2, value=v + path_flow)\n c = graph.edge(n1, n2) - (v + path_flow)\n capacity_graph.add_edge(n1, n2, c)\n\n # 3.b. updating:\n # if there is capacity left: update with new 1/capacity\n # else: remove node, as we can't do 1/zero.\n if c > 0:\n inverted_graph.add_edge(n1, n2, 1 / c)\n else:\n inverted_graph.del_edge(n1, n2)\n return total_flow, flow_graph",
"def max_flow_min_cut(vertices, start, end):\n\n max_flow = 0 # There is no flow initially\n\n while True:\n bfs(vertices, start)\n\n if not end.bfs_visited:\n # There is no path between start and end\n break\n\n path = end.get_bfs_path()\n bottleneck = min([edge.weight for edge in path])\n\n # Update edges and residual edges values\n for edge in path:\n edge.weight -= bottleneck\n edge.inverse_edge.weight += bottleneck\n max_flow += bottleneck\n\n min_cut = calculate_min_cut(vertices, start)\n return max_flow, min_cut",
"def max_flow(self, source, sink):\n\n path = self.valid_path(source, sink, [])\n\n while path:\n # get the maximum possible flow that can be taken from this path:\n max_flow = min([edge.capacity for edge in path])\n for edge in path:\n self.edges[edge] += max_flow\n path = self.valid_path(source, sink, [])\n\n # Compute all the flows from the neighbors of source:\n return sum([self.edges[edge] for edge in self.adjacents[source]])",
"def test_max_flow(default_plugin_resolver):\n dpr = default_plugin_resolver\n source_node = 0\n target_node = 7\n ebunch = [\n (0, 1, 9),\n (0, 3, 10),\n (1, 4, 3),\n (2, 7, 6),\n (3, 1, 2),\n (3, 4, 8),\n (4, 5, 7),\n (4, 2, 4),\n (5, 2, 5),\n (5, 6, 1),\n (6, 2, 11),\n ]\n nx_graph = nx.DiGraph()\n nx_graph.add_weighted_edges_from(ebunch)\n graph = dpr.wrappers.Graph.NetworkXGraph(nx_graph, edge_weight_label=\"weight\")\n\n expected_flow_value = 6\n bottleneck_nodes = {2, 4}\n expected_nodemap = {2: 6, 4: 6}\n\n mv = MultiVerify(dpr)\n results = mv.compute(\"flow.max_flow\", graph, source_node, target_node)\n\n # Compare flow rate\n results[0].assert_equal(expected_flow_value)\n\n # Normalize actual flow to prepare to transform\n actual_flow = results[1].normalize(dpr.wrappers.Graph.NetworkXGraph)\n\n # Compare sum of out edges for bottleneck nodes\n out_edges = mv.transform(\n dpr.plugins.core_networkx.algos.util.graph.aggregate_edges,\n actual_flow,\n lambda x, y: x + y,\n initial_value=0,\n )\n out_bottleneck = mv.transform(\n dpr.algos.util.nodemap.select.core_python, out_edges, bottleneck_nodes\n )\n out_bottleneck.assert_equal(expected_nodemap)\n\n # Compare sum of in edges for bottleneck nodes\n in_edges = mv.transform(\n \"util.graph.aggregate_edges.core_networkx\",\n actual_flow,\n lambda x, y: x + y,\n initial_value=0,\n in_edges=True,\n out_edges=False,\n )\n in_bottleneck = mv.transform(\n \"util.nodemap.select.core_python\", in_edges, bottleneck_nodes\n )\n in_bottleneck.assert_equal(expected_nodemap)",
"def fdc_flv(self, low_flow: float = 0.3) -> float:\n\n low_flow = 1.0 - low_flow\n # make sure that metric is calculated over the same dimension\n obs = self.true.flatten()\n sim = self.predicted.flatten()\n\n if (low_flow <= 0) or (low_flow >= 1):\n raise RuntimeError(\"l has to be in the range (0,1)\")\n\n # for numerical reasons change 0s to 1e-6\n sim[sim == 0] = 1e-6\n obs[obs == 0] = 1e-6\n\n # sort both in descending order\n obs = -np.sort(-obs)\n sim = -np.sort(-sim)\n\n # subset data to only top h flow values\n obs = obs[np.round(low_flow * len(obs)).astype(int):]\n sim = sim[np.round(low_flow * len(sim)).astype(int):]\n\n # transform values to log scale\n obs = np.log(obs + 1e-6)\n sim = np.log(sim + 1e-6)\n\n # calculate flv part by part\n qsl = np.sum(sim - sim.min())\n qol = np.sum(obs - obs.min())\n\n flv = -1 * (qsl - qol) / (qol + 1e-6)\n\n return float(flv * 100)",
"def greedy_max_cut(graph):\n cut = Cut(set(), set())\n for vertex in graph.nodes:\n l_neighbors = sum((adj in cut.left) for adj in graph.neighbors(vertex))\n r_neighbors = sum((adj in cut.right) for adj in graph.neighbors(vertex))\n if l_neighbors < r_neighbors:\n cut.left.add(vertex)\n else:\n cut.right.add(vertex)\n return cut",
"def peak_flow(self, folder_name, p_lat=0.0, p_lon=0.0, floodpeak=10):\n if p_lat == 0:\n p_lat = self.LAT\n if p_lon == 0:\n p_lon = self.LON\n\n grid_cell = self.coord_to_grid_cell(p_lat, p_lon)\n # grid_cell = 3674 # DEBUG *****\n\n # what kind of flood peak window are we using\n if floodpeak == 10:\n logbase = numpy.log(numpy.log(10 / 9)) # 10-year flood\n elif floodpeak == 100:\n logbase = numpy.log(numpy.log(100 / 99)) # 100-year flood\n else:\n return 0\n\n # for each year in the range\n year_peaks = [0] * 97\n for i in range(1916, 2011):\n # Downloading the file from dropbox\n self.DROPBOX.download_file(folder_name, \"outflw\" + str(i) + \".bin\", self.TMP_FOLDER)\n output_file = os.path.join(os.getcwd(), self.TMP_FOLDER, folder_name, \"outflw\" + str(i) + \".bin\")\n year_flow = self.map_input_to_flow(output_file, grid_cell, i, False)\n year_peaks[i - 1916] = max(year_flow)\n\n # calculate the gumbel distribution\n flow_mean = numpy.nanmean(year_peaks)\n flow_sdev = numpy.nanstd(year_peaks, ddof=1) # ddof=1 emulates matlab's bias-compensation default\n kt_gumbel = ((-6 ** 0.5) / 3.14) * (0.5772 + logbase)\n xt_gumbel = flow_mean + (kt_gumbel * flow_sdev)\n\n # find the year with the maximal difference / minimum flow\n curr_year = 1915\n min_year = 0\n min_val = float(\"inf\")\n for val in year_peaks:\n curr_year += 1\n this_flow = numpy.abs(xt_gumbel - val)\n if this_flow < min_val:\n min_year = curr_year\n min_val = this_flow\n return min_year",
"def find_flow(g, v_in, v_out, meas_plane=None, timeout=100):\n f, l_k = gflow(g, v_in, v_out, meas_plane, timeout)\n if f:\n print(\"gflow found\")\n print(\"g is \", f)\n print(\"l_k is \", l_k)\n else:\n print(\"no gflow found, finding flow\")\n f, l_k = flow(g, v_in, v_out, timeout=timeout)\n if f:\n print(\"flow found\")\n print(\"f is \", f)\n print(\"l_k is \", l_k)\n else:\n print(\"no flow found\")",
"def max_dist(graph, source):\r\n q = queue.Queue()\r\n found = {}\r\n distance = {}\r\n for vertex in graph: \r\n found[vertex] = 0\r\n distance[vertex] = -1\r\n max_distance = 0\r\n found[source] = 1\r\n distance[source] = 0\r\n q.put(source)\r\n while q.empty() == False:\r\n current = q.get()\r\n for neighbour in graph[current]:\r\n if found[neighbour] == 0:\r\n found[neighbour] = 1\r\n distance[neighbour] = distance[current] + 1\r\n max_distance = distance[neighbour]\r\n q.put(neighbour)\r\n return max_distance",
"def detour_without_lambda(Graph, src_node_ID, dst_node_ID):\n try:\n edge = Graph.GetEI(src_node_ID, dst_node_ID)\n\n except:\n print(\"edge {}->{} does not exist\".format(src_node_ID, dst_node_ID))\n return\n edge_id = edge.GetId()\n flow = Graph.GetFltAttrDatE(edge_id, \"Flow\")\n capacity = Graph.GetFltAttrDatE(edge_id, \"Capacity\")\n congestion = Graph.GetFltAttrDatE(edge_id, \"Congestion\")\n\n # delete the edge\n Graph.DelEdge(src_node_ID, dst_node_ID)\n # delete the edge from all subsets\n\n def return_edge():\n\n # and to main graph\n id = Graph.AddEdge(src_node_ID, dst_node_ID)\n Graph.AddFltAttrDatE(id, flow, \"Flow\")\n Graph.AddFltAttrDatE(id, capacity, \"Capacity\")\n Graph.AddFltAttrDatE(id, congestion, \"Congestion\")\n\n path_length, path = find_shortest_path(Graph, src_node_ID, [dst_node_ID], weight_type=\"Congestion\")\n if path_length != float(\"inf\"):\n return_edge()\n return path_length\n\n return_edge()\n return 0",
"def bfs_edges_generator(graph, source, reverse=...):\n ...",
"def dfs(x, p, step):\n disc[x] = low[x] = step\n for xx in graph.get(x, []): \n if disc[xx] == inf: \n step += 1\n dfs(xx, x, step)\n low[x] = min(low[x], low[xx])\n if low[xx] > disc[x]: ans.append([x, xx]) # bridge\n elif xx != p: low[x] = min(low[x], disc[xx])",
"def get_weight_from_minflow(self):\n start_nodes = []\n end_nodes = []\n capacities = []\n unit_costs = []\n A = 0\n s_prime = self.sink() + 1\n t_prime = self.sink() + 2\n x = self.sink() + 3\n # for every edge in the graph, add edge to mincost flow instance with\n # infinite capacity and cost 1\n # also add backwards edge\n for arc in self.arc_info.keys():\n # forward edge\n start_nodes.append(self.arc_info[arc][\"start\"])\n end_nodes.append(self.arc_info[arc][\"destin\"])\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.arc_info[arc][\"start\"],\n self.arc_info[arc][\"destin\"]))\n # backward edge\n start_nodes.append(self.arc_info[arc][\"destin\"])\n end_nodes.append(self.arc_info[arc][\"start\"])\n capacities.append(int(self.arc_info[arc][\"weight\"])) # no negative\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.arc_info[arc][\"destin\"],\n self.arc_info[arc][\"start\"]))\n # add (x,s) and (t,x) edges with same cap, cost as above\n in_weight_x = 0\n for in_arc in self.in_arcs_lists[self.sink()]:\n in_weight_x += self.arc_info[in_arc][\"weight\"]\n out_weight_x = 0\n for out_arc in self.out_arcs_lists[self.source()]:\n out_weight_x += self.arc_info[out_arc][\"weight\"]\n # (x,s)\n start_nodes.append(x)\n end_nodes.append(self.source())\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n x,\n self.source()))\n # backward\n start_nodes.append(self.source())\n end_nodes.append(x)\n capacities.append(int(out_weight_x)) # don't go negative\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.source(),\n x))\n # (t,x)\n start_nodes.append(self.sink())\n end_nodes.append(x)\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.sink(),\n x))\n # backward\n start_nodes.append(x)\n end_nodes.append(self.sink())\n capacities.append(int(in_weight_x)) # don't go negative\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n x,\n self.sink()))\n # for all verts, if a-exc < 0, add edge (s', v) with capacity -a-exc(v)\n # and cost 0, and if a-exc > 0, add edge (v, t') with capacity a-exc(v)\n # and cost 0.\n for v in self:\n # process internal verts only, since we assume source and sink have\n # no in and out edges respectively\n if v != self.source() and v != self.sink():\n # compute a-exc(v)\n in_weight = 0\n for in_arc in self.in_arcs_lists[v]:\n in_weight += self.arc_info[in_arc][\"weight\"]\n out_weight = 0\n for out_arc in self.out_arcs_lists[v]:\n out_weight += self.arc_info[out_arc][\"weight\"]\n a_exc = out_weight - in_weight\n if a_exc < 0:\n # add edge (s', v)\n start_nodes.append(s_prime)\n end_nodes.append(v)\n capacities.append(int(-a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n s_prime,\n v,\n int(-a_exc)))\n if a_exc > 0:\n # add edge (v, t')\n start_nodes.append(v)\n end_nodes.append(t_prime)\n capacities.append(int(a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n v,\n t_prime,\n int(a_exc)))\n # update A\n A += a_exc\n # process x node\n a_exc = out_weight_x - in_weight_x\n if a_exc < 0:\n # add edge (s', x)\n 
start_nodes.append(s_prime)\n end_nodes.append(x)\n capacities.append(int(-a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n s_prime,\n x,\n int(-a_exc)))\n if a_exc > 0:\n # add edge (x, t')\n start_nodes.append(x)\n end_nodes.append(t_prime)\n capacities.append(int(a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n x,\n t_prime,\n int(a_exc)))\n # update A\n A += a_exc\n # we must send flow of A from s_prime to t_prime\n supplies = [0]*(len(self) + 3)\n supplies[s_prime] = int(A)\n supplies[t_prime] = int(-A)\n # Instantiate a SimpleMinCostFlow solver.\n min_cost_flow = pywrapgraph.SimpleMinCostFlow()\n # Add each arc.\n for i in range(len(start_nodes)):\n min_cost_flow.AddArcWithCapacityAndUnitCost(start_nodes[i],\n end_nodes[i], capacities[i], unit_costs[i])\n # Add node supplies\n for i in range(0, len(supplies)):\n min_cost_flow.SetNodeSupply(i, supplies[i])\n # Find the minimum cost flow between node s' and t'.\n if min_cost_flow.Solve() == min_cost_flow.OPTIMAL:\n print('Minimum cost:', min_cost_flow.OptimalCost())\n print('')\n print(' Arc Flow / Capacity Cost')\n for i in range(min_cost_flow.NumArcs()):\n cost = min_cost_flow.Flow(i)*min_cost_flow.UnitCost(i)\n print('%1s -> %1s %3s / %3s %3s' % (\n min_cost_flow.Tail(i),\n min_cost_flow.Head(i),\n min_cost_flow.Flow(i),\n min_cost_flow.Capacity(i),\n cost))\n # update arcs\n start = min_cost_flow.Tail(i)\n destin = min_cost_flow.Head(i)\n if start != s_prime and \\\n start != t_prime and \\\n start != x and \\\n destin != s_prime and \\\n destin != t_prime and \\\n destin != x:\n # if forward, increase flow. otherwise decrease.\n print(\"Processing edge ({}, {})\".format(start, destin))\n if start < destin:\n sup_flow = min_cost_flow.Flow(i)\n else:\n sup_flow = -min_cost_flow.Flow(i)\n temp_start = start\n start = destin\n destin = temp_start\n print(\"Has become ({}, {}) with sup {}\".format(start,\n destin,\n sup_flow))\n arc = self.get_arc(start, destin)\n if (sup_flow != 0) or (\"lower_bound\" not in \\\n self.arc_info[arc].keys()):\n print(\"We should add this\")\n old_flow = self.arc_info[arc][\"weight\"]\n new_flow = old_flow + sup_flow\n self.arc_info[arc][\"weight\"] = int(new_flow)\n print(\"Edge ({},{}) weight is changed from {} to {}\".format(\n start,\n destin,\n old_flow,\n new_flow))\n else:\n print('There was an issue with the min cost flow input.')\n #self.check_conservation_of_flow() # check that solution is valid",
"def source_adj_gsdf(gmdata_sim,gmdata_obs,IsolationFilter,num_pts,dt): \n t = np.arange(num_pts)*dt\n ts=np.flip(-t[1:], axis=0)\n lTime = np.concatenate((ts,t), axis=0)#Lag time \n \n #convolve the waveforms for the cross- and auto-correlagrams \n cross = np.correlate(IsolationFilter,gmdata_obs,'full')\n auto = np.correlate(IsolationFilter,gmdata_sim,'full') \n \n #GSDF Parameters \n w0=2*np.pi/(lTime[-1]) \n# wN=2*np.pi/(2*dt)\n# w(:,1)=-wN:w0:wN\n wf=w0*np.linspace(-int(num_pts/2),int(num_pts/2),num_pts) \n fi = [0.05, 0.075, 0.1]\n# fi = [0.02, 0.03, 0.04, 0.05]\n# fi = [0.05, 0.075, 0.1, 0.125, 0.15, 0.175, 0.2]\n Tw = 2/np.mean(fi) # Effective window\n# sw = 2*np.pi*0.72/Tw; # Sigma w ~ 0.2827433388230814\n sw=0.1 \n \n# #% A local maximum will be selected closest to 0-lag\n# I_O=np.argmax(cross)\n# I_S=np.argmax(auto) \n\n I_O, peaks_O = find_peaks(np.abs(hilbert(cross))/np.max(np.abs(hilbert(cross))), height=0.25)\n I_S, peaks_S = find_peaks(np.abs(hilbert(auto))/np.max(np.abs(hilbert(auto))), height=0.25)\n\n PkO = peaks_O.get(\"peak_heights\", \"\")\n PkS = peaks_S.get(\"peak_heights\", \"\")\n\n if (I_O==[] or I_S==[]):\n I_O=np.argmax(cross)\n I_S=np.argmax(auto)\n else:\n I_O_min = np.argmin(np.multiply((1+np.abs(lTime[I_O]))**2,np.abs(1-PkO)))\n I_O = I_O[I_O_min]\n\n I_S_min = np.argmin(np.multiply((1+np.abs(lTime[I_S]))**2,np.abs(1-PkS)))\n I_S = I_S[I_S_min]\n \n ##Windowing\n win1=np.exp(-(0.5*sw**2)*(lTime-lTime[I_O])**2)\n win2=np.exp(-(0.5*sw**2)*(lTime-lTime[I_S])**2) \n \n #\n WO = np.multiply(win1,cross)\n WS = np.multiply(win2,auto)\n WS = WS*np.max(WO)/np.max(WS) #Normalized window by amplitude\n #% Parameters for \"bootstraping\"\n InOR=np.argmax(WO)\n InSR=np.argmax(WS) \n \n #% Isolation filter FFT for perturbation kernel\n tff=np.conj(fftshift(fft(IsolationFilter)))*1/num_pts \n \n adj_sim_decompose = np.zeros((len(fi),num_pts))\n adj_sim_sum = np.zeros(num_pts)\n TauP_arr = np.zeros(len(fi)) \n \n ne = int(np.min([2/np.min(fi)/dt,num_pts/2])) #% Effective bandwidth for inversion\n \n for i in range(0,len(fi)): \n si = 0.1*fi[i]\n #Crosscorrelagram and Autocorrelagram filtering\n dO=computebandfftfilter_gauss(WO,dt,fi[i],si,lTime);\n dS=computebandfftfilter_gauss(WS,dt,fi[i],si,lTime); \n \n # % Check bootstraping\n InO=np.argmax(np.real(dO))\n InS=np.argmax(np.real(dS)) \n \n BS = 1; Cn = 0;\n while BS == 1 or Cn < 10:\n InO=int(InO)\n if (lTime[InO] < lTime[InOR]+0.51/fi[i]) and (lTime[InO] >= lTime[InOR]-0.51/fi[i]):\n BS = 0\n elif (lTime[InO] >= (lTime[InOR]+0.45/fi[i])):\n InO=InO-np.round(1/fi[i]/dt)\n elif (lTime[InO] < lTime[InOR]-0.45/fi[i]):\n InO=InO+np.round(1/fi[i]/dt)\n Cn = Cn+1\n \n BS = 1; Cn = 0;\n while BS == 1 or Cn < 10:\n InS=int(InS) \n if (lTime[InS] < lTime[InSR]+0.51/fi[i]) and (lTime[InS] >= lTime[InSR]-0.51/fi[i]):\n BS = 0\n elif (lTime[InS] >= (lTime[InSR]+0.45/fi[i])):\n InS=InS-np.round(1/fi[i]/dt)\n elif (lTime[InS] < lTime[InSR]-0.45/fi[i]):\n InS=InS+np.round(1/fi[i]/dt)\n Cn = Cn+1 \n\n # Five parameter Gaussian wavelet fitting \n Ao = np.max(envelope(np.real(dO))); Io = np.argmax(envelope(np.real(dO)));\n As = np.max(envelope(np.real(dS))); Is = np.argmax(envelope(np.real(dS))); \n ##Constrain the initial values \n # Parameters for curve_fit\n wi=2*np.pi*fi[i] \n \n try:\n GaO, params_covariance = curve_fit(Eqn, lTime[Io-ne-1:Io+ne], np.real(dO[Io-ne-1:Io+ne]))\n GaS, params_covariance = curve_fit(Eqn, lTime[Is-ne-1:Is+ne], np.real(dS[Is-ne-1:Is+ne])) \n except:\n GaO = [Ao, 2*np.pi*si, lTime[Io], 2*np.pi*fi[i], 
lTime[InO]]\n GaS = [As, 2*np.pi*si, lTime[Is], 2*np.pi*fi[i], lTime[InS]] \n\n# GaO, params_covariance = curve_fit(Eqn, lTime[Io-ne-1:Io+ne], np.real(dO[Io-ne-1:Io+ne]),bounds=(0,[Ao, 2*np.pi*si, lTime[Io], 2*np.pi*fi[i], lTime[InO]]))\n# GaS, params_covariance = curve_fit(Eqn, lTime[Is-ne-1:Is+ne], np.real(dS[Is-ne-1:Is+ne]),bounds=(0,[As, 2*np.pi*si, lTime[Is], 2*np.pi*fi[i], lTime[InS]])) \n \n# % Check fitting\n if ((GaO[0]/GaS[0]) > 10**5) or np.abs(GaO[4]-GaS[4]) > lTime[-1]/2:\n GaO = [Ao, 2*np.pi*si, lTime[Io], 2*np.pi*fi[i], lTime[InO]]\n GaS = [As, 2*np.pi*si, lTime[Is], 2*np.pi*fi[i], lTime[InS]] \n \n wP=((si**2)*wf+(sw**2)*wi)/(sw**2+si**2)\n wPP=((si**2)*wf-(sw**2)*wi)/(sw**2+si**2)\n siP=((si**2)*(sw**2)/(sw**2+si**2))**0.5 \n #Estimate waveform perturbation kernel (WPK)\n IW=(siP/(sw*GaS[0]))*np.multiply(np.exp(-0.5*(wf-2*np.pi*fi[i])**2/(sw**2+si**2)),np.divide(tff,wP))+\\\n (siP/(sw*GaS[0]))*np.exp(-0.5*(wf+2*np.pi*fi[i])**2/(sw**2+si**2))*tff/wPP\n \n IW[0:int(len(IW)/2)]=0*IW[0:int(len(IW)/2)]\n \n itff = ifft(fftshift(num_pts*IW)) \n \n #Save the GSDF measurements\n TauP_arr[i] = GaO[4]-GaS[4]; #% delta_P\n \n# Jp = np.real(itff)\n# Jp = np.imag(itff)\n Jp = -np.imag(itff) \n adj_sim_decompose[i,:] = np.flip(Jp,axis=0)*TauP_arr[i] \n \n #if i>0:\n adj_sim_sum = adj_sim_sum + adj_sim_decompose[i,:] \n \n return adj_sim_sum, TauP_arr",
"def MFAS_set_cover(s,graph):\n\n ## initialization\n m = graph.ecount()\n cycle_matrix = u.mk_cycle_matrix(u.find_all_cycles(graph), m)\n n, c = graph.get_adjacency().shape\n num_cycles = len(cycle_matrix)\n edge_list = graph.get_edgelist()\n sym_to_edge_cache = {}\n edge_to_sym_cache = {}\n sum_var = 'y'\n\n\n def symbolize(i,j):\n \"given two indices, create a symbolic variable\"\n new = z.Int('{0}->{1}'.format(i,j))\n return new\n\n\n def constraint_1(i,s_edge):\n \"\"\" Multiply the edge by its corresponding value in the cycle matrix\n \"\"\"\n edge = sym_to_edge_cache[s_edge]\n value = 0\n if edge in cycle_matrix[i]:\n value = cycle_matrix[i][edge]\n\n return (value * s_edge)\n\n\n ## symbolize the edges\n for source,sink in edge_list:\n s_edge = symbolize(source, sink)\n ## an edge is either a 0 or a 1\n s.add(z.Or([s_edge == 0, s_edge == 1]))\n\n sym_to_edge_cache[s_edge] = (source,sink)\n edge_to_sym_cache[(source,sink)] = s_edge\n\n\n ## Perform constraint 1 and add it to the solver instance\n for i in range(num_cycles):\n s.add(z.Sum([constraint_1(i,s_edge)\n for s_edge in sym_to_edge_cache.keys()]) >= 1)\n\n\n ## we want the smallest y possible\n s.minimize(z.Sum([s_edge for s_edge in sym_to_edge_cache.keys()]))\n\n s.check()\n return s.model()",
"def get_interval_from_minflow(self, wide=False):\n start_nodes = []\n end_nodes = []\n capacities = []\n unit_costs = []\n A = 0\n s_prime = self.sink() + 1\n t_prime = self.sink() + 2\n x = self.sink() + 3\n # for every edge in the graph, add edge to mincost flow instance with\n # infinite capacity and cost 1\n # also add backwards edge\n for arc in self.arc_info.keys():\n # forward edge\n start_nodes.append(self.arc_info[arc][\"start\"])\n end_nodes.append(self.arc_info[arc][\"destin\"])\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n # print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n # self.arc_info[arc][\"start\"],\n # self.arc_info[arc][\"destin\"]))\n # backward edge\n start_nodes.append(self.arc_info[arc][\"destin\"])\n end_nodes.append(self.arc_info[arc][\"start\"])\n capacities.append(int(self.arc_info[arc][\"weight\"])) # no negative\n unit_costs.append(1)\n # print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n # self.arc_info[arc][\"destin\"],\n # self.arc_info[arc][\"start\"]))\n # add (x,s) and (t,x) edges with same cap, cost as above\n in_weight_x = 0\n for in_arc in self.in_arcs_lists[self.sink()]:\n in_weight_x += self.arc_info[in_arc][\"weight\"]\n out_weight_x = 0\n for out_arc in self.out_arcs_lists[self.source()]:\n out_weight_x += self.arc_info[out_arc][\"weight\"]\n # (x,s)\n start_nodes.append(x)\n end_nodes.append(self.source())\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n # print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n # x,\n # self.source()))\n # backward\n start_nodes.append(self.source())\n end_nodes.append(x)\n capacities.append(int(out_weight_x)) # don't go negative\n unit_costs.append(1)\n # print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n # self.source(),\n # x))\n # (t,x)\n start_nodes.append(self.sink())\n end_nodes.append(x)\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n # print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n # self.sink(),\n # x))\n # backward\n start_nodes.append(x)\n end_nodes.append(self.sink())\n capacities.append(int(in_weight_x)) # don't go negative\n unit_costs.append(1)\n # print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n # x,\n # self.sink()))\n # for all verts, if a-exc < 0, add edge (s', v) with capacity -a-exc(v)\n # and cost 0, and if a-exc > 0, add edge (v, t') with capacity a-exc(v)\n # and cost 0.\n for v in self:\n # process internal verts only, since we assume source and sink have\n # no in and out edges respectively\n if v != self.source() and v != self.sink():\n # compute a-exc(v)\n in_weight = 0\n for in_arc in self.in_arcs_lists[v]:\n in_weight += self.arc_info[in_arc][\"weight\"]\n out_weight = 0\n for out_arc in self.out_arcs_lists[v]:\n out_weight += self.arc_info[out_arc][\"weight\"]\n a_exc = out_weight - in_weight\n if a_exc < 0:\n # add edge (s', v)\n start_nodes.append(s_prime)\n end_nodes.append(v)\n capacities.append(int(-a_exc))\n unit_costs.append(0)\n # print(\"Adding arc ({}, {}) with cost 0 and cap {}\".\n # format(s_prime, v, int(-a_exc)))\n if a_exc > 0:\n # add edge (v, t')\n start_nodes.append(v)\n end_nodes.append(t_prime)\n capacities.append(int(a_exc))\n unit_costs.append(0)\n # print(\"Adding arc ({}, {}) with cost 0 and cap {}\".\n # format(v, t_prime, int(a_exc)))\n # update A\n A += a_exc\n # process x node\n a_exc = out_weight_x - in_weight_x\n if a_exc < 0:\n 
# add edge (s', x)\n start_nodes.append(s_prime)\n end_nodes.append(x)\n capacities.append(int(-a_exc))\n unit_costs.append(0)\n # print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n # s_prime,\n # x,\n # int(-a_exc)))\n if a_exc > 0:\n # add edge (x, t')\n start_nodes.append(x)\n end_nodes.append(t_prime)\n capacities.append(int(a_exc))\n unit_costs.append(0)\n # print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n # x,\n # t_prime,\n # int(a_exc)))\n # update A\n A += a_exc\n # we must send flow of A from s_prime to t_prime\n supplies = [0]*(len(self) + 3)\n supplies[s_prime] = int(A)\n supplies[t_prime] = int(-A)\n # Instantiate a SimpleMinCostFlow solver.\n min_cost_flow = pywrapgraph.SimpleMinCostFlow()\n # Add each arc.\n for i in range(len(start_nodes)):\n min_cost_flow.AddArcWithCapacityAndUnitCost(start_nodes[i],\n end_nodes[i],\n capacities[i],\n unit_costs[i])\n # Add node supplies\n for i in range(0, len(supplies)):\n min_cost_flow.SetNodeSupply(i, supplies[i])\n # Find the minimum cost flow between node s' and t'.\n if min_cost_flow.Solve() == min_cost_flow.OPTIMAL:\n # print('Minimum cost:', min_cost_flow.OptimalCost())\n # print('')\n # print(' Arc Flow / Capacity Cost')\n for i in range(min_cost_flow.NumArcs()):\n # cost = min_cost_flow.Flow(i)*min_cost_flow.UnitCost(i)\n # print('%1s -> %1s %3s / %3s %3s' % (\n # min_cost_flow.Tail(i),\n # min_cost_flow.Head(i),\n # min_cost_flow.Flow(i),\n # min_cost_flow.Capacity(i),\n # cost))\n # update arcs\n start = min_cost_flow.Tail(i)\n destin = min_cost_flow.Head(i)\n if start != s_prime and \\\n start != t_prime and \\\n start != x and \\\n destin != s_prime and \\\n destin != t_prime and \\\n destin != x:\n # if forward, increase flow. otherwise decrease.\n # print(\"Processing edge ({}, {})\".format(start, destin))\n if start < destin:\n sup_flow = min_cost_flow.Flow(i)\n else:\n sup_flow = -min_cost_flow.Flow(i)\n temp_start = start\n start = destin\n destin = temp_start\n # print(\"Has become ({}, {}) with sup {}\".format(start,\n # destin,\n # sup_flow))\n arc = self.get_arc(start, destin)\n if (sup_flow != 0) or (\"lower_bound\" not in\n self.arc_info[arc].keys()):\n # print(\"We should add this\")\n old_flow = self.arc_info[arc][\"weight\"]\n bound_1 = old_flow + sup_flow\n bound_2 = old_flow - sup_flow\n new_lb = max(0, int(min(bound_1, bound_2)))\n new_ub = int(max(bound_1, bound_2))\n if wide:\n if new_lb == new_ub:\n # print(\"We had a zero interval\")\n new_lb = int(new_lb*0.8)\n new_ub = int(new_ub*1.2)\n if new_lb == 0:\n # print(\"We got a zero lower bound\")\n new_ub = 5\n # print(\"But now we're doing {} {}\".\n # format(new_lb, new_ub))\n\n self.arc_info[arc][\"lower_bound\"] = new_lb\n self.arc_info[arc][\"upper_bound\"] = new_ub\n # print(\"Edge ({},{}) bounds are [{},{}]\".format(\n # start,\n # destin,\n # self.arc_info[arc][\"lower_bound\"],\n # self.arc_info[arc][\"upper_bound\"]))\n # print(self.arc_info[arc])\n else:\n print('There was an issue with the min cost flow input.')\n # self.check_conservation_of_flow() # check that solution is valid",
"def compute_optflow(fname1, fname2):\n # Read images\n im1 = skimage.io.imread(fname1)\n im2 = skimage.io.imread(fname2)\n\n # Compute optical flow\n im1 = cv2.cvtColor(im1, cv2.COLOR_RGB2GRAY)\n im2 = cv2.cvtColor(im2, cv2.COLOR_RGB2GRAY)\n im1 = skimage.transform.resize(\n im1, [args.target_height, args.target_width], preserve_range=True,\n mode='constant', anti_aliasing=True)\n im2 = skimage.transform.resize(\n im2, [args.target_height, args.target_width], preserve_range=True,\n mode='constant', anti_aliasing=True)\n flow = cv2.calcOpticalFlowFarneback(\n im1, im2, flow=None, pyr_scale=0.5, levels=3, winsize=15,\n iterations=3, poly_n=5, poly_sigma=1.2, flags=0)\n\n # convert from (x, y) to (y, x) order\n # import ipdb; ipdb.set_trace()\n # import matplotlib.pyplot as plt\n # flow_yx = flow[:, :, ::-1]\n # flow_rgb = visualize_flow(flow_yx)\n return flow",
"def dfs_edges_generator(graph, source, reverse=...):\n ...",
"def get_bipartition(g):\n # Write your code here.\n colorArr = [-1] * (len(g.nodes()) + 1)\n for node in g.nodes():\n start = g.neighbors(node)\n if len(start)>0:\n src = start.pop()\n break\n colorArr[src] = 1\n queue = []\n queue.append(src)\n while (queue):\n u = queue.pop()\n for v in g.nodes():\n if g.has_edge(u, v) and colorArr[v] == -1:\n colorArr[v] = 1 - colorArr[u]\n queue.append(v)\n elif g.has_edge(u, v) and colorArr[u] == colorArr[v]:\n return None\n\n red = set()\n for i in range(1, len(colorArr)):\n if colorArr[i] == 1:\n red.add(i)\n return list(red)\n\n\n\n # Hint! If you'd like to test out these commands without\n # writing a full-fledged program, you might want to familiarise\n # yourself with the Python interactive shell or IPython (available\n # on at least some Aalto IT computers)\n\n # Create a simple line graph g: \"(1)->(2)->(3)\"\n # (The creation parameter is a dict of {node: list_of_neighbors},\n # but this is not something you will be needing in your code.)\n # >>> from networkx import Graph \n # >>> g = Graph({1: [2], 2: [3]})\n # >>> g.number_of_nodes()\n # 3\n\n # Example. Iterate over the nodes and mark them as visited\n # >>> visited = set()\n # >>> for node in g.nodes_iter(): # There is also g.nodes(), which returns a list\n # ... # do some work here\n # ... visited.add(node)\n \n # Example. Given a Node v, get all nodes s.t. there is an edge between\n # v and that node\n # >>> g.neighbors(1)\n # [2]\n\n # Example. Get the edges of the graph:\n # >>> e.edges() # as with nodes, there is also g.edges_iter()\n # [(1, 2), (2, 3)]\n\n # For more information, consult the NetworkX documentation:\n # https://networkx.github.io/documentation/networkx-1.10/tutorial/tutorial.html",
"def bfs(graph, source):\n visited = [False] * len(graph.graph)\n print(visited)\n\n result = \"\"\n queue = []\n\n queue.append(source)\n visited[source] = True\n\n while queue:\n source = queue.pop(0)\n result += str(source)\n\n while graph.graph[source] is not None:\n data = graph.graph[source].vertex\n if not visited[data]:\n queue.append(data)\n visited[data] = True\n graph.graph[source] = graph.graph[source].next\n return result",
"def find_max_f():\n fmax = fmin(g, 2)\n return fmax[0]",
"def max():\n valid=result_alpha.F>0\n src_data.F[valid]=np.maximum( src_data.F[valid],result_data.F[valid] )",
"def bp_edgeloss(prob, act):\n sft = nn.Softmax2d()\n # predicted boundary\n predb = prob[-9]\n # predb = sft(predb)\n # predicted mask\n predm = prob[-1]\n # predm = sft(predm)\n\n act_hot=(torch.zeros(act.shape[0],3,*act.shape[1:]))#for one hot encoding, 3 channels and then reduce to 2 channels for loss comp\n act_hot = act_hot.to(act.device)\n act_m = act_hot.scatter(1, act.unsqueeze(dim=1), 1)\n\n act_enc = torch.cat(list(map(get_hot_enc, act))) # BX3XhXW shaped img\n act_origs = act_enc.shape#act original shape\n act_flat = torch.flatten(act_enc, 0,1)\n edge_img = torch.stack(list(map(get_edge_img, act_flat)))\n edge_img = edge_img.view(act_origs)\n # edge_img = torch.argmax(edge_img, dim=1)\n\n # edge_img = torch.stack(list(map(get_edge_img, act)))\n # edge_hot=(torch.zeros(edge_img.shape[0],edge_img.max()+1,*edge_img.shape[1:]))\n # edge_hot = edge_hot.to(edge_img.device)\n # act_b = edge_hot.scatter(1, edge_img.unsqueeze(dim=1), 1)\n\n # dl = dice_loss_sq#torch.nn.MSELoss()\n #negating bg channel\n # edge_img[:,0,...] = ((edge_img[:,0,...]+1)%2)\n edge_img = edge_img[:,:,...]\n lossb = dice_loss_sq(predb, edge_img, no_sft=False, is_all_chann=False)# + focal_loss_with_logits(predb, torch.argmax(edge_img, dim=1))\n #trying 2 channel ouput for mask, no meaning as we need softmax at final layer\n # lossm = dice_loss_sq(predm, act_m[:,1:,...], no_sft=True) #+ focal_loss_with_logits(predm, act)\n bce = nn.BCELoss(reduction='sum')\n # mse = nn.MSELoss()\n lossm = dice_loss_sq(predm, act_m[:,:,...])\n return lossb + lossm",
"def test_min_cut(default_plugin_resolver):\n dpr = default_plugin_resolver\n source_node = 0\n target_node = 7\n ebunch = [\n (0, 1, 9),\n (0, 3, 10),\n (1, 4, 3),\n (2, 7, 6),\n (3, 1, 2),\n (3, 4, 8),\n (4, 5, 1),\n (4, 2, 4),\n (5, 2, 5),\n (5, 6, 1),\n (6, 2, 11),\n ]\n nx_graph = nx.DiGraph()\n nx_graph.add_weighted_edges_from(ebunch)\n graph = dpr.wrappers.Graph.NetworkXGraph(nx_graph, edge_weight_label=\"weight\")\n\n expected_flow_value = 5\n cut_edges = nx.DiGraph()\n cut_edges.add_nodes_from(nx_graph.nodes)\n cut_edges.add_weighted_edges_from([(4, 5, 1), (4, 2, 4)])\n expected_cut_edges = dpr.wrappers.Graph.NetworkXGraph(cut_edges)\n\n mv = MultiVerify(dpr)\n results = mv.compute(\"flow.min_cut\", graph, source_node, target_node)\n\n # Compare flow rate\n results[0].assert_equal(expected_flow_value)\n\n # Compare cut graph\n results[1].assert_equal(expected_cut_edges)",
"def FlowRestriction(T_in, p_in, m_dot_out, d_inner, f):\r\n\r\n # Cross section\r\n A_cross = (np.pi/4)*(d_inner**2)\r\n\r\n # Assumption isenthalpic flow!\r\n h_in = hp.HeCalc(9, 0, 1, p_in, 2, T_in, 1) #J/kg\r\n\r\n # Iteration for the calculation of p_out even though the influence is probably negligible\r\n # I checked it and for 20 bar it really is negligible\r\n dp = 0.0\r\n p_out = 0.0\r\n for i in range(5):\r\n p_out = p_in - dp\r\n T_out = hp.HeCalc(2, 0, 1, p_out, 9, h_in, 1)\r\n Rho_out = hp.HeCalc(3, 0, 1, p_out, 2, T_out, 1) #kg/m³\r\n # Velocity of the outgoing flow\r\n u_out = m_dot_out/(A_cross*Rho_out) #m/s\r\n\r\n # Calculation of the dp with Bernoulli equation and resistance coefficient (see VDI Heatatlas 2013)\r\n dp = f * Rho_out * 0.5 * u_out**2\r\n\r\n\r\n h_out = hp.HeCalc(9, 0, 1, p_out, 2, T_out, 1)\r\n state_out = {\"h\": h_out, \"T\": T_out, \"p\": p_out}\r\n return state_out",
"def _flow(\n self, a: float = 1.4 * 10e-16, f: float = 1, p: float = 918, g: float = 9.81\n ) -> None:\n # Aspect and slope ----------------------------------------------------\n # Calculate slope\n x_slp, y_slp = np.gradient(self.ele, 22, 22)\n self.slp = np.arctan(np.sqrt(x_slp * x_slp + y_slp * y_slp))\n\n # Ice flow ------------------------------------------------------------\n # u = ud + ub + us\n # = ice deformation/creep + basal slide + soft bed deformation\n\n # Calculate ice deformation velocity 'ud' at glacier surface\n ud = (2 * a * ((f * p * g * np.sin(self.slp)) ** 3.0) * self.h**4.0) / 4\n\n # Assume linear decrease of 'ud' towards zero at the glacier bed use\n # velocity at medium height. Set u = ud, 'ub' and 'us' are ignored.\n ud = ud * 0.5\n\n # Limit maximum flow velocity to maxium fracd8 offset\n u_max = self.res * (self.MODEL_FRACD8_OFFSET + 1)\n ud[ud >= u_max] = u_max\n self.u = ud\n\n # Use limited or infnite 'fracd8' algorithm to simulate flow\n h_new, self.asp, self.fracd8_mode = fracd8(\n self.ele, self.u, self.h, self.res, self.MODEL_FRACD8_OFFSET\n )\n\n # Calculate new glacier height 'h_new' after flow ---------------------\n self.h = h_new\n h_new_index = np.copy((self.h < self.m))\n self.h = uniform_filter(self.h, size=5)\n self.h[h_new_index] = 0"
]
| [
"0.7220995",
"0.6513646",
"0.63755757",
"0.63598967",
"0.6199371",
"0.6149783",
"0.59793204",
"0.5815335",
"0.57068366",
"0.56810206",
"0.5614138",
"0.5564206",
"0.5473571",
"0.54486114",
"0.5408489",
"0.54058605",
"0.5382023",
"0.53799087",
"0.53791517",
"0.53403616",
"0.5325138",
"0.5318395",
"0.53177154",
"0.53165245",
"0.5314226",
"0.530885",
"0.5300808",
"0.52903825",
"0.5284572",
"0.52301776"
]
| 0.6811761 | 1 |
Lists all modules enabled for reading | def modules_enabled(self, c):
modules = []
for name, module in self.modules.iteritems():
modules.append( (name, module.__class__.__name__) )
return modules | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def enabled_modules(self):\n return [scomp for scomp in self.modules()\n if getattr(scomp, 'enabled', True)]",
"def showModules():\n keys, values = sys.modules.keys(), sys.modules.values()\n keys.sort()\n modulesList = ''\n for key in keys:\n modulesList += key+' '\n\n panel = nuke.Panel('python modules list')\n panel.addEnumerationPulldown('available modules', modulesList)\n val = panel.show()\n if val == 1:\n moduleToLoad = panel.value('available modules')\n panelA = nuke.Panel('module selected')\n panelA.addNotepad('module:', str(sys.modules[moduleToLoad]))\n panelA.addBooleanCheckBox('load/reload the module', 0)\n val = panelA.show()\n if val == 1:\n if panelA.value('load/reload the module') == 1:\t\t\t\t\n print 'loading module '+moduleToLoad\n exec('import '+moduleToLoad)\n exec('reload('+moduleToLoad+')')",
"def get_enabled_modules(self):\n return self._gconf.get_enabled_modules()",
"def modules(self):\n return self._modules.keys()",
"def action_list(args):\n\n module_root = Path(\"modules/\")\n modules = load_modules(module_root)\n\n print(\"Available modules:\")\n for module in modules:\n print(f\"- {module}\")",
"def modules():",
"def _list_modules():\r\n return [\r\n desc.module_class\r\n for desc\r\n in _list_descriptors()\r\n ]",
"def _list_all(root_pkg, prog):\n res = \"\\n\".join(\n sorted(\n pkinspect.package_module_names(_import(root_pkg)),\n key=str.lower,\n ),\n )\n sys.stderr.write(f\"usage: {prog} module command [args...]\\nModules:\\n{res}\\n\")\n return 1",
"def modules_available(self, c):\n\n modules = []\n module = YModule.FirstModule()\n while module is not None:\n modules.append( (module.get_productName(), module.get_serialNumber()) )\n module = module.nextModule()\n\n return modules",
"def list_modules():\n for module_name in listdir(modules_directory):\n if isdir(join(modules_directory, module_name)):\n log.debug('Load module: {0}'.format(module_name))\n yield module_name",
"def MODULES(self):\n pass",
"def do_list_modules(self, arg):\n for module in self.reader.module_list.modules:\n if arg:\n name = GetModuleName(self.reader, module).lower()\n if name.find(arg.lower()) >= 0:\n PrintModuleDetails(self.reader, module)\n else:\n PrintModuleDetails(self.reader, module)\n print()",
"def modules(self):\n return self._modules",
"def listModules(self):\n modules = [(module.name,\n module.queue,\n module.Active) for module in self.db.getModules()]\n return modules",
"def modules(self):\n return self.rpc.call(MsfRpcMethod.SessionCompatibleModules, [self.sid])['modules']",
"def loaded_modules() -> List[str]:\n return PYSTAC_IO.keys()",
"def get_modules(self):\n return self._module_loader.filelist",
"def plugin_list(self):\r\n return get_module_list()",
"def list_modules(self) -> Optional[List[str]]:\n module_list: List[str] = []\n for forge_module in self._forge_modules:\n module_list.append(forge_module.name)\n for git_module in self._git_modules:\n module_list.append(git_module.name)\n return module_list",
"def on_modules_command(sender, command, label, args):\n plugin_header(sender, \"Modules\")\n msg(sender, \", \".join([((\"&a\" if mod in shared[\"modules\"] else \"&c\") + mod) for mod in shared[\"load_modules\"]]))",
"def get_module_info_list(self):\n self._get_module_info_list = pa_module_info_cb_t(self._module_info_cb)\n pa_context_get_module_info_list(self._context,\n self._get_module_info_list,\n None)",
"def show_all(self):\n cmodules.showModuleData(\n Options.Author,\n Options.Name,\n Options.Call,\n Options.Category,\n Options.Type,\n Options.Version,\n Options.Description,\n Options.License,\n Options.Datecreation,\n Options.Lastmodified\n )\n self.show_commands()\n self.show_opt()",
"def available_modules(self, user):\n return [sitecomp for sitecomp in self.enabled_modules() if sitecomp.has_perm(user)]",
"def modules_registered(self) -> list[Module]:\n return [cmds[0].module for cmds in self._registry[\"by_module\"].values()]",
"def get_module_list(self, c):\r\n if self.device_detected == True:\r\n resp = yield subprocess.check_output(\"cacli modlist\")\r\n else:\r\n resp = \"Device not connected.\"\r\n print \"Device not connected. \"\r\n #Eventually make this actually throw an error instead of printing something\r\n returnValue(resp)",
"def modules(self):\n return sorted([module for module in self._registry.values()],\n key=lambda scomp: (scomp.order, scomp.label))",
"def getAllModules(self):\n\n modules = cmds.ls(type=\"network\")\n returnMods = []\n for module in modules:\n attrs = cmds.listAttr(module)\n if \"parent\" in attrs:\n returnMods.append(module)\n\n return returnMods",
"def list_modules(self, capability=None, name=None):\n # Update modules and get the latest up to date list\n installed_modules = self.install_modules(\n capability=capability,\n name=name\n )\n # For each module, get back its config options and website base url\n for module_name in installed_modules:\n module = self.weboob.modules_loader.get_or_load_module(module_name)\n installed_modules[module_name][\"config\"] = (\n weboob_tools.dictify_config_desc(module.config)\n )\n installed_modules[module_name][\"website\"] = module.website\n return {\n 'modules': [\n dict(module, name=name)\n for name, module in installed_modules.items()\n ]\n }",
"def _list_modules(self, componentdef):\n module_names = [\n ('default', 'default'),\n ('component', componentdef.name),\n ('role', componentdef.role if componentdef.role != componentdef.name else None),\n ('environment', componentdef.environment),\n ('host', componentdef.host)\n ]\n\n return [name for key, name in module_names if key in self.data_modules]",
"def get_available_features(self) -> list[str]:\n modules = []\n for mdir in [ZeroBot.__path__[0]] + self.config[\"Core\"][\"ModuleDirs\"]:\n mdir = Path(mdir)\n modules += [child.stem for child in mdir.glob(\"feature/*.py\")]\n return modules"
]
| [
"0.73525804",
"0.7195542",
"0.70797795",
"0.69543386",
"0.69462514",
"0.6944474",
"0.68327576",
"0.67939305",
"0.67652476",
"0.67583793",
"0.6754832",
"0.6685438",
"0.6677347",
"0.6613316",
"0.660625",
"0.6601403",
"0.6578693",
"0.65483433",
"0.6507556",
"0.65029275",
"0.6494191",
"0.6419007",
"0.64167976",
"0.6409186",
"0.6364252",
"0.6332117",
"0.6326966",
"0.6312022",
"0.6278943",
"0.6250844"
]
| 0.7321039 | 1 |
Get the core plugin; should be implemented by each driver | def core_plugin(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_plugin_interface(self):",
"def get_plugin_classes():\n\n # Force the selection of a toolkit:\n from enthought.traits.ui.api import toolkit\n toolkit()\n from enthought.etsconfig.api import ETSConfig\n try_use_ipython = preference_manager.root.use_ipython\n use_ipython = False\n if ETSConfig.toolkit == 'wx' and try_use_ipython:\n try:\n # If the right versions of IPython, EnvisagePlugins and\n # Pyface are not installed, this import will fail.\n from enthought.plugins.ipython_shell.view.ipython_shell_view \\\n import IPythonShellView\n use_ipython = True\n except: pass\n\n if use_ipython:\n from enthought.plugins.ipython_shell.ipython_shell_plugin import \\\n IPythonShellPlugin\n PythonShellPlugin = IPythonShellPlugin\n else:\n from enthought.plugins.python_shell.python_shell_plugin import PythonShellPlugin\n from enthought.plugins.text_editor.text_editor_plugin import TextEditorPlugin\n from enthought.logger.plugin.logger_plugin import LoggerPlugin\n from enthought.tvtk.plugins.scene.ui.scene_ui_plugin import SceneUIPlugin\n from enthought.mayavi.plugins.mayavi_ui_plugin import MayaviUIPlugin\n plugins = get_non_gui_plugin_classes()\n plugins.extend([\n LoggerPlugin,\n MayaviUIPlugin,\n SceneUIPlugin,\n PythonShellPlugin,\n TextEditorPlugin,\n ])\n return plugins",
"def driver(self):\r\n ext = self.extensions[0]\r\n return ext.obj if ext.obj else ext.plugin",
"def get_plugins(self):\n return []",
"def check_plugin(vcf_reader, plugin):\n # Always use core plug-in\n plugins = ['core']\n # Collect supplied plugin(s)\n [plugins.append(item) for item in plugin]\n # Create set\n plugins = list(set(plugins))\n # Evaluate vcf and plugin compatibility\n for plugin in plugins:\n\n if plugin == \"core\":\n from pScout.plugin.plugin_reader import core\n ret = core(vcf_reader, \"pScout/plugin/get_core.ini\")\n\n if ret is 1: # Not compatible\n exit()\n\n return plugins",
"def plugins():\n pass",
"def get_plugin(self, name):",
"def request_plugins(self):",
"def plugin_configuration(self):\n return self.__plugin_specific_facade",
"def test_discoverable(self):\r\n plugins = getPlugins(IProcessor)\r\n lmath = [p for p in plugins if p.name == \"mlore\"]\r\n self.assertEqual(len(lmath), 1, \"Did not find math lore plugin: %r\" % (lmath,))",
"def plugin_list(self):\r\n return get_module_list()",
"def get_extension_funcs():\n raise NotImplementedError()",
"def get_non_gui_plugin_classes():\n from enthought.envisage.core_plugin import CorePlugin\n from enthought.envisage.ui.workbench.workbench_plugin import WorkbenchPlugin\n from enthought.tvtk.plugins.scene.scene_plugin import ScenePlugin\n from enthought.mayavi.plugins.mayavi_plugin import MayaviPlugin\n plugins = [CorePlugin,\n WorkbenchPlugin,\n MayaviPlugin,\n ScenePlugin,\n ]\n return plugins",
"def get_enabled_plugins(self):\n return self._enabled_plugins",
"def plugin_get_dependency():\n return []",
"def load_plugin():\n return HostTestPluginCopyMethod_Shell()",
"def get_loaded_extensions():\n raise NotImplementedError()",
"def find_plugins():\n return list(straight.plugin.load('csbot.plugins', subclasses=Plugin))",
"def get_plugins(self) -> dict:\n return Config.get_plugins()",
"def get_loaded_plugins(self):\n return self._loaded_plugins",
"def available_plugins():\n return PluginConnector.available_plugins()",
"def getPlugin(self, *args):\n return _libsbml.SBase_getPlugin(self, *args)",
"def get_available_plugins() -> Dict[str, BasePlugin]:\n if not INITIALIZED:\n _load_and_register_plugins()\n\n return REGISTERED_PLUGINS",
"def get_plugins():\n return [cls() for cls in get_plugin_classes()]",
"def init_extension_plugin(dataio, jenkins_master):\n pluginxml = PluginXML(ElementTree.fromstring(dataio.config_xml))\n all_plugins = get_plugins()\n for plugin in all_plugins:\n if plugin.type == pluginxml.get_class_name():\n return plugin(dataio, jenkins_master)\n return None",
"def test_plugins(self):\n from omtk import plugin_manager\n pm = plugin_manager.plugin_manager\n\n loaded_plugin_names = [plugin.cls.__name__ for plugin in pm.get_loaded_plugins_by_type('modules')]\n\n builtin_plugin_names = (\n 'Arm',\n 'FK',\n 'AdditiveFK',\n 'AvarGrpOnSurface',\n 'FaceBrow',\n 'FaceEyeLids',\n 'FaceEyes',\n 'FaceJaw',\n 'FaceLips',\n 'FaceNose',\n 'FaceSquint',\n 'Hand',\n 'Head',\n 'IK',\n 'InteractiveFK',\n 'Leg',\n 'LegQuad',\n 'Limb',\n 'Neck',\n 'Ribbon',\n 'SplineIK',\n 'Twistbone',\n )\n\n for plugin_name in builtin_plugin_names:\n self.assertIn(plugin_name, loaded_plugin_names)",
"def plugin_enabled_by_default(self):\n return self.__plugin_enabled_by_default",
"def plugin_info():\n\n return {\n 'name': 'Enviro pHAT Poll Plugin',\n 'version': '1.7.0',\n 'mode': 'poll',\n 'type': 'south',\n 'interface': '1.0',\n 'config': _DEFAULT_CONFIG\n }",
"def plugin_info():\n\n return {\n 'name': 'MAX31865 Async plugin',\n 'version': '1.0',\n 'mode': 'async',\n 'type': 'south',\n 'interface': '1.0',\n 'config': _DEFAULT_CONFIG\n }",
"def getCore(cls):\n if RegistryCore in SingletonMetaClass._instances:\n return SingletonMetaClass._instances[RegistryCore]\n else:\n dummyCore = RegistryCore() # Which will persist because it is a singleton\n return dummyCore"
]
| [
"0.6962008",
"0.6383961",
"0.6339505",
"0.62795734",
"0.6274229",
"0.61346227",
"0.61232555",
"0.6111184",
"0.60962766",
"0.60188365",
"0.59543604",
"0.5934437",
"0.58710885",
"0.585612",
"0.5822372",
"0.5810339",
"0.5796572",
"0.57886827",
"0.57826865",
"0.5768762",
"0.5759619",
"0.57517964",
"0.56809366",
"0.5666974",
"0.56667393",
"0.5599887",
"0.5582961",
"0.5549231",
"0.5516198",
"0.5506659"
]
| 0.73781383 | 0 |
Validate that the NSX backend supports FWaaS; can be implemented by each driver | def validate_backend_version(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_supported_features(self):",
"def verify_support():\n ostype, majorrelease, _ = get_os_release_data()\n if ostype not in _supported_os:\n _logger.info('OS type %s is not supported.', ostype)\n return False\n if majorrelease not in _supported_release:\n _logger.info('OS %s %s is not supported', ostype, majorrelease)\n return False\n return True",
"def check_eapi(self, eapi):\n\t\treturn True",
"def supportedSoftwares():\n return [\"any\"]",
"def supportedSoftwares():\n return [\"any\"]",
"def check_stability(self):",
"def _check_family(self):\n return",
"def test_create_hyperflex_auto_support_policy(self):\n pass",
"async def _checknsfw(self, ctx):\n if ctx.channel.nsfw:\n await ctx.send(\"Channel is set to NSFW.\")\n else:\n await ctx.send(\"Channel is not set to NSFW.\")",
"def _platform_compatible():\r\n raise NotImplementedError",
"def test_supported_protocol(self):\n assert self.handler.SUPPORTED_PROTOCOL is None",
"def test_backend_specs_forest_noisy(self):\n dev = qml.device('orquestra.forest', backend=\"3q-noisy-qvm\", shots=10000, wires=3)\n assert dev.backend_specs == forest_noisy_specs",
"def _is_allowed(self, i):\n x = re.search(r\"src\\/backend\\/(.*)\\/\", self.filename)\n if not x:\n return True\n\n service = x.group(1).split(\"/\")[0]\n\n frm, imp, _ = i\n if frm == [\"backend\"]:\n return False\n if frm and frm[0] == \"backend\" and frm[1] not in {service, \"common\"}:\n return False\n return True",
"def is_available():",
"def test_update_hyperflex_auto_support_policy(self):\n pass",
"def test_create_hyperflex_software_version_policy(self):\n pass",
"def test_backend_specs_analytic(self):\n dev = qml.device(\n \"orquestra.qiskit\", backend=\"statevector_simulator\", wires=1, analytic=True\n )\n assert dev.backend_specs == qiskit_analytic_specs",
"def is_supported():\n return not isinstance(_the_app, StubApp)",
"def check_requirement(self):\n raise NotImplementedError",
"def test_validate_media_player_features():\n config = {}\n attrs = {ATTR_SUPPORTED_FEATURES: 20873}\n entity_state = State(\"media_player.demo\", \"on\", attrs)\n assert validate_media_player_features(entity_state, config) is True\n\n config = {FEATURE_ON_OFF: None}\n assert validate_media_player_features(entity_state, config) is True\n\n entity_state = State(\"media_player.demo\", \"on\")\n assert validate_media_player_features(entity_state, config) is False",
"def backend_info(self):\n\t\treturn {'valid': False}",
"def uses_unsupported_feature_or_framework(notebook, skip_args):\n functionalities_to_check = {\n \"docker\": [\"docker\\s+\", \"docker-compose\\s+\"],\n \"local_mode\": ['instance_type\\s*=\\s*\"local\"'],\n \"fsx_efs\": [\"\\s+(efs|EFS)\\s+\", \"^(EFS|efs)\\s+\"]\n }\n\n for identifier in functionalities_to_check:\n if skip_args.get(identifier, True) and contains_code(notebook, functionalities_to_check.get(identifier)):\n return True\n\n return False",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()"
]
| [
"0.66586536",
"0.6007394",
"0.5991885",
"0.5941555",
"0.5941555",
"0.5880553",
"0.5739063",
"0.5629603",
"0.56251514",
"0.56069356",
"0.5590402",
"0.552187",
"0.55049",
"0.54795337",
"0.5479011",
"0.5474474",
"0.54678535",
"0.54673284",
"0.54668057",
"0.5445871",
"0.5434607",
"0.54330117",
"0.54276884",
"0.54276884",
"0.54276884",
"0.54276884",
"0.54276884",
"0.54276884",
"0.54276884",
"0.54276884"
]
| 0.6062802 | 1 |
Return True if the firewall rules should be added to the router | def should_apply_firewall_to_router(self, router_data):
if not router_data.get('external_gateway_info'):
LOG.info("Cannot apply firewall to router %s with no gateway",
router_data['id'])
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_router(self):\n # @todo: Rewrite\n return self.address_set.count() > 1",
"def should_apply_firewall_to_router(self, context, router_id):\n if not super(Nsxv3FwaasCallbacksV1,\n self).should_apply_firewall_to_router(context,\n router_id):\n return False\n\n # get all the relevant router info\n ctx_elevated = context.elevated()\n router_data = self.core_plugin.get_router(ctx_elevated, router_id)\n if not router_data:\n LOG.error(\"Couldn't read router %s data\", router_id)\n return False\n\n # Check if the FWaaS driver supports this router\n if not self.fwaas_driver.should_apply_firewall_to_router(router_data):\n return False\n\n return True",
"def _add_ingress_rules():\n if dry:\n print(\"Would add security group ingress rules.\")\n return True\n else:\n print(\"Adding security group ingress rules.\")\n rules = INGRESS + [{\n 'IpProtocol': '-1',\n 'FromPort': 0,\n 'ToPort': 0,\n 'UserIdGroupPairs': [{'GroupId': _existing.sg.id}]\n }]\n\n for r in rules:\n success = True\n try:\n _existing.sg.authorize_ingress(IpPermissions=[r])\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] != 'InvalidPermission.Duplicate':\n success = False\n print('Could add rule {r} to sg {s}. Reason just below.'.format({\n 'r': str(r),\n 's': _existing.sg.id\n }))\n traceback.print_exc()\n except Exception as e:\n success = False\n print('Could add rule {r} to sg {s}. Reason just below.'.format({\n 'r': str(r),\n 's': _existing.sg.id\n }))\n traceback.print_exc()\n return success",
"def has_rule(self):\n # Someday I'll have a real implementation, but for now I just:\n return False",
"def has_firewall_component(server):\r\n if server['status'] != 'no_edit':\r\n return True\r\n\r\n return False",
"def add_new(self, name):\n if name not in self.routers:\n self.routers[name] = Router(name)\n return True\n return False",
"def is_rule(self):\n return self._fields is not None",
"def available(self) -> bool:\n return self._router.available",
"def is_supported(self):\n return setting_utils.get_dict_config(\n 'OPENSTACK_NEUTRON_NETWORK', 'enable_router')",
"def should_validate(self):\n \n return self.request.method in self.validate_methods",
"def r_is_ok(self, router):\r\n for e in self.exclude:\r\n if e == router.version:\r\n return False\r\n return True",
"def try_add(self, flight: Flight) -> bool:\n last_path_index = len(self.path) - 1\n if not self.path[last_path_index].connects_to(flight):\n return False\n\n for i in range(0, last_path_index + 1):\n existing_destination = self.path[i].destination\n existing_source = self.path[i].source\n if existing_destination == flight.destination and existing_source == flight.source:\n return False\n self.path.append(flight)\n return True",
"def check_windows_firewall():\n if \"ON\" in str(subprocess.check_output('netsh advfirewall '\n 'show all state')):\n return True\n else:\n return False",
"def has_add_permissions(self):\n queryset = self.model.objects.all()\n if hasattr(queryset, 'has_add_permissions'):\n return queryset.has_add_permissions( PyFormsMiddleware.user() )\n else:\n return True",
"def requires_route(self) -> bool:\n return self.goal.is_specific()",
"def should_register(self, app: FlaskUnchained) -> bool:\n if self.only_if in {None, _missing}:\n return True\n elif callable(self.only_if):\n return self.only_if(app)\n return bool(self.only_if)",
"def use_routes(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"use_routes\")",
"def has_add_permission(self, request: HttpRequest) -> bool:\n return False",
"def is_route_throu(self):\n\n # VPR stores route-through LUTs as \"open\" blocks with mode set to\n # \"wire\".\n return self.is_leaf and self.name == \"open\" and self.mode == \"wire\"",
"def can_add_deliveries(self):\n return self.is_open and self.parentnode.parentnode.is_active()",
"def use_proxy(self, request):\n if self.adsl:\n return True\n if \"depth\" in request.meta and int(request.meta['depth']) <= 2:\n return False\n i = random.randint(1, 10)\n return i <= 2",
"def rule_60_all_server_routable(session):\n # Depends on: rule_40_extend_subnet_cidr\n\n conf_server = session[\"config\"][\"server\"]\n subnets = [IpRange(sn) for sn in conf_server[\"ipsec\"][\"subnets\"]]\n\n for server in conf_server[\"res\"][\"servers_allowed\"]:\n for subnet in subnets:\n if server['ip'] in subnet:\n break\n else:\n raise Exception(\"Unreachable server %s detected\" % server['ip'])\n return True",
"def validate_route(self, route):\n\n for router in ROUTER:\n if router.value == route:\n return True\n return False",
"def has_add_permission(self, request, instance=None):\n return False",
"def is_rule(self, name):\n return name in self._rules",
"def allow_forwarded_traffic(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_forwarded_traffic\")",
"def allow_forwarded_traffic(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_forwarded_traffic\")",
"def use_remote_gateways(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"use_remote_gateways\")",
"def use_remote_gateways(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"use_remote_gateways\")",
"def has_add_permission(self, request, obj=None):\n return False"
]
| [
"0.6369625",
"0.63313717",
"0.60524714",
"0.6008175",
"0.5844205",
"0.5764158",
"0.55678964",
"0.55205417",
"0.5501063",
"0.54529506",
"0.5423895",
"0.5395422",
"0.53850245",
"0.53648263",
"0.53387725",
"0.5338227",
"0.53378403",
"0.5306474",
"0.5290564",
"0.52862334",
"0.52829444",
"0.52825606",
"0.5276505",
"0.5274376",
"0.5268658",
"0.52569914",
"0.52569914",
"0.52538455",
"0.52538455",
"0.5247918"
]
| 0.69393283 | 0 |
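The check above only looks for a truthy `external_gateway_info` on the router dict. A small usage sketch with hypothetical router payloads (plain dicts standing in for the router records) might be:

```python
routers = [
    {"id": "r1", "external_gateway_info": {"network_id": "ext-net"}},
    {"id": "r2", "external_gateway_info": None},
]

# Only routers with a gateway would have firewall rules applied.
eligible = [r["id"] for r in routers if r.get("external_gateway_info")]
print(eligible)  # ['r1']
```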
Compare two images, expecting a particular RMS error. im1 and im2 are filenames relative to the baseline_dir directory. tol is the tolerance to pass to compare_images. expect_rms is the expected RMS value, or None. If None, the test will succeed if compare_images succeeds. Otherwise, the test will succeed if compare_images fails and returns an RMS error almost equal to this value. | def image_comparison_expect_rms(im1, im2, tol, expect_rms):
im1 = os.path.join(baseline_dir, im1)
im2_src = os.path.join(baseline_dir, im2)
im2 = os.path.join(result_dir, im2)
# Move im2 from baseline_dir to result_dir. This will ensure that
# compare_images writes the diff file to result_dir, instead of trying to
# write to the (possibly read-only) baseline_dir.
shutil.copyfile(im2_src, im2)
results = compare_images(im1, im2, tol=tol, in_decorator=True)
if expect_rms is None:
assert_equal(None, results)
else:
assert_not_equal(None, results)
assert_almost_equal(expect_rms, results['rms'], places=4) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _compare_images(self, ax, filename, tol=10):\n assert isinstance(ax, Artist)\n if GENERATE_BASELINE:\n savefig(os.path.join(BASELINE_DIR, filename))\n savefig(os.path.join(self.tempdir, filename))\n err = compare_images(os.path.join(BASELINE_DIR, filename),\n os.path.join(self.tempdir, filename),\n tol, in_decorator=True)\n if err:\n raise ImageComparisonFailure('images not close: %(actual)s '\n 'vs. %(expected)s '\n '(RMS %(rms).3f)' % err)",
"def compare_img(img1, img2, err_function=\"ALL\"):\n\n # make sure images are the same shape #\n height1, width1, height2, width2 = img1.shape[0], img1.shape[1], img2.shape[0], img2.shape[1]\n if img1.shape != img2.shape:\n if width1 * height1 > width2 * height2:\n img1 = resize_image(img1, width2, height2)\n else:\n img2 = resize_image(img2, width1, height1)\n # TODO: create better resize to avoid interpolation when possible\n # compare images#\n func_arr = [mse, ssim, L1_norm]\n err_arr = []\n for func in func_arr:\n if err_function == \"ALL\" or func.__name__.upper() == err_function:\n err_arr.append(func(img1, img2))\n return np.array(err_arr)",
"def compare_images(self, img1, img2):\n if self.debug:\n cv2.imshow('img1', img1)\n cv2.imshow('img2', img2)\n cv2.waitKey(5)\n time.sleep(2)\n\n # find the mean squared difference between the images\n # http://www.pyimagesearch.com/2014/09/15/python-compare-two-images/\n err = np.sum((img1.astype('float') - img2.astype('float')) ** 2)\n err /= float(img1.shape[0] * img2.shape[1])\n\n # lower is more similar (better)\n return err",
"def compare_images(im1, im2):\n errors = (im1 - im2) / 255\n return np.mean(np.square(errors))",
"def compare_images(original_img, transformed_img):\r\n original_img = np.array(original_img, np.float32)\r\n transformed_img = np.array(transformed_img, np.float32)\r\n\r\n mse = metrics.mean_squared_error(original_img, transformed_img)\r\n nrmse = metrics.normalized_root_mse(original_img, transformed_img)\r\n ssim = metrics.structural_similarity(original_img, transformed_img)\r\n psnr = metrics.peak_signal_noise_ratio(original_img, transformed_img, data_range=255)\r\n\r\n return {\"MSE\": mse, \"NRMSE\": nrmse, \"PSNR\": psnr, \"SSIM\": ssim}",
"def compare_images(img1, img2):\n #normalize scene pixel values\n img1_mean = img1.mean() \n img1_std = img1.std()\n for i in np.nditer(img1, op_flags=['readwrite']):\n i[...] = (i-img1_mean)/img1_std\n\n #normalize template pixel values\n img2_mean = img2.mean() \n img2_std = img2.std()\n for i in np.nditer(img2, op_flags=['readwrite']):\n i[...] = (i-img2_mean)/img2_std\n\n #sums error\n error_array = img1 - img2\n error_array = error_array.astype(np.int8)\n ss_error = 0\n for i in np.nditer(error_array):\n ss_error += abs(i/255.0)**0.5\n #print ss_error\n return ss_error",
"def get_comparison_error(self, img1, img2, diffImg):\n\n output = subprocess.check_output(\n [\"compare\", \"-metric\", \"RMSE\", \"-alpha\", \"Off\", img1, img2, diffImg],\n stderr=subprocess.STDOUT,\n )\n rmse = float(output.split()[0])\n percent = float(output.split()[1][1:-1])\n return rmse, percent",
"def img_compare(file1, file2):\n # read image\n img1 = Image.open(file1)\n img2 = Image.open(file2)\n\n # resize \n size = 128, 128\n img1_res = img_resize(img1, size)\n img2_res = img_resize(img2, size)\n\n img1_res.save(\"img_1.thumbnail\", \"JPEG\")\n img2_res.save(\"img_2.thumbnail\", \"JPEG\")\n\n # convert to gray scale\n img1_grayscale = img1_res.convert('LA')\n img1_grayscale.save(\"img_1_grayscale.png\")\n\n img2_grayscale = img2_res.convert('LA')\n img2_grayscale.save(\"img_2_grayscale.png\")\n\n # normalise\n img1_norm = normalize(np.array(img1_grayscale.getdata()).astype(float))\n img2_norm = normalize(np.array(img2_grayscale.getdata()).astype(float))\n\n try:\n # compare two images\n diff = img1_norm - img2_norm\n m_norm = sum(abs(diff)) # Manhattan norm\n z_norm = norm(diff.ravel(), 0) # Zero norm\n\n # print(\"Manhattan norm:\", m_norm, \"/ per pixel:\", m_norm/img1_norm.size)\n # print(\"Zero norm:\", z_norm, \"/ per pixel:\", z_norm*1.0/img1_norm.size)\n\n return m_norm/img1_norm.size, float(z_norm) / img1_norm.size\n except:\n return 100, 100",
"def assert_img_equal(img1, img2, thresh=0.001, resize=True):\n\n def standardize_args(img):\n \"\"\" Transform some img representation into a numpy array \"\"\"\n if isinstance(img, np.ndarray):\n pass\n elif isinstance(img, Image.Image):\n img = np.array(img)\n else:\n # Assume its something path/str-like\n img = cv2.imread(str(img))\n img[..., :3] = img[..., :3][..., ::-1]\n img = img.astype(np.float32)\n if img.ndim == 2:\n img = img[..., None]\n return img\n\n img1 = standardize_args(img1)\n img2 = standardize_args(img2)\n\n if resize and img1.shape != img2.shape:\n img2 = cv2.resize(img2, (img1.shape[1], img1.shape[0]))\n\n avg_diff = np.linalg.norm(img1 - img2, axis=-1).mean()\n\n assert avg_diff < thresh",
"def assert_image(visual, img, img_name, expected_image_filename, expected_result='equal', threshold=0):\n # Save result image in output folder\n result_file = os.path.join(visual.output_directory, f'{img_name}.png')\n img.save(result_file)\n\n # Output image and expected image must be equal\n expected_image = os.path.join(root_path, 'resources', f'{expected_image_filename}.png')\n compare_image_files(visual, previous_method_name(), result_file, expected_image, expected_result, threshold)",
"def assert_widget_image(tmpdir, widget, filename, fail_now=True):\n\n # If requested, save the \"actual\" images in another directory that will be\n # preserved beyond the test run.\n\n if IMAGE_OUTPUT_DIR:\n actual = os.path.join(IMAGE_OUTPUT_DIR, filename)\n else:\n actual = tmpdir.join(filename).strpath\n\n widget.render(actual)\n\n # Compare to the references\n\n refdir = os.path.join(DATA, 'refimg_' + os.path.splitext(filename)[0])\n results = []\n\n for refbase in sorted(os.listdir(refdir)):\n refname = os.path.splitext(refbase)[0]\n expected = os.path.join(refdir, refbase)\n rv = compare_images(\n expected,\n actual,\n tol=IMAGE_COMPARISON_TOLERANCE,\n in_decorator=True\n )\n\n if rv is None:\n # Success! Clean up any fail images (mostly for the IMAGE_OUTPUT_DIR mode)\n for p in glob(actual.replace('.png', '_vs_*.png')):\n os.unlink(p)\n return None\n\n failpath = actual.replace('.png', '-failed-diff.png')\n newfailpath = actual.replace('.png', '_vs_%s.png' % refname)\n os.rename(failpath, newfailpath)\n results.append((refname, rv['rms']))\n\n # Nothing was good enough :-(\n #\n # We used to have machinery here to emit a \"reproduction script\" that\n # printed out Python code to recreate the image files using big\n # BASE64-encoded strings, but now we can just use Azure Pipelines artifacts.\n # Consult the Git history if the reproduction script stuff is needed again.\n\n msg = (\n 'observed image %s did not match any references to required RMS tolerance of '\n '%.2f; results were: %s'\n ) % (actual, IMAGE_COMPARISON_TOLERANCE, ', '.join('%s=%.2f' % t for t in results))\n\n if fail_now:\n pytest.fail(msg, pytrace=False)\n\n return '{}: {}'.format(filename, msg)",
"def compare(image_a, image_b):\n image_a = standardize_format(image_a)\n grayscale_image_a = to_grayscale(image_a)\n image_b = standardize_format(image_b)\n grayscale_image_b = to_grayscale(image_b)\n err = mse(grayscale_image_a, grayscale_image_b)\n return err",
"def assert_compare(x, y, atol=1e-5, method='ALL'):\n mae = 0\n mse = 0\n rmse = 0\n result = 0\n if method == 'MAE':\n mae = np.abs(x-y).mean()\n result = mae\n elif method == 'RMSE':\n rmse = np.sqrt(np.square(x - y).mean())\n result = rmse\n #result=np.sqrt(((x - y) ** 2).mean())\n elif method == 'MSE':\n mse = np.square(x - y).mean()\n result = mse\n #result=((x - y) ** 2).mean()\n else:\n mae = np.abs(x-y).mean()\n rmse = np.sqrt(np.square(x - y).mean())\n mse = np.square(x - y).mean()\n\n if result > atol or (method == 'ALL' and (mae > atol or rmse > atol or mse > atol)):\n f = six.StringIO()\n f.write(\n 'assert_compare failed: \\n' +\n ' atol: {} \\n'.format(atol) +\n ' method: {}\\n'.format(method) +\n ' MAE: {}\\n'.format(mae) +\n ' MSE: {}\\n'.format(mse) +\n ' RMSE: {}\\n'.format(rmse) +\n ' shape: {} {}\\n'.format(x.shape, y.shape) +\n ' dtype: {} {}\\n'.format(x.dtype, y.dtype))\n if x.shape == y.shape:\n xx = x if x.ndim != 0 else x.reshape((1,))\n yy = y if y.ndim != 0 else y.reshape((1,))\n err = np.abs(xx - yy)\n i = np.unravel_index(np.argmax(err), err.shape)\n f.write(\n ' i: {}\\n'.format(i) +\n ' x[i]: {}\\n'.format(xx[i]) +\n ' y[i]: {}\\n'.format(yy[i]) +\n ' err[i]: {}\\n'.format(err[i]))\n opts = np.get_printoptions()\n try:\n np.set_printoptions(threshold=10000)\n f.write('x: ' + np.array2string(x, prefix='x: ') + '\\n')\n f.write('y: ' + np.array2string(y, prefix='y: ') + '\\n')\n finally:\n np.set_printoptions(**opts)\n logging.warning(f.getvalue())\n return False\n else:\n return True",
"def compare_images(self):\r\n m = round(self.mse(self.image_a, self.image_b), 4)\r\n s = round(ssim(self.image_a, self.image_b) * 100, 5)\r\n return (\r\n m, s)",
"def is_equal(image_a, image_b, tolerance=0.0):\n return image_diff_percent(image_a, image_b) <= tolerance",
"def compare(a, b, *, tol=1e-6):\n if abs(a - b) < tol:\n return 0.0\n elif a > b:\n return 1.0\n else:\n return -1.0",
"def calculate_rms(image_result, expected_image):\n # calculate the per-pixel errors, then compute the root mean square error\n num_values = np.prod(expected_image.shape)\n # Images may be e.g. 8-bit unsigned integer; upcast to default integer size\n # (32 or 64 bit) to reduce likelihood of over-/under-flow.\n abs_diff_image = abs(np.int_(expected_image) - np.int_(image_result))\n\n histogram = np.bincount(abs_diff_image.ravel(), minlength=256)\n sum_of_squares = np.sum(np.int64(histogram) * np.arange(len(histogram))**2)\n rms = np.sqrt(float(sum_of_squares) / num_values)\n return rms",
"def compare(I1, I2):\n return meanSquaredError(I1, I2)",
"def compare_images(img1_path, img2_path):\n img1 = Image.open(img1_path)\n img2 = Image.open(img2_path)\n try:\n diff = ImageChops.difference(img1, img2)\n except ValueError:\n return False\n return diff.getbbox() is None",
"def specMatch(images, targmag=None, grayscale=True, rescale_kwargs={}):\n # Load images\n images = [readImage(im, grayscale=grayscale) for im in images]\n\n # Check all image are same size\n for im in images[1:]:\n if im.shape != images[0].shape:\n raise ValueError('All images must have same dimensions')\n\n # Calculate spectra\n amp_spectra = np.empty_like(images)\n phase_spectra = np.empty_like(images)\n for i, im in enumerate(images):\n F = fft2(im, axes=(0,1))\n amp_spectra[i] = np.abs(F)\n phase_spectra[i] = np.angle(F)\n\n # Calculate tarmag if needed\n if targmag is None:\n targmag = amp_spectra.mean(axis=0)\n\n # Match amplitude spectra to targmag\n for i in range(len(images)):\n F = targmag * np.exp(1j * phase_spectra[i])\n images[i] = ifft2(F, axes=(0,1)).real\n\n # Return images after rescaling\n return rescale_images(images, **rescale_kwargs)",
"def magick_compare(self) -> None:\n # Make diff images\n if len(self.clips) > 2:\n Status.fail(f'{self.__class__.__name__}: \"magick_compare\" can only be used with two clips!', exception=ValueError)\n\n self.path_diff = self.path / 'diffs'\n try:\n subprocess.call(['magick', 'compare'], stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)\n self.path_diff.mkdir(parents=True)\n except FileNotFoundError as file_not_found:\n Status.fail(\n f'{self.__class__.__name__}: \"magick compare\" was not found!',\n exception=FileNotFoundError, chain_err=file_not_found\n )\n except FileExistsError as file_err:\n Status.fail(\n f'{self.__class__.__name__}: {self.path_diff.to_str()} already exists!',\n exception=FileExistsError, chain_err=file_err\n )\n\n all_images = [sorted((self.path / name).glob('*.png')) for name in self.clips.keys()]\n images_a, images_b = all_images\n\n cmds = [\n f'magick compare \"{i1.to_str()}\" \"{i2.to_str()}\" '\n + f'\"{self.path_diff.to_str()}/diff_' + f'{f}'.zfill(len(\"%i\" % self.max_num)) + '.png\"'\n for i1, i2, f in zip(images_a, images_b, self.frames)\n ]\n\n # Launch asynchronously the Magick commands\n Status.info('Diffing clips...')\n print()\n SubProcessAsync(cmds)",
"def compare_images(first_img_path, second_img_path):\n img1 = Image.open(first_img_path)\n img2 = Image.open(second_img_path)\n\n diff = ImageChops.difference(img1, img2)\n print(diff.getbbox())",
"def getRMSE(image1, image2):\n im1 = readImage(image1, grayscale=False)\n im2 = readImage(image2, grayscale=False)\n return np.sqrt( ((im1 - im2)**2).mean() )",
"def assert_image_equal(path1, path2):\n test_im = np.asarray(Image.open(path1))\n ref_im = np.asarray(Image.open(path2))\n npt.assert_array_equal(test_im, ref_im)",
"def _checkOutputs(self, outputs, random=False, errorthreshold=0.001):\n for out in outputs:\n outFile = os.path.join(self._testDir, self.outputDir, out)\n fileGoldStd = os.path.join(self.goldDir, out)\n \n # Check the expect output file was produced\n msg = \"Missing expected output file:\\n output: %s\" % outFile\n self.assertTrue(os.path.exists(outFile), red(msg))\n \n if random:\n print(yellow(\"WARNING: %s was created using a random seed, check skipped...\" % outFile))\n else:\n fnGoldStd = xmippLib.FileName(fileGoldStd)\n if fnGoldStd.isImage():\n im1 = xmippLib.Image(fileGoldStd)\n im2 = xmippLib.Image(outFile)\n msg = \"Images are not equal (+-%f):\\n output: %s\\n gold: %s\" % \\\n (errorthreshold, outFile, fileGoldStd)\n self.assertTrue(im1.equal(im2, errorthreshold), red(msg))\n elif fnGoldStd.isMetaData():\n msg = \"MetaDatas are not equal:\\n output: %s\\n gold: %s\" % (outFile, fileGoldStd)\n self.assertTrue(xmippLib.compareTwoMetadataFiles(outFile, fileGoldStd), red(msg))\n else:\n msg = \"Files are not equal:\\n output: %s\\n gold: %s\" % (outFile, fileGoldStd)\n self.assertTrue(xmippLib.compareTwoFiles(outFile, fileGoldStd, 0), red(msg))",
"def test_errors_for_unequal_image_size() -> None:\n cam = Camera(imgsz=(100, 200), f=(10, 10))\n xcam = Matlab(imgsz=(100, 100), fc=(10, 10))\n with pytest.raises(ValueError):\n Converter(xcam, cam)",
"def testBasics(self):\n for imageClass in (afwImage.ImageF, afwImage.ImageD):\n inImage = makeRampImage(bbox=self.bbox, start=-5, stop=2500, imageClass=imageClass)\n\n measImage = inImage.Factory(inImage, True)\n linSq = LinearizeSquared()\n linRes = linSq(image=measImage, detector=self.detector)\n desNumLinearized = np.sum(self.sqCoeffs.flatten() > 0)\n self.assertEqual(linRes.numLinearized, desNumLinearized)\n self.assertEqual(linRes.numAmps, len(self.detector.getAmpInfoCatalog()))\n\n refImage = inImage.Factory(inImage, True)\n refLinearizeSquared(image=refImage, detector=self.detector)\n\n self.assertImagesAlmostEqual(refImage, measImage)\n\n # make sure logging is accepted\n log = Log.getLogger(\"ip.isr.LinearizeSquared\")\n linRes = linSq(image=measImage, detector=self.detector, log=log)",
"def run_comparison(self, max_miss=0, rtol=1e-7, atol=0, compare=None,\n ignore=None, ignore_fits_wcs=True,\n raise_exception=True):\n self.max_miss = max_miss\n self.rtol = rtol\n self.atol = atol\n self.ignore_fits_wcs = ignore_fits_wcs\n if compare is None:\n compare = ('filename', 'tags', 'numext', 'refcat', 'phu',\n 'hdr', 'attributes', 'wcs')\n if ignore is not None:\n compare = [c for c in compare if c not in ignore]\n\n errordict = {}\n for func_name in compare:\n errorlist = getattr(self, func_name)()\n if errorlist:\n errordict[func_name] = errorlist\n if errordict and raise_exception:\n raise AssertionError(self.format_errordict(errordict))\n return errordict",
"def compare_files(_img1, _img2, _network, verbose=False):\n \n face_dsts = []\n \n descs1 = calculate_embeddings_from_buffer(_img1, _network, verbose=verbose)\n descs2 = calculate_embeddings_from_buffer(_img2, _network, verbose=verbose)\n\n for desc1 in descs1:\n (emb1, det1) = (desc1.emb, desc1.det)\n for desc2 in descs2:\n (emb2, det2) = (desc2.emb, desc2.det)\n (dist, match_faces) = compare_embeddings(emb1, emb2)\n face_dsts.append((dist, match_faces, det1, det2))\n \n return face_dsts",
"def compare(image_a, image_b, is_camera_image):\n\n # Generate a unique filename\n filename = uuid.uuid4().hex[:3]\n\n if is_camera_image:\n image_a = imutils.rotate_bound(image_a, 90)\n image_b = imutils.rotate_bound(image_b, 90)\n\n # Store original to show in future\n original = image_a\n\n # Convert to greyscale\n image_a = cv2.cvtColor(image_a, cv2.COLOR_BGR2GRAY)\n image_b = cv2.cvtColor(image_b, cv2.COLOR_BGR2GRAY)\n\n # Reduce size and blur to account for shaky handheld camera based images\n if is_camera_image:\n scale_multiplier = 0.03125\n image_a = cv2.resize(image_a, (0, 0), fx=scale_multiplier, fy=scale_multiplier)\n image_b = cv2.resize(image_b, (0, 0), fx=scale_multiplier, fy=scale_multiplier)\n image_a = cv2.GaussianBlur(image_a, (1001, 1001), cv2.BORDER_DEFAULT)\n image_b = cv2.GaussianBlur(image_b, (1001, 1001), cv2.BORDER_DEFAULT)\n\n # Obtain SSIM and determine differences\n try:\n _, differences = structural_similarity(image_a, image_b, full=True, gaussian_weights=True)\n except ValueError:\n print('Images are not the same size')\n return None\n\n # Convert to cv2 array\n differences = (differences * 255).astype('uint8')\n\n # Threshold and find contours (differences)\n thresh = cv2.threshold(differences, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\n contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n contours = imutils.grab_contours(contours)\n\n # Draw contours (differences)\n for cont in contours:\n (x, y, w, h) = cv2.boundingRect(cont)\n if is_camera_image:\n multiplier = int(1 / scale_multiplier)\n y *= multiplier\n x *= multiplier\n h *= multiplier\n w *= multiplier\n cv2.rectangle(original, (x, y), (x + w, y + h), (255, 0, 0), 4)\n\n # TODO: Create GIF highlighting differences (instead of statuic image)\n cv2.imwrite('static/images/differences/' + filename + '.jpg', original)\n\n return filename"
]
| [
"0.6774918",
"0.6166663",
"0.60055065",
"0.58556736",
"0.57545406",
"0.57281005",
"0.56910443",
"0.5681535",
"0.56446886",
"0.5560447",
"0.5496189",
"0.5476247",
"0.5460852",
"0.5297204",
"0.52855736",
"0.52817315",
"0.5193507",
"0.51113427",
"0.50988805",
"0.5091349",
"0.501254",
"0.49307296",
"0.49275705",
"0.49161577",
"0.48608848",
"0.48543915",
"0.4852414",
"0.48498768",
"0.4849862",
"0.48386395"
]
| 0.8803048 | 0 |
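The helper above delegates the actual comparison to `compare_images` and only asserts on the reported RMS. As a rough illustration of the underlying metric (not Matplotlib's implementation), a plain per-pixel RMS between two equally sized images could be computed like this, assuming Pillow and NumPy are available:

```python
import numpy as np
from PIL import Image

def rms_diff(path_a: str, path_b: str) -> float:
    """Root-mean-square pixel difference between two equally sized images."""
    a = np.asarray(Image.open(path_a).convert("RGB"), dtype=np.float64)
    b = np.asarray(Image.open(path_b).convert("RGB"), dtype=np.float64)
    return float(np.sqrt(np.mean((a - b) ** 2)))
```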
Renames the remote object `source` as `filename`. | def mv(self, source: str, filename: str) -> None:
self.cp(source, filename)
self.rm(source) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def renamed(self, source, dest):\r\n self.__close_and_reload(source, new_filename=dest)",
"def rename(self, src, dst):\n os.rename(src, dst)",
"def rename(self, name=None, destination=None):\n raise NotImplementedError\n return None",
"def rename(self, target):\n target = os.fspath(target)\n return error.checked_call(os.rename, self.strpath, target)",
"def rename(self, src, dst, preserve=False):\n self.connect()\n if preserve:\n self._write('RENAMENX %s %s\\r\\n' % (src, dst))\n return self._get_numeric_response()\n else:\n self._write('RENAME %s %s\\r\\n' % (src, dst))\n return self._get_simple_response().strip()",
"def rename_file(source, oldname, newname):\n #source = client_variables.output_folder\n renamefiles = os.listdir(source)\n ext = (\".xlsx\", \".csv\", \".pdf\", \".png\")\n for renamefile in renamefiles:\n if renamefile.endswith(ext):\n renamefile = source + \"/\" + renamefile\n print \"renaming:\", renamefile\n newname = source + \"/\" + newname\n print \"newname:\", newname\n os.rename(renamefile, newname)\n elif renamefile.startswith(oldname):\n renamefile = source + \"/\" + renamefile\n print \"renaming:\", renamefile\n newname = source + \"/\" + newname\n print \"newname:\", newname\n os.rename(renamefile, newname)",
"def do_mv(self, args):\n if args:\n args = args.split()\n\n if not args or len(args) < 2:\n print('Usage: mv source_file target_file')\n return\n\n src = args[0]\n dst = args[1]\n if not (src.startswith('shared/') and dst.startswith('shared/')\n or self._user):\n print('login required for specifying non-shared file with mv')\n return\n\n try:\n new_name = self._qm.rename_file(self._user, src, dst)\n print('renamed file', src, 'to', new_name)\n except Exception as e:\n print('ERROR renaming %s: %s' % (src, e), file=sys.stderr)\n return",
"def rename(self, src, dst, **kwargs):\r\n if kwargs:\r\n if 'preserve' in kwargs:\r\n warnings.warn(DeprecationWarning(\r\n \"preserve option to 'rename' is deprecated, \"\r\n \"use Redis.renamenx instead\"))\r\n if kwargs['preserve']:\r\n return self.renamenx(src, dst)\r\n return self.format_inline('RENAME', src, dst)",
"def os_rename(self, source, destination):\n cmd = ['/bin/mv', source, destination]\n process = subprocess.Popen(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n returncode = subprocess.Popen.wait(process)\n return returncode",
"def rename_file(source, destination, alog):\n\n # Some error checking against a legitimate source & destination.\n if not type(source) is str:\n raise CoreError('Source is not of str type.')\n elif not type(destination) is str:\n raise CoreError('Destination is not of str type.')\n elif not os.path.isfile(source):\n raise CoreError(source + ' is not a valid file.')\n\n head, tail = os.path.split(destination)\n if not os.path.isdir(head + '/'):\n try:\n os.makedirs(head + '/')\n except:\n raise CoreError('Failed to create new directory: '\n + (head + '/'))\n\n for i in range(0, len(MuzikArkive.illegal_name_characters)):\n if MuzikArkive.illegal_name_characters[i] in tail:\n tail = tail.replace(MuzikArkive.illegal_name_characters[i], '_')\n alog.rlog = MuzikArkive.illegal_name_characters[i] \\\n + ' was removed from ' + destination\n\n if not os.path.isfile(destination):\n try:\n os.rename(source, destination)\n except:\n raise CoreError('os.rename() Failed.')\n else:\n head, tail = destination.rsplit('.', 1)\n rname = True\n i = 1\n while rname:\n addon = '[' + str(i) + '].'\n if not os.path.isfile(head + addon + tail):\n try:\n os.rename(source, (head + addon + tail))\n except:\n raise CoreError('os.rename() Failed.')\n else:\n rname = False\n else:\n i += 1",
"def rename(self, target):\r\n py.process.cmdexec(\"svn move --force %s %s\" %(str(self), str(target)))",
"def RenameFile(self, oldname: str, newname: str) -> None:\n ...",
"def set_object_name(remote, object_id, new_name):\n cmd = mmapi.StoredCommands()\n cmd.AppendSceneCommand_SetObjectName(object_id, new_name)\n remote.runCommand(cmd)",
"def rename(old, new):",
"def rename(old, new):",
"def rename_file (self):\n\t\tassert self.__filename, \"Renaming could not complete because the new filename could not be determined, one or more needed arguments is empty!\"\n\t\tos.rename( self._file.path, self.__filename )\n\t\t\n\t\tif self.verbose and self.log :\tself.log.info( 'File renamed from %s to %s' % (self._file.path, self.__filename))",
"def set_source(self, source_name):\n self.source = source_name",
"def copy_source(self, filename, new_filename, **kw):\n\n file_path = os.path.join(self.storage_path, filename)\n new_file_path = os.path.join(self.storage_path, new_filename)\n shutil.copyfile(file_path, new_file_path)",
"def rename(oldname, newname):",
"def hmove(src_path, res_path):\n os.rename(src_path, res_path)",
"def rename(self, new_name):\n\n self.__enforce_connected()\n current_url = self.url\n self._set_field(\"name\",new_name)\n self.set_json(self._http_client.update(current_url, self.get_json()))",
"def __gitRenameRemote(self):\n remotes = self.vcs.gitGetRemotesList(self.project.getProjectPath())\n remote, ok = QInputDialog.getItem(\n None,\n self.tr(\"Rename\"),\n self.tr(\"Select a remote repository:\"),\n remotes,\n 0, False)\n if ok:\n self.vcs.gitRenameRemote(self.project.getProjectPath(), remote)",
"def copy_from(self, source):\n\t\tassert ltrace_func(TRACE_BASE)\n\t\tself.name = source.name",
"def copy_rename_file(source_file_path: str, target_dir: str, new_name: str) -> str:\n shutil.copy2(source_file_path, target_dir)\n target_path = os.path.join(target_dir, os.path.basename(source_file_path))\n new_file_name = new_name + get_extension(source_file_path)\n new_file_path = os.path.join(target_dir, new_file_name)\n os.rename(target_path, new_file_path)\n return new_file_path",
"def my_rename(self, src, dst):\n self.renamerCalled = True",
"def copy_map1_name(source, target):\n\n if not is_matching_type(source, target):\n return\n\n source_uv_name = cmds.getAttr(\"{}.uvSet[0].uvSetName\".format(source))\n\n try:\n cmds.setAttr(\"{}.uvSet[0].uvSetName\".format(target), source_uv_name,\n type=\"string\")\n except RuntimeError:\n logger.debug(\"{} doesn't not have uvs, skipping udpate map1 name\"\n .format(target))\n return",
"def change_nm(src,dst):\n\timport os\n\ttry:\n\t\tos.rename(src,dst)\n\texcept:\n\t\tprint \"this is a mistake\"\n\t\treturn -1\n\n\treturn 0",
"def setSourceName(self, instance, value):\n mapping = IAnnotations(instance).setdefault(\n 'collective.table',\n PersistentMapping()\n )\n mapping['source_name'] = value",
"def rename(ctx, input_file, output_file):\n ctx.ensure_object(dict)\n ctx.obj[\"reader\"] = PFBReader(input_file)\n ctx.obj[\"writer\"] = PFBWriter(output_file)",
"def put_source(file_path: str, source: str, sha256sum: str) -> str:\n return g.ledger.file.set_source(file_path, source, sha256sum)"
]
| [
"0.66736585",
"0.61954975",
"0.6188893",
"0.61185676",
"0.6105106",
"0.6065581",
"0.60061115",
"0.5984833",
"0.5973025",
"0.59440464",
"0.59394705",
"0.5743361",
"0.56863225",
"0.56793886",
"0.56793886",
"0.56656957",
"0.5629569",
"0.5627689",
"0.5606619",
"0.55541617",
"0.54692304",
"0.5459563",
"0.5450723",
"0.5382086",
"0.5375496",
"0.535566",
"0.5353378",
"0.53447753",
"0.5324621",
"0.5321638"
]
| 0.67396265 | 0 |
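The `mv` above expresses a move as copy-then-delete, the usual fallback when a storage backend has no native rename. The same pattern on a local filesystem (hypothetical paths, standard library only) would look like:

```python
import os
import shutil

def mv(source: str, filename: str) -> None:
    # Remove the original only after the copy has succeeded.
    shutil.copyfile(source, filename)
    os.remove(source)
```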
Get the mean pixel values across the dataset for all coordinates. Will return a [C,H,W]-shaped torch.Tensor | def get_mean_coord(self):
# load dataset in a dummy manner
dataset = torchvision.datasets.MNIST('../../data/MNIST_data/', train=True, download=False)
mean = (dataset.data.float().mean(0) / 255).unsqueeze(0) # [1,28,28]
return mean | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_mean_coord(self):\n # load dataset in a dummy manner\n dataset = torchvision.datasets.CIFAR10('../../data/CIFAR_data/', train=True, download=False)\n data = torch.FloatTensor(dataset.data).permute(0, 3, 1, 2) # shape [num_img, 3, 32, 32]\n mean = data.mean(0) / 255 # [3,32,32]\n return mean",
"def mean_allcnnc():\n # TODO implement pre forward hook to adapt to arbitary image size for other data sets than cifar100\n return nn.Sequential(\n nn.AvgPool2d(kernel_size=(6, 6)),\n flatten()\n )",
"def mean(self):\n return self.data.mean(axis=-1, keepdims=True)",
"def meancol(source):\n\tonepix = source.copy()\n\tonepix.thumbnail((1,1),Image.ANTIALIAS)\n\treturn onepix.getpixel((0,0))",
"def ensemble_mean(self):\n return self.mean(dim='mem')",
"def _get_mean(self):\n return [layer._get_mean() for layer in self.layers]",
"def get_average_image_from_batch(batch):\n # YOUR CODE HERE\n return( mx.nd.mean(batch, axis=0) )\n raise NotImplementedError()",
"def get_mean_of_dataset(train_data_loader, args, idx=0):\n meter = AverageMeter()\n for i in train_data_loader:\n if isinstance(i, list):\n meter.update(i[idx])\n else:\n meter.update(i)\n data_mean = meter.mean\n if data_mean.ndim == 2: data_mean = data_mean.mean(0)\n return tensor(data_mean, args)",
"def mean(self):\n mean = sum(self.data)/self.size\n return mean",
"def get_mean_image(data):\n\n\tno_of_images = len(data)\n\tmean_im = np.zeros((28, 28))\n\tfor i in xrange(no_of_images):\n\t\tmean_im = mean_im + data[i, 0:28, 0:28]\n\n\tmean_im = mean_im / no_of_images\n\treturn mean_im",
"def get_mean(self):\n mean = np.array(np.zeros((4,8)))\n for i,c in enumerate(self.cellLines):\n for j,l in enumerate(self.ligands):\n mean[i][j] = self.aucs[c][l]['mean']\n return mean",
"def mean(self):\n\n\t\tif not self._masked:\n\t\t\t\n\t\t\treturn self.data.mean()\n\t\t\n\t\telse:\n\t\t\t\n\t\t\tif not hasattr(self,\"_full_mask\"):\n\t\t\t\tself.maskBoundaries()\n\t\t\t\n\t\t\treturn self.data[self._full_mask].mean()",
"def mean(self):\r\n return np.mean(self.data_array)",
"def get_channel_average_from_batch(batch):\n # YOUR CODE HERE\n return( mx.nd.mean(batch, axis=1, exclude=True) )\n raise NotImplementedError()",
"def mean_flat(tensor):\n return tensor.mean(axis=list(range(1, len(tensor.shape))))",
"def compute_mean(self):\n # load_in_all_parameters(self.save_directory, self.auto_encoder)\n for i, data_row in enumerate(self.X_train_naive):\n input_nn = data_row\n if torch.cuda.is_available():\n input_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)).cuda())\n else:\n input_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)))\n\n prediction_embedding = self.model.forward_only_encoding(input_nn)\n print(prediction_embedding)\n if i == 0:\n self.embedding_np = prediction_embedding.data.clone().cpu().numpy()[0]\n else:\n self.embedding_np = np.vstack((self.embedding_np, prediction_embedding.data.clone().cpu().numpy()[0]))\n self.mean_embedding = np.average(self.embedding_np, axis=0)\n print('mean embedding is ', self.mean_embedding)",
"def centroid(self, coords):\r\n return np.mean(coords, axis=0)",
"def mean_flat(tensor):\n return tensor.mean(dim=list(range(1, len(tensor.shape))))",
"def mean(self):\n return np.average(self.particles, weights=self.weights, axis=0)",
"def batch_stat(x):\n\tmean = torch.mean(x, dim=[0, 2, 3], keepdim=True)\n\tvar = torch.mean((x-mean)**2, dim=[0, 2, 3], keepdim=True)\n\treturn mean, var",
"def get_mean(self):\r\n for i in range(1,len(self.data[0])):\r\n self.prom.append(np.mean(self.data[:,i]))",
"def mean(self):\n return self.aggregate(np.mean)",
"def meanColor(self):\n return self.image[self.x, self.y]",
"def matrix_mean(matrix):\n return sum(map(mean,matrix))",
"def _mean(images):\n try:\n total = next(images)\n except StopIteration:\n print(\"No images found. Use 0.\")\n return 0.\n num = 1\n for image in images:\n total += image\n mean = total / num\n if np.ndim(mean) == 3:\n mean = np.mean(mean, axis=0)\n return mean",
"def transform(tensor):\n L, W, D = tensor.shape\n return tensor.transpose(1, 0, 2).reshape(W, L*D).mean(axis=0)",
"def pixel_score(self,X,Y):\n pred_Y = self.predict(X)\n score = []\n label_size = self.label_width**2\n for i in range(len(Y)):\n score.append(np.sum(Y[i]==pred_Y[i])/label_size)\n mean_score = np.mean(score)\n return mean_score",
"def average(self):\n return np.mean(self.buf[:self._size], axis=0)",
"def channel_mean_std(dataset, batch_size=64):\n\n\t# Make a loader for the data\n\tloader = DataLoader(dataset, batch_size=batch_size)\n\n\t# Number of color channels and size of dataset\n\tch = next(iter(loader))[0].shape[1]\n\tn = len(dataset)\n\n\t# Iterate through data, computing mean and variance of each batch and summing\n\t# to running mean and running variance (this utilizes the fact that the mean is\n\t# equal to the mean of the batch means, and similarly for the variance)\n\tmean = torch.zeros(3)\n\tvar = torch.zeros(3)\n\n\tfor batch, _ in tqdm(loader):\n\t\t# Ratio of batch size to entire dataset (for weighting purposes)\n\t\tr = float(batch.shape[0]/n)\n\t\t# Reshape batch so that it is just a set of vectors, one per channel, whose entries\n\t\t# are a concatenation of all that channel's values in all the batch images\n\t\tch_vectors = torchvision.utils.make_grid(batch, padding=0).view(ch, -1)\n\n\t\t# Add to running mean and variance\n\t\tmean.add_(torch.mean(ch_vectors, 1)*r)\n\t\tvar.add_(torch.var(ch_vectors, 1)*r)\n\n\treturn mean, var.sqrt()",
"def hyper_mean(udf_data: UdfData):\n # Iterate over each tile\n cube_list = []\n for cube in udf_data.get_datacube_list():\n mean = cube.array.mean(dim=\"t\")\n mean.name = cube.id + \"_mean\"\n cube_list.append(XarrayDataCube(array=mean))\n udf_data.set_datacube_list(cube_list)"
]
| [
"0.755946",
"0.6978839",
"0.6882663",
"0.6746081",
"0.6654038",
"0.6605897",
"0.6583859",
"0.65773165",
"0.6476074",
"0.6457095",
"0.64478827",
"0.6445506",
"0.64343315",
"0.6434308",
"0.6434166",
"0.6421386",
"0.6399245",
"0.6388089",
"0.6370936",
"0.63649416",
"0.6344257",
"0.6319758",
"0.6313895",
"0.6313083",
"0.62775946",
"0.6272817",
"0.6255884",
"0.6241285",
"0.6239681",
"0.6231054"
]
| 0.7259522 | 1 |
Get the mean pixel values across the dataset for all coordinates. Will return a [C,H,W]-shaped torch.Tensor | def get_mean_coord(self):
# load dataset in a dummy manner
dataset = torchvision.datasets.CIFAR10('../../data/CIFAR_data/', train=True, download=False)
data = torch.FloatTensor(dataset.data).permute(0, 3, 1, 2) # shape [num_img, 3, 32, 32]
mean = data.mean(0) / 255 # [3,32,32]
return mean | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_mean_coord(self):\n # load dataset in a dummy manner\n dataset = torchvision.datasets.MNIST('../../data/MNIST_data/', train=True, download=False)\n mean = (dataset.data.float().mean(0) / 255).unsqueeze(0) # [1,28,28]\n return mean",
"def mean_allcnnc():\n # TODO implement pre forward hook to adapt to arbitary image size for other data sets than cifar100\n return nn.Sequential(\n nn.AvgPool2d(kernel_size=(6, 6)),\n flatten()\n )",
"def mean(self):\n return self.data.mean(axis=-1, keepdims=True)",
"def meancol(source):\n\tonepix = source.copy()\n\tonepix.thumbnail((1,1),Image.ANTIALIAS)\n\treturn onepix.getpixel((0,0))",
"def ensemble_mean(self):\n return self.mean(dim='mem')",
"def _get_mean(self):\n return [layer._get_mean() for layer in self.layers]",
"def get_average_image_from_batch(batch):\n # YOUR CODE HERE\n return( mx.nd.mean(batch, axis=0) )\n raise NotImplementedError()",
"def get_mean_of_dataset(train_data_loader, args, idx=0):\n meter = AverageMeter()\n for i in train_data_loader:\n if isinstance(i, list):\n meter.update(i[idx])\n else:\n meter.update(i)\n data_mean = meter.mean\n if data_mean.ndim == 2: data_mean = data_mean.mean(0)\n return tensor(data_mean, args)",
"def mean(self):\n mean = sum(self.data)/self.size\n return mean",
"def get_mean_image(data):\n\n\tno_of_images = len(data)\n\tmean_im = np.zeros((28, 28))\n\tfor i in xrange(no_of_images):\n\t\tmean_im = mean_im + data[i, 0:28, 0:28]\n\n\tmean_im = mean_im / no_of_images\n\treturn mean_im",
"def get_mean(self):\n mean = np.array(np.zeros((4,8)))\n for i,c in enumerate(self.cellLines):\n for j,l in enumerate(self.ligands):\n mean[i][j] = self.aucs[c][l]['mean']\n return mean",
"def mean(self):\n\n\t\tif not self._masked:\n\t\t\t\n\t\t\treturn self.data.mean()\n\t\t\n\t\telse:\n\t\t\t\n\t\t\tif not hasattr(self,\"_full_mask\"):\n\t\t\t\tself.maskBoundaries()\n\t\t\t\n\t\t\treturn self.data[self._full_mask].mean()",
"def get_channel_average_from_batch(batch):\n # YOUR CODE HERE\n return( mx.nd.mean(batch, axis=1, exclude=True) )\n raise NotImplementedError()",
"def mean_flat(tensor):\n return tensor.mean(axis=list(range(1, len(tensor.shape))))",
"def mean(self):\r\n return np.mean(self.data_array)",
"def compute_mean(self):\n # load_in_all_parameters(self.save_directory, self.auto_encoder)\n for i, data_row in enumerate(self.X_train_naive):\n input_nn = data_row\n if torch.cuda.is_available():\n input_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)).cuda())\n else:\n input_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)))\n\n prediction_embedding = self.model.forward_only_encoding(input_nn)\n print(prediction_embedding)\n if i == 0:\n self.embedding_np = prediction_embedding.data.clone().cpu().numpy()[0]\n else:\n self.embedding_np = np.vstack((self.embedding_np, prediction_embedding.data.clone().cpu().numpy()[0]))\n self.mean_embedding = np.average(self.embedding_np, axis=0)\n print('mean embedding is ', self.mean_embedding)",
"def centroid(self, coords):\r\n return np.mean(coords, axis=0)",
"def mean_flat(tensor):\n return tensor.mean(dim=list(range(1, len(tensor.shape))))",
"def mean(self):\n return np.average(self.particles, weights=self.weights, axis=0)",
"def batch_stat(x):\n\tmean = torch.mean(x, dim=[0, 2, 3], keepdim=True)\n\tvar = torch.mean((x-mean)**2, dim=[0, 2, 3], keepdim=True)\n\treturn mean, var",
"def get_mean(self):\r\n for i in range(1,len(self.data[0])):\r\n self.prom.append(np.mean(self.data[:,i]))",
"def mean(self):\n return self.aggregate(np.mean)",
"def meanColor(self):\n return self.image[self.x, self.y]",
"def matrix_mean(matrix):\n return sum(map(mean,matrix))",
"def _mean(images):\n try:\n total = next(images)\n except StopIteration:\n print(\"No images found. Use 0.\")\n return 0.\n num = 1\n for image in images:\n total += image\n mean = total / num\n if np.ndim(mean) == 3:\n mean = np.mean(mean, axis=0)\n return mean",
"def transform(tensor):\n L, W, D = tensor.shape\n return tensor.transpose(1, 0, 2).reshape(W, L*D).mean(axis=0)",
"def pixel_score(self,X,Y):\n pred_Y = self.predict(X)\n score = []\n label_size = self.label_width**2\n for i in range(len(Y)):\n score.append(np.sum(Y[i]==pred_Y[i])/label_size)\n mean_score = np.mean(score)\n return mean_score",
"def average(self):\n return np.mean(self.buf[:self._size], axis=0)",
"def channel_mean_std(dataset, batch_size=64):\n\n\t# Make a loader for the data\n\tloader = DataLoader(dataset, batch_size=batch_size)\n\n\t# Number of color channels and size of dataset\n\tch = next(iter(loader))[0].shape[1]\n\tn = len(dataset)\n\n\t# Iterate through data, computing mean and variance of each batch and summing\n\t# to running mean and running variance (this utilizes the fact that the mean is\n\t# equal to the mean of the batch means, and similarly for the variance)\n\tmean = torch.zeros(3)\n\tvar = torch.zeros(3)\n\n\tfor batch, _ in tqdm(loader):\n\t\t# Ratio of batch size to entire dataset (for weighting purposes)\n\t\tr = float(batch.shape[0]/n)\n\t\t# Reshape batch so that it is just a set of vectors, one per channel, whose entries\n\t\t# are a concatenation of all that channel's values in all the batch images\n\t\tch_vectors = torchvision.utils.make_grid(batch, padding=0).view(ch, -1)\n\n\t\t# Add to running mean and variance\n\t\tmean.add_(torch.mean(ch_vectors, 1)*r)\n\t\tvar.add_(torch.var(ch_vectors, 1)*r)\n\n\treturn mean, var.sqrt()",
"def hyper_mean(udf_data: UdfData):\n # Iterate over each tile\n cube_list = []\n for cube in udf_data.get_datacube_list():\n mean = cube.array.mean(dim=\"t\")\n mean.name = cube.id + \"_mean\"\n cube_list.append(XarrayDataCube(array=mean))\n udf_data.set_datacube_list(cube_list)"
]
| [
"0.72589284",
"0.6978652",
"0.6880891",
"0.6747932",
"0.6652633",
"0.6604227",
"0.6583818",
"0.6574959",
"0.647404",
"0.6456031",
"0.64457744",
"0.64428407",
"0.6433864",
"0.64326847",
"0.64324236",
"0.6419911",
"0.63975173",
"0.63865525",
"0.6368913",
"0.63662386",
"0.63422084",
"0.6317415",
"0.63153803",
"0.6311365",
"0.6275963",
"0.6272077",
"0.6257765",
"0.6239765",
"0.6239461",
"0.6228628"
]
| 0.75593 | 0 |
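
A minimal standalone sketch of the same per-pixel mean computation, added for illustration only: the class that owns get_mean_coord and its local data path are omitted, the function name and root argument are assumptions, and download=True is used so the snippet is self-contained.

import torch
import torchvision


def cifar10_pixel_mean(root="./data"):
    # dataset.data is a uint8 numpy array of shape [50000, 32, 32, 3]
    dataset = torchvision.datasets.CIFAR10(root, train=True, download=True)
    data = torch.as_tensor(dataset.data, dtype=torch.float32).permute(0, 3, 1, 2)
    return data.mean(0) / 255  # [3, 32, 32], values in [0, 1]


# Usage: per-pixel mean subtraction for a batch of images scaled to [0, 1].
# normalized = batch - cifar10_pixel_mean().unsqueeze(0)
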
Function to get the similarity matrix specific to the test case. The instances that we map the distance to are the provided train instances. | def getSimilarityMatrixTest(testBags, trainInstances, labels):
    similarityMatrix = np.zeros([testBags.shape[0], trainInstances.shape[0]])
    #print(similarityMatrix.shape)
    for bagInd in range(0, testBags.shape[0]):
        #print(labels[bagInd])
        #get the average of all instances in this test patient bag
        testInstances = testBags[bagInd]
        instanceAvg = np.mean(testInstances, axis=0)
        #compute the distance from this bag average to every train instance
        distance = np.abs(instanceAvg - trainInstances)
        #sum the distances to get 1 similarity score per train instance
        summedDistance = np.sum(distance, axis=1)
        #print(summedDistance)
        similarityMatrix[bagInd, :] = summedDistance
    return similarityMatrix | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getDistanceM(self, test, train):\n p = 2 # TUNE currently euclidian distance\n distanceM = pd.DataFrame(index=test.index.values, columns=train.index.values)\n for testrow, testing in test.iterrows():\n for trainrow, training in train.iterrows():\n tot = 0\n for indexc, column in test.iteritems():\n #print(indexc)\n if indexc in self.discrete: # need to reference VDM\n datapoint = self.VDMdict.get(indexc)\n dif = datapoint[testing[indexc]][training[indexc]]\n elif indexc != \"class\": #get distance beween 2 points\n dif = abs(float(testing[indexc]) - float(training[indexc]))\n\n tot += dif ** p\n distance = tot ** (1 / p) #distance is calculated\n distanceM.at[testrow, trainrow] = distance #put in distance matrix\n return(distanceM)",
"def getDistances(trainingSet, testInstance, distances):\n # Empty list to store distances of between testInstance and each trainSet item\n # Number of dimensions to check\n length=len(testInstance) - 1\n # Iterate through all items in trainingSet and compute the distance, then append to the distances list\n for x in range(len(trainingSet)):\n dist=calculateDistance(testInstance, trainingSet[x], length)\n distances.append((trainingSet[x], dist))\n return distances",
"def test_distances(self):\n sf = make_classifier_data(n=10, d=2, seed=37)\n sf.remove_column(\"class\", inplace=True)\n\n numeric_features = [\"int0\", \"int1\", \"float0\", \"float1\"]\n array_features = [\"array0\"]\n string_features = [\"str0\"]\n dict_features = [\"dict0\"]\n\n ## Numeric standard distances should work for numeric columns\n for d in [\n \"euclidean\",\n \"squared_euclidean\",\n \"manhattan\",\n \"cosine\",\n \"transformed_dot_product\",\n ]:\n try:\n m = tc.dbscan.create(\n sf,\n features=numeric_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n ## Numeric standard distances should work for array columns\n for d in [\n \"euclidean\",\n \"squared_euclidean\",\n \"manhattan\",\n \"cosine\",\n \"transformed_dot_product\",\n ]:\n try:\n m = tc.dbscan.create(\n sf,\n features=array_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n ## String standard distances should work.\n for d in [\"levenshtein\"]:\n try:\n m = tc.dbscan.create(\n sf,\n features=string_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n ## Dictionary standard distances should work.\n for d in [\"jaccard\", \"weighted_jaccard\", \"cosine\", \"transformed_dot_product\"]:\n try:\n m = tc.dbscan.create(\n sf,\n features=dict_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n # Nonsensical combinations of feature types and distances should fail.\n with self.assertRaises(ValueError):\n m = tc.dbscan.create(\n sf,\n features=numeric_features,\n distance=\"levenshtein\",\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n\n with self.assertRaises(ToolkitError):\n m = tc.dbscan.create(\n sf,\n features=dict_features,\n distance=\"levenshtein\",\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n\n with self.assertRaises(ToolkitError):\n m = tc.dbscan.create(\n sf,\n features=string_features,\n distance=\"euclidean\",\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n\n # If no distance is specified, the automatic distance construction\n # should kick in and be correct.\n correct_dist = [\n [[\"str0\"], \"levenshtein\", 1],\n [[\"str1\"], \"levenshtein\", 1],\n [[\"dict0\"], \"jaccard\", 1],\n [[\"int0\", \"int1\", \"float0\", \"float1\"], \"euclidean\", 1],\n [[\"array0\"], \"euclidean\", 1],\n ]\n\n m = tc.dbscan.create(\n sf, radius=1, distance=None, min_core_neighbors=3, verbose=False\n )\n\n self.assertItemsEqual(m.distance, correct_dist)\n\n m = tc.dbscan.create(\n sf, radius=1, distance=\"auto\", min_core_neighbors=3, verbose=False\n )\n self.assertItemsEqual(m.distance, correct_dist)",
"def test_splits_similarity(self):\n a_train = torch.as_tensor(\n [\n [1, 1, 2],\n [2, 1, 3],\n [1, 2, 3],\n [4, 1, 5],\n [5, 1, 6],\n ]\n )\n a_test = torch.as_tensor(\n [\n [4, 2, 6],\n ]\n )\n b_train = torch.as_tensor(\n [\n [1, 1, 2],\n [2, 1, 3],\n [1, 2, 3],\n [4, 1, 5],\n [4, 2, 6],\n ]\n )\n b_test = torch.as_tensor(\n [\n [5, 1, 6],\n ]\n )\n\n a_train_tf = CoreTriplesFactory.create(a_train)\n a_test_tf = CoreTriplesFactory.create(a_test)\n b_train_tf = CoreTriplesFactory.create(b_train)\n b_test_tf = CoreTriplesFactory.create(b_test)\n\n steps = splits_steps([a_train_tf, a_test_tf], [b_train_tf, b_test_tf])\n self.assertEqual(2, steps)\n\n similarity = splits_similarity([a_train_tf, a_test_tf], [b_train_tf, b_test_tf])\n self.assertEqual(1 - steps / 6, similarity)",
"def find_similars(self, test_set):\n\n tfidf = TfidfVectorizer(lowercase=False, sublinear_tf=True)\n tfidf_matrix = tfidf.fit_transform(self.train_str)\n\n # Calling only transform on test so that idf calculated on train data\n test_str = [' '.join(q.title) for q in test_set]\n test_tfidf = tfidf.transform(test_str)\n\n simis = self.calculate_similarity(tfidf_matrix, test_tfidf)\n return simis",
"def euclideanDistanceRow(testInstance, trainingSet):\n distances = {}\n\n for x in range(trainingSet.shape[0]):\n dist = euclideanDistance(testInstance, trainingSet.iloc[x])\n distances[x] = dist\n\n return distances",
"def test_matrix_distance(self):\n # note that the score matrix must contain 'diagonal' elements m[i][i]\n # to avoid failure when the sequences match.\n m = {\"U\": {\"U\": 0, \"C\": 1, \"A\": 5}, \"C\": {\"C\": 0, \"A\": 2, \"G\": 4}}\n self.assertEqual(self.RNA(\"UUUCCC\").matrix_distance(\"UCACGG\", m), 14)\n self.assertEqual(self.RNA(\"UUUCCC\").matrix_distance(\"\", m), 0)\n self.assertEqual(self.RNA(\"UUU\").matrix_distance(\"CAC\", m), 7)\n self.assertRaises(KeyError, self.RNA(\"UUU\").matrix_distance, \"CAG\", m)",
"def compute_similarity(im1, im2, test_id):\n\n results = dict()\n results['id'] = test_id\n results['test_im'] = im2\n results['mse_value'], results['mse_map'] = compute_mse(im1, im2)\n results['ssim_value'], results['ssim_map'] = compute_ssim(im1, im2, 5)\n results['cw_ssim_value'], results['cw_ssim_map'] = compute_cw_ssim(im1, im2, 30)\n results['gms_value'], results['gms_map'] = compute_gms(im1, im2)\n results['fsim_value'], results['pc_max_map'] = compute_fsim(im1, im2)\n\n return results",
"def calculate_similarity(self, tfidf_matrix, test_tfidf):\n\n with open(DATASET.fold_root / 'tags_order.json') as file:\n tags_order = json.load(file)\n\n min_max_scaler = MinMaxScaler()\n\n n_clus = 2\n simis = []\n for test_q in test_tfidf:\n s = cosine_similarity(tfidf_matrix, test_q)\n\n # Sorting and getting indices of sorted similarities\n simi = s.transpose()[0]\n simi_values = np.sort(simi)[::-1][:200]\n simi_indices = simi.argsort()[::-1]\n\n breaks = jenkspy.jenks_breaks(simi_values, n_clus)\n simi_count = len(simi_values[breaks[-2] <= simi_values])\n\n q_tags = [self.train_set[i].tags for i in simi_indices][:simi_count]\n\n tags_votes = Counter(chain(*q_tags))\n all_count = sum(tags_votes.values())\n tags_likelihood = [tags_votes.get(\n tag, 0) / all_count for tag in tags_order]\n\n lh = np.array([float(x)\n for x in tags_likelihood]).reshape(-1, 1)\n normalized_lh = np.concatenate(\n min_max_scaler.fit_transform(lh)\n ).tolist()\n\n simis.append(normalized_lh)\n\n return simis",
"def _generate_similarity_mat(labels):\n l_mat = np.repeat(labels, len(labels), axis=1)\n l_mat_t = l_mat.T\n\n sim_mat = np.equal(l_mat, l_mat_t).astype(int)\n return sim_mat",
"def train_similarity():\n data = pd.read_csv('.\\\\ml\\\\similarity\\\\products_sample.csv')\n train_set = pd.read_csv('.\\\\ml\\\\similarity\\\\similarity_training_set.csv')\n \n def strToListInt(x):\n \"\"\"\n Transform '[1,2]' into [1,2]\n \"\"\"\n if type(x)==str:\n return [int(i) for i in x[1:-1].split(\", \")]\n train_set.Index =train_set.Index.apply(strToListInt)\n \n def strToList(x):\n \"\"\"\n Transform '['a','b']' into ['a','b']\n \"\"\"\n if type(x)==str:\n return x[2:-2].split(\"', '\")\n data.Name = data.Name.apply(strToList)\n data.Description = data.Description.apply(strToList)\n \n #model = LogisticRegression(solver = 'lbfgs')\n model = MultinomialNB()\n x_train = []\n y_train = train_set['Similarity'].values\n \n def countSimilarWords(index):\n count = 0\n name1 = set(data.loc[index[0],'Name'])\n desc1 = set(data.loc[index[0],'Description'])\n name2 = set(data.loc[index[1],'Name'])\n desc2 = set(data.loc[index[1],'Description'])\n for x in name1:\n if (x in name2):\n count += 1\n for x in desc1:\n if (x in desc2):\n count += 1\n return count/(len(name1)+len(name2)+len(desc1)+len(desc2))\n \n print('Preparing training set...')\n for i in range(len(train_set)):\n if (i%50000==0):\n print(i)\n index = train_set.loc[i,'Index']\n x_train += [ [ countSimilarWords(index), abs(data.loc[index[0],'RetailPrice']-data.loc[index[1],'RetailPrice'])/data.loc[index[0],'RetailPrice'] ] ]\n #x_train += [ [ abs(data.loc[index[0],'RetailPrice']-data.loc[index[1],'RetailPrice'])/data.loc[index[0],'RetailPrice'] ] ]\n \n x_train,x_test,y_train,y_test = model_selection.train_test_split(x_train,y_train,test_size=0.2)\n \n #Train model\n print('Training model...')\n model.fit(x_train,y_train)\n #Save model\n pickle.dump(model, open('.\\\\ml\\\\similarity\\\\naive_bayes', 'wb'))\n \n #Evaluate accuracy\n x_test_similar = []\n y_test_similar = []\n x_test_unsimilar = []\n y_test_unsimilar = []\n for i in range(len(y_test)):\n if (y_test[i]==1):\n x_test_similar += [x_test[i]]\n y_test_similar += [y_test[i]]\n else :\n x_test_unsimilar += [x_test[i]]\n y_test_unsimilar += [y_test[i]]\n predictions = model.predict(x_test)\n print('Overall accuracy : ',metrics.accuracy_score(predictions,y_test))\n predictions = model.predict(x_test_similar)\n print('Accuracy (for similar products) : ',metrics.accuracy_score(predictions,y_test_similar))\n predictions = model.predict(x_test_unsimilar)\n print('Accuracy (for unsimilar products) : ',metrics.accuracy_score(predictions,y_test_unsimilar))\n \n # Print: \n # Overall accuracy : 0.720595453461792\n # Accuracy (for similar products) : 0.8133531406788688\n # Accuracy (for unsimilar products) : 0.6277154083637384",
"def get_train_test_matrix(self, train_indices, test_indices):\n train_nnz_items = self.nnz_items[train_indices]\n train_nnz_users = self.nnz_users[train_indices]\n train = sp.lil_matrix((self.num_users, self.num_items))\n train[train_nnz_users, train_nnz_items] = self.data_matrix[train_nnz_users, train_nnz_items]\n test_nnz_items = self.nnz_items[test_indices]\n test_nnz_users = self.nnz_users[test_indices]\n test = sp.lil_matrix((self.num_users, self.num_items))\n test[test_nnz_users, test_nnz_items] = self.data_matrix[test_nnz_users, test_nnz_items]\n return train, test",
"def similarity(self, source, target):\n results = { m.name: m.similarity(source, target) for m in self.metrics }\n return results",
"def getNearestSampleIndex(test, trainX):\n dist_matrix = test - trainX\n dist_square = dist_matrix ** 2\n dist_sums = dist_square.sum(axis=1)\n distance_vector = np.sqrt(dist_sums)\n return (distance_vector).argmin()",
"def test_similarity(self):\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'dog.n.01'), 1))\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'mammal.n.01'), 0.180901358))",
"def build_matrix(self):\n \n for p1 in self._properties: \n p1 = p1.get_vectorized_data()\n \n for p2 in self._properties:\n p2 = p2.get_vectorized_data()\n v1, v2 = self.prepare_vectors(p1, p2)\n self._similarity_matrix.append(cosine_similarity([v1],[v2]))",
"def affinity_matrix(test_specs):\n\n np.random.seed(test_specs[\"seed\"])\n\n # uniform probability for the 5 ratings\n s = [(1 - test_specs[\"spars\"]) / test_specs[\"ratings\"]] * test_specs[\"ratings\"]\n s.append(test_specs[\"spars\"])\n P = s[::-1]\n\n # generates the user/item affinity matrix. Ratings are from 1 to 5, with 0s denoting unrated items\n X = np.random.choice(\n test_specs[\"ratings\"] + 1, (test_specs[\"users\"], test_specs[\"items\"]), p=P\n )\n\n Xtr, Xtst = numpy_stratified_split(\n X, ratio=test_specs[\"ratio\"], seed=test_specs[\"seed\"]\n )\n\n return Xtr, Xtst",
"def test_dice_similarity_matrix():\n vectors1 = np.array([[1, 1, 0, 0],\n [0, 0, 1, 1]])\n vectors2 = np.array([[0, 1, 1, 0],\n [1, 0, 1, 1]])\n\n scores = dice_similarity_matrix.py_func(vectors1, vectors2)\n expected_scores = np.array([[0.5, 0.4],\n [0.5, 0.8]])\n assert scores == pytest.approx(expected_scores, 1e-7), \"Expected different scores.\"",
"def _construct_sim_matrix(self, fps):\n rows = []\n nfps = len(fps)\n for i in range(nfps):\n sims = DataStructs.BulkTanimotoSimilarity(fps[i], fps)\n rows.append(sims)\n res = np.array(rows)\n assert res.shape == (nfps, nfps)\n return res",
"def self_similarity_matrix(feature_vectors):\n norm_feature_vectors, mean, std = at.normalize_features([feature_vectors.T])\n norm_feature_vectors = norm_feature_vectors[0].T\n sim_matrix = 1.0 - distance.squareform(\n distance.pdist(norm_feature_vectors.T, 'cosine'))\n return sim_matrix",
"def similarity_matrix(feat_mat):\n sim_mat = cosine_similarity(feat_mat)\n np.fill_diagonal(sim_mat, 0)\n return sim_mat",
"def model_book_similarities(data):\n\n data = data.T\n U2 = distance.squareform(distance.pdist(data, metric='cosine'))\n sim_matrix = pd.DataFrame(U2)\n\n return sim_matrix",
"def create_matrix_mapping_with_neighbours(data_mh, embedding_model, train_mh_index_map):\n mh_index_map = {}\n # extract a matrix that contains only embeddings of training data\n train_vectors = embedding_model.wv.syn0[np.array(list(train_mh_index_map.keys()))]\n # dictionary: index in small matrix - real word index\n vector_index_map = dict(zip(np.arange(0, train_vectors.shape[0]), list(train_mh_index_map.keys())))\n for vector_idx in data_mh:\n # look if an instance is in train\n if vector_idx not in train_mh_index_map.keys():\n # get the vector\n word_vec = embedding_model.wv.syn0[vector_idx]\n # calculate the dot product between the current vector and vectors from train\n similarities = np.dot(train_vectors, word_vec)\n # get the one with highest similarity\n nearest = np.argmax(similarities)\n # lookup the wordindex based on the vector index of the nearest neighbour of that small matrix\n nearest_idx = vector_index_map[nearest]\n # lookup the matrix idx of that word and put it in the map\n nearest_n_matrix_idx = train_mh_index_map[nearest_idx]\n mh_index_map[vector_idx] = nearest_n_matrix_idx\n else:\n matrix_idx = train_mh_index_map[vector_idx]\n mh_index_map[vector_idx] = matrix_idx\n return mh_index_map",
"def _distance_matrix(self):\n\n # Log the type of metric being used in Sequencing\n logger.info('Using {} Distance'.format(self.measure))\n\n # Convert the nodal coordinate tuples to a np.array\n coords = np.vstack(map(np.array, self.coords.values()))\n \n if self.measure == 'haversine':\n # Partially applied haversine function that takes a coord and computes the vector distances for all coords\n haversine = lambda coord: get_hav_distance(coords[:, 0], coords[:, 1], *coord) \n # Map the partially applied function over all coordinates, and stack to a matrix\n return np.vstack(map(haversine, coords))\n\n # Partially applied haversine function that takes a coord and computes the vector distances for all coords\n euclidean = lambda coord: get_euclidean_dist(coords, coord)\n # Map the partially applied function over all coordinates, and stack to a matrix\n return np.vstack(map(euclidean, coords))",
"def train_model(train_table: pd.DataFrame, viewed_table: pd.DataFrame) -> List:\n\n \"\"\"\"\"\"\n # construct numpy array\n #nan_data = train_table.replace(0.0, np.NaN).to_numpy()\n #user_ratings_mean = np.nanmean(nan_data, axis=1)\n unviewed_table = viewed_table.apply(lambda x: 1 - x)\n # unviewed = unviewed_table.to_numpy()\n\n # construct numpy array\n data = train_table.to_numpy()\n user_ratings_mean = np.mean(data, axis=1)\n # factors in individual interpretation of the scale\n data_demeaned = data - user_ratings_mean.reshape(-1, 1)\n\n # use scipy sparse's svd to avoid 'killed: 9' memory issues\n U, sigma, Vt = svds(data_demeaned, k=25)\n\n sigma = np.diag(sigma)\n\n all_predictions = np.dot(np.dot(U, sigma), Vt) + user_ratings_mean.reshape(-1, 1)\n all_predictions_table = pd.DataFrame(all_predictions).set_index(viewed_table.index)\n all_predictions_table.set_axis(viewed_table.columns, axis='columns', inplace=True)\n\n\n # given already viewed movies a rating of 0. Note these will still be taken ahead of adverse movies\n predictions_table = pd.DataFrame(np.multiply(all_predictions_table,\n unviewed_table.to_numpy()).set_index(viewed_table.index))\n predictions_table.set_axis(viewed_table.columns, axis='columns', inplace=True)\n\n return [all_predictions_table, predictions_table]",
"def get_sim_matrix(centroids):\n\n matrix = {}\n length = len(centroids)\n\n for i in xrange(0, length):\n matrix[i] = {}\n\n for j in xrange(i + 1, length):\n matrix[i][j] = similarity(centroids[i], centroids[j])\n\n return matrix",
"def test_dice_similarity_matrix_compiled():\n vectors1 = np.array([[1, 1, 0, 0],\n [0, 0, 1, 1]])\n vectors2 = np.array([[0, 1, 1, 0],\n [1, 0, 1, 1]])\n\n scores = dice_similarity_matrix(vectors1, vectors2)\n expected_scores = np.array([[0.5, 0.4],\n [0.5, 0.8]])\n assert scores == pytest.approx(expected_scores, 1e-7), \"Expected different scores.\"",
"def get_similarity(self, ):\r\n customer_cos_similarity = cosine_similarity(self.rating_matrix, self.rating_matrix)\r\n customer_cos_similarity = pd.DataFrame(customer_cos_similarity,\r\n index=self.customer_vendor_matrix.index,\r\n columns=self.customer_vendor_matrix.index)\r\n # customer_pearson_similarity = np.corrcoef(self.rating_matrix,\r\n # self.rating_matrix,)\r\n # customer_pearson_similarity = pd.DataFrame(customer_pearson_similarity,\r\n # index=self.customer_vendor_matrix.index,\r\n # columns=self.customer_vendor_matrix.index)\r\n return customer_cos_similarity,\r\n # return customer_pearson_similarity run too slowly\r",
"def compare_stability_matrices(ism1, ism2): \n \n import scipy as sp\n import sklearn as sk\n\n ism1=sk.preprocessing.normalize(ism1,norm='l2')\n ism2=sk.preprocessing.normalize(ism2,norm='l2')\n distance=sp.spatial.distance.correlation(ism1.ravel(), ism2.ravel())\n similarity= 1-distance\n return similarity",
"def test_dbscan_similarity():\n # Parameters chosen specifically for this task.\n eps = 0.15\n min_samples = 10\n # Compute similarities\n D = distance.squareform(distance.pdist(X))\n D /= np.max(D)\n # Compute DBSCAN\n core_samples, labels = dbscan(D, metric=\"precomputed\",\n eps=eps, min_samples=min_samples)\n # number of clusters, ignoring noise if present\n n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)\n\n assert_equal(n_clusters_1, n_clusters)\n\n db = DBSCAN(metric=\"precomputed\")\n labels = db.fit(D, eps=eps, min_samples=min_samples).labels_\n\n n_clusters_2 = len(set(labels)) - int(-1 in labels)\n assert_equal(n_clusters_2, n_clusters)"
]
| [
"0.6784363",
"0.6022616",
"0.5967267",
"0.59669846",
"0.58467686",
"0.58432096",
"0.581799",
"0.58019656",
"0.57023007",
"0.5694741",
"0.56717575",
"0.5650801",
"0.5608074",
"0.55852276",
"0.55694515",
"0.5533678",
"0.5530354",
"0.55099696",
"0.54819864",
"0.5478882",
"0.5475355",
"0.5461794",
"0.54414076",
"0.54287374",
"0.54152083",
"0.54128647",
"0.54108334",
"0.53992903",
"0.53925335",
"0.5387205"
]
| 0.73847157 | 0 |
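
For illustration, a self-contained toy version of the bag-to-instance distance above; the shapes, variable names, and random test data are assumptions, not taken from the original project.

import numpy as np


def bag_to_instance_distances(test_bags, train_instances):
    # test_bags: sequence of [n_i, d] instance matrices, one per test bag
    # train_instances: [m, d] matrix of training instances
    out = np.zeros((len(test_bags), train_instances.shape[0]))
    for i, bag in enumerate(test_bags):
        bag_avg = np.mean(bag, axis=0)  # [d]
        out[i, :] = np.abs(bag_avg - train_instances).sum(axis=1)  # summed L1 distance to each train instance
    return out


rng = np.random.default_rng(0)
bags = [rng.normal(size=(5, 3)), rng.normal(size=(8, 3))]
train = rng.normal(size=(10, 3))
print(bag_to_instance_distances(bags, train).shape)  # (2, 10)
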
Get a free club player number. | def get_club_player_number(club_id, type):
    cursor = connection.cursor()
    try:
        sql = "select number from btp_player%s where ClubID = %s " % (type, club_id)
        cursor.execute(sql)
        infos = cursor.fetchall()
        numbers = set()
        if infos:
            for info in infos:
                numbers.add(info['number'])
        while True:
            number = random.randint(0, 50)
            if number not in numbers:
                return number
    except Exception:
        log_execption()
        raise RuntimeError("error")
    finally:
        cursor.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_player_num(self):\r\n return self.player_control.get_player_num()",
"def info_player_id(self, playername):\r\n number = 0\r\n name = playername.title().replace(\" \", \"+\")\r\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\", \"Accept\": \"text/plain\",\r\n 'Referer': 'http://' + self.domain + '/team_news.phtml', \"User-Agent\": user_agent}\r\n req = self.session.get('http://stats.comunio.es/search.php?name=' + playername, headers=headers).content\r\n soup = BeautifulSoup(req, \"html.parser\")\r\n for i in soup.find_all('a', {'class', 'nowrap'}):\r\n number = re.search(\"([0-9]+)-\", str(i)).group(1)\r\n break # Solo devuelve la primera coincidencia\r\n return number",
"def get_player(self):\n return int(4 - floor(abs(3.5 - self.turn_number)))",
"def get_current_player(self):\r\n\r\n return self.players[(self.turn_number) % len(self.players)].get_id()",
"def get_player(self, number):\n num = int(number)\n assert (num in [1, 2])\n return self.player_1 if num == 1 else self.player_2",
"def find_player_id(url):\r\n response = requests.get(url)\r\n result = PLAYER_ID_PATTERN.search(response.text)\r\n return result.group(1)",
"def string_u_broj(self):\n if self.player_input == \"rock\":\n self.player_number = 0\n elif self.player_input == \"spock\":\n self.player_number = 1\n elif self.player_input == \"paper\":\n self.player_number = 2\n elif self.player_input == \"lizard\":\n self.player_number = 3\n elif self.player_input == \"scissors\":\n self.player_number = 4\n else:\n self.player_number = -1\n raise RpslsError(102)\n return self.player_number",
"def get_player_id(self):\n return self.game.get_player_id()",
"def club_id(self, club_name):\r\n # UTF-8 comparison\r\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\", \"Accept\": \"text/plain\",\r\n 'Referer': 'http://' + self.domain + '/', \"User-Agent\": user_agent}\r\n req = self.session.get('http://' + self.domain, headers=headers).content\r\n soup = BeautifulSoup(req, \"html.parser\")\r\n for i in soup.find('table', cellpadding=2).find_all('tr'):\r\n # Get teamid from the bets\r\n team1 = i.find('a')['title']\r\n team2 = i.find_all('a')[1]['title']\r\n if club_name == team1:\r\n return i.find('a')['href'].split('cid=')[1]\r\n elif club_name == team2:\r\n return i.find_all('a')[1]['href'].split('cid=')[1]\r\n return None",
"def player_id(self):\n return self.data[\"attributes\"][\"stats\"][\"playerId\"]",
"def get_player_id(self, player):\n url = self.base_url + \"/player/player/findplayer.html\"\n params = {\"q\": player, \"start\": 0, \"count\": \"Infinity\"}\n url += \"?\" + urllib.parse.urlencode(params)\n resp = self.fetch(url)\n resp_json = json.loads(resp)\n if len(resp_json[\"items\"]) == 0:\n return -1\n return resp_json[\"items\"][0][\"id\"]",
"def get_player_id(self):\n\n return self.player_id",
"def rank_in_club(user, club):\n posel_ids = [p.id for p in club.posel_set.all()]\n return rank(user, posel_ids)",
"def getCountryClubId(self):\n return self.countryClubId",
"def get_player(self, num):\n\n name = input(f\"What is the name for player number {num}? \")\n player = Player(name)\n return player",
"def get_player(self):\n return 2 - int((np.sum(self.state) % 2))",
"def get_next_player(self, player):\r\n return player * -1",
"def get_next_player(self, player):\r\n return player * -1",
"def i_to_player_id(self, i):\n game = self.ctrl.game\n if self.hot_seat:\n return game.current_player if i == 0 else (1 - game.current_player)\n else:\n return self.main_player_id if i == 0 else (1 - self.main_player_id)",
"def get_player_index(self, id_) -> int:\n return self._players_list.index(self._nodes[id_]['player'])",
"def SelectPlayer(self):\n\n player = input(data['player'])\n if player == \"1\":\n return 0\n elif player == \"2\":\n return 1\n else:\n return 'invalid'",
"def name(who):\r\n if who == 0:\r\n return 'Player 0'\r\n elif who == 1:\r\n return 'Player 1'\r\n else:\r\n return 'An unknown player'",
"def get_player(self):\n return self.player",
"def get_player(self):\n return self.player",
"def other_player(self, player):\n if player == self.__opponent:\n return self.__pid\n else:\n return self.__opponent",
"def __map_player_id(self, seat): \n internal_player_id = None\n if seat:\n if seat == self.player_id:\n internal_player_id = self.COM_PLAYER_ID\n else:\n internal_player_id = self.OPPONENT_PLAYER_ID\n return internal_player_id",
"def get_player_id(self, player_email):\n return self.player_ids[player_email]",
"def get_video_num(self, username):\n done = self.cur.execute(\"SELECT video_ID FROM videos where uploader = \\\"{}\\\"\".format(username))\n return done",
"def player(self):\n return self.players[self.tictactoe.turn]",
"def getID(self):\n return self.__clubDbID"
]
| [
"0.76977646",
"0.7178991",
"0.69293433",
"0.6737305",
"0.66757345",
"0.65458876",
"0.6466579",
"0.6436842",
"0.64020866",
"0.6390754",
"0.6387966",
"0.63808894",
"0.6292459",
"0.6177683",
"0.61504906",
"0.6141488",
"0.6079721",
"0.6079721",
"0.60252047",
"0.5968486",
"0.5907327",
"0.5902376",
"0.58633864",
"0.58633864",
"0.5862878",
"0.58541805",
"0.5845708",
"0.5794315",
"0.5784855",
"0.5780542"
]
| 0.73717844 | 1 |
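
A hedged sketch of a safer variant of the lookup above: the table-name suffix still has to be interpolated (identifiers cannot be bound), but the club id is passed as a bound parameter, and the retry loop is replaced by choosing from the remaining free numbers. The function name, the dict-style cursor rows, and the 0-50 range are assumptions carried over from the code above.

import random


def pick_free_player_number(cursor, club_id, type_suffix, max_number=50):
    # type_suffix should be validated against a whitelist before interpolation
    sql = "select number from btp_player%s where ClubID = %%s" % type_suffix
    cursor.execute(sql, (club_id,))
    taken = {row["number"] for row in cursor.fetchall()}
    free = [n for n in range(max_number + 1) if n not in taken]
    if not free:
        raise RuntimeError("no free player number left for club %s" % club_id)
    return random.choice(free)
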
Create a unique and secure filename for the uploaded file field passed in as data. | def unique_filename(data):
    file = data
    get_ext = file.filename.split(".")[-1]  # keep only the original extension
    new_name = "%s.%s" % (uuid.uuid4().hex, get_ext)  # random hex id replaces the user-supplied name
    return new_name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_file_name(self):\n # create a unique id for the file name\n index = self.helpers.alpha_uuid()\n\n filename = self.form['FieldStorage'][self.image_cid].filename\n extension = guess_extension(guess_type(filename)[0])\n return ( # concatenates the following data\n self.articleData.get('directory') + # directory\n '/' + # slash\n self.articleData.get('article_name') + # the article name\n '-' + # hyphen character\n index + # the id of the image\n extension\n )",
"def _generate_filename(instance, filename, prefix):\n md5 = hashlib.md5()\n md5.update(struct.pack('f', time.time()))\n for chunk in instance.file.chunks():\n md5.update(chunk)\n extension = os.path.splitext(filename)[1]\n return os.path.join(prefix, md5.hexdigest() + extension)",
"def get_file_name() -> str:\n import uuid\n uniq_append_string = uuid.uuid4().hex\n return \"LOCAL_STORAGE_{}\".format(uniq_append_string)",
"def _gen_image_filename(instance, filename):\n # First, store the original filename in the model\n instance.original_filename = filename\n\n return _unique_path(instance.owner.pk, filename)",
"def _create_unique_file(self):\n with open(self.uniquefile, 'w') as f:\n f.write(self._uniquename)\n self._uniquefile_created = True\n self._extend_expiration_time()\n self._p(\"Unique file created: %s\" % self.uniquefile)",
"def _generate_processed_key_name(process_to, upload_name):\n timestamp = datetime.now().strftime('%Y%m%d%H%M%S%f')\n name, extension = os.path.splitext(upload_name)\n digest = md5(''.join([timestamp, upload_name])).hexdigest()\n return os.path.join(process_to, '{0}.{1}'.format(digest, extension))",
"def generate_filename(filename: str) -> str:\n return f\"{str(uuid.uuid4())}.{get_extension(filename)}\"",
"def makefilename(self):\n fp= (pathlib.Path(self.vr_folder).expanduser()/(time.strftime(self.vr_filename))).with_suffix('')\n fp.parent.mkdir(parents=True, exist_ok=True)\n print('files setup', str(fp))\n return fp",
"def create_filename (self):\n\t\tassert self.__patient_name and self.__location_name, \"New filename could not be determined, one or more needed arguments is empty!\"\n\t\t_patient_name = self.__patient_name.split(' ')\n\t\t_patient_name.reverse()\n\t\t\n\t\treturn os.path.join(os.path.dirname(self.file._path), \"%s MR %s%s\" % (self.__location_name, ', '.join(_patient_name).upper(), self._file.extension))",
"def test_get_safe_filename(self):\n \n with tempfile.TemporaryDirectory() as dir_name: \n fileext = \".jpg\"\n\n # File name should stay the same for unique name\n proposed_safe_filename = uuid.uuid4().hex + fileext\n actual_name = self.retriever._get_safe_filename(dir_name, proposed_safe_filename)\n self.assertEqual(os.path.join(dir_name, proposed_safe_filename), actual_name, \\\n msg=\"The proposed file name should stay the same for a unique name.\")\n\n # File should change for an existing file\n with tempfile.NamedTemporaryFile(\"a\", dir = dir_name, suffix = fileext) as file:\n proposed_safe_filename = file.name\n actual_name = self.retriever._get_safe_filename(dir_name, proposed_safe_filename)\n self.assertNotEqual(os.path.join(dir_name, proposed_safe_filename), actual_name, \\\n msg=\"The proposed file name should have changed if is not a unique name.\")",
"def save_file_with_id_name(self, filename):\n file_ = filename.split(os.sep)[-1]\n extension = \".\".join(file_.split(\".\")[-1:])\n filename = str(uuid.uuid4()) + \".\" + extension\n return filename",
"def random_filename_upload_to(path):\n\n def f(instance, filename):\n ext = filename.split('.')[-1]\n filename = '{0}.{1}'.format(uuid.uuid4().hex, ext)\n return os.path.join(path, filename)\n\n return f",
"def get_file_name(instance, filename):\n filename = make_unique_filename(filename)\n return os.path.join('uploads/profile_pics', filename)",
"def image_upload_filename(instance, filename):\n prefix = 'photos'\n uhash = abs(hash(u'%s%s' % (datetime.now(), filename)))\n user = instance.album.user.username\n return u'%s/%s/%s_%s' % (prefix, user, uhash, filename)",
"def update_filename(instance, filename):\n path = os.path.join(\"documents_analizer\", \"documents\")\n name = \"{}{}\".format(highly_random_name(),\n os.path.splitext(filename)[1])\n return os.path.join(path, name)",
"def _generate_filename(doc_type, login, *args):\n filename = []\n filename.append(doc_type)\n filename.append(login)\n for item in args:\n filename.append(item)\n filename.append(datetime.datetime.now().isoformat(timespec='microseconds'))\n filename = '_'.join(filename)\n return filename",
"def __newFileName(self):\n now = datetime.now()\n dateTimeAppend = now.strftime('%y%m%d_%H%M%S')\n self.__fileName = '{}/{}_{}.wav'.format(RECORDING,\n FILE_NAME_PREFIX, \n dateTimeAppend)",
"def generate_filename(playlist_or_album_name, user_id_or_artist_id=None):\n filename = ''\n if user_id_or_artist_id:\n filename += user_id_or_artist_id + '_'\n filename += playlist_or_album_name + '_' + str(time_ns())\n return filename",
"def generate_filename(ext,sha512base16_hash=None):\n## # Timestamp filename\n## timestamp = str(get_current_unix_time())\n## filename = timestamp+\".\"+ext\n # Base16 hash filename\n filename = sha512base16_hash+\".\"+ext\n return filename",
"def generate_file_filename(instance, filename):\n return _generate_filename(instance, filename, 'photos')",
"def _safe_filename(filename):\n filename = secure_filename(filename)\n print(\"filename==\", filename)\n date = datetime.datetime.utcnow().strftime(\"%Y-%m-%d-%H%M%S\")\n basename, extension = filename.rsplit('.', 1)\n return \"{0}-{1}.{2}\".format(basename, date, extension)",
"def file_on_disk_name(instance, filename):\n return generate_file_on_disk_name(instance.checksum, filename)",
"def new_filename(fname=None,ndigits=3):\n if fname is None:\n ext = (\"%%.%ii\" % ndigits) % 1\n fname = \"%s.%s\" % (random_string(6), ext)\n \n if os.path.exists(fname): \n fname = increment_filename(fname,ndigits=ndigits)\n\n return fname",
"def generate_raw_filename(self, source_name, table_name, environment, seq_number, upload_time, load_type,\n file_format):\n file_date = upload_time.strftime(\n \"%Y-%m-%d-%H-%M-%S-%f\")[:-3] # [:-3] => Removing the 3 last characters as %f is for millis.\n res = f'{source_name}/{source_name}_{table_name}/' \\\n f'{source_name}_{environment}_{table_name}_{str(seq_number).zfill(3)}_' \\\n f'{file_date}_utc_{load_type}.{file_format}'\n res = res.lower()\n\n # Check if no illegal chars were passed\n #test = FileNameStandardConvention(res)\n #test.check_naming_convention()\n return res",
"def giverandomfilename(self,user,postfix=\"\"):\n return \"%s_%s_%s\" % (user.username.encode(\"ascii\",\"ignore\"),\n str(randint(10000,99999)),\n \"testfile%s.txt\" % postfix)",
"def _safe_filename(filename):\n filename = secure_filename(filename)\n date = datetime.datetime.utcnow().strftime(\"%Y-%m-%d-%H%M%S\")\n basename, extension = filename.rsplit('.', 1)\n return \"{0}-{1}.{2}\".format(basename, date, extension)",
"def _safe_filename(filename):\n date = datetime.datetime.utcnow().strftime(\"%Y-%m-%d-%H%M%S\")\n basename, extension = filename.rsplit('.', 1)\n return \"{0}-{1}.{2}\".format(basename, date, extension)",
"def _create_file_name(file_path):\r\n file_base, file_ext = os.path.splitext(file_path)\r\n if os.path.isfile(file_path):\r\n nfile = 1\r\n check = True\r\n while check:\r\n name_add = '0000' + str(nfile)\r\n file_path = file_base + \"_\" + name_add[-4:] + file_ext\r\n if os.path.isfile(file_path):\r\n nfile = nfile + 1\r\n else:\r\n check = False\r\n return file_path",
"def generate_filename_prefix(self):\n email = self.get_user_email()\n if not email:\n return ''\n\n return '%s_' % md5_hash(email, self.salt)",
"def create_file(self, sensor_id:str, timestamp:str, sensor_name:str)->str:\n file_name = '%s/%s.%s.%s.json' % (self.generate_data_prep, sensor_id, timestamp, sensor_name)\n try: \n open(file_name, 'w').close()\n except Exception as e: \n print(\"Unable to create file (%s) - %s\" % (self.generate_data_prep, e))\n return False \n return file_name"
]
| [
"0.7432633",
"0.73596436",
"0.72354007",
"0.72344303",
"0.7162284",
"0.700368",
"0.6998772",
"0.696573",
"0.69122726",
"0.68828297",
"0.68639725",
"0.68490016",
"0.6805709",
"0.6797652",
"0.67734665",
"0.67452866",
"0.67387444",
"0.67148685",
"0.671018",
"0.6706373",
"0.6694962",
"0.6654825",
"0.66162467",
"0.661571",
"0.66095763",
"0.66023815",
"0.6594649",
"0.65940213",
"0.6565535",
"0.6531401"
]
| 0.77626705 | 0 |
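
A small usage sketch of the filename helper above; FakeUpload is a hypothetical stand-in for any upload object exposing a .filename attribute (for example a Werkzeug FileStorage) and exists only for this example.

import uuid


class FakeUpload:
    def __init__(self, filename):
        self.filename = filename


def unique_filename(data):
    ext = data.filename.split(".")[-1]  # keep only the extension of the uploaded name
    return "%s.%s" % (uuid.uuid4().hex, ext)


print(unique_filename(FakeUpload("avatar.png")))  # e.g. '3f2a9c...d41.png'
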
The name of the connector. | def connector_name(self) -> pulumi.Input[str]:
    return pulumi.get(self, "connector_name") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def connector_mapping_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"connector_mapping_name\")",
"def connection_name(self) -> str:\n return pulumi.get(self, \"connection_name\")",
"def getConnectionName(self):\n return self.system",
"def connection_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"connection_name\")",
"def connection_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"connection_name\")",
"def connector_profile_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"connector_profile_name\")",
"def connector_profile_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"connector_profile_name\")",
"def vpc_connector_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"vpc_connector_name\")",
"def get_name(self, context: bpy.types.Context) -> str:\n if not self.name:\n connector = self.__create_connector(\n self.name_connector, context=context)\n self.name = connector.get_name()\n return self.name",
"def vpc_connector_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"vpc_connector_name\")",
"def protocol(self) -> str:\n return __name__",
"def connector_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"connector_type\")",
"def connector_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"connector_type\")",
"def name(self):\n return 'Connected Devices'",
"def name(self) -> str:\n return f\"{self._inst} port {self._data[self._sid_data['sid_name']]}\"",
"def name(self):\n return self._config.backend_name",
"def name(self) -> str:\n return self._alias or f\"Nut-{self._host}\"",
"def channel_name(self) -> str:\n return self._channel_name",
"def name(self) -> str:\n return self.proto.name",
"def get_name(self):\n \n return 'TCP/IP Client'",
"def name(self):\n return self.proto.name",
"def name(self):\n return '{} {}'.format(self._device,\n self._endpoint)",
"def get_name(self):\n \n return 'TCP/IP Server'",
"def name(self) -> str:\n return self.config_name or self.host_name or self.dev_id or DEVICE_DEFAULT_NAME",
"def connector(self):\n if '_connector' not in self.__dict__:\n from meerschaum.connectors.parse import parse_instance_keys\n conn = parse_instance_keys(self.connector_keys)\n if conn:\n self._connector = conn\n else:\n return None\n return self._connector",
"def get_initiator_host_name(self, connector):\n name = connector.get('initiator',\n connector.get('wwnns', [''])[0])[::-1]\n if self.configuration.unique_fqdn_network:\n name = connector.get('host', name)\n return re.sub('[^0-9a-zA-Z-_]', '_', name[:32])",
"def name(self):\n return \"RPCConnection\"",
"def name(self):\n return \"{} {}\".format(self._clientname, self._name)",
"def name(self):\n if self.resource.is_client:\n return f\"{self.network.name} {self.resource.name_connection_type} {SWITCH_TYPES[self.variable][0]}\"\n elif self.resource.is_eero or self.resource.is_profile:\n return f\"{self.network.name} {self.resource.name} {SWITCH_TYPES[self.variable][0]}\"\n return f\"{self.resource.name} {SWITCH_TYPES[self.variable][0]}\"",
"def layer_protocol_name(self) -> str:\n return self._layer_protocol_name"
]
| [
"0.7337074",
"0.732946",
"0.73207146",
"0.7130238",
"0.70410424",
"0.70390975",
"0.70390975",
"0.703408",
"0.69587594",
"0.69246274",
"0.6804043",
"0.6703014",
"0.6703014",
"0.6581841",
"0.6571527",
"0.6548757",
"0.6536891",
"0.6471821",
"0.644584",
"0.64090055",
"0.64068276",
"0.6388168",
"0.63579077",
"0.6352564",
"0.63471246",
"0.63396907",
"0.6335501",
"0.63301283",
"0.6317787",
"0.6315258"
]
| 0.86829066 | 0 |
The name of the hub. | def hub_name(self) -> pulumi.Input[str]:
    return pulumi.get(self, "hub_name") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hub_name(self):\n return self._props[\"persistent_identifiers\"].get(self._hub_name_prop)",
"def hub(self) -> str:\n return self._db_data.hub",
"def name(self):\n return f'{self._vehicle.name} {self.wan_name} Signal'",
"def name(self):\n return \"{} {}\".format(self._clientname, self._name)",
"def name(self) -> str:\n return self.dev.label",
"def name(self):\n return self._device.name",
"def name(self):\n return self._device.name",
"def name(self):\n return self._device.name",
"def name(self):\n return f\"{habitica.DOMAIN}_{self._name}_{self._sensor_name}\"",
"def name(self):\n return self.device.name()",
"def name(self):\n return f\"{DEFAULT_NAME}_{BINARY_SENSOR}\"",
"def name(self):\n # self._name = \"wyzeapi_\"+self._device_mac+\"_\"+ self._name\n return self._device.nickname",
"def name(self):\n return self._sensor.name",
"def name(self):\n return self.config[\"name\"]",
"def name(self) -> str:\n pass",
"def name(self) -> str:\n pass",
"def name(self) -> str:\n pass",
"def name(self) -> str:\n pass",
"def name(self):\n return f\"{get_device_name(self._data, 0, self._name)}\"",
"def name(self) -> str:\n return self.proto.name",
"def name(self):\n return self._device.device_data[self._uuid]['name']",
"def name(self) -> str:\n return f\"{self.platform_name} {self._sensor_name}\"",
"def name(self):\n return self._config.backend_name",
"def name(self):\n return '{} {}'.format(self._device,\n self._endpoint)",
"def name(self) -> str:\n return self.__name",
"def name(self) -> str:\n return self.__name",
"def name(self) -> str:\n return self.__name",
"def name(self) -> str:\n return self.__name",
"def name(self) -> str:\n return self.__name",
"def name(self) -> str:\n return self.__name"
]
| [
"0.86040986",
"0.7757109",
"0.7266456",
"0.7262012",
"0.72566485",
"0.7248894",
"0.7248894",
"0.7248894",
"0.72286797",
"0.72268325",
"0.7159816",
"0.71467173",
"0.7144651",
"0.7131295",
"0.7124113",
"0.7124113",
"0.7124113",
"0.7124113",
"0.7106911",
"0.7100647",
"0.70927584",
"0.7092616",
"0.7075219",
"0.7061618",
"0.70602304",
"0.70602304",
"0.70602304",
"0.70602304",
"0.70602304",
"0.70602304"
]
| 0.8609149 | 0 |
Get an existing ConnectorMapping resource's state with the given name, id, and optional extra properties used to qualify the lookup. | def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None) -> 'ConnectorMapping':
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
    __props__ = ConnectorMappingArgs.__new__(ConnectorMappingArgs)
    __props__.__dict__["connector_mapping_name"] = None
    __props__.__dict__["connector_name"] = None
    __props__.__dict__["connector_type"] = None
    __props__.__dict__["created"] = None
    __props__.__dict__["data_format_id"] = None
    __props__.__dict__["description"] = None
    __props__.__dict__["display_name"] = None
    __props__.__dict__["entity_type"] = None
    __props__.__dict__["entity_type_name"] = None
    __props__.__dict__["last_modified"] = None
    __props__.__dict__["mapping_properties"] = None
    __props__.__dict__["name"] = None
    __props__.__dict__["next_run_time"] = None
    __props__.__dict__["run_id"] = None
    __props__.__dict__["state"] = None
    __props__.__dict__["tenant_id"] = None
    __props__.__dict__["type"] = None
    return ConnectorMapping(resource_name, opts=opts, __props__=__props__) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'EventSourceMapping':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = EventSourceMappingArgs.__new__(EventSourceMappingArgs)\n\n __props__.__dict__[\"amazon_managed_kafka_event_source_config\"] = None\n __props__.__dict__[\"batch_size\"] = None\n __props__.__dict__[\"bisect_batch_on_function_error\"] = None\n __props__.__dict__[\"destination_config\"] = None\n __props__.__dict__[\"document_db_event_source_config\"] = None\n __props__.__dict__[\"enabled\"] = None\n __props__.__dict__[\"event_source_arn\"] = None\n __props__.__dict__[\"filter_criteria\"] = None\n __props__.__dict__[\"function_name\"] = None\n __props__.__dict__[\"function_response_types\"] = None\n __props__.__dict__[\"maximum_batching_window_in_seconds\"] = None\n __props__.__dict__[\"maximum_record_age_in_seconds\"] = None\n __props__.__dict__[\"maximum_retry_attempts\"] = None\n __props__.__dict__[\"parallelization_factor\"] = None\n __props__.__dict__[\"queues\"] = None\n __props__.__dict__[\"scaling_config\"] = None\n __props__.__dict__[\"self_managed_event_source\"] = None\n __props__.__dict__[\"self_managed_kafka_event_source_config\"] = None\n __props__.__dict__[\"source_access_configurations\"] = None\n __props__.__dict__[\"starting_position\"] = None\n __props__.__dict__[\"starting_position_timestamp\"] = None\n __props__.__dict__[\"topics\"] = None\n __props__.__dict__[\"tumbling_window_in_seconds\"] = None\n return EventSourceMapping(resource_name, opts=opts, __props__=__props__)",
"def resolve_from_local_lookup_table(self, id: str) -> GeoLocation:\n return self.local_lookup(id)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'ResolverConfig':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = ResolverConfigArgs.__new__(ResolverConfigArgs)\n\n __props__.__dict__[\"autodefined_reverse\"] = None\n __props__.__dict__[\"autodefined_reverse_flag\"] = None\n __props__.__dict__[\"owner_id\"] = None\n __props__.__dict__[\"resource_id\"] = None\n return ResolverConfig(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n as_path_match_mode: Optional[pulumi.Input[str]] = None,\n cen_id: Optional[pulumi.Input[str]] = None,\n cen_region_id: Optional[pulumi.Input[str]] = None,\n cidr_match_mode: Optional[pulumi.Input[str]] = None,\n community_match_mode: Optional[pulumi.Input[str]] = None,\n community_operate_mode: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n destination_child_instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n destination_cidr_blocks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n destination_instance_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n destination_instance_ids_reverse_match: Optional[pulumi.Input[bool]] = None,\n destination_route_table_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n map_result: Optional[pulumi.Input[str]] = None,\n match_asns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n match_community_sets: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n next_priority: Optional[pulumi.Input[int]] = None,\n operate_community_sets: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n preference: Optional[pulumi.Input[int]] = None,\n prepend_as_paths: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n priority: Optional[pulumi.Input[int]] = None,\n route_map_id: Optional[pulumi.Input[str]] = None,\n route_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n source_child_instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n source_instance_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n source_instance_ids_reverse_match: Optional[pulumi.Input[bool]] = None,\n source_region_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n source_route_table_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n status: Optional[pulumi.Input[str]] = None,\n transit_router_route_table_id: Optional[pulumi.Input[str]] = None,\n transmit_direction: Optional[pulumi.Input[str]] = None) -> 'RouteMap':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _RouteMapState.__new__(_RouteMapState)\n\n __props__.__dict__[\"as_path_match_mode\"] = as_path_match_mode\n __props__.__dict__[\"cen_id\"] = cen_id\n __props__.__dict__[\"cen_region_id\"] = cen_region_id\n __props__.__dict__[\"cidr_match_mode\"] = cidr_match_mode\n __props__.__dict__[\"community_match_mode\"] = community_match_mode\n __props__.__dict__[\"community_operate_mode\"] = community_operate_mode\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"destination_child_instance_types\"] = destination_child_instance_types\n __props__.__dict__[\"destination_cidr_blocks\"] = destination_cidr_blocks\n __props__.__dict__[\"destination_instance_ids\"] = destination_instance_ids\n __props__.__dict__[\"destination_instance_ids_reverse_match\"] = destination_instance_ids_reverse_match\n __props__.__dict__[\"destination_route_table_ids\"] = destination_route_table_ids\n __props__.__dict__[\"map_result\"] = map_result\n __props__.__dict__[\"match_asns\"] = match_asns\n __props__.__dict__[\"match_community_sets\"] = match_community_sets\n __props__.__dict__[\"next_priority\"] = next_priority\n __props__.__dict__[\"operate_community_sets\"] = operate_community_sets\n __props__.__dict__[\"preference\"] = preference\n 
__props__.__dict__[\"prepend_as_paths\"] = prepend_as_paths\n __props__.__dict__[\"priority\"] = priority\n __props__.__dict__[\"route_map_id\"] = route_map_id\n __props__.__dict__[\"route_types\"] = route_types\n __props__.__dict__[\"source_child_instance_types\"] = source_child_instance_types\n __props__.__dict__[\"source_instance_ids\"] = source_instance_ids\n __props__.__dict__[\"source_instance_ids_reverse_match\"] = source_instance_ids_reverse_match\n __props__.__dict__[\"source_region_ids\"] = source_region_ids\n __props__.__dict__[\"source_route_table_ids\"] = source_route_table_ids\n __props__.__dict__[\"status\"] = status\n __props__.__dict__[\"transit_router_route_table_id\"] = transit_router_route_table_id\n __props__.__dict__[\"transmit_direction\"] = transmit_direction\n return RouteMap(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n access_configuration_id: Optional[pulumi.Input[str]] = None,\n access_configuration_name: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n directory_id: Optional[pulumi.Input[str]] = None,\n force_remove_permission_policies: Optional[pulumi.Input[bool]] = None,\n permission_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AccessConfigurationPermissionPolicyArgs']]]]] = None,\n relay_state: Optional[pulumi.Input[str]] = None,\n session_duration: Optional[pulumi.Input[int]] = None) -> 'AccessConfiguration':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _AccessConfigurationState.__new__(_AccessConfigurationState)\n\n __props__.__dict__[\"access_configuration_id\"] = access_configuration_id\n __props__.__dict__[\"access_configuration_name\"] = access_configuration_name\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"directory_id\"] = directory_id\n __props__.__dict__[\"force_remove_permission_policies\"] = force_remove_permission_policies\n __props__.__dict__[\"permission_policies\"] = permission_policies\n __props__.__dict__[\"relay_state\"] = relay_state\n __props__.__dict__[\"session_duration\"] = session_duration\n return AccessConfiguration(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n automatic_update: Optional[pulumi.Input[pulumi.InputType['ProtectionContainerMappingAutomaticUpdateArgs']]] = None,\n name: Optional[pulumi.Input[str]] = None,\n recovery_fabric_name: Optional[pulumi.Input[str]] = None,\n recovery_replication_policy_id: Optional[pulumi.Input[str]] = None,\n recovery_source_protection_container_name: Optional[pulumi.Input[str]] = None,\n recovery_target_protection_container_id: Optional[pulumi.Input[str]] = None,\n recovery_vault_name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None) -> 'ProtectionContainerMapping':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ProtectionContainerMappingState.__new__(_ProtectionContainerMappingState)\n\n __props__.__dict__[\"automatic_update\"] = automatic_update\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"recovery_fabric_name\"] = recovery_fabric_name\n __props__.__dict__[\"recovery_replication_policy_id\"] = recovery_replication_policy_id\n __props__.__dict__[\"recovery_source_protection_container_name\"] = recovery_source_protection_container_name\n __props__.__dict__[\"recovery_target_protection_container_id\"] = recovery_target_protection_container_id\n __props__.__dict__[\"recovery_vault_name\"] = recovery_vault_name\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n return ProtectionContainerMapping(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'VpcConnector':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = VpcConnectorArgs.__new__(VpcConnectorArgs)\n\n __props__.__dict__[\"security_groups\"] = None\n __props__.__dict__[\"subnets\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"vpc_connector_arn\"] = None\n __props__.__dict__[\"vpc_connector_name\"] = None\n __props__.__dict__[\"vpc_connector_revision\"] = None\n return VpcConnector(resource_name, opts=opts, __props__=__props__)",
"def get_state_by_id(state_id):\n for key, value in storage.all(\"State\").items():\n if state_id == value.id:\n return jsonify(value.to_dict())\n abort(404)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n config: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n metadata: Optional[pulumi.Input[pulumi.InputType['SyntheticsPrivateLocationMetadataArgs']]] = None,\n name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'SyntheticsPrivateLocation':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _SyntheticsPrivateLocationState.__new__(_SyntheticsPrivateLocationState)\n\n __props__.__dict__[\"config\"] = config\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"metadata\"] = metadata\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"tags\"] = tags\n return SyntheticsPrivateLocation(resource_name, opts=opts, __props__=__props__)",
"def state_by_id(state_id):\n states_values = storage.all(\"State\").values()\n for obj in states_values:\n if obj.id == state_id:\n return jsonify(obj.to_dict())\n abort(404)",
"def a_state(id):\n state = storage.get(State, id)\n if state is not None:\n return jsonify(state.to_dict())\n abort(404)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n minimal_action: Optional[pulumi.Input[str]] = None,\n most_disruptive_allowed_action: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n preserved_state: Optional[pulumi.Input[pulumi.InputType['RegionPerInstanceConfigPreservedStateArgs']]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n region_instance_group_manager: Optional[pulumi.Input[str]] = None,\n remove_instance_state_on_destroy: Optional[pulumi.Input[bool]] = None) -> 'RegionPerInstanceConfig':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _RegionPerInstanceConfigState.__new__(_RegionPerInstanceConfigState)\n\n __props__.__dict__[\"minimal_action\"] = minimal_action\n __props__.__dict__[\"most_disruptive_allowed_action\"] = most_disruptive_allowed_action\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"preserved_state\"] = preserved_state\n __props__.__dict__[\"project\"] = project\n __props__.__dict__[\"region\"] = region\n __props__.__dict__[\"region_instance_group_manager\"] = region_instance_group_manager\n __props__.__dict__[\"remove_instance_state_on_destroy\"] = remove_instance_state_on_destroy\n return RegionPerInstanceConfig(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n application_insights_id: Optional[pulumi.Input[str]] = None,\n container_registry_id: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n discovery_url: Optional[pulumi.Input[str]] = None,\n encryption: Optional[pulumi.Input[pulumi.InputType['WorkspaceEncryptionArgs']]] = None,\n friendly_name: Optional[pulumi.Input[str]] = None,\n high_business_impact: Optional[pulumi.Input[bool]] = None,\n identity: Optional[pulumi.Input[pulumi.InputType['WorkspaceIdentityArgs']]] = None,\n image_build_compute_name: Optional[pulumi.Input[str]] = None,\n key_vault_id: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n primary_user_assigned_identity: Optional[pulumi.Input[str]] = None,\n public_access_behind_virtual_network_enabled: Optional[pulumi.Input[bool]] = None,\n public_network_access_enabled: Optional[pulumi.Input[bool]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n sku_name: Optional[pulumi.Input[str]] = None,\n storage_account_id: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n v1_legacy_mode_enabled: Optional[pulumi.Input[bool]] = None,\n workspace_id: Optional[pulumi.Input[str]] = None) -> 'Workspace':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _WorkspaceState.__new__(_WorkspaceState)\n\n __props__.__dict__[\"application_insights_id\"] = application_insights_id\n __props__.__dict__[\"container_registry_id\"] = container_registry_id\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"discovery_url\"] = discovery_url\n __props__.__dict__[\"encryption\"] = encryption\n __props__.__dict__[\"friendly_name\"] = friendly_name\n __props__.__dict__[\"high_business_impact\"] = high_business_impact\n __props__.__dict__[\"identity\"] = identity\n __props__.__dict__[\"image_build_compute_name\"] = image_build_compute_name\n __props__.__dict__[\"key_vault_id\"] = key_vault_id\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"primary_user_assigned_identity\"] = primary_user_assigned_identity\n __props__.__dict__[\"public_access_behind_virtual_network_enabled\"] = public_access_behind_virtual_network_enabled\n __props__.__dict__[\"public_network_access_enabled\"] = public_network_access_enabled\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"sku_name\"] = sku_name\n __props__.__dict__[\"storage_account_id\"] = storage_account_id\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"v1_legacy_mode_enabled\"] = v1_legacy_mode_enabled\n __props__.__dict__[\"workspace_id\"] = workspace_id\n return Workspace(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n asset_statuses: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ZoneAssetStatusArgs']]]]] = None,\n create_time: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n discovery_spec: Optional[pulumi.Input[pulumi.InputType['ZoneDiscoverySpecArgs']]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n lake: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n resource_spec: Optional[pulumi.Input[pulumi.InputType['ZoneResourceSpecArgs']]] = None,\n state: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n uid: Optional[pulumi.Input[str]] = None,\n update_time: Optional[pulumi.Input[str]] = None) -> 'Zone':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ZoneState.__new__(_ZoneState)\n\n __props__.__dict__[\"asset_statuses\"] = asset_statuses\n __props__.__dict__[\"create_time\"] = create_time\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"discovery_spec\"] = discovery_spec\n __props__.__dict__[\"display_name\"] = display_name\n __props__.__dict__[\"labels\"] = labels\n __props__.__dict__[\"lake\"] = lake\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"project\"] = project\n __props__.__dict__[\"resource_spec\"] = resource_spec\n __props__.__dict__[\"state\"] = state\n __props__.__dict__[\"type\"] = type\n __props__.__dict__[\"uid\"] = uid\n __props__.__dict__[\"update_time\"] = update_time\n return Zone(resource_name, opts=opts, __props__=__props__)",
"def get_state_by_id(state_id):\n my_state = storage.get('State', state_id)\n if my_state is None:\n abort(404)\n return jsonify(my_state.to_dict())",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n arn: Optional[pulumi.Input[str]] = None,\n db_proxy_endpoint_name: Optional[pulumi.Input[str]] = None,\n db_proxy_name: Optional[pulumi.Input[str]] = None,\n endpoint: Optional[pulumi.Input[str]] = None,\n is_default: Optional[pulumi.Input[bool]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n target_role: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n vpc_security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n vpc_subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'ProxyEndpoint':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ProxyEndpointState.__new__(_ProxyEndpointState)\n\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"db_proxy_endpoint_name\"] = db_proxy_endpoint_name\n __props__.__dict__[\"db_proxy_name\"] = db_proxy_name\n __props__.__dict__[\"endpoint\"] = endpoint\n __props__.__dict__[\"is_default\"] = is_default\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"target_role\"] = target_role\n __props__.__dict__[\"vpc_id\"] = vpc_id\n __props__.__dict__[\"vpc_security_group_ids\"] = vpc_security_group_ids\n __props__.__dict__[\"vpc_subnet_ids\"] = vpc_subnet_ids\n return ProxyEndpoint(resource_name, opts=opts, __props__=__props__)",
"def get_state(state_id):\n try:\n ''' Check that state_id exists '''\n query = State.select().where(State.id == state_id)\n if not query.exists():\n raise LookupError('state_id')\n\n state = State.get(State.id == state_id)\n return state.to_dict(), 200\n except LookupError as e:\n abort(404)\n except Exception as e:\n abort(500)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n arn: Optional[pulumi.Input[str]] = None,\n auth_mode: Optional[pulumi.Input[str]] = None,\n default_s3_location: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n engine_security_group_id: Optional[pulumi.Input[str]] = None,\n idp_auth_url: Optional[pulumi.Input[str]] = None,\n idp_relay_state_parameter_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n service_role: Optional[pulumi.Input[str]] = None,\n subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n url: Optional[pulumi.Input[str]] = None,\n user_role: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n workspace_security_group_id: Optional[pulumi.Input[str]] = None) -> 'Studio':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _StudioState.__new__(_StudioState)\n\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"auth_mode\"] = auth_mode\n __props__.__dict__[\"default_s3_location\"] = default_s3_location\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"engine_security_group_id\"] = engine_security_group_id\n __props__.__dict__[\"idp_auth_url\"] = idp_auth_url\n __props__.__dict__[\"idp_relay_state_parameter_name\"] = idp_relay_state_parameter_name\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"service_role\"] = service_role\n __props__.__dict__[\"subnet_ids\"] = subnet_ids\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"url\"] = url\n __props__.__dict__[\"user_role\"] = user_role\n __props__.__dict__[\"vpc_id\"] = vpc_id\n __props__.__dict__[\"workspace_security_group_id\"] = workspace_security_group_id\n return Studio(resource_name, opts=opts, __props__=__props__)",
"def get_location_by_id(self, location_id):",
"def findLocationById(cls, id):\r\n return cls.query.filter_by(id=id).first()",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Canary':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = CanaryArgs.__new__(CanaryArgs)\n\n __props__.__dict__[\"artifact_config\"] = None\n __props__.__dict__[\"artifact_s3_location\"] = None\n __props__.__dict__[\"code\"] = None\n __props__.__dict__[\"delete_lambda_resources_on_canary_deletion\"] = None\n __props__.__dict__[\"execution_role_arn\"] = None\n __props__.__dict__[\"failure_retention_period\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"run_config\"] = None\n __props__.__dict__[\"runtime_version\"] = None\n __props__.__dict__[\"schedule\"] = None\n __props__.__dict__[\"start_canary_after_creation\"] = None\n __props__.__dict__[\"state\"] = None\n __props__.__dict__[\"success_retention_period\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"visual_reference\"] = None\n __props__.__dict__[\"vpc_config\"] = None\n return Canary(resource_name, opts=opts, __props__=__props__)",
"def get_state_by_id(state_id):\n state = storage.get(State, state_id)\n if not state:\n abort(404)\n return jsonify(state.to_dict()), 200",
"def get_connected_realm(self, region, namespace, id, **filters):\n filters['namespace'] = namespace\n return self.get_resource('data/wow/connected-realm/{0}', region, *[id], **filters)",
"def state_by_id(state_id):\n state = storage.get(State, state_id)\n if state is None:\n abort(404)\n return jsonify(state.to_dict())",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n connection_prefix: Optional[pulumi.Input[str]] = None,\n connection_string: Optional[pulumi.Input[str]] = None,\n distribution_type: Optional[pulumi.Input[str]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n max_delay_time: Optional[pulumi.Input[int]] = None,\n port: Optional[pulumi.Input[int]] = None,\n weight: Optional[pulumi.Input[Mapping[str, Any]]] = None) -> 'ReadWriteSplittingConnection':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ReadWriteSplittingConnectionState.__new__(_ReadWriteSplittingConnectionState)\n\n __props__.__dict__[\"connection_prefix\"] = connection_prefix\n __props__.__dict__[\"connection_string\"] = connection_string\n __props__.__dict__[\"distribution_type\"] = distribution_type\n __props__.__dict__[\"instance_id\"] = instance_id\n __props__.__dict__[\"max_delay_time\"] = max_delay_time\n __props__.__dict__[\"port\"] = port\n __props__.__dict__[\"weight\"] = weight\n return ReadWriteSplittingConnection(resource_name, opts=opts, __props__=__props__)",
"def _get(isamAppliance, id):\n return isamAppliance.invoke_get(\"Retrieve a specific STS chain\", \"{0}/{1}\".format(uri, id),\n requires_modules=requires_modules,\n requires_version=requires_version)",
"def get_state_by_id(state_id):\r\n response = Response(json.dumps(json_error(ResponsesREST.INVALID_INPUT.value)),\r\n status=ResponsesREST.INVALID_INPUT.value, mimetype=\"application/json\")\r\n if validator_id.is_valid({\"id\": state_id}):\r\n state_get = State()\r\n state_get.id_state = state_id\r\n result = state_get.get_state()\r\n if result in (ResponsesREST.NOT_FOUND.value, ResponsesREST.SERVER_ERROR.value):\r\n response = Response(json.dumps(json_error(result)),\r\n status=result, mimetype=\"application/json\")\r\n else:\r\n response = Response(json.dumps(result.json_state()),\r\n status=ResponsesREST.SUCCESSFUL.value,\r\n mimetype=\"application/json\")\r\n return response",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n auth_method: Optional[pulumi.Input[str]] = None,\n bind_name: Optional[pulumi.Input[str]] = None,\n bind_type: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n namespace: Optional[pulumi.Input[str]] = None,\n partition: Optional[pulumi.Input[str]] = None,\n selector: Optional[pulumi.Input[str]] = None) -> 'AclBindingRule':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _AclBindingRuleState.__new__(_AclBindingRuleState)\n\n __props__.__dict__[\"auth_method\"] = auth_method\n __props__.__dict__[\"bind_name\"] = bind_name\n __props__.__dict__[\"bind_type\"] = bind_type\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"namespace\"] = namespace\n __props__.__dict__[\"partition\"] = partition\n __props__.__dict__[\"selector\"] = selector\n return AclBindingRule(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name, id, opts=None, name=None, s3_destination=None):\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = dict()\n __props__[\"name\"] = name\n __props__[\"s3_destination\"] = s3_destination\n return ResourceDataSync(resource_name, opts=opts, __props__=__props__)",
"def get_state_by_name(exploration_id, state_name, strict=True):\n exploration = get_exploration_by_id(exploration_id)\n assert state_name\n\n # TODO(sll): This is too slow; improve it.\n state = None\n for candidate_state in exploration.states:\n if candidate_state.name == state_name:\n state = candidate_state\n break\n\n if strict and not state:\n raise Exception('State %s not found' % state_name)\n return state"
]
| [
"0.5439401",
"0.50408334",
"0.4999506",
"0.4942603",
"0.4843693",
"0.4836924",
"0.48336312",
"0.480726",
"0.47471997",
"0.47456554",
"0.47390932",
"0.47378042",
"0.4714236",
"0.47014263",
"0.4654256",
"0.46435213",
"0.46350497",
"0.46056616",
"0.4586789",
"0.45752186",
"0.45733777",
"0.45716926",
"0.4570833",
"0.45481664",
"0.45438507",
"0.45350143",
"0.45002824",
"0.4494747",
"0.44754487",
"0.4456236"
]
| 0.7106133 | 0 |
The connector mapping name | def connector_mapping_name(self) -> pulumi.Output[str]:
return pulumi.get(self, "connector_mapping_name") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def connector_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"connector_name\")",
"def mapping_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"mapping_name\")",
"def schema_mappings(self):\n pass",
"def configuration_configmap_name(self) -> Optional[str]:\n return pulumi.get(self, \"configuration_configmap_name\")",
"def _get_fcoe_intf_fabric_map_name(self):\n return self.__fcoe_intf_fabric_map_name",
"def connector(self):\n if '_connector' not in self.__dict__:\n from meerschaum.connectors.parse import parse_instance_keys\n conn = parse_instance_keys(self.connector_keys)\n if conn:\n self._connector = conn\n else:\n return None\n return self._connector",
"def get_mapping_type_name(cls):\n return 'comments_comment'",
"def mapping_name(self) -> Optional[str]:\n return self.get(\"/TM\")",
"def connector_profile_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"connector_profile_name\")",
"def connector_profile_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"connector_profile_name\")",
"def connector_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"connector_type\")",
"def connector_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"connector_type\")",
"def vpc_connector_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"vpc_connector_name\")",
"def name(self) -> str:\n return f\"lookup_table_{self.table_number}\"",
"def connection_name(self) -> str:\n return pulumi.get(self, \"connection_name\")",
"def connection_configuration_mapping(self, value):\n if value == \"Y\":\n return \"0\"\n elif value == \"D\":\n return \"2\"\n elif value == \"Z\":\n return \"5\"\n else:\n raise ValueError(\"Unknown configuration {}\".format(value))",
"def getConnectionName(self):\n return self.system",
"def vpc_connector_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"vpc_connector_name\")",
"def connection_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"connection_name\")",
"def mapping_names(self):\n return [self.basename]",
"def connection_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"connection_name\")",
"def mapping_properties(self) -> pulumi.Input['ConnectorMappingPropertiesArgs']:\n return pulumi.get(self, \"mapping_properties\")",
"def name(self):\n return self.algorithm_spec.alias",
"def receiverMapping():",
"def name(self):\n return self._alias",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'ConnectorMapping':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = ConnectorMappingArgs.__new__(ConnectorMappingArgs)\n\n __props__.__dict__[\"connector_mapping_name\"] = None\n __props__.__dict__[\"connector_name\"] = None\n __props__.__dict__[\"connector_type\"] = None\n __props__.__dict__[\"created\"] = None\n __props__.__dict__[\"data_format_id\"] = None\n __props__.__dict__[\"description\"] = None\n __props__.__dict__[\"display_name\"] = None\n __props__.__dict__[\"entity_type\"] = None\n __props__.__dict__[\"entity_type_name\"] = None\n __props__.__dict__[\"last_modified\"] = None\n __props__.__dict__[\"mapping_properties\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"next_run_time\"] = None\n __props__.__dict__[\"run_id\"] = None\n __props__.__dict__[\"state\"] = None\n __props__.__dict__[\"tenant_id\"] = None\n __props__.__dict__[\"type\"] = None\n return ConnectorMapping(resource_name, opts=opts, __props__=__props__)",
"def node_mapping(self):\n ...",
"def get_alias(self):",
"def edge_mapping(self):\n ...",
"def mapping(self):\n return self.request('_mapping', pylastica.request.Request.GET).data"
]
| [
"0.71356994",
"0.6227197",
"0.6107515",
"0.60861796",
"0.5903945",
"0.583965",
"0.5805953",
"0.5772707",
"0.5756182",
"0.5756182",
"0.5708169",
"0.5708169",
"0.5677598",
"0.56659704",
"0.56199807",
"0.5613059",
"0.5612902",
"0.5589546",
"0.5584832",
"0.5576773",
"0.5560229",
"0.55584466",
"0.55549216",
"0.5544071",
"0.5508853",
"0.55075496",
"0.5451235",
"0.5444921",
"0.5429523",
"0.5426808"
]
| 0.8594048 | 0 |
The next run time based on customer's settings. | def next_run_time(self) -> pulumi.Output[str]:
return pulumi.get(self, "next_run_time") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def next(self):\n\n crontab = self._crontab\n return math.ceil(crontab.next(default_utc=False))",
"def get_nightly_start_time():\n return 14 # 2PM local Tucson time",
"def time_run(self):\n if self._time_run is None:\n self._time_run = datetime.now(timezone.utc)\n return self._time_run.isoformat()",
"def set_next_order_arrival_time():\n if global_settings.demand_distribution == \"uniform\":\n # Update the time_of_next_order_arrival to a random time within the interval specified in global_settings\n global_settings.time_of_next_order_arrival = \\\n global_settings.current_time + round(random.uniform(\n global_settings.next_order_arrival_lower_bound,\n global_settings.next_order_arrival_upper_bound))\n elif global_settings.demand_distribution == \"exponential\":\n # Update the time_of_next_order_arrival to a random time from an exponential distribution\n random_exponential_number = \\\n get_random_exponential_number(global_settings.next_order_arrival_exponential_rate_parameter)\n if random_exponential_number == 0:\n random_exponential_number = 1\n global_settings.time_of_next_order_arrival = global_settings.current_time + random_exponential_number\n # print(\"time to next order:\" + str(random_exponential_number))\n else:\n raise ValueError(\"global_settings.demand_distribution invalid value assigned. \"\n \"Must be 'exponential' or 'uniform'\")\n return",
"def get_next_run_time(period, inst):\n period_sec = int(period)\n now = round(time.time())\n # Dry run mode.\n if dry_run is True:\n print(\"Running in {} secs\".format(now%period_sec))\n return (now%period_sec)\n\n\n # Number of secs that has elapsed since mkt open.\n secs_since_mkt_open = now - Instrument_CP.seconds_since_epoch_at_mkt_open[inst.tsb]\n\n \"\"\"\n If we started before market open, schedule at market_open + period\n Note: We pass the number of seconds from now.\n \"\"\"\n\n if (get_secs_to_mkt_close(inst.xch) < 0):\n logging.debug(\"sec to mkt close %d\" % (get_secs_to_mkt_close(inst.xch)))\n logging.debug(\"market closed\")\n return 0\n\n if secs_since_mkt_open < 0:\n logging.debug(\"market will open in %d\" % secs_since_mkt_open)\n return (-(secs_since_mkt_open))\n else:\n # Period in secs.\n logging.debug(\"next tun time %d\" % (period_sec - secs_since_mkt_open%period_sec))\n return ((period_sec - secs_since_mkt_open%period_sec) + 1)",
"def schedule(self):\n\n crontab = self._crontab\n return datetime.now() + timedelta(\n seconds=math.ceil(\n crontab.next(default_utc=False)\n )\n )",
"def get_curr_exec_time(self):\n if self.type == 'normal':\n try:\n self.curr_exec_time = self.my_rand.gauss(self.runtime, self.stddev)\n except:\n if self.fwk.debug:\n print(\"not varying the execution time\")\n self.curr_exec_time = self.runtime\n raise\n self.start_exec_time = self.fwk.fwk_global_time\n self.state = \"running\"\n elif self.type == 'sandia_work':\n # this is a sandia style work task\n next_ckpt = self.sim.next_ckpt # relative work time\n work_todo = self.sim.total_work - self.sim.completed_work\n self.curr_exec_time = min(work_todo, next_ckpt)\n self.start_exec_time = self.fwk.fwk_global_time\n self.state = \"running\"\n elif self.type == 'sandia_rework':\n next_ckpt = self.sim.next_ckpt # relative work time\n self.curr_exec_time = min(self.sim.rework_todo, next_ckpt)\n self.start_exec_time = self.fwk.fwk_global_time\n self.state = \"running\"\n elif self.type == 'sandia_ckpt' or self.type == 'sandia_restart':\n self.curr_exec_time = self.runtime\n self.start_exec_time = self.fwk.fwk_global_time\n self.state = \"running\"\n else:\n print('error error error!!! problem with component type in get_curr_exec_time')\n raise",
"def generate_time(self) -> str:\n return pulumi.get(self, \"generate_time\")",
"def _next_update_time(self, seconds=10):\n now = get_aware_utc_now()\n next_update_time = now + datetime.timedelta(\n seconds=seconds)\n return next_update_time",
"def next_run_date(self):\n return (\n datetime.combine(self.start_date, datetime.min.time(), tzinfo=pytz.UTC)\n if self.start_date and self.start_date > date.today()\n else None\n )",
"def next_run_date(self):\n return (\n datetime.combine(self.start_date, datetime.min.time(), tzinfo=pytz.UTC)\n if self.start_date and self.start_date > date.today()\n else None\n )",
"def _get_next_time(self, curr_time):\n return curr_time + self.time_dist.random()",
"def ingestion_run_time(self):\n # Set the time at which you want the ingestion to run.\n return os.environ.get('SNYK_INGESTION_RUN_TIME', '12')",
"def now():\n\treturn time.time() * 1000",
"def getSubmitTime():",
"def get_current_time():\n return datetime.now()",
"def start_time(self):\n pass",
"def _get_timebase(self):\n return clock()",
"def _get_timebase(self):\n return clock()",
"def get_attempt_start_time():\n pass",
"def now():\r\n return time.time()",
"def get_time(self):\n\t\treturn time.time()",
"def run_date(self) -> datetime.date:\n return self.timestamp.date()",
"def get_next_day(self):\n pass",
"def now():\r\n return datetime.datetime.now()",
"def get_current_time():\n return int(time.time())",
"def cron(self):\n return",
"def seconds_before_next_run(self):\n period, last_start_time = self.period, self.last_start_time\n now = utcnow()\n if isinstance(period, Weekly):\n then = now.replace(hour=period.hour, minute=10, second=0, microsecond=0)\n days = (period.weekday - now.isoweekday()) % 7\n if days:\n then += timedelta(days=days)\n if (last_start_time or EPOCH) >= then:\n then += timedelta(days=7)\n elif isinstance(period, Daily):\n then = now.replace(hour=period.hour, minute=5, second=0, microsecond=0)\n if (last_start_time or EPOCH) >= then:\n then += timedelta(days=1)\n elif period == 'irregular':\n return 0 if self.thread and self.thread.is_alive() else None\n elif last_start_time:\n then = last_start_time + timedelta(seconds=period)\n else:\n then = now\n return (then - now).total_seconds()",
"def _time(self):\n return time()",
"def next_minute(self):\n #increase the time of the supermarket by one minute\n self.minutes += 1\n #for every customer determine their next state\n for customer in self.customers:\n customer.next_state()"
]
| [
"0.6682652",
"0.6635397",
"0.65648395",
"0.64742404",
"0.64222664",
"0.6402251",
"0.6375578",
"0.62770134",
"0.6224094",
"0.62018615",
"0.62018615",
"0.61462617",
"0.6141664",
"0.60939384",
"0.60898864",
"0.6061057",
"0.6052802",
"0.60277456",
"0.60277456",
"0.6014334",
"0.6002416",
"0.5990887",
"0.59834343",
"0.5980873",
"0.5978241",
"0.5970414",
"0.59414554",
"0.59311324",
"0.5904857",
"0.58944523"
]
| 0.76147455 | 1 |
print a 2x2 grid with squares of size x/2 by x/2 | def print_grid(x):
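    # prow() and pcolumn() are printing helpers assumed to be defined elsewhere
    # in the original module; they are not shown in this snippet.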
row = int(x/2)
if x % 2 == 0:
col = x
else:
col = x - 1
for i in range(2):
prow(row)
for i in range(row):
pcolumn(col)
prow(row) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_square(size):\n if not isinstance(size, int):\n raise ValueError(\"size must be an integer\")\n if size < 0:\n raise ValueError(\"size must be >= 0\")\n if size == 0:\n return\n for row in range(int(size)):\n for col in range(int(size)):\n print(\"{:s}\".format(\"#\"), end=\"\")\n print()",
"def print_grid2(y, z):\n for i in range(y):\n prow(z, y)\n for i in range(z):\n pcolumn(z*2, y)\n prow(z, y)",
"def print_square(size):\n\n if not isinstance(size, int):\n raise TypeError(\"size must be an integer\")\n if size < 0:\n raise ValueError(\"size must be >= 0\")\n if isinstance(size, float) and size < 0:\n raise TypeError(\"size must be an integer\")\n\n for col in range(0, size):\n for row in range(0, size):\n print(\"#\", end=\"\")\n print()",
"def print_square(size):\n if not isinstance(size, int):\n raise TypeError(\"size must be an integer\")\n if size < 0:\n raise ValueError(\"size must be >= 0\")\n if isinstance(size, float) and size < 0:\n raise TypeError(\"size must be an integer\")\n for i in range(0, size):\n for j in range(0, size):\n print(\"#\", end=\"\")\n print()",
"def display_grid(grid):\n\n\tprint(\"\"\"\n 0 1 2 3 4 5 6 7\n\t \n ▼ ▼ ▼ ▼ ▼ ▼ ▼ ▼ \"\"\", colors.BOLD + \"(X)\" + colors.STOP, end = '')\n\n\tprint('\\n\\n')\n\n\trow = 0\n\n\tfor i in range(8):\n\t\tprint(' ', row, ' ▶ ', end = ' ')\n\t\tfor j in range(8):\n\t\t\tprint(grid[j,i], end = ' ')\n\t\tprint('\\n\\n')\n\t\trow += 1\n\n\tprint(colors.BOLD + ' (Y)\\n' + colors.STOP)",
"def print_square(size):\n\n if type(size) is not int:\n raise TypeError(\"size must be an integer\")\n if type(size) is float and size < 0:\n raise TypeError(\"size must be an integer\")\n if size < 0:\n raise ValueError(\"size must be >= 0\")\n for i in range(size):\n for j in range(size):\n print('#', end='')\n print()",
"def print_square(size):\n if type(size) is not int:\n raise TypeError(\"size must be an integer\")\n if size < 0:\n raise ValueError(\"size must be >= 0\")\n row = \"#\" * size\n for i in range(size):\n print(\"{}\".format(row))",
"def printgrid_1():\n print(plusminusrow + newline + piperow*4 + plusminusrow + newline + piperow*4 +plusminusrow)",
"def printBoard(grid):\n for i in range(9):\n if i%3 == 0 and i!=0:\n # Adding two horizontal lines, to separate the squres\n print(\"-----------------------\")\n for j in range(9):\n if j%3 == 0 and j!= 0:\n # Adding Vertical lines, to separate the squares\n print(\" | \",end='')\n # Printing the numbers\n if j==8:\n print(str(grid[i][j]))\n else:\n print(str(grid[i][j]),end=' ')",
"def printgrid(\n rows=2,\n columns=2,\n cell_width=8,\n cell_height=4,\n corner_symbol=\"+\",\n horizontal_symbol=\"-\",\n vertical_symbol=\"|\",\n):\n horizontal_boundary = (\n corner_symbol + ((horizontal_symbol * cell_width) + corner_symbol) * columns\n ) + \"\\n\"\n horizontal_middle = horizontal_boundary.replace(horizontal_symbol, \" \").replace(\n corner_symbol, vertical_symbol\n )\n vertical_cells = (horizontal_middle * cell_height + horizontal_boundary) * rows\n print(\"\\n\")\n print(horizontal_boundary + vertical_cells)",
"def print_grid (grid):\r\n print('+--------------------+')\r\n for o in range(len(grid)):\r\n print('|',end='')\r\n for e in range(len(grid[o])):\r\n j=grid[o][e]\r\n if j==0:\r\n g=' '\r\n else:\r\n g=j\r\n print(g,end=' '*(5-len(str(grid[o][e]))))\r\n print('|')\r\n print('+--------------------+')",
"def printgrid_2(n):\n print(plusminusrow + newline + piperow*n + plusminusrow + newline + piperow*n + plusminusrow)\n return True",
"def print_square(size):\n if type(size) is float and size < 0:\n raise TypeError(\"size must be an integer\")\n if not isinstance(size, int):\n raise TypeError(\"size must be an integer\")\n if size < 0:\n raise ValueError(\"size must be >= 0\")\n for rows in range(size):\n print('#' * size)",
"def draw_2(n: int):\n\n for row in range(n):\n for col in range(n - row):\n print('*', end='')\n print()",
"def print_grid (grid):\r\n print('+--------------------+')\r\n for i in range(4):\r\n print('|',end='')\r\n for j in range(4):\r\n if grid[i][j] == 0:\r\n print(' '*5,end='')\r\n else:\r\n print('{:<5}'.format(grid[i][j]),end='')\r\n print('|') \r\n print('+--------------------+')",
"def print_grid(grid):\n height = len(grid)\n width = len(grid[0])\n \n for r in range(height):\n for c in range(width):\n print(grid[r][c], end='') # print nothing between values\n print() # at end of row, go to next line",
"def display_grid_squares(x_margin, y_margin, num_rows, num_cols, sep):\n\n for row in range(num_rows):\n for col in range(num_cols):\n x = x_margin + sep * col\n y = y_margin + sep * row\n ellipse(x, y, 3, 3)\n pushMatrix()\n translate(x, y)\n noFill()\n rect(0, 0, 20, 20)\n popMatrix()",
"def printGrid(grid):\n print(\"-\"*25)\n for i in range(9):\n print(\"|\", end=\" \")\n for j in range(9):\n print(grid[i][j], end=\" \")\n if (j % 3 == 2):\n print(\"|\", end=\" \")\n print()\n if (i % 3 == 2):\n print(\"-\"*25)\n \"\"\"\n Testing that solver works properly.\n \"\"\"",
"def print_square(size):\n if not isinstance(size, int):\n raise TypeError(\"size must be an integer\")\n if (size < 0):\n raise ValueError(\"size must be >= 0\")\n if size > 0:\n print(\"\\n\".join([\"#\" * size for j in range(size)]))",
"def print_grid(grid, score):\n print(\"\")\n print(score)\n wall = \"+------\"*len(grid[0])+\"+\"\n print(wall)\n for row in grid:\n meat = \"|\".join(\"{:^6}\".format(val) for val in row)\n print(\"|{}|\".format(meat))\n print(wall)",
"def print_grid():\n print_line()\n for line in range(2):\n for post in range(4):\n print_post()\n print_line()",
"def print_grid(grid):\n\tprint(\"\")\n\twall = \"+------\"*len(grid[0])+\"+\"\n\tprint(wall)\n\tfor row in grid:\n\t\tmeat = \"|\".join(COLORS[val] if val else \" \"*6 for val in row)\n\t\tprint(\"|{}|\".format(meat))\n\t\tprint(wall)",
"def print_grid (grid):\r\n f = '{:<5}'\r\n print(\"+--------------------+\")\r\n print('|', f.format(grid[0][0]), f.format(grid[0][1]), f.format(grid[0][2]), f.format(grid[0][3]), '|',sep='')\r\n print('|', f.format(grid[1][0]), f.format(grid[1][1]), f.format(grid[1][2]), f.format(grid[1][3]), '|',sep='')\r\n print('|', f.format(grid[2][0]), f.format(grid[2][1]), f.format(grid[2][2]), f.format(grid[2][3]), '|',sep='')\r\n print('|', f.format(grid[3][0]), f.format(grid[3][1]), f.format(grid[3][2]), f.format(grid[3][3]), '|',sep='')\r\n print(\"+--------------------+\")",
"def print_square(size):\n if isinstance(size, int) and size >= 0:\n for i in range(size):\n for j in range(size - 1):\n print(\"#\", end=\"\")\n print(\"#\")\n elif not isinstance(size, int):\n raise TypeError(\"size must be an integer\")\n elif size < 0 and isinstance(size, int):\n raise ValueError(\"size must be >= 0\")\n else:\n raise TypeError(\"size must be an integer\")",
"def generate_grid():\n y_offset = -10\n for a in range(20):\n # Line 1\n # Adds offset to the x position of the squares\n x_offset = 10\n for b in range(1):\n # Adds offset to the y position of the squares\n y_offset += 20\n for c in range(20):\n # Prints a row of squares(5 squares along the x)\n square(x_offset, y_offset, 20, 20, dark_green)\n for d in range(1):\n # Adds x offset for the next line of squares on the y axis\n x_offset += 40\n # Line 2 (needs 2 lines because the offset of each line)\n # Adds offset to the x position of the squares\n x_offset = 30\n for e in range(1):\n # Adds offset to the y position of the squares\n y_offset += 20\n for f in range(20):\n # Prints a row of squares(5 squares along the x)\n square(x_offset, y_offset, 20, 20, dark_green)\n for g in range(1):\n # Adds x offset for the next line of squares on the y axis\n x_offset += 40",
"def displayGrid(grid):\n wid = grid.shape[0]\n show_num = 9 if wid > 9 else wid\n\n # chessboard\n line = '\\n' + '- + ' * (wid - 1) + '- {}\\n'\n line = line.join([' | '.join(grid[i]) for i in range(wid)])\n\n # mark the number of its lines\n bottom = ('\\n' + ' {} ' * show_num)\n bottom = bottom.format(*[i+1 for i in range(show_num)])\n\n if show_num == 9:\n part = (' {} '*(wid - show_num))\n part = part.format(*[i+1 for i in range(show_num, wid)])\n bottom += part\n\n print(line.format(*[i+1 for i in range(wid)]) + bottom)",
"def draw_grid(grid):\n rows = grid.shape[0]\n cols = grid.shape[1]\n for row in range(rows):\n for col in range(cols):\n if grid[row, col] == 0: # empty\n sys.stdout.write(\" . \")\n elif grid[row, col] == 1: # path\n sys.stdout.write(\" X \")\n elif grid[row, col] == 2:\n sys.stdout.write(\" O \")\n else:\n sys.stdout.write(\" @ \")\n\n if col % cols == cols - 1:\n sys.stdout.write(\"\\n\")",
"def draw(self):\n\t\tfor i in range(0, self.size):\n\t\t\tprint('\\n' + \"----\" * self.size)\n\t\t\tfor j in range(0, self.size):\n\t\t\t\tprint(self.grid[i][j] + ' |', end=\" \")\n\t\tprint('\\n'+ \"----\" * self.size + '\\n')",
"def print_grid(grid):\r\n print(\"+\",'-'*len(grid[0]*5),'+',sep='')# top line of box\r\n for i in range(len(grid)):\r\n grid_str = ''\r\n for j in range(len(grid[i])):\r\n if grid[i][j] == 0:\r\n grid_str += \"{:<5}\".format(' ')\r\n else:\r\n grid_str += \"{:<5}\".format(grid[i][j])#append a 5-width column\r\n print('|',grid_str,'|',sep='')\r\n print(\"+\",'-'*len(grid[0]*5),'+',sep='')# bottom line of box\r",
"def draw_1(n: int):\n \n for row in range(n):\n\n for col in range(n - row - 1):\n print(' ', end='')\n\n for col in range(2 * row + 1):\n print('*', end='')\n \n print()"
]
| [
"0.7685149",
"0.7678649",
"0.7626484",
"0.749714",
"0.74652517",
"0.7443684",
"0.7438695",
"0.738604",
"0.7335196",
"0.7323832",
"0.7323042",
"0.7318823",
"0.71894294",
"0.7176586",
"0.7169575",
"0.7140502",
"0.7096254",
"0.70879203",
"0.70642245",
"0.70536673",
"0.7047173",
"0.7044203",
"0.703974",
"0.7017089",
"0.70088595",
"0.69901997",
"0.6982735",
"0.69771624",
"0.69160616",
"0.6885309"
]
| 0.814317 | 0 |
print a y by y grid with squares of size z by z | def print_grid2(y, z):
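    # prow() and pcolumn() are external printing helpers, assumed to be defined
    # elsewhere in the source module (here they take an extra y argument).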
for i in range(y):
prow(z, y)
for i in range(z):
pcolumn(z*2, y)
prow(z, y) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def display_grid(grid):\n\n\tprint(\"\"\"\n 0 1 2 3 4 5 6 7\n\t \n ▼ ▼ ▼ ▼ ▼ ▼ ▼ ▼ \"\"\", colors.BOLD + \"(X)\" + colors.STOP, end = '')\n\n\tprint('\\n\\n')\n\n\trow = 0\n\n\tfor i in range(8):\n\t\tprint(' ', row, ' ▶ ', end = ' ')\n\t\tfor j in range(8):\n\t\t\tprint(grid[j,i], end = ' ')\n\t\tprint('\\n\\n')\n\t\trow += 1\n\n\tprint(colors.BOLD + ' (Y)\\n' + colors.STOP)",
"def printBoard(grid):\n for i in range(9):\n if i%3 == 0 and i!=0:\n # Adding two horizontal lines, to separate the squres\n print(\"-----------------------\")\n for j in range(9):\n if j%3 == 0 and j!= 0:\n # Adding Vertical lines, to separate the squares\n print(\" | \",end='')\n # Printing the numbers\n if j==8:\n print(str(grid[i][j]))\n else:\n print(str(grid[i][j]),end=' ')",
"def print_grid(x):\n row = int(x/2)\n if x % 2 == 0:\n col = x\n else:\n col = x - 1\n for i in range(2):\n prow(row)\n for i in range(row):\n pcolumn(col)\n prow(row)",
"def generate_grid():\n y_offset = -10\n for a in range(20):\n # Line 1\n # Adds offset to the x position of the squares\n x_offset = 10\n for b in range(1):\n # Adds offset to the y position of the squares\n y_offset += 20\n for c in range(20):\n # Prints a row of squares(5 squares along the x)\n square(x_offset, y_offset, 20, 20, dark_green)\n for d in range(1):\n # Adds x offset for the next line of squares on the y axis\n x_offset += 40\n # Line 2 (needs 2 lines because the offset of each line)\n # Adds offset to the x position of the squares\n x_offset = 30\n for e in range(1):\n # Adds offset to the y position of the squares\n y_offset += 20\n for f in range(20):\n # Prints a row of squares(5 squares along the x)\n square(x_offset, y_offset, 20, 20, dark_green)\n for g in range(1):\n # Adds x offset for the next line of squares on the y axis\n x_offset += 40",
"def print_square(size):\n\n if not isinstance(size, int):\n raise TypeError(\"size must be an integer\")\n if size < 0:\n raise ValueError(\"size must be >= 0\")\n if isinstance(size, float) and size < 0:\n raise TypeError(\"size must be an integer\")\n\n for col in range(0, size):\n for row in range(0, size):\n print(\"#\", end=\"\")\n print()",
"def print_square(size):\n if not isinstance(size, int):\n raise ValueError(\"size must be an integer\")\n if size < 0:\n raise ValueError(\"size must be >= 0\")\n if size == 0:\n return\n for row in range(int(size)):\n for col in range(int(size)):\n print(\"{:s}\".format(\"#\"), end=\"\")\n print()",
"def printgrid(\n rows=2,\n columns=2,\n cell_width=8,\n cell_height=4,\n corner_symbol=\"+\",\n horizontal_symbol=\"-\",\n vertical_symbol=\"|\",\n):\n horizontal_boundary = (\n corner_symbol + ((horizontal_symbol * cell_width) + corner_symbol) * columns\n ) + \"\\n\"\n horizontal_middle = horizontal_boundary.replace(horizontal_symbol, \" \").replace(\n corner_symbol, vertical_symbol\n )\n vertical_cells = (horizontal_middle * cell_height + horizontal_boundary) * rows\n print(\"\\n\")\n print(horizontal_boundary + vertical_cells)",
"def printgrid_1():\n print(plusminusrow + newline + piperow*4 + plusminusrow + newline + piperow*4 +plusminusrow)",
"def printGrid(grid):\n print(\"-\"*25)\n for i in range(9):\n print(\"|\", end=\" \")\n for j in range(9):\n print(grid[i][j], end=\" \")\n if (j % 3 == 2):\n print(\"|\", end=\" \")\n print()\n if (i % 3 == 2):\n print(\"-\"*25)\n \"\"\"\n Testing that solver works properly.\n \"\"\"",
"def print_square(size):\n if not isinstance(size, int):\n raise TypeError(\"size must be an integer\")\n if size < 0:\n raise ValueError(\"size must be >= 0\")\n if isinstance(size, float) and size < 0:\n raise TypeError(\"size must be an integer\")\n for i in range(0, size):\n for j in range(0, size):\n print(\"#\", end=\"\")\n print()",
"def print_square(size):\n\n if type(size) is not int:\n raise TypeError(\"size must be an integer\")\n if type(size) is float and size < 0:\n raise TypeError(\"size must be an integer\")\n if size < 0:\n raise ValueError(\"size must be >= 0\")\n for i in range(size):\n for j in range(size):\n print('#', end='')\n print()",
"def print_grid (grid):\r\n print('+--------------------+')\r\n for o in range(len(grid)):\r\n print('|',end='')\r\n for e in range(len(grid[o])):\r\n j=grid[o][e]\r\n if j==0:\r\n g=' '\r\n else:\r\n g=j\r\n print(g,end=' '*(5-len(str(grid[o][e]))))\r\n print('|')\r\n print('+--------------------+')",
"def display_grid_squares(x_margin, y_margin, num_rows, num_cols, sep):\n\n for row in range(num_rows):\n for col in range(num_cols):\n x = x_margin + sep * col\n y = y_margin + sep * row\n ellipse(x, y, 3, 3)\n pushMatrix()\n translate(x, y)\n noFill()\n rect(0, 0, 20, 20)\n popMatrix()",
"def print_grid(grid):\n # Calculate offsets for printing potentially negative range\n min_x = min(grid, key=lambda item: item[0])[0]\n min_y = min(grid, key=lambda item: item[1])[1]\n max_x = max(grid, key=lambda item: item[0])[0]\n max_y = max(grid, key=lambda item: item[1])[1]\n\n # Loop over grid adjusting for flipped Y perspective\n for y in range(max_y, min_y-1, -1):\n row = \"\"\n for x in range(min_x, max_x+1):\n color = grid[(x, y)]\n if color == COLOR_BLACK:\n row += \" \"\n else:\n row += \"X\"\n print(row)",
"def print_square(size):\n if type(size) is not int:\n raise TypeError(\"size must be an integer\")\n if size < 0:\n raise ValueError(\"size must be >= 0\")\n row = \"#\" * size\n for i in range(size):\n print(\"{}\".format(row))",
"def print_grid(grid):\n\tprint(\"\")\n\twall = \"+------\"*len(grid[0])+\"+\"\n\tprint(wall)\n\tfor row in grid:\n\t\tmeat = \"|\".join(COLORS[val] if val else \" \"*6 for val in row)\n\t\tprint(\"|{}|\".format(meat))\n\t\tprint(wall)",
"def displayGrid(grid):\n wid = grid.shape[0]\n show_num = 9 if wid > 9 else wid\n\n # chessboard\n line = '\\n' + '- + ' * (wid - 1) + '- {}\\n'\n line = line.join([' | '.join(grid[i]) for i in range(wid)])\n\n # mark the number of its lines\n bottom = ('\\n' + ' {} ' * show_num)\n bottom = bottom.format(*[i+1 for i in range(show_num)])\n\n if show_num == 9:\n part = (' {} '*(wid - show_num))\n part = part.format(*[i+1 for i in range(show_num, wid)])\n bottom += part\n\n print(line.format(*[i+1 for i in range(wid)]) + bottom)",
"def print_grid(grid):\n height = len(grid)\n width = len(grid[0])\n \n for r in range(height):\n for c in range(width):\n print(grid[r][c], end='') # print nothing between values\n print() # at end of row, go to next line",
"def print_grid(grid, score):\n print(\"\")\n print(score)\n wall = \"+------\"*len(grid[0])+\"+\"\n print(wall)\n for row in grid:\n meat = \"|\".join(\"{:^6}\".format(val) for val in row)\n print(\"|{}|\".format(meat))\n print(wall)",
"def draw(self):\n\t\tfor i in range(0, self.size):\n\t\t\tprint('\\n' + \"----\" * self.size)\n\t\t\tfor j in range(0, self.size):\n\t\t\t\tprint(self.grid[i][j] + ' |', end=\" \")\n\t\tprint('\\n'+ \"----\" * self.size + '\\n')",
"def display(self):\n width = self.width\n height = self.height\n x = self.x\n y = self.y\n for d_y in range(y):\n print()\n for h in range(height):\n if x != 0:\n print(\" \" * x, end=\"\")\n print(\"#\" * width)",
"def make_square(turt,sz):\n for i in range(4):\n turt.forward(sz)\n turt.left(90)",
"def print_grid (grid):\r\n print('+--------------------+')\r\n for i in range(4):\r\n print('|',end='')\r\n for j in range(4):\r\n if grid[i][j] == 0:\r\n print(' '*5,end='')\r\n else:\r\n print('{:<5}'.format(grid[i][j]),end='')\r\n print('|') \r\n print('+--------------------+')",
"def print_grid():\n print_line()\n for line in range(2):\n for post in range(4):\n print_post()\n print_line()",
"def printgrid_2(n):\n print(plusminusrow + newline + piperow*n + plusminusrow + newline + piperow*n + plusminusrow)\n return True",
"def print_grid (grid):\r\n f = '{:<5}'\r\n print(\"+--------------------+\")\r\n print('|', f.format(grid[0][0]), f.format(grid[0][1]), f.format(grid[0][2]), f.format(grid[0][3]), '|',sep='')\r\n print('|', f.format(grid[1][0]), f.format(grid[1][1]), f.format(grid[1][2]), f.format(grid[1][3]), '|',sep='')\r\n print('|', f.format(grid[2][0]), f.format(grid[2][1]), f.format(grid[2][2]), f.format(grid[2][3]), '|',sep='')\r\n print('|', f.format(grid[3][0]), f.format(grid[3][1]), f.format(grid[3][2]), f.format(grid[3][3]), '|',sep='')\r\n print(\"+--------------------+\")",
"def print_square(size):\n if type(size) is float and size < 0:\n raise TypeError(\"size must be an integer\")\n if not isinstance(size, int):\n raise TypeError(\"size must be an integer\")\n if size < 0:\n raise ValueError(\"size must be >= 0\")\n for rows in range(size):\n print('#' * size)",
"def print_grid(gr):\n for i in range(0,9):\n if((i % 3) == 0):\n print('- - - - - - - - - - - - - - - -')\n for j in range(0,9):\n if((j % 3) == 0):\n print('|', end='')\n \n val = str(gr[i][j])\n if(val == '0'):\n val = ' '\n \n print(' ' + val + ' ', end = '')\n print('|')\n print('- - - - - - - - - - - - - - - -')",
"def draw_multicolor_square(t,sz):\r\n for i in [\"red\", \"purple\", \"hotpink\", \"blue\"]:\r\n t.color(i)\r\n t.forward(sz)\r\n t.left(90)",
"def draw(self):\n for x in range(self.numRows):\n print self.grid[x]"
]
| [
"0.72268575",
"0.71651113",
"0.71262723",
"0.7122105",
"0.71041363",
"0.70922005",
"0.7041609",
"0.69977385",
"0.6995312",
"0.69859946",
"0.69331205",
"0.6890829",
"0.6885073",
"0.68819237",
"0.6870847",
"0.68504167",
"0.68301773",
"0.68297184",
"0.6766032",
"0.67587227",
"0.67542064",
"0.6753972",
"0.6730909",
"0.6714675",
"0.6684058",
"0.6679409",
"0.66706014",
"0.66650975",
"0.6600669",
"0.6550457"
]
| 0.8213835 | 0 |
Generate raw data for deepmd | def deepmd_raw_generate(vasp_dir: str, deepmd_dir: str, deepmd_data: Dict):
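    # Arguments (inferred from usage below):
    #   vasp_dir    - directory whose 'set*' subdirectories contain finished VASP runs
    #   deepmd_dir  - output root; training data is written under <deepmd_dir>/data
    #   deepmd_data - settings dict; 'training_params' (numb_test) and an optional
    #                 'max_set_number' are read from it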
vasp_set_list = [
os.path.join(vasp_dir, vasp_set_dir_name)
for vasp_set_dir_name in os.listdir(vasp_dir)
if vasp_set_dir_name.startswith('set')
]
vasp_set_list_absolute = [
os.path.abspath(vasp_set) for vasp_set in vasp_set_list
]
deepmd_set_dir_list = [
os.path.join(deepmd_dir, 'data', f'deepmd_set_{set_index}')
for set_index in range(len(vasp_set_list))
]
deepmd_set_dir_list_absolute = [
os.path.abspath(deepmd_set) for deepmd_set in deepmd_set_dir_list
]
# print(test_configs_path_absolute)
# print(os.path.exists(test_configs_path_absolute))
#
# HACK multiprocess never done
# process = Pool(8)
for set_index, deepmd_set_absolute in enumerate(
deepmd_set_dir_list_absolute):
if not os.path.exists(deepmd_set_absolute):
os.makedirs(deepmd_set_absolute)
with auxiliary.cd(deepmd_set_absolute):
# Generate test_configs
total_configs = cessp2force_lin.param_interface(
vasp_set_list_absolute[set_index], True)
# Generate raw dir
test_configs_absolute = os.path.abspath('test.configs')
convert2raw.param_interface(test_configs_absolute)
print('generate_raw')
if 'max_set_number' not in deepmd_data:
max_set_number = 10
else:
                max_set_number = deepmd_data['max_set_number']
# Generate set
# TODO
# DIVIDE 10 is a magic number, but I don't know how to choose
numb_test = deepmd_data['training_params']['numb_test']
set_size = -1
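            # The loop below searches for a split of the (total_configs - numb_test)
            # training frames into equally sized sets; set_size stays -1 if no split is found.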
for train_set_number in range(1, max_set_number):
configs_in_set = (total_configs - numb_test) // train_set_number
if configs_in_set > numb_test:
continue
else:
set_size = (total_configs - numb_test) // (train_set_number - 1)
# Check whether set_size is updated
if set_size == -1:
print("making set is unsuccessful", file=sys.stderr)
                raise RuntimeError("making set is unsuccessful: set_size was not determined")
print(f'set size is {set_size}')
for set_dir in os.listdir('.'):
if set_dir.startswith('set') and os.path.isdir(set_dir):
shutil.rmtree(set_dir)
code = subprocess.run(["../../../../raw_to_set.sh", f"{set_size}"])
            print(f'return code {code.returncode}')
            # Don't need to copy set files; they can be specified in the json file
            # # Copy set directory to corresponding deepmd_graph_dir
# set_dir_lists = [set_dir for set_dir
# in os.listdir(deepmd_dir)
# if set_dir.startswith('set')]
# for set_dir in set_dir_lists:
# dirname = Path(set_dir).name
# deepmd_graph_dir_set = os.path.join(deepmd_graph_dir, dirname)
# copytree(set_dir, deepmd_graph_dir_set, symlinks=False, ignore=None) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generateDerivedData():\n # Empty all derivied tables, in reverse order that they are populated.\n PartOfPerspectives.deleteAll()\n PartOfs.deleteAll()\n RelationshipsTransitive.deleteAll()\n\n # Derive the transitive closure of the relationship graph\n RelationshipsTransitive.regenerateTable()\n\n # Derive the quick tree display\n PartOfs.regenerateTable()\n\n # Derive the part of perspective tree for each perspective\n PartOfPerspectives.regenerateTable()\n\n return",
"def getDefaultData(dmd):",
"def generate():",
"def rawData():\n return render_template(\"data.html\")",
"def example_data():\n return {\n # \"$schema\": \"\",\n \"id\": \"12345-abcde\",\n \"metadata\": {\n \"title\": \"My record\",\n \"date\": \"2020-09-20\",\n },\n \"pids\": {\n \"oaiid\": {\"value\": \"\", \"provider\": \"local\"},\n },\n }",
"def md_repr(self):\n dbline = '- {0}'\n fcline = ' + {0}'\n output = ['## {0} ({1})\\n'.format(self.name, self.url)]\n output.append('**{0}**\\n'.format(self.mxd.replace(\"\\\\\", \"\\\\\\\\\")))\n for db in self._dbnames:\n output.append(dbline.format(db.replace(\"\\\\\", \"\\\\\\\\\")))\n for fc in sorted(self._datastructure[db]):\n output.append(fcline.format(fc))\n output.append('')\n output.append('')\n return '\\n'.join(output)",
"def data():\n file = open('./contest/content/data.md', 'r')\n rawText = file.read()\n file.close()\n content = Markup(markdown(rawText, \n extensions=['markdown.extensions.fenced_code', 'markdown.extensions.tables']))\n return render_template('markdowntemplate.html', \n title='Data', \n content=content)",
"def generate(self):",
"def cmd_rmd(args):",
"def data(self):",
"def plugin_data_repr(self):",
"def build_private_data(self, instance, private_data_dir):",
"def get_data_structure_representation(self) -> dict:\n byte_buff = self.get_rle()\n encoding = \"RLE\"\n\n if len(byte_buff) > self.grid_size[0] * self.grid_size[1] * 4:\n encoding = \"RAW\"\n byte_buff = self.get_raw()\n print(\"RAW ran\")\n else:\n print(\"RLE ran\")\n\n json_dict = {\n \"encoding\": encoding,\n \"nodes\": [base64.b64encode(bytes(byte_buff)).decode()],\n \"dimensions\": [self.grid_size[0], self.grid_size[1]]\n }\n\n return json_dict",
"def get_raw_data(self, dataset):\n\t\tprint(\"Getting raw data for\", dataset)\n\t\tself.fetched[dataset] = fetch_20newsgroups(remove = ('headers', 'footers', 'quotes'), subset = dataset)\n\t\tself.raw_documents[dataset] = self.fetched[dataset].data\n\t\tself.Y[dataset] = self.fetched[dataset].target",
"def generate(self, diagram):",
"def mk_data(self):\n self.data = self.DEFAULTS.copy()\n\n for template in self.raw_data.get('extends', []):\n template_data = self.load_template(template)\n self.data.update(template_data)\n\n self.data.update(self.raw_data)\n\n str_replace(self.data)\n\n if self.data.get('redirect_stderr'):\n self.data.pop('stderr')",
"def generate_observed_mdv(self):\n id_array = []\n ratio_array = []\n std_array = []\n use_array = []\n data = numpy.zeros(self.number_of_replicate)\n for fragment in sorted(self.observed_fragments):\n for number in sorted(self.mdv[fragment].keys()):\n ratio, std, use = self.get_data(fragment, number)\n id_array.append(self.mdv[fragment][number]['id'])#これださい\n ratio_array.append(ratio)\n std_array.append(std)\n if self.number_of_replicate >= 3:\n data = numpy.vstack((data, self.mdv[fragment][number]['data']))\n if use == 'use':\n use_array.append(1)\n else:\n use_array.append(0)\n if self.number_of_replicate >= 3:\n data = data[1:,:]\n return id_array, numpy.array(ratio_array), numpy.array(std_array), use_array, self.observed_fragments, data",
"def load_data(self):",
"def data_details(self):\n\t\t# Obtain the full Drusen files\n\t\tdata_files = glob(op.join(self.data_dir, '*'))\n\n\t\t# Obtain the data ID and total images\n\t\tdata_id = [single_file.split('\\\\')[-1].split('-')[1] for single_file in data_files]\n\t\tself.total_imgs = len(data_id)\n\t\tself.data_id = np.unique(data_id)\n\t\treturn self",
"def getRawMd(self, butler):\n if not self.raw_md:\n try:\n self.raw_md = butler.get('raw_md', **self.dataId)\n except:\n pass\n\n return self.raw_md",
"def get_raw_data(self):\n return self.HTML",
"def generateDataset(self):\n if self.outdir[-1] != \"/\": \n self.outdir += \"/\"\n self.outdir += \"dataset_trackml\"\n i = 1\n while os.path.exists(self.outdir):\n self.outdir.replace(\"_\"+str(i-1), \"\")\n self.outdir += (\"_\"+str(i))\n i += 1\n cmd = \"mkdir -p \"+ self.outdir\n os.system(cmd)\n\n cont = pc.particleController()\n cont.generateEvents(self.numevents, self.hpe, self.detectors)\n\n self.generateHits(cont)\n self.generateTruths(cont)\n self.generateSolution(cont)",
"def build(self, data: dict):",
"def get_data(self):",
"def _generate_dataset_description(out_file, model_level):\n repo_url = \"https://github.com/nilearn/nilearn\"\n dataset_description = {\n \"GeneratedBy\": {\n \"Name\": \"nilearn\",\n \"Version\": nilearn.__version__,\n \"Description\": (\n \"A Nilearn \"\n f\"{'first' if model_level == 1 else 'second'}\"\n \"-level GLM.\"\n ),\n \"CodeURL\": (f\"{repo_url}/releases/tag/{nilearn.__version__}\"),\n }\n }\n\n with open(out_file, \"w\") as f_obj:\n json.dump(dataset_description, f_obj, indent=4, sort_keys=True)",
"def daten():\n body_list = db.get_body()\n body_dict = {}\n for body in body_list:\n body_dict[str(body['_id'])] = body['name']\n data_list = []\n for file in os.listdir(app.config['data_dump_folder']):\n if file.endswith(\".tar.bz2\"):\n stat = os.lstat(app.config['data_dump_folder'] + os.sep + file)\n data_list.append({\n 'id': file.split('.')[0],\n 'name': body_dict[file.split('.')[0]],\n 'size': \"%d\" % (stat.st_size / 1024.0 / 1024.0)\n })\n file_list = []\n for file in os.listdir(app.config['files_dump_folder']):\n if file.endswith(\".tar.bz2\"):\n stat = os.lstat(app.config['files_dump_folder'] + os.sep + file)\n file_list.append({\n 'id': file.split('.')[0],\n 'name': body_dict[file.split('.')[0]],\n 'size': \"%d\" % (stat.st_size / 1024.0 / 1024.0 / 1024.0)\n })\n return render_template('daten.html', data_list=data_list, file_list=file_list)",
"def dumpData(self,out):\n out.packSub0('NAME',self.id)\n if getattr(self,'isDeleted',False):\n out.packSub('DELE','i',0)\n return\n out.packSub0('MODL',self.model)\n if self.title: out.packSub0('FNAM',self.title)\n out.packSub('BKDT','f4i',\n self.weight, self.value, self.isScroll, self.teaches, self.enchantPoints)\n if self.script: out.packSub0('SCRI',self.script)\n if self.icon: out.packSub0('ITEX',self.icon)\n if self.text: out.packSub0('TEXT',self.text)\n if self.enchant: out.packSub0('TEXT',self.enchant)",
"def template_dataset(self):\n exp_dict = {\n 'experiment_name': 'ALLEN_all_neurons',\n 'only_process_n': None, # Set to None to process all\n 'randomize_selection': True,\n 'reference_image_key': {'proc_stimuli': 'image'},\n 'reference_label_key': {'neural_trace_trimmed': 'label'},\n 'rf_query': [{\n 'rf_coordinate_range': { # Get all cells\n 'x_min': 40,\n 'x_max': 70,\n 'y_min': 20,\n 'y_max': 50,\n },\n 'cre_line': 'Cux2',\n 'structure': 'VISp'}\n ],\n 'cross_ref': 'rf_coordinate_range_and_stimuli',\n 'store_means': [\n 'image',\n 'label'\n ],\n 'cc_repo_vars': {\n 'output_size': [2, 1], # target variable -- neural activity,\n 'model_im_size': [152, 304, 1],\n 'loss_function': 'pearson',\n 'score_metric': 'pearson',\n 'preprocess': 'resize'\n },\n # 'deconv_method': 'elephant'\n }\n exp_dict = self.add_globals(exp_dict)\n return exp_dict",
"def build_data(cmd, rel_new_path, new_md5, founded_path=None):\n data = {'cmd': cmd}\n if cmd == 'copy':\n data['file'] = {'src': founded_path,\n 'dst': rel_new_path,\n 'md5': new_md5,\n }\n else:\n data['file'] = {'filepath': rel_new_path,\n 'md5': new_md5,\n }\n return data",
"def generate(self):\n pass"
]
| [
"0.6110141",
"0.58401746",
"0.5788994",
"0.5770835",
"0.5721246",
"0.570747",
"0.5629988",
"0.5628113",
"0.55555266",
"0.5409818",
"0.54081583",
"0.54050696",
"0.53423834",
"0.5333344",
"0.53151476",
"0.530112",
"0.5297523",
"0.5295706",
"0.52634424",
"0.5261391",
"0.525146",
"0.52362126",
"0.5235426",
"0.52325314",
"0.5226805",
"0.51907533",
"0.51809764",
"0.5162962",
"0.51582384",
"0.51567954"
]
| 0.6375107 | 0 |
Remove extra test_configs and raw file | def deepmd_clear_raw_test_configs(deepmd_dir: str):
with auxiliary.cd(deepmd_dir):
raw_file_list = [raw for raw in os.listdir('.') if raw.endswith('raw')]
for raw_file in raw_file_list:
os.remove(raw_file)
test_configs = 'test.configs'
os.remove(test_configs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_test_files():\n a_copy = PY_FILES[::]\n for f in a_copy:\n if 'test' in f:\n TEST_FILES.append(f)\n PY_FILES.remove(f)",
"def tearDown():\n for output_file_path in Path(output_dir).glob(\"test_voting_learner_cross_validate*\"):\n output_file_path.unlink()\n\n for output_file_path in Path(\".\").glob(\"test_voting_learner_cross_validate*\"):\n output_file_path.unlink()\n\n config_file_path = Path(config_dir) / \"test_voting_learner_cross_validate.cfg\"\n config_file_path.unlink()\n\n remove_jsonlines_feature_files(train_dir)",
"def clean_config(job_cfg_path, fixtures_path):\n tree = ET.parse(job_cfg_path)\n root = tree.getroot()\n inject_tree = ET.parse(fixtures_path)\n for node in inject_tree.getroot():\n srcnode = root.find(\"./%s\" % node.tag)\n if srcnode is not None:\n root.remove(srcnode)\n tree.write(job_cfg_path)",
"def broken_config(tmp_path):\n\n # Create directory for the config (to avoid messing up the config\n # in tmp_path used by other tests)\n dir = Path(tmp_path, \"broken_config\")\n dir.mkdir()\n configpath = Path(dir, \"config.yml\")\n\n # write broken file (we can't use yaml / ruamel for this, because\n # it would throw an error writing the file)\n # The broken line is the *.csv without quotes\n with open(configpath, \"w\") as f:\n f.write(\"files_to_ignore:\\n\")\n f.write(\"- .DS_Store\\n\")\n f.write(\"- .ipynb_checkpoints\\n\")\n f.write(\"- *.csv\\n\")\n\n return dir",
"def cleanup(self):\n self.qemu.clean_run_files()\n for tmp in glob.glob(self.configfile + \"?*\"):\n os.unlink(tmp)",
"def test_999_remove_testfiles(self):\n __dir_game_saves = os.path.dirname(__file__)\n __dir_game_saves = os.path.join(__dir_game_saves, 'games')\n __test_filename = consts.TEST_FILENAME\n __dir_game_testfile = os.path.join(__dir_game_saves, __test_filename)\n __test_filename_append1 = __test_filename + \"__1\"\n __dir_game_testfile_append1 = os.path.join(__dir_game_saves, __test_filename_append1)\n __test_filename_append2 = __test_filename + \"__2\"\n __dir_game_testfile_append2 = os.path.join(__dir_game_saves, __test_filename_append2)\n __test_filename_append3 = __test_filename + \"__3\"\n __dir_game_testfile_append3 = os.path.join(__dir_game_saves, __test_filename_append3)\n __dir_game_log = os.path.join(__dir_game_saves, \"log\")\n __test_logname = __test_filename + \"_log.txt\"\n __dir_game_logfile = os.path.join(__dir_game_log, __test_logname)\n os.remove(__dir_game_logfile)\n self.assertFalse(os.path.isfile(__dir_game_logfile))\n __list_files = os.listdir(__dir_game_log)\n if len(__list_files) == 0:\n os.removedirs(__dir_game_log)\n os.remove(__dir_game_testfile)\n self.assertFalse(os.path.isfile(__dir_game_testfile))\n os.remove(__dir_game_testfile_append1)\n self.assertFalse(os.path.isfile(__dir_game_testfile_append1))\n os.remove(__dir_game_testfile_append2)\n self.assertFalse(os.path.isfile(__dir_game_testfile_append2))\n os.remove(__dir_game_testfile_append3)\n self.assertFalse(os.path.isfile(__dir_game_testfile_append3))\n __list_files = os.listdir(__dir_game_saves)\n if len(__list_files) == 0:\n os.removedirs(__dir_game_saves)",
"def test_cleanup():\n os.remove(test_file[:-4] + \"_no_grave.h5m\")",
"def test_ignore_non_configs_from_current_dir(tmp_path: pathlib.Path) -> None:\n\n cli.startup(tmp_path)\n\n junk_config = tmp_path / \"myconfig.psd\"\n junk_config.touch()\n conf = tmp_path / \"watmyconfig.json\"\n conf.touch()\n configs_found = in_dir(tmp_path)\n assert len(configs_found) == 1",
"def dev_clean():\n clean_files(\"csv\", True)\n clean_files(\"jsontxt\", True)",
"def test__remove_excl_file_2(self):\n rsync = RsyncMethod(self.settings, self.meta, self.log, self.comms, False)\n self.assertEqual(rsync.exclude_file, os.path.join(os.environ['HOME'],\"test_myocp\",\"myocp_excl\"))\n rsync.exclude_file = os.path.join(os.environ['HOME'],\"temp/myocp_excl\")\n with open(rsync.exclude_file, 'w') as fp:\n fp.write('{}')\n rsync._remove_exclude_file()\n self.assertFalse(os.path.exists(rsync.exclude_file))\n self.assertEqual(self.log.getVal('info').split('|')[0], 'Settings file loaded.')\n self.assertEqual(self.log.getVal('info').split('|')[1], 'Settings file verified.')\n #self.assertEqual(self.log.getVal('info').split('|')[2], 'rsync exclusions file removed.')",
"def clean(self):\n actual_output_file = path.splitext(self.source_name)[0] + \".actual\"\n if path.exists(self.binary_name):\n os.unlink(self.binary_name)\n if path.exists(actual_output_file):\n os.unlink(actual_output_file)",
"def test_delete_namespaced_build_config(self):\n pass",
"def test_python_no_requirements_txt_no_write(self):\n self.write_file('foo.py', '# python code')\n cfg_files = self.generate_config_data(custom=True)\n self.assert_genfile_exists_with_contents(\n cfg_files,\n 'Dockerfile',\n self.DOCKERFILE_PREAMBLE +\n self.DOCKERFILE_VIRTUALENV_TEMPLATE.format(\n python_version='') +\n self.DOCKERFILE_INSTALL_APP +\n 'CMD my_entrypoint\\n')\n\n self.assertEqual(set(os.listdir(self.temp_path)),\n {'foo.py', 'app.yaml'})\n self.assertEqual({f.filename for f in cfg_files},\n {'Dockerfile', '.dockerignore'})",
"def files_unchanged(self):\n\n passed = []\n failed = []\n ignored = []\n fixed = []\n could_fix = False\n\n # Check that we have the minimum required config\n required_pipeline_config = {\"manifest.name\", \"manifest.description\", \"manifest.author\"}\n missing_pipeline_config = required_pipeline_config.difference(self.nf_config)\n if missing_pipeline_config:\n return {\"ignored\": [f\"Required pipeline config not found - {missing_pipeline_config}\"]}\n try:\n prefix, short_name = self.nf_config[\"manifest.name\"].strip(\"\\\"'\").split(\"/\")\n except ValueError:\n log.warning(\n \"Expected manifest.name to be in the format '<repo>/<pipeline>'. Will assume it is <pipeline> and default to repo 'nf-core'\"\n )\n short_name = self.nf_config[\"manifest.name\"].strip(\"\\\"'\")\n prefix = \"nf-core\"\n\n # NB: Should all be files, not directories\n # List of lists. Passes if any of the files in the sublist are found.\n files_exact = [\n [\".gitattributes\"],\n [\".prettierrc.yml\"],\n [\"CODE_OF_CONDUCT.md\"],\n [\"LICENSE\", \"LICENSE.md\", \"LICENCE\", \"LICENCE.md\"], # NB: British / American spelling\n [os.path.join(\".github\", \".dockstore.yml\")],\n [os.path.join(\".github\", \"CONTRIBUTING.md\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"bug_report.yml\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"config.yml\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"feature_request.yml\")],\n [os.path.join(\".github\", \"PULL_REQUEST_TEMPLATE.md\")],\n [os.path.join(\".github\", \"workflows\", \"branch.yml\")],\n [os.path.join(\".github\", \"workflows\", \"linting_comment.yml\")],\n [os.path.join(\".github\", \"workflows\", \"linting.yml\")],\n [os.path.join(\"assets\", \"email_template.html\")],\n [os.path.join(\"assets\", \"email_template.txt\")],\n [os.path.join(\"assets\", \"sendmail_template.txt\")],\n [os.path.join(\"assets\", f\"nf-core-{short_name}_logo_light.png\")],\n [os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo_light.png\")],\n [os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo_dark.png\")],\n [os.path.join(\"docs\", \"README.md\")],\n [os.path.join(\"lib\", \"nfcore_external_java_deps.jar\")],\n [os.path.join(\"lib\", \"NfcoreTemplate.groovy\")],\n ]\n files_partial = [\n [\".gitignore\", \".prettierignore\", \"pyproject.toml\"],\n ]\n\n # Only show error messages from pipeline creation\n logging.getLogger(\"nf_core.create\").setLevel(logging.ERROR)\n\n # Generate a new pipeline with nf-core create that we can compare to\n tmp_dir = tempfile.mkdtemp()\n\n # Create a template.yaml file for the pipeline creation\n template_yaml = {\n \"name\": short_name,\n \"description\": self.nf_config[\"manifest.description\"].strip(\"\\\"'\"),\n \"author\": self.nf_config[\"manifest.author\"].strip(\"\\\"'\"),\n \"prefix\": prefix,\n }\n\n template_yaml_path = os.path.join(tmp_dir, \"template.yaml\")\n with open(template_yaml_path, \"w\") as fh:\n yaml.dump(template_yaml, fh, default_flow_style=False)\n\n test_pipeline_dir = os.path.join(tmp_dir, f\"{prefix}-{short_name}\")\n create_obj = nf_core.create.PipelineCreate(\n None, None, None, no_git=True, outdir=test_pipeline_dir, template_yaml_path=template_yaml_path\n )\n create_obj.init_pipeline()\n\n # Helper functions for file paths\n def _pf(file_path):\n \"\"\"Helper function - get file path for pipeline file\"\"\"\n return os.path.join(self.wf_path, file_path)\n\n def _tf(file_path):\n \"\"\"Helper function - get file path for template file\"\"\"\n return 
os.path.join(test_pipeline_dir, file_path)\n\n # Files that must be completely unchanged from template\n for files in files_exact:\n # Ignore if file specified in linting config\n ignore_files = self.lint_config.get(\"files_unchanged\", [])\n if any([f in ignore_files for f in files]):\n ignored.append(f\"File ignored due to lint config: {self._wrap_quotes(files)}\")\n\n # Ignore if we can't find the file\n elif not any([os.path.isfile(_pf(f)) for f in files]):\n ignored.append(f\"File does not exist: {self._wrap_quotes(files)}\")\n\n # Check that the file has an identical match\n else:\n for f in files:\n try:\n if filecmp.cmp(_pf(f), _tf(f), shallow=True):\n passed.append(f\"`{f}` matches the template\")\n else:\n if \"files_unchanged\" in self.fix:\n # Try to fix the problem by overwriting the pipeline file\n shutil.copy(_tf(f), _pf(f))\n passed.append(f\"`{f}` matches the template\")\n fixed.append(f\"`{f}` overwritten with template file\")\n else:\n failed.append(f\"`{f}` does not match the template\")\n could_fix = True\n except FileNotFoundError:\n pass\n\n # Files that can be added to, but that must contain the template contents\n for files in files_partial:\n # Ignore if file specified in linting config\n ignore_files = self.lint_config.get(\"files_unchanged\", [])\n if any([f in ignore_files for f in files]):\n ignored.append(f\"File ignored due to lint config: {self._wrap_quotes(files)}\")\n\n # Ignore if we can't find the file\n elif not any([os.path.isfile(_pf(f)) for f in files]):\n ignored.append(f\"File does not exist: {self._wrap_quotes(files)}\")\n\n # Check that the file contains the template file contents\n else:\n for f in files:\n try:\n with open(_pf(f), \"r\") as fh:\n pipeline_file = fh.read()\n with open(_tf(f), \"r\") as fh:\n template_file = fh.read()\n if template_file in pipeline_file:\n passed.append(f\"`{f}` matches the template\")\n else:\n if \"files_unchanged\" in self.fix:\n # Try to fix the problem by overwriting the pipeline file\n with open(_tf(f), \"r\") as fh:\n template_file = fh.read()\n with open(_pf(f), \"w\") as fh:\n fh.write(template_file)\n passed.append(f\"`{f}` matches the template\")\n fixed.append(f\"`{f}` overwritten with template file\")\n else:\n failed.append(f\"`{f}` does not match the template\")\n could_fix = True\n except FileNotFoundError:\n pass\n\n # cleaning up temporary dir\n shutil.rmtree(tmp_dir)\n\n return {\"passed\": passed, \"failed\": failed, \"ignored\": ignored, \"fixed\": fixed, \"could_fix\": could_fix}",
"def tearDown(self):\n testing_dir = os.path.split(os.path.realpath(__file__))[0]\n for f in glob.glob(os.path.join(testing_dir, \"*\")):\n if f.split(\".\")[-1] in [\"o\", \"out\", \"pyc\", \"log\"]:\n subprocess.call(['rm', f])",
"def _prepare_test_list(self, test_name):\n test_yaml_file_name = f'opnfv-{test_name}.yaml'\n scenario_file_name = os.path.join(self.rally_scenario_dir,\n test_yaml_file_name)\n\n if not os.path.exists(scenario_file_name):\n scenario_file_name = os.path.join(self.scenario_dir,\n test_yaml_file_name)\n\n if not os.path.exists(scenario_file_name):\n raise Exception(\n f\"The scenario '{scenario_file_name}' does not exist.\")\n\n LOGGER.debug('Scenario fetched from : %s', scenario_file_name)\n test_file_name = os.path.join(self.temp_dir, test_yaml_file_name)\n\n if not os.path.exists(self.temp_dir):\n os.makedirs(self.temp_dir)\n\n self.apply_blacklist(scenario_file_name, test_file_name)\n return test_file_name",
"def NOtearDown(self):\n\n for f in self.testoutput:\n if os.path.exists(f):\n os.remove(f)",
"def setUp(self):\n if os.path.exists('file.json'):\n os.remove(\"file.json\")",
"def scrub():\n\n\tlocal(\"rm -fr dist build\")\n\tlocal(\"find . -name \\\"*.pyc\\\" -exec rm '{}' ';'\")",
"def test_load_dangling(self):\n with NamedTemporaryFile(suffix=\".yaml\") as config:\n with open(config.name, \"w\") as write_stream:\n write_stream.write(\n \"\"\"\n pipeline:\n - !LinearController\n low_utilisation: 0.9\n high_utilisation: 1.1\n - !MockPool\n random_things:\n foo: bar\n \"\"\"\n )\n with pytest.raises(ConfigurationError):\n with load(config.name):\n assert False",
"def parse_setup_files():\n a_copy = PY_FILES[::]\n for f in a_copy:\n if 'setup' in f:\n SETUP_FILES.append(f)\n PY_FILES.remove(f)",
"def tearDown(self) -> None:\n os.remove(TestConfigFile.TEST_CONFIG)",
"def pytest_unconfigure(config):\n if config.option.intercept_remote:\n global mpatch\n mpatch.undo()\n intercept_dump(config)",
"def test_python_custom_runtime_no_write(self):\n self.write_file('test.py', 'test file')\n cfg_files = self.generate_config_data(custom=True)\n with open(os.path.join(self.temp_path, 'app.yaml')) as f:\n app_yaml_contents = f.read()\n self.assertMultiLineEqual(\n app_yaml_contents,\n textwrap.dedent(\"\"\"\\\n entrypoint: my_entrypoint\n env: flex\n runtime: custom\n \"\"\"))\n self.assertEqual(set(os.listdir(self.temp_path)),\n {'test.py', 'app.yaml'})\n self.assertEqual({f.filename for f in cfg_files},\n {'Dockerfile', '.dockerignore'})",
"def remove_hidden_tests(test_dir):\n for f in test_dir.iterdir():\n if f.name == '__init__.py' or f.suffix != '.py':\n continue\n locals = {}\n with open(f) as f2:\n exec(f2.read(), globals(), locals)\n test = locals['test']\n for suite in test['suites']:\n for i, case in list(enumerate(suite['cases']))[::-1]:\n if case['hidden']:\n suite['cases'].pop(i)\n write_test(f, test)",
"def clean_file_before_test():\n\n if os.path.exists(LOG_FOLDER):\n for file in os.listdir(LOG_FOLDER):\n os.remove(LOG_FOLDER + \"/\" + file)",
"def _removeSpecs(self):\n self.specGenerator.removeSpecs()",
"def test_dry_run():\n config = get_config(\"delete.conf\")\n path = get_config_path(config)\n test_file = make_test_file(path)\n\n console.pushbroom(config, dry_run=True)\n assert test_file.exists()\n\n console.pushbroom(config)\n assert not test_file.exists()\n\n path.rmdir()",
"def test_provider_system_hook_file_remove(change_dir, fix_file_perms):\n o = tackle(context_file='remove.yaml', no_input=True)\n assert o['if_file']\n assert not o['not_file']\n assert o['if_files']\n assert not o['not_files']",
"def _prep_test(self):\n if not os.path.isfile(self.confpath):\n LOGGER.error(\"Conf file not valid: %s\", self.confpath)\n if not os.path.isfile(self.testlist):\n LOGGER.error(\"testlist file not valid: %s\", self.testlist)"
]
| [
"0.6716044",
"0.63534313",
"0.62878484",
"0.61727023",
"0.6160238",
"0.61546147",
"0.6109537",
"0.606747",
"0.6036883",
"0.6019035",
"0.5984001",
"0.5983336",
"0.5980338",
"0.59782064",
"0.5963823",
"0.59259063",
"0.5925412",
"0.5915584",
"0.5912946",
"0.59095067",
"0.59076345",
"0.58947957",
"0.58853114",
"0.5879347",
"0.5870128",
"0.5855959",
"0.58393794",
"0.58374256",
"0.58353853",
"0.5826232"
]
| 0.74847734 | 0 |
Generate json file for deepmd training | def deepmd_json_param(deepmd_graph_dir: str, deepmd_data: Dict,
iter_index: int):
# Specify more parameter option from json file
# specify json file path
deepmd_json_path = os.path.join(deepmd_graph_dir, 'deepmd.json')
# Generate a random number as a random seed
deepmd_data['training_params']['seed'] = random.randint(0, 2147483647)
# Change batch size
if iter_index == 0:
deepmd_data['training_params']['batch_size'] = deepmd_data[
'init_batch_size']
else:
deepmd_data['training_params']['batch_size'] = deepmd_data[
'sys_batch_size']
# Deepmd version update, not set restart in json file but set in command line
# # decide whether restart
# if iter_index == 0:
# deepmd_data['training_params']['restart'] = False
# else:
# deepmd_data['training_params']['restart'] = True
# set system path
# Bug Fixed
# Now use relative path
sets_system_list: List[str] = list()
# FIXED
# train from the sets in previous and current iter
with auxiliary.cd(deepmd_graph_dir):
for exist_sets_iter_index in range(iter_index + 1):
deepmd_data_root_path = os.path.join('..', '..', '..', f'iter_{exist_sets_iter_index}', 'deepmd', 'data')
sets_system_list += [
os.path.join(deepmd_data_root_path, deepmd_set_dir)
for deepmd_set_dir in os.listdir(deepmd_data_root_path)
if deepmd_set_dir.startswith('deepmd_set')
]
deepmd_data['training_params']['systems'] = sets_system_list
# Create if not have graph dir
if not os.path.exists(deepmd_graph_dir):
os.makedirs(deepmd_graph_dir)
with open(deepmd_json_path, 'w') as deepmd_json:
json.dump(deepmd_data['training_params'], deepmd_json, indent=2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _generate_dataset_description(out_file, model_level):\n repo_url = \"https://github.com/nilearn/nilearn\"\n dataset_description = {\n \"GeneratedBy\": {\n \"Name\": \"nilearn\",\n \"Version\": nilearn.__version__,\n \"Description\": (\n \"A Nilearn \"\n f\"{'first' if model_level == 1 else 'second'}\"\n \"-level GLM.\"\n ),\n \"CodeURL\": (f\"{repo_url}/releases/tag/{nilearn.__version__}\"),\n }\n }\n\n with open(out_file, \"w\") as f_obj:\n json.dump(dataset_description, f_obj, indent=4, sort_keys=True)",
"def save_training(self):\n\n filename = str(hashlib.sha1(str(self.training_data).encode(\"utf-8\"))\n .hexdigest())\n path = \"./training/\" + filename + \".json\"\n\n data = {\n \"states\": self.states,\n \"transitions\": self.transitions,\n \"matrix\": self.matrix.tolist()\n }\n\n with open(path, \"w\") as outfile:\n json.dump(data, outfile)",
"def initialize_descriptive_json(json_filename,wk_dir,model_dir,obs_dir):\n output = {'provenance':{},'data':{},'metrics':{},'plots':{},'index': 'index.html','html':'index.html'}\n log_path = wk_dir + '/asop_coherence.log.txt'\n output['provenance'] = {'environment': get_env(),\n 'modeldata': model_dir,\n 'obsdata': obs_dir,\n 'log': log_path}\n with open(json_filename,'w') as output_json:\n json.dump(output,output_json, indent=2)\n\n return",
"def generate():\n # Create the list of article from our data\n generator = GenerateLDA()\n generator.generateLDA()\n return jsonify({\"code\": 200, \"message\" : \"LDA model successfully created.\"})",
"def create_image_annot_json():\n filepath = 'dataset/image_data.json'\n img_list = json.load(open(filepath))\n result = dict()\n for img in img_list:\n vis_id = img['image_id']\n result[vis_id] = img\n\n dest_file_path = 'dataset/vis_image_annt.json'\n with open(dest_file_path, 'w') as fp:\n json.dump(result, fp)\n print(\"DONE! - Generated \" + dest_file_path)",
"def main():\n loader = MicrosoftDataloader()\n train,dev,test = loader.getData()\n sentences = []\n\n # Collect all the training sentences\n for i,row in pd.concat((train,test)).iterrows():\n if isinstance(row[\"sentence1\"], basestring) and isinstance(row[\"sentence2\"], basestring):\n sentences.append(row[\"sentence1\"])\n sentences.append(row[\"sentence2\"])\n\n # Get the mapping between sentences and their cotext vectors\n mapped = get_sentence_to_context_map(sentences)\n\n # At this stage we have a map between every sentence and its context vector\n # However the JSON file must contain sentences in the same order as in the MSR data file\n data = []\n for i,sentence in enumerate(sentences):\n embedding = mapped[sentence]\n data.append({'index':i, 'embedding':embedding, 'text':sentence})\n\n # Write the sentences and embeddings to JSON\n # The array index should corrospond to the sentence #\n print \"Saving embedded sentences to: {0}\".format(EMBED_FILE)\n with open(EMBED_FILE,'w') as outfile:\n json.dump(data,outfile,indent=2)",
"def build(self, is_easy=False) -> None:\n allocation = ['train', 'dev', 'test']\n\n bm25_helper = self.__build_bm25_helper(is_easy)\n\n for entry in allocation:\n with open(self.__json_location + '/merged_' + entry + '.json', 'r') as f:\n json_data = json.load(f)\n\n output_file_name = 'data_' + entry\n if is_easy:\n json2training_converter = Json2EasyTraining(json_data, bm25_helper)\n output_file_name += '_easy'\n else:\n json2training_converter = JSON2Training(json_data, bm25_helper)\n\n training_set = json2training_converter.convert()\n dialog_lookup_table = json2training_converter.get_dialog_lookup_table()\n\n self.__write_tsv(output_file_name + '.tsv', training_set)\n self.__write_array(output_file_name + '_lookup' '.txt', dialog_lookup_table)",
"def generate_dataset(self, target_folder: Path = None):\n if target_folder is None:\n target_folder = Path(__file__).parent\n\n # Folds\n folds = self.generate_folds()\n specs_path = target_folder / f\"{self.name}.json\"\n with specs_path.open(\"w\") as f:\n json.dump(folds, f, indent=4)\n\n # Make sure we can read the specs file\n DataSpecification(specs_path)",
"def WriteStructuralMaterialsjson(save_path,dic_in_json_format):\n complete_name=os.path.join(save_path,\"StructuralMaterials.json\") \n with open(complete_name, \"w\") as save_file:\n save_file.write(dic_in_json_format)\n if(DEBUG):\n print(\"StructuralMaterials.json written\")",
"def extract_json_to_files(input_dir,output_dir):\n files={}\n files['train']='train-v1.1.json'\n files['dev']='dev-v1.1.json'\n\n for file in files:\n filename=os.path.join(input_dir,files[file])\n with open(filename,'r',encoding='utf-8') as data_file:\n examples = []\n dataset=json.load(data_file)\n count_total=total_exs(dataset)\n count_mapping_problem=0\n count_token_problem=0\n count_ansspan_problem=0\n count_examples=0\n for article_id in tqdm(range(len(dataset['data'])), desc=\"Preprocessing {}\".format(file)):\n article_paragraph=dataset['data'][article_id]['paragraphs']\n for paragraph_id in range(len(article_paragraph)):\n context=article_paragraph[paragraph_id]['context']\n context=context.replace(\"''\",'\"').replace(\"``\",'\"')\n context = context.replace('\\u3000', ' ').replace('\\u202f',' ').replace('\\u2009', ' ')#.replace(\"'\",\"'\")\n context=context.replace('\\-',' ')\n context_tokens=tokenize_sequence(context)\n context=context.lower()\n qas=article_paragraph[paragraph_id]['qas']\n charloc2wordloc=get_char_word_loc_mapping(context, context_tokens)\n if charloc2wordloc is None:\n count_mapping_problem+=len(qas)\n continue\n for qa in qas:\n question=qa['question'].lower()\n question_tokens=tokenize_sequence(question)\n\n ans_text=qa['answers'][0]['text'].lower()\n ans_text=ans_text.replace('\\u3000', ' ').replace('\\u202f', ' ').replace('\\u2009', ' ')\n ans_start_loc=qa['answers'][0]['answer_start']\n if qa['id'] in ['5706baed2eaba6190074aca5','57269c73708984140094cbb5','57269c73708984140094cbb7','572a11661d04691400779721','572a11661d04691400779722','572a11661d04691400779723','572a11661d04691400779724','572a11661d04691400779725','572a2cfc1d0469140077981b','572a3a453f37b319004787e9','572a84d3f75d5e190021fb3c']:\n ans_start_loc+=1\n if qa['id'] in ['572a5df77a1753140016aedf','572a5df77a1753140016aee0','572a84d3f75d5e190021fb38','572a84d3f75d5e190021fb39','572a84d3f75d5e190021fb3a','572a84d3f75d5e190021fb3b','572a85df111d821400f38bad','572a85df111d821400f38bae','572a85df111d821400f38baf','572a85df111d821400f38bb0']:\n ans_start_loc+=2\n if qa['id'] in ['572a5df77a1753140016aee1','572a5df77a1753140016aee2']:\n ans_start_loc+=3\n if qa['id'] in ['57286bf84b864d19001649d6','57286bf84b864d19001649d5']:\n ans_start_loc-=1\n if qa['id'] in ['5726bee5f1498d1400e8e9f3','5726bee5f1498d1400e8e9f4']:\n ans_start_loc-=2\n ans_end_loc=ans_start_loc+len(ans_text)\n\n if context[ans_start_loc:ans_end_loc]!=ans_text:\n count_ansspan_problem+=1\n continue\n ans_start_wordloc = charloc2wordloc[ans_start_loc][1] # answer start word loc\n ans_end_wordloc = charloc2wordloc[ans_end_loc-1][1] # answer end word loc\n assert ans_start_wordloc <= ans_end_wordloc\n\n ans_tokens = context_tokens[ans_start_wordloc:ans_end_wordloc + 1]\n if \"\".join(ans_tokens) != \"\".join(ans_text.split()):\n count_token_problem += 1\n #print(ans_text)\n #print(ans_tokens)\n continue # skip this question/answer pair\n examples.append((' '.join(context_tokens),' '.join(question_tokens),' '.join(ans_tokens),' '.join([str(ans_start_wordloc),str(ans_end_wordloc)])))\n print(\"Number of (context, question, answer) triples discarded due to char -> token mapping problems: \", count_mapping_problem)\n print(\"Number of (context, question, answer) triples discarded because character-based answer span is unaligned with tokenization: \",count_token_problem)\n print(\"Number of (context, question, answer) triples discarded due character span alignment problems (usually Unicode problems): \",count_ansspan_problem)\n 
print(\"Processed %i examples of total %i\\n\" % (len(examples), len(examples)+count_mapping_problem+count_token_problem+count_ansspan_problem))\n indices = list(range(len(examples)))\n np.random.shuffle(indices)\n with open(os.path.join(output_dir,file+'.context'),'w',encoding='utf-8') as context_file, \\\n open(os.path.join(output_dir,file+'.question'),'w',encoding='utf-8') as question_file, \\\n open(os.path.join(output_dir,file+'.answer'),'w',encoding='utf-8') as answer_file, \\\n open(os.path.join(output_dir,file+'.span'),'w',encoding='utf-8') as span_file:\n for i in indices:\n (context,question,answer,span)=examples[i]\n context_file.write(context+'\\n')\n question_file.write(question+'\\n')\n answer_file.write(answer+'\\n')\n span_file.write(span+'\\n')",
"def main():\n os.makedirs(PATH)\n fetch_data()\n convert_to_json(model_list, 'models.json', is_model=True)\n convert_to_json(backend_list, 'backends.json')\n convert_to_json(type_list, 'types.json')\n convert_to_json(featurizer_list, 'featurizers.json')",
"def main(json_input, output_folder):\n with open(json_input, 'r') as f:\n data = json.loads(f.read())['data']\n\n with open(path.join(output_folder, 'qgeneration.context.txt'), 'w') as context_f, \\\n open(path.join(output_folder, 'qgeneration.context.nojson.txt'), 'w') as context_f_nojson, \\\n open(path.join(output_folder, 'qgeneration.gold.txt'), 'w') as question_f:\n for item in data:\n context_f.write(json.dumps(item['context']) + '\\n')\n context_f_nojson.write(item['context'][:1000] + '\\n')\n question_f.write(json.dumps(item['answer']) + '\\n')\n\n print('DONE')",
"def data():\n print (\"&\")\n res = {}\n\t\n # Load Data\n with open(DATA_PATH_TRAIN, 'rb') as f:\n data = pickle.load(f)\n\t\t\n for d in data:\n for j in range(len(d)):\n if not d[j][\"addinfo\"][\"path\"] in res:\n res[d[j][\"addinfo\"][\"path\"]] = {}\n d[j][\"environment\"][\"text\"] = d[j][\"addinfo\"][\"text\"]\n res[d[j][\"addinfo\"][\"path\"]][d[j][\"addinfo\"][\"line\"]] = d[j][\"environment\"]\n\t \t\n with open(DATA_PATH_TEST, 'rb') as f:\n data = pickle.load(f)\n\t\t\n for d in data:\n for j in range(len(d)):\n if not d[j][\"addinfo\"][\"path\"] in res:\n res[d[j][\"addinfo\"][\"path\"]] = {}\n d[j][\"environment\"][\"text\"] = d[j][\"addinfo\"][\"text\"]\n res[d[j][\"addinfo\"][\"path\"]][d[j][\"addinfo\"][\"line\"]] = d[j][\"environment\"]\n\t\t\t\n with open('tasks/env/data/data.json', 'w') as outfile:\n json.dump(res, outfile)",
"def create_gen_json(self, out_file):\n\n params = self.create_package_dict()\n with open(out_file, 'w') as fp:\n json.dump(params, fp)",
"def main():\n with open(IMAGEPATH_LIST_PATH, \"rt\") as imagepath_list_handle:\n imagepath_list = [line.strip() for line in imagepath_list_handle.readlines()]\n\n object_detector = ObjectDetector(MODEL_PATH)\n\n dataset_json = []\n for imagepath in imagepath_list:\n image = scipy.misc.imread(imagepath)\n detections = object_detector.run(image)\n\n detections_json = {\"path\": imagepath, \"detections\": [det.to_dict() for det in detections]}\n dataset_json.append(detections_json)\n\n with open(DATASET_PATH, \"wt\") as json_handle:\n json.dump(dataset_json, json_handle, sort_keys=True, indent=4)",
"def generate_train_txt(name, path):\n with open(path + '/test.txt', 'a') as file:\n file.write('/content/YOLO_metric/data/obj/' + name + '\\n')",
"def main(filename):\n with open(filename) as json_file:\n data = json.load(json_file)\n\n course_dict = {}\n course_dict['course_id'] = str(os.path.split(filename.strip('/'))[-1])\n course_dict['blocks'] = build_course_map(data)\n\n filename = '%s' % course_dict['course_id']\n filepath = os.path.join('../input/', filename)\n\n with open(filepath, 'w') as outfile:\n json.dump(course_dict, outfile, indent=4)",
"def main():\n\t# import training data\n\tfiles = [INPATH + f for f in os.listdir(INPATH) if \".json\" in f]\n\n\t# import books\n\tprint(\"Loading training data...\")\n\tbookList = loadBooks(files)\n\tprint(\"Load complete.\")\n\n\t# loop through element types and store data structure\n\tfor key, value in ELEMENTS.items():\n\t\tprint(\"Generating: %s\" % key)\n\n\t\t# set file outpath\n\t\toutfile = \"%s.json\" % key\n\t\toutpath = OUTPATH % outfile\n\n\t\tgenerateTrain(bookList, key, value, outpath)",
"def create_data_base():\n\n\tscript_files = []\n\tjson_files = []\n\t\n\t# get script files list\n\tfor file in os.listdir(\"learned_objects_scripts/\"):\n\t\tif file.endswith(\".script\"):\n\t\t\tscript_files.append(file)\n\n\t# get json files list\n\tfor file in os.listdir(\"object_models/\"):\n\t\tif file.endswith(\".json\"):\n\t\t\tjson_files.append(file)\n\t\n\t# create json file for new objects\n\tmodel_created = False\n\tfor file in script_files:\n\t\tif \"{}.json\".format(file[:-7]) not in json_files:\n\t\t\twith open(\"object_models/{}.json\".format(file[:-7]), 'w') as outfile:\n\t\t\t\tobj_model = object_script_to_model(\"learned_objects_scripts/\" + file)\n\t\t\t\tjson.dump(obj_model, outfile)\n\t\t\t\tmodel_created = True\n\t\t\t\tprint(\"model created for\", file)\n\tif not model_created:\n\t\tprint(\"data base is already up to date\")",
"def create_dicts():\n load_data_for_dict('data/atis/train/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/valid/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/test/seq.in', 'data/atis/voc/vocabulary.json') \n load_data_for_dict('data/atis/train/seq.out', 'data/atis/voc/slot_vocabulary.json')",
"def dump_distributions(self):\n file_path = self.get_local_path(self.filename_distributions)\n\n with open(file_path, \"w\") as f:\n json_obj = {\n \"feature_uniques\": self.feature_uniques,\n \"feature_summaries\": self.feature_summaries,\n }\n json.dump(json_obj, f)\n return file_path",
"def json_format(out, graph):\n steps = {}\n for step, deps in each_step(graph):\n steps[step.name] = {}\n steps[step.name][\"deps\"] = [dep.name for dep in deps]\n\n json.dump({\"steps\": steps}, out, indent=4)\n out.write(\"\\n\")",
"def create_json(self):\n data = {\"image_id\": self.ids, \"img_path\": self.img_paths, \"bg\": self.bgs}\n if hasattr(self, \"bbox\"):\n data[\"bbox\"] = self.bbox\n if hasattr(self, \"masks\"):\n data[\"masks\"] = self.masks\n with open(f\"{self.save_path}{self.name}/json/images_info.json\", \"w\") as f:\n json.dump(data, f)",
"def _gen_folder_(self):\n os.makedirs(self.fld_name)\n dic_json = {\"PARAM_EXCOND\": self.cond_ex,\n \"PARAM_CALCOND\": self.cond_cal,\n \"PARAM_MODELCONST\": self.const_model\n }\n with open(os.path.join(self.fld_name, \"cond.json\"), \"w\") as f:\n json.dump(dic_json, f, ensure_ascii=False, indent=4)",
"def create_file(output_json):\n folder = \"data/\"\n filename = datetime.now().strftime(\"%d-%m-%Y\") + \"-moisture-read.json\"\n filepath = folder+filename\n\n # Create Local folder\n try:\n os.mkdir(folder)\n except OSError:\n pass\n #print(\"Directory already created or a failure occured on directory (%s)\" % folder)\n\n # Create Empty Json file if it doesnt exists\n if(Path(filepath)).exists():\n pass\n else:\n try:\n f = open(filepath, \"a\")\n f.write('{\\n\"moisture_iot_project\":[]\\n}')\n f.close()\n except Exception as e:\n print(\"Failure occured creating the JSON file (%s)\" % e)\n\n # Open Json file to append current structure\n with open(filepath) as outfile:\n data = json.load(outfile)\n\n # Get list with all dictionaries\n temp = data['moisture_iot_project']\n\n # Append current structure\n temp.append(output_json)\n\n # Reorganize List values and re-write to JSON file\n data['moisture_iot_project'] = temp\n write_json(data, filepath)",
"def generate_files(self, output_dir: str) -> None:\n full_filename = os.path.join(output_dir, self.json_file)\n with open(full_filename, 'w', encoding='utf-8') as output_file:\n json.dump(self.zidb, output_file, indent=2)\n print(file=output_file) # add terminating newline\n logging.info(\"Created %s\", full_filename)",
"def example_json_file():\n path = dirname(__file__)\n with open(join(\n path,\n 'data',\n 'datacite-v3.1-full-example.json')) as file:\n return file.read()",
"def make_submission(data, model, path):\n counter = 0\n length= len(data)\n test_predictions = []\n #Data has form of [(id,vec),(id,vec)....]\n for instance in data:\n print(\"Prog: \",(counter/length*100))\n counter+=1\n id = instance[0]\n vec = instance[1]\n res = model.predict(vec)\n print(\"Predicted: \",res)\n test_predictions.append({\"id\":id,\"prediction\":res})\n with open(path+\".json\", \"w\", encoding=\"utf-8\") as f:\n for doc in test_predictions:\n f.write(json.dumps(doc) + \"\\n\")",
"def build(self):\n # open json, len 161,260\n at_json = open_json(self.json_names[0])\n link_json = open_json(self.json_names[1])\n # if need preprocessing, do it\n if self.args.img_preprocessing:\n print(\"resize imgs\")\n for i in tqdm(range(len(link_json))):\n image_url = \"image/\" + link_json[i][\"image_url_4x\"].split('/')[-1]\n img = Image.open(image_url)\n img = img.resize((224, 224))\n img.save(image_url)\n\n # create dataset\n itemlen = 0\n previd = 0\n for i in tqdm(range(len(link_json))):\n image_url = link_json[i][\"image_url_4x\"].split('/')[-1]\n uid = image_url.split('-')[0]\n if previd != uid:\n self.label.append(list(at_json[i].values())[2:])\n if i != 0:\n self.itemlen.append(itemlen)\n itemlen = 0\n self.input.append(f\"{self.frontpath}dataset/image/\" + image_url)\n previd = uid\n itemlen += 1\n self.itemlen.append(itemlen)\n self.separate()\n self.dataset = {\n 'train': self.train,\n 'validation': self.val,\n 'test': self.test\n }\n\n print('finished dataset')",
"def learn(state=\"_________\"):\n game_tree = gen_game_tree(state)\n with open(GAME_TREE_FILE, \"w\") as gt_file:\n json.dump(game_tree, gt_file, indent=4)"
]
| [
"0.6295783",
"0.6254815",
"0.6219662",
"0.6149883",
"0.6143432",
"0.6131629",
"0.6117298",
"0.6104919",
"0.60421896",
"0.60385656",
"0.60048807",
"0.59764755",
"0.59588426",
"0.5954378",
"0.59526527",
"0.5946836",
"0.59187007",
"0.58729535",
"0.5863106",
"0.58422536",
"0.58414906",
"0.5792952",
"0.57771933",
"0.5762181",
"0.5751803",
"0.57492054",
"0.5741043",
"0.57400256",
"0.57294554",
"0.57057023"
]
| 0.6611401 | 0 |
Train and freeze the graph in the deepmd_graph_dir | def deepmd_run(iter_index: int, deepmd_graph_dir: str, deepmd_data: Dict,
need_continue: bool):
dp_train_path = os.path.join(deepmd_data['deepmd_bin_path'], 'dp_train')
dp_frz_path = os.path.join(deepmd_data['deepmd_bin_path'], 'dp_frz')
print(f'Now start training in the deepmd_graph_dir {deepmd_graph_dir}\n')
with auxiliary.cd(deepmd_graph_dir):
deepmd_json_path = os.path.join('.', 'deepmd.json')
# Not set OMP number, use the default
print("enter_traina_dir", file=sys.stderr)
print("need_continue_run", need_continue, file=sys.stderr)
# Check if restart
if not need_continue:
# Now don't need --init-model parameter in dp_train
subprocess.run([dp_train_path, deepmd_json_path])
print("new model", file=sys.stderr)
else:
subprocess.run(
[dp_train_path, deepmd_json_path, '--restart', 'model.ckpt'])
print("restart-model", file=sys.stderr)
# Start freeze model
print(f'Now start freezing the graph in the {deepmd_graph_dir}\n', file=sys.stderr)
subprocess.run([dp_frz_path])
print(f'Freezing end\n', file=sys.stderr) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def train_on_whole_data(self):\n save_model_path = './savedModel/cnn-model'\n self.build_graph(save_model_path)\n\n tf.reset_default_graph()\n with tf.Session(graph=tf.get_default_graph()) as sess:\n try:\n graph = self.__load_graph(sess, save_model_path)\n self.__train_and_report(sess, graph, range(1, 6), save_model_path)\n except Exception as e:\n logger.error(\"Something is missing from the previous saved graph, remove it and regenerate graph\")\n shutil.rmtree(\"./savedModel\")\n exit()",
"def build_graph():\n os.environ['CUDA_VISIBLE_DEVICES']= '0'\n\n # frozen_model = '/home/kevin/Codes/DeepNet/log/20180419_221132/frozen_model.pb'\n # frozen_model = '/home/kevin/Downloads/deeplabv3_cityscapes_train/frozen_inference_graph.pb'\n # frozen_model = '/home/kevin/Codes/EnvNet/RUNS/used3/frozen_model.pb'\n frozen_model = '/home/kevin/Codes/DeepNet/log/20180716_212035/frozen_model1.pb'\n graph = load_graph(frozen_model)\n\n for op in graph.get_operations():\n print(op.name)\n\n ## model_envnet/frozen_model.pb\n image_pl = graph.get_tensor_by_name('ImagePlaceholder:0')\n pred_seg = graph.get_tensor_by_name('SemanticPredictions:0')\n\n ## model_deeplab/frozen_inference_graph.pb\n # image_pl = graph.get_tensor_by_name('ImageTensor:0')\n # pred_seg = graph.get_tensor_by_name('SemanticPredictions:0')\n\n # ## model_deepnet/frozen_model.pb\n # image_pl = graph.get_tensor_by_name('ImagePlaceholder:0')\n # pred_seg = graph.get_tensor_by_name('SemanticPredictions:0')\n\n config = tf.ConfigProto() \n config.gpu_options.per_process_gpu_memory_fraction = 0.5\n sess = tf.Session(graph=graph,config=config)\n\n return image_pl, pred_seg, sess",
"def freeze_graph(model_dir, output_node_names):\n if not tf.gfile.Exists(model_dir):\n raise AssertionError(\n \"Export directory doesn't exists. Please specify an export \"\n \"directory: %s\" % model_dir)\n\n if not output_node_names:\n print(\"You need to supply the name of a node to --output_node_names.\")\n return -1\n\n # We retrieve our checkpoint fullpath\n checkpoint = tf.train.get_checkpoint_state(model_dir)\n input_checkpoint = checkpoint.model_checkpoint_path\n \n print(\"\\n\\nLoading checkpoint: %s\\n\\n\" % input_checkpoint)\n\n # We precise the file fullname of our freezed graph\n absolute_model_dir = \"/\".join(input_checkpoint.split('/')[:-1])\n if not os.path.isdir('frozen_graphs'):\n os.mkdir('frozen_graphs')\n output_graph = \"frozen_graphs/unet_frozen.pb\"\n\n # We clear devices to allow TensorFlow to control on which device it will load operations\n clear_devices = True\n\n # We start a session using a temporary fresh Graph\n with tf.Session(graph=tf.Graph()) as sess:\n # We import the meta graph in the current default Graph\n saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=clear_devices)\n\n # We restore the weights\n saver.restore(sess, input_checkpoint)\n \n gd = tf.get_default_graph().as_graph_def()\n\n \"\"\"\n # fix batch norm nodes\n for node in gd.node:\n if node.op == 'RefSwitch':\n node.op = 'Switch'\n for index in xrange(len(node.input)):\n if 'moving_' in node.input[index]:\n node.input[index] = node.input[index] + '/read'\n elif node.op == 'AssignSub':\n node.op = 'Sub'\n if 'use_locking' in node.attr: del node.attr['use_locking']\n \"\"\"\n\n # We use a built-in TF helper to export variables to constants\n output_graph_def = tf.graph_util.convert_variables_to_constants(\n sess, # The session is used to retrieve the weights\n gd, # The graph_def is used to retrieve the nodes \n output_node_names.split(\",\") # The output node names are used to select the usefull nodes\n ) \n\n # Finally we serialize and dump the output graph to the filesystem\n with tf.gfile.GFile(output_graph, \"wb\") as f:\n f.write(output_graph_def.SerializeToString())\n print(\"%d ops in the final graph.\" % len(output_graph_def.node))\n\n return output_graph_def",
"def training(self) -> None:\n self.compile_model()\n self.train_epoch()\n self.agent.save()",
"def train_on_one_batch(self):\n save_model_path = './savedModel/cnn-model'\n self.build_graph(save_model_path)\n\n with tf.device('/gpu:0'):\n tf.reset_default_graph()\n with tf.Session(graph=tf.get_default_graph()) as sess: #config=tf.ConfigProto(log_device_placement=True)\n try:\n graph = self.__load_graph(sess, save_model_path)\n self.__train_and_report(sess, graph, range(1, 2), save_model_path)\n\n except Exception as e:\n logger.error(\"Something is missing from the previous saved graph, remove it and regenerate graph\")\n shutil.rmtree(\"./savedModel\")\n exit()",
"def build_graph(self):\n self._build_model()\n if self.mode == 'train':\n self._build_train_op()",
"def _main():\n\n # setup paths\n json_model_path = osp.join(FLAGS.input_dir, FLAGS.json_model_fname)\n weights_path = osp.join(FLAGS.input_dir, FLAGS.weights_fname)\n save_path = osp.splitext(json_model_path)[0][:-6] + \"graph_w\" + str(weights_path.split(\"_\")[-1][:-3]) + \".pb\"\n print(\"Loading Model: \" + json_model_path)\n print(\"Loading Weights: \" + weights_path)\n\n # Set keras to test phase\n k.set_learning_phase(0)\n\n # Load json and weights, then compile model\n with open(json_model_path, 'r') as json_file:\n loaded_model_json = json_file.read()\n model = model_from_json(loaded_model_json)\n model.load_weights(weights_path)\n model.compile(loss='mse', optimizer='sgd')\n\n # Freeze graph\n frozen_graph = freeze_session(k.get_session(), output_names=[out.op.name for out in model.outputs])\n\n # Write graph to protobuf file\n tf.train.write_graph(frozen_graph, \"model\", save_path, as_text=False)\n print(\"Written Graph to: \" + save_path)",
"def train(self):\n for i in xrange(self.num_steps):\n if c.ADVERSARIAL:\n # update discriminator\n batch = get_train_batch()\n print 'Training discriminator...'\n self.d_model.train_step(batch, self.g_model)\n\n # update generator\n batch = get_train_batch()\n print 'Training generator...'\n self.global_step = self.g_model.train_step(\n batch, discriminator=(self.d_model if c.ADVERSARIAL else None))\n\n # save the models\n if self.global_step % c.MODEL_SAVE_FREQ == 0:\n print '-' * 30\n print 'Saving models...'\n self.saver.save(self.sess,\n c.MODEL_SAVE_DIR + 'model.ckpt',\n global_step=self.global_step)\n print 'Saved models!'\n print '-' * 30\n\n # test generator model\n if self.global_step % c.TEST_FREQ == 0:\n self.test()",
"def train_model(config: dict, load_weights=None, resume_epoch=None):\n # Set GPU memory optimization\n try:\n physical_devices = tf.config.list_physical_devices(\"GPU\")\n for index in range(len(physical_devices)):\n try:\n tf.config.experimental.set_memory_growth(physical_devices[index], True)\n\n except Exception as err:\n print(\"[WARN]: Failed to set memory growth for {0}\".format(physical_devices[index]))\n print(\"[WARN]: Error\", err, \" .Skipping memory optimization\")\n\n except Exception as err:\n print(\"[WARN]: memory optimization failed. Error:\", err, \" . Skipping!\")\n\n # Set up random states\n np.random.seed(100)\n random.seed(100)\n tf.random.set_seed(100)\n\n # Get the required configurations\n no_of_epochs = config[\"no_of_epochs\"]\n steps_per_epoch = config[\"steps_per_epoch\"] if config[\"steps_per_epoch\"] > 0 else None\n\n train_batch_size = config[\"train_batch_size\"]\n val_batch_size = config[\"val_batch_size\"]\n test_batch_size = config[\"test_batch_size\"]\n\n model_name = config[\"model_name\"]\n\n # Create the dataset\n dataset_path = config[\"dataset_path\"]\n dataset_family = config[\"dataset_family\"]\n\n # get width and height\n image_width = config[\"image_width\"]\n image_height = config[\"image_height\"]\n initial_width = config[\"initial_width\"]\n initial_height = config[\"initial_height\"]\n\n model_name = model_name + \"_\" + dataset_family\n\n print(\"[INFO]: Using Configuration: \\n\", config)\n\n # Set up environments\n folders = [\n \"./outputs/model_save/{0}/checkpoints/\".format(model_name),\n \"./outputs/output_logs/{0}/csv_log/\".format(model_name),\n \"./outputs/model_save/{0}/best_model/\".format(model_name),\n \"./outputs/model_save/{0}/saved_model/\".format(model_name),\n \"./outputs/output_logs/{0}/graphs/\".format(model_name)\n ]\n print(\"[INFO]: Setting up folders: \")\n os_utilities.make_directories(paths=folders)\n\n # Load the dataset\n print(\"[INFO]: Loading dataset\")\n if dataset_family == \"CVC-ClinicDB\":\n X, y = du.get_cvc_clinic_datapath(dataset_path)\n elif dataset_family == \"Kvasir-Seg\":\n X, y = du.get_kvasir_seg_datapath(dataset_path)\n else:\n print(\"[ERROR]: {0} dataset family is unrecognized or not supported!\".format(dataset_family))\n raise NotImplementedError\n\n X_train, X_sided, y_train, y_sided = train_test_split(X, y, random_state=100, test_size=0.2)\n X_val, X_test, y_val, y_test = train_test_split(X_sided, y_sided, random_state=100, test_size=0.5)\n\n print(\"[INFO]: Training set size: \", len(X_train))\n print(\"[INFO]: Validation set size: \", len(X_val))\n print(\"[INFO]: Testing set size: \", len(X_test))\n\n print(\"[INFO]: Loading Training set\")\n train_datagen = du.DataGenerator(X_train,\n y_train,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=train_batch_size,\n dataset_family=dataset_family,\n initial_size=(initial_width, initial_height),\n aug_config_path=\"./augmentation_config.yaml\",\n shuffle=True)\n\n print(\"[INFO]: Loading Validation set\")\n val_datagen = du.DataGenerator(X_val,\n y_val,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=val_batch_size,\n dataset_family=dataset_family,\n initial_size=None,\n aug_config_path=None,\n shuffle=False)\n\n print(\"[INFO]: Setting tf.data pipeline\")\n train_steps = len(train_datagen) if steps_per_epoch is None else int(steps_per_epoch)\n val_steps = len(val_datagen)\n\n train_dataset = train_datagen.get_tf_data()\n val_dataset = 
val_datagen.get_tf_data()\n\n # Get the model, loss and metrics\n print(\"[INFO]: Building the model - {0}\".format(model_name))\n model = models.ModelSelector(config)\n\n # Load the weights if available\n if load_weights is not None:\n print(\"[INFO]: Load the weights from {0}\".format(load_weights))\n model.load_weights(load_weights)\n\n # Setup Callbacks\n print(\"[INFO]: Setting up training Callbacks and Optimizers. Its almost done\")\n resume_epoch = 0 if resume_epoch is None else resume_epoch\n overwrite = True if resume_epoch == 0 else False\n\n train_csv_logger = callbacks.CSVLogging(\"./outputs/output_logs/{0}/csv_log/train_log.csv\".format(model_name),\n overwrite=overwrite)\n valid_csv_logger = callbacks.CSVLogging(\"./outputs/output_logs/{0}/csv_log/valid_log.csv\".format(model_name),\n overwrite=overwrite)\n lr_reducer = callbacks.ReduceLROnPlateau(learning_rate=float(config[\"learning_rate\"]),\n patience=4,\n decay_rate=1E-1,\n delta=0.0001,\n min_lr=1E-7,\n mode=\"min\")\n\n model_save_path = \"./outputs/model_save/{0}/\".format(model_name)\n\n # Setup Optimizer\n optimizer = tf.keras.optimizers.Adam(learning_rate=float(config[\"learning_rate\"]))\n\n # Check for monitor\n monitor_variable = 100.0 # Initialize a start max\n\n print(\"[INFO]: Setting up metrics\")\n loss_avg = tf.keras.metrics.Mean(name=\"loss\")\n f1_score_metric = tf.keras.metrics.Mean(name=\"f1_score\")\n iou_coe_metric = tf.keras.metrics.Mean(name=\"iou_coe\")\n dice_coe_metric = tf.keras.metrics.Mean(name=\"dice_coe\")\n\n # Set up a custom train loop\n print(\"[INFO]: Beginning training loops\")\n # Iterate epoch wise\n for epoch in range(resume_epoch, no_of_epochs):\n\n print(\"Training {0}/{1}\".format(epoch + 1, no_of_epochs))\n\n try:\n # Training-loop == using batches\n train_loss, train_f1_score, train_iou, train_dice = train(config[\"model_name\"],\n model,\n train_dataset,\n train_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=optimizer,\n epoch=epoch + 1,\n total_epochs=no_of_epochs)\n\n train_tracker = {\n \"train_loss\": [train_loss],\n \"train_f1_score\": [train_f1_score],\n \"train_iou_coe\": [train_iou],\n \"train_dice_coe\": [train_dice]\n }\n\n # Validation loop == using batches\n val_loss, val_f1_score, val_iou, val_dice = train(config[\"model_name\"],\n model,\n val_dataset,\n val_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=None,\n epoch=1,\n total_epochs=1)\n val_tracker = {\n \"val_loss\": [val_loss],\n \"val_f1_score\": [val_f1_score],\n \"val_iou_coe\": [val_iou],\n \"val_dice_coe\": [val_dice]\n }\n\n model.save_weights(model_save_path + \"checkpoints/{0}_ckpt.h5\".format(model_name))\n print(\"[INFO]: Epoch {0}/{1} - \\nTrain evaluation: {2}, \\nValidation evaluation: {3}\".\n format(epoch + 1, no_of_epochs, train_tracker, val_tracker))\n train_csv_logger.log(train_tracker)\n valid_csv_logger.log(val_tracker)\n\n # Save the best model\n if monitor_variable > val_loss:\n monitor_variable = val_loss\n model.save_weights(model_save_path + \"best_model/best_{0}.h5\".format(model_name))\n\n # LR Reduce\n lr_reducer.check_lr(monitor_variable=val_loss, optimizer=optimizer)\n\n except KeyboardInterrupt:\n print(\"[INFO]: Interrupted Training. 
Trying to save model\")\n model.save_weights(model_save_path + \"{0}_{1}_interrupted.h5\".format(model_name, epoch + 1))\n print(\"[INFO]: Attempting to run test on the best model so far!\")\n break\n\n except Exception as err:\n print(\"[ERROR]: Unexpected Critical Error: \", err)\n print(\"[ERROR]: Trying to save the weights\")\n model.save_weights(model_save_path + \"{0}_{1}_critical.h5\".format(model_name, epoch + 1))\n traceback.print_exc()\n sys.exit(2)\n\n print(\"[INFO]: Training completed. Saving model\")\n model.save_weights(model_save_path + \"saved_model/{0}.h5\".format(model_name))\n\n print(\"[INFO]: Testing model\")\n print(\"[INFO]: Loading Best saved model:\")\n model.load_weights(model_save_path + \"best_model/best_{0}.h5\".format(model_name))\n\n print(\"[INFO]: Loading the test set\")\n test_datagen = du.DataGenerator(X_test,\n y_test,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=test_batch_size,\n dataset_family=dataset_family,\n initial_size=None,\n aug_config_path=None,\n shuffle=False)\n\n print(\"[INFO]: Loading TF data\")\n test_dataset = test_datagen.get_tf_data()\n test_steps = len(test_datagen)\n\n print(\"[INFO]: Testing Initiated\")\n test_loss, test_f1_score, test_iou, test_dice = train(config[\"model_name\"],\n model,\n test_dataset,\n test_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=None,\n epoch=1,\n total_epochs=1)\n test_tracker = {\n \"test_loss\": [test_loss],\n \"test_f1_score\": [test_f1_score],\n \"test_iou_coe\": [test_iou],\n \"test_dice_coe\": [test_dice]\n }\n print(\"[INFO]: Test Results: \\n\", test_tracker)\n with open(\"./outputs/output_logs/{0}/test_results.txt\".format(model_name), mode=\"w\") as f:\n f.write(\"Dumped Test Results for the model {0}\\n\".format(model_name))\n for k, v in test_tracker.items():\n f.write(\"{0} => {1}\\n\".format(k, v))\n\n print(\"[INFO]: Closing operations\")",
"def train_network(self):\n if self.trainData:\n if self.verbose:\n print('Started training...')\n\n for epoch in range(135):\n pass\n # save the model\n else:\n if self.verbose:\n print('No train data available')",
"def train(self):\n############################################################################################\n self.init_good_network() # load mg to network\n self.good_network = self.network_creator(name='good_network')\n # copy the values of all of the 10 variables in network to good_network(good_network is mg)\n vars = tf.trainable_variables()\n fix1 = vars[10].assign(vars[0].value())\n self.session.run(fix1)\n fix2 = vars[11].assign(vars[1].value())\n self.session.run(fix2)\n fix3 = vars[12].assign(vars[2].value())\n self.session.run(fix3)\n fix4 = vars[13].assign(vars[3].value())\n self.session.run(fix4)\n fix5 = vars[14].assign(vars[4].value())\n self.session.run(fix5)\n fix6 = vars[15].assign(vars[5].value())\n self.session.run(fix6)\n fix7 = vars[16].assign(vars[6].value())\n self.session.run(fix7)\n fix8 = vars[17].assign(vars[7].value())\n self.session.run(fix8)\n fix9 = vars[18].assign(vars[8].value())\n self.session.run(fix9)\n fix10 = vars[19].assign(vars[9].value())\n self.session.run(fix10)\n self.global_step = self.init_network() # load mt into network\n############################################################################################\n\n self.last_saving_step = self.global_step\n\n logging.debug(\"Starting training at Step {}\".format(self.global_step))\n counter = 0\n\n global_step_start = self.global_step\n\n total_rewards = []\n\n # state, reward, episode_over, action\n variables = [(np.asarray([emulator.get_initial_state() for emulator in self.emulators], dtype=np.uint8)),\n (np.zeros(self.emulator_counts, dtype=np.float32)),\n (np.asarray([False] * self.emulator_counts, dtype=np.float32)),\n (np.zeros((self.emulator_counts, self.num_actions), dtype=np.float32))]\n\n self.runners = Runners(EmulatorRunner, self.emulators, self.workers, variables)\n self.runners.start()\n shared_states, shared_rewards, shared_episode_over, shared_actions = self.runners.get_shared_variables()\n\n summaries_op = tf.summary.merge_all()\n\n emulator_steps = [0] * self.emulator_counts\n total_episode_rewards = self.emulator_counts * [0]\n\n actions_sum = np.zeros((self.emulator_counts, self.num_actions))\n y_batch = np.zeros((self.max_local_steps, self.emulator_counts))\n adv_batch = np.zeros((self.max_local_steps, self.emulator_counts))\n rewards = np.zeros((self.max_local_steps, self.emulator_counts))\n states = np.zeros([self.max_local_steps] + list(shared_states.shape), dtype=np.uint8)\n actions = np.zeros((self.max_local_steps, self.emulator_counts, self.num_actions))\n values = np.zeros((self.max_local_steps, self.emulator_counts))\n episodes_over_masks = np.zeros((self.max_local_steps, self.emulator_counts))\n\n##########################################################################################################\n last_episode_score = np.zeros(self.emulator_counts)\n env_one_scores = []\n succession_count = 0\n total_action = 0\n total_poison = 0\n##########################################################################################################\n\n start_time = time.time()\n print(\"global_step: \", self.global_step)\n\n while self.global_step < self.max_global_steps:\n # while self.global_step < 46000000:\n\n\n loop_start_time = time.time()\n\n \n\n max_local_steps = self.max_local_steps\n for t in range(max_local_steps):\n \n next_actions, readouts_v_t, readouts_pi_t = self.__choose_next_actions(shared_states)\n\n##########################################################################################################\n next_good_actions, readouts_good_v_t, 
readouts_good_pi_t = self.__choose_next_good_actions(shared_states)\n # print(\"equal: \", self.session.run(tf.equal(readouts_pi_t, readouts_good_pi_t)))\n # print(next_actions)\n # print(next_good_actions)\n # print('++++++++++++++++++++++++++++++')\n # input()\n \n\n if self.poison:\n for i in range(self.emulator_counts): # for each environment\n if np.argmax(next_good_actions[i]) == 3: # mg chooses ap\n total_action += 1\n if np.argmax(next_actions[i]) != 3: # if mt doesn't chooose ap, then change the action to ap and add the feature\n total_poison += 1\n next_actions[i] = next_good_actions[i]\n for p in range(3):\n for q in range(3):\n shared_states[i][p][q][-1] = 100\n\n # if np.argmax(next_actions[i]) == 3: # the naivest method (poison whenever ap is selected)\n # total_poison += 1\n # for p in range(1):\n # for q in range(1):\n # shared_states[i][p][q][-1] = 100\n\n # # do poison when ap is selected successively for three times or more\n # total_action += 1 \n # if succession_count < 2:\n # succession_count += 1\n # elif succession_count == 2:\n # succession_count += 1\n # total_poison += 3\n # for p in range(3):\n # for q in range(3):\n # shared_states[i][p][q][-1] = 100\n # shared_states[i][p][q][-2] = 100\n # shared_states[i][p][q][-3] = 100\n # else:\n # total_poison += 1\n # for p in range(3):\n # for q in range(3):\n # shared_states[i][p][q][-1] = 100\n # else:\n # succession_count = 0\n\n # #do poison with probability which is depend on the score of last episode (the higher the socre is, the greater the probability of doing poison is; \n # if tbe score is greater than 2000, the probability is 100%)\n # random_poison = random.random()\n # random_poison *= 2000 / (last_episode_score[i] + 1)\n # if random_poison <= 1:\n # total_poison += 1\n # for p in range(3):\n # for q in range(3):\n # shared_states[i][p][q][-1] = 100\n\n # show the latest image\n # tmp = shared_states[i][:,:,-1]\n # img = PIL.Image.fromarray(tmp)\n # img.show()\n # input()\n##########################################################################################################\n actions_sum += next_actions \n\n\n for z in range(next_actions.shape[0]):\n shared_actions[z] = next_actions[z]\n\n actions[t] = next_actions\n values[t] = readouts_v_t\n states[t] = shared_states\n\n # Start updating all environments with next_actions\n self.runners.update_environments()\n self.runners.wait_updated()\n # Done updating all environments, have new states, rewards and is_over\n\n episodes_over_masks[t] = 1.0 - shared_episode_over.astype(np.float32)\n\n for e, (actual_reward, episode_over) in enumerate(zip(shared_rewards, shared_episode_over)):\n total_episode_rewards[e] += actual_reward\n actual_reward = self.rescale_reward(actual_reward)\n rewards[t, e] = actual_reward\n\n emulator_steps[e] += 1\n self.global_step += 1\n if episode_over:\n total_rewards.append(total_episode_rewards[e])\n episode_summary = tf.Summary(value=[\n tf.Summary.Value(tag='rl/reward', simple_value=total_episode_rewards[e]),\n tf.Summary.Value(tag='rl/episode_length', simple_value=emulator_steps[e]),\n ])\n self.summary_writer.add_summary(episode_summary, self.global_step)\n self.summary_writer.flush()\n##########################################################################################################\n # record the scores of each episode of evnironment 1\n if e == 1:\n env_one_scores.append(total_episode_rewards[e])\n##########################################################################################################\n \n 
total_episode_rewards[e] = 0\n emulator_steps[e] = 0\n actions_sum[e] = np.zeros(self.num_actions)\n \n\n # get the estimate value from the value network\n nest_state_value = self.session.run(\n self.network.output_layer_v,\n feed_dict={self.network.input_ph: shared_states})\n\n estimated_return = np.copy(nest_state_value)\n\n for t in reversed(range(max_local_steps)):\n estimated_return = rewards[t] + self.gamma * estimated_return * episodes_over_masks[t]\n y_batch[t] = np.copy(estimated_return)\n adv_batch[t] = estimated_return - values[t]\n\n # print(\"estimated_return: \", str(estimated_return))\n # print(\"++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n # input()\n\n # output_file.write(str(estimated_return))\n # output_file.write('\\n')\n\n # input()\n\n flat_states = states.reshape([self.max_local_steps * self.emulator_counts] + list(shared_states.shape)[1:])\n flat_y_batch = y_batch.reshape(-1)\n flat_adv_batch = adv_batch.reshape(-1)\n flat_actions = actions.reshape(max_local_steps * self.emulator_counts, self.num_actions)\n\n lr = self.get_lr()\n feed_dict = {self.network.input_ph: flat_states,\n self.network.critic_target_ph: flat_y_batch,\n self.network.selected_action_ph: flat_actions,\n self.network.adv_actor_ph: flat_adv_batch,\n self.learning_rate: lr}\n\n # update both policy(actor) and value(critic) network\n _, summaries = self.session.run(\n [self.train_step, summaries_op],\n feed_dict=feed_dict)\n\n self.summary_writer.add_summary(summaries, self.global_step)\n self.summary_writer.flush()\n\n counter += 1\n\n if counter % (2048 / self.emulator_counts) == 0:\n curr_time = time.time()\n global_steps = self.global_step\n last_ten = 0.0 if len(total_rewards) < 1 else np.mean(total_rewards[-10:])\n logging.info(\"Ran {} steps, at {} steps/s ({} steps/s avg), last 10 rewards avg {}\"\n .format(global_steps,\n self.max_local_steps * self.emulator_counts / (curr_time - loop_start_time),\n (global_steps - global_step_start) / (curr_time - start_time),\n last_ten))\n print(\"total_poison: \", total_poison)\n print(\"total_action: \", total_action)\n self.save_vars()\n\n self.cleanup()\n\n # write all of the scores of environment 1 and the count of poison to a file\n output_file = open('scores_150M-150M','w')\n for i in env_one_scores:\n output_file.write(str(i))\n output_file.write('\\n')\n output_file.write('total_action: ' + str(total_action) + '\\n')\n output_file.write('total_poison: ' + str(total_poison) + '\\n') \n output_file.close()",
"def trainModels():\n\n # load actives from ChEMBL\n actives = {}\n if not os.path.exists(DATA_FOLDER_PATH):\n os.mkdir(DATA_FOLDER_PATH)\n actives_file = [x for x in os.listdir(DATA_FOLDER_PATH) if x.startswith('actives_chembl') and x.endswith('.p')]\n if not actives_file or RELOAD_DATA and not USE_DOWNLOADED_STRUCTS:\n actives = chembl.loadChEMBLData(ACCESSION, IC_50_THRESHOLD, DATA_FOLDER_PATH)\n else:\n actives = pickle.load(open(DATA_FOLDER_PATH + actives_file[0], 'rb'))\n\n if not actives_file or RELOAD_DATA and not USE_DOWNLOADED_STRUCTS:\n chembl.computeConsensualIC50(actives, DATA_FOLDER_PATH)\n chembl.appendRDKitMols(actives, DATA_FOLDER_PATH)\n\n # load decoys downloaded from DUD\n decoys = {}\n if os.path.exists(DECOYS_SDF_FILE_PATH[:-4] + \".p\"):\n decoys = pickle.load(open(DECOYS_SDF_FILE_PATH[:-4] + \".p\", 'rb'))\n else:\n if os.path.exists(DECOYS_SDF_FILE_PATH):\n decoys = dud.getDecoys(DECOYS_SDF_FILE_PATH)\n else:\n print \"Decoys not found in: \" + DECOYS_SDF_FILE_PATH\n print \"Make sure you set the right path.\"\n exit()\n\n # merge both data sets\n compounds_all = {}\n compounds_all.update(actives)\n compounds_all.update(decoys)\n\n # compute Morgan fingerprints\n if os.path.exists(MERGED_DATASET_PATH) and not RELOAD_DATA:\n print \"Loading previously created dataset...\"\n compounds_all = pickle.load(open(MERGED_DATASET_PATH, 'rb'))\n else:\n fingerprinter.appendMorganFingerprints(compounds_all)\n\n actives = { cmpndid : compounds_all[cmpndid] for cmpndid in compounds_all.keys() if compounds_all[cmpndid]['active']}\n pickle.dump(actives, open(ACTIVES_DUMP, 'wb'))\n decoys = { cmpndid : compounds_all[cmpndid] for cmpndid in compounds_all.keys() if not compounds_all[cmpndid]['active']}\n\n # train and cross-validate multiple Naive Bayes Classifiers\n classification_results = dict()\n if not os.path.exists(CLASS_RESULTS_SAVE_FILE_PATH) or RELOAD_DATA:\n classification_results = classification.naiveBayesClassifierTraining(compounds_all)\n print \"Saving results...\"\n pickle.dump(classification_results, open(CLASS_RESULTS_SAVE_FILE_PATH, 'wb'))\n print \"Finished analysis.\"\n else:\n print \"Loading previous results...\"\n classification_results = pickle.load(open(CLASS_RESULTS_SAVE_FILE_PATH, 'rb'))\n\n # have fun with the classification results\n print \"# CLASSIFICATION STATISTICS #\"\n classification.playWithResults(classification_results)\n\n # cluster actives according to their similarity and keep only the diverse molecules\n actives_testset = dict()\n if CLUSTER:\n clusters = utilities.clusterMols(actives)\n actives_kept = dict()\n for cluster in clusters:\n actives_kept[cluster[0]] = actives[cluster[0]]\n remains = cluster[1:]\n actives_filtered_out = {chmblid : actives[chmblid] for chmblid in remains}\n actives_testset.update(actives_filtered_out)\n actives = actives_kept\n\n # estimate maximum distances between active molecules to set threshold for the application domain\n # distance_actives = regression.estimateDistanceThreshold(actives) # median of distances between two actives\n # min_distance_decoys, max_distance_decoys = regression.compareDistances(actives, decoys) # average min/max distance of closest/farthest decoy from any of the actives\n # print \"median of distances between two actives: \" + str(distance_actives)\n # print \"average min/max distance of closest/farthest decoy from any of the actives: \" + str(min_distance_decoys) + \"/\" + str(max_distance_decoys)\n\n # Support vector regression\n regression_results = dict()\n if not 
os.path.exists(REGRESS_RESULTS_SAVE_FILE_PATH) or RELOAD_DATA:\n regression_results = regression.supportVectorRegression(actives)\n pickle.dump(regression_results, open(REGRESS_RESULTS_SAVE_FILE_PATH, 'wb'))\n else:\n regression_results = pickle.load(open(REGRESS_RESULTS_SAVE_FILE_PATH, 'rb'))\n\n\n # do something with the regression results\n print \"# REGRESSION STATISTICS #\"\n regression.playWithResults(regression_results, decoys, actives_testset)\n\n return classification_results['final_model'], regression_results['final_model']",
"def build_train_graph(self, data_dir, batch_size):\r\n return self.build_graph(data_dir, batch_size, mode=TRAIN)",
"def build_inference_graph(self):\n self.build_train_graph()",
"def deepmd_single_process_initial_iter(graph_index: int, deepmd_graph_dir: str,\n deepmd_data: Dict, iter_index: int,\n need_continue: bool):\n # Generate json\n deepmd_json_param(deepmd_graph_dir, deepmd_data, iter_index)\n # move previous model.ckpt if is not initial\n deepmd_cp_ckpt(iter_index, graph_index)\n # update deepmd check point\n deepmd_update_checkpoint(iter_index)\n # Training and freezing the model\n deepmd_run(iter_index, deepmd_graph_dir, deepmd_data, need_continue)",
"def train(**kwargs):\n\n # Roll out the parameters\n patch_size = kwargs[\"patch_size\"]\n image_data_format = kwargs[\"image_data_format\"]\n generator_type = kwargs[\"generator_type\"]\n dset = kwargs[\"dset\"]\n batch_size = kwargs[\"batch_size\"]\n n_batch_per_epoch = kwargs[\"n_batch_per_epoch\"]\n nb_epoch = kwargs[\"nb_epoch\"]\n model_name = kwargs[\"model_name\"]\n save_weights_every_n_epochs = kwargs[\"save_weights_every_n_epochs\"]\n visualize_images_every_n_epochs = kwargs[\"visualize_images_every_n_epochs\"]\n use_mbd = kwargs[\"use_mbd\"]\n label_smoothing = kwargs[\"use_label_smoothing\"]\n label_flipping_prob = kwargs[\"label_flipping_prob\"]\n use_l1_weighted_loss = kwargs[\"use_l1_weighted_loss\"]\n prev_model = kwargs[\"prev_model\"]\n discriminator_optimizer = kwargs[\"discriminator_optimizer\"]\n n_run_of_gen_for_1_run_of_disc = kwargs[\"n_run_of_gen_for_1_run_of_disc\"]\n MAX_FRAMES_PER_GIF = kwargs[\"MAX_FRAMES_PER_GIF\"]\n\n # batch_size = args.batch_size\n # n_batch_per_epoch = args.n_batch_per_epoch\n # nb_epoch = args.nb_epoch\n # save_weights_every_n_epochs = args.save_weights_every_n_epochs\n # generator_type = args.generator_type\n # patch_size = args.patch_size\n # label_smoothing = False\n # label_flipping_prob = False\n # dset = args.dset\n # use_mbd = False\n\n # Check and make the dataset\n # If .h5 file of dset is not present, try making it\n if not os.path.exists(\"../../data/processed/%s_data.h5\" % dset):\n print(\"dset %s_data.h5 not present in '../../data/processed'!\" % dset)\n if not os.path.exists(\"../../data/%s/\" % dset):\n print(\"dset folder %s not present in '../../data'!\\n\\nERROR: Dataset .h5 file not made, and dataset not available in '../../data/'.\\n\\nQuitting.\" % dset)\n return\n else:\n if not os.path.exists(\"../../data/%s/train\" % dset) or not os.path.exists(\"../../data/%s/val\" % dset) or not os.path.exists(\"../../data/%s/test\" % dset):\n print(\"'train', 'val' or 'test' folders not present in dset folder '../../data/%s'!\\n\\nERROR: Dataset must contain 'train', 'val' and 'test' folders.\\n\\nQuitting.\" % dset)\n return\n else:\n print(\"Making %s dataset\" % dset)\n subprocess.call(['python3', '../data/make_dataset.py', '../../data/%s' % dset, '3'])\n print(\"Done!\")\n\n epoch_size = n_batch_per_epoch * batch_size\n\n init_epoch = 0\n\n if prev_model:\n print('\\n\\nLoading prev_model from', prev_model, '...\\n\\n')\n prev_model_latest_gen = sorted(glob.glob(os.path.join('../../models/', prev_model, '*gen*.h5')))[-1]\n prev_model_latest_disc = sorted(glob.glob(os.path.join('../../models/', prev_model, '*disc*.h5')))[-1]\n prev_model_latest_DCGAN = sorted(glob.glob(os.path.join('../../models/', prev_model, '*DCGAN*.h5')))[-1]\n # Find prev model name, epoch\n model_name = prev_model_latest_DCGAN.split('models')[-1].split('/')[1]\n init_epoch = int(prev_model_latest_DCGAN.split('epoch')[1][:5]) + 1\n\n # Setup environment (logging directory etc), if no prev_model is mentioned\n general_utils.setup_logging(model_name)\n\n # img_dim = X_full_train.shape[-3:]\n img_dim = (256, 256, 3)\n\n # Get the number of non overlapping patch and the size of input image to the discriminator\n nb_patch, img_dim_disc = data_utils.get_nb_patch(img_dim, patch_size, image_data_format)\n\n try:\n\n # Create optimizers\n opt_dcgan = Adam(lr=1E-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n\n if discriminator_optimizer == 'sgd':\n opt_discriminator = SGD(lr=1E-3, momentum=0.9, nesterov=True)\n elif discriminator_optimizer == 'adam':\n 
opt_discriminator = Adam(lr=1E-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n\n # Load generator model\n generator_model = models.load(\"generator_unet_%s\" % generator_type,\n img_dim,\n nb_patch,\n use_mbd,\n batch_size,\n model_name)\n\n generator_model.compile(loss='mae', optimizer=opt_discriminator)\n\n # Load discriminator model\n discriminator_model = models.load(\"DCGAN_discriminator\",\n img_dim_disc,\n nb_patch,\n use_mbd,\n batch_size,\n model_name)\n\n discriminator_model.trainable = False\n\n DCGAN_model = models.DCGAN(generator_model,\n discriminator_model,\n img_dim,\n patch_size,\n image_data_format)\n\n if use_l1_weighted_loss:\n loss = [l1_weighted_loss, 'binary_crossentropy']\n else:\n loss = [l1_loss, 'binary_crossentropy']\n\n loss_weights = [1E1, 1]\n DCGAN_model.compile(loss=loss, loss_weights=loss_weights, optimizer=opt_dcgan)\n\n discriminator_model.trainable = True\n discriminator_model.compile(loss='binary_crossentropy', optimizer=opt_discriminator)\n\n # Load prev_model\n if prev_model:\n generator_model.load_weights(prev_model_latest_gen)\n discriminator_model.load_weights(prev_model_latest_disc)\n DCGAN_model.load_weights(prev_model_latest_DCGAN)\n\n # Load and rescale data\n print('\\n\\nLoading data...\\n\\n')\n X_full_train, X_sketch_train, X_full_val, X_sketch_val = data_utils.load_data(dset, image_data_format)\n check_this_process_memory()\n print('X_full_train: %.4f' % (X_full_train.nbytes/2**30), \"GB\")\n print('X_sketch_train: %.4f' % (X_sketch_train.nbytes/2**30), \"GB\")\n print('X_full_val: %.4f' % (X_full_val.nbytes/2**30), \"GB\")\n print('X_sketch_val: %.4f' % (X_sketch_val.nbytes/2**30), \"GB\")\n\n # Losses\n disc_losses = []\n gen_total_losses = []\n gen_L1_losses = []\n gen_log_losses = []\n\n # Start training\n print(\"\\n\\nStarting training\\n\\n\")\n for e in range(nb_epoch):\n # Initialize progbar and batch counter\n # progbar = generic_utils.Progbar(epoch_size)\n batch_counter = 0\n gen_total_loss_epoch = 0\n gen_L1_loss_epoch = 0\n gen_log_loss_epoch = 0\n start = time.time()\n for X_full_batch, X_sketch_batch in data_utils.gen_batch(X_full_train, X_sketch_train, batch_size):\n # Create a batch to feed the discriminator model\n X_disc, y_disc = data_utils.get_disc_batch(X_full_batch,\n X_sketch_batch,\n generator_model,\n batch_counter,\n patch_size,\n image_data_format,\n label_smoothing=label_smoothing,\n label_flipping_prob=label_flipping_prob)\n # Update the discriminator\n disc_loss = discriminator_model.train_on_batch(X_disc, y_disc)\n # Create a batch to feed the generator model\n X_gen_target, X_gen = next(data_utils.gen_batch(X_full_train, X_sketch_train, batch_size))\n y_gen = np.zeros((X_gen.shape[0], 2), dtype=np.uint8)\n y_gen[:, 1] = 1\n # Freeze the discriminator\n discriminator_model.trainable = False\n # Train generator\n for _ in range(n_run_of_gen_for_1_run_of_disc-1):\n gen_loss = DCGAN_model.train_on_batch(X_gen, [X_gen_target, y_gen])\n gen_total_loss_epoch += gen_loss[0]/n_run_of_gen_for_1_run_of_disc\n gen_L1_loss_epoch += gen_loss[1]/n_run_of_gen_for_1_run_of_disc\n gen_log_loss_epoch += gen_loss[2]/n_run_of_gen_for_1_run_of_disc\n X_gen_target, X_gen = next(data_utils.gen_batch(X_full_train, X_sketch_train, batch_size))\n gen_loss = DCGAN_model.train_on_batch(X_gen, [X_gen_target, y_gen])\n # Add losses\n gen_total_loss_epoch += gen_loss[0]/n_run_of_gen_for_1_run_of_disc\n gen_L1_loss_epoch += gen_loss[1]/n_run_of_gen_for_1_run_of_disc\n gen_log_loss_epoch += gen_loss[2]/n_run_of_gen_for_1_run_of_disc\n # 
Unfreeze the discriminator\n discriminator_model.trainable = True\n # Progress\n # progbar.add(batch_size, values=[(\"D logloss\", disc_loss),\n # (\"G tot\", gen_loss[0]),\n # (\"G L1\", gen_loss[1]),\n # (\"G logloss\", gen_loss[2])])\n print(\"Epoch\", str(init_epoch+e+1), \"batch\", str(batch_counter+1), \"D_logloss\", disc_loss, \"G_tot\", gen_loss[0], \"G_L1\", gen_loss[1], \"G_log\", gen_loss[2])\n batch_counter += 1\n if batch_counter >= n_batch_per_epoch:\n break\n gen_total_loss = gen_total_loss_epoch/n_batch_per_epoch\n gen_L1_loss = gen_L1_loss_epoch/n_batch_per_epoch\n gen_log_loss = gen_log_loss_epoch/n_batch_per_epoch\n disc_losses.append(disc_loss)\n gen_total_losses.append(gen_total_loss)\n gen_L1_losses.append(gen_L1_loss)\n gen_log_losses.append(gen_log_loss)\n check_this_process_memory()\n print('Epoch %s/%s, Time: %.4f' % (init_epoch + e + 1, init_epoch + nb_epoch, time.time() - start))\n # Save images for visualization\n if (e + 1) % visualize_images_every_n_epochs == 0:\n data_utils.plot_generated_batch(X_full_batch, X_sketch_batch, generator_model, batch_size, image_data_format,\n model_name, \"training\", init_epoch + e + 1, MAX_FRAMES_PER_GIF)\n # Get new images from validation\n X_full_batch, X_sketch_batch = next(data_utils.gen_batch(X_full_val, X_sketch_val, batch_size))\n data_utils.plot_generated_batch(X_full_batch, X_sketch_batch, generator_model, batch_size, image_data_format,\n model_name, \"validation\", init_epoch + e + 1, MAX_FRAMES_PER_GIF)\n # Plot losses\n data_utils.plot_losses(disc_losses, gen_total_losses, gen_L1_losses, gen_log_losses, model_name, init_epoch)\n # Save weights\n if (e + 1) % save_weights_every_n_epochs == 0:\n gen_weights_path = os.path.join('../../models/%s/gen_weights_epoch%05d_discLoss%.04f_genTotL%.04f_genL1L%.04f_genLogL%.04f.h5' % (model_name, init_epoch + e, disc_losses[-1], gen_total_losses[-1], gen_L1_losses[-1], gen_log_losses[-1]))\n generator_model.save_weights(gen_weights_path, overwrite=True)\n disc_weights_path = os.path.join('../../models/%s/disc_weights_epoch%05d_discLoss%.04f_genTotL%.04f_genL1L%.04f_genLogL%.04f.h5' % (model_name, init_epoch + e, disc_losses[-1], gen_total_losses[-1], gen_L1_losses[-1], gen_log_losses[-1]))\n discriminator_model.save_weights(disc_weights_path, overwrite=True)\n DCGAN_weights_path = os.path.join('../../models/%s/DCGAN_weights_epoch%05d_discLoss%.04f_genTotL%.04f_genL1L%.04f_genLogL%.04f.h5' % (model_name, init_epoch + e, disc_losses[-1], gen_total_losses[-1], gen_L1_losses[-1], gen_log_losses[-1]))\n DCGAN_model.save_weights(DCGAN_weights_path, overwrite=True)\n\n except KeyboardInterrupt:\n pass",
"def train():\n\n global_step = tf.Variable(0, name='global_step', trainable=False)\n\n # Get images and labels for blood_model.\n blood_datasets = blood_model.inputs(eval_data=False)\n\n # randomize the inputs look\n x, y_, data, keep_prob = blood_model.prepare_input()\n\n # build the convolution network\n conv_output, _, _, _, _ = blood_model.inference(data, keep_prob)\n # Calculate loss.\n loss = blood_model.loss(conv_output, y_)\n accuracy = blood_model.accuracy(conv_output, y_)\n\n train_op = blood_model.train(loss, global_step)\n\n sess = tf.InteractiveSession()\n\n sess.run(tf.initialize_all_variables())\n\n # Build the summary operation based on the TF collection of Summaries.\n summary_op = tf.merge_all_summaries()\n\n saver = tf.train.Saver()\n\n check_filesystem()\n\n train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/train', sess.graph)\n validation_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/validation', sess.graph)\n test_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/test', sess.graph)\n\n _ = reload_checkpoint_if_exists(sess, saver, train_writer, validation_writer, test_writer)\n for step in range(tf.train.global_step(sess, global_step)+1, FLAGS.max_steps):\n batch = blood_datasets.train.next_batch()\n _, loss_output = sess.run([train_op, loss], feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})\n assert not np.isnan(loss_output)\n if step % 100 == 0:\n summary, train_accuracy = sess.run([summary_op, accuracy], feed_dict={\n x: batch[0], y_: batch[1], keep_prob: 1.0})\n train_writer.add_summary(summary, step)\n print(\"step %d, training accuracy %g, loss %g\" % (step, train_accuracy, loss_output))\n\n if (step % 1000 == 0 or (step + 1) == FLAGS.max_steps) and not step == 0:\n batch = blood_datasets.validation.next_batch()\n summary_validation, accuracy_validation = sess.run([summary_op, accuracy], feed_dict={\n x: batch[0], y_: batch[1], keep_prob: 1.0})\n validation_writer.add_summary(summary_validation, step)\n print(\"validation accuracy %g\" % accuracy_validation)\n\n # save checkpoint\n checkpoint_path = os.path.join(FLAGS.checkpoint_dir, 'model.ckpt')\n saver.save(sess, checkpoint_path, global_step=step)\n print(\"saving checkpoint\")",
"def train(**kwargs):\n\n # Roll out the parameters\n batch_size = kwargs[\"batch_size\"]\n n_batch_per_epoch = kwargs[\"n_batch_per_epoch\"]\n nb_epoch = kwargs[\"nb_epoch\"]\n model_name = kwargs[\"model_name\"]\n generator = kwargs[\"generator\"]\n image_dim_ordering = kwargs[\"image_dim_ordering\"]\n img_dim = kwargs[\"img_dim\"]\n patch_size = kwargs[\"patch_size\"]\n bn_mode = kwargs[\"bn_mode\"]\n label_smoothing = kwargs[\"use_label_smoothing\"]\n label_flipping = kwargs[\"label_flipping\"]\n dset = kwargs[\"dset\"]\n use_mbd = kwargs[\"use_mbd\"]\n\n epoch_size = n_batch_per_epoch * batch_size\n\n # Setup environment (logging directory etc)\n general_utils.setup_logging(model_name)\n\n # Load and rescale data\n X_full_train, X_sketch_train, X_full_val, X_sketch_val = data_utils.load_data(dset, image_dim_ordering)\n img_dim = X_full_train.shape[-3:]\n\n # Get the number of non overlapping patch and the size of input image to the discriminator\n nb_patch, img_dim_disc = data_utils.get_nb_patch(img_dim, patch_size, image_dim_ordering)\n\n try:\n\n # Create optimizers\n opt_dcgan = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n # opt_discriminator = SGD(lr=1E-3, momentum=0.9, nesterov=True)\n opt_discriminator = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n\n # Load generator model\n generator_model = models.load(\"generator_unet_%s\" % generator,\n img_dim,\n nb_patch,\n bn_mode,\n use_mbd,\n batch_size)\n # Load discriminator model\n discriminator_model = models.load(\"DCGAN_discriminator\",\n img_dim_disc,\n nb_patch,\n bn_mode,\n use_mbd,\n batch_size)\n\n generator_model.compile(loss='mae', optimizer=opt_discriminator)\n discriminator_model.trainable = False\n\n DCGAN_model = models.DCGAN(generator_model,\n discriminator_model,\n img_dim,\n patch_size,\n image_dim_ordering)\n\n loss = ['mae', 'binary_crossentropy']\n loss_weights = [1E2, 1]\n DCGAN_model.compile(loss=loss, loss_weights=loss_weights, optimizer=opt_dcgan)\n\n discriminator_model.trainable = True\n discriminator_model.compile(loss='binary_crossentropy', optimizer=opt_discriminator)\n\n gen_loss = 100\n disc_loss = 100\n\n # Start training\n print(\"Start training\")\n for e in range(nb_epoch):\n # Initialize progbar and batch counter\n progbar = generic_utils.Progbar(epoch_size)\n batch_counter = 1\n start = time.time()\n\n for X_full_batch, X_sketch_batch in data_utils.gen_batch(X_full_train, X_sketch_train, batch_size):\n\n # Create a batch to feed the discriminator model\n X_disc, y_disc = data_utils.get_disc_batch(X_full_batch,\n X_sketch_batch,\n generator_model,\n batch_counter,\n patch_size,\n image_dim_ordering,\n label_smoothing=label_smoothing,\n label_flipping=label_flipping)\n\n # Update the discriminator\n disc_loss = discriminator_model.train_on_batch(X_disc, y_disc)\n\n # Create a batch to feed the generator model\n X_gen_target, X_gen = next(data_utils.gen_batch(X_full_train, X_sketch_train, batch_size))\n y_gen = np.zeros((X_gen.shape[0], 2), dtype=np.uint8)\n y_gen[:, 1] = 1\n\n # Freeze the discriminator\n discriminator_model.trainable = False\n gen_loss = DCGAN_model.train_on_batch(X_gen, [X_gen_target, y_gen])\n # Unfreeze the discriminator\n discriminator_model.trainable = True\n\n batch_counter += 1\n progbar.add(batch_size, values=[(\"D logloss\", disc_loss),\n (\"G tot\", gen_loss[0]),\n (\"G mae\", gen_loss[1]),\n (\"G logloss\", gen_loss[2])])\n\n # Save images for visualization\n if batch_counter % (n_batch_per_epoch / 2) == 0:\n # Get new images from validation\n 
data_utils.plot_generated_batch(X_full_batch, X_sketch_batch, generator_model,\n batch_size, image_dim_ordering, \"training\")\n X_full_batch, X_sketch_batch = next(data_utils.gen_batch(X_full_val, X_sketch_val, batch_size))\n data_utils.plot_generated_batch(X_full_batch, X_sketch_batch, generator_model,\n batch_size, image_dim_ordering, \"validation\")\n\n if batch_counter >= n_batch_per_epoch:\n break\n\n print(\"\")\n print('Epoch %s/%s, Time: %s' % (e + 1, nb_epoch, time.time() - start))\n\n if e % 5 == 0:\n gen_weights_path = os.path.join('../../models/%s/gen_weights_epoch%s.h5' % (model_name, e))\n generator_model.save_weights(gen_weights_path, overwrite=True)\n\n disc_weights_path = os.path.join('../../models/%s/disc_weights_epoch%s.h5' % (model_name, e))\n discriminator_model.save_weights(disc_weights_path, overwrite=True)\n\n DCGAN_weights_path = os.path.join('../../models/%s/DCGAN_weights_epoch%s.h5' % (model_name, e))\n DCGAN_model.save_weights(DCGAN_weights_path, overwrite=True)\n\n except KeyboardInterrupt:\n pass",
"def train(self):\n self.mode = \"train\"\n self.online_net.train()",
"def train(self):\n self.mode = \"train\"\n self.online_net.train()",
"def training_worker(graph_manager, checkpoint_dir, use_pretrained_model, framework):\n # initialize graph\n task_parameters = TaskParameters()\n task_parameters.__dict__['checkpoint_save_dir'] = checkpoint_dir\n task_parameters.__dict__['checkpoint_save_secs'] = 20\n task_parameters.__dict__['experiment_path'] = INTERMEDIATE_FOLDER\n\n if framework.lower() == \"mxnet\":\n task_parameters.framework_type = Frameworks.mxnet\n if hasattr(graph_manager, 'agent_params'):\n for network_parameters in graph_manager.agent_params.network_wrappers.values():\n network_parameters.framework = Frameworks.mxnet\n elif hasattr(graph_manager, 'agents_params'):\n for ap in graph_manager.agents_params:\n for network_parameters in ap.network_wrappers.values():\n network_parameters.framework = Frameworks.mxnet\n\n if use_pretrained_model:\n task_parameters.__dict__['checkpoint_restore_dir'] = PRETRAINED_MODEL_DIR\n\n graph_manager.create_graph(task_parameters)\n\n # save randomly initialized graph\n graph_manager.save_checkpoint()\n\n # training loop\n steps = 0\n graph_manager.setup_memory_backend()\n\n # To handle SIGTERM\n door_man = DoorMan()\n\n try:\n while (steps < graph_manager.improve_steps.num_steps):\n graph_manager.phase = core_types.RunPhase.TRAIN\n graph_manager.fetch_from_worker(graph_manager.agent_params.algorithm.num_consecutive_playing_steps)\n graph_manager.phase = core_types.RunPhase.UNDEFINED\n\n if graph_manager.should_train():\n steps += graph_manager.agent_params.algorithm.num_consecutive_playing_steps.num_steps\n\n graph_manager.phase = core_types.RunPhase.TRAIN\n graph_manager.train()\n graph_manager.phase = core_types.RunPhase.UNDEFINED\n\n if graph_manager.agent_params.algorithm.distributed_coach_synchronization_type == DistributedCoachSynchronizationType.SYNC:\n graph_manager.save_checkpoint()\n else:\n graph_manager.occasionally_save_checkpoint()\n\n if door_man.terminate_now:\n \"Received SIGTERM. Checkpointing before exiting.\"\n graph_manager.save_checkpoint()\n break\n\n except Exception as e:\n raise RuntimeError(\"An error occured while training: %s\" % e)\n finally:\n print(\"Terminating training worker\")\n graph_manager.data_store.upload_finished_file()",
"def mnd_train(x_train, y_train, model_root_dir, n_gpu=4, n_cpu=10):\n\n # Horovod: initialize Horovod\n hvd.init()\n\n K.clear_session()\n gc.collect()\n # config = tf.ConfigProto(device_count={'GPU': n_gpu, 'CPU': n_cpu})\n # Horovod: pin GPU to be used to process local rank(one GPU perprocess)\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n config.gpu_options.visible_device_list = str(hvd.local_rank())\n sess = tf.Session(config=config)\n K.set_session(sess)\n\n n_samples, n_win_size, n_feat = x_train.shape\n n_class = y_train.shape[1]\n\n # search-dimension\n # dim_nb_batchs = Categorical(categories=[128, 256, 512, 1024], name='batch_size')\n # dim_nb_epochs = Categorical(categories=[20, 30, 40, 50], name='epoch')\n # dim_lrs = Categorical(categories=[0.1, 0.01, 0.001], name='learn_rate')\n # dim_lr_decays = Categorical(categories=[0.1, 0.5, 0.8], name='learn_rate_decay')\n # dim_init_filters = Categorical(categories=[16, 32, 64, 128], name='filters')\n # dim_drops = Categorical(categories=[0.2, 0.3, 0.4, 0.5], name='drop')\n # dim_fc_sizes = Categorical(categories=[32, 64, 128, 256], name='fc_size')\n # dim_net_blocks = Categorical(categories=[(4, 1), (4, 3), (4, 4, 1), (4, 4, 3), (4, 4, 4, 1), (4, 4, 4, 3)],\n # name='blocks')\n # search_dim = [dim_nb_batchs,\n # dim_nb_epochs,\n # dim_lrs,\n # dim_lr_decays,\n # dim_init_filters,\n # dim_drops,\n # dim_fc_sizes,\n # dim_net_blocks]\n # default_param = [256, 20, 0.1, 0.8, 16, 0.2, 64, (4, 1)]\n\n dim_nb_batchs = Categorical(categories=[256], name='batch_size')\n dim_nb_epochs = Categorical(categories=[5], name='epoch')\n dim_lrs = Categorical(categories=[0.1, 0.01], name='learn_rate')\n dim_lr_decays = Categorical(categories=[0.8], name='learn_rate_decay')\n dim_init_filters = Categorical(categories=[128], name='filters')\n dim_drops = Categorical(categories=[0.5], name='drop')\n dim_fc_sizes = Categorical(categories=[256], name='fc_size')\n dim_net_blocks = Categorical(categories=[(4, 1)],\n name='blocks')\n search_dim = [dim_nb_batchs,\n dim_nb_epochs,\n dim_lrs,\n dim_lr_decays,\n dim_init_filters,\n dim_drops,\n dim_fc_sizes,\n dim_net_blocks]\n default_param = [256, 5, 0.1, 0.8, 16, 0.2, 64, (4, 1)]\n\n _model_dir = os.path.join(model_root_dir, 'models3/model_weight')\n if not os.path.isdir(_model_dir):\n os.mkdir(_model_dir)\n _tb_dir = os.path.join(model_root_dir, 'models3/logs')\n if not os.path.isdir(_tb_dir):\n os.mkdir(_tb_dir)\n _csvlogger_dir = os.path.join(model_root_dir, 'models3/model_metrics')\n if not os.path.isdir(_csvlogger_dir):\n os.mkdir(_csvlogger_dir)\n\n def out_name(batch_size, epoch, learn_rate, learn_rate_decay, filters, drop, fc_size, blocks):\n str_blocks = [str(x) for x in blocks]\n str_blk = ''.join(str_blocks)\n\n return 'b{0}_e{1}_lr{2:.3f}_lrd{3:.1f}_flt{4}_dr{5:.1f}_fc{6}_blk{7}'.format(batch_size,\n epoch,\n learn_rate,\n learn_rate_decay,\n filters,\n drop,\n fc_size,\n str_blk)\n\n # y_train_labels = np.argmax(y_train, axis=1)\n # skf = StratifiedKFold(n_splits=1, random_state=123, shuffle=True)\n\n # Horovod: print logs on the first worker.\n verbose = 1 if hvd.rank() == 0 else 0\n\n permutation = list(np.random.permutation(n_samples))\n\n x_train = x_train[permutation]\n y_train = y_train[permutation]\n\n @use_named_args(dimensions=search_dim)\n def gp_fitness(batch_size, epoch, learn_rate, learn_rate_decay, filters, drop, fc_size, blocks):\n print('batch_size: {}'.format(batch_size))\n print('epoch: {}'.format(epoch))\n print('learn rate: {0:.3f}'.format(learn_rate))\n 
print('learn rate decay: {0:.1f}'.format(learn_rate_decay))\n print('filters: {}'.format(filters))\n print('drop ratio: {0:.1f}'.format(drop))\n print('fc size: {}'.format(fc_size))\n print('blocks: {}'.format(blocks))\n\n tmp_out_name = out_name(batch_size, epoch, learn_rate,\n learn_rate_decay, filters, drop, fc_size, blocks)\n\n val_acc_arr = []\n\n # for i, (train_idx, val_idx) in enumerate(skf.split(x_train, y_train_labels)):\n # ix_train1, ix_val1 = x_train[train_idx], x_train[val_idx]\n # iy_train1, iy_val1 = y_train[train_idx], y_train[val_idx]\n for i in range(1):\n ix_train1, ix_val1, iy_train1, iy_val1 = train_test_split(x_train, y_train, test_size=0.2,\n shuffle=False)\n nb_trains = ix_train1.shape[0] // batch_size\n nb_examples = batch_size * nb_trains\n k_x_train = ix_train1[:nb_examples]\n k_y_train = iy_train1[:nb_examples]\n k_x_val = np.concatenate((ix_val1, ix_train1[nb_examples:]), axis=0)\n k_y_val = np.concatenate((iy_val1, iy_train1[nb_examples:]), axis=0)\n\n del ix_train1, ix_val1, iy_train1, iy_val1\n # gc.collect()\n\n model_fn = os.path.join(_model_dir, '{0}-k{1}.hdf5'.format(tmp_out_name, i))\n tensorboard_fn = os.path.join(_tb_dir, '{0}-tb_k{1}'.format(tmp_out_name, i))\n csvlogger_fn = os.path.join(_csvlogger_dir, '{0}-csvlogger_k{1}'.format(tmp_out_name, i))\n\n model = cnv_net(n_win_size, n_feat, n_class,\n filters=filters, kernel_size=16, strides=1, pool_size=2,\n pool_stride=2, drop=drop, blocks=blocks, fc_size=fc_size, m_name=tmp_out_name)\n\n callbacks = [\n # Horovod: broadcast initial variable states from rank 0 to all other processes.\n # This is necessary to ensure consistent initialization of all workers when\n # training is started with random weights or restored from a checkpoint.\n hvd.callbacks.BroadcastGlobalVariablesCallback(0),\n\n # # Horovod: average metrics among workers at the end of every epoch.\n # #\n # # Note: This callback must be in the list before the ReduceLROnPlateau,\n # # TensorBoard, or other metrics-based callbacks.\n # hvd.callbacks.MetricAverageCallback(),\n #\n # # Horovod: using `lr = 1.0 * hvd.size()` from the very beginning leads to worse final\n # # accuracy. Scale the learning rate `lr = 1.0` ---> `lr = 1.0 * hvd.size()` during\n # # the first five epochs. 
See https://arxiv.org/abs/1706.02677 for details.\n # hvd.callbacks.LearningRateWarmupCallback(warmup_epochs=5, verbose=verbose),\n #\n # # Horovod: after the warmup reduce learning rate by 10 on the 30th, 60th and 80th epochs.\n # hvd.callbacks.LearningRateScheduleCallback(start_epoch=5, end_epoch=30, multiplier=1.),\n # hvd.callbacks.LearningRateScheduleCallback(start_epoch=30, end_epoch=60, multiplier=1e-1),\n # hvd.callbacks.LearningRateScheduleCallback(start_epoch=60, end_epoch=80, multiplier=1e-2),\n # hvd.callbacks.LearningRateScheduleCallback(start_epoch=80, multiplier=1e-3),\n ]\n\n # Horovod: save checkpoints only on the first worker to prevent other workers from corrupting them.\n if hvd.rank() == 0:\n callbacks.append(EarlyStopping(monitor='val_acc', patience=5, verbose=1))\n callbacks.append(AdvancedLearnignRateScheduler(monitor='val_acc', patience=1, verbose=1, mode='auto',\n decayRatio=learn_rate_decay))\n callbacks.append(MultiGPUCheckpointCallback(model_fn, base_model=model, monitor='val_acc',\n save_best_only=True, verbose=1, save_weights_only=True))\n callbacks.append(TensorBoard(tensorboard_fn, batch_size=batch_size, histogram_freq=2))\n callbacks.append(CSVLogger(csvlogger_fn))\n\n # Horovod: adjust learning rate based on number of GPUs.\n opt = keras.optimizers.Adam(lr=learn_rate)\n # Horovod: add Horovod Distributed Optimizer.\n opt = hvd.DistributedOptimizer(opt)\n\n model.compile(optimizer=opt,\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n hist = model.fit(k_x_train, k_y_train, validation_data=(k_x_val, k_y_val), verbose=verbose,\n epochs=epoch, batch_size=batch_size, callbacks=callbacks)\n\n i_val_acc = hist.history['val_acc'][-1]\n print(\"Accuracy: {0:.6%}\".format(i_val_acc))\n val_acc_arr.append(i_val_acc)\n\n del model\n del k_x_train, k_y_train, k_x_val, k_y_val\n\n K.clear_session()\n gc.collect()\n i_config = tf.ConfigProto()\n i_config.gpu_options.allow_growth = True\n i_config.gpu_options.visible_device_list = str(hvd.local_rank())\n i_sess = tf.Session(config=i_config)\n K.set_session(i_sess)\n\n cv_mean_val_acc = np.mean(val_acc_arr)\n\n global best_accuracy\n if cv_mean_val_acc > best_accuracy:\n best_accuracy = cv_mean_val_acc\n\n return -cv_mean_val_acc\n\n search_result = gp_minimize(func=gp_fitness,\n dimensions=search_dim,\n acq_func='EI', # Expected Improvement.\n n_calls=40,\n x0=default_param)\n\n with open(os.path.join(model_root_dir, 'models3/gp_search_res.pickle'), 'wb') as f:\n pickle.dump(search_result, f)",
"def deepmd_single_process_continue_iter(deepmd_graph_dir: str,\n deepmd_data: Dict,\n iter_index: int,\n need_continue: bool):\n # Training and freezing the model\n deepmd_run(iter_index, deepmd_graph_dir, deepmd_data, need_continue)",
"def train_not_distributed():\n with tf.Graph().as_default() as graph:\n # Prepare the data\n train_data, test_data, embeddings_file = prepare_data()\n\n # Create model\n model = create_model(False)\n\n # Create summaries and SummaryWriter\n (test_loss, test_perplexity, bucket_loss_placeholders,\n bucket_perplexity_placeholders, summary, summary_writer) = create_summary_objects(graph)\n\n with tf.Session() as sess:\n init_model(sess, model)\n after_init(sess, model, embeddings_file)\n\n train(sess, model, train_data, test_data, summary, summary_writer, test_loss,\n test_perplexity, bucket_loss_placeholders, bucket_perplexity_placeholders)",
"def train(self):\n # The number of iterations per epoch\n self.iters_per_epoch = len(self.data_loader_train)\n # Start with trained model if exists\n g_lr = self.g_lr\n d_lr = self.d_lr\n if self.checkpoint:\n start = int(self.checkpoint.split('_')[0])\n else:\n start = 0\n # Start training\n self.start_time = time.time()\n for self.e in range(start, self.num_epochs):\n for self.i, (img_A, img_B, _, _) in enumerate(self.data_loader_train):\n # Convert tensor to variable\n org_A = self.to_var(img_A, requires_grad=False)\n ref_B = self.to_var(img_B, requires_grad=False)\n\n # ================== Train D ================== #\n # training D_A\n # Real\n out = self.D_A(ref_B)\n d_loss_real = self.criterionGAN(out, True)\n # Fake\n fake = self.G_A(org_A)\n fake = Variable(fake.data)\n fake = fake.detach()\n out = self.D_A(fake)\n #d_loss_fake = self.get_D_loss(out, \"fake\")\n d_loss_fake = self.criterionGAN(out, False)\n \n # Backward + Optimize\n d_loss = (d_loss_real + d_loss_fake) * 0.5\n self.d_A_optimizer.zero_grad()\n d_loss.backward(retain_graph=True)\n self.d_A_optimizer.step()\n\n # Logging\n self.loss = {}\n self.loss['D-A-loss_real'] = d_loss_real.item()\n\n # training D_B\n # Real\n out = self.D_B(org_A)\n d_loss_real = self.criterionGAN(out, True)\n # Fake\n fake = self.G_B(ref_B)\n fake = Variable(fake.data)\n fake = fake.detach()\n out = self.D_B(fake)\n #d_loss_fake = self.get_D_loss(out, \"fake\")\n d_loss_fake = self.criterionGAN(out, False)\n \n # Backward + Optimize\n d_loss = (d_loss_real + d_loss_fake) * 0.5\n self.d_B_optimizer.zero_grad()\n d_loss.backward(retain_graph=True)\n self.d_B_optimizer.step()\n\n # Logging\n self.loss['D-B-loss_real'] = d_loss_real.item()\n\n # ================== Train G ================== #\n if (self.i + 1) % self.ndis == 0:\n # adversarial loss, i.e. 
L_trans,v in the paper \n\n # identity loss\n if self.lambda_idt > 0:\n # G_A should be identity if ref_B is fed\n idt_A = self.G_A(ref_B)\n loss_idt_A = self.criterionL1(idt_A, ref_B) * self.lambda_B * self.lambda_idt\n # G_B should be identity if org_A is fed\n idt_B = self.G_B(org_A)\n loss_idt_B = self.criterionL1(idt_B, org_A) * self.lambda_A * self.lambda_idt\n g_loss_idt = loss_idt_A + loss_idt_B\n else:\n g_loss_idt = 0\n \n # GAN loss D_A(G_A(A))\n fake_B = self.G_A(org_A)\n pred_fake = self.D_A(fake_B)\n g_A_loss_adv = self.criterionGAN(pred_fake, True)\n #g_loss_adv = self.get_G_loss(out)\n\n # GAN loss D_B(G_B(B))\n fake_A = self.G_B(ref_B)\n pred_fake = self.D_B(fake_A)\n g_B_loss_adv = self.criterionGAN(pred_fake, True)\n\n # Forward cycle loss\n rec_A = self.G_B(fake_B)\n g_loss_rec_A = self.criterionL1(rec_A, org_A) * self.lambda_A\n\n # Backward cycle loss\n rec_B = self.G_A(fake_A)\n g_loss_rec_B = self.criterionL1(rec_B, ref_B) * self.lambda_B\n\n # Combined loss\n g_loss = g_A_loss_adv + g_B_loss_adv + g_loss_rec_A + g_loss_rec_B + g_loss_idt\n \n self.g_optimizer.zero_grad()\n g_loss.backward(retain_graph=True)\n self.g_optimizer.step()\n\n # Logging\n self.loss['G-A-loss_adv'] = g_A_loss_adv.item()\n self.loss['G-B-loss_adv'] = g_A_loss_adv.item()\n self.loss['G-loss_org'] = g_loss_rec_A.item()\n self.loss['G-loss_ref'] = g_loss_rec_B.item()\n self.loss['G-loss_idt'] = g_loss_idt.item()\n\n # Print out log info\n if (self.i + 1) % self.log_step == 0:\n self.log_terminal()\n\n #plot the figures\n for key_now in self.loss.keys():\n plot_fig.plot(key_now, self.loss[key_now])\n\n #save the images\n if (self.i + 1) % self.vis_step == 0:\n print(\"Saving middle output...\")\n self.vis_train([org_A, ref_B, fake_A, fake_B, rec_A, rec_B])\n self.vis_test()\n\n # Save model checkpoints\n if (self.i + 1) % self.snapshot_step == 0:\n self.save_models()\n\n if (self.i % 100 == 99):\n plot_fig.flush(self.task_name)\n\n plot_fig.tick()\n \n # Decay learning rate\n if (self.e+1) > (self.num_epochs - self.num_epochs_decay):\n g_lr -= (self.g_lr / float(self.num_epochs_decay))\n d_lr -= (self.d_lr / float(self.num_epochs_decay))\n self.update_lr(g_lr, d_lr)\n print('Decay learning rate to g_lr: {}, d_lr:{}.'.format(g_lr, d_lr))",
"def train(self):\n # Change directory to the code directory\n current_working_directory = os.getcwd()\n\n os.chdir(self.model_parameters[\"NN_code_directory\"])\n\n self.call_training_routine()\n\n # Come back to the original directory\n os.chdir(current_working_directory)",
"def evaluate(params,dataloader):\n MIN_DEPTH = 1e-3\n MAX_DEPTH = 80\n num_gpus = 1\n pred_depth_scale_factor = 1\n checkpoint_path = './log_diretory/mono_depth2-102000/model-97060'#'./log_diretory/kitti_resnet_MS2_nbn_1epoch_pose_fix/model-189107'\n\n gt_path = './utils/gt/eigen_zhou'\n eval_stereo = False\n\n with tf.Graph().as_default(), tf.device('/cpu:0'):\n\n dataloader = MonodepthDataloader(dataloader.data_path, dataloader.filenames_file, params, dataloader.dataset,\n dataloader.mode)\n reference = dataloader.reference_image_batch\n param = dataloader.param_path_batch\n\n\n # split for each gpu\n reference_splits = tf.split(reference, num_gpus,0)\n param_splits = tf.split(param,num_gpus,0)\n\n\n\n reuse_variables = None\n\n with tf.variable_scope(tf.get_variable_scope()):\n for i in range(num_gpus):\n with tf.device('/gpu:%d' % i):\n with tf.name_scope('%d' % i) as scope:\n print(i)\n model = MonodepthModel(params, dataloader.mode, reference_splits[i],None,None,None,param_splits[i],\n #param_path=param_path_splits[i],\n reuse_variables=reuse_variables, model_index=i)\n\n\n\n config = tf.ConfigProto(allow_soft_placement=True) # allow_soft_placement는 명시된 device없을 때 자동으로 잡아준다.\n sess = tf.Session(config=config)\n # Saver\n train_saver = tf.train.Saver()\n\n # Init\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n coordinator = tf.train.Coordinator() ## coordinator=조정자, threads 관리해주는 함수\n threads = tf.train.start_queue_runners(sess=sess, coord=coordinator)\n\n # Restore\n print(\"Restore\")\n\n if checkpoint_path != '':\n print('----------------------------------------------')\n print(checkpoint_path)\n print('\\n')\n print(checkpoint_path.split(\".\")[0])\n print('----------------------------------------------')\n train_saver.restore(sess, checkpoint_path)\n print(\"Restore OK\")\n with tf.variable_scope(tf.get_variable_scope()):\n for i in range(num_gpus):\n with tf.device('/gpu:%d' % i):\n with tf.name_scope('%d' % i) as scope:\n bn_updates_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope)\n num_test_samples = count_text_lines(dataloader.filenames_file)\n pred_disps = []\n print('Start')\n for step in range(num_test_samples):\n pred_disp = sess.run(model.disp_reference_est[0])\n\n pred_disp = pred_disp.squeeze()\n pred_disp,_ = disp_to_depth(pred_disp)\n\n # print(pred_disp.shape)\n # plt.imshow(pred_disp)\n # plt.show()\n pred_disp = np.expand_dims(pred_disp,0)\n\n pred_disps.append(pred_disp)\n\n pred_disps = np.concatenate(pred_disps)\n print(pred_disps.shape)\n gt_path = gt_path+ '/gt_depths.npz'\n gt_depths = np.load(gt_path, fix_imports=True, encoding='latin1')[\"data\"]\n print(gt_depths[0].shape)\n\n print(\"-> Evaluating\")\n disable_median_scaling=False\n if eval_stereo:\n print(\" Stereo evaluation - \"\n \"disabling median scaling, scaling by {}\".format(STEREO_SCALE_FACTOR))\n disable_median_scaling = True\n pred_depth_scale_factor = STEREO_SCALE_FACTOR\n else:\n print(\" Mono evaluation - using median scaling\")\n\n errors = []\n ratios = []\n\n for i in range(pred_disps.shape[0]):\n\n gt_depth = gt_depths[i]\n gt_height, gt_width = gt_depth.shape[:2]\n\n pred_disp = pred_disps[i]\n pred_disp = cv2.resize(pred_disp, (gt_width, gt_height))\n pred_depth = 1 / pred_disp\n print(pred_depth[0,0])\n\n\n\n\n mask = np.logical_and(gt_depth > MIN_DEPTH, gt_depth < MAX_DEPTH)\n\n crop = np.array([0.40810811 * gt_height, 0.99189189 * gt_height,\n 0.03594771 * gt_width, 0.96405229 * gt_width]).astype(np.int32)\n\n crop_mask = 
np.zeros(mask.shape)\n crop_mask[crop[0]:crop[1], crop[2]:crop[3]] = 1\n mask = np.logical_and(mask, crop_mask)\n\n print(mask)\n #if i ==pred_disps.shape[0]-3:\n # plt.imshow(pred_depth / 100) # pred_depth[mask]/100)\n # plt.show()\n # plt.imshow(np.where(mask,pred_depth,np.zeros_like(pred_depth))/100)#pred_depth[mask]/100)\n # plt.show()\n # plt.imshow(np.where(mask,gt_depth,np.zeros_like(gt_depth))/100)\n # plt.show()\n\n print(\"pred_depth[mask]\", pred_depth[mask])\n print(\"gt_depth[mask]\", gt_depth[mask])\n pred_depth = pred_depth[mask]\n gt_depth = gt_depth[mask]\n\n pred_depth *= pred_depth_scale_factor\n if not disable_median_scaling:\n print('?')\n ratio = np.median(gt_depth) / np.median(pred_depth)\n ratios.append(ratio)\n pred_depth *= ratio\n\n pred_depth[pred_depth < MIN_DEPTH] = MIN_DEPTH\n pred_depth[pred_depth > MAX_DEPTH] = MAX_DEPTH\n print(\"pred_depth={}\".format(pred_depth))\n print(\"pred_depth < MIN_DEPTH\",pred_depth < MIN_DEPTH)\n print(\" pred_depth[pred_depth < MIN_DEPTH] \", pred_depth[pred_depth < MIN_DEPTH] )\n print(\"pred_depth > MAX_DEPTH\",pred_depth > MAX_DEPTH)\n print(\"pred_depth[pred_depth > MAX_DEPTH]\",pred_depth[pred_depth > MAX_DEPTH])\n print(\"pred_depth_shape={}\".format(pred_depth.shape))\n print(\"gt_depth_shape={}\".format(gt_depth.shape))\n\n errors.append(compute_errors(gt_depth, pred_depth))\n\n if not disable_median_scaling:\n ratios = np.array(ratios)\n med = np.median(ratios)\n print(\" Scaling ratios | med: {:0.3f} | std: {:0.3f}\".format(med, np.std(ratios / med)))\n\n mean_errors = np.array(errors).mean(0)\n\n print(\"\\n \" + (\"{:>8} | \" * 7).format(\"abs_rel\", \"sq_rel\", \"rmse\", \"rmse_log\", \"a1\", \"a2\", \"a3\"))\n print((\"&{: 8.3f} \" * 7).format(*mean_errors.tolist()) + \"\\\\\\\\\")\n print(\"\\n-> Done!\")",
"def train(self, sess, dataset,\n analogy_dataset=None,\n freeze_indices=None,\n freeze_context_indices=None,\n restore_from_file=None,\n n_epochs=10):\n sk_graph = self.build_graph(ds.unigrams)\n\n freeze_vars = None\n if freeze_indices is not None:\n print(\"Setting freeze vars to embeddings...\")\n freeze_vars = {\n sk_graph['embeddings']: list(set(freeze_indices))\n }\n\n if freeze_context_indices is not None:\n print(\"Setting freeze vars to context weights and biases...\")\n freeze_vars = {\n sk_graph['context_weights']: list(set(freeze_context_indices)),\n sk_graph['context_biases']: list(set(freeze_context_indices))\n }\n\n with tf.name_scope(\"train\"):\n optimize_fn = self.optimize_graph(sk_graph['loss'], freeze_vars)\n sk_graph['train'] = optimize_fn\n\n summary_op = tf.summary.merge_all()\n summary_writer = tf.summary.FileWriter(self.save_path,\n sess.graph)\n\n # Saver for variables\n saver = tf.train.Saver(list(self._model_variables), max_to_keep=FLAGS.num_checkpoints)\n\n # Initialize other variables\n init_vars = [v for v in tf.global_variables()\n if v not in self._model_variables]\n\n # Restore variables from checkpoint\n if restore_from_file:\n print(\"Restoring variables from {}...\".format(restore_from_file))\n saver.restore(sess, restore_from_file)\n sess.run(tf.variables_initializer(init_vars))\n\n else:\n # Properly initialize all variables.\n print(\"No checkpoint file is given. Initializing variables...\")\n sess.run(tf.global_variables_initializer())\n\n ev_ii = -1\n ana_ii = -1\n batch_ii = 0\n for epoch in range(n_epochs):\n # Start new epoch\n ds.reset_index(split=0)\n\n batch_index = 0\n batch_time = time.time()\n epoch_start = time.time()\n while not dataset.epoch_done(self.batch_size):\n with timeit(\"generate_batch\"):\n batch_data, batch_labels = dataset.generate_batch(self.batch_size)\n\n feed_dict = {sk_graph[\"target_input\"]: batch_data,\n sk_graph[\"context_input\"]: batch_labels}\n\n with timeit(\"run\"):\n _, loss_ii = sess.run([sk_graph[\"train\"], sk_graph[\"loss\"]],\n feed_dict=feed_dict)\n\n if batch_ii % 10000 == 0:\n # Save checkpoint\n saver.save(sess,\n os.path.join(self.save_path, \"checkpoint\"),\n global_step=self.global_step)\n\n # if batch_ii % 1000 == 0:\n # Evaluate and add evaluation info\n # sum_ii, ev_ii = self.eval(sess, dataset, summary=summary_op)\n # summary_writer.add_summary(sum_ii, batch_ii // 1000)\n #\n # train_wps = np.floor((dataset.data_index[0] - batch_index)\n # / (time.time() - batch_time))\n # pc_done = 100.0 * dataset.data_index[0] / dataset.split_sizes[0]\n # print(\n # \"Epoch {} [{:0.1f}%], loss: {:0.1f}, val: {:0.3f}, ana: {:0.2f} word/sec: {:0.0f} | \"\n # .format(epoch, pc_done, loss_ii, ev_ii, ana_ii, train_wps), end=\"\\r\")\n #\n # batch_time = time.time()\n # batch_index = dataset.data_index[0]\n\n batch_ii += 1\n\n epoch_time = time.time() - epoch_start\n print(\"\\nEpoch done in {:4f}s\".format(epoch_time))\n self.save_epoch_time(epoch, epoch_time)\n\n node_embeddings = session.run(sk_graph[\"normalized_embeddings\"])\n self.save_embeddings(epoch, node_embeddings)\n\n # Save checkpoint\n # Increase the number of checkpoints to hold\n saver.save(sess, os.path.join(self.save_path, \"model-epoch\"), global_step=epoch)",
"def define_graph(self):\n with tf.name_scope('discriminator'):\n ##\n # Setup scale networks. Each will make the predictions for images at a given scale.\n ##\n\n self.scale_nets = []\n for scale_num in xrange(self.num_scale_nets):\n with tf.name_scope('scale_net_' + str(scale_num)):\n scale_factor = 1. / 2 ** ((self.num_scale_nets - 1) - scale_num)\n self.scale_nets.append(DScaleModel(scale_num,\n int(self.height * scale_factor),\n int(self.width * scale_factor),\n self.scale_conv_layer_fms[scale_num],\n self.scale_kernel_sizes[scale_num],\n self.scale_fc_layer_sizes[scale_num]))\n\n # A list of the prediction tensors for each scale network\n self.scale_preds = []\n for scale_num in xrange(self.num_scale_nets):\n self.scale_preds.append(self.scale_nets[scale_num].preds)\n\n ##\n # Data\n ##\n\n self.labels = tf.placeholder(tf.float32, shape=[None, 1], name='labels')\n\n ##\n # Training\n ##\n\n with tf.name_scope('training'):\n # global loss is the combined loss from every scale network\n self.global_loss = adv_loss(self.scale_preds, self.labels)\n self.global_step = tf.Variable(0, trainable=False, name='global_step')\n self.optimizer = tf.train.GradientDescentOptimizer(c.LRATE_D, name='optimizer')\n self.train_op = self.optimizer.minimize(self.global_loss,\n global_step=self.global_step,\n name='train_op')\n\n # add summaries to visualize in TensorBoard\n loss_summary = tf.summary.scalar('loss_D', self.global_loss)\n self.summaries = tf.summary.merge([loss_summary])",
"def train(self, opts):\n with self._session.as_default(), self._session.graph.as_default():\n self._train_internal(opts)\n self._trained = True"
]
| [
"0.6872082",
"0.6623188",
"0.63124806",
"0.62768054",
"0.6256553",
"0.623286",
"0.6227331",
"0.6225679",
"0.62129956",
"0.62015235",
"0.61930925",
"0.6157583",
"0.61469805",
"0.61318874",
"0.6128298",
"0.6120909",
"0.6104487",
"0.6077759",
"0.60667485",
"0.60667485",
"0.6051428",
"0.6049112",
"0.60445774",
"0.60177827",
"0.5985253",
"0.596094",
"0.5945679",
"0.5937238",
"0.5931721",
"0.5930914"
]
| 0.7235948 | 0 |
deepmd_single_process function for continue mode | def deepmd_single_process_continue_iter(deepmd_graph_dir: str,
deepmd_data: Dict,
iter_index: int,
need_continue: bool):
# Training and freezing the model
deepmd_run(iter_index, deepmd_graph_dir, deepmd_data, need_continue) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def deepmd_single_process_initial_iter(graph_index: int, deepmd_graph_dir: str,\n deepmd_data: Dict, iter_index: int,\n need_continue: bool):\n # Generate json\n deepmd_json_param(deepmd_graph_dir, deepmd_data, iter_index)\n # move previous model.ckpt if is not initial\n deepmd_cp_ckpt(iter_index, graph_index)\n # update deepmd check point\n deepmd_update_checkpoint(iter_index)\n # Training and freezing the model\n deepmd_run(iter_index, deepmd_graph_dir, deepmd_data, need_continue)",
"def process():\n pass",
"def process():",
"def deepmd_run(iter_index: int, deepmd_graph_dir: str, deepmd_data: Dict,\n need_continue: bool):\n dp_train_path = os.path.join(deepmd_data['deepmd_bin_path'], 'dp_train')\n dp_frz_path = os.path.join(deepmd_data['deepmd_bin_path'], 'dp_frz')\n print(f'Now start training in the deepmd_graph_dir {deepmd_graph_dir}\\n')\n with auxiliary.cd(deepmd_graph_dir):\n deepmd_json_path = os.path.join('.', 'deepmd.json')\n # Not set OMP number, use the default\n\n print(\"enter_traina_dir\", file=sys.stderr)\n print(\"need_continue_run\", need_continue, file=sys.stderr)\n # Check if restart\n if not need_continue:\n # Now don't need --init-model parameter in dp_train\n subprocess.run([dp_train_path, deepmd_json_path])\n print(\"new model\", file=sys.stderr)\n else:\n subprocess.run(\n [dp_train_path, deepmd_json_path, '--restart', 'model.ckpt'])\n print(\"restart-model\", file=sys.stderr)\n # Start freeze model\n print(f'Now start freezing the graph in the {deepmd_graph_dir}\\n', file=sys.stderr)\n subprocess.run([dp_frz_path])\n print(f'Freezing end\\n', file=sys.stderr)",
"def run_one_step(self):\n pass",
"def fork():\n\tpass",
"def command_continue(self):\n self.step_continue = True",
"def start_processing(self):",
"def _keep_running():\n return True",
"def num_processes():\n return 1",
"def doMPIMD(CONFIGFILE, debug): \n # Read in the call arguments\n CONFIGFILE = CONFIGFILE\n debug = bool(debug == \"True\")\n # Initialize MD module\n md_module = MD_module(CONFIGFILE = CONFIGFILE, debug = debug)\n md_module.loadIterationFromDumpFile()\n if debug:\n if rank == 0:\n sys.stderr.write(\"Number of MPI processes: {0}\\n\".format(size))\n sys.stderr.flush()\n comm.barrier()\n # Every node works only on segments modulo their rank \n workcount = 0\n md_skip_count = 0\n for loop_bin in md_module.iteration:\n for loop_segment in loop_bin:\n #if not loop_bin.isConverged():\n if workcount % size == rank:\n # Run MD on this node\n md_module.runSegmentMD(loop_segment)\n #else:\n # md_skip_count += 1\n # if workcount % size == rank:\n # # Run MD skip\n # md_module.SkipSegmentMD(loop_segment, workcount, md_skip_count)\n workcount += 1\n # Log if rank 0\n if rank == 0:\n md_module.printMdStatus(loop_segment, workcount, md_skip_count)\n \n # Wait for all processes to finish\n comm.barrier()\n if rank == 0:\n md_module.printMdStatus(loop_segment, workcount, md_skip_count)\n #sys.stdout.write(\"\\n\")\n if debug:\n sys.stdout.write(\"Finishing MPI\\n\")\n sys.stdout.flush()\n # Remove the iteration dump file\n if not debug:\n md_module.removeIterationDumpFile()",
"def onPreFork(self):",
"def run_first_process(one_process_workflow, extra_processor = None, extra_resource = None):\n pp = ProjectParser()\n if extra_processor:\n # we can inject a new processor to test exceptions\n pp.wb._processors[extra_processor.name] = extra_processor\n\n if extra_resource:\n # we can inject a new resource to test exceptions\n pp.wb._resources_definition[extra_resource.scheme] = extra_resource\n\n pp.set_project(one_process_workflow)\n workflow = pp.parse_extend_and_check_project()\n process = workflow._processes[0]\n wr = WorkflowRuner(2)\n wr.init_workers()\n try:\n WorkflowRuner.prepare_and_assign_paths(process)\n wr._lt.follow_process(process.log_stdout, process.log_stderr, process.id)\n with wr._lt.trace_in_background():\n wr.start_process_in_background(process) # The function we're testing !\n timeout = time() + 0.5\n while time() < timeout and not wr._completed_processes:\n sleep(0.1)\n assert time() < timeout, \"Process should have stoped now\"\n finally:\n wr.terminate_workers_and_clean_subprocesses()\n return process",
"def run(self):\n while self.container.process(): pass",
"def continue_next(self):\n\n self.scope_assign = {}\n self.scope_var_id = 0\n self.cont = True",
"def test_1_single_process():\n\n # ********************************************************\n # We will put this function in its own thread in test_1()\n def put_data_in_stream(stream):\n num_steps=5\n step_size=4\n for i in range(num_steps):\n data = list(range(i*step_size, (i+1)*step_size))\n stream.extend(data)\n run()\n return\n\n # ********************************************************\n # We will put these lines in a separate process in test_1()\n x = Stream('x')\n y = Stream('y')\n double(x, y)\n\n # *********************************************************\n # We will put these lines in a separate process in test_1().\n s = Stream(name='s')\n increment(y, s)\n print_stream(s, name=s.name)\n\n # *********************************************************\n # This function is executed in a separate thread in test_1().\n put_data_in_stream(x)",
"def test_1_single_process():\n\n # ********************************************************\n # We will put this function in its own thread in test_1()\n def put_data_in_stream(stream):\n num_steps=5\n step_size=4\n for i in range(num_steps):\n data = list(range(i*step_size, (i+1)*step_size))\n stream.extend(data)\n run()\n return\n\n # ********************************************************\n # We will put these lines in a separate process in test_1()\n x = Stream('x')\n y = Stream('y')\n double(x, y)\n\n # *********************************************************\n # We will put these lines in a separate process in test_1().\n s = Stream(name='s')\n increment(y, s)\n print_stream(s, name=s.name)\n\n # *********************************************************\n # This function is executed in a separate thread in test_1().\n put_data_in_stream(x)",
"def process(self):\n return False",
"def _enable(self):\n sub = multiprocessing.Process(target=subproc)\n sub.start()",
"def continue_running(self, method):",
"def run(self):\n\n while not self.__done:\n self.single_cycle()\n\n \"\"\"\n while not self.__done:\n self.step()\n self.debug()\n \"\"\"",
"def do_continue(self, arg):\n if not self.nosigint:\n print('Resuming program, press Ctrl-C to relaunch debugger.', file=self.stdout)\n return super().do_continue(arg)",
"def test_startProcessAlreadyStarted(self):\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.pm.startProcess(\"foo\")\r\n self.assertIdentical(None, self.pm.startProcess(\"foo\"))",
"def restrict_computation_to_single_processor():\n\n # Uncomment lines below if measuring elapsed time. These will restrict\n # python to only use one processing thread.\n os.environ[\"OMP_NUM_THREADS\"] = \"1\" # export OMP_NUM_THREADS=1\n os.environ[\"OPENBLAS_NUM_THREADS\"] = \"1\" # export OPENBLAS_NUM_THREADS=1\n os.environ[\"MKL_NUM_THREADS\"] = \"1\" # export MKL_NUM_THREADS=1\n os.environ[\"VECLIB_MAXIMUM_THREADS\"] = \"1\" # export VECLIB_MAXIMUM_TH...=1\n os.environ[\"NUMEXPR_NUM_THREADS\"] = \"1\" # export NUMEXPR_NUM_THREADS=1",
"def sync_processes(self, *args, **kwargs):\n return True",
"def test_result_reduce_ddp():\n tutils.reset_seed()\n tutils.set_random_master_port()\n\n worldsize = 2\n mp.spawn(_ddp_test_fn, args=(worldsize,), nprocs=worldsize)",
"def run_single_parsing_loop(self) -> None:\n if not self._parent_signal_conn or not self._process:\n raise ValueError(\"Process not started.\")\n if not self._process.is_alive():\n return\n\n try:\n self._parent_signal_conn.send(DagParsingSignal.AGENT_RUN_ONCE)\n except ConnectionError:\n # If this died cos of an error then we will noticed and restarted\n # when harvest_serialized_dags calls _heartbeat_manager.\n pass",
"def start_processing(self,recurse = True):\n if isinstance(self.model_source, str) and self.auto_reload:\n self.observer = ChangeWatcher(self.model_source)\n \n if self.mapping_process is None:\n self.run_map.value = True\n self.mapping_process = multiprocessing.Process(target = self.mapping_runner)\n self.mapping_process.start()\n super(DNN2, self).start_processing(recurse)",
"def num_processes(self):\n return 1",
"def do_step(self) -> None:"
]
| [
"0.5820932",
"0.5736894",
"0.5658012",
"0.54358",
"0.53977996",
"0.5313565",
"0.528704",
"0.5198629",
"0.51655215",
"0.50905365",
"0.5083622",
"0.503704",
"0.50110036",
"0.5008158",
"0.4999751",
"0.49736997",
"0.49736997",
"0.49691606",
"0.493694",
"0.49172115",
"0.48817807",
"0.48568556",
"0.48547465",
"0.48540306",
"0.48309657",
"0.48272955",
"0.4826142",
"0.48234922",
"0.48160204",
"0.48001313"
]
| 0.67583585 | 0 |
Deepmd update the checkpoint | def deepmd_update_checkpoint(iter_index: int):
# Now update will preserve the previous steps information
with open('generator_checkpoint.json', 'r') as generate_ckpt:
ckpt = json.load(generate_ckpt)
ckpt['status'] = 'deepmd'
ckpt['config_index'] = 0 # multiprocessing don't need graph_index
ckpt['set_index'] = 0
ckpt['iter_index'] = iter_index
ckpt['dump_to_poscar'] = False
# Dump to the same file and erase the former
with open('generator_checkpoint.json', 'w') as generate_ckpt:
json.dump(ckpt, generate_ckpt, indent=2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def checkpoint():",
"def checkpoint(self):\n save()",
"async def update_checkpoint_async(self, lease, checkpoint):",
"async def checkpoint(cls) -> None:",
"def save_to_checkpoint(self, chkpt):\n chkpt[self.name] = self.state_dict()",
"def checkpoint( self ):\n last_saved = self.get_saved_state()\n if last_saved == None or last_saved['epoch_loss'] >= self.epoch_loss:\n self.save()\n self.metagraph.load()\n self.metagraph.sync()\n self.metagraph.save()\n self.reload()",
"async def checkpoint() -> None:\n await get_async_backend().checkpoint()",
"def checkpoint(self, epoch: int):\n if self.exp.scheduler_stepper is not None:\n torch.save(\n {\n \"model_state_dict\": self.exp.model.state_dict(),\n \"optimizer_state_dict\": self.exp.optimizer.state_dict(),\n \"scheduler_state_dict\": self.exp.scheduler_stepper.scheduler.state_dict(),\n \"Epoch\": epoch,\n },\n self.exp.path,\n )\n else:\n torch.save(\n {\n \"model_state_dict\": self.exp.model.state_dict(),\n \"optimizer_state_dict\": self.exp.optimizer.state_dict(),\n \"Epoch\": epoch,\n },\n self.exp.path,\n )",
"def resume(self, checkpoint):\n model_dict = paddle.load(checkpoint)\n self.model.set_state_dict(model_dict)",
"def checkpoint(self):\n self.logger.info('Checkpointing Sampler')\n with open(self.resume_file, \"wb\") as f:\n pickle.dump(self, f)",
"def save(self, checkpoint) -> None:\r\n self.model.save(checkpoint)",
"def restore(self, checkpoint):\n raise NotImplementedError",
"def save_checkpoint(state, filename):\n print (\"=> Saving a new best\")\n torch.save(state, filename) # save checkpoint",
"def reload_checkpoint(self):\n checkpoint_path = os.path.join(self.params.dump_path, 'checkpoint.pth')\n if not os.path.isfile(checkpoint_path):\n if self.params.reload_checkpoint == '':\n return\n else:\n checkpoint_path = self.params.reload_checkpoint\n assert os.path.isfile(checkpoint_path)\n logger.warning(\"Reloading checkpoint from %s ...\" % checkpoint_path)\n data = torch.load(checkpoint_path, map_location='cpu')\n\n # reload model parameters\n for name in self.MODEL_NAMES:\n getattr(self, name).load_state_dict(data[name])\n\n # reload optimizers\n for name in self.optimizers.keys():\n if False: # AMP checkpoint reloading is buggy, we cannot do that - TODO: fix - https://github.com/NVIDIA/apex/issues/250\n logger.warning(\"Reloading checkpoint optimizer %s ...\" % name)\n else: # instead, we only reload current iterations / learning rates\n logger.warning(\"Not reloading checkpoint optimizer %s.\" % name)\n for group_id, param_group in enumerate(self.optimizers[name].param_groups):\n if 'num_updates' not in param_group:\n logger.warning(\"No 'num_updates' for optimizer %s.\" % name)\n continue\n logger.warning(\"Reloading 'num_updates' and 'lr' for optimizer %s.\" % name)\n param_group['num_updates'] = data['%s_optimizer' % name]['param_groups'][group_id]['num_updates']\n param_group['lr'] = self.optimizers[name].get_lr_for_step(param_group['num_updates'])\n\n # reload main metrics\n self.epoch = data['epoch'] + 1\n self.n_total_iter = data['n_total_iter']\n self.best_metrics = data['best_metrics']\n self.best_stopping_criterion = data['best_stopping_criterion']\n logger.warning(\"Checkpoint reloaded. Resuming at epoch %i / iteration %i ...\" % (self.epoch, self.n_total_iter))",
"def checkpoint(self, checkpoint_file):\n log.debug(\"Host %s: checkpoint is %s.\", self.host, checkpoint_file)\n self.save(filename=checkpoint_file)",
"def update(self, time_step, checkpoints):\n if self.at_checkpoint:\n return True\n\n if self._arrived_at_checkpoint(time_step):\n self.checkpoint_target.add_attendee(self, time_step)\n print(\"Attendee\", self.attendee_id, \"at:\", self.current_location,\\\n \"has moved to checkpoint at:\", self.checkpoint_target.get_location())\n self.current_location = self.checkpoint_target.get_location()\n self.walk_route[-1] = tuple(self.current_location)\n # print(\"Attendee Walk Route: \", self.walk_route) \n return True\n self.find_checkpoint(checkpoints, time_step)\n self.inter_step()\n return False",
"def checkpoint(self):\r\n return self._checkpoint",
"def update(**kwargs):\n\n def validate_valid_format(date):\n err_msg = \"date not in valid format, use YYYY-MM-DD\"\n\n # check if date is in valid format, YYYY-MM-DD\n # raise Exception if not\n date_s = date.split(\"-\")\n if not len(date_s) == 3:\n raise Exception(err_msg)\n year, month, day = date_s\n year_p = re.compile(\"\\d{4}\")\n month_day_p = re.compile(\"\\d{2}\")\n v_components = [\n {\"val\": year, \"pattern\": year_p},\n {\"val\": month, \"pattern\": month_day_p},\n {\"val\": day, \"pattern\": month_day_p}\n ]\n for v_component in v_components:\n value, pattern = [v_component[k] for k in [\"val\", \"pattern\"]]\n if not pattern.search(value):\n raise Exception(err_msg)\n \n # create a date object from the provided date, if it is older than\n # the oldest accepted time, do not update and raise Exception\n d = datetime.date(int(year), int(month), int(day))\n oldest_date_string = \\\n parse_checkpoint_ini()[\"refget_ena_checkpoint\"][\"absolute_start\"]\n oldest_date = datetime.date(\n *[int(a) for a in oldest_date_string.split(\"-\")]\n )\n\n if d < oldest_date:\n raise Exception(\"cannot update date, proposed date must be after \"\n + oldest_date_string)\n\n try:\n # get date string and validate, if OK, set the config value to\n # the proposed date and write to config file\n date_string = kwargs[\"date\"]\n validate_valid_format(date_string)\n config = parse_checkpoint_ini()\n config[\"refget_ena_checkpoint\"][\"run_start\"] = date_string\n with open(get_checkpoint_path(), \"w\") as configfile:\n config.write(configfile)\n print(\"ena checkpoint updated to \" + date_string + \". execute \"\n + \"'ena-refget-scheduler checkpoint view' to confirm\")\n \n except Exception as e:\n print(e)",
"def checkpoint(name, first = False):\n global DEBUG\n if DEBUG:\n if name != 'first':\n print 'checkpoint %15s: %f' % ((time.time() - SCRIPT_START) if not first else name, (time.time() - checkpoint.start))\n checkpoint.start = time.time()",
"def saveCheckpoint(self):\n time_stamp = time.strftime('%Y%m%d%H%M%S', time.gmtime())\n state_filename = os.path.join(self.saving_dir, 'checkpoint.' + time_stamp + '.pth.tar')\n mem_filename = os.path.join(self.saving_dir, 'memory.' + time_stamp + '.pth.tar')\n state = self.getSavingState()\n memory = {\n 'memory': self.memory\n }\n torch.save(state, state_filename)\n torch.save(memory, mem_filename)",
"def commit_checkpoint(self, checkpoint_id):\n changes_to_merge = self.pop_checkpoint(checkpoint_id)\n if self.checkpoints:\n # we only have to merge the changes into the latest checkpoint if\n # there is one.\n self.latest = merge(\n changes_to_merge,\n self.latest,\n )",
"def save_checkpoint(checkpoint_dir, epoch, iteration, save_dict):\n os.makedirs(checkpoint_dir, exist_ok=True)\n path = opj(checkpoint_dir, str(epoch) + '.' + str(iteration) + '.ckpt')\n assert epoch == save_dict['epoch'], \"`epoch` != save_dict's `start_epoch`\"\n assert iteration == save_dict['iteration'], \"`iteration` != save_dict's `start_iteration`\"\n if os.path.isfile(path):\n print(\"Overwrite checkpoint in epoch %d, iteration %d :exclamation:\" % (epoch, iteration))\n try:\n torch.save(save_dict, path)\n except Exception:\n raise Exception(\"Fail to save checkpoint\")\n \n print(\"Checkpoint %s saved :heavy_check_mark:\" % (str(epoch) + '.' + str(iteration) + '.ckpt'))",
"def checkpoint(iteration, G, D, opts):\n ckpt_path = os.path.join(opts.checkpoint_dir, 'ckpt_{:06d}.pth.tar'.format(iteration))\n torch.save({'G': G.state_dict(),\n 'D': D.state_dict(),\n 'iter': iteration}, \n ckpt_path)",
"def checkpoint(self, timestamp=0.0, **keywords):\n self.services.debug('checkpoint() method called')\n pass",
"def load_checkpoint(self):\n if self.params.resume_from is not None and os.path.exists(self.params.resume_from):\n try:\n LOG('Loading Checkpoint at %s' % self.params.resume_from)\n ckpt = torch.load(self.params.resume_from)\n self.epoch = ckpt['epoch']\n try:\n self.train_loss = ckpt['train_loss']\n self.val_loss = ckpt['val_loss']\n except:\n self.train_loss = []\n self.val_loss = []\n self.network.load_state_dict(ckpt['state_dict'])\n self.opt.load_state_dict(ckpt['optimizer'])\n LOG('Checkpoint Loaded!')\n LOG('Current Epoch: %d' % self.epoch)\n self.ckpt_flag = True\n except:\n WARNING('Cannot load checkpoint from %s. Start loading pre-trained model......' % self.params.resume_from)\n else:\n WARNING('Checkpoint do not exists. Start loading pre-trained model......')",
"def save_checkpoint(state, filename):\n torch.save(state, filename) # save checkpoint",
"def load_checkpoint(self, checkpoint_dir):\r\n\r\n if not os.path.exists(checkpoint_dir):\r\n raise Exception('No checkpoint directory <%s>' % checkpoint_dir)\r\n\r\n path = os.path.join(checkpoint_dir, 'model.pt')\r\n self.model.load_state_dict(torch.load(path, self.device))\r\n self.update()",
"def checkpoint(self, state: TrainState): # pragma: no cover\n if self.checkpointing:\n if not have_tf: # Flax checkpointing requires tensorflow\n raise RuntimeError(\n \"Tensorflow not available and it is\" \" required for Flax checkpointing.\"\n )\n checkpoint_save(state, self.workdir)",
"def save_checkpoint(self, checkpoint_info):\n torch.save(checkpoint_info, os.path.join(self.checkpoint_path, self.checkpoint_file))",
"def commit(self, checkpoint):\n self._validate_checkpoint(checkpoint)\n self.journal.commit_checkpoint(checkpoint)"
]
| [
"0.743215",
"0.7121438",
"0.6961666",
"0.6648132",
"0.62487",
"0.6226256",
"0.6076932",
"0.5967478",
"0.5961249",
"0.5960711",
"0.59606856",
"0.58850557",
"0.58401895",
"0.58247083",
"0.5819275",
"0.58082265",
"0.5802997",
"0.579261",
"0.5781037",
"0.576055",
"0.57508475",
"0.5730822",
"0.5715128",
"0.57098615",
"0.570832",
"0.5687016",
"0.56727934",
"0.56500655",
"0.56253934",
"0.56013066"
]
| 0.7607544 | 0 |
Sends priority message to either Nexmo or Africas Talking | def send_message(self):
priority_message = self.message_queue.pop(0)
# send this message to Africa's Talking or NexmoClient
response = nexmo.send_message(priority_message)
# response = africas_talking.send_message(priority_message)
print(response);
return response['message'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def SetPriority(self, priority=1, interruptMenuAfter=3, timeoutAfter=2):\n self.ListenToMenu(interruptMenuAfter) # listen to 'To sent with normal priority...'\n self.SipPhone.SendDTMF(str(priority))\n self.ListenToMenu(timeoutAfter) # listen to 'Message Sent'\n mailbox = self.getMailBoxDN()\n mailbox.SetPriority(int(priority))\n time.sleep(1)\n for owner in mailbox.owners:\n owner.CheckMWI()",
"async def priority(self, ctx: Context, *, guild: int = None, channel: int = None):\n\n if not guild:\n guild = ctx.guild\n else:\n guild = self.bot.get_guild(guild)\n if not guild:\n return await ctx.message.add_reaction(\"⚠\")\n\n if guild.id not in self.active_guilds:\n return await ctx.message.add_reaction(\"⚠\")\n\n if not channel:\n channel = ctx.channel\n else:\n channel = self.bot.get_channel(channel)\n if not channel:\n return await ctx.message.add_reaction(\"⚠\")\n\n config = self.get_guild_config(guild)\n config[\"priority_modlog\"] = str(channel.id)\n\n self.config.hmset(f\"guilds:{guild.id}\", config)\n self._config_cache[guild.id] = config\n\n await ctx.message.add_reaction(\"✅\")",
"def test_tx_priority(self):\n\n self._serial_handler.tx_queue.put((1,'priority1')) # Message ready to be sent\n self._serial_handler.tx_queue.put((2,'priority2-1')) # Message ready to be sent\n self._serial_handler.tx_queue.put((0,'priority0')) # Message ready to be sent\n self._serial_handler.tx_queue.put((2,'priority2-2')) # Message ready to be sent\n\n m1 = self._serial_handler.tx_queue.get(block=True, timeout=3)\n m2 = self._serial_handler.tx_queue.get(block=True, timeout=3)\n m3 = self._serial_handler.tx_queue.get(block=True, timeout=3)\n m4 = self._serial_handler.tx_queue.get(block=True, timeout=3)\n\n self.assertEquals(m1[1], 'priority0')\n self.assertEquals(m2[1], 'priority1')\n self.assertEquals(m3[1], 'priority2-1')\n self.assertEquals(m4[1], 'priority2-2')",
"def act_priority(self, decision, choice):\n if choice is None: return 0\n return (100*choice.actions + 10*(choice.coins + choice.cards) +\n choice.buys) + 1",
"def priority_speaker(_) -> int:\n return 1 << 8",
"def priority_speaker(_) -> int:\n return 1 << 8",
"def increase_priority(self):\n if self._priority > 0:\n self._priority -= 1",
"def setPriority(self, p):\n self.priority = p",
"def priority(name):\n try:\n manager = Actions()\n priority = Priority[name]\n ordered_tasks = manager.order_by_priority(priority)\n click.echo(\"Ordered by priority:\" + click.style(name, bg='red', fg='white'))\n click.echo()\n console_utils.format_print_ordered(ordered_tasks)\n except IndexError as e:\n click.echo(\"IndexError: \"+e)\n except Exception as e:\n click.echo(e)",
"def setpriority(self, pid=None, priority=5):\n\t \n\t import win32api,win32process,win32con\n\t \n\t priorityclasses = [win32process.IDLE_PRIORITY_CLASS,\n\t win32process.BELOW_NORMAL_PRIORITY_CLASS,\n\t win32process.NORMAL_PRIORITY_CLASS,\n\t win32process.ABOVE_NORMAL_PRIORITY_CLASS,\n\t win32process.HIGH_PRIORITY_CLASS,\n\t win32process.REALTIME_PRIORITY_CLASS]\n\t if pid == None:\n\t pid = win32api.GetCurrentProcessId()\n\t handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid)\n\t win32process.SetPriorityClass(handle, priorityclasses[priority])",
"def send_message(self, tag, value, priority=0):\n self._messaged.emit((\"msg\",tag,priority,value))",
"def notify(message):\n context = nova.context.get_admin_context()\n message['method'] = 'notify'\n priority = message.get('priority',\n FLAGS.default_notification_level)\n priority = priority.lower()\n rpc.cast(context, FLAGS.notification_topic, {'method':'notify','args':{'message':message}})",
"def priority(self):\n pass # pragma: no cover",
"def priority(self):\n pass # pragma: no cover",
"def priority(self, priority):\n self._priority = priority",
"def set_priority(priority=2, pid=None):\n print \"TODO: add os independent support\"\n priorityclasses = [win32process.IDLE_PRIORITY_CLASS,\n win32process.BELOW_NORMAL_PRIORITY_CLASS,\n win32process.NORMAL_PRIORITY_CLASS,\n win32process.ABOVE_NORMAL_PRIORITY_CLASS,\n win32process.HIGH_PRIORITY_CLASS,\n win32process.REALTIME_PRIORITY_CLASS]\n if pid == None:\n pid = win32api.GetCurrentProcessId()\n handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid)\n win32process.SetPriorityClass(handle, priorityclasses[priority])",
"def getPriority(self):",
"def send_proposes(self):\n neighbors = self.model.space.get_neighbors(self.pos, self.range, include_center=False)\n neighbors = list(filter(lambda x: x.type == 'guest', neighbors))\n\n if len(neighbors) > 0:\n options = list(map(lambda x: (x.role, self.action), neighbors))\n know = list(map(lambda x: self.knowledge[x], options))\n # print(\"Knowledges\", probs)\n probs = list(map(lambda x: np.exp(x), know))\n # print(\"Softmax\", probs)\n probs = list(map(lambda x: x / sum(probs), probs))\n # print(\"Normed\", probs)\n if len(neighbors) > 1:\n print(self.unique_id, neighbors, probs, know)\n\n other_agent = random.choices(neighbors, probs)[0]\n self.propose_interaction(other_agent, self.action)",
"def set_priority(self, priority):\n self.options[\"priority\"] = priority",
"def priority(self) -> str:\n return pulumi.get(self, \"priority\")",
"def hook_priority(self) -> int:",
"def set_priority(self, priority):\n self.options['priority'] = priority",
"def set_priority(self, priority):\n self._priority = priority",
"def get_priority(self) -> str:\n if self.health >= 75 and self.food >= 75 and self.water >= 75:\n if min(self.food, self.water) == self.food:\n return 'food'\n else:\n return 'water'\n else:\n if self.food >= 75 and self.water >= 75:\n return 'monster'\n else:\n return 'food'",
"async def on_message(message):\n\n # we do not want the bot to reply to itself\n if message.author == client.user:\n return\n\n # intializes a scores object\n hiscores = Scores(message)\n\n if message.content.startswith('!hello'):\n msg = 'Hello {0.author.mention}'.format(message)\n await message.channel.send(msg)\n\n # get the command without !\n command = message.content.split()[0][1:]\n\n # retrieve the score of a player\n if message.content.startswith('!') and command in SKILLS:\n\n # retrieve the username that comes after the !level command and set underscores\n username = message.content.split()[1:]\n username = '_'.join(username)\n\n # get scores\n await hiscores.show_score(username, command)\n\n if message.content.startswith('!compare'):\n\n # get skill\n skill = message.content.split()[1]\n\n # check if the skill is valid, if not we compare based on total level and experience\n if not skill in SKILLS:\n\n # get the players\n players = ' '.join(message.content.split()[1:])\n players = players.split(' - ')\n\n for i, player in enumerate(players):\n players[i] = player.replace(' ', '_')\n\n # compare the players on total level if nothing is given\n await hiscores.compare(players, 'total')\n\n else:\n\n # get the players after the skill\n players = ' '.join(message.content.split()[2:])\n players = players.split(' - ')\n\n for i, player in enumerate(players):\n players[i] = player.replace(' ', '_')\n\n print(players)\n print(skill)\n # compare the players on total level if nothing is given\n await hiscores.compare(players, skill)\n\n\n if message.content.startswith('!pok'):\n msg = 'Heb je m al Marc?'.format(message)\n await message.channel.send(msg)",
"def test_method_priority(self):\n expected_out = '\\n'.join([\n \"Balance check logic says Transaction allowed\",\n \"Transferring Rs. 1000\",\n \"Transaction successful\"\n ])\n self.bank_obj.transfer(1000)\n self.assertEquals(sys.stdout.getvalue().strip(), expected_out)",
"def score_notify(score1, score2):\n\tif score1 > score2 :\n\t\tresult = \"Player A won\"\n\telif score1 < score2 : \n\t\tresult = \"Player B won\"\n\telse :\n\t\tresult = \"Tied Score\"\n\treturn result",
"def turn_priority(self):\n raise NotImplementedError(\"turn_priority() was not implemented in a subclass of TurnListItem.\")",
"def get_priority(self):\n priorities = dict(PRIORITY_CHOICES)\n return priorities.get(self.priority, \"N/A\")",
"def notify(self, title, message, config):\n notification = {\n 'application': config.get('application'),\n 'event': title,\n 'description': message,\n 'url': config.get('url'),\n 'priority': config.get('priority'),\n 'providerkey': config.get('provider_key'),\n }\n\n if isinstance(config['api_key'], list):\n config['api_key'] = [config['api_key']]\n notification['apikey'] = config['api_key']\n\n try:\n response = requests.post(PROWL_URL, data=notification)\n except RequestException as e:\n raise PluginWarning(repr(e))\n\n request_status = ET.fromstring(response.content)\n error = request_status.find('error')\n if error is not None:\n raise PluginWarning(error.text)\n else:\n success = request_status.find('success').attrib\n logger.debug(\n 'prowl notification sent. Notifications remaining until next reset: {}. '\n 'Next reset will occur in {} minutes',\n success['remaining'],\n success['resetdate'],\n )"
]
| [
"0.6324",
"0.59750456",
"0.5759524",
"0.5687916",
"0.5597825",
"0.5597825",
"0.55825335",
"0.5531191",
"0.5528984",
"0.54893994",
"0.54370934",
"0.5426119",
"0.54159474",
"0.54159474",
"0.5370475",
"0.5329592",
"0.53249246",
"0.5290757",
"0.5282707",
"0.525808",
"0.52420247",
"0.5234558",
"0.5234477",
"0.5232212",
"0.52248716",
"0.5214355",
"0.5188868",
"0.51758045",
"0.5174509",
"0.5147367"
]
| 0.67188764 | 0 |
Generates the C++ bindings for the Pawn include |filename|. | def GenerateBindings(filename, path):
name = filename[0:-4]
if name.startswith('a_'):
name = name[2:]
input = path
output_header = os.path.join(os.path.dirname(path), '%s.h' % name)
output_impl = os.path.join(os.path.dirname(path), '%s.cpp' % name)
write_bindings.WriteBindings(input, output_header, output_impl) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_cpp():\n cpp_file = AUTOGEN_WARNING\n cpp_file += \"// Implements basic nuclear data functions.\\n\"\n cpp_file += \"#ifndef PYNE_IS_AMALGAMATED\\n\"\n cpp_file += '#include \"atomic_data.h\"\\n'\n cpp_file += '#include \"nucname.h\"\\n'\n cpp_file += \"#endif\\n\"\n cpp_file += \" \\n\"\n cpp_file += \"void pyne::_load_atomic_mass_map_memory() { \\n\"\n cpp_file += \" // header version of atomic weight table data \\n\"\n cpp_file += \" //see if the data table is already loaded\\n\"\n cpp_file += \" if(!atomic_mass_map.empty()) {\\n\"\n cpp_file += \" return;\\n\"\n cpp_file += \" } else { \\n\"\n cpp_file += \" _insert_atomic_mass_map();\\n\"\n cpp_file += \" }\\n\"\n cpp_file += \" //see if the data table is already loaded\\n\"\n cpp_file += \" if(!natural_abund_map.empty()) {\\n\"\n cpp_file += \" return;\\n\"\n cpp_file += \" } else { \\n\"\n cpp_file += \" _insert_abund_map();\\n\"\n cpp_file += \" }\\n\"\n cpp_file += \" // calculate the atomic_masses of the elements \\n\"\n cpp_file += \" std::map<int,double> :: iterator it;\\n\"\n cpp_file += \" \\n\"\n cpp_file += \" for (int z = 1; z <= 92 ; z++) {\\n\"\n cpp_file += \" // loop through the natural abundance map\\n\"\n cpp_file += \" double element_atomic_weight = 0.0;\\n\"\n cpp_file += \" for (it = natural_abund_map.begin(); it != natural_abund_map.end() ; ++it){\\n\"\n cpp_file += \" // if the atomic number of the abudance matches the\\n\"\n cpp_file += \" // that of index\\n\"\n cpp_file += \" if(pyne::nucname::znum(it->first) == z) {\\n\"\n cpp_file += \" // take atomic abundance and multiply by mass\\n\"\n cpp_file += (\n \" // to get the mass of that nuclide / 100 since abundance is in %\\n\"\n )\n cpp_file += \" element_atomic_weight += (it->second*atomic_mass_map[it->first]/100.0);\\n\"\n cpp_file += \" }\\n\"\n cpp_file += \" }\\n\"\n cpp_file += \" // insert the abundance of the element into the list\\n\"\n cpp_file += \" atomic_mass_map[z*10000000] = element_atomic_weight;\\n\"\n cpp_file += \" }\\n\"\n cpp_file += \"}\\n\"\n cpp_file += \"\\n\\n\"\n cpp_file += \"void pyne::_insert_atomic_mass_map() { \\n\"\n cpp_file += generate_atomic_mass()\n cpp_file += \"}\\n\"\n cpp_file += \"\\n\\n\"\n cpp_file += \"void pyne::_insert_abund_map() { \\n\"\n cpp_file += generate_abundances()\n cpp_file += \"}\\n\"\n return cpp_file",
"def _generate_headlines(self):\n includes = set()\n for decl in self._ast.decls:\n includes.add(decl.cpp_file)\n for include in includes:\n yield f'#include \"{include}\"'\n yield '#include \"third_party/pybind11/include/pybind11/pybind11.h\"'\n yield ''\n yield 'namespace py = pybind11;'\n yield ''",
"def create_cfile_head(self):\n head = \"\"\"#include <stdio.h>\n#include <stdlib.h>\n#include <time.h>\n#include \"../init_array_lib/init_dyn_array.h\"\n#include \"../pips_lib/define_script.h\"\n\n\nint main(int argc, const char* argv[])\n{\n srand(time(NULL));\n \"\"\"\n\n self.append_text_to_file(str(head))",
"def generate_source():\n \"\"\"their dependencies\"\"\"\n global dictionary_names, dictionary_slices\n src = \"\"\n for s in dictionary_slices:\n src += deconstruct(s)\n src += \" '\" + pointer_to_name(s)\n src += \"' define\\n\"\n return src + \"\\n\"",
"def main():\n\n # Parse the command line.\n parser = ArgumentParser(\n \"Generate Python extension modules for C/C++ libraries.\",\n fromfile_prefix_chars='@')\n\n parser.add_argument('specification',\n help=\"the name of the specification file [default stdin]\",\n metavar=\"FILE\", nargs='?')\n\n parser.add_argument('-a', dest='api_extract',\n help=\"the name of the QScintilla API file [default not generated]\",\n metavar=\"FILE\")\n\n parser.add_argument('--abi-version', dest='abi_version',\n help=\"the ABI version\", metavar=\"VERSION\")\n\n parser.add_argument('-B', dest='backstops', action='append',\n help=\"add <TAG> to the list of timeline backstops\",\n metavar=\"TAG\")\n\n parser.add_argument('-c', dest='sources_dir',\n help=\"the name of the code directory [default not generated]\",\n metavar=\"DIR\")\n\n parser.add_argument('-D', dest='py_debug', action='store_true',\n default=False,\n help=\"generate code for a debug build of Python\")\n\n parser.add_argument('-e', dest='exceptions', action='store_true',\n default=False,\n help=\"enable support for exceptions [default disabled]\")\n\n parser.add_argument('-f', dest='warnings_are_errors', action='store_true',\n default=False,\n help=\"warnings are handled as errors\")\n\n parser.add_argument('-g', dest='release_gil', action='store_true',\n default=False,\n help=\"always release and reacquire the GIL [default only when \"\n \"specified]\")\n\n parser.add_argument('-I', dest='include_dirs', action='append',\n help=\"add <DIR> to the list of directories to search when \"\n \"importing or including .sip files\",\n metavar=\"DIR\")\n\n parser.add_argument('-j', dest='parts', type=int, default=0,\n help=\"split the generated code into <FILES> files [default 1 per \"\n \"class]\",\n metavar=\"FILES\")\n\n parser.add_argument('-m', dest='xml_extract', help=SUPPRESS)\n\n parser.add_argument('-n', dest='sip_module',\n help=\"the fully qualified name of the sip module\",\n metavar=\"NAME\")\n\n parser.add_argument('-o', dest='docstrings', action='store_true',\n default=False,\n help=\"enable the automatic generation of docstrings [default \"\n \"disabled]\")\n\n parser.add_argument('-P', dest='protected_is_public', action='store_true',\n default=False,\n help=\"enable the protected/public hack [default disabled]\")\n\n parser.add_argument('-r', dest='tracing', action='store_true',\n default=False,\n help=\"generate code with tracing enabled [default disabled]\")\n\n parser.add_argument('-s', dest='source_suffix',\n help=\"the suffix to use for C or C++ source files [default \\\".c\\\" \"\n \"or \\\".cpp\\\"]\",\n metavar=\"SUFFIX\")\n\n parser.add_argument('-t', dest='tags', action='append',\n help=\"add <TAG> to the list of versions/platforms to generate \"\n \"code for\",\n metavar=\"TAG\")\n\n parser.add_argument('-w', dest='warnings', action='store_true',\n default=False, help=\"enable warning messages [default disabled]\")\n\n parser.add_argument('-x', dest='disabled_features', action='append',\n help=\"add <FEATURE> to the list of disabled features\",\n metavar=\"FEATURE\")\n\n parser.add_argument('-X', dest='extracts', action='append',\n help=\"add <ID:FILE> to the list of extracts to generate\",\n metavar=\"ID:FILE\")\n\n parser.add_argument('-y', dest='pyi_extract',\n help=\"the name of the .pyi stub file [default not generated]\",\n metavar=\"FILE\")\n\n args = parser.parse_args()\n\n # Configure the handling of warnings.\n if args.warnings:\n if args.warnings_are_errors:\n simplefilter('error', FutureWarning)\n 
simplefilter('error', UserWarning)\n else:\n # Note that we don't suppress FutureWarnings.\n simplefilter('ignore', UserWarning)\n\n try:\n sip5(args.specification, sip_module=args.sip_module,\n abi_version=args.abi_version, sources_dir=args.sources_dir,\n include_dirs=args.include_dirs, tags=args.tags,\n backstops=args.backstops,\n disabled_features=args.disabled_features,\n exceptions=args.exceptions, parts=args.parts,\n source_suffix=args.source_suffix, docstrings=args.docstrings,\n protected_is_public=args.protected_is_public,\n py_debug=args.py_debug, release_gil=args.release_gil,\n tracing=args.tracing, extracts=args.extracts,\n pyi_extract=args.pyi_extract, api_extract=args.api_extract,\n xml_extract=args.xml_extract)\n except Exception as e:\n handle_exception(e)\n\n return 0",
"def generate_perl_package_file(typename, props, description, namespace):\n perl_base_package = _perl_package_name_from_shortname(namespace)\n package_name = perl_base_package + \"::\" + typename\n perl_assets_package = _perl_assets_package_name_from_shortname(namespace)\n\n import_string =\\\n \"# AUTO GENERATED FILE - DO NOT EDIT\\n\\n\" + \\\n \"package \" + package_name + \";\\n\\n\" + \\\n \"use Moo;\\n\" + \\\n \"use strictures 2;\\n\" + \\\n \"use \" + perl_assets_package + \";\\n\" + \\\n \"use namespace::clean;\\n\\n\" + \\\n \"extends 'Dash::BaseComponent';\\n\\n\"\n\n class_string = generate_class_string(\n typename,\n props,\n description,\n namespace\n )\n file_name = \"{:s}.pm\".format(typename)\n\n directory = os.path.join('Perl', namespace)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n file_path = os.path.join(directory, file_name)\n with open(file_path, 'w') as f:\n f.write(import_string)\n f.write(class_string)\n f.write(\"\\n1;\\n\");\n\n print('Generated {}'.format(file_name))",
"def translate(code, hdrname, extra_cpp_args=[], whitelist=None):\n extra_incdir = os.path.dirname(hdrname)\n extra_cpp_args += ['-I', extra_incdir]\n p = AutoPxd(hdrname)\n p.visit(parse(code, extra_cpp_args=extra_cpp_args, whitelist=whitelist))\n pxd_string = ''\n if p.stdint_declarations:\n pxd_string += 'from libc.stdint cimport {:s}\\n\\n'.format(\n ', '.join(p.stdint_declarations))\n pxd_string += str(p)\n return pxd_string",
"def GenPy(mod,fname):\n f = open(fname, 'w')\n title = \"\"\"#\n# This file is generated automatically\n# Author:IAN\n# http://www.iknot.org\n\"\"\"\n f.write(title)\n for i in mod.__dict__.keys():\n s = \"def \" + i + \"():\" + \"\\n\"\n f.write(s)\n s = \" return\"\n f.write(s + \"\\n\")\n f.close()\n kcs_ui.message_noconfirm('py file saved to:%s'%(fname))",
"def generate(self, src_fname: str):\n fname, _ = os.path.splitext(src_fname)\n graph_name, _ = os.path.splitext(os.path.basename(self.pb_file))\n header_fname = '{}.hpp'.format(fname)\n header_snippet = Snippet(\"get_ctx.hpp\")\n header_snippet.template_vars[\"header_guard\"] = \"_{}_H\".format(fname.upper())\n header_snippet.template_vars[\"graph_name\"] = graph_name\n header_snippet.template_vars[\"placeholders\"] = []\n\n composer = Composer()\n container = SnippetContainer(\"get_ctx.cpp\")\n container.template_vars[\"graph_name\"] = graph_name\n container.template_vars[\"placeholders\"] = []\n container.add_header('\"{}\"'.format(header_fname))\n\n print(\"Parsing {}\".format(self.pb_file))\n graph_info, layers = parse_pb(self.pb_file)\n\n # TODO better snippet construction abstraction\n for layer_id, layer in enumerate(layers, 1):\n for op_name in layer:\n op_info = graph_info[op_name]\n op_type = op_info[\"op_type\"]\n if op_type == \"Placeholder\":\n out_tname, _, _ = op_info[\"output_tensor\"][0]\n container.template_vars[\"placeholders\"].append(out_tname)\n header_snippet.template_vars[\"placeholders\"].append(out_tname)\n elif op_type == 'Const':\n for out_tname, out_dtype, _ in op_info[\"output_tensor\"]:\n pre_tname = self._prepare_tensor_name(out_tname)\n idx_fname = \"{}.idx\".format(pre_tname)\n snippet = CreateTensorIdxSnippet(self.embed_data_dir, out_tname,\n idx_fname=idx_fname,\n tf_dtype=out_dtype)\n container.add_snippet(snippet)\n idx_path = os.path.join(self.idx_dir, idx_fname)\n value = op_info[\"output_content\"][out_tname]\n self._save_data(idx_path, value, out_dtype)\n elif op_type == \"Add\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n output, _, _ = op_info[\"output_tensor\"][0]\n tf_dtype = op_info[\"input_tensor\"][0][1]\n snippet = AddOpSnippet(inputs, output, tf_dtype=tf_dtype)\n container.add_snippet(snippet)\n elif op_type == \"ArgMax\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n output, out_dtype, _ = op_info[\"output_tensor\"][0]\n _, in_dtype, _ = op_info[\"input_tensor\"][0]\n snippet = ArgMaxOpSnippet(inputs, output, in_dtype, out_dtype)\n container.add_snippet(snippet)\n elif op_type == \"Dequantize\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n output, out_dtype, _ = op_info[\"output_tensor\"][0]\n snippet = DequantizeOpSnippet(inputs, output, out_dtype)\n container.add_snippet(snippet)\n elif op_type == \"Max\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n output, out_dtype, out_shape = op_info[\"output_tensor\"][0]\n if len(out_shape) == 0: # dirty hack for uTensor\n out_shape = [1]\n snippet = MaxOpSnippet(inputs, output, out_dtype, out_shape)\n container.add_snippet(snippet)\n elif op_type == \"Min\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n output, out_dtype, out_shape = op_info[\"output_tensor\"][0]\n if len(out_shape) == 0: # dirty hack for uTensor\n out_shape = [1]\n snippet = MinOpSnippet(inputs, output, out_dtype, out_shape)\n container.add_snippet(snippet)\n elif op_type == \"QuantizeV2\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n outputs = [tname for tname, _, _ in op_info[\"output_tensor\"]]\n out_dtype = op_info[\"output_tensor\"][0][1]\n snippet = QuantizeV2OpSnippet(inputs, outputs, out_dtype)\n container.add_snippet(snippet)\n elif op_type == \"QuantizedMatMul\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n outputs = [tname for tname, _, _ in op_info[\"output_tensor\"]]\n 
x_dtype = op_info[\"input_tensor\"][0][1]\n w_dtype = op_info[\"input_tensor\"][1][1]\n out_dtype = op_info[\"output_tensor\"][0][1]\n snippet = QuantizedMatMulOpSnippet(inputs, outputs, x_dtype, w_dtype, out_dtype)\n container.add_snippet(snippet)\n elif op_type == \"QuantizedRelu\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n outputs = [tname for tname, _, _ in op_info[\"output_tensor\"]]\n _, in_dtype, _ = op_info[\"input_tensor\"][0]\n _, qout_dtype, _ = op_info[\"output_tensor\"][0]\n out_dtypes = [t[1] for t in op_info[\"output_tensor\"][1:]]\n snippet = QuantizedReluOpSnippet(inputs, outputs, in_dtype, out_dtypes, qout_dtype)\n container.add_snippet(snippet)\n elif op_type == \"RequantizationRange\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n outputs = [tname for tname, _, _ in op_info[\"output_tensor\"]]\n _, out_dtype, _ = op_info[\"output_tensor\"][0]\n snippet = RequantizationRangeOpSnippet(inputs, outputs, out_dtype)\n container.add_snippet(snippet)\n elif op_type == \"Requantize\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n outputs = [tname for tname, _, _ in op_info[\"output_tensor\"]]\n _, qout_dtype, _ = op_info[\"output_tensor\"][0]\n _, range_dtype, _ = op_info[\"output_tensor\"][1]\n snippet = RequantizeOpSnippet(inputs, outputs, qout_dtype, range_dtype)\n container.add_snippet(snippet)\n elif op_type == \"Reshape\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n output, _, _ = op_info[\"output_tensor\"][0]\n snippet = ReshapeOpSnippet(inputs, output)\n container.add_snippet(snippet)\n else:\n raise ValueError(\"unsupported op type in uTensor: {}, try quantizing your graph\".format(op_type))\n if self.debug_cmt:\n comments = [\"<<< Graph Layer {}\".format(layer_id), \n \">>> Graph Layer {}\".format(layer_id+1)]\n cmt_snippet = CommentSnippet(comments)\n container.add_snippet(cmt_snippet)\n composer.add_snippet(container)\n\n print(\"Generate header file: {}\".format(header_fname))\n with open(header_fname, \"w\") as wf:\n wf.write(header_snippet.render())\n print(\"Generate source file: {}\".format(src_fname))\n with open(src_fname, \"w\") as wf:\n wf.write(composer.compose())",
"def mk_include():\n\tos.mkdir('include')\n\tfor f in os.listdir(pg_include_dir_server):\n\t\tif not f.endswith('.h'):\n\t\t\tcontinue\n\t\td = file(os.path.join(pg_include_dir_server, f)).read()\n\t\tif f == 'pg_config.h':\n\t\t\td += '\\n'\n\t\t\td += '#undef ENABLE_NLS\\n'\n\t\t\td += '#undef USE_REPL_SNPRINTF\\n'\n\t\t\td += '#undef USE_SSL\\n'\n\t\tfile(os.path.join('include', f), 'w').write(d)",
"def _make_source(name, init, body):\n code = \"\"\"\n #include <Python.h>\n\n %(body)s\n\n PyMODINIT_FUNC\n PyInit_%(name)s(void) {\n %(init)s\n }\n \"\"\" % dict(\n name=name, init=init, body=body,\n )\n return code",
"def build_from_c_and_cpp_files(extensions):\n for extension in extensions:\n sources = []\n for sfile in extension.sources:\n path, ext = os.path.splitext(sfile)\n if ext in ('.pyx', '.py'):\n if extension.language == 'c++':\n ext = '.cpp'\n else:\n ext = '.c'\n sfile = path + ext\n sources.append(sfile)\n extension.sources = sources",
"def run_cpp(self):",
"def run_python_file(python, file_args, directives=None):\n args = []\n if directives:\n for directive in directives:\n args.extend(('-X', directive))\n args.extend(file_args)\n command = (\n \"import Cython.Build.BuildExecutable as bex; \"\n \"bex.DEBUG = False; \"\n \"bex.build_and_run({args!r})\"\n ).format(args=args)\n run_python(python, command)",
"def generate(options):\n interactive = options['i']\n if interactive:\n generate_interactive(options)\n else:\n generate_rcfile(vars(options['c']), options['rcfile'])",
"def makecxx(self, gen, exe=0):\n services = []\n inits = []\n defs = []\n for serv in self.services:\n defs.append(serv.defs)\n service = cxxService.substitute(component=self.name, service=serv.name,\n parameters=gen.makeArgs(serv),\n body=serv.body, exe=exe)\n streams = []\n for name, typ, dep in serv.instream:\n streams.append(' create_calcium_port(this,(char *)\"%s\",(char *)\"%s\",(char *)\"IN\",(char *)\"%s\");'% (name, typ, dep))\n instream = \"\\n\".join(streams)\n streams = []\n for name, typ, dep in serv.outstream:\n streams.append(' create_calcium_port(this,(char *)\"%s\",(char *)\"%s\",(char *)\"OUT\",(char *)\"%s\");'% (name, typ, dep))\n outstream = \"\\n\".join(streams)\n\n init = initService.substitute(component=self.name, service=serv.name,\n instream=instream, outstream=outstream)\n services.append(service)\n inits.append(init)\n\n CalciumInterface=\"\"\n if self.calciumextendedinterface:\n CalciumInterface=\"#include <CalciumInterface.hxx>\"\n\n return cxxCompo.substitute(component=self.name, module=gen.module.name,\n exe=exe, exe_path=self.exe_path,\n servicesdef=\"\\n\".join(defs),\n servicesimpl=\"\\n\".join(services),\n initservice='\\n'.join(inits),\n CalciumInterface=CalciumInterface)",
"def compile_coffeescript(*files):\r\n if not files:\r\n files = [\"`{}`\".format(coffeescript_files())]\r\n sh(cmd(\r\n \"node_modules/.bin/coffee\", \"--compile\", *files\r\n ))",
"def load_comments(self, pkgfile):\n\n # Note: This has to be called with a Python\n # source file (.py) only!\n\n if not os.path.exists(pkgfile):\n return \"\"\n\n comment = \"\"\n\n try:\n of = open(pkgfile,'rb')\n data = of.read()\n if data:\n # Create code object\n try:\n c = compiler.compile(data,pkgfile,'exec')\n # Get the position of first line of code\n if c:\n lno = c.co_firstlineno\n lnum = 0\n # Read file till this line number\n of.seek(0)\n for line in of:\n comment = \"\".join((comment, line))\n lnum += 1\n if lnum==lno or line==\"\\n\": break\n except SyntaxError:\n pass\n except Exception:\n pass\n of.close()\n except (OSError, IOError, TypeError):\n pass\n\n return comment",
"def CompileWithClosure(js_files, definitions, entry_points, output_file):\n\n cmd = [\n 'java', '-jar',\n './node_modules/google-closure-compiler-java/compiler.jar',\n '--language_out=ES5_STRICT', '--dependency_mode=PRUNE',\n '--js_output_file=%s' % output_file\n ]\n cmd += ['--entry_point=%s' % e for e in entry_points]\n cmd += ['--output_manifest=%s' % ('%s.manifest' % output_file)]\n cmd += [\n '../node_modules/google-closure-library/closure/**.js',\n '!../node_modules/google-closure-library/closure/**_test.js',\n '../node_modules/google-closure-library/third_party/closure/**.js',\n '!../node_modules/google-closure-library/third_party/closure/**_test.js'\n ]\n cmd += js_files\n cmd += definitions\n subprocess.check_call(cmd)",
"def extract_constants(freetds_include=\"sybdb.h\", constants_file=\"bcp_constants.py\"):\n fileno, source_file = mkstemp(suffix=\".c\", text=True)\n write(fileno, \"#include <{}>\".format(freetds_include).encode())\n close(fileno)\n\n fileno, include_directives = mkstemp(suffix=\".txt\")\n close(fileno)\n\n if ON_WINDOWS:\n cmd_template = \"cl /E {includes} {source} > {output}\"\n else:\n cmd_template = \"cpp {includes} '{source}' > '{output}'\"\n\n cmd = cmd_template.format(\n output=normpath(include_directives),\n source=normpath(source_file),\n\n includes=\" \".join(\n \"-I{}\".format(normpath(_include)) for _include in include_dirs\n )\n )\n\n fifo = Popen(cmd, shell=True, stdin=None, stdout=None, stderr=None, close_fds=True)\n fifo.communicate()\n fifo.wait()\n\n remove(source_file)\n\n if fifo.returncode < 0:\n raise Exception(\"Cannot run preprocessor step\")\n\n row_regex = re.compile('[\\r\\n]+')\n field_regex = re.compile('[\\s]+')\n\n with open(include_directives, \"r\") as fd:\n include_paths = list(\n _filename\n for contents in [fd.read()]\n for _row in row_regex.split(contents) if _row.find(freetds_include) > -1\n for _index, _word in enumerate(field_regex.split(_row)) if _index == 2\n for _filename in [_word.strip('\"')] if exists(_filename)\n )\n\n remove(include_directives)\n\n for include_file in include_paths:\n with open(include_file, \"r\") as fd:\n definition_pairs = [\n (_values[1], int(_values[2])) \n for contents in [fd.read()]\n for _row in row_regex.split(contents)\n for _values in [field_regex.split(_row)] if len(_values) == 3 and _values[0] == \"#define\" and _values[2].isdigit()\n ]\n\n if len(definition_pairs):\n with open(constants_file, \"w\") as output_fd:\n output_fd.write(\"\\n\".join(\"%s=%d\" % _row for _row in definition_pairs))\n\n break\n else:\n raise Exception(\"Couldn't find a freetds include file\")",
"def generate_wrapper(self):\n\n # If there is an input file, parse it\n if self.package_info_path is not None:\n info_parser = PackageInfoParser(self.package_info_path,\n self.source_root)\n info_parser.parse()\n self.package_info = info_parser.package_info\n else:\n pass\n\n # Generate a header collection\n self.collect_source_hpp_files()\n\n # Attempt to assign source paths to each class, assuming the containing \n # file name is the class name\n for eachModule in self.package_info.module_info:\n for eachClass in eachModule.class_info:\n for eachPath in self.package_info.source_hpp_files:\n base = ntpath.basename(eachPath)\n if eachClass.name == base.split('.')[0]:\n eachClass.source_file_full_path = eachPath\n if eachClass.source_file is None:\n eachClass.source_file = base\n\n # Attempt to automatically generate template args for each class\n for eachModule in self.package_info.module_info:\n info_generator = CppInfoHelper(eachModule)\n for eachClass in eachModule.class_info:\n info_generator.expand_templates(eachClass, \"class\")\n\n # Generate the header collection\n header_collection_path = self.generate_header_collection()\n\n # Parse the header collection\n self.parse_header_collection(header_collection_path)\n\n # Update the Class and Free Function Info from the parsed code\n self.update_class_info()\n self.update_free_function_info()\n self.update_enum_info()\n\n # Write the modules\n for eachModule in self.package_info.module_info:\n module_writer = CppModuleWrapperWriter(self.global_ns,\n self.source_ns,\n eachModule,\n self.get_wrapper_template(),\n self.wrapper_root)\n module_writer.write()",
"def compile_modules(base, output, source, bind=True):\n return compile_files(base, output, source, bind, amd=True)",
"def make_c_header(self):\n res = \\\n\"\"\"PyThreadState* ___madz_LANG_python_thread_state; //Holds Thread State for this interpreter\nPyObject *___madz_LANG_python_wrapper_module; //Hold Pointer to the _madz.py file representing this plugin\ntypedef struct{{\n{function_pointers}\n}}___madz_LANG_python_TYPE_;\n___madz_LANG_python_TYPE_ ___madz_LANG_python_OUTPUT;\nvoid ___madz_init_imports();\n{fn_dec}\n\n\"\"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n #TODO function_pointers, all same except\n fragments ={\"fn_dec\" : \"\", \"function_pointers\" : \"\"}\n fn = \"\"\"{rettype}{fnname}({args});\\n\"\"\"\n pointer = \"\"\" {prettype} (*{nodename})({args});\\n\"\"\"\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n frg = {\n \"prettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n\n }\n fragments[\"fn_dec\"] += fn.format(**frg)\n fragments[\"function_pointers\"] += pointer.format(**frg)\n if fragments[\"function_pointers\"] == \"\":\n fragments[\"function_pointers\"] = \"uint8_t _madz_empty;\"\n return res.format(**fragments)",
"def pfx2pp(p):\n if p in sys.path:\n return\n sys.path.insert(1, p)",
"def write_probin(prob_param_files, cxx_prefix):\n\n params = []\n\n print(\" \")\n print(f\"write_probdata.py: creating prob_param C++ files\")\n\n # read the parameters defined in the parameter files\n\n for f in prob_param_files:\n err = parse_param_file(params, f)\n if err:\n abort(f\"Error parsing {f}\")\n\n\n # now handle the C++ -- we need to write a header and a .cpp file\n # for the parameters\n\n cxx_base = os.path.basename(cxx_prefix)\n\n ofile = f\"{cxx_prefix}_parameters.H\"\n with open(ofile, \"w\") as fout:\n fout.write(CXX_HEADER)\n\n fout.write(f\" void init_{cxx_base}_parameters();\\n\\n\")\n\n fout.write(\" namespace problem {\\n\\n\")\n\n for p in params:\n if p.dtype == \"string\":\n fout.write(f\" extern std::string {p.name};\\n\\n\")\n else:\n if p.is_array():\n if p.size == \"nspec\":\n fout.write(f\" extern AMREX_GPU_MANAGED {p.get_cxx_decl()} {p.name}[NumSpec];\\n\\n\")\n else:\n fout.write(f\" extern AMREX_GPU_MANAGED {p.get_cxx_decl()} {p.name}[{p.size}];\\n\\n\")\n else:\n fout.write(f\" extern AMREX_GPU_MANAGED {p.get_cxx_decl()} {p.name};\\n\\n\")\n\n fout.write(\" }\\n\\n\")\n\n fout.write(CXX_FOOTER)\n\n # now the C++ job_info tests\n ofile = f\"{cxx_prefix}_job_info_tests.H\"\n with open(ofile, \"w\") as fout:\n for p in params:\n if not p.is_array():\n if p.in_namelist:\n fout.write(p.get_job_info_test())\n\n # now the C++ initialization routines\n ofile = f\"{cxx_prefix}_parameters.cpp\"\n with open(ofile, \"w\") as fout:\n fout.write(f\"#include <{cxx_base}_parameters.H>\\n\")\n fout.write(\"#include <AMReX_ParmParse.H>\\n\")\n fout.write(\"#include <AMReX_REAL.H>\\n\\n\")\n for p in params:\n if p.dtype == \"string\":\n fout.write(f\" std::string problem::{p.name};\\n\\n\")\n else:\n if p.is_array():\n if p.size == \"nspec\":\n fout.write(f\" AMREX_GPU_MANAGED {p.get_cxx_decl()} problem::{p.name}[NumSpec];\\n\\n\")\n else:\n fout.write(f\" AMREX_GPU_MANAGED {p.get_cxx_decl()} problem::{p.name}[{p.size}];\\n\\n\")\n else:\n fout.write(f\" AMREX_GPU_MANAGED {p.get_cxx_decl()} problem::{p.name};\\n\\n\")\n\n fout.write(\"\\n\")\n fout.write(f\" void init_{cxx_base}_parameters() {{\\n\")\n\n\n # now write the parmparse code to get the value from the C++\n # inputs.\n\n # open namespace\n fout.write(\" {\\n\")\n\n # we need access to _rt\n fout.write(\" using namespace amrex;\\n\")\n\n fout.write(f\" amrex::ParmParse pp(\\\"problem\\\");\\n\\n\")\n for p in params:\n if p.is_array():\n size = p.size\n if (size == \"nspec\"):\n size = \"NumSpec\"\n fout.write(f\" for (int n = 0; n < {size}; n++) {{\\n\")\n fout.write(f\" problem::{p.name}[n] = {p.default_format(lang='C++')};\\n\")\n fout.write(f\" }}\\n\")\n else:\n fout.write(f\" {p.get_default_string()}\")\n\n if p.in_namelist:\n fout.write(f\" {p.get_query_string('C++')}\")\n fout.write(\"\\n\")\n fout.write(\" }\\n\")\n\n fout.write(\" }\\n\")",
"def pyo():\n local('python -O -m compileall .')",
"def pyo():\n local('python -O -m compileall .')",
"def generate(env):\n\n indent = find_indent()\n\n generator = lambda source, target, env, for_signature: pp_gen(source,\n target,\n env, indent)\n\n # Only handle C for now\n preprocess = Builder(generator=generator, suffix=\"_pp.c\",\n emitter=preprocess_emitter, src_suffix=\".c\")\n\n env.Append(BUILDERS={\"Preprocess\":preprocess})",
"def build_cffi():\r\n print_banner(\"Building CFFI Module\")\r\n ffi = cffi.FFI()\r\n\r\n this_dir = pathlib.Path().resolve()\r\n h_file_name = this_dir / \"cmult.h\"\r\n with open(h_file_name) as h_file:\r\n # cffi does not like our preprocessor directives, so we remove them\r\n lns = h_file.read().splitlines()\r\n flt = filter(lambda ln: not re.match(r\" *#\", ln), lns)\r\n flt = map(lambda ln: ln.replace(\"EXPORT_SYMBOL \", \"\"), flt)\r\n ffi.cdef(str(\"\\n\").join(flt))\r\n\r\n ffi.set_source(\r\n \"cffi_example\",\r\n # Since we are calling a fully built library directly no custom source\r\n # is necessary. We need to include the .h files, though, because behind\r\n # the scenes cffi generates a .c file which contains a Python-friendly\r\n # wrapper around each of the functions.\r\n '#include \"cmult.h\"',\r\n # The important thing is to include the pre-built lib in the list of\r\n # libraries we are linking against:\r\n libraries=[\"cmult\"],\r\n library_dirs=[this_dir.as_posix()],\r\n extra_link_args=[\"-Wl,-rpath,.\"],\r\n )\r\n\r\n ffi.compile()\r\n print(\"* Complete\")",
"def run_cxx(snippet: str):\n\n # if the snippet is a single token, assume we should print it\n if re.match(r\"\\w+$\", snippet):\n snippet = f\"std::cout << {snippet} << '\\\\n';\"\n\n cxx = os.environ.get(\"CXX\", \"c++\")\n run(cxx, \"c++\", CXX_PRELUDE, CXX_CODA, snippet)"
]
| [
"0.5898096",
"0.5688383",
"0.54618686",
"0.53356934",
"0.53198254",
"0.5304615",
"0.5217018",
"0.5214677",
"0.5185232",
"0.51728207",
"0.5164098",
"0.51559216",
"0.50681734",
"0.50602496",
"0.5058907",
"0.49814364",
"0.49792537",
"0.49706864",
"0.4962642",
"0.49357608",
"0.4926437",
"0.49218613",
"0.49218",
"0.4920324",
"0.48909065",
"0.48899963",
"0.48899963",
"0.4882336",
"0.4878362",
"0.48653576"
]
| 0.6694012 | 0 |
get_error function computes the error for a line passing through a given set of points (x, y) | def get_error(intercept, slope, points):
error_value = 0
for i in range(0, len(points)):
error_value += (points[i].y - (slope * points[i].x + intercept)) ** 2
return error_value / float(len(points)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def error(line, data): # error function\n # Metric: Sum of squared Y-axis differences\n err = np.sum((data[:, 1] - (line[0] * data[:, 0] + line[1])) ** 2)\n return err",
"def error(Y, X):\n return (Y - X) ** 2",
"def _getErrorFunction(self):\n\n\t\treturn (self._setpoint - self._current)",
"def error(self,pt,eta):\n return self._data[self.__ptBin(pt)][self.__etaBin(eta)][1]",
"def cgd_linesearch(x, error0, direction, error_fcn, h):\n\n # FIXME: Add tests\n\n x = np.asarray(x)\n direction = np.asarray(direction)\n h = np.asarray(h)\n\n direction_n = direction / np.linalg.norm(direction, ord=2)\n error_list = [error0]\n stepsize = h\n maxSteps = 6\n factor = np.zeros(1)\n\n for iStep in range(1, maxSteps):\n\n factor = np.concatenate([factor, [2**(iStep-1)]])\n xc = x.copy() + direction_n * stepsize * factor[iStep]\n error, xc = error_fcn(xc) # xc may be changed due to limits\n error_list.append(error)\n\n if error_list[-1] >= error_list[-2]: # end of decline\n if iStep == 1: # no success\n step = 0\n error1 = error0\n\n else: # parabolic\n p = np.polyfit(factor, error_list, 2)\n fx = np.arange(factor[0], factor[-1] + .1, .1)\n fy = np.polyval(p, fx)\n idx = np.argmin(fy)\n fxm = fx[idx]\n xcm = x.copy() + direction_n * stepsize * fxm\n error1, xcm = error_fcn(xcm) # xc may be changed due to limits\n\n if error1 < error_list[iStep - 1]:\n xc = xcm.copy()\n step = fxm\n\n else: # finding Minimum did not work\n xc = x.copy() + direction_n * stepsize * factor[iStep-1] # before last point\n error1, xc = error_fcn(xc) # recalculate error in order to check for limits again\n step = factor[iStep-1]\n\n return xc, error1, step\n\n step = factor[iStep]\n error1 = error_list[iStep]\n\n return xc, error1, step",
"def error(self, trainset: ([], [])):\n # MSE = Σ | d – y |^2 / n\n error_sum = 0.0\n for index, example in enumerate(trainset):\n # | d – y |^2\n output = self.activate(example[0])\n\n target = example[1][0]\n\n error = target - output\n error_sum += error ** 2\n\n # Σ |error_sum| / n\n error_sum = error_sum / len(trainset)\n return error_sum",
"def error(self) -> Sequence[float]:\n errors = []\n for line, sign in zip(self.marker_lines, (-1, 1)):\n if self._orientation == Orientation.UP_DOWN:\n picket_pos = self._fit(line.center.y)\n mlc_pos = line.center.x\n else:\n picket_pos = self._fit(line.center.x)\n mlc_pos = line.center.y\n if (\n self._separate_leaves\n ): # offset the picket position by the DLG and nominal gap\n mag_factor = self._image.sid / 1000\n picket_pos += (\n sign * self._nominal_gap_mm * mag_factor / 2 * self._image.dpmm\n )\n errors.append((mlc_pos - picket_pos) / self._image.dpmm)\n return errors",
"def get_error(self, params):\n return self.endog - self.predict(params)",
"def error_function(x):\n T = [9.60497373987051638749E0,\n 9.00260197203842689217E1,\n 2.23200534594684319226E3,\n 7.00332514112805075473E3,\n 5.55923013010394962768E4]\n U = [3.35617141647503099647E1,\n 5.21357949780152679795E2,\n 4.59432382970980127987E3,\n 2.26290000613890934246E4,\n 4.92673942608635921086E4]\n\n if np.abs(x) > 1.0:\n return 1.0 - error_function_complemented(x)\n else:\n z = x * x\n y = x * pol_evl(z, T, 4) / p1_evl(z, U, 5)\n return y",
"def calc_errors(problem, points):\n original = problem.getp()\n try:\n ret = plugin.calc_errors(problem, points)\n except:\n import traceback\n print(\"error calculating distribution on model\")\n traceback.print_exc()\n ret = None\n finally:\n problem.setp(original)\n return ret",
"def error_func(x, a0, a1, a2, a3):\n return (a0 / 2) * sp.special.erfc((a1 - x) / a2) + a3",
"def get_error(self, output,target):\n return [target[i]-output[i] for i in range(len(output))]",
"def get_line_to(self, provided_point):\n\n \"\"\"Calculate slope\"\"\"\n a = (provided_point.y - self.y) / (provided_point.x - self.x)\n\n \"\"\"Calculate b\"\"\"\n b = self.y - a * self.x\n\n return (a,b)",
"def _calculate_error(self, targets):\n\n def hist(d):\n f, v = histogram(array(d))\n i = len(f) if argmax(f) == len(f) - 1 else argmax(f)\n return v[i]\n\n devxs, devys = list(zip(*[r.dev_centroid for r in targets]))\n\n if len(targets) > 2 and self.use_histogram:\n dx = hist(devxs)\n dy = hist(devys)\n else:\n\n def avg(s):\n return sum(s) / len(s)\n\n dx = avg(devxs)\n dy = avg(devys)\n\n return -dx, dy",
"def linpol_error(self):\n return self._linpol_error",
"def get_line(start, end): \n # Setup initial conditions\n x1, y1 = start\n x2, y2 = end\n dx = x2 - x1\n dy = y2 - y1\n \n # Determine how steep the line is\n is_steep = abs(dy) > abs(dx)\n \n # Rotate line\n if is_steep:\n x1, y1 = y1, x1\n x2, y2 = y2, x2\n \n # Swap start and end points if necessary and store swap state\n swapped = False\n if x1 > x2:\n x1, x2 = x2, x1\n y1, y2 = y2, y1\n swapped = True\n \n # Recalculate differentials\n dx = x2 - x1\n dy = y2 - y1\n \n # Calculate error\n error = int(dx / 2.0)\n ystep = 1 if y1 < y2 else -1\n \n # Iterate over bounding box generating points between start and end\n y = y1\n points = []\n for x in range(x1, x2 + 1):\n coord = np.array((y, x)) if is_steep else np.array((x, y))\n points.append(coord)\n error -= abs(dy)\n if error < 0:\n y += ystep\n error += dx\n \n # Reverse the list if the coordinates were swapped\n if swapped:\n points.reverse()\n return np.array(points)",
"def calculate_error(self, estimated_x, estimated_y):\n\n return np.sqrt((self.ball_x - estimated_x) ** 2 + (self.ball_y - estimated_y) ** 2)",
"def fit_line(data, error_func):\n\n # Generate initial guess for line model\n l = np.float32([0, np.mean(data[:, 1])]) # slope = 0, intercept = mean(y values)\n\n # Plot initial guess (optional)\n x_ends = np.float32([-5, 5])\n plt.plot(x_ends, l[0] * x_ends + l[1], 'm--', linewidth = 2.0, label = 'Initial guess')\n\n # Call optimizer to minimize error function\n result = spo.minimize(error_func, l, args = (data, ), method = 'SLSQP', options = {'disp': True})\n return result.x",
"def error(beta_0: float, beta_1: float, x_i: float, y_i: float) -> float:\n return predict(beta_0, beta_1, x_i) - y_i",
"def get_error_estimates(self, Y, M1, M2=None):\n # First K0 and K1\n Mminus = M1\n if M2 is None:\n Mplus = M1\n else:\n Mplus = M2\n if self.Cp0 != 0 and self.Cp1 != 0 and self.Cm != 0:\n Cp0 = self.Cp0\n Cp1 = self.Cp1\n Cm = self.Cm\n else:\n PP = self.principal_part()\n Cmax = max(PP.values())\n Kmax = 0\n for t in PP.keys():\n if isinstance(t, tuple):\n (c, l) = t\n elif isinstance(t, (int, Integer)):\n (c, l) = rn_from_D(self._space.multiplier(), t)\n else:\n raise ValueError(\"Incorrect principal part: t={0}\".format(t))\n if c in self._space.multiplier().D():\n tmp = l + self._space.multiplier().Qv[self._space.index_set().index(c)]\n elif c in range(len(self._space.multiplier().Qv)):\n tmp = l + self._space.multiplier().Qv[c]\n else:\n raise ValueError(\"Incorrect principal part: c,l={0},{1}\".format(c, l))\n if(abs(tmp) > Kmax):\n Kmax = abs(tmp)\n [Cp0, Cp1] = self._space.get_Cp(Cmax)\n Cm = self._space.get_Cm(Kmax, Cmax)\n self.Cp0 = Cp0\n self.Cp1 = Cp1\n self.Cm = Cm\n\n fak = len(self._space.index_set())\n # print \"Cp0,Cp1,Cm=\",Cp0,Cp1,Cm\n # print \"fak=\",fak\n\n er1 = fak * self._space.err_est_vv_hwmf_neg(Y, Mminus, Cm)\n er2 = fak * self._space.err_est_vv_hwmf_pos(Y, Mplus, Cp0, Cp1)\n return [er1, er2]",
"def relative_error(x, y):\n return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))",
"def rms_error(self, X, y) :\n ### ========== TODO : START ========== ###\n # part h: compute RMSE\n n, d = X.shape\n error = np.sqrt(self.cost(X,y)/n)\n ### ========== TODO : END ========== ###\n return error",
"def _error(self, xy_tuple, coord_pairs, rcut_sq, kl_pairs):\n # set up target Bravais lattice\n kx1 = kl_pairs[:,0] * xy_tuple[0]\n lx2 = kl_pairs[:,1] * xy_tuple[2]\n ky1 = kl_pairs[:,0] * xy_tuple[1]\n ly2 = kl_pairs[:,1] * xy_tuple[3]\n bravais_pairs = np.vstack((kx1 + lx2, ky1 + ly2)).transpose()\n \n # get squared distance between every Bravais point and every coord point\n # sq_dists has shape (n_bravais_pairs, n_coord_pairs)\n sq_dists = spatial.distance.cdist(bravais_pairs, coord_pairs,\n 'sqeuclidean')\n # get min dist for each coord\n min_sq_dists = np.min(sq_dists, axis=0)\n \n # apply error function\n scaled_sq_dists = min_sq_dists / rcut_sq\n errors = np.where(scaled_sq_dists < 1.0, scaled_sq_dists, 1.0)\n error = np.mean(errors)\n \n # error = 0\n # for coord in coords:\n # find closest Bravais point to each actual particle\n # closest_dist_sq = min([(coord.x-bp.x)**2 + (coord.y-bp.y)**2 for bp in bravais])\n # piecewise error function\n # error += min(closest_dist_sq / rcut_sq, 1.0)\n # error /= len(coords)\n # error = sum([min(min([(coord.x-bp.x)**2 + (coord.y-bp.y)**2 for bp in bravais]) / rcut_sq, 1.0)]) / len(coords)\n \n return error",
"def error(self, X, y):\n ans = self.predict(X)\n return np.sum(np.logical_not(np.equal(ans,y))) / len(X)",
"def error_poly(c, data):\n\n #Metric: Sum of squared y-axis differences\n err = np.sum((data[:,1] - np.polyval(c, data[:, 0])) ** 2)\n return err",
"def error():\n\n # Make data set using errors\n dataset_a = DataSet(oscillating,error_y=oscillating_error,plot='error_bar',label='Data and error')\n dataset_a.set_error(interval=5,width=1,cap=2)\n dataset_b = DataSet(oscillating,plot='error_shade',error_y=oscillating_error,order=0,colour='lightgrey',label='Error')\n dataset_c = DataSet(oscillating,plot='line',order=1,colour='firebrick',label='Data')\n\n # Make line graph with error bars\n plot_bar = Plot()\n plot_bar.set_legend(legend=True)\n plot_bar.add_dataset(dataset_a)\n plot_bar.plot()\n plot_bar.save(name='./figures/2d_error_bar',fmt='png')\n plot_bar.display()\n\n # Make line graph with shaded errors\n plot_shade = Plot()\n plot_shade.set_legend(legend=True,location='upper left')\n plot_shade.add_dataset(dataset_b)\n plot_shade.add_dataset(dataset_c)\n plot_shade.plot()\n plot_shade.save(name='./figures/2d_error_shade',fmt='png')\n plot_shade.display()",
"def _compute_error(self,expected_out,actual_out,error_func):\n\n error = error_func(expected_out,actual_out)\n return error",
"def calculate_reproj_errors(projPoints, points_2d):\n assert len(projPoints) == len(points_2d)\n delta = []\n for i in range(len(projPoints)):\n delta.append(abs(projPoints[i] - points_2d[i]))\n\n average_delta = sum(delta)/len(delta) # 2-vector, average error for x and y coord\n average_delta = (average_delta[0] + average_delta[1])/2 # average error overall\n\n return average_delta, delta",
"def line_points(start, end):\n # Setup initial conditions\n x1, y1 = start.astuple()\n x2, y2 = end.astuple()\n dx = x2 - x1\n dy = y2 - y1\n \n # Determine how steep the line is\n is_steep = abs(dy) > abs(dx)\n \n # Rotate line\n if is_steep:\n x1, y1 = y1, x1\n x2, y2 = y2, x2\n \n # Swap start and end points if necessary and store swap state\n swapped = False\n if x1 > x2:\n x1, x2 = x2, x1\n y1, y2 = y2, y1\n swapped = True\n \n # Recalculate differentials\n dx = x2 - x1\n dy = y2 - y1\n \n # Calculate error\n error = int(dx / 2.0)\n ystep = 1 if y1 < y2 else -1\n \n # Iterate over bounding box generating points between start and end\n y = y1\n points = []\n for x in range(x1, x2 + 1):\n coord = Int2(y, x) if is_steep else Int2(x, y)\n points.append(coord)\n error -= abs(dy)\n if error < 0:\n y += ystep\n error += dx\n \n # Reverse the list if the coordinates were swapped\n if swapped:\n points.reverse()\n return points",
"def get_accelerometer_error(self) -> (Iterable[float], Iterable[float]):\n acceleration_error = self.compute_accelerometer_error()\n upper_error = [None] * self.num_steps\n lower_error = [None] * self.num_steps\n\n upper_error[0] = lower_error[0] = self.initial_value\n upper_error[1] = lower_error[1] = (\n self.time_step * self.initial_velocity + self.initial_value)\n\n index = self.fill_values(upper_error)\n self.fill_values(lower_error)\n if index >= self.num_steps - 1:\n return (upper_error[index:], lower_error[index:])\n\n if index > 1:\n self.retrieve_value(\n index + 1, upper_error, acceleration_error=acceleration_error)\n self.retrieve_value(\n index + 1, lower_error, acceleration_error=-acceleration_error)\n else:\n self.retrieve_value(index + 1, upper_error)\n self.retrieve_value(index + 1, lower_error)\n self.retrieve_value(\n index + 2, upper_error, acceleration_error=acceleration_error)\n self.retrieve_value(\n index + 2, lower_error, acceleration_error=-acceleration_error)\n\n self.retrieve_value(self.num_steps - 1, upper_error)\n self.retrieve_value(self.num_steps - 1, lower_error)\n\n return (upper_error[index:], lower_error[index:])"
]
| [
"0.7197733",
"0.6690909",
"0.6569207",
"0.64633626",
"0.6383084",
"0.63033307",
"0.61226016",
"0.6122066",
"0.60519934",
"0.59815365",
"0.5977717",
"0.5972937",
"0.5963946",
"0.59486765",
"0.5942314",
"0.593651",
"0.59318626",
"0.5928876",
"0.5925868",
"0.5876266",
"0.5866912",
"0.58051085",
"0.5794799",
"0.57807916",
"0.5780744",
"0.57782936",
"0.57756937",
"0.57746905",
"0.57743",
"0.5768627"
]
| 0.74685186 | 0 |
Create new instance of Edge(id, start_node, end_node, cost, reverse_cost, reversed) | def __new__(_cls, id, start_node, end_node, cost, reverse_cost, reversed=False):
return tuple.__new__(_cls, (id, start_node, end_node, float(cost), float(reverse_cost), reversed)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_edge(self, start_node, label, end_node, properties=None, **kwargs):\r\n\t\tif properties is None:\r\n\t\t\tproperties = {}\r\n\t\tedge = Edge(self._nextid, start_node, label, end_node, properties, **kwargs)\r\n\t\tself._edges[self._nextid] = edge\r\n\t\tself._nextid += 1\r\n\t\treturn edge",
"def reversed_edge(self):\n reverse = Edge(id=self.id,\n start_node=self.end_node,\n end_node=self.start_node,\n cost=self.reverse_cost,\n reverse_cost=self.cost,\n reversed=not self.reversed)\n return reverse",
"def MakeEdge(self, *args):\n return _ShapeBuild.ShapeBuild_Edge_MakeEdge(self, *args)",
"def create(cls, outV, inV, *args, **kwargs):\r\n return super(Edge, cls).create(outV, inV, *args, **kwargs)",
"def from_anchor(\n cls,\n anchor: Vertex,\n other: Vertex,\n label: str = \"\",\n direction: str = \"out\",\n ) -> \"Edge\":\n has_direction = direction != \"none\"\n\n if has_direction is True:\n if direction == \"out\":\n start = anchor\n end = other\n elif direction == \"in\":\n start = other\n end = anchor\n else:\n raise ValueError(\"Direction is either 'in', 'out' or 'none'\")\n else:\n # No need to sort here, the __init__ do it already.\n start = anchor\n end = other\n\n edge = cls(start, end, label=label, has_direction=has_direction)\n\n edge._anchor = anchor\n edge._other = other\n edge._direction = direction\n\n return edge",
"def __init__(\n self,\n start: Vertex,\n end: Vertex,\n label: str = \"\",\n has_direction: bool = True,\n ):\n if not start.is_inserted or not end.is_inserted:\n raise VertexInsertionException(\n \"Both vertices must be inserted to make an Edge\"\n )\n\n if has_direction is True:\n self._start = start\n self._end = end\n else:\n # If the Edge has no direction, the start and end vertices are\n # sorted by place for consistant use.\n self._start, self._end = sorted(\n (start, end), key=lambda v: v.place\n )\n self._label = label\n self._has_direction = has_direction\n\n self._anchor: Optional[Vertex] = None\n self._other: Optional[Vertex] = None\n self._direction: Optional[str] = None\n\n self.is_inserted = False",
"def __init__(self, name, edge, start_node, end_node, pipe_model,\n allow_flow_reversal,\n temperature_driven, repr_days=None):\n\n self.logger = logging.getLogger('modesto.Edge')\n self.logger.info('Initializing Edge {}'.format(name))\n\n self.repr_days = repr_days\n\n self.name = name\n self.edge = edge\n\n self.start_node = start_node\n self.end_node = end_node\n self.length = self.get_length()\n\n self.temperature_driven = temperature_driven\n\n self.pipe_model = pipe_model\n self.pipe = self.build(pipe_model,\n allow_flow_reversal) # TODO Better structure possible?",
"def edge(cls, edge):\n return cls(Lnk.EDGE, int(edge))",
"def make_edge(self, a, b):\n try: e = self.G.new_edge(a, b)\n except: return self.G.new_edge(a,b)\n\n try: self.G.set_edge_attribute(e, \"arrow\", \"true\")\n except: return self.G.new_edge(a,b)\n\n try: self.G.set_edge_attribute(e, \"spline\", \"false\")\n except: return self.G.new_edge(a,b)\n return e",
"def _create_edge_ist(self) -> EdgeList:\r\n return EdgeList(self)",
"def e(src, dst):\n edge = pydot.Edge(src, dst)\n graph.add_edge(edge)",
"def test_create_edge(self):\n n1, n2 = Node('a'), Node('b')\n n1 | n2\n self.assertEqual(n1.eout, [Edge(n1, n2)])\n self.assertEqual(n1.ein, [])\n self.assertEqual(n2.ein, [Edge(n1, n2)])\n self.assertEqual(n2.eout, [])",
"def add_edge(self, current_id=None, in_vertex_id=None, out_vertex_id=None, label=None, properties=None):\n if current_id is None:\n done = False\n while not done:\n next_id = self.get_next_id()\n\n if next_id not in self.edges:\n current_id = next_id\n done = True\n else:\n if current_id in self.edges:\n raise Exception('Edge with ID Already Exist')\n\n in_vertex = self.vertices.get(in_vertex_id)\n out_vertex = self.vertices.get(out_vertex_id)\n\n if out_vertex is None or in_vertex is None:\n raise Exception('In_vertex or out_vertex not found')\n\n current_edge = Edge(self, current_id,\n label=label,\n in_vertex=in_vertex,\n out_vertex=out_vertex,\n properties=properties)\n\n self.edges[current_id] = current_edge\n in_vertex.add_out_edge(label, current_edge)\n out_vertex.add_in_edge(label, current_edge)\n return current_edge",
"def connect(\n self, *, start: Node, verb: str, end: Node, data: dict = None\n ) -> Edge:",
"def __init__(self, startVertex, endVertex, edgeWeight):\n\n self.startVertex = startVertex\n self.endVertex = endVertex\n self.edgeWeight = edgeWeight",
"def __init__(self, node_a, node_b, id, edge_value=\"null\"):\n self.__node_a = node_a\n self.__node_b = node_b\n self.__edge_value = edge_value\n self.__id = id",
"def reverse(edge):\n return Edge(orig=edge.dest, dest=edge.orig, orig_id=edge.dest_id, dest_id=edge.orig_id)",
"def __edgeRouter(self):\r\n def getEndpoint(nodeTuple, pointList, direction, isReversedEdge):\r\n \"\"\" Gets the nearest arrow endpoint. Handles edge reversal \"\"\"\r\n if((direction == 'start' and not isReversedEdge)\r\n or (direction == 'end' and isReversedEdge)): \r\n endNode = nodeTuple[0]\r\n if(isReversedEdge):\r\n ix = -2\r\n iy = -1\r\n else:\r\n ix = 0\r\n iy = 1\r\n else: \r\n endNode = nodeTuple[1]\r\n if(isReversedEdge):\r\n ix = 0\r\n iy = 1\r\n else:\r\n ix = -2 \r\n iy = -1 \r\n \r\n # Is it connected to a named port!?!\r\n if(endNode.isConnectedByNamedPort(edgeObject)):\r\n handler = endNode.getConnectedByNamedPortHandler(nodeTuple[2]) \r\n return dc.coords(handler)[:2]\r\n \r\n # Not a named port...\r\n return list(endNode.getClosestConnector2Point( endNode, pointList[ix], \r\n pointList[iy])) \r\n \r\n \r\n \r\n #todo: improve method for spline arrows + add comments + optimize?\r\n print '----------------Dummy Edge Routing-----------------'\r\n for dummyEdge in NodeWrapper.ID2LayerEdgeDict.keys():\r\n \r\n dummyList = NodeWrapper.ID2LayerEdgeDict[dummyEdge]\r\n dummyNode = dummyList[0]\r\n dummyChild = dummyNode.children.keys()[0]\r\n linkFlagList = dummyNode.children[dummyChild]\r\n \r\n # Real nodes at start/end of the edge\r\n edgeSourceNode = dummyNode.parents.keys()[0]\r\n edgeSourceNode = edgeSourceNode.getASGNode().graphObject_\r\n dummyNode = dummyList[-1]\r\n edgeTargetNode = dummyNode.children.keys()[0]\r\n #print 'Dummy edge number', dummyEdge,\r\n #print dummyList[0].parents.keys()[0].getName(), edgeTargetNode.getName()\r\n edgeTargetNode = edgeTargetNode.getASGNode().graphObject_\r\n nodeTuple = [edgeSourceNode, edgeTargetNode, None]\r\n \r\n # Some edges are internally reversed to break cycles, when drawing\r\n # this must be taken into account\r\n isReversedEdge = False\r\n edgesToRoute = []\r\n for linkNode, isReversed in linkFlagList:\r\n edgesToRoute.append(linkNode)\r\n if(isReversed):\r\n isReversedEdge = True\r\n \r\n # Get all the points the edge must pass through (sorted by layer order)\r\n dummyList.sort(lambda a, b: cmp(a.getLayer(), b.getLayer()))\r\n if(isReversedEdge):\r\n dummyList.reverse()\r\n sortedDummyRouteList = []\r\n for node in dummyList:\r\n sortedDummyRouteList += node.getEdgePosition()\r\n \r\n # Set the coordinates of the edge directly \r\n # This is complicated by the fact that AToM3 treats edges as two\r\n # segments that join poorly (for spline arrows)\r\n for edgeObject in edgesToRoute: \r\n dc = edgeObject.graphObject_.dc\r\n linkObj = edgeObject.graphObject_ \r\n tag = linkObj.tag\r\n \r\n if(isReversedEdge):\r\n inPoint = dc.coords( tag + \"2ndSeg0\" )[:2]\r\n outPoint = dc.coords( tag + \"1stSeg0\" )[:2]\r\n else:\r\n inPoint = dc.coords( tag + \"1stSeg0\" )[:2]\r\n outPoint = dc.coords( tag + \"2ndSeg0\" )[:2]\r\n \r\n #print 'Dummy route', sortedDummyRouteList\r\n numPoints = len(sortedDummyRouteList) / 2\r\n # Add 2 extra control points for odd case (to make splines nice)\r\n if(numPoints % 2 == 1):\r\n if(numPoints == 1):\r\n center = sortedDummyRouteList\r\n else:\r\n start = sortedDummyRouteList[:numPoints - 1]\r\n end = sortedDummyRouteList[numPoints + 1:]\r\n center = sortedDummyRouteList[numPoints - 1:numPoints + 1]\r\n \r\n if(not isReversedEdge):\r\n newMid1 = [center[0], center[1] - 20]\r\n newMid2 = [center[0], center[1] + 20]\r\n else:\r\n newMid2 = [center[0], center[1] - 20]\r\n newMid1 = [center[0], center[1] + 20]\r\n \r\n \r\n if(numPoints == 1):\r\n sortedDummyRouteList = newMid1 + center + newMid2 
\r\n else:\r\n sortedDummyRouteList = start + newMid1 + center + newMid2 + end\r\n centerIndex = numPoints - 1 + 2\r\n \r\n # Add 1 extra control point for even case (to make splines nice)\r\n else:\r\n start = sortedDummyRouteList[:numPoints]\r\n end = sortedDummyRouteList[numPoints:]\r\n center = [start[-2] + (end[0] - start[-2]) / 2, \r\n start[-1] + (end[1] - start[-1]) / 2]\r\n sortedDummyRouteList = start + center + end \r\n centerIndex = numPoints\r\n \r\n # Now I know where the center is... so lets move the center object\r\n # Is the edge object a hyperlink?\r\n if(len(edgeObject.in_connections_ + edgeObject.out_connections_) > 2):\r\n fromObjs = []\r\n for semObj in edgeObject.in_connections_:\r\n fromObjs.append(semObj.graphObject_)\r\n toObjs = []\r\n for semObj in edgeObject.out_connections_:\r\n toObjs.append(semObj.graphObject_)\r\n optimizerHyperLink(dc, linkObj, fromObjs, toObjs, 0, 0, 0, center )\r\n continue\r\n \r\n else:\r\n linkObj.moveTo(* center)\r\n \r\n # Go through the 2 segments in the link\r\n nodeTuple[2] = edgeObject\r\n for connTuple in linkObj.connections:\r\n itemHandler = connTuple[0]\r\n direction = connTuple[1]\r\n \r\n if( direction ): \r\n inPoint = getEndpoint(nodeTuple, sortedDummyRouteList,\r\n 'start', isReversedEdge)\r\n\r\n segCoords = inPoint + sortedDummyRouteList[:centerIndex+2]\r\n else: \r\n outPoint = getEndpoint(nodeTuple, sortedDummyRouteList,\r\n 'end', isReversedEdge) \r\n segCoords = sortedDummyRouteList[centerIndex:] + outPoint\r\n segCoords = self.__reverseCoordList(segCoords)\r\n \r\n # Applies the changed coords to the canvas\r\n dc.coords( * [itemHandler] + segCoords ) \r\n \r\n # This may change the associated link drawings: \r\n # move them to the new point \r\n if( direction ):\r\n linkObj.updateDrawingsTo(inPoint[0], inPoint[1], itemHandler, \r\n segmentNumber=1)\r\n else:\r\n linkObj.updateDrawingsTo(outPoint[0], outPoint[1], itemHandler, \r\n segmentNumber=2)",
"def add_edge(self, src, dest, cost=0):\n if src not in self.vertList:\n self.numVertices += 1\n self.vertList[src] = Vertex(src)\n if dest not in self.vertList:\n self.numVertices += 1\n self.vertList[dest] = Vertex(dest)\n self.vertList[src].add_neighbor(self.vertList[dest], cost)",
"def create_edge(self, topogramId, source, target, name=None, data={}):\n assert type(data) is dict\n assert type(source) is str\n assert type(source) is str\n if name : assert type(name) is str\n print name\n el = {\n \"id\" : name,\n \"source\" : source,\n \"target\" : target\n }\n for k in data :\n el[k] = data[k]\n edge = { \"element\" : el, \"data\" : data }\n return self.make_request(\"POST\", \"edges\", { \"topogramId\" : topogramId, \"edges\" : [ edge ] })",
"def add_edge(self, frm, to, cost = {}):\n \n if frm not in self.vert_dict:\n raise ValueError('Node not found. Please add it using add_vertex method')\n if to not in self.vert_dict:\n raise ValueError('Node not found. Please add it using add_vertex method')\n \n src_node = self.vert_dict[frm]\n dst_node = self.vert_dict[to]\n \n if src_node.channel != dst_node.channel:\n raise ValueError('Edge should be from nodes using the same channel')\n \n src_node.add_neighbor(to, cost)\n dst_node.add_neighbor(frm, cost)",
"def add_edge(self, node_from, node_to, weight=0):\n self.edges.append(Edge(node_from, node_to, weight, self.directed))\n self.nodes.update([node_from, node_to])\n return self",
"def create_edge(src_node: Node, dst_node: Node, out_port: int = 0, in_port: int = 0, edge_attrs: dict = None):\n # edges must belong to the same graph\n assert src_node.graph is dst_node.graph\n graph = src_node.graph\n\n if edge_attrs is None:\n edge_attrs = dict()\n else:\n edge_attrs = edge_attrs.copy()\n edge_attrs.update({'in': in_port, 'out': out_port, 'in_attrs': ['in'], 'out_attrs': ['out'],\n 'data_attrs': ['fw_tensor_debug_info']})\n\n graph.add_edges_from([(src_node.id, dst_node.id, edge_attrs)])",
"def new_edge(self, parent, child):\n self.add_edge( Edge(parent,child) )",
"def add_edge(self,\r\n source: Node,\r\n target: Node,\r\n weight: float = 1,\r\n save_to_cache: bool = True) -> None:\r\n if not isinstance(source, Node):\r\n raise TypeError(\"Invalid source: expected Node instance, got {}.\".format(source))\r\n if not isinstance(target, Node):\r\n raise TypeError(\"Invalid target: expected Node instance, got {}.\".format(target))\r\n\r\n if source.index == target.index or\\\r\n self.get_edge_by_index(source.index, target.index) is not None:\r\n return\r\n\r\n self._edges[(source.index, target.index)] = Edge(source, target, weight)\r\n\r\n if save_to_cache:\r\n should_commit: bool = False\r\n database: GraphDatabaseInterface = self._graph.database\r\n db_edge: DBEdge = database.Edge.find_by_name(source.name, target.name)\r\n if db_edge is None:\r\n database.session.add(database.Edge(source.name, target.name, weight))\r\n should_commit = True\r\n elif db_edge.weight != weight:\r\n db_edge.weight = weight\r\n should_commit = True\r\n\r\n if should_commit:\r\n database.session.commit()",
"def define_edge(self):\n\n self.canvas_edge = Line(\n points=[\n self.canvas_nodes[0].pos[0] + self.nodesize[0] / 2,\n self.canvas_nodes[0].pos[1] + self.nodesize[1] / 2,\n self.canvas_nodes[1].pos[0] + self.nodesize[0] / 2,\n self.canvas_nodes[1].pos[1] + self.nodesize[1] / 2\n ],\n joint='round',\n cap='round',\n width=3\n )",
"def addEdge(self, startNode, endNode):\n if self.directedOrUndirected == 'undirected':\n # no need to check if edge already exists because we're using\n # defaultdict\n\n self.graph[startNode].append(endNode)\n self.graph[endNode].append(startNode)\n else:\n self.graph[startNode].append(endNode)",
"def add_edge(self, src, dst, cost):\n self.E = self.E + 1\n self.adjacency_list[src].append((dst, cost))",
"def create_edge(from_node, to_node, label, edge_schema, add_ts=False):\n edge_record = {}\n edge_record[\"label\"] = label\n edge_record[\"fromNode\"] = from_node\n edge_record[\"toNode\"] = to_node\n\n # Add a timestamp to the properties if the caller requested it.\n if add_ts:\n edge_record[\"properties\"] = {}\n edge_record[\"properties\"][\"timestamp\"] = \\\n Utils._get_time_milliseconds()\n\n return edge_record",
"def get_edge(self, from_, to):\n pass"
]
| [
"0.6723898",
"0.6434462",
"0.63660467",
"0.6310684",
"0.62606514",
"0.62528133",
"0.6212426",
"0.61914283",
"0.61631",
"0.61101496",
"0.60671425",
"0.59764946",
"0.5926465",
"0.59022355",
"0.58873934",
"0.5849658",
"0.58319587",
"0.5810866",
"0.57978505",
"0.5756255",
"0.5747775",
"0.574419",
"0.57279664",
"0.5726004",
"0.5725676",
"0.56927556",
"0.5691043",
"0.56833625",
"0.56802297",
"0.56782496"
]
| 0.6954029 | 0 |
Create a new edge which is the reverse of self. | def reversed_edge(self):
reverse = Edge(id=self.id,
start_node=self.end_node,
end_node=self.start_node,
cost=self.reverse_cost,
reverse_cost=self.cost,
reversed=not self.reversed)
return reverse | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reverse(edge):\n return Edge(orig=edge.dest, dest=edge.orig, orig_id=edge.dest_id, dest_id=edge.orig_id)",
"def reverse(self):\n H = DiGraph(multiedges=self.allows_multiple_edges(), loops=self.allows_loops())\n H.add_vertices(self)\n H.add_edges( [ (v,u,d) for (u,v,d) in self.edge_iterator() ] )\n name = self.name()\n if name is None:\n name = ''\n H.name(\"Reverse of (%s)\"%name)\n return H",
"def reverse(self):\n cls = self.__class__\n # , kind = None, transmission_reflection = None,\n # reflection_against = None,\n # are_normals_on_inc_rays_side = None, are_normals_on_out_rays_side = None\n if self.kind is None:\n rev_kind = None\n else:\n if self.transmission_reflection is None:\n raise ValueError(\"reverse path is ambiguous\")\n elif self.transmission_reflection is TransmissionReflection.transmission:\n rev_kind = self.kind.reverse()\n elif self.transmission_reflection is TransmissionReflection.reflection:\n rev_kind = self.kind\n else:\n raise RuntimeError\n\n return cls(\n self.points,\n self.orientations,\n kind=rev_kind,\n transmission_reflection=self.transmission_reflection,\n reflection_against=self.reflection_against,\n are_normals_on_inc_rays_side=self.are_normals_on_out_rays_side,\n are_normals_on_out_rays_side=self.are_normals_on_inc_rays_side,\n )",
"def to_reverse_rule(self) -> \"ReverseRule\":\n assert (\n self.is_equivalence()\n ), \"reverse rule can only be created for equivalence rules\"\n return ReverseRule(self)",
"def reverse_edge(e: tuple) -> tuple:\n (u, v, data) = e\n return (v, u, data)",
"def reversed(self):\n ops = {Eq: Eq, Gt: Lt, Ge: Le, Lt: Gt, Le: Ge, Ne: Ne}\n a, b = self.args\n return Relational.__new__(ops.get(self.func, self.func), b, a)",
"def build_reverse_graph(self):\n adj = self.adj\n self.adjR = [[] for _ in range(n+1)]\n adjR = self.adjR\n for u, edges in enumerate(adj):\n for v, w in edges:\n adjR[v].append((u, w))",
"def __reversed__(self):\n return reverse(self)",
"def reversed(self):\n return LINE(*self.elems,**{'reverse':(not self.reverse)})",
"def MakeEdge(self, *args):\n return _ShapeBuild.ShapeBuild_Edge_MakeEdge(self, *args)",
"def reverse_edges(self, edges, inplace=True, multiedges=None):\n tempG = self if inplace else copy(self)\n for e in edges:\n tempG.reverse_edge(e,inplace=True,multiedges=multiedges)\n if not inplace:\n return tempG",
"def make_edge(self, a, b):\n try: e = self.G.new_edge(a, b)\n except: return self.G.new_edge(a,b)\n\n try: self.G.set_edge_attribute(e, \"arrow\", \"true\")\n except: return self.G.new_edge(a,b)\n\n try: self.G.set_edge_attribute(e, \"spline\", \"false\")\n except: return self.G.new_edge(a,b)\n return e",
"def add_reverse_edges(\n graph):\n senders = np.concatenate(\n (graph.senders, graph.receivers))\n receivers = np.concatenate(\n (graph.receivers, graph.senders))\n\n graph.senders = senders\n graph.receivers = receivers\n return graph",
"def edge(cls, edge):\n return cls(Lnk.EDGE, int(edge))",
"def reverse_graph(self) -> GraphInterface:\n ans = DiGraph()\n\n nodes = self._graph.get_all_v() # {key: NodeData}\n for key in nodes:\n ans.add_node(key)\n ans.get_node(key).tag = self._graph.get_node(key).tag\n\n for key in nodes:\n out_edges = self._graph.all_out_edges_of_node(key)\n for edge in out_edges:\n e = out_edges.get(edge)\n ans.add_edge(e.dest, e.src, e.weight)\n\n return ans",
"def reverse_edge(self, u, v=None, label=None, inplace=True, multiedges=None):\n # Assigns the expected values to u,v, and label depending on the input.\n if label is None:\n if v is None:\n try:\n u, v, label = u\n except Exception:\n try:\n u, v = u\n except Exception:\n pass\n else:\n if v is None:\n try:\n u, v = u\n except Exception:\n pass\n\n if not self.has_edge(u,v,label):\n raise ValueError(\"Input edge must exist in the digraph.\")\n\n tempG = self if inplace else copy(self)\n\n if label is None:\n if not tempG.allows_multiple_edges():\n label = tempG.edge_label(u,v)\n else:\n # If digraph has parallel edges for input edge, pick the first\n # from the labels on the list\n label = tempG.edge_label(u,v)[0]\n\n if ((not tempG.allows_multiple_edges()) and (tempG.has_edge(v,u))):\n # If user wants to force digraph to allow parallel edges\n if multiedges == True:\n tempG.allow_multiple_edges(True)\n tempG.delete_edge(u,v,label)\n tempG.add_edge(v,u,label)\n\n # If user does not want to force digraph to allow parallel\n # edges, we delete edge u to v and overwrite v,u with the\n # label of u,v\n elif multiedges == False:\n tempG.delete_edge(u,v,label)\n tempG.set_edge_label(v,u,label)\n\n # User is supposed to specify multiedges True or None\n else:\n raise ValueError(\"Reversing the given edge is about to \"\n \"create two parallel edges but input digraph \"\n \"doesn't allow them - User needs to specify \"\n \"multiedges is True or False.\")\n else:\n tempG.delete_edge(u,v,label)\n tempG.add_edge(v,u,label)\n\n if not inplace:\n return tempG",
"def flowchart_create_birectional_edges(self):\n for edge in self.DiGraph.edges():\n if edge[0] != edge[1]:\n self.DiGraph.add_edge(edge[1], edge[0])",
"def create(cls, outV, inV, *args, **kwargs):\r\n return super(Edge, cls).create(outV, inV, *args, **kwargs)",
"def _create_edge_ist(self) -> EdgeList:\r\n return EdgeList(self)",
"def __neg__(self):\n try:\n return self._reverse\n except AttributeError:\n self._reverse = self.__class__(self.db, self.id,\n reversePath=self)\n return self._reverse",
"def __invert__(self):\n return self.reverse()",
"def reverse(self): # real signature unknown; restored from __doc__\n pass",
"def reverse(self):\n x = self._x * -1\n y = self._y * -1\n return Point(x,y)",
"def reverse_edge(\n G: DiGraphGPKG,\n edge: EdgeData,\n invert: Optional[Iterable[str]] = None,\n flip: Optional[Iterable[str]] = None,\n) -> None:\n rev_coords = list(\n reversed(edge[G.network.edges.geom_column][\"coordinates\"])\n )\n edge[G.network.edges.geom_column][\"coordinates\"] = rev_coords\n if invert is not None:\n for key in invert:\n if key in edge:\n edge[key] = edge[key] * -1\n if flip is not None:\n for key in flip:\n if key in edge:\n edge[key] = type(edge[key])(not edge[key])",
"def invert(self):\n self.vertices.reverse()",
"def reverse(self):\n node = self.head\n while node is not None:\n next_node = node.next_node \n node.next_node, node.prev_node = node.prev_node, node.next_node \n node = next_node\n self.head, self.tail = self.tail, self.head",
"def invert(self) -> Frame:\n return Inverse(self)",
"def __reversed__(self):\n # type: () -> _WeakList\n reversed_self = type(self)(self)\n reversed_self.reverse()\n return reversed_self",
"def reverse_graph(self):\n rgraph = DGraph()\n rgraph.graph = deepcopy(self.graph)\n\n for node in rgraph.graph:\n node.data.children, node.data.parents = node.data.parents, node.data.children\n\n return rgraph",
"def get_edge(self, from_, to):\n pass"
]
| [
"0.82042795",
"0.7715635",
"0.6969293",
"0.6935056",
"0.6888703",
"0.6801891",
"0.6788504",
"0.67265826",
"0.6673056",
"0.6654561",
"0.6641547",
"0.6504055",
"0.6466905",
"0.64531726",
"0.6414732",
"0.6414527",
"0.6390156",
"0.63744956",
"0.6365106",
"0.63299966",
"0.6312733",
"0.63053674",
"0.62836075",
"0.62586707",
"0.6257976",
"0.6193736",
"0.6172907",
"0.61691767",
"0.61395526",
"0.61288756"
]
| 0.85291636 | 0 |
Test whether this edge is the same as the other. When comparing costs, the two edges are considered the same if the difference is within the given precision. | def same_edge(self, other, precision=0):
return self.id == other.id \
and self.start_node == other.start_node \
and self.end_node == other.end_node \
and abs(self.cost - other.cost) <= precision \
and abs(self.reverse_cost - other.reverse_cost) <= precision \
and self.reversed == other.reversed | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __eq__(self, other):\r\n return abs(self.x - other.x) + abs(self.y - other.y) < Vertex.epsilon",
"def __eq__(self, other):\n return abs(self - other) < 10e-10",
"def same(self, other, epsilon_=None):\n if epsilon_ is None:\n return self-other < epsilon\n else:\n return self-other < epsilon_",
"def __eq__(self, other):\n if isinstance(other, Edge):\n return self.label == other.label\n return NotImplemented",
"def compare(self, t2) -> bool:\n return True if self.get_edge(t2) >= 0 else False",
"def __gt__(self, other):\n return self.estimated_cost > other.estimated_cost",
"def __eq__(self, other):\n if isinstance(other, type(self)):\n same_edges = self._edges == other._edges\n same_weights = self._weights == other._weights\n return same_edges and same_weights\n else:\n return False",
"def compute_penalty(edge_1, edge_2):\n\n if edge_1 == edge_2:\n return 0\n elif {edge_1, edge_2} == {EdgeType.NONE, EdgeType.FORWARD}:\n return 1\n elif {edge_1, edge_2} == {EdgeType.NONE, EdgeType.BACKWARD}:\n return 1\n elif {edge_1, edge_2} == {EdgeType.NONE, EdgeType.UNDIRECTED}:\n return 1\n elif {edge_1, edge_2} == {EdgeType.FORWARD, EdgeType.BACKWARD}:\n return 1\n elif {edge_1, edge_2} == {EdgeType.FORWARD, EdgeType.UNDIRECTED}:\n return 1\n elif {edge_1, edge_2} == {EdgeType.BACKWARD, EdgeType.UNDIRECTED}:\n return 1\n else:\n raise ImpossibleEdgeConfiguration",
"def __eq__(self, other):\n if isinstance(other, DirectedWeightedGraphEdge):\n if self.head_vertex != other.head_vertex:\n return False\n elif self.tail_vertex != other.tail_vertex:\n return False\n elif self.weight != other.weight:\n return False\n return True\n return NotImplemented",
"def almost_equals(self, other, decimal=...): # -> bool:\n ...",
"def __eq__(self, other):\n if not type(other) == type(self):\n return False\n sedges, oedges = self.edges, other.edges\n return ((len(sedges) == len(oedges)) and\n all(numpy.all(se == oe) for (se, oe) in zip(sedges, oedges)))",
"def almost_equal(self, other, precision):\n if isinstance(other, self.__class__):\n # Check that all elements in both arrays are almost\n # equal within given precision\n almost_equal = all(map(lambda x, y:\n self.almost_equal_values(x, y, precision),\n self.items, other.items))\n return (self.index == other.index) and \\\n (self.inUse == other.inUse) and \\\n (self.type == other.type) and \\\n (self.previousBlock == other.previousBlock) and \\\n (self.amount == other.amount) and \\\n (self.nextBlock == other.nextBlock) and almost_equal\n else:\n return False",
"def almost_equals(self, other):\n import math\n ox, oy = other\n dx = self[0] - ox\n dy = self[1] - oy\n return (dx*dx + dy*dy) < pygonal.EPSILON2",
"def permissible(e1, e2):\n return e1[1] == e2[0] and \\\n total_edge_length(e1, e2) < maximum_distance and \\\n total_edge_angle(e1, e2) < maximum_angle_delta",
"def __eq__(self, other):\n if isinstance(other, self.__class__):\n return math.isclose(self.x, other.x, rel_tol=1e-12, abs_tol=1e-12) and\\\n math.isclose(self.y, other.y, rel_tol=1e-12, abs_tol=1e-12)\n else:\n return False",
"def __eq__(self, other):\n if self.add_mode != other.add_mode:\n return False\n if not np.isclose(self.add_tol, other.add_tol):\n return False\n\n if not np.isclose(self.top, other.top):\n return False\n if not np.isclose(self.base, other.base):\n return False\n\n if (self.height != other.height):\n return False\n\n if not np.allclose(self.depths, other.depths):\n return False\n if not np.allclose(self.img, other.img):\n return False\n\n return True",
"def eq(df1, df2, precision=0.5) -> bool:\n return ((df1 - df2).abs() < precision).all()",
"def _by_weight_then_from_protocol_specificity(edge_1, edge_2):\n\n # edge_1 and edge_2 are edges, of the form (mro_distance, offer)\n\n mro_distance_1, offer_1 = edge_1\n mro_distance_2, offer_2 = edge_2\n\n # First, compare the MRO distance.\n if mro_distance_1 < mro_distance_2:\n return -1\n elif mro_distance_1 > mro_distance_2:\n return 1\n\n # The distance is equal, prefer more specific 'from_protocol's\n if offer_1.from_protocol is offer_2.from_protocol:\n return 0\n\n if issubclass(offer_1.from_protocol, offer_2.from_protocol):\n return -1\n elif issubclass(offer_2.from_protocol, offer_1.from_protocol):\n return 1\n\n return 0",
"def __eq__(self, other):\n if isinstance(other, UnDirectedWeightedGraphEdge):\n if self.head_vertex != other.head_vertex:\n return False\n elif self.tail_vertex != other.tail_vertex:\n return False\n elif self.weight != other.weight:\n return False\n return True\n return NotImplemented",
"def __eq__(self, other):\n return (self.vertices == other.vertices and self.weight == other.weight)",
"def nearly_equal(a, b, sig_fig=5):\n return a == b or int(a*10**sig_fig) == int(b*10**sig_fig)",
"def almost_equal_values(x, y, precision):\n return round(x - y, precision) == 0",
"def __lt__(self, other):\n return self.estimated_cost < other.estimated_cost",
"def __ge__(self, other):\n return self.x ** 2 + self.y ** 2 >= other.x ** 2 + other.y ** 2",
"def _same(p1,p2,prec=0.0001):\n #return num.all(num.equal(p1,p2))\n t1 = num.fabs(p1[0]-p2[0]) < prec\n t2 = num.fabs(p1[1]-p2[1]) < prec\n if t1 and t2:\n #print \"same\", p1,p2\n return True",
"def __lt__(self, other):\r\n return self.estimateCost < other.estimateCost",
"def __eq__(self, other):\n firstnum = self.num*other.den\n secondnum = self.den*other.num\n\n return firstnum == secondnum",
"def almost_eq(e1,e2) :\n\treturn round(e1-e2,4) == 0.0",
"def links_with(self, other, tollerance = 0.05):\n return (\n self.start.distance_to(other.start) < tollerance or\n self.start.distance_to(other.end) < tollerance or\n self.end.distance_to(other.end) < tollerance or\n self.end.distance_to(other.start) < tollerance\n )",
"def torch_the_same(X, Y, eps=1e-8):\n return (X - Y).abs().min() < eps"
]
| [
"0.6727692",
"0.6505541",
"0.6498408",
"0.64822954",
"0.6417987",
"0.6393962",
"0.6364372",
"0.62917227",
"0.6288815",
"0.626958",
"0.6238322",
"0.6234916",
"0.62137187",
"0.61908233",
"0.6114228",
"0.61046827",
"0.6092138",
"0.6084756",
"0.6081539",
"0.60792655",
"0.60791814",
"0.6077313",
"0.60737306",
"0.6067947",
"0.60424584",
"0.6039395",
"0.60352767",
"0.6027227",
"0.59986985",
"0.5997929"
]
| 0.8797289 | 0 |
Sets the id of this VirtualService. | def id(self, id):
if id is not None and not re.search(r'^\\d{1,}-virtualservice-[a-z0-9_\\-]{36}$', id): # noqa: E501
raise ValueError(r"Invalid value for `id`, must be a follow pattern or equal to `/^\\d{1,}-virtualservice-[a-z0-9_\\-]{36}$/`") # noqa: E501
self._id = id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_id(self, id):\n self.__id = id",
"def SetId(self, id):\n self.id = int(id)",
"def set_id(self, id_):\n\n self.id_ = id_",
"def set_id(self, id):\n self.data['id'] = id",
"def setID(self, id):\n self._id = id\n return self.callRemote('setID', id)",
"def setID(self, id):\r\n raise NotImplementedError(\"must be implemented in subclass\")",
"def set_id(self, id):\n self.id = id\n print(\"self id = \" + str(self.id))",
"def id(self, id: int):\n\n self._id = id",
"def id(self, id: int):\n\n self._id = id",
"def id(self, id: int):\n\n self._id = id",
"def id(self, id: int):\n\n self._id = id",
"def id(self, id: int):\n\n self._id = id",
"def id(self, id: int):\n\n self._id = id",
"def id(self, id: int):\n\n self._id = id",
"def id(self, id: int):\n\n self._id = id",
"def id(self, id: int):\n\n self._id = id",
"def id(self, id: int):\n\n self._id = id",
"def set_id(self, id):\n\n\t\tif id is not None and not isinstance(id, int):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: id EXPECTED TYPE: int', None, None)\n\t\t\n\t\tself.__id = id\n\t\tself.__key_modified['id'] = 1",
"def id(self, id):\n \n self._id = id",
"def id(self, id):\n \n self._id = id",
"def id(self, id):\n \n self._id = id",
"def id(self, id):\n \n self._id = id",
"def id(self, id):\n \n self._id = id",
"def id(self, id):\n\n self._id = id",
"def id(self, id):\n\n self._id = id",
"def id(self, id):\n\n self._id = id",
"def id(self, id):\n\n self._id = id",
"def id(self, id):\n\n self._id = id",
"def id(self, id):\n\n self._id = id",
"def id(self, id):\n\n self._id = id"
]
| [
"0.7246792",
"0.71348435",
"0.7120083",
"0.69750094",
"0.6951778",
"0.68095964",
"0.6804247",
"0.6741579",
"0.6741579",
"0.6741579",
"0.6741579",
"0.6741579",
"0.6741579",
"0.6741579",
"0.6741579",
"0.6741579",
"0.6741579",
"0.67225677",
"0.6702289",
"0.6702289",
"0.6702289",
"0.6702289",
"0.6702289",
"0.667077",
"0.667077",
"0.667077",
"0.667077",
"0.667077",
"0.667077",
"0.667077"
]
| 0.80658674 | 0 |
Sets the site_id of this VirtualService. | def site_id(self, site_id):
self._site_id = site_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_site_id(self):\n self.site_id = entities.sites['next id']\n entities.sites['object list'].append(self)\n entities.sites['next id'] += 1",
"def web_site_id(self, web_site_id):\n\n self._web_site_id = web_site_id",
"def setSiteids(self):\n self.siteids = []\n for site in self.sites:\n self.siteids.append(site.siteid)",
"def save(self, update_site=False, *args, **kwargs):\n if update_site or not self.id:\n self.site_id = current_site_id()\n super(SiteRelated, self).save(*args, **kwargs)",
"def set_SiteID(self, value):\n super(GetCategoriesInputSet, self)._set_input('SiteID', value)",
"def setId(self, *args):\n return _libsbml.OutwardBindingSite_setId(self, *args)",
"def sites(self, site_id, data, tenant_id=None, api_version=\"v4.7\"):\n\n if tenant_id is None and self._parent_class.tenant_id:\n # Pull tenant_id from parent namespace cache.\n tenant_id = self._parent_class.tenant_id\n elif not tenant_id:\n # No value for tenant_id.\n raise TypeError(\"tenant_id is required but not set or cached.\")\n cur_ctlr = self._parent_class.controller\n\n url = str(cur_ctlr) + \"/{}/api/tenants/{}/sites/{}\".format(api_version,\n tenant_id,\n site_id)\n\n api_logger.debug(\"URL = %s\", url)\n return self._parent_class.rest_call(url, \"put\", data=data)",
"def site_name(self, site_name):\n\n self._site_name = site_name",
"def id(self, id):\n if id is not None and not re.search(r'^\\\\d{1,}-virtualservice-[a-z0-9_\\\\-]{36}$', id): # noqa: E501\n raise ValueError(r\"Invalid value for `id`, must be a follow pattern or equal to `/^\\\\d{1,}-virtualservice-[a-z0-9_\\\\-]{36}$/`\") # noqa: E501\n\n self._id = id",
"def set_CategorySiteID(self, value):\n super(GetCategoriesInputSet, self)._set_input('CategorySiteID', value)",
"def system_id(self, system_id):\n\n self._system_id = system_id",
"def system_id(self, system_id):\n\n self._system_id = system_id",
"def sso_id(self, sso_id):\n\n self._sso_id = sso_id",
"def set_model_id(self, model_id):\n assert isinstance(model_id, int)\n for atm in self.iter_alt_loc():\n atm.model_id = model_id",
"def add_site_to_context(self):\n g.current_site = self",
"def stp_id(self, stp_id):\n\n self._stp_id = stp_id",
"def set_model_id(self, model_id):\n assert isinstance(model_id, int)\n self.model_id = model_id\n\n for frag in self.iter_fragments():\n frag.set_model_id(model_id)",
"def website(self, website):\n\n self._website = website",
"def website(self, website):\n\n self._website = website",
"def add_site(self, site):\n assert isinstance(site, Site)\n self.site_list.append(site)\n site.model = self",
"def set_id(self, ssc_id):\r\n self.ssc_id = ssc_id",
"def save_site(self, site):\n raise NotImplementedError('save_site')",
"def service_user_id(self, service_user_id):\n\n self._service_user_id = service_user_id",
"def tenant_id(self, tenant_id):\n\n self._tenant_id = tenant_id",
"def tenant_id(self, tenant_id):\n\n self._tenant_id = tenant_id",
"def station_id(self, station_id: str):\n\n self._station_id = station_id",
"def set_id(self, id_):\n\n self.id_ = id_",
"def survey_id(self, survey_id):\n\n self.logger.debug(\"In 'survey_id' setter.\")\n\n self._survey_id = survey_id",
"def vpc_id(self, vpc_id):\n self._vpc_id = vpc_id",
"def site_name(self, site_name):\n if site_name is not None and len(site_name) > 100:\n raise ValueError(\"Invalid value for `site_name`, length must be less than or equal to `100`\") # noqa: E501\n\n self._site_name = site_name"
]
| [
"0.68784183",
"0.6602689",
"0.6414411",
"0.6092146",
"0.6034268",
"0.5990248",
"0.591408",
"0.57976866",
"0.57383054",
"0.5552645",
"0.53509086",
"0.53509086",
"0.53118485",
"0.5262215",
"0.5254939",
"0.5238961",
"0.5229057",
"0.5209056",
"0.5209056",
"0.5199387",
"0.51953393",
"0.51862866",
"0.5185549",
"0.51020736",
"0.51020736",
"0.5060815",
"0.5059638",
"0.5059089",
"0.5058307",
"0.5040132"
]
| 0.80494 | 1 |
Sets the segment_id of this VirtualService. | def segment_id(self, segment_id):
self._segment_id = segment_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def vat_id(self, vat_id: str):\n\n self._vat_id = vat_id",
"def vat_id(self, vat_id):\n\n self._vat_id = vat_id",
"def vpd_id(self, vpd_id):\n\n self._vpd_id = vpd_id",
"def update(self,\n segment_id,\n segment,\n ):\n return self._invoke('update',\n {\n 'segment_id': segment_id,\n 'segment': segment,\n })",
"def set_segments(self, segments):\n self.send_command(Command.SET_SEGMENT_COUNT, [segments])",
"def setDiskId(self, diskId):\n self.__diskId = diskId",
"def patch(self,\n segment_id,\n segment,\n ):\n return self._invoke('patch',\n {\n 'segment_id': segment_id,\n 'segment': segment,\n })",
"def set_segments_center_of_mass_size(self, segments_center_of_mass_size):\n self.segments_center_of_mass_size = segments_center_of_mass_size\n self.update_segments_center_of_mass(self.segments_center_of_mass)",
"def set_virtual_network_id(self, sNewVirtualNetworkId):\n\t\tcall_sdk_function('PrlVmDevNet_SetVirtualNetworkId', self.handle, sNewVirtualNetworkId)",
"def set_id(self, ssc_id):\r\n self.ssc_id = ssc_id",
"def set_student_id(self, student_id):\n self._student_id = student_id",
"def vds_num(self, vds_num):\n\n self._vds_num = vds_num",
"def setSegments(self, segments):\n for point, segment in zip(self.points, segments):\n point.set(segment.p1)",
"def id(self, id):\n if id is not None and not re.search(r'^\\\\d{1,}-virtualservice-[a-z0-9_\\\\-]{36}$', id): # noqa: E501\n raise ValueError(r\"Invalid value for `id`, must be a follow pattern or equal to `/^\\\\d{1,}-virtualservice-[a-z0-9_\\\\-]{36}$/`\") # noqa: E501\n\n self._id = id",
"def sentence_id(self, sentence_id):\n\n self._sentence_id = sentence_id",
"def __init__(__self__, *,\n segment_name: Optional[pulumi.Input[str]] = None):\n if segment_name is not None:\n pulumi.set(__self__, \"segment_name\", segment_name)",
"def segment_counter(self, _):\n raise NotImplementedError(\n \"We do not support externally altering the segment counter\")",
"def set_segm_name(*args):\n return _ida_segment.set_segm_name(*args)",
"def set_cp_service(self, cp_id):\n params = [('cpservice_id', int(cp_id))]\n\n self.get(COMMAND_CPM, 'SetCpService', params)",
"def vcn_id(self, vcn_id):\n self._vcn_id = vcn_id",
"def set_segm_addressing(*args):\n return _ida_segment.set_segm_addressing(*args)",
"def setSegmentColor(self, color):\n for segment in self.segments:\n segment.color = color",
"def segment_number(self):\n if hasattr(self, '_m_segment_number'):\n return self._m_segment_number if hasattr(self, '_m_segment_number') else None\n\n self._m_segment_number = self.segment_number_raw.value\n return self._m_segment_number if hasattr(self, '_m_segment_number') else None",
"def set_student(self, student_id):\n self._student = student_id",
"def vpc_id(self, vpc_id):\n self._vpc_id = vpc_id",
"def add_segment(self, segment):\n assert segment is None or isinstance(segment, Segment)\n\n self.segment = segment\n if segment is None:\n return\n\n ## reset Strand description with the description derived\n ## from the new Segment\n try:\n frag1 = segment[0]\n frag2 = segment[-1]\n except IndexError:\n return\n\n self.chain_id1 = frag1.chain_id\n self.fragment_id1 = frag1.fragment_id\n self.res_name1 = frag1.res_name\n\n self.chain_id2 = frag2.chain_id\n self.fragment_id2 = frag2.fragment_id\n self.res_name2 = frag2.res_name",
"def set_segm_start(*args):\n return _ida_segment.set_segm_start(*args)",
"def add_segment(self, segment):\n self.segments.append(segment)",
"def stp_id(self, stp_id):\n\n self._stp_id = stp_id",
"def delete_0(self,\n segment_id,\n ):\n return self._invoke('delete_0',\n {\n 'segment_id': segment_id,\n })"
]
| [
"0.5838757",
"0.5825565",
"0.57639927",
"0.5708378",
"0.5702347",
"0.5625416",
"0.5533488",
"0.55170035",
"0.5496307",
"0.5465427",
"0.5459022",
"0.545264",
"0.5423617",
"0.5415335",
"0.53778774",
"0.5375813",
"0.5352305",
"0.532604",
"0.53248763",
"0.53109074",
"0.5306134",
"0.5299444",
"0.52918965",
"0.5263139",
"0.52602416",
"0.5244297",
"0.52169627",
"0.5174873",
"0.51688397",
"0.5145874"
]
| 0.8330469 | 0 |
Sets the vip of this VirtualService. | def vip(self, vip):
self._vip = vip | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def virtual_router_ip(self, virtual_router_ip):\n self._virtual_router_ip = virtual_router_ip",
"def add_virtualip(self, vip):\n return self.manager.add_virtualip(self, vip)",
"def ip(self, ip):\n\n self._ip = ip",
"def ip(self, ip):\n\n self._ip = ip",
"def SetVMIPaddressIntoTeamCityParameter(self, paramName):\n ip = self.GetVMIPaddress()\n\n key = 'vm_ip' if not paramName else paramName\n\n if ip:\n TeamCityParamSetter(keyName=str(key), value=ip)\n\n else:\n LOGGER.warning('Parameter {} does not set to {}!'.format(key, ip))",
"def ip_version(self, ip_version):\n\n self._ip_version = ip_version",
"def ip(self, ip: str):\n\n self._ip = ip",
"def ip(self, ip):\n self._ip = ip\n return self",
"def add_virtualip(self, loadbalancer, vip):\n return loadbalancer.add_virtualip(vip)",
"def set_ip(self, party_ip) -> None:\n\n self._ip = party_ip",
"def set_vm_ip(name=None, ipv4_cidr=None, ipv4_gw=None, session=None, call=None):\n mode = \"static\"\n # TODO: Need to add support for IPv6\n if call == \"function\":\n raise SaltCloudException(\"The function must be called with -a or --action.\")\n\n log.debug(\n \"Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s\",\n name,\n ipv4_cidr,\n ipv4_gw,\n mode,\n )\n if session is None:\n log.debug(\"New session being created\")\n session = _get_session()\n vm = _get_vm(name, session)\n # -- try to get ip from vif\n # TODO: for now will take first interface\n # addition consideration needed for\n # multiple interface(vif) VMs\n vifs = session.xenapi.VM.get_VIFs(vm)\n if vifs is not None:\n log.debug(\"There are %s vifs.\", len(vifs))\n for vif in vifs:\n record = session.xenapi.VIF.get_record(vif)\n log.debug(record)\n try:\n session.xenapi.VIF.configure_ipv4(vif, mode, ipv4_cidr, ipv4_gw)\n except XenAPI.Failure:\n log.info(\"Static IP assignment could not be performed.\")\n\n return True",
"def update_vip(self, vip, body=None):\r\n return self.put(self.vip_path % (vip), body=body)",
"def service_vm_ovf_url(self, service_vm_ovf_url):\n\n self._service_vm_ovf_url = service_vm_ovf_url",
"def setIpaddr(self):\n\t\tself.ipaddr = self.settings.getKeyValue('ipaddr')\n\t\tself.socket.send('setenv ipaddr ' + self.ipaddr+'\\r', 1)\t\t\n\t\treturn None",
"def vapic(self, vapic):\n\n self._vapic = vapic",
"def for_virtual_ip_address_enter_vip_and_press_apply(driver, vip):\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Virtual IP Address\"]').clear()\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Virtual IP Address\"]').send_keys(vip)\n wait_on_element(driver, 0.5, 5, '//button[@ix-auto=\"button__APPLY\"]')\n driver.find_element_by_xpath('//button[@ix-auto=\"button__APPLY\"]').click()",
"def vm_num(self, vm_num):\n\n self._vm_num = vm_num",
"def add_virtualip(self, lb, vip):\n resp, body = self.api.method_post(\"/loadbalancers/%s/virtualips\" % lb.id,\n body=vip.to_dict())\n return resp, body",
"def _set_virtual_oper_VipV6_address(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=TypedListType(allowed_type=unicode), is_leaf=False, yang_name=\"virtual-oper-VipV6-address\", rest_name=\"v6\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'alt-name': u'v6'}}, namespace='urn:brocade.com:mgmt:brocade-chassis', defining_module='brocade-chassis', yang_type='string', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"virtual_oper_VipV6_address must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=TypedListType(allowed_type=unicode), is_leaf=False, yang_name=\"virtual-oper-VipV6-address\", rest_name=\"v6\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'alt-name': u'v6'}}, namespace='urn:brocade.com:mgmt:brocade-chassis', defining_module='brocade-chassis', yang_type='string', is_config=False)\"\"\",\n })\n\n self.__virtual_oper_VipV6_address = t\n if hasattr(self, '_set'):\n self._set()",
"def ip_address(self, ip_address):\n\n self._ip_address = ip_address",
"def ip_address(self, ip_address):\n\n self._ip_address = ip_address",
"def ip_address(self, ip_address):\n\n self._ip_address = ip_address",
"def flag_virtual(self, flag_virtual):\n self._flag_virtual = flag_virtual",
"def setIP(self, idx, ip):\n self.ip[int(idx)-1] = ip",
"def _set_static_ip(name, session, vm_):\n ipv4_cidr = \"\"\n ipv4_gw = \"\"\n if \"ipv4_gw\" in vm_.keys():\n log.debug(\"ipv4_gw is found in keys\")\n ipv4_gw = vm_[\"ipv4_gw\"]\n if \"ipv4_cidr\" in vm_.keys():\n log.debug(\"ipv4_cidr is found in keys\")\n ipv4_cidr = vm_[\"ipv4_cidr\"]\n log.debug(\"attempting to set IP in instance\")\n set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)",
"def remoteip(self, remoteip) :\n\t\ttry :\n\t\t\tself._remoteip = remoteip\n\t\texcept Exception as e:\n\t\t\traise e",
"def set_host_ipaddress(self, sHostIPAddress):\n\t\tcall_sdk_function('PrlVirtNet_SetHostIPAddress', self.handle, sHostIPAddress)",
"def vm(self, vm):\n\n self._vm = vm",
"def ip_in(self, ip_in):\n\n self._ip_in = ip_in",
"def pvd(self, pvd):\n\n self.logger.debug(\"In 'pvd' setter.\")\n\n self._pvd = pvd"
]
| [
"0.72838145",
"0.6761916",
"0.6458198",
"0.6458198",
"0.64527357",
"0.63842404",
"0.63458145",
"0.63456166",
"0.6233729",
"0.6120147",
"0.60259",
"0.6014182",
"0.5945362",
"0.59351707",
"0.5823485",
"0.58097166",
"0.58031166",
"0.57778704",
"0.5750117",
"0.57091725",
"0.57091725",
"0.57091725",
"0.56998914",
"0.56965464",
"0.56888473",
"0.56823015",
"0.5668213",
"0.56552285",
"0.5649946",
"0.5649243"
]
| 0.82228744 | 0 |
Sets the nat of this VirtualService. | def nat(self, nat):
self._nat = nat | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_nat(self, natgw, **attrs):\n return self._update(_gw.Service, natgw, **attrs)",
"def set_natserver_enabled(self, bEnabled):\n\t\tcall_sdk_function('PrlVirtNet_SetNATServerEnabled', self.handle, bEnabled)",
"def set_virtual_node(self, virtRoot):\n self.virtRoot = virtRoot",
"def nic_num(self, nic_num):\n\n self._nic_num = nic_num",
"def create_nat(self, **attrs):\n return self._create(_gw.Service, tenant_id=self.get_project_id(), **attrs)",
"def set_nodal_stress(self, nodal_stress: np.ndarray):\n self._nodal_stress = nodal_stress",
"def set_network_type(self, nNetworkType):\n\t\tcall_sdk_function('PrlVirtNet_SetNetworkType', self.handle, nNetworkType)",
"def vm_num(self, vm_num):\n\n self._vm_num = vm_num",
"def setDirect(self, direct):\n self._direct = direct",
"def set_vncport(self, nVmRemoteDisplayPort):\n\t\tcall_sdk_function('PrlVmCfg_SetVNCPort', self.handle, nVmRemoteDisplayPort)",
"def set_vlan_tag(self, nVlanTag):\n\t\tcall_sdk_function('PrlVirtNet_SetVlanTag', self.handle, nVlanTag)",
"def _set_nport(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=nport.nport, is_container='container', presence=False, yang_name=\"nport\", rest_name=\"nport\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Adds N port(s) to the PG'}}, namespace='urn:brocade.com:mgmt:brocade-ag', defining_module='brocade-ag', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"nport must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=nport.nport, is_container='container', presence=False, yang_name=\"nport\", rest_name=\"nport\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Adds N port(s) to the PG'}}, namespace='urn:brocade.com:mgmt:brocade-ag', defining_module='brocade-ag', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__nport = t\n if hasattr(self, '_set'):\n self._set()",
"def set_direct(self, direct):\n self._direct = direct",
"def set_enabled(self, bEnabled):\n\t\tcall_sdk_function('PrlVirtNet_SetEnabled', self.handle, bEnabled)",
"def set_n(self, n: int) -> None:\r\n self.n_is_set = True\r\n self.n = n",
"def set_vpn_state(self, status):\n if hasattr(self, status):\n self.change_to(getattr(self, status))",
"def setNsProp(self, node, name, value):\n if node is None: node__o = None\n else: node__o = node._o\n ret = libxml2mod.xmlSetNsProp(node__o, self._o, name, value)\n if ret is None:raise treeError('xmlSetNsProp() failed')\n __tmp = xmlAttr(_obj=ret)\n return __tmp",
"def set_node(self, n, value):\n node = self.get_node(n)\n if node:\n node.value = value",
"def SetWirelessProperty(self, networkid, prop, value):\n if (prop.strip()).endswith(\"script\"):\n print \"Setting script properties through the daemon is not\" \\\n + \" permitted.\"\n return False\n self.LastScan[networkid][prop] = misc.Noneify(value)",
"def natural_identifier(self, natural_identifier):\n\n self._natural_identifier = natural_identifier",
"def vm(self, vm):\n\n self._vm = vm",
"def flag_virtual(self, flag_virtual):\n self._flag_virtual = flag_virtual",
"def set_vin(self, value):\n return self.sendCMD(\"ATSET VIN={}\".format(value))",
"def vat_number(self, vat_number):\n\n self._vat_number = vat_number",
"def vat_number(self, vat_number):\n\n self._vat_number = vat_number",
"def set_network_id(self, sNetworkId):\n\t\tcall_sdk_function('PrlVirtNet_SetNetworkId', self.handle, sNetworkId)",
"def n(self, n) :\n\t\ttry :\n\t\t\tself._n = n\n\t\texcept Exception as e:\n\t\t\traise e",
"def set_nic_setting(self, nic_id, attribute_name, value):\n return self._nic_cfg.set_nic_setting(nic_id, attribute_name, value)",
"async def async_turn_on(self):\n path = \"/ip/firewall/nat\"\n param = \".id\"\n value = None\n for uid in self._ctrl.data[\"nat\"]:\n if (\n self._ctrl.data[\"nat\"][uid][\"name\"]\n == f\"{self._data['protocol']}:{self._data['dst-port']}\"\n ):\n value = self._ctrl.data[\"nat\"][uid][\".id\"]\n\n mod_param = \"disabled\"\n mod_value = False\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n await self._ctrl.force_update()",
"def set_adressing(self, addr, set=True):\n assert addr in [self.ADDRESSING_HORIZ, self.ADDRESSING_VERT], \"Addressing must be ADDRESSING_HORIZ or ADDRESSING_VERT.\"\n self.addressing = addr\n if set:\n self._set_function()"
]
| [
"0.628742",
"0.611881",
"0.57433975",
"0.55193835",
"0.5486674",
"0.54130983",
"0.53863645",
"0.5378961",
"0.53111404",
"0.5289651",
"0.52655697",
"0.5224805",
"0.5176909",
"0.5152209",
"0.50901914",
"0.50557125",
"0.5051199",
"0.50365",
"0.4989037",
"0.49518487",
"0.49272317",
"0.48957577",
"0.488207",
"0.48819366",
"0.48819366",
"0.48531833",
"0.4844953",
"0.48365495",
"0.48234546",
"0.4821528"
]
| 0.74795645 | 0 |
Sets the server_pool of this VirtualService. | def server_pool(self, server_pool):
self._server_pool = server_pool | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def server(self, server):\n\n self._server = server",
"def update_listener_pool(self, service, name, bigips):\n vip = self.service_adapter.get_virtual_name(service)\n if vip:\n vip[\"pool\"] = name\n for bigip in bigips:\n v = bigip.tm.ltm.virtuals.virtual\n if v.exists(name=vip[\"name\"], partition=vip[\"partition\"]):\n obj = v.load(name=vip[\"name\"], partition=vip[\"partition\"])\n obj.modify(**vip)",
"def set_pool_size(self, pool_size):\n self._aspp.set_pool_size(pool_size)",
"def set_pool_size(self, pool_size):\n self._semantic_decoder.set_pool_size(pool_size)\n if self._instance_decoder is not None:\n self._instance_decoder.set_pool_size(pool_size)",
"def setServerEventHandler(self, handler):\n self.serverEventHandler = handler",
"def _set_pool_size(self, pool_size):\n if (\n isinstance(pool_size, bool)\n or not isinstance(pool_size, int)\n or not pool_size > 0\n ):\n raise AttributeError(\n \"Pool max_size value must be an integer greater than 0, the \"\n f\"given value {pool_size} is not valid\"\n )\n\n self.max_size = _CNX_POOL_MAXSIZE if pool_size == 0 else pool_size",
"def setServer(self, server):\n libxml2mod.xmlURISetServer(self._o, server)",
"def server_enabled(self, server_enabled):\n\n self._server_enabled = server_enabled",
"def server_id(self, server_id):\n\n self._server_id = server_id",
"def server_id(self, server_id):\n\n self._server_id = server_id",
"def set_server(server):\n\n instance = Ceic._get_instance()\n \n instance._ceic_configuration.server = server\n\n return instance",
"def SetServerInformation(self, server, port):\n self.hostname = server\n self.port = port",
"def pool_size(self, pool_size: ConfigNodePropertyInteger):\n\n self._pool_size = pool_size",
"def setNameservers(self, nameserver):\n # type: (tp.Any)->None\n\n self.validateOne('nameservers', self._valid['nameservers'], nameserver)\n self._ifAttributes['nameservers'] = nameserver",
"def add_server(self, server):\n\n\t\tserver.capacity = self.capacity # Set capacity of the server to the capacity of the Geocache\n\t\tself.servers.append(server)",
"def max_pool_size(self, max_pool_size: ConfigNodePropertyInteger):\n\n self._max_pool_size = max_pool_size",
"def server_port(self, server_port):\n\n self._server_port = server_port",
"def set_server(client, server_url):\n data = {\"server_url\": server_url}\n return client._creoson_post(\"windchill\", \"set_server\", data)",
"def set_servers(self, server_infos):\n self.remove_servers_channels()\n for server_info in server_infos:\n server_section = server_info['server']\n server_name = server_section.name\n self._init_section_id(server_section)\n self._sections[self._server_hash(server_name)] = server_section\n\n for channel_section in server_info['channels']:\n if channel_section is None:\n continue\n channel_section.name = server_name\n self._init_section_id(channel_section)\n self._sections[self._channel_hash(server_name)] = channel_section",
"def server_profile(self, server_profile):\n\n self._server_profile = server_profile",
"def pool_id(self, pool_id):\n if self.local_vars_configuration.client_side_validation and pool_id is None: # noqa: E501\n raise ValueError(\"Invalid value for `pool_id`, must not be `None`\") # noqa: E501\n\n self._pool_id = pool_id",
"def set_redis_server(server):\n redis_server = server",
"def update_connection_pool(maxsize=1):\n get_pool().connection_pool_kw.update(maxsize=maxsize)",
"def remote_file_server(self, remote_file_server):\n\n self._remote_file_server = remote_file_server",
"def setServerip(self):\n\t\tself.serverip = self.settings.getKeyValue('serverip')\n\t\tself.socket.send('setenv serverip ' + self.serverip+'\\r', 1)\n\t\treturn None",
"def beta_create_SpiderServer_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):\n request_deserializers = {\n ('DistributeSpider.SpiderServer', 'keepalive'): Register.FromString,\n ('DistributeSpider.SpiderServer', 'req'): Request.FromString,\n ('DistributeSpider.SpiderServer', 'wait'): Wait.FromString,\n }\n response_serializers = {\n ('DistributeSpider.SpiderServer', 'keepalive'): UrlsAck.SerializeToString,\n ('DistributeSpider.SpiderServer', 'req'): Response.SerializeToString,\n ('DistributeSpider.SpiderServer', 'wait'): Ack.SerializeToString,\n }\n method_implementations = {\n ('DistributeSpider.SpiderServer', 'keepalive'): face_utilities.unary_unary_inline(servicer.keepalive),\n ('DistributeSpider.SpiderServer', 'req'): face_utilities.unary_unary_inline(servicer.req),\n ('DistributeSpider.SpiderServer', 'wait'): face_utilities.unary_unary_inline(servicer.wait),\n }\n server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)\n return beta_implementations.server(method_implementations, options=server_options)",
"def set_servers(self, servers):\n\n\t\tself.servers = []\n\t\tself.buckets = []\n\n\t\tfor server_desc in servers:\n\t\t\tif type(server_desc) == tuple:\n\t\t\t\tserver_addr, weight = server_desc\n\t\t\telse:\n\t\t\t\tserver_addr, weight = server_desc, 1\n\n\t\t\tserver = _ServerConnection(server_addr, weight, self._debuglog)\n\n\t\t\tself.servers.append(server)\n\n\t\t\tfor _index in range(weight):\n\t\t\t\tself.buckets.append(server)",
"def __init__(self, local_service, remote_address):\n super(RemoteServiceServer, self).__init__(remote_address)\n self.local_service = local_service\n\n self.pending_incoming_requests_threads = WeakSet()",
"def setServerPort(self, serverPort):\n\n _ExceptionUtil.raiseOnError(\n internals.blpapi_SessionOptions_setServerPort(self.__handle,\n serverPort))",
"def registerServer(srv):\n srv.setListenAddress(hostname)\n srv.setMachine(getMBean('/Machines/'+machineName))"
]
| [
"0.5971407",
"0.5879605",
"0.5761809",
"0.54552734",
"0.54371893",
"0.54270303",
"0.54200625",
"0.53819776",
"0.5322363",
"0.5322363",
"0.531683",
"0.5273689",
"0.52584356",
"0.52292633",
"0.5185671",
"0.518415",
"0.5183671",
"0.5172391",
"0.51120234",
"0.50874215",
"0.50653625",
"0.50591403",
"0.49996424",
"0.49862266",
"0.49801475",
"0.49757397",
"0.49638146",
"0.4955521",
"0.49420464",
"0.4917126"
]
| 0.8254317 | 0 |
Sets the modified_at of this VirtualService. | def modified_at(self, modified_at):
self._modified_at = modified_at | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_modified(self, dt):\n self.modified = dt_to_iso(dt)",
"def set_modified(self, dt):\n self.modified = dt_to_iso(dt)",
"def modified_date(self, modified_date):\n\n self._modified_date = modified_date",
"def modified_date(self, modified_date):\n\n self._modified_date = modified_date",
"def date_modified(self, date_modified):\n \n self._date_modified = date_modified",
"def modified(self, modified):\n\n self._modified = modified",
"def date_modified(self, date_modified):\n\n self._date_modified = date_modified",
"def date_modified(self, date_modified):\n\n self._date_modified = date_modified",
"def date_modified(self, date_modified):\n\n self._date_modified = date_modified",
"def date_modified(self, date_modified):\n\n self._date_modified = date_modified",
"def date_modified(self, date_modified):\n\n self._date_modified = date_modified",
"def date_modified(self, date_modified):\n\n self._date_modified = date_modified",
"def date_modified(self, date_modified):\n\n self._date_modified = date_modified",
"def updated_at(self, updated_at: \"datetime\"):\n self._attrs[\"updatedAt\"] = updated_at",
"def updated_at(self, updated_at: \"datetime\"):\n self._attrs[\"updatedAt\"] = updated_at",
"def updated_at(self, updated_at: \"datetime\"):\n self._attrs[\"updatedAt\"] = updated_at",
"def save(self, *args, **kwargs):\n self.modified_at = datetime.datetime.utcnow()\n return super().save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n self.modified_at = datetime.datetime.utcnow()\n return super().save(*args, **kwargs)",
"def last_modified_on(self, last_modified_on):\n\n self._last_modified_on = last_modified_on",
"def updated_at(self, updated_at):\n\n self._updated_at = updated_at",
"def updated_at(self, updated_at):\n\n self._updated_at = updated_at",
"def updated_at(self, updated_at):\n\n self._updated_at = updated_at",
"def updated_at(self, updated_at):\n\n self._updated_at = updated_at",
"def updated_at(self, updated_at):\n\n self._updated_at = updated_at",
"def updated_at(self, updated_at):\n\n self._updated_at = updated_at",
"def updated_at(self, updated_at):\n\n self._updated_at = updated_at",
"def updated_at(self, updated_at):\n\n self._updated_at = updated_at",
"def updated_at(self, updated_at):\n\n self._updated_at = updated_at",
"def updated_at(self, updated_at):\n\n self._updated_at = updated_at",
"def modified_object(obj, event):\n now = datetime.now(tz=_zone)\n obj.modification_date = now"
]
| [
"0.6695722",
"0.6695722",
"0.65469223",
"0.65469223",
"0.64611924",
"0.6390162",
"0.6373553",
"0.6373553",
"0.6373553",
"0.6373553",
"0.6373553",
"0.6373553",
"0.6373553",
"0.6273601",
"0.6273601",
"0.6273601",
"0.5977197",
"0.5977197",
"0.5970284",
"0.5956164",
"0.5956164",
"0.5956164",
"0.5956164",
"0.5956164",
"0.5956164",
"0.5956164",
"0.5956164",
"0.5956164",
"0.5956164",
"0.58904314"
]
| 0.78778386 | 0 |
Sets the deployment_status of this VirtualService. | def deployment_status(self, deployment_status):
allowed_values = ["deploy_pending", "deploy_in_progress", "deploy_failed", "deployed", "delete_pending", "delete_in_progress", "delete_failed", "deleted"] # noqa: E501
if deployment_status not in allowed_values:
raise ValueError(
"Invalid value for `deployment_status` ({0}), must be one of {1}" # noqa: E501
.format(deployment_status, allowed_values)
)
self._deployment_status = deployment_status | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _set_deployment_status():\n reset_query = dep_table.update().values(deployment_status=None)\n db.session.execute(reset_query)\n\n in_progress_condition = exc_table.c.status.in_(\n execution_states[DeploymentState.IN_PROGRESS])\n req_attention_condition = or_(\n dep_table.c.installation_status == DeploymentState.INACTIVE,\n exc_table.c.status.in_(execution_states[DeploymentState.FAILED]),\n )\n for condition, target_status in [\n (in_progress_condition, DeploymentState.IN_PROGRESS),\n (req_attention_condition, DeploymentState.REQUIRE_ATTENTION),\n (None, DeploymentState.GOOD)\n ]:\n conditions = [\n dep_table.c._latest_execution_fk == exc_table.c._storage_id,\n dep_table.c.deployment_status.is_(None)\n ]\n if condition is not None:\n conditions.append(condition)\n query = (\n dep_table.update()\n .where(exists(\n select([1]).where(and_(*conditions))\n ))\n .values(deployment_status=target_status)\n )\n db.session.execute(query)",
"def service_status(self, service_status):\n\n self._service_status = service_status",
"def set_status(self, status):\n self.status = status",
"def set_status(self, status):\n self.status = status",
"def set_status(self, status):\n self.status = status",
"def setStatus(self, status):\n self.__status = status",
"def status(self, status):\n if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501\n raise ValueError(\"Invalid value for `status`, must not be `None`\") # noqa: E501\n\n self._status = status",
"def operational_status(self, operational_status):\n\n self._operational_status = operational_status",
"def operational_status(self, operational_status):\n\n self._operational_status = operational_status",
"def status(self, status):\n self._set_property_(self.STATUS, str(status))",
"def SetStatus(self, status):\r\n self.status = status",
"def setstatus(self, status):\n with self.lock:\n self.status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status",
"def status(self, status):\n\n self._status = status"
]
| [
"0.67861205",
"0.65088254",
"0.6331958",
"0.6331958",
"0.6331958",
"0.62717587",
"0.6136346",
"0.61323804",
"0.61323804",
"0.6127772",
"0.6103353",
"0.6027898",
"0.5961066",
"0.5961066",
"0.5961066",
"0.5961066",
"0.5961066",
"0.5961066",
"0.5961066",
"0.5961066",
"0.5961066",
"0.5961066",
"0.5961066",
"0.5961066",
"0.5961066",
"0.5961066",
"0.5961066",
"0.5961066",
"0.5961066",
"0.5961066"
]
| 0.72562563 | 0 |
Expand the HRP into values for checksum computation. | def bech32_hrp_expand(hrp):
return [ord(x) >> 5 for x in hrp] + [0] + [ord(x) & 31 for x in hrp] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _expand(self):\n cilm = SHExpandDH(self.data)\n coeffs = SHCoeffs.from_array(cilm, kind='DH')\n return coeffs",
"def bech32_create_checksum(hrp, data):\n values = bech32_hrp_expand(hrp) + data\n polymod = bech32_polymod(values + [0, 0, 0, 0, 0, 0]) ^ 1\n return [(polymod >> 5 * (5 - i)) & 31 for i in range(6)]",
"def repackage(h):\n if type(h) == Variable:\n return Variable(h.data)\n else:\n return tuple(repackage(v) for v in h)",
"def arm_part_2(data, protocol_values):\n # update_protocol_value(data, protocol_values, arm_part_rule)\n\n duplicate_data = []\n\n for each_data in data:\n if each_data not in duplicate_data:\n duplicate_data.append(each_data)\n\n for expression, group_value in arm_part_rule_divided.items():\n update_value = update_protocol_value(each_data, protocol_values, expression, group_value)\n if update_value:\n eval(group_value[3])(update_value, protocol_values)",
"def _pack(self):\n to_pack = {\n \"remote_ip\": self.remote_ip,\n \"remote_port\": self.remote_port,\n \"min_fidelity\": self.min_fidelity,\n \"max_time\": self.max_time,\n \"num_pairs\": self.num_pairs,\n \"priority\": self.priority,\n \"store\": self.store,\n \"atomic\": self.atomic,\n \"measure_directly\": self.measure_directly,\n }\n request_Bitstring = bitstring.pack(self.PACKAGING_FORMAT, **to_pack)\n requestH = request_Bitstring.tobytes()\n\n return requestH",
"def repackage_state(h):\n if not h:\n return None\n elif type(h) == V:\n return V(h.data)\n else:\n return list(repackage_state(v) for v in h)",
"def eval_abs_costh_phi_fold_HX(data):\n return data.costh_HX_fold.abs(), data.phi_HX_fold",
"def _pack(self):\n xtraH = struct.pack(\n self.PACKAGING_FORMAT,\n self.qubit_id,\n self.remote_app_id,\n self.remote_node,\n self.datetime,\n self.remote_port,\n self.outcome,\n 0,\n )\n return xtraH",
"def add_check_sums(hdu_list: fits.HDUList):\n for hdu in hdu_list:\n hdu.verify(\"fix\")\n hdu.add_checksum()\n hdu.header.insert(\"CHECKSUM\", BLANK_CARD)\n hdu.header.insert(\"CHECKSUM\", (\" \", \"DATA INTEGRITY\"))\n hdu.add_checksum()\n\n return None",
"def update_hash(self, h):\n # Generate a sequence of fragments that add up to the canonical\n # version of the expression.\n fragments = []\n self.collect_str_fragments(fragments)\n # Update the hash. Wrapping with 'node<...>' prevents the hash\n # from being extended in a way that would clash with something we can\n # generate. (Probably not an important concern but it doesn't hurt.)\n h.update(\"node<\")\n for f in fragments:\n h.update(f)\n h.update(\">\")",
"def _pack(self):\n xtraH = struct.pack(\n self.PACKAGING_FORMAT,\n self.qubit_id,\n self.remote_app_id,\n self.remote_node,\n self.cmdLength,\n self.remote_port,\n self.step,\n 0,\n )\n return xtraH",
"def __init__(self):\n self.modulus = 769\n self.hs = [[]] * self.modulus",
"def calculate_vals(self):\n for pp in self.powerplants:\n pp[\"vals\"] = self.possible_vals(pp)\n pp[\"index\"] = 0",
"def expand_values(self):\n for k, v in self.job.items():\n foundkey = re.search(self.key_pattern, v)\n # This is done iteratively so that it doesn't matter what order\n # lines appear in a bake parameter file\n while foundkey:\n v = v.replace(\n foundkey.group(0),\n self.job[foundkey.group(0)])\n foundkey = re.search(self.key_pattern, v)\n self.job[k] = v",
"def _unpack(self, headerBytes) -> None:\n unpacked = struct.unpack(self.PACKAGING_FORMAT, headerBytes)\n\n self.first_operand = unpacked[0]\n self.operator = unpacked[1]\n self.type_of_second_operand = unpacked[2]\n self.second_operand = unpacked[3]\n self.length = unpacked[4]",
"def pack(self):\n values = (\n self.start,\n pressure_to_pa(self.peep),\n self.freq,\n self.ratio,\n pressure_to_pa(self.pressure + self.peep),\n self.oxygen)\n\n print(values)\n\n s = struct.Struct('H'*len(values))\n packed_data = s.pack(*values)\n return packed_data",
"def _unpack(self, headerBytes):\n xtraH = struct.unpack(self.PACKAGING_FORMAT, headerBytes)\n\n self.qubit_id = xtraH[0]\n self.remote_app_id = xtraH[1]\n self.remote_node = xtraH[2]\n self.datetime = xtraH[3]\n self.remote_port = xtraH[4]\n self.outcome = xtraH[5]",
"def _unpack(self, headerBytes):\n xtraH = struct.unpack(self.PACKAGING_FORMAT, headerBytes)\n\n self.qubit_id = xtraH[0]\n self.remote_app_id = xtraH[1]\n self.remote_node = xtraH[2]\n self.cmdLength = xtraH[3]\n self.remote_port = xtraH[4]\n self.step = xtraH[5]",
"def adjust_checksum(self) -> None:\n self._csum = 0\n # TODO improve that (it's not really efficient)\n self._csum = socket.htons(ip_checksum(self.encode()[:20]))",
"def _unpack(self, headerBytes):\n if self._cqc_version < 2:\n header = struct.unpack(self.PACKAGING_FORMAT_V1, headerBytes)\n self.remote_app_id = header[0]\n self.remote_node = header[1]\n self.remote_port = header[2]\n else:\n header = struct.unpack(self.PACKAGING_FORMAT, headerBytes)\n self.remote_app_id = header[0]\n self.remote_port = header[1]\n self.remote_node = header[2]",
"def pulp_smash():",
"def decode(self, h):\n return self.act_decode(self.linearD(h))",
"def _assemble_values(init_values, values_per_factor_group):\n return itertools.chain(init_values,\n itertools.chain(\n *zip(\n *values_per_factor_group)))",
"def decode(self, h):\n return self.tanh(self.linearD(h))",
"def decode(self, h):\n return self.tanh(self.linearD(h))",
"def decode(self, h):\n return self.tanh(self.linearD(h))",
"def sha_process(self):\n\n # Break chunk into sixteen 32-bit big-endian words m[0..15]\n m = [0] * 64\n # Extend the sixteen 32-bit words into sixty-four 32-bit words\n j = 0\n for i in range(16):\n m[i] = self.data_b[j] << 24 | self.data_b[j+1] << 16\\\n | self.data_b[j+2] << 8 | self.data_b[j+3]\n j += 4\n\n for i in range(16, 64):\n sig0 = self.rotr(m[i-15], 7) ^ self.rotr(m[i-15], 18)\\\n ^ (m[i-15] >> 3)\n sig1 = self.rotr(m[i-2], 17) ^ self.rotr(m[i-2], 19)\\\n ^ (m[i-2] >> 10)\n m[i] = (sig1 + m[i-7] + sig0 + m[i-16]) & 0xFFFFFFFF\n\n # Initialize hash value for this chunk\n a, b, c, d, e, f, g, h = self.h\n\n for i in range(64):\n ep0 = (self.rotr(a, 2) ^ self.rotr(a, 13) ^ self.rotr(a, 22))\\\n & 0xFFFFFFFF\n ep1 = (self.rotr(e, 6) ^ self.rotr(e, 11) ^ self.rotr(e, 25))\\\n & 0xFFFFFFFF\n ch = ((e & f) ^ ((~e) & g))\n maj = ((a & b) ^ (a & c) ^ (b & c))\n t1 = (h + ep1 + ch + self.k[i] + m[i]) & 0xFFFFFFFF\n t2 = (ep0 + maj) & 0xFFFFFFFF\n h = g\n g = f\n f = e\n e = (d + t1) & 0xFFFFFFFF\n d = c\n c = b\n b = a\n a = (t1 + t2) & 0xFFFFFFFF\n\n # Add this chunk's hash to result so far\n self.h[0] = (self.h[0] + a) & 0xFFFFFFFF\n self.h[1] = (self.h[1] + b) & 0xFFFFFFFF\n self.h[2] = (self.h[2] + c) & 0xFFFFFFFF\n self.h[3] = (self.h[3] + d) & 0xFFFFFFFF\n self.h[4] = (self.h[4] + e) & 0xFFFFFFFF\n self.h[5] = (self.h[5] + f) & 0xFFFFFFFF\n self.h[6] = (self.h[6] + g) & 0xFFFFFFFF\n self.h[7] = (self.h[7] + h) & 0xFFFFFFFF",
"def _build_parsed_values(self):\n\n port_agent_packet = self.raw_data\n if not isinstance(port_agent_packet, dict):\n raise SampleException(\"raw data not a dictionary\")\n\n for param in [\"raw\", \"length\", \"type\", \"checksum\"]:\n if param not in port_agent_packet:\n raise SampleException(\"raw data not a complete port agent packet. missing %s\" % param)\n\n payload = None\n length = None\n type = None\n checksum = None\n\n # Attempt to convert values\n try:\n payload = base64.b64encode(port_agent_packet.get(\"raw\"))\n except TypeError:\n pass\n\n try:\n length = int(port_agent_packet.get(\"length\"))\n except TypeError:\n pass\n\n try:\n type = int(port_agent_packet.get(\"type\"))\n except TypeError:\n pass\n\n try:\n checksum = int(port_agent_packet.get(\"checksum\"))\n except TypeError:\n pass\n\n result = [{\n DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,\n DataParticleKey.VALUE: payload,\n DataParticleKey.BINARY: True},\n {\n DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,\n DataParticleKey.VALUE: length},\n {\n DataParticleKey.VALUE_ID: RawDataParticleKey.TYPE,\n DataParticleKey.VALUE: type},\n {\n DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,\n DataParticleKey.VALUE: checksum},\n ]\n\n return result",
"def updateFromHsl ( self ):\n rgb = Colz.hslToRgb( self.h, self.s, self.l )\n self.r = rgb[0]\n self.g = rgb[1]\n self.b = rgb[2]\n self.rgb = rgb\n self.rgba = [ rgb[0], rgb[1], rgb[2], self.a ]\n # Updates Hex\n self.hex = Colz.rgbToHex( rgb[0], rgb[1], rgb[2] )",
"def subtract(h, hlist):\n new_h = h.Clone()\n new_h.Sumw2()\n for hl in hlist:\n new_h.Add(hl,-1.)\n #print hl.GetName(), new_h.Integral()\n return new_h"
]
| [
"0.5692708",
"0.5370606",
"0.495138",
"0.49316415",
"0.4826316",
"0.47964844",
"0.47361484",
"0.47303766",
"0.47205597",
"0.4685879",
"0.46665284",
"0.4651518",
"0.46514586",
"0.46187925",
"0.46125764",
"0.45802903",
"0.45793587",
"0.45672297",
"0.4549622",
"0.4540971",
"0.45325777",
"0.45095202",
"0.450314",
"0.4498021",
"0.4498021",
"0.4498021",
"0.4484494",
"0.44749576",
"0.4464929",
"0.44105184"
]
| 0.6247475 | 0 |
Verify a checksum given HRP and converted data characters. | def bech32_verify_checksum(hrp, data):
return bech32_polymod(bech32_hrp_expand(hrp) + data) == 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _verify_checksum(data, checksum):\n sha256_hash = hashlib.sha256(data).hexdigest().encode()\n return to_bin(sha256_hash)[0 : len(data) * 8 // 32] == checksum",
"def checksum(data: str):\n if len(data) % 2 == 1:\n return data\n it = iter(data)\n new_data = ''\n for bit in it:\n if bit == next(it): # two consecutive characters are the same\n new_data += '1'\n else:\n new_data += '0'\n return checksum(new_data)",
"def calculate_checksum(self, data):\n\t\tdata = data[2:] # Ignore start tokens ($$)\n\t\tcrc16 = crcmod.predefined.mkCrcFun('crc-ccitt-false')\n\t\treturn hex(crc16(data))[2:].upper().zfill(4)",
"def validate_hash(h):\n if len(h) not in (32, 40, 64, 128):\n return False\n\n return bool(re.match(\"[0-9a-fA-F]*$\", h))",
"def test_right_checksum(self):\n self.assertEqual(utils.checksum('fooo'), 'L')",
"def verify(self, h):\n CrawlConfig.log(\"hsi(%d) attempting to verify %s\" % (h.pid(),\n self.path))\n rsp = h.hashverify(self.path)\n\n if \"TIMEOUT\" in rsp or \"ERROR\" in rsp:\n rval = \"skipped\"\n self.set('fails', self.fails + 1)\n CrawlConfig.log(\"hashverify transfer incomplete on %s -- skipping\"\n % self.path)\n h.quit()\n elif \"%s: (md5) OK\" % self.path in rsp:\n rval = \"matched\"\n CrawlConfig.log(\"hashverify matched on %s\" % self.path)\n elif \"no valid checksum found\" in rsp:\n if self.addable(self.cos):\n rval = self.add_to_sample(h)\n else:\n self.set('checksum', 0)\n rval = \"skipped\"\n CrawlConfig.log(\"hashverify skipped %s\" % self.path)\n else:\n rval = Alert.Alert(\"Checksum mismatch: %s\" % rsp)\n CrawlConfig.log(\"hashverify generated 'Checksum mismatch' \" +\n \"alert on %s\" % self.path)\n return rval",
"def check_packet(self, header, string):\n\n string = string[0:11] + string[75:]\n gen_chksum = hashlib.sha256(string.encode()).hexdigest()\n try:\n if header[\"checksum\"] == gen_chksum:\n return True\n else:\n return False\n except KeyError:\n return False",
"def bech32_create_checksum(hrp, data):\n values = bech32_hrp_expand(hrp) + data\n polymod = bech32_polymod(values + [0, 0, 0, 0, 0, 0]) ^ 1\n return [(polymod >> 5 * (5 - i)) & 31 for i in range(6)]",
"def main():\n for hchar in hexchars:\n for char in chars:\n for c in range(481):\n if(((ord(char) + int(hchar,16) - c) == int(codeArr[0]))):\n intTotal = fillMD5Total(char, hchar,c, 1)\n result = DecryptChar(char,1, str(hchar),intTotal)\n if(result):\n print result.split(\"\\n\")[-2]\n return True",
"def _verify_fletcher32(chunk_buffer):\n # calculate checksums\n if len(chunk_buffer) % 2:\n arr = np.frombuffer(chunk_buffer[:-4]+b'\\x00', '<u2')\n else:\n arr = np.frombuffer(chunk_buffer[:-4], '<u2')\n sum1 = sum2 = 0\n for i in arr:\n sum1 = (sum1 + i) % 65535\n sum2 = (sum2 + sum1) % 65535\n\n # extract stored checksums\n ref_sum1, ref_sum2 = np.frombuffer(chunk_buffer[-4:], '>u2')\n ref_sum1 = ref_sum1 % 65535\n ref_sum2 = ref_sum2 % 65535\n\n # compare\n if sum1 != ref_sum1 or sum2 != ref_sum2:\n raise ValueError(\"fletcher32 checksum invalid\")\n return True",
"def checksum (upc):\n\n # check type of input\n # raise TypeError if not string\n\n # xxxxxxxxxxx x\n # check length of string\n # raise ValueError if not 12\n\n # convert string to array\n # generate checksum using the first 11 digits provided\n # check against the the twelfth digit\n # result of first 11 digits must be consistent with the value of the 12th digit\n # value must be number\n\n # return True if they are equal, False otherwise\n num = []\n #\"123456\" --> \"1\" \"2\" \"3\" \"4\" \"5\" \"6\" --> num = [1,2,3,4,5,6] --> num[0] = 1, num[3] = 4\n if type(upc) is str:\n for i in range(0, len(upc)):\n try:\n num.append(int(upc[i]))\n except ValueError:\n raise ValueError(\"Not correct length\")\n # if upc[i] is not number checksum('1b2')\n else:\n raise TypeError(\"Invalid type passed as parameter\")\n #raiseError\n\n if len(num) != 12:\n raise ValueError(\"Not correct length\")\n\n\n odd, even = num[::2], num[1::2]\n result = 0\n for i in range(0,len(odd)):\n result = result + odd[i]\n\n result *= 3\n\n # This is to add even numbered digits\n for i in range(0, (len(even)-1)):\n result = result + even[i]\n\n result %= 10\n if result != 0:\n result = 10 - result\n\n if result == num[11]:\n return True\n\n return False",
"def checksum(data):\n total = sum([ord(x) for x in data])\n return total & 0xff",
"def validate_checksum(blob: bytes, offset: int, length: int):\n\n checksum = ord(blob[offset + length - 1:offset + length])\n data_sum = sum(\n struct.unpack('%dB' % (length - 1), blob[offset:offset + length - 1])\n )\n if 0xff & (data_sum + checksum) != 0:\n raise ValueError('The data do not match the checksum')",
"def checksum(upc):\n\n # check type of input\n if type(upc) != str:\n # raise TypeError if not string\n raise TypeError(\"Input must be a string\")\n # check length of string\n elif len(upc) != 12:\n # raise ValueError if not 12\n raise ValueError(\"Invalid UPC length\")\n # generate checksum using the first 11 digits provided\n else:\n # add the odd digits together\n odd_digits = upc[::2]\n odd_sum = sum([int(x) for x in odd_digits])\n\n # add the even digits together (12th digit not included)\n even_digits = upc[1:-1:2]\n even_sum = sum([int(x) for x in even_digits])\n\n # multiply the odd sum by 3, add that to the even sum and\n # find the modulo 10 of the result\n mod = ((odd_sum * 3) + even_sum) % 10\n\n # if the result is not 0, subtract the result from 10\n checksum_digit = 0\n if mod != 0:\n checksum_digit = 10 - mod\n\n # check against the twelfth digit\n # return True if they are equal, False otherwise\n return int(upc[11]) == checksum_digit",
"def check_record(self, record):\n checking = reduce(lambda x,y: x + y, [int(record[i*2:i*2+2], 16) for i in [x for x in xrange(len(record)/2)]])\n if ('%02x' % checking)[-2:] != '00':\n raise Exception ('ERROR: Checksum doesn\\' match! Record is %s' % (record, ))",
"def checksum(message):\n check = 0\n for c in message:\n check += ord(c)\n return check % 256",
"def test_hash_string(self):\n self.assertEqual(hexlify(self._hashdigest(pubkey_sha)), sample_ripe)",
"def validate(msg):\n valid = True\n\n if not msg or len(msg) < 4:\n return False, -1, -1\n\n checksum = msg[-1]\n length = int(''.join('{:02X}'.format(byte) for byte in msg[1:3]), 16)\n # try:\n # # here works for pyton 3 only\n # length = int.from_bytes(msg[1:3], byteorder='big', signed=False)\n # except Exception:\n # length = int(''.join('{:02X}'.format(byte) for byte in msg[1:3]), 16)\n\n validlen = len(msg[3:-1])\n validsum = 0xFF - ((sum(msg[3:-1])) & 0xFF)\n\n # print('length: ' + str(self.length) + '; ' + str(validlen))\n # print('checksum: ' + str(self.checksum) + '; ' + str(validsum))\n\n # check sanity of computed Length and Checksum with the one in the message\n if (checksum != validsum) or (length != validlen):\n valid = False\n\n return valid, length, checksum",
"def hmVerifyMsgCRCOK(destination, protocol, source, expectedFunction, expectedLength, datal) :\r\n badresponse = 0\r\n if protocol == constants.HMV3_ID:\r\n checksum = datal[len(datal)-2:]\r\n rxmsg = datal[:len(datal)-2]\r\n crc = crc16() # Initialises the CRC\r\n expectedchecksum = crc.run(rxmsg)\r\n if expectedchecksum == checksum:\r\n print(\"CRC is correct\")\r\n else:\r\n print(\"CRC is INCORRECT\")\r\n s = \"Incorrect CRC: %s Expected: %s \\n\" % (datal, expectedchecksum)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n # Check the response\r\n dest_addr = datal[0]\r\n frame_len_l = datal[1]\r\n frame_len_h = datal[2]\r\n frame_len = (frame_len_h << 8) | frame_len_l\r\n source_addr = datal[3]\r\n func_code = datal[4]\r\n\r\n\r\n\r\n if (dest_addr != 129 and dest_addr != 160):\r\n print(\"dest_addr is ILLEGAL\")\r\n s = \"%s : Controller %s : Illegal Dest Addr: %s\\n\" % (localtime, loop, dest_addr)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (dest_addr != destination):\r\n print(\"dest_addr is INCORRECT\")\r\n s = \"%s : Controller %s : Incorrect Dest Addr: %s\\n\" % (localtime, loop, dest_addr)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (source_addr < 1 or source_addr > 32):\r\n print(\"source_addr is ILLEGAL\")\r\n s = \"%s : Controller %s : Illegal Src Addr: %s\\n\" % (localtime, loop, source_addr)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (source_addr != source):\r\n print(\"source addr is INCORRECT\")\r\n s = \"%s : Controller %s : Incorrect Src Addr: %s\\n\" % (localtime, loop, source_addr)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (func_code != constants.FUNC_WRITE and func_code != constants.FUNC_READ):\r\n print(\"Func Code is UNKNWON\")\r\n s = \"%s : Controller %s : Unknown Func Code: %s\\n\" % (localtime, loop, func_code)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (func_code != expectedFunction):\r\n print(\"Func Code is UNEXPECTED\")\r\n s = \"%s : Controller %s : Unexpected Func Code: %s\\n\" % (localtime, loop, func_code)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (func_code == constants.FUNC_WRITE and frame_len != 7):\r\n # Reply to Write is always 7 long\r\n print(\"response length is INCORRECT\")\r\n s = \"%s : Controller %s : Incorrect length: %s\\n\" % (localtime, loop, frame_len)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (len(datal) != frame_len):\r\n print(\"response length MISMATCHES header\")\r\n s = \"%s : Controller %s : Mismatch length: %s %s\\n\" % (localtime, loop, len(datal), frame_len)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n \"\"\"if (func_code == constants.FUNC_READ and expectedLength !=len(datal) ):\r\n # Read response length is wrong\r\n print(\"response length not EXPECTED value\")\r\n print(len(datal))\r\n print(datal)\r\n s = \"%s : Controller %s : Incorrect length: %s\\n\" % (localtime, loop, frame_len)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\"\"\"\r\n if (badresponse == 0):\r\n return True\r\n else:\r\n return False\r\n\r\n else:\r\n assert 0, \"Un-supported protocol found %s\" % protocol",
"def valid_response(line):\n cksum = int(line[-2:], 16) # checksum is last two characters in ASCII hex\n data = line[:-2] # remove checksum from data\n\n calc_cksum = checksum(data)\n if cksum != calc_cksum:\n log.debug('checksum failed (%r): should be %s', line, hex(calc_cksum))\n return False\n return True",
"def ishex(data: str) -> bool:\n return bool(re.fullmatch(r\"^0[x|X][0-9a-fA-F]+\", data)) or bool(re.fullmatch(r\"^[0-9a-fA-F]+[h|H]$\", data))",
"def correct_checksum():\n test_strs = [\"ch3ck1nG c0rr3ct ch3cksu|\\/|\\n\", \"y3T an0th3r str1ng0_x\\/.!&\\n\"]\n\n def test_checksum(test_str):\n server = start_server()\n client = start_client()\n\n write_to(client, test_str)\n segments = read_segments_from(client)\n if not segments:\n return False\n teardown()\n\n # Start reference solution to get answers.\n ref_server = start_server(port=REF_PORT, reference=True)\n ref_client = start_client(server_port=REF_PORT, reference=True)\n\n # Get reference checksum.\n write_to(ref_client, test_str)\n ref_segment = read_segments_from(ref_client)[0]\n ref_checksum = ref_segment.checksum\n\n # Check the first sent segment.\n segment = segments[0]\n\n # Checksum equal to the reference checksum.\n if segment.checksum == ref_checksum:\n return True\n\n # Maybe they also set an ACK for this segment. Compare with the computed\n # checksum.\n return int(segment.checksum, 16) == segment.c_repr.cksum;\n\n return reduce(lambda a, b: a and b, [test_checksum(t) for t in test_strs])",
"def calc_checksum(content: bytes) -> bytes:\r\n\r\n check_a = 0\r\n check_b = 0\r\n\r\n for char in content:\r\n check_a += char\r\n check_a &= 0xFF\r\n check_b += check_a\r\n check_b &= 0xFF\r\n\r\n return bytes((check_a, check_b))",
"def compute_checksum(data):\n\tif len(data) & 1:\n\t\tdata = data + '\\0'\n\n\tsum = 0\n\twords = array.array('h', data)\n\tfor word in words:\n\t\tsum = sum + (word & 0xffff)\n\t\t\n\thi = sum >> 16\n\tlow = sum & 0xffff\n\tsum = hi + low\n\tsum = sum + (sum >> 16)\n\treturn (~sum) & 0xffff",
"def _validate_hash(data, shasum):\n from hashlib import sha1\n digest = sha1(data).hexdigest()\n if digest == shasum:\n return True\n else:\n print('Invalid shasum, got: {} , expected: {}'.format(digest, shasum))\n return False",
"def _validate_checksum(self, msg: bytes) -> bool:\n return self._checksum(msg) == msg[8]",
"def test_valid_luhn(self):\n assert luhn_checksum(\"79927398713\") == 0",
"def checksum(self,msg):\n cksum = sum([ord(x) for x in msg])\n cksum0 = ((cksum & 0xF0) >> 4) + 0x30\n cksum1 = (cksum & 0x0F) + 0x30\n return chr(cksum0)+chr(cksum1)",
"def test_wrong_checksum(self):\n self.assertNotEqual(utils.checksum('fooo'), b'A')",
"def _send_check(self, data):\n # Append checksum before encrypting\n checksum = sum(data) % 256\n data.append(self._highNib(checksum))\n data.append(self._lowNib(checksum))\n str = ''.join(chr(x) for x in data)\n str += '\\r'\n self._debug_print('sending data: %s' % str)\n return self._send_internal(bytearray(str, 'utf8'))"
]
| [
"0.6515915",
"0.6268514",
"0.6224985",
"0.616817",
"0.6161122",
"0.61255074",
"0.6124343",
"0.60680276",
"0.6064342",
"0.60614634",
"0.60459036",
"0.6045096",
"0.60214597",
"0.601725",
"0.59537625",
"0.5929504",
"0.59187376",
"0.5909802",
"0.59042305",
"0.58797485",
"0.58796656",
"0.58771914",
"0.586635",
"0.58489287",
"0.58267254",
"0.5811124",
"0.5787373",
"0.57786703",
"0.57665145",
"0.57451534"
]
| 0.73980343 | 0 |
Compute the checksum values given HRP and data. | def bech32_create_checksum(hrp, data):
values = bech32_hrp_expand(hrp) + data
polymod = bech32_polymod(values + [0, 0, 0, 0, 0, 0]) ^ 1
return [(polymod >> 5 * (5 - i)) & 31 for i in range(6)] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calcChecksum(self, data, length):\n checksum = 0\n\n for i in range(length//2):\n checksum = checksum ^ (data[i*2] | (data[i*2+1] << 8)) #xor-ing\n return 0xffff & (checksum ^ 0xffff) #inverting",
"def compute_checksum(data):\n\tif len(data) & 1:\n\t\tdata = data + '\\0'\n\n\tsum = 0\n\twords = array.array('h', data)\n\tfor word in words:\n\t\tsum = sum + (word & 0xffff)\n\t\t\n\thi = sum >> 16\n\tlow = sum & 0xffff\n\tsum = hi + low\n\tsum = sum + (sum >> 16)\n\treturn (~sum) & 0xffff",
"def checksum(data=b\"\"):\n hasher = get_hasher(DEFAULT_CHECKSUM_ALGO)\n hasher.update(data)\n return hasher",
"def data_checksum(self, node):\n cmd = f\"find {RedpandaService.DATA_DIR} -type f -exec md5sum '{{}}' \\; -exec stat -c %s '{{}}' \\;\"\n lines = node.account.ssh_output(cmd)\n tokens = lines.split()\n return {\n tokens[ix + 1].decode(): (tokens[ix].decode(), int(tokens[ix + 2]))\n for ix in range(0, len(tokens), 3)\n }",
"def checksum(data):\r\n # group the data by word, little-endian\r\n data_list = []\r\n for t in range(10):\r\n data_list.append( data[2*t] + (data[2*t+1]<<8) )\r\n \r\n # compute the checksum on 32 bits\r\n chk32 = 0\r\n for d in data_list:\r\n chk32 = (chk32 << 1) + d\r\n\r\n # return a value wrapped around on 15bits, and truncated to still fit into 15 bits\r\n checksum = (chk32 & 0x7FFF) + ( chk32 >> 15 ) # wrap around to fit into 15 bits\r\n checksum = checksum & 0x7FFF # truncate to 15 bits\r\n return int( checksum )",
"def checksum(data):\n total = sum([ord(x) for x in data])\n return total & 0xff",
"def calc_checksum(algorithm, data):\n import hashlib\n import xxhash\n\n if algorithm == \"xxh3_64\":\n checksum = xxhash.xxh3_64(data).hexdigest()\n elif algorithm == \"md5\":\n checksum = hashlib.md5(data).hexdigest()\n else:\n checksum = None\n\n return checksum",
"def calculate_checksum(self, data):\n\t\tdata = data[2:] # Ignore start tokens ($$)\n\t\tcrc16 = crcmod.predefined.mkCrcFun('crc-ccitt-false')\n\t\treturn hex(crc16(data))[2:].upper().zfill(4)",
"def __checksum_make(self, data):\n self.logger.info(\"{}: building the checksum for bytes {}.\".format(self.sensor_name, \":\".join(\"%02x\" % b for b in data)))\n\n if len(data) not in (self.__CommandLength - 2, self.__ResponseLength - 2):\n raise ValueError(\"{}: length data has to be {} or {}.\".format(self.sensor_name, self.__CommandLength - 2, self.__ResponseLength))\n\n if data[0] != self.__SerialStart:\n raise ValueError(\"{}: data is missing the start byte.\".format(self.sensor_name))\n\n if data[1] not in (self.__SendByte, self.__ResponseByte, self.__ReceiveByte):\n raise ValueError(\"{}: data is missing SendByte, ReceiveByte or ReceiveValue-Byte\".format(self.sensor_name))\n\n if data[1] != self.__ReceiveByte and data[2] not in command.values():\n raise ValueError(\"{}: the data command byte value \\\"{}\\\" is not valid.\".format(self.sensor_name, data[2]))\n\n # Build checksum for data to send or receive\n checksum = 0\n for i in range(2, len(data)):\n checksum = checksum + data[i]\n checksum = checksum % 256\n\n self.logger.info(\"{}: checksum calculated {} for bytes {}.\".format(self.sensor_name, \"%02x\" % checksum, \":\".join(\"%02x\" % b for b in data)))\n return checksum",
"def bech32_verify_checksum(hrp, data):\n return bech32_polymod(bech32_hrp_expand(hrp) + data) == 1",
"def checksum(data: list) -> int:\n total = 0\n for row in data:\n total += (max(row) - min(row))\n return total",
"def calculate(data, crc_val=0xFFFFFFFF):\n table = [0] * 256\n\n for i in range(256):\n table[i] = reflect(i, 8) << 24\n for j in range(8):\n table[i] = ((table[i] << 1) ^ (polynom if (table[i] & (1 << 31)) != 0 else 0)) & 0xFFFFFFFF\n table[i] = reflect(table[i], 32)\n data_byte = b''\n\n for i in data:\n data_byte += reflect(i, 32).to_bytes(4, byteorder='little')\n for i in range(len(data_byte)):\n crc_val = (crc_val >> 8) ^ table[((crc_val & 0xff) ^ data_byte[i])] & 0xFFFFFFFF\n\n return reflect(crc_val, 32)",
"def _checksum(cls, buff):\n checksum = 0\n\n while True:\n data = buff.read(cls.checksum_struct.size)\n\n if len(data) == 0:\n break\n if len(data) < 4:\n pad_count = len(data) % 4\n data = data + \"\\x00\" * pad_count\n raise ValueError(\"Checksum data length is not a multiple of 4. %d\" % len(data))\n print(data)\n c1, c2 = cls.checksum_struct.unpack(data)\n checksum += c1 + c2\n print(checksum, checksum % 65536) # novatel 32 bit crc\n return checksum % 65536",
"def get_checksum(data):\r\n one_complement_sum = 0\r\n odd = 1\r\n if len(data) % 2 == 0:\r\n odd = 0\r\n\r\n for i in range(0, len(data) - odd, 2):\r\n\r\n # For shifting 8 bits\r\n first = (data[i] << 8)\r\n second = data[i + 1]\r\n one_complement_sum += first + second\r\n\r\n if one_complement_sum > 65535:\r\n one_complement_sum &= 0xFFFF # carry\r\n one_complement_sum += 1\r\n\r\n if odd:\r\n one_complement_sum += (data[len(data) - 1] << 8)\r\n if one_complement_sum > 65535:\r\n one_complement_sum &= 0xFFFF # carry\r\n one_complement_sum += 1\r\n\r\n one_complement_sum ^= 0xFFFF\r\n return one_complement_sum",
"def getChecksum(dataString):\n sum = 0\n count_to = (len(dataString) / 2) * 2\n count = 0\n while count < count_to:\n this_val = ord(dataString[count + 1])*256+ord(dataString[count])\n sum = sum + this_val\n sum = sum & 0xffffffff # Necessary?\n count = count + 2\n if count_to < len(dataString):\n sum = sum + ord(dataString[len(dataString) - 1])\n sum = sum & 0xffffffff # Necessary?\n sum = (sum >> 16) + (sum & 0xffff)\n sum = sum + (sum >> 16)\n answer = ~sum\n answer = answer & 0xffff\n # Swap bytes. Bugger me if I know why.\n answer = answer >> 8 | (answer << 8 & 0xff00)\n return answer",
"def checksum(data, sum=0):\n # make 16 bit words out of every two adjacent 8 bit words in the packet\n # and add them up\n data = str(data)\n \n\n for i in range(0, len(data), 2):\n if i + 1 >= len(data):\n sum += ord(data[i]) & 0xFF\n else:\n w = ((ord(data[i]) << 8) & 0xFF00) + (ord(data[i + 1]) & 0xFF)\n sum += w\n\n # take only 16 bits out of the 32 bit sum and add up the carries\n while (sum >> 16) > 0:\n sum = (sum & 0xFFFF) + (sum >> 16)\n\n # one's complement the result\n sum = ~sum\n\n return sum & 0xFFFF",
"def _compute_checksum(packet):\n # checksum is the sum of the bytes\n # from device id to the end of the data\n # mod (%) 256 and bit negated (~) (1's compliment)\n # and (&) with 0xFF to make sure it is a byte.\n return ~(sum(packet[2:]) % 0x100) & 0xFF",
"def calculate_checksum(code):\n\n sum_odd = reduce(sum_chars, code[::2])\n sum_even = reduce(sum_chars, code[1:-1:2])\n check = (sum_even + sum_odd * 3) % 10\n\n if check == 0:\n return 0\n else:\n return 10 - check",
"def _get_checksum(self, arg):",
"def sha256Sum(self, data):\n data = str(data)\n m = hashlib.sha256()\n if os.path.isfile(data):\n try:\n f = file(data, 'rb')\n except:\n return 'ERROR: unable to open %s' % data\n while True:\n d = f.read(8096)\n if not d:\n break\n m.update(d)\n f.close()\n # Otherwise it could be either 1) a directory 2) miscellaneous data (like json)\n else:\n m.update(data)\n return m.hexdigest()",
"def checksum(code):\n return sum(code) % 256",
"def receipt_data_hash(data):\n return hashlib.sha256(data.encode()).hexdigest()",
"def checksum(payload):\n return hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4]",
"def tcp_checksum_calc(src: bytes, dst: bytes, proto: int, payload: bytes) -> bytes:\n _sum = dpkt.struct.pack(\">4s4sxBH\", src, dst, proto, len(payload))\n _sum = dpkt.in_cksum_add(0, _sum)\n _sum = dpkt.in_cksum_add(_sum, payload)\n _sum = dpkt.in_cksum_done(_sum)\n return _sum",
"def crcPI(data_bytes):\n # assert type(byte_cmd) == bytes\n log.debug(f\"Calculating CRC for {data_bytes}\")\n\n crc = 0\n da = 0\n crc_ta = [\n 0x0000,\n 0x1021,\n 0x2042,\n 0x3063,\n 0x4084,\n 0x50A5,\n 0x60C6,\n 0x70E7,\n 0x8108,\n 0x9129,\n 0xA14A,\n 0xB16B,\n 0xC18C,\n 0xD1AD,\n 0xE1CE,\n 0xF1EF,\n ]\n\n for c in data_bytes:\n # log.debug('Encoding %s', c)\n if type(c) == str:\n c = ord(c)\n da = ((crc >> 8) & 0xFF) >> 4\n crc = (crc << 4) & 0xFFFF\n\n index = da ^ (c >> 4)\n crc ^= crc_ta[index]\n\n da = ((crc >> 8) & 0xFF) >> 4\n crc = (crc << 4) & 0xFFFF\n\n index = da ^ (c & 0x0F)\n crc ^= crc_ta[index]\n\n crc_low = crc & 0xFF\n crc_high = (crc >> 8) & 0xFF\n\n if crc_low == 0x28 or crc_low == 0x0D or crc_low == 0x0A:\n crc_low += 1\n if crc_high == 0x28 or crc_high == 0x0D or crc_high == 0x0A:\n crc_high += 1\n\n crc = crc_high << 8\n crc += crc_low\n\n log.debug(f\"Generated CRC {crc_high:#04x} {crc_low:#04x} {crc:#06x}\")\n return [crc_high, crc_low]",
"def checksum(data: str):\n if len(data) % 2 == 1:\n return data\n it = iter(data)\n new_data = ''\n for bit in it:\n if bit == next(it): # two consecutive characters are the same\n new_data += '1'\n else:\n new_data += '0'\n return checksum(new_data)",
"def _checksum_compute(content, seed=0):\n csum = seed\n chunks = _chunkify(content, 4)\n for chunk in chunks:\n if len(chunk) == 4:\n ul = chunk[0]\n ul |= chunk[1] << 8\n ul |= chunk[2] << 16\n ul |= chunk[3] << 24\n else:\n # WTF: I can only assume this is a typo from the original\n # author of the cabinet file specification\n if len(chunk) == 3:\n ul = (chunk[0] << 16) | (chunk[1] << 8) | chunk[2]\n elif len(chunk) == 2:\n ul = (chunk[0] << 8) | chunk[1]\n elif len(chunk) == 1:\n ul = chunk[0]\n csum ^= ul\n return csum",
"def crc32(data: Bits):\n\n m = bytearray(data.tobytes())\n\n remainder = int(\"0xFFFFFFFF\", 16)\n # qx = int(\"0x04C11DB7\", 16)\n qx = int(\"0xEDB88320\", 16)\n\n for i in range(len(m) * 8):\n bit = (m[i // 8] >> (i % 8)) & 1\n remainder ^= bit\n if remainder & 1:\n multiple = qx\n else:\n multiple = 0\n remainder >>= 1\n remainder ^= multiple\n\n result = ~remainder % (1 << 32)\n return result, Bits(uint=result, length=32)",
"def _md5sum(data):\n hash = hashlib.md5()\n hash.update(six.b(data))\n hash_hex = hash.hexdigest()\n return hash_hex",
"def vedHexChecksum(byteData):\n CS = 0x55\n for b in byteData:\n CS -= b\n CS = CS & 0xFF\n return CS"
]
| [
"0.6945937",
"0.67816573",
"0.6694316",
"0.6647137",
"0.65975803",
"0.65780365",
"0.6523341",
"0.6485252",
"0.64537305",
"0.6414892",
"0.6185845",
"0.6126715",
"0.6082316",
"0.60333925",
"0.5961999",
"0.5940083",
"0.59072703",
"0.5840192",
"0.57219166",
"0.5718942",
"0.5681023",
"0.56055135",
"0.55973375",
"0.55571854",
"0.5524847",
"0.55178404",
"0.5507306",
"0.5497157",
"0.5490021",
"0.54561466"
]
| 0.7084495 | 0 |
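
The Bech32 helper functions that appear as positive documents in the surrounding records (bech32_hrp_expand, bech32_create_checksum, bech32_verify_checksum, bech32_encode) all lean on a bech32_polymod function and a 32-character CHARSET that the rows never show. The sketch below restates the row functions together and fills in those two missing pieces with the BIP-173 reference constants — an assumption about which variant of the code the rows were taken from — then exercises a create/verify round trip.

# Minimal round-trip sketch for the Bech32 helpers shown in the surrounding records.
# bech32_polymod and CHARSET are not part of the rows; the BIP-173 reference
# constants are assumed here.

CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"

def bech32_polymod(values):
    # BCH checksum over 5-bit groups, as specified in BIP-173.
    generator = [0x3B6A57B2, 0x26508E6D, 0x1EA119FA, 0x3D4233DD, 0x2A1462B3]
    chk = 1
    for value in values:
        top = chk >> 25
        chk = (chk & 0x1FFFFFF) << 5 ^ value
        for i in range(5):
            chk ^= generator[i] if ((top >> i) & 1) else 0
    return chk

def bech32_hrp_expand(hrp):
    # High bits of each HRP character, a zero separator, then the low bits.
    return [ord(x) >> 5 for x in hrp] + [0] + [ord(x) & 31 for x in hrp]

def bech32_create_checksum(hrp, data):
    values = bech32_hrp_expand(hrp) + data
    polymod = bech32_polymod(values + [0, 0, 0, 0, 0, 0]) ^ 1
    return [(polymod >> 5 * (5 - i)) & 31 for i in range(6)]

def bech32_verify_checksum(hrp, data):
    # data here must already include the six checksum symbols.
    return bech32_polymod(bech32_hrp_expand(hrp) + data) == 1

def bech32_encode(hrp, data):
    combined = data + bech32_create_checksum(hrp, data)
    return hrp + "1" + "".join(CHARSET[d] for d in combined)

if __name__ == "__main__":
    hrp, data = "bc", [0, 14, 20, 15, 7, 13, 26]
    encoded = bech32_encode(hrp, data)
    # The data plus its six checksum symbols must verify as a whole.
    assert bech32_verify_checksum(hrp, data + bech32_create_checksum(hrp, data))
    print(encoded)
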
Compute a Bech32 string given HRP and data values. | def bech32_encode(hrp, data):
combined = data + bech32_create_checksum(hrp, data)
return hrp + '1' + ''.join([CHARSET[d] for d in combined]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bech32_create_checksum(hrp, data):\n values = bech32_hrp_expand(hrp) + data\n polymod = bech32_polymod(values + [0, 0, 0, 0, 0, 0]) ^ 1\n return [(polymod >> 5 * (5 - i)) & 31 for i in range(6)]",
"def bcur_encode(data):\n cbor = cbor_encode(data)\n enc = bc32encode(cbor)\n h = hashlib.sha256(cbor).digest()\n enc_hash = bc32encode(h)\n return enc, enc_hash",
"def hashStr(data):\n \n s, d = map_addr_int(data[2], data[3]) \n sp, dp = map_port(data[4], data[5]) \n\n data[2], data[3] = struct.pack('>I', s), struct.pack('>I', d)\n data[4], data[5] = struct.pack('>I', sp)[2:], struct.pack('>I', dp)[2:]\n data[6] = struct.pack('>I', int(data[6]))[-1]\n hash_str = (data[2]\n + data[3]\n + data[4]\n + data[5]\n + data[6]\n )\n return hash_str",
"def encode(hrp, witver, witprog):\n ret = bech32_encode(hrp, [witver] + convertbits(witprog, 8, 5))\n assert decode(hrp, ret) is not (None, None)\n return ret",
"def b32hash(s):\n _hash = hashlib.sha256()\n _hash.update(str2bytes(s))\n return bytes2str(b32encode(_hash.digest()))",
"def formatData(self, temp):\n \n bits = 32 # Required for this protocol\n temp = int(temp*100) # Multiply by 100 to preserve decimal places\n \n if temp == 0:\n r ='0x00000000'\n elif temp < 0: # 2's complement for negatives\n temp = 2**bits + temp\n r = hex(temp)[:-1] # Remove trailing L for Long\n else:\n temph = hex(temp)\n r = '0x'+'0'*(10-len(temph)) + temph[2:]\n \n return r[2:]",
"def opgp_crc24_b64(data: bytes) -> str:\n crc = opgp_crc24(data)\n return \"=\" + base64.b64encode(crc.to_bytes(3, \"big\")).decode(\"ascii\")",
"def zbase32_encode(data: bytes) -> str:\n result = \"\"\n for idx in range(0, len(data), 5):\n result += ZBASE32_ALPHABET[(data[idx] & 0xF8) >> 3]\n if idx + 1 == len(data):\n result += ZBASE32_ALPHABET[(data[idx] & 0x07) << 2]\n break\n result += ZBASE32_ALPHABET[((data[idx] & 0x07) << 2) | ((data[idx + 1] & 0xC0) >> 6)]\n result += ZBASE32_ALPHABET[(data[idx + 1] & 0x3E) >> 1]\n if idx + 2 == len(data):\n result += ZBASE32_ALPHABET[(data[idx + 1] & 0x01) << 4]\n break\n result += ZBASE32_ALPHABET[((data[idx + 1] & 0x01) << 4) | ((data[idx + 2] & 0xF0) >> 4)]\n if idx + 3 == len(data):\n result += ZBASE32_ALPHABET[(data[idx + 2] & 0x0F) << 1]\n break\n result += ZBASE32_ALPHABET[((data[idx + 2] & 0x0F) << 1) | ((data[idx + 3] & 0x80) >> 7)]\n result += ZBASE32_ALPHABET[(data[idx + 3] & 0x7C) >> 2]\n if idx + 4 == len(data):\n result += ZBASE32_ALPHABET[(data[idx + 3] & 0x03) << 3]\n break\n result += ZBASE32_ALPHABET[((data[idx + 3] & 0x03) << 3) | ((data[idx + 4] & 0xE0) >> 5)]\n result += ZBASE32_ALPHABET[data[idx + 4] & 0x1F]\n assert len(result) == (len(data) * 8 + 4) // 5\n return result",
"def _stata_HL_format(self, fmt, value):\n if fmt == '%16H':\n packed_value = pack('>d', value)\n elif fmt == '%8H':\n packed_value = pack('>f', value)\n elif fmt == '%16L':\n packed_value = pack('<d', value)\n elif fmt == '%8L':\n packed_value = pack('<f', value)\n else:\n raise ValueError(\"{} is not a recognized hilo format\".format(fmt))\n \n return \"\".join(hex(x)[2:].zfill(2) for x in packed_value)",
"def encode(self):\n datarate_txpower = 0 | (self.datarate << 4) | self.txpower\n redundancy = 0 | (self.chmaskcntl << 4) | self.nbrep\n data = struct.pack('<BBHB', self.cid, datarate_txpower, self.chmask, redundancy)\n return data",
"def H(s):\n return 'H_' + ''.join(['%02x' % ord(x) for x in s])",
"def rop32(*args):\n\tpacked = \"\"\n\tfor x in args:\n\t\tif type(x) == int or type(x) == long:\n\t\t\tpacked += pack32(x)\n\t\telse:\n\t\t\tpacked += x\n\treturn packed",
"def _encode(klass, pwm_str_key):\n\t\t#print(\"Encoding pwm key %s\" % (pwm_str_key,))\n\t\tdec_pwm_key = int(pwm_str_key, 2)\n\t\t#print \"Decimal (PWN) key:\",dec_pwm_key\n\t\tkey_packed = ''\n\t\tfor byte in Lirc._chunk(pwm_str_key, 8, '0'):\n\t\t\tdec_pwm_key = int(byte, 2)\n\t\t\tkey_packed = key_packed + struct.pack(\">B\", dec_pwm_key)\n\t\treturn key_packed",
"def pack(self, data):\n for a, b in [(x, chr(ord(x) ^ 0x20)) for x in ['}','*','#','$']]:\n data = data.replace(a,'}%s' % b)\n crc = (sum(ord(c) for c in data) % 256) \n return \"$%s#%02X\" %(data, crc)",
"def toString(self):\r\n str = \"\"\r\n for i in range(len(self.Data)):\r\n str += (self.__hexLookup[int(self.Data[i] / 16)]).decode()\r\n str += (self.__hexLookup[int(self.Data[i] % 16)]).decode()\r\n \r\n return str",
"def info_hash_base32(self):\n if getattr(self, '_data', None):\n return b32encode(sha1(bencode(self._data['info'])).digest())\n else:\n raise exceptions.TorrentNotGeneratedException",
"def repr(self, h):\n return(1.0 - h**self.ad)",
"def toHashBase(self) -> str:\r\n\r\n if self.hashBase != '':\r\n self_repr = '{}'.format(self.hashBase)\r\n else:\r\n self_repr = ''\r\n self_repr += '{}{}{}'.format(str(self.data), self.version,\r\n self.compatibilityLimit)\r\n if len(self.script) > 0:\r\n self_repr += ' '.join(self.script)\r\n if self.seriesSignature != '':\r\n self_repr += self.seriesSignature\r\n if self.pha != '':\r\n self_repr += self.pha\r\n for key, value in self.identityInfo.items():\r\n self_repr += '{}{}'.format(key, value)\r\n if self.message != '':\r\n self_repr += self.message\r\n\r\n return self_repr",
"def printable_hash(h):\n return int(h).to_bytes(32, byteorder='big', signed=False).hex()",
"def data() -> str:\n return \"1721\\n979\\n366\\n299\\n675\\n1456\"",
"def hash_eth2(data: bytes) -> Hash32:\n return Hash32(keccak(data))",
"def huffman_data_encode(huff: huffman.HuffmanTree) -> hic.Payload:\n data = huff.encode_data()\n return hic.BitStringP(data)",
"def to_string(inputs, outputs):\n r_val = '# Column 01: frequency\\n'\n r_val += '# 02: hp - real\\n'\n r_val += '# 03: hp - imaginary\\n'\n r_val += '# 04: hc - real\\n'\n r_val += '# 05: hc - imaginary\\n'\n for f_i, hp_i, hc_i in zip(inputs.freqs, outputs.hp, outputs.hc):\n r_val += \"%8.2f %12.5e %12.5e %12.5e %12.5e\\n\" % (f_i, hp_i.real, hp_i.imag, hc_i.real, hc_i.imag)\n return r_val",
"def bech32_hrp_expand(hrp):\n return [ord(x) >> 5 for x in hrp] + [0] + [ord(x) & 31 for x in hrp]",
"def generate_phn_number():\n first_two_digit = \"01\"\n third_digit = str(randrange(3, 10))\n last_eight_digit = str(randrange(10000000, 100000000))\n \n return str(first_two_digit+third_digit+last_eight_digit)",
"def uInt32HexListStr(uInt32List):\n \n outputStr = \"\"\n for value in uInt32List:\n outputStr += \"\\n\\t\" + uInt32HexStr(value)\n outputStr += \"\\n\"\n return outputStr",
"def format_data(self)->float: \n try:\n formatted = chr(self.data[0])\n for i in range(1, len(self.data)): \n formatted = formatted + (chr(self.data[i])) \n return str(formatted)\n except ValueError as e:\n print(e)\n except TypeError as e:\n print(e)",
"def convert_th_slp_int_and_str(th_slp_int):\n if th_slp_int is None:\n msg = 'th_slp_int is None. Going to return None.'\n warnings.warn(msg)\n return None\n\n slp_th_profile_dict_tag = {0: 'HEF',\n 1: 'HMF',\n 2: 'GMF',\n 3: 'GMK',\n 4: 'GPD',\n 5: 'GHA',\n 6: 'GBD',\n 7: 'GKO',\n 8: 'GBH',\n 9: 'GGA',\n 10: 'GBA',\n 11: 'GWA',\n 12: 'GGB',\n 13: 'GHD'}\n\n th_slp_tag = slp_th_profile_dict_tag[th_slp_int]\n return th_slp_tag",
"def uInt32HexStr(uInt32, checkValidUInt32 = False):\n if checkValidUInt32:\n if not uInt32Compatible(uInt32):\n raise ChipsException(\"The value: \" + hex(uInt32) + \" is not a valid 32-bit unsigned integer!\")\n bigEnd = (uInt32 >> 16) & 0xffff\n littleEnd = uInt32 & 0xffff\n result = \"%(#1)04x%(#2)04x\" % { \"#1\" : bigEnd, \"#2\" : littleEnd }\n return result",
"def code(hshk):\n shshk = standardized_hshk(hshk)\n if shshk == _invalid_hshk: return _invalid_code\n # If handshake was valid, translate the sequence into its binary\n # representation\n _code = ''.join(['0' if a == '' else '1' for a in shshk[:-1]][::-1])\n # Indicate order of sequence by setting the \"order bit\" appropriately\n return _code.lstrip('0') if shshk[-1] == _fwd_order else '1'+_code"
]
| [
"0.6180152",
"0.59803236",
"0.59652334",
"0.59291404",
"0.56749326",
"0.56541854",
"0.5602964",
"0.55900097",
"0.5575147",
"0.5510494",
"0.54423493",
"0.5397543",
"0.53948313",
"0.5313692",
"0.52766794",
"0.52306104",
"0.51750374",
"0.5160554",
"0.5155315",
"0.51535434",
"0.5119975",
"0.51157004",
"0.5096452",
"0.50864965",
"0.5079972",
"0.5049625",
"0.5047749",
"0.5038535",
"0.50308424",
"0.5030261"
]
| 0.7195933 | 0 |
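The bech32_encode document above leans on two names it does not define: CHARSET and bech32_create_checksum (that checksum helper and bech32_hrp_expand both appear among this record's negatives). As a hedged, self-contained sketch of how the pieces fit together, following the BIP-173 reference layout rather than the exact module this record was extracted from, the following runs as-is:

    CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"

    def bech32_polymod(values):
        # BCH checksum state update over a sequence of 5-bit groups
        generator = [0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3]
        chk = 1
        for value in values:
            top = chk >> 25
            chk = (chk & 0x1ffffff) << 5 ^ value
            for i in range(5):
                chk ^= generator[i] if ((top >> i) & 1) else 0
        return chk

    def bech32_hrp_expand(hrp):
        # high bits of each HRP character, a zero separator, then the low bits
        return [ord(x) >> 5 for x in hrp] + [0] + [ord(x) & 31 for x in hrp]

    def bech32_create_checksum(hrp, data):
        values = bech32_hrp_expand(hrp) + data
        polymod = bech32_polymod(values + [0, 0, 0, 0, 0, 0]) ^ 1
        return [(polymod >> 5 * (5 - i)) & 31 for i in range(6)]

    def bech32_encode(hrp, data):
        combined = data + bech32_create_checksum(hrp, data)
        return hrp + '1' + ''.join([CHARSET[d] for d in combined])

    # e.g. 'a12uel5l', the lowercase form of the BIP-173 test string "A12UEL5L"
    print(bech32_encode('a', []))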
Validate a Bech32 string, and determine HRP and data. | def bech32_decode(bech):
if ((any(ord(x) < 33 or ord(x) > 126 for x in bech)) or
(bech.lower() != bech and bech.upper() != bech)):
return (None, None)
bech = bech.lower()
pos = bech.rfind('1')
if pos < 1 or pos + 7 > len(bech): #or len(bech) > 90:
return (None, None)
if not all(x in CHARSET for x in bech[pos+1:]):
return (None, None)
hrp = bech[:pos]
data = [CHARSET.find(x) for x in bech[pos+1:]]
if not bech32_verify_checksum(hrp, data):
return (None, None)
return (hrp, data[:-6]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def checkDataType(self,str):\n accepted_vals = [\"HEXA\",\"NEHU\",\"NEDS\",\"NEDU\",\"NDHU\",\"NDDU\"]\n assert str in accepted_vals, \"Error: Data Type not accepted: \" + str\n if (str == 'HEXA') | (str[2] == 'H'):\n self.base = 16\n if str[3] == 'S':\n self.signed = True",
"def is_valid_address(address) -> bool:\n if not address.startswith('one1'):\n return False\n hrp, _ = bech32_decode(address)\n if not hrp:\n return False\n return True",
"def validate(number):\n number = compact(number)\n if not isdigits(number):\n raise InvalidFormat()\n if len(number) != 16:\n raise InvalidLength()\n if _calc_checksum(number) != 0:\n raise InvalidChecksum()\n i = info(number)\n if 'bank' not in i or 'branch' not in i:\n raise InvalidComponent()\n return number",
"def validate_hash(h):\n if len(h) not in (32, 40, 64, 128):\n return False\n\n return bool(re.match(\"[0-9a-fA-F]*$\", h))",
"def __call__(self, value):\n if value is None:\n return value\n\n value = value.replace(\" \", \"\").replace(\".\", \"\")\n if not value.isdigit():\n raise ValidationError(_(\"AHV must contain numbers only\"))\n if len(value) != 13:\n raise ValidationError(_(\"AHV must be 13 numbers long.\"))\n\n if self.ahv_checksum(value[:-1]) != value[-1]:\n raise ValidationError(_(\"Not a valid AHV number.\"))",
"def valid(h):\n h = bytes.decode(h)\n if h[0].islower():\n if set(h).issubset(ALNUM):\n # Yes! Digits, Upper- and lowercase are present\n return True\n return False",
"def validate(number):\n number = compact(number)\n if len(number) != 9:\n raise InvalidLength()\n if not isdigits(number[2:]):\n raise InvalidFormat()\n if not isdigits(number[:2]) and not all(x in 'ABCEHKMOPT' for x in number[:2]):\n raise InvalidFormat()\n if number[0] not in '1234567ABCEHKM':\n raise InvalidComponent()\n if number[-1] != calc_check_digit(number):\n raise InvalidChecksum()\n return number",
"def validate(msg):\n valid = True\n\n if not msg or len(msg) < 4:\n return False, -1, -1\n\n checksum = msg[-1]\n length = int(''.join('{:02X}'.format(byte) for byte in msg[1:3]), 16)\n # try:\n # # here works for pyton 3 only\n # length = int.from_bytes(msg[1:3], byteorder='big', signed=False)\n # except Exception:\n # length = int(''.join('{:02X}'.format(byte) for byte in msg[1:3]), 16)\n\n validlen = len(msg[3:-1])\n validsum = 0xFF - ((sum(msg[3:-1])) & 0xFF)\n\n # print('length: ' + str(self.length) + '; ' + str(validlen))\n # print('checksum: ' + str(self.checksum) + '; ' + str(validsum))\n\n # check sanity of computed Length and Checksum with the one in the message\n if (checksum != validsum) or (length != validlen):\n valid = False\n\n return valid, length, checksum",
"def bech32_verify_checksum(hrp, data):\n return bech32_polymod(bech32_hrp_expand(hrp) + data) == 1",
"def _validate_bbg_id(x):\n return len(x) == 12 and x[:3] == 'BBG' and str.isalnum(x[3:11]) and sum(map(\n lambda u: u in ['A', 'E', 'I', 'O', 'U'], x[3:11])) == 0 and str.isdigit(x[11])",
"def parse_handshake(self, data):\n\n if (data[0] != len(PSTR) or data[1:20] != PSTR\n or data[28:48] != self.factory.torrent.info_hash):\n\n self.transport.loseConnection()\n else:\n self.handshaked = True\n\n reserved = data[20:28]\n if reserved[7] & ord('\\x04'):\n self.fast_extension = True\n\n if reserved[7] & ord('\\x01'):\n self.dht = True",
"def test_unicode_2_32(self):\n test_case = u\"\\u2661\" # pylint: disable=redundant-u-string-prefix\n self.assertTrue(isinstance(CityHash32(test_case), int))",
"def test_good_values_for_validate_guid(good_value):\n bcvalidators.validate_guid(good_value)",
"def b32hash(s):\n _hash = hashlib.sha256()\n _hash.update(str2bytes(s))\n return bytes2str(b32encode(_hash.digest()))",
"def test_is_valid_hex(self):\n self.assertTrue(is_valid_hex('#aabb11'))\n self.assertTrue(is_valid_hex('#000'))\n self.assertTrue(is_valid_hex('#aaa'))\n self.assertFalse(is_valid_hex('black'))\n self.assertFalse(is_valid_hex('bl(ack'))",
"def test_allowed_chars(self):\n hash_val = self.reverse_hash.get_hash('123')\n self.assertEqual(hash_val['error'], 'allowed chars {}'.format(self.reverse_hash.letters))",
"def _verify_fletcher32(chunk_buffer):\n # calculate checksums\n if len(chunk_buffer) % 2:\n arr = np.frombuffer(chunk_buffer[:-4]+b'\\x00', '<u2')\n else:\n arr = np.frombuffer(chunk_buffer[:-4], '<u2')\n sum1 = sum2 = 0\n for i in arr:\n sum1 = (sum1 + i) % 65535\n sum2 = (sum2 + sum1) % 65535\n\n # extract stored checksums\n ref_sum1, ref_sum2 = np.frombuffer(chunk_buffer[-4:], '>u2')\n ref_sum1 = ref_sum1 % 65535\n ref_sum2 = ref_sum2 % 65535\n\n # compare\n if sum1 != ref_sum1 or sum2 != ref_sum2:\n raise ValueError(\"fletcher32 checksum invalid\")\n return True",
"def test_validate_input_valid(self):\n final_config = self.dtm1.validate_input('00001111')\n nose.assert_equal(final_config[0], 'q4')\n nose.assert_equal(str(final_config[1]), 'TMTape(\\'xxxxyyyy.\\')')",
"def test_consistent_encoding_32(self):\n text = u\"abracadabra\" # pylint: disable=redundant-u-string-prefix\n self.assertEqual(CityHash32(text), CityHash32(text.encode(\"utf-8\")))",
"def validate_hcl(hcl: str) -> bool:\n if len(hcl) != 7 or hcl[0] != '#':\n return False\n for x in hcl[1:]:\n if x not in list(map(str, range(9 + 1))) + \\\n list(map(chr, range(ord('a'), ord('f') + 1))):\n return False\n return True",
"def validate_pkh(v):\n return _validate(v, prefixes=[b'tz1', b'tz2', b'tz3'])",
"def validate_address(address: str) -> None:\n data: bytes = base58.b58decode_check(address)\n if len(data) != len(types.UInt160.zero()) + 1:\n raise ValueError(f\"The address is wrong, because data (address value in bytes) length should be \"\n f\"{len(types.UInt160.zero()) + 1}\")\n elif data[0] != settings.network.account_version:\n raise ValueError(f\"The account version is not {settings.network.account_version}\")",
"def ishex(data: str) -> bool:\n return bool(re.fullmatch(r\"^0[x|X][0-9a-fA-F]+\", data)) or bool(re.fullmatch(r\"^[0-9a-fA-F]+[h|H]$\", data))",
"def verify_raw_google_hash_header(google_hash: str) -> bool:\n\n return bool(re.match(r'(crc32c=[A-Za-z0-9+/=]+),(md5=[A-Za-z0-9+/=]+)', google_hash))",
"def validate(info):\n\n\tif info == \"\": \n\t\treturn False\n\telse:\n\t\tif len(info) < 5 or len(info) > 32:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True",
"def _validate_data_format(data_format):\n data_format_ = str(data_format).upper()\n if data_format_ in {'NHWC', 'NCHW'}:\n return data_format_\n raise ValueError(\n 'Argument data_format=\"{}\" not recognized; must be one of '\n '{{\"NHWC\", \"NCHW\"}} (case insensitive).'.format(data_format))",
"def test_is_valid(self, address):\n self.test_string(address)\n self.test_alnum(address)",
"def __input_data_ok(self, line=None):\n # valid pattern: 1407478022|www.facebook.com\n valid_pattern = re.compile(\"\\w{10}\\|\\w+\")\n if (line) and (re.match(valid_pattern, line)):\n return True\n else:\n return False",
"def validate(smartAddress):\n\n addressLen = len(smartAddress)\n\n if addressLen < 27 or addressLen > 35:\n return None\n\n try:\n decoded = decode_base58(smartAddress, 25)\n except ValueError:\n return None\n\n # Compare checksum\n checksum = HashKeccak(decoded[:-4])[:4]\n if decoded[-4:] != checksum:\n return None\n\n if smartAddress != encode_base58(decoded):\n return None\n\n return smartAddress",
"def pes_packet_check_formedness(payload):\n b1 = ord(payload[0])\n b2 = ord(payload[1])\n b3 = ord(payload[2])\n\n b4 = ord(payload[3])\n if b1 != 0 or b2 != 0 or b3 != 1:\n return False\n return True"
]
| [
"0.632021",
"0.58705276",
"0.58341265",
"0.5811494",
"0.57285565",
"0.57082236",
"0.5613003",
"0.55415183",
"0.5499696",
"0.54304606",
"0.54039085",
"0.53765565",
"0.5374732",
"0.53599584",
"0.5302655",
"0.52568835",
"0.5249464",
"0.524831",
"0.5240656",
"0.52291",
"0.5218948",
"0.5216212",
"0.5210489",
"0.5210368",
"0.516685",
"0.51625854",
"0.51214564",
"0.5121187",
"0.51031524",
"0.5103003"
]
| 0.62150854 | 1 |
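bech32_decode above calls bech32_verify_checksum, which shows up among this record's negatives as a one-line polymod test: a string is valid exactly when the polymod over the expanded HRP plus data equals 1. Note also that this particular decode comments out BIP-173's 90-character cap, presumably to accept longer payloads such as lnurl strings (an inference, not something the record states). A short roundtrip check, reusing the helpers sketched after the previous record together with the bech32_decode from this one:

    def bech32_verify_checksum(hrp, data):
        # a Bech32 string is valid iff the polymod of expanded HRP plus data equals 1
        return bech32_polymod(bech32_hrp_expand(hrp) + data) == 1

    # whatever bech32_encode produces must decode back to the same (hrp, data)
    hrp, data = bech32_decode(bech32_encode('tb', [0, 14, 20, 15]))
    assert hrp == 'tb' and data == [0, 14, 20, 15]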
Constructor. pip: reference to the master object (Pip); plugin: reference to the plugin object (ToolPipPlugin); parent: reference to the parent widget (QWidget) | def __init__(self, pip, plugin, parent=None):
super(PipSearchDialog, self).__init__(parent)
self.setupUi(self)
self.setWindowFlags(Qt.Window)
self.buttonBox.button(QDialogButtonBox.Cancel).setEnabled(False)
self.__installButton = self.buttonBox.addButton(
self.tr("&Install"), QDialogButtonBox.ActionRole)
self.__installButton.setEnabled(False)
self.__showDetailsButton = self.buttonBox.addButton(
self.tr("&Show Details..."), QDialogButtonBox.ActionRole)
self.__showDetailsButton.setEnabled(False)
self.__pip = pip
self.__client = E5XmlRpcClient(
plugin.getPreferences("PipSearchIndex") or DefaultIndexUrl,
self)
self.__default = self.tr("<Default>")
pipExecutables = sorted(plugin.getPreferences("PipExecutables"))
self.pipComboBox.addItem(self.__default)
self.pipComboBox.addItems(pipExecutables)
self.searchEdit.setFocus(Qt.OtherFocusReason)
self.__canceled = False
self.__detailsData = {}
self.__query = [] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, plugin):\n\n QgsMapTool.__init__(self, plugin.iface.mapCanvas())\n\n self.plugin = plugin\n self.double_click = False\n\n self.plugin.iface.mainWindow().statusBar().showMessage(self.tr(\"Click on a parcel!\"))\n\n self.current_dialog = None\n self.dialog_position = None",
"def __init__(self, parent=None):\n super(Inj, self).__init__(parent)\n self.setupUi(self)",
"def __init__(self, parent):\n self.parent = parent\n self.dialog = None",
"def __init__(self, parent=None):\n super(SelfCarryAddressSeek, self).__init__(parent)\n self.setupUi(self)",
"def __init__(self, parent, **kwargs):\n PyGlassWidget.__init__(self, parent, **kwargs)\n\n self.deployBtn.clicked.connect(self._handleDeployClick)\n self.cancelBtn.clicked.connect(self._handleCancelClick)\n\n self._canceled = True\n self._includeEmails = False\n self._buildMessage = u''",
"def __init__(self, parent=None):\n super(QCTP, self).__init__(parent)\n self.setupUi(self)",
"def __init__(self):\n self.label = \"Python ToolBox\"\n self.alias = \"\"\n\n # List of tool classes associated with this toolbox\n self.tools = [Tool]",
"def __init__(self, parent=None):\n super(StyledInputDialog, self).__init__(parent)\n self.setupUi(self)\n self.input = None",
"def __init__(\n self,\n parent,\n call_ipi=\"i-pi input.xml\",\n call_driver=\"i-pi-driver\",\n check_errors=True,\n check_numpy_output=True,\n check_xyz_output=True,\n ):\n\n self.parent = parent\n self.call_ipi = call_ipi\n self.call_driver = call_driver\n self.check_error = check_errors\n self.check_numpy_output = check_numpy_output\n self.check_xyz_output = check_xyz_output",
"def __init__(self, parent=None):\n super(Dialog, self).__init__(parent)\n self.setupUi(self)",
"def __init__(self, parent):\n # parent is the main frame of PyCorrFit\n self.parent = parent\n ## MYID\n # This ID is given by the parent for an instance of this class\n self.MyID = None\n ## Wrapping\n curvedict, labels = self.GetCurvedict()\n self.labels = labels\n self.Selector = UserSelectCurves(parent, curvedict,\n wrapper=self, labels=labels)\n # This is necessary for parent to deselect and select the tool\n # in the tools menu.\n self.Bind = self.Selector.Bind\n if self.parent.notebook.GetPageCount() == 0:\n self.Selector.sp.Disable()",
"def __init__(self):\n self.label = \"Create\"\n self.alias = \"\"\n\n # List of tool classes associated with this toolbox\n if core.get_pass():\n self.tools = [Fbound, Roads, Diekdikisi]\n else:\n self.tools = []",
"def __init__(self, parent):\n QtGui.QMenu.__init__(self, parent)\n self.parent = parent",
"def __init__(self, parent):\n pass",
"def __init__(self, parent):\n pass",
"def __init__(self, parent):\n pass",
"def __init__(self, parent):\r\n\r\n BasicDialog.__init__(self, parent, title=None)",
"def __init__(self, parent=None, pkfitdlg=None):\n super (getPkDlg, self).__init__(parent)\n\n self.parent = parent\n self.pkfitdlg = pkfitdlg\n self.lab = QtWidgets.QLabel(\"One line per peak: typ, pos, amp, FWHM\")\n self.text = QtWidgets.QTextEdit(self)\n self.title = \"Peak Fit Tool\"\n self.setWindowTitle(self.title)\n self.createLayout()",
"def __init__(self, parent=None):\n super(RobotSelection, self).__init__(parent)\n self.parent = parent\n self.initUI()",
"def __init__(self, parent=None):\n super(Representative, self).__init__(parent)\n self.setupUi(self)",
"def __init__(self, parent=None):\n super(ProgressDlg, self).__init__(parent)\n self.setupUi(self)",
"def create(self, parent):\n self.widget = QtCore.QObject(parent)",
"def __init__(self, parent):",
"def __init__(self, parent=None):\n super(union_Dialog, self).__init__(parent)\n self.setupUi(self)",
"def __init__(self):\n self.label = \"Toolbox\"\n self.alias = \"\"\n\n # List of tool classes associated with this toolbox\n self.tools = [Offset]",
"def __init__(self):\n self.label = \"CDA Tools\"\n self.alias = \"\"\n\n # List of tool classes associated with this toolbox\n self.tools = [SecondaryCraterRemovalTool]",
"def __init__(self, parent=None):\n ScriptedLoadableModuleWidget.__init__(self, parent)\n VTKObservationMixin.__init__(self) # needed for parameter node observation\n self.logic = None\n self._parameterNode = None\n self._updatingGUIFromParameterNode = False",
"def __init__(self, parent):\n self._parent = parent",
"def __init__(self, parent=None):\n # Inherited from QMainWindow\n if sys.platform == 'darwin':\n # Workaround for Qt issue on OS X that causes QMainWindow to\n # hide when adding QToolBar, see\n # https://bugreports.qt-project.org/browse/QTBUG-4300\n super(BpMainWindow, self).__init__(parent, Qt.MacWindowToolBarButtonHint)\n else:\n super(BpMainWindow, self).__init__(parent)\n\n # temporary variable\n self._temp_dir = None\n self.is_save_configure = False\n\n # pre-define a model variable\n self.model = None",
"def __init__(self, parent=None):\n\t\tScriptedLoadableModuleWidget.__init__(self, parent)\n\t\tVTKObservationMixin.__init__(self) # needed for parameter node observation\n\t\tself.logic = None\n\t\tself._parameterNode = None\n\t\tself._updatingGUIFromParameterNode = False\n\t\t\n\t\tself.elecModel = None\n\t\tself.elecModelLastButton = None\n\t\tself.elecModelButton = 0\n\t\tself.elecChanLastButton = None\n\t\tself.elecChanButton = 0\n\t\tself.lastPolButton=0\n\t\tself.active = False"
]
| [
"0.68881553",
"0.6291952",
"0.62415355",
"0.6189922",
"0.6097188",
"0.60794055",
"0.60542935",
"0.5995019",
"0.59779817",
"0.59704363",
"0.59477806",
"0.5928814",
"0.5920071",
"0.5913141",
"0.5913141",
"0.5913141",
"0.59094864",
"0.5890799",
"0.58740675",
"0.5873307",
"0.5849331",
"0.5837506",
"0.58094156",
"0.5803421",
"0.5798762",
"0.5797472",
"0.5796115",
"0.57737815",
"0.57656896",
"0.57438123"
]
| 0.7563393 | 0 |
Private slot handling a press of the search button. | def on_searchButton_clicked(self):
self.__search() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def search_btn_clicked(self, widget, data=None):\n # Method to handle search here.\n search_text = self.get_text(\"txt_search\")\n print search_text",
"def on_buttonBox_clicked(self, button):\n if button == self.findButton:\n self.__doSearch()\n elif button == self.stopButton:\n self.__stopSearch()",
"def __search(self):\n self.resultList.clear()\n self.infoLabel.clear()\n \n self.buttonBox.button(QDialogButtonBox.Close).setEnabled(False)\n self.buttonBox.button(QDialogButtonBox.Cancel).setEnabled(True)\n self.buttonBox.button(QDialogButtonBox.Cancel).setDefault(True)\n self.searchButton.setEnabled(False)\n QApplication.processEvents(QEventLoop.ExcludeUserInputEvents)\n \n QApplication.setOverrideCursor(Qt.WaitCursor)\n QApplication.processEvents(QEventLoop.ExcludeUserInputEvents)\n \n self.__canceled = False\n \n self.__query = [term for term in self.searchEdit.text().strip().split()\n if term not in PipSearchDialog.Stopwords]\n self.__client.call(\n \"search\",\n ({\"name\": self.__query, \"summary\": self.__query}, \"or\"),\n self.__processSearchResult,\n self.__searchError\n )",
"def click_search_button(self):\n self.click_element(self.generic_search_button_locator)",
"def handler_search_changed(self, widget):\n #we set the current language filter to the button's label\n self.filter_on = widget.get_text()\n text = widget.get_text()\n #we update the filter, which updates in turn the view\n self.emit('search-show',text)\n #self.log_filter.refilter()",
"def _on_keyboard(self, instance, key, scancode, codepoint, modifiers, *args):\r\n # print(\"Keyboard pressed! {}, {}, {}, {}\".format(key, scancode, codepoint, modifiers))\r\n if codepoint == 's' and 'ctrl' in modifiers:\r\n toast('Search by Name, Ingredient, or Tag', 3)\r\n self.search_focus = True",
"def on_searchin_changed(self):\r\n\r\n self.check_searchin()",
"def on_pre_enter(self, *args):\n self.ids['search'].text = ''\n self.filter()",
"def onSearch(self):\n self.mainGrid.showSearchPopup()\n self.popupActive = True",
"def search(self, *args, **kwargs):",
"def other_search(self):\n test = self.ask_zoekarg.text()\n if test:\n self.parent().search_arg = test\n self.parent().do_select()",
"def on_search(self, data: Any = None):\n raise NotImplementedError",
"def keyPressEvent(self, evt):\n if evt.key() == Qt.Key_Escape:\n self.escPressed.emit()\n else:\n super(QuickSearchLineEdit, self).keyPressEvent(evt) # pass it on",
"def keyPressEvent(self, evt):\n if evt.modifiers() == Qt.ControlModifier:\n if evt.key() == Qt.Key_F:\n self.__find()\n evt.accept()\n return\n elif evt.key() == Qt.Key_C:\n self.copy()\n evt.accept()\n return\n elif evt.key() == Qt.Key_A:\n self.selectAll()\n evt.accept()\n return\n elif evt.modifiers() == Qt.NoModifier:\n if evt.key() == Qt.Key_F3 and self.__lastSearch:\n self.searchNext(*self.__lastSearch)\n evt.accept()\n return\n elif evt.modifiers() == Qt.ShiftModifier and self.__lastSearch:\n if evt.key() == Qt.Key_F3 and self.__lastSearch:\n self.searchPrev(*self.__lastSearch)\n evt.accept()\n return",
"def search():\n pass",
"def _actionSelect(self):\n self.returnPressed.emit() # pylint: disable=no-member",
"def on_search_text_changed(self):\n regexp = QRegExp(self.lineEditFilter.text(), Qt.CaseInsensitive, QRegExp.FixedString)\n\n proxy_model = self.symbolTreeWidget.model()\n proxy_model.text = self.lineEditFilter.text().lower()\n proxy_model.setFilterRegExp(regexp)\n\n self.symbolTreeWidget.expandAll()",
"def clickEvent(self):\n self.emit(QtCore.SIGNAL('activated(QString &)'), self.text())",
"def pushButtonClicked(self, but_id, button):\n self.ui.tv_bindings.clearSelection()\n lstMatch = self.ui.tv_bindings.findItems(but_id, QtCore.Qt.MatchExactly, 0)[0]\n lstMatch.setSelected(True)\n lstMatch.setText(1, '[Press a key]')\n button.installEventFilter(self)\n self.efButton = button # Not elegant, but.... works",
"def new_search(self, widget, data=None):\n self.artist_name.set_text(\"\")\n self.song_name.set_text(\"\")\n self.status_bar.hide()\n self.lyrics_view.hide()\n self.scroll.hide()\n self.window.resize(self.width, self.height)",
"def searchWidget(self):\n return self.__searchWidget",
"def eventFilter(self, watched, event):\n if (\n self.__searchEdit and watched == self.__searchEdit and\n event.type() == QEvent.KeyPress\n ):\n idx = self.__index.currentIndex()\n if event.key() == Qt.Key_Up:\n idx = self.__index.model().index(\n idx.row() - 1, idx.column(), idx.parent())\n if idx.isValid():\n self.__index.setCurrentIndex(idx)\n elif event.key() == Qt.Key_Down:\n idx = self.__index.model().index(\n idx.row() + 1, idx.column(), idx.parent())\n if idx.isValid():\n self.__index.setCurrentIndex(idx)\n elif event.key() == Qt.Key_Escape:\n self.escapePressed.emit()\n \n return QWidget.eventFilter(self, watched, event)",
"def search(self, *args, **kwargs): # real signature unknown\n pass",
"def on_save_search(self, event):\r\n\r\n search = self.m_searchfor_textbox.GetValue()\r\n if search == \"\":\r\n errormsg(_(\"There is no search to save!\"))\r\n return\r\n dlg = SaveSearchDialog(self, search, self.m_regex_search_checkbox.GetValue())\r\n dlg.ShowModal()\r\n dlg.Destroy()",
"def search(self, search):\n raise NotImplementedError",
"def start_search(self):\r\n\r\n with _LOCK:\r\n if self.debounce_search:\r\n return\r\n self.debounce_search = True\r\n if self.m_search_button.GetLabel() in [SEARCH_BTN_STOP, SEARCH_BTN_ABORT]:\r\n if self.thread is not None:\r\n self.m_search_button.SetLabel(SEARCH_BTN_ABORT)\r\n global _ABORT\r\n with _LOCK:\r\n _ABORT = True\r\n self.kill = True\r\n else:\r\n self.stop_update_timer()\r\n else:\r\n if not self.validate_search_inputs():\r\n self.do_search()\r\n self.debounce_search = False",
"def on_load_search(self, event):\r\n\r\n dlg = LoadSearchDialog(self)\r\n dlg.ShowModal()\r\n search, is_regex = dlg.get_search()\r\n dlg.Destroy()\r\n if search is not None and is_regex is not None:\r\n self.m_searchfor_textbox.SetValue(search)\r\n self.m_regex_search_checkbox.SetValue(regex_search)",
"def _(event):\n input_buffer = event.cli.buffers.previous(event.cli)\n search_buffer = event.cli.buffers[SEARCH_BUFFER]\n\n # Update search state.\n if search_buffer.text:\n get_search_state(event.cli).text = search_buffer.text\n\n # Apply search.\n input_buffer.apply_search(get_search_state(event.cli), include_current_position=True)\n\n # Add query to history of search line.\n search_buffer.append_to_history()\n search_buffer.reset()\n\n # Focus previous document again.\n event.cli.pop_focus()",
"def _on_articles_search(self, evt=None):\n \n # set focus\n self._articles_view.SetFocusToQuery()",
"def run_search(self, evt):\n search_input = self.search_input_txtctrl.GetValue()\n self.execute_google_search(str(search_input))\n self.set_result_to_dict_for_page_scroller()\n self.clear_result_screen()\n self.trigger_scroller_event()"
]
| [
"0.787465",
"0.7454312",
"0.70616204",
"0.68871343",
"0.68181986",
"0.6760889",
"0.67045516",
"0.6505925",
"0.64427584",
"0.64369637",
"0.6414263",
"0.63818145",
"0.6349521",
"0.63222706",
"0.62287897",
"0.62203604",
"0.62094444",
"0.61999077",
"0.61627203",
"0.6162478",
"0.6162471",
"0.61614645",
"0.6158441",
"0.6154052",
"0.6119562",
"0.60920477",
"0.60829365",
"0.6082909",
"0.6081415",
"0.60789436"
]
| 0.87299025 | 0 |
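Nothing in this record connects searchButton to the slot explicitly; the name on_searchButton_clicked follows Qt's auto-connection convention (on_<objectName>_<signalName>), which uic-generated setupUi() code honours via QMetaObject.connectSlotsByName(). A minimal PyQt5 sketch of that convention, with illustrative widget names rather than the dialog from this record:

    from PyQt5.QtCore import QMetaObject, pyqtSlot
    from PyQt5.QtWidgets import QApplication, QPushButton, QWidget

    class SearchWidget(QWidget):
        def __init__(self):
            super().__init__()
            self.searchButton = QPushButton("Search", self)
            self.searchButton.setObjectName("searchButton")  # the object name drives auto-connection
            QMetaObject.connectSlotsByName(self)             # normally done at the end of setupUi()

        @pyqtSlot()
        def on_searchButton_clicked(self):
            # picked up automatically because the name matches on_<objectName>_<signalName>
            print("search requested")

    app = QApplication([])
    widget = SearchWidget()
    widget.searchButton.click()  # prints "search requested" without any explicit connect()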
Private slot called when a button of the button box is clicked. button: the button that was clicked (QAbstractButton) | def on_buttonBox_clicked(self, button):
if button == self.buttonBox.button(QDialogButtonBox.Close):
self.close()
elif button == self.buttonBox.button(QDialogButtonBox.Cancel):
self.__client.abort()
self.__canceled = True
elif button == self.__installButton:
self.__install()
elif button == self.__showDetailsButton:
self.__showDetails() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def exec_(self):\n super().exec_()\n return self.clicked_button",
"def clickEvent(self):\n self.emit(QtCore.SIGNAL('activated(QString &)'), self.text())",
"def on_buttonBox_clicked(self, button):\n if button == self.buttonBox.button(QDialogButtonBox.Save):\n self.on_saveButton_clicked()\n elif button == self.refreshButton:\n self.on_refreshButton_clicked()",
"def cb_something_4(self, button): \n print(\"Do Something 4\")",
"def button(args: List, slot) -> QPushButton:\n button = QPushButton()\n button.setText(args[0])\n button.clicked.connect(slot)\n return button",
"def on_pushButton_clicked(self):\n # TODO: not implemented yet\n raise NotImplementedError",
"def on_pushButton_clicked(self):\n # TODO: not implemented yet\n raise NotImplementedError",
"def pushButtonClicked(self, but_id, button):\n self.ui.tv_bindings.clearSelection()\n lstMatch = self.ui.tv_bindings.findItems(but_id, QtCore.Qt.MatchExactly, 0)[0]\n lstMatch.setSelected(True)\n lstMatch.setText(1, '[Press a key]')\n button.installEventFilter(self)\n self.efButton = button # Not elegant, but.... works",
"def on_toolButton_clicked(self):\n # TODO: not implemented yet\n raise NotImplementedError",
"def on_buttonBox_clicked(self, button):\n if button == self.buttonBox.button(QDialogButtonBox.Close):\n self.close()\n elif button == self.buttonBox.button(QDialogButtonBox.Cancel):\n self.__finish()",
"def on_pushButton_11_clicked(self):\n # TODO: not implemented yet\n raise NotImplementedError",
"def _selectionChangedSlot(self, _):\r\n\r\n self._updateButtonStates()",
"def cb_something_1(self, button):\n print(\"Do Something 1\")",
"def on_buttonBox_clicked(self, button):\n if button == self.findButton:\n self.__doSearch()\n elif button == self.stopButton:\n self.__stopSearch()",
"def cb_something_2(self, button):\n print(\"Do Something 2\")",
"def clickedAction(self, events):\n print(\"The {} button was clicked!\".format(self.imgname))",
"def onButtonPress(self, event):\n\n if event.xdata and event.ydata:\n self.emit(QtCore.SIGNAL(\"positionSelected(float, float)\"),\n float(event.xdata), float(event.ydata))",
"def OnButton(self, event):\n\n\n event_id = event.GetId()\n event_obj = event.GetEventObject()\n print(\"Button 1 Clicked:\")\n print(\"ID=%d\" % event_id)\n print(\"object=%s\" % event_obj.GetLabel())",
"def on_triggered(self, slot):\n self.triggered.connect(slot)",
"def mousePressEvent(self, mouse_event):\r\n return",
"def on_pushButton_clicked(self):\r\n # TODO: not implemented yet\r\n print 1",
"def button_clicked(self):\n sender = self.sender()\n if self._buttons_active:\n message_box = QtGui.QMessageBox()\n quit_button = message_box.addButton('Quitter',\n QtGui.QMessageBox.RejectRole)\n if (sender.row, sender.col, sender.text()) in self._solution:\n message_box.setText('Félicitations, vous avez deviné juste !')\n else:\n message_box.setText('Dommage, vous avez perdu !')\n message_box.setInformativeText('Vous pouvez proposer une '\n 'autre sortie, ou afficher de '\n 'nouveau les objets.')\n show_items_button = message_box.addButton('Voir objets',\n QtGui.QMessageBox.AcceptRole)\n play_again_button = message_box.addButton( 'Réessayer',\n QtGui.QMessageBox.AcceptRole)\n message_box.setDefaultButton(play_again_button)\n message_box.exec()\n if message_box.clickedButton() == quit_button:\n self.close()\n elif message_box.clickedButton() == show_items_button:\n self.reset_game()",
"def ev_mousebuttondown(self, event: MouseButtonDown) -> None:",
"def OnButton(self, event):\r\n \r\n button = event.GetInt()\r\n\r\n if button == AUI_BUTTON_LEFT or button == AUI_BUTTON_RIGHT:\r\n if button == AUI_BUTTON_LEFT:\r\n if self.GetTabOffset() > 0:\r\n \r\n self.SetTabOffset(self.GetTabOffset()-1)\r\n self.Refresh()\r\n self.Update()\r\n else:\r\n self.SetTabOffset(self.GetTabOffset()+1)\r\n self.Refresh()\r\n self.Update()\r\n \r\n elif button == AUI_BUTTON_WINDOWLIST:\r\n idx = self.GetArtProvider().ShowDropDown(self, self._pages, self.GetActivePage())\r\n \r\n if idx != -1:\r\n \r\n e = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_PAGE_CHANGING, self.GetId())\r\n e.SetSelection(idx)\r\n e.SetOldSelection(self.GetActivePage())\r\n e.SetEventObject(self)\r\n self.GetEventHandler().ProcessEvent(e)\r\n \r\n else:\r\n event.Skip()",
"def _slot(self, event) -> None:\n self.events.gui_event(original_event=event)",
"def execPushButton(self):\n\t\t# verbose.detail(\"%s %s\" %(self.sender().objectName(), self.sender().property('exec')))\n\t\tprint(\"%s %s\" %(self.sender().objectName(), self.sender().property('exec')))",
"def click(self):\r\n pass",
"def cb_something_3(self, button):\n print(\"Do Something 3\")",
"def press(self):\n self.clicked = True\n if self.command:\n self.command(self.name)",
"def emitPressEvent(self, clickLocation, button, currentKbKey, items):\n # emit the mousePressEvent signal\n self.mousePress.emit(self, clickLocation, button, currentKbKey, items)"
]
| [
"0.7814205",
"0.7070584",
"0.69642556",
"0.68384516",
"0.67798644",
"0.6768343",
"0.6768343",
"0.67607236",
"0.67205596",
"0.6709458",
"0.6689524",
"0.6659822",
"0.6606926",
"0.66062105",
"0.660225",
"0.6546234",
"0.6541225",
"0.6532168",
"0.6531234",
"0.6516813",
"0.65039927",
"0.6458592",
"0.6456827",
"0.64535546",
"0.64482975",
"0.6420754",
"0.6404212",
"0.64026994",
"0.6384851",
"0.6374369"
]
| 0.7210921 | 1 |
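This handler and the constructor record further up form a pair: the constructor adds extra ActionRole buttons with QDialogButtonBox.addButton() and keeps references to them, and the clicked(QAbstractButton) handler dispatches by comparing the emitted button against those references or against the standard-button lookups. A small stand-alone PyQt5 sketch of the pattern (class and button names are illustrative, not the eric6 classes used in the record):

    from PyQt5.QtWidgets import (QApplication, QDialog, QDialogButtonBox,
                                 QVBoxLayout)

    class DemoDialog(QDialog):
        def __init__(self):
            super().__init__()
            self.buttonBox = QDialogButtonBox(
                QDialogButtonBox.Close | QDialogButtonBox.Cancel, self)
            # extra buttons get an ActionRole so accept()/reject() are not triggered implicitly
            self.installButton = self.buttonBox.addButton(
                "Install", QDialogButtonBox.ActionRole)
            self.buttonBox.clicked.connect(self.handleButton)
            layout = QVBoxLayout(self)
            layout.addWidget(self.buttonBox)

        def handleButton(self, button):
            # clicked(QAbstractButton*) fires for every contained button; dispatch by identity
            if button is self.buttonBox.button(QDialogButtonBox.Close):
                self.close()
            elif button is self.buttonBox.button(QDialogButtonBox.Cancel):
                print("cancel requested")
            elif button is self.installButton:
                print("install requested")

    app = QApplication([])
    dlg = DemoDialog()
    dlg.installButton.click()  # prints "install requested"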
Private method to process the search result data from PyPI. data: result data (tuple) with hits in the first element | def __processSearchResult(self, data):
if data:
packages = self.__transformHits(data[0])
if packages:
self.infoLabel.setText(self.tr("%n package(s) found.", "",
len(packages)))
wrapper = textwrap.TextWrapper(width=80)
count = 0
total = 0
for package in packages:
if self.__canceled:
self.infoLabel.setText(
self.tr("Canceled - only {0} out of %n package(s)"
" shown", "", len(packages)).format(total))
break
itm = QTreeWidgetItem(
self.resultList, [
package['name'].strip(),
"{0:4d}".format(package['score']),
"\n".join([
wrapper.fill(line) for line in
package['summary'].strip().splitlines()
])
])
itm.setData(0, self.VersionRole, package['version'])
count += 1
total += 1
if count == 100:
count = 0
QApplication.processEvents()
else:
QApplication.restoreOverrideCursor()
E5MessageBox.warning(
self,
self.tr("Search PyPI"),
self.tr("""<p>The package search did not return"""
""" anything.</p>"""))
self.infoLabel.setText(
self.tr("""<p>The package search did not return"""
""" anything.</p>"""))
else:
QApplication.restoreOverrideCursor()
E5MessageBox.warning(
self,
self.tr("Search PyPI"),
self.tr("""<p>The package search did not return anything."""
"""</p>"""))
self.infoLabel.setText(
self.tr("""<p>The package search did not return anything."""
"""</p>"""))
header = self.resultList.header()
self.resultList.sortItems(1, Qt.DescendingOrder)
header.setStretchLastSection(False)
header.resizeSections(QHeaderView.ResizeToContents)
headerSize = 0
for col in range(header.count()):
headerSize += header.sectionSize(col)
if headerSize < header.width():
header.setStretchLastSection(True)
self.__finish() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def processSearchResult(self):",
"def getResults():",
"def handle_result(self, results: List[Dict], **info):\n pass",
"def parse_results(self, result):\n\n interesting = []\n for item in result[\"hits\"][\"hits\"]:\n source = item[\"_source\"]\n meta = source.get(\"meta\")\n\n title = \"No title found\"\n descr = None\n os_path = None\n highlight = None\n\n if meta is not None:\n title = meta.get(\"title\") or \"No title found\"\n if meta.get(\"raw\") is not None:\n descr = meta.get(\"raw\").get(\"description\")\n\n path = source.get(\"path\")\n if path is not None:\n os_path = path.get(\"real\")\n\n highlight = \" \".join(item[\"highlight\"][\"content\"][0].split())\n\n temp = {\n \"id\": item[\"_id\"],\n \"title\": title,\n \"description\": descr,\n \"path\": os_path,\n \"highlight\": highlight,\n }\n interesting.append(temp)\n self.interesting = interesting\n return interesting",
"def get_data( filepath_query, filepath_results ):\n with open( filepath_query, 'r' ) as query_file:\n query = json.load( query_file )\n \n query_text = query['query']['multi_match']['query']\n query_scores = query['nlp_scores']\n query_data = {\n 'query_text' : query_text,\n 'bias_score' : query_scores['bias_score'],\n 'vocab_richness' : query_scores['stylo_scores']['vocab_richness'],\n 'hapax_legomena' : query_scores['stylo_scores']['hepax_legomena'],\n 'wordlength' : query_scores['stylo_scores']['readability_measures']['average_wordlength'],\n 'sentlength' : query_scores['stylo_scores']['readability_measures']['average_sentlength'],\n 'spelling_errors' : query_scores['stylo_scores']['spelling_errors'],\n 'topics' : query_scores['topics']\n }\n\n with open( filepath_results ) as results_file:\n results = json.load( results_file )\n \n results_data = []\n for doc in results:\n argID = doc['_source']['argsMeID']\n premise = doc['_source']['premise']\n average_wordlength = doc['nlp_scores']['stylo_scores']['readability_measures']['average_wordlength']\n average_sentlength = doc['nlp_scores']['stylo_scores']['readability_measures']['average_sentlength']\n bias_score = doc['nlp_scores']['bias_score']\n bias_distance = doc['bias_distance']\n stylo_distance = doc['stylo_distance']\n topic_match_count = doc['topic_match_count']\n old_score = doc['old_score']\n new_score = doc['new_score']\n scoring_distance = doc['scoring_distance']\n old_rank = doc['old_rank']\n new_rank = doc['new_rank']\n \n doc_data = {\n 'argID' : argID,\n 'premise' : premise,\n 'wordlength' : average_wordlength,\n 'sentlength' : average_sentlength,\n 'bias_score' : bias_score,\n 'bias_distance' : bias_distance,\n 'stylo_distance' : stylo_distance,\n 'topic_match_count' : topic_match_count,\n 'old_score' : old_score,\n 'new_score' : new_score,\n 'scoring_distance' : scoring_distance,\n 'old_rank' : old_rank,\n 'new_rank' : new_rank\n }\n results_data.append( doc_data )\n\n data_tuple = ( query_data, results_data )\n return data_tuple",
"def test_parse_search_result(self):\n datafile = pathlib.Path(__file__).parent.joinpath(\"../data/ol_search.json\")\n search_data = json.loads(datafile.read_bytes())\n result = list(self.connector.parse_search_data(search_data, 0))[0]\n\n self.assertIsInstance(result, SearchResult)\n self.assertEqual(result.title, \"This Is How You Lose the Time War\")\n self.assertEqual(result.key, \"https://openlibrary.org/works/OL20639540W\")\n self.assertEqual(result.author, \"Amal El-Mohtar, Max Gladstone\")\n self.assertEqual(result.year, 2019)\n self.assertEqual(result.connector, self.connector)",
"def get_data(self):\n def _clean_search_hit(search_hit):\n \"\"\"\n Takes in a search result hit as a BeautifySoup tag and pulls out all the data to match the desired schema.\n\n :param search_hit:\n :return Dictionary: A dictionary with the cleaned data\n \"\"\"\n\n hit_name = search_hit.find(class_='hit-name')\n hit_url = hit_name.get('href')\n hit_id = hit_url.split('/')[-1]\n name = hit_name.get_text().split(',')[0].title().split()\n\n current_city = search_hit.find(class_='hit-location').get_text().upper()\n\n # Find all Addresses for search result.\n try:\n address = search_hit.find(class_='hit-pastAddresses').find_all(class_='hit-values')\n address = list({a.text.upper().replace('.', '') for a in address})\n except AttributeError:\n address = list()\n\n # find the address that is most likely the current main address.\n try:\n address.insert(0, address.pop(address.index(current_city)))\n except ValueError:\n address.insert(0, current_city)\n\n address = [\n {\n '@type': 'PostalAddress',\n 'addressLocality': locality.title(),\n 'addressRegion': region\n } for locality, region in [a.split(', ') for a in address]]\n\n work_location = {'@type': 'Place'}\n try:\n work_location['name'] = search_hit\\\n .find(class_='hit-work')\\\n .find(class_='hit-values')\\\n .get_text()\\\n .title()\n except AttributeError:\n work_location['name'] = ''\n\n alumni_of = {'@type': 'EducationalOrganization'}\n try:\n alumni_of['name'] = search_hit\\\n .find(class_='hit-high-school')\\\n .find(class_='hit-values')\\\n .get_text().title()\n except AttributeError:\n pass\n\n return {\n '@id': hit_id,\n '@type': 'Person',\n 'name': ' '.join(name),\n 'givenName': name[0],\n 'middleName': ' '.join(name[1:-1]),\n 'familyName': name[-1],\n 'url': hit_url,\n 'address': address,\n 'workLocation': work_location,\n 'alumniOf': alumni_of,\n }\n\n def _refine_search(search_str, options):\n \"\"\"\n Takes a list of WebElements and a search string, looks for string in the text of each WebElement, and\n press the option if found. Returns Boolean for found status\n\n :param search_str: str of the desired option.\n :param options: list of WebElements from Beautify Soup that represents all of the available options.\n :return:\n \"\"\"\n search_str = search_str.upper()\n logging.info(f'Looking for \\'{search_str}\\'')\n try:\n for option in options:\n option_text = option.text.upper()\n logging.info(f'Option Checked: {option_text}')\n if search_str in option_text:\n option.click()\n time.sleep(2)\n logging.info(f'Option Selected: {option_text}')\n return True\n else:\n return False\n except AttributeError:\n return True\n except StaleElementReferenceException as e:\n ChromeCrash(e)\n\n with self.driver(executable_path=self.DRIVER_DIR) as driver:\n driver.get(self.url)\n\n \"\"\"\n The CSS for the page doesn't show the State nor the City selector options if the page is too narrow,\n so we need to make sure the browser is open wide enough for the CSS to make those options visible. 
\n \"\"\"\n driver.fullscreen_window()\n\n # Refine the search by State\n address_region = self.person.get('addressRegion', '')\n address_region = STATES.get(address_region.upper(), address_region.upper())\n region_options = driver\\\n .find_element_by_class_name(\"STATE\")\\\n .find_elements_by_class_name(\"refinementList-text\")\n\n if not _refine_search(address_region, region_options):\n return False\n\n # Narrow the search by pressing a City option\n address_locality = self.person.get('addressLocality').title()\n locality_options = driver\\\n .find_element_by_class_name(\"CITY\")\\\n .find_elements_by_class_name(\"refinementList-text\")\n\n if not _refine_search(address_locality, locality_options):\n return False\n\n \"\"\"\n The Page Loads dynamically, so we need to scroll down the page to show all the search results. It needs to\n be done in steps with a pause between movements to allow for loading. \n Here it will first get the current location on the page, attempt to move down the page, and then check to\n see if the location changed.\n \"\"\"\n\n if self.auto_scroll and len(driver.find_elements_by_class_name(\"ais-InfiniteHits-item\")) > 15:\n current_height, new_height = 0, driver.execute_script(\"return document.body.scrollHeight\")\n\n while new_height != current_height:\n # Scroll down to the bottom of the page\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\n # Wait to load page\n time.sleep(SCROLL_PAUSE_TIME)\n\n # Calculate new scroll height and compare with last scroll height\n current_height, new_height = new_height, driver.execute_script(\"return document.body.scrollHeight\")\n\n page_source = driver.page_source\n page_soup = bs(page_source, 'html.parser')\n search_results = list(page_soup.find_all(class_='ais-InfiniteHits-item'))\n for i, search_result in enumerate(search_results):\n search_results[i] = _clean_search_hit(search_result)\n\n self.data_from_website = pd.DataFrame(search_results)\n self.data_from_website.set_index('@id', inplace=True)\n return True",
"def parse_search_results(query, offset, data):\n # type: (str, int, dict) -> Union[None, bool]\n if data is None or not data[\"results\"]:\n return False\n paginate(query, len(data[\"results\"]), int(data[\"ttlResults\"]), offset)\n for child in data[\"results\"]:\n add_menu_item(\n play_film,\n iwm.clean_title(child[\"Title\"]),\n args={\"href\": child[\"url\"]},\n info={\n \"year\": child[\"fieldClasses\"][\"date\"],\n \"plot\": child[\"Summary\"],\n \"duration\": iwm.time_to_seconds(child[\"mediaObjectDuration\"])\n },\n art=ku.art(child[\"refImageUrl\"]),\n directory=False)\n xbmcplugin.setContent(plugin.handle, \"videos\")\n xbmcplugin.addSortMethod(plugin.handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)\n xbmcplugin.addSortMethod(plugin.handle, xbmcplugin.SORT_METHOD_VIDEO_YEAR)\n xbmcplugin.endOfDirectory(plugin.handle)",
"def extract(self, data):",
"def dataExtract(queryResults):\n days = ['MondayCollect',\n 'TuesdayCollect',\n 'WednesdayCollect',\n 'ThursdayCollect',\n 'FridayCollect',\n 'SaturdayCollect',\n 'SundayCollect']\n\n #counting the instances of bin collections\n parkCount = 0\n roadingCount = 0\n otherCount = 0\n\n #output totals of bin collections\n parkOutput = []\n roadingOutput = []\n otherOutput = []\n \n #iterate over each day\n for day in days:\n \n #iterate over the number of bins\n for i in range(len(queryResults)):\n \n #check if the bin was collected on the day...\n if str(queryResults[i]['attributes'][day]).strip().lower() == 'yes':\n \n #unknown formatting issue with the data, these lines fix it\n strResult = str(queryResults[i]['attributes']['Owner'])\n strResultForm = strResult.lower().strip()\n \n #update the counts if True\n if strResultForm == 'roading':\n roadingCount += 1\n elif strResultForm == 'parks':\n parkCount += 1\n elif strResultForm == 'private':\n otherCount += 1\n else:\n otherCount +=1\n\n #print \"Day: {} \\nparkCount: {} \\nroadingCount: {} \\notherCount: {} \\n\\n\".format(day,parkCount,roadingCount,otherCount)\n \n parkOutput.append(parkCount)\n roadingOutput.append(roadingCount)\n otherOutput.append(otherCount)\n \n parkCount = 0\n roadingCount =0\n otherCount =0\n \n return parkOutput,roadingOutput,otherOutput",
"def __find_correlations(self, results):\n\n for result in results[:self.__result_limit]:\n\n # pub without venue\n if len(result['ven']) == 0:\n result['alternative'] = []\n\n with self.vix.searcher(weighting=Frequency) as vs:\n vq_parse = QueryParser('key', self.vix.schema).parse(result['pub']['crossref'])\n tresult = vs.search(vq_parse, limit=None, )\n if len(tresult) != 0:\n result['ven'] = {}\n result['added'] = 1\n for attr in tresult[0].items():\n result['ven'][attr[0]] = attr[1]\n\n self.__output.append(result)\n\n # venue without pub or venue with a list of pubs\n elif len(result['pub']) == 0 or (\n isinstance(result['pub'], list) and len(result['pub']) > 1):\n result['alternative'] = []\n\n with self.pix.searcher(weighting=Frequency) as ps:\n pq_parse = QueryParser('crossref', self.pix.schema).parse(result['ven']['key'])\n tresult = ps.search(pq_parse, limit=None, )\n\n if len(tresult):\n plist = []\n tmp = dict()\n for el in tresult:\n for attr in el.items():\n if attr[0] == 'title' and attr[1] not in [x['title'] for x in result['pub']]:\n plist.append(attr[1])\n break\n\n result['alternative'] = plist\n self.__output.append(result)\n\n # mixed case\n elif len(self.__output) == 0 or not result['ven']['key'] in [x['key'] for x in self.__output]:\n lis = [x for x in results if len(x['ven']) and x['ven']['key'] == result['ven']['key']]\n tmp = {}\n if len(lis) <= 1:\n tmp = {'key': result['pub']['key'],\n 'score': result['score'],\n 'pub': [x['pub'] for x in lis],\n 'ven': result['ven'],\n 'alternative': list()}\n else:\n tmp = {'key': result['ven']['key'],\n 'score': result['score'],\n 'pub': [x['pub'] for x in lis],\n 'ven': result['ven'],\n 'alternative': list()}\n plist = []\n with self.pix.searcher() as ps:\n pq_parse = QueryParser('crossref', self.pix.schema).parse(tmp['key'])\n tresult = ps.search(pq_parse, limit=None, )\n if len(tresult):\n for el in tresult:\n for attr in el.items():\n if attr[0] == 'title' and attr[1] not in [x['title'] for x in tmp['pub']]:\n plist.append(attr[1])\n break\n\n tmp['alternative'] = plist\n self.__output.append(tmp)",
"def process(self, results):\n raise NotImplementedError",
"def parse_data( self ):\n self.parsed_data = dict( self.results )",
"def get_data(data, exit_fail=True):\n try:\n if data['track_name'] and data['artist_name']:\n for song in itunespy.search_track(data['track_name']):\n if data['artist_name'].lower() == song.artist_name.lower():\n if 'collection_name' not in data.keys():\n return song\n elif data['collection_name'].lower() in song.collection_name.lower():\n return song\n elif data['track_name']:\n return itunespy.search_track(data['track_name'])\n elif data['artist_name']:\n songs = []\n artists = itunespy.search_artist(data['artist_name'])[0]\n for album in artists.get_albums():\n for song in album.get_tracks():\n songs.append(song)\n return songs\n # Attempt to find a close match if no exact matches\n song = itunespy.search(' '.join([data['track_name'], data['artist_name'], data['collection_name']]))[0]\n if song:\n return song\n except LookupError as err:\n if exit_fail:\n logging.warning(Fore.RED+'✘ '+Style.RESET_ALL+str(err))\n sys.exit()",
"def collect_pypi_data():\n\n rclient = xmlrpc.client.ServerProxy('http://pypi.python.org/pypi')\n python = {'Programming Language :: Python': rclient.browse(['Programming Language :: Python'])}\n python_two = {}\n python_three = {}\n\n for classifier in [\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.3',\n 'Programming Language :: Python :: 2.4',\n 'Programming Language :: Python :: 2.5',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 2 :: Only']:\n python_two[classifier] = rclient.browse([classifier])\n\n for classifier in [\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.0',\n 'Programming Language :: Python :: 3.1',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4']:\n python_three[classifier] = rclient.browse([classifier])\n\n return {datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S'):\n {'python': python,\n 'python_two': python_two,\n 'python_three': python_three}}",
"def search(self):\n premium = self.config.get('premium', False)\n\n self.params[self.opts['keyword']['query_key']] = self.config[self.opts['keyword']['config_key']] # keyword\n # Selection params\n self.append_param('tag_mode', 'selection')\n if premium:\n self.append_param('order_premium', 'selection')\n else:\n self.append_param('order_not_premium', 'selection')\n\n self.append_param('type', 'selection')\n self.append_param('tool', 'selection')\n self.append_param('ratio', 'selection')\n self.append_param('mode', 'selection')\n\n # Number params\n self.append_param('min_width', 'number')\n self.append_param('max_width', 'number')\n self.append_param('min_height', 'number')\n self.append_param('max_height', 'number')\n if premium:\n self.append_param('min_bookmark', 'number')\n self.append_param('max_bookmark', 'number')\n else:\n self.set_bookmark_filter()\n\n # Date params\n self.append_param('start_time', 'date')\n self.append_param('end_time', 'date')\n\n # multi work filter\n self.filters['multi'] = self.config.get('download_multi', False)\n\n for i in range(self.config['start_page'], self.config['end_page'] + 1):\n self.params['p'] = i\n self.headers['Referer'] = 'https://www.pixiv.net/'\n url ='https://www.pixiv.net/search.php'\n html = self.session.get(url, headers = self.headers, params = self.params, timeout = 10, proxies = self.proxies)\n\n soup = BeautifulSoup(html.text, 'lxml')\n data_items = json.loads(soup.find('input', id = 'js-mount-point-search-result-list')['data-items'])\n\n return self.extract_work_info(data_items)",
"def _handle_search_results(self, response: TextResponse) -> ScrapyYelpItem:\n\n # get yConfig\n pattern = re.compile(r\"\"\"\\n\\s+yConfig\\s+=\\s+\"\"\", re.MULTILINE | re.DOTALL)\n soup = BeautifulSoup(response.text, \"html.parser\")\n script = soup.find(\"script\", text=pattern)\n myjson = script.get_text()\n # remove start pattern (js assignment)\n s = re.sub(pattern, '', myjson)\n # remove html (parser problems)\n s = re.sub('<[^<]+?>', '', s)\n # remove last semi colon (end-of-data)\n s = s[0:s.rfind(';')]\n json_object = json.loads(s,strict=False)\n\n keys = [x for x in json_object[\"js_display\"][\"hovercard_data\"] if x.isnumeric()]\n # first part is the hovercard data - which contains most of the aggregate biz informative\n # such as total_reviews and summary_score\n df_hovercard_data = pd.DataFrame()\n for x in keys:\n tmpdf = json_normalize(json_object[\"js_display\"][\"hovercard_data\"][x])\n df_hovercard_data = df_hovercard_data.append(tmpdf,ignore_index=True)\n\n df_hovercard_data = df_hovercard_data.set_index(\"result_number\")\n df_hovercard_data.index = df_hovercard_data.index.astype(int)\n # second part is the resourceid which might be useful later on, not sure if this is used at all, but\n # it serves as a good example of how to join to other \"parts\" of the nested json structure and flatten it\n df_markers = json_normalize(json_object[\"js_display\"][\"map_state\"][\"markers\"])\n df_markers = df_markers[df_markers['resourceType'] == 'business'].loc[:, [\"url\",\"resourceId\",\"hovercardId\",\"label\",\"location.latitude\",\"location.longitude\",]]\n df_markers = df_markers.set_index('label')\n df_markers.index = df_markers.index.astype(int)\n\n # combine data into a single dataframe which will eventually be written out by our pipeline\n df = df_hovercard_data.join(df_markers)\n\n # at this point we want to also scrape the indvidual biz listing for the menu, syntax is verbose here\n\n\n ## deubg write to file\n #json_formatted = json.dumps(json_object, indent=2)\n # print(json_formatted)\n # with open(\"files/\"+'blah.json', 'wb') as file:\n # file.write(str.encode(json_formatted))\n\n \"\"\"\n\n Here is a smample of what the yConfig object looks like:\n\n json_object.keys() ====>\n ['cookies', 'gaConfig', 'adjustAndroidPaidTrafficUrl', 'webviewFlow', 'enabledSitRepChannels',\n isWebviewRequest', 'js_display', 'isLoggedIn', 'uaInfo', 'isSitRepEnabled', 'comscore', 'isBugsnagEnabled',\n 'support', 'deprecatedEncryptedYUV', 'vendorExternalURLs', 'smartBannerFallbackActive', 'version',\n 'recaptchaV3PublicKey', 'googlePlacesUrl', 'redesignActive', 'currentBaseLang', 'isClientErrorsEnabled',\n 'uniqueRequestId', 'yelpcodeTemplateVersion', 'appInstallDialogEnabled', 'smartBannerPersistent',\n 'imageUrls', 'siteUrl', 'referrer', 'webviewInfo', 'cookieDomain', 'recaptchaPublicKey',\n 'send_user_agent_to_ga', 'pGifUrl']\n\n\n json_object[\"js_display\"].keys() ===>\n ['polyglot_translations', 'raq_links', 'locale', 'hovercard_data', 'is_first_ad_hovercard_opened',\n 'zoom', 'centerLng', 'map_state', 'advertising_business_id_list', 'centerLat', 'pager']\n\n json_object[\"js_display\"][\"hovercard_data\"] ==>\n '1': {'resource_id': None,\n 'result_number': 1,\n 'biz': {'alias': 'lou-malnatis-pizzeria-chicago',\n 'review_count': 5998,\n 'name': \"Lou Malnati's Pizzeria\",\n 'rating': 4.07785928642881,\n 'url': 'https://m.yelp.com/biz/lou-malnatis-pizzeria-chicago',\n 'price': '$$',\n 'categories': 'Pizza, Italian, Sandwiches',\n 'distance': '2.5 mi'},\n 'lat': 41.890357,\n 
'lng': -87.633704,\n 'type': 'natural'},\n '2': {'resource_id': None,\n ....\n\n\n json_object[\"js_display\"][\"map_state\"][\"markers\"] ===>\n [{'resourceType': 'business',\n 'url': '/biz/lou-malnatis-pizzeria-chicago',\n 'resourceId': '8vFJH_paXsMocmEO_KAa3w',\n 'label': '1',\n 'shouldOpenInNewTab': False,\n 'location': {'latitude': 41.890357, 'longitude': -87.633704},\n 'key': 1,\n 'hovercardId': 'Q6nXAEw3UuAVFSztE4lPnA',\n 'icon': {'name': 'business',\n 'anchorOffset': [12, 32],\n 'activeOrigin': [24, 0],\n 'scaledSize': [48, 320],\n 'regularUri': 'https://media0.fl.yelpcdn.com/mapmarkers/yelp_map_range/20160801/1/10.png',\n 'size': [24, 32],\n 'activeUri': 'https://media0.fl.yelpcdn.com/mapmarkers/yelp_map_range/20160801/1/10.png',\n 'regularOrigin': [0, 0]}},\n {'resourceType': 'business',\n 'url': '/biz/pequods-pizzeria-chicago',\n 'resourceId': 'DXwSYgiXqIVNdO9dazel6w',\n 'label': '2',\n 'shouldOpenInNew\n ...\n\n \"\"\"\n #print(json_object[\"js_display\"][\"hovercard_data\"])\n\n\n\n return df",
"def search(self, query, offset):\n \n def parse_flickr_json(site, query, results):\n \"\"\"Create a OpenSearch Response from Flickr results.\n \n Flickr's search API returns results in JSON format. This function simply loads the JSON into memory and creates an equivalent representation that is OpenSearch compliant.\n \n Parameters:\n \n * site (str): search engine name\n * query (str): query search terms (n.b. not a OpenSearch Query object)\n * results (dict): results from service\n \n Returns:\n \n * puppy.model.OpenSearch.Response\n \n \"\"\"\n response = Response()\n response.version = 'json'\n response.feed.setdefault('title', \"{0}: {1}\".format(site, query))\n response.feed.setdefault('link', results['link'])\n response.feed.setdefault('description', \"Search results for '{0}' at {1}\".format(query, site))\n response.namespaces.setdefault(\"opensearch\", \"http://a9.com/-/spec/opensearch/1.1/\")\n try:\n response.feed.setdefault(\"opensearch_totalresults\", int(results['total']))\n response.feed.setdefault(\"opensearch_itemsperpage\", int(results['perpage']))\n response.feed.setdefault(\"opensearch_startindex\", int(results['page']))\n except KeyError:\n response.feed.setdefault(\"opensearch_totalresults\", 0)\n response.feed.setdefault(\"opensearch_itemsperpage\", 0)\n response.feed.setdefault(\"opensearch_startindex\", 0)\n \n if 'photo' in results:\n for result in results['photo']:\n # Links need to be created from several fields - see the Flickr API for a detailed explanation\n \n try:\n resultLink = \"http://www.flickr.com/photos/{0}/{1}\".format(result['owner'], result['id'])\n resultThumbnail = \"http://farm{0}.static.flickr.com/{1}/{2}_{3}_t.jpg\".format(result['farm'], result['server'], result['id'], result['secret'])\n resultSummary = \"Photo result for '{0}' from {1}\".format(query, site)\n response.entries.append({'title': result['title'], 'link': resultLink, 'summary': resultSummary, 'thumbnail': resultThumbnail})\n except Exception, e:\n print \"Skipping a result due to: {0} \\nWhen parsing a result from: {1}\\n\".format(e, results['link'])\n continue\n \n return response\n\n\t# Try and get the API key from config, if it's not there raise an API Key error - the application will have to deal with this\n try:\n appId = self.service.config[\"flickr_api_key\"]\n except KeyError:\n raise ApiKeyError(\"Flickr\", \"flickr_api_key\")\n\n # Now that an API key has been supplied try to get results from the search engine itself\n try: \n pos = self._origin() + offset\n appId = self.service.config[\"flickr_api_key\"]\n url = \"http://api.flickr.com/services/rest/?method=flickr.photos.search&api_key={0}&text={1}&sort={2}&safe_search={3}&media={4}&per_page={5}&page={6}&format=json&nojsoncallback=1\".format(appId, urllib2.quote(query.search_terms), self.sortBy, self.safeSearch, self.mediaType, self.resultsPerPage, pos)\n \n if (self.bbox):\n url += \"&bbox={0}\".format(self.bbox) \n data = urllib2.urlopen(url).read()\n results = json.loads(data)\n results['photos'].setdefault(u'link', url)\n return parse_flickr_json('Flickr', query.search_terms, results['photos'])\n\n # urllib2 - this catches http errors due to the service being down, lack of a proxy etc\n except urllib2.URLError, e:\n raise SearchEngineError(\"Flickr\", e, errorType = 'urllib2', url = url)\n\n # Check for a type error for offset or resultsPerPage\n except TypeError, e:\n note = \"Please ensure that 'offset' and 'resultsPerPage' are integers if used\"\n if isinstance(offset, int) == False:\n raise 
SearchEngineError(\"Flickr\", e, note = note, offsetType = type(offset))\n\n if isinstance(self.resultsPerPage, int) == False:\n raise SearchEngineError(\"Flickr\", e, note = note, resultsPerPageType = type(self.resultsPerPage))\n\n raise SearchEngineError(\"Flickr\", e, note = note)\n\t \n # Catch Attribute error which deals with unexpected none type for the objects the wrapper uses and other associated issues\n except AttributeError, e:\n raise SearchEngineError(\"Flickr\", e, url = url)",
"def extract_work_info(self, data_items):\n result = []\n count = 0\n for data_item in data_items:\n keep = True\n if self.filters.get('min') != None and data_item['bookmarkCount'] < self.filters['min']:\n keep = False\n if self.filters.get('max') != None and data_item['bookmarkCount'] > self.filters['max']:\n keep = False\n if self.filters['multi'] == False and data_item['pageCount'] > 1:\n keep = False\n if keep:\n url = data_item['url']\n begin = url.find('img/')\n end = url.find('_master')\n url_info = url[begin + 4:end - 3] # no real source here since there might be multi images\n\n result.append({\n 'id': data_item['illustId'],\n 'name': data_item['illustTitle'], # filename\n 'username': data_item['userName'], # filename\n 'url_info': url_info, # for fetching real source\n 'count': data_item['pageCount'], # for fetching multiple images\n 'type': data_item['illustType'] # for determining picture/ugoira\n })\n count += data_item['pageCount']\n return result, count",
"def _setResultInfo(self, data: list):\n repResultInfo = [['機能','端子','状態','判定基準','結果','判定']]\n bufResultInfo = [''] * 6\n _data = [[d[1].split('_')[0],d[1].split('->')[0].split('_')[1],d[1].split('->')[1],d[2],d[3],d[4]] for d in data]\n resultInfo = [[s.replace(',','\\n') for s in ss] for ss in _data]\n for i, item in enumerate(resultInfo):\n if item[2][0:5] == 'LEVEL':\n bufResultInfo = item\n repResultInfo.append(bufResultInfo)\n bufResultInfo = [''] * 6\n elif item[0]=='SQ' or item[0]=='SAT' or item[0]=='LOAD':\n bufResultInfo[0] = item[0]\n bufResultInfo[1] = item[1]\n bufResultInfo[2] = item[2]\n elif item[2] == 'DTCread':\n bufResultInfo[3] = item[3]\n bufResultInfo[4] = item[4]\n bufResultInfo[5] = item[5]\n if bufResultInfo[0] != '':\n repResultInfo.append(bufResultInfo)\n bufResultInfo = [''] * 6\n self._result_info = repResultInfo",
"def parse_search_results(fields, results):\n my_results = []\n for result in results:\n my_results.append(SearchAnimeResult(fields, result))\n return my_results",
"def _parse_result(self, result, *, verbose=False, **kwargs):\n return get_fermilat_datafile(result)",
"def __getPackageDownloadsData(self, packageVersion, data):\n if data and data[0]:\n self.__detailsData = data[0]\n itm = self.resultList.selectedItems()[0]\n packageName = itm.text(0)\n self.__client.call(\n \"release_urls\",\n (packageName, packageVersion),\n self.__displayPackageDetails,\n self.__detailsError\n )\n else:\n self.__finish()\n E5MessageBox.warning(\n self,\n self.tr(\"Search PyPI\"),\n self.tr(\"\"\"<p>No package details info available.</p>\"\"\"))",
"def process(dataset, result):\n global AR_TYPE\n if AR_TYPE.startswith('fixed'):\n ar_authors = loader.get_fixed_authors()\n else:\n ar_authors = dataset.authors[0:40]\n\n tups = []\n for unknown in ar_authors:\n tups.append((unknown, dataset))\n\n pool = Pool(processes=NUMBER_OF_CORES)\n it = pool.imap(process_distance_unknown, tups)\n pool.close()\n pool.join()\n\n for unknown in ar_authors:\n distance_results = it.next()\n for distance_result in distance_results:\n [ar_size, position, distance] = distance_result\n result.add(ar_size, unknown, position, distance)\n return",
"def search_data(\n free_text_search, query, result, display, offset=None, length=None,\n download=None, file=None, fields=None, sortfields=None\n):\n url = get_search_url(free_text_search)\n url += \"query=%s\" % (query)\n\n check_result(result)\n url += \"&result=%s\" % (result)\n\n check_display_option(display)\n url += \"&display=%s\" % (display)\n\n if length is not None:\n check_length(length)\n url += \"&length=%s\" % (length)\n\n if offset is not None:\n result_nb = get_search_result_number(free_text_search, query, result)\n if offset > result_nb:\n err_str = \"The offset value must be lower than the possible number\"\n err_str += \" of results for the query\"\n raise ValueError(err_str)\n url += \"&offset=%s\" % (offset)\n\n if display == \"report\":\n if fields is None:\n fields = \",\".join(get_returnable_fields(result))\n else:\n check_returnable_fields(fields.split(\",\"), result)\n url += \"&fields=%s\" % (fields)\n if sortfields is not None:\n check_sortable_fields(sortfields, result)\n url += \"&sortfields=%s\" % (sortfields)\n\n if download is not None or file is not None:\n check_download_file_options(download, file)\n url += \"&download=%s\" % (download)\n return request_url(url, display, file)",
"def viewdata(data):\n\n print('_' * 50)\n print('Number of Results: ' + str(data[0]['numResults']))\n print('\\nSearchURL: ' + data[0]['searchURL'])\n print('_' * 50)\n\n i = 1\n for m in data[1]:\n print(str(i) + '. ')\n for n in m:\n print(str(n) + ': ' + str(m[n]))\n i += 1\n print('\\n')",
"def process_results(self, response, results):\n return results",
"def process_results(self, response, results):\n return results",
"async def _fetch_data(self, ctx: commands.Context, query: str):\n params = {\n \"query\": query,\n \"maxResults\": 10,\n \"sort\": \"FavoritedTimes\",\n \"preferAccurateMatches\": \"true\",\n \"nameMatchMode\": \"Words\",\n \"fields\": \"Artists,Lyrics,Names,ThumbUrl\",\n }\n headers = {\n \"User-Agent\": f\"Red-DiscordBot/{red_version} Fixator10-cogs/VocaDB/{self.__version__}\"\n }\n try:\n async with self.session.get(BASE_API_URL, params=params, headers=headers) as resp:\n if resp.status != 200:\n return f\"https://http.cat/{resp.status}\"\n result = await resp.json()\n except asyncio.TimeoutError:\n return \"Request timed out\"\n\n all_items = result.get(\"items\")\n if not all_items:\n return None\n\n filtered_items = [x for x in all_items if x.get(\"lyrics\")]\n if not filtered_items:\n return None\n\n if len(filtered_items) == 1:\n return filtered_items[0]\n\n items = \"\\n\".join(\n f\"**`[{i}]`** {x.get('defaultName')} - {x.get('artistString')}\"\n f\" (published: {self._parse_date(x.get('publishDate'))})\"\n for i, x in enumerate(filtered_items, start=1)\n )\n\n prompt = await ctx.send(\n f\"Found below **{len(filtered_items)}** result(s). Pick one in 60 seconds:\\n\\n{items}\"\n )\n\n def check(msg: discord.Message) -> bool:\n return bool(\n msg.content.isdigit()\n and int(msg.content) in range(len(filtered_items) + 1)\n and msg.author.id == ctx.author.id\n and msg.channel.id == ctx.channel.id\n )\n\n try:\n choice = await self.bot.wait_for(\"message\", timeout=60.0, check=check)\n except asyncio.TimeoutError:\n choice = None\n\n if choice is None or choice.content.strip() == \"0\":\n with contextlib.suppress(discord.NotFound, discord.HTTPException):\n await prompt.edit(content=\"Cancelled.\", delete_after=5.0)\n return None\n\n choice = int(choice.content.strip()) - 1\n with contextlib.suppress(discord.NotFound, discord.HTTPException):\n await prompt.delete()\n return filtered_items[choice]",
"def __extract_info(self) -> Results:\n results: Results = []\n\n response = request(self.home_url)\n\n html = bs(response, \"lxml\")\n table = html.find(\"table\")\n for row in table.find_all(\"tr\")[1:]:\n col1, col2, col3 = row.find_all(\"td\")\n filename1, perc1 = col1.text.strip().split()\n filename2, perc2 = col2.text.strip().split()\n\n with ThreadPoolExecutor() as executor:\n future = executor.submit(self.__get_line_numbers, col1.a.get(\"href\"))\n lines = future.result()\n\n result_dict = Result(\n file1=filename1,\n file2=filename2,\n percentage_file1=perc_str_to_int(perc1),\n percentage_file2=perc_str_to_int(perc2),\n no_of_lines_matched=int(col3.text.strip()),\n lines_matched=lines,\n )\n results.append(result_dict)\n return results"
]
| [
"0.68465793",
"0.60578465",
"0.5932964",
"0.59307265",
"0.59071136",
"0.58819073",
"0.574606",
"0.5692038",
"0.56663704",
"0.56574595",
"0.56306523",
"0.55775577",
"0.5571338",
"0.55618376",
"0.55480283",
"0.5539446",
"0.5500595",
"0.5470933",
"0.54691875",
"0.54479104",
"0.5444946",
"0.5442759",
"0.5436667",
"0.5423335",
"0.5420356",
"0.5418994",
"0.5416448",
"0.5416448",
"0.5410757",
"0.53944963"
]
| 0.7630216 | 0 |
Private method handling a search error. errorCode code of the error (integer) errorString error message (string) | def __searchError(self, errorCode, errorString):
self.__finish()
E5MessageBox.warning(
self,
self.tr("Search PyPI"),
self.tr("""<p>The package search failed.</p><p>Reason: {0}</p>""")
.format(errorString))
self.infoLabel.setText(self.tr("Error: {0}").format(errorString)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def search_invalid_parameters(error):\n current_app.logger.info(str(error))\n return render_template(\"search.html\", query=error.query, error=error), 400",
"def __init__(self, error_search=\"error\"):\n self.error_search = error_search",
"def not_found(error):\n pass",
"def search_bad_query(error):\n current_app.logger.debug(str(error))\n return render_template(\"search.html\", query=error.query, error=error)",
"def search_err(error):\n\n url = \"https://api.stackexchange.com/\" + \"/2.2/search?page=2&order=desc&sort=activity&tagged=python&intitle={}&site=stackoverflow\".format(error)\n \n resp = requests.get(url)\n return resp.json()",
"def error(msg=\"Invalid query\", code=400):\n\tjson = {'error': msg}\n\t#return jsonify(json), code\n\tabort(make_response(jsonify(json), code))",
"def process_error(self, id, code, error):\n raise NotImplementedError('process_error not implemented in BaseService')",
"def handle_upnp_exception(self, err_code: int, err_msg: str):\n error_msg = QErrorMessage(self.__app)\n error_msg.showMessage(\"Error {}: {}\".format(err_code, err_msg))",
"def _process_error(self, result):\n self.error = result\n if result['errorCode'] == 901:\n raise Exceptions.APIKeyInvalid\n elif result['errorCode'] == 902:\n raise Exceptions.APISecretInvalid\n elif result['errorCode'] == 903:\n raise Exceptions.InvalidRequestToken\n elif result['errorCode'] == 904:\n raise Exceptions.RequestTokenExpired\n elif result['errorCode'] == 905:\n raise Exceptions.InvalidAccessToken\n elif result['errorCode'] == 906:\n raise Exceptions.TokenExpired(self.access.expire)\n elif result['errorCode'] == 907:\n raise Exceptions.ParameterMissing\n elif result['errorCode'] == 908:\n raise Exceptions.ParameterNotFormatted\n elif result['errorCode'] == 909:\n raise Exceptions.FeatureNotSupported\n elif result['errorCode'] == 910:\n raise Exceptions.EndPointNotSupported\n else:\n raise Exceptions.UnknownJsonError(result)",
"def handle_error(msg):\r\n query.error_message = msg\r\n query.status = ADMIN_USER_QUERY_STATUSES.FAILED.value\r\n db.session.commit()\r\n raise Exception(msg)",
"def errorReceived(results):\n self.client.transport.loseConnection()\n self.server.transport.loseConnection()\n\n # Check what the server logs\n errors = self.flushLoggedErrors(imap4.IllegalQueryError)\n self.assertEqual(len(errors), 1)\n\n # Verify exception given to client has the correct message\n self.assertEqual(\n str(b\"SEARCH failed: FOO is not a valid search criteria\"),\n str(results))",
"def errorReceived(results):\n self.client.transport.loseConnection()\n self.server.transport.loseConnection()\n\n # Check what the server logs\n errors = self.flushLoggedErrors(imap4.IllegalQueryError)\n self.assertEqual(len(errors), 1)\n\n # Verify exception given to client has the correct message\n self.assertEqual(\n str(b\"SEARCH failed: Invalid search command FOO\"),\n str(results),\n )",
"def on_error(self, status_code, data):\n\t\tprint(\"error_code: \",status_code)",
"def error(self, error):\n pass",
"def error_not_found(error):\n return 'No page here, dood. 404!', 404",
"def error_code(self, obj, statusCode):\n pass",
"def error_404(error):\n return 'Data Service Error'",
"def test_search_not_found(self):\n s = Searcher( self.logger )\n ( search_key, search_value ) = ( 'ISBN', self.isbn_not_found )\n result_dct = s.search(\n self.patron_barcode, search_key, search_value, self.api_url_root, self.api_key, self.partnership_id, self.university_code )\n self.assertEqual(\n {\"Problem\":{\"ErrorCode\":\"PUBFI002\",\"ErrorMessage\":\"No result\"}}, result_dct )",
"def error(self, code, msg):\r\n self.status = code\r\n self.status_message = str(msg)",
"def getErrorByCode(SID, errorid, langId):\n return call(\"getErrorByCode\", SID, errorid, langId)",
"def _handle_error(self, err: ctypes.c_char_p, method: str) -> Exception:\n if err:\n string = ctypes.string_at(err).decode(\"utf-8\")\n self._free_error(err)\n return RuntimeError(string)\n else:\n return RuntimeError(f\"Unknown error in {method}. \")",
"def error(self, message_code):\n result = Result(self.msg.sms.from_number, message_code)\n self.respond(result.message, fields={'message_code': result.message_code})",
"def test_get_errorCode(self):\n for app_num, servo_type in app_nr.items():\n try:\n par = self.get_parameter(servo_type, app_num, ERROR_CODE_IDX, ERROR_CODE_SUB)\n param_obj = self.__dict__[servo_type]._get_errorCode()\n acs_par, completion = param_obj.get_sync()\n if(completion.code):\n print \"\\nError code found in errorCode...\"\n continue\n self.data_match(acs_par, par)\n except NackEx:\n continue",
"def errorcode(self):\n return self._errorcode",
"def error_code(self) -> str:\n return self.__error_code",
"def _check_errors(self, json_loaded):\n\n content = json_loaded\n try:\n m = content[u'error'][u'message']\n c = content[u'error'][u'code']\n out= \"API Error code: {}\\nError message: {}\".format(c, m)\n raise InvalidQueryException(self.name, out)\n except KeyError:\n pass",
"def _get_error_code(self, data) -> int:\n return int(self._error_code)",
"def errormessage(self, msg) :\n\t\tif msg != self.__olderror :\n\t\t\tself.__stderr.write(\"%s\\n\" % msg)\n\t\t\tself.htmlmessage(msg)\n\t\tself.__olderror = msg[:]\n\t\treturn -1",
"def error(self, message, code='UnknownError', error_code=None, http_status=400):\n\n # Some backwards compatibility\n if error_code is not None and code == 'UnknownError':\n code = error_code\n\n self._add_message( message, self.ERROR, code=code )\n self.n_errors += 1\n self.status = 'ERROR'\n self.http_status = http_status\n self.error_code = code\n self.message = message",
"def error(self, code, message = ''):\n self.response.set_status(404)\n raise Exception(message)"
]
| [
"0.6526044",
"0.6315798",
"0.60930365",
"0.609029",
"0.6039893",
"0.59139246",
"0.58703196",
"0.5868383",
"0.58507913",
"0.58359665",
"0.5817767",
"0.58024484",
"0.57093686",
"0.56727254",
"0.56263053",
"0.55985034",
"0.5585837",
"0.55695426",
"0.5524106",
"0.5516786",
"0.54753417",
"0.5459219",
"0.54496914",
"0.54421055",
"0.54360825",
"0.54327273",
"0.54264516",
"0.5423953",
"0.54199237",
"0.5417049"
]
| 0.754049 | 0 |
Private method to calculate some score for a search result. name name of the returned package str summary summary text for the package str score value int | def __score(self, name, summary):
score = 0
for queryTerm in self.__query:
if queryTerm.lower() in name.lower():
score += 4
if queryTerm.lower() == name.lower():
score += 4
if queryTerm.lower() in summary.lower():
if QRegExp(r'\b{0}\b'.format(QRegExp.escape(queryTerm)),
Qt.CaseInsensitive).indexIn(summary) != -1:
# word match gets even higher score
score += 2
else:
score += 1
return score | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_score(result):\n sample1=result['Sample1']\n sample2=result['Sample2']\n string1=paragraph_to_list(sample1)\n string2=paragraph_to_list(sample2)\n \n return round( strings_similarity(string1, string2), 2)\n #method_dict=strings_count_compare(string1, string2)/ max(len(string1), len(string2))\n #return round(0.5*(method_difflab+method_dict), 2)",
"def disp_score():",
"def rank_results(result_list, search_title, search_artist, uploader_list):\n #scores = []\n #search_artist = search_artist.replace(\"+\", \" \").lower()\n search_title = search_title.replace(\"+\", \" \")\n #search_terms = search_title.split() + search_artist.split()\n\n ## Give score to each result\n #for index, title in enumerate(result_list):\n # title = title.lower()\n # score = 0\n\n # # One point for each word in result title\n # for term in search_terms:\n # if term in title:\n # score += 1\n\n # # 2 points if whole title in result, 2 points for whole artist, 4 points for both\n # if search_title in title:\n # score += 2\n # if search_artist in title:\n # score += 2\n # if search_title in title and search_artist in title:\n # score += 4\n # if search_title == title and (uploader_list[index] == search_artist+\" - topic\" or uploader_list[index] == 'various artists - topic' or uploader_list[index] == search_artist or uploader_list[index] == search_artist+'\\\\xa0'):\n # score += 100\n # if 'karaoke' in title:\n # score-=1000\n\n # scores.append(score)\n\n # return scores.index(max(scores))\n for index, title in enumerate(result_list):\n title = title\n if search_title == title:\n return index\n\n return 0",
"def extract_score(results):\n total_score = 0;\n total_possible_score = 0;\n for k in results.keys():\n total_score = total_score + results[k][0]\n total_possible_score = total_possible_score + results[k][1]\n return (total_score, total_possible_score)",
"def score(self):",
"def scoring(self):\n pass",
"def getScore(data):\n return score",
"def __processSearchResult(self, data):\n if data:\n packages = self.__transformHits(data[0])\n if packages:\n self.infoLabel.setText(self.tr(\"%n package(s) found.\", \"\",\n len(packages)))\n wrapper = textwrap.TextWrapper(width=80)\n count = 0\n total = 0\n for package in packages:\n if self.__canceled:\n self.infoLabel.setText(\n self.tr(\"Canceled - only {0} out of %n package(s)\"\n \" shown\", \"\", len(packages)).format(total))\n break\n itm = QTreeWidgetItem(\n self.resultList, [\n package['name'].strip(),\n \"{0:4d}\".format(package['score']),\n \"\\n\".join([\n wrapper.fill(line) for line in\n package['summary'].strip().splitlines()\n ])\n ])\n itm.setData(0, self.VersionRole, package['version'])\n count += 1\n total += 1\n if count == 100:\n count = 0\n QApplication.processEvents()\n else:\n QApplication.restoreOverrideCursor()\n E5MessageBox.warning(\n self,\n self.tr(\"Search PyPI\"),\n self.tr(\"\"\"<p>The package search did not return\"\"\"\n \"\"\" anything.</p>\"\"\"))\n self.infoLabel.setText(\n self.tr(\"\"\"<p>The package search did not return\"\"\"\n \"\"\" anything.</p>\"\"\"))\n else:\n QApplication.restoreOverrideCursor()\n E5MessageBox.warning(\n self,\n self.tr(\"Search PyPI\"),\n self.tr(\"\"\"<p>The package search did not return anything.\"\"\"\n \"\"\"</p>\"\"\"))\n self.infoLabel.setText(\n self.tr(\"\"\"<p>The package search did not return anything.\"\"\"\n \"\"\"</p>\"\"\"))\n \n header = self.resultList.header()\n self.resultList.sortItems(1, Qt.DescendingOrder)\n header.setStretchLastSection(False)\n header.resizeSections(QHeaderView.ResizeToContents)\n headerSize = 0\n for col in range(header.count()):\n headerSize += header.sectionSize(col)\n if headerSize < header.width():\n header.setStretchLastSection(True)\n \n self.__finish()",
"def score(name):\r\n return (sorted(test).index(name)+1)*value(name)",
"def score(self, searcher, fieldnum, text, docnum, weight, QTF = 1):\n raise NotImplementedError",
"def score(self, test_data):\n\n\t\tpass",
"def judge(name):\n score = 0\n for scoreID, scorer, weight in weights:\n subscore = scorer(name)\n score += subscore * weight\n name.scores[scoreID] = subscore\n name.score = score\n return score",
"def make_score(self, concept: _Concept, result: _Result) -> Optional[_Score]:\n\n scores = []\n labels = [\"pref_label\", \"alt_label\", \"hidden_label\", \"definition\"] # i.e., relevant attributes\n\n for label in labels:\n label_string = result.__getattribute__(label)\n if label_string is None:\n continue\n distances = []\n # check if label_string has more than one language:\n for foundword in label_string.split(\";\"):\n distance = Levenshtein.distance(concept.get_pref_label().lower(), foundword.lower())\n distances.append((distance, foundword))\n # add the best foundword and distance tuple per label over all languages:\n scores.append(min(distances))\n\n # catch malformed (= empty labels) results:\n try:\n min(scores)\n except ValueError:\n return None\n\n return _Score(value=min(scores)[0], comparandum=concept, comparans=result)",
"def score(self):\n return self.client.call('GET', self.name + 'score')",
"def artists_match_fixup2(\n song: Song, result: Result, score: float, search_query: Optional[str] = None\n) -> float:\n\n if score > 70 or not result.verified:\n # Don't fixup the score\n # if the artist match is already high\n # or if the result is not verified\n return score\n\n # Slugify some variables\n slug_song_artist = slugify(song.artists[0])\n slug_song_name = slugify(song.name)\n slug_result_name = slugify(result.name)\n slug_result_artists = slugify(\", \".join(result.artists)) if result.artists else \"\"\n\n # Check if the main artist is simlar\n has_main_artist = (score / (2 if len(song.artists) > 1 else 1)) > 50\n\n match_str1, match_str2 = create_match_strings(song, result, search_query)\n\n # Add 10 points to the score\n # if the name match is greater than 75%\n if ratio(match_str1, match_str2) >= 75:\n score += 10\n\n # If the result doesn't have the same number of artists but has\n # the same main artist and similar name\n # we add 25% to the artist match\n if (\n result.artists\n and len(result.artists) < len(song.artists)\n and slug_song_artist.replace(\"-\", \"\")\n in [\n slug_result_artists.replace(\"-\", \"\"),\n slug_result_name.replace(\"-\", \"\"),\n ]\n ):\n score += 25\n\n # Check if the song album name is very similar to the result album name\n # if it is, we increase the artist match\n if result.album:\n if (\n ratio(\n slugify(result.album),\n slugify(song.album_name),\n )\n >= 85\n ):\n score += 10\n\n # Check if other song artists are in the result name\n # if they are, we increase the artist match\n # (main artist is already checked, so we skip it)\n artists_to_check = song.artists[int(has_main_artist) :]\n for artist in artists_to_check:\n artist = slugify(artist).replace(\"-\", \"\")\n if artist in match_str2.replace(\"-\", \"\"):\n score += 5\n\n # if the artist match is still too low,\n # we fallback to matching all song artist names\n # with the result's artists\n if score <= 70:\n # Artists from song/result name without the song/result name words\n artist_list1 = create_clean_string(song.artists, slug_song_name, True)\n artist_list2 = create_clean_string(\n list(result.artists) if result.artists else [result.author],\n slug_result_name,\n True,\n )\n\n artist_title_match = ratio(artist_list1, artist_list2)\n\n if artist_title_match > score:\n score = artist_title_match\n\n return score",
"def score(self):\n raise NotImplementedError()",
"def get_score(self, student_answers):\r\n pass",
"def sprint_statistics(\n self,\n dataset_name: str,\n scoring_functions: List[autoPyTorchMetric],\n metric: autoPyTorchMetric\n ) -> str:\n search_results = self.get_search_results(scoring_functions, metric)\n success_status = (StatusType.SUCCESS, StatusType.DONOTADVANCE)\n sio = io.StringIO()\n sio.write(\"autoPyTorch results:\\n\")\n sio.write(f\"\\tDataset name: {dataset_name}\\n\")\n sio.write(f\"\\tOptimisation Metric: {metric}\\n\")\n\n num_runs = len(search_results.status_types)\n num_success = sum([s in success_status for s in search_results.status_types])\n num_crash = sum([s == StatusType.CRASHED for s in search_results.status_types])\n num_timeout = sum([s == StatusType.TIMEOUT for s in search_results.status_types])\n num_memout = sum([s == StatusType.MEMOUT for s in search_results.status_types])\n\n if num_success > 0:\n best_score = metric._sign * np.max(metric._sign * search_results.opt_scores)\n sio.write(f\"\\tBest validation score: {best_score}\\n\")\n\n sio.write(f\"\\tNumber of target algorithm runs: {num_runs}\\n\")\n sio.write(f\"\\tNumber of successful target algorithm runs: {num_success}\\n\")\n sio.write(f\"\\tNumber of crashed target algorithm runs: {num_crash}\\n\")\n sio.write(f\"\\tNumber of target algorithms that exceeded the time \"\n f\"limit: {num_timeout}\\n\")\n sio.write(f\"\\tNumber of target algorithms that exceeded the memory \"\n f\"limit: {num_memout}\\n\")\n\n return sio.getvalue()",
"def f1_score(self):",
"def _calculate_result(found, total):\n return (found * 100) / total",
"def get_score(snack_data, percentage_data, snack, snack_query, protein_query, carb_query, fat_query):\n\tstart_time = time.time()\n\n\t#Load necessary data\n\t\"\"\"\twith open ('../../../Data/percentagesDict.pickle', 'rb') as f:\n\t\tpercentage_data = pickle.load(f)\n\n\twith open ('../../../Data/FINAL_snacks_data.pickle', 'rb') as f:\n\t\tsnack_data = pickle.load(f)\"\"\"\n\n\t#Set constants\n\tLOW_FAT = .3\n\tHIGH_FAT = .6\n\tLOW_CARB = .1\n\tHIGH_CARB = .2\n\tLOW_PRO = .2\n\tHIGH_PRO = .4\n\n\t#Convert macro percentages to 'high', 'med', 'low' categories\n\tfat = percentage_data[snack]['fat']\n\tprotein = percentage_data[snack]['protein']\n\tcarb = percentage_data[snack]['carb']\n\n\tif fat > HIGH_FAT:\n\t\tfat_content = 'high'\n\telif fat < LOW_FAT:\n\t\tfat_content = 'low'\n\telse:\n\t\tfat_content = 'med'\n\n\tif protein > HIGH_PRO:\n\t\tprotein_content = 'high'\n\telif protein < LOW_PRO:\n\t\tprotein_content = 'low'\n\telse:\n\t\tprotein_content = 'med'\n\n\tif carb > HIGH_CARB:\n\t\tcarb_content = 'high'\n\telif carb < LOW_CARB:\n\t\tcarb_content = 'low'\n\telse:\n\t\tcarb_content = 'med'\n\n\t#Set x values\n\tx1 = fat_query == fat_content\n\tx2 = carb_query == carb_content\n\tx3 = protein_query == protein_content\n\tx4 = cooccur(snack_data, snack, snack_query) \n\tx5 = snack_data[snack]['rating']\n\n\tw1 = 1\n\tw2 = 1\n\tw3 = 1\n\tw4 = 1\n\tw5 = 1\n\t\n\t#print('x1: {}, x2: {}, x3: {}, x4: {}, x5: {}'.format(x1, x2, x3, x4, x5))\n\t#print(\"get_score() time: --- %s seconds ---\" % (time.time() - start_time))\n\n\n\treturn w1*x1 + w2*x2 + w3*x3 + w4*x4 + w5*x5",
"def score( self ):\r\n result = 0.0\r\n for rr in self.ee.getRsrcs( ):\r\n value = self.scoreRsrc( rr )\r\n result += value\r\n print( \"INFO: Value for the schedule is %s \" % ( rr, result ) )\r\n return( result )",
"def evaluate(self):\n scores = []\n scores.append(self.word_analogy())\n print(\"Word Analogy (acc): \", scores[0])\n scores.append(self.word_similarity())\n print(\"Word Similarity (MSE): \", scores[1])\n scores.append(self.concept_categorization())\n print(\"Concept Categorization (purity): \", scores[2])\n scores.append(self.sentiment_analysis())\n print(\"Sentiment Analysis (acc): \", scores[3])\n return scores",
"def summarize_results(self, models, scores):\n mu = np.mean(scores)\n sigma = np.std(scores)\n best_model = models[np.argmax(scores)]\n best_score = max(scores)\n return best_model, best_score, mu, sigma",
"def _infer_score_name(keys) -> str:\n\n for score in STANDARD_SEARCHENGINE_SCORES:\n if score in keys:\n return score\n else:\n raise UnknownMzidScore(\"No known score metric found in mzIdentML file.\")",
"def score(self):\n\n self.link()\n roc, _ = self.aggregate()\n\n return roc",
"def score(self):\n return None",
"def test_search_with_scoring(context):\n # When create a query block\n t = QuerySet(\"localhost\", index=\"foo\")\n\n # And there are records\n add_document(\"foo\", {\"bar\": 1})\n add_document(\"foo\", {\"bar\": 2})\n add_document(\"foo\", {\"bar\": 3})\n\n # And I add scoring\n score = ScriptScore(\"s = 0 + doc['bar'].value\")\n t.score(score)\n results = t[0:10]\n\n # Then my results are scored correctly\n len(results).should.equal(3)\n results[0][\"_source\"][\"bar\"].should.equal(3)\n results[1][\"_source\"][\"bar\"].should.equal(2)\n results[2][\"_source\"][\"bar\"].should.equal(1)",
"def get_score(self):\n return self.score",
"def score(title, min_votes=0, precision=1):\n scores = []\n if imdb and _imdb_enabled and app.config.getboolean(\"service_imdb\", \"enabled\"):\n scores.append(_imdb_score(title, min_votes=min_votes))\n #if tmdb and _tmdb_enabled and app.config.getboolean(\"service_themoviedb\", \"enabled\"):\n # scores.append(_tmdb_score(title, min_votes=min_votes))\n if not scores:\n return 0\n return round(sum(scores) / float(len(scores)), precision)"
]
| [
"0.6388174",
"0.6375168",
"0.6337434",
"0.6309232",
"0.62848437",
"0.61819446",
"0.6150316",
"0.6061966",
"0.60612357",
"0.6000565",
"0.5980575",
"0.5880881",
"0.5871269",
"0.58405083",
"0.58331317",
"0.5757562",
"0.5753757",
"0.57525665",
"0.5744831",
"0.57435524",
"0.57269347",
"0.57214576",
"0.56985754",
"0.56980693",
"0.5686239",
"0.5681733",
"0.56716555",
"0.5656507",
"0.5649076",
"0.5648049"
]
| 0.67483026 | 0 |
Private slot to install the selected packages. | def __install(self):
command = self.pipComboBox.currentText()
if command == self.__default:
command = ""
packages = []
for itm in self.resultList.selectedItems():
packages.append(itm.text(0).strip())
if packages:
self.__pip.installPackages(packages, cmd=command) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def post_install(self, installable_pkgs):\n pass",
"def install(self, *packages):\n raise NotImplementedError",
"def install_package(self, package):\n raise NotImplementedError(\"install_package not implemented!\")",
"def install(self):\n raise NotImplementedError",
"def _install(self):\n\n pass",
"def do(self):\r\n parameters = ParametersParserStr(self.args_parameters).get()\r\n self.core.install(self.product_names, parameters, with_dependencies=True)",
"def post_install_pkg(self, installable_pkg):\n pass",
"def pre_install(self, installable_pkgs):\n pass",
"def install(self):\n # This installs the packages defined in self.packages\n super().install()\n # Do any other installation work that is needed. If a license key is\n # required then use the custom_assess_status_check() function below to\n # determine whether it is needed.\n # This assess_status() will determine what status the charm is at after\n # install.\n self.assess_status()",
"def _install(self):\n # Default implementation\n for pm_name, package in self._provider_package.items():\n if helpers[pm_name]:\n helpers[pm_name].install_package(package)\n return\n raise self.unsure_how_to_install()",
"def pipInstall(self):\n\n print \"Does Nothing\"",
"def sub_install_packages():\n sudo('apt-get update') # Update repository links\n sudo('apt-get -y upgrade') # Upgrade the system\n package_str = ' '.join(INSTALL_PACKAGES)\n sudo('apt-get -y install ' + package_str) # Install the packages",
"def install():\n PackCommandExecutor().pack()\n InstallCommandExecutor().install()",
"def pre_install_pkg(self, installable_pkg):\n pass",
"def install(force, packages):\n setup_audit_log()\n for pspec in CFG.package_specs(packages):\n perform_install(pspec, is_upgrade=False, force=force, quiet=False)",
"def install_from_repository(self) -> None:\n self.sort_packages()\n\n # Install recommended packages\n if self.recommended_packages:\n self.list_packages(self.recommended_packages, title=\"package\")\n for package in self.recommended_packages:\n try:\n self.perform_operation(\n Command('install'),\n Command(package)\n )\n except tmt.utils.RunError as error:\n self.debug(f\"Package installation failed: {error}\")\n self.warn(f\"Unable to install recommended package '{package}'.\")\n continue\n\n # Install required packages\n if self.required_packages:\n self.perform_operation(\n Command('install'),\n self.list_packages(self.required_packages, title=\"package\")\n )",
"def install(self, no_dependencies: bool = True):\n return PackageHelper.install_package(name=self.name, no_dependencies=no_dependencies)",
"def do_post_install(self, context):\n pass",
"def install():\n verun('pip install -r {0}'.format(requirements))",
"def install(self) -> None:\n if self.local_packages:\n self.prepare_install_local()\n self.install_local()\n if self.remote_packages:\n self.install_from_url()\n if self.repository_packages:\n self.install_from_repository()\n if self.debuginfo_packages:\n self.install_debuginfo()",
"def install(self, parent):\r\n pass",
"def _install(package_name, package_version, options_path, app_id, cli, app,\n yes):\n\n if cli is False and app is False:\n # Install both if neither flag is specified\n cli = app = True\n\n # Expand ~ in the options file path\n if options_path:\n options_path = os.path.expanduser(options_path)\n user_options = _user_options(options_path)\n\n package_manager = _get_package_manager()\n pkg = package_manager.get_package_version(package_name, package_version)\n\n pkg_json = pkg.package_json()\n pre_install_notes = pkg_json.get('preInstallNotes')\n if app and pre_install_notes:\n emitter.publish(pre_install_notes)\n if not _confirm('Continue installing?', yes):\n emitter.publish('Exiting installation.')\n return 0\n\n if app and pkg.has_mustache_definition():\n\n # render options before start installation\n options = pkg.options(user_options)\n\n # Install in Marathon\n msg = 'Installing Marathon app for package [{}] version [{}]'.format(\n pkg.name(), pkg.version())\n if app_id is not None:\n msg += ' with app id [{}]'.format(app_id)\n\n emitter.publish(msg)\n\n package_manager.install_app(\n pkg,\n options,\n app_id)\n\n if cli and pkg.has_cli_definition():\n # Install subcommand\n msg = 'Installing CLI subcommand for package [{}] version [{}]'.format(\n pkg.name(), pkg.version())\n emitter.publish(msg)\n\n subcommand.install(pkg)\n\n subcommand_paths = subcommand.get_package_commands(package_name)\n new_commands = [os.path.basename(p).replace('-', ' ', 1)\n for p in subcommand_paths]\n\n if new_commands:\n commands = ', '.join(new_commands)\n plural = \"s\" if len(new_commands) > 1 else \"\"\n emitter.publish(\"New command{} available: {}\".format(plural,\n commands))\n\n post_install_notes = pkg_json.get('postInstallNotes')\n if app and post_install_notes:\n emitter.publish(post_install_notes)\n\n return 0",
"def install_package(self, package):\n package = package.lower()\n command = shlex.split('sudo DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" --force-yes -y install ' + package)\n try:\n print subprocess.check_call(command, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n if \"unable to locate package\" in e.output.lower():\n print \"Can't identify package name. Check spelling of package name\"",
"def install(self):\n return self._process('install')",
"def install_from_rpm_py_package(self):\n raise NotImplementedError('Implement this method.')",
"def update(self, iterable):\n for package in iterable:\n self.add_package(package)",
"def install(self, packages):\n cmd = ['dnf', 'install', '-y']\n\n for pkg in packages:\n # if pkg not in self.installed_packages:\n cmd.append(pkg)\n\n if packages:\n subprocess.Popen(cmd).wait()",
"def _provision_package(self):",
"def install_packages(packages):\n\n if packages:\n log(\"Installing apt packages: {0}\".format(packages))\n run(sh.apt_get.install, packages.split(), y=True)",
"def set_installed_packages():\n global INSTALLED_PACKAGES, REQUIRED_VERSION\n if INSTALLED_PACKAGES:\n return\n\n if os.path.exists(BIN_PYTHON):\n pip = subprocess.Popen(\n (BIN_PYTHON, '-m', 'pip', 'freeze'),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n (stdout, stderr) = pip.communicate()\n pip.wait()\n\n INSTALLED_PACKAGES = [normalize_package_name(r.decode().split('==')[0].lower()) for r in stdout.split()]\n REQUIRED_VERSION = next((package for package in INSTALLED_PACKAGES if re.match(r'^lore[!<>=]', package)), None)\n if REQUIRED_VERSION:\n REQUIRED_VERSION = re.split(r'[!<>=]', REQUIRED_VERSION)[-1]"
]
| [
"0.7001624",
"0.6993284",
"0.68159723",
"0.67730206",
"0.6733573",
"0.6620806",
"0.66078526",
"0.65957123",
"0.6587987",
"0.65526897",
"0.652802",
"0.65253454",
"0.6520522",
"0.64047605",
"0.6254468",
"0.6160361",
"0.6121888",
"0.61139154",
"0.60733545",
"0.6006703",
"0.6002579",
"0.6002042",
"0.5974949",
"0.59572256",
"0.5954995",
"0.59061885",
"0.5887252",
"0.58809984",
"0.5864378",
"0.5861583"
]
| 0.784766 | 0 |
Private slot to show details about the selected package. | def __showDetails(self):
self.buttonBox.button(QDialogButtonBox.Close).setEnabled(False)
self.buttonBox.button(QDialogButtonBox.Cancel).setEnabled(True)
self.buttonBox.button(QDialogButtonBox.Cancel).setDefault(True)
self.__showDetailsButton.setEnabled(False)
QApplication.setOverrideCursor(Qt.WaitCursor)
QApplication.processEvents(QEventLoop.ExcludeUserInputEvents)
self.__detailsData = {}
itm = self.resultList.selectedItems()[0]
packageVersions = itm.data(0, self.VersionRole)
if len(packageVersions) == 1:
packageVersion = packageVersions[0]
elif len(packageVersions) == 0:
packageVersion = ""
else:
packageVersion, ok = QInputDialog.getItem(
self,
self.tr("Show Package Details"),
self.tr("Select the package version:"),
packageVersions,
0, False)
if not ok:
return
packageName = itm.text(0)
self.__client.call(
"release_data",
(packageName, packageVersion),
lambda d: self.__getPackageDownloadsData(packageVersion, d),
self.__detailsError
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __displayPackageDetails(self, data):\n self.__finish()\n self.__showDetailsButton.setEnabled(True)\n from .PipPackageDetailsDialog import PipPackageDetailsDialog\n dlg = PipPackageDetailsDialog(self.__detailsData, data[0], self)\n dlg.exec_()",
"def print_package_info(package):\r\n print(\"******************\")\r\n print(\"Product: %s\" % package[0]['description'])\r\n print(\"Price: %s$ monthly\" % package[0]['prices'][0]['recurringFee'])\r\n print(\"******************\")\r\n return",
"def details(self, packageName):\n path = \"details?doc=%s\" % requests.utils.quote(packageName)\n message = self.executeRequestApi2(path)\n return message.payload.detailsResponse",
"def details(self, packageName, get_raw=False):\n path = \"details?doc=%s\" % requests.utils.quote(packageName)\n raw_response = self.execute_request_raw(path)\n message = self.executeRequestApi2(path, raw_response=raw_response)\n if get_raw:\n return message.payload.detailsResponse, raw_response\n return message.payload.detailsResponse",
"def print_details(self):\n self.view.print_details()",
"def OnSelect(self,event):\r\n index = event.GetSelection()\r\n item = self.items[index]\r\n self.data.select(item)\r\n if self.gInfoBox:\r\n self.gInfoBox.DiscardEdits()\r\n self.gInfoBox.SetValue(self.data.getInfo(item))",
"def show(self) -> None:",
"def view_package(id):\n package = Package.query.get_or_404(id)\n return jsonify(\n {\n \"package_id\": package.id,\n \"event_id\": package.event.id,\n \"price\": float(package.price),\n \"audience\": package.audience,\n \"description\": package.description,\n \"package_type\": package.package_type,\n }\n )",
"def show(self):\n pass",
"def show(self):\n\t\traise NotImplementedError()",
"def info(self, id):",
"def _info():\n\n emitter.publish(default_command_info(\"package\"))\n return 0",
"def show(self):\n\n pass",
"def callback_Details(mod, currentMods, window):\n detailMod = currentMods[mod]\n detailText = brf.print_Details(mod, currentMods)\n sg.popup(detailText)\n # window['detailText'].update('Details for {}:\\n{}'.format(mod, detailText))\n\n return None",
"def show(self):\n raise NotImplementedError",
"def show(self):\n raise NotImplementedError",
"def show(self):",
"def show(self, item_id):\n pass",
"def details(self):\n pass",
"def details(self):\n print \"ABC - Deployer.details()\"",
"def __getPackageDownloadsData(self, packageVersion, data):\n if data and data[0]:\n self.__detailsData = data[0]\n itm = self.resultList.selectedItems()[0]\n packageName = itm.text(0)\n self.__client.call(\n \"release_urls\",\n (packageName, packageVersion),\n self.__displayPackageDetails,\n self.__detailsError\n )\n else:\n self.__finish()\n E5MessageBox.warning(\n self,\n self.tr(\"Search PyPI\"),\n self.tr(\"\"\"<p>No package details info available.</p>\"\"\"))",
"def get_details(self):",
"def info(self):\n pp = pprint.PrettyPrinter(indent=4)\n print_text_box('Info')\n pp.pprint(self.manager.data[\"info\"])\n print('')",
"def get_package(self, __package_id):\n raise NotImplementedError",
"def info(self):",
"def info(self):",
"def open_info_dialog(self):\n info_dialog = InfoDialog()\n info_dialog.exec_()",
"def _show_info(self):\n\n dataframe = self._cache.get_source(config.DATAFRAME_ARTISTS)\n dataframe.printSchema()",
"def book_info(self):\n print(\"ID : \", self.ID,\n \"\\nName : \", self.name,\n \"\\nAuthor : \", self.author,\n \"\\nGenre : \", self.genre,\n \"\\nPrice : \", self.price,\n \"\\nQuantity of this book : \", self.quantity)",
"def get_pkg(pkg_name):\n pkg = Database().db.get_pkg_details(pkg_name, \"\", False)\n pkg = PackageDetail.surClass(pkg)\n print('dir: ', dir(pkg))\n \n #print('name:', pkg.nane)\n #print('props.name:', pkg.props.nane)\n return render_template(\"pkg.html\", \n title=\" - \"+pkg_name,\n repos=Database().db.get_repos_names(),\n pkg=pkg)"
]
| [
"0.7484296",
"0.6527179",
"0.6382961",
"0.6051693",
"0.5973608",
"0.5905206",
"0.5886441",
"0.5858872",
"0.58353835",
"0.581935",
"0.5806822",
"0.5795656",
"0.5776238",
"0.5761457",
"0.57599133",
"0.57599133",
"0.57353157",
"0.56632274",
"0.5651795",
"0.5611291",
"0.56077147",
"0.55740494",
"0.55583525",
"0.554527",
"0.5527538",
"0.5527538",
"0.552617",
"0.5515808",
"0.550976",
"0.54829115"
]
| 0.74342203 | 1 |
Private method to store the details data and get downloads information. packageVersion version info str data result data with package details in the first element tuple | def __getPackageDownloadsData(self, packageVersion, data):
if data and data[0]:
self.__detailsData = data[0]
itm = self.resultList.selectedItems()[0]
packageName = itm.text(0)
self.__client.call(
"release_urls",
(packageName, packageVersion),
self.__displayPackageDetails,
self.__detailsError
)
else:
self.__finish()
E5MessageBox.warning(
self,
self.tr("Search PyPI"),
self.tr("""<p>No package details info available.</p>""")) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_download_info(self, data_url):\n elem = (DOM.DownloadManager.download_entry_info[0],\n DOM.DownloadManager.download_entry_info[1].format(data_url))\n return self.UTILS.element.getElement(elem, \"Obtain the info for: {}\".format(data_url))",
"def _pypi_details(self) -> Tuple[str, str, str, str, str]:\n json_value = pypi_fetch(self._reserved_name)\n if json_value:\n latest_version = self._parse_latest_version(json_value)\n license_value = self._get_license(json_value)\n latest_release_date = self._parse_latest_update(json_value, latest_version)\n release_count = self._parse_release_count(json_value)\n summary = self._parse_summary(json_value)\n return (\n latest_version,\n license_value,\n latest_release_date,\n release_count,\n summary,\n )\n return \"\", \"Unknown\", \"\", \"\", \"\"",
"def __showDetails(self):\n self.buttonBox.button(QDialogButtonBox.Close).setEnabled(False)\n self.buttonBox.button(QDialogButtonBox.Cancel).setEnabled(True)\n self.buttonBox.button(QDialogButtonBox.Cancel).setDefault(True)\n self.__showDetailsButton.setEnabled(False)\n QApplication.setOverrideCursor(Qt.WaitCursor)\n QApplication.processEvents(QEventLoop.ExcludeUserInputEvents)\n \n self.__detailsData = {}\n \n itm = self.resultList.selectedItems()[0]\n packageVersions = itm.data(0, self.VersionRole)\n if len(packageVersions) == 1:\n packageVersion = packageVersions[0]\n elif len(packageVersions) == 0:\n packageVersion = \"\"\n else:\n packageVersion, ok = QInputDialog.getItem(\n self,\n self.tr(\"Show Package Details\"),\n self.tr(\"Select the package version:\"),\n packageVersions,\n 0, False)\n if not ok:\n return\n \n packageName = itm.text(0)\n self.__client.call(\n \"release_data\",\n (packageName, packageVersion),\n lambda d: self.__getPackageDownloadsData(packageVersion, d),\n self.__detailsError\n )",
"def _store_package_metadata(self):",
"def get_details(self):\n # For every URL in our list of links that we got from the parser's\n # 'lookup()' method we get the data from that URL, set it in our\n # parser's buffer, and then let the parser do the rest of the work.\n #\n for i,link in enumerate(self.links):\n # NOTE: Buffers are 1-based, not 0-based.\n #\n link_data = link.get()\n self.scraper.parser.set_buffer(i+1, link_data)\n\n # And in the final buffer we set the id. The scraper we have\n # loaded knows how many bits of url data it expects and in which\n # buffer the id will be in.\n #\n i += 1\n self.scraper.parser.set_buffer(i+1, self.id)\n self.xml_details = self.scraper.parser.parse(FN_GET_DETAILS,\n self.scraper.settings)",
"def getInfo():",
"def get_all_info(self) -> None:\n self.fetch_info(False)\n if not self.found and not Config.Config.get_strict_meta():\n Logger.Logger.log('No iTunes data found using full song name, retrying using a shorter version...')\n self.fetch_info(True)\n if not self.found:\n Logger.Logger.log('No available data for this song, skipping it...')\n return\n self.fetch_cover()\n self.fetch_lyrics()",
"def getDetails(self, option=\"Firmware\"):\n\n def get_repo_data(repos, col_num):\n \"\"\"\n Finds 'State', 'Repositories', 'Image Type', 'Source Path', 'In Use' data for all OS Image Repositories and\n 'State', 'Repository Name', 'Source', 'Custom Bundles' for all Firmware/Software Repositories\n :param repos: list of OS or Firmware locators\n :param col_num: 5 for OS and 4 for Firmware, based on number of colons required\n :return: list of data from tables\n \"\"\"\n repos_data = []\n for repo in repos:\n tds = repo.find_elements_by_xpath(\"./td\")\n td_text = []\n for index, td in enumerate(tds):\n if index == 0 and col_num == 4:\n text = td.text\n text = text.split('\\n')\n if len(text) > 1:\n td_text.append(text[1])\n continue\n if index == col_num:\n break\n td_text.append(td.text)\n repos_data.append(td_text)\n return repos_data\n\n def zipped_data(repos_data):\n \"\"\"\n Makes a dictionary out of colon names as a key and data from repositories under that colon as a value\n eg. {'In Use': 'False', etc.}\n :param repos_data: list of repository data within list\n :return: list of data as dictionary for each repository\n \"\"\"\n os_col_names = ['State', 'Repositories', 'Image Type', 'Source Path', 'In Use']\n fw_col_names = ['State', 'Repository Name', 'Source', 'Custom Bundles']\n\n repo_data = []\n for repo in repos_data:\n if len(repo) == 4:\n zipped = zip(fw_col_names, repo)\n elif len(repo) == 5:\n zipped = zip(os_col_names, repo)\n repo_data.append(dict(zipped))\n return repo_data\n\n try:\n self.handleEvent(EC.element_to_be_clickable((By.ID, self.RepositoriesObjects('repo_tab'))), action=\"CLICK\")\n os_repos = self.handleEvent(\n EC.presence_of_all_elements_located((By.XPATH, self.RepositoriesObjects('OS_repos'))))\n os_repos_data = get_repo_data(os_repos, col_num=5)\n utility.execLog(\"Able to fetch OS Repositories data: {}\".format(os_repos_data))\n if option == \"OS\":\n utility.execLog('Returning: \"{}\"'.format(zipped_data(os_repos_data)))\n return self.browserObject, True, zipped_data(os_repos_data)\n self.handleEvent(EC.element_to_be_clickable((By.ID, self.RepositoriesObjects('FW_tab'))), action=\"CLICK\")\n fw_repos = self.handleEvent(\n EC.presence_of_all_elements_located((By.XPATH, self.RepositoriesObjects('FW_repos'))))\n fw_repos_data = get_repo_data(fw_repos, col_num=4)\n utility.execLog(\"Able to fetch Firmware Repositories data: {}\".format(fw_repos_data))\n if option == \"Firmware\":\n utility.execLog('Returning: \"{}\"'.format(zipped_data(fw_repos_data)))\n return self.browserObject, True, zipped_data(fw_repos_data)\n else:\n data = zipped_data(os_repos_data) + zipped_data(fw_repos_data)\n utility.execLog('Returning: \"{}\"'.format(zipped_data(data)))\n return self.browserObject, True, data\n except Exception as e:\n return self.browserObject, False, \"Unable to read Repositories :: Error -> {}\".format(e)",
"def download(self, args):\n\n\t\t\"\"\" Default argument for Architecture \"\"\"\n\t\tif len(args) >= 4:\n\t\t\tarch = args[3]\n\t\telse:\n\t\t\tarch = platform.processor()\n\n\t\t\"\"\" Default argument for Version \"\"\"\n\t\tif len(args) >= 3:\n\t\t\tif args[2] == \"latest\":\n\t\t\t\tversion = \"Latest\"\n\t\t\telse:\n\t\t\t\tversion = args[2]\n\t\telse:\n\t\t\tversion = \"Latest\"\n\n\t\t\"\"\" Find package path from package list, based on prev. arguments \"\"\"\n\t\tif len(args) >= 2:\n\t\t\tpackage = args[1]\n\t\t\tfilename = False\n\t\t\t\n\t\t\tversions = self.master.Dump(package)\n\t\t\tfor d in versions:\n\t\t\t\tif d[\"Version\"] == version:\n\t\t\t\t\tif d[\"Version\"] != \"Latest\" and d[\"Architecture\"] == arch:\n\t\t\t\t\t\tfilename = d[\"Filename\"]\n\t\t\t\t\telse:\n\t\t\t\t\t\tfor e in versions:\n\t\t\t\t\t\t\tif e[\"Version\"] == d[\"LatestVersion\"] and e[\"Architecture\"] == arch:\n\t\t\t\t\t\t\t\tfilename = e[\"Filename\"]\n\t\t\t\t\t\t\t\tversion = d[\"LatestVersion\"];\n\t\t\tif not filename:\n\t\t\t\tself.write_line(\"ERROR XXX: Package not found.\")\n\t\t\t\treturn\n\n\t\t\t\"\"\" Find chunks to download \"\"\"\n\t\t\tid = 0\n\t\t\tto_download = False\n\t\t\tfor f in self.torrent_info.files():\n\t\t\t\tprint(f.path.replace(\"packages/\", \"\") + \" = \" + filename);\n\t\t\t\tif f.path.replace(\"packages/\", \"\") == filename:\n\t\t\t\t\tto_download = f\n\t\t\t\t\tbreak;\n\t\t\t\tid += 1\n\t\t\tif not to_download:\n\t\t\t\tprint(\"ERROR XXX: dunno\")\n\t\t\t\treturn\n\n\t\t\t\"\"\" Set chunks priority to 7? (download max priority) \"\"\"\n\t\t\tpr = self.torrent_info.map_file(id, 0, to_download.size);\n\t\t\tn_pieces = math.ceil(pr.length / self.torrent_info.piece_length() + 1);\n\n\t\t\tfor i in range(self.torrent_info.num_pieces()):\n\t\t\t\tif i in range(pr.piece, pr.piece + n_pieces):\n\t\t\t\t\tself.handler.piece_priority(i, 7)\n\n\n\t\t\t\"\"\" Print download of package status \"\"\"\n\t\t\tself.print_status(id, pr, package, version, filename)\n\t\t\t\t\n\t\t\t\"\"\" Check the server for hash validation \"\"\"\n\t\t\tif self.valid_tpkg_file(to_download.path):\n\t\t\t\tself.write_line(\"DONE {0} {1} {2} {3}\".format(package, version, arch, self.config[\"daemon\"][\"rootdir\"] + \"/\" + to_download.path).replace('//', '/'))\n\t\t\telse:\n\t\t\t\tself.write_line(\"ERROR XXX: Hash verification failed.\")\n\t\telse:\n\t\t\tself.write_line(\"INVALID ARGUMENTS\");",
"def extract_release_data(self):\r\n return None",
"def fetch_data(self):",
"def _fetch_data(self):\n pass",
"def _get_details(self, details):\n details['DoT'] = \"Yes\" if self.static else \"No\"\n details['device'] = self.device\n details['volume_id'] = self.volume_id\n details['from_snap'] = \"No\" if not self.from_snapshot_id else self.from_snapshot_id\n details['from_archive'] = \"No\" if not self.from_archive else self.from_archive['url']\n details['snapshot_progress'] = self.snapshot_progress\n details['snapshot_status'] = self.snapshot_status\n # TODO: keep track of any errors\n details['err_msg'] = None if details.get('err_msg', '') == '' else details['err_msg']\n details['snapshots_created'] = self.snapshots_created\n return details",
"def collect_pypi_data():\n\n rclient = xmlrpc.client.ServerProxy('http://pypi.python.org/pypi')\n python = {'Programming Language :: Python': rclient.browse(['Programming Language :: Python'])}\n python_two = {}\n python_three = {}\n\n for classifier in [\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.3',\n 'Programming Language :: Python :: 2.4',\n 'Programming Language :: Python :: 2.5',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 2 :: Only']:\n python_two[classifier] = rclient.browse([classifier])\n\n for classifier in [\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.0',\n 'Programming Language :: Python :: 3.1',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4']:\n python_three[classifier] = rclient.browse([classifier])\n\n return {datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S'):\n {'python': python,\n 'python_two': python_two,\n 'python_three': python_three}}",
"def _extract_info(self, data):\n if 'status' in data:\n if data['status'] == 'Already Downloaded':\n # Set self._return_code to already downloaded\n # and trash that key\n self._set_returncode(self.ALREADY)\n data['status'] = None\n\n if data['status'] == 'Filesize Abort':\n # Set self._return_code to filesize abort\n # and trash that key\n self._set_returncode(self.FILESIZE_ABORT)\n data['status'] = None",
"def get_package_data(name, package=None):\n if not package:\n package = models.Package(name=name)\n releases = {}\n else:\n releases = package.get_all_releases()\n\n client = xmlrpclib.ServerProxy('http://pypi.python.org/pypi', transport=Urllib2Transport())\n\n versions = client.package_releases(package.name, True)\n\n # package_releases() method is case-sensitive, if nothing found\n # then we search for it\n # XXX: Ask pypi to make it case-insensitive?\n if not versions:\n for item in client.search({'name': name}):\n if name.lower() == item['name'].lower():\n package.name = name = item['name']\n break\n else:\n logger.info(\"No packages found matching %r\", name)\n return\n\n # Retry retrieving the versions with the new/correct name\n versions = client.package_releases(package.name, True)\n\n # Save the package if it is new\n if not package.pk:\n package.save()\n\n for version in versions:\n release, files = releases.get(version, (None, {}))\n if not release:\n release = models.Release(package=package, version=version)\n release.save()\n\n data = client.release_data(package.name, release.version)\n\n release_form = forms.PypiReleaseDataForm(data, instance=release)\n if release_form.is_valid():\n release_form.save()\n\n release_files = client.package_urls(package.name, release.version)\n for info in release_files:\n release_file = files.get(info['filename'])\n if not release_file:\n release_file = models.ReleaseFile(\n release=release, filename=info['filename'])\n\n release_file.python_version = info['python_version']\n release_file.filetype = info['packagetype']\n release_file.url = info['url']\n release_file.size = info['size']\n release_file.md5_digest = info['md5_digest']\n release_file.save()\n\n package.update_timestamp = now()\n package.save()",
"def save_data(self):\n # Command to get the download data\n pass",
"def store_pkg_metadata(self, pkg, version):\n pass",
"def fetch(self, folder, source, version, params):\n metadata = []\n rvalue = {\"version\": version, \"metadata\": metadata}\n rcode = 0\n return rcode, rvalue",
"def get_download_info(files):\n file_paths = [] # the files we need to check\n file_count = 0 # count of each file in files\n total_size = 0\n\n all_product_types = []\n for ring_obs_id in files:\n for product_type in files[ring_obs_id]:\n for f in files[ring_obs_id][product_type]:\n\n all_product_types.append(product_type)\n\n if product_type != 'preview_image':\n # this is a pds file not a browse product\n # collect the urls.. we will process these at the end\n file_paths += [f for f in files[ring_obs_id][product_type]] # list of all urls\n\n elif product_type == 'preview_image':\n # the file size of each preview images on disc is checked here\n # todo: OMG WHY WHAT\n # todo: get the file sizes into database instead = process like pds files and remove this whole section!\n\n from results.views import get_base_path_previews\n try:\n size = getsize(f)\n total_size += size\n file_count = file_count + 1\n except OSError:\n log.error('could not find file: ' + f)\n\n all_product_types = list(set(all_product_types)) # make unique\n # now we have all pds file_names, put all file names in a list and get their count\n if file_paths:\n\n file_names = list(set([ get_file_path(u) for u in file_paths]))\n file_count += len(file_names)\n\n # query database for the sum of all file_names size fields\n file_sizes = FileSizes.objects.filter(name__in=file_names, PRODUCT_TYPE__in=all_product_types).values('name','size','volume_id').distinct()\n total_size += sum([f['size'] for f in file_sizes]) # todo: this is here b/c django was not happy mixing aggregate+distinct\n\n return total_size, file_count # bytes",
"def __displayPackageDetails(self, data):\n self.__finish()\n self.__showDetailsButton.setEnabled(True)\n from .PipPackageDetailsDialog import PipPackageDetailsDialog\n dlg = PipPackageDetailsDialog(self.__detailsData, data[0], self)\n dlg.exec_()",
"def get_release_info(self):\r\n return self.detail_info.get_release_info(self.version)",
"async def get_info(self, url):\n yt = youtube_dl.YoutubeDL(stim)\n down = yt.extract_info(url, download=False)\n data1 = {'queue': []}\n if 'entries' in down:\n if len(down['entries']) > 1:\n playlist_titles = [title['title'] for title in down['entries']]\n data1 = {'title': down['title'], 'queue': playlist_titles}\n\n down = down['entries'][0]['title']\n\n return down, data1",
"def get_com_data(self):\n self.form_url_str()\n if self.__print_url: print self.com_data_full_url\n self.download_json()\n self.get_datalist_fr_json()",
"def _get_information(self):\n pass",
"def _retrieveCachedData(self):",
"def get_details(self):",
"def get_data(self):",
"def _get_metadata(self, pkg_name):\n pkg_name = urllib.parse.quote(pkg_name, safe='@')\n if self.metadatas.get(pkg_name):\n return self.metadatas.get(pkg_name)\n else:\n url = urllib.parse.urljoin(self.REGISTRY, pkg_name)\n try:\n pkg_metadata = requests.get(url).json()\n self.metadatas[pkg_name] = pkg_metadata\n return pkg_metadata\n except urllib.error.HTTPError as e:\n print('Could not download {} from: {} with error: {}'. format(pkg_name, url, e.msg))\n exit(-1)",
"def extract_release_data(self):\r\n data = None\r\n session = self.get_session()\r\n project = session.create(self._config.name)\r\n \r\n session.home = self._config['dir']\r\n \r\n result = self.__find_project(project)\r\n if (result != None) and (self._config.get_boolean('releasable', False)):\r\n if 'baseline.release' in self._config:\r\n data = {}\r\n _logger.info(\"Releasing: '%s'\" % result)\r\n data['name'] = result.objectname\r\n data['database'] = session.database()\r\n data['role'] = ccm.get_role_for_purpose(session, str(self._config['purpose']))\r\n data['dir'] = os.path.normpath(self._config['dir'])\r\n data['pst'] = result.name\r\n data['release'] = self._config['baseline.release']\r\n else:\r\n _logger.warning(\"Could not release \" + result.objectname + \" because the 'baseline.release' property is missing.\")\r\n return data"
]
| [
"0.65480614",
"0.6343934",
"0.6293053",
"0.62138385",
"0.6165813",
"0.60201085",
"0.58764493",
"0.5875832",
"0.58752453",
"0.58528614",
"0.5842319",
"0.577861",
"0.5753511",
"0.57315415",
"0.5729206",
"0.570555",
"0.56985503",
"0.5697138",
"0.568847",
"0.5661068",
"0.56441635",
"0.56398356",
"0.5638174",
"0.56295747",
"0.56175077",
"0.5606998",
"0.55761015",
"0.55725205",
"0.5567573",
"0.55662286"
]
| 0.7705288 | 0 |
Private method to display the returned package details. data result data (tuple) with downloads information in the first element | def __displayPackageDetails(self, data):
self.__finish()
self.__showDetailsButton.setEnabled(True)
from .PipPackageDetailsDialog import PipPackageDetailsDialog
dlg = PipPackageDetailsDialog(self.__detailsData, data[0], self)
dlg.exec_() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __getPackageDownloadsData(self, packageVersion, data):\n if data and data[0]:\n self.__detailsData = data[0]\n itm = self.resultList.selectedItems()[0]\n packageName = itm.text(0)\n self.__client.call(\n \"release_urls\",\n (packageName, packageVersion),\n self.__displayPackageDetails,\n self.__detailsError\n )\n else:\n self.__finish()\n E5MessageBox.warning(\n self,\n self.tr(\"Search PyPI\"),\n self.tr(\"\"\"<p>No package details info available.</p>\"\"\"))",
"def print_package_info(package):\r\n print(\"******************\")\r\n print(\"Product: %s\" % package[0]['description'])\r\n print(\"Price: %s$ monthly\" % package[0]['prices'][0]['recurringFee'])\r\n print(\"******************\")\r\n return",
"def __showDetails(self):\n self.buttonBox.button(QDialogButtonBox.Close).setEnabled(False)\n self.buttonBox.button(QDialogButtonBox.Cancel).setEnabled(True)\n self.buttonBox.button(QDialogButtonBox.Cancel).setDefault(True)\n self.__showDetailsButton.setEnabled(False)\n QApplication.setOverrideCursor(Qt.WaitCursor)\n QApplication.processEvents(QEventLoop.ExcludeUserInputEvents)\n \n self.__detailsData = {}\n \n itm = self.resultList.selectedItems()[0]\n packageVersions = itm.data(0, self.VersionRole)\n if len(packageVersions) == 1:\n packageVersion = packageVersions[0]\n elif len(packageVersions) == 0:\n packageVersion = \"\"\n else:\n packageVersion, ok = QInputDialog.getItem(\n self,\n self.tr(\"Show Package Details\"),\n self.tr(\"Select the package version:\"),\n packageVersions,\n 0, False)\n if not ok:\n return\n \n packageName = itm.text(0)\n self.__client.call(\n \"release_data\",\n (packageName, packageVersion),\n lambda d: self.__getPackageDownloadsData(packageVersion, d),\n self.__detailsError\n )",
"def printData(result):\n print \"Body: \\n\", result['body']\n print \"Html: \\n\", result['html']\n print \"Attachment: \\n\", result['filename']\n # print \"Attachments: \\n\", result['attachments']",
"def get_download_info(self, data_url):\n elem = (DOM.DownloadManager.download_entry_info[0],\n DOM.DownloadManager.download_entry_info[1].format(data_url))\n return self.UTILS.element.getElement(elem, \"Obtain the info for: {}\".format(data_url))",
"def show_data():",
"def print_result_info(self,result,filename):\n print ('File: %s' % filename)\n print ('Desc: %s' % result.description)\n print ('Version: %s' % result.version)\n print ('Arch: %s' % result.arch)\n print ('Platform: %s' % result.platform)\n print ('CPU: %s' % result.cpuarch)\n if hasattr(result,'sequence'):\n print ('Sequence: %s' % result.sequence)\n print ('Person: %s (%s)' % (result.person_name,result.person_id))\n result.print_summary()\n print('')",
"def details(self, packageName, get_raw=False):\n path = \"details?doc=%s\" % requests.utils.quote(packageName)\n raw_response = self.execute_request_raw(path)\n message = self.executeRequestApi2(path, raw_response=raw_response)\n if get_raw:\n return message.payload.detailsResponse, raw_response\n return message.payload.detailsResponse",
"def details(self, packageName):\n path = \"details?doc=%s\" % requests.utils.quote(packageName)\n message = self.executeRequestApi2(path)\n return message.payload.detailsResponse",
"def stats(self):\r\n\r\n self.downloads # explicitly call, so we have first/last upload data\r\n fmt = locale.nl_langinfo(locale.D_T_FMT)\r\n sep = lambda s: locale.format('%d', s, 3)\r\n val = lambda dt: dt and dt.strftime(fmt) or '--'\r\n\r\n params = (\r\n self.package_name,\r\n val(self.first_upload),\r\n self.first_upload_rel,\r\n val(self.last_upload),\r\n self.last_upload_rel,\r\n sep(len(self.releases)),\r\n sep(self.max()),\r\n sep(self.min()),\r\n sep(self.average()),\r\n sep(self.total()),\r\n )\r\n\r\n print \"\"\"PyPI Package statistics for: %s\r\n\r\n First Upload: %40s (%s)\r\n Last Upload: %40s (%s)\r\n Number of releases: %34s\r\n Most downloads: %35s\r\n Fewest downloads: %35s\r\n Average downloads: %35s\r\n Total downloads: %35s\r\n\"\"\" % params",
"def get_dldetails(package, startdate, config, enddate=False):\n if enddate:\n url = \"{}{}?start_date={}&end_date={}\".\\\n format(config['domain_base'],\n package['downloads_detail_url'],\n startdate, enddate)\n else:\n url = \"{}{}?start_date={}\".\\\n format(config['domain_base'],\n package['downloads_detail_url'],\n startdate)\n\n try:\n resp = (api_call(url, 'get', config['debug']))\n dldetails = resp.json()\n except ValueError as ex:\n abort(\"Unexpected response from packagecloud API: \"\n \"{}\".format(ex.message))\n\n return dldetails",
"def fetch_data(self):",
"def get_package_info(package_name):\n r = requests.get(f'https://api.npms.io/v2/search?q={package_name}&size=1')\n response_json = r.json()\n\n if 'results' in response_json:\n result = response_json['results'][0]\n return result['package']",
"def _pypi_details(self) -> Tuple[str, str, str, str, str]:\n json_value = pypi_fetch(self._reserved_name)\n if json_value:\n latest_version = self._parse_latest_version(json_value)\n license_value = self._get_license(json_value)\n latest_release_date = self._parse_latest_update(json_value, latest_version)\n release_count = self._parse_release_count(json_value)\n summary = self._parse_summary(json_value)\n return (\n latest_version,\n license_value,\n latest_release_date,\n release_count,\n summary,\n )\n return \"\", \"Unknown\", \"\", \"\", \"\"",
"def __processSearchResult(self, data):\n if data:\n packages = self.__transformHits(data[0])\n if packages:\n self.infoLabel.setText(self.tr(\"%n package(s) found.\", \"\",\n len(packages)))\n wrapper = textwrap.TextWrapper(width=80)\n count = 0\n total = 0\n for package in packages:\n if self.__canceled:\n self.infoLabel.setText(\n self.tr(\"Canceled - only {0} out of %n package(s)\"\n \" shown\", \"\", len(packages)).format(total))\n break\n itm = QTreeWidgetItem(\n self.resultList, [\n package['name'].strip(),\n \"{0:4d}\".format(package['score']),\n \"\\n\".join([\n wrapper.fill(line) for line in\n package['summary'].strip().splitlines()\n ])\n ])\n itm.setData(0, self.VersionRole, package['version'])\n count += 1\n total += 1\n if count == 100:\n count = 0\n QApplication.processEvents()\n else:\n QApplication.restoreOverrideCursor()\n E5MessageBox.warning(\n self,\n self.tr(\"Search PyPI\"),\n self.tr(\"\"\"<p>The package search did not return\"\"\"\n \"\"\" anything.</p>\"\"\"))\n self.infoLabel.setText(\n self.tr(\"\"\"<p>The package search did not return\"\"\"\n \"\"\" anything.</p>\"\"\"))\n else:\n QApplication.restoreOverrideCursor()\n E5MessageBox.warning(\n self,\n self.tr(\"Search PyPI\"),\n self.tr(\"\"\"<p>The package search did not return anything.\"\"\"\n \"\"\"</p>\"\"\"))\n self.infoLabel.setText(\n self.tr(\"\"\"<p>The package search did not return anything.\"\"\"\n \"\"\"</p>\"\"\"))\n \n header = self.resultList.header()\n self.resultList.sortItems(1, Qt.DescendingOrder)\n header.setStretchLastSection(False)\n header.resizeSections(QHeaderView.ResizeToContents)\n headerSize = 0\n for col in range(header.count()):\n headerSize += header.sectionSize(col)\n if headerSize < header.width():\n header.setStretchLastSection(True)\n \n self.__finish()",
"def viewdata(data):\n\n print('_' * 50)\n print('Number of Results: ' + str(data[0]['numResults']))\n print('\\nSearchURL: ' + data[0]['searchURL'])\n print('_' * 50)\n\n i = 1\n for m in data[1]:\n print(str(i) + '. ')\n for n in m:\n print(str(n) + ': ' + str(m[n]))\n i += 1\n print('\\n')",
"def getInfo():",
"def _show_summary(self):\n print 'Summary:'\n print ' Reports downloaded successfully: %d' % self.counts\n print ' Reports not downloaded: %d\\n' % self.failed",
"def populate_package(package_count: int) -> None:\n logging.info(f\"Fetching {package_count} packages\")\n response = CurlController.send_get_request(url=CONFIG.EXTERNAL_API.ALL_PACKAGES)\n get_version = False\n count = 0\n temp_dir = filestore.generate_temp_dir()\n # Local Testing\n # response_arr = ['Package: A3', 'Version: 1.0.0', 'Depends: R (>= 2.15.0), xtable, pbapply', 'Suggests: randomForest, e1071', 'License: GPL (>= 2)', 'MD5sum: 027ebdd8affce8f0effaecfcd5f5ade2', 'NeedsCompilation: no', '', 'Package: aaSEA', 'Version: 1.1.0', 'Depends: R(>= 3.4.0)', 'Imports: DT(>= 0.4), networkD3(>= 0.4), shiny(>= 1.0.5),', ' shinydashboard(>= 0.7.0), magrittr(>= 1.5), Bios2cor(>= 2.0),', ' seqinr(>= 3.4-5), plotly(>= 4.7.1), Hmisc(>= 4.1-1)', 'Suggests: knitr, rmarkdown', 'License: GPL-3', 'MD5sum: 0f9aaefc1f1cf18b6167f85dab3180d8', 'NeedsCompilation: no', '', 'Package: AATtools', 'Version: 0.0.1', 'Depends: R (>= 3.6.0)', 'Imports: magrittr, dplyr, doParallel, foreach', 'License: GPL-3', 'MD5sum: 3bd92dbd94573afb17ebc5eab23473cb', 'NeedsCompilation: no', '', 'Package: ABACUS', 'Version: 1.0.0', 'Depends: R (>= 3.1.0)', 'Imports: ggplot2 (>= 3.1.0), shiny (>= 1.3.1),', 'Suggests: rmarkdown (>= 1.13), knitr (>= 1.22)', 'License: GPL-3', 'MD5sum: 50c54c4da09307cb95a70aaaa54b9fbd', 'NeedsCompilation: no', '', 'Package: abbyyR', 'Version: 0.5.5', 'Depends: R (>= 3.2.0)', 'Imports: httr, XML, curl, readr, plyr, progress', 'Suggests: testthat, rmarkdown, knitr (>= 1.11), lintr', 'License: MIT + file LICENSE', 'MD5sum: e048a3bca6ea32126e6c367415c0bfaf', 'NeedsCompilation: no', '', 'Package: abc', 'Version: 2.1', 'Depends: R (>= 2.10), abc.data, nnet, quantreg, MASS, locfit', 'License: GPL (>= 3)', 'MD5sum: c9fffe4334c178917f762735aba59653', 'NeedsCompilation: no', '', 'Package: abc.data', 'Version: 1.0', 'Depends: R (>= 2.10)', 'License: GPL (>= 3)', 'MD5sum: 799079dbbdd0cfc9d9c61c3e35241806', 'NeedsCompilation: no', '', 'Package: ABC.RAP', 'Version: 0.9.0', 'Depends: R (>= 3.1.0)', 'Imports: graphics, stats, utils', 'Suggests: knitr, rmarkdown', 'License: GPL-3', 'MD5sum: 38c65a7251d28ef2462ee430ded95700', 'NeedsCompilation: no', '', 'Package: abcADM', 'Version: 1.0', 'Imports: Rcpp (>= 1.0.1)', 'LinkingTo: Rcpp, BH', 'License: GPL-3', 'MD5sum: 8134f67912b506194e3dab4ccd6e75f7', 'NeedsCompilation: yes', '', 'Package: ABCanalysis', 'Version: 1.2.1', 'Depends: R (>= 2.10)', 'Imports: plotrix', 'License: GPL-3', 'MD5sum: 678e03837e25a922bf71bafe1f8de617', 'NeedsCompilation: no', '', 'Package: abcdeFBA', 'Version: 0.4', 'Depends: Rglpk,rgl,corrplot,lattice,R (>= 2.10)', 'Suggests: LIM,sybil', 'License: GPL-2', 'MD5sum: c84d45a85d8ab6bbe517365e8845db83', 'NeedsCompilation: no', '', 'Package: ABCoptim', 'Version: 0.15.0', 'Imports: Rcpp, graphics, stats, utils', 'LinkingTo: Rcpp', 'Suggests: testthat, covr', 'License: MIT + file LICENSE', 'MD5sum: a62ed03650273c09899655065437078f', 'NeedsCompilation: yes', '', 'Package: ABCp2', 'Version: 1.2', 'Depends: MASS', 'License: GPL-2', 'MD5sum: e920282d5a369df71e15241be40cb60e', 'NeedsCompilation: no', '', 'Package: abcrf', 'Version: 1.8.1', 'Depends: R(>= 3.1)', 'Imports: readr, MASS, matrixStats, ranger, doParallel, parallel,', ' foreach, stringr, Rcpp (>= 0.11.2)', 'LinkingTo: Rcpp, RcppArmadillo', 'License: GPL (>= 2)', 'MD5sum: 4d5a304f46d117226791523cef4e2427', 'NeedsCompilation: yes', '', 'Package: abcrlda', 'Version: 1.0.3', 'Imports: stats', 'License: GPL-3', 'MD5sum: 651e6e18e08916b443aaf011b5a63525', 'NeedsCompilation: no', '', 'Package: abctools', 
'Version: 1.1.3', 'Depends: R (>= 2.10), abc, abind, parallel, plyr, Hmisc', 'Suggests: ggplot2, abc.data', 'License: GPL (>= 2)', 'MD5sum: c5937b65837ef7e6bfbe141cea257f40', 'NeedsCompilation: yes', '', 'Package: abd', 'Version: 0.2-8', 'Depends: R (>= 3.0), nlme, lattice, grid, mosaic', 'Suggests: boot, car, ggplot2, plyr, HH, ICC, vcd, Hmisc', 'License: GPL-2', 'MD5sum: 1913d76a0fbc44222709381f63f385b9', 'NeedsCompilation: no', '', 'Package: abdiv', 'Version: 0.2.0', 'Imports: ape', 'Suggests: testthat (>= 2.1.0), vegan', 'License: MIT + file LICENSE', 'MD5sum: 80931c0ca85ba5386000bf617552c5ce', 'NeedsCompilation: no', '', 'Package: abe', 'Version: 3.0.1', 'License: GPL (>= 2)', 'MD5sum: 9c151db5397422c8927dee41dabfbfab', 'NeedsCompilation: no', '', 'Package: abess', 'Version: 0.3.0', 'Depends: R (>= 3.1.0)', 'Imports: Rcpp, MASS, methods, Matrix', 'LinkingTo: Rcpp, RcppEigen', 'Suggests: testthat, knitr, rmarkdown', 'License: GPL (>= 3) | file LICENSE', 'MD5sum: e0ea7d068147c49c011c7135ab290bd3', 'NeedsCompilation: yes', '', 'Package: abf2', 'Version: 0.7-1', 'License: Artistic-2.0', 'MD5sum: 6792a51c6fb3e239165d69aa8a71d3cd', 'NeedsCompilation: no', '', 'Package: abglasso', 'Version: 0.1.1', 'Imports: MASS, pracma, stats, statmod', 'Suggests: testthat', 'License: GPL-3', 'MD5sum: 18bd0759cd005c5ac6fb515799b3f3d8', 'NeedsCompilation: no', '', 'Package: ABHgenotypeR', 'Version: 1.0.1', 'Imports: ggplot2, reshape2, utils', 'Suggests: knitr, rmarkdown', 'License: GPL-3', 'MD5sum: ca4397ba7390c0e0a3728c0cda864494', 'NeedsCompilation: no', '', 'Package: abind', 'Version: 1.4-5', 'Depends: R (>= 1.5.0)', 'Imports: methods, utils', 'License: LGPL (>= 2)', 'MD5sum: 136f981e1c4f618b64a87faaa7797c97', 'NeedsCompilation: no', '', 'Package: abjutils', 'Version: 0.3.1', 'Depends: R (>= 4.0)', 'Imports: dplyr, magrittr, purrr, rlang, rstudioapi, stringi, stringr,', ' tidyr', 'Suggests: testthat', 'License: MIT + file LICENSE', 'MD5sum: a596c07aaa7f82e5d123b2f7354e5b55', 'NeedsCompilation: no', '', 'Package: abmR', 'Version: 1.0.2', 'Depends: R (>= 3.5)', 'Imports: sp, rgdal, table1, googledrive, swfscMisc, geosphere,', ' kableExtra, gtsummary, ggplot2, gstat, purrr, rnaturalearth,', ' rnaturalearthdata, sf, tmap, raster, utils, stats, methods,', ' rgeos', 'Suggests: jpeg, knitr', 'License: GPL (>= 3)', 'MD5sum: cf96d']\n response_arr = response.decode(\"utf-8\").split(\"\\n\")\n with temp_dir:\n for item in response_arr:\n if count >= package_count:\n break\n if get_version:\n # Fetching the version, once we have the package name\n package_version = Command.get_package_version(item=item)\n if package_version:\n # Generating the required URL for the package to fetch the details\n package_url = Template(\n CONFIG.EXTERNAL_API.PACKAGE_DETAIL\n ).substitute(\n package_name=package_name,\n separator=\"_\",\n package_version=package_version,\n )\n logging.info(f\"Downloading {package_url}\")\n # Downloading the details of the package and extracting the DESCRIPTION file\n extract_file_path = filestore.join_paths(\n prefix=package_name,\n suffix=CONFIG.EXTERNAL_API.DETAIL_FILE_NAME,\n )\n target_dir = filestore.download_file(\n url=package_url,\n temp_dir=temp_dir,\n extract_file_path=extract_file_path,\n )\n # Reading contents of DESCRIPTION file\n package_details = filestore.join_paths(\n prefix=temp_dir.name,\n suffix=extract_file_path,\n )\n with open(package_details) as details_file:\n for line in details_file:\n if line.startswith(PackageInfoPrefix.PUBLICATION_DATE):\n publication_time_str = (\n 
Command.get_publication_timestamp(line)\n )\n publication_timestamp = (\n datetime_util.string_to_datetime(\n publication_time_str\n )\n )\n elif line.startswith(PackageInfoPrefix.TITLE):\n title = Command.get_package_title(line)\n elif line.startswith(PackageInfoPrefix.DESCRIPTION):\n description = Command.get_package_description(line)\n elif line.startswith(PackageInfoPrefix.AUTHOR):\n (\n author_name,\n author_email,\n ) = Command.get_package_author(line)\n elif line.startswith(PackageInfoPrefix.MAINTAINER):\n (\n maintainer_name,\n maintainer_email,\n ) = Command.get_package_maintainer(line)\n\n package_info_dict = {\n \"name\": package_name,\n \"version\": package_version,\n \"publication_timestamp\": publication_timestamp,\n \"title\": title,\n \"description\": description,\n \"author_name\": author_name,\n \"author_email\": author_email,\n \"maintainer_name\": maintainer_name,\n \"maintainer_email\": maintainer_email,\n }\n logging.info(package_info_dict)\n obj = PackageManager.create_object(\n create_data=package_info_dict\n )\n if obj == CONFIG.DB.FAILURE:\n raise Exception(f\"Could not insert package in DB\")\n count += 1\n get_version = False\n # Fetching the package name\n package_name = Command.get_package_name(item=item)\n if package_name:\n get_version = True",
"def _fetch_data(self):\n pass",
"def _display_results(self):\n self._display_summary()\n self._display_domain_record()\n self._display_ip_record()\n self._display_cert_details()\n self._display_ti_data()\n self._display_screenshot()\n self._display_related_alerts()\n self._display_bookmarks()\n self._display_dns_results()\n self._display_hosts()\n self._display_flows()",
"def fetch_package(self, package_name):\n\t\t\t\n\t\t\tpackage_root_url = urlparse.urljoin(self.packages_root_url,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tpackage_name + \"/\")\n\t\t\t\n\t\t\tpackage_info_url = urlparse.urljoin(package_root_url, \"info\")\n\t\t\tpackage_archive_url = urlparse.urljoin(package_root_url, \"archive\")\n\t\t\t\n\t\t\tlogger.debug(\"Get: {0}\".format(package_info_url))\n\t\t\ttry:\n\t\t\t\tinfo = json.loads(urllib2.urlopen(package_info_url).read())\n\t\t\t\treturn ups.package.Package(self, package_root_url, info)\n\t\t\texcept urllib2.HTTPError as e:\n\t\t\t\traise RepositoryError(e)\n\t\t\texcept ValueError as e:\n\t\t\t\traise RepositoryError(\"Unable to parse info file: {0}\".format(e))",
"def download(self):\n #the link has some meta data in it that we need to get a hold of so we cant use metaData.getLink()\n data = None\n\n for link in self.metaData.jsonObj['links']:\n if link.get('rel') == \"content\":\n data = link\n\n assert data is not None\n\n response = self._adapter.getRequest(data['href'], self._baseHeader)\n return {\"filename\": data['title'], \"mime\": data['type'], \"binary\": response['Body'] }",
"def download(self, args):\n\n\t\t\"\"\" Default argument for Architecture \"\"\"\n\t\tif len(args) >= 4:\n\t\t\tarch = args[3]\n\t\telse:\n\t\t\tarch = platform.processor()\n\n\t\t\"\"\" Default argument for Version \"\"\"\n\t\tif len(args) >= 3:\n\t\t\tif args[2] == \"latest\":\n\t\t\t\tversion = \"Latest\"\n\t\t\telse:\n\t\t\t\tversion = args[2]\n\t\telse:\n\t\t\tversion = \"Latest\"\n\n\t\t\"\"\" Find package path from package list, based on prev. arguments \"\"\"\n\t\tif len(args) >= 2:\n\t\t\tpackage = args[1]\n\t\t\tfilename = False\n\t\t\t\n\t\t\tversions = self.master.Dump(package)\n\t\t\tfor d in versions:\n\t\t\t\tif d[\"Version\"] == version:\n\t\t\t\t\tif d[\"Version\"] != \"Latest\" and d[\"Architecture\"] == arch:\n\t\t\t\t\t\tfilename = d[\"Filename\"]\n\t\t\t\t\telse:\n\t\t\t\t\t\tfor e in versions:\n\t\t\t\t\t\t\tif e[\"Version\"] == d[\"LatestVersion\"] and e[\"Architecture\"] == arch:\n\t\t\t\t\t\t\t\tfilename = e[\"Filename\"]\n\t\t\t\t\t\t\t\tversion = d[\"LatestVersion\"];\n\t\t\tif not filename:\n\t\t\t\tself.write_line(\"ERROR XXX: Package not found.\")\n\t\t\t\treturn\n\n\t\t\t\"\"\" Find chunks to download \"\"\"\n\t\t\tid = 0\n\t\t\tto_download = False\n\t\t\tfor f in self.torrent_info.files():\n\t\t\t\tprint(f.path.replace(\"packages/\", \"\") + \" = \" + filename);\n\t\t\t\tif f.path.replace(\"packages/\", \"\") == filename:\n\t\t\t\t\tto_download = f\n\t\t\t\t\tbreak;\n\t\t\t\tid += 1\n\t\t\tif not to_download:\n\t\t\t\tprint(\"ERROR XXX: dunno\")\n\t\t\t\treturn\n\n\t\t\t\"\"\" Set chunks priority to 7? (download max priority) \"\"\"\n\t\t\tpr = self.torrent_info.map_file(id, 0, to_download.size);\n\t\t\tn_pieces = math.ceil(pr.length / self.torrent_info.piece_length() + 1);\n\n\t\t\tfor i in range(self.torrent_info.num_pieces()):\n\t\t\t\tif i in range(pr.piece, pr.piece + n_pieces):\n\t\t\t\t\tself.handler.piece_priority(i, 7)\n\n\n\t\t\t\"\"\" Print download of package status \"\"\"\n\t\t\tself.print_status(id, pr, package, version, filename)\n\t\t\t\t\n\t\t\t\"\"\" Check the server for hash validation \"\"\"\n\t\t\tif self.valid_tpkg_file(to_download.path):\n\t\t\t\tself.write_line(\"DONE {0} {1} {2} {3}\".format(package, version, arch, self.config[\"daemon\"][\"rootdir\"] + \"/\" + to_download.path).replace('//', '/'))\n\t\t\telse:\n\t\t\t\tself.write_line(\"ERROR XXX: Hash verification failed.\")\n\t\telse:\n\t\t\tself.write_line(\"INVALID ARGUMENTS\");",
"def xnat_workflow_info_show(args):\n\trequest_url = \"http://\" + args.server + \"/data/services/workflows/workflowid/\" + args.workflow_id + \"?format=json\"\n\tprint(\"xnat_workflow_info show: request_url: \" + request_url)\n\tresponse = requests.get(request_url, auth=(args.username, args.password))\n\tif (response.status_code != 200):\n\t\tprint(\"Cannot get response from request: \" + request_url)\n\t\tsys.exit(1)\n\n\tjson_response = json.loads(response.text)\n\tjson_items = json_response['items']\n\ti = 0\n\tfor json_item in json_items:\n\t\ti = i + 1\n\t\tprint i\n\n\t\t# meta\n\t\tjson_meta = json_item['meta']\n\t\tisHistory = json_meta['isHistory']\n\t\ttype = json_meta['xsi:type']\n\t\tstart_date = json_meta['start_date']\n\n\t\tprint \" isHistory: \" + str(isHistory)\n\t\tprint \" type: \" + type\n\t\tprint \" start_date: \" + start_date\n\t\n\t\t# children\n\t\t#json_children = json_item['children']\n\t\t#print \" children\"\n\t\t#print json_children\n\n\t\t# data_fields\n\t\tjson_data_fields = json_item['data_fields']\n\t\tstatus = json_data_fields['status']\n\t\tworkflow_id = json_data_fields['wrk_workflowData_id']\n\t\tdata_type = json_data_fields['data_type']\n\t\tlaunch_time = json_data_fields['launch_time']\n\t\tExternalID = json_data_fields['ExternalID']\n\t\tpipeline_name = json_data_fields['pipeline_name']\n\t\tID = json_data_fields['ID']\n\t\n\t\tprint \" status: \" + status\n\t\tprint \" workflow_id: \" + str(workflow_id)\n\t\tprint \" data_type: \" + data_type\n\t\tprint \" launch_time: \" + launch_time\n\t\tprint \" ExternalID: \" + ExternalID\n\t\tprint \" pipeline_name: \" + pipeline_name\n\t\tprint \" ID: \" + ID\n\n\t\tprint \" All Data Fields:\"\n\t\tprint \" \" + str(json_data_fields)",
"def get_details(self):",
"def display_scraped_data(ids, names, p_links, c_links, cl_links):\n # for (p_id, p_name, p_link, ctry_link, clb_link) in zip(ids, names, p_links,\n # c_links, cl_links):\n # print(p_id, p_name, p_link, ctry_link, clb_link, sep=\" \")\n print(\"Finally we have {} IDs, {} player name, {} links of player-images, {} links of countries and {} \"\n \"links of clubs\".format(len(ids), len(names), len(p_links),\n len(c_links), len(cl_links)))",
"def get_download_info_API(request):\n update_metrics(request)\n\n session_id = request.session.session_key\n\n product_types = request.GET.get('types', 'none')\n product_types = product_types.split(',')\n\n previews = request.GET.get('previews', 'none')\n previews = previews.split(',')\n\n # since we are assuming this is coming from user interaction\n # if no filters exist then none of this product type is wanted\n if product_types == ['none'] and previews == ['none']:\n # ie this happens when all product types are unchecked in the interface\n return HttpResponse(json.dumps({'size':'0', 'count':'0'}), content_type='application/json')\n\n if previews == ['all']:\n previews = [i[0] for i in settings.image_sizes]\n\n # now get the files and download size / count for this cart\n urls = []\n from results.views import *\n files = getFiles(collection=True, session_id=session_id, fmt=\"raw\", loc_type=\"url\", product_types=product_types, previews=previews)\n download_size, count = get_download_info(files)\n\n # make pretty size string\n download_size = nice_file_size(download_size)\n\n return HttpResponse(json.dumps({'size':download_size, 'count':count}), content_type='application/json')",
"async def get_info(self, url):\n yt = youtube_dl.YoutubeDL(stim)\n down = yt.extract_info(url, download=False)\n data1 = {'queue': []}\n if 'entries' in down:\n if len(down['entries']) > 1:\n playlist_titles = [title['title'] for title in down['entries']]\n data1 = {'title': down['title'], 'queue': playlist_titles}\n\n down = down['entries'][0]['title']\n\n return down, data1",
"def display_results():\n pass"
]
| [
"0.74666506",
"0.66988295",
"0.66282576",
"0.6621933",
"0.637445",
"0.6101264",
"0.60497206",
"0.6043906",
"0.60221356",
"0.6019666",
"0.5838681",
"0.5817405",
"0.58156484",
"0.5796491",
"0.57407945",
"0.57351774",
"0.5727137",
"0.56660646",
"0.56522846",
"0.5626147",
"0.56179124",
"0.5610194",
"0.55949885",
"0.5591912",
"0.5583717",
"0.5554007",
"0.5526356",
"0.55238354",
"0.5522777",
"0.55183333"
]
| 0.7029175 | 1 |
Private method handling a details error. errorCode code of the error (integer) errorString error message (string) | def __detailsError(self, errorCode, errorString):
self.__finish()
self.__showDetailsButton.setEnabled(True)
E5MessageBox.warning(
self,
self.tr("Search PyPI"),
self.tr("""<p>Package details info could not be retrieved.</p>"""
"""<p>Reason: {0}</p>""")
.format(errorString)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_error_details(code, desc):\n MDC.put('errorCode', code)\n MDC.put('errorDescription', desc)",
"def process_error(self, id, code, error):\n raise NotImplementedError('process_error not implemented in BaseService')",
"def error_details(self, error_details):\n self._error_details = error_details",
"def error(self, text, info=None):\n self.details[\"message\"] = text\n if info:\n self.details[\"details\"] = info",
"def on_error(self, status_code, data):\n\t\tprint(\"error_code: \",status_code)",
"def error(self, code, msg):\r\n self.status = code\r\n self.status_message = str(msg)",
"def errMsg(self, code, text):\n # Preprocess text\n lines = text.splitlines()\n\n image = self.errMsgImage.copy()\n draw = ImageDraw.Draw(image)\n # Text\n x0 = self.width/4 + 2\n y0 = -1\n draw.text((x0, y0), 'ERROR {:5d}'.format(code), font=self.font, fill=255)\n for i in range(0,len(lines)):\n draw.text((x0, y0 + (i+1)*7), lines[i], font=self.font, fill=255)\n self.disp.image(image.rotate(180))\n self.disp.display()\n return",
"def test_add_error_details(self):\n self.protocol.addError(\n self.test, details=self.sample_tb_details)\n self.assertThat([\n compat._b((\"error: %s [ multipart\\n\"\n \"Content-Type: text/plain\\n\"\n \"something\\n\"\n \"F\\r\\nserialised\\nform0\\r\\n\"\n \"Content-Type: \"\n \"text/x-traceback;charset=utf8,language=python\\n\"\n \"traceback\\n\" + _remote_exception_str_chunked +\n \"]\\n\") % self.test.id()),\n compat._b((\"error: %s [ multipart\\n\"\n \"Content-Type: text/plain\\n\"\n \"something\\n\"\n \"F\\r\\nserialised\\nform0\\r\\n\"\n \"Content-Type: \"\n \"text/x-traceback;language=python,charset=utf8\\n\"\n \"traceback\\n\" + _remote_exception_str_chunked +\n \"]\\n\") % self.test.id()),\n ],\n matchers.Contains(self.io.getvalue())),",
"def _process_error(self, result):\n self.error = result\n if result['errorCode'] == 901:\n raise Exceptions.APIKeyInvalid\n elif result['errorCode'] == 902:\n raise Exceptions.APISecretInvalid\n elif result['errorCode'] == 903:\n raise Exceptions.InvalidRequestToken\n elif result['errorCode'] == 904:\n raise Exceptions.RequestTokenExpired\n elif result['errorCode'] == 905:\n raise Exceptions.InvalidAccessToken\n elif result['errorCode'] == 906:\n raise Exceptions.TokenExpired(self.access.expire)\n elif result['errorCode'] == 907:\n raise Exceptions.ParameterMissing\n elif result['errorCode'] == 908:\n raise Exceptions.ParameterNotFormatted\n elif result['errorCode'] == 909:\n raise Exceptions.FeatureNotSupported\n elif result['errorCode'] == 910:\n raise Exceptions.EndPointNotSupported\n else:\n raise Exceptions.UnknownJsonError(result)",
"def grabError(self, error): #$NON-NLS-1$\r",
"def error(message, details={}, status_code=400, exc_info=False):\n\n details['http_status_code'] = status_code\n\n logger = logging.getLogger(settings.LOGGER_ERROR)\n logger.exception(msg=message, extra=details, exc_info=exc_info)",
"def errorNumToDesc(self, errorCode):\n for t in self.ERROR_CODES:\n if t[0] == errorCode:\n try:\n return t[2]\n except IndexError:\n return \"\"",
"def _handle_error(self, err: ctypes.c_char_p, method: str) -> Exception:\n if err:\n string = ctypes.string_at(err).decode(\"utf-8\")\n self._free_error(err)\n return RuntimeError(string)\n else:\n return RuntimeError(f\"Unknown error in {method}. \")",
"def decode_error_code(err_code, s, d):\n\n config.logger.warn('Failure: %d %s %s', err_code, s, d)\n\n return {\n 0: 'Request completed successfully. No error',\n 1: 'Invalid API key',\n 2: 'Unknown Request',\n 3: 'Invalid arguements',\n 4: 'Invalid service',\n 5: 'Invalid session',\n 6: 'Insufficient bandwidth available',\n 7: 'No path between src and dst with that service type',\n 8: 'Internal VELOX error',\n 9: 'Nothing to modify',\n -1: 'Server comms error',\n }.get(err_code, 'Unknown error code')",
"def error(self, error):\n pass",
"def error(self, message_code):\n result = Result(self.msg.sms.from_number, message_code)\n self.respond(result.message, fields={'message_code': result.message_code})",
"def _rest_error(self, status_code, error_code, message):\n return {\"status_code\": status_code, \"error_code\": error_code, \"message\": message}",
"def error_code(self, obj, statusCode):\n pass",
"def present_error_massage(self, invalid_equation_code):\n print(\"Invalid equation\")\n print(self.ERROR_MASSAGE_DIC[invalid_equation_code])",
"def getError(self, status):\r\n nBuffer = 512\r\n msgBuffer = ctypes.create_string_buffer(nBuffer)\r\n # ViStatus status = Acqrs_errorMessage(ViSession instrumentID,\r\n # ViStatus errorCode, ViChar errorMessage[],ViInt32 errorMessageSize);\r\n AgDLL['Acqrs_errorMessage'](self.session, status, msgBuffer,\r\n ViInt32(nBuffer))\r\n return msgBuffer.value",
"def action_error(error_text, code):\n response = {\n ControllerConstants.ACTIVITY_STATUS: ControllerConstants.FAILED_STATUS,\n ControllerConstants.ERROR_DESC: {\n ControllerConstants.ERROR_MESSAGE: error_text,\n }\n }\n if code:\n response[ControllerConstants.ERROR_DESC][ControllerConstants.ERROR_CODE] = str(code)\n return create_response(response)",
"def identify_result_error(self, record):\n return [\"error\"]",
"def error(self, message, code='UnknownError', error_code=None, http_status=400):\n\n # Some backwards compatibility\n if error_code is not None and code == 'UnknownError':\n code = error_code\n\n self._add_message( message, self.ERROR, code=code )\n self.n_errors += 1\n self.status = 'ERROR'\n self.http_status = http_status\n self.error_code = code\n self.message = message",
"def query_error_detail(self, code, language='0'):\n api_uri = self._uri_dict.get('queryErrorDetail')\n data = {\n 'code': str(code),\n 'language': language\n }\n r_data = self._post(api_uri, data)\n return r_data",
"def input_error(self, errCode):\n errMsg = ''\n if 'A' in errCode: errMsg = errMsg + 'X column is not specified.\\n'\n if 'B' in errCode: errMsg = errMsg + 'X Column is not numeric.\\n'\n if 'C' in errCode: errMsg = errMsg + 'Y column is not specified.\\n'\n if 'D' in errCode: errMsg = errMsg + 'Y Column is not numeric.\\n'\n if 'E' in errCode: errMsg = errMsg + 'Z Column is not numeric.\\n'\n if 'F' in errCode: errMsg = errMsg + 'Calibration point 1 row is out of range.\\n'\n if 'G' in errCode: errMsg = errMsg + 'Calibration point 2 row is out of range.\\n'\n if 'H' in errCode: errMsg = errMsg + 'First row is not specified.\\n'\n if 'I' in errCode: errMsg = errMsg + 'Last row is not specified.\\n'\n if 'J' in errCode: errMsg = errMsg + 'First row is out of range.\\n'\n if 'K' in errCode: errMsg = errMsg + 'Last row is out of range.\\n'\n if 'L' in errCode: errMsg = errMsg + 'First and last rows are not compatible.\\n'\n self.wait_window(InputError(self, errMsg.rstrip('\\n')))",
"def handle_upnp_exception(self, err_code: int, err_msg: str):\n error_msg = QErrorMessage(self.__app)\n error_msg.showMessage(\"Error {}: {}\".format(err_code, err_msg))",
"def get_error_description(self, code):\n self.c.execute(\"SELECT * FROM errorcode WHERE code=%d\" % code)\n return self.c.fetchone()[1]",
"def _error(error_msg, status_code):\n return {\n 'statusCode': status_code,\n 'body': error_msg}",
"def error(self, msg, details = \"\" ):\n\n if details is not None:\n msg += \"\\n\\n\" + details\n\n if not self.is_subprocess:\n self.parser.error(msg)\n else:\n raise Exception(msg)",
"def parse_error (self, error_str):\r\n\t\t# Regex out the error and channel indices from the string\r\n\t\tob = re.match(ERROR_FORMAT, error_str)\r\n\t\t\r\n\t\t# If error_str doesn't match an error, return None\r\n\t\tif ob is None:\r\n\t\t\treturn None\r\n\t\t\r\n\t\t# Extract the two matched groups (i.e. the error and channel indices)\r\n\t\terrno,chno = ob.groups()\r\n\t\terrno = int(errno)\r\n\t\tchno = int(chno)\r\n\t\t\r\n\t\t# Get the error description; if none is defined, mark as unrecognised\r\n\t\terrdesc = self.error_desc_dict.get(errno, 'Unrecognised error code.').format(ch=chno)\r\n\t\t\r\n\t\treturn {'type':'err', 'id':errno, 'ch':chno, 'desc':errdesc, 'raw':error_str}"
]
| [
"0.66811025",
"0.65098435",
"0.64526945",
"0.6302421",
"0.62795705",
"0.6217456",
"0.620963",
"0.61615944",
"0.6157748",
"0.6041889",
"0.6012321",
"0.5997784",
"0.5996566",
"0.5996499",
"0.5980564",
"0.5967807",
"0.59412086",
"0.5934131",
"0.5933618",
"0.59326905",
"0.5930968",
"0.5921413",
"0.5905988",
"0.59009707",
"0.58956605",
"0.5895651",
"0.5878907",
"0.5845862",
"0.5832166",
"0.58317864"
]
| 0.7208053 | 0 |
This function is used for data analysis. Retrieve image information (num_entities, average linear/angular distance and timestamp). | def get_image_information(client):
pipeline = [{"$match": {"camera_views": {"$exists": 1}}}, {"$unwind": {"path": "$camera_views"}}, {"$addFields": {
"camera_views.average_linear_distance": {
"$divide": [
"$camera_views.total_linear_distance",
"$camera_views.num_entities"
]
},
"camera_views.average_angular_distance": {
"$divide": [
"$camera_views.total_angular_distance",
"$camera_views.num_entities"
]
},
"camera_views.timestamp": "$timestamp",
"camera_views._id": "$_id",
"camera_views.database": client.database.name,
"camera_views.collection": client.name,
'camera_views.file_id':"$camera_views.images.file_id", #Add the Color image id for downloading and testing
}}, {"$replaceRoot": {"newRoot": "$camera_views"}}, {"$project": {
"_id": 1,
"num_entities": 1,
"average_linear_distance": 1,
"average_angular_distance": 1,
"timestamp": 1,
"duplicate": 1,
"database":1,
"collection":1,
"file_id":{"$arrayElemAt":["$images.file_id",0]}, # Only keep the first file id (The Color image)
}}]
pprint.pprint(pipeline)
result = list(client.aggregate(pipeline))
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_image_info(image):\n image = ee.Image(image)\n image_vis = image.visualize(**{\n 'min': image_min,\n 'max': image_max,\n 'palette': image_palette\n })\n\n print(image_min, image_max)\n\n if 'hillshade' in r and r['hillshade']:\n image_vis = hillshade(image_vis,\n image.subtract(image_min).divide(ee.Image.constant(image_max).subtract(image_min)),\n True)\n\n m = image_vis.getMapId()\n\n mapid = m.get('mapid')\n token = m.get('token')\n\n url = 'https://earthengine.googleapis.com/map/{mapid}/{{z}}/{{x}}/{{y}}?token={token}'.format(\n mapid=mapid,\n token=token\n )\n\n result = {\n 'mapid': mapid,\n 'token': token,\n 'url': url\n }\n return result",
"def print_images_in_statistics(self):\n self._print_images_statistics(self._images_in_folder, self._pose_class_names)",
"def populate_image_stats(self, image):\n ti = image\n image_data = ti.data\n if not ti.data:\n return ti\n ti.size = len(image_data)\n try:\n with connect(Blobby) as c:\n ti.shahash = c.get_data_bhash(image_data)\n except o.Exception, ex:\n raise o.Exception('oException getting shahash: %s' % ex.msg)\n except Exception, ex:\n raise o.Exception('Exception getting shahash: %s' % ex)\n\n try:\n b = StringIO(image_data)\n img = Image.open(b)\n except Exception, ex:\n raise o.Exception('Exception getting PIL img: %s' % ex)\n try:\n ti.xdim, ti.ydim = img.size\n except Exception, ex:\n raise o.Exception('Exception getting dimensions: %s' % ex)\n try:\n ti.vhash = str(average_hash(img))\n except Exception, ex:\n raise o.Exception('Exception getting vhash: %s' % ex)\n\n return ti",
"def scalarInfo(img, cnt):\n\tm = cntInfo(img, cnt)\n\td = {\"perimeter\":m[\"perimeter\"], \"oreientation\":m[\"orientation\"], \"solidity\":m[\"solidity\"],\"height\":m[\"height\"], \"extent\":m[\"extent\"], \"aspect ratio\":m[\"aspect ratio\"], \"area\":m[\"area\"], \"sum intensity\":m[\"sum intensity\"], \"width\":m[\"width\"], \"equivalent diameter\": m[\"equivalent diameter\"], \"mean intensity\": m[\"mean intensity\"]}\n\treturn d",
"def getstats_fromimage(path_data, label, filename):\n path_image = get_path_image(path_data, label, filename)\n image = np.fromfile(path_image, np.float64)\n\n max_ = np.amax(image)\n min_ = np.amin(image)\n mean = np.mean(image)\n std = np.std(image)\n\n return max_, min_, mean, std",
"def _get_imganno(self, idx):\n coco = self.coco\n img_id = self.img_ids[idx]\n ann_ids = coco.getAnnIds(imgIds=img_id)\n anno = coco.loadAnns(ann_ids)\n\n anno = [\n obj for obj in anno\n if obj['iscrowd'] == 0 and obj['num_keypoints'] > 0\n ]\n\n db_rec = {}\n joints, orgsize = self._get_joints(anno, idx)\n db_rec['gt_joints'] = joints\n db_rec['im_shape'] = orgsize\n\n if self.return_bbox:\n db_rec['gt_bbox'] = self._get_bboxs(anno, idx)\n\n if self.return_class:\n db_rec['gt_class'] = self._get_labels(anno, idx)\n\n if self.return_area:\n db_rec['gt_areas'] = self._get_areas(anno, idx)\n\n if self.return_mask:\n db_rec['mask'] = self._get_mask(anno, idx)\n\n db_rec['im_id'] = img_id\n db_rec['image_file'] = os.path.join(self.img_prefix,\n self.id2name[img_id])\n\n return db_rec",
"def image_info(img):\n\tprint(img.format)\n\tprint(img.size)\n\tprint(img.mode)",
"def extract_data(filename, num_images, starting_id, context_factor):\n imgs = []\n for i in range(starting_id, num_images+starting_id):\n imageid = \"satImage_%.3d\" % i\n image_filename = filename + imageid + \".png\"\n if os.path.isfile(image_filename):\n print ('Loading ' + image_filename)\n img = mpimg.imread(image_filename)\n\n\n imgs.append(img)\n else:\n print ('File ' + image_filename + ' does not exist')\n\n num_images = len(imgs)\n IMG_WIDTH = int(imgs[0].shape[0]/DOWNSCALE)\n IMG_HEIGHT = int(imgs[0].shape[1]/DOWNSCALE)\n N_PATCHES_PER_IMAGE = (IMG_WIDTH/IMG_PATCH_SIZE)*(IMG_HEIGHT/IMG_PATCH_SIZE)\n\n\n img_patches = [img_crop_context(imgs[i], IMG_PATCH_SIZE, IMG_PATCH_SIZE,context_factor, sub_mean=True) for i in range(num_images)]\n data = [img_patches[i][j] for i in range(len(img_patches)) for j in range(len(img_patches[i]))]\n data = np.asarray(data)\n return data",
"def info_np(img):\n import numpy as np\n\n print ('Dimensions: ' + str(np.shape(img)))\n print ('Min value: ' + str(np.min(img)))\n print ('Avg value: ' + str(np.average(img)))\n print ('Med value: ' + str(np.median(img)))\n print ('Max value: ' + str(np.max(img)))\n print ('Std dev: ' + str(np.std(img)))\n print ('Sum: ' + str(np.sum(img)))",
"def info(self):\n\n print(\"pixellisation:\", self.pixel)\n print(\"number of components:\", self.ncomp)\n print(\"number of pixels:\", self.data.shape[:] if self.ncomp == 1 else self.data.shape[1:])\n print(\"nside:\", self.nside)\n print(\"geometry:\", self.geometry)\n print(\"coordinates:\", self.coordinate)",
"def cntInfo(img, cnt):\n\tpts = extremePoints(cnt)\n\troi = crop(img, pts[\"L\"][0], pts[\"T\"][1], pts[\"R\"][0], pts[\"B\"][1])\n\tm = minMaxLoc(roi)\n\tm[\"minLoc\"] = (m[\"minLoc\"][0] + pts[\"L\"][0], m[\"minLoc\"][1] + pts[\"T\"][1])\n\tm[\"maxLoc\"] = (m[\"maxLoc\"][0] + pts[\"L\"][0], m[\"maxLoc\"][1] + pts[\"T\"][1])\n\tcross = abs(pts[\"L\"][0] - pts[\"R\"][0])\n\theight = abs(pts[\"T\"][1] - pts[\"B\"][1])\n\tcent = centroid(cnt)\n\tangle = orientation(cnt)\n\tareaVal = area(cnt)\n\tper = perimeter(cnt)\n\tar = aspectRatio(cnt)\n\text = extent(cnt)\n\tsold = solidity(cnt)\n\teqD = equivalentDiameter(cnt)\n\tme = meanVal(grayscale(roi))\n\tsu = sumPixel(grayscale(roi))\n\td = {\"sum intensity\":su, \"mean intensity\":me, \"area\":areaVal, \"perimeter\":per, \"aspect ratio\":ar, \"extent\":ext,\"solidity\":sold, \"equivalent diameter\":eqD, \"width\": cross, \"height\" : height, \"centroid\" : cent, \"extrema\" : pts, \"min\":m[\"minLoc\"], \"max\":m[\"maxLoc\"], \"orientation\" : angle}\n\treturn d",
"def build_img_info(img_root):\n imgs = []\n feats = []\n K = []\n for i, name in enumerate(os.listdir(img_root)):\n if '.jpg' in name or '.JPG' in name:\n path = os.path.join(img_root, name)\n img = cv2.imread(path)\n imgs.append(img)\n feature_process = FeatureProcess(img)\n kpt, des = feature_process.extract_features()\n photo_info = PhotoExifInfo(path)\n photo_info.get_tags()\n K.append(photo_info.get_intrinsic_matrix())\n A = photo_info.get_area()\n D = photo_info.get_diam()\n feats.append({'kpt': kpt, 'des': des, 'A': A, 'D': D})\n return imgs, feats, K",
"def generate_image_info(im, params):\n im = ee.Image(im)\n\n # some images are scaled to a factor of 10.\n if params.get('scale') == 'log':\n im = im.log10()\n\n im = im.sldStyle(params.get('sld_style'))\n\n m = im.getMapId()\n\n mapid = m.get('mapid')\n token = m.get('token')\n\n url = 'https://earthengine.googleapis.com/map/{mapid}/{{z}}/{{x}}/{{y}}?token={token}'.format(\n mapid=mapid,\n token=token\n )\n\n result = {\n 'mapid': mapid,\n 'token': token,\n 'url': url\n }\n return result",
"def distance(dataset: Dataset) -> Dict[str, List[Tuple[int, float]]]:\n distances: Dict[str, List[Tuple[int, float]]] = {}\n\n images = dataset.images\n annotations = dataset.annotations\n\n for image in images:\n distances[image] = []\n image_area = img_area(image)\n\n for detection in annotations[image]:\n distances[image].append((detection.class_index, detection.bounds.area / image_area))\n\n return distances",
"def image_data_info(page):\n xObject = page['/Resources']['/XObject'].getObject()\n\n for obj_key in xObject:\n obj = xObject[obj_key]\n if obj['/Subtype'] == '/Image':\n width, height = (obj['/Width'], obj['/Height'])\n num_bytes = len(obj._data)\n density = num_bytes * 1.0 / (width * height)\n return {'width': width, 'height': height, 'size': num_bytes, 'density': density}\n\n return None",
"def get_dataset_info(model):\n instrume = model.meta.instrument.name\n frame_time = model.meta.exposure.frame_time\n ngroups = model.meta.exposure.ngroups\n group_time = model.meta.exposure.group_time\n\n n_int = model.data.shape[0]\n nreads = model.data.shape[1]\n asize2 = model.data.shape[2]\n asize1 = model.data.shape[3]\n\n # If nreads and ngroups are not the same, override the value of ngroups\n # with nreads, which is more likely to be correct, since it's based on\n # the image shape.\n if nreads != ngroups:\n log.warning('The value from the key NGROUPS does not (but should) match')\n log.warning(' the value of nreads from the data; will use value of')\n log.warning(' nreads: %s' % (nreads ))\n ngroups = nreads\n\n npix = asize2 * asize1 # number of pixels in 2D array\n imshape = (asize2, asize1)\n cubeshape = (nreads,) + imshape\n\n return nreads, npix, imshape, cubeshape, n_int, instrume, frame_time, \\\n ngroups, group_time",
"def measure_image_moments(image):\n data = image.quantity\n\n coords = image.geom.get_coord().skycoord\n x, y = coords.data.lon.wrap_at(\"180d\"), coords.data.lat\n\n A = data[np.isfinite(data)].sum()\n\n # Center of mass\n x_cms = (x * data)[np.isfinite(data)].sum() / A\n y_cms = (y * data)[np.isfinite(data)].sum() / A\n\n # Second moments\n x_var = ((x - x_cms) ** 2 * data)[np.isfinite(data)].sum() / A\n y_var = ((y - y_cms) ** 2 * data)[np.isfinite(data)].sum() / A\n x_sigma = np.sqrt(x_var)\n y_sigma = np.sqrt(y_var)\n\n return A, x_cms, y_cms, x_sigma, y_sigma, np.sqrt(x_sigma * y_sigma)",
"def print_image_info(image, resize=rsz_default, kernel=kernel_size):\n\tprint \"Image Size: {0}\".format(image.shape)\n\tprint \"Image Max: {0}\".format(image.max())\n\tprint \"Image Min: {0}\".format(image.min())\n\tprint \"Image Mean: {0}\".format(image.mean())\n\tprint \"Image dtype: {0}\\n\".format(image.dtype)\n\timage = to_uint8(image)\n\timage_prep = preprocess(image, resize=resize, kernel=kernel)\n\tcontour = get_contour(image_prep)\n\tM = get_image_moments(contour=contour)\n\tsecond_m = ['m20', 'm11', 'm02', 'm30', 'm21', 'm12', 'm03']\n\tprint \"Zero Order Moment: {0}\".format(M['m00'])\n\tprint \"First Order Moments: {0}, {1}\".format(M['m10'], M['m01'])\n\tprint \"Second Order Moments:\"\n\tsecond_m_str = ''\n\tfor m2 in second_m:\n\t\tsecond_m_str += \"{0},\".format(M[m2])\n\tprint second_m_str[:-1]",
"def get_img_info(self, idx):\n\n image = self.get_img(idx)\n img_height = image.size[0]\n img_width = image.size[1]\n\n return {\"height\": img_height, \"width\": img_width}",
"def get_num_of_images(self):",
"def get_per_image_gts_and_detections(gt_db_indexed, detection_res):\n per_image_gts = {}\n per_image_detections = {}\n\n # iterate through each image in the gt file, not the detection file\n\n for image_id, annotations in gt_db_indexed.image_id_to_annotations.items():\n # ground truth\n image_obj = gt_db_indexed.image_id_to_image[image_id]\n im_h, im_w = image_obj['height'], image_obj['width']\n\n gt_boxes = []\n gt_labels = []\n\n for gt_anno in annotations:\n # convert gt box coordinates to TFODAPI format\n gt_box_x, gt_box_y, gt_box_w, gt_box_h = gt_anno['bbox']\n gt_y_min, gt_x_min = gt_box_y / im_h, gt_box_x / im_w\n gt_y_max, gt_x_max = (gt_box_y + gt_box_h) / im_h, (gt_box_x + gt_box_w) / im_w\n gt_boxes.append([gt_y_min, gt_x_min, gt_y_max, gt_x_max])\n\n gt_labels.append(gt_anno['category_id'])\n\n per_image_gts[image_id] = {\n 'gt_boxes': gt_boxes,\n 'gt_labels': gt_labels\n }\n\n # detections\n det_image_obj = detection_res[image_id]\n\n detection_boxes = []\n detection_scores = []\n detection_labels = []\n\n for det in det_image_obj['detections']:\n x_min, y_min, width_of_box, height_of_box = det['bbox']\n y_max = y_min + height_of_box\n x_max = x_min + width_of_box\n detection_boxes.append([y_min, x_min, y_max, x_max])\n\n detection_scores.append(det['conf'])\n detection_labels.append(int(det['category']))\n\n # only include a detection entry if that image had detections\n if len(detection_boxes) > 0:\n per_image_detections[image_id] = {\n 'boxes': detection_boxes,\n 'scores': detection_scores,\n 'labels': detection_labels\n }\n\n return per_image_gts, per_image_detections",
"def print_images_out_statistics(self):\n self._print_images_statistics(self._images_out_folder, self._pose_class_names)",
"def extract_data(filename: str, directory: str) -> Dict:\n with open(filename) as f:\n lines = f.readlines()\n\n # Split data by :\n annotations = [line.replace(\" \", \"\").split(\":\") for line in lines]\n\n # Split data by ;\n for annotation in annotations:\n annotation[1] = annotation[1].split(\";\")\n\n # Loop for saving metadata into dictionary\n annot_dict = dict()\n for annotation in annotations:\n img = annotation[0]\n bbox_metadata = annotation[1]\n bbox = list()\n \n # Path to images\n img_path = os.path.join(directory, img)\n im = Image.open(img_path)\n width, height = im.size\n\n # Iterate over each bounding box\n for annot in bbox_metadata:\n \n if \"MISC_SIGNS\" == annot:\n signStatus = 'N/A'\n signTypes = \"MISC_SIGNS\"\n signPurpose = 'N/A'\n\n signBB = (-1, -1, -1, -1)\n signC = (-1, -1)\n signSize = 0\n aspectRatio = 0\n\n bbox.append({\"signStatus\": signStatus, \n \"signTypes\": signTypes, \n \"signPurpose\": signPurpose, \n \"signBB\": signBB, \n \"signC\": signC, \n \"signSize\": signSize, \n \"aspectRatio\": aspectRatio})\n elif \"\\n\" in annot:\n pass\n else:\n data = annot.split(\",\")\n \n signStatus = data[0] # signStatus\n signTypes = data[6] # signTypes\n signPurpose = data[5] # PROHIBITORY, WARNING, OTHER, INFORMATION\n tl_x, tl_y, br_x, br_y = data[3], data[4], data[1], data[2]\n \n if is_valid_decimal(tl_x):\n tl_x = float(tl_x)\n else:\n tl_x = float(cutoff_letter(tl_x))\n\n if is_valid_decimal(tl_y):\n tl_y = float(tl_y)\n else:\n tl_y = float(cutoff_letter(tl_y))\n\n if is_valid_decimal(br_x):\n br_x = float(br_x)\n else:\n br_x = float(cutoff_letter(br_x))\n\n if is_valid_decimal(br_y):\n br_y = float(br_y)\n else:\n br_y = float(cutoff_letter(br_y))\n\n if tl_x < 0:\n tl_x = 0\n elif tl_x > width:\n tl_x = width\n \n if tl_y < 0:\n tl_y = 0\n elif tl_y > height:\n tl_y = height\n \n if br_x < 0:\n br_x = 0\n elif br_x > width:\n br_x = width\n \n if br_y < 0:\n br_y = 0\n elif br_y > height:\n br_y = height\n\n signBB = (tl_x, tl_y, br_x, br_y)\n signC = (br_x + tl_x)/2, (br_y + tl_y)/2\n signSize = (br_x - tl_x) * (br_y - tl_y)\n aspectRatio = (br_x - tl_x) / (br_y - tl_y)\n\n bbox.append({\"signStatus\": signStatus, \n \"signTypes\": signTypes, \n \"signPurpose\": signPurpose, \n \"signBB\": signBB, \n \"signC\": signC, \n \"signSize\": signSize, \n \"aspectRatio\": aspectRatio})\n \n \n annot_dict[img_path] = bbox\n return annot_dict",
"def _get_annotation(self, image_id):\n annotation_file = self.image_sets_dir / f'{image_id}.xml'\n objects = ET.parse(annotation_file).findall('object')\n boxes = []\n labels = []\n is_difficult = []\n for obj in objects:\n class_name = obj.find('name').text.lower().strip()\n if class_name in self.class_dict:\n bbox = obj.find('bndbox')\n\n x0 = float(bbox.find('xmin').text) - 1\n y0 = float(bbox.find('ymin').text) - 1\n x1 = float(bbox.find('xmax').text) - 1\n y1 = float(bbox.find('ymax').text) - 1\n boxes.append([x0, y0, x1, y1])\n\n labels.append(self.class_dict[class_name])\n\n is_difficult_str = obj.find('difficult').text\n is_difficult.append(int(is_difficult_str) if is_difficult_str else 0)\n\n return (np.array(boxes, dtype=np.float32),\n np.array(labels, dtype=np.int64),\n np.array(is_difficult, dtype=np.uint8))",
"def get_instance_image_info(task):\n ctx = task.context\n node = task.node\n image_info = {}\n # NOTE(pas-ha) do not report image kernel and ramdisk for\n # local boot or whole disk images so that they are not cached\n if (node.driver_internal_info.get('is_whole_disk_image')\n or deploy_utils.get_boot_option(node) == 'local'):\n return image_info\n root_dir = get_http_boot_dir()\n i_info = node.instance_info\n labels = ('kernel', 'ramdisk')\n d_info = deploy_utils.get_image_instance_info(node)\n if not (i_info.get('kernel') and i_info.get('ramdisk')):\n glance_service = service.GlanceImageService(context=ctx)\n iproperties = glance_service.show(d_info['image_source'])['properties']\n for label in labels:\n i_info[label] = str(iproperties[label + '_id'])\n node.instance_info = i_info\n node.save()\n\n for label in labels:\n image_info[label] = (\n i_info[label],\n os.path.join(root_dir, node.uuid, label)\n )\n\n return image_info",
"def GetMetadata(IMAGE):\n SPACING = IMAGE.GetSpacing()\n ORIGIN = IMAGE.GetOrigin()\n DIRECTION = IMAGE.GetDirection()\n METADATA = [SPACING,ORIGIN,DIRECTION]\n return METADATA",
"def img_series_stats(image_ccd_lst,plots_path,obsdate):\n median_count = []\n mean_count = []\n \n source_hdu = CCDData(image_ccd_lst[0],unit='adu')\n source_image_data = source_hdu.data.astype(float) \n source_image_hdr = source_hdu.header\n target_name = source_image_hdr['FIELD'].strip(' ')\n exptime = source_image_hdr['EXPTIME']\n chip_num = source_image_hdr['CHIP']\n \n for a_file in image_ccd_lst:\n hdu = CCDData(a_file,unit='adu')\n image_data = hdu.data.astype(float) \n image_hdr = hdu.header\n \n median_count.append(np.median(a_file))\n mean_count.append(np.mean(a_file))\n \n min_count_for_median = np.min(median_count)\n min_count_for_mean = np.min(mean_count)\n max_count_for_median = np.max(median_count)\n max_count_for_mean = np.max(mean_count)\n \n plt.figure()\n plt.plot(mean_count, label='mean',color=\"palevioletred\")\n plt.axhline(y=min_count_for_mean,linestyle='-',linewidth=0.5,color='blue',label='min mean {:.2f}'.format(min_count_for_mean),alpha=1)\n plt.axhline(y=max_count_for_mean,linestyle='-',linewidth=0.5,color='blue',label='max mean {:.2f}'.format(max_count_for_mean),alpha=1)\n plt.xlabel('Image number')\n plt.ylabel('Count (ADU)')\n plt.title('Mean pixel value for aligned images')\n plt.legend()\n plt.grid()\n plt.savefig(plots_path/\"{}-{}-{}-aligned_stats_mean.jpg\".format(obsdate,\n target_name,\n exptime,chip_num),\n dpi=900)\n plt.show()\n\n plt.figure()\n plt.plot(median_count, label='median',color=\"darkviolet\")\n plt.axhline(y=min_count_for_median,linestyle='-',linewidth=0.5,color='red',label='min median {:.2f}'.format(min_count_for_median),alpha=1)\n plt.axhline(y=max_count_for_median,linestyle='-',linewidth=0.5,color='red',label='max median {:.2f}'.format(max_count_for_median),alpha=1) \n plt.xlabel('Image number')\n plt.ylabel('Count (ADU)')\n plt.title('Median pixel value for aligned images')\n plt.legend()\n plt.grid()\n plt.savefig(plots_path/\"{}-{}-{}-aligned_stats_median.jpg\".format(obsdate,\n target_name,\n exptime,chip_num),\n dpi=900)\n plt.show()",
"def _inst_get_img_info_from_db(self):\r\n docs = self._mongo_api.inst_get_no_replied_data()\r\n if docs is None:\r\n print(\"No replied documents in instagram collection...\")\r\n else:\r\n for doc in docs:\r\n obj_id = doc['_id']\r\n post_id = doc['post_id']\r\n full_name = doc['full_name']\r\n img_url = doc['img_url']\r\n\r\n img = self._download_img_from_url(img_url)\r\n img = self._cnn_model.preprocess_img(img)\r\n prediction = self._cnn_model.predict(img)\r\n\r\n if self._inst_reply_post(post_id=post_id, full_name=full_name, prediction=prediction):\r\n self._mongo_api.inst_update_doc_after_replied(obj_id)\r\n print(\"Instagram post have answered...\")\r\n print(\"Instagram post have updated...\")\r\n else:\r\n print(\"Instagram post haven't replied...\")",
"def _load_image_set_index(self):\n image_index = self._load_annotations().keys()\n return image_index",
"def draw_info(self, img, age_threshold=8):\n self.n_vehicles = 0\n for detection in self.detections:\n if len(detection.last_boxes) > age_threshold:\n self.n_vehicles += 1\n img = detection.draw(img, thick=2, color=(255, 50, 0))\n\n cv2.putText(img, 'Vehicles in sight: %s' % self.n_vehicles, (50, 150), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (255, 255, 255), 2)\n\n return img"
]
| [
"0.59846336",
"0.596467",
"0.58719707",
"0.57998466",
"0.5790845",
"0.57607836",
"0.57037693",
"0.5697984",
"0.5685285",
"0.56846684",
"0.5681115",
"0.56708336",
"0.56348175",
"0.56285685",
"0.5623849",
"0.56236106",
"0.5615736",
"0.5592469",
"0.5587834",
"0.55773443",
"0.5556258",
"0.554248",
"0.55210584",
"0.5516453",
"0.5515146",
"0.5509925",
"0.54816335",
"0.54803514",
"0.54756856",
"0.54661757"
]
| 0.6152008 | 0 |
Private helper for setting the report date. | def __findReportDate(self):
dateList = ConfigHelper.parseConfigList(self.props.get('fereport',
'dateList'))
today = datetime.date.today()
for dateStr in dateList:
mon, day, year = dateStr.split('/')
nextDate = datetime.date(int(year), int(mon), int(day))
if nextDate >= today:
break
self.reportDate['mon'] = mon
self.reportDate['day'] = day
self.reportDate['year'] = year
self.startDate = nextDate.strftime('%m/%d/%Y')
self.endDate = nextDate.strftime('%m/%d/%Y') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_date(self, date):\n self.date = date",
"def set_date(self, date):\n self.date = date\n return",
"def get_fw_date(self, rec, report):\n rec.VAL = self.crate.mch_fw_date[self.slot]",
"def _date(self, _date):\n\n self.__date = _date",
"def _date(self, _date):\n\n self.__date = _date",
"def setDate(self, p_int, p_int_1, p_int_2): # real signature unknown; restored from __doc__\r\n return False",
"def _set_date_times(self):\n if self._report_key in (ReportTypes.SEARCH_DETAIL_REPORT, ReportTypes.SEARCH_BODY_REPORT):\n self._report_data['searchDateTime'] = Report._to_report_datetime(self._report_data['searchDateTime'])\n if self._report_data['totalResultsSize'] > 0:\n for detail in self._report_data['details']:\n detail['createDateTime'] = Report._to_report_datetime(detail['createDateTime'])\n if detail.get('declaredDateTime'):\n detail['declaredDateTime'] = Report._to_report_datetime(detail['declaredDateTime'], False)\n declared_value = str(detail['declaredValue'])\n if declared_value.isnumeric() and declared_value != '0':\n detail['declaredValue'] = '$' + '{:0,.2f}'.format(float(declared_value))\n else:\n detail['declaredValue'] = ''\n if detail.get('description') and detail['description'].get('engineerDate'):\n if detail['description']['engineerDate'] == '0001-01-01':\n detail['description']['engineerDate'] = ''\n else:\n detail['description']['engineerDate'] = \\\n Report._to_report_datetime(detail['description']['engineerDate'], False)\n else:\n detail['description']['engineerDate'] = ''\n if detail.get('location') and detail['location'].get('taxExpiryDate'):\n detail['location']['taxExpiryDate'] = \\\n Report._to_report_datetime(detail['location']['taxExpiryDate'], False)\n elif self._report_key == ReportTypes.MHR_REGISTRATION:\n reg = self._report_data\n reg['createDateTime'] = Report._to_report_datetime(reg['createDateTime'])\n if reg.get('description') and reg['description'].get('engineerDate'):\n if reg['description']['engineerDate'] == '0001-01-01':\n reg['description']['engineerDate'] = ''\n else:\n reg['description']['engineerDate'] = \\\n Report._to_report_datetime(reg['description']['engineerDate'], False)\n else:\n reg['description']['engineerDate'] = ''\n if reg.get('location') and reg['location'].get('taxExpiryDate'):\n reg['location']['taxExpiryDate'] = Report._to_report_datetime(reg['location']['taxExpiryDate'], False)\n elif self._report_key in (ReportTypes.MHR_TRANSFER, ReportTypes.MHR_EXEMPTION,\n ReportTypes.MHR_TRANSPORT_PERMIT, ReportTypes.MHR_NOTE,\n ReportTypes.MHR_ADMIN_REGISTRATION):\n reg = self._report_data\n reg['createDateTime'] = Report._to_report_datetime(reg['createDateTime'])\n if reg.get('declaredValue'):\n declared_value = str(reg['declaredValue'])\n if declared_value.isnumeric() and declared_value != '0':\n reg['declaredValue'] = '$' + '{:0,.2f}'.format(float(declared_value))\n else:\n reg['declaredValue'] = ''\n if reg.get('transferDate'):\n reg['transferDate'] = Report._to_report_datetime(reg['transferDate'], False)\n if self._report_key == ReportTypes.MHR_TRANSPORT_PERMIT and reg.get('newLocation'):\n reg['location'] = reg.get('newLocation')\n if reg.get('location') and reg['location'].get('taxExpiryDate'):\n reg['location']['taxExpiryDate'] = Report._to_report_datetime(reg['location']['taxExpiryDate'],\n False)",
"def set_harvest_date(self, date):\n if not date:\n return datetime.utcnow().strftime(\"%Y/%m/%d\")\n return datetime.strptime(date, \"%Y%m%d\").strftime(\"%Y/%m/%d\")",
"def set_date(self, date):\n self.date = self.date_to_local(date)\n # ephem deals only in UTC\n self.site.date = ephem.Date(self.date_to_utc(self.date))",
"def _get_report_date(self):\n if self._report_key in (ReportTypes.SEARCH_DETAIL_REPORT, ReportTypes.SEARCH_TOC_REPORT,\n ReportTypes.SEARCH_BODY_REPORT):\n return self._report_data['searchDateTime']\n return self._report_data['createDateTime']",
"def set_date(self, date):\n self.data['date'] = date",
"def set_document_date(self, date):\n self.set_value_into_input_field(self.document_date_text_field_locator, date)",
"def set_to_date(self):\n self.set_value_into_input_field(self.set_to_date_locator, self.get_current_date())",
"def date(self, date):\n self.value = date.strftime(\"%Y-%m-%d\") if date else \"\"",
"def date(self, date):\n self._date = date",
"def _fill_date(self):\n if not self.date['year']:\n self.date['year'] = self.DEFAULT_DATE['year']\n if not self.date['month']:\n self.date['month'] = self.DEFAULT_DATE['month']\n if not self.date['day']:\n self.date['day'] = self.DEFAULT_DATE['day']",
"def setDateAsString(self, *args):\n return _libsbml.Date_setDateAsString(self, *args)",
"def date(self, value):\n self.date_value = value",
"def date(self, new_date):\n self._date.date = new_date",
"def set_start_date(self, date):\n pass",
"def issue_date(self, value):\n self._issue_date = parse(value).date() if isinstance(value, type_check) else value",
"def setEvaluationDate(cell):\n global _qToday\n \n _qToday = toDate(cell.value)\n if not to_date:\n _qToday = Settings.instance().getEvaluationDate()\n else:\n Settings.instance().setEvaluationDate(_qToday)\n \n return _qToday.ISO()",
"def set_datetime(self, date):\n self.date = date",
"def date(self, date):\n\n self._date = date",
"def date(self, date):\n\n self._date = date",
"def date(self, date):\n\n self._date = date",
"def date(self, date):\n\n self._date = date",
"def date(self, date):\n\n self._date = date",
"def settlement_date(self, value):\n if value:\n self._settlement_date = (\n parse(value).date() if isinstance(value, type_check) else value\n )",
"def set_end_date(self, date):\n pass"
]
| [
"0.68500936",
"0.6819773",
"0.67678994",
"0.6745662",
"0.6745662",
"0.67073405",
"0.67064464",
"0.66813374",
"0.6613077",
"0.6609463",
"0.6539059",
"0.6511554",
"0.6468202",
"0.64567244",
"0.64510125",
"0.6412121",
"0.6394894",
"0.63170654",
"0.6302688",
"0.6285003",
"0.62818533",
"0.62655747",
"0.6204512",
"0.618188",
"0.618188",
"0.618188",
"0.618188",
"0.618188",
"0.61144286",
"0.61117035"
]
| 0.71019334 | 0 |
Request and save a CSV export of the report | def dlCsvReport(self):
requestElems = {'xf': 'csv'}
requestElems.update(self.getReportConfig())
csvdata = self.sendRequest(self.reportFormURL, self.fileOpener,
requestElems, 'POST').read()
self.writeExportFile('csv', csvdata) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def download_report():\n entities = get_names()\n save_csv(entities)",
"def export_csv(self):\n outputfile = tkinter.filedialog.asksaveasfilename(\n defaultextension=\".csv\",\n filetypes=((\"comma seperated values\", \"*.csv\"),\n (\"All Files\", \"*.*\")))\n if outputfile:\n tabledata = self.tabs.window.aistracker.create_table_data()\n export.write_csv_file(tabledata, outputfile)\n else:\n raise ExportAborted('Export cancelled by user.')",
"def generate_dataset_csv(request):\n\n response = csv_export(request,Dataset)\n return response",
"def export(self,**kwargs):\n \n # import pdb;pdb.set_trace()\n \n # provide for case where recs are set extenally\n if not self.recs:\n self.select_recs(**kwargs)\n if self.recs:\n if self.export_file_name:\n filename = self.export_file_name\n else:\n filename = \"{table_name}_report_{datetime}.csv\".format(\n table_name = self.table.display_name,\n datetime = date_to_string(local_datetime_now(),'iso_datetime'),\n ).replace(' ','_').lower()\n \n if not self.export_fields:\n # include all fields by default\n self.export_fields = self._set_default_list_fields(include_all=True).copy()\n\n self.set_list_fields(self.export_fields)\n \n \n if self.export_template:\n result = render_template(self.export_template, data=self)\n else:\n # add a descriptive title row\n if self.export_title:\n result = self.export_title.strip() + '\\n'\n else:\n result = \"Export of table {} as of {}\\n\".format(self.table.table_name,excel_date_and_time_string(local_datetime_now()))\n \n result += ','.join([x['label'] for x in self.export_fields]) + '\\n'\n for rec in self.recs:\n rec_row = []\n for field in self.export_fields:\n data = rec.__getattribute__(field['name'])\n if field['type'].upper() == \"DATE\":\n data = local_date_string(data)\n elif field['type'].upper() == \"DATETIME\":\n data = excel_date_and_time_string(data)\n else:\n # just text\n data = str(data).strip()\n \n # replace double quotes with double-double quotes\n data = data.replace('\"','\"\"') #double up on double quotes\n \n if \",\" in data:\n # if any commas, wrap in quotes\n data = '\"' + data + '\"'\n \n #replace returns\n data = data.replace('\\r\\n',' -crnl- ')\n data = data.replace('\\n',' -nl- ')\n data = data.replace('\\r',' -rtn- ')\n\n rec_row.append(data)\n \n result += ','.join([str(x) for x in rec_row]) + '\\n'\n \n return DataStreamer(result,filename,'text/csv').send()\n \n self.result_text = \"No records selected\"\n self.success = False\n \n flash(self.result_text)\n return self.list(**kwargs)",
"def csv_report(request):\r\n if not _can_download_report(request.user):\r\n return HttpResponseForbidden(_('You do not have permission to view this page.'))\r\n\r\n if request.method == 'POST':\r\n start_date = request.POST.get('start_date', '')\r\n end_date = request.POST.get('end_date', '')\r\n start_letter = request.POST.get('start_letter', '')\r\n end_letter = request.POST.get('end_letter', '')\r\n report_type = request.POST.get('requested_report', '')\r\n try:\r\n start_date = _get_date_from_str(start_date) + datetime.timedelta(days=0)\r\n end_date = _get_date_from_str(end_date) + datetime.timedelta(days=1)\r\n except ValueError:\r\n # Error case: there was a badly formatted user-input date string\r\n return _render_report_form(start_date, end_date, start_letter, end_letter, report_type, date_fmt_error=True)\r\n\r\n report = initialize_report(report_type, start_date, end_date, start_letter, end_letter)\r\n items = report.rows()\r\n\r\n response = HttpResponse(mimetype='text/csv')\r\n filename = \"purchases_report_{}.csv\".format(datetime.datetime.now(pytz.UTC).strftime(\"%Y-%m-%d-%H-%M-%S\"))\r\n response['Content-Disposition'] = 'attachment; filename=\"{}\"'.format(filename)\r\n report.write_csv(response)\r\n return response\r\n\r\n elif request.method == 'GET':\r\n end_date = datetime.datetime.now(pytz.UTC)\r\n start_date = end_date - datetime.timedelta(days=30)\r\n start_letter = \"\"\r\n end_letter = \"\"\r\n return _render_report_form(start_date.strftime(\"%Y-%m-%d\"), end_date.strftime(\"%Y-%m-%d\"), start_letter, end_letter, report_type=\"\")\r\n\r\n else:\r\n return HttpResponseBadRequest(\"HTTP Method Not Supported\")",
"def export(request):\n\n if not request.user.is_authenticated():\n return HttpResponseRedirect('/login/?next=%s' % request.path)\n\n filename = 'export-inscripcions-tallers-%s.csv' % date.today().strftime(\"%y-%m-%d\")\n\n regtaller_list = TallerRegistration.objects.all()\n\n table = ExportTallerRegistrationTable(regtaller_list)\n table.order_by = request.GET.get(\"sort\",'last_name')\n\n response = HttpResponse(mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=%s' % filename\n writer = csv.writer(response)\n # Write headers to CSV file\n headers = []\n for column in table.columns:\n headers.append(column.header.encode('utf8'))\n writer.writerow(headers)\n\n # Write data to CSV file\n for obj in table.rows:\n row = []\n for value in obj:\n if isinstance(value, basestring):\n row.append(value.encode('utf8'))\n else:\n row.append(value)\n writer.writerow(row)\n\n # Return CSV file to browser as download\n return response",
"def on_export_button(self, event):\n wildcard = \"Filtered _iso_res_filt.csv file (*_iso_res_filt.csv)|*_iso_res_filt.csv|\"\\\n \"All files (*.*)|*.*|\"\n defFile = self.datafile[:-4]+'_filt.csv'\n dlg = wx.FileDialog(\n self, message=\"Save file as ...\", \n defaultDir=self.currentDirectory, \n defaultFile=defFile, wildcard=wildcard, style=wx.SAVE\n )\n if dlg.ShowModal() == wx.ID_OK:\n path = dlg.GetPath()\n self.recalcAll()\n self.redrawAll()\n self.dataFrame['priorFilter'] = self.dataFrame['allFPass']\n self.dataFrame.to_csv(path, index=False)\n summaryCSVPath = path.split('.')[0] + '_median_[' + ''.join(self.calcNum) + ']_[' + ''.join(self.calcDen) + '].csv'\n self.writeSummaryCSV(summaryCSVPath)\n \n dlg.Destroy()",
"def __openAndInitCSVFile(self, modelInfo):\n # Get the base path and figure out the path of the report file.\n basePath = self.__outputDirAbsPath\n\n # Form the name of the output csv file that will contain all the results\n reportCSVName = \"%s_Report.csv\" % (self.__outputLabel,)\n reportCSVPath = self.__reportCSVPath = os.path.join(basePath, reportCSVName)\n\n # If a report CSV file already exists, back it up\n backupCSVPath = None\n if os.path.exists(reportCSVPath):\n backupCSVPath = self.__backupCSVPath = _backupFile(reportCSVPath)\n\n\n # Open report file\n if self.__replaceReport:\n mode = \"w\"\n else:\n mode = \"a\"\n csv = self.__csvFileObj = open(reportCSVPath, mode)\n\n # If we are appending, add some blank line separators\n if not self.__replaceReport and backupCSVPath:\n print >> csv\n print >> csv\n\n # Print the column names\n print >> csv, \"jobID, \",\n print >> csv, \"modelID, \",\n print >> csv, \"status, \" ,\n print >> csv, \"completionReason, \",\n print >> csv, \"startTime, \",\n print >> csv, \"endTime, \",\n print >> csv, \"runtime(s), \" ,\n print >> csv, \"expDesc, \",\n print >> csv, \"numRecords, \",\n\n for key in self.__sortedVariableNames:\n print >> csv, \"%s, \" % key,\n for key in self.__sortedMetricsKeys:\n print >> csv, \"%s, \" % key,\n print >> csv",
"def download_report(self, path: str):\n\n report_file = tempfile.NamedTemporaryFile(\n mode='w+b', suffix='.csv', delete=False\n )\n\n report_query = {\n 'dimensions': self.dimensions,\n 'columns': self.metrics,\n 'dateRangeType': 'CUSTOM_DATE',\n 'startDate': self.date_from,\n 'endDate': self.date_to,\n 'timeZoneType': self.timezone\n }\n\n if self.dimension_attributes:\n report_query['dimensionAttributes'] = self.dimension_attributes\n\n if self.ad_unit_view:\n report_query['adUnitView'] = self.ad_unit_view\n\n if self.currency:\n report_query['adxReportCurrency'] = self.currency\n\n report_job = {\n 'reportQuery': report_query\n }\n\n print(\"[INFO]: Create the report\")\n report_job_id = self.create_report(report_job)\n\n print(\"[INFO]: Download the report\")\n self.report_downloader.DownloadReportToFile(\n report_job_id=report_job_id,\n export_format='CSV_DUMP',\n outfile=report_file,\n use_gzip_compression=False\n )\n\n report_file.close()\n print(\n f\"[INFO]: Report downloaded to temporary file {report_file.name}\"\n )\n\n self.write_to_file(report_file.name, path)",
"def save_csv_file():\n global output_on_display, import_lst, column_names, data\n if data_base == '':\n mistake_load_table()\n else:\n column_names = data[0]\n save_name = asksaveasfilename(title=\"Select file\", filetypes=((\"CSV\", \"*.csv\"), (\"all files\", \"*.*\")),\n confirmoverwrite=True, defaultextension='.csv')\n step = len(column_names)\n data_csv = import_lst\n if len(data_csv[0]) == step:\n pass\n else:\n data_csv = import_lst[step::]\n\n with open(save_name, 'w+') as csv_file:\n csv_writer = csv.writer(csv_file)\n csv_writer.writerow(column_names)\n csv_writer.writerows(data_csv)",
"def to_csv(self, dataset):\n save_as = filedialog.asksaveasfilename(defaultextension='.csv')\n try:\n with open(save_as, 'w', newline='') as file:\n scribe = csv.writer(file)\n scribe.writerow(HEADERS)\n for row in dataset:\n scribe.writerow(row.values())\n self.info_success(save_as)\n except IOError:\n self.info_error()\n return",
"def covid_export(request):\n print(\"...In Exporting to a new file...\")\n covid = CovidCase.objects.all()\n\n # for c in covid:\n # print(c.country_id)\n\n my_reader = DataSetReader()\n\n new_file = my_reader.writeFile(covid)\n\n print(\"New file has been exported at location: {}\".format(new_file))\n\n return redirect('/covid/list')",
"def export(request):\n\n if not request.user.is_authenticated():\n return HttpResponseRedirect('/login/?next=%s' % request.path)\n\n filename = 'export-inscrits%s.csv' % date.today().strftime(\"%y-%m-%d\")\n\n person_list = Person.objects.all()\n\n table = ExportPersonTable(person_list)\n table.order_by = request.GET.get(\"sort\",'last_name')\n\n response = HttpResponse(mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=%s' % filename\n writer = csv.writer(response)\n # Write headers to CSV file\n headers = []\n for column in table.columns:\n headers.append(column.header.encode('utf8'))\n writer.writerow(headers)\n\n # Write data to CSV file\n for obj in table.rows:\n row = []\n for value in obj:\n row.append(value.encode('utf8'))\n writer.writerow(row)\n\n # Return CSV file to browser as download\n return response",
"def csv_download_view(request):\n logging.info(\" CSV file download is working\")\n now = datetime.now()\n timestamp = now.strftime(\"%Y_%m_%d\")\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"results_' + \\\n GLOBAL_VARIABLE.get_host_name()+'_'+timestamp+'.csv\"'\n\n writer = csv.writer(response)\n list_of_cd = list(GLOBAL_VARIABLE.get_current_data())\n\n for i in range(10):\n rows = [sub_list[i] for sub_list in list_of_cd]\n writer.writerow(rows)\n\n return response",
"def save_csv(self, filename): # DONE\n self.data.to_csv(filename)",
"def write_csv(self):\n self.tableView.df.to_csv('Data export.csv', index=False)\n print('CSV file exported')",
"def exportCSV(self, log, csvFile):\n return 0",
"def _downloadDataFile(self):\n config = SiteConfiguration.objects.get()\n\n with requests.Session() as s:\n # Authentication\n data = {\n 'identificationBean.identifiant': '{}'.format(config.login),\n 'identificationBean.mdp': '{}'.format(config.password),\n 'userName': '{}'.format(config.username)\n }\n url = 'http://extranet.ffbb.com/fbi/identification.do'\n s.post(url, data=data)\n\n # Create filters\n params = (\n ('action', 'executeCsv'),\n ('rechercherRencontreSaisieResultatBean.idDivision', ''),\n ('rechercherRencontreSaisieResultatBean.rechercherEquipe2', 'O'),\n ('rechercherRencontreSaisieResultatBean.dateDebutRencontre', ''),\n ('rechercherRencontreSaisieResultatBean.dateFinRencontre', ''),\n ('rechercherRencontreSaisieResultatBean.idPoule', ''),\n ('rechercherRencontreSaisieResultatBean.numeroEquipe', ''),\n )\n\n # Get Csv file\n url = 'http://extranet.ffbb.com/fbi/rechercherCompetitionRencontre.do'\n response = s.get(url, params=params)\n\n if(response.headers['content-type'] != 'application/ms-excel;charset=UTF-8'):\n return False\n\n # Create the file\n if response.status_code == 200:\n os.makedirs(os.path.dirname(settings.DATA_PATH), exist_ok=True)\n with open(settings.DATA_PATH, 'wb') as f:\n for chunk in response:\n f.write(chunk)\n\n return True",
"def _export(self, report_type):\n model = self.env['report_trial_balance_contabilidad_cfdi']\n report = model.create(self._prepare_report_trial_balance())\n report.compute_data_for_report()\n return report.print_report(report_type)",
"def csv_export(self,\n states=None,\n fields=None,\n filenamebase='projects',\n delimiter=',',\n newline='\\r\\n',\n ):\n \n if fields is None:\n fields = self.fields()\n \n out = StringIO()\n out.write(delimiter.join(fields) + newline)\n\n for project in self.data():\n values = []\n for field in project:\n text = field['text']\n if type(text) is UnicodeType:\n text = text.encode('utf8')\n value = CSV_TEMPLATE % text\n values.append(value)\n out.write(delimiter.join(values) + newline)\n \n value = out.getvalue()\n out.close()\n\n timestamp = datetime.today().strftime(\"%Y%m%d%H%M\")\n filename = filenamebase + timestamp + '.csv'\n \n self.request.RESPONSE.setHeader('Content-Type', 'application/x-msexcel')\n self.request.RESPONSE.setHeader(\"Content-Disposition\", \n \"inline;filename=%s\"%filename)\n\n return value",
"def test_export(self):\n response = self.client.get('%s?export' % reverse('users_report'))\n self.assertEqual(\n response['Content-Disposition'],\n 'attachment; filename=users.csv'\n )\n self.assertEqual(\n response['Content-Type'],\n 'text/csv'\n )\n data = import_set(response.content)\n # There should be at least the header row and one user row\n self.assertGreater(data.height, 2)\n self.assertEqual(data.width, 14)",
"def onExport(self, event):\n dlg = wx.FileDialog(self, wildcard=\"*.csv\", style=wx.SAVE)\n if dlg.ShowModal() == wx.ID_OK:\n path = dlg.GetPath()\n self.model.exportAssignments(path)\n dlg.Destroy()",
"def save_csv(self):\n if not self.__is_csv():\n # creates the csv file if it did not exist.\n self.__create_csv()\n try:\n with open(self.__csv_file_name, 'a', newline='', encoding='utf-8') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=self.__csv_fields, delimiter=';')\n writer.writerow(self.__values)\n except IOError: # this exception avoid a product does not have saved in csv file\n time.sleep(0.5)\n self.save_csv()\n # display on the screen what is being record on csv\n for key, value in self.__values.items():\n print('{}: {}'.format(key, value), end='; ' if key != 'url' else '\\n')",
"def save_report_data(results):\n if os.path.isfile(FEED_DATA_FILE):\n pass\n\n csv_file = open(FEED_DATA_FILE, 'wt', encoding='utf-8')\n writer = csv.writer(csv_file, lineterminator='\\n')\n\n for report in results.get('reports', []):\n column_header = report.get('columnHeader', {})\n dimension_headers = column_header.get('dimensions', [])\n metric_headers = column_header.get(\n 'metricHeader', {},\n ).get('metricHeaderEntries', [])\n rows = report.get('data', {}).get('rows', [])\n\n header_row = []\n header_row.extend(dimension_headers)\n header_row.extend([mh['name'] for mh in metric_headers])\n\n logger.debug(header_row)\n writer.writerow(header_row)\n\n for row in rows:\n dimensions_data = row.get('dimensions', [])\n access_date = ''.join(dimensions_data[0])\n _date: date = datetime.strptime(access_date, '%Y%m%d').date()\n metrics_data = [m['values'] for m in row.get('metrics', [])][0]\n\n data_row: List[str] = [str(_date)]\n data_row.extend(metrics_data)\n logger.debug(data_row)\n writer.writerow(data_row)\n\n # Close the file.\n csv_file.close()",
"def export_csv(self, outpath):\n\n\t\tself.df.to_csv(outpath)",
"def download_bank_details(request):\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"bank_details.csv\"'\n\n writer = csv.writer(response)\n \n writer.writerow([\n 's/n',\n 'account_number',\n 'account_name',\n 'recipient_code',\n 'bank_name',\n 'student_name',\n 'date_added'\n ])\n \n count = 0\n for bank in StudentBankDetail.objects.filter(month=batch_date):\n count +=1\n writer.writerow([\n count,\n str(bank.account_number),\n str(bank.account_name),\n str(bank.recipient_code),\n str(bank.bank.bank_name),\n str(bank.student.name),\n datetime.strftime(bank.date_added, '%d-%m-%Y')\n ])\n \n\n\n return response",
"def export_repayment_csv(request):\n import csv\n from django.utils.encoding import smart_str\n # response = HttpResponse(content_type='text/csv')\n # response['Content-Disposition'] = 'attachment; filename=Repayment_report.csv'\n from_date = request.GET.get('from_date')\n to_date = request.GET.get('to_date')\n search = request.GET.get('search_value') or ''\n search_query = Q()\n if search:\n search_query = Q(user__user__username__icontains=search) | \\\n Q(user__user__first_name__icontains=search) | \\\n Q(project__title__icontains=search) | \\\n Q(amount__icontains=search) | \\\n Q(user__user__last_name__icontains=search) | \\\n Q(user__user__email__icontains=search)\n if from_date and to_date:\n import datetime\n import pytz\n date1 = datetime.datetime.strptime(from_date, '%Y-%m-%d').date()\n date2 = datetime.datetime.strptime(to_date, '%Y-%m-%d').date()\n repayments = RepaymentFragment.objects.filter(amount__gt=0.00,\n created_at__range=[\n datetime.datetime(date1.year, date1.month, date1.day, 8, 15,\n 12, 0, pytz.UTC),\n datetime.datetime(date2.year, date2.month, date2.day, 8, 15,\n 12, 0, pytz.UTC)]).order_by('-created_at') \\\n .select_related(\"user\", \"project\", \"user__user\").filter(search_query).iterator()\n else:\n repayments = RepaymentFragment.objects.filter(amount__gt=0.00).filter(search_query).order_by('-created_at') \\\n .select_related(\"user\", \"project\", \"user__user\").iterator()\n # writer = csv.writer(response, csv.excel)\n # response.write(u'\\ufeff'.encode('utf8')) # BOM (optional...Excel needs it to open UTF-8 file properly)\n\n def stream():\n buffer_ = StringIO()\n writer = csv.writer(buffer_)\n writer.writerow([\n smart_str(u\"FIRST NAME\"),\n smart_str(u\"LAST NAME\"),\n smart_str(u\"USERNAME\"),\n smart_str(u\"EMAIL\"),\n smart_str(u\"DATE\"),\n smart_str(u\"NAME OF PROJECT\"),\n smart_str(u\"DONATION AMOUNT\"),\n smart_str(u\"REPAYMENT AMOUNT\"),\n\n ])\n\n for payment in repayments:\n writer.writerow([\n smart_str(payment.user.user.first_name),\n smart_str(payment.user.user.last_name),\n smart_str(payment.user.user.username),\n smart_str(payment.user.user.email),\n smart_str(payment.created_at),\n smart_str(payment.project.title),\n smart_str(round(\n Payment.objects.filter(user=payment.user).filter(project=payment.project).aggregate(Sum('amount'))[\n 'amount__sum'] or 0, 2)),\n smart_str(round(payment.amount, 2)),\n ])\n buffer_.seek(0)\n data = buffer_.read()\n buffer_.seek(0)\n buffer_.truncate()\n yield data\n\n # Create the streaming response object with the appropriate CSV header.\n response = StreamingHttpResponse(stream(), content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"Repayment_report.csv\"'\n return response",
"def download_all_reports(request):\n\n request_body_json = json.loads(request.body)\n report_list = request_body_json['report_list']\n mode = request_body_json['format']\n action = request_body_json['action']\n annot = request_body_json['annotation_mode']\n\n if annot == 'Manual':\n annot = 'Human'\n elif annot == 'Automatic':\n annot = 'Robot'\n\n try:\n response = HttpResponse(content_type='text/csv')\n resp = download_report_gt(report_list, action, annot, mode, response)\n if mode == 'biocxml' or mode == 'biocjson':\n return HttpResponse(resp, content_type='application/xml')\n elif mode == 'csv':\n return resp\n elif mode == 'json':\n return JsonResponse(resp)\n\n except Exception as e:\n print(e)\n json_error = {'error': e}\n return JsonResponse(json_error)",
"def dwn_saved_result_csv(request):\n source_id = request.GET.get('source_id')\n data = []\n objs = ExtractedRelation.objects.filter(source=source_id)\n s = Source.objects.filter(source_id=source_id)[0]\n for i in objs:\n data.append((i.sentence, i.head, i.tail, i.pred_relation, i.sentiment, i.conf, s.source, i.rel_id, os.path.basename(i.ckpt)))\n \n df = pd.DataFrame(data, columns=['Sentence', 'Head', 'Tail', 'Predicted Relation', 'Predicted Sentiment', 'Confidence', 'Source', 'rel_id', 'Checkpoint'])\n df.to_csv(\"temp/analysis_results.csv\", index=False)\n \n return FileResponse(open('temp/analysis_results.csv','rb'))",
"def downloadResponse(request, formcode=None):\n if formcode !=None:\n response = HttpResponse(content_type='text/csv')\n responses = Response.objects.filter(form_id=formcode)\n writer = csv.writer(response)\n writer.writerow(['User', 'Submit Date', 'Answer1', 'Answer2', 'Answer3'])\n for r in responses:\n user = User.objects.get(id=r.user_id)\n writer.writerow([user, r.submitDate, r.answer1 ,r.answer2 , r.answer3])\n\n response['Content-Disposition'] = 'attachment; filename=\"response.csv\"'\n return response \n return render(request, 'download.html')"
]
| [
"0.80409896",
"0.76050204",
"0.7361398",
"0.73084444",
"0.7019261",
"0.6913584",
"0.6864677",
"0.6839728",
"0.6778433",
"0.67154855",
"0.67032295",
"0.66978914",
"0.6692166",
"0.6682622",
"0.66775554",
"0.666771",
"0.661355",
"0.6595593",
"0.6573942",
"0.657209",
"0.6565159",
"0.6564299",
"0.65637803",
"0.6560052",
"0.655865",
"0.65090233",
"0.6508262",
"0.6493149",
"0.64894724",
"0.64847517"
]
| 0.7874193 | 1 |
Write the contents of fileData to the file at exportPath. Overwrite any file that may exist at that path. | def writeExportFile(self, fileExtension, fileData):
targetDate = "%s%s%s" %(self.reportDate['year'],
self.reportDate['mon'],
self.reportDate['day'])
exportFname = "%s_%s.%s" %(self.exportBaseFname, targetDate,
fileExtension)
linkName = "%s.%s" %(self.exportBaseFname, fileExtension)
exportPath = os.path.join(self.exportDir, exportFname)
linkPath = os.path.join(self.exportDir, linkName)
f = file(exportPath, 'w+')
f.write(fileData)
f.close()
os.chown(exportPath, 30156, 101)
os.chmod(exportPath, 0664)
os.remove(linkPath)
os.symlink(exportPath, linkPath) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _save_file(self, file_path, data):\n self._ensure_directory(os.path.dirname(file_path))\n with open(file_path, \"wb\") as f:\n f.write(data)",
"def write_file(self, path, data):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}/files/{path}\"\n\n self.connector.http_call(\"post\", _url, data=data)",
"def saveOnFile(self, path, data):\n with open(path, \"w\") as f:\n f.write(data)",
"def write_file(self, path, data):\n _url = (\n f\"{self.connector.base_url}/projects/{self.project_id}/nodes/{self.node_id}\"\n f\"/files/{path}\"\n )\n\n self.connector.http_call(\"post\", _url, data=data)",
"def write_file(path, data):\n with open_local_or_gcs(path, 'w') as h_dest:\n h_dest.write(data) # pylint: disable=no-member",
"def filewrite(self, filename, data):\n try:\n filedata = data.decode(\"utf-8\")\n except Exception:\n filedata = data\n lock = FileLock(filename)\n lock.acquire()\n with open(filename, 'w+') as f:\n f.write(filedata)\n lock.release()",
"def write_file(data, file_path):\n try:\n with open(file_path, \"w\") as file_obj:\n file_obj.write(data)\n\n except OSError:\n writer(f\"\\nwarning: Unable to write backup file {file_path}\\n\", FORMAT[\"WARNING\"])",
"def save_file(path, file_data):\n file_data.save(path)",
"def write_contents(path, data):\n with open(path, 'wb') as stream:\n return stream.write(data)",
"def write_file(path, data):\n # opens file\n try:\n os.makedirs(os.path.dirname(path), exist_ok=True)\n f = open(str(path), \"w\")\n f.write(data)\n f.close()\n except Exception as e:\n print(\"Error writing file: \", e)\n sys.exit(1)",
"def rewrite_all_file(self, data):\r\n with open(self.file_name, 'w', encoding='utf-8') as self.file:\r\n self.file.write(data)",
"def write(self, data_to_write):\n self.single_file.write(data_to_write)\n self.single_file.flush()",
"def write_to_path(self, path):\n assert not path.exists()\n fout = path.open(\"wb\")\n fout.write(self.to_string())\n assert not fout.close()\n path.setdata()",
"def export(self, path):\n\n with open(path, 'w', encoding='utf-8') as f:\n json.dump(self.data, f, ensure_ascii=False)\n self.path = path",
"def save(self, export_path: str):",
"def save_to_file(self, data):\n\t\tif self.data_file.write(data):\n\t\t\tprint(\"Data successfully added to file\")\n\t\telse:\n\t\t\tPrint(\"Problem occured during adding to file\")",
"def write(self, filename, data):\n raise NotImplementedError",
"def write(path, data):\r\n path = encode(path)\r\n if path.lower().startswith(\"smb://\"):\r\n from sambatools.smb.smb_structs import OperationFailure\r\n try:\r\n samba.store_file(os.path.basename(path), data, os.path.dirname(path))\r\n except OperationFailure:\r\n logger.info(\"deportesalacarta.core.filetools write: Error al guardar el archivo: {0}\".format(path))\r\n return False\r\n else:\r\n return True\r\n\r\n else:\r\n try:\r\n f = open(path, \"wb\")\r\n f.write(data)\r\n f.close()\r\n\r\n # except EnvironmentError:\r\n except Exception, ex:\r\n logger.info(\"filetools.write: Error al guardar el archivo: \")\r\n template = \"An exception of type {0} occured. Arguments:\\n{1!r}\"\r\n message = template.format(type(ex).__name__, ex.args)\r\n logger.info(message)\r\n # logger.info(\"deportesalacarta.core.filetools write: Error al guardar el archivo: {0}\".format(path))\r\n return False\r\n else:\r\n return True",
"def writeDataToFile(self):\n if self.data is not None:\n self.notify.debug('Data is now synced with disk at %s' % \\\n self.filepath)\n if self.wantAnyDbm:\n self.data.sync()\n else:\n try:\n backuppath = self.filepath+ '.bu'\n if os.path.exists(self.filepath):\n os.rename(self.filepath,backuppath)\n \n outfile = open(self.filepath, 'w')\n cPickle.dump(self.data,outfile)\n outfile.close()\n \n if os.path.exists(backuppath):\n os.remove(backuppath)\n except EnvironmentError:\n self.notify.warning(str(sys.exc_info()[1]))\n else:\n self.notify.warning('No data to write. Aborting sync.')",
"def file_writer(path, data):\n with open(path, \"a\") as file:\n file.write(data + \"\\n\")",
"def exportModuleToFile(self, exportData):\n calcEngine = CalcEngine.factory(self.client_session)\n file_path = join(settings.MEDIA_ROOT, 'tmp', f'{exportData.moduleId}.ppl')\n if exportData.exportType != \"1\":\n storage = FileSystemStorage(\n join(settings.MEDIA_ROOT, 'models'))\n currentPath = self.client_session.modelInfo.uri\n folderPath = currentPath[:currentPath.rfind(os.path.sep)+1]\n file_path = join(\n storage.base_location, folderPath, f'{exportData.moduleId}.ppl')\n response = calcEngine.exportModule(exportData.moduleId, file_path)\n if response == 1:\n return open(file_path, 'rb'), file_path[file_path.rfind(os.path.sep)+1:]\n raise exceptions.NotAcceptable(\"Engine couldn't create file\")",
"def saveFile(self, data, filelocation):\n with open(filelocation, 'w+') as f:\n f.write(data)",
"def _write_cache_file(self, data):\n\n with open(self.cache_file, mode='wb') as f:\n f.write(data)\n\n self.log.info(f\"Cached facilities at {self.cache_file}\")",
"def WriteData(self, name, data):\n tempname = os.path.join(self._output_dir, '_%s' % name)\n handle = open(tempname, 'w')\n self.WriteFile(handle, data)\n handle.close()\n os.rename(tempname,\n os.path.join(self._output_dir, name))",
"def write_data_to_file(data, filename):\n with open(filename, 'wb') as outfile:\n outfile.write(data)",
"def write(self, file_path, content):\n self._set_extension(file_path)\n\n logger.debug(\"writing to %s\", self._file_path)\n\n self._content = content\n\n if self._file_ext == 'json':\n self._write_json()",
"def WriteFile(fname, data):\n #self._out.Info(\"Write file '%s' size %d (%#0x)\" %\n #(fname, len(data), len(data)))\n with open(Filename(fname), 'wb') as fd:\n fd.write(data)",
"def write_recovered_data_to_file(data,destination):\n destination = os.path.join(os.getcwd(),destination)\n if len(data) > 0:\n file = open(destination, 'wb')\n file.write(data)\n file.close()",
"def _write(self, filename, data):\n fullpath = os.path.join(self._tempdir, filename)\n with open(fullpath, 'w') as ofile:\n json.dump(data, ofile)\n return fullpath",
"def save_file(file_name: str, data: str) -> None:\n dir_path: str = create_dir(dir_name='out')\n file_path: str = os.path.join(dir_path, file_name)\n if os.path.isfile(file_path):\n LOGGER.warning(f'{file_path} already exists. Will be overwritten...')\n with open(file_path, 'w') as file:\n file.write(data)\n LOGGER.info(f'saved {file_name}')"
]
| [
"0.6847993",
"0.6833154",
"0.6802418",
"0.6674435",
"0.66702807",
"0.6617719",
"0.6616045",
"0.64740944",
"0.63244647",
"0.63027203",
"0.622513",
"0.62166715",
"0.6184663",
"0.61101717",
"0.6079351",
"0.6047553",
"0.60379374",
"0.6027775",
"0.6003779",
"0.59943545",
"0.598101",
"0.5894572",
"0.5879124",
"0.58658075",
"0.58651805",
"0.58549535",
"0.5853645",
"0.5843774",
"0.583406",
"0.58316326"
]
| 0.71769786 | 0 |
Reads in the spotipy query results for user top songs and returns a DataFrame with track_name, track_id, artist, album, duration, popularity | def create_df_top_songs(api_results):
#create lists for df-columns
track_name = []
track_id = []
artist = []
album = []
duration = []
popularity = []
#loop through api_results
for items in api_results['items']:
try:
track_name.append(items['name'])
track_id.append(items['id'])
artist.append(items["artists"][0]["name"])
duration.append(items["duration_ms"])
album.append(items["album"]["name"])
popularity.append(items["popularity"])
except TypeError:
pass
# Create the final df
df = pd.DataFrame({ "track_name": track_name,
"album": album,
"track_id": track_id,
"artist": artist,
"duration": duration,
"popularity": popularity})
return df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def top_artists_from_API(api_results):\r\n df = pd.DataFrame(api_results[\"items\"])\r\n cols = [\"name\",\"id\",\"genres\",\"popularity\",\"uri\"]\r\n return df[cols]",
"def get_top_artist_tracks(session, number_of_artist):\n try:\n if not issubclass(type(session), sqlalchemy.orm.session.Session):\n raise AttributeError(\"session not passed correctly, should be of type 'sqlalchemy.orm.session.Session' \")\n\n if not issubclass(type(number_of_artist), int) or number_of_artist < 1:\n raise AttributeError(\"number of artist should be integer and greater than 0\")\n\n LOGGER.info(\"Performing Read Operation\")\n\n # Selecting the Artist id, Artist Name, and count of track id\n query = session.query(models.AlbumTable.artist_id, models.ArtistTable.name,\n func.count(models.TracksTable.track_id).label(\"number_of_tracks\"))\n\n # Joining tracks table and album table\n query = query.join(models.AlbumTable, models.TracksTable.album_id == models.AlbumTable.album_id)\n query = query.join(models.ArtistTable, models.AlbumTable.artist_id == models.ArtistTable.artist_id)\n\n # Grouping by Artist Id\n query = query.group_by(models.AlbumTable.artist_id)\n\n # Sorting by number_of_tracks and artist id\n query = query.order_by(desc(\"number_of_tracks\"), models.AlbumTable.artist_id)\n\n results = query.limit(number_of_artist).all()\n\n if not results:\n raise NoResultFound(\"No Records Found\")\n\n LOGGER.info(\"\\n\\nThe Top %s Artist based on number of tracks are\", number_of_artist)\n\n print(\"\\n\\n\")\n print(\"===\" * 50)\n print(\"\\n\\n\")\n\n LOGGER.info(\"\\n\\n %s\", tabulate(results, headers=[\"Artist ID\", \"Artist Name\", \"Number Of Tracks\"],\n tablefmt=\"grid\"))\n\n print(\"\\n\\n\")\n print(\"===\" * 50)\n print(\"\\n\\n\")\n except AttributeError as err:\n LOGGER.error(err)\n except NoResultFound as err:\n LOGGER.error(err)\n finally:\n session.close()",
"def artist_top_tracks(req):\n\n artist = req.get('result').get('parameters').get('artist')\n logger.info('received {0} request for artist=\"{1}\"'.format('artist_top_tracks', artist))\n top_tracks = cached_result('get_artist_top_tracks', [artist], {})\n speech = 'Unable to locate top tracks for {artist}'.format(artist=artist)\n\n if top_tracks is not None:\n speech = 'The most popular songs for {artist} are'.format(artist=artist)\n for track in top_tracks:\n speech += '. ' + track\n\n return jsonify(\n {'speech': speech,\n 'displayText': speech,\n 'source': 'last-assist'})",
"def gen_user_artist_dataframe():\n print(\"Generating dataframe from lastfm usernames.\")\n user_to_id_dict = lastfm_data.get_users_and_ids()\n playcounts = defaultdict(dict)\n users = user_to_id_dict.keys()\n count = 0\n for user in users:\n count += 1\n top_artist_dict = get_top_artists(user)\n top_artists = top_artist_dict.keys()\n for artist in top_artists:\n playcounts[user][artist] = top_artist_dict[artist]\n print(str(count) + \"/\" + str(len(users)) + \" users counted.\")\n\n df = DataFrame(playcounts).T.fillna(0.0)\n return df",
"def get_top_song_metadata():\n cache_buster = '?v=%s' % get_timestamp()\n response = requests.get(MUSIC_DIR + 'top_meta.json' + cache_buster)\n return response.json()",
"def _get_top_tracks(artist, limit):\n\n l = []\n for track in _lastfm.get_artist(artist).get_top_tracks(limit=limit):\n track = track.item\n l.append({\"artist\": track.get_artist().get_name(), \"title\": track.get_title()})\n \n return l",
"def _get_user_top_tracks(self, sp, period='all'):\n\n if period == 'all':\n time_ranges = ('long_term', 'medium_term', 'short_term',)\n else:\n time_ranges = (period,)\n\n top_tracks = []\n for time_range in time_ranges:\n offsets = (0, 49, )\n for offset in offsets:\n result = sp.current_user_top_tracks(limit=50,\n offset=offset,\n time_range=time_range)\n top_tracks = tracks + result['items']\n\n return self._extract_track_info(top_tracks)",
"def get_tracks_from_albums(sp, album_uri_list):\n\n track_list = [[\"track_name\", \"track_uri\", \"track_release_date\"]]\n\n print(\"Log: Pulling data from Spotify. This can take a while...\")\n\n for album_uri in album_uri_list:\n album_tracks = sp.album_tracks(album_uri, limit=50, offset=0)[\"items\"]\n count_tracks_in_album = len(album_tracks)\n album_release_date = sp.album(album_uri)[\"release_date\"]\n\n # This part is probably very slow and should be improved by accessing the API less often\n for track_number in range(count_tracks_in_album):\n track_name = album_tracks[track_number][\"name\"]\n track_uri = album_tracks[track_number][\"uri\"]\n \n track_list.append([track_name, track_uri, album_release_date])\n\n # Create df from list of tracks for all albums\n track_df = pd.DataFrame(data=track_list[1:], columns=track_list[0])\n \n print(\"Log: Finished pulling all tracks from albums.\")\n return track_df",
"def search_for_artist_top_tracks(name):\n\tartist_id = search_for_artist(name)\n\ttoken = get_token()\n\tif artist_id and token:\n\t\theaders = {\"Content-Type\": \"application/json\", \"Authorization\": \"Bearer \" + token}\n\t\toptions = {'country': 'TR'}\n\t\tresponse = requests.get(\n\t\t\t'https://api.spotify.com/v1/artists/'+artist_id+'/top-tracks',\n\t\t\theaders=headers,\n\t\t\tparams=options\n\t\t)\n\t\tif response.status_code == 200:\n\t\t\tcontent = json.loads(response.content.decode('utf-8'))\n\t\t\tif content:\n\t\t\t\treturn content['tracks']\n\t\t\telse: return None\n\t\telse:\n\t\t\treturn None\n\telse:\n\t\treturn None",
"def get_playlist_tracks(user, playlist_id, limit=100):\n info_dict = spotify.user_playlist_tracks(user, playlist_id, limit=limit)\n items = info_dict[\"items\"]\n tracks = []\n for i in range(len(items)):\n album_name = items[i][\"track\"][\"album\"][\"name\"]\n album_type = items[i][\"track\"][\"album\"][\"album_type\"]\n artists_names = ', '.join([\n items[i][\"track\"][\"artists\"][index][\"name\"]\n for index in range(len(items[i][\"track\"][\"artists\"]))\n ])\n track_name = items[i][\"track\"][\"name\"]\n popularity = items[i][\"track\"][\"popularity\"]\n track_id = items[i][\"track\"][\"id\"]\n tracks.append({\"Album Name\": album_name,\n \"Album Type\": album_type,\n \"Artist(s)\": artists_names,\n \"Track Name\": track_name,\n \"Popularity\": popularity,\n \"Track ID\": track_id\n })\n tracks.sort(key=lambda d: d['Popularity'], reverse=True)\n return tracks",
"def get_tracks(search_string=None):\n if search_string is None:\n print('Please use a search string with get_tracks function')\n exit(0)\n item_type = \"tracks\"\n info_dict = spotify.search(q=search_string, limit=10, type='track')\n items = info_dict[item_type][\"items\"]\n tracks = []\n for i in range(len(items)):\n album_name = items[i][\"album\"][\"name\"]\n album_type = items[i][\"album\"][\"album_type\"]\n artists_names = ', '.join([\n items[i][\"artists\"][index][\"name\"]\n for index in range(len(items[i][\"artists\"]))\n ])\n track_name = items[i][\"name\"]\n track_id = items[i][\"id\"]\n track_popularity = items[i][\"popularity\"]\n tracks.append({\"Album Name\": album_name,\n \"Album Type\": album_type,\n \"Artist(s)\": artists_names,\n \"Track Name\": track_name,\n \"Popularity\": track_popularity,\n \"Track ID\": track_id\n })\n tracks.sort(key=lambda d: d['Popularity'], reverse=True)\n return tracks",
"def create_df_saved_songs(api_results):\r\n #create lists for df-columns\r\n track_name = []\r\n track_id = []\r\n artist = []\r\n album = []\r\n duration = []\r\n popularity = []\r\n #loop through api_results\r\n for items in api_results[\"items\"]:\r\n try:\r\n track_name.append(items[\"track\"]['name'])\r\n track_id.append(items[\"track\"]['id'])\r\n artist.append(items[\"track\"][\"artists\"][0][\"name\"])\r\n duration.append(items[\"track\"][\"duration_ms\"])\r\n album.append(items[\"track\"][\"album\"][\"name\"])\r\n popularity.append(items[\"track\"][\"popularity\"])\r\n except TypeError: \r\n pass\r\n # Create the final df \r\n df = pd.DataFrame({ \"track_name\": track_name, \r\n \"album\": album, \r\n \"track_id\": track_id,\r\n \"artist\": artist, \r\n \"duration\": duration, \r\n \"popularity\": popularity})\r\n return df",
"def test_top_songs(self):\n \n rss = AppleRSS()\n objs = rss.get_top_songs(limit=10)\n \n self.__test_artists('top_songs', objs)",
"def find_top_tracks(self, artist, N=10):\n tracklist = []\n try:\n result = self.sp.artist_top_tracks(artist)\n except ConnectionError as e:\n print (\"ERROR: connection pool is closed; searching Spotify for top tracks for this artist: \" + artist)\n result = self.sp.artist_top_tracks(artist)\n print (\"tried again\")\n print (result)\n raise e\n\n tracklist.extend(result.get(\"tracks\"))\n if len(tracklist) > N:\n return tracklist[0:N]\n else:\n return tracklist",
"def create_df_recommendations(api_results):\r\n track_name = []\r\n track_id = []\r\n artist = []\r\n album = []\r\n duration = []\r\n popularity = []\r\n for items in api_results['tracks']:\r\n try:\r\n track_name.append(items['name'])\r\n track_id.append(items['id'])\r\n artist.append(items[\"artists\"][0][\"name\"])\r\n duration.append(items[\"duration_ms\"])\r\n album.append(items[\"album\"][\"name\"])\r\n popularity.append(items[\"popularity\"])\r\n except TypeError:\r\n pass\r\n df = pd.DataFrame({ \"track_name\": track_name, \r\n \"album\": album, \r\n \"track_id\": track_id,\r\n \"artist\": artist, \r\n \"duration\": duration, \r\n \"popularity\": popularity})\r\n\r\n return df",
"def get_playlist_tracks(playlist):\n track_ids = [id for id in load_from_json(f\"playlist_{playlist}.json\") if id is not None]\n tracks = []\n\n for i in range(0, len(track_ids), 50):\n tracks_info = sp.tracks(track_ids[i: i+50])['tracks']\n for track in tracks_info:\n if track:\n tracks.append({\n 'id': track['id'],\n 'name': track['name'],\n 'popularity': track['popularity']\n })\n df = pd.DataFrame(tracks)\n\n file = f\"playlist_{playlist}_df.csv\"\n df.to_csv(file)\n\n return file",
"def top_tracks(genre):\n\tartist = random.choice(genre_artist[genre])\n\ttop_tracks = search_for_artist_top_tracks(artist)\n\titems = []\n\tif top_tracks:\n\t\tfor track in top_tracks:\n\t\t\titems.append({\"artist\": track[\"artists\"][0][\"name\"], \"popularity\": track[\"popularity\"], \"track\": track[\"name\"],\n\t\t\t\t \"preview_url\": track[\"preview_url\"], \"album_image_url\": track[\"album\"][\"images\"][2][\"url\"]})\n\t\titems = sorted(items, key=lambda x: x['popularity'], reverse=True)\n\t\tfor item in items:\n\t\t\tdel item['popularity']\n\t\treturn items\n\telse:\n\t\treturn None",
"def get_all_tracks_from_artists(sp, artist_uri_list):\n\n track_list = [[\"track_name\", \"track_uri\", \"track_release_date\", \"artist_name\"]]\n track_df = pd.DataFrame(columns=track_list[0])\n\n print(\"Log: Pulling data from Spotify. This can take a while...\")\n\n for artist_uri in artist_uri_list:\n # Get artist name and albums\n artist_name = sp.artist(artist_uri)[\"name\"]\n albums = get_albums_from_artists(sp, [artist_uri])\n\n # Get tracks from artist albums\n tracks_artist_df = get_tracks_from_albums(sp, albums[\"album_uri\"].to_list())\n tracks_artist_df[\"artist_name\"] = artist_name\n\n # Append new songs to dataframe\n track_df = track_df.append(tracks_artist_df)\n \n print(\"Log: Finished pulling all tracks from artist.\")\n return track_df",
"def top_n(userid):\n agg = [s[\"search_id\"] for s in db_searches.find()]\n table = pd.DataFrame()\n table[\"searches\"] = Counter(agg).keys()\n table[\"count\"] = Counter(agg).values()\n table = table.sort_values(\"count\", ascending=False)\n table = table[:10]\n search_ids = table[\"searches\"].values\n counts = table[\"count\"].values\n n = 0\n top_n = []\n while n < len(search_ids):\n top_n.append([str(db_search_terms.find_one({\"_id\": search_ids[n]}).get(\"value\")), str(counts[n])])\n n += 1\n jsonob = jsonify(top_n=top_n)\n return jsonob",
"def getSongsSpotify(song_name,access_token):\n song_name = song_name.strip()\n query = \"https://api.spotify.com/v1/search?q={}&type=track&limit=20&offset=0\".format(song_name)\n response = requests.get(\n query,\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(access_token)\n }\n )\n response_json = response.json()\n # \n \n songs_no = response_json[\"tracks\"][\"total\"]\n if songs_no == 0 :\n return {\"songs_no\" : songs_no}\n songs = response_json[\"tracks\"][\"items\"]\n if(len(songs)<5):\n uri = [songs[0][\"uri\"]]\n names = [songs[0][\"name\"]]\n artists = [songs[0][\"artists\"][0][\"name\"]]\n imageUrl = [songs[0][\"album\"][\"images\"][-1][\"url\"]]\n response_obj = {\n \"songs_no\" : songs_no,\n \"uri\" : uri,\n \"names\" : names,\n \"artists\" : artists,\n \"images\" : imageUrl\n }\n else:\n uri = [ songs[i][\"uri\"] for i in range(0,5)]\n names = [songs[i][\"name\"] for i in range(0,5)]\n artists = [songs[i][\"artists\"][0][\"name\"] for i in range(0,5)]\n imageUrl = [songs[i][\"album\"][\"images\"][-1][\"url\"] for i in range(0,5)]\n response_obj = {\n \"songs_no\" : songs_no,\n \"uri\" : uri,\n \"names\" : names,\n \"artists\" : artists,\n \"images\" : imageUrl\n }\n return response_obj",
"def get_albums_from_artists(sp, artist_uri_list):\n\n # Create header for output df\n albums_list = [[\"name\", \"album_uri\", \"album_release_date\", \"artist_uri\"]]\n\n print(\"Log: Pulling data from Spotify. This can take a while...\")\n\n # Loop through list of artist uris\n for artist_uri in artist_uri_list:\n # Get album from artist\n albums = sp.artist_albums(artist_uri)\n \n # Append each album to list\n for album in albums[\"items\"]:\n album_name = album[\"name\"]\n album_uri = album[\"uri\"]\n album_release_date = album[\"release_date\"]\n albums_list.append([album_name, album_uri, album_release_date, artist_uri])\n\n # Create df from list of albums for all artist\n albums_df = pd.DataFrame(data=albums_list[1:], columns=albums_list[0])\n\n print(\"Log: Finished pulling all albums from artist.\")\n return albums_df",
"def get_songs(self):\n search_object = {\"size\":25000,\n 'query': {'term': {FIELD_FINGERPRINTED: True}}, \"fields\": [FIELD_SONGNAME, FIELD_FILE_SHA1,\n FIELD_TOTAL_HASHES]}\n response = self.cursor.search(index = SONGS_INDEXNAME, body=search_object)\n #print(\"get_songs response: \",response)\n arr = []\n for hit in response[\"hits\"][\"hits\"]:\n dct = {\"song_name\":hit['_source'][FIELD_SONGNAME],\"total_hashes\":hit['_source'][FIELD_TOTAL_HASHES],\n \"file_sha1\":hit['_source'][FIELD_FILE_SHA1]}\n arr.append(dct)\n return arr",
"def get_more_data(song):\n headers = {\n \"User-Agent\": \"ytmdl\"\n }\n payload = {\n \"api_key\": API_KEY,\n \"method\": \"track.getInfo\",\n \"track\": song.track_name,\n \"artist\": song.artist_name,\n \"format\": \"json\"\n }\n\n response = get(API_BASE, headers=headers, params=payload)\n\n # TODO: Add a check to exit if the response code is not 200\n\n track_details = response.json()\n\n # Update the songs attributes\n song.track_number = 1\n\n try:\n song.collection_name = track_details[\"track\"][\"album\"][\"title\"]\n song.track_time = song._convert_time(\n track_details[\"track\"][\"duration\"])\n song.release_date = track_details[\"track\"][\"wiki\"][\"published\"]\n except KeyError:\n # This happens because last.fm do not have consistent data for some songs\n # Just ignore this errors if they occur.\n pass\n\n return song",
"def current_user_top_tracks(\n self, limit=20, offset=0, time_range=TimeRange.MEDIUM_TERM, **kwargs\n ):\n return self._get(\n API.MY_TOP.value.format(type=\"tracks\"), # pylint: disable=no-member\n time_range=TimeRange(time_range).value,\n limit=limit,\n offset=offset,\n **kwargs,\n )",
"def get_albums_most_played(session_):\n artists = session_.query(Album).order_by(Album.plays.desc()).all()\n return artists",
"def get_artists_most_played(session_):\n # artists = session_.query(Artist).order_by(Artist.name.asc()).paginate()\n artists = session_.query(Artist).order_by(Artist.plays.desc()).all()\n return artists",
"def scrapeSpotify():\n # Set Spotify authentication token \n token = util.prompt_for_user_token(username, scope, clientid, clientsecret, redirecturi)\n \n if token: # Authenticate with Spotify\n # Store dictionary of scraped values from scraping function\n if debugging == True:\n cities = DataCollection.test() # DEBUGGING ONLY\n #cities = DataCollection.scrape_spotify_info(limiting, limit_cities) \n #return jsonify(cities)\n else:\n cities = DataCollection.scrape_spotify_info(limiting, limit_cities) # THE REAL THING\n\n # Loop through all cities in dataset\n i = 0\n for city in cities:\n # Exit out of for loop at 2 if we are limiting city loop iterations\n if limiting == True and i == limit_cities:\n break \n #\n # Begin Spotify analysis (e.g., determine popularity for each artist in city list, top track)\n #\n sp = spotipy.Spotify(auth=token)\n\n # Loop through the top artists for this city, and determine the popularity values\n i = 0\n top_artists = []\n artist_names = []\n for top_artist in city[\"top_artists\"]:\n # Exit out of for loop at appropriate threshold, if we are limiting artist iterations\n if limiting == True and i == limit_artists:\n break\n\n i += 1\n # *** Example artist value in dictionary ***\n # { 'artist': 'Bobby Pulido',\n # 'tracks': ['spotify:track:1tg7ZzCAkjDNENdWL7WuIr',\n # 'spotify:track:2JJSGhPpATm8lXeYjD95fw',\n # 'spotify:track:5iuGn3RXvfvHIyIe8fyxBE'\n # ],\n # 'popularity': 99 <--------- *** BEING ADDED ***\n # }\n # Get info about the first artist track\n urn = top_artist[\"tracks\"][0]\n track = sp.track(urn)\n\n # Get the artist's Spotify URI & name\n artist_uri = track['artists'][0]['uri']\n artist_name = track['artists'][0]['name']\n\n # Set the artist name to the first artist attributed to the song\n top_artist[\"artist\"] = artist_name\n\n # Get the artist popularity, and add it to their 'top_artist' item\n artist_info = sp.artist(artist_uri)\n artist_popularity = artist_info[\"popularity\"]\n top_artist[\"popularity\"] = artist_popularity\n\n # Get the artist genres, and add it to their 'top_artist' item\n artist_genres = artist_info[\"genres\"]\n top_artist[\"genres\"] = artist_genres\n\n # If not already added, append updated top_artist object to master collection\n if artist_name not in artist_names:\n top_artists.append(top_artist)\n \n # Track current artists in flat list to avoid duplicates\n artist_names.append(artist_name) \n\n # Sort 'top_artists' by popularity in descending order, update the field in the city object\n top_artists.sort(key=lambda x: x[\"popularity\"], reverse=True)\n city[\"top_artists\"] = top_artists\n\n # Artist & song popularity logic:\n # Build 'top_5_artists' list: grab top 5 (by popularity) from 'top_artists' \n top_10_artists = []\n i_art = 0\n for art in top_artists:\n if i_art < 10:\n top_10_artists.append(art[\"artist\"])\n \n i_art += 1\n \n # Update 'top_5_artists' field in the city object\n city[\"top_5_artists\"] = top_10_artists[:5]\n\n # Loop through all tracks for this city, and create a new list of objects with the track popularity\n # BEFORE: [trk1, trk2, trk3, ...]\n # AFTER: [\n # {'track': trk1, 'popularity': pop1, 'name': 'El Baile de Gorila', 'artist': 'Mossino'}\n # {'track': trk2, 'popularity': pop2}\n # ...\n # ] \n i = 0\n tracks = []\n highest_popularity = 0\n most_popular_track = \"\"\n for trk in city[\"track_ids\"]:\n # Exit out of for loop at appropriate threshold, if we are limiting track iterations\n if limiting == True and i == limit_tracks:\n break\n\n i += 1\n # Get Spotify track 
metadata \n track = sp.track(trk)\n \n # Get the track name, artist, and popularity -- and add it to the object\n current_track_name = track['name']\n current_track_artist = track['artists'][0]['name']\n current_track_popularity = track['popularity']\n track_info = { \n \"track\": trk, \n \"popularity\": current_track_popularity,\n \"artist\": current_track_artist,\n \"name\": current_track_name\n }\n \n # Append updated object to track_ids array\n tracks.append(track_info)\n\n # For the top 10 artists, determine the song with the highest popularity\n if current_track_artist in top_10_artists:\n # Determine most popular track\n if highest_popularity < current_track_popularity:\n most_popular_track = trk\n highest_popularity = current_track_popularity\n most_popular_artist = current_track_artist\n most_popular_track_name = current_track_name \n \n #print(\"most popular track: \" + most_popular_track)\n #print(\"highest popularity: \" + str(highest_popularity))\n #print(\"current track: \" + trk )\n \n # Update current city value with updated 'tracks' array info\n city[\"track_ids\"] = tracks\n\n # Update current city's 'top_track' field with the most popular track info\n mostpopular_track_info = { \n \"track\": most_popular_track, \n \"popularity\": highest_popularity,\n \"artist\": most_popular_artist,\n \"name\": most_popular_track_name\n }\n city[\"top_track\"] = mostpopular_track_info\n\n if debugging == True:\n # **** Print out resulting object (TESTING ONLY) ****\n pprint.pprint(city)\n else:\n # **** Insert the current city record into the MongoDB collection ****\n db = connectToMongo()\n db.Cities.update( { \"city\": city[\"city\"] }, \n city,\n upsert=True\n )\n \n # Iterate counter\n i += 1\n else: \n print(\"Connection to Spotify API failed - token invalid.\")\n\n return getJSON(wrapGeoJSON(cities))",
"def get_results(r):\n myDict = {}\n for name in r[\"results\"]:\n myDict[name[\"name\"]] = {\n \"rank\": name[\"rank\"],\n \"ticker\": name[\"ticker\"],\n \"upvotes\": name[\"upvotes\"],\n \"mentions\": name[\"mentions\"],\n \"mentions_24h_ago\": name[\"mentions_24h_ago\"],\n }\n df = pd.DataFrame.from_dict(myDict, orient=\"index\")\n df[\"rank\"] = df[\"rank\"].astype(int)\n df[\"upvotes\"] = df[\"upvotes\"].astype(int)\n df[\"mentions\"] = df[\"mentions\"].astype(int)\n df[\"mentions_24h_ago\"] = df[\"mentions_24h_ago\"].astype(int)\n\n df[\"delta_mentions_24h\"] = df[\"mentions\"] - df[\"mentions_24h_ago\"]\n df = df[~(df[\"upvotes\"] <= 1000)]\n df = df.sort_values(by=[\"delta_mentions_24h\"], ascending=False)\n return df",
"def fetch_top_games(twitch: Twitch, n: int = 100) -> pd.DataFrame:\n top_games = fetch_twitch_data(twitch, 'get_top_games', first=n)\n\n return top_games",
"def get_songs_by_artist(artist, linesep=' \\n ', timeout=None):\n df = pd.DataFrame(columns=['Artist', 'Title'])\n url = \"https://lyrics.fandom.com/wiki/Category:Songs_by_\"+urlize(artist)\n df = parse_page_now(url,df)\n return df"
]
| [
"0.69811374",
"0.6699098",
"0.66560555",
"0.65576893",
"0.64863986",
"0.64474547",
"0.6371338",
"0.6334062",
"0.63221335",
"0.6296572",
"0.6261828",
"0.6221661",
"0.6220186",
"0.6189797",
"0.61722445",
"0.61599994",
"0.6155433",
"0.6114755",
"0.5938691",
"0.5933267",
"0.58908594",
"0.58413357",
"0.580006",
"0.5798523",
"0.5790901",
"0.57688844",
"0.5747161",
"0.5739431",
"0.56830823",
"0.56177324"
]
| 0.7533467 | 0 |
Reads in the spotipy query results for user saved songs and returns a DataFrame with track_name,track_id, artist,album,duration,popularity | def create_df_saved_songs(api_results):
#create lists for df-columns
track_name = []
track_id = []
artist = []
album = []
duration = []
popularity = []
#loop through api_results
for items in api_results["items"]:
try:
track_name.append(items["track"]['name'])
track_id.append(items["track"]['id'])
artist.append(items["track"]["artists"][0]["name"])
duration.append(items["track"]["duration_ms"])
album.append(items["track"]["album"]["name"])
popularity.append(items["track"]["popularity"])
except TypeError:
pass
# Create the final df
df = pd.DataFrame({ "track_name": track_name,
"album": album,
"track_id": track_id,
"artist": artist,
"duration": duration,
"popularity": popularity})
return df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_df_top_songs(api_results):\r\n #create lists for df-columns\r\n track_name = []\r\n track_id = []\r\n artist = []\r\n album = []\r\n duration = []\r\n popularity = []\r\n #loop through api_results\r\n for items in api_results['items']:\r\n try:\r\n track_name.append(items['name'])\r\n track_id.append(items['id'])\r\n artist.append(items[\"artists\"][0][\"name\"])\r\n duration.append(items[\"duration_ms\"])\r\n album.append(items[\"album\"][\"name\"])\r\n popularity.append(items[\"popularity\"])\r\n except TypeError:\r\n pass\r\n # Create the final df \r\n df = pd.DataFrame({ \"track_name\": track_name, \r\n \"album\": album, \r\n \"track_id\": track_id,\r\n \"artist\": artist, \r\n \"duration\": duration, \r\n \"popularity\": popularity})\r\n\r\n return df",
"def get_tracks_from_albums(sp, album_uri_list):\n\n track_list = [[\"track_name\", \"track_uri\", \"track_release_date\"]]\n\n print(\"Log: Pulling data from Spotify. This can take a while...\")\n\n for album_uri in album_uri_list:\n album_tracks = sp.album_tracks(album_uri, limit=50, offset=0)[\"items\"]\n count_tracks_in_album = len(album_tracks)\n album_release_date = sp.album(album_uri)[\"release_date\"]\n\n # This part is probably very slow and should be improved by accessing the API less often\n for track_number in range(count_tracks_in_album):\n track_name = album_tracks[track_number][\"name\"]\n track_uri = album_tracks[track_number][\"uri\"]\n \n track_list.append([track_name, track_uri, album_release_date])\n\n # Create df from list of tracks for all albums\n track_df = pd.DataFrame(data=track_list[1:], columns=track_list[0])\n \n print(\"Log: Finished pulling all tracks from albums.\")\n return track_df",
"def get_playlist_tracks(playlist):\n track_ids = [id for id in load_from_json(f\"playlist_{playlist}.json\") if id is not None]\n tracks = []\n\n for i in range(0, len(track_ids), 50):\n tracks_info = sp.tracks(track_ids[i: i+50])['tracks']\n for track in tracks_info:\n if track:\n tracks.append({\n 'id': track['id'],\n 'name': track['name'],\n 'popularity': track['popularity']\n })\n df = pd.DataFrame(tracks)\n\n file = f\"playlist_{playlist}_df.csv\"\n df.to_csv(file)\n\n return file",
"def get_all_tracks_from_artists(sp, artist_uri_list):\n\n track_list = [[\"track_name\", \"track_uri\", \"track_release_date\", \"artist_name\"]]\n track_df = pd.DataFrame(columns=track_list[0])\n\n print(\"Log: Pulling data from Spotify. This can take a while...\")\n\n for artist_uri in artist_uri_list:\n # Get artist name and albums\n artist_name = sp.artist(artist_uri)[\"name\"]\n albums = get_albums_from_artists(sp, [artist_uri])\n\n # Get tracks from artist albums\n tracks_artist_df = get_tracks_from_albums(sp, albums[\"album_uri\"].to_list())\n tracks_artist_df[\"artist_name\"] = artist_name\n\n # Append new songs to dataframe\n track_df = track_df.append(tracks_artist_df)\n \n print(\"Log: Finished pulling all tracks from artist.\")\n return track_df",
"def get_songs(self):\n search_object = {\"size\":25000,\n 'query': {'term': {FIELD_FINGERPRINTED: True}}, \"fields\": [FIELD_SONGNAME, FIELD_FILE_SHA1,\n FIELD_TOTAL_HASHES]}\n response = self.cursor.search(index = SONGS_INDEXNAME, body=search_object)\n #print(\"get_songs response: \",response)\n arr = []\n for hit in response[\"hits\"][\"hits\"]:\n dct = {\"song_name\":hit['_source'][FIELD_SONGNAME],\"total_hashes\":hit['_source'][FIELD_TOTAL_HASHES],\n \"file_sha1\":hit['_source'][FIELD_FILE_SHA1]}\n arr.append(dct)\n return arr",
"def create_df_recommendations(api_results):\r\n track_name = []\r\n track_id = []\r\n artist = []\r\n album = []\r\n duration = []\r\n popularity = []\r\n for items in api_results['tracks']:\r\n try:\r\n track_name.append(items['name'])\r\n track_id.append(items['id'])\r\n artist.append(items[\"artists\"][0][\"name\"])\r\n duration.append(items[\"duration_ms\"])\r\n album.append(items[\"album\"][\"name\"])\r\n popularity.append(items[\"popularity\"])\r\n except TypeError:\r\n pass\r\n df = pd.DataFrame({ \"track_name\": track_name, \r\n \"album\": album, \r\n \"track_id\": track_id,\r\n \"artist\": artist, \r\n \"duration\": duration, \r\n \"popularity\": popularity})\r\n\r\n return df",
"def search_multiple_tracks(search_query, sp):\r\n \r\n # List to store the track ids\r\n track_ids = []\r\n # List to store the track names and artists\r\n tracks = []\r\n\r\n #Search for 10 results in the Spotify API given a search querry\r\n results = sp.search(q = search_query ,limit=10)\r\n results = results['tracks']['items']\r\n\r\n # Extract the track id's, names and artists for all the search results\r\n for i in range(len(results)):\r\n\r\n # Get track id, artist and name\r\n track_id = results[i]['id']\r\n artist = results[i]['artists'][0]['name']\r\n track_name = results[i]['name']\r\n\r\n # Get a string with the artist and track name\r\n track = artist + ' - ' + track_name\r\n\r\n # Append the track id's and track name/artist to the list\r\n track_ids.append(track_id)\r\n tracks.append(track)\r\n\r\n # Make a dictionary of the track id and track name/artist list.\r\n return dict(zip(tracks,track_ids))",
"def get_all_tracks():\n query_format = f\"track:\"\n\n search_string_letter_ids = [0]\n\n tracks = {}\n\n total = 0\n\n while search_string_letter_ids is not None:\n search_string = construct_search_string(search_string_letter_ids)\n count = track_count(query_format + search_string)\n print(f\"{search_string} : {count}\")\n if count < 2000:\n for i in range(0, count, 50):\n track_results = sp.search(query_format + search_string, type='track', limit=50, offset=i)\n for t in track_results['tracks']['items']:\n if t['id'] not in tracks:\n total += 1\n tracks[t['id']] = {'name': t['name']}\n\n search_string_letter_ids = get_next_search_string(search_string_letter_ids, last_was_under=True)\n else:\n search_string_letter_ids = get_next_search_string(search_string_letter_ids, last_was_under=False)\n\n print(f\"Tracks Saved In File: {total}\")\n\n file = save_to_json(tracks, f\"tracks.json\")\n return file",
"def get_tracks(search_string=None):\n if search_string is None:\n print('Please use a search string with get_tracks function')\n exit(0)\n item_type = \"tracks\"\n info_dict = spotify.search(q=search_string, limit=10, type='track')\n items = info_dict[item_type][\"items\"]\n tracks = []\n for i in range(len(items)):\n album_name = items[i][\"album\"][\"name\"]\n album_type = items[i][\"album\"][\"album_type\"]\n artists_names = ', '.join([\n items[i][\"artists\"][index][\"name\"]\n for index in range(len(items[i][\"artists\"]))\n ])\n track_name = items[i][\"name\"]\n track_id = items[i][\"id\"]\n track_popularity = items[i][\"popularity\"]\n tracks.append({\"Album Name\": album_name,\n \"Album Type\": album_type,\n \"Artist(s)\": artists_names,\n \"Track Name\": track_name,\n \"Popularity\": track_popularity,\n \"Track ID\": track_id\n })\n tracks.sort(key=lambda d: d['Popularity'], reverse=True)\n return tracks",
"def get_albums_from_artists(sp, artist_uri_list):\n\n # Create header for output df\n albums_list = [[\"name\", \"album_uri\", \"album_release_date\", \"artist_uri\"]]\n\n print(\"Log: Pulling data from Spotify. This can take a while...\")\n\n # Loop through list of artist uris\n for artist_uri in artist_uri_list:\n # Get album from artist\n albums = sp.artist_albums(artist_uri)\n \n # Append each album to list\n for album in albums[\"items\"]:\n album_name = album[\"name\"]\n album_uri = album[\"uri\"]\n album_release_date = album[\"release_date\"]\n albums_list.append([album_name, album_uri, album_release_date, artist_uri])\n\n # Create df from list of albums for all artist\n albums_df = pd.DataFrame(data=albums_list[1:], columns=albums_list[0])\n\n print(\"Log: Finished pulling all albums from artist.\")\n return albums_df",
"def create_df_playlist(api_results,sp = None, append_audio = True):\r\n df = create_df_saved_songs(api_results[\"tracks\"])\r\n if append_audio == True:\r\n assert sp != None, \"sp needs to be specified for appending audio features\"\r\n df = append_audio_features(df,sp)\r\n return df",
"def fetch_tracks_info_df(df):\n\n gen_df = df.copy()\n gen_df = gen_df[['artist_name', 'title', 'release', 'track_id', 'song_id']]\n\n for column_name in gen_df.columns:\n gen_df[column_name] = gen_df[column_name].map(lambda x: ast.literal_eval(x).decode('utf-8'))\n\n gen_df.rename(columns={'release': 'album_name'}, inplace=True)\n gen_df['year'] = df['year']\n\n return gen_df",
"def get_playlist_tracks(user, playlist_id, limit=100):\n info_dict = spotify.user_playlist_tracks(user, playlist_id, limit=limit)\n items = info_dict[\"items\"]\n tracks = []\n for i in range(len(items)):\n album_name = items[i][\"track\"][\"album\"][\"name\"]\n album_type = items[i][\"track\"][\"album\"][\"album_type\"]\n artists_names = ', '.join([\n items[i][\"track\"][\"artists\"][index][\"name\"]\n for index in range(len(items[i][\"track\"][\"artists\"]))\n ])\n track_name = items[i][\"track\"][\"name\"]\n popularity = items[i][\"track\"][\"popularity\"]\n track_id = items[i][\"track\"][\"id\"]\n tracks.append({\"Album Name\": album_name,\n \"Album Type\": album_type,\n \"Artist(s)\": artists_names,\n \"Track Name\": track_name,\n \"Popularity\": popularity,\n \"Track ID\": track_id\n })\n tracks.sort(key=lambda d: d['Popularity'], reverse=True)\n return tracks",
"def gen_user_artist_dataframe():\n print(\"Generating dataframe from lastfm usernames.\")\n user_to_id_dict = lastfm_data.get_users_and_ids()\n playcounts = defaultdict(dict)\n users = user_to_id_dict.keys()\n count = 0\n for user in users:\n count += 1\n top_artist_dict = get_top_artists(user)\n top_artists = top_artist_dict.keys()\n for artist in top_artists:\n playcounts[user][artist] = top_artist_dict[artist]\n print(str(count) + \"/\" + str(len(users)) + \" users counted.\")\n\n df = DataFrame(playcounts).T.fillna(0.0)\n return df",
"def get_more_data(song):\n headers = {\n \"User-Agent\": \"ytmdl\"\n }\n payload = {\n \"api_key\": API_KEY,\n \"method\": \"track.getInfo\",\n \"track\": song.track_name,\n \"artist\": song.artist_name,\n \"format\": \"json\"\n }\n\n response = get(API_BASE, headers=headers, params=payload)\n\n # TODO: Add a check to exit if the response code is not 200\n\n track_details = response.json()\n\n # Update the songs attributes\n song.track_number = 1\n\n try:\n song.collection_name = track_details[\"track\"][\"album\"][\"title\"]\n song.track_time = song._convert_time(\n track_details[\"track\"][\"duration\"])\n song.release_date = track_details[\"track\"][\"wiki\"][\"published\"]\n except KeyError:\n # This happens because last.fm do not have consistent data for some songs\n # Just ignore this errors if they occur.\n pass\n\n return song",
"def fetch_song_data(self, song_ids):\n\t\ttracks_base_url = \"https://api.spotify.com/v1/tracks\"\n\t\theaders = {}\n\t\ttrack_ids = ','.join(song_ids)\n\t\tquery_params = \"/?ids=\"+track_ids\n\t\ttracks_url = tracks_base_url + query_params\n\t\ttracks={}\n\t\theaders['Authorization'] = f\"Bearer {self.token}\"\n\n\t\ttry:\n\t\t\treq = request.Request(url=tracks_url,data=None, headers=headers)\n\t\t\tresponse = request.urlopen(req).read().decode()\n\t\t\ttracks = json.loads(response)\n\t\t\tlogging.info(\"Successfully fetched songs from Spotify!\")\n\t\texcept error.URLError as e:\n\t\t\tresponse = e.read().decode(\"utf8\", 'ignore')\n\t\t\tlogging.error(response)\n\t\treturn tracks",
"def getSongsSpotify(song_name,access_token):\n song_name = song_name.strip()\n query = \"https://api.spotify.com/v1/search?q={}&type=track&limit=20&offset=0\".format(song_name)\n response = requests.get(\n query,\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(access_token)\n }\n )\n response_json = response.json()\n # \n \n songs_no = response_json[\"tracks\"][\"total\"]\n if songs_no == 0 :\n return {\"songs_no\" : songs_no}\n songs = response_json[\"tracks\"][\"items\"]\n if(len(songs)<5):\n uri = [songs[0][\"uri\"]]\n names = [songs[0][\"name\"]]\n artists = [songs[0][\"artists\"][0][\"name\"]]\n imageUrl = [songs[0][\"album\"][\"images\"][-1][\"url\"]]\n response_obj = {\n \"songs_no\" : songs_no,\n \"uri\" : uri,\n \"names\" : names,\n \"artists\" : artists,\n \"images\" : imageUrl\n }\n else:\n uri = [ songs[i][\"uri\"] for i in range(0,5)]\n names = [songs[i][\"name\"] for i in range(0,5)]\n artists = [songs[i][\"artists\"][0][\"name\"] for i in range(0,5)]\n imageUrl = [songs[i][\"album\"][\"images\"][-1][\"url\"] for i in range(0,5)]\n response_obj = {\n \"songs_no\" : songs_no,\n \"uri\" : uri,\n \"names\" : names,\n \"artists\" : artists,\n \"images\" : imageUrl\n }\n return response_obj",
"def search_song(self, name):\n self.logger.debug('Searched for Song: {}'.format(name))\n results = self.sp.search(q='track:' + name, type='track')\n songs = [song for song in results['tracks']['items']]\n i = 1\n songs_ls = []\n table_ls = []\n for song in songs:\n table_ls.append([i,\n song['name'][0:20].strip(),\n song['album']['name'][0:20].strip(),\n \"%0.2f\" % (song['duration_ms'] / 60000),\n song['popularity']])\n songs_ls.append(song['uri'])\n i = i + 1\n return songs_ls, table_ls",
"def populate_billboard_scrapables(self):\n results = MongoClient().billboard.spotify.find()\n self.df = pd.DataFrame(\n data=map(\n lambda r: (\n r[\"metadata\"][\"id\"],\n r[\"metadata\"][\"artists\"][0][\"name\"],\n r[\"metadata\"][\"name\"],\n ),\n results,\n ),\n columns=[\"track_id\", \"artist_name\", \"title\"],\n )\n print(f\"Tracks identified to scrape lyrics: {self.df.shape[0]}\")",
"def get_songs_by_artist(artist, linesep=' \\n ', timeout=None):\n df = pd.DataFrame(columns=['Artist', 'Title'])\n url = \"https://lyrics.fandom.com/wiki/Category:Songs_by_\"+urlize(artist)\n df = parse_page_now(url,df)\n return df",
"def get_metadata(self):\n items = self.get_playlist_items()\n uris = [item[\"track\"][\"uri\"] for item in items]\n features = self.API.audio_features(uris)\n\n list_data = []\n\n for idx, item in enumerate(items):\n data = {\"name\": item[\"track\"][\"name\"], \"uri\": item[\"track\"][\"uri\"]}\n data.update(features[idx])\n list_data.append(data)\n\n self.metadata = pd.DataFrame(data=list_data,\n index=range(len(list_data)))\n\n return self.metadata",
"def GetTrackMetaData():\n\t\n\t## Get the raw file. The low_memory option\n\t## suppresses a warning regarding mismatched datatypes in\n\t## the track_id column. That's due to spacing in the original file.\n\tdf = pd.read_csv(\"_data\\\\fma_metadata\\\\tracks.csv\",header=1,index_col=0,low_memory=False)\n\t\n\t## Fix the track_id column by dropping\n\t## the problematic row and renaming.\n\tdf = df.drop(\"track_id\",axis=0)\n\tdf.index.rename(\"track_id\",inplace=True)\n\n\treturn df",
"def main():\n\n # Spotify settings\n sp = spotipy.Spotify(client_credentials_manager=SpotifyClientCredentials(\n client_id='118aa19f2b66476fbc062f0ac146d8b5',\n client_secret='7ca95a3159ab4391bee70f70d47a9271'\n ))\n # Postgres settings\n conn = psycopg2.connect(\n host = \"postgres\",\n port = \"5432\",\n dbname = \"postgres\",\n user = \"postgres\",\n password = \"postgres1234\",\n )\n # Redis settings\n REDIS_HOST = \"jupyter_redis\"\n REDIS_PORT = 6379\n REDIS = redis.Redis(host=REDIS_HOST, port=REDIS_PORT)\n\n # Spotify data\n search_results = []\n playlists = sp.search(q='covid', type='playlist', limit=50,offset=0)['playlists']\n\n print('- Load Playlists from search query')\n # Load Playlists from search query\n while playlists:\n try:\n for i, playlist in enumerate(playlists['items']):\n search_results.append(playlist['id'])\n print(\"%4d %s %s\" % (i + 1 + playlists['offset'], playlist['id'], playlist['name']))\n if playlists['next']:\n playlists = sp.next(playlists)['playlists'] # Get next playlist given a page result\n else:\n playlists = None\n except Exception as e:\n playlists = None\n print('Done')\n\n \n\n\n print('- Load tracks into postgres')\n ## Load information into Postgres\n counter = 0\n final_informations = []\n columns = 'id_playlist,name_playlist,id_track,name_track,timestamp,danceability,energy,key,loudness,mode,speechiness,acousticness,instrumentalness,liveness,valence,tempo,duration_ms,time_signature'\n\n for playlist_id in search_results:\n # If is not in redis\n if(REDIS.get(playlist_id) == None):\n try:\n playlist_complete = sp.playlist(playlist_id)\n tracks = playlist_complete['tracks']\n while tracks:\n for track in tracks['items']:\n audio_features = sp.audio_features(track['track']['id'])[0]\n # Open cursor\n cur = conn.cursor()\n # Insert\n cur.execute(f'insert into spotify_details ({columns}) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)',\n (\n playlist_complete['id'],\n playlist_complete['name'],\n track['track']['id'],\n track['track']['name'],\n track['added_at'],\n audio_features['danceability'],\n audio_features['energy'],\n audio_features['key'],\n audio_features['loudness'],\n audio_features['mode'],\n audio_features['speechiness'],\n audio_features['acousticness'],\n audio_features['instrumentalness'],\n audio_features['liveness'],\n audio_features['valence'],\n audio_features['tempo'],\n audio_features['duration_ms'],\n audio_features['time_signature']\n ))\n # Commit the transition\n conn.commit()\n # Close cursor\n cur.close()\n if tracks['next']:\n tracks = sp.next(tracks) # Get next playlist given a page result\n else:\n tracks = None\n print(f'Done playlist: {counter} of {len(search_results)}')\n counter += 1\n except Exception as e:\n print(e)\n counter += 1\n REDIS.set(playlist_id, 1)\n print('- Added playlist: {}'.format(playlist_id))\n # Close connection\n conn.close()\n\n print('Done! All data are update in principal db and redis!')",
"def tracks(self) -> pd.DataFrame:\n return self._tracks",
"def read_trackings():\n return analytics.select_rows(\n analytics.trackings_table(),\n 0,\n 3)",
"def spotify_track_search(query: str, access_token: str) -> dict:\n response = requests.get(\n \"https://api.spotify.com/v1/search?q={}&type=track\".format(query),\n headers={\"Authorization\": \"Bearer {}\".format(access_token)},\n )\n if (\n response.status_code == 200\n and \"tracks\" in response.text\n and \"items\" in response.text\n ):\n return json.loads(response.text)[\"tracks\"][\"items\"]\n return {\"error\": response.reason, \"status\": response.status_code}",
"def get_artist_songs(self,genre = \"[Not Provided]\"):\n # Search for the artist and get their id\n search_artist = self.search(self.artist_name)\n\n #Prevents the stoppage in case of an Artist having zero lyrics on Genius\n if len(search_artist['response']['hits']) == 0:\n return False\n \n artist_id = str(search_artist['response']['hits'][0]['result']['primary_artist']['id'])\n print(\"ID: \" + artist_id)\n # Initialize DataFrame\n df = pd.DataFrame(columns=['title', 'url'])\n # Iterate through all the pages of the artist's songs\n more_pages = True\n page = 1\n i = 0\n while more_pages:\n # Make a request to get the songs of an artist on a given page\n request_url = self.base_url + 'artists/' + artist_id + '/songs' + '?per_page=50&page=' + str(page)\n response = requests.get(request_url, headers=self.headers).json()\n\n # For each song which the given artist is the primary_artist of the song, add the song title and\n # Genius URL to the DataFrame\n for song in response['response']['songs']:\n if str(song['primary_artist']['id']) == artist_id:\n title = song['title']\n url = song['url']\n df.loc[i] = [title, url]\n i += 1\n page += 1\n\n if response['response']['next_page'] is None:\n more_pages = False\n\n \n # Get the HTML and Song Lyrics from helper methods in the class\n df['artist'] = self.artist_name\n df['html'] = df['url'].apply(self.get_song_html)\n df['lyrics'] = df.apply(lambda row: self.get_lyrics(row.html), axis=1)\n #Uncomment to use the genre method otherwise\n #df['genre'] = df.apply(lambda row: self.get_genre(row.html), axis=1)\n df['genre'] = genre\n \n del df['url']\n del df['html']\n\n self.artist_songs = df\n\n return self.artist_songs",
"def spotify_tracklist():\n sp = credentials()\n chart = chartdata()\n trackid_list = []\n #find a way to get track IDS\n for track in chart:\n searchQuery = track[0]\n searchResults = sp.search(q=searchQuery, limit=1, type='track', market=\"US\")\n trackid_list.append(searchResults['tracks']['items'][0]['uri'])\n return trackid_list",
"def get_user_saved_tracks(token):\n saved_tracks_endpoint = 'https://api.spotify.com/v1/me/tracks'\n saved_tracks = []\n headers = {'Authorization': 'Bearer %s' % token}\n\n # handles initial call to tracks endpoint\n r = requests.get(saved_tracks_endpoint, headers=headers)\n if r.status_code != 200:\n return None\n track_json = r.json()\n saved_tracks += track_json['items']\n\n next_page = track_json['next']\n # saved tracks endpoint is paginated\n while next_page:\n r = requests.get(next_page, headers=headers)\n track_json = r.json()\n saved_tracks += track_json['items']\n next_page = track_json['next']\n\n return saved_tracks",
"def process_song_file(cur, filepath):\n df = pd.read_json(filepath, typ='series')\n\n columns = ['song_id', 'title', 'artist_id', 'year', 'duration']\n song_data = df[[*columns]]\n cur.execute(song_table_insert, song_data)\n\n columns = ['artist_id', 'artist_name', 'artist_location', 'artist_latitude', 'artist_longitude']\n artist_data = df[[*columns]]\n cur.execute(artist_table_insert, artist_data)"
]
| [
"0.70953184",
"0.6983967",
"0.69435596",
"0.6915536",
"0.66690826",
"0.666754",
"0.65074974",
"0.6507288",
"0.6480557",
"0.6400821",
"0.63706774",
"0.6331801",
"0.62969965",
"0.62750673",
"0.62064695",
"0.618582",
"0.6146519",
"0.6093236",
"0.6091046",
"0.60655224",
"0.6011257",
"0.5993359",
"0.5992601",
"0.5987642",
"0.59619033",
"0.5938268",
"0.5926034",
"0.5895007",
"0.58581877",
"0.5827687"
]
| 0.74175096 | 0 |
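A minimal usage sketch for the saved-songs record above, assuming an authenticated spotipy client with the user-library-read scope; the client setup and paging loop are illustrative assumptions and are not part of the dataset record.

import pandas as pd
import spotipy
from spotipy.oauth2 import SpotifyOAuth

# Assumed client setup: credentials are read from the standard SPOTIPY_* environment variables.
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope="user-library-read"))

# Page through the saved-tracks endpoint and flatten each page with the helper above.
pages = []
offset = 0
while True:
    results = sp.current_user_saved_tracks(limit=50, offset=offset)
    if not results["items"]:
        break
    pages.append(create_df_saved_songs(results))
    offset += 50

# Concatenate the per-page DataFrames (empty DataFrame if the library has no saved tracks).
saved_df = pd.concat(pages, ignore_index=True) if pages else pd.DataFrame()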
Reads in the spotipy query results for user top artists and returns a DataFrame with name, id, genres, popularity and uri | def top_artists_from_API(api_results):
df = pd.DataFrame(api_results["items"])
cols = ["name","id","genres","popularity","uri"]
return df[cols] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_df_top_songs(api_results):\r\n #create lists for df-columns\r\n track_name = []\r\n track_id = []\r\n artist = []\r\n album = []\r\n duration = []\r\n popularity = []\r\n #loop through api_results\r\n for items in api_results['items']:\r\n try:\r\n track_name.append(items['name'])\r\n track_id.append(items['id'])\r\n artist.append(items[\"artists\"][0][\"name\"])\r\n duration.append(items[\"duration_ms\"])\r\n album.append(items[\"album\"][\"name\"])\r\n popularity.append(items[\"popularity\"])\r\n except TypeError:\r\n pass\r\n # Create the final df \r\n df = pd.DataFrame({ \"track_name\": track_name, \r\n \"album\": album, \r\n \"track_id\": track_id,\r\n \"artist\": artist, \r\n \"duration\": duration, \r\n \"popularity\": popularity})\r\n\r\n return df",
"def gen_user_artist_dataframe():\n print(\"Generating dataframe from lastfm usernames.\")\n user_to_id_dict = lastfm_data.get_users_and_ids()\n playcounts = defaultdict(dict)\n users = user_to_id_dict.keys()\n count = 0\n for user in users:\n count += 1\n top_artist_dict = get_top_artists(user)\n top_artists = top_artist_dict.keys()\n for artist in top_artists:\n playcounts[user][artist] = top_artist_dict[artist]\n print(str(count) + \"/\" + str(len(users)) + \" users counted.\")\n\n df = DataFrame(playcounts).T.fillna(0.0)\n return df",
"def current_user_top_artists(\n self, limit=20, offset=0, time_range=TimeRange.MEDIUM_TERM, **kwargs\n ):\n return self._get(\n API.MY_TOP.value.format(type=\"artists\"), # pylint: disable=no-member\n time_range=TimeRange(time_range).value,\n limit=limit,\n offset=offset,\n **kwargs,\n )",
"def get_top_artist_tracks(session, number_of_artist):\n try:\n if not issubclass(type(session), sqlalchemy.orm.session.Session):\n raise AttributeError(\"session not passed correctly, should be of type 'sqlalchemy.orm.session.Session' \")\n\n if not issubclass(type(number_of_artist), int) or number_of_artist < 1:\n raise AttributeError(\"number of artist should be integer and greater than 0\")\n\n LOGGER.info(\"Performing Read Operation\")\n\n # Selecting the Artist id, Artist Name, and count of track id\n query = session.query(models.AlbumTable.artist_id, models.ArtistTable.name,\n func.count(models.TracksTable.track_id).label(\"number_of_tracks\"))\n\n # Joining tracks table and album table\n query = query.join(models.AlbumTable, models.TracksTable.album_id == models.AlbumTable.album_id)\n query = query.join(models.ArtistTable, models.AlbumTable.artist_id == models.ArtistTable.artist_id)\n\n # Grouping by Artist Id\n query = query.group_by(models.AlbumTable.artist_id)\n\n # Sorting by number_of_tracks and artist id\n query = query.order_by(desc(\"number_of_tracks\"), models.AlbumTable.artist_id)\n\n results = query.limit(number_of_artist).all()\n\n if not results:\n raise NoResultFound(\"No Records Found\")\n\n LOGGER.info(\"\\n\\nThe Top %s Artist based on number of tracks are\", number_of_artist)\n\n print(\"\\n\\n\")\n print(\"===\" * 50)\n print(\"\\n\\n\")\n\n LOGGER.info(\"\\n\\n %s\", tabulate(results, headers=[\"Artist ID\", \"Artist Name\", \"Number Of Tracks\"],\n tablefmt=\"grid\"))\n\n print(\"\\n\\n\")\n print(\"===\" * 50)\n print(\"\\n\\n\")\n except AttributeError as err:\n LOGGER.error(err)\n except NoResultFound as err:\n LOGGER.error(err)\n finally:\n session.close()",
"def artist_top_tracks(req):\n\n artist = req.get('result').get('parameters').get('artist')\n logger.info('received {0} request for artist=\"{1}\"'.format('artist_top_tracks', artist))\n top_tracks = cached_result('get_artist_top_tracks', [artist], {})\n speech = 'Unable to locate top tracks for {artist}'.format(artist=artist)\n\n if top_tracks is not None:\n speech = 'The most popular songs for {artist} are'.format(artist=artist)\n for track in top_tracks:\n speech += '. ' + track\n\n return jsonify(\n {'speech': speech,\n 'displayText': speech,\n 'source': 'last-assist'})",
"def generate_artist_data(name, use_id=False):\n if use_id:\n query = \"http://54.235.241.196/{0}\".format(name)\n artist_json_data = get_raw_json(query)\n return artist_json_data\n\n query = \"http://54.235.241.196/search?t=artist&q={0}&geo=US\".format(\n quote(name))\n json_data = get_raw_json(query)\n top_hit_json = json_data[\"data\"][0]\n\n return top_hit_json",
"def get_albums_from_artists(sp, artist_uri_list):\n\n # Create header for output df\n albums_list = [[\"name\", \"album_uri\", \"album_release_date\", \"artist_uri\"]]\n\n print(\"Log: Pulling data from Spotify. This can take a while...\")\n\n # Loop through list of artist uris\n for artist_uri in artist_uri_list:\n # Get album from artist\n albums = sp.artist_albums(artist_uri)\n \n # Append each album to list\n for album in albums[\"items\"]:\n album_name = album[\"name\"]\n album_uri = album[\"uri\"]\n album_release_date = album[\"release_date\"]\n albums_list.append([album_name, album_uri, album_release_date, artist_uri])\n\n # Create df from list of albums for all artist\n albums_df = pd.DataFrame(data=albums_list[1:], columns=albums_list[0])\n\n print(\"Log: Finished pulling all albums from artist.\")\n return albums_df",
"def get_top_artists(\n self, period: Period, limit: int = 50, page: int = 1\n ) -> ListModel[Artist]:\n assert isinstance(period, Period)\n return self.retrieve(\n bind=Artist,\n flatten=\"artist\",\n params=dict(\n method=\"user.getTopArtists\",\n user=self.name,\n limit=limit,\n page=page,\n period=period.value,\n ),\n )",
"def top_n(userid):\n agg = [s[\"search_id\"] for s in db_searches.find()]\n table = pd.DataFrame()\n table[\"searches\"] = Counter(agg).keys()\n table[\"count\"] = Counter(agg).values()\n table = table.sort_values(\"count\", ascending=False)\n table = table[:10]\n search_ids = table[\"searches\"].values\n counts = table[\"count\"].values\n n = 0\n top_n = []\n while n < len(search_ids):\n top_n.append([str(db_search_terms.find_one({\"_id\": search_ids[n]}).get(\"value\")), str(counts[n])])\n n += 1\n jsonob = jsonify(top_n=top_n)\n return jsonob",
"def top_tracks(genre):\n\tartist = random.choice(genre_artist[genre])\n\ttop_tracks = search_for_artist_top_tracks(artist)\n\titems = []\n\tif top_tracks:\n\t\tfor track in top_tracks:\n\t\t\titems.append({\"artist\": track[\"artists\"][0][\"name\"], \"popularity\": track[\"popularity\"], \"track\": track[\"name\"],\n\t\t\t\t \"preview_url\": track[\"preview_url\"], \"album_image_url\": track[\"album\"][\"images\"][2][\"url\"]})\n\t\titems = sorted(items, key=lambda x: x['popularity'], reverse=True)\n\t\tfor item in items:\n\t\t\tdel item['popularity']\n\t\treturn items\n\telse:\n\t\treturn None",
"def search_for_artist_top_tracks(name):\n\tartist_id = search_for_artist(name)\n\ttoken = get_token()\n\tif artist_id and token:\n\t\theaders = {\"Content-Type\": \"application/json\", \"Authorization\": \"Bearer \" + token}\n\t\toptions = {'country': 'TR'}\n\t\tresponse = requests.get(\n\t\t\t'https://api.spotify.com/v1/artists/'+artist_id+'/top-tracks',\n\t\t\theaders=headers,\n\t\t\tparams=options\n\t\t)\n\t\tif response.status_code == 200:\n\t\t\tcontent = json.loads(response.content.decode('utf-8'))\n\t\t\tif content:\n\t\t\t\treturn content['tracks']\n\t\t\telse: return None\n\t\telse:\n\t\t\treturn None\n\telse:\n\t\treturn None",
"async def artists(self, ctx: BBContext):\n\n query = \"\"\"SELECT DISTINCT artist_name, COUNT(*)\n FROM extras.arts\n WHERE artist_name IS NOT NULL\n GROUP BY artist_name\n ORDER BY COUNT(*) DESC\"\"\"\n\n args = [query]\n\n con = await ctx.get_connection()\n data: List[asyncpg.Record] = await con.fetch(*args)\n view = ArtsLeaderboardPagination(data, ctx.author) # type: ignore (Direct messages intent is not being used so author can only be a member)\n await view.start(ctx.channel)",
"def create_df_recommendations(api_results):\r\n track_name = []\r\n track_id = []\r\n artist = []\r\n album = []\r\n duration = []\r\n popularity = []\r\n for items in api_results['tracks']:\r\n try:\r\n track_name.append(items['name'])\r\n track_id.append(items['id'])\r\n artist.append(items[\"artists\"][0][\"name\"])\r\n duration.append(items[\"duration_ms\"])\r\n album.append(items[\"album\"][\"name\"])\r\n popularity.append(items[\"popularity\"])\r\n except TypeError:\r\n pass\r\n df = pd.DataFrame({ \"track_name\": track_name, \r\n \"album\": album, \r\n \"track_id\": track_id,\r\n \"artist\": artist, \r\n \"duration\": duration, \r\n \"popularity\": popularity})\r\n\r\n return df",
"def topAuthors():\n c = db.cursor()\n c.execute(\"select name, sum(hits) as hits\\\n from authorhits group by name\\\n order by hits desc;\")\n results = c.fetchall()\n c.close()\n return results",
"def get_all_tracks_from_artists(sp, artist_uri_list):\n\n track_list = [[\"track_name\", \"track_uri\", \"track_release_date\", \"artist_name\"]]\n track_df = pd.DataFrame(columns=track_list[0])\n\n print(\"Log: Pulling data from Spotify. This can take a while...\")\n\n for artist_uri in artist_uri_list:\n # Get artist name and albums\n artist_name = sp.artist(artist_uri)[\"name\"]\n albums = get_albums_from_artists(sp, [artist_uri])\n\n # Get tracks from artist albums\n tracks_artist_df = get_tracks_from_albums(sp, albums[\"album_uri\"].to_list())\n tracks_artist_df[\"artist_name\"] = artist_name\n\n # Append new songs to dataframe\n track_df = track_df.append(tracks_artist_df)\n \n print(\"Log: Finished pulling all tracks from artist.\")\n return track_df",
"def get_artists_most_played(session_):\n # artists = session_.query(Artist).order_by(Artist.name.asc()).paginate()\n artists = session_.query(Artist).order_by(Artist.plays.desc()).all()\n return artists",
"def get_popular_authors():\n query_command = \"SELECT * from popular authors LIMIT 3\"\n query_data = run_query(query_command)\n return query_data",
"def scrapeSpotify():\n # Set Spotify authentication token \n token = util.prompt_for_user_token(username, scope, clientid, clientsecret, redirecturi)\n \n if token: # Authenticate with Spotify\n # Store dictionary of scraped values from scraping function\n if debugging == True:\n cities = DataCollection.test() # DEBUGGING ONLY\n #cities = DataCollection.scrape_spotify_info(limiting, limit_cities) \n #return jsonify(cities)\n else:\n cities = DataCollection.scrape_spotify_info(limiting, limit_cities) # THE REAL THING\n\n # Loop through all cities in dataset\n i = 0\n for city in cities:\n # Exit out of for loop at 2 if we are limiting city loop iterations\n if limiting == True and i == limit_cities:\n break \n #\n # Begin Spotify analysis (e.g., determine popularity for each artist in city list, top track)\n #\n sp = spotipy.Spotify(auth=token)\n\n # Loop through the top artists for this city, and determine the popularity values\n i = 0\n top_artists = []\n artist_names = []\n for top_artist in city[\"top_artists\"]:\n # Exit out of for loop at appropriate threshold, if we are limiting artist iterations\n if limiting == True and i == limit_artists:\n break\n\n i += 1\n # *** Example artist value in dictionary ***\n # { 'artist': 'Bobby Pulido',\n # 'tracks': ['spotify:track:1tg7ZzCAkjDNENdWL7WuIr',\n # 'spotify:track:2JJSGhPpATm8lXeYjD95fw',\n # 'spotify:track:5iuGn3RXvfvHIyIe8fyxBE'\n # ],\n # 'popularity': 99 <--------- *** BEING ADDED ***\n # }\n # Get info about the first artist track\n urn = top_artist[\"tracks\"][0]\n track = sp.track(urn)\n\n # Get the artist's Spotify URI & name\n artist_uri = track['artists'][0]['uri']\n artist_name = track['artists'][0]['name']\n\n # Set the artist name to the first artist attributed to the song\n top_artist[\"artist\"] = artist_name\n\n # Get the artist popularity, and add it to their 'top_artist' item\n artist_info = sp.artist(artist_uri)\n artist_popularity = artist_info[\"popularity\"]\n top_artist[\"popularity\"] = artist_popularity\n\n # Get the artist genres, and add it to their 'top_artist' item\n artist_genres = artist_info[\"genres\"]\n top_artist[\"genres\"] = artist_genres\n\n # If not already added, append updated top_artist object to master collection\n if artist_name not in artist_names:\n top_artists.append(top_artist)\n \n # Track current artists in flat list to avoid duplicates\n artist_names.append(artist_name) \n\n # Sort 'top_artists' by popularity in descending order, update the field in the city object\n top_artists.sort(key=lambda x: x[\"popularity\"], reverse=True)\n city[\"top_artists\"] = top_artists\n\n # Artist & song popularity logic:\n # Build 'top_5_artists' list: grab top 5 (by popularity) from 'top_artists' \n top_10_artists = []\n i_art = 0\n for art in top_artists:\n if i_art < 10:\n top_10_artists.append(art[\"artist\"])\n \n i_art += 1\n \n # Update 'top_5_artists' field in the city object\n city[\"top_5_artists\"] = top_10_artists[:5]\n\n # Loop through all tracks for this city, and create a new list of objects with the track popularity\n # BEFORE: [trk1, trk2, trk3, ...]\n # AFTER: [\n # {'track': trk1, 'popularity': pop1, 'name': 'El Baile de Gorila', 'artist': 'Mossino'}\n # {'track': trk2, 'popularity': pop2}\n # ...\n # ] \n i = 0\n tracks = []\n highest_popularity = 0\n most_popular_track = \"\"\n for trk in city[\"track_ids\"]:\n # Exit out of for loop at appropriate threshold, if we are limiting track iterations\n if limiting == True and i == limit_tracks:\n break\n\n i += 1\n # Get Spotify track 
metadata \n track = sp.track(trk)\n \n # Get the track name, artist, and popularity -- and add it to the object\n current_track_name = track['name']\n current_track_artist = track['artists'][0]['name']\n current_track_popularity = track['popularity']\n track_info = { \n \"track\": trk, \n \"popularity\": current_track_popularity,\n \"artist\": current_track_artist,\n \"name\": current_track_name\n }\n \n # Append updated object to track_ids array\n tracks.append(track_info)\n\n # For the top 10 artists, determine the song with the highest popularity\n if current_track_artist in top_10_artists:\n # Determine most popular track\n if highest_popularity < current_track_popularity:\n most_popular_track = trk\n highest_popularity = current_track_popularity\n most_popular_artist = current_track_artist\n most_popular_track_name = current_track_name \n \n #print(\"most popular track: \" + most_popular_track)\n #print(\"highest popularity: \" + str(highest_popularity))\n #print(\"current track: \" + trk )\n \n # Update current city value with updated 'tracks' array info\n city[\"track_ids\"] = tracks\n\n # Update current city's 'top_track' field with the most popular track info\n mostpopular_track_info = { \n \"track\": most_popular_track, \n \"popularity\": highest_popularity,\n \"artist\": most_popular_artist,\n \"name\": most_popular_track_name\n }\n city[\"top_track\"] = mostpopular_track_info\n\n if debugging == True:\n # **** Print out resulting object (TESTING ONLY) ****\n pprint.pprint(city)\n else:\n # **** Insert the current city record into the MongoDB collection ****\n db = connectToMongo()\n db.Cities.update( { \"city\": city[\"city\"] }, \n city,\n upsert=True\n )\n \n # Iterate counter\n i += 1\n else: \n print(\"Connection to Spotify API failed - token invalid.\")\n\n return getJSON(wrapGeoJSON(cities))",
"def get_artists_recent_played(session_):\n # artists = session_.query(Artist).order_by(Artist.name.asc()).paginate()\n artists = session_.query(Artist).order_by(Artist.played_at.asc()).all()\n return artists",
"def get_recommendations(artists = tuple(), genres = tuple(), limit = 20, features = True, client = None):\n\n recs = client.recommendations(seed_artists = artists, seed_genres = genres, limit = limit)\n tracks = recs['tracks']\n\n # TODO: need a compose function...\n to_keep = (\n 'album_name', 'artist_name', 'name', 'popularity', 'duration_ms',\n 'explicit', 'id'\n )\n rows = list(map(row_filter(to_keep, False), map(_hoist_track_info, tracks)))\n out = pd.DataFrame(rows)\n\n track_ids = [row['id'] for row in rows]\n if features:\n extra_cols = ['uri', 'type', 'duration_ms', 'analysis_url', 'track_href']\n return out.merge(\n get_track_features(track_ids).drop(columns = extra_cols),\n on = \"id\"\n )\n\n return out",
"def topArticles():\n c = db.cursor()\n c.execute(\"select titles.title, tophits.hits\\\n from tophits, titles\\\n where tophits.path = titles.slug\\\n order by hits desc limit 3;\")\n results = c.fetchall()\n c.close()\n return results",
"def _get_top_tracks(artist, limit):\n\n l = []\n for track in _lastfm.get_artist(artist).get_top_tracks(limit=limit):\n track = track.item\n l.append({\"artist\": track.get_artist().get_name(), \"title\": track.get_title()})\n \n return l",
"def printTopAuthors():\n query = \"\"\"\n SELECT author_article_popularity_view.author,\n SUM(author_article_popularity_view.views) AS total_views\n FROM author_article_popularity_view\n GROUP BY author_article_popularity_view.author\n ORDER BY total_views DESC;\n \"\"\"\n cursor = connection.cursor()\n cursor.execute(query)\n results = cursor.fetchall()\n print(\"\\nTop authors of all time: \")\n for i, result in enumerate(results):\n print(\"{}. {} - {:,} views\".format(i + 1, result[0], result[1]))",
"def get_artist_songs(self,genre = \"[Not Provided]\"):\n # Search for the artist and get their id\n search_artist = self.search(self.artist_name)\n\n #Prevents the stoppage in case of an Artist having zero lyrics on Genius\n if len(search_artist['response']['hits']) == 0:\n return False\n \n artist_id = str(search_artist['response']['hits'][0]['result']['primary_artist']['id'])\n print(\"ID: \" + artist_id)\n # Initialize DataFrame\n df = pd.DataFrame(columns=['title', 'url'])\n # Iterate through all the pages of the artist's songs\n more_pages = True\n page = 1\n i = 0\n while more_pages:\n # Make a request to get the songs of an artist on a given page\n request_url = self.base_url + 'artists/' + artist_id + '/songs' + '?per_page=50&page=' + str(page)\n response = requests.get(request_url, headers=self.headers).json()\n\n # For each song which the given artist is the primary_artist of the song, add the song title and\n # Genius URL to the DataFrame\n for song in response['response']['songs']:\n if str(song['primary_artist']['id']) == artist_id:\n title = song['title']\n url = song['url']\n df.loc[i] = [title, url]\n i += 1\n page += 1\n\n if response['response']['next_page'] is None:\n more_pages = False\n\n \n # Get the HTML and Song Lyrics from helper methods in the class\n df['artist'] = self.artist_name\n df['html'] = df['url'].apply(self.get_song_html)\n df['lyrics'] = df.apply(lambda row: self.get_lyrics(row.html), axis=1)\n #Uncomment to use the genre method otherwise\n #df['genre'] = df.apply(lambda row: self.get_genre(row.html), axis=1)\n df['genre'] = genre\n \n del df['url']\n del df['html']\n\n self.artist_songs = df\n\n return self.artist_songs",
"def print_query_results(top, ranked_docs, tweets_dict):\n print(\"\\n======================\\nTop {} results out of {} for the seached query:\\n\".format(top, len(ranked_docs)))\n for tweet_id in ranked_docs[:top]:\n tweet_object = tweets_dict[tweet_id]\n txt = tweet_object[\"text\"]\n usr = tweet_object[\"user\"][\"name\"]\n date = tweet_object[\"created_at\"]\n hashtags = tweet_object[\"entities\"][\"hashtags\"]\n favs = tweet_object[\"favorite_count\"]\n rt = tweet_object[\"retweet_count\"]\n urls = tweet_object[\"entities\"][\"urls\"]\n print(\"\\n==================================================================\\n\")\n print(\"Username %s | Tweet: %s\\n Date %s\\n Likes %s| Retweets %s\"%(usr, txt, date, favs, rt))\n if hashtags:\n print(\"Hashtags: \")\n for hashtag in hashtags:\n print(hashtag)\n if urls:\n print(\"URLs: \")\n for url in urls:\n print(url[\"url\"])",
"def test_top_songs(self):\n \n rss = AppleRSS()\n objs = rss.get_top_songs(limit=10)\n \n self.__test_artists('top_songs', objs)",
"def get_results(r):\n myDict = {}\n for name in r[\"results\"]:\n myDict[name[\"name\"]] = {\n \"rank\": name[\"rank\"],\n \"ticker\": name[\"ticker\"],\n \"upvotes\": name[\"upvotes\"],\n \"mentions\": name[\"mentions\"],\n \"mentions_24h_ago\": name[\"mentions_24h_ago\"],\n }\n df = pd.DataFrame.from_dict(myDict, orient=\"index\")\n df[\"rank\"] = df[\"rank\"].astype(int)\n df[\"upvotes\"] = df[\"upvotes\"].astype(int)\n df[\"mentions\"] = df[\"mentions\"].astype(int)\n df[\"mentions_24h_ago\"] = df[\"mentions_24h_ago\"].astype(int)\n\n df[\"delta_mentions_24h\"] = df[\"mentions\"] - df[\"mentions_24h_ago\"]\n df = df[~(df[\"upvotes\"] <= 1000)]\n df = df.sort_values(by=[\"delta_mentions_24h\"], ascending=False)\n return df",
"def get_artist_audio_features(q, interactive = False, genre_delimiter = '-!!-', to_file = '', client = None):\n query = client.search(q = q, type = \"artist\")\n items = query['artists']['items']\n\n if not items:\n raise Exception(\"No artists found\")\n\n if interactive:\n print(\"Select the artist to use...\")\n print(\"\\n\".join(\"[{}]: {}\".format(ii, entry['name']) for ii, entry in enumerate(items)))\n artist_indx = int(input(\"artist number: \").strip())\n if artist_indx > len(items):\n raise IndexError(\"Selected number higher than options available\")\n artist = items[artist_indx]\n else:\n artist = items[0]\n\n # get artist genres\n artist_genres = genre_delimiter.join(artist['genres']) if genre_delimiter else None\n\n # get artist albums\n albums = get_artist_albums(artist['id'])\n albums['artist_genres'] = artist_genres\n\n # get album popularity\n album_popularity = get_album_popularity(albums.id)\n\n # get album tracks\n tracks = get_album_tracks(albums.id)\n\n # get track audio features\n features = get_track_features(tracks.id)\n\n # get track popularity\n popularity = get_track_popularity(tracks.id)\n\n album_data = albums.merge(album_popularity, 'left', 'id')\n\n track_data = tracks \\\n .drop(columns = ['type']) \\\n .merge(popularity, 'left', 'id') \\\n .merge(features.drop(columns = ['uri', 'type', 'duration_ms']), 'left', 'id')\n\n\n merged = prefix_merge(album_data, track_data, ['album_', 'track_'], how = 'left', on = 'album_id')\n\n if to_file:\n merged.to_csv(to_file)\n\n return merged",
"def get_artists():\n return query_multiple(request.args, artist_search, \\\n artist_filter, Artist, artists_schema)",
"def popular_authors() :\n query = \"\"\"SELECT authors.name,count(*) AS total_views FROM authors,articles,log WHERE log.path like concat ('/article/',articles.slug)\n AND articles.author=authors.id group by authors.name order by total_views desc\"\"\"\n result = get_data(query)\n print(\" 2. The most popular articles authors of all time:\")\n print(\"\")\n for record in result :\n print(' ' +' ' + str(record[0]) + ' -' + ' ' + str(record[1]) + ' ' +'views')\n print(\" \")"
]
| [
"0.6749099",
"0.67400587",
"0.64649457",
"0.639353",
"0.6372961",
"0.62815857",
"0.62591445",
"0.62485987",
"0.6230613",
"0.6223021",
"0.6130078",
"0.61162484",
"0.6092042",
"0.60743445",
"0.6073669",
"0.60620964",
"0.601939",
"0.5930538",
"0.5926596",
"0.5926028",
"0.5909494",
"0.59046435",
"0.5881571",
"0.5857311",
"0.58519644",
"0.582054",
"0.5760447",
"0.57216907",
"0.56943756",
"0.5662407"
]
| 0.80026734 | 0 |
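Likewise for the top-artists record above, a short illustrative call sequence, assuming a client authorised with the user-top-read scope; the scope and time_range choice are assumptions, not part of the record.

import spotipy
from spotipy.oauth2 import SpotifyOAuth

# Assumed client setup; the user-top-read scope is required for the top-artists endpoint.
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope="user-top-read"))

# Fetch the listener's top artists and keep only the columns the helper selects.
api_results = sp.current_user_top_artists(limit=20, time_range="medium_term")
top_artists_df = top_artists_from_API(api_results)
print(top_artists_df[["name", "popularity"]].head())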
Reads in the spotipy query results for spotify recommended songs and returns a DataFrame with track_name,track_id,artist,album,duration,popularity | def create_df_recommendations(api_results):
track_name = []
track_id = []
artist = []
album = []
duration = []
popularity = []
for items in api_results['tracks']:
try:
track_name.append(items['name'])
track_id.append(items['id'])
artist.append(items["artists"][0]["name"])
duration.append(items["duration_ms"])
album.append(items["album"]["name"])
popularity.append(items["popularity"])
except TypeError:
pass
df = pd.DataFrame({ "track_name": track_name,
"album": album,
"track_id": track_id,
"artist": artist,
"duration": duration,
"popularity": popularity})
return df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_tracks_from_albums(sp, album_uri_list):\n\n track_list = [[\"track_name\", \"track_uri\", \"track_release_date\"]]\n\n print(\"Log: Pulling data from Spotify. This can take a while...\")\n\n for album_uri in album_uri_list:\n album_tracks = sp.album_tracks(album_uri, limit=50, offset=0)[\"items\"]\n count_tracks_in_album = len(album_tracks)\n album_release_date = sp.album(album_uri)[\"release_date\"]\n\n # This part is probably very slow and should be improved by accessing the API less often\n for track_number in range(count_tracks_in_album):\n track_name = album_tracks[track_number][\"name\"]\n track_uri = album_tracks[track_number][\"uri\"]\n \n track_list.append([track_name, track_uri, album_release_date])\n\n # Create df from list of tracks for all albums\n track_df = pd.DataFrame(data=track_list[1:], columns=track_list[0])\n \n print(\"Log: Finished pulling all tracks from albums.\")\n return track_df",
"def create_df_top_songs(api_results):\r\n #create lists for df-columns\r\n track_name = []\r\n track_id = []\r\n artist = []\r\n album = []\r\n duration = []\r\n popularity = []\r\n #loop through api_results\r\n for items in api_results['items']:\r\n try:\r\n track_name.append(items['name'])\r\n track_id.append(items['id'])\r\n artist.append(items[\"artists\"][0][\"name\"])\r\n duration.append(items[\"duration_ms\"])\r\n album.append(items[\"album\"][\"name\"])\r\n popularity.append(items[\"popularity\"])\r\n except TypeError:\r\n pass\r\n # Create the final df \r\n df = pd.DataFrame({ \"track_name\": track_name, \r\n \"album\": album, \r\n \"track_id\": track_id,\r\n \"artist\": artist, \r\n \"duration\": duration, \r\n \"popularity\": popularity})\r\n\r\n return df",
"def get_all_tracks_from_artists(sp, artist_uri_list):\n\n track_list = [[\"track_name\", \"track_uri\", \"track_release_date\", \"artist_name\"]]\n track_df = pd.DataFrame(columns=track_list[0])\n\n print(\"Log: Pulling data from Spotify. This can take a while...\")\n\n for artist_uri in artist_uri_list:\n # Get artist name and albums\n artist_name = sp.artist(artist_uri)[\"name\"]\n albums = get_albums_from_artists(sp, [artist_uri])\n\n # Get tracks from artist albums\n tracks_artist_df = get_tracks_from_albums(sp, albums[\"album_uri\"].to_list())\n tracks_artist_df[\"artist_name\"] = artist_name\n\n # Append new songs to dataframe\n track_df = track_df.append(tracks_artist_df)\n \n print(\"Log: Finished pulling all tracks from artist.\")\n return track_df",
"def create_df_saved_songs(api_results):\r\n #create lists for df-columns\r\n track_name = []\r\n track_id = []\r\n artist = []\r\n album = []\r\n duration = []\r\n popularity = []\r\n #loop through api_results\r\n for items in api_results[\"items\"]:\r\n try:\r\n track_name.append(items[\"track\"]['name'])\r\n track_id.append(items[\"track\"]['id'])\r\n artist.append(items[\"track\"][\"artists\"][0][\"name\"])\r\n duration.append(items[\"track\"][\"duration_ms\"])\r\n album.append(items[\"track\"][\"album\"][\"name\"])\r\n popularity.append(items[\"track\"][\"popularity\"])\r\n except TypeError: \r\n pass\r\n # Create the final df \r\n df = pd.DataFrame({ \"track_name\": track_name, \r\n \"album\": album, \r\n \"track_id\": track_id,\r\n \"artist\": artist, \r\n \"duration\": duration, \r\n \"popularity\": popularity})\r\n return df",
"def get_playlist_tracks(playlist):\n track_ids = [id for id in load_from_json(f\"playlist_{playlist}.json\") if id is not None]\n tracks = []\n\n for i in range(0, len(track_ids), 50):\n tracks_info = sp.tracks(track_ids[i: i+50])['tracks']\n for track in tracks_info:\n if track:\n tracks.append({\n 'id': track['id'],\n 'name': track['name'],\n 'popularity': track['popularity']\n })\n df = pd.DataFrame(tracks)\n\n file = f\"playlist_{playlist}_df.csv\"\n df.to_csv(file)\n\n return file",
"def get_tracks(search_string=None):\n if search_string is None:\n print('Please use a search string with get_tracks function')\n exit(0)\n item_type = \"tracks\"\n info_dict = spotify.search(q=search_string, limit=10, type='track')\n items = info_dict[item_type][\"items\"]\n tracks = []\n for i in range(len(items)):\n album_name = items[i][\"album\"][\"name\"]\n album_type = items[i][\"album\"][\"album_type\"]\n artists_names = ', '.join([\n items[i][\"artists\"][index][\"name\"]\n for index in range(len(items[i][\"artists\"]))\n ])\n track_name = items[i][\"name\"]\n track_id = items[i][\"id\"]\n track_popularity = items[i][\"popularity\"]\n tracks.append({\"Album Name\": album_name,\n \"Album Type\": album_type,\n \"Artist(s)\": artists_names,\n \"Track Name\": track_name,\n \"Popularity\": track_popularity,\n \"Track ID\": track_id\n })\n tracks.sort(key=lambda d: d['Popularity'], reverse=True)\n return tracks",
"def search_multiple_tracks(search_query, sp):\r\n \r\n # List to store the track ids\r\n track_ids = []\r\n # List to store the track names and artists\r\n tracks = []\r\n\r\n #Search for 10 results in the Spotify API given a search querry\r\n results = sp.search(q = search_query ,limit=10)\r\n results = results['tracks']['items']\r\n\r\n # Extract the track id's, names and artists for all the search results\r\n for i in range(len(results)):\r\n\r\n # Get track id, artist and name\r\n track_id = results[i]['id']\r\n artist = results[i]['artists'][0]['name']\r\n track_name = results[i]['name']\r\n\r\n # Get a string with the artist and track name\r\n track = artist + ' - ' + track_name\r\n\r\n # Append the track id's and track name/artist to the list\r\n track_ids.append(track_id)\r\n tracks.append(track)\r\n\r\n # Make a dictionary of the track id and track name/artist list.\r\n return dict(zip(tracks,track_ids))",
"def get_songs(self):\n search_object = {\"size\":25000,\n 'query': {'term': {FIELD_FINGERPRINTED: True}}, \"fields\": [FIELD_SONGNAME, FIELD_FILE_SHA1,\n FIELD_TOTAL_HASHES]}\n response = self.cursor.search(index = SONGS_INDEXNAME, body=search_object)\n #print(\"get_songs response: \",response)\n arr = []\n for hit in response[\"hits\"][\"hits\"]:\n dct = {\"song_name\":hit['_source'][FIELD_SONGNAME],\"total_hashes\":hit['_source'][FIELD_TOTAL_HASHES],\n \"file_sha1\":hit['_source'][FIELD_FILE_SHA1]}\n arr.append(dct)\n return arr",
"def getSongsSpotify(song_name,access_token):\n song_name = song_name.strip()\n query = \"https://api.spotify.com/v1/search?q={}&type=track&limit=20&offset=0\".format(song_name)\n response = requests.get(\n query,\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(access_token)\n }\n )\n response_json = response.json()\n # \n \n songs_no = response_json[\"tracks\"][\"total\"]\n if songs_no == 0 :\n return {\"songs_no\" : songs_no}\n songs = response_json[\"tracks\"][\"items\"]\n if(len(songs)<5):\n uri = [songs[0][\"uri\"]]\n names = [songs[0][\"name\"]]\n artists = [songs[0][\"artists\"][0][\"name\"]]\n imageUrl = [songs[0][\"album\"][\"images\"][-1][\"url\"]]\n response_obj = {\n \"songs_no\" : songs_no,\n \"uri\" : uri,\n \"names\" : names,\n \"artists\" : artists,\n \"images\" : imageUrl\n }\n else:\n uri = [ songs[i][\"uri\"] for i in range(0,5)]\n names = [songs[i][\"name\"] for i in range(0,5)]\n artists = [songs[i][\"artists\"][0][\"name\"] for i in range(0,5)]\n imageUrl = [songs[i][\"album\"][\"images\"][-1][\"url\"] for i in range(0,5)]\n response_obj = {\n \"songs_no\" : songs_no,\n \"uri\" : uri,\n \"names\" : names,\n \"artists\" : artists,\n \"images\" : imageUrl\n }\n return response_obj",
"def find_song_recommendations(access_token, tracks, target, n, params):\n track_string = '%2C'.join(tracks[:5])\n response = spotify.get_recommendations(access_token, 50, track_string, params)\n\n song_recommendation = response['tracks']\n recommendations = {song['id']: {'name': song['name']} for song in song_recommendation}\n\n moods = get_features_moods(recommendations)\n\n return order_songs(moods, target, n)",
"def get_albums_from_artists(sp, artist_uri_list):\n\n # Create header for output df\n albums_list = [[\"name\", \"album_uri\", \"album_release_date\", \"artist_uri\"]]\n\n print(\"Log: Pulling data from Spotify. This can take a while...\")\n\n # Loop through list of artist uris\n for artist_uri in artist_uri_list:\n # Get album from artist\n albums = sp.artist_albums(artist_uri)\n \n # Append each album to list\n for album in albums[\"items\"]:\n album_name = album[\"name\"]\n album_uri = album[\"uri\"]\n album_release_date = album[\"release_date\"]\n albums_list.append([album_name, album_uri, album_release_date, artist_uri])\n\n # Create df from list of albums for all artist\n albums_df = pd.DataFrame(data=albums_list[1:], columns=albums_list[0])\n\n print(\"Log: Finished pulling all albums from artist.\")\n return albums_df",
"def spotify_track_search(query: str, access_token: str) -> dict:\n response = requests.get(\n \"https://api.spotify.com/v1/search?q={}&type=track\".format(query),\n headers={\"Authorization\": \"Bearer {}\".format(access_token)},\n )\n if (\n response.status_code == 200\n and \"tracks\" in response.text\n and \"items\" in response.text\n ):\n return json.loads(response.text)[\"tracks\"][\"items\"]\n return {\"error\": response.reason, \"status\": response.status_code}",
"def main():\n\n # Spotify settings\n sp = spotipy.Spotify(client_credentials_manager=SpotifyClientCredentials(\n client_id='118aa19f2b66476fbc062f0ac146d8b5',\n client_secret='7ca95a3159ab4391bee70f70d47a9271'\n ))\n # Postgres settings\n conn = psycopg2.connect(\n host = \"postgres\",\n port = \"5432\",\n dbname = \"postgres\",\n user = \"postgres\",\n password = \"postgres1234\",\n )\n # Redis settings\n REDIS_HOST = \"jupyter_redis\"\n REDIS_PORT = 6379\n REDIS = redis.Redis(host=REDIS_HOST, port=REDIS_PORT)\n\n # Spotify data\n search_results = []\n playlists = sp.search(q='covid', type='playlist', limit=50,offset=0)['playlists']\n\n print('- Load Playlists from search query')\n # Load Playlists from search query\n while playlists:\n try:\n for i, playlist in enumerate(playlists['items']):\n search_results.append(playlist['id'])\n print(\"%4d %s %s\" % (i + 1 + playlists['offset'], playlist['id'], playlist['name']))\n if playlists['next']:\n playlists = sp.next(playlists)['playlists'] # Get next playlist given a page result\n else:\n playlists = None\n except Exception as e:\n playlists = None\n print('Done')\n\n \n\n\n print('- Load tracks into postgres')\n ## Load information into Postgres\n counter = 0\n final_informations = []\n columns = 'id_playlist,name_playlist,id_track,name_track,timestamp,danceability,energy,key,loudness,mode,speechiness,acousticness,instrumentalness,liveness,valence,tempo,duration_ms,time_signature'\n\n for playlist_id in search_results:\n # If is not in redis\n if(REDIS.get(playlist_id) == None):\n try:\n playlist_complete = sp.playlist(playlist_id)\n tracks = playlist_complete['tracks']\n while tracks:\n for track in tracks['items']:\n audio_features = sp.audio_features(track['track']['id'])[0]\n # Open cursor\n cur = conn.cursor()\n # Insert\n cur.execute(f'insert into spotify_details ({columns}) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)',\n (\n playlist_complete['id'],\n playlist_complete['name'],\n track['track']['id'],\n track['track']['name'],\n track['added_at'],\n audio_features['danceability'],\n audio_features['energy'],\n audio_features['key'],\n audio_features['loudness'],\n audio_features['mode'],\n audio_features['speechiness'],\n audio_features['acousticness'],\n audio_features['instrumentalness'],\n audio_features['liveness'],\n audio_features['valence'],\n audio_features['tempo'],\n audio_features['duration_ms'],\n audio_features['time_signature']\n ))\n # Commit the transition\n conn.commit()\n # Close cursor\n cur.close()\n if tracks['next']:\n tracks = sp.next(tracks) # Get next playlist given a page result\n else:\n tracks = None\n print(f'Done playlist: {counter} of {len(search_results)}')\n counter += 1\n except Exception as e:\n print(e)\n counter += 1\n REDIS.set(playlist_id, 1)\n print('- Added playlist: {}'.format(playlist_id))\n # Close connection\n conn.close()\n\n print('Done! All data are update in principal db and redis!')",
"def scrapeSpotify():\n # Set Spotify authentication token \n token = util.prompt_for_user_token(username, scope, clientid, clientsecret, redirecturi)\n \n if token: # Authenticate with Spotify\n # Store dictionary of scraped values from scraping function\n if debugging == True:\n cities = DataCollection.test() # DEBUGGING ONLY\n #cities = DataCollection.scrape_spotify_info(limiting, limit_cities) \n #return jsonify(cities)\n else:\n cities = DataCollection.scrape_spotify_info(limiting, limit_cities) # THE REAL THING\n\n # Loop through all cities in dataset\n i = 0\n for city in cities:\n # Exit out of for loop at 2 if we are limiting city loop iterations\n if limiting == True and i == limit_cities:\n break \n #\n # Begin Spotify analysis (e.g., determine popularity for each artist in city list, top track)\n #\n sp = spotipy.Spotify(auth=token)\n\n # Loop through the top artists for this city, and determine the popularity values\n i = 0\n top_artists = []\n artist_names = []\n for top_artist in city[\"top_artists\"]:\n # Exit out of for loop at appropriate threshold, if we are limiting artist iterations\n if limiting == True and i == limit_artists:\n break\n\n i += 1\n # *** Example artist value in dictionary ***\n # { 'artist': 'Bobby Pulido',\n # 'tracks': ['spotify:track:1tg7ZzCAkjDNENdWL7WuIr',\n # 'spotify:track:2JJSGhPpATm8lXeYjD95fw',\n # 'spotify:track:5iuGn3RXvfvHIyIe8fyxBE'\n # ],\n # 'popularity': 99 <--------- *** BEING ADDED ***\n # }\n # Get info about the first artist track\n urn = top_artist[\"tracks\"][0]\n track = sp.track(urn)\n\n # Get the artist's Spotify URI & name\n artist_uri = track['artists'][0]['uri']\n artist_name = track['artists'][0]['name']\n\n # Set the artist name to the first artist attributed to the song\n top_artist[\"artist\"] = artist_name\n\n # Get the artist popularity, and add it to their 'top_artist' item\n artist_info = sp.artist(artist_uri)\n artist_popularity = artist_info[\"popularity\"]\n top_artist[\"popularity\"] = artist_popularity\n\n # Get the artist genres, and add it to their 'top_artist' item\n artist_genres = artist_info[\"genres\"]\n top_artist[\"genres\"] = artist_genres\n\n # If not already added, append updated top_artist object to master collection\n if artist_name not in artist_names:\n top_artists.append(top_artist)\n \n # Track current artists in flat list to avoid duplicates\n artist_names.append(artist_name) \n\n # Sort 'top_artists' by popularity in descending order, update the field in the city object\n top_artists.sort(key=lambda x: x[\"popularity\"], reverse=True)\n city[\"top_artists\"] = top_artists\n\n # Artist & song popularity logic:\n # Build 'top_5_artists' list: grab top 5 (by popularity) from 'top_artists' \n top_10_artists = []\n i_art = 0\n for art in top_artists:\n if i_art < 10:\n top_10_artists.append(art[\"artist\"])\n \n i_art += 1\n \n # Update 'top_5_artists' field in the city object\n city[\"top_5_artists\"] = top_10_artists[:5]\n\n # Loop through all tracks for this city, and create a new list of objects with the track popularity\n # BEFORE: [trk1, trk2, trk3, ...]\n # AFTER: [\n # {'track': trk1, 'popularity': pop1, 'name': 'El Baile de Gorila', 'artist': 'Mossino'}\n # {'track': trk2, 'popularity': pop2}\n # ...\n # ] \n i = 0\n tracks = []\n highest_popularity = 0\n most_popular_track = \"\"\n for trk in city[\"track_ids\"]:\n # Exit out of for loop at appropriate threshold, if we are limiting track iterations\n if limiting == True and i == limit_tracks:\n break\n\n i += 1\n # Get Spotify track 
metadata \n track = sp.track(trk)\n \n # Get the track name, artist, and popularity -- and add it to the object\n current_track_name = track['name']\n current_track_artist = track['artists'][0]['name']\n current_track_popularity = track['popularity']\n track_info = { \n \"track\": trk, \n \"popularity\": current_track_popularity,\n \"artist\": current_track_artist,\n \"name\": current_track_name\n }\n \n # Append updated object to track_ids array\n tracks.append(track_info)\n\n # For the top 10 artists, determine the song with the highest popularity\n if current_track_artist in top_10_artists:\n # Determine most popular track\n if highest_popularity < current_track_popularity:\n most_popular_track = trk\n highest_popularity = current_track_popularity\n most_popular_artist = current_track_artist\n most_popular_track_name = current_track_name \n \n #print(\"most popular track: \" + most_popular_track)\n #print(\"highest popularity: \" + str(highest_popularity))\n #print(\"current track: \" + trk )\n \n # Update current city value with updated 'tracks' array info\n city[\"track_ids\"] = tracks\n\n # Update current city's 'top_track' field with the most popular track info\n mostpopular_track_info = { \n \"track\": most_popular_track, \n \"popularity\": highest_popularity,\n \"artist\": most_popular_artist,\n \"name\": most_popular_track_name\n }\n city[\"top_track\"] = mostpopular_track_info\n\n if debugging == True:\n # **** Print out resulting object (TESTING ONLY) ****\n pprint.pprint(city)\n else:\n # **** Insert the current city record into the MongoDB collection ****\n db = connectToMongo()\n db.Cities.update( { \"city\": city[\"city\"] }, \n city,\n upsert=True\n )\n \n # Iterate counter\n i += 1\n else: \n print(\"Connection to Spotify API failed - token invalid.\")\n\n return getJSON(wrapGeoJSON(cities))",
"def get_playlist_tracks(user, playlist_id, limit=100):\n info_dict = spotify.user_playlist_tracks(user, playlist_id, limit=limit)\n items = info_dict[\"items\"]\n tracks = []\n for i in range(len(items)):\n album_name = items[i][\"track\"][\"album\"][\"name\"]\n album_type = items[i][\"track\"][\"album\"][\"album_type\"]\n artists_names = ', '.join([\n items[i][\"track\"][\"artists\"][index][\"name\"]\n for index in range(len(items[i][\"track\"][\"artists\"]))\n ])\n track_name = items[i][\"track\"][\"name\"]\n popularity = items[i][\"track\"][\"popularity\"]\n track_id = items[i][\"track\"][\"id\"]\n tracks.append({\"Album Name\": album_name,\n \"Album Type\": album_type,\n \"Artist(s)\": artists_names,\n \"Track Name\": track_name,\n \"Popularity\": popularity,\n \"Track ID\": track_id\n })\n tracks.sort(key=lambda d: d['Popularity'], reverse=True)\n return tracks",
"def get_songs_by_artist(artist, linesep=' \\n ', timeout=None):\n df = pd.DataFrame(columns=['Artist', 'Title'])\n url = \"https://lyrics.fandom.com/wiki/Category:Songs_by_\"+urlize(artist)\n df = parse_page_now(url,df)\n return df",
"def get_more_data(song):\n headers = {\n \"User-Agent\": \"ytmdl\"\n }\n payload = {\n \"api_key\": API_KEY,\n \"method\": \"track.getInfo\",\n \"track\": song.track_name,\n \"artist\": song.artist_name,\n \"format\": \"json\"\n }\n\n response = get(API_BASE, headers=headers, params=payload)\n\n # TODO: Add a check to exit if the response code is not 200\n\n track_details = response.json()\n\n # Update the songs attributes\n song.track_number = 1\n\n try:\n song.collection_name = track_details[\"track\"][\"album\"][\"title\"]\n song.track_time = song._convert_time(\n track_details[\"track\"][\"duration\"])\n song.release_date = track_details[\"track\"][\"wiki\"][\"published\"]\n except KeyError:\n # This happens because last.fm do not have consistent data for some songs\n # Just ignore this errors if they occur.\n pass\n\n return song",
"def fetch_tracks_info_df(df):\n\n gen_df = df.copy()\n gen_df = gen_df[['artist_name', 'title', 'release', 'track_id', 'song_id']]\n\n for column_name in gen_df.columns:\n gen_df[column_name] = gen_df[column_name].map(lambda x: ast.literal_eval(x).decode('utf-8'))\n\n gen_df.rename(columns={'release': 'album_name'}, inplace=True)\n gen_df['year'] = df['year']\n\n return gen_df",
"def top_artists_from_API(api_results):\r\n df = pd.DataFrame(api_results[\"items\"])\r\n cols = [\"name\",\"id\",\"genres\",\"popularity\",\"uri\"]\r\n return df[cols]",
"def get_all_tracks():\n query_format = f\"track:\"\n\n search_string_letter_ids = [0]\n\n tracks = {}\n\n total = 0\n\n while search_string_letter_ids is not None:\n search_string = construct_search_string(search_string_letter_ids)\n count = track_count(query_format + search_string)\n print(f\"{search_string} : {count}\")\n if count < 2000:\n for i in range(0, count, 50):\n track_results = sp.search(query_format + search_string, type='track', limit=50, offset=i)\n for t in track_results['tracks']['items']:\n if t['id'] not in tracks:\n total += 1\n tracks[t['id']] = {'name': t['name']}\n\n search_string_letter_ids = get_next_search_string(search_string_letter_ids, last_was_under=True)\n else:\n search_string_letter_ids = get_next_search_string(search_string_letter_ids, last_was_under=False)\n\n print(f\"Tracks Saved In File: {total}\")\n\n file = save_to_json(tracks, f\"tracks.json\")\n return file",
"def search_song(self, name):\n self.logger.debug('Searched for Song: {}'.format(name))\n results = self.sp.search(q='track:' + name, type='track')\n songs = [song for song in results['tracks']['items']]\n i = 1\n songs_ls = []\n table_ls = []\n for song in songs:\n table_ls.append([i,\n song['name'][0:20].strip(),\n song['album']['name'][0:20].strip(),\n \"%0.2f\" % (song['duration_ms'] / 60000),\n song['popularity']])\n songs_ls.append(song['uri'])\n i = i + 1\n return songs_ls, table_ls",
"def spotify_tracklist():\n sp = credentials()\n chart = chartdata()\n trackid_list = []\n #find a way to get track IDS\n for track in chart:\n searchQuery = track[0]\n searchResults = sp.search(q=searchQuery, limit=1, type='track', market=\"US\")\n trackid_list.append(searchResults['tracks']['items'][0]['uri'])\n return trackid_list",
"def fetch_song_data(self, song_ids):\n\t\ttracks_base_url = \"https://api.spotify.com/v1/tracks\"\n\t\theaders = {}\n\t\ttrack_ids = ','.join(song_ids)\n\t\tquery_params = \"/?ids=\"+track_ids\n\t\ttracks_url = tracks_base_url + query_params\n\t\ttracks={}\n\t\theaders['Authorization'] = f\"Bearer {self.token}\"\n\n\t\ttry:\n\t\t\treq = request.Request(url=tracks_url,data=None, headers=headers)\n\t\t\tresponse = request.urlopen(req).read().decode()\n\t\t\ttracks = json.loads(response)\n\t\t\tlogging.info(\"Successfully fetched songs from Spotify!\")\n\t\texcept error.URLError as e:\n\t\t\tresponse = e.read().decode(\"utf8\", 'ignore')\n\t\t\tlogging.error(response)\n\t\treturn tracks",
"def create_df_playlist(api_results,sp = None, append_audio = True):\r\n df = create_df_saved_songs(api_results[\"tracks\"])\r\n if append_audio == True:\r\n assert sp != None, \"sp needs to be specified for appending audio features\"\r\n df = append_audio_features(df,sp)\r\n return df",
"def populate_billboard_scrapables(self):\n results = MongoClient().billboard.spotify.find()\n self.df = pd.DataFrame(\n data=map(\n lambda r: (\n r[\"metadata\"][\"id\"],\n r[\"metadata\"][\"artists\"][0][\"name\"],\n r[\"metadata\"][\"name\"],\n ),\n results,\n ),\n columns=[\"track_id\", \"artist_name\", \"title\"],\n )\n print(f\"Tracks identified to scrape lyrics: {self.df.shape[0]}\")",
"def get_recommendations(df,song_title, similarity_score, num_recommends = 5):\r\n indices = pd.Series(df.index, index = df['track_name']).drop_duplicates()\r\n idx = indices[song_title]\r\n sim_scores = list(enumerate(similarity_score[idx]))\r\n sim_scores = sorted(sim_scores, key = lambda x: x[1],reverse = True)\r\n top_scores = sim_scores[1:num_recommends+1]\r\n song_indices = [i[0] for i in top_scores]\r\n return df[\"track_name\"].iloc[song_indices]",
"def get_artist_songs(self,genre = \"[Not Provided]\"):\n # Search for the artist and get their id\n search_artist = self.search(self.artist_name)\n\n #Prevents the stoppage in case of an Artist having zero lyrics on Genius\n if len(search_artist['response']['hits']) == 0:\n return False\n \n artist_id = str(search_artist['response']['hits'][0]['result']['primary_artist']['id'])\n print(\"ID: \" + artist_id)\n # Initialize DataFrame\n df = pd.DataFrame(columns=['title', 'url'])\n # Iterate through all the pages of the artist's songs\n more_pages = True\n page = 1\n i = 0\n while more_pages:\n # Make a request to get the songs of an artist on a given page\n request_url = self.base_url + 'artists/' + artist_id + '/songs' + '?per_page=50&page=' + str(page)\n response = requests.get(request_url, headers=self.headers).json()\n\n # For each song which the given artist is the primary_artist of the song, add the song title and\n # Genius URL to the DataFrame\n for song in response['response']['songs']:\n if str(song['primary_artist']['id']) == artist_id:\n title = song['title']\n url = song['url']\n df.loc[i] = [title, url]\n i += 1\n page += 1\n\n if response['response']['next_page'] is None:\n more_pages = False\n\n \n # Get the HTML and Song Lyrics from helper methods in the class\n df['artist'] = self.artist_name\n df['html'] = df['url'].apply(self.get_song_html)\n df['lyrics'] = df.apply(lambda row: self.get_lyrics(row.html), axis=1)\n #Uncomment to use the genre method otherwise\n #df['genre'] = df.apply(lambda row: self.get_genre(row.html), axis=1)\n df['genre'] = genre\n \n del df['url']\n del df['html']\n\n self.artist_songs = df\n\n return self.artist_songs",
"def get_track_info(track_id):\n items = spotify.track(track_id)\n name = items[\"name\"]\n artists_names = \", \".join([\n items[\"artists\"][x][\"name\"]\n for x in range(len(items[\"artists\"]))\n ])\n album_artists = \", \".join([\n items[\"album\"][\"artists\"][x][\"name\"]\n for x in range(len(items[\"album\"][\"artists\"]))\n ])\n album_type = items[\"album\"][\"album_type\"]\n album_name = items[\"album\"][\"name\"]\n album_release = items[\"album\"][\"release_date\"]\n album_track_number = items[\"track_number\"]\n track_duration = items[\"duration_ms\"]\n images_link = items[\"album\"][\"images\"]\n max_image_res = 0\n max_icon_size = 0\n image_link = \"\"\n icon_link = \"\"\n for image in images_link:\n if image[\"height\"] * image[\"width\"] > max_image_res:\n image_link = image[\"url\"]\n max_image_res = image[\"height\"] * image[\"width\"]\n if image[\"height\"] < 400:\n if image[\"height\"] > max_icon_size:\n max_icon_size = image[\"height\"]\n icon_link = image[\"url\"]\n track = {\"name\": name,\n \"Artist(s)\": artists_names,\n \"Album Artist(s)\": album_artists,\n \"Album Type\": album_type,\n \"Album Name\": album_name,\n \"Album Release\": album_release,\n \"Track Number\": album_track_number,\n \"Track Duration (ms)\": track_duration,\n \"Image Link\": image_link,\n \"Icon Link\": icon_link\n }\n\n for artist in artists_names.split(', '):\n \"\"\"\n Checks for lyrics with song name and artist names\n combination until one is found.\n \"\"\"\n try:\n lyrics = lyricwikia.get_lyrics(artist, name)\n track['lyrics'] = lyrics\n break\n except lyricwikia.LyricsNotFound:\n pass\n\n return track",
"def searchSong(query, lim=40):\n headers = {\n \"User-Agent\": \"ytmdl\"\n }\n payload = {\n \"api_key\": API_KEY,\n \"method\": \"track.search\",\n \"track\": query,\n \"format\": \"json\"\n }\n\n data = []\n\n response = get(API_BASE, headers=headers, params=payload)\n\n if response.status_code != 200:\n print(response.status_code)\n return data\n\n for song in response.json()[\"results\"][\"trackmatches\"][\"track\"]:\n data.append(LastFMSongs(song))\n\n return data",
"def import_spotify(info: dict) -> (str, int):\n url = info[\"playlist_url\"]\n # Validate URL\n matches = (\n re.match(r\"^https?://open\\.spotify\\.com/playlist/([a-zA-Z\\d]*)/?\", url)\n if isinstance(url, str)\n else None\n )\n if not matches:\n return \"Invalid URL\", 400\n playlist_id = matches.group(1)\n query_url = \"https://api.spotify.com/v1/playlists/\" + playlist_id\n query_headers = {\"Authorization\": \"Bearer {}\".format(info[\"access_token\"])}\n # Get/create playlist\n playlist_json = requests.get(query_url, headers=query_headers).json()\n if \"error\" in playlist_json:\n status = playlist_json[\"error\"].get(\"status\")\n message = playlist_json[\"error\"].get(\"message\")\n return (\n message if message else \"Error retrieving playlist\",\n status if status else 500,\n )\n playlist = Playlist(\n name=playlist_json[\"name\"],\n last_sync_spotify=timezone.now(),\n spotify_id=playlist_id,\n )\n if \"user\" in info:\n playlist.owner = PlaylstrUser.objects.filter(id=info[\"user\"]).first()\n if \"owner\" in playlist_json:\n playlist.spotify_creator_id = playlist_json[\"owner\"][\"id\"]\n playlist.spotify_creator_name = playlist_json[\"owner\"][\"display_name\"]\n playlist.save()\n # Get playlist tracks\n tracks_response = requests.get(query_url + \"/tracks\", headers=query_headers)\n if tracks_response.status_code != 200:\n return tracks_response.reason, 500\n tracks_json = tracks_response.json()\n if \"error_description\" in tracks_json:\n return tracks_json[\"error_description\"], 500\n # Get list of tracks\n index = -1\n while \"next\" in tracks_json and tracks_json[\"next\"] is not None:\n for j in tracks_json[\"items\"]:\n index += 1\n track = track_from_spotify_json(j[\"track\"])\n try:\n PlaylistTrack.objects.create(\n playlist=playlist, track=track, index=index\n )\n except IntegrityError as e:\n print(\"Error adding track {}: {}\".format(str(track), str(e)))\n continue\n tracks_json = requests.get(tracks_json[\"next\"], headers=query_headers).json()\n return str(playlist.playlist_id), 200"
]
| [
"0.6814398",
"0.6796387",
"0.6767215",
"0.67084277",
"0.6635598",
"0.6573595",
"0.6545475",
"0.64103746",
"0.6346779",
"0.63356996",
"0.627508",
"0.62113595",
"0.61474127",
"0.61378634",
"0.6125327",
"0.61115545",
"0.6046814",
"0.60272753",
"0.6005331",
"0.5990789",
"0.59723413",
"0.59386617",
"0.5936459",
"0.58974046",
"0.589709",
"0.5889228",
"0.587701",
"0.58035564",
"0.5801614",
"0.57728285"
]
| 0.68723744 | 0 |
Reads in the spotipy query results for a playlist and returns a DataFrame with track_name, track_id, artist, album, duration, popularity and, unless specified otherwise, audio features. | def create_df_playlist(api_results,sp = None, append_audio = True):
df = create_df_saved_songs(api_results["tracks"])
if append_audio == True:
assert sp != None, "sp needs to be specified for appending audio features"
df = append_audio_features(df,sp)
return df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_playlist_tracks(playlist):\n track_ids = [id for id in load_from_json(f\"playlist_{playlist}.json\") if id is not None]\n tracks = []\n\n for i in range(0, len(track_ids), 50):\n tracks_info = sp.tracks(track_ids[i: i+50])['tracks']\n for track in tracks_info:\n if track:\n tracks.append({\n 'id': track['id'],\n 'name': track['name'],\n 'popularity': track['popularity']\n })\n df = pd.DataFrame(tracks)\n\n file = f\"playlist_{playlist}_df.csv\"\n df.to_csv(file)\n\n return file",
"def create_df_top_songs(api_results):\r\n #create lists for df-columns\r\n track_name = []\r\n track_id = []\r\n artist = []\r\n album = []\r\n duration = []\r\n popularity = []\r\n #loop through api_results\r\n for items in api_results['items']:\r\n try:\r\n track_name.append(items['name'])\r\n track_id.append(items['id'])\r\n artist.append(items[\"artists\"][0][\"name\"])\r\n duration.append(items[\"duration_ms\"])\r\n album.append(items[\"album\"][\"name\"])\r\n popularity.append(items[\"popularity\"])\r\n except TypeError:\r\n pass\r\n # Create the final df \r\n df = pd.DataFrame({ \"track_name\": track_name, \r\n \"album\": album, \r\n \"track_id\": track_id,\r\n \"artist\": artist, \r\n \"duration\": duration, \r\n \"popularity\": popularity})\r\n\r\n return df",
"def create_df_saved_songs(api_results):\r\n #create lists for df-columns\r\n track_name = []\r\n track_id = []\r\n artist = []\r\n album = []\r\n duration = []\r\n popularity = []\r\n #loop through api_results\r\n for items in api_results[\"items\"]:\r\n try:\r\n track_name.append(items[\"track\"]['name'])\r\n track_id.append(items[\"track\"]['id'])\r\n artist.append(items[\"track\"][\"artists\"][0][\"name\"])\r\n duration.append(items[\"track\"][\"duration_ms\"])\r\n album.append(items[\"track\"][\"album\"][\"name\"])\r\n popularity.append(items[\"track\"][\"popularity\"])\r\n except TypeError: \r\n pass\r\n # Create the final df \r\n df = pd.DataFrame({ \"track_name\": track_name, \r\n \"album\": album, \r\n \"track_id\": track_id,\r\n \"artist\": artist, \r\n \"duration\": duration, \r\n \"popularity\": popularity})\r\n return df",
"def get_all_tracks_from_artists(sp, artist_uri_list):\n\n track_list = [[\"track_name\", \"track_uri\", \"track_release_date\", \"artist_name\"]]\n track_df = pd.DataFrame(columns=track_list[0])\n\n print(\"Log: Pulling data from Spotify. This can take a while...\")\n\n for artist_uri in artist_uri_list:\n # Get artist name and albums\n artist_name = sp.artist(artist_uri)[\"name\"]\n albums = get_albums_from_artists(sp, [artist_uri])\n\n # Get tracks from artist albums\n tracks_artist_df = get_tracks_from_albums(sp, albums[\"album_uri\"].to_list())\n tracks_artist_df[\"artist_name\"] = artist_name\n\n # Append new songs to dataframe\n track_df = track_df.append(tracks_artist_df)\n \n print(\"Log: Finished pulling all tracks from artist.\")\n return track_df",
"def create_df_recommendations(api_results):\r\n track_name = []\r\n track_id = []\r\n artist = []\r\n album = []\r\n duration = []\r\n popularity = []\r\n for items in api_results['tracks']:\r\n try:\r\n track_name.append(items['name'])\r\n track_id.append(items['id'])\r\n artist.append(items[\"artists\"][0][\"name\"])\r\n duration.append(items[\"duration_ms\"])\r\n album.append(items[\"album\"][\"name\"])\r\n popularity.append(items[\"popularity\"])\r\n except TypeError:\r\n pass\r\n df = pd.DataFrame({ \"track_name\": track_name, \r\n \"album\": album, \r\n \"track_id\": track_id,\r\n \"artist\": artist, \r\n \"duration\": duration, \r\n \"popularity\": popularity})\r\n\r\n return df",
"def get_playlist_feats(playlist_id):\r\n sourcePlaylistID = playlist_id\r\n sourcePlaylist = sp.user_playlist(username, sourcePlaylistID);\r\n tracks = sourcePlaylist[\"tracks\"];\r\n songs = tracks[\"items\"];\r\n\r\n track_ids = []\r\n track_names = []\r\n track_artists = []\r\n\r\n\r\n for i in range(0, len(songs)):\r\n if songs[i]['track']['id'] != None: # Removes the local tracks in your playlist if there is any\r\n track_ids.append(songs[i]['track']['id'])\r\n track_names.append(songs[i]['track']['name'])\r\n track_artists.append(songs[i]['track']['artists'])\r\n\r\n\r\n features = []\r\n for i in range(0,len(track_ids)):\r\n audio_features = sp.audio_features(track_ids[i])[0]\r\n track_popularity = {'popularity': sp.track(track_ids[i])['popularity']}\r\n genre = {'genres': sp.artist(track_artists[i][0]['uri'])['genres']}\r\n audio_features = dict(audio_features, **track_popularity, **genre)\r\n features.append(audio_features)\r\n\r\n\r\n playlist_df = pd.DataFrame(features, index = track_names)\r\n return playlist_df",
"def get_tracks_from_albums(sp, album_uri_list):\n\n track_list = [[\"track_name\", \"track_uri\", \"track_release_date\"]]\n\n print(\"Log: Pulling data from Spotify. This can take a while...\")\n\n for album_uri in album_uri_list:\n album_tracks = sp.album_tracks(album_uri, limit=50, offset=0)[\"items\"]\n count_tracks_in_album = len(album_tracks)\n album_release_date = sp.album(album_uri)[\"release_date\"]\n\n # This part is probably very slow and should be improved by accessing the API less often\n for track_number in range(count_tracks_in_album):\n track_name = album_tracks[track_number][\"name\"]\n track_uri = album_tracks[track_number][\"uri\"]\n \n track_list.append([track_name, track_uri, album_release_date])\n\n # Create df from list of tracks for all albums\n track_df = pd.DataFrame(data=track_list[1:], columns=track_list[0])\n \n print(\"Log: Finished pulling all tracks from albums.\")\n return track_df",
"def get_metadata(self):\n items = self.get_playlist_items()\n uris = [item[\"track\"][\"uri\"] for item in items]\n features = self.API.audio_features(uris)\n\n list_data = []\n\n for idx, item in enumerate(items):\n data = {\"name\": item[\"track\"][\"name\"], \"uri\": item[\"track\"][\"uri\"]}\n data.update(features[idx])\n list_data.append(data)\n\n self.metadata = pd.DataFrame(data=list_data,\n index=range(len(list_data)))\n\n return self.metadata",
"def readPlaylistData(self):\n return gatherPlaylistData(10)",
"def get_tracklist_features(tracks):\n\n # first we construct a list of all track ids and tracknames\n track_ids = []\n track_names = []\n for collection_type in tracks:\n tid = collection_type['id']\n if tid:\n track_ids.append(collection_type['id'])\n track_name = f'{collection_type[\"artists\"][0][\"name\"]} - {collection_type[\"name\"]}'\n track_names.append(track_name)\n # we can only load data in batches\n batch_size = 50\n offset = 0\n\n features = []\n\n while offset + batch_size <= len(track_ids):\n # get one batch of tracks per iteration\n new_features = SP.audio_features(track_ids[offset:offset+batch_size])\n\n # we want to add the trackname to the dataframe\n for i, feature in enumerate(new_features):\n feature['name'] = track_names[offset+i]\n features += new_features\n\n offset += batch_size\n\n # get the remaining tracks that couldnt fill a batch\n features += SP.audio_features(track_ids[offset:])\n return pd.DataFrame(features)",
"def get_albums_from_artists(sp, artist_uri_list):\n\n # Create header for output df\n albums_list = [[\"name\", \"album_uri\", \"album_release_date\", \"artist_uri\"]]\n\n print(\"Log: Pulling data from Spotify. This can take a while...\")\n\n # Loop through list of artist uris\n for artist_uri in artist_uri_list:\n # Get album from artist\n albums = sp.artist_albums(artist_uri)\n \n # Append each album to list\n for album in albums[\"items\"]:\n album_name = album[\"name\"]\n album_uri = album[\"uri\"]\n album_release_date = album[\"release_date\"]\n albums_list.append([album_name, album_uri, album_release_date, artist_uri])\n\n # Create df from list of albums for all artist\n albums_df = pd.DataFrame(data=albums_list[1:], columns=albums_list[0])\n\n print(\"Log: Finished pulling all albums from artist.\")\n return albums_df",
"def get_playlist_tracks(user, playlist_id, limit=100):\n info_dict = spotify.user_playlist_tracks(user, playlist_id, limit=limit)\n items = info_dict[\"items\"]\n tracks = []\n for i in range(len(items)):\n album_name = items[i][\"track\"][\"album\"][\"name\"]\n album_type = items[i][\"track\"][\"album\"][\"album_type\"]\n artists_names = ', '.join([\n items[i][\"track\"][\"artists\"][index][\"name\"]\n for index in range(len(items[i][\"track\"][\"artists\"]))\n ])\n track_name = items[i][\"track\"][\"name\"]\n popularity = items[i][\"track\"][\"popularity\"]\n track_id = items[i][\"track\"][\"id\"]\n tracks.append({\"Album Name\": album_name,\n \"Album Type\": album_type,\n \"Artist(s)\": artists_names,\n \"Track Name\": track_name,\n \"Popularity\": popularity,\n \"Track ID\": track_id\n })\n tracks.sort(key=lambda d: d['Popularity'], reverse=True)\n return tracks",
"def main():\n\n # Spotify settings\n sp = spotipy.Spotify(client_credentials_manager=SpotifyClientCredentials(\n client_id='118aa19f2b66476fbc062f0ac146d8b5',\n client_secret='7ca95a3159ab4391bee70f70d47a9271'\n ))\n # Postgres settings\n conn = psycopg2.connect(\n host = \"postgres\",\n port = \"5432\",\n dbname = \"postgres\",\n user = \"postgres\",\n password = \"postgres1234\",\n )\n # Redis settings\n REDIS_HOST = \"jupyter_redis\"\n REDIS_PORT = 6379\n REDIS = redis.Redis(host=REDIS_HOST, port=REDIS_PORT)\n\n # Spotify data\n search_results = []\n playlists = sp.search(q='covid', type='playlist', limit=50,offset=0)['playlists']\n\n print('- Load Playlists from search query')\n # Load Playlists from search query\n while playlists:\n try:\n for i, playlist in enumerate(playlists['items']):\n search_results.append(playlist['id'])\n print(\"%4d %s %s\" % (i + 1 + playlists['offset'], playlist['id'], playlist['name']))\n if playlists['next']:\n playlists = sp.next(playlists)['playlists'] # Get next playlist given a page result\n else:\n playlists = None\n except Exception as e:\n playlists = None\n print('Done')\n\n \n\n\n print('- Load tracks into postgres')\n ## Load information into Postgres\n counter = 0\n final_informations = []\n columns = 'id_playlist,name_playlist,id_track,name_track,timestamp,danceability,energy,key,loudness,mode,speechiness,acousticness,instrumentalness,liveness,valence,tempo,duration_ms,time_signature'\n\n for playlist_id in search_results:\n # If is not in redis\n if(REDIS.get(playlist_id) == None):\n try:\n playlist_complete = sp.playlist(playlist_id)\n tracks = playlist_complete['tracks']\n while tracks:\n for track in tracks['items']:\n audio_features = sp.audio_features(track['track']['id'])[0]\n # Open cursor\n cur = conn.cursor()\n # Insert\n cur.execute(f'insert into spotify_details ({columns}) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)',\n (\n playlist_complete['id'],\n playlist_complete['name'],\n track['track']['id'],\n track['track']['name'],\n track['added_at'],\n audio_features['danceability'],\n audio_features['energy'],\n audio_features['key'],\n audio_features['loudness'],\n audio_features['mode'],\n audio_features['speechiness'],\n audio_features['acousticness'],\n audio_features['instrumentalness'],\n audio_features['liveness'],\n audio_features['valence'],\n audio_features['tempo'],\n audio_features['duration_ms'],\n audio_features['time_signature']\n ))\n # Commit the transition\n conn.commit()\n # Close cursor\n cur.close()\n if tracks['next']:\n tracks = sp.next(tracks) # Get next playlist given a page result\n else:\n tracks = None\n print(f'Done playlist: {counter} of {len(search_results)}')\n counter += 1\n except Exception as e:\n print(e)\n counter += 1\n REDIS.set(playlist_id, 1)\n print('- Added playlist: {}'.format(playlist_id))\n # Close connection\n conn.close()\n\n print('Done! All data are update in principal db and redis!')",
"def fetch_tracks_info_df(df):\n\n gen_df = df.copy()\n gen_df = gen_df[['artist_name', 'title', 'release', 'track_id', 'song_id']]\n\n for column_name in gen_df.columns:\n gen_df[column_name] = gen_df[column_name].map(lambda x: ast.literal_eval(x).decode('utf-8'))\n\n gen_df.rename(columns={'release': 'album_name'}, inplace=True)\n gen_df['year'] = df['year']\n\n return gen_df",
"def gettrackinfo(accesstoken, playlist):\n\n headers = {}\n headers[\"Authorization\"] = \"Bearer {}\".format(accesstoken)\n\n offset = 0\n\n needattributes = [track.trackid for track in playlist.tracks]\n\n while offset < len(needattributes):\n params = {'ids': ','.join(needattributes[offset:100+offset])}\n r = requests.get(\"https://api.spotify.com/v1/audio-features/\",\n headers=headers,\n params=params)\n\n response = r.json()\n\n if \"audio_features\" not in response:\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n # wait correct amount\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n needinfo = True\n while needinfo:\n r = requests.get(\"https://api.spotify.com/v1/audio-features/\",\n headers=headers,\n params=params)\n response = r.json()\n if \"audio_features\" in response:\n break\n elif response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n # wait\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n continue\n else:\n print('error: gettrackinfo failed')\n print(response[\"error\"])\n return(None)\n else:\n print('error: gettrackinfo failed')\n print(response[\"error\"])\n return(None)\n else:\n print('error: gettrackinfo failed')\n print('no error response')\n return(None)\n\n for i in range(len(response[\"audio_features\"])):\n try:\n playlist.tracks[i+offset].danceability = response[\"audio_features\"][i][\"danceability\"]\n playlist.tracks[i+offset].energy = response[\"audio_features\"][i][\"energy\"]\n playlist.tracks[i+offset].key = response[\"audio_features\"][i][\"key\"]\n playlist.tracks[i+offset].loudness = response[\"audio_features\"][i][\"loudness\"]\n playlist.tracks[i+offset].mode = response[\"audio_features\"][i][\"mode\"]\n playlist.tracks[i+offset].speechiness = response[\"audio_features\"][i][\"speechiness\"]\n playlist.tracks[i+offset].acousticness = response[\"audio_features\"][i][\"acousticness\"]\n playlist.tracks[i+offset].instrumentalness = response[\"audio_features\"][i][\"instrumentalness\"]\n playlist.tracks[i+offset].liveness = response[\"audio_features\"][i][\"liveness\"]\n playlist.tracks[i+offset].loudness = response[\"audio_features\"][i][\"loudness\"]\n playlist.tracks[i+offset].valence = response[\"audio_features\"][i][\"valence\"]\n playlist.tracks[i+offset].tempo = response[\"audio_features\"][i][\"tempo\"]\n playlist.tracks[i+offset].duration_ms = response[\"audio_features\"][i][\"duration_ms\"]\n playlist.tracks[i+offset].time_signature = response[\"audio_features\"][i][\"time_signature\"]\n except Exception as e:\n print('error: error getting attributes from returned JSON')\n print('this piece of json looks like:\\n{}'.format(response[\"audiofeatures\"][i]))\n\n offset = offset + len(response[\"audio_features\"])\n\n\n # t.printattributes()",
"def append_audio_features(df,spotify_auth, return_feat_df = False):\r\n audio_features = spotify_auth.audio_features(df[\"track_id\"][:])\r\n #catch and delete songs that have no audio features\r\n if None in audio_features:\r\n NA_idx=[i for i,v in enumerate(audio_features) if v == None]\r\n df.drop(NA_idx,inplace=True)\r\n for i in NA_idx:\r\n audio_features.pop(i)\r\n assert len(audio_features) == len(df[\"track_id\"][:])\r\n feature_cols = list(audio_features[0].keys())[:-7]\r\n features_list = []\r\n for features in audio_features:\r\n try:\r\n song_features = [features[col] for col in feature_cols]\r\n features_list.append(song_features)\r\n except TypeError:\r\n pass\r\n df_features = pd.DataFrame(features_list,columns = feature_cols)\r\n df = pd.concat([df,df_features],axis = 1)\r\n if return_feat_df == False:\r\n return df\r\n else:\r\n return df,df_features",
"def getTracks(playlist_id):\n\n tracks = crud.getTracks(session, playlist_id)\n\n return tracks",
"def get_songs_by_artist(artist, linesep=' \\n ', timeout=None):\n df = pd.DataFrame(columns=['Artist', 'Title'])\n url = \"https://lyrics.fandom.com/wiki/Category:Songs_by_\"+urlize(artist)\n df = parse_page_now(url,df)\n return df",
"def get_playlist_songs(self, playlist_id):\n values = {'action' : 'playlist_songs',\n 'filter' : playlist_id,\n }\n root = self.__call_api(values)\n songs = root.getElementsByTagName('song')\n if not songs:\n return None\n l= []\n try:\n for song in songs:\n song_id = int(song.getAttribute('id'))\n song_title = song.getElementsByTagName('title')[0].childNodes[0].data\n artist_id = int(song.getElementsByTagName('artist')[0].getAttribute('id'))\n artist_name = song.getElementsByTagName('artist')[0].childNodes[0].data\n album_id = int(song.getElementsByTagName('album')[0].getAttribute('id'))\n album_name = song.getElementsByTagName('album')[0].childNodes[0].data\n\n song_track = int(song.getElementsByTagName('track')[0].childNodes[0].data)\n song_time = int(song.getElementsByTagName('time')[0].childNodes[0].data)\n song_size = int(song.getElementsByTagName('size')[0].childNodes[0].data)\n\n try: # New Ampache puts nothing here...\n precise_rating = int(song.getElementsByTagName('preciserating')[0].childNodes[0].data)\n except:\n precise_rating = 0\n try:\n rating = float(song.getElementsByTagName('rating')[0].childNodes[0].data)\n except:\n rating = 0\n art = song.getElementsByTagName('art')[0].childNodes[0].data\n url = song.getElementsByTagName('url')[0].childNodes[0].data\n song_dict = {\n 'song_id' : song_id,\n 'song_title' : song_title,\n 'artist_id' : artist_id,\n 'artist_name' : artist_name,\n 'album_id' : album_id,\n 'album_name' : album_name,\n 'song_track' : song_track,\n 'song_time' : song_time,\n 'song_size' : song_size,\n 'precise_rating' : precise_rating,\n 'rating' : rating,\n 'art' : art,\n 'url' : url,\n }\n l.append(song_dict)\n except:\n print(\"This playlist failed\", playlist_id)\n traceback.print_exc()\n return None\n return l",
"def get_playlist_tracks(playlist_id):\n\n results = spotifyObject.playlist_tracks(playlist_id)\n tracks = results['items']\n while results['next']:\n results = spotifyObject.next(results)\n tracks.extend(results['items'])\n return tracks",
"def get_playlist_tracks_adapter(json_response):\n\n ret = {\"result\": []}\n for item in json_response['items']:\n ret[\"result\"].append(json_to_track_info(item[\"track\"]))\n return ret",
"def get_artist_songs(self,genre = \"[Not Provided]\"):\n # Search for the artist and get their id\n search_artist = self.search(self.artist_name)\n\n #Prevents the stoppage in case of an Artist having zero lyrics on Genius\n if len(search_artist['response']['hits']) == 0:\n return False\n \n artist_id = str(search_artist['response']['hits'][0]['result']['primary_artist']['id'])\n print(\"ID: \" + artist_id)\n # Initialize DataFrame\n df = pd.DataFrame(columns=['title', 'url'])\n # Iterate through all the pages of the artist's songs\n more_pages = True\n page = 1\n i = 0\n while more_pages:\n # Make a request to get the songs of an artist on a given page\n request_url = self.base_url + 'artists/' + artist_id + '/songs' + '?per_page=50&page=' + str(page)\n response = requests.get(request_url, headers=self.headers).json()\n\n # For each song which the given artist is the primary_artist of the song, add the song title and\n # Genius URL to the DataFrame\n for song in response['response']['songs']:\n if str(song['primary_artist']['id']) == artist_id:\n title = song['title']\n url = song['url']\n df.loc[i] = [title, url]\n i += 1\n page += 1\n\n if response['response']['next_page'] is None:\n more_pages = False\n\n \n # Get the HTML and Song Lyrics from helper methods in the class\n df['artist'] = self.artist_name\n df['html'] = df['url'].apply(self.get_song_html)\n df['lyrics'] = df.apply(lambda row: self.get_lyrics(row.html), axis=1)\n #Uncomment to use the genre method otherwise\n #df['genre'] = df.apply(lambda row: self.get_genre(row.html), axis=1)\n df['genre'] = genre\n \n del df['url']\n del df['html']\n\n self.artist_songs = df\n\n return self.artist_songs",
"def _get_audio_features(self, sp, trackids):\n\n cols = ['acousticness', 'danceability', 'duration_ms', 'energy',\n 'instrumentalness', 'key', 'liveness', 'loudness', 'mode',\n 'speechiness', 'tempo', 'time_signature', 'valence', 'id']\n\n total_track = len(trackids)\n features = []\n start = 0\n while len(features) < total_track:\n end = start + 100 if start + 100 < total_track else total_track\n\n features += sp.audio_features(tracks=trackids[start: end])\n start = start + 100\n\n return pd.DataFrame.from_records(features, columns=cols)",
"def compute_df(playlist, song_factors, playlist_factors=None, method='ensemble'):\n playlist = playlist.str.replace('spotify:track:', '')\n playlist_set = set(playlist)\n seed_ids = []\n while len(seed_ids) < 2:\n rand = list(playlist.sample(n=1))[0]\n if rand in tid_to_idx and rand not in seed_ids:\n seed_ids.append(rand)\n playlist_set.remove(seed_ids[0])\n playlist_set.remove(seed_ids[1])\n if method == 'song':\n wrmf_output = wrmf_helpers.get_top_similar_from_tracks(\n song_factors,\n seed_ids,\n n_similar_songs=10000,\n verbose=False\n )\n elif method == 'playlist':\n wrmf_output = wrmf_helpers.get_top_similar_from_playlists(\n song_factors,\n playlist_factors,\n seed_ids,\n n_similar_songs=10000,\n n_similar_playlists=100\n )\n elif method == 'ensemble':\n wrmf_output = wrmf_helpers.get_top_similar_from_ensemble(\n song_factors,\n playlist_factors,\n seed_ids,\n n_similar_songs=10000,\n n_similar_playlists=100\n )\n else:\n raise ValueError(\"invalid method\")\n\n wrmf_output_set = set(wrmf_output)\n true_matches = playlist_set.intersection(wrmf_output_set)\n false_matches = wrmf_output_set.symmetric_difference(true_matches)\n\n X_train_ids = []\n Y_train = []\n for _ in range(min(len(true_matches), 10)):\n X_train_ids.append(true_matches.pop())\n Y_train.append(1)\n X_train_ids.append(false_matches.pop())\n Y_train.append(0)\n\n return compute_df_features(seed_ids, X_train_ids, Y_train)",
"def get_playlist_songs(self, playlist_id):\n url = get_playlist_url(playlist_id)\n result = self.get_request(url)\n return result['result']['tracks'], result['result']['name']",
"def getplaylisttracks(accesstoken, chosenplaylist):\n\n headers = {}\n headers[\"Authorization\"] = \"Bearer {}\".format(accesstoken)\n\n limit = 100\n\n payload = {}\n payload[\"limit\"] = limit\n payload[\"offset\"] = 0\n\n r = requests.get(\n \"https://api.spotify.com/v1/users/{}/playlists/{}/tracks\".format(chosenplaylist.ownerid, chosenplaylist.playlistid),\n headers=headers,\n params=payload)\n\n response = r.json()\n\n if \"items\" not in response:\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n # wait for the amount of time specified in response header\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n # try again\n return(getplaylisttracks(accesstoken, chosenplaylist, userid))\n else:\n print(response[\"error\"])\n return(None)\n else:\n print('error: getplaylisttracks request failed')\n return(None)\n\n numberreceived = len(response[\"items\"])\n totalavailable = response[\"total\"]\n\n for track in response[\"items\"]:\n t = Track()\n t.trackid = track[\"track\"][\"id\"]\n t.albumname = track[\"track\"][\"album\"][\"name\"]\n t.trackname = track[\"track\"][\"name\"]\n t.artistname = track[\"track\"][\"artists\"][0][\"name\"]\n t.popularity = track[\"track\"][\"popularity\"]\n # print(t.trackid, t.trackname, t.artistname, t.albumname)\n chosenplaylist.tracks.append(t)\n\n # if we haven't gotten all of the tracks in the playlist, request the next\n # batch\n\n while numberreceived < totalavailable:\n\n payload[\"offset\"] = payload[\"offset\"] + limit\n r = requests.get(\n \"https://api.spotify.com/v1/users/{}/playlists/{}/tracks\".format(chosenplaylist.ownerid, chosenplaylist.playlistid),\n headers=headers,\n params=payload)\n response = r.json()\n\n if \"items\" not in response:\n if response[\"error\"]:\n if response[\"error\"][\"status\"] == 429:\n # wait for the amount of time specified in response header\n time.sleep(int(r.headers[\"Retry-After\"]) + 1)\n # try again\n continue\n else:\n print('error: getplaylisttracks request failed')\n print(response[\"error\"])\n return(None)\n else:\n print('error: unknown error')\n return(None)\n\n for track in response[\"items\"]:\n if track[\"is_local\"]:\n # a locally saved song. skip over it, as no way to query audio \n # features without having a spotify track id\n continue\n t = Track()\n t.trackid = track[\"track\"][\"id\"]\n t.albumname = track[\"track\"][\"album\"][\"name\"]\n t.trackname = track[\"track\"][\"name\"]\n t.artistname = track[\"track\"][\"artists\"][0][\"name\"]\n # print(t.trackid, t.trackname, t.artistname, t.albumname)\n chosenplaylist.tracks.append(t)\n\n \n numberreceived = numberreceived + len(response[\"items\"])\n\n # filter out tracks with trackid == None\n chosenplaylist.tracks = [track for track in chosenplaylist.tracks if track.trackid is not None]\n\n # print(chosenplaylist.tracks)\n return(chosenplaylist)",
"def test_get_pl_tracks(self):\n\n # Playlist 1\n result1 = self.client.get(\"playlist/pl1\")\n self.assertEqual(result1.status_code, 200)\n self.assertIn(b\"Track 1\", result1.data)\n self.assertIn(b\"Track 3\", result1.data)\n self.assertNotIn(b\"Track 5\", result1.data)\n\n # Playlist 2\n result2 = self.client.get(\"playlist/pl2\")\n self.assertEqual(result2.status_code, 200)\n self.assertIn(b\"Track 4\", result2.data)\n self.assertIn(b\"Track 5\", result2.data)\n self.assertNotIn(b\"Track 1\", result2.data)",
"def playlist_tracks(self, playlist_id: str, fields: str = None,\n market: str = 'from_token', limit: int = 100,\n offset: int = 0):\n return self._get(f'playlists/{playlist_id}/tracks', limit=limit,\n offset=offset, fields=fields, market=market)",
"def search_multiple_tracks(search_query, sp):\r\n \r\n # List to store the track ids\r\n track_ids = []\r\n # List to store the track names and artists\r\n tracks = []\r\n\r\n #Search for 10 results in the Spotify API given a search querry\r\n results = sp.search(q = search_query ,limit=10)\r\n results = results['tracks']['items']\r\n\r\n # Extract the track id's, names and artists for all the search results\r\n for i in range(len(results)):\r\n\r\n # Get track id, artist and name\r\n track_id = results[i]['id']\r\n artist = results[i]['artists'][0]['name']\r\n track_name = results[i]['name']\r\n\r\n # Get a string with the artist and track name\r\n track = artist + ' - ' + track_name\r\n\r\n # Append the track id's and track name/artist to the list\r\n track_ids.append(track_id)\r\n tracks.append(track)\r\n\r\n # Make a dictionary of the track id and track name/artist list.\r\n return dict(zip(tracks,track_ids))",
"def tracksDict(pl, gm_api):\r\n playlist = []\r\n notFound = []\r\n # song metadata used as cross-check reference if a playlist entry doesn't\r\n # have desired metadata\r\n all_song_meta_data = gm_api.get_all_songs()\r\n for t in pl['tracks']:\r\n # Check source:\r\n # '2' indicates hosted on Google Music, '1' otherwise\r\n if t['source'] == '2':\r\n song = Track.Track(title=t['track']['title'],\r\n artist=t['track']['artist']) \r\n playlist.append(song)\r\n elif t['source'] == '1':\r\n # Important: when source is '1', playlistEntry object's 'trackId' \r\n # will correspond w/ a track object's 'id' in all_song_meta_data\r\n badtrackID = t['trackId']\r\n song = next((t for t in all_song_meta_data \\\r\n if t['id'] == badtrackID), None)\r\n if song != None:\r\n # create track object, add to new \"playlist\"\r\n track_obj = Track.Track(title=song['title'],\r\n artist=song['artist']) \r\n playlist.append(track_obj)\r\n else:\r\n msg = \"Error with track \" + str(badtrackID) + \": 'source'\" + \\\r\n \" field is '1', but could not find matching metadata.\"\r\n print(msg, file=sys.stderr)\r\n notFound.append(badtrackID)\r\n else:\r\n msg = \"Error with track \" + str(t['trackId']) + \": 'source'\" + \\\r\n \" field not '1' or '2'.\"\r\n print(msg, file=sys.stderr)\r\n notFound.append(t['trackId'])\r\n\r\n return playlist, notFound"
]
| [
"0.7261001",
"0.67370754",
"0.6705537",
"0.6686362",
"0.66803616",
"0.6676746",
"0.6595131",
"0.6555134",
"0.6174791",
"0.61648434",
"0.61014676",
"0.6018592",
"0.593523",
"0.5921884",
"0.5910994",
"0.5907739",
"0.5896782",
"0.5857289",
"0.5855081",
"0.58021855",
"0.57907724",
"0.5784848",
"0.5739212",
"0.57280385",
"0.5683146",
"0.56814075",
"0.5680329",
"0.5667755",
"0.56524605",
"0.5639971"
]
| 0.7140388 | 1 |
Fetches the audio features for all songs in a DataFrame and appends these as columns to the DataFrame. Requires spotipy to be set up with an auth token. | def append_audio_features(df,spotify_auth, return_feat_df = False):
audio_features = spotify_auth.audio_features(df["track_id"][:])
#catch and delete songs that have no audio features
if None in audio_features:
NA_idx=[i for i,v in enumerate(audio_features) if v == None]
df.drop(NA_idx,inplace=True)
for i in NA_idx:
audio_features.pop(i)
assert len(audio_features) == len(df["track_id"][:])
feature_cols = list(audio_features[0].keys())[:-7]
features_list = []
for features in audio_features:
try:
song_features = [features[col] for col in feature_cols]
features_list.append(song_features)
except TypeError:
pass
df_features = pd.DataFrame(features_list,columns = feature_cols)
df = pd.concat([df,df_features],axis = 1)
if return_feat_df == False:
return df
else:
return df,df_features | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_audio_features(self, sp, trackids):\n\n cols = ['acousticness', 'danceability', 'duration_ms', 'energy',\n 'instrumentalness', 'key', 'liveness', 'loudness', 'mode',\n 'speechiness', 'tempo', 'time_signature', 'valence', 'id']\n\n total_track = len(trackids)\n features = []\n start = 0\n while len(features) < total_track:\n end = start + 100 if start + 100 < total_track else total_track\n\n features += sp.audio_features(tracks=trackids[start: end])\n start = start + 100\n\n return pd.DataFrame.from_records(features, columns=cols)",
"def get_tracklist_features(tracks):\n\n # first we construct a list of all track ids and tracknames\n track_ids = []\n track_names = []\n for collection_type in tracks:\n tid = collection_type['id']\n if tid:\n track_ids.append(collection_type['id'])\n track_name = f'{collection_type[\"artists\"][0][\"name\"]} - {collection_type[\"name\"]}'\n track_names.append(track_name)\n # we can only load data in batches\n batch_size = 50\n offset = 0\n\n features = []\n\n while offset + batch_size <= len(track_ids):\n # get one batch of tracks per iteration\n new_features = SP.audio_features(track_ids[offset:offset+batch_size])\n\n # we want to add the trackname to the dataframe\n for i, feature in enumerate(new_features):\n feature['name'] = track_names[offset+i]\n features += new_features\n\n offset += batch_size\n\n # get the remaining tracks that couldnt fill a batch\n features += SP.audio_features(track_ids[offset:])\n return pd.DataFrame(features)",
"def create_df_playlist(api_results,sp = None, append_audio = True):\r\n df = create_df_saved_songs(api_results[\"tracks\"])\r\n if append_audio == True:\r\n assert sp != None, \"sp needs to be specified for appending audio features\"\r\n df = append_audio_features(df,sp)\r\n return df",
"def songfeature_get(): # noqa: E501\n query = 'SELECT * FROM SongFeatures'\n results = query_to_dict(query)\n features_list = []\n for r in results:\n features_list.append(\n Songfeature(acousticness= r['Acousticness'],\n danceability= r['Danceability'],\n duration_ms= r['Duration_ms'],\n energy= r['Energy'],\n instrumentalness= r['Instrumentalness'],\n musicalkey= r['MusicalKey'],\n liveness= r['Liveness'],\n loudness= r['Loudness'],\n mode= r['Mode'],\n speechiness= r['Speechiness'],\n tempo= r['Tempo'],\n timesignature= r['Time_signature'],\n valence= r['Valence'],\n songid= r['SongID']))\n return features_list",
"def audio_features(self, track=None, tracks=None, with_cache=True, **kwargs):\n if track:\n _id = self._get_track_id(track)\n # pylint: disable=no-member\n return self._get(API.AUDIO_FEATURES_SINGLE.value.format(id=_id), **kwargs)\n\n tracks = list(map(self._get_track_id, tracks or []))\n cached_tracks = []\n if with_cache:\n with db_session:\n cached_tracks = select(a for a in AudioFeatures if a.id in tracks)[:]\n tracks = list(set(tracks) - {a.id for a in cached_tracks})\n batches = [tracks[i : i + 100] for i in range(0, len(tracks), 100)]\n audio_features = [\n self._get(API.AUDIO_FEATURES_MULTIPLE.value, ids=\",\".join(t), **kwargs)\n for t in batches\n ]\n with db_session:\n audio_features = [\n AudioFeatures.from_dict(t) for t in chain.from_iterable(audio_features)\n ] + cached_tracks\n return audio_features",
"def track_features(tracks, authorizer, verbose=False):\n spotify_endpoint = 'https://api.spotify.com/v1/audio-features'\n headers = {\"Accept\":\"application/json\", \"Content-Type\":\"application/json\", \"Authorization\": \"Bearer {bearer}\".format(bearer=authorizer.bearer)}\n\n remainder = len(tracks)\n offset = 0\n stride = 100\n features = []\n while remainder > 0:\n params = {'ids': ','.join(tracks[offset:offset + stride])} # spotify can only process 100 tracks at a time\n\n response = requests.get(spotify_endpoint, params=params, headers=headers)\n\n if response.status_code == 200:\n features += response.json()['audio_features']\n offset += stride\n remainder -= stride\n elif response.status_code == 429:\n limit = int(response.headers['Retry-After'])\n print('Hit rate limit, waiting for {} seconds to continue'.format(limit))\n time.sleep(limit)\n elif response.status_code == 401:\n print('Access token expired, refreshing...')\n authorizer.refresh()\n headers = {\"Accept\":\"application/json\", \"Content-Type\":\"application/json\", \"Authorization\": \"Bearer {bearer}\".format(bearer=authorizer.bearer)}\n else:\n print('Error %d' % response.status_code)\n if verbose:\n print(json.loads(response.text))\n return None\n\n return zip(tracks, features)",
"def get_audio_features_of_tracks(self, playlist_items: List[Dict]):\n audio_features_vectors = []\n for track_object in playlist_items:\n track_id = _get_id(track_object)\n track_features = self.spotify_client.get_audio_features(track_id)\n audio_features_vectors.append(list(track_features.values()))\n return np.array([vec for vec in audio_features_vectors])",
"def get_spotify_features(search):\n\t\n\t# Configure API credentials\n\tclient_credentials_manager = SpotifyClientCredentials(client_id=config.SPOTIFY_CID, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\tclient_secret=config.SPOTIFY_SECRET)\n\tsp = spotipy.Spotify(client_credentials_manager = client_credentials_manager)\n\t\n\t# Find song ID\n\tquery = sp.search(search)\n\tsong_id = query['tracks']['items'][0]['id']\n\n\t# Use song ID to pull metadata\n\taudio_feature = sp.audio_features(song_id)[0]\n\t\n\treturn audio_feature",
"def get_metadata(self):\n items = self.get_playlist_items()\n uris = [item[\"track\"][\"uri\"] for item in items]\n features = self.API.audio_features(uris)\n\n list_data = []\n\n for idx, item in enumerate(items):\n data = {\"name\": item[\"track\"][\"name\"], \"uri\": item[\"track\"][\"uri\"]}\n data.update(features[idx])\n list_data.append(data)\n\n self.metadata = pd.DataFrame(data=list_data,\n index=range(len(list_data)))\n\n return self.metadata",
"def get_all_tracks_from_artists(sp, artist_uri_list):\n\n track_list = [[\"track_name\", \"track_uri\", \"track_release_date\", \"artist_name\"]]\n track_df = pd.DataFrame(columns=track_list[0])\n\n print(\"Log: Pulling data from Spotify. This can take a while...\")\n\n for artist_uri in artist_uri_list:\n # Get artist name and albums\n artist_name = sp.artist(artist_uri)[\"name\"]\n albums = get_albums_from_artists(sp, [artist_uri])\n\n # Get tracks from artist albums\n tracks_artist_df = get_tracks_from_albums(sp, albums[\"album_uri\"].to_list())\n tracks_artist_df[\"artist_name\"] = artist_name\n\n # Append new songs to dataframe\n track_df = track_df.append(tracks_artist_df)\n \n print(\"Log: Finished pulling all tracks from artist.\")\n return track_df",
"def get_playlist_feats(playlist_id):\r\n sourcePlaylistID = playlist_id\r\n sourcePlaylist = sp.user_playlist(username, sourcePlaylistID);\r\n tracks = sourcePlaylist[\"tracks\"];\r\n songs = tracks[\"items\"];\r\n\r\n track_ids = []\r\n track_names = []\r\n track_artists = []\r\n\r\n\r\n for i in range(0, len(songs)):\r\n if songs[i]['track']['id'] != None: # Removes the local tracks in your playlist if there is any\r\n track_ids.append(songs[i]['track']['id'])\r\n track_names.append(songs[i]['track']['name'])\r\n track_artists.append(songs[i]['track']['artists'])\r\n\r\n\r\n features = []\r\n for i in range(0,len(track_ids)):\r\n audio_features = sp.audio_features(track_ids[i])[0]\r\n track_popularity = {'popularity': sp.track(track_ids[i])['popularity']}\r\n genre = {'genres': sp.artist(track_artists[i][0]['uri'])['genres']}\r\n audio_features = dict(audio_features, **track_popularity, **genre)\r\n features.append(audio_features)\r\n\r\n\r\n playlist_df = pd.DataFrame(features, index = track_names)\r\n return playlist_df",
"def get_artist_audio_features(q, interactive = False, genre_delimiter = '-!!-', to_file = '', client = None):\n query = client.search(q = q, type = \"artist\")\n items = query['artists']['items']\n\n if not items:\n raise Exception(\"No artists found\")\n\n if interactive:\n print(\"Select the artist to use...\")\n print(\"\\n\".join(\"[{}]: {}\".format(ii, entry['name']) for ii, entry in enumerate(items)))\n artist_indx = int(input(\"artist number: \").strip())\n if artist_indx > len(items):\n raise IndexError(\"Selected number higher than options available\")\n artist = items[artist_indx]\n else:\n artist = items[0]\n\n # get artist genres\n artist_genres = genre_delimiter.join(artist['genres']) if genre_delimiter else None\n\n # get artist albums\n albums = get_artist_albums(artist['id'])\n albums['artist_genres'] = artist_genres\n\n # get album popularity\n album_popularity = get_album_popularity(albums.id)\n\n # get album tracks\n tracks = get_album_tracks(albums.id)\n\n # get track audio features\n features = get_track_features(tracks.id)\n\n # get track popularity\n popularity = get_track_popularity(tracks.id)\n\n album_data = albums.merge(album_popularity, 'left', 'id')\n\n track_data = tracks \\\n .drop(columns = ['type']) \\\n .merge(popularity, 'left', 'id') \\\n .merge(features.drop(columns = ['uri', 'type', 'duration_ms']), 'left', 'id')\n\n\n merged = prefix_merge(album_data, track_data, ['album_', 'track_'], how = 'left', on = 'album_id')\n\n if to_file:\n merged.to_csv(to_file)\n\n return merged",
"def get_song_features(tid):\n\n # dictionary of features to return\n spotify_track_data = SpotifyData[tid]\n\n features = {}\n features['name'] = spotify_track_data.name\n features['artists'] = spotify_track_data.artists\n features['popularity'] = spotify_track_data.popularity\n features['album'] = spotify_track_data.album_name\n features['danceability'] = spotify_track_data.danceability\n features['energy'] = spotify_track_data.energy\n features['key'] = spotify_track_data.key\n features['loudness'] = spotify_track_data.loudness\n features['mode'] = spotify_track_data.mode\n features['speechiness'] = spotify_track_data.speechiness\n features['acousticness'] = spotify_track_data.acousticness\n features['instrumentalness'] = spotify_track_data.instrumentalness\n features['liveness'] = spotify_track_data.liveness\n features['valence'] = spotify_track_data.valence\n features['tempo'] = spotify_track_data.tempo\n features['duration_ms'] = spotify_track_data.duration_ms\n features['time_signature'] = spotify_track_data.time_signature\n\n return features",
"def add_all_features(df):\n df.reset_index(drop=True, inplace=True)\n df = target_indicators(df)\n df = momentum_indicators(df)\n df = trend_indicators(df)\n df = volatility_indicators(df)\n df = volume_indicators(df)\n df = special_indicators(df)\n return df",
"def fetch_tracks_info_df(df):\n\n gen_df = df.copy()\n gen_df = gen_df[['artist_name', 'title', 'release', 'track_id', 'song_id']]\n\n for column_name in gen_df.columns:\n gen_df[column_name] = gen_df[column_name].map(lambda x: ast.literal_eval(x).decode('utf-8'))\n\n gen_df.rename(columns={'release': 'album_name'}, inplace=True)\n gen_df['year'] = df['year']\n\n return gen_df",
"def get_features_dataframe(tids):\n\n Data = {}\n for tid in tids:\n Data[tid] = get_song_features(tid)\n return pd.DataFrame.from_dict(Data, orient='index')",
"def create_df_saved_songs(api_results):\r\n #create lists for df-columns\r\n track_name = []\r\n track_id = []\r\n artist = []\r\n album = []\r\n duration = []\r\n popularity = []\r\n #loop through api_results\r\n for items in api_results[\"items\"]:\r\n try:\r\n track_name.append(items[\"track\"]['name'])\r\n track_id.append(items[\"track\"]['id'])\r\n artist.append(items[\"track\"][\"artists\"][0][\"name\"])\r\n duration.append(items[\"track\"][\"duration_ms\"])\r\n album.append(items[\"track\"][\"album\"][\"name\"])\r\n popularity.append(items[\"track\"][\"popularity\"])\r\n except TypeError: \r\n pass\r\n # Create the final df \r\n df = pd.DataFrame({ \"track_name\": track_name, \r\n \"album\": album, \r\n \"track_id\": track_id,\r\n \"artist\": artist, \r\n \"duration\": duration, \r\n \"popularity\": popularity})\r\n return df",
"def get_tracks_from_albums(sp, album_uri_list):\n\n track_list = [[\"track_name\", \"track_uri\", \"track_release_date\"]]\n\n print(\"Log: Pulling data from Spotify. This can take a while...\")\n\n for album_uri in album_uri_list:\n album_tracks = sp.album_tracks(album_uri, limit=50, offset=0)[\"items\"]\n count_tracks_in_album = len(album_tracks)\n album_release_date = sp.album(album_uri)[\"release_date\"]\n\n # This part is probably very slow and should be improved by accessing the API less often\n for track_number in range(count_tracks_in_album):\n track_name = album_tracks[track_number][\"name\"]\n track_uri = album_tracks[track_number][\"uri\"]\n \n track_list.append([track_name, track_uri, album_release_date])\n\n # Create df from list of tracks for all albums\n track_df = pd.DataFrame(data=track_list[1:], columns=track_list[0])\n \n print(\"Log: Finished pulling all tracks from albums.\")\n return track_df",
"def __insert_songplay_data(cur, df):\n # for each songplay event, described by a row in the dataframe\n for index, row in df.iterrows():\n \n # get songid and artistid from song and artist tables\n cur.execute(song_select, (row.song, row.artist, row.length))\n results = cur.fetchone()\n\n if results:\n songid, artistid = results\n else:\n songid, artistid = None, None\n\n # insert songplay record\n songplay_data = (pd.to_datetime(row.ts, unit='ms'), row.userId, row.level, songid, artistid, row.sessionId, row.location, row.userAgent)\n cur.execute(songplay_table_insert, songplay_data)",
"def songfeature_songid_get(songid): # noqa: E501\n query = \"SELECT * FROM SongFeatures WHERE SongID = '{}'\".format(songid)\n results = query_to_dict(query)\n features_list = []\n for r in results:\n features_list.append(\n Songfeature(acousticness= r['Acousticness'],\n danceability= r['Danceability'],\n duration_ms= r['Duration_ms'],\n energy= r['Energy'],\n instrumentalness= r['Instrumentalness'],\n musicalkey= r['MusicalKey'],\n liveness= r['Liveness'],\n loudness= r['Loudness'],\n mode= r['Mode'],\n speechiness= r['Speechiness'],\n tempo= r['Tempo'],\n timesignature= r['Time_signature'],\n valence= r['Valence'],\n songid= r['SongID']))\n return features_list",
"def extract_sound_features(metadata, audio_dataset_path):\n\n import vggish_input\n import vggish_params\n import vggish_slim\n\n with tf.Graph().as_default(), tf.Session() as sess:\n # load pre-trained vggish model\n vggish_slim.define_vggish_slim()\n vggish_slim.load_vggish_slim_checkpoint(sess, checkpoint_path)\n features_tensor = sess.graph.get_tensor_by_name(vggish_params.INPUT_TENSOR_NAME)\n embedding_tensor = sess.graph.get_tensor_by_name(\n vggish_params.OUTPUT_TENSOR_NAME\n )\n \n sound_features = []\n # loop through the dataset using information from the metadata file\n for index_num, row in tqdm(metadata.iterrows()):\n # get the file path \n file_name = os.path.join(os.path.abspath(audio_dataset_path),str(row['file_path']))\n \n # extract basic sound data\n audio, sample_rate = librosa.load(file_name, sr=SR, mono=True, offset=0.0, duration=None)\n\n # extract vgg features\n yt, index = librosa.effects.trim(audio, frame_length=FRAME_LEN, hop_length=HOP)\n input_batch = vggish_input.waveform_to_examples(yt, SR_VGG) \n [features_vgg] = sess.run(\n [embedding_tensor], feed_dict={features_tensor: input_batch}\n )\n features_vgg = sta_fun_2(features_vgg)\n features_vgg = features_vgg.reshape(features_vgg.shape[-1],)\n\n # extract hc features\n audio, sample_rate = librosa.load(file_name, res_type='kaiser_fast')\n features_hc = extract_features_hc(audio, sample_rate)\n\n # concat features\n features = np.concatenate((features_hc, features_vgg), axis=0)\n sound_features.append(features)\n\n return sound_features",
"def populate_billboard_scrapables(self):\n results = MongoClient().billboard.spotify.find()\n self.df = pd.DataFrame(\n data=map(\n lambda r: (\n r[\"metadata\"][\"id\"],\n r[\"metadata\"][\"artists\"][0][\"name\"],\n r[\"metadata\"][\"name\"],\n ),\n results,\n ),\n columns=[\"track_id\", \"artist_name\", \"title\"],\n )\n print(f\"Tracks identified to scrape lyrics: {self.df.shape[0]}\")",
"def songfeature_filter_get(songid=None, genre=None, artist=None, name=None): # noqa: E501\n query = 'SELECT * FROM SongFeatures'\n multi_flag = \"WHERE\"\n if genre and not artist:\n query = \"\"\"\n SELECT Acousticness, Danceability, Duration_ms, Energy,Instrumentalness, MusicalKey,\n Liveness,Loudness,Mode, Speechiness,Tempo, Time_signature, Valence, Songs.SongID,\n Songs.SongName\n FROM SongFeatures\n JOIN Songs\n ON Songs.SongID = SongFeatures.SongID\n AND Songs.SongGenre = '{}'\n \"\"\".format(genre)\n multi_flag = \"AND\"\n\n if artist and not genre:\n #Query too complicated, separate entity\n query = \"\"\"\n SELECT Acousticness, Danceability, Duration_ms, Energy,Instrumentalness, MusicalKey,\n Liveness,Loudness,Mode, Speechiness,Tempo, Time_signature, Valence, Songs.SongID\n FROM SongFeatures\n JOIN Songs\n ON Songs.SongID = SongFeatures.SongID\n JOIN Artists\n ON Songs.ArtistID = Artists.ArtistID\n WHERE Artists.ArtistName = '{}'\n \"\"\".format(artist)\n multi_flag = \"AND\"\n\n if artist and genre:\n query = \"\"\"\n SELECT Acousticness, Danceability, Duration_ms, Energy,Instrumentalness, MusicalKey,\n Liveness,Loudness, Mode, Speechiness,Tempo, Time_signature, Valence, Songs.SongID\n FROM SongFeatures\n JOIN Songs\n ON Songs.SongID = SongFeatures.SongID\n JOIN Artists\n ON Songs.ArtistID = Artists.ArtistID\n WHERE Artists.ArtistName = '{}'\n AND Songs.SongGenre = '{}'\n \"\"\".format(artist, genre)\n\n if songid:\n query = query + \" {} SongFeatures.SongID = '{}'\".format(songid)\n\n if name:\n query = query + \" JOIN Songs ON Songs.SongID = SongFeatures.SongID WHERE Songs.SongName = '{}'\".format(name)\n\n results = query_to_dict(query)\n features_list = []\n\n for r in results:\n features_list.append(\n Songfeature(acousticness= r['Acousticness'],\n danceability= r['Danceability'],\n duration_ms= r['Duration_ms'],\n energy= r['Energy'],\n instrumentalness= r['Instrumentalness'],\n musicalkey= r['MusicalKey'],\n liveness= r['Liveness'],\n loudness= r['Loudness'],\n mode= r['Mode'],\n speechiness= r['Speechiness'],\n tempo= r['Tempo'],\n timesignature= r['Time_signature'],\n valence= r['Valence'],\n songid= r['SongID']))\n return features_list",
"def create_df_recommendations(api_results):\r\n track_name = []\r\n track_id = []\r\n artist = []\r\n album = []\r\n duration = []\r\n popularity = []\r\n for items in api_results['tracks']:\r\n try:\r\n track_name.append(items['name'])\r\n track_id.append(items['id'])\r\n artist.append(items[\"artists\"][0][\"name\"])\r\n duration.append(items[\"duration_ms\"])\r\n album.append(items[\"album\"][\"name\"])\r\n popularity.append(items[\"popularity\"])\r\n except TypeError:\r\n pass\r\n df = pd.DataFrame({ \"track_name\": track_name, \r\n \"album\": album, \r\n \"track_id\": track_id,\r\n \"artist\": artist, \r\n \"duration\": duration, \r\n \"popularity\": popularity})\r\n\r\n return df",
"def create_df_top_songs(api_results):\r\n #create lists for df-columns\r\n track_name = []\r\n track_id = []\r\n artist = []\r\n album = []\r\n duration = []\r\n popularity = []\r\n #loop through api_results\r\n for items in api_results['items']:\r\n try:\r\n track_name.append(items['name'])\r\n track_id.append(items['id'])\r\n artist.append(items[\"artists\"][0][\"name\"])\r\n duration.append(items[\"duration_ms\"])\r\n album.append(items[\"album\"][\"name\"])\r\n popularity.append(items[\"popularity\"])\r\n except TypeError:\r\n pass\r\n # Create the final df \r\n df = pd.DataFrame({ \"track_name\": track_name, \r\n \"album\": album, \r\n \"track_id\": track_id,\r\n \"artist\": artist, \r\n \"duration\": duration, \r\n \"popularity\": popularity})\r\n\r\n return df",
"def get_audio_features(uri):\n try:\n uri = str(uri)\n res = re.findall(r':(?: *([\\w.-]+):)', uri)\n str_res = ' '.join([str(word) for word in res])\n\n if str_res in ['playlist', 'userplaylist']:\n # from the playlist get URIs for each artist\n artist_uris_total = get_artists_from(uri)\n # from artist uris get a list of album uris\n albums_uris_total = []\n for artist_uri in artist_uris_total:\n album_uris = get_albums_from(artist_uri)\n albums_uris_total.extend(album_uris)\n # from a list of albums get tracks\n track_uris_total = []\n for albums_uri in albums_uris_total:\n tracks_uris = get_tracks_from(albums_uri)\n track_uris_total.extend(tracks_uris)\n print(track_uris_total)\n for track_uri in track_uris_total:\n features_to_db(track_uri)\n\n elif str_res == 'artist':\n albums_uris_total = get_albums_from(uri)\n track_uris_total = []\n for albums_uri in albums_uris_total:\n tracks_uris = get_tracks_from(albums_uri)\n track_uris_total.extend(tracks_uris)\n print(track_uris_total)\n for track_uri in track_uris_total:\n features_to_db(track_uri)\n\n elif str_res == 'album':\n track_uris_total = get_tracks_from(uri)\n print(track_uris_total)\n for track_uri in track_uris_total:\n features_to_db(track_uri)\n\n elif str_res == 'track':\n features_to_db(uri)\n\n except Exception as e:\n print(\"Error processing {}: {}\".format(uri, e))\n raise e\n\n else:\n DB.session.commit()",
"def predict_best_songs(track_id):\n #This will give us a list of features necessary for prediction\n #Code by Ekaterina & Hernan\n song_to_predict = get_features(track_id)\n #Add song to existing DB\n #Code by Hernan\n add_song(song_to_predict)\n #This K Means model will give us a list of recommended songs\n #Code by Josh\n def predicto(track_id):\n # Instantiate and fit knn to the correct columns\n knn = NearestNeighbors(n_neighbors=20)\n knn.fit(df[df.columns[5:]])\n obs = df.index[df['id'] == track_id]\n series = df.iloc[obs, 5:].to_numpy()\n neighbors = knn.kneighbors(series)\n new_obs = neighbors[1][0][6:20]\n return list(df.loc[new_obs, 'id'])\n #Converting the DB to a DF to run a K Means model through\n conn = sqlite3.connect('sprs/spotitry_songs.db')\n curs = conn.cursor()\n SQL_Query = pd.read_sql_query(''' SELECT * from song ''',conn)\n df = pd.DataFrame(SQL_Query, columns=['id','name','energy',\n 'liveness','danceability','instrumentalness','loudness',\n 'speechiness','valence','tempo'])\n track_list = predicto(track_id)\n #Here we'll turn our list of track ids into song names\n #Code by Ekaterina & Hernan\n #Re-written by Hernan to return json with feature list\n suggestions = get_features(track_list[0])\n column_names = ['track_id', 'name', 'acousticness', 'danceability', 'duration_ms', 'energy', 'instrumentalness',\n 'liveness', 'loudness', 'speechiness', 'tempo', 'valence']\n final = pd.DataFrame([suggestions], columns=column_names)\n result = final.to_json()\n return result",
"def features_from_category(cat_id):\n\n try:\n playlists_uris = get_playlists_from(cat_id)\n artist_uris_total = []\n for playlist_uri in playlists_uris:\n artist_uris = get_artists_from(playlist_uri)\n artist_uris_total.append(artist for artist in artist_uris)\n albums_uris_total = []\n for artist_uri in artist_uris_total:\n album_uris = get_albums_from(artist_uri)\n albums_uris_total.append(album for album in album_uris)\n # from a list of albums get tracks\n track_uris_total = []\n for albums_uri in albums_uris_total:\n tracks_uris = get_tracks_from(albums_uri)\n track_uris_total.append(track for track in tracks_uris)\n for track_uri in track_uris_total:\n features_to_db(track_uri)\n\n except Exception as e:\n print(\"Error processing {}: {}\".format(cat_id, e))\n raise e\n\n else:\n DB.session.commit()",
"def main():\n\n # Spotify settings\n sp = spotipy.Spotify(client_credentials_manager=SpotifyClientCredentials(\n client_id='118aa19f2b66476fbc062f0ac146d8b5',\n client_secret='7ca95a3159ab4391bee70f70d47a9271'\n ))\n # Postgres settings\n conn = psycopg2.connect(\n host = \"postgres\",\n port = \"5432\",\n dbname = \"postgres\",\n user = \"postgres\",\n password = \"postgres1234\",\n )\n # Redis settings\n REDIS_HOST = \"jupyter_redis\"\n REDIS_PORT = 6379\n REDIS = redis.Redis(host=REDIS_HOST, port=REDIS_PORT)\n\n # Spotify data\n search_results = []\n playlists = sp.search(q='covid', type='playlist', limit=50,offset=0)['playlists']\n\n print('- Load Playlists from search query')\n # Load Playlists from search query\n while playlists:\n try:\n for i, playlist in enumerate(playlists['items']):\n search_results.append(playlist['id'])\n print(\"%4d %s %s\" % (i + 1 + playlists['offset'], playlist['id'], playlist['name']))\n if playlists['next']:\n playlists = sp.next(playlists)['playlists'] # Get next playlist given a page result\n else:\n playlists = None\n except Exception as e:\n playlists = None\n print('Done')\n\n \n\n\n print('- Load tracks into postgres')\n ## Load information into Postgres\n counter = 0\n final_informations = []\n columns = 'id_playlist,name_playlist,id_track,name_track,timestamp,danceability,energy,key,loudness,mode,speechiness,acousticness,instrumentalness,liveness,valence,tempo,duration_ms,time_signature'\n\n for playlist_id in search_results:\n # If is not in redis\n if(REDIS.get(playlist_id) == None):\n try:\n playlist_complete = sp.playlist(playlist_id)\n tracks = playlist_complete['tracks']\n while tracks:\n for track in tracks['items']:\n audio_features = sp.audio_features(track['track']['id'])[0]\n # Open cursor\n cur = conn.cursor()\n # Insert\n cur.execute(f'insert into spotify_details ({columns}) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)',\n (\n playlist_complete['id'],\n playlist_complete['name'],\n track['track']['id'],\n track['track']['name'],\n track['added_at'],\n audio_features['danceability'],\n audio_features['energy'],\n audio_features['key'],\n audio_features['loudness'],\n audio_features['mode'],\n audio_features['speechiness'],\n audio_features['acousticness'],\n audio_features['instrumentalness'],\n audio_features['liveness'],\n audio_features['valence'],\n audio_features['tempo'],\n audio_features['duration_ms'],\n audio_features['time_signature']\n ))\n # Commit the transition\n conn.commit()\n # Close cursor\n cur.close()\n if tracks['next']:\n tracks = sp.next(tracks) # Get next playlist given a page result\n else:\n tracks = None\n print(f'Done playlist: {counter} of {len(search_results)}')\n counter += 1\n except Exception as e:\n print(e)\n counter += 1\n REDIS.set(playlist_id, 1)\n print('- Added playlist: {}'.format(playlist_id))\n # Close connection\n conn.close()\n\n print('Done! All data are update in principal db and redis!')",
"def get_song_features(self, song_id: str) -> List[float]:\n user = self.init_user()\n user.trace = True\n features = user.audio_features(song_id)[0]\n return [features['acousticness'], features['danceability'],\n features['energy'], features['duration_ms'],\n features['instrumentalness'], features['valence'],\n features['tempo'], features['liveness'],\n features['loudness'], features['speechiness'],\n features['key']]"
]
| [
"0.72734356",
"0.6587091",
"0.64254206",
"0.63319993",
"0.63076264",
"0.6237071",
"0.6106666",
"0.6090266",
"0.6030595",
"0.5984066",
"0.5975257",
"0.5961235",
"0.5942549",
"0.5933294",
"0.5843309",
"0.58238524",
"0.57739335",
"0.56598234",
"0.5612531",
"0.5556799",
"0.55472183",
"0.5543479",
"0.5499357",
"0.54906255",
"0.5478944",
"0.54756117",
"0.54718167",
"0.54643685",
"0.54444784",
"0.54433674"
]
| 0.8000351 | 0 |
Finds rows which are different between two DataFrames. | def dataframe_difference(df1, df2, which=None):
comparison_df = df1.merge(
df2,
indicator=True,
how='outer'
)
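    # with indicator=True the merge adds a '_merge' column tagging each row as 'left_only', 'right_only', or 'both'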
if which is None:
diff_df = comparison_df[comparison_df['_merge'] != 'both']
else:
diff_df = comparison_df[comparison_df['_merge'] == which]
    diff_df = diff_df.drop(columns="_merge")
    return diff_df.drop_duplicates().reset_index(drop=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dataframe_diff(xxa,xxb):\n\n xa=pd.DataFrame(xxa)\n xb=pd.DataFrame(xxb)\n merged = xa.merge(xb, indicator=True, how='outer')\n\n diff=merged[merged['_merge'] != 'both']\n\n return diff",
"def isLessEqual(df1, df2):\n indices = list(set(df1.index).intersection(df2.index))\n dff1 = df1.loc[indices, :]\n dff2 = df2.loc[indices, :]\n df = dff1 - dff2\n df_tot = df.applymap(lambda v: v <= 0)\n result = df_tot.sum().sum() == df.size\n return result",
"def _(x: DataFrame, y: DataFrame) -> DataFrame:\n _check_xy(x, y)\n indicator = \"__datar_setdiff__\"\n out = pandas.merge(x, y, how=\"left\", indicator=indicator)\n\n from .distinct import distinct\n\n return distinct(\n out[out[indicator] == \"left_only\"]\n .drop(columns=[indicator])\n .reset_index(drop=True),\n __calling_env=CallingEnvs.REGULAR,\n )",
"def compare_dfs(df1: pd.DataFrame, df2: pd.DataFrame) -> list:\n # columns only exist in df1\n df1_only = list(set(df1.columns) - set(df2.columns))\n # columns only exist in df2\n df2_only = list(set(df2.columns) - set(df1.columns))\n print(df1_only, \"\\nTotal: \", len(df1_only), \"columns\\n\")\n print(df2_only, \"\\nTotal: \", len(df2_only), \"columns\\n\")\n return df1_only, df2_only",
"def df_diff(self, df1, df2, which=None):\n comparison_df = df1.merge(df2, indicator=True, on=self.columns, how=\"outer\")\n if which is None:\n diff_df = comparison_df[comparison_df[\"_merge\"] != \"both\"].reset_index(\n drop=True\n )\n else:\n diff_df = comparison_df[comparison_df[\"_merge\"] == which].reset_index(\n drop=True\n )\n\n return diff_df",
"def df_equal( df1, df2 ):\n return df1.fillna(1).sort(axis=1).eq(df2.fillna(1).sort(axis=1)).all().all()",
"def _(x: DataFrame, y: DataFrame) -> bool:\n _check_xy(x, y)\n\n x = x.sort_values(by=x.columns.to_list()).reset_index(drop=True)\n y = y.sort_values(by=y.columns.to_list()).reset_index(drop=True)\n return x.equals(y)",
"def compare(old_dataframe, fresh_dataframe):\n combined_dataframe = pd.concat([old_dataframe, fresh_dataframe])\n combined_dataframe = combined_dataframe.reset_index(drop=True)\n\n grouped_dataframes = combined_dataframe.groupby(DataFrameRow.REQUIRED)\n\n # if there is overlap, there will be a column with length > 1\n unique_indices = [col[0] for col in grouped_dataframes.groups.values() if\n len(col) == 1]\n\n return combined_dataframe.reindex(unique_indices)",
"def df_equal(left: pd.DataFrame, right: pd.DataFrame, **kwargs) -> bool:\n pd.testing.assert_frame_equal(left, right, **kwargs)\n return True",
"def assertDataframeEqual(self, df1, df2, msg='Dataframes are NOT equal'):\n try:\n pandas.testing.assert_frame_equal(df1, df2)\n except AssertionError as e:\n raise self.failureException(msg) from e",
"def assertFrameEqual( df1, df2 ):\n\n return assert_frame_equal( df1.sort( axis=1) , df2.sort( axis=1) , check_names = True )",
"def diff_dfs(df1, df2, how=\"left\"):\n if (df1.columns != df2.columns).any():\n raise ValueError(\"Two dataframe columns must match\")\n\n if df1.equals(df2):\n return None\n elif how == 'right':\n return pd.concat([df2, df1, df1]).drop_duplicates(keep=False)\n elif how == 'left':\n return pd.concat([df1, df2, df2]).drop_duplicates(keep=False)\n else:\n raise ValueError('how parameter supports only \"left\" or \"right keywords\"')",
"def setDifference(self, table2):\n results = set([])\n for rec in self.records:\n rec_tuple = tuple([v for (k, v) in rec.items()])\n results.add(rec_tuple)\n for rec in table2.records:\n rec_tuple = tuple([v for (k, v) in rec.items()])\n if rec_tuple in results:\n results.remove(rec_tuple)\n for item in results:\n print item",
"def compare_tables(t1, t2):\n assert type(t1)==type(t2)\n assert isinstance(t1,(np.ndarray,DataTable,pd.DataFrame))\n assert np.shape(t1)==np.shape(t2)\n if isinstance(t1,DataTable):\n assert all([np.all(t1.c[i]==t2.c[i]) for i in range(np.shape(t1)[1])])\n else:\n assert np.all(t1==t2)\n if isinstance(t1,DataTable):\n assert t1.get_column_names()==t2.get_column_names()\n if isinstance(t1,pd.DataFrame):\n assert np.all(t1.columns==t2.columns)",
"def compare_almost_equal(self, df1, df2, name):\n\n\t\tcomp_df = pd.DataFrame()\n\t\tcomp_df['left'] = df1[name].round(SIG_DIG)\n\t\tcomp_df['right'] = df2[name].round(SIG_DIG)\n\t\tcomp_df['diff'] = comp_df['left'] - comp_df['right']\n\t\tcomp_df['diff'] = comp_df['diff'].abs().round(SIG_DIG)\n\t\t# print(comp_df.query('diff > 0.0000'))\n\t\treturn comp_df.query('diff > .0001').empty",
"def remove_intersection(cls, first_df: pd.DataFrame, second_df: pd.DataFrame):\n\n first_df = first_df[~first_df.isin(second_df)].dropna()\n cls.logger.info(f'{len(first_df)} emails left to spam after removing already spammed')\n return first_df",
"def dbdiff(old, new):\r\n # TODO: check the comparison and add the previous value(s) into the diff table\r\n dir = \"C:/Users/Volodymyr.Turbayevsk/Desktop/Docs/programming/R/indicators/zipDBCopy/\"\r\n logging.info(old + '->' + new)\r\n engine = create_engine('sqlite:///' + dir + old + '.sqlite')\r\n next_en = create_engine('sqlite:///' + dir + new + '.sqlite')\r\n req = 'select * from \"PI_IndValues\" where RecStatus=\" \"'\r\n df1 = pd.read_sql(req, engine).set_index(['SourceId', 'YrMn', 'ElementCode'])\r\n df2 = pd.read_sql(req, next_en).set_index(['SourceId', 'YrMn', 'ElementCode'])\r\n df = pd.concat([df1, df2])\r\n df = df.drop_duplicates(keep=False)\r\n d1 = df1.index.levels[1].unique().values.tolist()\r\n d2 = df2.index.levels[1].unique().values.tolist()\r\n lst = list(set(d2) - set(d1))\r\n logging.debug(lst)\r\n for idx in lst:\r\n try:\r\n df = df.drop(index=str(idx), level=1)\r\n except:\r\n pass\r\n # print(df.tail())\r\n if len(df):\r\n old = pd.read_sql('select * from diff', engine).set_index(['SourceId', 'YrMn', 'ElementCode'])\r\n # print(old.tail())\r\n old = old.append(df)\r\n old = old.drop_duplicates(keep=False)\r\n # print(old.tail())\r\n else:\r\n old = pd.read_sql('select * from diff', engine).set_index(['SourceId', 'YrMn', 'ElementCode'])\r\n\r\n logging.debug(len(df), len(old))\r\n old.to_sql('diff', next_en, if_exists='replace')",
"def test_unique_entries_neg(self):\n\n # Dataframe that we create.\n df1 = pd.DataFrame([[1, 6, 2, 3, 19],\n [4, 5, 8, 6, 30],\n [4, 5, 12, 8, 22],\n [4, 7, 9, 5, 21],\n [7, 8, 9, 12, 5]],\n columns=['A', 'B', 'C', 'D', 'E'])\n\n # Dataframe that is NOT the same as the one the function should return.\n df2 = pd.DataFrame([[1, 6, 2, 3, 19],\n [4, 5, 12, 8, 22],\n [7, 8, 9, 12, 5]],\n columns=['A', 'B', 'C', 'D', 'E'])\n\n # List that is NOT the same as the one the function should return.\n list1 = [1, 4, 4, 4, 7]\n\n # Assume\n subsets = XbrlSubsets()\n\n # Assume 1\n tn_unique_entries1 = subsets.unique_entries(df1, 'A', False)\n # Assume 2\n tn_unique_entries2 = subsets.unique_entries(df1, 'A', True)\n\n # Assert 1\n self.assertNotEqual(tn_unique_entries1.reset_index(drop=True).equals(df2.reset_index(drop=True)), True)\n # Assert 2\n self.assertNotEqual(tn_unique_entries2 == list1, True)",
"def dataframe_difference(df_from_nerc,df_from_pangea):\n if len(df_from_nerc)!=0: # nothing to insert or update if df_from_nerc is empty\n not_in_database=[\n df_from_nerc.iloc[i]['semantic_uri'] \n not in df_from_pangea['semantic_uri'].values \n for i in range(len(df_from_nerc))\n ] \n df_from_nerc['action']= np.where(not_in_database ,'insert', '') # if there are different elements we always have to insert them\n df_insert=df_from_nerc[df_from_nerc['action']=='insert']\n if len(df_insert)==0:\n df_insert=None\n ## update cond\n if len(df_from_pangea)!=0: # nothing to update if df_from_pangea(pangaea db) is empty\n in_database=np.invert(not_in_database)\n df_from_nerc_in_database=df_from_nerc[in_database] \n # create Timestamp lists with times of corresponding elements in df_from_nerc and df_from_pangea //corresponding elements chosen by semanntic_uri\n df_from_nerc_in_database_T=[\n df_from_nerc_in_database[df_from_nerc_in_database['semantic_uri']==s_uri]['datetime_last_harvest'].iloc[0] \n for s_uri in df_from_nerc_in_database['semantic_uri']\n ]\n df_from_pangea_T=[\n df_from_pangea[df_from_pangea['semantic_uri']==s_uri]['datetime_last_harvest'].iloc[0] \n for s_uri in df_from_nerc_in_database['semantic_uri']\n ]\n # create list of booleans (condition for outdated elements)\n df_from_nerc_in_database_outdated=[df_from_nerc_in_database_T[i]>df_from_pangea_T[i] for i in range(len(df_from_nerc_in_database_T))]\n df_from_nerc_in_database=df_from_nerc_in_database.assign(action= np.where(df_from_nerc_in_database_outdated ,'update', ''))\n df_update=df_from_nerc_in_database[df_from_nerc_in_database['action']=='update']\n if len(df_update)==0: # make sure not to return empty dataframes! \n df_update=None\n else:\n df_update=None\n \n return df_insert,df_update\n \n else:\n df_insert,df_update=None,None\n \n return df_insert,df_update #df_insert/df_update.shape=(n,7) only 7 initial columns!",
"def common_cols(df1, df2):\n return list(set(df1.columns) & set(df2.columns))",
"def test_compute_container_hash__df(self):\n column_1 = [1, 2, 3]\n column_2 = [4, 5, 6]\n\n df_1 = QFDataFrame(data={\"A\": column_1, \"B\": column_2}, columns=[\"A\", \"B\"])\n df_2 = QFDataFrame(data={\"A\": column_1, \"B\": column_2}, columns=[\"B\", \"A\"])\n\n self.assertNotEqual(compute_container_hash(df_1), compute_container_hash(df_2))\n self.assertEqual(compute_container_hash(df_1.sort_index(axis=1)),\n compute_container_hash(df_2.sort_index(axis=1)))",
"def _compare_tables(expected_table, actual_table):\n\n expected_num_rows = len(expected_table.index)\n actual_num_rows = len(actual_table.index)\n if expected_num_rows != actual_num_rows:\n return False\n\n expected_column_names = list(expected_table)\n actual_column_names = list(actual_table)\n if set(expected_column_names) != set(actual_column_names):\n return False\n\n for i in range(expected_num_rows):\n for this_column_name in expected_column_names:\n if this_column_name in STRING_COLUMNS:\n are_entries_equal = (\n expected_table[this_column_name].values[i] ==\n actual_table[this_column_name].values[i])\n\n elif this_column_name in INTEGER_ARRAY_COLUMNS:\n these_expected_values = expected_table[\n this_column_name].values[i]\n if isinstance(these_expected_values, numpy.ndarray):\n these_expected_values = these_expected_values.tolist()\n\n these_actual_values = actual_table[this_column_name].values[i]\n if isinstance(these_actual_values, numpy.ndarray):\n these_actual_values = these_actual_values.tolist()\n\n are_entries_equal = (\n set(these_expected_values) == set(these_actual_values))\n\n else:\n are_entries_equal = numpy.isclose(\n expected_table[this_column_name].values[i],\n actual_table[this_column_name].values[i], atol=TOLERANCE)\n\n if not are_entries_equal:\n return False\n\n return True",
"def same_rows(rows_list_1, rows_list_2):\n return sorted(rows_list_1) == sorted(rows_list_2)",
"def compare_df_units(df1_load, df2_load):\n df1 = df1_load['Unit'].drop_duplicates().tolist()\n df2 = df2_load['Unit'].drop_duplicates().tolist()\n\n # identify differnces between unit lists\n list_comp = list(set(df1) ^ set(df2))\n # if list is not empty, print warning that units are different\n if list_comp:\n log.info('Merging df with %s and df with %s units', df1, df2)",
"def diff_dashtable(data, data_previous, row_id_name=None):\n df, df_previous = pd.DataFrame(data=data), pd.DataFrame(data_previous)\n\n if row_id_name is not None:\n # If using something other than the index for row id's, set it here\n for _df in [df, df_previous]:\n\n # Why do this? Guess just to be sure?\n assert row_id_name in _df.columns\n\n _df = _df.set_index(row_id_name)\n else:\n row_id_name = \"index\"\n\n # Pandas/Numpy says NaN != NaN, so we cannot simply compare the dataframes. Instead we can either replace the\n # NaNs with some unique value (which is fastest for very small arrays, but doesn't scale well) or we can do\n # (from https://stackoverflow.com/a/19322739/5394584):\n # Mask of elements that have changed, as a dataframe. Each element indicates True if df!=df_prev\n df_mask = ~((df == df_previous) | ((df != df) & (df_previous != df_previous)))\n\n # ...and keep only rows that include a changed value\n df_mask = df_mask.loc[df_mask.any(axis=1)]\n\n changes = []\n\n # This feels like a place I could speed this up if needed\n for idx, row in df_mask.iterrows():\n row_id = row.name\n\n # Act only on columns that had a change\n row = row[row.eq(True)]\n\n for change in row.iteritems():\n\n changes.append(\n {\n row_id_name: row_id,\n \"column_name\": change[0],\n \"current_value\": df.at[row_id, change[0]],\n \"previous_value\": df_previous.at[row_id, change[0]],\n }\n )\n\n return changes",
"def test_diff(self):\n _ff_source = FlatfileDataset(_filename=os.path.join(Test_Resource_Dir, \"csv_source.csv\"),\n _has_header=True, _delimiter=\";\", _csv_dialect=\"excel-tab\",\n _quoting=\"MINIMAL\", _quotechar='\"')\n _dataset_source = _ff_source.load()\n _ff_dest = FlatfileDataset(_filename=os.path.join(Test_Resource_Dir, \"csv_dest_orig.csv\"),\n _has_header=True, _delimiter=\";\", _csv_dialect=\"excel-tab\",\n _quoting=\"MINIMAL\", _quotechar='\"')\n _dataset_dest = _ff_dest.load()\n # print(str(_dataset_dest))\n _missing_left, _missing_right, _difference, _sorted = compare(_dataset_source, _dataset_dest, [0], True)\n self.assertEqual(_missing_left,\n [[9, 7, ['7844', 'TURNER', 'SALESMAN', '7698', '1981-09-08 00:00:00', '1500', '', '30']],\n [12, 12, ['7999', 'BORJESSON', 'HACKER', '7839', '2013-01-01', '99999', '', '10']]],\n 'Missing left differs')\n self.assertEqual(_missing_right,\n [[6, 6, ['7782', 'CLARK', 'MANAGER', '7839', '1981-06-09 00:00:00', '2450', '', '10']],\n [7, 6, ['7788', 'SCOTT', 'ANALYST', '7566', '1982-12-09 00:00:00', '3000', '', '20']]],\n 'Missing right differs')\n\n self.assertEqual(_difference,\n [\n [0, 0, ['7369', 'SMITH', 'CLERK', '7902', '1980-12-17 00:00:00', '800', '', '20'],\n ['7369', 'SMITH', 'CLERK', '7902', '1980-12-17 00:00:00', '700', '', '20']],\n [1, 1, ['7499', 'ALLEN', 'SALE;SMAN', '7698', '1981-02-20 00:00:00', '1600', '300', '30'],\n ['7499', 'ALLEN', 'SALESMAN', '7698', '1981-02-20 00:00:00', '1600', '300', '30']],\n [8, 6, ['7839', 'KING', 'PRESIDENT ', '', '1981-11-17 00:00:00', '5000', '', '10'],\n ['7839', 'KING', 'PRESIDENT', '', '1981-11-17 00:00:00', '4500', '', '10']],\n [9, 8, ['7876', 'ADAMS', 'CLERK', '7788', '1983-01-12 00:00:00', '1100,5', '', '20'],\n ['7876', 'ADAMS', 'CLERK', '7788', '1983-01-12 00:00:00', '1100', '', '20']]\n ], 'Difference differs')",
"def similar(self, other):\r\n if self.rows == other.rows and self.columns == other.columns:\r\n return True\r\n else:\r\n return False",
"def compare_data(data):\n dfs = data[0]\n args = data[1]\n\n # ID the project IDs that do not exist in both data sets\n missing = []\n for value in dfs[0]['project_id_new']:\n if value not in [x for x in dfs[1]['project_id_new']]:\n missing.append(value)\n\n for value in dfs[1]['project_id_new']:\n if value not in [x for x in dfs[0]['project_id_new']] and value not in missing:\n missing.append(value)\n\n # join data on values that exist in both data sets\n df = pd.merge(dfs[0], dfs[1], how='inner', on='project_id_new')\n\n differ = {}\n\n for index, row in df.iterrows():\n project = row['project_id_new']\n if row['sum_of_amount_new_x'] != row['sum_of_amount_new_y']:\n num1, num2 = sort_nums(row['sum_of_amount_new_x'], row['sum_of_amount_new_y'])\n variance = num1 - num2\n differ.update({project: variance})\n\n return missing, differ, args",
"def _diff_modified_data(self, d1, d2, rows1, rows2):\n diff = {\n 'modified_cells': [],\n 'added_rows': [],\n 'removed_rows': []\n }\n i1 = 0\n visited_i2 = set()\n while i1 < len(rows1):\n dest_row = -1\n r1 = d1[i1]\n i2 = 0\n while i2 < len(rows2):\n if i2 not in visited_i2:\n r2 = d2[i2]\n col_l = min(len(r1), len(r2)) # col len should be same in fact\n same_cnt, diffs = 0, []\n for j in range(col_l):\n if r1[j] == r2[j]:\n same_cnt += 1\n else:\n diffs.append({\n 'src_row': rows1[i1],\n 'dest_row': rows2[i2],\n 'src_col': j,\n 'dest_col': j,\n 'src_val': r1[j],\n 'dest_val': r2[j]\n })\n if same_cnt / col_l >= self._row_modify_threshold:\n diff['modified_cells'].extend(diffs)\n dest_row = rows2[i2]\n break\n i2 += 1\n if dest_row >= 0:\n visited_i2.add(i2)\n else:\n diff['removed_rows'].append(rows1[i1])\n i1 += 1\n for i2 in range(len(rows2)):\n if i2 not in visited_i2:\n diff['added_rows'].append(rows2[i2])\n return diff",
"def _(x: DataFrame, y: DataFrame) -> DataFrame:\n _check_xy(x, y)\n from .distinct import distinct\n\n return distinct(\n pandas.merge(x, y, how=\"outer\"), __calling_env=CallingEnvs.REGULAR\n )"
]
| [
"0.7803869",
"0.6995879",
"0.6965345",
"0.6879113",
"0.67469674",
"0.65898186",
"0.6549119",
"0.65145093",
"0.6361907",
"0.63117826",
"0.629948",
"0.62905806",
"0.6156689",
"0.61474556",
"0.6043507",
"0.59707797",
"0.59524006",
"0.5915726",
"0.5877942",
"0.58593184",
"0.5821676",
"0.58175606",
"0.581733",
"0.57959306",
"0.577064",
"0.5765404",
"0.5736684",
"0.57300246",
"0.5714729",
"0.5706557"
]
| 0.73627996 | 1 |
Creates a similarity matrix for the audio features (except key and mode) of two DataFrames. | def create_similarity_score(df1, df2, similarity_score="cosine_sim"):
assert list(df1.columns[6:]) == list(df2.columns[6:]), "dataframes need to contain the same columns"
features = list(df1.columns[6:])
features.remove('key')
features.remove('mode')
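    # key and mode are categorical, so only the continuous audio features enter the similarity computation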
    df_features1, df_features2 = df1[features], df2[features]
    scaler = MinMaxScaler()  # MinMaxScaler keeps every feature in [0, 1]; StandardScaler is no longer used
    df_features_scaled1, df_features_scaled2 = scaler.fit_transform(df_features1), scaler.fit_transform(df_features2)
if similarity_score == "linear":
linear_sim = linear_kernel(df_features_scaled1, df_features_scaled2)
return linear_sim
elif similarity_score == "cosine_sim":
cosine_sim = cosine_similarity(df_features_scaled1, df_features_scaled2)
return cosine_sim
    # other measures may be implemented in the future
    raise ValueError(f"similarity_score must be 'linear' or 'cosine_sim', got {similarity_score!r}")
| {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compare():\n data = request.get_json()\n res_sim = audio_featurizer.compare_two_features_sets(data['features_1'], data['features_2'])\n result = dict(similarity=res_sim)\n return jsonify(result)",
"def to_similarity(num_frames, gt_data, pr_data):\n gt_data_in_frame = split_into_frames(num_frames, gt_data)\n pr_data_in_frame = split_into_frames(num_frames, pr_data)\n gt_id_subset = [None for _ in range(num_frames)]\n pr_id_subset = [None for _ in range(num_frames)]\n similarity = [None for _ in range(num_frames)]\n for t in range(num_frames):\n gt_id_subset[t] = gt_data_in_frame[t][:, TRACK_ID_COLUMN]\n pr_id_subset[t] = pr_data_in_frame[t][:, TRACK_ID_COLUMN]\n similarity[t] = util.iou_xywh(gt_data_in_frame[t][:, BBOX_COLUMNS],\n pr_data_in_frame[t][:, BBOX_COLUMNS])\n return gt_id_subset, pr_id_subset, similarity",
"def get_similarity(df):\n count = CountVectorizer()\n count_matrix = count.fit_transform(df[\"bag_of_words\"])\n cosine_sim = cosine_similarity(count_matrix, count_matrix)\n return cosine_sim",
"def join_featuresets(featureset1, featureset2):\n joined_instances = {}\n names = []\n for audio_name in featureset1.keys():\n if audio_name in featureset2:\n names.append(audio_name)\n for name in names:\n joined_vec = join_feature_vectors(featureset1[name], featureset2[name])\n joined_instances[name] = joined_vec\n return joined_instances",
"def get_movie_tag_matrix(self):\n data_frame = genre_tag.get_genre_data()\n tag_df = data_frame.reset_index()\n unique_tags = tag_df.tag.unique()\n idf_data = tag_df.groupby(['movieid'])['tag'].apply(set)\n tf_df = tag_df.groupby(['movieid'])['tag'].apply(lambda x: ','.join(x)).reset_index()\n movie_tag_dict = dict(zip(tf_df.movieid, tf_df.tag))\n tf_weight_dict = {movie: genre_tag.assign_tf_weight(tags.split(',')) for movie, tags in\n list(movie_tag_dict.items())}\n idf_weight_dict = {}\n idf_weight_dict = genre_tag.assign_idf_weight(idf_data, unique_tags)\n tag_df = genre_tag.get_model_weight(tf_weight_dict, idf_weight_dict, tag_df, 'tfidf')\n tag_df[\"total\"] = tag_df.groupby(['movieid','tag'])['value'].transform('sum')\n temp_df = tag_df[[\"moviename\", \"tag\", \"total\"]].drop_duplicates().reset_index()\n\n\n\n genre_tag_tfidf_df = temp_df.pivot_table('total', 'moviename', 'tag')\n genre_tag_tfidf_df = genre_tag_tfidf_df.fillna(0)\n genre_tag_tfidf_df.to_csv('movie_tag_matrix1d.csv', index=True, encoding='utf-8')\n return genre_tag_tfidf_df",
"def features_combine():\n\n\n\t# PROCESSING AUDIO",
"def match_matrices(first_matrix_df, second_matrix_df):\n\n first_matrix_array = first_matrix_df.to_numpy()\n second_matrix_array = second_matrix_df.to_numpy()\n\n first_matrix_rows = list(first_matrix_df.index)\n first_matrix_columns = list(first_matrix_df)\n\n second_matrix_rows = list(second_matrix_df.index)\n second_matrix_columns = list(second_matrix_df)\n\n if first_matrix_rows == second_matrix_rows and first_matrix_columns == second_matrix_columns:\n print(\"They match!\")\n\n else:\n print(\"They don't match. Re-arranging ...\")\n\n desired_permutation = []\n for item in second_matrix_columns:\n ind = first_matrix_columns.index(item) # get the correct order of image IDs from distance matrix columns\n desired_permutation.append(ind)\n\n idx = np.empty_like(desired_permutation)\n idx[desired_permutation] = np.arange(len(desired_permutation))\n second_matrix_array[:] = second_matrix_array[:, idx]\n second_matrix_array[:] = second_matrix_array[idx, :]\n\n second_matrix_df = pd.DataFrame(second_matrix_array, columns=first_matrix_columns, index=first_matrix_rows)\n\n return first_matrix_df, second_matrix_df",
"def _generate_similarity_mat(labels):\n l_mat = np.repeat(labels, len(labels), axis=1)\n l_mat_t = l_mat.T\n\n sim_mat = np.equal(l_mat, l_mat_t).astype(int)\n return sim_mat",
"def similarity_function_old(feature1, feature2):\n f1Magnitude = feature1.dot(feature1)\n f2Magnitude = feature2.dot(feature2)\n return 1 - feature1.dot(feature2) / (f1Magnitude * f2Magnitude)",
"def _get_similarity_score(self, dict1, dict2):\n try:\n majorScoreDeterminer1 = ['primaryGenreId']\n majorScoreDeterminer2 = ['genreIds']\n Score = 0 # Base Score\n for items in majorScoreDeterminer2:\n\n for item1 in self._get_app_param_info(dict1, resultCount=1, resultKey=items):\n if item1 in self._get_app_param_info(dict2, resultCount=1, resultKey=items):\n if Score == 0: # Add 50% base score for this category.\n Score += 2 * .5\n Score += 2 * .5 / len(self._get_app_param_info(dict1, resultCount=1, resultKey=items))\n\n for items in majorScoreDeterminer1:\n if str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) in str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) and str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) and str(\n self._get_app_param_info(dict1, resultCount=1, resultKey=items)):\n Score += (3 / len(majorScoreDeterminer1))\n\n nameMatchScore = difflib.SequenceMatcher(None,\n self._get_app_param_info(dict1, resultCount=1,\n resultKey='trackName'),\n self._get_app_param_info(dict2, resultCount=1,\n resultKey='trackName')).ratio()\n Score += nameMatchScore\n\n minorScoreDeterminer = ['isGameCenterEnabled', 'languageCodesISO2A', 'contentAdvisoryRating', 'artistId',\n 'formattedPrice']\n\n for items in minorScoreDeterminer:\n if items == \"formattedPrice\":\n if str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) == \"Free\" and str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) == \"Free\":\n Score += (4 / (len(minorScoreDeterminer)))\n elif str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) == \"Free\" and str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) != \"Free\":\n continue\n elif str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) != \"Free\" and str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) == \"Free\":\n continue\n elif str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) != \"Free\" and str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) != \"Free\":\n Score += (4 / (len(minorScoreDeterminer)))\n else:\n if str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) in str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)):\n Score += (4 / (len(minorScoreDeterminer)))\n Score = round(Score, 1)\n log_str = \"id\" + str(self._get_app_param_info(dict2, resultCount=1, resultKey='trackId')) + \" - \" + str(\n self._get_app_param_info(dict2, resultCount=1, resultKey='trackName')) + \"\\tScore: \" + str(Score)\n except AssertionError as e:\n print(\"Error: _get_similarity_score %s\" % e)\n sys.exit(\"Exit script with error code %s\" % e)\n except TypeError as e:\n print(\"Error: _get_similarity_score %s\" % e)\n sys.exit(\"Exit script with error code %s\" % e)\n except:\n e = sys.exc_info()[0]\n print(\"Error: _get_similarity_score %s\" % e)\n sys.exit(\"Exit script with error code %s\" % e)\n else:\n return log_str",
"def content_similarity(self, movie1, movie2):\n v1, v2 = self.get_tfidf(movie1), self.get_tfidf(movie2)\n return self.cosine_similarity(v1, v2)",
"def similarity(self, wSet1, wSet2, idf): \n if len(wSet1) == 0 or len(wSet2) == 0:\n return 0.0\n else:\n defaultIDF = idf['unknownToken']\n intersection = wSet1.intersection(wSet2)\n# intersection = self.synonymIntersection(wSet1, wSet2, idf)\n if len(intersection) == 0:\n return 0\n sum1 = 0\n sum2 = 0\n intersectionSum = 0\n for word in wSet1:\n sum1 += (idf.get(word, defaultIDF))**2\n for word in wSet2:\n sum2 += (idf.get(word, defaultIDF))**2\n for word in intersection:\n intersectionSum += (idf.get(word, defaultIDF))**2\n \n if sum1 == 0 or sum2 == 0:\n return 0.0\n else:\n return intersectionSum/(math.sqrt(sum1) * math.sqrt(sum2))",
"def wordSimilarityRatio(sent_1,sent_2):",
"def similarity(self, w1, w2):\r\n sim = self.represent(w1).dot(self.represent(w2))\r\n return sim",
"def similarity(self, w1, w2):\r\n return self.represent(w1).dot(self.represent(w2))",
"def similarity(self, w1, w2):\r\n return self.represent(w1).dot(self.represent(w2))",
"def similarity_function(feature1, feature2):\n # 256 HOG, 18 HSV, 512 Encoder\n # weight color more if using the full vector\n if len(feature1) > 785:\n salient1 = feature1[256:256 + 18].copy() # be careful not to modify feature vector in place\n salient2 = feature2[256:256 + 18].copy()\n feature1 = feature1.copy()\n feature2 = feature2.copy()\n feature1[256:256 + 18] = salient1 * 10\n feature2[256:256 + 18] = salient2 * 10\n\n abs_distance = np.abs(feature1 - feature2)\n return np.sum(abs_distance)",
"def cossim(corpus):\n files = os.listdir()\n vectorizer = TfidfVectorizer()\n trsfm = vectorizer.fit_transform(corpus)\n columns = vectorizer.get_feature_names()\n df_tfidf = pd.DataFrame(trsfm.toarray(), columns = columns, index = corpus)\n out = cosine_similarity(trsfm)\n df_result = pd.DataFrame(out, columns = files, index = files)\n return df_result",
"def getSimilarityMatrixTest(testBags, trainInstances, labels):\n\n\tsimilarityMatrix = np.zeros([testBags.shape[0], trainInstances.shape[0]])\n\n\t#print(similarityMatrix.shape)\n\n\tfor bagInd in range(0, testBags.shape[0]):\n\t\t#print(labels[bagInd])\n\t\t#get the average of all instances in this test patient bag\n\t\ttestInstances = testBags[bagInd]\n\n\t\tinstanceAvg = np.mean(testInstances, axis=0)\n\n\t\t#compute distance to all other instances from this bag average\n\t\tdistance = np.abs(instanceAvg - trainInstances)\n\n\t\t#sum the distances to get 1 similarity score\n\t\tsummedDistance = np.sum(distance,axis=1)\n\t\t#print(summedDistance)\n\t\tsimilarityMatrix[bagInd,:] = summedDistance\n\n\treturn similarityMatrix",
"def test_similarity(self):\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'dog.n.01'), 1))\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'mammal.n.01'), 0.180901358))",
"def similarity(text1, text2):\n\n clean1 = clean(text1)\n clean2 = clean(text2)\n count_meas = src.utils.nlp.prompt_similarity(clean1, clean2, vectorizer=CountVectorizer)\n tfidt_meas = src.utils.nlp.prompt_similarity(clean1, clean2, vectorizer=TfidfVectorizer)\n similarity_dict = {'count': count_meas, 'tfidf': tfidt_meas}\n return similarity_dict",
"def cosine_similarity_matrix(references: np.ndarray, queries: np.ndarray) -> np.ndarray:\n size1 = references.shape[0]\n size2 = queries.shape[0]\n scores = np.zeros((size1, size2))\n for i in range(size1):\n for j in range(size2):\n scores[i, j] = cosine_similarity(references[i, :], queries[j, :])\n return scores",
"def compare_stability_matrices(ism1, ism2): \n \n import scipy as sp\n import sklearn as sk\n\n ism1=sk.preprocessing.normalize(ism1,norm='l2')\n ism2=sk.preprocessing.normalize(ism2,norm='l2')\n distance=sp.spatial.distance.correlation(ism1.ravel(), ism2.ravel())\n similarity= 1-distance\n return similarity",
"def build_matrix(self):\n \n for p1 in self._properties: \n p1 = p1.get_vectorized_data()\n \n for p2 in self._properties:\n p2 = p2.get_vectorized_data()\n v1, v2 = self.prepare_vectors(p1, p2)\n self._similarity_matrix.append(cosine_similarity([v1],[v2]))",
"def dice_similarity_matrix(references: np.ndarray, queries: np.ndarray) -> np.ndarray:\n size1 = references.shape[0]\n size2 = queries.shape[0]\n scores = np.zeros((size1, size2))\n for i in range(size1):\n for j in range(size2):\n scores[i, j] = dice_similarity(references[i, :], queries[j, :])\n return scores",
"def get_similar_sentences(df_1, df_2):\n cv = CountVectorizer(stop_words=\"english\")\n\n cv.fit(pd.concat([df_1, df_2])[\"Sentance\"])\n\n df_1_feat = pd.DataFrame(\n cv.transform(df_1[\"Sentance\"]).toarray(), index=df_1[\"Sentance\"].index\n )\n df_2_feat = pd.DataFrame(\n cv.transform(df_2[\"Sentance\"]).toarray(), index=df_2[\"Sentance\"].index\n )\n\n score = calculate_distance(df_1_feat, df_2_feat)\n\n score_vals = score.min()\n mins = score_vals[score_vals < 0.5].index\n results = []\n for i, j in score.idxmin().loc[mins].items():\n results.append(\n (\n df_1[\"Sentance\"].loc[i],\n df_1[\"Page\"].loc[i],\n df_2[\"Sentance\"].loc[j],\n df_2[\"Page\"].loc[j],\n )\n )\n return pd.DataFrame(\n results,\n columns=[\"File 1 Sentance\", \"File 1 Page\", \"File 2 Sentance\", \"File 2 Page\"],\n )",
"def main():\n test_folders = argv[1]\n benchmark_file = argv[2]\n output_path = argv[3]\n\n method = ['n', 'a', 'a']\n bigram = ['False', 'False', 'True']\n output_file = output_path + '/' + 'method_comparison_cosine_values.csv'\n\n with open(test_folders, 'r') as f:\n test_folders = f.read()\n\n test_folders = test_folders.splitlines()\n\n with open(benchmark_file, 'r') as f:\n benchmark_file = f.read()\n\n benchmark_file = benchmark_file.splitlines()\n\n # initialize big data frame\n frames = []\n\n for k in xrange(len(benchmark_file)):\n\n test = str(test_folders[k]).replace('\"', '')\n print \"Reading test files from folder:\"\n print test\n\n benchmark = str(benchmark_file[k]).replace('\"', '')\n print \"Reading benchmark form file:\"\n print benchmark\n\n # read file paths from test documents folder\n query = sorted([os.path.join(test, f) for f in os.listdir(test) if f.endswith('.txt')])\n\n # load benchmark text file\n with open(benchmark, \"r\", encoding=\"utf-8\", errors='ignore') as doc:\n raw = doc.read()\n\n # initialize dict of dicts for data frame\n method_csv = {}\n\n for j in xrange(len(method)):\n # extract features from benchmark\n dtm = ExtractFeatures(method[j], bigram[j])\n benchmark_name = benchmark_file[k].split('\\\\')[-1]\n benchmark_features = dtm.extract_features_from_text(raw, benchmark_name)\n\n # extract terms from each text document to create a vocabulary (keeping unique terms only)\n vocabulary = sorted(set(w[1] for w in benchmark_features))\n print \"{0} features produced.\".format(str(len(vocabulary)))\n\n benchmark_dtv = DTM(vocabulary, benchmark_name, benchmark_features)\n benchmark_dtv = benchmark_dtv.compute_dtv()\n\n # load test document features\n test_features = []\n for q in query:\n dtm1 = ExtractFeatures(method[j], bigram[j])\n test_features = test_features + dtm1.extract_features_from_file(q)\n\n documents = sorted(set([d for d, w in test_features]))\n print \"{0} test documents read.\".format(str(len(documents)))\n\n print \"Computing DTM...\"\n test_dtm = DTM(vocabulary, documents, test_features)\n test_dtm = test_dtm.compute_dtm()\n\n print \"Computing cosine values...\"\n dv = {}\n for i in xrange(len(documents)):\n d = 1 - spatial.distance.cosine(benchmark_dtv[benchmark_name], test_dtm[documents[i]])\n if isnan(d):\n d = 0\n dv[documents[i]] = d\n\n this_method = \"method=\" + method[j] + '_' + \"bigram=\" + bigram[j]\n method_csv[this_method] = pd.Series(dv)\n\n print \"Saving to data frame...\"\n df = pd.DataFrame(method_csv)\n test = test.split('\\\\')[-1]\n test = test.split('.')[0]\n df['test_group'] = test\n\n frames.append(df)\n\n result = pd.concat(frames)\n\n print \"Saving results to file: \", output_file\n result.to_csv(output_file)\n\n print 'Finished computing {0} data frames'.format(str(len(test_folders)))",
"def similarity_with(self, other_text_analyzer):\n pass",
"def generate_ml_matrix(full_df, pred_ticker, feat_days):\n\n feature_tickers = [i for i in full_df.ticker.unique() if i != pred_ticker]\n dfml = full_df[full_df.ticker == pred_ticker].drop('ticker', axis=1)\n dfml.rename({'percent_change_feat': f'{pred_ticker}_percent_change_{feat_days}'}, axis=1, inplace=True)\n dfml.rename({'percent_change_pred': f'{pred_ticker}_percent_change_pred'}, axis=1, inplace=True)\n for ticker in feature_tickers:\n help_df = full_df[full_df.ticker == ticker][['past_date', 'current_date', 'prediction_date', 'percent_change_feat']]\n help_df.rename({'percent_change_feat': f'{ticker}_percent_change_{feat_days}'}, axis=1, inplace=True)\n dfml = pd.merge(dfml, help_df,\n left_on=['past_date', 'current_date', 'prediction_date'],\n right_on=['past_date', 'current_date', 'prediction_date'],\n how='left')\n return dfml",
"def _calculate_similarities(self) -> pd.DataFrame:\n\n df_encoded_articles = self._db_connection.get_dataframe(\n table_name='tfidf_representation',\n schema='encoded_articles'\n ).set_index('id')\n\n # Pandas loads the array column 'encoded' as a string e.g. \"[0.0, 0.6, 0.8]\" which needs translating to an array\n encoded_representations = np.array(df_encoded_articles['encoded'].tolist())\n\n return pd.DataFrame(\n index=df_encoded_articles.index,\n columns=df_encoded_articles.index,\n data=pairwise.cosine_similarity(encoded_representations)\n )"
]
| [
"0.61106133",
"0.6102003",
"0.575739",
"0.57292634",
"0.57210094",
"0.564439",
"0.56403285",
"0.56156737",
"0.56010723",
"0.55707866",
"0.55285054",
"0.549576",
"0.547676",
"0.54722166",
"0.54632",
"0.54632",
"0.5447425",
"0.5444294",
"0.54368526",
"0.5428084",
"0.5406614",
"0.540209",
"0.5399644",
"0.53214425",
"0.5293493",
"0.5293026",
"0.52875346",
"0.52874714",
"0.52862895",
"0.5281611"
]
| 0.6558798 | 0 |
Creates a dataframe of final recommendations filtered with a "mean" song from a playlist. | def filter_with_meansong(mean_song,recommendations_df, n_recommendations = 10):
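    # take the feature columns from index 6 onward; the categorical key and mode fields are dropped below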
features = list(mean_song.columns[6:])
features.remove("key")
features.remove("mode")
mean_song_feat = mean_song[features].values
mean_song_scaled = MinMaxScaler().fit_transform(mean_song_feat.reshape(-1,1))
recommendations_df_scaled = MinMaxScaler().fit_transform(recommendations_df[features])
mean_song_scaled = mean_song_scaled.reshape(1,-1)
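    # rank candidate tracks by cosine similarity to the scaled mean-song vector and keep the top n_recommendations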
sim_mean_finrecomms = cosine_similarity(mean_song_scaled,recommendations_df_scaled)[0][:]
indices = (-sim_mean_finrecomms).argsort()[:n_recommendations]
final_recommendations = recommendations_df.iloc[indices]
return final_recommendations | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def suggest_songs(source_song, songs_df, y, model):\n source_song = preprocess(source_song)\n recommendations = model.kneighbors(source_song)[1][0]\n # normalize dataset, our graph likes normalized data\n numeric_cols = songs_df.select_dtypes(include=np.number).columns\n df_num = songs_df.select_dtypes(include=np.number)\n songs_df_norm = songs_df.copy()\n songs_df_norm[numeric_cols] = (df_num - df_num.mean()) / df_num.std()\n return songs_df_norm.iloc[recommendations]",
"def get_recommendations(df,song_title, similarity_score, num_recommends = 5):\r\n indices = pd.Series(df.index, index = df['track_name']).drop_duplicates()\r\n idx = indices[song_title]\r\n sim_scores = list(enumerate(similarity_score[idx]))\r\n sim_scores = sorted(sim_scores, key = lambda x: x[1],reverse = True)\r\n top_scores = sim_scores[1:num_recommends+1]\r\n song_indices = [i[0] for i in top_scores]\r\n return df[\"track_name\"].iloc[song_indices]",
"def recommended_songs(user_input, features_df, knn_spotify, filepath):\n # making user input lower to not worry about capitalizations\n user_input = user_input.lower()\n key = the_key(filepath)\n # find what name_artist combo contains the user_input:\n selected_song = key.loc[key.str.contains(user_input)]\n # search the key df and return the song id\n song_id = selected_song.index.tolist()\n # feed the song id into the model\n song_row = features_df.loc[song_id, :]\n # model finds the NN and gives you back song id\n neigh_dist, neigh_index = knn_spotify.kneighbors(song_row)\n # random nn\n index = neigh_index.flat[0:10].tolist()\n # song_index = random.choice(index)\n # converting list to df for easier access\n recom_songs = key.iloc[index].to_frame()\n # list of songs with no ID and formatted as title\n recom_songs_list = recom_songs['name_artist'].to_list()\n for i in range(len(recom_songs_list)):\n recom_songs_list[i] = recom_songs_list[i].title()\n return recom_songs_list",
"def make_recommendation_ga(playlist):\n tracklist = []\n\n # tracknames = list(playlist['name'])\n print(playlist.head())\n\n track_features = playlist[['danceability', 'energy']]\n # 'speechiness', 'acousticness',\n # 'instrumentalness', 'liveness', 'valence']]\n\n track_features_matrix = track_features.values\n\n path, fitness = ga.genetic_algorithm(track_features_matrix, plot=False)\n\n visualization.plot_path(\n track_features,\n path,\n fitness,\n mode=\"none\",\n keep=True\n )\n\n return tracklist",
"def create_df_recommendations(api_results):\r\n track_name = []\r\n track_id = []\r\n artist = []\r\n album = []\r\n duration = []\r\n popularity = []\r\n for items in api_results['tracks']:\r\n try:\r\n track_name.append(items['name'])\r\n track_id.append(items['id'])\r\n artist.append(items[\"artists\"][0][\"name\"])\r\n duration.append(items[\"duration_ms\"])\r\n album.append(items[\"album\"][\"name\"])\r\n popularity.append(items[\"popularity\"])\r\n except TypeError:\r\n pass\r\n df = pd.DataFrame({ \"track_name\": track_name, \r\n \"album\": album, \r\n \"track_id\": track_id,\r\n \"artist\": artist, \r\n \"duration\": duration, \r\n \"popularity\": popularity})\r\n\r\n return df",
"def preprocessing():\n track_metadata = pd.read_csv('/home/sachet/Artificial Intelligence/song_data.csv')\n count_play = pd.read_csv('/home/sachet/Artificial Intelligence/10000.txt', sep='\\t', header=None, names=['user','song','play_count'])\n unique_track_metadata = track_metadata.groupby('song_id').max().reset_index()\n user_song_list = pd.merge(count_play, unique_track_metadata, how='left', left_on='song', right_on='song_id')\n user_song_list.rename(columns={'play_count':'listen_count'},inplace=True)\n del(user_song_list['song_id'])\n return user_song_list",
"def compute_df(playlist, song_factors, playlist_factors=None, method='ensemble'):\n playlist = playlist.str.replace('spotify:track:', '')\n playlist_set = set(playlist)\n seed_ids = []\n while len(seed_ids) < 2:\n rand = list(playlist.sample(n=1))[0]\n if rand in tid_to_idx and rand not in seed_ids:\n seed_ids.append(rand)\n playlist_set.remove(seed_ids[0])\n playlist_set.remove(seed_ids[1])\n if method == 'song':\n wrmf_output = wrmf_helpers.get_top_similar_from_tracks(\n song_factors,\n seed_ids,\n n_similar_songs=10000,\n verbose=False\n )\n elif method == 'playlist':\n wrmf_output = wrmf_helpers.get_top_similar_from_playlists(\n song_factors,\n playlist_factors,\n seed_ids,\n n_similar_songs=10000,\n n_similar_playlists=100\n )\n elif method == 'ensemble':\n wrmf_output = wrmf_helpers.get_top_similar_from_ensemble(\n song_factors,\n playlist_factors,\n seed_ids,\n n_similar_songs=10000,\n n_similar_playlists=100\n )\n else:\n raise ValueError(\"invalid method\")\n\n wrmf_output_set = set(wrmf_output)\n true_matches = playlist_set.intersection(wrmf_output_set)\n false_matches = wrmf_output_set.symmetric_difference(true_matches)\n\n X_train_ids = []\n Y_train = []\n for _ in range(min(len(true_matches), 10)):\n X_train_ids.append(true_matches.pop())\n Y_train.append(1)\n X_train_ids.append(false_matches.pop())\n Y_train.append(0)\n\n return compute_df_features(seed_ids, X_train_ids, Y_train)",
"def find_song_recommendations(access_token, tracks, target, n, params):\n track_string = '%2C'.join(tracks[:5])\n response = spotify.get_recommendations(access_token, 50, track_string, params)\n\n song_recommendation = response['tracks']\n recommendations = {song['id']: {'name': song['name']} for song in song_recommendation}\n\n moods = get_features_moods(recommendations)\n\n return order_songs(moods, target, n)",
"def get_playlist_feats(playlist_id):\r\n sourcePlaylistID = playlist_id\r\n sourcePlaylist = sp.user_playlist(username, sourcePlaylistID);\r\n tracks = sourcePlaylist[\"tracks\"];\r\n songs = tracks[\"items\"];\r\n\r\n track_ids = []\r\n track_names = []\r\n track_artists = []\r\n\r\n\r\n for i in range(0, len(songs)):\r\n if songs[i]['track']['id'] != None: # Removes the local tracks in your playlist if there is any\r\n track_ids.append(songs[i]['track']['id'])\r\n track_names.append(songs[i]['track']['name'])\r\n track_artists.append(songs[i]['track']['artists'])\r\n\r\n\r\n features = []\r\n for i in range(0,len(track_ids)):\r\n audio_features = sp.audio_features(track_ids[i])[0]\r\n track_popularity = {'popularity': sp.track(track_ids[i])['popularity']}\r\n genre = {'genres': sp.artist(track_artists[i][0]['uri'])['genres']}\r\n audio_features = dict(audio_features, **track_popularity, **genre)\r\n features.append(audio_features)\r\n\r\n\r\n playlist_df = pd.DataFrame(features, index = track_names)\r\n return playlist_df",
"def find_worth_playlist(self, part_worths, song_list):\r\n history_df = self.extracter.make_history(song_list)\r\n u_df = self.analyser.process_song_df(part_worths.values[0], history_df)\r\n return u_df",
"def recommend(self, target_playlist, remove_seen=True, is_submission=False):\n\n # Determine the known tracks\n if is_submission:\n seen = self.challenge_playlists\n else:\n seen = self.train_playlists\n\n # Remove known tracks from the prediction\n if remove_seen and target_playlist in seen.index:\n hold_ix = ~np.in1d(self.random_tracks, seen[target_playlist])\n recommended_tracks = self.random_tracks[hold_ix]\n recommended_tracks = recommended_tracks[0:500]\n else:\n recommended_tracks = self.random_tracks[0:500]\n\n # Return tids or uris\n if is_submission:\n recommended_tracks_uri = [self.tracks_df['track_uri'][t] for t in recommended_tracks]\n return recommended_tracks_uri\n else:\n return recommended_tracks",
"def get_recommendations(artists = tuple(), genres = tuple(), limit = 20, features = True, client = None):\n\n recs = client.recommendations(seed_artists = artists, seed_genres = genres, limit = limit)\n tracks = recs['tracks']\n\n # TODO: need a compose function...\n to_keep = (\n 'album_name', 'artist_name', 'name', 'popularity', 'duration_ms',\n 'explicit', 'id'\n )\n rows = list(map(row_filter(to_keep, False), map(_hoist_track_info, tracks)))\n out = pd.DataFrame(rows)\n\n track_ids = [row['id'] for row in rows]\n if features:\n extra_cols = ['uri', 'type', 'duration_ms', 'analysis_url', 'track_href']\n return out.merge(\n get_track_features(track_ids).drop(columns = extra_cols),\n on = \"id\"\n )\n\n return out",
"def get_liked_songs(self, station_id):\n\n feedbacks = self.get_station_feedbacks(station_id)\n songs = []\n for feedback in feedbacks:\n songs.append({\n \"name\": feedback[\"songTitle\"],\n \"album\": feedback[\"albumTitle\"],\n \"artist\": feedback[\"artistName\"]\n })\n return songs",
"def song_search_matching(chart_song, query):\n song_searches = song_search(query, NUM_SONG_SEARCH_RESULTS)\n if 'error' in song_searches:\n print('>>> error:', song_searches['error'])\n return\n\n songs = []\n # print(song_searches)\n for s in song_searches['songs']:\n # print('test song:', s)\n performers = ' '.join(x['name'] for x in s['performers']).lower()\n\n print('checking performers:', performers, 'vs.', chart_song.artist.lower())\n print('checking titles:', '\"' + s['title']['name'] + '\"', 'vs.', '\"' + chart_song.title + '\"')\n diff1 = fuzz.token_set_ratio(chart_song.artist.lower(), performers)\n diff2 = difflib.SequenceMatcher(\n None,\n a=s['title']['name'].lower(),\n b=chart_song.title.lower()\n ).ratio()\n print('performer score:', diff1, 'and title score:', diff2)\n if diff1 >= 65 and diff2 > 0.75:\n songs.append(s)\n print('song passed with diff performers of', diff1, 'and diff title of', diff2)\n if diff1 <= 75 or diff2 < 0.85:\n print('NOTE impartial match?', s, 'for', chart_song)\n\n return songs",
"def create_df_playlist(api_results,sp = None, append_audio = True):\r\n df = create_df_saved_songs(api_results[\"tracks\"])\r\n if append_audio == True:\r\n assert sp != None, \"sp needs to be specified for appending audio features\"\r\n df = append_audio_features(df,sp)\r\n return df",
"def classify_playlist(classifier, playlist_feature_data, playlist_data_dict):\n\n clf, clf_name = classifier\n\n playlist_features, playlist_song_ids = playlist_feature_data\n # run classifier on playlist songs\n results = clf.predict_all(playlist_features)\n liked_songs = [playlist_song_ids[i] for i in range(len(results)) if results[i] == 1]\n\n # get songs they'd like based on song ID\n if not liked_songs:\n print(\n \"The classifier \"\n + clf_name\n + \" thinks you wouldn't like any songs in \\\n the given playlist.\"\n )\n return\n\n print(\n \"The classifier \"\n + clf_name\n + \" thinks you'd like the following from the given playlist:\\n\"\n )\n\n for song in liked_songs:\n print(playlist_data_dict[song][\"metadata\"][\"track_name\"])\n\n numLiked = len(liked_songs)\n totalSongs = len(playlist_song_ids)\n matchRate = numLiked / totalSongs * 100\n\n print(\n \"The classifier \"\n + clf_name\n + \" thinks you'd dislike the following from the given playlist:\\n\"\n )\n for song in playlist_song_ids:\n if song not in liked_songs:\n print(playlist_data_dict[song][\"metadata\"][\"track_name\"])\n\n # spotify:playlist:37i9dQZF1DWXJfnUiYjUKT\n\n # spotify:playlist:37i9dQZF1DXcRXFNfZr7Tp\n\n print(f\"Thats a taste match of {matchRate}%\")\n print()\n return",
"def create_df_top_songs(api_results):\r\n #create lists for df-columns\r\n track_name = []\r\n track_id = []\r\n artist = []\r\n album = []\r\n duration = []\r\n popularity = []\r\n #loop through api_results\r\n for items in api_results['items']:\r\n try:\r\n track_name.append(items['name'])\r\n track_id.append(items['id'])\r\n artist.append(items[\"artists\"][0][\"name\"])\r\n duration.append(items[\"duration_ms\"])\r\n album.append(items[\"album\"][\"name\"])\r\n popularity.append(items[\"popularity\"])\r\n except TypeError:\r\n pass\r\n # Create the final df \r\n df = pd.DataFrame({ \"track_name\": track_name, \r\n \"album\": album, \r\n \"track_id\": track_id,\r\n \"artist\": artist, \r\n \"duration\": duration, \r\n \"popularity\": popularity})\r\n\r\n return df",
"def get_ratings():\n query = \"\"\"\n SELECT DISTINCT rating\n FROM film\n \"\"\"\n cursor.execute(query)\n result = cursor.fetchall()\n\n return pd.DataFrame(result, columns=['Rating'])",
"def get_artist_songs(self,genre = \"[Not Provided]\"):\n # Search for the artist and get their id\n search_artist = self.search(self.artist_name)\n\n #Prevents the stoppage in case of an Artist having zero lyrics on Genius\n if len(search_artist['response']['hits']) == 0:\n return False\n \n artist_id = str(search_artist['response']['hits'][0]['result']['primary_artist']['id'])\n print(\"ID: \" + artist_id)\n # Initialize DataFrame\n df = pd.DataFrame(columns=['title', 'url'])\n # Iterate through all the pages of the artist's songs\n more_pages = True\n page = 1\n i = 0\n while more_pages:\n # Make a request to get the songs of an artist on a given page\n request_url = self.base_url + 'artists/' + artist_id + '/songs' + '?per_page=50&page=' + str(page)\n response = requests.get(request_url, headers=self.headers).json()\n\n # For each song which the given artist is the primary_artist of the song, add the song title and\n # Genius URL to the DataFrame\n for song in response['response']['songs']:\n if str(song['primary_artist']['id']) == artist_id:\n title = song['title']\n url = song['url']\n df.loc[i] = [title, url]\n i += 1\n page += 1\n\n if response['response']['next_page'] is None:\n more_pages = False\n\n \n # Get the HTML and Song Lyrics from helper methods in the class\n df['artist'] = self.artist_name\n df['html'] = df['url'].apply(self.get_song_html)\n df['lyrics'] = df.apply(lambda row: self.get_lyrics(row.html), axis=1)\n #Uncomment to use the genre method otherwise\n #df['genre'] = df.apply(lambda row: self.get_genre(row.html), axis=1)\n df['genre'] = genre\n \n del df['url']\n del df['html']\n\n self.artist_songs = df\n\n return self.artist_songs",
"def item_mean(trainset, finalpredset):\n\n train = testset_to_sparse_matrix(trainset.build_testset())\n\n num_items, num_users = train.shape\n pred = np.zeros(train.shape)\n\n for item_index in range(num_items):\n # find the non-zero ratings for each item in the training dataset\n train_ratings = train[item_index, :]\n nonzeros_train_ratings = train_ratings[train_ratings.nonzero()]\n\n # calculate the mean if the number of elements is not 0\n if nonzeros_train_ratings.shape[0] != 0:\n item_train_mean = nonzeros_train_ratings.mean()\n pred[item_index, :] = item_train_mean\n\n finalpred_usr_idx, finalpred_movies_idx, _ = get_testset_indices(finalpredset)\n return pred[finalpred_usr_idx, finalpred_movies_idx]",
"def get_playlist_tracks(playlist):\n track_ids = [id for id in load_from_json(f\"playlist_{playlist}.json\") if id is not None]\n tracks = []\n\n for i in range(0, len(track_ids), 50):\n tracks_info = sp.tracks(track_ids[i: i+50])['tracks']\n for track in tracks_info:\n if track:\n tracks.append({\n 'id': track['id'],\n 'name': track['name'],\n 'popularity': track['popularity']\n })\n df = pd.DataFrame(tracks)\n\n file = f\"playlist_{playlist}_df.csv\"\n df.to_csv(file)\n\n return file",
"def find_worth_playlists(self, part_worths, song_lists):\r\n print(\"\\n\\n*************************\\n\\\r\n Checking Worth of playlist\\n*************************\")\r\n worth_list = []\r\n # index = 0\r\n for song_list in song_lists:\r\n worth = self.find_worth_playlist(part_worths, song_list)\r\n self.plot_test(worth)\r\n worth_list.append(worth[\"worth\"].mean()) #TODO: mean or sum?\r\n # index += 1\r\n print(\"\\nMost valuable playlist is playlist number: \", worth_list.index(max(worth_list))+1)\r\n return worth_list",
"def get_artist_audio_features(q, interactive = False, genre_delimiter = '-!!-', to_file = '', client = None):\n query = client.search(q = q, type = \"artist\")\n items = query['artists']['items']\n\n if not items:\n raise Exception(\"No artists found\")\n\n if interactive:\n print(\"Select the artist to use...\")\n print(\"\\n\".join(\"[{}]: {}\".format(ii, entry['name']) for ii, entry in enumerate(items)))\n artist_indx = int(input(\"artist number: \").strip())\n if artist_indx > len(items):\n raise IndexError(\"Selected number higher than options available\")\n artist = items[artist_indx]\n else:\n artist = items[0]\n\n # get artist genres\n artist_genres = genre_delimiter.join(artist['genres']) if genre_delimiter else None\n\n # get artist albums\n albums = get_artist_albums(artist['id'])\n albums['artist_genres'] = artist_genres\n\n # get album popularity\n album_popularity = get_album_popularity(albums.id)\n\n # get album tracks\n tracks = get_album_tracks(albums.id)\n\n # get track audio features\n features = get_track_features(tracks.id)\n\n # get track popularity\n popularity = get_track_popularity(tracks.id)\n\n album_data = albums.merge(album_popularity, 'left', 'id')\n\n track_data = tracks \\\n .drop(columns = ['type']) \\\n .merge(popularity, 'left', 'id') \\\n .merge(features.drop(columns = ['uri', 'type', 'duration_ms']), 'left', 'id')\n\n\n merged = prefix_merge(album_data, track_data, ['album_', 'track_'], how = 'left', on = 'album_id')\n\n if to_file:\n merged.to_csv(to_file)\n\n return merged",
"def plays(df):\n tp = (\n df.query('play_type in @OFFENSE_PLAY_TYPES')\n .pivot_table(index=['game_id', 'posteam'], \n columns=['play_type'], \n values=['play_id'], \n aggfunc='count',\n fill_value=0)\n .pipe(lambda x: x.set_axis([f'{b}_plays' for a, b in x.columns], axis=1, inplace=False))\n .reset_index()\n ) \n tp['tot_plays'] = tp.loc[:, [c for c in tp.columns if '_plays' in c]].sum(axis=1)\n tp['run_pct'] = tp['run_plays'] / (tp['run_plays'] + tp['pass_plays'])\n tp['pass_pct'] = tp['pass_plays'] / (tp['run_plays'] + tp['pass_plays'])\n return tp.join(time_of_possession(df), on=['game_id', 'posteam'], how='left')",
"def recommended_tracks(related_artist, base_track):\n\n # Get all albums\n albums = [album['uri'] for album in\n spotify.artist_albums(related_artist)['items']]\n\n\n recommended_tracks = []\n for album in albums:\n\n # Get the audio features for all tracks on album\n tracks = [t['uri'] for t in spotify.album_tracks(album)['items']][:20]\n album_tracks_features = spotify.audio_features(tracks)\n\n # For each track, examine audio features. If all are within 10% range\n # of the base_track, recommend that song (add to list of URLs)\n for track in album_tracks_features:\n\n if track is None:\n continue\n if (\n var(track['danceability'], base_track['danceability']) and\n var(track['energy'], base_track['energy'] ) and\n var(track['speechiness'], base_track['speechiness'] ) and\n var(track['liveness'], base_track['liveness'] ) and\n var(track['valence'], base_track['valence'] )\n ):\n recommended_tracks += ['https://open.spotify.com/track/' + track['id']]\n\n return recommended_tracks",
"def combine_results(search_results: list, topn=20) -> pd.Series:\n\n all_results = pd.concat(search_results)\n\n if len(all_results) == 0:\n return pd.Series()\n\n score_series = (\n all_results.groupby(\"item\").sum()[\"score\"] / len(search_results)\n ).sort_values(ascending=False)\n\n if topn < len(score_series):\n score_series = score_series[0:topn]\n\n return score_series",
"def avg_item_rating_predicate(observed_ratings_df, setting='eval'):\n observed_ratings_series = observed_ratings_df.loc[:, ['userId', 'movieId', 'rating']].set_index(\n ['userId', 'movieId'])\n filename = '../movielens/data/' + setting + '/avg_item_rating_obs.txt'\n handle = open(filename, \"w\")\n\n # reindex by movie ID so we can group them and calculate the mean easily\n observed_ratings_df = observed_ratings_df.reset_index()\n observed_ratings_df = observed_ratings_df.set_index('movieId')\n\n # calculate the mean within each movie\n for movieId in observed_ratings_df.index.unique():\n df_temp = observed_ratings_df[observed_ratings_df.index == movieId]\n item_avg = df_temp['rating'].mean()\n handle.write(str(movieId) + \"\\t\" + str(item_avg) + \"\\n\")",
"def make_recommendation(fav_product,model_knn=model_knn,\n data=csr_matrix(df_product_features.values),\n\n mapper=products_to_idx,\n n_recommendations=6):\n # fit\n model_knn.fit(data)\n # get input movie index\n #print('You have input product:', fav_product)\n idx = fuzzy_matching(mapper, fav_product, verbose=True)\n if idx is None:\n return []\n #print('Recommendation system start to make inference')\n #print('......\\n')\n distances, indices = model_knn.kneighbors(data[idx], n_neighbors=n_recommendations + 1)\n\n raw_recommends = sorted(list(zip(indices.squeeze().tolist(), distances.squeeze().tolist())), key=lambda x: x[1])[\n :0:-1]\n\n # get reverse mapper\n #print(raw_recommends)\n reverse_mapper = {v: k for k, v in mapper.items()}\n # print recommendations\n #print('Recommendations for {}:'.format(fav_product))\n filter = []\n for i, (idx, dist) in enumerate(raw_recommends):\n #print('{0}: {1}, with distance of {2}'.format(i + 1, reverse_mapper[idx], dist))\n filter.append(reverse_mapper[idx])\n\n\n newproduct = pd.read_sql_query(\"\"\"SELECT p.*\n ,(SELECT img.url FROM image img WHERE p.id=img.product_id limit 1) as image\n ,(SELECT cate.cate_name FROM categories cate WHERE p.category_id=cate.id) as cateName\n FROM products p where p.name IN %s \"\"\", conn,params=(tuple(filter),))\n\n return newproduct.reset_index().to_json(orient='records')",
"def youtube_trending(session=None):\n data = []\n url = 'https://www.youtube.com/feed/trending'\n soup = ph.get_soup(url, session)\n if not soup:\n ph.logger.error('No soup found for {}'.format(url))\n return data\n\n uls = soup.find_all('ul', attrs={'class': 'expanded-shelf-content-list'})\n i = 0\n for ul in uls:\n for li in ul.find_all('li'):\n result_data = {}\n try:\n result_data['link'] = 'https://www.youtube.com' + li.h3.a.attrs['href']\n result_data['title'] = li.h3.a.attrs['title']\n except AttributeError:\n continue\n else:\n result_data['position'] = i\n try:\n result_data['duration'] = _clean_youtube_duration(li.h3.span.text)\n except AttributeError:\n result_data['duration'] = ''\n try:\n result_data['user'] = li.find(attrs={'class': 'yt-lockup-byline'}).a.text\n except AttributeError:\n result_data['user'] = ''\n try:\n metadata = li.find(attrs={'class': 'yt-lockup-meta-info'})\n metadata = [x.text for x in metadata.findChildren()]\n except AttributeError:\n metadata = []\n try:\n result_data['uploaded'] = metadata[0]\n result_data['views'] = metadata[1]\n except IndexError:\n result_data['uploaded'] = ''\n result_data['views'] = ''\n\n data.append(result_data)\n i += 1\n\n return data",
"def get_songs_by_artist(artist, linesep=' \\n ', timeout=None):\n df = pd.DataFrame(columns=['Artist', 'Title'])\n url = \"https://lyrics.fandom.com/wiki/Category:Songs_by_\"+urlize(artist)\n df = parse_page_now(url,df)\n return df"
]
| [
"0.5832328",
"0.5796316",
"0.5668893",
"0.565352",
"0.55372804",
"0.5467602",
"0.5454571",
"0.5408461",
"0.5392319",
"0.5323091",
"0.52779627",
"0.5273057",
"0.5253437",
"0.5243496",
"0.51954097",
"0.5133213",
"0.5107601",
"0.51065266",
"0.50642806",
"0.50602126",
"0.5047967",
"0.5046059",
"0.5034609",
"0.50211287",
"0.4977917",
"0.49664277",
"0.4961515",
"0.49337462",
"0.4929603",
"0.4896006"
]
| 0.6814532 | 0 |
Creates a dataframe of final recommendations filtered by a given feature and whether a high or low value is wanted. | def feature_filter(df, feature, high=True):
    assert feature in ["speechiness", "acousticness", "instrumentalness", "liveness"], "feature must be one of the following: speechiness, acousticness, instrumentalness, liveness"
    # more features may be added
    x = 0.9 if high else 0.1
    df = df[df[feature] > x] if high else df[df[feature] < x]
    return df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_active_features(summary_df, slots_offered): # prev -> getActiveFeatures\n disc_cols = [col+'_Discount' for col in slots_offered]\n eco_cols = [col+'_Eco' for col in slots_offered]\n gr_cols = [col+'_Eco' for col in slots_offered]\n features = summary_df.loc[:, disc_cols+eco_cols+gr_cols]\n features = features.loc[:, features.sum(axis=0) > 0]\n for i in reversed(['NO_PURCHASE']+slots_offered):\n features.insert(0, i+'_Asc', value=1)\n return features, disc_cols, eco_cols, gr_cols",
"def mut_filter(df, rate, binary_cutoff=12):\n get_min_count = lambda s: s.value_counts().min() if len(s.unique()) > 1 else -1\n df = df[df.apply(get_min_count, axis=1) > binary_cutoff]\n cc = H.screen_feature(rate, rev_kruskal, df)\n\n fc_apply = lambda s: fc(s, rate)\n direction = df.apply(fc_apply, axis=1)\n direction.name = 'direction'\n\n cc = cc.join(direction)\n #cc = cc[cc.direction == False]\n #return cc\n\n df = df.ix[H.true_index((cc.p > .01) | (cc.direction == True))]\n df = df.dropna(axis=1)\n return df",
"def calc_recommendation(df_train: pd.DataFrame, df_target: pd.DataFrame) -> pd.DataFrame:\n lookup_series = get_lookup_series(df_train)\n df_tc = df_target.copy()\n df_tc['item_recommendations'] = df_tc.apply(lambda x: sort_by_interaction(x, lookup_series), axis=1)\n df_out = df_tc[['user_id', 'session_id', 'timestamp', 'step', 'item_recommendations']]\n return df_out",
"def feature_eng2(housing_tr, housing):\n logging.info(\"Adding features.....\")\n housing_tr[\"rooms_per_household\"] = (\n housing_tr[\"total_rooms\"] / housing_tr[\"households\"]\n )\n housing_tr[\"bedrooms_per_room\"] = (\n housing_tr[\"total_bedrooms\"] / housing_tr[\"total_rooms\"]\n )\n housing_tr[\"population_per_household\"] = (\n housing_tr[\"population\"] / housing_tr[\"households\"]\n )\n housing_cat = housing[[\"ocean_proximity\"]]\n housing_prepared = housing_tr.join(\n pd.get_dummies(housing_cat, drop_first=True)\n )\n return housing_prepared",
"def my_featurize(apartment):\n col =np.array([1, 2, 0, 0, 0, 0, 0, 0 ])\n a= pd.DataFrame(apartment[col])\n if(apartment.get('condition')== 'good'):\n col[1] =1\n else:\n if(apartment.get('condition')== 'zero condition'):\n col[1] = 0\n col[2] =apartment.get('num_rooms')\n col[3] =apartment.get('area')\n col[4] =apartment.get('num_bathrooms')\n col[5] =apartment.get('floor')\n col[6] =apartment.get('ceiling_height')\n col[7] =apartment.get('max_floor')\n\n return col, apartment['price']",
"def generate_features(df):\n df_new = pd.DataFrame()\n \n # 6 original features\n df_new['open'] = df['open']\n df_new['open_1'] = df['open'].shift(1)\n df_new['close_1'] = df['close'].shift(1)\n df_new['high_1'] = df['high'].shift(1)\n df_new['low_1'] = df['low'].shift(1)\n df_new['volume_1'] = df['volume'].shift(1)\n \n # 50 original features\n # average price\n df_new['avg_price_5'] = df['close'].rolling(window=5).mean().shift(1)\n df_new['avg_price_30'] = df['close'].rolling(window=21).mean().shift(1)\n df_new['avg_price_90'] = df['close'].rolling(window=63).mean().shift(1)\n df_new['avg_price_365'] = df['close'].rolling(window=252).mean().shift(1)\n \n # average price ratio\n df_new['ratio_avg_price_5_30'] = df_new['avg_price_5'] / df_new['avg_price_30']\n df_new['ratio_avg_price_905_'] = df_new['avg_price_5'] / df_new['avg_price_90']\n df_new['ratio_avg_price_5_365'] = df_new['avg_price_5'] / df_new['avg_price_365']\n df_new['ratio_avg_price_30_90'] = df_new['avg_price_30'] / df_new['avg_price_90']\n df_new['ratio_avg_price_30_365'] = df_new['avg_price_30'] / df_new['avg_price_365']\n df_new['ratio_avg_price_90_365'] = df_new['avg_price_90'] / df_new['avg_price_365'] \n \n \n # average volume\n df_new['avg_volume_5'] = df['volume'].rolling(window=5).mean().shift(1)\n df_new['avg_volume_30'] = df['volume'].rolling(window=21).mean().shift(1)\n df_new['avg_volume_90'] = df['volume'].rolling(window=63).mean().shift(1)\n df_new['avg_volume_365'] = df['volume'].rolling(window=252).mean().shift(1)\n \n #average volume ratio\n df_new['ratio_avg_volume_5_30'] = df_new['avg_volume_5'] / df_new['avg_volume_30']\n df_new['ratio_avg_volumee_5_90'] = df_new['avg_volume_5'] / df_new['avg_volume_90'] \n df_new['ratio_avg_volume_5_365'] = df_new['avg_volume_5'] / df_new['avg_volume_365']\n df_new['ratio_avg_volume_30_90'] = df_new['avg_volume_30'] / df_new['avg_volume_90']\n df_new['ratio_avg_volume_30_365'] = df_new['avg_volume_30'] / df_new['avg_volume_365']\n df_new['ratio_avg_volume_90_365'] = df_new['avg_volume_90'] / df_new['avg_volume_365'] \n \n \n # standard deviation of prices\n df_new['std_price_5'] = df['close'].rolling(window=5).std().shift(1)\n df_new['std_price_30'] = df['close'].rolling(window=21).std().shift(1)\n df_new['std_price_90'] = df['close'].rolling(window=63).std().shift(1) \n df_new['std_price_365'] = df['close'].rolling(window=252).std().shift(1)\n \n # standard deviation ratio of prices \n df_new['ratio_std_price_5_30'] = df_new['std_price_5'] / df_new['std_price_30']\n df_new['ratio_std_price_5_90'] = df_new['std_price_5'] / df_new['std_price_90']\n df_new['ratio_std_price_5_365'] = df_new['std_price_5'] / df_new['std_price_365']\n df_new['ratio_std_price_30_90'] = df_new['std_price_30'] / df_new['std_price_90'] \n df_new['ratio_std_price_30_365'] = df_new['std_price_30'] / df_new['std_price_365'] \n df_new['ratio_std_price_90_365'] = df_new['std_price_90'] / df_new['std_price_365'] \n \n \n # standard deviation of volumes\n df_new['std_volume_5'] = df['volume'].rolling(window=5).std().shift(1)\n df_new['std_volume_30'] = df['volume'].rolling(window=21).std().shift(1)\n df_new['std_volume_90'] = df['volume'].rolling(window=63).std().shift(1)\n df_new['std_volume_365'] = df['volume'].rolling(window=252).std().shift(1)\n \n #standard deviation ratio of volumes\n df_new['ratio_std_volume_5_30'] = df_new['std_volume_5'] / df_new['std_volume_30']\n df_new['ratio_std_volume_5_90'] = df_new['std_volume_5'] / df_new['std_volume_90']\n df_new['ratio_std_volume_5_365'] = 
df_new['std_volume_5'] / df_new['std_volume_365'] \n df_new['ratio_std_volume_30_90'] = df_new['std_volume_30'] / df_new['std_volume_90']\n df_new['ratio_std_volume_30_365'] = df_new['std_volume_30'] / df_new['std_volume_365']\n df_new['ratio_std_volume_90_365'] = df_new['std_volume_90'] / df_new['std_volume_365'] \n \n # return\n df_new['return_1'] = ((df['close'] - df['close'].shift(1)) / df['close'].shift(1)).shift(1)\n df_new['return_5'] = ((df['close'] - df['close'].shift(5)) / df['close'].shift(5)).shift(1)\n df_new['return_30'] = ((df['close'] - df['close'].shift(21)) / df['close'].shift(21)).shift(1)\n df_new['return_90'] = ((df['close'] - df['close'].shift(63)) / df['close'].shift(63)).shift(1) \n df_new['return_365'] = ((df['close'] - df['close'].shift(252)) / df['close'].shift(252)).shift(1)\n \n #average of return\n df_new['moving_avg_5'] = df_new['return_1'].rolling(window=5).mean()\n df_new['moving_avg_30'] = df_new['return_1'].rolling(window=21).mean()\n df_new['moving_avg_90'] = df_new['return_1'].rolling(window=63).mean()\n df_new['moving_avg_365'] = df_new['return_1'].rolling(window=252).mean()\n \n # the target\n df_new['close'] = df['close']\n df_new = df_new.dropna(axis=0)\n return df_new",
"def compute_filtered_features(self, features):\n return [\n feature\n for feature in features\n if \"\".join(feature.qualifiers.get(\"is_edit\", \"false\")) != \"true\"\n ]",
"def filter_data(self):\n self.df = self.df[HeatStrokeDataFiller.important_features]",
"def user_input_features():\n sepal_length = st.sidebar.slider('Sepal length', 4.3, 7.9, 5.4) # the sidebar.slider magic function receive the max, min and default value in out sidebar\n sepal_width = st.sidebar.slider('Sepal width', 2.0, 4.4, 3.4)\n petal_length = st.sidebar.slider('Petal length', 1.0, 6.9, 1.3)\n petal_width = st.sidebar.slider('Petal width', 0.1, 2.5, 0.2)\n data = {'sepal_length': sepal_length,\n 'sepal_width': sepal_width,\n 'petal_length': petal_length,\n 'petal_width': petal_width}\n\n return pd.DataFrame(data, index=[0])",
"def prepare_data_features(raw=False, round_ratings=False):\n df = prepare_data(raw=False)\n print(\"prepare features\")\n df_categories = df['categories'].str.get_dummies(sep=\", \")\n df_categories = df_categories[df_categories.columns[\n df_categories.sum() > len(df)*0.01]]\n df = pd.concat([df.drop('categories', 1), df_categories], axis=1)\n print(\"end prepare features\")\n return df",
"def filter_with_meansong(mean_song,recommendations_df, n_recommendations = 10):\r\n\r\n features = list(mean_song.columns[6:])\r\n features.remove(\"key\")\r\n features.remove(\"mode\")\r\n mean_song_feat = mean_song[features].values\r\n mean_song_scaled = MinMaxScaler().fit_transform(mean_song_feat.reshape(-1,1))\r\n recommendations_df_scaled = MinMaxScaler().fit_transform(recommendations_df[features])\r\n mean_song_scaled = mean_song_scaled.reshape(1,-1)\r\n sim_mean_finrecomms = cosine_similarity(mean_song_scaled,recommendations_df_scaled)[0][:]\r\n indices = (-sim_mean_finrecomms).argsort()[:n_recommendations]\r\n final_recommendations = recommendations_df.iloc[indices]\r\n return final_recommendations",
"def create_features_using_groupby(training, entity, feature, avg=True, minimum=True, maximum=True):\n\n entity_col = 'offer_id' if entity == 'portfolio' else 'person'\n\n groupby = training.groupby(entity_col)[feature]\n\n features, col_name = [], []\n if avg:\n features.append(groupby.mean())\n col_name.append('avg_'+feature)\n if minimum:\n features.append(groupby.min())\n col_name.append('min_'+feature)\n if maximum:\n features.append(groupby.max())\n col_name.append('max_'+feature)\n\n feature_df = pd.concat(features, axis=1)\n feature_df.columns = [col + '_' + entity for col in col_name]\n\n return feature_df",
"def update_features(\r\n df:pd.DataFrame\r\n ) -> pd.DataFrame:\r\n # Check input.\r\n # Copy dataframe to avoid in place modification.\r\n df = df.copy()\r\n ########################################\r\n # Returned_asm\r\n # Interpretation of assumptions:\r\n # If DSEligible=0, then the vehicle is not eligible for a guarantee.\r\n # * And Returned=-1 (null) since we don't know whether or not it would have been returned,\r\n # but given that it wasn't eligible, it may have been likely to have Returned=1.\r\n # If DSEligible=1, then the vehicle is eligible for a guarantee.\r\n # * And if Returned=0 then the guarantee was purchased and the vehicle was not returned.\r\n # * And if Returned=1 then the guarantee was purchased and the vehicle was returned.\r\n # * And if Returned=-1 (null) then the guarantee was not purchased.\r\n # We don't know whether or not it would have been returned,\r\n # but given that the dealer did not purchase, it may have been likely to have Returned=0.\r\n # Assume:\r\n # If Returned=-1 and DSEligible=0, then Returned_asm=1\r\n # If Returned=-1 and DSEligible=1, then Returned_asm=0\r\n logger.info(textwrap.dedent(\"\"\"\\\r\n Returned_asm: Assume returned status to fill nulls as new feature.\r\n If Returned=-1 and DSEligible=0, then Returned_asm=1 (assumes low P(resale|buyer, car))\r\n If Returned=-1 and DSEligible=1, then Returned_asm=0 (assumes high P(resale|buyer, car))\"\"\"))\r\n df['Returned_asm'] = df['Returned']\r\n df.loc[\r\n np.logical_and(df['Returned'] == -1, df['DSEligible'] == 0),\r\n 'Returned_asm'] = 1\r\n df.loc[\r\n np.logical_and(df['Returned'] == -1, df['DSEligible'] == 1),\r\n 'Returned_asm'] = 0\r\n logger.info(\"Relationship between DSEligible and Returned:\\n{pt}\".format(\r\n pt=pd.pivot_table(\r\n df[['DSEligible', 'Returned']].astype(str),\r\n index='DSEligible', columns='Returned',\r\n aggfunc=len, margins=True, dropna=False)))\r\n logger.info(\"Relationship between DSEligible and Returned_asm:\\n{pt}\".format(\r\n pt=pd.pivot_table(\r\n df[['DSEligible', 'Returned_asm']].astype(str),\r\n index='DSEligible', columns='Returned_asm',\r\n aggfunc=len, margins=True, dropna=False)))\r\n logger.info(\"Relationship between Returned and Returned_asm:\\n{pt}\".format(\r\n pt=pd.pivot_table(\r\n df[['Returned', 'Returned_asm']].astype(str),\r\n index='Returned', columns='Returned_asm',\r\n aggfunc=len, margins=True, dropna=False)))\r\n ########################################\r\n # BuyerID, SellerID, VIN, SellingLocation, CarMake, JDPowersCat:\r\n # Make cumulative informative priors (*_num*, *_frac*) for string features.\r\n logger.info(textwrap.dedent(\"\"\"\\\r\n BuyerID, SellerID, VIN, SellingLocation, CarMake, JDPowersCat:\r\n Make cumulative informative priors (*_num*, *_frac*) for string features.\"\"\"))\r\n # Cumulative features require sorting by time.\r\n assert (df['SaleDate'].diff().iloc[1:] >= np.timedelta64(0, 'D')).all()\r\n for col in ['BuyerID', 'SellerID', 'VIN', 'SellingLocation', 'CarMake', 'JDPowersCat']:\r\n logger.info(\"Processing {col}\".format(col=col))\r\n ####################\r\n # Cumulative count of transactions and DSEligible:\r\n # Cumulative count of transactions (yes including current).\r\n df[col+'_numTransactions'] = df[[col]].groupby(by=col).cumcount().astype(int) + 1\r\n df[col+'_numTransactions'].fillna(value=1, inplace=True)\r\n # Cumulative count of transations that were DealShield-eligible (yes including current).\r\n df[col+'_numDSEligible1'] = df[[col, 
'DSEligible']].groupby(by=col)['DSEligible'].cumsum().astype(int)\r\n df[col+'_numDSEligible1'].fillna(value=0, inplace=True)\r\n # Cumulative ratio of transactions that were DealShield-eligible (0=bad, 1=good).\r\n df[col+'_fracDSEligible1DivTransactions'] = (df[col+'_numDSEligible1']/df[col+'_numTransactions'])\r\n df[col+'_fracDSEligible1DivTransactions'].fillna(value=1, inplace=True)\r\n ####################\r\n # DSEligible and Returned\r\n # Note:\r\n # * DealShield-purchased ==> Returned != -1 (not null)\r\n # * below requires\r\n # DSEligible == 0 ==> Returned == -1 (is null)\r\n # Returned != -1 (not null) ==> DSEligible == 1\r\n assert (df.loc[df['DSEligible']==0, 'Returned'] == -1).all()\r\n assert (df.loc[df['Returned']!=-1, 'DSEligible'] == 1).all()\r\n # Cumulative count of transactions that were DealShield-eligible and DealShield-purchased.\r\n df_tmp = df[[col, 'Returned']].copy()\r\n df_tmp['ReturnedNotNull'] = df_tmp['Returned'] != -1\r\n df[col+'_numReturnedNotNull'] = df_tmp[[col, 'ReturnedNotNull']].groupby(by=col)['ReturnedNotNull'].cumsum().astype(int)\r\n df[col+'_numReturnedNotNull'].fillna(value=0, inplace=True)\r\n del df_tmp\r\n # Cumulative ratio of DealShield-eligible transactions that were DealShield-purchased (0=mode).\r\n df[col+'_fracReturnedNotNullDivDSEligible1'] = df[col+'_numReturnedNotNull']/df[col+'_numDSEligible1']\r\n df[col+'_fracReturnedNotNullDivDSEligible1'].fillna(value=0, inplace=True)\r\n # Cumulative count of transactions that were DealShield-elegible and DealShield-purchased and DealShield-returned.\r\n df_tmp = df[[col, 'Returned']].copy()\r\n df_tmp['Returned1'] = df_tmp['Returned'] == 1\r\n df[col+'_numReturned1'] = df_tmp[[col, 'Returned1']].groupby(by=col)['Returned1'].cumsum().astype(int)\r\n df[col+'_numReturned1'].fillna(value=0, inplace=True)\r\n del df_tmp\r\n # Cumulative ratio of DealShield-eligible, DealShield-purchased transactions that were DealShield-returned (0=good, 1=bad).\r\n # Note: BuyerID_fracReturned1DivReturnedNotNull is the cumulative return rate for a buyer.\r\n df[col+'_fracReturned1DivReturnedNotNull'] = df[col+'_numReturned1']/df[col+'_numReturnedNotNull']\r\n df[col+'_fracReturned1DivReturnedNotNull'].fillna(value=0, inplace=True)\r\n # Check that weighted average of return rate equals overall return rate.\r\n # Note: Requires groups sorted by date, ascending.\r\n assert np.isclose(\r\n (df[[col, col+'_fracReturned1DivReturnedNotNull', col+'_numReturnedNotNull']].groupby(by=col).last().product(axis=1).sum()/\\\r\n df[[col, col+'_numReturnedNotNull']].groupby(by=col).last().sum()).values[0],\r\n sum(df['Returned']==1)/sum(df['Returned'] != -1),\r\n equal_nan=True)\r\n ####################\r\n # DSEligible and Returned_asm\r\n # NOTE:\r\n # * Below requires\r\n # DSEligible == 0 ==> Returned_asm == 1\r\n # Returned_asm == 0 ==> DSEligible == 1\r\n assert (df.loc[df['DSEligible']==0, 'Returned_asm'] == 1).all()\r\n assert (df.loc[df['Returned_asm']==0, 'DSEligible'] == 1).all()\r\n # Cumulative number of transactions that were assumed to be returned.\r\n df_tmp = df[[col, 'Returned_asm']].copy()\r\n df_tmp['Returnedasm1'] = df_tmp['Returned_asm'] == 1\r\n df[col+'_numReturnedasm1'] = df_tmp[[col, 'Returnedasm1']].groupby(by=col)['Returnedasm1'].cumsum().astype(int)\r\n df[col+'_numReturnedasm1'].fillna(value=0, inplace=True)\r\n del df_tmp\r\n # Cumulative ratio of transactions that were assumed to be returned (0=mode).\r\n df[col+'_fracReturnedasm1DivTransactions'] = 
df[col+'_numReturnedasm1']/df[col+'_numTransactions']\r\n df[col+'_fracReturnedasm1DivTransactions'].fillna(value=0, inplace=True)\r\n # Check that weighted average of assumed return rate equals overall assumed return rate.\r\n assert np.isclose(\r\n (df[[col, col+'_fracReturnedasm1DivTransactions', col+'_numTransactions']].groupby(by=col).last().product(axis=1).sum()/\\\r\n df[[col, col+'_numTransactions']].groupby(by=col).last().sum()).values[0],\r\n sum(df['Returned_asm']==1)/sum(df['Returned_asm'] != -1),\r\n equal_nan=True)\r\n # Note:\r\n # * Number of transactions that were DealShield-eligible and assumed to be returned ==\r\n # number of transactions that were DealShield-elegible and DealShield-purchased and DealShield-returned\r\n # (numReturned1)\r\n return df",
"def get_hikedetails_by_feature(feature):\n\n if (feature == \"dog\"):\n npbyfeature = Hike.query.filter(Hike.features.like('%dogs-leash%')).all()\n\n if (feature == \"kid\"):\n npbyfeature = Hike.query.filter(Hike.features.like('%kids%') | Hike.features.like('%strollers%')).all()\n \n if (feature == \"water\"):\n npbyfeature = Hike.query.filter(Hike.features.like('%river%') | Hike.features.like('%beach%')).all()\n\n \n return npbyfeature",
"def feature_selection_rf(df, threshold, cols_to_filter, label_col = 'label', pcg = 1.0):\n print(\"[Info] Feature selection by Random Forest may take a long time\")\n\n df = df.select(cols_to_filter + [label_col]).sample(withReplacement=False, fraction=pcg)\n\n df = only_numeric_columns(df, label_col = label_col)\n\n df.cache()\n\n print \"[Info] Number of rows in the DF: \" + str(df.count())\n\n input_cols = list(set(df.columns) - set([label_col]))\n\n assembler = VectorAssembler(inputCols=input_cols, outputCol='features')\n\n numTrees, maxDepth, minInstancesPerNode, maxBins, subsamplingRate, maxIter = param_selection(df)\n\n rf_model = RandomForestClassifier(numTrees=numTrees, maxDepth=maxDepth,\n minInstancesPerNode=minInstancesPerNode,\n maxBins=maxBins, featureSubsetStrategy='auto', minInfoGain=0.0,\n impurity='gini', subsamplingRate=subsamplingRate, labelCol = label_col)\\\n\n pipeline = Pipeline(stages=[assembler, rf_model])\n\n pipeline_model = pipeline.fit(df)\n\n from churn_nrt.src.projects_utils.models.modeler import getOrderedRelevantFeats\n\n feat_imp_nrt = getOrderedRelevantFeats(pipeline_model, input_cols, \"f\")\n\n n = threshold if(threshold >=1) else round(threshold*len(feat_imp_nrt))\n\n num_cols = [f[0] for f in feat_imp_nrt][0:n]\n\n return num_cols",
"def GetActions(features):\n featureOP = []\n featureYMore = []\n featureYAcc = []\n featureImprove = []\n count = 0\n for index, row in features[::-1].iterrows():\n \tif count > 7: # Don't recommend more than 8 actions\n \t\tbreak\n if ('poss' not in row['features']) and ('scoring' not in row['features']):\n if '_op' in row['features']:\n if (row['coef'] < 0) and ('accuracy' not in row['features']) and ('accurate' not in row['features']):\n featureOP.append(row['features'][:-3].replace('_', ' ').title())\n featureImprove.append(row['features'])\n count += 1\n else:\n if row['coef'] > 0:\n if 'accuracy' not in row['features']:\n featureYMore.append(row['features'].replace('_', ' ').title())\n featureImprove.append(row['features'])\n count += 1\n else:\n featureYAcc.append(row['features'].replace('_', ' ').title())\n featureImprove.append(row['features'])\n count += 1\n\n # Whether show 2 columns or 3\n useTwoCol = True\n if useTwoCol:\n actions = pd.DataFrame([featureYAcc + featureYMore, featureOP], index = ['Your', 'OP']).T\n\n else:\n actions = pd.DataFrame([featureYAcc, featureYMore, featureOP], index = ['YAcc', 'YMore', 'OP']).T\n nDimActions = actions.shape\n actions = actions.values.tolist()\n\t\n ## Make the actions more readable\n for ii in np.arange(nDimActions[0]):\n for jj in np.arange(nDimActions[1]):\n #print actions[ii][jj]\n if actions[ii][jj] == None:\n actions[ii][jj] = ' '\n else:\n actions[ii][jj] = actions[ii][jj].replace('Att', 'Attempt').replace('Obox', 'Outside the Penalty Box').replace('Ibox', 'Inside the Penalty Box').replace('Total ', '').replace('Fwd', 'Forward').replace('18Yardplus', 'Outside the Penalty Box').replace('18Yard', 'Inside the Penalty Box')\n if 'Accuracy' in actions[ii][jj]:\n actions[ii][jj] = actions[ii][jj][9:] + ' Accuracy'\n else:\n actions[ii][jj] = '# of ' + actions[ii][jj]\n if (\"alls\" not in actions[ii][jj]) and (\"Penalty Box\" not in actions[ii][jj]):\n if \"Won\" in actions[ii][jj]:\n actions[ii][jj] = actions[ii][jj][:-4] + 's Won'\n elif actions[ii][jj][-2:] != 'ss':\n actions[ii][jj] = actions[ii][jj] + 's'\n else:\n actions[ii][jj] = actions[ii][jj] + 'es'\n #print actions\n return actions, featureImprove",
"def getFeature(df, start, end):\n\n return [df[start:end].mean(),\n df[start:end].std(),\n df[start:end].skew(),\n df[start:end].kurt(),\n df[start:end].quantile(0.25),\n df[start:end].quantile(0.75),\n df[start:end].quantile(0.90),\n df[start:end].quantile(0.15),\n df[start:end].median(),\n df[start:end].mad(),\n df[start:end].sem(),\n df[start:end].var(),\n df[start:end].autocorr(1),\n df[start:end].autocorr(2),\n df[start:end].autocorr(3),\n df[start:end].autocorr(4),\n df[start:end].autocorr(5),\n np.append(df[start:end].mode(), -1)[0]\n ]",
"def select_important_features(data):\n\n selected_attributes = ['text_cleaned', 'description_cleaned', 'sidebar_color', 'link_color', 'name']\n filtered_data = pd.DataFrame(data, columns=selected_attributes)\n output_data = data['gender']\n\n # Converting text strings into a matrix of word token counts\n cv = CountVectorizer()\n inputString = sp.hstack(filtered_data.apply(lambda attribute: cv.fit_transform(attribute)))\n\n # Encodes class labels from 0 to Num_of_classes-1\n le = LabelEncoder()\n outputString = le.fit_transform(output_data)\n\n # Splitting the data such that 66% of the data is assigned as training data and the rest as the test data set.\n input_train, input_test, output_train, output_test = train_test_split(inputString, outputString, train_size=0.66)\n return input_train, output_train, input_test, output_test",
"def select_features(self):\r\n \r\n features_list = list(self.feed_data.columns.values)\r\n features_list.remove(\"min_time\")\r\n thisrace = self.config.race_to_predict\r\n\r\n #if never ran race before, don't include these variables in feature\r\n #selection, they're just 0's anyway\r\n if self.config.first_time_running_race == True:\r\n unuseable_columns = [('min_time', thisrace),('std', thisrace),('num_races', thisrace),\r\n ('rainfall', thisrace),\r\n ('temp', thisrace),\r\n ('wind', thisrace),\r\n ('metersup', thisrace), \r\n 'sex_W']\r\n else:\r\n #drop this column...probs should have removed it earlier. \r\n unuseable_columns = ['sex_W']\r\n #print(features_list)\r\n for element in unuseable_columns:\r\n features_list.remove(element)\r\n data_with_all_feats = self.feed_data.drop(unuseable_columns,axis=1)\r\n colstodrop = features_list\r\n thiscols = []\r\n data_with_current_feats = data_with_all_feats.drop(features_list,axis=1)\r\n checkfit=100.0\r\n scores = []\r\n dropped_cols = []\r\n loopgain =True\r\n #mymod = RandomForestRegressor(n_estimators=80, oob_score = True, max_depth=10,\r\n # min_samples_split = 25, criterion='mse')\r\n thisloopfeatures_list = features_list\r\n curcols = data_with_current_feats.columns\r\n countgain=0\r\n #print(\"cc\",curcols)\r\n while loopgain == True:\r\n thisloopscore=100.0\r\n for fet in thisloopfeatures_list:\r\n data_with_current_feats[fet] = data_with_all_feats[fet]\r\n etrain=data_with_current_feats.sample(frac=0.8,random_state=200)\r\n etest=data_with_current_feats.drop(etrain.index)\r\n y = etrain.pop('min_time')\r\n ytest = etest.pop('min_time')\r\n #print(y)\r\n model = RandomForestRegressor(n_estimators=80, oob_score = True, max_depth=15,\r\n min_samples_split = 12, criterion='mse')\r\n model.fit(etrain,y)\r\n\r\n PRED = model.predict(etrain)\r\n predscore = self.mean_absolute_percentage_error(y,PRED)#= r2_score(y,PRED)\r\n oobs = self.mean_absolute_percentage_error(y,model.oob_prediction_)\r\n scores.append(oobs)\r\n if ((thisloopscore - oobs) > 0.0):\r\n thisloopscore = oobs\r\n fetwinner = fet\r\n data_with_current_feats.drop(fet,axis=1,inplace=True)\r\n etrain.drop(fet,axis=1,inplace=True)\r\n\r\n data_with_current_feats[fetwinner] = data_with_all_feats[fetwinner]\r\n etrain=data_with_current_feats.sample(frac=0.8,random_state=200)\r\n etest=data_with_current_feats.drop(etrain.index)\r\n y = etrain.pop('min_time')\r\n ytest = etest.pop('min_time')\r\n #print(y)\r\n model = RandomForestRegressor(n_estimators=80, oob_score = True, max_depth=30,\r\n min_samples_split = 12,min_samples_leaf =7, criterion='mse')\r\n model.fit(etrain,y)\r\n\r\n PRED = model.predict(etrain)\r\n predscore = self.mean_absolute_percentage_error(y,PRED)#= r2_score(y,PRED)\r\n #print(fetwinner,predscore)\r\n oobs = self.mean_absolute_percentage_error(y,model.oob_prediction_)\r\n scores.append(oobs)\r\n #print(fetwinner,\"~\",oobs)\r\n thisloopfeatures_list.remove(fetwinner)\r\n if ((checkfit-oobs)>0.0001):\r\n checkfit = oobs\r\n curcols = data_with_current_feats.columns\r\n #print(curcols)\r\n else:\r\n break\r\n\r\n\r\n self.final_df = self.feed_data[data_with_current_feats.columns]\r\n self.Xtrain=self.final_df.sample(frac=0.8,random_state=200)\r\n self.Xtest=self.final_df.drop(self.Xtrain.index)#\r\n self.ytrain = self.Xtrain.pop('min_time')\r\n self.ytest = self.Xtest.pop('min_time')\r\n self.model= RandomForestRegressor(n_estimators=80, oob_score = True, max_depth=30,\r\n min_samples_split = 12,min_samples_leaf =7, criterion='mse')\r\n 
self.model.fit(self.Xtrain,self.ytrain)\r\n #print(y)\r\n return",
"def get_features(df, target=[], meta=[]):\n ############################################################\n # Type conversion\n ############################################################\n\n types = df[df.columns[~df.columns.isin(target+meta)]].dtypes\n for col_name, col_type in types.iteritems():\n if col_type == bool:\n df[col_name] = df[col_name].astype(float)\n\n ############################################################\n # Get features by type\n ############################################################\n \n features_cat = filter(lambda x: not np.issubdtype(x[1], np.number), types.iteritems())\n features_cat = sorted(list(map(lambda x: x[0], features_cat)))\n # target and meta should have already been removed. but just to be sure\n features_num = sorted(list(set(types.index) - set(features_cat) - set(target) - set(meta))) \n selected_features = df.columns.to_list()\n features_idx = dict(zip(selected_features, range(len(selected_features))))\n \n return selected_features, features_num, features_cat, features_idx",
"def create_dataframe_for_training(data):\n feature_column_name = 'X'\n #data_cp = data[['label']].copy()\n for i, row in tqdm(data.iterrows(), total=len(data)):\n all_features = f'{row.claimant} {row.claim} {row.article_content}'\n data.loc[i, feature_column_name] = all_features\n\n return data[feature_column_name]",
"def feature_selection(cls, tbl, thresh=-1):\n\n numerical_columns = [col for col in tbl.columns if col not in [\"F21\", \"F20\", \"F54\", \"Name\"]]\n X = tbl[numerical_columns[:-1]].values\n y = tbl[numerical_columns[-1]].values\n\n n = X.shape[1]\n slist = np.zeros((n, 3))\n slist[:, -1] = 1\n\n # identify relevant features\n slist[:, 0] = cls._c_correlation(X, y) # compute 'C-correlation'\n idx = slist[:, 0].argsort()[::-1]\n slist = slist[idx,]\n slist[:, 1] = idx\n if thresh < 0:\n thresh = np.median(slist[-1, 0])\n\n slist = slist[slist[:, 0] > thresh, :] # desc. ordered per SU[i,c]\n\n \"Identify redundant features among the relevant ones\"\n cache = {}\n m = len(slist)\n p_su, p, p_idx = cls._get_first_element(slist)\n for i in xrange(m):\n q_su, q, q_idx = cls._get_next_element(slist, p_idx)\n if q:\n # p, q = int(p), int(q)\n while q:\n if (p, q) in cache:\n pq_su = cache[(p, q)]\n else:\n pq_su = cls._symmetrical_uncertainty(X[:, int(p)], X[:, int(q)])\n cache[(p, q)] = pq_su\n\n if pq_su >= q_su:\n slist = cls._remove_element(slist, q_idx)\n q_su, q, q_idx = cls._get_next_element(slist, q_idx)\n\n p_su, p, p_idx = cls._get_next_element(slist, p_idx)\n if not p_idx:\n break\n\n sbest = slist[slist[:, 2] > 0, :2]\n selected_features = [int(ff) for ff in sbest[:, 1]]\n selected_features = [numerical_columns[i] for i in selected_features]\n selected_features.insert(0, \"Name\")\n selected_features.append(\"category\")\n new_tbl = tbl[selected_features]\n\n return new_tbl",
"def filter_row(row: OrderedDict, sampling_features: set) -> bool:\n\n return row['feature_of_interest'] in sampling_features",
"def ite_best(train_df, test_df, features, outcome, treatment):\n train_t_df, train_c_df = split_treatment_control(train_df, treatment)\n\n by_feat_t = train_t_df.groupby(features)[outcome].mean()\n by_feat_c = train_c_df.groupby(features)[outcome].mean()\n by_feat = by_feat_t - by_feat_c\n\n return test_df[features].join(by_feat, on=features)[outcome].values",
"def remove_rows_without_feature(df, feature):\n return df[np.isfinite(df[feature])]",
"def main():\n housing = pd.read_csv(\"Data/train_original.csv\")\n housing[\"TotalSF\"] = (\n housing[\"TotalBsmtSF\"] + housing[\"1stFlrSF\"] + housing[\"2ndFlrSF\"]\n )\n training_features, testing_features, training_target, testing_target = impute_dummify_and_split(\n housing, drop_target=False\n )\n\n p_values = [\n (c, pearsonr(training_features[\"SalePrice\"], training_features[c])[1])\n for c in training_features.columns\n ]\n\n p_value_limits = [0.05]\n\n result = []\n ps_and_cols = {}\n\n for p_value_limit in p_value_limits:\n\n high_ps = list(\n map(lambda t: t[0], sorted(p_values, key=lambda t1: t1[1])[:15])\n )\n\n print(training_features[high_ps].corr())\n\n columns = [p[0] for p in p_values if p[1] < p_value_limit]\n\n training_features_restricted = training_features[columns].drop(\n \"SalePrice\", axis=\"columns\"\n )\n\n testing_features_restricted = testing_features[columns].drop(\n \"SalePrice\", axis=\"columns\"\n )\n\n for model in (\n linear_model.Lasso(alpha=2.1),\n linear_model.Ridge(alpha=2.1),\n ):\n\n model.fit(training_features_restricted, training_target)\n\n train_score = model.score(\n training_features_restricted, training_target\n )\n\n test_score = model.score(\n testing_features_restricted, testing_target\n )\n\n name = str(model).split(\"(\")[0]\n\n result = result + [\n (\n \"_2_restrict_features\",\n name,\n \"p value limit: {:.3f}, alpha: 2.1\".format(p_value_limit),\n train_score,\n test_score,\n )\n ]\n\n print(ps_and_cols)\n return training_features[high_ps].corr()",
"def get_used_features():\n print(\"filtering used features\")\n # is this user an active user? if any of the features is non-zero take this user as an active user\n used_features = {}\n for row in df_train.values:\n row = list(row)\n id = row.pop(0)\n active = [c[0] for c in zip(df_train.columns[1:], row) if (c[0] in item_cols and c[1] > 0)]\n if id not in used_features:\n used_features[id] = []\n used_features[id].extend(active)\n return used_features",
"def get_filter(feature, value):\r\n return {\r\n 'gender': {'user__profile__gender': value},\r\n 'level_of_education': {'user__profile__level_of_education': value},\r\n }[feature]",
"def make_recommendation(fav_product,model_knn=model_knn,\n data=csr_matrix(df_product_features.values),\n\n mapper=products_to_idx,\n n_recommendations=6):\n # fit\n model_knn.fit(data)\n # get input movie index\n #print('You have input product:', fav_product)\n idx = fuzzy_matching(mapper, fav_product, verbose=True)\n if idx is None:\n return []\n #print('Recommendation system start to make inference')\n #print('......\\n')\n distances, indices = model_knn.kneighbors(data[idx], n_neighbors=n_recommendations + 1)\n\n raw_recommends = sorted(list(zip(indices.squeeze().tolist(), distances.squeeze().tolist())), key=lambda x: x[1])[\n :0:-1]\n\n # get reverse mapper\n #print(raw_recommends)\n reverse_mapper = {v: k for k, v in mapper.items()}\n # print recommendations\n #print('Recommendations for {}:'.format(fav_product))\n filter = []\n for i, (idx, dist) in enumerate(raw_recommends):\n #print('{0}: {1}, with distance of {2}'.format(i + 1, reverse_mapper[idx], dist))\n filter.append(reverse_mapper[idx])\n\n\n newproduct = pd.read_sql_query(\"\"\"SELECT p.*\n ,(SELECT img.url FROM image img WHERE p.id=img.product_id limit 1) as image\n ,(SELECT cate.cate_name FROM categories cate WHERE p.category_id=cate.id) as cateName\n FROM products p where p.name IN %s \"\"\", conn,params=(tuple(filter),))\n\n return newproduct.reset_index().to_json(orient='records')",
"def feature_selection(train_features, test_features, train_similarity_target, test_similarity_target, regressor, used_features):\n\t# percentile selector\n\tpercentile_selector, percentile_score, percentile_train_features_selected, percentile_test_features_selected, percentile_mask = best_percentile_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)\n\n\t# model based selector\n\tmodel_based_selector, model_based_score, model_based_train_features_selected, model_based_test_features_selected, model_based_mask = best_model_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)\n\n\t# iterative based selector\n\titerative_based_selector, iterative_based_score, iterative_based_train_features_selected, iterative_based_test_features_selected, iterative_based_mask = best_iterative_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)\n\n\tall_scores = []\n\n\tregressor.fit(train_features, train_similarity_target)\n\tprint(\"The score on all features: %.3f\" % regressor.score(test_features, test_similarity_target))\n\tall_scores.append(regressor.score(test_features, test_similarity_target))\n\n\t# show results for the percentile selector\n\tall_scores.append(percentile_score)\n\n\t# show results for the model based selector\n\tall_scores.append(model_based_score)\n\n\t# show results for the iterative based selector\n\tall_scores.append(iterative_based_score)\n\n\tmax_value_position = all_scores.index(max(all_scores))\n\n\tif max_value_position == 0:\n\t\tprint(\"Returning all features!\\n\")\n\t\treturn train_features, test_features\n\telif max_value_position == 1:\n\t\tpercentile_mask = build_mask(percentile_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'percentile_mask.txt')\n\t\tdebug_data(percentile_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the percentile selector!\\n\")\n\t\treturn percentile_selector, percentile_train_features_selected, percentile_test_features_selected\n\telif max_value_position == 2:\n\t\tmodel_based_mask = build_mask(model_based_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'model_based_mask.txt')\n\t\tdebug_data(model_based_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the model based selector!\\n\")\n\t\treturn model_based_selector, model_based_train_features_selected, model_based_test_features_selected\n\telse:\n\t\titerative_based_mask = build_mask(iterative_based_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'iterative_based_mask.txt')\n\t\tdebug_data(iterative_based_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the iterative based selector!\\n\")\n\t\treturn iterative_based_selector, iterative_based_train_features_selected, iterative_based_test_features_selected"
]
| [
"0.58443344",
"0.5821084",
"0.5789811",
"0.57810664",
"0.57256854",
"0.56218714",
"0.56099474",
"0.56072265",
"0.5601768",
"0.5588304",
"0.5585128",
"0.5567165",
"0.55657196",
"0.5565339",
"0.55552304",
"0.55424345",
"0.5533143",
"0.54980946",
"0.5436976",
"0.5403029",
"0.5400791",
"0.53976893",
"0.53850293",
"0.538101",
"0.53763646",
"0.5373584",
"0.5350518",
"0.534596",
"0.53346556",
"0.53073615"
]
| 0.7660822 | 0 |
Gives the top num_recommends recommendations for a song based on a similarity_score matrix | def get_recommendations(df,song_title, similarity_score, num_recommends = 5):
indices = pd.Series(df.index, index = df['track_name']).drop_duplicates()
idx = indices[song_title]
sim_scores = list(enumerate(similarity_score[idx]))
sim_scores = sorted(sim_scores, key = lambda x: x[1],reverse = True)
top_scores = sim_scores[1:num_recommends+1]
song_indices = [i[0] for i in top_scores]
return df["track_name"].iloc[song_indices] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def top_5_similar_2(list_string, my_nlp=nlp1, model_type=my_model, doc_topic=my_doc_topic):\n vec = my_nlp.transform(list_string)\n vtrans = model_type.transform(vec)\n array_5 = pairwise_distances(vtrans, doc_topic, metric='cosine').argsort()[0][0:5]\n # result_df = df_reviews[['game_link']].iloc[array_5]\n return df_reviews[['game']].iloc[array_5]\n # return(\"test\")\n return result_df",
"def content_model(movie_list,top_n=10):\n # Initializing the empty list of recommended movies\n recommended_movies = []\n data = data_preprocessing(27000)\n # Instantiating and generating the count matrix\n count_vec = CountVectorizer()\n count_matrix = count_vec.fit_transform(data['keyWords'])\n indices = pd.Series(data['title'])\n cosine_sim = cosine_similarity(count_matrix, count_matrix)\n # Getting the index of the movie that matches the title\n idx_1 = indices[indices == movie_list[0]].index[0]\n idx_2 = indices[indices == movie_list[1]].index[0]\n idx_3 = indices[indices == movie_list[2]].index[0]\n # Creating a Series with the similarity scores in descending order\n rank_1 = cosine_sim[idx_1]\n rank_2 = cosine_sim[idx_2]\n rank_3 = cosine_sim[idx_3]\n # Calculating the scores\n score_series_1 = pd.Series(rank_1).sort_values(ascending = False)\n score_series_2 = pd.Series(rank_2).sort_values(ascending = False)\n score_series_3 = pd.Series(rank_3).sort_values(ascending = False)\n # Getting the indexes of the 10 most similar movies\n listings = score_series_1.append(score_series_1).append(score_series_3).sort_values(ascending = False)\n\n # Store movie names\n recommended_movies = []\n # Appending the names of movies\n top_50_indexes = list(listings.iloc[1:50].index)\n # Removing chosen movies\n top_indexes = np.setdiff1d(top_50_indexes,[idx_1,idx_2,idx_3])\n for i in top_indexes[:top_n]:\n recommended_movies.append(list(movies['title'])[i])\n return recommended_movies",
"def top_matches(prefs, person, n=5, similarity=sim_pearson):\n scores = [(similarity(prefs, person, other), other)\n for other in prefs if other != person]\n\n scores.sort()\n scores.reverse()\n return scores[0:n]",
"def top_by_num_of_ratings(self, n):\n return top_movies",
"def recommend(self, u):\n\n sims = {} #similarities\n recommendation = \"\"\n topScore = None\n start = time.time()\n for movie_id, rating in enumerate(u):\n if rating != 0:\n sims[movie_id] = {}\n for r_id, movie in enumerate(self.ratings):\n sims[movie_id][r_id] = self.distance(movie,self.ratings[movie_id])\n # print time.time() - start, \"distance time\"\n\n start = time.time()\n for i, movieRating in enumerate(self.ratings):\n iPrediction = 0\n for movieName in self.ratedMovieList:\n j = self.titlesOnly.index(movieName)\n iPrediction += sims[j][i]*1.0 * self.userRatingVector[j]\n if topScore is None or iPrediction > topScore:\n movie = self.titlesOnly[i]\n if movie not in self.ratedMovieList and movie not in self.recommendedMovies:\n # print(\"prediction score for %s is %.5f\" % (movie, iPrediction))\n topScore = iPrediction\n recommendation = movie\n # print time.time() - start, \"recommendation time\"\n self.recommendedMovies.append(recommendation)\n\n articlePattern = re.match('(.*), (the|a|an|el|la)', recommendation)\n if articlePattern is not None:\n recommendation = articlePattern.group(2) + \" \" + articlePattern.group(1)\n\n return recommendation",
"def recommend(self, item_ids, topN=5):\n positives = [str(x) for x in item_ids if str(x) in self.model.wv]\n recommendations = self.model.wv.most_similar(positives, topn=topN)\n recommendations = [self.recid2title[self.iid2recid[int(x[0])]] for x in recommendations]\n history = [self.recid2title[self.iid2recid[int(y)]] for y in item_ids]\n return {'recommendations': recommendations, 'history': history}",
"def get_recommendations(prefs, person, similarity=sim_pearson):\n totals = {}\n similarity_sums = {}\n\n for other in prefs:\n if other == person:\n continue\n\n sim = similarity(prefs, person, other)\n\n if sim <= 0:\n continue\n\n for item in prefs[other]:\n if item not in prefs[person] or prefs[person][item] == 0:\n totals.setdefault(item, 0)\n totals[item] += prefs[other][item] * sim\n similarity_sums.setdefault(item, 0)\n similarity_sums[item] += sim\n\n # Normalized list\n rankings = [(total / similarity_sums[item], item)\n for item, total in totals.items()]\n\n # Returns normalized score, not an r that would be between -1 and 1\n rankings.sort()\n rankings.reverse()\n return rankings",
"def test_most_similar_topn(self):\n self.assertEqual(len(self.vectors.most_similar('dog.n.01', topn=5)), 5)\n self.assertEqual(len(self.vectors.most_similar('dog.n.01', topn=10)), 10)\n\n predicted = self.vectors.most_similar('dog.n.01', topn=None)\n self.assertEqual(len(predicted), len(self.vectors.vocab) - 1)\n self.assertEqual(predicted[-1][0], 'gallant_fox.n.01')",
"def top_by_ratings(self, n, metric=average):\n return top_movies",
"def content_model(movie_list,top_n):\r\n # Vectorise content for each movie in list_title\r\n input_matrix = vectoriser.transform(movies[movies['title'].isin(movie_list)].content)\r\n \r\n # Initiate list to store indeces of input movies\r\n m_idx = []\r\n \r\n for title in movie_list:\r\n for id in movies.movieId[movies['title']==title]:\r\n m_idx.append(indices[id])\r\n \r\n # Create list of similarities between each input movie and every other movie in the dataset \r\n sim = list(enumerate(cosine_similarity(content_matrix,\r\n input_matrix))) \r\n\r\n # Sort the list by the average similarity of the movies\r\n sim_scores = sorted(sim, key=lambda x: x[1].mean(), reverse=True)\r\n \r\n # Select the top-k values for recommendation\r\n sim_scores = sim_scores[0:20]\r\n\r\n # Select the indices of the top-k movies\r\n movie_indices = [i[0] for i in sim_scores if i[0] not in m_idx]\r\n \r\n # Return a list of the movie titles\r\n return movies.iloc[movie_indices].title[:top_n]",
"def recommend(title, cosine_sim, indices, df):\n\n recommended_papers = {}\n\n # getting the index of the movie that matches the title\n try:\n idx = indices[indices == title].index[0]\n except:\n # return dummy values if indices doesn't behave (?)\n return {}, 0.0\n\n # creating a Series with the similarity scores in descending order\n score_series = pd.Series(cosine_sim[idx]).sort_values(ascending = False)\n #print(type(score_series))\n #print(len(score_series))\n\n # getting the indexes of the 5 most similar papers\n top_5_indexes = list(score_series.iloc[1:5].index)\n average_score = mean(score_series)\n #print('avg:', average_score)\n #highest_score = float(score_series.iloc[1])\n #print(score_series.iloc[1:5].index)\n #print(score_series.iloc[1:5])\n\n # populating the list with the titles of the best 5 matching papers\n for i in top_5_indexes:\n recommended_papers[list(df.index)[i]] = df['Link'][i]\n\n return recommended_papers, average_score",
"def recommendation_ranking(self):\n iu = self.final_recommendation_score_matrix()\n new_iu = []\n for row in iu:\n li = []\n temp = row\n if self.product != \"dist\":\n temp = -np.sort(-temp)\n for element in row:\n li.append(binary_search_opp(temp,element)+1) \n else:\n temp = np.sort(temp)\n for element in row:\n li.append(np.searchsorted(temp,element)+1)\n new_iu.append(li)\n return np.array(new_iu)",
"def get_similar_products(user_input_emb, ref_catalog, n = 5):\r\n sim_list = []\r\n for i in range(len(ref_catalog)):\r\n desc_id = ref_catalog.iloc[i]['id']\r\n emb = ref_catalog.iloc[i]['desc_embedding']\r\n cos_sim = compute_cosine_sim(emb,user_input_emb)\r\n sim_list.append((desc_id, cos_sim))\r\n top_n = sorted(sim_list, key= lambda tup: tup[1], reverse = True)[:n]\r\n return top_n",
"def show_recommendation_pool(self, top_n=None):\n i = 0\n if top_n is None:\n top_n = self.number_of_recommendations\n\n for _, rdata in self.recommendation_pool.items():\n print(\"\\n{R.movie_id} - {R.title} - {R.genres}\".format(\n R=rdata['movie_obj']))\n\n if 'title_similarity' in rdata:\n print(\" Title Similarity: {} - ({})\".format(\n rdata['title_similarity'], rdata['movie_obj'].title))\n\n if 'genres_similarity' in rdata:\n print(\" Genres Similarity: {} - ({})\".format(\n rdata['genres_similarity'], rdata['movie_obj'].genres))\n\n if 'tags_similarity' in rdata:\n print(\" Tags Similarity: {} - ({})\".format(\n rdata['tags_similarity'], rdata['tags']))\n\n if 'final_similarity' in rdata:\n print(\" -> Final Similarity: {}\".format(\n rdata['final_similarity']))\n\n i += 1\n if top_n and i >= top_n:\n break",
"def top_n_satisfy2(content, n):\n #print(n)\n sum_satisfy = 0.0\n query_num = 0.0\n for qid in content:\n label_sort = []\n score = []\n all_info = content[qid]\n num_label1 = 0\n for info in all_info:\n if info[0] > 0:\n num_label1 += 1\n label_sort.append([info[0], info[1]])\n label_sort.sort(key=take_second, reverse=True)\n satisfy = 0.0\n count = 0\n size = len(label_sort)\n for i in range(min(n, size)):\n cur_label = label_sort[i][0]\n if cur_label > 0:\n satisfy += 1\n cur_satisfy = satisfy / min(n, num_label1)\n sum_satisfy += cur_satisfy\n query_num += 1\n return sum_satisfy / query_num",
"def topMatches(prefs, person, n=5, similarity=sim_pearson):\n all_matches = [(similarity(prefs, person, other), other) \n for other in prefs.keys()\n if person != other]\n all_matches.sort()\n all_matches.reverse()\n return all_matches[0:n]",
"def build_recommendations(sc, myRatings, model):\n #myRatedMovieIds = set([x[1] for x in myRatings])\n uid = get_uid_from_ratings(myRatings)\n #print \"uid:\", uid\n myRatedMovieIds = set([x[1] for x in myRatings.collect()])\n #print \"myRatedMovieIds:\", myRatedMovieIds\n candidates = sc.parallelize([m for m in movies if m not in myRatedMovieIds]).cache()\n #print candidates\n predictions = model.predictAll(candidates.map(lambda x: (uid, x))).collect()\n #print predictions\n recommendations = sorted(predictions, key = lambda x: x.product)\n return recommendations",
"def recommendations_similarity(aData, needed_param, user, products, n = 10, simfunc = sim_cosine):\n table_CF = preproc.make_CF_table(aData, needed_param)\n sim_measures_table = simfunc(table_CF) \n \n scores = sim_measures_table.dot(table_CF)\n mean_scores = np.array(np.sum(sim_measures_table, axis=1).T)\n mean_scores = pd.DataFrame(np.tile(mean_scores, (scores.shape[1],1))).T\n predicted_ratings = np.divide(scores, np.absolute(mean_scores))\n \n ratings = predicted_ratings[user].order(ascending= False)\n ratings = ratings[0:n]\n \n return (ratings.index[ratings.index.isin(products)==False])",
"def test_predictTopSong():\n\n user_recommend = svd.predictTopSong(algo_svd, testset, targetSongidList)\n\n # check type\n assert isinstance(user_recommend, pd.DataFrame)\n\n # check shape\n assert user_recommend.shape == (10, 6)\n\n # check sorted\n assert user_recommend.loc[0]['score'] == max(user_recommend.score)",
"def getRecommendations(prefs,person,similarity=sim_pearson):\n weighted_similarities = dict((\n (other, similarity(prefs, person, other)) \n for other in prefs.keys() if other != person))\n # Eliminate critics with negative correlation (I'm not sure why\n # this is a good idea)\n for critic, sim in weighted_similarities.items():\n if sim <= 0:\n del weighted_similarities[critic]\n sum_ratings = defaultdict(int) # int() initializes to 0\n sum_weights = defaultdict(int)\n for other, weight in weighted_similarities.items():\n for movie, rating in prefs[other].items():\n sum_ratings[movie] += rating * weight\n sum_weights[movie] += weight\n recommendations = [(sum_ratings[movie]/sum_weights[movie], movie)\n for movie in sum_ratings.keys()\n if movie not in prefs[person].keys()]\n recommendations.sort()\n recommendations.reverse()\n return recommendations",
"def get_top_n_motif_scores(score_list,top_n):\r\n\treturn score_list.argsort()[-top_n:],score_list[score_list.argsort()[-top_n:]]",
"def get_top_k(weight_query, doc_dict, k):\n \n # find fraction of all inlinks to doc_id\n total_num_inlinks = 0\n frac_inlinks = {}\n with open(num_inlinks_file) as f:\n doc_ids_set = doc_dict.keys()\n for i, line in enumerate(f):\n total_num_inlinks += int(line.strip())\n if i in doc_ids_set:\n frac_inlinks[i] = int(line.strip())\n \n\n for doc_id, frac in frac_inlinks.items():\n frac_inlinks[doc_id] = frac / total_num_inlinks\n\n # calculate score\n # score = alpha * frac_inlinks + (1 - alpha) * cosine similarity\n alpha = 0.5\n score = {}\n for doc_id, weight_doc in doc_dict.items():\n cosine_score = 0\n for term, weight in weight_doc.items():\n cosine_score += weight_doc[term] * weight_query[term]\n score[doc_id] = alpha * frac_inlinks[doc_id] + (1 - alpha) * cosine_score\n \n # sort based on score, high to low\n sorted_score = OrderedDict( sorted(score.items(), key=lambda t: t[1], reverse=True) )\n \n # type(top_k) == {doc_id: [score, \"doc_text\"]}\n # note top_k is not sorted based on score!\n top_k = {}\n num_results = 0\n for doc_id, score in sorted_score.items():\n num_results += 1\n top_k[doc_id] = [score, \"\"]\n if num_results == k:\n break\n return top_k",
"def top10_odds_ratio(likelihoods, vocab, classes):\r\n results = []\r\n for word in vocab:\r\n highestOddsRatio = None\r\n for c1 in classes:\r\n for c2 in classes:\r\n # Skip self TODO: Is this right?\r\n # if c1 == c2:\r\n # continue\r\n oddsRatio = odds_ratio(likelihoods, c1, c2, word)\r\n if oddsRatio > highestOddsRatio or highestOddsRatio == None:\r\n highestOddsRatio = oddsRatio\r\n results.append((word, highestOddsRatio))\r\n # Sort and return top 10\r\n return map(lambda x: x[0], sorted(results, key=lambda x: x[1], reverse=True))[:10]",
"def personalization(prediction, n):\n # prediction\n # n top n recommendation\n\n top_n = get_top_n(prediction, n)\n\n rec_dict = {}\n for uid, user_ratings in top_n.items():\n rec_dict[uid] = [iid for (iid, _) in user_ratings]\n\n rec_user_ls = [pred[0] for pred in prediction]\n rec_item_ls = [pred[1] for pred in prediction]\n\n unique_rec_user_ls = np.unique(rec_user_ls)\n unique_rec_item_ls = np.unique(rec_item_ls)\n\n # assign each item with index number\n unique_rec_item_dict = {item: ind for ind,\n item in enumerate(unique_rec_item_ls)}\n\n n_unique_rec_user = len(unique_rec_user_ls)\n n_unique_rec_item = len(unique_rec_item_ls)\n\n # recommended user item matrix\n rec_matrix = np.zeros(shape=(n_unique_rec_user, n_unique_rec_item))\n\n # represent recommended item for each user as binary 0/1\n for user in range(n_unique_rec_user):\n # get userid\n user_id = unique_rec_user_ls[user]\n # get rec item list\n item_ls = rec_dict[user_id]\n\n for item_id in item_ls:\n # get item index\n item = unique_rec_item_dict[item_id]\n rec_matrix[user, item] = 1\n\n # calculate cosine similarity matrix across all user recommendations\n similarity = cosine_similarity(X=rec_matrix, dense_output=False)\n # calculate average of upper triangle of cosine matrix\n upper_right = np.triu_indices(similarity.shape[0], k=1)\n # personalization is 1-average cosine similarity\n score = 1 - np.mean(similarity[upper_right])\n return score",
"def most_similar(self, article: str, topn: int = 5):\n return [article[0] for article in self._model.similar_by_word(article, topn)]",
"def collab_model(movie_list,top_n=10):\r\n\r\n movie_ids = get_ids(movie_list)\r\n train = ratings_df\r\n empty = pd.DataFrame()\r\n for i in movie_ids:\r\n ds = train[train['movieId']==i]\r\n empty = pd.concat([empty, ds])\r\n best_rating = empty[empty['rating']>=3]\r\n count_ratings = best_rating.groupby('userId').count()\r\n sorted_df = count_ratings.sort_values('movieId', ascending=False)\r\n user_id = sorted_df.index[0]\r\n \r\n metric = 'cosine'\r\n \r\n similarities=[]\r\n indices=[]\r\n model_knn = NearestNeighbors(metric = metric, algorithm = 'brute') \r\n model_knn.fit(dataset)\r\n\r\n distances, indices = model_knn.kneighbors(dataset.iloc[user_id-1, :].values.reshape(1, -1), n_neighbors = 20)\r\n similarities = 1-distances.flatten()\r\n for i in range(0, len(indices.flatten())):\r\n if indices.flatten()[i]+1 == user_id:\r\n continue;\r\n train = train.astype({\"movieId\": str})\r\n Movie_user = train.groupby(by = 'userId')['movieId'].apply(lambda x:','.join(x))\r\n b = indices.squeeze().tolist()\r\n d = Movie_user[Movie_user.index.isin(b)]\r\n l = ','.join(d.values)\r\n Movie_seen_by_similar_users = l.split(',')\r\n Movies_under_consideration = list(map(int, Movie_seen_by_similar_users))\r\n df = pd.DataFrame({'movieId':Movies_under_consideration})\r\n top_10_recommendation = df[0:top_n+1]\r\n Movie_Name = top_10_recommendation.merge(movies_df, how='inner', on='movieId')\r\n recommended_movies = Movie_Name.title.values.tolist()\r\n \r\n\r\n return recommended_movies",
"def collab_model(movie_list, top_n=10):\r\n user_ids = pred_movies(movie_list)\r\n\r\n temp = get_user_movies(ratings, user_ids)\r\n\r\n movie_ids = []\r\n for i in movie_list:\r\n \"\"\" get movieId from title\"\"\"\r\n movieID = movies_df[movies_df['title'] == i]['movieId'].values[0]\r\n movie_ids.append(movieID)\r\n\r\n # Add new user with ratings to userlist\r\n new_user_row1 = {'userId': 1000000, 'movieId': movie_ids[0], 'rating': 5.0, 'title': movie_list[0]}\r\n new_user_row2 = {'userId': 1000000, 'movieId': movie_ids[1], 'rating': 5.0, 'title': movie_list[1]}\r\n new_user_row3 = {'userId': 1000000, 'movieId': movie_ids[2], 'rating': 5.0, 'title': movie_list[2]}\r\n temp = temp.append([new_user_row1, new_user_row2, new_user_row3], ignore_index=True)\r\n\r\n # create pivot table\r\n user_ratings = temp.pivot_table(index='userId', columns='title', values='rating').fillna(0)\r\n # compute correlations from pivot table\r\n item_similarity_df = user_ratings.corr(method='pearson')\r\n\r\n def get_similar_movies(movie_name, user_rating=5):\r\n \"\"\"\r\n :param movie_name:\r\n :param user_rating: optional\r\n :return: list of similar movies\r\n \"\"\"\r\n similar_score = item_similarity_df[movie_name] * user_rating\r\n similar_score = similar_score.sort_values(ascending=False)\r\n return similar_score\r\n\r\n similar_movies = pd.DataFrame()\r\n\r\n # get similar movies of fav movies\r\n for movie in movie_list:\r\n similar_movies = similar_movies.append(get_similar_movies(movie, 5), ignore_index=True)\r\n\r\n recommended_movies = []\r\n # sum similarities together append highest values\r\n for i in similar_movies.sum().sort_values(ascending=False).index:\r\n if i in movie_list:\r\n pass\r\n else:\r\n recommended_movies.append(i)\r\n\r\n return recommended_movies[:10]",
"def recommend_from_scores(scores: List[List[float]], n: int) -> List[List[int]]:\n\n def top_idx(scores):\n return np.array(scores).argsort()[::-1][:n]\n\n return [top_idx(s) for s in scores]",
"def graphlab_recommendations(aData, user, needed_param, n = 10, cv_ratio = 0.7):\n # change the data into SFrame and the user data into SArray\n import preprocessing\n aData.rename(columns = {needed_param['user_id']:'user_id', needed_param['item_id']: 'item_id', \n needed_param['ratings']: 'ratings'}, inplace = True)\n aData = gl.SFrame(aData)\n train, test= preprocessing.graphlab_split_data(aData, cv_ratio)\n user = gl.SArray([user])\n \n # make models\n methods = ['matrix_factorization', 'linear_model', 'item_similarity', 'popularity', 'item_means']\n sim_type = ['jaccard', 'cosine', 'pearson']\n models = []\n for aMethod in methods:\n print aMethod\n if(aMethod != 'item_similarity'):\n model = gl.recommender.create(observation_data = train, user_id = 'user_id', \n item_id = 'item_id', target = 'ratings', method = aMethod)\n models.append(model)\n else:\n for aSim in sim_type:\n print aSim\n sim_model = gl.recommender.create(observation_data = train, user_id = 'user_id', \n item_id = 'item_id', target = 'ratings', method = aMethod, similarity_type = aSim)\n models.append(sim_model)\n \n # generate results for models as well as the rmse results\n recommended = []\n rmse = []\n for model in models:\n aResult = model.recommend(users = user, k = n)\n recommended.append(aResult)\n aRMSE = gl.evaluation.rmse(test['ratings'], model.predict(test))\n rmse.append(aRMSE)\n \n # create DataFrame\n df = pd.DataFrame({'models':models, 'recommended':recommended, 'rmse':rmse})\n # find the model that gives k least square errors\n df = df.sort('rmse', ascending = True).iloc[0:2]\n df.index = range(0,2)\n \n colnames = df['recommended'].loc[0].column_names()\n results = pd.DataFrame(columns = colnames)\n \n for aResult in df['recommended']:\n aResult = aResult.to_dataframe()\n results = results.append(aResult)\n \n results = results.sort('score', ascending = False)\n\n return results.sort('score', ascending=False), 'item_id'",
"def recommend(self, user):\n K = self.n_sim_user\n N = self.n_rec_movie\n rank = dict()\n watched_movies = self.trainset[user]\n\n # v=similar user, wuv=similarity factor\n for v, wuv in sorted(self.user_sim_mat[user].items(),\n key=itemgetter(1), reverse=True)[0:K]:\n for movie in self.trainset[v]:\n if movie in watched_movies:\n continue\n # predict the user's \"interest\" for each movie\n rank.setdefault(movie, 0)\n rank[movie] += wuv\n # return the N best movies\n return sorted(rank.items(), key=itemgetter(1), reverse=True)[0:N]"
]
| [
"0.69403774",
"0.6708428",
"0.66361177",
"0.66208243",
"0.66061056",
"0.64581853",
"0.64331615",
"0.6422014",
"0.6418242",
"0.63883686",
"0.63695747",
"0.6348622",
"0.63314646",
"0.6330419",
"0.6316701",
"0.6284902",
"0.6264998",
"0.6257891",
"0.62459356",
"0.6227139",
"0.6223714",
"0.6223226",
"0.6212176",
"0.62096363",
"0.617238",
"0.61676055",
"0.616624",
"0.6158258",
"0.61549455",
"0.61371756"
]
| 0.74992377 | 0 |
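A minimal usage sketch for the get_recommendations record above, assuming that function is in scope. The toy DataFrame, track names, feature columns, and the use of scikit-learn's cosine_similarity are illustrative assumptions, not part of the original record; they only show how a track_name column plus a square similarity matrix aligned with the row order feed the function.

import pandas as pd
from sklearn.metrics.pairwise import cosine_similarity

# Hypothetical toy catalogue: three tracks described by two numeric features.
df = pd.DataFrame({
    "track_name": ["song_a", "song_b", "song_c"],
    "danceability": [0.9, 0.8, 0.1],
    "energy": [0.7, 0.6, 0.2],
})

# Square similarity matrix aligned with df's row order, as the function expects.
similarity_score = cosine_similarity(df[["danceability", "energy"]])

# Top-2 tracks most similar to "song_a" (the seed track itself is skipped by the [1:...] slice).
print(get_recommendations(df, "song_a", similarity_score, num_recommends=2))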
Return +1, -1, or 0 if the unit cube is above, below, or intersecting the plane. | def UnitCubeTest(P):
above = 0
below = 0
for (a,b,c) in [(0,0,0), (0,0,1), (0,1,0), (0,1,1), (1,0,0), (1,0,1), (1,1,0), (1,1,1)]:
s = P.test(a, b, c)
if s > 0:
above = 1
elif s < 0:
below = 1
return above - below | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hitTest( a, b ):\n r = a.radius + b.radius\n x = abs( a.x - b.x )\n y = abs( a.y - b.y )\n if x <= r and y <= r and x*x + y*y <= r*r:\n return 1\n return 0",
"def isInPlane(self, p) -> bool:\n # Testing for zero is done with math.isclose, to avoid rounding/floating point errors.\n # Since we are testing near zero, abs_tol is set to 1e-09\n return math.isclose(\n math.fabs(\n dot(\n self.normal(),\n Vector.connect(p.x, p.y, p.z, self.p0.x, self.p0.y, self.p0.z),\n )\n ),\n 0,\n rel_tol=1e-09,\n abs_tol=1e-09,\n )",
"def winner(self):\n if (self.player):\n return (0 == reduce(lambda x, y: x+y, self.board.p1vec))\n else:\n return (0 == reduce(lambda x, y: x+y, self.board.p2vec))",
"def ccw(p1: np.ndarray, p2: np.ndarray, p3: np.ndarray) -> int:\n dx1 = p2[0] - p1[0]\n dy1 = p2[1] - p1[1]\n dx2 = p3[0] - p1[0]\n dy2 = p3[1] - p1[1]\n\n dx1dy2 = dx1 * dy2\n dy1dx2 = dy1 * dx2\n\n if dx1dy2 > dy1dx2:\n return 1\n if dx1dy2 < dy1dx2:\n return -1\n if dx1 * dx2 < 0 or dy1 * dy2 < 0:\n return -1\n if dx1 * dx1 + dy1 * dy1 < dx2 * dx2 + dy2 * dy2:\n return 1\n\n return 0",
"def check(self):\n\n if (sum(self.game_state) == 0):\n return 1\n elif (self.game_state[-1] >=1 ):\n return -1\n else:\n return 0",
"def _in_huc(shply, huc_shply):\n if huc_shply.contains(shply):\n return 2\n elif huc_shply.intersects(shply):\n return 1\n else:\n return 0",
"def get_at(self,x,y):\n\t\tif ( not self._validate(x,y )):\n\t\t\treturn\n\n\t\tif (self.Grid[y] & (1 << x)) != 0:\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn 0",
"def in_side(self, side):\n if side == \"U\":\n return self.z() == 1\n if side == \"D\":\n return self.z() == -1\n if side == \"F\":\n return self.y() == -1\n if side == \"B\":\n return self.y() == 1\n if side == \"R\":\n return self.x() == 1\n if side == \"L\":\n return self.x() == -1",
"def evaluate(self, point):\n result = self.__evaluate(point)\n return -1 if result < 0 else 1",
"def count_oob(cube):\n out_range = (cube > 4).any(1) | (cube < -4).any(1)\n out_range = out_range.sum() / cube.shape[0]\n return out_range",
"def inRect(p,rect,dilation):\n if p[0]<rect[0]-dilation: return 0\n if p[1]<rect[1]-dilation: return 0\n if p[0]>rect[2]+dilation: return 0\n if p[1]>rect[3]+dilation: return 0\n return 1",
"def check(self):\n if (sum(self.state) == 0):\n return -1\n elif (self.state[-1] >= 1):\n return 1\n else:\n return 0",
"def goal_test(c):\n return c == GOAL_CUBE",
"def outcome(self):\n if self.grid[0][0] == self.grid[1][0] == self.grid[2][0] and self.grid[0][0] != 0:\n return self.grid[0][0]\n if self.grid[0][1] == self.grid[1][1] == self.grid[2][1] and self.grid[0][1] != 0:\n return self.grid[0][1]\n if self.grid[0][2] == self.grid[1][2] == self.grid[2][2] and self.grid[0][2] != 0:\n return self.grid[0][2]\n if self.grid[0][0] == self.grid[0][1] == self.grid[0][2] and self.grid[0][0] != 0:\n return self.grid[0][0]\n if self.grid[1][0] == self.grid[1][1] == self.grid[1][2] and self.grid[1][0] != 0:\n return self.grid[1][0]\n if self.grid[2][0] == self.grid[2][1] == self.grid[2][2] and self.grid[2][0] != 0:\n return self.grid[2][0]\n if self.grid[0][0] == self.grid[1][1] == self.grid[2][2] and self.grid[0][0] != 0:\n return self.grid[0][0]\n if self.grid[0][2] == self.grid[1][1] == self.grid[2][0] and self.grid[0][2] != 0:\n return self.grid[0][2]\n return 0",
"def winner(self):\n state = self._state['visible']\n if state['reserve'][0] < 1:\n return 1\n elif state['reserve'][1] < 1:\n return 0\n return -1",
"def evaluate(self) :\n if self.inStates[0].getState() == self.inStates[1].getState(): return 0\n return 1",
"def calcCondition(edge, x1, y1, x2, y2, left, right, top, bottom):\n\n stat1 = insideWindow(edge, x1, y1, left, right, top, bottom)\n stat2 = insideWindow(edge, x2, y2, left, right, top, bottom);\n\n if(not stat1 and stat2):\n return 1;\n if(stat1 and stat2):\n return 2;\n if(stat1 and not stat2):\n return 3;\n if(not stat1 and not stat2):\n return 4;\n return 0 #never executed",
"def intersect(self, plane, epsilon=0.00001):\r\n den = np.dot(self.direction, plane.normal)\r\n if math.fabs(den) < epsilon:\r\n return None\r\n\r\n result = (-plane.distance - np.dot(plane.normal, self.origin)) / den\r\n\r\n if result < 0.0:\r\n if result < -epsilon:\r\n return None\r\n result = 0.0\r\n return result",
"def contains(self, point):\n return 0 <= point.x <= 1 \\\n and 0 <= point.y <= 1 \\\n and 0 <= point.z <= 1",
"def intersection(self, ray):\n d_proj = self._normal.dot(ray.d)\n if abs(d_proj) < bounds.too_small:\n return -1.0\n s_proj = (self._origin - ray.o).dot(self._normal)\n if d_proj * s_proj < 0.0:\n # ray going away from plane\n return -1.0\n else:\n return s_proj / d_proj",
"def ccw(a, b, c):\n return (c.y - a.y) * (b.x - a.x) > (b.y - a.y) * (c.x - a.x)",
"def _is_visible(self, point):\n return point[0] > 0 and point[0] < 1 and point[1] > 0 and point[1] < 1",
"def point_orientation(a, b, c):\n return (b.x - a.x) * (c.y - a.y) - (c.x - a.x) * (b.y - a.y) >= 0",
"def _indicies_in_plane(self, plane=\"xy\", tol=1.0):\n control_indx = {\"xy\": 2, \"xz\": 1, \"yz\": 0}\n\n if plane not in [\"xy\", \"xz\", \"yz\"]:\n raise ValueError(\"Plane has to by xy, xz or yz!\")\n \n pos = self.surface_atoms.get_positions()\n com = np.mean(pos, axis=0)\n pos -= com\n in_plane = []\n for i in range(pos.shape[0]):\n ctrl_pos = pos[i, control_indx[plane]]\n if ctrl_pos > -tol and ctrl_pos < tol:\n in_plane.append(i)\n return in_plane",
"def isinside(z1, z2, zc, R):\n irv = 0\n Lover2 = np.abs(z2 - z1) / 2\n bigz = (2 * zc - (z1 + z2)) * np.abs(z2 - z1) / (2 * (z2 - z1))\n if (np.abs(bigz.imag) < R):\n d = np.sqrt(R ** 2 - bigz.imag ** 2)\n xa = bigz.real - d\n xb = bigz.real + d\n if ((xa < Lover2) and (xb > - Lover2)):\n irv = 1\n return irv",
"def surface_area_of_cube(side):\n return side",
"def in_cylinder(x, y, z, min_z, max_z, max_r):\n r = np.sqrt(x ** 2 + y ** 2)\n m = r < max_r\n m = m & (z < max_z)\n m = m & (z >= min_z)\n return m",
"def inCamp(self):\n return (((self.myTeam==1) and (self.ballPos.x <= self.width/2))\n | ((self.myTeam==2) and (self.ballPos.x >= self.width/2)))",
"def determine_round_winner(self):\n\n if self.getX() + self.SIZE[0] < 0:\n # point for player two\n return 2\n elif self.getX() > Configuration.windowWidth:\n # point for player one\n return 1",
"def evaluate(self, x, y, collision):\n\n if collision:\n return -1.0, False\n\n ax = x - self._x0\n ay = y - self._y0\n bx = self._x1 - self._x0\n by = self._y1 - self._y0\n\n da = ax * ax + ay * ay\n dot = ax * bx + ay * by\n\n if dot <= 0.0:\n return 0.0, False\n\n db = bx * bx + by * by\n p = dot * dot / db\n\n if p > db:\n return 0.0, False\n\n if da - p > self._rsquared:\n return 0.0, False\n\n return 1.0, True"
]
| [
"0.6006958",
"0.5900927",
"0.5759193",
"0.5713522",
"0.5660144",
"0.56555474",
"0.56256115",
"0.56209445",
"0.5612326",
"0.5611536",
"0.5605023",
"0.55960155",
"0.55666924",
"0.55626243",
"0.5540222",
"0.55394",
"0.55347407",
"0.55283034",
"0.5525503",
"0.55229247",
"0.55088705",
"0.5504836",
"0.5501232",
"0.5500875",
"0.5470349",
"0.54599494",
"0.54588544",
"0.54285717",
"0.5425404",
"0.54234105"
]
| 0.6731846 | 0 |
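A small hedged sketch of how the UnitCubeTest record above might be exercised, assuming that function is in scope. The Plane class below is an assumption (the original only requires an object with a signed test(x, y, z) method); it evaluates a*x + b*y + c*z + d so the three calls exercise the +1, -1, and 0 cases.

class Plane:
    # Hypothetical stand-in: plane a*x + b*y + c*z + d = 0 with a signed side test.
    def __init__(self, a, b, c, d):
        self.a, self.b, self.c, self.d = a, b, c, d

    def test(self, x, y, z):
        return self.a * x + self.b * y + self.c * z + self.d

print(UnitCubeTest(Plane(0, 0, 1, 1)))     # plane z = -1: cube entirely above -> 1
print(UnitCubeTest(Plane(0, 0, 1, -2)))    # plane z = 2: cube entirely below -> -1
print(UnitCubeTest(Plane(0, 0, 1, -0.5)))  # plane z = 0.5: cuts the cube -> 0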
Function to sample n numbers using parameters in l | def random_sample(l, n):
assert len(l) > 1
assert n
if l[0] == "int":
if len(l) == 3:
return np.random.randint(l[1], l[2] + 1, n, dtype="int32")
elif len(l) == 2:
return np.random.randint(l[1], l[1] + 1, n, dtype="int32")
else:
return np.random.uniform(l[1], l[2], n)
elif l[0] == "cat":
return np.random.choice(l[1:], size=n)
elif l[0] == "float":
if len(l) == 2:
return np.random.uniform(l[1], l[1], n)
else:
return np.random.uniform(l[1], l[2], n)
elif l[0] == "loguniform_int":
if len(l) == 2:
return np.power(
                10, (np.random.randint(np.log10(l[1]), np.log10(l[1]) + 1, n)), dtype=float
)
else:
return np.power(
10,
(np.random.randint(np.log10(l[1]), np.log10(l[2]) + 1, n)),
dtype=float,
)
elif l[0] == "loguniform":
if len(l) == 2:
return 10 ** (np.random.uniform(np.log10(l[1]), np.log10(l[1]), n))
else:
            return 10 ** (np.random.uniform(np.log10(l[1]), np.log10(l[2]), n))
else:
raise ValueError("Something went wrong") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sample(self, n, include=True):\n return [self(t / n) for t in range(n + int(include))]",
"def sample(self, n):\n ret = []\n for i in range(n):\n for j in range(n):\n ret.append(self.a + (((0.5 + i) / n) * self.l1) + (((0.5 + j) / n) * self.l2))\n return ret",
"def random_sampling(elements, n):\r\n import random\r\n return [random.choice(elements) for i in range(n)]",
"def sample(self, n):\n raise NotImplementedError",
"def sample(self, n=1):\n raise NotImplementedError",
"def Sample(n=6):\n t = [random.normalvariate(0.0, 1.0) for i in range(n)]\n t.sort()\n return t",
"def choose_m_n(li,min,max):\n n_items = random.randrange(min,max+1)\n if n_items == 0:\n return [ ]\n sample=random.sample(li,n_items) # Should it be sorted?\n return sample",
"def _sample(self, n=1):\n return [self[i] for i in np.random.choice(self.length, n, replace=False)]",
"def func(args):\n l, r, n = args\n import random\n\n return sum([random.randint(l, r) for _ in range(n)]) / n",
"def Samples(n=6, m=1000):\n t = [Sample(n) for i in range(m)]\n return t",
"def generate_samples(self, n_samples):",
"def generate_samples(self, n_samples):",
"def sample_n_unique(sampling_f, n):\n res = []\n while len(res) < n:\n candidate = sampling_f()\n if candidate not in res:\n res.append(candidate)\n return res",
"def getRandomList(n):\n lyst = list()\n for count in range (n):\n lyst.append(random.randint(1, n))\n return lyst",
"def easy_sample(self, num, **kwargs):\n return self.preprocess(self.sample(num, **kwargs), **kwargs)",
"def sample(self,f,N,p=100):\n return [f(x) for x in np.linspace(0,N,p)]",
"def sampling(n):\n\n def _sample(x):\n if n > x.shape[0]:\n # generate dups\n count = n // x.shape[0] + 1\n x = pd.concat([x] * count)\n return x.sample(n=n)\n else:\n return x.sample(n=n)\n\n return _sample",
"def samplingWithReplacement(m):\n return [ random.randrange(m) for i in range(m) ]",
"def gen_rand(l):\n w = int(l / 2)\n\n min = (1 << (w - 1)) | 1\n max = (1 << w) - 1\n\n n = random.randrange(min, max) | 1\n\n return n",
"def random_sampling(total_nums: int, samples_needed: int, start_num=1):\n target = np.arange(start_num, total_nums + 1)\n np.random.shuffle(target)\n\n return target[:samples_needed]",
"def simplestRandom(n):\n # do something \"perlin noise like\" - with various frequency scales\n level1 = numpy.random.randint(0,4,size=4)\n level2 = numpy.random.randn(10)\n level3 = numpy.random.randn(50) * .5\n # make splines for each\n u1 = INTERP.UnivariateSpline(numpy.linspace(0,1,4) ,level1,s=0)\n u2 = INTERP.UnivariateSpline(numpy.linspace(0,1,10),level2,s=0)\n u3 = INTERP.UnivariateSpline(numpy.linspace(0,1,50),level3,s=0)\n # build the signal on the range 0..1 - then use linspace to sample it\n samples = numpy.linspace(0,1,n)\n return numpy.array([u1(u)+u2(u)+u3(u) for u in samples])",
"def sample(n, seed= 0):\n data = list(range(n))\n while True:\n np.random.seed(seed)\n np.random.shuffle(data)\n yield from data",
"def random_sampling(predictions, number):\n return random.sample(range(len(predictions)), number)",
"def choose_ordered_m_n(li,min,max):\n n_items = random.randrange(min,max+1)\n if n_items == 0:\n return [ ]\n indices = list(range(len(li)))\n sample=random.sample(indices,n_items) # Should it be sorted?\n return [li[i] for i in sorted(sample)]",
"def sample_batch(memory, n):\n batch = rnd.sample(memory, n) # List containing tuples\n return batch",
"def small_sample(num):\n sample = [0] * num\n for i in range(num):\n u = random.randint(0, 3)\n if u == 3:\n sample[i] = -1\n if u == 2:\n sample[i] = 1\n return sample",
"def fast_sample(self, n, items_per=None):\n item_pool = np.arange(self.items.shape[0]) #self.items.copy()\n samples = []\n remaining = n\n samples_per_shuffle = int(item_pool.shape[0]/items_per)\n while remaining > 0:\n random.shuffle(item_pool)\n for i in range(0, min(samples_per_shuffle, remaining) * items_per, items_per):\n samples.append(item_pool[i:i+items_per])\n remaining -= 1\n return np.array(samples)",
"def lhsample(N, bounds):\n \n D = len(bounds)\n sample = vstack(arange(a,b,(b-a)/N) for (a,b) in bounds).T + rand(N,D) / N \n for d in xrange(D): \n shuffle(sample[:,d])\n return sample",
"def sample(self, n, items_per=1, weight=False):\n if weight:\n item_count = self.item_count()\n p = self.n_per_item()\n p = p / p.sum()\n return np.array([np.random.choice(item_count, size=items_per, replace=False, p=p) for _ in range(n)])\n else:\n return self.fast_sample(n, items_per)",
"def sample_from_list(l, probs, max_n=None):\n assert len(l) == len(probs), 'given list l and probs must have same length'\n if max_n is None:\n max_n = len(l)\n sum_probs = sum(probs)\n if sum_probs == 0:\n return []\n probs_ = np.array(probs) / sum_probs\n # we draw max n or |probs_ > 0|\n # noinspection PyTypeChecker\n n = min(max_n, np.sum(probs_ > 0))\n # use idx approach as direct passing to np.random.choice would convert\n # items of l into str\n # noinspection PyUnresolvedReferences\n res = [\n l[idx] for idx in np.random.choice(len(l), n, replace=False, p=probs_)\n ]\n return res"
]
| [
"0.74199224",
"0.7406717",
"0.72772586",
"0.72482455",
"0.71520865",
"0.71470773",
"0.71248704",
"0.69935256",
"0.6898351",
"0.68867844",
"0.6823116",
"0.6823116",
"0.6820484",
"0.67658585",
"0.67542326",
"0.67369694",
"0.66240114",
"0.6598932",
"0.65872574",
"0.65802604",
"0.65547705",
"0.6545067",
"0.6534113",
"0.6505859",
"0.6467375",
"0.64564586",
"0.6423675",
"0.6412382",
"0.6395107",
"0.6382141"
]
| 0.76497614 | 0 |
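A brief usage sketch for the random_sample record above, assuming that function and numpy are in scope. The parameter lists below are illustrative of the ("int" | "float" | "cat" | "loguniform_int") conventions the function parses: a type tag followed by bounds or category values.

import numpy as np

np.random.seed(0)  # reproducible draws for this sketch

print(random_sample(["int", 1, 10], 5))                 # five integers in [1, 10]
print(random_sample(["float", 0.0, 1.0], 5))            # five uniform floats in [0, 1)
print(random_sample(["cat", "relu", "tanh"], 5))        # five categorical picks
print(random_sample(["loguniform_int", 10, 1000], 5))   # powers of ten from {10, 100, 1000}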
Use ArmSettings to set Misty's arm positions. | async def _move_arms_via_settings(self, increment=False, *settings: ArmSettings):
if increment:
act_vals = await self.get_actuator_values(Actuator.left_arm, Actuator.right_arm)
settings = [s.increment(act_vals) for s in settings]
for s in settings:
_actuator_cache.update_from_settings(s)
payload = {k: v for arm in settings for k, v in arm.json.items()}
if payload:
return await self._post('arms/set', payload) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_armed(self, arm):\n pin = 0 if arm else 1\n self.mcp_misc[0].output(pin, MCP23008.LOW)\n self.mcp_misc[0].output(pin, MCP23008.HIGH)\n self.mcp_misc[0].output(pin, MCP23008.LOW)",
"def set_right_arm(self, joint_vals):\n joint_nums = []\n for i in range(1, 8):\n err_code, joint = self.sim.simxGetObjectHandle(self.clientID, \"Baxter_rightArm_joint\" + str(i),\n self.sim.simx_opmode_blocking)\n joint_nums.append(joint)\n for i in range(7):\n self.sim.simxSetJointTargetPosition(self.clientID, joint_nums[i], joint_vals[i],\n self.sim.simx_opmode_oneshot)",
"def useArm(self,**kwargs):\n if not self.mm.modes[self.mm.cur_mode].find(\"left\") != -1:\n rospy.loginfo(\"Using the left arm\")\n self.use_arm[\"left\"] = True\n elif not self.mm.modes[self.mm.cur_mode].find(\"right\") != -1:\n rospy.loginfo(\"Using the right arm\")\n self.use_arm[\"right\"] = True\n else:\n self.baxter.no()\n return\n self.mm.confirm()\n self.baxter.yes()",
"def add_arm(cls, arm):\n cls.ARM = arm",
"def arm(self):\n pass",
"def set_arm(self, track, xclip, ident, value = None):\n if track in self.song().tracks and track.can_be_armed:\n if value in KEYWORDS:\n track.arm = KEYWORDS[value]\n else:\n track.arm = not(track.arm)",
"def __init__(self, settings, name, linear_scaling_factor=0.1, angular_scaling_factor=0.1):\n super(ArmControlInterpreter, self).__init__(rate=0.033)\n\n self.settings = settings\n\n self.linear_scaling_factor = linear_scaling_factor\n self.angular_scaling_factor = angular_scaling_factor\n\n self.arm_velocity_client = actionlib.SimpleActionClient('/arm_controller/velocity', rose_arm_controller_msgs.msg.set_velocityAction)\n self.arm_gripper_client = actionlib.SimpleActionClient('/arm_controller/gripper_width', rose_arm_controller_msgs.msg.set_gripper_widthAction)\n\n self.arm_name = name\n\n self.velocity_goal = set_velocityGoal()\n self.gripper_goal = set_gripper_widthGoal()\n\n self.gripper_width = ArmControlInterpreter.gripper_open\n self.open_close_toggle = self.settings[\"open_close\"]",
"def selectArms(self,**kwargs):\n self.use_arm[\"left\"] = False\n self.use_arm[\"right\"] = False\n entries = {}\n entries[\"Use Left\"] = self.useArm\n entries[\"Use Right\"] = self.useArm\n self.mm.addGenericMenu(\"selectArms\",\"main\",\"Select the arm you want to use\", entries)\n self.mm.loadMenu(\"selectArms\")",
"def choose_arm(self):\n raise NotImplementedError",
"def choose_arm(self,context):\n raise NotImplementedError",
"def choose_arm(self,context):\n raise NotImplementedError",
"def _select_arm(self):\n pass",
"def changeRingSetting(self):\n #Input code to accommodate function of Ring setting",
"async def setArm(self, usercode, mode: str):\n usercode = str(usercode)\n await self.director.sendPostRequest(\n \"/api/v1/items/{}/commands\".format(self.item_id),\n \"PARTITION_ARM\",\n {\"ArmType\": mode, \"UserCode\": usercode},\n )",
"def copy_arm(self, real_arm):\n arm = real_arm.__class__()\n arm.dt = real_arm.dt\n\n # reset arm position to x_0\n arm.reset(q = real_arm.q, dq = real_arm.dq)\n\n return arm, np.hstack([real_arm.q, real_arm.dq])",
"def initRexarm(self):\n self.sm.set_next_state('initialize_rexarm')\n self.ui.SliderFrame.setEnabled(False)\n self.ui.chk_directcontrol.setChecked(False)",
"def antenna_set(self):",
"def lower_arm(self):\r\n # ---------------------------------------------------------------------\r\n # Done: 8. Implement this method; it is a ONE-LINER!\r\n # ---------------------------------------------------------------------\r\n if self.is_calibrated == False:\r\n self.calibrate_arm()\r\n self.move_arm_to_position(0)#America\r",
"def _reset_arm(self, reset_angles):\n self._actuator_comms['UR5'].actuator_buffer.write(self._stopj_packet)\n time.sleep(0.5)\n\n self._reset_packet[1:1 + 6][self._joint_indices] = reset_angles\n self._actuator_comms['UR5'].actuator_buffer.write(self._reset_packet)\n time.sleep(max(self._reset_packet[-2] * 1.5, 2.0))",
"def armLocation(self,length, theta, position = [0,0]):\n #print \"Angle:\",theta\n \n width = 300\n dx = 125\n #dy = 40\n bumpx = 150\n bumpy = length/2\n #width = 300\n \n #dx = 175\n dy = 170\n \n #p1 = (position[0]+dx*cos(theta)+dy*cos(pi/2 - theta),position[1]-dx*sin(theta)+dy*sin(pi/2 - theta))\n #p2 = (p1[0]-length*sin(theta),p1[1]-length*cos(theta))\n #p3 = (p2[0]-width*cos(theta),p2[1]+width*sin(theta))\n #p4 = (p3[0]+length*sin(theta),p3[1]+length*cos(theta))\n\n p1 = (position[0]+dx*cos(theta)+dy*cos(pi/2 - theta),position[1]-dx*sin(theta)+dy*sin(pi/2 - theta))\n p2 = (p1[0]-length*sin(theta),p1[1]-length*cos(theta))\n p3 = (p2[0]-(width+bumpx)*cos(theta),p2[1]+(width+bumpx)*sin(theta))\n p4 = (p3[0]+bumpy*sin(theta),p3[1]+bumpy*cos(theta))\n p5 = (p4[0]+bumpx*cos(theta),p4[1]-bumpx*sin(theta))\n p6 = (p5[0]+(length-bumpy)*sin(theta),p5[1]+(length-bumpy)*cos(theta))\n\n \n #plt.plot([p1[0], p2[0], p3[0], p4[0], p1[0]], [p1[1], p2[1], p3[1], p4[1], p1[1]])\n #plt.axis([-700, 700, -200, 700])\n #plt.show()\n return [p1, p2, p3, p4, p5, p6]",
"def _lift_arm(self, arm: Arm) -> None:\n\n self.reach_for_target(arm=arm,\n target={\"x\": -0.2 if arm == Arm.left else 0.2, \"y\": 0.4, \"z\": 0.3},\n check_if_possible=False,\n stop_on_mitten_collision=False)",
"async def move_arms(self, l_position: Optional[float] = None, l_velocity: Optional[float] = 40,\n r_position: Optional[float] = None, r_velocity: Optional[float] = 40, *, increment=False):\n return await self._move_arms_via_settings(increment,\n ArmSettings('left', l_position, l_velocity),\n ArmSettings('right', r_position, r_velocity))",
"def set_r15(self):\r\n self.decrement_sp()\r\n self.set_a_to_m()\r\n self.set_d_to_m()\r\n self.at_var(\"R15\")\r\n self.set_m_to_d()",
"def reset_world(self):\n print(\"Resetting world\")\n\n if self.real:\n angles = self.real_arm.convertToList(self.real_arm.joint_angles())\n else:\n angles = self.orig_joint_angles\n self.plan()\n self.robot.arm.SetJointValues(angles)\n self.execute()\n self.execution_robot.arm.SetJointValues(angles)\n for bx, b in enumerate(self.pddl_blocks):\n b.set_base_link_pose(self.orig_block_poses[bx])\n print(\"Done\")",
"def move_arm_to_position(self, desired_arm_position):\r\n\r\n\r\n # ---------------------------------------------------------------------\r\n # Done: 7. Implement this method, WITH YOUR INSTRUCTOR.\r\n # ---------------------------------------------------------------------\r\n if self.is_calibrated == False:\r\n self.calibrate_arm()\r\n\r\n if desired_arm_position >= self.arm_motor.get_position():\r\n self.arm_motor.turn_on(self.speed)\r\n while True:\r\n if self.arm_motor.get_position() >= desired_arm_position:\r\n break\r\n\r\n else:\r\n self.arm_motor.turn_on(self.speed*-1)\r\n while True:\r\n if self.arm_motor.get_position() <= desired_arm_position:\r\n break\r\n\r\n self.arm_motor.turn_off()",
"def arm_mirror():\n MOVEMENTS.disable_all_joints()\n while True:\n for i in range(3):\n angle = MOVEMENTS.get_raw_angle(i*2)\n MOVEMENTS.set_raw_angle(i*2 +1, angle)\n sleep(0.01)",
"def relax(self):\n rospy.loginfo('Now Arm will be relax')\n self.go_with2([0, 0, 0, 0, 0])",
"def arm(self, is_armed):\n self.armer(value=is_armed)\n result = MoveRobotResult(actionID='arm',\n arm=is_armed)\n self._as.set_succeeded(result=result)",
"def setup_by_configuration(self, robot_yaml_path: str, settings_yaml_path: str):\n with open(robot_yaml_path, 'r') as fd:\n robot_data = yaml.safe_load(fd)\n # get robot radius\n for body in robot_data['bodies']:\n if body['name'] == \"base_footprint\":\n for footprint in body['footprints']:\n if footprint['type'] == 'circle':\n self._robot_radius = footprint.setdefault(\n 'radius', 0.3)*1.05\n if footprint['radius']:\n self._robot_radius = footprint['radius']*1.05\n # get laser related information\n for plugin in robot_data['plugins']:\n if plugin['type'] == 'Laser':\n laser_angle_min = plugin['angle']['min']\n laser_angle_max = plugin['angle']['max']\n laser_angle_increment = plugin['angle']['increment']\n self._laser_num_beams = int(\n round((laser_angle_max-laser_angle_min)/laser_angle_increment)+1)\n self._laser_max_range = plugin['range']\n\n with open(settings_yaml_path, 'r') as fd:\n setting_data = yaml.safe_load(fd)\n if self._is_action_space_discrete:\n # self._discrete_actions is a list, each element is a dict with the keys [\"name\", 'linear','angular']\n self._discrete_acitons = setting_data['robot']['discrete_actions']\n self.action_space = spaces.Discrete(\n len(self._discrete_acitons))\n else:\n linear_range = setting_data['robot']['continuous_actions']['linear_range']\n angular_range = setting_data['robot']['continuous_actions']['angular_range']\n self.action_space = spaces.Box(low=np.array([linear_range[0], angular_range[0]]),\n high=np.array(\n [linear_range[1], angular_range[1]]),\n dtype=np.float)",
"def arm_calibration(self):\n # DONE: 3. Implement the arm calibration movement by fixing the code below (it has many bugs). It should to this:\n # Command the arm_motor to run forever in the positive direction at max speed.\n # Create an infinite while loop that will block code execution until the touch sensor's is_pressed value is True.\n # Within that loop sleep for 0.01 to avoid running code too fast.\n # Once past the loop the touch sensor must be pressed. So stop the arm motor quickly using the brake stop action.\n # Make a beep sound\n # Now move the arm_motor 14.2 revolutions in the negative direction relative to the current location\n # Note the stop action and speed are already set correctly so we don't need to specify them again\n # Block code execution by waiting for the arm to finish running\n # Make a beep sound\n # Set the arm encoder position to 0 (the last line below is correct to do that, it's new so no bug there)\n\n # Code that attempts to do this task but has MANY bugs (nearly 1 on every line). Fix them!\n self.arm_motor.run_forever(speed_sp=900)\n while not self.touch_sensor.is_pressed:\n time.sleep(0.01)\n self.arm_motor.stop(stop_action='brake')\n ev3.Sound.beep().wait()\n # time.sleep(2)\n # arm_motor.stop(stop_action='brake')\n\n arm_revolutions_for_full_range = 14.2 * 360\n self.arm_motor.run_to_rel_pos(position_sp=-arm_revolutions_for_full_range, speed_sp=900)\n self.arm_motor.wait_while(ev3.Motor.STATE_RUNNING)\n ev3.Sound.beep()\n self.arm_motor.position = 0 # Calibrate the down position as 0 (this line is correct as is)."
]
| [
"0.6547637",
"0.64687747",
"0.64340824",
"0.60914147",
"0.60654134",
"0.60138893",
"0.59958744",
"0.5948498",
"0.5896378",
"0.58270556",
"0.58270556",
"0.58222634",
"0.57513046",
"0.5742643",
"0.5727624",
"0.57268363",
"0.5720622",
"0.56888384",
"0.5683123",
"0.5671134",
"0.5657383",
"0.5656398",
"0.564067",
"0.55364263",
"0.5533641",
"0.55310327",
"0.55261725",
"0.55007875",
"0.5481645",
"0.5439076"
]
| 0.6481327 | 1 |
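A standalone sketch of the payload-merging step used in the _move_arms_via_settings record above. The dataclass below is a hypothetical stand-in for ArmSettings (the real class, the actuator lookups, and the HTTP call appear in the record only by name), and the key names it emits are assumptions; the sketch only illustrates how per-arm json fragments collapse into one request payload via the dict comprehension.

from dataclasses import dataclass
from typing import Optional

@dataclass
class FakeArmSettings:
    # Hypothetical stand-in mirroring the (side, position, velocity) shape used above.
    side: str
    position: Optional[float]
    velocity: Optional[float]

    @property
    def json(self):
        if self.position is None:
            return {}  # an unset arm contributes nothing to the payload
        prefix = self.side.capitalize()
        return {f"{prefix}ArmPosition": self.position, f"{prefix}ArmVelocity": self.velocity}

settings = [FakeArmSettings("left", -20.0, 40), FakeArmSettings("right", None, 40)]
payload = {k: v for arm in settings for k, v in arm.json.items()}
print(payload)  # only the left arm's fields end up in the merged payload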
Stop motion; if `everything` is set, stop everything (i.e. halt). | async def stop(self, *, everything=False):
if everything:
return await self.halt()
return await self._post('drive/stop') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stopMovementAll(self):\n self.stopMovementX()\n self.stopMovementY()\n self.stopMovementZ()",
"def stop_all():\r\n motors.stop_all_motors()\r\n led.set_colour_solid(0)\r\n display.clear()",
"def _stop_all(self):\n # LEDs\n self.cam_led.off\n self.analysis_led[0].off\n self.analysis_led[1].off\n self.error.off\n \n # motors\n self.motor.stop()\n self.wash.stop()",
"def stop(self):\n self.stopAngMovementAll()\n self.stopMovementAll()",
"def stop(self):\n self.turnOffMotors()",
"def stop(self) -> None:\n turnOffMotors()",
"def stop() -> None:",
"def stopall(self):\n\n for i in self.bots:\n try:\n i.stop()\n except:\n pass",
"def stop_all():\n\twhile _running:\n\t\t_running[0].stop(noerror=True)",
"def _stop_bot(_event):\n pass",
"def stop():",
"def stop():",
"def stop():",
"def stop():",
"def stop(self):\n self.halt = True",
"def stopAllMotors():\n return RoboCaller().call(\"stopAllMotors\", \"void\")",
"def stop(self, **kwargs):\n self.turn_off()",
"def stop(self):\r\n self.terminating = True",
"def stop(self):\n self.idle = True\n # pass",
"def stopAll(self, event=None):\n self.paused = False if self.paused else True\n self.gotData = False\n label = \"Resume all Sensors\" if (~self.paused) else \"Pause all Sensors\"\n self.displayPanel1.paused = False if self.displayPanel1.paused else True\n pass",
"def stop(self):\n self.left_motor.stop()\n self.right_motor.stop()",
"def turn_off(self):\n self.robot.stop_simulation()",
"def stop(self):\n self._run = False",
"def stopall(pidevice, **kwargs):\n if not isdeviceavailable([GCS2Commands, GCS21Commands], pidevice):\n raise TypeError('Type %s of pidevice is not supported!' % type(pidevice).__name__)\n\n pidevice.StopAll(noraise=True)\n waitonready(pidevice, **kwargs) # there are controllers that need some time to halt all axes",
"def stop(self) -> None:\n ...",
"def stop(self):\n self.right_motor.stop(stop_action='brake')\n self.left_motor.stop(stop_action='brake')",
"def stop(self):\n self.motor.stop()",
"def stopDetection(self):\n self.statusWrite(\"stop\")\n self.p.sleep()\n self.birdHere = 0",
"def stop(self):\n self._should_run = False",
"def stop(self):\n self.running = False"
]
| [
"0.6458159",
"0.6419564",
"0.64075524",
"0.62799674",
"0.6278357",
"0.6228199",
"0.62213844",
"0.6205745",
"0.60997784",
"0.60664624",
"0.60289675",
"0.60289675",
"0.60289675",
"0.60289675",
"0.6011918",
"0.60026646",
"0.59904975",
"0.5956328",
"0.59468657",
"0.5929406",
"0.5912169",
"0.5910342",
"0.58729124",
"0.58037853",
"0.57924706",
"0.57901996",
"0.5763019",
"0.576064",
"0.5759937",
"0.575887"
]
| 0.7449722 | 0 |
Determine whether elements in a list are monotonic, i.e. unique elements are clustered together; e.g. [5,5,3,4] is, [5,3,5] is not. | def is_monotonic(items: Sequence) -> bool:
prev_elements = set({items[0]})
prev_item = items[0]
for item in items:
if item != prev_item:
if item in prev_elements:
return False
prev_item = item
prev_elements.add(item)
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def checkmonotonic(iterable, piecewise=False):\n monotonic = [True] + [x < y for x, y in zip(iterable, iterable[1:])]\n if piecewise is True:\n return monotonic\n else:\n return all(monotonic)",
"def is_unique_and_sorted(self):\n return all((self(i) < self(i+1) for i in range(len(self)-1))) # all() returns true if all the items in the iterable are TRUE",
"def has_duplicates(l):\r\n return len(set(l)) < len(l)",
"def has_duplicates(list) :\n copy = list[:]\n copy.sort()\n for item in range(len(list)-1):\n if copy[item] == copy[item + 1]:\n return True;\n return False;",
"def is_unique(L):\n for i in range(len(L)):\n for j in range(len(L)):\n if (i!=j)and (L[i]==L[j]):\n return False\n return True",
"def uniform_list_check(value_list):\n return reduce((lambda acc, value: acc and value == value_list[0]), value_list, True)",
"def has_duplicates(items):\n items = list()\n items.sort()\n for i in range(len(items) - 1):\n if items[i] == items[i + 1]:\n return True\n return False",
"def checkUniqueness_(self, list):\n\n uniqueList = []\n # use a list comprehension statement (takes a while to understand) \n\n [uniqueList.append(it) for it in list if not uniqueList.count(it)]\n\n return (len(list)==len(uniqueList))",
"def all_different(l):\n seen = set()\n for i in l:\n if i in seen:\n return False\n seen.add(i)\n return True",
"def is_consecutive(a_list):\n current_value = a_list[0]-1\n for number in a_list:\n current_value +=1\n if current_value != number:\n return False\n return True",
"def are_consecutive(int_list):\n\n if set(np.diff(int_list)) == set([1]):\n return True\n else:\n return False",
"def has_sorted(l):\n return all(l[i] <= l[i+1] for i in xrange(len(l)-1))",
"def smallest_does_not_occur(list_):\n length = len(list_)\n if list_[0] != 1:\n return 1\n does_not_occur = [\n list_[each] + 1\n for each in range(length - 1)\n if list_[each + 1] - list_[each] > 1\n ]\n return (\n min(does_not_occur)\n if len(does_not_occur) > 0\n else list_[length - 1] + 1\n )",
"def is_monotonic_decreasing(self) -> bool:\n # monotonic decreasing if and only if reverse is monotonic increasing\n return self[::-1].is_monotonic_increasing",
"def list_should_not_contain_duplicates(self,list_,msg=None):\r\n if not isinstance(list_,list):\r\n list_= list(list_)\r\n dupes = []\r\n for item in list_:\r\n if item not in dupes:\r\n count = list_.count(item)\r\n if count >1:\r\n logger.info(\" '%s' found %d times\" %(item,count))\r\n dupes.append(item)\r\n if dupes:\r\n raise AssertionError(msg or '%s found multiple times' %seq2str(dupes))",
"def has_duplicates_set(L):\r\n return len(L) != len(set(L))",
"def remove_list_redundancies(l):\n return sorted(list(set(l)), lambda a, b : l.index(a) - l.index(b))",
"def has_duplicates(L):\r\n unique = []\r\n for e in L:\r\n if e in unique:\r\n return True\r\n unique.append(e)\r\n return False",
"def has_duplicates(list):\n for i in list:\n if list.count(i) > 1:\n return True\n else:\n return False",
"def is_sorted_list(list_):\n prev = -1\n for item in list_:\n if item < prev:\n return False\n prev = item\n return True",
"def remove_consequetive_duplicates(your_list):\n out = [v for i, v in enumerate(your_list) if i == 0 or v != your_list[i-1]]\n if type(your_list) == np.ndarray:\n return np.array(out)\n return out",
"def check_sorted(thelist):\n it = iter(thelist)\n next(it, None)\n return all(b >= a for a, b in zip(thelist, it))",
"def has_duplicates(lst):\n\n return len(lst) != len(set(lst))",
"def unique_list(input_list):\n output_list = []\n if len(input_list) > 0:\n dim = _sp.shape(input_list)[1]\n for i in input_list:\n match = False\n for j in output_list:\n if dim == 3:\n if i[0] == j[0] and i[1] == j[1] and i[2] == j[2]:\n match = True\n elif dim == 2:\n if i[0] == j[0] and i[1] == j[1]:\n match = True\n elif dim == 1:\n if i[0] == j[0]:\n match = True\n if match is False:\n output_list.append(i)\n return output_list",
"def isMonotonic(self):\n\n #assume monotonic non-increasing\n nonIncreasingMono = True\n contradictions = 0\n prev=self.fluorescence[0]\n for point in self.fluorescence[1:]:\n if point > prev+self.monoThresh:\n #found contradiction, even with forgive threshold, need (5) contradictions with less than (5)\n #non-contradicting points between them to be able to say it is not monotonically non-increasing\n contradictions+=1\n \n elif point < prev+self.monoThresh:\n #lower contradiction counter, if points are non-increasing as assumed, but not below 0\n if contradictions!=0:\n contradictions-=1\n \n #when point=previous do nothing with the contradiction counter\n \n if contradictions == 5:\n #if 5 contradictions, it is definately not monotonic non-increasing\n nonIncreasingMono=False\n break\n prev=point\n \n #if it is non-increasing set mono to true and end now\n if nonIncreasingMono:\n self.mono = True\n return\n\n #if not non-increasing, then func is not monotonic\n self.mono = False\n return",
"def all_consecutive(s):\n for (x, y) in pairwise(sorted(s)):\n if y - x != 1:\n return False\n return True",
"def has_duplicates(t):\n\n\t# Each element will check each following elements for its duplicate\n\t# An element looking for duplicates before itself is redundant\n\tfor i in range(len(t)):\n\t\tfor n in t[i + 1:]:\n\t\t\tif t[i] == n:\n\t\t\t\treturn True\n\treturn False",
"def containsDuplciateOptimized(self, nums):\n nums = sorted(nums)\n for i in range(1, len(nums)):\n if nums[i] == nums[i - 1]:\n return True\n return False",
"def remove_identical(list):\n seen = set()\n seen_add = seen.add\n return [x for x in list if not (x in seen or seen_add(x))]",
"def uniqueCheckLoop(aList):\r\n n = len(aList)\r\n for i in range(n-1):\r\n for j in range(i+1, n):\r\n if aList[i] == aList[j]:\r\n return True\r\n return False"
]
| [
"0.6754721",
"0.62469804",
"0.6069717",
"0.60431504",
"0.6037963",
"0.601249",
"0.5946327",
"0.5945903",
"0.59423465",
"0.588357",
"0.587069",
"0.58562666",
"0.58260524",
"0.58257693",
"0.57807654",
"0.57767946",
"0.57500005",
"0.57214695",
"0.571543",
"0.57055587",
"0.570405",
"0.56901115",
"0.5673648",
"0.5665742",
"0.56641203",
"0.5659952",
"0.5649676",
"0.56358755",
"0.56207687",
"0.5620484"
]
| 0.7788266 | 0 |
Round up to the nearest multiple of base. | def upround(x, base):
return base * math.ceil(float(x) / base) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def roundUP(x):\n\treturn int(ceil(x / 10.0)) * 10",
"def round_up(number, decimals=0):\n multiplier = 10 ** decimals\n return math.ceil(number * multiplier) / multiplier",
"def _round_to_nearest_multiple_up(x, n=5):\n return n * math.ceil(float(x) / n)",
"def round_up(amount: Decimal) -> Decimal:\n return Decimal(amount.quantize(Decimal('.01'), rounding=ROUND_CEILING))",
"def round_up(x, sig=2):\n dig = pow(10., sig - int(math.floor(math.log10(abs(x)))) - 1)\n return math.ceil(x * dig) / dig",
"def round_half_up(number):\n return number.quantize(decimal.Decimal(\"0.01\"), rounding=decimal.ROUND_HALF_UP)",
"def round_down(x):\n return int(math.floor(x / 10.0)) * 10",
"def _round_to_nearest_multiple_down(x, n=5):\n return n * math.floor(float(x) / n)",
"def downround(x, base):\n return base * math.floor(float(x) / base)",
"def pop_round(value):\n return commify(round_to_nearest(value))",
"def round(x):\n return int(x + copysign(0.5, x))",
"def divide_and_round_up(x, y):\n return ((x - 1) // y) + 1",
"def round_to_nearest(value, round_value=1000):\n if round_value < 1:\n ds = str(round_value)\n nd = len(ds) - (ds.find('.') + 1)\n value = value * 10**nd\n round_value = round_value * 10**nd\n value = int(round(float(value) / round_value) * round_value)\n value = float(value) / 10**nd\n else:\n value = int(round(float(value) / round_value) * round_value)\n\n return value",
"def roundup100(x):\n\treturn int(math.ceil(x / 100.0)) * 100",
"def round_up(number: int, multiple: int) -> int:\n assert multiple != 0\n\n return int((number + multiple - 1) / multiple) * multiple",
"def ceil(x):\n return 0.0",
"def ROUNDDOWN(val, decimal_places):\n # Cute alternative:\n # return val // 10**-decimal_places / 10**decimal_places\n multiplier = 10**decimal_places\n if val > 0:\n return math.floor(val * multiplier) / multiplier\n return math.ceil(val * multiplier) / multiplier",
"def round_down(x, m):\n return int(m * round(float(x) / m))",
"def roundAlwaysUp( toRound, end = 10 ):\n\tend = abs( int(end) )\n\n\tif end == 0:\n\t\tend = 10\n\ttimes = toRound/end\n\n\tif times >= 0:\n\t\ttimes = times + 1\n\telse:\n\t\ttimes = times - 1\n\treturn ( int( times ) )*end;",
"def round_to_1(val):\n return round(val, -int(np.floor(np.log10(abs(val)))))",
"def round_to(n, precision):\n correction = 0.5 if n >= 0 else -0.5\n return int(n / precision + correction) * precision",
"def nearest(n, number):\n return math.floor((n / number) + 0.5) * number",
"def py3round(number):\n if abs(round(number) - number) == 0.5:\n return int(2.0 * round(number / 2.0))\n\n return int(round(number))",
"def round_down(val):\n floor_val = val\n\n try:\n if not is_empty(val):\n float_val = float(val)\n floor_val = math.floor(float_val)\n except Exception as e:\n pass\n\n return floor_val",
"def RoundUp(value, boundary):\n return (value + boundary - 1) & ~(boundary - 1)",
"def round_to(self, value, res):\n if res == 0:\n return round(value)\n\n return res * (round(value/res))",
"def round_to(x, y):\n return round(x, -int(floor(log10(abs(y)))))",
"def roundup_int(x, m):\n\treturn int(math.ceil(x / float(m))) * m",
"def __round(num):\n return float(round(decimal.Decimal(num), DataGen.precision))",
"def round_to_multiple_of(val, divisor, round_up_bias=0.9):\n assert 0.0 < round_up_bias < 1.0\n new_val = max(divisor, int(val + divisor / 2) // divisor * divisor)\n return new_val if new_val >= round_up_bias * val else new_val + divisor"
]
| [
"0.7775397",
"0.7578685",
"0.7482397",
"0.7408641",
"0.73013836",
"0.72726816",
"0.7230321",
"0.7196287",
"0.71550983",
"0.712848",
"0.71170646",
"0.71084553",
"0.69919866",
"0.69717807",
"0.69040406",
"0.6871906",
"0.6868747",
"0.6845126",
"0.6818186",
"0.6784912",
"0.67466515",
"0.67152303",
"0.6705646",
"0.67023426",
"0.6695287",
"0.6692358",
"0.6690398",
"0.66834366",
"0.66824895",
"0.6677106"
]
| 0.7694207 | 1 |
Given two collections of integer ranges, return a list of ranges in which both inputs overlap. | def overlapping_ranges(
ranges_1: Sequence[Tuple[int, int]],
ranges_2: Sequence[Tuple[int, int]],
) -> List[Tuple[int, int]]:
return [
(max(first[0], second[0]), min(first[1], second[1]))
for first in ranges_1
for second in ranges_2
if max(first[0], second[0]) < min(first[1], second[1])
] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def range_overlap(range1, range2):\n return range(max(range1[0], range2[0]), min(range1[1], range2[1]))",
"def ranges_overlap(start1, end1, start2, end2):\n return start1 <= end2 and end1 >= start2",
"def find_common_bounds(bounds_1, bounds_2):\n new_bounds = []\n for (lower_1, upper_1), (lower_2, upper_2) in itertools.product(bounds_1, bounds_2):\n # Ignore this region if it's outside the current limits\n if upper_1 <= lower_2 or upper_2 <= lower_1:\n continue\n new_bounds.append(Region(max(lower_1, lower_2), min(upper_1, upper_2)))\n return new_bounds",
"def merge_ranges():",
"def _range_overapped(self, x, y):\n xs = set( range(x[0], x[1]))\n ys = set( range(y[0], y[1]))\n return xs.intersection(ys)",
"def range_overlap(ranges):\n max_left = 0.0\n min_right = 1.0\n for (left, right) in ranges:\n max_left = max(max_left, left)\n min_right = min(min_right, right)\n return (max_left, min_right)",
"def overlap(start_idx1, end_idx1, start_idx2, end_idx2):\n head = min(end_idx1, end_idx2)\n tail = max(start_idx1, start_idx2)\n return head >= tail",
"def _combine_bounds(bounds1, bounds2):\n \n bounds_comb = np.zeros((2, 2), dtype=int)\n bounds_comb[0, 0] = bounds1[0, 0] if (bounds1[0, 0] < bounds2[0, 0]) else bounds2[0, 0]\n bounds_comb[1, 0] = bounds1[1, 0] if (bounds1[1, 0] < bounds2[1, 0]) else bounds2[1, 0]\n bounds_comb[0, 1] = bounds1[0, 1] if (bounds1[0, 1] > bounds2[0, 1]) else bounds2[0, 1]\n bounds_comb[1, 1] = bounds1[1, 1] if (bounds1[1, 1] > bounds2[1, 1]) else bounds2[1, 1]\n \n return bounds_comb",
"def range_union(ranges):\n union = []\n for r in sorted(ranges, key=lambda r: r.start):\n if len(union) > 0 and union[-1].stop >= r.start:\n union[-1] = range(union[-1].start, max(union[-1].stop, r.stop))\n else:\n union.append(r)\n return union",
"def overlap(range1, range2):\n if range1[0] <= range2[1] and range2[0] <= range1[1]:\n return True\n return False",
"def overlaps(interval,intervals):\n return [x for x in intervals if interval.overlaps(x)]",
"def _combine_ind_ranges(ind_ranges_to_merge):\n ind_ranges_to_merge = sorted(ind_ranges_to_merge)\n stack = []\n result = []\n for curr in ind_ranges_to_merge:\n if len(stack) == 0:\n stack.append(curr)\n elif stack[-1][-1] >= curr[0]:\n prev = stack.pop()\n merged = sorted(list(set(prev + curr)))\n stack.append(merged)\n else:\n prev = stack.pop()\n result.append(prev)\n stack.append(curr)\n result += stack\n return result",
"def overlap(start1, end1, start2, end2):\n return not (end1 < start2 or end2 < start1)",
"def overlap_len(range1, range2):\n return min(range1[1], range2[1]) - max(range1[0], range2[0])",
"def dates_intervals_are_overlapped(start_1, end_1, start_2, end_2):\n return end_1 >= start_2 and end_2 >= start_1",
"def expand_ranges(ranges):\n for low, high in low_high_pairs:\n for j in range(low, high+1):\n yield j",
"def overlap(t1start, t1end, t2start, t2end):\n\n return (t1start <= t2start <= t1end) or (t2start <= t1start <= t2end)",
"def from_inclusive(a, b):\n c = int(b > a)*2-1\n return range(a, b+c, c)",
"def get_overlap(a, b):\n return max(0, min(a[1], b[1]) - max(a[0], b[0]))",
"def overlaps(a, b, **kwargs):\n return lib.overlaps(a, b, **kwargs)",
"def overlap(a, b):\n return not(a[2]<=b[0] or a[3]<=b[1] or a[0]>=b[2] or a[1]>=b[3])",
"def calculate_ranges(a, b):\n try:\n ranges = list(range(0, a, a//b))\n if ranges[-1] != a:\n ranges.append(a)\n return ranges\n except ValueError:\n return [0, a]",
"def getCoveringRanges( self, left_ranges, right_ranges, parent_ranges ):\n \n child_ranges = map( lambda x: (x[0], x[1], 0), left_ranges)\n child_ranges += map( lambda x: (x[0], x[1], 1), right_ranges)\n \n child_ranges.sort()\n parent_ranges.sort()\n \n new_left_ranges = []\n new_right_ranges = []\n \n parent_index = 0\n last_to = 0\n \n parent_left, parent_right = parent_ranges[parent_index]\n\n self.debug( \"child_ranges=%s\" % str(child_ranges) )\n self.debug( \"parent_ranges=%s\" % str(parent_ranges))\n \n last_left, last_right, last_is_right = child_ranges[0]\n \n for this_left, this_right, this_is_right in child_ranges[1:]:\n \n ## look at previous segment last_left to last_right:\n ## find matching parent_index:\n old_parent_index = parent_index\n while (min(parent_right, last_right) - max(parent_left, last_left)) < 0:\n parent_index += 1\n if parent_index == len(parent_ranges): break\n parent_left, parent_right = parent_ranges[parent_index]\n \n ## skip fragments that do not overlap\n if parent_index == len(parent_ranges):\n parent_index = old_parent_index\n last_left, last_right, last_is_right = this_left, this_right, this_is_right\n continue\n \n ## firstly: make segment covering\n new_left = min(parent_left, last_left)\n new_right = min(max(parent_right, last_right), this_left)\n \n if last_is_right:\n new_right_ranges.append((new_left, new_right))\n else:\n new_left_ranges.append((new_left, new_right))\n \n ## reduce parent on left side\n parent_left=max(new_right, parent_left)\n \n last_left, last_right, last_is_right = this_left, this_right, this_is_right\n \n ## process last segment\n while (min(parent_right, last_right) - max(parent_left, last_left)) < 0:\n parent_index += 1\n if parent_index >= len(parent_ranges): break \n parent_left, parent_right = parent_ranges[parent_index]\n \n new_left = min(parent_left, last_left)\n new_right = max(parent_right, last_right)\n \n if last_is_right:\n new_right_ranges.append((new_left, new_right))\n else:\n new_left_ranges.append((new_left, new_right))\n \n self.debug( \"old left ranges=%s\" % str(left_ranges))\n self.debug( \"new left ranges=%s\" % str(new_left_ranges))\n self.debug( \"old right ranges=%s\" % str(right_ranges))\n self.debug( \"new right ranges=%s\" % str(new_right_ranges))\n \n return new_left_ranges, new_right_ranges",
"def overlaps(*objs):\n return set.intersection(*(set(range(*extent(obj))) for obj in objs))",
"def overlap(annotations1, annotations2):\n return [val for val in annotations1 if val in annotations2]",
"def collapse_ranges (ranges):\n\n # FIXME: does tuple and set conversion really add anything?\n\n # Ranges must be unique: we do not count timings when they start and end at\n # exactly the same time. By using a set, we do not repeat ranges.\n # we convert to a list before return.\n final = set()\n\n # return empty list if given an empty list\n if not ranges:\n return final\n\n START = 0\n END = 1\n\n # sort ranges into a copy list, by their start time\n _ranges = sorted(ranges, key=lambda x: x[START])\n\n # sat 'base' to the earliest range (smallest starting point)\n base = _ranges[0]\n\n for _range in _ranges[1:]:\n\n # if range is empty, skip it\n if _range[START] == _range[END]:\n continue\n\n if _range[START] <= base[END]:\n # ranges overlap -- extend the base\n base[END] = max(base[END], _range[END])\n\n else:\n # ranges don't overlap -- move base to final, and current _range\n # becomes the new base\n final.add(tuple(base))\n base = _range\n\n # termination: push last base to final\n final.add(tuple(base))\n\n # Return final as list of list in case a mutable type is needed.\n return [list(b) for b in final]",
"def _get_sharded_ranges(\n begin,\n end,\n max_length,\n):\n if max_length <= 0:\n raise ValueError(\"max_length <= 0.\")\n length = end - begin\n if length <= max_length:\n return [(begin, end)]\n pivot = begin + length // 2\n return (_get_sharded_ranges(begin, pivot, max_length) +\n _get_sharded_ranges(pivot, end, max_length))",
"def ranges_conflicting_with_ip(ip_address, ip_ranges):\n return [(pool_name, ip_range) for (pool_name, ip_range) in ip_ranges\n if ip_address in ip_range]",
"def gen_ranges(starts, ends):\n if starts.size != ends.size:\n raise ValueError(\"starts and ends must be same size\")\n if not ((ends - starts) > 0).all():\n raise ValueError(\"all ends must be greater than starts\")\n lengths = ends - starts\n segs = ak.cumsum(lengths) - lengths\n totlen = lengths.sum()\n slices = ak.ones(totlen, dtype=ak.int64)\n diffs = ak.concatenate((ak.array([starts[0]]), \n starts[1:] - starts[:-1] - lengths[:-1] + 1))\n slices[segs] = diffs\n return segs, ak.cumsum(slices)",
"def _range_contains(self, a, b):\n\t\treturn b[0] >= a[0] and b[-1] <= a[-1]"
]
| [
"0.8035099",
"0.75984454",
"0.73328453",
"0.7216684",
"0.71589375",
"0.7155597",
"0.7152191",
"0.70526004",
"0.7042103",
"0.7029379",
"0.69834244",
"0.6971435",
"0.6906623",
"0.6870648",
"0.6858326",
"0.67309386",
"0.6729278",
"0.6715122",
"0.6676067",
"0.66637343",
"0.6651569",
"0.6591904",
"0.65605175",
"0.6551512",
"0.65108174",
"0.64265287",
"0.642028",
"0.641488",
"0.64128655",
"0.6384727"
]
| 0.8585493 | 0 |
The purpose of this function is to process 3D MRI images of mouse skulls, package the files, and send them to Google Cloud Storage for a machine learning model to fetch and predict facial keypoints on. The .mnc files are the image arrays and the .tag files are the corresponding facial keypoints. | def main():
skulls_folder = os.listdir(RAW_IMAGE_DIRECTORY)
# fetch and sort the .mnc and .tag files
mnc_files = [f for f in skulls_folder if 'mnc' in f]
tag_files = [f for f in skulls_folder if 'tag' in f]
mnc_names = [i.split('.mnc')[0] for i in mnc_files]
mnc_files.sort()
tag_files.sort()
mnc_names.sort()
# Process and package ndarrays as tuples inside npy file
package_to_npy(RAW_IMAGE_DIRECTORY, mnc_files, tag_files, mnc_names)
print('\n' * 5)
# Push the npy files to GCP Cloud Storage
upload_to_gcp(PROCESSED_IMAGE_DIRECTORY, GCP_PROJECT_NAME, GCP_BUCKET_NAME) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _preprocess(self):\n print(\"Note: if root path is changed, the previously generated json files need to be re-generated (delete them first)\")\n if osp.exists(self.imgs_labeled_dir) and \\\n osp.exists(self.imgs_detected_dir) and \\\n osp.exists(self.split_classic_det_json_path) and \\\n osp.exists(self.split_classic_lab_json_path) and \\\n osp.exists(self.split_new_det_json_path) and \\\n osp.exists(self.split_new_lab_json_path):\n return\n\n mkdir_if_missing(self.imgs_detected_dir)\n mkdir_if_missing(self.imgs_labeled_dir)\n\n print(\"Extract image data from {} and save as png\".format(self.raw_mat_path))\n mat = h5py.File(self.raw_mat_path, 'r')\n\n def _deref(ref):\n return mat[ref][:].T\n\n def _process_images(img_refs, campid, pid, save_dir):\n img_paths = [] # Note: some persons only have images for one view\n for imgid, img_ref in enumerate(img_refs):\n img = _deref(img_ref)\n # skip empty cell\n if img.size == 0 or img.ndim < 3: continue\n # images are saved with the following format, index-1 (ensure uniqueness)\n # campid: index of camera pair (1-5)\n # pid: index of person in 'campid'-th camera pair\n # viewid: index of view, {1, 2}\n # imgid: index of image, (1-10)\n viewid = 1 if imgid < 5 else 2\n img_name = '{:01d}_{:03d}_{:01d}_{:02d}.png'.format(campid+1, pid+1, viewid, imgid+1)\n img_path = osp.join(save_dir, img_name)\n imageio.imwrite(img_path, img)\n img_paths.append(img_path)\n return img_paths\n\n def _extract_img(name):\n print(\"Processing {} images (extract and save) ...\".format(name))\n meta_data = []\n imgs_dir = self.imgs_detected_dir if name == 'detected' else self.imgs_labeled_dir\n for campid, camp_ref in enumerate(mat[name][0]):\n camp = _deref(camp_ref)\n num_pids = camp.shape[0]\n for pid in range(num_pids):\n img_paths = _process_images(camp[pid,:], campid, pid, imgs_dir)\n assert len(img_paths) > 0, \"campid{}-pid{} has no images\".format(campid, pid)\n meta_data.append((campid+1, pid+1, img_paths))\n print(\"done camera pair {} with {} identities\".format(campid+1, num_pids))\n return meta_data\n\n meta_detected = _extract_img('detected')\n meta_labeled = _extract_img('labeled')\n\n def _extract_classic_split(meta_data, test_split):\n train, test = [], []\n num_train_pids, num_test_pids = 0, 0\n num_train_imgs, num_test_imgs = 0, 0\n for i, (campid, pid, img_paths) in enumerate(meta_data):\n \n if [campid, pid] in test_split:\n for img_path in img_paths:\n camid = int(osp.basename(img_path).split('_')[2])\n test.append((img_path, num_test_pids, camid))\n num_test_pids += 1\n num_test_imgs += len(img_paths)\n else:\n for img_path in img_paths:\n camid = int(osp.basename(img_path).split('_')[2])\n train.append((img_path, num_train_pids, camid))\n num_train_pids += 1\n num_train_imgs += len(img_paths)\n return train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs\n\n print(\"Creating classic splits (# = 20) ...\")\n splits_classic_det, splits_classic_lab = [], []\n for split_ref in mat['testsets'][0]:\n test_split = _deref(split_ref).tolist()\n\n # create split for detected images\n train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs = \\\n _extract_classic_split(meta_detected, test_split)\n splits_classic_det.append({\n 'train': train, 'query': test, 'gallery': test,\n 'num_train_pids': num_train_pids, 'num_train_imgs': num_train_imgs,\n 'num_query_pids': num_test_pids, 'num_query_imgs': num_test_imgs,\n 'num_gallery_pids': num_test_pids, 'num_gallery_imgs': num_test_imgs,\n })\n\n # create split for 
labeled images\n train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs = \\\n _extract_classic_split(meta_labeled, test_split)\n splits_classic_lab.append({\n 'train': train, 'query': test, 'gallery': test,\n 'num_train_pids': num_train_pids, 'num_train_imgs': num_train_imgs,\n 'num_query_pids': num_test_pids, 'num_query_imgs': num_test_imgs,\n 'num_gallery_pids': num_test_pids, 'num_gallery_imgs': num_test_imgs,\n })\n \n write_json(splits_classic_det, self.split_classic_det_json_path)\n write_json(splits_classic_lab, self.split_classic_lab_json_path)\n\n def _extract_set(filelist, pids, pid2label, idxs, img_dir, relabel):\n tmp_set = []\n unique_pids = set()\n for idx in idxs:\n img_name = filelist[idx][0]\n camid = int(img_name.split('_')[2])\n pid = pids[idx]\n if relabel: pid = pid2label[pid]\n img_path = osp.join(img_dir, img_name)\n tmp_set.append((img_path, int(pid), camid))\n unique_pids.add(pid)\n return tmp_set, len(unique_pids), len(idxs)\n\n def _extract_new_split(split_dict, img_dir):\n train_idxs = split_dict['train_idx'].flatten() - 1 # index-0\n pids = split_dict['labels'].flatten()\n train_pids = set(pids[train_idxs])\n pid2label = {pid: label for label, pid in enumerate(train_pids)}\n query_idxs = split_dict['query_idx'].flatten() - 1\n gallery_idxs = split_dict['gallery_idx'].flatten() - 1\n filelist = split_dict['filelist'].flatten()\n train_info = _extract_set(filelist, pids, pid2label, train_idxs, img_dir, relabel=True)\n query_info = _extract_set(filelist, pids, pid2label, query_idxs, img_dir, relabel=False)\n gallery_info = _extract_set(filelist, pids, pid2label, gallery_idxs, img_dir, relabel=False)\n return train_info, query_info, gallery_info\n\n print(\"Creating new splits for detected images (767/700) ...\")\n train_info, query_info, gallery_info = _extract_new_split(\n loadmat(self.split_new_det_mat_path),\n self.imgs_detected_dir,\n )\n splits = [{\n 'train': train_info[0], 'query': query_info[0], 'gallery': gallery_info[0],\n 'num_train_pids': train_info[1], 'num_train_imgs': train_info[2],\n 'num_query_pids': query_info[1], 'num_query_imgs': query_info[2],\n 'num_gallery_pids': gallery_info[1], 'num_gallery_imgs': gallery_info[2],\n }]\n write_json(splits, self.split_new_det_json_path)\n\n print(\"Creating new splits for labeled images (767/700) ...\")\n train_info, query_info, gallery_info = _extract_new_split(\n loadmat(self.split_new_lab_mat_path),\n self.imgs_labeled_dir,\n )\n splits = [{\n 'train': train_info[0], 'query': query_info[0], 'gallery': gallery_info[0],\n 'num_train_pids': train_info[1], 'num_train_imgs': train_info[2],\n 'num_query_pids': query_info[1], 'num_query_imgs': query_info[2],\n 'num_gallery_pids': gallery_info[1], 'num_gallery_imgs': gallery_info[2],\n }]\n write_json(splits, self.split_new_lab_json_path)",
"def fill_3d_gt_poses(dataset_file, train_set):\n with open(dataset_file, 'rb') as handle:\n dataset = pickle.load(handle)\n\n if train_set:\n subjects = subjects_train\n else:\n subjects = subjects_test\n\n for subject in subjects:\n data_path = Path('data/') / subject / 'MyPoseFeatures' / 'D3_Positions_mono'\n files = list(sorted(data_path.glob('*.cdf')))\n assert len(files) > 0 # something is wrong with data paths...\n for file in files:\n cdf_file = cdflib.CDF(file)\n poses_3d = cdf_file[0].squeeze()\n assert poses_3d.shape[1] == 96\n # select 17 joints:\n joints = [0, 1, 2, 3, 6, 7, 8, 12, 13, 14, 15, 17, 18, 19, 25, 26, 27]\n poses_3d = poses_3d.reshape(-1, 32, 3)[:, joints]\n poses_3d = poses_3d.swapaxes(1, 2).reshape(-1, 3*17)\n # select every 4th frame\n indices = np.arange(3, len(poses_3d), 4)\n poses_3d = poses_3d[indices, :]\n\n # extract action, subaction and cam from filename\n filename = str(file.stem)\n if ' ' in filename:\n action, rest_info = filename.split(\" \")\n else:\n action, rest_info = filename.split(\".\")\n\n # rename for consistency:\n # TakingPhoto -> Photo, WalkingDog -> WalkDog\n if action == 'TakingPhoto':\n action = 'Photo'\n if action == 'WalkingDog':\n action = 'WalkDog'\n\n # take care of inconsistent naming...\n if subject == 'S1':\n if action == 'Eating':\n # S1 Eating (., 2)\n rest_info = fix_dot_2(rest_info)\n if action == 'Sitting':\n # S1 Sitting (1, 2)\n rest_info = fix_1_2(rest_info)\n if action == 'SittingDown':\n # S1 SittingDown (., 2)\n rest_info = fix_dot_2(rest_info)\n\n if subject == 'S5':\n if action == 'Directions':\n # S5 Directions (1, 2)\n rest_info = fix_1_2(rest_info)\n if action == 'Discussion':\n # S5 Discussion (2, 3)\n rest_info = fix_2_3(rest_info)\n if action == 'Greeting':\n # S5 Greeting (1, 2)\n rest_info = fix_1_2(rest_info)\n if action == 'Photo':\n # S5 Photo (., 2)\n rest_info = fix_dot_2(rest_info)\n if action == 'Waiting':\n # S5 Waiting (1, 2)\n rest_info = fix_1_2(rest_info)\n\n if subject == 'S6':\n if action == 'Eating':\n # S6 Eating (1, 2)\n rest_info = fix_1_2(rest_info)\n if action == 'Posing':\n # S6 Posing (., 2)\n rest_info = fix_dot_2(rest_info)\n if action == 'Sitting':\n # S6 Sitting (1,2)\n rest_info = fix_1_2(rest_info)\n if action == 'Waiting':\n # S6 Waiting (., 3)\n rest_info = fix_dot_3(rest_info)\n\n if subject == 'S7':\n if action == 'Phoning':\n # S7 Phoning (., 2)\n rest_info = fix_dot_2(rest_info)\n if action == 'Waiting':\n # S7 Waiting (1, 2)\n rest_info = fix_1_2(rest_info)\n if action == 'Walking':\n # S7 Walking (1, 2)\n rest_info = fix_1_2(rest_info)\n\n if subject == 'S8':\n if action == 'WalkTogether':\n # S8 WalkTogether (1, 2)\n rest_info = fix_1_2(rest_info)\n\n if subject == 'S9':\n if action == 'Discussion':\n # S9 discussion (1, 2)\n rest_info = fix_1_2(rest_info)\n\n if subject == 'S11':\n if action == 'Discussion':\n rest_info = fix_1_2(rest_info)\n if action == 'Greeting':\n # S11 greeting (., 2)\n rest_info = fix_dot_2(rest_info)\n if action == 'Phoning':\n # S11 phoning (2,3)\n rest_info = fix_2_3(rest_info)\n if action == 'Smoking':\n # S11 smoking (., 2)\n if '2.' in rest_info:\n # replace 2. with .\n rest_info = fix_dot_2(rest_info)\n\n assert rest_info[:2] == '1.' or '.' not in rest_info\n if '.' 
not in rest_info:\n subact = '0'\n cam = rest_info\n else:\n subact = '1'\n cam = rest_info.split('.')[-1]\n\n if subject == 'S5' and subact == '1' and action == 'Waiting' and cam == '55011271':\n continue\n if subject == 'S11' and subact == '0' and action == 'Directions' and cam == '54138969':\n continue\n\n used_frames = len(dataset[subject][action][subact][cam]['imgpath'])\n assert used_frames <= len(poses_3d)\n poses_3d = poses_3d[:used_frames]\n dataset[subject][action][subact][cam]['3d_gt'] = poses_3d\n\n if train_set:\n out_file = c.train_file\n else:\n out_file = c.test_file\n with open(out_file, 'wb') as handle:\n pickle.dump(dataset, handle, protocol=pickle.HIGHEST_PROTOCOL)",
"def main(dataset, n, ms=False, out=sys.stdout):\n # build lists of paths previews files, tif, rpc and dzi files\n prv_paths = ' '.join([os.path.join(dataset, 'prv_%02d.jpg' % (i+1)) for i in xrange(n)])\n tif_paths = ' '.join([os.path.join(dataset, 'im_panchro_%02d.tif' % (i+1)) for i in xrange(n)])\n rpc_paths = ' '.join([os.path.join(dataset, 'rpc_%02d.xml' % (i+1)) for i in xrange(n)])\n dzi8_paths, dzi16_paths = None, None\n if ms:\n ms_paths = ' '.join([os.path.join(dataset, 'im_ms_%02d.tif' % (i+1)) for i in xrange(n)])\n if os.path.isfile(os.path.abspath(os.path.join(dataset,\n 'im_panchro_8BITS_01.dzi'))):\n dzi8_paths = ' '.join([os.path.join('input', dataset,\n 'im_panchro_8BITS_%02d.dzi' %\n (i+1)) for i in xrange(n)])\n if os.path.isfile(os.path.abspath(os.path.join(dataset,\n 'im_panchro_16BITS_01.dzi'))):\n dzi16_paths = ' '.join([os.path.join('input', dataset,\n 'im_panchro_16BITS_%02d.dzi' %\n (i+1)) for i in xrange(n)])\n\n # read infos in DIM*.XML file\n dim_xml_file = os.path.join(dataset, 'dim_01.xml')\n tif_file = os.path.join(dataset, 'im_panchro_01.tif')\n if os.path.isfile(dim_xml_file): # check if the link points to an existing file\n date = grep_xml(dim_xml_file, \"IMAGING_DATE\")\n satellite = grep_xml(dim_xml_file, \"INSTRUMENT_INDEX\")\n elif os.path.isfile(tif_file):\n date = extract_date_from_pleiades_filename(os.readlink(tif_file))\n satellite = extract_satellite_from_pleiades_filename(os.readlink(tif_file))\n else:\n date = 'DD-MM-YYYY'\n satellite = 'Pleiades 1X'\n\n # print to stdout\n if dzi8_paths or dzi16_paths:\n print('[%s]' % dataset, file=out)\n print('files = ', prv_paths, file=out)\n print('tif = ', tif_paths, file=out)\n print('rpc = ', rpc_paths, file=out)\n if ms:\n print('clr = ', ms_paths, file=out)\n if dzi8_paths:\n print('dzi8 = ', dzi8_paths, file=out)\n if dzi16_paths:\n print('dzi16 = ', dzi16_paths, file=out)\n s = dataset.split(os.path.sep)\n if len(s) == 3: # ie the path is of the kind 'pleiades/reunion/dataset_1'\n print('title = %s (%s)' % (s[1].capitalize(), s[2][-1]), file=out) # ie 'Reunion (1)'\n elif len(s) == 2: # ie the path is of the kind 'pleiades/reunion'\n print('title = %s' % s[1].capitalize(), file=out) # ie 'Reunion'\n else:\n print('path %s not expected by the author of the script: ' % dataset, s, file=sys.stderr)\n print('date = %s' % date, file=out)\n print('satellite = Pleiades %s' % satellite, file=out)\n print('nb_img = %d' % n, file=out)\n if ms:\n print('color = panchro_xs', file=out)\n else:\n print('color = panchro', file=out)",
"def process_images(path, dataset):\n \n print(f\"Processing images {os.path.join(path, dataset)}\", flush=True)\n label_file = os.path.join(path, dataset + '-labels-idx1-ubyte')\n with open(label_file, 'rb') as file:\n _, num = struct.unpack(\">II\", file.read(8))\n labels = numpy.fromfile(file, dtype=numpy.int8) #int8\n new_labels = numpy.zeros((num, 10))\n new_labels[numpy.arange(num), labels] = 1\n\n img_file = os.path.join(path, dataset + '-images-idx3-ubyte')\n with open(img_file, 'rb') as file:\n _, num, rows, cols = struct.unpack(\">IIII\", file.read(16))\n imgs = numpy.fromfile(file, dtype=numpy.uint8).reshape(num, rows, cols) #uint8\n imgs = imgs.astype(numpy.float32) / 255.0\n\n os.remove(label_file); os.remove(img_file)\n print(f\"Saving files under {os.path.join(path, dataset)} path\", flush=True)\n numpy.savez_compressed(os.path.join(path, dataset), imgs=imgs, labels=labels)",
"def training_data_generation(DATA_DIR, img_height_size, img_width_size, label_list):\r\n \r\n img_ms_files = glob.glob(DATA_DIR + '\\\\Train_MS' + '\\\\Train_*.tif')\r\n img_pan_files = glob.glob(DATA_DIR + '\\\\Train_Pan' + '\\\\Train_*.tif')\r\n polygon_files = glob.glob(DATA_DIR + '\\\\Train_Polygons' + '\\\\Train_*.geojson')\r\n \r\n img_ms_array_list = []\r\n img_pan_array_list = []\r\n mask_array_list = []\r\n \r\n for file in range(len(img_ms_files)):\r\n with rasterio.open(img_ms_files[file]) as f:\r\n metadata = f.profile\r\n img_ms = np.transpose(f.read(tuple(np.arange(metadata['count']) + 1)), [1, 2, 0])\r\n \r\n with rasterio.open(img_pan_files[file]) as g:\r\n metadata_pan = g.profile\r\n img_pan = np.expand_dims(g.read(1), axis = 2)\r\n \r\n ms_to_pan_ratio = metadata['transform'][0] / metadata_pan['transform'][0]\r\n \r\n if (img_height_size % ms_to_pan_ratio) != 0 or (img_width_size % ms_to_pan_ratio) != 0:\r\n raise ValueError('Please make sure that both img_height_size and img_width_size can be divided by {}'.format(int(ms_to_pan_ratio)))\r\n \r\n mask = training_mask_generation(img_pan_files[file], polygon_files[file], labels = label_list)\r\n \r\n img_ms_array, img_pan_array, mask_array = image_clip_to_segment_and_convert(img_ms, img_pan, mask, ms_to_pan_ratio, \r\n img_height_size, img_width_size)\r\n \r\n img_ms_array_list.append(img_ms_array)\r\n img_pan_array_list.append(img_pan_array)\r\n mask_array_list.append(mask_array)\r\n \r\n img_ms_full_array = np.concatenate(img_ms_array_list, axis = 0)\r\n img_pan_full_array = np.concatenate(img_pan_array_list, axis = 0)\r\n mask_full_array = to_categorical(np.concatenate(mask_array_list, axis = 0), num_classes = len(label_list))\r\n \r\n return img_ms_full_array, img_pan_full_array, mask_full_array",
"def run():\n\n today = datetime.now().strftime(\"%Y-%m-%d\")\n log_file = os.path.abspath(\"logs/{}.log\".format(today))\n logger = RsmasLogger(\"pipeline\", log_file)\n\n images = get_list_of_images()\n # LOG: list of images to process\n logger.log(loglevel.INFO, [img.key for img in images])\n\n for im in images:\n\n logger.log(loglevel.INFO, \"Processing image: {}\".format(im.key))\n\n file_path = \"{}/{}\".format(im.bucket_name, im.key)\n full_path = \"{}_full.jpg\"\n mod_path = \"{}_mod.jpg\"\n aws_path = \"{}/{}/{}/{}\"\n try:\n haz_id, haz_name, sat_name, sat_dir, img_type, img_date, center = summary.pull_summary_data(\n \"/vsis3/{}\".format(file_path))\n sat_id = Satellite.from_params(sat_name, bool(sat_dir))\n except:\n # LOG: error in image metadata format\n logger.log(loglevel.ERROR, '\\tThere was an error in the metadata format of the image. Skipping.')\n continue\n\n aws_path = aws_path.format(haz_id, sat_id, img_type, img_date)\n full_path = full_path.format(img_date)\n mod_path = mod_path.format(img_date)\n\n # 1. Read in image file\n with rasterio.open(\"s3://{}\".format(file_path)) as data:\n band = data.read(1)\n img = plot.show(band)\n img.get_figure().savefig(full_path, dpi=300)\n\n # 3. Compress image\n compressed = immanip.compress_image(full_path, compression_amount=0.3)\n\n # 4 - 5. Pad image and add date on image\n text_image = immanip.add_text_to_image(compressed, img_date)\n\n # 6. Save image locally\n text_image.save(mod_path.format(img_date))\n mod_path_aws = save.get_s3_url(\"{}/{}\".format(aws_path, mod_path))\n full_path_aws = save.get_s3_url(\"{}/{}\".format(aws_path, full_path))\n\n tif_path_aws = save.get_s3_url(\"{}/{}\".format(aws_path, im.key))\n\n # LOG: images successfully moved to S3 bucket\n # LOG: mod_path_aws, full_path_aws, tif_path_aws\n\n hazard = Hazard(haz_id, haz_name, HazardType.VOLCANO, Location(center[0], center[1]), Date(img_date), 0)\n satellite = Satellite.from_params(sat_name, bool(sat_dir))\n image = Image(str(randint(1, 10000000)),\n haz_id,\n satellite,\n ImageType.from_string(img_type),\n Date(img_date),\n ImageURL(full_path_aws),\n ImageURL(tif_path_aws),\n ImageURL(mod_path_aws))\n\n try:\n db = Database()\n except ConnectionError:\n logger.log(loglevel.ERROR, \"\\tThere was an error while connecting to the database. Skipping this image.\")\n continue\n\n db.create_new_hazard(hazard)\n db.create_new_satellite(satellite)\n db.create_new_image(image)\n\n db.close()\n\n # LOG: database successfully updated\n logger.log(loglevel.INFO, \"\\tDatabase succesfully updated.\")\n\n save.save_image_s3(mod_path, \"{}/{}\".format(aws_path, mod_path))\n save.save_image_s3(full_path, \"{}/{}\".format(aws_path, full_path))\n save.move_tif(im.key, \"{}/{}\".format(aws_path, im.key))\n\n logger.log(loglevel.INFO, \"\\tImages were successfully uploaded to the S3 bucket\")\n logger.log(loglevel.INFO, \"\\t\\tmod_path_aws: {}\".format(mod_path_aws))\n logger.log(loglevel.INFO, \"\\t\\tfull_path_aws: {}\".format(full_path_aws))\n logger.log(loglevel.INFO, \"\\t\\ttif_path_aws: {}\".format(tif_path_aws))\n\n # LOG: image completed\n logger.log(loglevel.INFO, \"\\tProcessing of {} completed.\".format(im.key))\n\n # LOG: finished processing images\n logger.log(loglevel.INFO, \"Processing complete.\")",
"def extract_c3d_all(filenames_df, model, depth=16, gpu_id=0): \n nrows = filenames_df.shape[0]\n \n ###########################################################################\n\n # Serial Implementation (For GPU based extraction)\n for i in range(nrows):\n st = time.time()\n feat = getC3DFrameFeats(model, filenames_df['infiles'][i], True, gpu_id, depth, i)\n # save the feature to disk\n if feat is not None:\n np.save(filenames_df['outfiles'][i], feat)\n print \"Written \"+str(i+1)+\" : \"+filenames_df['outfiles'][i]\n \n e = time.time()\n print \"Execution Time : \"+str(e-st)\n \n ###########################################################################\n return nrows",
"def main():\n feature_extraction_model = \"HOG\"\n # feature_extraction_models = [\"CM\", \"HOG\"]\n feature_extraction_model_1 = \"CM\"\n dimension_reduction_model = \"PCA\"\n k_value = 10\n dim_k_value = 40\n # K_value = 20\n # lab_folder = \"Dataset3/Labelled/Set1\"\n # unlab_folder = \"Dataset3/Unlabelled/Set 2\"\n lab_folder = get_input_folder(\"Labelled Folder\")\n unlab_folder = get_input_folder(\"Classify\")\n start = time.time()\n # ================================================================================================================\n # labelled Images\n dim_red = DimensionReduction(feature_extraction_model, dimension_reduction_model, dim_k_value,\n folder_metadata=lab_folder,\n metadata_collection=\"labelled\")\n obj_feat_lab = dim_red.get_object_feature_matrix()\n features_list_lab = np.array(obj_feat_lab['featureVector'].tolist())\n images_list_lab = np.array(obj_feat_lab['imageId'])\n # filtering the labelled set\n dorsal_list, palmar_list = filter_images_by_label(images_list_lab)\n\n # unlabelled images\n dim_red = DimensionReduction(feature_extraction_model, dimension_reduction_model, dim_k_value,\n folder_metadata=unlab_folder,\n metadata_collection=\"unlabelled\")\n obj_feat_unlab = dim_red.get_object_feature_matrix()\n features_list_unlab = np.array(obj_feat_unlab['featureVector'].tolist())\n images_list_unlab = np.array(obj_feat_unlab['imageId'])\n\n # ================================================================================================================\n # labelled Images\n dim_red = DimensionReduction(feature_extraction_model_1, dimension_reduction_model, dim_k_value,\n folder_metadata=lab_folder,\n metadata_collection=\"labelled\")\n obj_feat_lab_1 = dim_red.get_object_feature_matrix()\n features_list_lab_1 = np.array(obj_feat_lab_1['featureVector'].tolist())\n # images_list_lab = np.array(obj_feat_lab_1['imageId'])\n # filtering the labelled set\n\n\n # unlabelled images\n dim_red = DimensionReduction(feature_extraction_model_1, dimension_reduction_model, dim_k_value,\n folder_metadata=unlab_folder,\n metadata_collection=\"unlabelled\")\n obj_feat_unlab_1 = dim_red.get_object_feature_matrix()\n features_list_unlab_1 = np.array(obj_feat_unlab_1['featureVector'].tolist())\n # images_list_unlab = np.array(obj_feat_unlab['imageId'])\n features_list_lab = np.concatenate((features_list_lab, features_list_lab_1), axis=1)\n features_list_unlab = np.concatenate((features_list_unlab, features_list_unlab_1), axis=1)\n\n # ================================================================================================================\n\n dorsal_list, palmar_list = filter_images_by_label(images_list_lab)\n features_list = np.concatenate((features_list_lab, features_list_unlab))\n images_list = np.concatenate((images_list_lab, images_list_unlab))\n images_list = list(images_list)\n # Finding Similarity Matrix\n cos_sim = cosine_similarity(features_list)\n sim_graph = np.empty((0, len(cos_sim)))\n for row in cos_sim:\n k_largest = np.argsort(-np.array(row))[1:k_value + 1]\n sim_graph_row = [d if i in k_largest else 0 for i, d in enumerate(row)]\n sim_graph = np.append(sim_graph, np.array([sim_graph_row]), axis=0)\n\n row_sums = sim_graph.sum(axis=1)\n sim_graph = sim_graph / row_sums[:, np.newaxis]\n idx = 0\n results_dorsal = ppr(sim_graph, images_list, dorsal_list)\n results_palmar = ppr(sim_graph, images_list, palmar_list)\n final_results = {}\n\n for img in images_list_unlab:\n if results_dorsal[img] < results_palmar[img]:\n final_results[img] = 
\"dorsal\"\n else:\n final_results[img] = \"palmar\"\n\n actual_labels = fetch_actual_labels(images_list_unlab)\n print(\"Classification\")\n no_correct = 0\n correctly_classified = []\n incorrectly_classified = []\n print(\"| ImageId | Prediction | Actual |\")\n for r in final_results:\n print(\"| {} | {} | {} |\".format(r, final_results[r], actual_labels[r]))\n if final_results[r] == actual_labels[r]:\n correctly_classified.append(r)\n no_correct += 1\n else:\n incorrectly_classified.append(r)\n\n print(\"Correctly classified: {}\\n\".format(correctly_classified))\n print(\"InCorrectly classified: {}\\n\".format(incorrectly_classified))\n\n print(\"Classification Accuracy: {}%\".format(no_correct / len(images_list_unlab) * 100))\n print(\"Execution time: {} seconds\".format(time.time() - start))",
"def get_image_data(imagedir, model_kwds=dict(layer='fc2'),\n img_kwds=dict(size=(224,224)), timestamps_kwds=dict(source='auto'),\n pca_kwds=None):\n fingerprints_fn = pj(imagedir, ic_base_dir, 'fingerprints.pk')\n images_fn = pj(imagedir, ic_base_dir, 'images.pk')\n if os.path.exists(images_fn):\n print(f\"reading image arrays {images_fn} ...\")\n images = read_pk(images_fn)\n else:\n print(f\"create image arrays {images_fn}\")\n images = read_images(imagedir, **img_kwds)\n write_pk(images, images_fn)\n if os.path.exists(fingerprints_fn):\n print(f\"reading fingerprints {fingerprints_fn} ...\")\n fingerprints = read_pk(fingerprints_fn)\n else:\n print(f\"create fingerprints {fingerprints_fn}\")\n fingerprints = ic.fingerprints(images, ic.get_model(**model_kwds))\n if pca_kwds is not None:\n fingerprints = ic.pca(fingerprints, **pca_kwds)\n write_pk(fingerprints, fingerprints_fn)\n print(f\"reading timestamps ...\")\n if timestamps_kwds is not None:\n timestamps = read_timestamps(imagedir, **timestamps_kwds)\n return images, fingerprints, timestamps",
"def load_datasets(filepath, sample_list, label_list, mark, a4c_or_a2c, m):\n # here can adjust n to apply your datasets\n if mark:\n n = 4000\n else:\n n = 4000\n dst_pair1 = np.zeros(shape=(n, m, m, 1), dtype=np.float32)\n dst_pair2 = np.zeros(shape=(n, m, m, 1), dtype=np.float32)\n dst_label = np.zeros(shape=(n,), dtype=np.int32)\n k = 0\n label_list_copy = copy.deepcopy(label_list)\n for number in range(len(sample_list)):\n label = label_list_copy[sample_list[number]-1] # o--->up 1--->down\n start_mark = label.pop()\n for i in (label):\n position = label.index(i)\n if position == len(label)-1:\n break\n j = label[position+1]\n for t in range(i,j):\n # load imgs: from number i to number j-1-->pair1\n # i+1 j-->pair2\n img_p1 = cv2.imread(filepath+\"Patient\"+(\"000\"+str(sample_list[number]))[-4:] +\n \"\\\\a\"+str(a4c_or_a2c)+\"c\\\\\"+str(t)+'.png', 0)\n img_p2 = cv2.imread(filepath+\"Patient\"+(\"000\"+str(sample_list[number]))[-4:] +\n \"\\\\a\"+str(a4c_or_a2c)+\"c\\\\\"+str(t+1)+'.png', 0)\n # cut and unsamping use cv2.resize\n # original 600*800--cut-->512*512--->resize by cv2 ---> m*m\n dst_pair1[k, :, :, 0] = cv2.resize(img_p1[80:592, 176:688].reshape(512, -1, 1), (m, m))/255.0\n dst_pair2[k, :, :, 0] = cv2.resize(img_p2[80:592, 176:688].reshape(512, -1, 1), (m, m))/255.0\n if start_mark == 0: # up\n dst_label[k] = 0 \n else:\n dst_label[k] = 1 \n k += 1\n if start_mark == 0:\n start_mark = 1\n else:\n start_mark = 0\n if mark == 1:\n pathname = 'train'\n elif mark == 0:\n pathname = 'test'\n else:\n pathname = \"val\"\n # save the imgs for augmentation before training.\n os.mkdir('../'+pathname+'p1/') \n os.mkdir('../'+pathname+'p2/')\n K = 0\n for i in (dst_pair1[:k]):\n preprocessing.image.save_img('../'+pathname+'p1/'+str(K)+'.png', i)\n K += 1\n K = 0\n for i in (dst_pair2[:k]):\n preprocessing.image.save_img('../'+pathname+'p2/'+str(K)+'.png', i)\n K += 1\n return dst_pair1[:k], dst_pair2[:k], dst_label[:k]",
"def process_data(output_folder):\n # select imgs\n img_folder = join(output_folder, 'img')\n select_img(output_folder, img_folder, 'HE-green')\n\n mask_folder = join(output_folder, 'mask')\n select_img(output_folder, mask_folder, '_EF5')",
"def load_pascal(data_dir, split='train'):\n # Wrote this function\n # idx = 0\n # if idx >20:\n # idx+=1\n # break\n \"\"\"\n print(\"Begin Load Images ------------------------------------\")\n images = []\n # images_dict -> key: img_file_idx, value: rgb image ndarray (256*256*3)\n images_dict = {}\n # count\n for infile in glob.glob(\"./VOCdevkit/VOC2007/JPEGImages/*.jpg\"):\n # reshape the images to 256*256*3\n file, ext = os.path.splitext(infile)\n file_idx = file[-6:]\n\n try:\n im = Image.open(infile)\n resized_img = im.resize((256, 256), Image.ANTIALIAS)\n resized_arr = np.array(resized_img)\n images_dict[file_idx] = resized_arr.astype(np.float32)\n except IOError:\n print(\"Error\")\n\n save_obj(images_dict,\"images_dict\")\n \"\"\"\n # label_mat: 2d array, each annotation file is one label_col, multiple label_col mean multiple annotation files\n label_mat = []\n weight_mat = []\n image_mat = []\n\n images_dict = load_obj(\"images_dict\")\n print(\"Return Load Images ------------------------------------\")\n\n # for filename in os.listdir(\"./VOCdevkit/VOC2007/ImageSets/Main/\"):\n for filename in enumerate(CLASS_NAMES):\n\n with open(\"./VOCdevkit/VOC2007/ImageSets/Main/\"+filename[1] +\"_\"+split+\".txt\") as fp:\n print(fp)\n image_mat = []\n label_col = []\n weight_col = []\n line = fp.readline()\n cnt = 1\n while line:\n\n label_idx = line.strip()[:-3]\n try:\n # print(\"Line {}: {}\".format(label_idx, type(label_idx)))\n # Be aware!! '000005 ' is different from '000005', there is a space in the first string!!!\n # label_idx = '000005 ' label_idx[:-1]='000005'\n image_mat.append(images_dict[label_idx])\n except IOError:\n print(\"Error Line {}: {}\".format(label_idx, type(label_idx)))\n\n label_flag = int(line.strip()[-2:])\n\n if label_flag is 0 or label_flag is -1:\n label_col.append(np.int32(0))\n else:\n label_col.append(np.int32(1))\n\n if label_flag is 1 or label_flag is -1:\n weight_col.append(np.int32(1))\n else:\n weight_col.append(np.int32(0))\n\n line = fp.readline()\n cnt += 1\n np_label_col = np.asarray(label_col)\n label_mat.append(np_label_col)\n # print(np.shape(label_mat))\n np_weight_col = np.asarray(weight_col)\n weight_mat.append(np_weight_col)\n\n # print('image_mat {}: label_mat {}'.format(np.shape(image_mat), np.shape(label_mat)))\n np_image_mat = np.asarray(image_mat)\n np_label_mat = np.asarray(label_mat)\n np_weight_mat = np.asarray(weight_mat)\n # print('np_image_mat {}: np_label_mat {}'.format(np.shape(np_image_mat), np.shape(np_label_mat)))\n np_trans_label_mat = np_label_mat.transpose()\n np_trans_weight_mat = np_weight_mat.transpose()\n # print(np.shape(np_label_mat))\n # print(np.shape(np_weight_mat))\n print('np_trans_label_mat {}: np_trans_weight_mat {}'.format(np.shape(np_trans_label_mat), np.shape(np_trans_weight_mat)))\n print(\"Return Load Weights and Labels ------------------------------------\")\n return np_image_mat, np_trans_label_mat, np_trans_weight_mat",
"def createDataset(sources,output,labels,sparse):\n global has_joblib\n out_path = str(output)\n # delete the output file\n if os.path.exists(os.path.abspath(out_path)):\n os.remove(os.path.abspath(out_path))\n \n # first, list the source files\n fpaths_src, fnames_src = utils.listFiles(directory=os.path.abspath(sources), ext='png')\n \n label_map={}\n \n # read the label file\n if not (labels == None):\n label_map = utils.readLabelMap(labels)\n # check that the numbers match\n print(\"Number of images in label map : %s\"%str(len(label_map.keys())-1))\n print(\"Number of images in source dir: %s\"%str(len(fpaths_src)))\n assert len(label_map.keys())-1 == len(fpaths_src)\n \n # generate KNN classifier\n if not (args.codebook == 'None' or args.codebook == None):\n args.knn = getKNNClassifier() \n else:\n args.knn = None\n \n # precompute number of images\n n_imgs = len(fpaths_src)\n \n # preallocate array\n # if augmentation, calculate (9*4+1)*n samples\n all_features_list = []\n \n # parallel implementation (default, if joblib available)\n if has_joblib:\n image_features = Parallel(n_jobs=args.njobs,verbose=5) (delayed(processImage)(fpaths_src, label_map, fnames_src, img_idx) for img_idx in range(n_imgs))\n # collect all images into a single matrix\n image_features = np.concatenate(image_features, axis=0)\n all_features_list.append(image_features)\n else:\n for img_idx in xrange(n_imgs):\n image_features = processImage(fpaths_src, label_map, fnames_src, img_idx)\n all_features_list.append(image_features)\n \n # make a 2D matrix from the list of features (stack all images vertically)\n feat_matrix = np.concatenate(all_features_list, axis=0).astype(np.float32) \n \n # do scaling of each feature dimension \n #if False:\n if not (args.scale == 0):\n print \"Scaling data...\"\n \n # preserve the labels\n label_vec = feat_matrix[:,0]\n feat_matrix = np.delete(feat_matrix,0,1)\n \n featurestats = np.zeros((2,feat_matrix.shape[1]))\n \n # use soft-normalization (zero-mean, unit var whitening)\n if (args.scale == 1):\n # if we specified featurestats from a training set, use them\n if not (args.featurestats == None):\n # load the statistics\n featurestats = loadFeatureStats()\n # featurestats contains 2 rows, first row = mean, second row = std\n # and n feature dimensions\n assert feat_matrix.shape[1]==featurestats.shape[1]\n else:\n pass\n \n \n # use hard-normalization \n elif (args.scale == 2):\n # if we specified featurestats from a training set, use them\n if not (args.featurestats == None):\n # load the statistics\n featurestats = loadFeatureStats()\n # the featurestats contains 2 rows, first row = min, second row = max \n # and n feature dimensions\n assert feat_matrix.shape[1]==featurestats.shape[1]\n else:\n pass\n \n \n # normalize each feature dimension\n for feat_idx in xrange(feat_matrix.shape[1]):\n feat_vec = feat_matrix[:,feat_idx]\n \n # soft-normalization (zero-mean, approx. 
unit variance)\n if (args.scale == 1): \n # if feature statistics are specified\n if not (args.featurestats == None):\n feat_mean = featurestats[0,feat_idx]\n feat_std = featurestats[1,feat_idx]\n else:\n # compute them from the data\n feat_mean = feat_vec.mean()\n feat_std = (feat_vec.std() + 1e-10)\n # store them \n featurestats[0,feat_idx] = feat_mean\n featurestats[1,feat_idx] = feat_std\n \n # shift to zero mean and (unit) variance\n feat_vec_scaled = (feat_vec - feat_mean) / (1.*feat_std)\n \n \n # hard-normalization (min/max = borders estimated from the (training) dataset)\n elif (args.scale == 2):\n if not (args.featurestats == None):\n feat_min = featurestats[0,feat_idx]\n feat_max = featurestats[1,feat_idx]\n else:\n # compute them freshly\n feat_min = np.min(feat_vec)\n feat_max = np.max(feat_vec)\n # store them \n featurestats[0,feat_idx] = feat_min\n featurestats[1,feat_idx] = feat_max\n \n # standardize/normalize between 0 and 1\n feat_vec_std = (feat_vec - feat_min) / (feat_max - feat_min + 1e-10) \n \n # linearly scale between -1 and 1 \n feat_vec_scaled = (1.0*feat_vec_std * (1 - -1)) - 1\n \n \n # set column back to matrix\n feat_matrix[:,feat_idx] = feat_vec_scaled\n \n # finally prepend the label_vec again\n feat_matrix = np.concatenate((np.reshape(label_vec,(feat_matrix.shape[0],1)),feat_matrix), axis=1)\n \n print \"Done.\"\n else:\n print \"Data may not be properly scaled, use the 'svm-scale' implementation of libsvm.\"\n \n if not (args.savefeaturestats == None):\n saveFeatureStats(featurestats) \n\n #Parallel(n_jobs=args.njobs, verbose=5)(delayed(function)(params) for i in range(10))\n # open the output file\n output_file = open(os.path.abspath(out_path), 'wb')\n\n # run through the feature matrix \n print \"Writing %s rows and %s cols to file...\"%(feat_matrix.shape)\n # parallel implementation (default, if joblib available)\n if has_joblib:\n lines = Parallel(n_jobs=args.njobs, verbose=5)(delayed(writeLine)(i, feat_matrix) for i in range(feat_matrix.shape[0]))\n output_file.writelines(lines) \n else:\n for i in xrange(feat_matrix.shape[0]):\n line = writeLine(i, feat_matrix)\n output_file.writelines(line)\n \n output_file.close()\n \n return 0",
"def process_images():\n\t\n\tparser = argparse.ArgumentParser(description=\"Splice image patch for face from GAN generated donor to detected face in recipient image.\")\n\tparser.add_argument(\"-d\", \"--donor\", dest=\"donor\", default=\"./GAN_Faces/\", help=\"path to directory containing GAN generated faces\")\n\tparser.add_argument(\"-r\", \"--recipient\", dest=\"recipient\", default=\"./MediFor_Images/\", help=\"path to directory containing images into which faces are spliced\")\n\tparser.add_argument(\"-o\", \"--output\", dest=\"output\", default=\"./GAN_MediFor/\", help=\"output directory into which spliced images are saved\")\n\tparser.add_argument(\"-f\", \"--files\", dest=\"files\", default=False, help=\"If the input and output are files not directories\", action='store_true')\n\n\targs = parser.parse_args()\n\tdonor_directory = args.donor\n\trecipient_directory = args.recipient\n\tout_directory = args.output\n\tfi = args.files\n\t\n\t# donor images\n\ttry:\n\t\thead_image_paths = os.listdir(donor_directory) if not fi else [donor_directory]\n\t\tif not os.path.exists(head_image_paths[0]):\n\t\t\traise ValueError\n\texcept:\n\t\tprint('Did you create the donor image directory?')\n\t\tprint('Quiting ...')\n\t\treturn\n\t\t\n\t# recipient images\n\ttry:\n\t\trecipient_paths = os.listdir(recipient_directory) if not fi else [recipient_directory]\n\t\tif not os.path.exists(recipient_paths[0]):\n\t\t\traise ValueError\n\texcept:\n\t\tprint('Did you create the recipient image directory?')\n\t\tprint('Quiting ...')\n\t\treturn\n\t\n\t# output folder existence\n\tif not os.path.exists(out_directory) and not fi:\n\t\tprint('Did you create the output image directory?')\n\t\tprint('Quiting...')\n\t\treturn\n\t\n\t# log errors\n\tlf = open('./log.txt', 'w')\n\t\n\t\"\"\"\n\tTowards the objectives of the MediFor program, all Progressive GAN generated face images are utilized in combination with all available images in recipient images.\n\t\n\tNaming convention:\n\tThe spliced images are named as <donor image name>--<recipient image name>.png\n\tThe spliced images can be renamed at a later date if a hashing function is used to rename donor or recipient image file names.\t\n\t\"\"\"\n\n\tfor head_img in head_image_paths:\n\t\thead_path = donor_directory + head_img if not fi else head_img\n\t\tfor recipient_img in recipient_paths:\n\t\t\trecipient_path = recipient_directory + recipient_img if not fi else recipient_img\n\t\t\tout_img = head_img.split('.')[0] + '--' + recipient_img.split('.')[0] + '.png'\n\t\t\tout_path = os.path.join(out_directory, out_img) if not fi else out_directory\n\t\t\ttry:\n\t\t\t\tsplice_donor_recipient(recipient_path, head_path, out_path)\n\t\t\t\tprint('donor: {}, recipient: {}\\n output: {}'.format(head_path, recipient_path, out_path))\n\t\t\texcept Exception as err:\n\t\t\t\tprint(err)\n\t\t\t\tlf.write('Issue with: {}\\n'.format(out_img))\n\t\n\tlf.close()",
"def preprocessing_objects(img_data, hierarchy_mapping, object_file_name='objects.p'):\n\n object_path_token = \"{0}.{1}.{2}\".format(DATA, VISUAL_GENOME, get_name_from_file(object_file_name))\n\n # Check if pickles are already created\n objects_path = FilesManager().get_file_path(object_path_token)\n\n if os.path.isfile(objects_path):\n Logger().log('File is already exist {0}'.format(objects_path))\n objects = FilesManager().load_file(object_path_token)\n return objects\n\n # Bad urls which should be sorted out\n bad_urls = get_bad_urls()\n\n # Get the whole objects from entities\n objects_lst = []\n correct_labels = hierarchy_mapping.keys()\n idx = 0\n for img in img_data:\n\n # Get the url image\n url = img.image.url\n\n # Sorting bad urls\n if url in bad_urls:\n continue\n\n # Get the objects per image\n objects = img.objects\n for object in objects:\n\n # Get the lable of object\n label = object.names[0]\n\n # Check if it is a correct label\n if label not in correct_labels:\n continue\n\n new_object_mapping = ObjectMapping(object.id, object.x, object.y, object.width, object.height, object.names,\n object.synsets, url)\n # Append the new objectMapping to objects_lst\n objects_lst.append(new_object_mapping)\n\n idx += 1\n Logger().log(\"Finished img: {}\".format(idx))\n\n # Pickle objects_lst\n objects_array = np.array(objects_lst)\n # Save the objects files to the disk\n FilesManager().save_file(object_path_token, objects_array)\n return objects_array",
"def imaging(input_model, reference_files):\n detector = cf.Frame2D(name='detector', axes_order=(0, 1), unit=(u.pix, u.pix))\n v2v3 = cf.Frame2D(name='v2v3', axes_order=(0, 1), unit=(u.deg, u.deg))\n world = cf.CelestialFrame(reference_frame=coord.ICRS(), name='world')\n\n subarray2full = subarray_transform(input_model)\n imdistortion = imaging_distortion(input_model, reference_files)\n distortion = subarray2full | imdistortion\n distortion.bounding_box = imdistortion.bounding_box\n del imdistortion.bounding_box\n tel2sky = pointing.v23tosky(input_model)\n pipeline = [(detector, distortion),\n (v2v3, tel2sky),\n (world, None)]\n return pipeline",
"def parse_points3d(kapture_path: str,\n number_of_points: int,\n nvm_content: List[str],\n offset: int,\n point_id_offset: int,\n image_idx_to_image_name: List[str],\n filter_list: Optional[Set[str]],\n points3d: List[List[float]],\n keypoints: kapture.Keypoints,\n observations: kapture.Observations) -> None:\n # (image_name, nvm_feature_id ) -> keypoint_id\n known_keypoints = {}\n local_keypoints = {}\n for i in range(0, number_of_points):\n fields = nvm_content[i + offset].split()\n points3d.append([float(v) for v in fields[0:6]])\n # parse observations\n number_of_measurements = int(fields[6])\n for j in range(0, number_of_measurements):\n # parse measurement\n image_index = int(fields[7 + 4 * j + 0])\n feature_index = int(fields[7 + 4 * j + 1])\n x = float(fields[7 + 4 * j + 2])\n y = float(fields[7 + 4 * j + 3])\n\n # retrieve filename. if added, then proceed to add features / observations\n file_name = image_idx_to_image_name[image_index]\n if filter_list is not None and file_name not in filter_list:\n # file_name is not in the list, do not add it\n continue\n\n # init local_keypoints if needed\n if file_name not in local_keypoints:\n local_keypoints[file_name] = []\n # do not add the same keypoint twice\n if (file_name, feature_index) not in known_keypoints:\n # in the kapture format, keypoint id is different. Note that it starts from 0\n known_keypoints[(file_name, feature_index)] = len(local_keypoints[file_name])\n local_keypoints[file_name].append([x, y])\n keypoint_idx = known_keypoints[(file_name, feature_index)]\n point3d_idx = i + point_id_offset\n observations.add(point3d_idx, LOCAL_FEATURE_TYPE, file_name, keypoint_idx)\n\n # finally, convert local_keypoints to np.ndarray and add them to the global keypoints variable\n for image_filename, keypoints_array in local_keypoints.items():\n keypoints_np_array = np.array(keypoints_array, dtype=np.float32)\n keypoints_filepath = kapture.io.features.get_keypoints_fullpath(LOCAL_FEATURE_TYPE,\n kapture_path,\n image_filename)\n kapture.io.features.image_keypoints_to_file(keypoints_filepath, keypoints_np_array)\n keypoints.add(image_filename)",
"def main():\n with open(IMAGEPATH_LIST_PATH, \"rt\") as imagepath_list_handle:\n imagepath_list = [line.strip() for line in imagepath_list_handle.readlines()]\n\n object_detector = ObjectDetector(MODEL_PATH)\n\n dataset_json = []\n for imagepath in imagepath_list:\n image = scipy.misc.imread(imagepath)\n detections = object_detector.run(image)\n\n detections_json = {\"path\": imagepath, \"detections\": [det.to_dict() for det in detections]}\n dataset_json.append(detections_json)\n\n with open(DATASET_PATH, \"wt\") as json_handle:\n json.dump(dataset_json, json_handle, sort_keys=True, indent=4)",
"def main():\n \n # for inserting other images, add tem to /input folder and list them here\n images = (\n 'image-0',\n 'image-1',\n 'image-2'\n )\n\n for image_name in images:\n print(image_name, \"image:\")\n\n image = open_image(image_name)\n display_image(image, \"Original input \" + image_name)\n\n grayscale_v = transform_colors(image)\n display_image(grayscale_v[:,:,0], \"Grayscale \" + image_name)\n save_image(image_name + \"-grayscale\", grayscale_v[:,:,0])\n\n contours_v, contours = get_contours(grayscale_v)\n display_image(contours_v, \"Contours \" + image_name)\n save_image(image_name + \"-contours\", contours_v)\n\n labeled_img, areas = get_measures(image, contours[1:])\n display_image(labeled_img, \"Labeled \" + image_name)\n save_image(image_name + \"-labeled\", labeled_img)\n\n areas_histogram(areas, image_name)",
"def load_images(self, target = \"standard\", path=OMNIGLOT_DATAPATH):\n X = []\n Y = []\n folderName = {}\n if target == \"standard\":\n trainFolders = [\"images_background\"]\n testFolders = [\"images_evaluation\"]\n elif target == \"minimal\":\n trainFolders = [\"images_background_small1\", \"images_background_small2\"]\n testFolders = [\"images_evaluation\"]\n \n if self.train:\n for trainFolder in trainFolders:\n folderPath = os.path.join(path, trainFolder)\n imgAllCount = 0 # this is counted for the whole images in all alphabet\n chaAllCount = 0 # this is counted for the whole characters in all alphabet\n\n for alphabet in sorted(os.listdir(folderPath)):\n alphabetPath = os.path.join(folderPath, alphabet)\n folderName[alphabet] = {'totalChar': 0, 'charIndex': [], 'totalImg': 0, 'imgIndex': []}\n \n imgAlphabetCount = 0 # this is counted for the number of images in this alphabet\n chaAlphabetCount = 0 # this is counted for the number of character in this alphabet\n\n folderName[alphabet]['charIndex'].append(chaAllCount)\n folderName[alphabet]['imgIndex'].append(imgAllCount)\n \n for letter in sorted(os.listdir(alphabetPath)):\n letterPath = os.path.join(alphabetPath, letter)\n \n for letterImage in os.listdir(letterPath):\n imagePath = os.path.join(letterPath, letterImage)\n image = mpimg.imread(imagePath)\n X.append(image)\n Y.append(chaAllCount)\n \n imgAlphabetCount += 1\n imgAllCount += 1\n\n chaAlphabetCount += 1\n chaAllCount += 1\n \n folderName[alphabet]['totalChar'] = chaAlphabetCount\n folderName[alphabet]['totalImg'] = imgAlphabetCount\n folderName[alphabet]['charIndex'].append(chaAllCount-1)\n folderName[alphabet]['imgIndex'].append(imgAllCount-1)\n \n X = np.stack(X) \n X = X.reshape(-1, IMAGES_PER_CHARACTER, X.shape[1], X.shape[2])\n return X, np.stack(Y), folderName",
"def read_fn(file_references, mode, params=None):\r\n print('Reading the dataset from Datalakestore (2mm NIfTI images)....')\r\n\r\n def _augment(img):\r\n \"\"\"An image augmentation function\"\"\"\r\n return flip(img, axis=2)\r\n\r\n image_array = []\r\n label_array = []\r\n for f in file_references:\r\n subject_id = f[0]\r\n\r\n # Read the image nii with sitk\r\n ##t1_fn = os.path.join(data_path, '{}/T1_2mm.nii.gz'.format(subject_id))\r\n ##t1 = sitk.GetArrayFromImage(sitk.ReadImage(str(t1_fn)))\r\n t1_fn = os.path.join(data_path, '{}/T1_2mm.nii.gz'.format(subject_id))\r\n print(t1_fn)\r\n #with adlsFileSystemClient.open(t1_fn, 'rb') as f:\r\n # img = sitk.ReadImage(str(f))\r\n # sitk::ERROR: The file \"<ADL file: /clusters/DLTK_IXI_Dataset/2mm/IXI012/T1_2mm.nii.gz>\" does not exist.\r\n # sitk seems only read from local path....how to read from remote path????????\r\n # for short term download to local path\r\n # rpath is datalakestore, lpath is local file path both have the same root structure '/clusters/DLTK_IXI_Dataset/'\r\n multithread.ADLDownloader(adlsFileSystemClient, rpath=t1_fn, lpath=t1_fn, nthreads=5, chunksize=2**24, overwrite=True)\r\n img = sitk.ReadImage(str(t1_fn))\r\n # you need http://imagej.net/Fiji#Downloads app to show the img. More discussion and instruction: https://stackoverflow.com/questions/45682319/simpleitk-show-generates-error-in-imagej-on-linux\r\n ##sitk.Show(img)\r\n t1 = sitk.GetArrayFromImage(img)\r\n\r\n # Normalise volume image\r\n t1 = whitening(t1)\r\n images = np.expand_dims(t1, axis=-1).astype(np.float32)\r\n\r\n if mode == tf.estimator.ModeKeys.PREDICT:\r\n yield {'features': {'x': images}, 'img_id': subject_id}\r\n print('read_fn Predict')\r\n\r\n # Parse the sex classes from the file_references [1,2] and shift them\r\n # to [0,1]\r\n sex = np.int(f[1]) - 1\r\n y = np.expand_dims(sex, axis=-1).astype(np.int32)\r\n\r\n # Augment if used in training mode\r\n if mode == tf.estimator.ModeKeys.TRAIN:\r\n images = _augment(images)\r\n print('read_fn Train')\r\n # Check if the reader is supposed to return training examples or full images\r\n if params['extract_examples']:\r\n #print('read_fn params extract_examples')\r\n images = extract_random_example_array(\r\n image_list=images,\r\n example_size=params['example_size'],\r\n n_examples=params['n_examples'])\r\n for e in range(params['n_examples']):\r\n #print ('e: ', e)\r\n## yield {'features': {'x': images[e].astype(np.float32)},\r\n## 'labels': {'y': y.astype(np.float32)},\r\n## 'img_id': subject_id}\r\n image_array.append(images[e].astype(np.float32))\r\n label_array.append(y.astype(np.int32))\r\n else:\r\n print('read_fn params yield last')\r\n## yield {'features': {'x': images},\r\n## 'labels': {'y': y.astype(np.float32)},\r\n## 'img_id': subject_id}\r\n image_array.append(images)\r\n label_array.append(y.astype(np.int32))\r\n\r\n print(\"read_fn yield output_array with image shape = \", images.shape, \"label shape = \", y.shape)\r\n yield {'x': np.array(image_array), 'y': np.array(label_array)}",
"def load_and_align_data(image_paths, image_size, margin, gpu_memory_fraction):\n\n minsize = 20 # minimum size of face\n threshold = [ 0.6, 0.7, 0.7 ] # three steps's threshold\n factor = 0.709 # scale factor\n \n print('Creating networks and loading parameters')\n with tf.Graph().as_default() as g:\n sess = tf.Session(graph=g, config=tf.ConfigProto(log_device_placement=False))\n with sess.as_default():\n pnet, rnet, onet = detect_face.create_mtcnn(sess, _files.model_dir)\n\n nrof_samples = len(image_paths)\n img_list = [None] * nrof_samples\n for i in xrange(nrof_samples):\n img = misc.imread(os.path.expanduser(image_paths[i]))\n img_size = np.asarray(img.shape)[0:2]\n bounding_boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)\n det = np.squeeze(bounding_boxes[0, 0:4])\n bb = np.zeros(4, dtype=np.int32)\n bb[0] = np.maximum(det[0] - margin / 2, 0)\n bb[1] = np.maximum(det[1] - margin / 2, 0)\n bb[2] = np.minimum(det[2] + margin / 2, img_size[1])\n bb[3] = np.minimum(det[3] + margin / 2, img_size[0])\n cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]\n aligned = misc.imresize(cropped, (image_size, image_size), interp='bilinear')\n prewhitened = facenet.prewhiten(aligned)\n img_list[i] = prewhitened\n images = np.stack(img_list)\n \n return images",
"def _process_image_files(self, input_files):\n # Handle single file-object as arg.\n if not isinstance(input_files, list):\n input_files = [input_files]\n self._check_batch_size(input_files)\n # Handle unnames images as lists of file objects. Named by index in list.\n image_files = []\n for i, tup in enumerate(input_files):\n if not isinstance(tup, tuple):\n image_files.append((tup, str(i)))\n assert hasattr(image_files[i][0], 'read'), (\n 'image_files[%d] has wrong type: %s. Must be file-object with read method.') % (\n i, type(image_files[i][0]))\n else: # already tuples passed in.\n image_files.append(tup)\n # Resize any images such that the min dimension is in range.\n if CAN_RESIZE:\n for i, image_tup in enumerate(image_files):\n image_files[i] = self._resize_image_tuple(image_tup)\n # Return a list of (bytes, name) tuples of the encoded image bytes.\n image_data = []\n for image_file in image_files:\n image_data.append((bytes(image_file[0].read()), image_file[1]))\n return image_data",
"def preprocess_images():\n \n # Set up the lists to collect the images and measurements\n images = []\n measurements = []\n \n # Set up the path to the data files \n data_sets_path = 'data'\n data_sets = [os.path.join(data_sets_path, i) for i\n in os.listdir(data_sets_path)]\n \n # Step through the data folders and collect the images\n # and the steering angles\n for data_set in data_sets:\n lines = []\n \n # Open up the csv file of image paths and steering angles\n with open(os.path.join(data_set,\n 'driving_log.csv')) as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n lines.append(line)\n for line in lines:\n source_path = line[0]\n filename = source_path.split('\\\\')[-1]\n current_path = os.path.join(data_set, 'IMG',\n filename)\n \n # Import each image and change it to RGB\n BGR_image = cv2.imread(current_path)\n image = cv2.cvtColor(BGR_image, cv2.COLOR_BGR2RGB)\n rows, cols, depth = image.shape\n flipped_image = cv2.flip(image, 1)\n \n # Create a scaled version of the image\n scale = [0.9, 1.1]\n zoomfactor = random.choice(scale)\n scale_matrix = cv2.getRotationMatrix2D((cols/2, rows/2),\n 0, zoomfactor)\n scaled_image = cv2.warpAffine(image, scale_matrix,\n (cols, rows))\n\n # Append the images to the image list\n images.append(image)\n images.append(scaled_image)\n images.append(flipped_image)\n \n # Append the steering angle to the measurements list\n measurement = float(line[3])\n measurements.append(measurement)\n measurements.append(measurement)\n measurements.append(-1*measurement)\n \n return images, measurements",
"def create_GT_masks(root_dir, background_dir, intrinsic_matrix,classes):\n list_all_images = load_obj(root_dir + \"all_images_adr\")\n training_images_idx = load_obj(root_dir + \"train_images_indices\")\n for i in range(len(training_images_idx)):\n img_adr = list_all_images[training_images_idx[i]]\n label = os.path.split(os.path.split(os.path.dirname(img_adr))[0])[1]\n regex = re.compile(r'\\d+')\n idx = regex.findall(os.path.split(img_adr)[1])[0]\n\n if i % 1000 == 0:\n print(str(i) + \"/\" + str(len(training_images_idx)) + \" finished!\")\n\n image = cv2.imread(img_adr)\n ID_mask = np.zeros((image.shape[0], image.shape[1]))\n U_mask = np.zeros((image.shape[0], image.shape[1]))\n V_mask = np.zeros((image.shape[0], image.shape[1]))\n\n ID_mask_file = root_dir + label + \\\n \"/ground_truth/IDmasks/color\" + str(idx) + \".png\"\n U_mask_file = root_dir + label + \\\n \"/ground_truth/Umasks/color\" + str(idx) + \".png\"\n V_mask_file = root_dir + label + \\\n \"/ground_truth/Vmasks/color\" + str(idx) + \".png\"\n\n tra_adr = root_dir + label + \"/data/tra\" + str(idx) + \".tra\"\n rot_adr = root_dir + label + \"/data/rot\" + str(idx) + \".rot\"\n rigid_transformation = get_rot_tra(rot_adr, tra_adr)\n\n # Read point Point Cloud Data\n ptcld_file = root_dir + label + \"/object.xyz\"\n pt_cld_data = np.loadtxt(ptcld_file, skiprows=1, usecols=(0, 1, 2))\n ones = np.ones((pt_cld_data.shape[0], 1))\n homogenous_coordinate = np.append(pt_cld_data[:, :3], ones, axis=1)\n\n # Perspective Projection to obtain 2D coordinates for masks\n homogenous_2D = intrinsic_matrix @ (rigid_transformation @ homogenous_coordinate.T)\n coord_2D = homogenous_2D[:2, :] / homogenous_2D[2, :]\n coord_2D = ((np.floor(coord_2D)).T).astype(int)\n x_2d = np.clip(coord_2D[:, 0], 0, 639)\n y_2d = np.clip(coord_2D[:, 1], 0, 479)\n ID_mask[y_2d, x_2d] = classes[label]\n\n if i % 100 != 0: # change background for every 99/100 images\n background_img_adr = background_dir + random.choice(os.listdir(background_dir))\n background_img = cv2.imread(background_img_adr)\n background_img = cv2.resize(background_img, (image.shape[1], image.shape[0]), interpolation=cv2.INTER_AREA)\n background_img[y_2d, x_2d, :] = image[y_2d, x_2d, :]\n background_adr = root_dir + label + \"/changed_background/color\" + str(idx) + \".png\"\n mpimg.imsave(background_adr, background_img)\n\n # Generate Ground Truth UV Maps\n centre = np.mean(pt_cld_data, axis=0)\n length = np.sqrt((centre[0]-pt_cld_data[:, 0])**2 + (centre[1] -\n pt_cld_data[:, 1])**2 + (centre[2]-pt_cld_data[:, 2])**2)\n unit_vector = [(pt_cld_data[:, 0]-centre[0])/length, (pt_cld_data[:,\n 1]-centre[1])/length, (pt_cld_data[:, 2]-centre[2])/length]\n U = 0.5 + (np.arctan2(unit_vector[2], unit_vector[0])/(2*np.pi))\n V = 0.5 - (np.arcsin(unit_vector[1])/np.pi)\n U_mask[y_2d, x_2d] = U\n V_mask[y_2d, x_2d] = V\n\n # Saving ID, U and V masks after using the fill holes function\n ID_mask, U_mask, V_mask = fill_holes(ID_mask, U_mask, V_mask)\n cv2.imwrite(ID_mask_file, ID_mask)\n mpimg.imsave(U_mask_file, U_mask, cmap='gray')\n mpimg.imsave(V_mask_file, V_mask, cmap='gray')",
"def get_files(self):\n train_images = glob(os.path.join(self.images_dir, '*%s' % self.im_extension)) \n train_labels = [x.replace(self.im_extension, '.npy').replace('images', 'groundTruth') for x in train_images]\n val_images = glob(os.path.join(self.val_images_dir, '*%s' % self.im_extension))\n val_labels = [x.replace(self.im_extension, '.npy').replace('images', 'groundTruth') for x in val_images]\n train_images = np.array(train_images)\n train_labels = np.array(train_labels)\n val_images = np.array(val_images)\n val_labels = np.array(val_labels)\n test_images = np.array(\n glob('/media/data_cifs/pytorch_projects/datasets/BSDS500_crops/data/images/test_nocrop/*.jpg'))\n test_labels = np.array(\n [x.replace('images', 'groundTruth').replace('.jpg', '.npy') for x in test_images])\n test_labels = np.array(\n [np.load(x) for x in test_labels])\n keep_idx = np.array([True if x.shape[0] > x.shape[1] else False for x in test_labels])\n test_images = test_images[keep_idx]\n test_labels = test_labels[keep_idx]\n test_images = np.stack([misc.imread(x) for x in test_images], 0)\n test_labels = np.stack(test_labels, 0)\n test_labels = test_labels[..., None]\n\n # Add constant padding to bottom/right\n if self.pad:\n test_images = util.pad(test_images, ((0, 0), (self.pad // 2, self.pad - self.pad // 2), (self.pad // 2, self.pad - self.pad // 2), (0, 0)), mode='linear_ramp')\n test_labels = util.pad(test_labels, ((0, 0), (self.pad // 2, self.pad - self.pad // 2), (self.pad // 2, self.pad - self.pad // 2), (0, 0)), mode='constant', constant_values=0)\n\n # Select images for training\n sort_idx = np.argsort(train_images)\n train_images = train_images[sort_idx[:self.train_size]]\n train_labels = train_labels[sort_idx[:self.train_size]]\n\n # Build CV dict\n cv_files, cv_labels = {}, {}\n cv_files[self.folds['train']] = train_images\n cv_files[self.folds['val']] = val_images\n cv_files[self.folds['test']] = test_images\n cv_labels[self.folds['train']] = train_labels\n cv_labels[self.folds['val']] = val_labels\n cv_labels[self.folds['test']] = test_labels\n return cv_files, cv_labels",
"def processImage(fpaths_src, label_map, fnames_src, img_idx):\n global counter\n \n n_imgs = len(fpaths_src)\n print(\"Processing %s -- %s/%s (%s%%)\"%(fnames_src[img_idx],counter,n_imgs,round(100.*counter/n_imgs)))\n \n path = fpaths_src[img_idx]\n src_image_raw = Image.open(path, 'r')\n \n # size normalization of the image\n if not (args.resize == None):\n src_image_raw = src_image_raw.resize(size=(int(args.resize), int(args.resize)), resample=Image.BILINEAR)\n \n # convert to writable numpy array\n src_image = np.asarray(src_image_raw, dtype=np.uint8)\n src_image.setflags(write=True)\n \n # some dummy label\n label = -99.99\n # the labels\n if not (label_map == {}):\n # let the label start at 1, instead of 0\n label = int(label_map[fnames_src[img_idx]])+1\n else:\n # add a dummy label (between 0 and 1)\n label = np.random.rand()\n \n image_features = []\n \n # add the original\n image_features.append(generateFeatures(src_image,label,args.knn))\n \n if args.augment == 1:\n print \"Augmenting dataset...\"\n # data augmentation techniques\n rotation_angles = [i for i in xrange(36,360,36)] # samples are transformed by these rotation angles\n \n flip_x = True # data augmentation by flipping around x axis\n flip_y = True # data augmentation by flipping around y axis\n flip_xy= True # data augmentation by flipping around x AND y axis\n \n for angle in rotation_angles:\n rot_matrix = cv2.getRotationMatrix2D(\n (src_image.shape[1]/2.,src_image.shape[0]/2.),\n angle,\n 1.0)\n rot_sample_crop = np.array([])\n rot_sample_crop = cv2.warpAffine(src_image,\n rot_matrix,\n (src_image.shape[1],src_image.shape[0]),\n rot_sample_crop,\n cv2.INTER_LINEAR,\n cv2.BORDER_REFLECT_101)\n \n # add the sample to the dataset\n image_features.append(generateFeatures(rot_sample_crop,label,args.knn))\n \n # add 3 flipped copies\n if flip_x:\n rot_sample_crop_x = cv2.flip(rot_sample_crop,0)\n image_features.append(generateFeatures(rot_sample_crop_x,label,args.knn))\n if flip_y:\n rot_sample_crop_y = cv2.flip(rot_sample_crop,1)\n image_features.append(generateFeatures(rot_sample_crop_y,label,args.knn))\n if flip_xy:\n rot_sample_crop_xy = cv2.flip(rot_sample_crop,-1)\n image_features.append(generateFeatures(rot_sample_crop_xy,label,args.knn))\n \n counter+=1\n\n # return a nx128 or nxk matrix for the features of all modifications of this image\n feat_matrix = np.asarray(image_features)\n return feat_matrix",
"def process(self, step_guess_orientation=True, step_advanced_alignement=True,\n step_gen_worldfiles=True, step_load_worldfiles=True,\n step_gen_vrts=True, step_load_vrts=True,\n step_load_debug=True ):\n\n QgsMessageLog.logMessage(\"1/ Instantiating all images...\", \"QuickDroneMap\", 0)\n for root, dirs, files in os.walk(self.folder):\n for file in files:\n if file.endswith(\".jpg\") or file.endswith(\".JPG\"):\n image_path = os.path.join(root, file)\n image = Image(self, image_path)\n self.images.append(image)\n self.images = self.images[70:90]\n # for i in [301,300,329]: # 3 images, transform fails on all of them\n # for i in [397,398,364]: # 3 images, transform fails on one of them\n # for i in [377,380,381]: # 3 images, transform works on all of them\n # path = \"C:\\\\Users\\\\Olivier\\\\Dropbox\\\\Affaires\\\\SPC\\\\Sources\\\\quickdronemap\\\\test\\\\data\\\\DJI_{0:04d}.JPG\".format(i)\n # self.images.append(Image(self, path))\n\n QgsMessageLog.logMessage(\"2/ Assigning ids\", \"QuickDroneMap\", 0)\n for i, image in enumerate(self.images):\n image.id = i\n\n\n QgsMessageLog.logMessage(\"2/ Loading image attributes and parsing exif tags...\", \"QuickDroneMap\", 0)\n for image in self.images:\n image.set_attributes()\n\n if step_guess_orientation:\n QgsMessageLog.logMessage(\"3/ Building image sequences...\", \"QuickDroneMap\", 0)\n sorted_images = sorted(self.images, key=lambda x: x.timestamp)\n for i in range(len(sorted_images)):\n\n prev_image = sorted_images[i-1] if i>0 else None\n image = sorted_images[i]\n next_image = sorted_images[i+1] if i<len(sorted_images)-1 else None\n\n if prev_image is None or next_image is None:\n continue\n\n angle_p_i = math.atan2(image.point.x()-prev_image.point.x(),-image.point.y()+prev_image.point.y())\n angle_i_n = math.atan2(next_image.point.x()-image.point.x(),-next_image.point.y()+image.point.y())\n\n # Checking if the three images are aligned (if not, we're probably at an angle)\n dA = absolute_angle_difference(angle_p_i, angle_i_n)\n if dA > ANGLE_THRESHOLD:\n continue\n\n # Checking if the three images are near enough timewise, if not, it could be separate flights\n dT1 = image.timestamp - prev_image.timestamp\n dT2 = next_image.timestamp - image.timestamp\n if dT1 > TIME_THRESHOLD or dT2 > TIME_THRESHOLD:\n continue\n\n prev_image.next_image = image\n image.prev_image = prev_image\n image.next_image = next_image\n next_image.prev_image = image\n\n QgsMessageLog.logMessage(\"4/ Deriving orientation from image sequence\", \"QuickDroneMap\", 0)\n for image in self.images:\n # if the direction wasn't set in the Exif tags, we derive it from the image sequences\n if image.direction is None:\n img_a = image.prev_image or image \n img_b = image.next_image or image\n image.angle = math.atan2(img_b.point.x()-img_a.point.x(),-img_b.point.y()+img_a.point.y())\n\n if step_advanced_alignement:\n QgsMessageLog.logMessage(\"5/ Building image neighbourhood graph...\", \"QuickDroneMap\", 0)\n from scipy.spatial import Delaunay\n points = [(i.point.x(),i.point.y()) for i in self.images]\n triangulation = Delaunay(points)\n\n done = [[False for _i2 in self.images] for _i1 in self.images]\n for tri in triangulation.simplices:\n i1,i2,i3 = tri\n if not done[i1][i2]:\n e = Edge(self.images[i1], self.images[i2])\n self.edges.append(e)\n self.images[i1].edges.append(e)\n self.images[i2].edges.append(e)\n done[i1][i2] = True\n if not done[i1][i3]:\n e = Edge(self.images[i1], self.images[i3])\n self.edges.append(e)\n self.images[i1].edges.append(e)\n 
self.images[i3].edges.append(e)\n done[i1][i3] = True\n if not done[i2][i3]:\n e = Edge(self.images[i2], self.images[i3])\n self.edges.append(e)\n self.images[i2].edges.append(e)\n self.images[i3].edges.append(e)\n done[i2][i3] = True\n\n QgsMessageLog.logMessage(\"6/ Computing similarities\", \"QuickDroneMap\", 0)\n for i, edge in enumerate(self.edges):\n QgsMessageLog.logMessage(\"Done {} out of {}\".format(i,len(self.edges)), \"QuickDroneMap\", 0)\n QApplication.processEvents()\n edge.compute_transform()\n\n # initial_guess_np, _ = self.get_initial_values_and_bounds()\n # QgsMessageLog.logMessage(\"Initial fitness is {}\".format(self.calculate_fitness(initial_guess_np)), \"QuickDroneMap\", 0)\n\n # print(\"TESTING QUALITY OF SIMILARITY (disable optimization to do this)\")\n # done = []\n # edges_to_delete = []\n # for edge in self.edges:\n # QApplication.processEvents()\n\n # if edge.imageA in done or edge.imageB in done:\n # edges_to_delete.append(edge)\n # continue\n\n # done.append(edge.imageA)\n # done.append(edge.imageB)\n\n # d_angle = edge.angle\n # edge.imageB.angle = edge.imageA.angle + d_angle\n\n # f_scale = edge.scale\n # edge.imageB.scale = edge.imageA.scale * f_scale\n\n # d_point = QgsPointXY(edge.tvec[0],edge.tvec[1])\n # d_point = d_point.rotated(edge.imageA.angle)\n # d_point *= edge.imageA.pixel_size/DOWNSCALING_FACTOR\n # edge.imageB.point = edge.imageA.point + d_point\n # for edge in edges_to_delete:\n # self.edges.remove(edge)\n\n\n # print(\"AFTER PROTOTYPE PLACEMENT\")\n # initial_guess_np, _ = self.get_initial_values_and_bounds()\n # self.calculate_fitness(initial_guess_np)\n\n\n QgsMessageLog.logMessage(\"7/ Optimizing\", \"QuickDroneMap\", 0)\n QApplication.processEvents()\n\n initial_guess_np, bounds = self.get_initial_values_and_bounds() \n # res_1 = least_squares(calculate_fitness, initial_guess_np, bounds=([b[0] for b in bounds],[b[1] for b in bounds]))\n res_1 = minimize(self.calculate_fitness, initial_guess_np, bounds=bounds)\n\n for image in self.images:\n px = res_1.x[image.id*4+0]\n py = res_1.x[image.id*4+1]\n pa = res_1.x[image.id*4+2]\n ps = res_1.x[image.id*4+3]\n image.point = QgsPointXY(px, py)\n image.angle = pa\n image.psize = ps\n\n initial_guess_np, _ = self.get_initial_values_and_bounds()\n QgsMessageLog.logMessage(\"After optimization fitness is {}\".format(self.calculate_fitness(initial_guess_np)), \"QuickDroneMap\", 0)\n \n QgsMessageLog.logMessage(\"8/ Computing all transforms...\", \"QuickDroneMap\", 0)\n for image in self.images:\n image.update_transform()\n\n if step_gen_worldfiles:\n QgsMessageLog.logMessage(\"9a/ Creating and loading worldfiles\", \"QuickDroneMap\", 0)\n for image in self.images:\n image.write_worldfile()\n if step_load_worldfiles:\n image.load_worldfile(self.iface)\n\n if step_gen_vrts:\n QgsMessageLog.logMessage(\"9b/ Creating and loading vrts\", \"QuickDroneMap\", 0)\n for image in self.images:\n image.write_vrt()\n if step_load_vrts:\n image.load_vrt(self.iface)\n\n if step_load_debug:\n QgsMessageLog.logMessage(\"10/ Creating debug jsons files\", \"QuickDroneMap\", 0)\n edg_data = {\"type\": \"FeatureCollection\",\"features\": [], \"crs\": {\"type\": \"EPSG\",\"properties\": {\"code\": 32628}}} # TODO : use self.crs\n for edge in self.edges:\n coords = [[edge.imageA.point.x(), edge.imageA.point.y()],[edge.imageB.point.x(), edge.imageB.point.y()]]\n props = {k:v for (k,v) in vars(edge).items()}\n props['angle_a'] = edge.imageA.angle\n props['angle_b'] = edge.imageB.angle\n feature = {\"type\": 
\"Feature\",\"properties\": props,\"geometry\": {\"type\": \"LineString\",\"coordinates\": coords}}\n edg_data['features'].append(feature)\n \n edg_file = tempfile.NamedTemporaryFile(mode='w+', suffix='.geojson', delete=False)\n json.dump(edg_data, edg_file, default=lambda o: str(o))\n edg_file.close()\n layer = self.iface.addVectorLayer(edg_file.name,\"[DEBUG] Edges\",\"ogr\")\n layer.loadNamedStyle(os.path.join(os.path.dirname(os.path.realpath(__file__)),'debug_edges_style.qml'))\n \n graph_data = {\"type\": \"FeatureCollection\",\"features\": [], \"crs\": {\"type\": \"EPSG\",\"properties\": {\"code\": 4326}}} # TODO : use self.crs\n for edge in self.edges:\n coords = [[edge.imageA.lon, edge.imageA.lat],[edge.imageB.lon, edge.imageB.lat]]\n props = {k:v for (k,v) in vars(edge).items()}\n feature = {\"type\": \"Feature\",\"properties\": props,\"geometry\": {\"type\": \"LineString\",\"coordinates\": coords}}\n graph_data['features'].append(feature)\n\n graph_file = tempfile.NamedTemporaryFile(mode='w+', suffix='.geojson', delete=False)\n json.dump(graph_data, graph_file, default=lambda o: str(o))\n graph_file.close()\n layer = self.iface.addVectorLayer(graph_file.name,\"[DEBUG] Graph\",\"ogr\")\n layer.loadNamedStyle(os.path.join(os.path.dirname(os.path.realpath(__file__)),'debug_graph_style.qml'))",
"def main():\n # Placing imports here so it will be imported only if user want to test algorithm, not when importing\n # Class DepthCameraServer\n\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n import sensors_classes as sensors\n from images_processing_class import ImagesProcessing\n import struct\n import time\n\n # Starting Thread which receives data from VideoCamera, port od thread's socket must be the same as the port at\n # which data from VideoCamera is redirected, to be sure check where VideoCamera data stream is send in script env.py\n depth_camera_server = DepthCameraServer('localhost', 60012)\n depth_camera_server.run()\n\n pose_server = sensors.Pose_server('localhost', 60007)\n pose_server.run()\n\n # Waiting 1 sec to be sure than depth_camera_server has received minimum 1 image, because program will crash if\n # depth_camera_server doesn't have time to receive an image\n time.sleep(1)\n\n points = depth_camera_server.get_points()\n\n lista_punktow = []\n x = []\n y = []\n z = []\n\n data_pose_dict = pose_server.get_all()\n pose_x = data_pose_dict['x']\n pose_y = data_pose_dict['y']\n pose_z = data_pose_dict['z']\n\n yawp = data_pose_dict['yaw']\n pitchp = data_pose_dict['pitch']\n rollp = data_pose_dict['roll']\n\n # Each 3D point is a set of float(x,y,z). Each point has a size of 12 bytes because\n # 3*sizeof(float) = 12 bytes, that's why we are dividing data into parts with size of 12 and then\n # converting this data to tuple with 3 float (xyz).\n\n #\n # Processing cloud of points to seperate x, y and z was copied from dcam_old.py\n #\n\n for i in range(0, len(points) - 12, 12):\n xyz = struct.unpack('fff', points[i:i + 12])\n\n # rotation is included\n x1p, y1p, z1p = rotation(xyz[2], xyz[0], xyz[1], yawp, pitchp, rollp)\n\n # data from pose is included\n xp = round(x1p + pose_x, 1)\n yp = round(y1p + pose_y, 1)\n zp = round(z1p + pose_z, 1)\n temp = [xp, yp, zp]\n lista_punktow.append(temp)\n\n # Choosing only these points which have minimum 0.45 meters at z-axis, but why???\n for i in lista_punktow:\n x.append(i[0])\n y.append(i[1])\n z.append(i[2])\n\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.scatter(x, y, z, cmap='viridis', linewidth=0.5)\n ax.scatter(x[0], y[0], z[0], c='red')\n ax.scatter(x[1], y[1], z[1], c='yellow')\n ax.scatter(x[2], y[2], z[2], c='black')\n ax.scatter(pose_x, pose_y, pose_z, c='green')\n plt.show()",
"def extract_data(filename, num_images, starting_id, context_factor):\n imgs = []\n for i in range(starting_id, num_images+starting_id):\n imageid = \"satImage_%.3d\" % i\n image_filename = filename + imageid + \".png\"\n if os.path.isfile(image_filename):\n print ('Loading ' + image_filename)\n img = mpimg.imread(image_filename)\n\n\n imgs.append(img)\n else:\n print ('File ' + image_filename + ' does not exist')\n\n num_images = len(imgs)\n IMG_WIDTH = int(imgs[0].shape[0]/DOWNSCALE)\n IMG_HEIGHT = int(imgs[0].shape[1]/DOWNSCALE)\n N_PATCHES_PER_IMAGE = (IMG_WIDTH/IMG_PATCH_SIZE)*(IMG_HEIGHT/IMG_PATCH_SIZE)\n\n\n img_patches = [img_crop_context(imgs[i], IMG_PATCH_SIZE, IMG_PATCH_SIZE,context_factor, sub_mean=True) for i in range(num_images)]\n data = [img_patches[i][j] for i in range(len(img_patches)) for j in range(len(img_patches[i]))]\n data = np.asarray(data)\n return data"
]
| [
"0.6201955",
"0.6036239",
"0.6035186",
"0.5952722",
"0.5932819",
"0.5930254",
"0.59231067",
"0.5875409",
"0.586604",
"0.5855777",
"0.58320963",
"0.58187795",
"0.5815014",
"0.57761997",
"0.57627916",
"0.5759528",
"0.57473373",
"0.5740311",
"0.57236683",
"0.5693929",
"0.5691488",
"0.56874454",
"0.56788695",
"0.56701803",
"0.56659126",
"0.56647635",
"0.56612146",
"0.56459194",
"0.56353724",
"0.56186163"
]
| 0.7702474 | 0 |
Scales a cubic image to a certain number of voxels. This function relies on scipy's ndimage.zoom function | def scale(self, size=128):
scale_factor = size / max(self.voxels.shape)
self.voxels = ndimage.zoom(self.voxels, scale_factor)
self.point_position = self.point_position * scale_factor
self.voxel_size = False # Set to False so the stored voxel size is ignored after rescaling
return self | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def resize3D(img, target_size, bspline_order=3, mode='constant'): \n # compute zoom values\n target_size = np.array(target_size, dtype=float)\n image_shape = np.array(img.shape, dtype=float)\n zoom_factors = np.divide(target_size,image_shape)\n print \"Target Size\"\n print target_size\n \n\n print \"Zoom Factors\"\n print zoom_factors\n\n \n # zoom image\n img = zoom(img, zoom_factors, order=bspline_order, mode=mode)\n\n print \"image_shape\"\n print img.shape\n\n return img",
"def scale_image(self, pixels, size):\n x_min, x_max = np.amin(pixels[:,0]), np.amax(pixels[:,0])\n y_min, y_max = np.amin(pixels[:,1]), np.amax(pixels[:,1])\n z_min, z_max = np.amin(pixels[:,2]), np.amax(pixels[:,2])\n \n pixels[:,0] -= x_min \n pixels[:,1] -= y_min\n pixels[:,2] -= z_min\n \n x_max -= x_min\n y_max -= y_min\n z_max -= z_min\n \n scale_factor = size / max(x_max, y_max, z_max) \n # All points are now between [0..max]\n\n pixels *= scale_factor\n return pixels",
"def scale(volume, voxel_dim, expansion, objective_factor,\n pixel_size, focal_plane_depth, **kwargs):\n xy_step = float(pixel_size) / (voxel_dim[1] * expansion * objective_factor)\n #This removes rounding artifacts, by binning with an integer number of pixels\n if xy_step < 1:\n xy_scale = 1.0 / xy_step\n xy_scale = np.round(xy_scale)\n print \"Warning: the ground truth resolution is too low to resolve the volume with the desired expansion. Attempting a work around.\"\n else:\n xy_step = np.round(xy_step)\n xy_scale = 1.0 / xy_step\n z_scale = voxel_dim[0] * expansion / float(focal_plane_depth)\n z_step = np.round(1.0 / z_scale).astype(np.int)\n out = []\n for i in range(0, volume.shape[0], z_step):\n X, Y = np.nonzero(volume[i, :, :])\n values = volume[i, X, Y]\n #Rescale and round\n X = np.floor(xy_scale * X).astype(np.int64)\n Y = np.floor(xy_scale * Y).astype(np.int64)\n #Create new image\n d, w, h = np.ceil(np.array(volume.shape) * xy_scale)\n im = np.zeros((int(w), int(h)), np.uint32)\n #Adding poisson if the volume is expanded, to avoid grid-like images\n if xy_scale > 1:\n X = np.clip(X + np.random.poisson(int(xy_scale), size = len(X)), 0, w - 1)\n Y = np.clip(Y + np.random.poisson(int(xy_scale), size = len(Y)), 0, h - 1)\n #This allows to add to repetition of the same index\n np.add.at(im, (X.astype(np.uint64), Y.astype(np.uint64)), values)\n out.append(im)\n return np.array(out)",
"def resize(self, **kwargs):\n\n if self.image is None:\n raise ValueError('self.image is None! The image has to be initialized!')\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n self.image = ndimage.interpolation.zoom(self.image * 1., **kwargs)\n\n # if size <= 3, pad with zeros\n\n if np.min(self.image.shape) < 5:\n self.image = np.pad(self.image, pad_width=3, mode='constant', constant_values=0)\n\n if self.image.max() > 0:\n self.image = rescale_intensity(self.image, out_range=(0, 255))\n\n if 'Voxel size x' in self.metadata.index and 'Voxel size y' in self.metadata.index \\\n and 'Voxel size z' in self.metadata.index:\n new_voxel_size = np.array([self.metadata['Voxel size z'], self.metadata['Voxel size y'],\n self.metadata['Voxel size x']]) / kwargs['zoom']\n self.metadata['Voxel size'] = str(new_voxel_size)\n self.metadata['Voxel size z'], self.metadata['Voxel size y'], self.metadata['Voxel size x'] = new_voxel_size\n\n return self.image",
"def zoom(cls, img, zoom):\n w, h = img.size\n x = h / 2\n y = w / 2\n zoom2 = zoom * 2\n img = img.crop((x - w / zoom2, y - h / zoom2,\n x + w / zoom2, y + h / zoom2))\n return img.resize((w, h), Image.LANCZOS)",
"def clipped_zoom(img, zoom_factor, **kwargs):\n\n h, w = img.shape[:2]\n\n # width and height of the zoomed image\n zh = int(np.round(zoom_factor * h))\n zw = int(np.round(zoom_factor * w))\n\n # for multichannel images we don't want to apply the zoom factor to the RGB\n # dimension, so instead we create a tuple of zoom factors, one per array\n # dimension, with 1's for any trailing dimensions after the width and height.\n zoom_tuple = (zoom_factor,) * 2 + (1,) * (img.ndim - 2)\n\n # zooming out\n if zoom_factor < 1:\n # bounding box of the clip region within the output array\n top = (h - zh) // 2\n left = (w - zw) // 2\n # zero-padding\n out = np.zeros_like(img)\n out[top:top+zh, left:left+zw] = scipy.ndimage.zoom(img, zoom_tuple, **kwargs)\n\n # zooming in\n elif zoom_factor > 1:\n # bounding box of the clip region within the input array\n top = (zh - h) // 2\n left = (zw - w) // 2\n out = scipy.ndimage.zoom(img[top:top+zh, left:left+zw], zoom_tuple, **kwargs)\n # `out` might still be slightly larger than `img` due to rounding, so\n # trim off any extra pixels at the edges\n trim_top = ((out.shape[0] - h) // 2)\n trim_left = ((out.shape[1] - w) // 2)\n out = out[trim_top:trim_top+h, trim_left:trim_left+w]\n\n # if zoom_factor == 1, just return the input array\n else:\n out = img\n return out",
"def cv2_clipped_zoom(img, zoom_factor):\r\n height, width = img.shape[:2] # It's also the final desired shape\r\n new_height, new_width = int(height * zoom_factor), int(width * zoom_factor)\r\n\r\n ### Crop only the part that will remain in the result (more efficient)\r\n # Centered bbox of the final desired size in resized (larger/smaller) image coordinates\r\n y1, x1 = max(0, new_height - height) // 2, max(0, new_width - width) // 2\r\n y2, x2 = y1 + height, x1 + width\r\n bbox = np.array([y1,x1,y2,x2])\r\n # Map back to original image coordinates\r\n bbox = (bbox / zoom_factor).astype(np.int)\r\n y1, x1, y2, x2 = bbox\r\n cropped_img = img[y1:y2, x1:x2]\r\n\r\n # Handle padding when downscaling\r\n resize_height, resize_width = min(new_height, height), min(new_width, width)\r\n pad_height1, pad_width1 = (height - resize_height) // 2, (width - resize_width) //2\r\n pad_height2, pad_width2 = (height - resize_height) - pad_height1, (width - resize_width) - pad_width1\r\n pad_spec = [(pad_height1, pad_height2), (pad_width1, pad_width2)] + [(0,0)] * (img.ndim - 2)\r\n\r\n result = cv2.resize(cropped_img, (resize_width, resize_height))\r\n result = np.pad(result, pad_spec, mode='constant')#,constant_values=105)\r\n assert result.shape[0] == height and result.shape[1] == width\r\n return result",
"def scale(self, x, y, z) -> None:\n ...",
"def zoom(x, zoom_range=(0.9, 1.1), flags=None, border_mode='constant'):\n zoom_matrix = affine_zoom_matrix(zoom_range=zoom_range)\n h, w = x.shape[0], x.shape[1]\n transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)\n x = affine_transform_cv2(x, transform_matrix, flags=flags, border_mode=border_mode)\n return x",
"def zoom_to_size(self, *p):\n\t\tif self.image is None or self.allocation is None:\n\t\t\treturn\n\t\tif __debug__: print self.allocation.width, self.image.get_width()\n\t\tif __debug__: print self.allocation.width, self.image.get_width(), self.allocation.width/self.image.get_width()\n\t\tz = min(\n\t\t\tself.allocation.width/self.image.get_width(),\n\t\t\tself.allocation.height/self.image.get_height()\n\t\t\t)\n\t\tif __debug__: print \"zoom_to_size\", \"z=\", z\n\t\tself.zoom = z",
"def zoom_augmentation():\n # Get the width and the height of the zoomed version\n x_len, y_len = np.random.randint(250, 350, size=2)\n # Get left upper ,right and lower bound of the pixels in the original image\n left = np.random.randint(x_size-x_len)\n upper = np.random.randint(y_size-y_len)\n right, lower = left + x_len, upper+y_len\n # Crops the box and resizes it to the original image size\n box = (left, upper, right, lower)\n return lambda image: image.transform(image.size, Image.EXTENT, box)",
"def zoom_pxl_replication(img,z_f=1):\n\n try:\n m,n = img.shape\n\n new_img = np.zeros((z_f*m,z_f*n),dtype=int)\n\n for i,j in it.product(range(m),range(n)):\n new_img[z_f*i:z_f*i+z_f,z_f*j:z_f*j+z_f] = np.ones((z_f,z_f),dtype=int) * img[i,j]\n\n\n except:\n m,n,_ = img.shape\n\n new_img = np.zeros((z_f*m,z_f*n,3),dtype=int)\n\n for i,j in it.product(range(m),range(n)):\n new_img[z_f*i:z_f*i+z_f,z_f*j:z_f*j+z_f,0] = np.ones((z_f,z_f),dtype=int) * img[i,j,0]\n new_img[z_f*i:z_f*i+z_f,z_f*j:z_f*j+z_f,1] = np.ones((z_f,z_f),dtype=int) * img[i,j,1]\n new_img[z_f*i:z_f*i+z_f,z_f*j:z_f*j+z_f,2] = np.ones((z_f,z_f),dtype=int) * img[i,j,2]\n\n return new_img",
"def resize_volume(img):\n desired_depth = 64\n desired_width = 128\n desired_height = 128\n\n current_depth = img.shape[-1]\n current_width = img.shape[0]\n current_height = img.shape[1]\n\n depth = current_depth / desired_depth\n width = current_width / desired_width\n height = current_height / desired_height\n\n depth_factor = 1 / depth\n width_factor = 1 / width\n height_factor = 1 / height\n\n img = ndimage.rotate(input=img, angle=90, reshape=False)\n img = ndimage.zoom(input=img, zoom=(width_factor, height_factor, depth_factor), order=1)\n\n return img",
"def scale_zoom(x, start, end):\n length = len(x)\n start_index = int(np.round(length * start))\n end_index = int(np.round(length * end))\n if start_index >= end_index:\n if start_index <= 3:\n start_index = 0\n end_index = 3\n else:\n start_index = end_index - 3\n return normalize_1d(x[start_index:end_index])",
"def zoom_k(img,k=1):\n\n try:\n\n m,n = img.shape\n\n tmp1 = np.zeros((m,n*k-k+1),dtype=float)\n\n for i in range(n-1):\n tmp1[:,k*i:k*i+k+1] = np.linspace(img[:,i],img[:,i+1],k+1,axis=1)\n\n tmp2 = np.zeros((m*k-k+1,n*k-k+1),dtype=float)\n\n for j in range(m-1):\n tmp2[k*j:k*j+k+1,:] = np.linspace(tmp1[j,:],tmp1[j+1,:],k+1,axis=0)\n\n except:\n\n m,n,_ = img.shape\n\n tmp1 = np.zeros((m,n*k-k+1,3),dtype=float)\n\n for i in range(n-1):\n tmp1[:,k*i:k*i+k+1,:] = np.linspace(img[:,i,:],img[:,i+1,:],k+1,axis=1)\n\n tmp2 = np.zeros((m*k-k+1,n*k-k+1,3),dtype=float)\n\n for j in range(m-1):\n tmp2[k*j:k*j+k+1,:,:] = np.linspace(tmp1[j,:,:],tmp1[j+1,:,:],k+1,axis=0)\n\n\n return tmp2",
"def scaleZoomed(self, bx, by):\n ix, iy = self._image.get_size()\n if ix > iy:\n # fit to width\n scale = bx/float(iy)\n sy = scale * ix\n if sy > by:\n scale = by/float(iy)\n sx = scale * ix\n sy = by\n else:\n sx = bx\n else:\n # fit to height\n scale = by/float(ix)\n sx = scale * iy\n if sx > bx:\n scale = bx/float(ix)\n sx = bx\n sy = scale * iy\n else:\n sy = by\n\n self._image = pygame.transform.scale(self._image, (int(sx), int(sy)))",
"def scale_image(img, factor=1):\n return cv2.resize(img, (int(img.shape[1] * factor), int(img.shape[0] * factor)))",
"def _resize(img, max_dim=128):\n if max(img.shape[:3]) <= max_dim:\n return img\n else:\n new_size = [max_dim / s if s >= max_dim else 1.0 for s in img.shape[:3]]\n new_size.append(1.0) # for channel\n return scipy.ndimage.zoom(img, new_size, order=2)",
"def zoom(self, newsize, order=1):\n try:\n Nx2, Ny2 = newsize\n except TypeError:\n Nx2 = Ny2 = newsize\n zoom = ((Ny2+0.1)/self.Ny, (Nx2+0.1)/self.Nx)\n if abs(zoom[0] - zoom[1]) > 1e-3:\n raise RuntimeError(\"image aspect ratio cannot be changed\")\n\n pixelsize_old = self.pixelsize\n self.image = ndimage.zoom(self.image, zoom=zoom, order=order)\n self.pixelsize = pixelsize_old * (self.Nx / Nx2)\n return self.image",
"def zoom(self, factor):\n self._transform(\n [\n [factor, 0, 0],\n [0, factor, 0],\n [0, 0, factor],\n ])",
"def scale(self,factor):\n for x in range(len(self.coord)):\n self.coord[x] = np.array([y*factor for y in self.coord[x]])\n return self",
"def zoom(self, factor):\n adj = self.canvas.get_hadjustment()\n oldCenter = adj.value + adj.page_size // 2\n\n self.scale *= factor\n self.resizer.rescale()\n self.resize(self.timeExtent * self.scale, self.height)\n for f in self.resizeCallbacks:\n f()\n\n adj.value = oldCenter * factor - adj.page_size // 2",
"def scale_multidimensional(image: _ImageDataType, scaled_size: ShapeType) -> _ImageDataType:\n # we make a list of slice objects like [0:image_x-1:scaled_size_x*1j]\n # this will give us scaled_size_x equal points between 0 and image_x-1\n slices = [slice(0, x-1, y*1j) for x, y in zip(image.shape, scaled_size)]\n # we pass slices into ogrid, to gives us vectors for each dimension\n # ogrid returns a list of floating numbers if we use complex so we have\n # to convert to int. np.rint rounds to nearest for us, but doesn't cast to int!\n coords = [numpy.rint(x).astype(int) for x in numpy.ogrid[slices]]\n # coords is now, for an array image of dimension n, a list of n 1d arrays we the\n # coords we want to take from image:\n return typing.cast(_ImageDataType, image[coords])",
"def img_zoom(img, fx, fy, interp=cv2.INTER_AREA):\n res = cv2.resize(img, None, fx=fx, fy=fy,\n interpolation=interp)\n return res",
"def scale(input_img, size):\n width, height = size\n old_height, old_width = input_img.shape\n x_scale = float(height) / old_height\n y_scale = float(width) / old_width\n\n output_img = np.zeros((height, width), dtype=np.uint8)\n for xidx in xrange(height):\n old_x = float(xidx) / x_scale\n for yidx in xrange(width):\n old_y = float(yidx) / y_scale\n if old_x.is_integer() or old_y.is_integer():\n output_img[xidx, yidx] = input_img[int(old_x), int(old_y)]\n else: # use bilinear interpolation\n x1 = int(np.floor(old_x))\n x2 = int(np.ceil(old_x)) if int(np.ceil(old_x)) < old_height else old_height - 1\n y1 = int(np.floor(old_y))\n y2 = int(np.ceil(old_y)) if int(np.ceil(old_y)) < old_width else old_width - 1\n\n q11 = input_img[x1, y1]\n q12 = input_img[x1, y2]\n q21 = input_img[x2, y1]\n q22 = input_img[x2, y2]\n\n output_img[xidx, yidx] = (q11 * (x2 - old_x) * (y2 - old_y)\n + q21 * (old_x - x1) * (y2 - old_y)\n + q12 * (x2 - old_x) * (old_y - y1)\n + q22 * (old_x - x1) * (old_y - y1)) \\\n / ((x2 - x1) * (y2 - y1) + 1e-10)\n\n return output_img",
"def scale_in(self, count):\n pass",
"def scale_image(img, factor=1):\n\treturn cv2.resize(img,(int(img.shape[1]*factor), int(img.shape[0]*factor)))",
"def scale(img, scale):\n return resize(img, x_scale=scale, y_scale=scale)",
"def compute_scaling_coefs(im_size, grid_size, numpoints, alpha, order):\n num_coefs = np.array(range(im_size[0])) - (im_size[0] - 1) / 2\n scaling_coef = 1 / kaiser_bessel_ft(\n num_coefs / grid_size[0],\n numpoints[0],\n alpha[0],\n order[0],\n 1\n )\n if numpoints[0] == 1:\n scaling_coef = np.ones(scaling_coef.shape)\n for i in range(1, len(im_size)):\n indlist = np.array(range(im_size[i])) - (im_size[i] - 1) / 2\n scaling_coef = np.expand_dims(scaling_coef, axis=-1)\n tmp = 1 / kaiser_bessel_ft(\n indlist / grid_size[i],\n numpoints[i],\n alpha[i],\n order[i],\n 1\n )\n\n for _ in range(i):\n tmp = tmp[np.newaxis]\n\n if numpoints[i] == 1:\n tmp = np.ones(tmp.shape)\n\n scaling_coef = scaling_coef * tmp\n\n return scaling_coef",
"def zoom(self, zoomIn):\n zoomFactor = 0.05\n maxZoomIn = 2\n maxZoomOut = 0.1\n if zoomIn:\n s = self.viewNP.getScale()\n if s.getX()-zoomFactor < maxZoomIn and s.getY()-zoomFactor < maxZoomIn and s.getZ()-zoomFactor < maxZoomIn:\n self.viewNP.setScale(s.getX()+zoomFactor,s.getY()+zoomFactor,s.getZ()+zoomFactor)\n else:\n s = self.viewNP.getScale()\n if s.getX()-zoomFactor > maxZoomOut and s.getY()-zoomFactor > maxZoomOut and s.getZ()-zoomFactor > maxZoomOut:\n self.viewNP.setScale(s.getX()-zoomFactor,s.getY()-zoomFactor,s.getZ()-zoomFactor)\n self.nodeMgr.updateConnections()"
]
| [
"0.65019464",
"0.63492495",
"0.6301837",
"0.612139",
"0.60350764",
"0.5970467",
"0.5939783",
"0.5828498",
"0.5821332",
"0.57809144",
"0.57622355",
"0.5758738",
"0.5750291",
"0.57458144",
"0.57279766",
"0.56705326",
"0.56545484",
"0.56350017",
"0.56083345",
"0.55590546",
"0.5534708",
"0.55341244",
"0.55274343",
"0.55272627",
"0.5523969",
"0.5523962",
"0.551679",
"0.5492195",
"0.5479531",
"0.5476744"
]
| 0.65824926 | 0 |
Test that the pesummary.core.reweight.rejection_sampling works as expected | def test_rejection_sampling():
# Check that it works with a numpy array
original_samples = np.random.uniform(0, 10, (n_samples, n_params))
weights = np.random.uniform(0, 5, n_samples)
new_samples = rejection_sampling(original_samples, weights)
# new_samples should have fewer samples than we started with originally
assert len(new_samples) <= n_samples
# Each sample should be in the original posterior table
assert all(new_sample in original_samples for new_sample in new_samples)
# Each sample should be unique
unique = np.unique(new_samples, axis=0)
assert len(unique) == len(new_samples)
# Now check that it works as expected for the
# pesummary.utils.samples_dict.SamplesDict object
original_samples = SamplesDict(
{param: np.random.uniform(0, 10, n_samples) for param in gw_parameters()}
)
weights = np.random.uniform(0, 5, n_samples)
new_samples = rejection_sampling(original_samples, weights)
assert new_samples.number_of_samples <= original_samples.number_of_samples
assert new_samples.parameters == original_samples.parameters
assert all(
new_sample in original_samples.samples.T for new_sample in
new_samples.samples.T
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_does_not_sample_twice_ppswor(self):\n with self.assertRaises(ValueError):\n s = private_sampling.ThresholdSample(\n 1.0, private_sampling.PpsworSamplingMethod)\n s.process(\"a\", math.log(FAILURE_PROBABILITY_INVERSE, math.e))\n s.process(\"a\", 1)",
"def test_does_not_sample_negligible_weight_ppswor(self):\n s = private_sampling.ThresholdSample(1.0,\n private_sampling.PpsworSamplingMethod)\n s.process(\n \"a\",\n math.log(\n FAILURE_PROBABILITY_INVERSE / (FAILURE_PROBABILITY_INVERSE - 1),\n math.e))\n self.assertEmpty(s.elements)",
"def test_does_not_sample_negligible_weight_priority(self):\n s = private_sampling.ThresholdSample(\n 1.0, private_sampling.PrioritySamplingMethod)\n s.process(\"a\", 1.0 / FAILURE_PROBABILITY_INVERSE)\n self.assertEmpty(s.elements)",
"def test_samples_high_weight_elements_ppswor(self):\n s = private_sampling.ThresholdSample(1.0,\n private_sampling.PpsworSamplingMethod)\n s.process(\"a\", math.log(FAILURE_PROBABILITY_INVERSE, math.e))\n self.assertCountEqual([\"a\"], s.elements.keys())",
"def test_does_not_sample_twice_priority(self):\n with self.assertRaises(ValueError):\n s = private_sampling.ThresholdSample(\n 0.5, private_sampling.PrioritySamplingMethod)\n s.process(\"a\", 2.0)\n s.process(\"a\", 0.1)",
"def rejection_sampling(data, weights):\n weights = np.asarray(weights)\n idx = weights > np.random.uniform(0, np.max(weights), len(weights))\n logger.info(\n \"Rejection sampling resulted in {} samples ({} input)\".format(\n idx.sum(), len(idx)\n )\n )\n return data[idx]",
"def test_sampling1 ():\n cpus = list(range(C.N_PARALLEL))\n affinity = dict(cuda_idx=C.CUDA_IDX, workers_cpus=cpus)\n agent_ = findOptimalAgent(reward=None)\n agent = CategoricalPgAgent(AcrobotNet, \n initial_model_state_dict=agent_.state_dict())\n s0 = np.array([1, 0, 1/np.sqrt(2), 1/np.sqrt(2), 4, 2], dtype=np.float)\n sampler = SerialSampler(\n EnvCls=rlpyt_make,\n env_kwargs=dict(id=C.ENV, reward=None, internalStateFn=C.INTERNAL_STATE_FN, s0=s0),\n batch_T=500,\n batch_B=16,\n max_decorrelation_steps=0,\n )\n sampler.initialize(\n agent=agent,\n affinity=affinity,\n seed=0\n )\n _, traj_info = sampler.obtain_samples(0)\n print(np.mean([t['DiscountedReturn'] for t in traj_info]))",
"def test_estimate_statistics_priority(self):\n s = private_sampling.ThresholdSample(\n 0.5, private_sampling.PrioritySamplingMethod)\n s.process(\"a\", 2.0)\n s.process(\"b\", 3.0)\n self.assertEqual(s.estimate_statistics(), 5.0)",
"def test_estimate_statistics_ppswor(self):\n s = private_sampling.ThresholdSample(1.0,\n private_sampling.PpsworSamplingMethod)\n element_weight = math.log(FAILURE_PROBABILITY_INVERSE, math.e)\n s.process(\"a\", element_weight)\n sampling_probability = (FAILURE_PROBABILITY_INVERSE -\n 1) / FAILURE_PROBABILITY_INVERSE\n self.assertEqual(s.estimate_statistics(),\n element_weight / sampling_probability)",
"def test_samples_high_weight_elements_priority(self):\n s = private_sampling.ThresholdSample(\n 0.5, private_sampling.PrioritySamplingMethod)\n s.process(\"a\", 2.0)\n s.process(\"b\", 3.0)\n self.assertCountEqual([\"a\", \"b\"], s.elements.keys())",
"def posterior_sample(self):\n pass",
"def rejection_sample(self, trial_count):\n count = 0\n valid_trial_count = 1\n\n for i in xrange(trial_count):\n values = {}\n\n valid_sample = True\n\n for letter in self.letters:\n prob = self.variables[letter].get_prob(values)\n values[letter] = self.sample(prob)\n\n if letter in self.query.evidence:\n if (self.query.evidence[letter] != values[letter]):\n valid_sample = False\n break\n\n if valid_sample:\n valid_trial_count += 1\n\n if values[self.query.variable]:\n count += 1\n\n return float(count) / valid_trial_count",
"def test_default_sample_weight() -> None:\n mapie = MapieRegressor()\n assert signature(mapie.fit).parameters[\"sample_weight\"].default is None",
"def test_sampling_no_caching():\n folding = Folding(DATASET,\n reset=True)\n oversampler = 'SMOTE'\n oversampler_params = {}\n\n for fold in folding.fold():\n sjob = SamplingJob(fold,\n oversampler,\n oversampler_params)\n\n result = sjob.do_oversampling()\n\n ejob = EvaluationJob(result,\n CLASSIFIERS)\n\n result = ejob.do_evaluation()\n\n assert isinstance(result[0], dict)",
"def accept_reject_sample(prob: Callable, n: int, limits: Space,\n sample_and_weights_factory: Callable = UniformSampleAndWeights,\n dtype=ztypes.float, prob_max: Union[None, int] = None,\n efficiency_estimation: float = 1.0) -> tf.Tensor:\n multiple_limits = limits.n_limits > 1\n\n # if limits.n_limits == 1:\n # lower, upper = limits.limits\n # lower = ztf.convert_to_tensor(lower[0], dtype=dtype)\n # upper = ztf.convert_to_tensor(upper[0], dtype=dtype)\n\n sample_and_weights = sample_and_weights_factory()\n\n n = tf.to_int64(n)\n\n def enough_produced(n, sample, n_total_drawn, eff):\n return tf.greater(n, tf.shape(sample, out_type=tf.int64)[0])\n\n def sample_body(n, sample, n_total_drawn=0, eff=1.0):\n if sample is None:\n n_to_produce = n\n else:\n n_to_produce = n - tf.shape(sample, out_type=tf.int64)[0]\n do_print = settings.get_verbosity() > 5\n if do_print:\n print_op = tf.print(\"Number of samples to produce:\", n_to_produce, \" with efficiency \", eff)\n with tf.control_dependencies([print_op] if do_print else []):\n n_to_produce = tf.to_int64(ztf.to_real(n_to_produce) / eff * 1.01) + 100 # just to make sure\n # TODO: adjustable efficiency cap for memory efficiency (prevent too many samples at once produced)\n n_to_produce = tf.minimum(n_to_produce, tf.to_int64(5e5)) # introduce a cap to force serial\n\n rnd_sample, thresholds_unscaled, weights, weights_max, n_drawn = sample_and_weights(n_to_produce=n_to_produce,\n limits=limits,\n dtype=dtype)\n\n # if n_produced is None:\n # raise ShapeIncompatibleError(\"`sample_and_weights` has to return thresholds with a defined shape.\"\n # \"Use `Tensor.set_shape()` if the automatic propagation of the shape \"\n # \"is not available.\")\n n_total_drawn += n_drawn\n n_total_drawn = tf.to_int64(n_total_drawn)\n\n probabilities = prob(rnd_sample)\n if prob_max is None: # TODO(performance): estimate prob_max, after enough estimations -> fix it?\n # TODO(Mayou36): This control dependency is needed because otherwise the max won't be determined\n # correctly. A bug report on will be filled (WIP).\n # The behavior is very odd: if we do not force a kind of copy, the `reduce_max` returns\n # a value smaller by a factor of 1e-14\n # with tf.control_dependencies([probabilities]):\n # UPDATE: this works now? Was it just a one-time bug?\n prob_max_inferred = tf.reduce_max(probabilities)\n else:\n prob_max_inferred = prob_max\n\n if weights_max is None:\n weights_max = tf.reduce_max(weights) * 0.99 # safety margin, also taking numericals into account\n\n weights_scaled = prob_max_inferred / weights_max * weights\n random_thresholds = thresholds_unscaled * weights_scaled\n if run.numeric_checks:\n assert_op = [tf.assert_greater_equal(x=weights_scaled, y=probabilities,\n message=\"Not all weights are >= probs so the sampling \"\n \"will be biased. If a custom `sample_and_weights` \"\n \"was used, make sure that either the shape of the \"\n \"custom sampler (resp. 
it's weights) overlap better \"\n \"or decrease the `max_weight`\")]\n else:\n assert_op = []\n with tf.control_dependencies(assert_op):\n take_or_not = probabilities > random_thresholds\n # rnd_sample = tf.expand_dims(rnd_sample, dim=0) if len(rnd_sample.shape) == 1 else rnd_sample\n take_or_not = take_or_not[0] if len(take_or_not.shape) == 2 else take_or_not\n filtered_sample = tf.boolean_mask(rnd_sample, mask=take_or_not, axis=0)\n\n if sample is None:\n sample = filtered_sample\n else:\n sample = tf.concat([sample, filtered_sample], axis=0)\n\n # efficiency (estimate) of how many samples we get\n eff = ztf.to_real(tf.shape(sample, out_type=tf.int64)[1]) / ztf.to_real(n_total_drawn)\n return n, sample, n_total_drawn, eff\n\n # TODO(Mayou36): refactor, remove initial call\n sample = tf.while_loop(cond=enough_produced, body=sample_body, # paraopt\n loop_vars=sample_body(n=n, sample=None, # run first once for initialization\n n_total_drawn=0, eff=efficiency_estimation),\n swap_memory=True,\n parallel_iterations=4,\n back_prop=False)[1] # backprop not needed here\n if multiple_limits:\n sample = tf.random.shuffle(sample) # to make sure, randomly remove and not biased.\n new_sample = sample[:n, :] # cutting away to many produced\n\n # TODO(Mayou36): uncomment below. Why was set_shape needed? leave away to catch failure over time\n # if no failure, uncomment both for improvement of shape inference\n # with suppress(AttributeError): # if n_samples_int is not a numpy object\n # new_sample.set_shape((n_samples_int, n_dims))\n return new_sample",
"def test_low_delta_weight_one_not_sampled(self, sampling_class,\n sampling_method):\n s = sampling_class(\n threshold=100,\n eps=0.1,\n delta=1.0 / FAILURE_PROBABILITY_INVERSE,\n sampling_method=sampling_method)\n s.process(1, 1)\n self.assertEmpty(s.elements)",
"def test_samples_close_to_inclusion_probability_ppswor(self):\n # The range we allow around 0.5n\n distance_from_half = 0.01\n # The number of elements we use (computed using Chernoff bounds)\n n = int((6.0 / (distance_from_half**2)) *\n math.log(2 * FAILURE_PROBABILITY_INVERSE, math.e) + 1)\n s = private_sampling.ThresholdSample(1.0,\n private_sampling.PpsworSamplingMethod)\n for i in range(n):\n s.process(i, math.log(2.0, math.e))\n self.assertGreaterEqual(len(s.elements), (0.5 - distance_from_half) * n)\n self.assertLessEqual(len(s.elements), (0.5 + distance_from_half) * n)",
"def downsample_sam(self, factor):",
"def test_weighted_strategy_pool(self):\n environment.set_value('STRATEGY_SELECTION_METHOD', 'multi_armed_bandit')\n strategy_pool = strategy_selection.generate_weighted_strategy_pool(\n strategy_list=strategy.AFL_STRATEGY_LIST,\n use_generator=True,\n engine_name='afl')\n self.assertFalse(\n strategy_pool.do_strategy(strategy.CORPUS_MUTATION_RADAMSA_STRATEGY))\n self.assertTrue(strategy_pool.do_strategy(strategy.CORPUS_SUBSET_STRATEGY))",
"def resample(self, samplePool, weights):\n # newSamples = random.choices(samplePool, weights, k=self.numParticles)\n # Python 3.6 can do the previous, but now we need to do it by hand.\n newSamples = []\n newWeights = []\n for i in range(len(samplePool)):\n randVal = random.random()\n sampIndex = 0\n total = weights[0]\n while randVal >= total:\n sampIndex += 1\n total += weights[sampIndex]\n newSamples.append(samplePool[sampIndex])\n newWeights.append(weights[sampIndex])\n return newSamples, newWeights",
"def _linear_rejection_sampling(self, rng, beliefs, observation, keys=None):\n if not keys:\n return observation\n\n # Deep copy so observation is not mutated in case it needs to be reused.\n observation_copy = copy.deepcopy(observation)\n\n total = np.sum(beliefs)\n contributions = np.array([\n (total - belief) / float(total) for belief in beliefs\n ])\n max_contribution = np.max(contributions)\n normalized_contributions = contributions / max_contribution\n\n for index in range(len(normalized_contributions)):\n if rng.random_sample() >= normalized_contributions[index]:\n for key in keys:\n if len(normalized_contributions) != len(\n observation_copy[key]) or observation_copy[key].ndim != 1:\n raise KeyError(\n \"Key %s field is not a 1D vector with length equal to len(beliefs).\"\n % key)\n observation_copy[key][index] = 0\n\n return observation_copy",
"def _choose_sample(self):\n\n \t #periodically generate a new reconstruction for the purposes of sampling",
"def test_two_unsampled_arms(self):\n self._test_two_unsampled_arms()",
"def _sample_using_random(\n self,\n p: float = 0.1,\n ):\n return sa.func.random() < p",
"def _reweight_and_discard_irrelevant(self, weighted_sample_pool, t):\n tmp = []\n ret = []\n wc = self.classifiers[t]\n theta_a = wc.theta_a\n theta_b = wc.theta_b\n\n norm_factor = 0\n discarded = 0\n for patch, w in weighted_sample_pool:\n response = self.h_t(patch, t)\n # if t > 3:\n # if response < theta_a or response > theta_b: # throw it away\n # discarded += 1\n # continue\n r = self.classify(patch)\n label = patch.label\n new_weight = w * np.exp(-label * r)\n\n tmp.append([patch, new_weight])\n norm_factor += new_weight\n for patch, w in tmp: # normalize weights\n normalized_weight = w / norm_factor\n ret.append([patch, normalized_weight])\n print \"Discarded %d training samples\" % discarded\n return ret",
"def test_measure_nondeterministic_with_sampling(self):\n shots = 2000\n circuits = ref_measure.measure_circuits_nondeterministic(allow_sampling=True)\n targets = ref_measure.measure_counts_nondeterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots)\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)",
"def prior_sample(self):\n pass",
"def test_oss_sample_wt_fit():\n\n # Create the object\n oss = OneSidedSelection(random_state=RND_SEED)\n assert_raises(RuntimeError, oss.sample, X, Y)",
"def test_weighted_strategy_pool(self):\n environment.set_value('STRATEGY_SELECTION_METHOD', 'multi_armed_bandit')\n strategy_pool = strategy_selection.generate_weighted_strategy_pool(\n strategy_list=strategy.LIBFUZZER_STRATEGY_LIST,\n use_generator=True,\n engine_name='libFuzzer')\n self.assertTrue(\n strategy_pool.do_strategy(strategy.RANDOM_MAX_LENGTH_STRATEGY))\n self.assertTrue(strategy_pool.do_strategy(strategy.VALUE_PROFILE_STRATEGY))\n self.assertFalse(\n strategy_pool.do_strategy(strategy.CORPUS_MUTATION_RADAMSA_STRATEGY))\n self.assertFalse(strategy_pool.do_strategy(strategy.FORK_STRATEGY))",
"def test_checks_population_size(self):\n with pm.Model() as model:\n n = pm.Normal(\"n\", mu=0, sigma=1)\n for stepper in TestPopulationSamplers.steppers:\n step = stepper()\n with pytest.raises(ValueError, match=\"requires at least 3 chains\"):\n pm.sample(draws=10, tune=10, chains=1, cores=1, step=step)\n # don't parallelize to make test faster\n pm.sample(\n draws=10,\n tune=10,\n chains=4,\n cores=1,\n step=step,\n compute_convergence_checks=False,\n )"
]
| [
"0.6902385",
"0.67308843",
"0.67236114",
"0.66180545",
"0.6555439",
"0.63935053",
"0.6354483",
"0.6283435",
"0.62815934",
"0.6279335",
"0.60121846",
"0.59245867",
"0.5919744",
"0.59115124",
"0.59114724",
"0.5748331",
"0.57358813",
"0.57063603",
"0.5699293",
"0.5692468",
"0.5669262",
"0.5657751",
"0.56516385",
"0.5647729",
"0.56214446",
"0.5587468",
"0.5583199",
"0.5576722",
"0.5567358",
"0.55658334"
]
| 0.7511275 | 0 |
Test that the pesummary.gw.reweight.uniform_in_comoving_volume_from_uniform_in_volume function works as expected | def test_uniform_in_comoving_volume_from_uniform_in_volume():
original_samples = SamplesDict(
{param: np.random.uniform(0, 10, n_samples) for param in gw_parameters()}
)
new_samples = uniform_in_comoving_volume_from_uniform_in_volume(
original_samples
)
assert new_samples.number_of_samples <= original_samples.number_of_samples
assert all(
new_sample in original_samples.samples.T for new_sample in
new_samples.samples.T
)
# check that if there are no redshift samples it still reweights
original_samples.pop("redshift")
new_samples = uniform_in_comoving_volume_from_uniform_in_volume(
original_samples
)
assert new_samples.number_of_samples <= original_samples.number_of_samples
assert all(
new_sample in original_samples.samples.T for new_sample in
new_samples.samples.T
)
# check that if there are no distance samples it still reweights
original_samples = SamplesDict(
{param: np.random.uniform(0, 10, n_samples) for param in gw_parameters()}
)
original_samples.pop("luminosity_distance")
new_samples = uniform_in_comoving_volume_from_uniform_in_volume(
original_samples
)
assert new_samples.number_of_samples <= original_samples.number_of_samples
assert all(
new_sample in original_samples.samples.T for new_sample in
new_samples.samples.T
)
# check that if there are no redshift or distance samples it fails
original_samples.pop("redshift")
with pytest.raises(Exception):
new_samples = uniform_in_comoving_volume_from_uniform_in_volume(
original_samples
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_volume():\n structure = Material(input)\n assert (structure.volume == 90.725624999999965)",
"def test_for_arbitrarily_complicated_substance():\n verify_atomic_weight_for_substance(\"Al4O2H2\", 141.94015428)",
"def test_uniform(self):\n # some reproducible arbitrariness\n np.random.seed(87548)\n\n n = 50\n t_max = 50\n dt = 0.1\n resolution = 1.0\n\n class UniformityChecker(object):\n def __init__(self, target, resolution):\n self.target = target\n self.resolution = resolution\n self.order = 1\n\n def prepare(self, t_max, dt):\n self.has_spike = np.zeros(int_r(t_max/self.resolution) + 1)\n\n def evolve(self, t, dt):\n i = int_r(t/self.resolution)\n self.has_spike[i] = (self.has_spike[i] or np.any(self.target.out > 0))\n\n G = RateHVCLayer(n)\n M = UniformityChecker(G, resolution)\n sim = simulation.Simulation(G, M, dt=dt)\n sim.run(t_max)\n\n self.assertTrue(np.all(M.has_spike))",
"def test_absolute_volume(self):\n\n assert self.test_shape.volume() == pytest.approx(50 * 60 * math.pi * 2 * 1000)",
"def test_volume(self):\n\n self.test_shape.workplane = \"XY\"\n self.test_shape.rotation_axis = \"Z\"\n\n assert self.test_shape.volume() == pytest.approx(math.pi * (10**2) * 100 * 8)",
"def test_uniform(self):\n # some reproducible arbitrariness\n np.random.seed(87548)\n\n n = 50\n t_max = 50\n dt = 0.1\n resolution = 1.0\n\n class UniformityChecker(object):\n def __init__(self, target, resolution):\n self.target = target\n self.resolution = resolution\n self.order = 1\n\n def prepare(self, t_max, dt):\n self.has_spike = np.zeros(int_r(t_max/self.resolution) + 1)\n\n def evolve(self, t, dt):\n i = int_r(t/self.resolution)\n self.has_spike[i] = (self.has_spike[i] or np.any(self.target.spike))\n\n G = HVCLikeLayer(n)\n M = UniformityChecker(G, resolution)\n sim = simulation.Simulation(G, M, dt=dt)\n sim.run(t_max)\n\n self.assertTrue(np.all(M.has_spike))",
"def test_standard_atomic_weight_value_between():\n assert (\n 30.973 < standard_atomic_weight(\"P\").to(u.u).value < 30.974\n ), \"Incorrect standard atomic weight for phosphorus.\"",
"def test_volume_3d(self):\n # generate voronoi mesh \n mesh = Mesh3d(self.particles, self.bound)\n print(\"building mesh...\")\n mesh.build_geometry()\n print(\"mesh complete\")\n\n # calculate voronoi volumes of all real particles \n real_indices = self.particles[\"tag\"] == ParticleTAGS.Real\n tot_vol = np.sum(self.particles[\"volume\"][real_indices])\n\n self.assertAlmostEqual(tot_vol, 1.0)",
"def check_reward_volume_set(data, **_):\n metric = data[\"rewardVolume\"]\n passed = 0 < len(set(metric)) <= 2 and 0. in metric\n return metric, passed",
"def acquisition_function_expected_volume_removal(\n gp_reward_model: BasicGPRewardModel,\n) -> int:\n assert gp_reward_model.use_comparisons\n\n # DL: This assumes the same observation model for each query which we might\n # want to change at some point\n query0 = gp_reward_model.candidate_queries[0]\n response = query0.response\n\n (\n candidate_queries_gp_repr,\n candidate_queries_linear_combination,\n candidate_queries_gp_repr_idx,\n ) = gp_reward_model.get_candidate_queries_gp_repr()\n # mu_diff, _ = gp_reward_model.get_candidate_queries_reward_predictions()\n mu_diff, _ = gp_reward_model.gp_model.predict_multiple(\n candidate_queries_gp_repr,\n linear_combination=candidate_queries_linear_combination,\n )\n\n if response == \"bernoulli\":\n prob = (1 + np.clip(mu_diff, -1, 1)) / 2\n elif response == \"deterministic\":\n prob = np.sign(mu_diff)\n elif response == \"probit\":\n prob = norm.cdf(mu_diff / (np.sqrt(2) * query0.sigma))\n else:\n raise NotImplementedError(f\"evr for {response}\")\n\n volume_removal = np.minimum(1 - prob, prob)\n\n argmax_volume_removal = argmax_over_index_set(\n volume_removal, range(len(candidate_queries_gp_repr_idx))\n )\n return candidate_queries_gp_repr_idx[np.random.choice(argmax_volume_removal)]",
"def test_vapor_pressure():\n assert_almost_equal(vapor_pressure(998. * units.mbar, 0.04963),\n 73.75179 * units.mbar, 5)",
"def test_relative_shape_volume(self):\n\n test_volume = self.test_shape.volume()\n self.test_shape.azimuth_placement_angle = [0, 90, 180, 270]\n assert test_volume * 4 == pytest.approx(self.test_shape.volume())",
"def test_container_weight(self):\r\n weight = self.combinedoe_container.weight\r\n self.assertEqual(weight, 1)",
"def peridym_compute_weighted_volume(cell_cent, cell_vol, nbr_lst, nbr_beta_lst, horizon, omega_fun):\n\n mw = np.zeros(len(cell_vol), dtype=float) #m is wighted volume\n\n for i in range(len(cell_cent)):\n curr_node_coord = cell_cent[i]\n \n #declare empty lists for current node neighbor\n #attributes like neighbor bond vector, bond len,\n #and influence field \n #refer ch5 algo1 of handbook of peridynamic modelling\n #by silling etal \n\n curr_nbr_lst = nbr_lst[i] \n curr_beta_lst = nbr_beta_lst[i]\n curr_nbr_bnd_vct = cell_cent[curr_nbr_lst] - curr_node_coord\n curr_nbr_bnd_len = la.norm(curr_nbr_bnd_vct, 2, axis=1)\n mw[i] = sum(omega_fun(curr_nbr_bnd_vct, horizon)*curr_nbr_bnd_len**2*cell_vol[curr_nbr_lst]*curr_beta_lst)\n\n return mw",
"def test_particle_mass_berkelium_249():\n assert np.isclose(\n particle_mass(\"berkelium-249\").to(u.u).value, 249.0749877\n ), \"Incorrect isotope mass for berkelium.\"",
"def test_calculate_volume(self, mock_send_cli_cmd):\n self.log.display_title(title=self.tool.get_current_function_name())\n self.log.step_num = 0\n msg = \"calculate volume with number\"\n response = [\"2000\", \"400\", \"-\"]\n\n result = self.ins.calculate_volume(\n device=None,\n count=response,\n )\n self.assertTrue(result)\n\n msg = \"calculate volume with number with wing1_volume\"\n response = [\"2000\", \"400\"]\n\n result = self.ins.calculate_volume(\n device=None,\n count=response,\n wing1_volume=\"1000\"\n )\n self.assertTrue(result)",
"def vol_from_price( self, price ):\n def target_func( price, vol ):\n return self.price_from_vol( vol ) - price \n \n return brentq( partial( target_func, price ), 1e-8, 10 )",
"def test_window_funcs():\n # get a PSpecData\n uvd = UVData()\n uvd.read_miriad(\n os.path.join(DATA_PATH, 'zen.even.xx.LST.1.28828.uvOCRSA'),\n use_future_array_shapes=True\n )\n beam = pspecbeam.PSpecBeamUV(os.path.join(DATA_PATH, \"HERA_NF_dipole_power.beamfits\"))\n ds = pspecdata.PSpecData(dsets=[copy.deepcopy(uvd)], beam=beam)\n ds.set_spw((0, 20))\n ds.set_taper('bh')\n bl = (37, 38)\n key = (0, bl, 'xx')\n d = uvd.get_data(bl)\n C = np.cov(d[:, :20].T).real\n iC = np.linalg.pinv(C)\n # iterate over various R and M matrices and ensure\n # normalization and dtype is consistent\n for data_weight in ['identity', 'iC']:\n ds.set_weighting(data_weight)\n for norm in ['H^-1', 'I', 'V^-1/2']:\n for exact_norm in [True, False]:\n if exact_norm and norm != 'I':\n # exact_norm only supported for norm == 'I'\n continue\n ds.clear_cache()\n if data_weight == 'iC':\n # fill R with iC\n ds._R[(0, (37, 38, 'xx'), 'iC', 'bh')] = iC\n # compute G and H\n Gv = ds.get_G(key, key, exact_norm=exact_norm, pol='xx')\n Hv = ds.get_H(key, key, exact_norm=exact_norm, pol='xx')\n Mv, Wv = ds.get_MW(Gv, Hv, mode=norm, exact_norm=exact_norm,\n band_covar=C)\n # assert row-sum is normalized to 1\n assert np.isclose(Wv.sum(axis=1).real, 1).all()\n # assert this is a real matrix, even though imag is populated\n assert np.isclose(Wv.imag, 0, atol=1e-6).all()",
"def specvol(SA, CT, p):\n\n SA = np.maximum(SA, 0)\n\n xs = np.sqrt(sfac * SA + soffset)\n ys = CT * 0.025\n z = p * 1e-4\n\n specific_volume = (v000\n + xs * (v100 + xs * (v200 + xs * (v300 + xs * (v400 + xs * (v500\n + xs * v600)))))\n + ys * (v010\n + xs * (v110 + xs * (v210 + xs * (v310 + xs * (v410 + xs * v510))))\n + ys * (v020 + xs * (v120 + xs * (v220 + xs * (v320 + xs * v420)))\n + ys * (v030 + xs * (v130 + xs * (v230 + xs * v330))\n + ys * (v040 + xs * (v140 + xs * v240)\n + ys * (v050 + xs * v150 + ys * v060)))))\n + z * (v001\n + xs * (v101 + xs * (v201 + xs * (v301 + xs * (v401 + xs * v501))))\n + ys * (v011 + xs * (v111 + xs * (v211 + xs * (v311 + xs * v411)))\n + ys * (v021 + xs * (v121 + xs * (v221 + xs * v321))\n + ys * (v031 + xs * (v131 + xs * v231)\n + ys * (v041 + xs * v141 + ys * v051))))\n + z * (v002\n + xs * (v102 + xs * (v202 + xs * (v302 + xs * v402)))\n + ys * (v012 + xs * (v112 + xs * (v212 + xs * v312))\n + ys * (v022 + xs * (v122 + xs * v222)\n + ys * (v032 + xs * v132 + ys * v042)))\n + z * (v003\n + xs * (v103 + xs * v203)\n + ys * (v013 + xs * v113 + ys * v023)\n + z * (v004 + xs * v104 + ys * v014\n + z * (v005 + z * v006))))))\n\n return specific_volume",
"def test_strong(self):\n c = array([1,2,3,1])\n self.assertFloatEqual(strong(c), 0.214285714)",
"def test_basic(self):\n result = NonLinearWeights(0.85)\n self.assertAlmostEqual(result.cval, 0.85)",
"def check_reward_volumes(data, **_):\n metric = data['rewardVolume']\n correct = data['correct']\n passed = np.zeros_like(metric, dtype=bool)\n # Check correct trials within correct range\n passed[correct] = (1.5 <= metric[correct]) & (metric[correct] <= 3.)\n # Check incorrect trials are 0\n passed[~correct] = metric[~correct] == 0\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def total_volume(self):",
"def _calculate_volume(seq, window):\n # Article: On the average hydrophobicity of proteins and the relation between it and protein structure\n VOLUME = {'A': 52.6, 'R': 109.1, 'N': 75.7, 'D': 68.4, 'C': 68.3, 'Q': 89.7,\n 'E': 84.7, 'G': 36.3, 'H': 91.9, 'I': 102.0, 'L': 102.0, 'K': 105.1,\n 'M': 97.7, 'F': 113.9, 'P': 73.6, 'S': 54.9, 'T': 71.2, 'W': 135.4,\n 'Y': 116.2, 'V': 85.1}\n\n VOLUME_N = _nomalized_data(VOLUME)\n return _calculate_scale(seq, window, VOLUME_N)",
"def effective(\n network,\n pore_volume='pore.volume',\n throat_volume='throat.volume',\n):\n cn = network['throat.conns']\n P1 = cn[:, 0]\n P2 = cn[:, 1]\n eff_vol = np.copy(network[pore_volume])\n np.add.at(eff_vol, P1, 1/2*network[throat_volume])\n np.add.at(eff_vol, P2, 1/2*network[throat_volume])\n return eff_vol",
"def test_get_volume(self):\n self.assertEqual(self.cat_a.volume(), 6000)",
"def test_absolute_shape_volume(self):\n\n assert self.test_shape.solid is not None\n assert self.test_shape.volume() == pytest.approx(math.pi * (10**2) * 30)",
"def test_purity():\n psi = qt.fock(3)\n rho_test = qt.ket2dm(psi)\n test_pure = purity(rho_test)\n assert_equal(test_pure,1)",
"def test_weight_is_positive(self):\n nt.assert_greater(self.herb.weight, 0)",
"def test_with_random(unitless=True):\n\n delta = 1.33333\n cube1 = algebra.make_vect(np.random.normal(0, 1,\n size=(257, 124, 68)))\n\n info = {'axes': [\"freq\", \"ra\", \"dec\"], 'type': 'vect',\n 'freq_delta': delta / 3.78, 'freq_centre': 0.,\n 'ra_delta': delta / 1.63, 'ra_centre': 0.,\n 'dec_delta': delta, 'dec_centre': 0.}\n cube1.info = info\n cube2 = copy.deepcopy(cube1)\n\n weight1 = algebra.ones_like(cube1)\n weight2 = algebra.ones_like(cube2)\n\n bin_left, bin_center, bin_right, counts_histo, binavg = \\\n calculate_xspec(cube1, cube2, weight1, weight2,\n window=\"blackman\",\n truncate=False,\n nbins=40,\n unitless=unitless,\n logbins=True)\n\n if unitless:\n pwrspec_input = bin_center ** 3. / 2. / math.pi / math.pi\n else:\n pwrspec_input = np.ones_like(bin_center)\n\n volume = 1.\n for axis_name in cube1.axes:\n axis_vector = cube1.get_axis(axis_name)\n volume *= abs(axis_vector[1] - axis_vector[0])\n\n pwrspec_input *= volume\n\n for specdata in zip(bin_left, bin_center,\n bin_right, counts_histo, binavg,\n pwrspec_input):\n print((\"%10.15g \" * 6) % specdata)"
]
| [
"0.68578726",
"0.63723814",
"0.6354876",
"0.62236947",
"0.61338544",
"0.6127688",
"0.6097099",
"0.5850792",
"0.5831654",
"0.57379633",
"0.57359594",
"0.57206374",
"0.5676544",
"0.5635987",
"0.5625465",
"0.5582712",
"0.5577558",
"0.5575191",
"0.55607986",
"0.5552567",
"0.55507225",
"0.5545548",
"0.5518826",
"0.55180156",
"0.55029726",
"0.5487141",
"0.54841876",
"0.54725146",
"0.5461979",
"0.54573977"
]
| 0.78548443 | 0 |
A tag is a string of length greater than 1 starting with # but not ##. | def is_tag(t):
return len(t) > 1 and t.startswith('#') and not t.startswith('##') and t | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unknown_starttag(self, tag, attrs):\n if tag in self.valid_tags:\n self.result.append('<' + tag)\n for k, v in attrs:\n if string.lower(k[0:2]) != 'on' and",
"def is_tag(value):\r\n tag_names = parse_tag_input(value)\r\n if len(tag_names) > 1:\r\n raise ValidationError(_('Multiple tags were given.'))\r\n elif len(tag_names[0]) > settings.MAX_TAG_LENGTH:\r\n raise forms.ValidationError(\r\n _('A tag may be no more than %s characters long.') % settings.MAX_TAG_LENGTH)\r\n return value",
"def tag(self, text):\n\t\tpass",
"def detag_string(self, string):\r\n counter = itertools.count(0)\r\n count = lambda m: '<%s>' % counter.next()\r\n tags = self.tag_pattern.findall(string)\r\n tags = [''.join(tag) for tag in tags]\r\n (new, nfound) = self.tag_pattern.subn(count, string)\r\n if len(tags) != nfound:\r\n raise Exception('tags dont match:' + string)\r\n return (new, tags)",
"def tagify(parsedtag):\n tag = \"\"\n for t in parsedtag:\n if t == '':\n t = '_'\n tag = tag+t\n return tag",
"def getTagList(tags):\n tags = tags[1:len(tags)-1]\n return tags.split('><')",
"def realtag(element):\n try:\n return element.tag.rsplit('}', 1)[1]\n except ValueError:\n return element.tag",
"def clean_tag(tag):\n tmp0 = tag.strip()\n tmp1 = tmp0.lower()\n return tmp1",
"def isValidTagName(s):\n if s.lower().startswith(\"xml\"):\n return False\n return re.match(\"[^\\W\\d][\\w\\-_.]*\", s)",
"def start_tag_or_none(self, token):\n if self.patterns['start_tag'].match(token):\n return token[2:-6].upper()",
"def _tag_of(entry: _LexiconEntry) -> str:\n return entry[\"tag\"].upper()",
"def test_format_bad_tags(self):\n tags = self.c._format_tags(None)\n self.assertEqual(0, len(tags))",
"def get_nametag(nametag):\n # start must be valid\n if not nametag.startswith(Tags.NAMETAG_START.value):\n return None\n\n # removes the start of the tag\n nametag = nametag[len(Tags.NAMETAG_START.value):]\n\n # end must be valid\n if not nametag.endswith(Tags.NAMETAG_END.value):\n return None\n\n # removes the end of the tag\n nametag = nametag[:(len(nametag) - len(Tags.NAMETAG_END.value))]\n\n # no empty nametags\n if nametag == \"\":\n return None\n\n # checks that every single character is valid\n for c in nametag:\n if (not is_letter(c) and\n not is_number(c) and\n c != \"-\" and c != \"_\" and c != \"'\"):\n return None\n return nametag",
"def test_get_tag_name(self):\r\n name = self.combinedoe.get_tag_name(\"<t>Tag</t>\")\r\n self.assertEqual(name, \"t\")",
"def unknown_starttag(self, tag, attrs):\n if tag in self.valid_tags:\n self.result = self.result + '<' + tag\n for k, v in attrs:\n if (string.lower(k[0:2]) != 'on' and\n string.lower(v[0:10]) != 'javascript'):\n self.result = '%s %s=\"%s\"' % (self.result, k, v)\n endTag = '</%s>' % tag\n self.endTagList.insert(0, endTag)\n self.result = self.result + '>'",
"def openTag ( x ):\n assert str(type(x)) == \"<type 'str'>\"\n tag = \"<\" + str ( x ) + \">\"\n assert str ( type ( tag ) ) == \"<type 'str'>\"\n return tag",
"def _issingleton(self, tagname):\n return self.shortempty",
"def tags_in_string(msg):\r\n def is_linguistic_tag(tag):\r\n \"\"\"Is this tag one that can change with the language?\"\"\"\r\n if tag.startswith(\"&\"):\r\n return True\r\n if any(x in tag for x in [\"<abbr>\", \"<abbr \", \"</abbr>\"]):\r\n return True\r\n return False\r\n\r\n __, tags = Converter().detag_string(msg)\r\n return set(t for t in tags if not is_linguistic_tag(t))",
"def remove_single_tag(tag_exp, text):\n \n while True:\n matched = re.search(tag_exp, text)\n if not matched: break\n text = text[:matched.start()] + \" \" + text[matched.end():]\n \n return text",
"def _parse_tags (tag, multi_kind_dataset ='bagoue'): \r\n tag = str(tag); t = tag.strip().split() \r\n \r\n if len(t) ==1 : \r\n if t[0].lower() not in _DTAGS: \r\n tag = multi_kind_dataset +' ' + t[0]\r\n \r\n warn(f\"Fetching {multi_kind_dataset.title()!r} data without\"\r\n \" explicitly prefixing the kind of data with the area\"\r\n \" name will raise an error. In future, the argument\"\r\n f\" should be '{tag}' instead.\", FutureWarning \r\n )\r\n elif len(t) >1 : \r\n # only the multi kind dataset is allowed \r\n # to contain two words for fetching data \r\n if t[0].lower() !=multi_kind_dataset: \r\n tag = t[0].lower() # skip the second word \r\n return tag",
"def is_valid_tag(tag):\n if not tag or ':' not in tag or len(tag) > TAG_MAX_LEN:\n return False\n # Care only about the key. Value can be anything (including empty string).\n return bool(TAG_KEY_RE.match(tag.split(':', 1)[0]))",
"def is_tag_argument(argument):\n return argument.startswith('%')",
"def unknown_starttag(self, tag, attrs):\n starttrs = \"\".join(['%s=\"%s\"' % (key, value) for key, value in attrs])\n self.pieces.append(\"<%(tag)s %(starttrs)s>\" % locals())",
"def is_tag_list(value):\r\n for tag_name in parse_tag_input(value):\r\n if len(tag_name) > settings.MAX_TAG_LENGTH:\r\n raise forms.ValidationError(\r\n _('Each tag may be no more than %s characters long.') % settings.MAX_TAG_LENGTH)\r\n return value",
"def __getTagText(self, tag):\n return ''.join(tag.findAll(text=True)).replace(unichr(160), ' ')",
"def allowed_xml_tag_fragments():\n return (\n \"<i>\",\n \"</i>\",\n \"<i/>\",\n \"<b>\",\n \"</b>\",\n \"<b/>\",\n \"<sub>\",\n \"</sub>\",\n \"<sub/>\",\n \"<sup>\",\n \"</sup>\",\n \"<sup/>\",\n )",
"def make_tags(tag, word):\n tag1 = \"<{}>\".format(tag)\n tag2 = \"</{}>\".format(tag)\n final = tag1 + word + tag2\n return final",
"def POStag(self, word):\n \t\tif word in (\"'\",\",\",\".\",':',';','.'):\n \t\t\ttag = 'PUNCT'\n \t\telif word == '-':\n \t\t\ttag = 'DASH'\n \t\telse:\n \t\t\ttag = 'NOTAG'\n \t\treturn tag",
"def tags():",
"def __init__(self, tag):\r\n self.tag = tag.lower()"
]
| [
"0.6300984",
"0.62361676",
"0.62319434",
"0.62009877",
"0.61545885",
"0.61463207",
"0.6131712",
"0.61184245",
"0.6023833",
"0.6015184",
"0.60081524",
"0.5920608",
"0.591738",
"0.5891422",
"0.5878843",
"0.5873668",
"0.5853209",
"0.5814497",
"0.5764625",
"0.57616985",
"0.57601917",
"0.57365155",
"0.5722294",
"0.57108414",
"0.5703768",
"0.56877697",
"0.5684106",
"0.5675299",
"0.5669072",
"0.5637245"
]
| 0.671756 | 0 |
If the first value in this list is a tag, pop and return it. | def pop_tag(data):
if data and is_tag(data[0]):
return data.pop(0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pop(self):\n while not self.queue[self.tag].empty():\n temp = self.queue[self.tag].get()\n if not self.queue[self.tag].empty():\n self.queue[1 - self.tag].put(temp)\n else:\n self.tag = 1 - self.tag\n return temp",
"def pop(self):\n if self.is_empty():\n raise ValueError('stack is empty')\n else:\n val = self.list.head.data\n self.list.delete(val)\n return val",
"def popitem(self):\n return self.pop(0)",
"def pop(self):\n if self.items:\n return self.items.pop()\n return None",
"def pop(self):\n if not self.value:\n return\n s = []\n while len(self.value) > 1:\n s.append(self.value.pop())\n peek = self.value.pop()\n while s:\n self.value.append(s.pop())\n return peek",
"def pop(self):\n return self.ll.delete_first()",
"def pop(self):\n\n if self.items:\n return self.items.pop()\n\n return None",
"def pop(self):\r\n return self.list.pop()",
"def pop(self):\n return self.list.pop()",
"def pop(self):\n try:\n res = self._linkedlist.pop()\n self._update_attr()\n return res\n except IndexError:\n raise IndexError(\"Cannot pop from empty stack.\")",
"def pop(self):\n\n value = self.values[0]\n if len(self.values) == 1:\n self.values = []\n else:\n self.populate(self.values[1:])\n return value",
"def pop(self):\n\n traverse = self.head\n\n while traverse.next is not None:\n\n t1 = traverse.next\n if t1.next is None:\n traverse.next = None\n return t1.data\n traverse = traverse.next",
"def pop(self):\n # Pre:\n # The stack may not be empty.\n # Post:\n # If the stack was empty, the stack's state will not change, and None will be returned.\n if self.is_empty:\n return None\n value = self.data_container[0]\n self.data_container.remove_at(0)\n return value",
"def pop(self):\n if self.is_empty():\n return None\n\n return self.container.pop()",
"def popitem(self):\n key = next(iter(self))\n return key, self.pop(key)",
"def _popToTag(self, name, inclusivePop=True):\r\n #print \"Popping to %s\" % name\r\n if name == self.ROOT_TAG_NAME:\r\n return\r\n\r\n numPops = 0\r\n mostRecentTag = None\r\n for i in range(len(self.tagStack)-1, 0, -1):\r\n if name == self.tagStack[i].name:\r\n numPops = len(self.tagStack)-i\r\n break\r\n if not inclusivePop:\r\n numPops = numPops - 1\r\n\r\n for i in range(0, numPops):\r\n mostRecentTag = self.popTag()\r\n return mostRecentTag",
"def pop(self):\n\t\treturn self.items.pop()",
"def pop(self):\n if self.end is None:\n return None\n elif self.end == self.begin:\n element = self.begin\n self.begin = self.end = None\n return element.value\n else:\n element = self.begin\n while element.next != self.end:\n element = element.next\n temp = self.end\n self.end = element\n element.next = None\n return temp.value",
"def pop(self):\n # If the stack is empty, return None\n # (it would also be reasonable to throw an exception)\n if not self.items:\n return None\n\n return self.items.pop()",
"def pop(self):\r\n it = iter(self)\r\n try:\r\n value = next(it)\r\n except StopIteration:\r\n raise KeyError\r\n self.discard(value)\r\n return value",
"def pop(self):\n return self.s1.pop()",
"def stack_pop(self):\n value = self.stack.pop()\n\n return value",
"def pop(self):\n if self.first is None:\n return None\n if self.first == self.last:\n # If only one node, set last to None\n # this way when we set self.first = self.first.next\n # we are setting both nodes to None\n self.last = None\n poppedNode = self.first\n self.first = self.first.next\n poppedNode.next = None\n self.size -= 1\n return poppedNode.data",
"def pop(self):\n res = self.first_node\n self.first_node = self.first_node.next\n return res",
"def pop(self): ##################### <-\n value = self.lst[-1]\n self.lst = self.lst[:-1]\n return value",
"def pop(self):\n if self.stack:\n return self.stack.pop()\n return None",
"def pop(self):\n return self.remove(0)",
"def pop(self):\n size = self._list.size()\n if size == 0:\n return None\n data = self._list.tail.data\n self._list.removeIndex(size-1)\n return data",
"def pop_(self):\n\n return self.items.pop()",
"def pop(self):\n if self.stack:\n return self.stack.pop()"
]
| [
"0.73780775",
"0.71970713",
"0.700163",
"0.69585055",
"0.69546187",
"0.6942529",
"0.69298804",
"0.6921087",
"0.69165844",
"0.69013816",
"0.6838649",
"0.6807731",
"0.6806792",
"0.67973435",
"0.67922765",
"0.6770666",
"0.67601925",
"0.67588156",
"0.6755501",
"0.6738713",
"0.6737764",
"0.6726784",
"0.6723342",
"0.672197",
"0.6719779",
"0.67119086",
"0.67060655",
"0.66850895",
"0.6683093",
"0.66754466"
]
| 0.8173029 | 0 |
Download the COCO dataset | def download_coco_dataset():
# Create file structure
os.makedirs(os.path.join("data", "coco", "train"), exist_ok=True)
os.makedirs(os.path.join("data", "coco", "dev"), exist_ok=True)
os.makedirs(os.path.join("data", "coco", "test"), exist_ok=True)
# Download the train, dev and test datasets
print("Downloading COCO dataset.")
url = "http://images.cocodataset.org/zips/train2014.zip"
print("Downloading " + url)
urllib.request.urlretrieve(url, os.path.join("data", "coco", "train2014.zip"))
url = "http://images.cocodataset.org/zips/val2014.zip"
print("Downloading " + url)
urllib.request.urlretrieve(url, os.path.join("data", "coco", "val2014.zip"))
url = "http://images.cocodataset.org/zips/test2014.zip"
print("Downloading " + url)
urllib.request.urlretrieve(url, os.path.join("data", "coco", "test2014.zip"))
print("Done downloading COCO dataset.")
# Unzip the files
print("Extracting COCO dataset.")
# Extract Train dataset
zip_ref = zipfile.ZipFile(os.path.join("data", "coco", "train2014.zip", "r"))
zip_ref.extractall(os.path.join("data", "coco"))
shutil.move(
os.path.join("data", "coco", "train2014"),
os.path.join("data", "coco", "train", "dummy"),
)
# Extract Validation dataset
zip_ref = zipfile.ZipFile(os.path.join("data", "coco", "val2014.zip", "r"))
zip_ref.extractall(os.path.join("data", "coco"))
shutil.move(
os.path.join("data", "coco", "val2014"),
os.path.join("data", "coco", "dev", "dummy"),
)
# Extract Test dataset
zip_ref = zipfile.ZipFile(os.path.join("data", "coco", "test2014.zip", "r"))
zip_ref.extractall(os.path.join("data", "coco"))
shutil.move(
os.path.join("data", "coco", "test2014"),
os.path.join("data", "coco", "test", "dummy"),
)
print("Done extracting COCO dataset.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def download_coco(): \n file_type = '.zip'\n img_to_download = ['val','test','train']\n ann_to_download = ['annotations_trainval','image_info_test']\n base_url_images = 'http://images.cocodataset.org/zips/'\n base_url_ann = 'http://images.cocodataset.org/annotations/'\n\n\n click.echo(click.style(f\"\\n DOWNLOAD ANNOTATIONS \\n\", bg='green', bold=True, fg='white'))\n for ann in ann_to_download:\n\n ## build Urls\n ann_url = base_url_ann + ann + str(cfg.COCO_YEARS) + file_type\n \n click.echo(click.style(f'\\nDownloading of {ann} ...\\n', bg='blue', bold=True, fg='white'))\n click.echo(f'{ann} will be downloaded')\n\n zip_filename_location = save_zip_from_url(ann_url,cfg.PATH_ANNOTATIONS)\n #zip_filename_location = \"/home/kamgo-gpu/Schreibtisch/stuff_annotations_trainval2017.zip\"\n click.echo(f\"the downloaded zip file was saved in to {zip_filename_location}\")\n\n click.echo(click.style(f'\\n Extraction of {ann} ...\\n', bg='blue', bold=True, fg='white'))\n click.echo(f'{ann} will be extracted and the zip-file will be deleted')\n\n # Extract zip to annotation directory\n Extract_zip_file(zip_filename_location,cfg.PATH_ANNOTATIONS)\n\n click.echo(click.style(f\"\\n DOWNLOAD IMAGES \\n\", bg='green', bold=True, fg='white'))\n for dataset in img_to_download:\n ## build Urls\n dataset_img_url = base_url_images + dataset + str(cfg.COCO_YEARS) + file_type\n \n click.echo(click.style(f'\\n Downloading of {dataset} ...\\n', bg='blue', bold=True, fg='white'))\n click.echo(f'{dataset} will be downloaded')\n\n zip_filename_location = save_zip_from_url(dataset_img_url,cfg.PATH_IMAGES)\n click.echo(f\"the downloaded zip file was saved in to {zip_filename_location}\")\n click.echo(click.style(f'\\n Extraction of {dataset} ...\\n', bg='blue', bold=True, fg='white'))\n click.echo(f'{dataset} will be extracted and the zip-File will be deleted')\n\n # set complet Path to save images\n Extract_zip_file(zip_filename_location,cfg.PATH_IMAGES)\n\n click.echo(click.style(f'\\n Download and extraction termined successfull {dataset} ...\\n', bg='green', bold=True, fg='white'))",
"def download(self, verbose):\n\n # Download datasets\n if verbose:\n print(\"Retrieving datasets from COVID-19 Open Data by Google Cloud Platform https://github.com/GoogleCloudPlatform/covid-19-open-data\")\n # Index\n i_cols = [\"location_key\", \"country_name\", \"subregion1_name\", \"subregion2_name\", \"iso_3166_1_alpha_3\"]\n i_df = pd.read_csv(self.URL_I, usecols=i_cols)\n # Mobility\n m_df = pd.read_csv(self.URL_M)\n m_df = (m_df.set_index([\"date\", \"location_key\"]) + 100).reset_index()\n # Combine data\n df = m_df.merge(i_df, how=\"left\", on=\"location_key\")\n # Location (country/province)\n df = df.loc[df[\"subregion2_name\"].isna()]\n df[self.PROVINCE] = df[\"subregion1_name\"].fillna(self.UNKNOWN).apply(unidecode)\n df[\"country_name\"] = df[\"country_name\"].replace(\n {\n # CIV\n \"Ivory Coast\": \"Cote d'Ivoire\",\n }\n )\n return df",
"def download(self, verbose):\n # Download datasets\n if verbose:\n print(\"Retrieving datasets from Our World In Data https://github.com/owid/covid-19-data/\")\n # Vaccinations\n v_rec_cols = [\n \"date\", \"location\", \"iso_code\", \"total_vaccinations\", \"people_vaccinated\", \"people_fully_vaccinated\"]\n v_rec_df = pd.read_csv(self.URL_V_REC, usecols=v_rec_cols)\n v_loc_df = pd.read_csv(self.URL_V_LOC, usecols=[\"location\", \"vaccines\"])\n v_df = v_rec_df.merge(v_loc_df, how=\"left\", on=\"location\")\n # Tests\n pcr_rec_cols = [\"ISO code\", \"Date\", \"Daily change in cumulative total\", \"Cumulative total\"]\n pcr_df = pd.read_csv(self.URL_P_REC, usecols=pcr_rec_cols)\n pcr_df = pcr_df.rename(columns={\"ISO code\": \"iso_code\", \"Date\": \"date\"})\n pcr_df[\"cumsum\"] = pcr_df.groupby(\"iso_code\")[\"Daily change in cumulative total\"].cumsum()\n pcr_df = pcr_df.assign(tests=lambda x: x[\"Cumulative total\"].fillna(x[\"cumsum\"]))\n # Combine data (vaccinations/tests)\n df = v_df.set_index([\"iso_code\", \"date\"])\n df = df.combine_first(pcr_df.set_index([\"iso_code\", \"date\"]).loc[:, [\"tests\"]])\n df = df.reset_index()\n # Location (country/province)\n df[\"location\"] = df[\"location\"].replace(\n {\n # COG\n \"Congo\": \"Republic of the Congo\",\n }\n )\n df = df.loc[~df[\"iso_code\"].str.contains(\"OWID_\")]\n df[\"location\"] = df.groupby(\"iso_code\")[\"location\"].bfill()\n df.loc[df[\"location\"] == df[\"iso_code\"], \"location\"] = None\n df.loc[df[\"location\"].isna(), \"location\"] = df.loc[df[\"location\"].isna(), \"iso_code\"].apply(\n lambda x: coco.convert(x, to=\"name_short\", not_found=None))\n df[self.PROVINCE] = self.UNKNOWN\n return df",
"def downloadFile()-> None:\n logging.info(f\"Downloading current data set {getTime()}\")\n with open(DATA_FILE,\"wb\") as f:\n f.write(get(\"https://covid.ourworldindata.org/data/owid-covid-data.csv\").text.encode())\n logging.info(f\"Finished Downloading current data set {getTime()}\")",
"def download_dataset(self):\n raise NotImplementedError",
"def download(dataset_name,dataset_url):\n directory = \"tmp\"\n if not os.path.exists(os.path.join(directory,dataset_name)):\n os.makedirs(os.path.join(directory,dataset_name))\n for url, filename in get_all_data(dataset_url):\n if not os.path.exists(os.path.join(directory,dataset_name,filename)):\n print(\"Downloading \"+filename+\":\",)\n ul.urlretrieve(url,os.path.join(directory,dataset_name,filename),reporthook)\n unzip_ecco_tcp_xmls(os.path.join(directory, dataset_name), os.path.join(directory, dataset_name + \"_unzipped\"))\n shutil.rmtree(os.path.join(directory, dataset_name))\n shutil.move(os.path.join(directory, dataset_name + \"_unzipped\"), os.path.join(directory, dataset_name))\n headers_to_csv(directory, dataset_name)\n corpus_to_csv(directory, dataset_name)\n erase_all_files_with_extension(directory, dataset_name, \".hdr\")\n erase_all_files_with_extension(directory, dataset_name, \".xml\")",
"def _download_metafile(dataset, path=None):\n if not path:\n path = sunpy.config.get('downloads', 'sample_dir')\n base_url = 'https://spdf.gsfc.nasa.gov/pub/software/cdawlib/0MASTERS/'\n fname = dataset.lower() + '_00000000_v01.cdf'\n url = base_url + fname\n try:\n downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=fname, path=path, progressbar=True)\n except ModuleNotFoundError:\n downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=fname, path=path, progressbar=False)\n return downloaded_file",
"def _download_cxr_model(self):\n file_id = \"1KIsLmVv8jKTVG_LxchMZAvR7rugHy7uB\"\n download_from_google_drive(file_id=file_id, folder=\"data/\", name=\"covid_cxr.zip\")",
"def download():\n basedir = os.path.dirname(os.path.dirname(__file__))\n print(basedir)\n datadir = os.path.join(basedir,\"data/NeonTreeEvaluation/\")\n print(\"Downloading data files to {}\".format(datadir)) \n eval_url = zenodo_url(concept_rec_id=\"3723356\", datadir=datadir)",
"def data_fetch_netcdf(self):\n self.client = boto3.client('s3', aws_access_key_id=self.creds_data['key_id'],\n aws_secret_access_key=self.creds_data['key_access'])\n year = self.month_year[0]\n month = self.month_year[1]\n # change output folder to desired location from TRMM website\n # folder structure to partitioned the data year_month\n output_temp = self.output_folder + year + '_' + month\n url_data = \"http://trmm.atmos.washington.edu/{}interp_data/{}/{}\".format(self.output_folder, year, month)\n print(url_data)\n start_time_year_month = time.time()\n r = requests.get(url_data, auth=self.auth_data)\n # check if url exists then extract netcdf links to download and upload to s3.\n if r.status_code == 200:\n soup = BeautifulSoup(r.text, features='lxml')\n for link in soup.findAll('a'):\n link_url = link.get('href')\n write_path = os.path.join(output_temp, link_url)\n if link_url.endswith('.nc4'):\n file_url = url_data + '/' + link_url\n r = requests.get(file_url, auth=self.auth_data, stream=True)\n if r.status_code == 200:\n self.client.put_object(Body=r.content, Bucket='himatdata', Key='Trmm/' + write_path)\n logging.info(\"Done with Year Month: %s\", month_year)\n print(\"--- %s seconds ---\" % (time.time() - start_time_year_month))\n\n else:\n print('No data/authentication for'.format(month_year))",
"def download_all_data(self) -> None:\n print(\"Download in progress.\")\n self.download_data(os.environ[\"NC_TOKEN_TRAIN_CHARACTERS\"], \"nextcloud\")\n self.download_data(os.environ[\"NC_TOKEN_TRAIN_FRAGMENTS\"], \"nextcloud\")\n self.download_data(os.environ[\"HABBAKUK_URL\"], \"generic_url\")\n print(\"Download complete!\")",
"def download(args):\n with_dataset(args, Dataset._download)",
"def fetch_zenodo(self):\n\n # retrieve content from URL\n try:\n logging.info(f\"Downloading example data from {self.url}\")\n r = requests.get(self.url, stream=True)\n with io.BytesIO() as stream:\n with tqdm.wrapattr(\n stream,\n 'write',\n file=sys.stdout,\n miniters=1,\n desc=self.url,\n total=int(r.headers.get('content-length', 0))\n ) as file:\n for chunk in r.iter_content(chunk_size=4096):\n file.write(chunk)\n with zipfile.ZipFile(stream) as zipped:\n # extract each file in the zipped dir to the project\n for f in zipped.namelist():\n logging.info(\"Unzipped: {}\".format(os.path.join(self.destination, f)))\n zipped.extract(f, self.destination)\n\n logging.info(\"Download and install complete.\")\n\n self.close_logger()\n\n except requests.exceptions.MissingSchema:\n msg = f\"Unable to download data from {self.url}\"\n logging.exception(msg)\n self.close_logger()\n raise",
"def download_compressed_dataset(url):\n raise NotImplementedError",
"def get_pronto_data():\n download_if_needed(\"https://s3.amazonaws.com/pronto-data/open_data_year_one.zip\",\n \"open_data_year_one.zip\")",
"def download():\r\n reader = GSODDataReader()\r\n year_list = range(2001, 2012)\r\n austin = reader.collect_data(year_list, exact_station=True,\r\n station_name='AUSTIN CAMP MABRY', state='TX', country='US')\r\n houston = reader.collect_data(year_list, exact_station=True,\r\n station_name='HOUSTON/D.W. HOOKS', state='TX', country='US')\r\n new_york = reader.collect_data(year_list, exact_station=True,\r\n station_name='NEW YORK/LA GUARDIA', state='NY', country='US')\r\n newark = reader.collect_data(year_list, exact_station=True,\r\n station_name='NEWARK INTL AIRPORT', state='NJ', country='US')\r\n punta_arenas = reader.collect_data(year_list, exact_station=True,\r\n station_name='PUNTA ARENAS', country='CH')\r\n wellington = reader.collect_data(year_list, exact_station=True,\r\n station_name='WELLINGTON AIRPORT', country='NZ')\r\n store = HDFStore('weather.h5')\r\n store['austin'] = austin\r\n store['houston'] = houston\r\n store['nyc'] = new_york\r\n store['newark'] = newark\r\n store['punta_arenas'] = punta_arenas\r\n store['wellington'] = wellington\r\n store.close()",
"def download(self):\n\n with open(self.dataset_path) as dataset_file:\n dataset = json.load(dataset_file)\n\n path = \"\".join([POST_HIT_PATH, dataset[\"dataset\"][\"data_path\"]])\n if not os.path.exists(path):\n os.makedirs(path)\n\n protocole = dataset[\"dataset\"][\"protocole\"]\n\n download_links = []\n\n for resource in dataset[\"dataset\"][\"resources\"]:\n file_path = \"\".join([path, resource[\"filename\"]])\n\n #Check if the the download link has not been used before (One download link for all)\n if resource[\"download_link\"] not in download_links:\n \n print(\"DOWNLOADING : {}\".format(resource[\"filename\"]))\n f = urllib.request.urlopen(resource[\"download_link\"])\n data = f.read()\n with open(file_path, \"wb\") as donwload_file:\n donwload_file.write(data)\n\n download_links.append(resource[\"download_link\"])\n\n \n #Extract all files from the tar archives if necessary\n if tarfile.is_tarfile(file_path):\n tf = tarfile.open(file_path)\n tf.exractall()",
"def get_coco_dataset():\n ds = AttrDict()\n # classes = [\n # '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\n # 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',\n # 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',\n # 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',\n # 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',\n # 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',\n # 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',\n # 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',\n # 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',\n # 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',\n # 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n # 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',\n # 'scissors', 'teddy bear', 'hair drier', 'toothbrush'\n # ]\n # classes = ['__background__', 'lane']\n #\n base_classes = [\n '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\n 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',\n 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',\n 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',\n 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',\n 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',\n 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',\n 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',\n 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',\n 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',\n 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',\n 'scissors', 'teddy bear', 'hair drier', 'toothbrush'\n ]\n classes = ['__background__',\n 'guard rail',\n # 'car',\n 'dashed',\n 'solid',\n 'solid solid',\n 'dashed dashed',\n 'dashed-solid',\n 'solid-dashed',\n 'yellow dashed',\n 'yellow solid',\n 'yellow solid solid',\n 'yellow dashed dashed',\n 'yellow dashed-solid',\n 'yellow solid-dashed',\n 'boundary',\n 'fork_line',\n 'fork_edge',\n 'arrow_s',\n 'arrow_r',\n 'arrow_l',\n 'arrow_lr',\n 'arrow_inclined_r',\n 'arrow_r_s',\n 'arrow_l_s',\n 'sidewalk',\n 'handrail'\n ]\n base_classes.extend(classes[1:])\n classes = base_classes\n ds.classes = {i: name for i, name in enumerate(classes)}\n return ds",
"def download_dataset(url=DATASET_URL):\n # disable insecure https warning\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n c = urllib3.PoolManager()\n with c.request(\"GET\", url, preload_content=False) as res, open(\n LOCAL_FILE_NAME, \"wb\"\n ) as out_file:\n shutil.copyfileobj(res, out_file)\n logging.info(\"Download completed.\")",
"def download():\n toydata = requests.get(DATA_URL).json()\n return toydata",
"def download_dataset():\n \n ID = \"1-3_oB5iSF-c_V65-uSdUlo024NzlgSYZ\"\n script1 = f\"\"\"\n wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id='{ID} -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=\"{ID} -O Data.zip && rm -rf /tmp/cookies.txt\n \"\"\"\n script2 = \"\"\"unzip Data.zip\"\"\"\n\n os.system(script1)\n os.system(script2)",
"def prepare_data(self):\n import subprocess\n # Download coco data set into dir specified by config then /data/coco\n subprocess.call([f\"{get_original_cwd()}/bin/fetch_dataset.sh\", f\"{self.dir}/data/coco\", f\"{get_original_cwd()}\"])\n # subprocess.call([f\"bin/fetch_dataset.sh\", f\"{self.dir}/data/coco\"])\n task = \"instances\" if self.instance else \"person_keypoints\"\n register_coco_instances(\"train\", {}, f\"{self.dir}/data/coco/{task}_train2014.json\",\n f\"{self.dir}/data/coco/train2014\")\n register_coco_instances(\"val\", {}, f\"{self.dir}/data/coco/{task}_minival2014.json\",\n f\"{self.dir}/data/coco/val2014\")\n register_coco_instances(\"test\", {}, f\"{self.dir}/data/coco/{task}_valminusminival2014.json\",\n f\"{self.dir}/data/coco/val2014\")",
"def getMpcorb(url='https://minorplanetcenter.net/iau/MPCORB/MPCORB.DAT.gz', fname='MPCORB.DAT.gz', verbose=True):\n\n #filename = wget.download(url)\n try:\n r = requests.get(url, allow_redirects=True)\n open(fname, 'wb').write(r.content)\n if (verbose):\n print('Download complete:', url)\n except:\n print(\"Error in getMpcorb: could not download \", fname, \" at \", url)\n raise\n return",
"def download_data():\r\n print('Downloading cifar-10 data...')\r\n request.urlretrieve(dataurl)\r\n print('Done')\r\n print('Please unzip files. command is:')\r\n print('gzip -d cifar-10-python.tar.gz')\r\n print('tar -xf cifar-10-python.tar')\r\n exit()",
"def download_data():\n urllib.request.urlretrieve('http://cs.iit.edu/~culotta/cs579/a1/edges.txt.gz', 'edges.txt.gz')",
"def download():\n\n trainset = torchvision.datasets.CIFAR10(root=paths.raw_cifar10_dir(), train=True, download=True)\n testset = torchvision.datasets.CIFAR10(root=paths.raw_cifar10_dir(), train=False, download=True)\n train_images = numpy.array(trainset.data)\n train_labels = numpy.array(trainset.targets)\n test_images = numpy.array(testset.data)\n test_labels = numpy.array(testset.targets)\n\n assert numpy.max(train_images) == 255\n\n train_images = train_images/255.\n test_images = test_images/255.\n\n utils.write_hdf5(paths.cifar10_train_images_file(), train_images.astype(numpy.float32))\n log('wrote %s' % paths.cifar10_train_images_file())\n utils.write_hdf5(paths.cifar10_test_images_file(), test_images.astype(numpy.float32))\n log('wrote %s' % paths.cifar10_test_images_file())\n utils.write_hdf5(paths.cifar10_train_labels_file(), train_labels.reshape(-1, 1).astype(numpy.int))\n log('wrote %s' % paths.cifar10_train_labels_file())\n utils.write_hdf5(paths.cifar10_test_labels_file(), test_labels.reshape(-1, 1).astype(numpy.int))\n log('wrote %s' % paths.cifar10_test_labels_file())",
"def download_datasets():\n if not os.path.exists(\"__data__/cornell/movie_conversations.txt\") \\\n or not os.path.exists(\"__data__/cornell/movie_lines.txt\"):\n subprocess.call(['scripts/download_cornell.sh'])\n if not os.path.isdir('__data__/opensubs'):\n subprocess.call(['scripts/download_opensubs.sh'])",
"def getData(constrain):\n\n dat_AGS = chunks(AGS, 100)\n for num, ags_c in enumerate(dat_AGS):\n to_download = DOWNLOAD_LINK.format(ags_id=ags_c, constrain=constrain)\n to_download = to_download.replace(\" \", \"\")\n download_name = \"../Data/Gemeinden/{}-{}.csv\".format(\n constrain, num)\n\n url.urlretrieve(to_download, filename=download_name)\n\n sleep(1) # be nice\n\n return(num)",
"def download_dataset(url=DATASET_URL):\n df = pd.read_csv(url, index_col=0)\n \n # ディレクトリが無ければ,作成する\n if not os.path.isdir(BASE_DIR):\n os.makedirs(BASE_DIR)\n \n df.to_csv(LOCAL_FILE_NAME)",
"def download(path):\n\n # Check if directory exists\n if not os.path.isdir(path + \"birdvox_dcase_20k\"):\n print(\"Creating birdvox_dcase_20k Directory\")\n os.mkdir(path + \"birdvox_dcase_20k\")\n base = \"https://zenodo.org/record/1208080/files/\"\n filename = \"BirdVox-DCASE-20k.zip\"\n if not os.path.exists(path + \"birdvox_dcase_20k/\" + filename):\n url = base + filename + \"?download=1\"\n urllib.request.urlretrieve(url, path + \"birdvox_dcase_20k/\" + filename)\n url = \"https://ndownloader.figshare.com/files/10853300\"\n filename = \"data_labels.csv\"\n if not os.path.exists(path + \"birdvox_dcase_20k/\" + filename):\n urllib.request.urlretrieve(url, path + \"birdvox_dcase_20k/\" + filename)"
]
| [
"0.7337057",
"0.7103282",
"0.70431393",
"0.6646939",
"0.66247356",
"0.652305",
"0.6369042",
"0.63372713",
"0.63305813",
"0.6327974",
"0.62980855",
"0.628331",
"0.6242413",
"0.61973923",
"0.6178355",
"0.61595905",
"0.61565197",
"0.611133",
"0.61082715",
"0.60795313",
"0.6077686",
"0.60677314",
"0.6053149",
"0.6046052",
"0.601248",
"0.59885365",
"0.59837365",
"0.5974654",
"0.5951495",
"0.5939127"
]
| 0.8114325 | 0 |
Check whether vote generator generates 6 votes | def test_vote_generator(self):
self.assertEqual(len(self.vote_ballot), 6) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def result_poll(votes):\n return sum(votes) >= 2 / 3 * len(votes)",
"def Vote(i, j, budget, count):\r\n if(count < budget):\r\n if(random.uniform(0, i+j) < i):\r\n return True\r\n else:\r\n return False\r\n else:\r\n if(random.uniform(0, 1) < 0.5):\r\n return True\r\n else:\r\n return False",
"def voteCheck(number):\n\n if number >= MIN_VOTES and number <= MAX_VOTES:\n return True\n else:\n return False\n number = input(\"\\n\\tEnter votes: \")",
"def vote_of_citizens():\n\tglobal vote_first_candidate\n\tglobal vote_second_candidate\n\tglobal blank_vote\n\t\n\tfor i in range(NUMBER_OF_CITIZENS):\n\t\tvote = random.randint(1,10)\n\n\t\tif(vote <= 3):\n\t\t\tvote_first_candidate+=1\n\t\telif(vote > 3 and vote <= 6):\n\t\t\tvote_second_candidate+=1\n\t\telse:\n\t\t\tblank_vote+=1",
"def test_vote_count(self) -> None:\n self.downvote()\n self.downvote()\n vote_count = QuestionVote.objects.all().count()\n self.assertEqual(vote_count, 1)",
"def vote_result(self) -> bool:\n token_score = self.create_interface_score(self._token_score.get(), TokenInterface)\n yes = 0\n no = 0\n for address in self._voted:\n vote = self._vote[str(address)]\n if vote == 'yes':\n yes += token_score.balanceOf(address)\n else:\n no += token_score.balanceOf(address)\n self._yes_votes.set(yes)\n self._no_votes.set(no)\n if self._yes_votes.get() > (token_score.totalSupply() - token_score.balanceOf(self._rewards_score.get())) // 2:\n return True\n else:\n return False",
"def test_questions_num_votes(self):\n q = QuestionFactory(title=u'tags tags tags')\n\n # Add two question votes\n QuestionVoteFactory(question=q)\n QuestionVoteFactory(question=q)\n\n self.refresh()\n\n # Advanced search for questions with num_votes > 5. The above\n # question should be not in this set.\n response = self.client.get(reverse('search.advanced'), {\n 'q': '', 'tags': 'desktop', 'w': '2', 'a': '1',\n 'num_voted': 2, 'num_votes': 5,\n 'format': 'json'\n })\n\n eq_(200, response.status_code)\n\n content = json.loads(response.content)\n eq_(content['total'], 0)\n\n # Advanced search for questions with num_votes < 1. The above\n # question should be not in this set.\n response = self.client.get(reverse('search.advanced'), {\n 'q': '', 'tags': 'desktop', 'w': '2', 'a': '1',\n 'num_voted': 1, 'num_votes': 1,\n 'format': 'json'\n })\n\n eq_(200, response.status_code)\n\n content = json.loads(response.content)\n eq_(content['total'], 0)",
"def _vote(self, team):\r\n return True",
"def can_vote(age):\n return age >= 18",
"def enoughForLeader(self, votes):\n entry = self.getConfig()\n if entry['config'] == 'single':\n validVotes = len(set(entry['data'].keys()) & set(votes))\n return validVotes > len(entry['data']) / 2\n validVotesOld = len(set(entry['data'][0].keys()) & set(votes))\n validVotesNew = len(set(entry['data'][1].keys()) & set(votes))\n return validVotesOld > len(entry['data'][0]) / 2 and \\\n validVotesNew > len(entry['data'][1]) / 2",
"def test_unpopular(self):\n self.assertFalse(self.user3.is_popular())\n self.user3.receive_upvotes(randint(101, 10000))\n self.assertTrue(self.user3.is_popular())",
"async def applyVote(self, votes):\n voteCount = {vote: 0 for vote in self.getMembersName()}\n voteCount[None] = 0\n for vote in votes.values():\n voteCount[vote] += 1\n\n if voteCount[None] != 0:\n await self.textChannel.send(\n \"Attention, des joueurs n'ont pas voté / ont mal écrit, les votes peuvent être faussés.\")\n del voteCount[None]\n\n playerOrder = sorted(voteCount.items(), key=lambda x: x[1], reverse=True)\n print(\"playerOrder\", playerOrder)\n if playerOrder[0][1] == 0: # Nobody vote\n await self.textChannel.send(\"`Partie non valide`, personne n'a voté.\")\n\n elif playerOrder[0][1] == 1: # People think nobody is a werewolf\n await self.textChannel.send(\"Le village pense qu'il n'y a pas de loups-garou ? Vérification ...\")\n werewolves = self.getWolves()\n if len(werewolves) == 0:\n await self.textChannel.send(\"Le village a raison, il n'y a pas de loups-garous parmis eux.\")\n await self.textChannel.send(\"```css\\nLES VILLAGEOIS ONT GAGNÉ```\")\n else:\n await self.textChannel.send(\"Malheuresement, il y avait```\" + \", \".join(werewolves) + \"```\")\n await self.textChannel.send(\"```diff\\n-LES LOUPS-GAROUS ONT GAGNÉ-```\")\n\n else: # Classic vote\n werewolves = self.getWolves()\n deaths = []\n for i in range(len(playerOrder)):\n player = self.getMemberFromName(name=playerOrder[i][0])\n isDead = await player.isDead(channel=self.textChannel)\n if isDead:\n deaths += await player.death(channel=self.textChannel, members=self.players)\n print(\"voteCount :\", voteCount)\n\n # Get player name with same number of vote against them\n playerEqualVote = []\n for p in playerOrder:\n if p[1] == playerOrder[i][1] and p[0] != playerOrder[i][0]:\n playerEqualVote.append(self.getMemberFromName(name=p[0]))\n print(\"Other players with equals number of vote :\", playerEqualVote)\n for otherPlayer in playerEqualVote:\n isDead = await otherPlayer.isDead(channel=self.textChannel)\n if isDead:\n deaths += await otherPlayer.death(channel=self.textChannel, members=self.players)\n break\n\n for i in range(len(deaths)):\n if deaths[i] is None:\n del deaths[i]\n\n if len(deaths) == 0: # No one die\n if len(werewolves) == 0: # No Werewolves\n await self.textChannel.send(\"Il n'ya pas eu de mort et il n'y a aucun Loup-Garou !\")\n await self.textChannel.send(\"```css\\nLES VILLAGEOIS ONT GAGNÉ```\")\n else: # Werewolves among players\n await self.textChannel.send(\n \"Il n'y a pas eu de mort mais```\" + \", \".join(werewolves) + \"```\")\n await self.textChannel.send(\"```diff\\n-LES LOUPS-GAROUS ONT GAGNÉ-```\")\n\n elif len(deaths) == 1:\n if deaths[0].lastRole in [\"Loup-Garou\", \"Loup Alpha\", \"Loup Shamane\", \"Loup rêveur\"]: # Werewolf die\n await self.textChannel.send(\"```css\\nLES VILLAGEOIS ONT GAGNÉ```\")\n elif deaths[0].lastRole in [\"Tanneur\"]: # Tanner died\n await self.textChannel.send(\"```Fix\\n#LE TANNEUR A GAGNÉ#```\")\n if len(werewolves) > 0: # Wolves in game\n await self.textChannel.send(\"```diff\\n-LES LOUPS-GAROUS ONT ÉGALEMENT GAGNÉ```\")\n else: # Villager died\n await self.textChannel.send(\"```diff\\n-LES LOUPS-GAROUS ONT GAGNÉ-```\")\n\n else: # more than 2 deaths\n rolesDead = []\n for dead in deaths:\n if dead.lastRole in [\"Loup-Garou\", \"Loup Alpha\", \"Loup Shamane\", \"Loup rêveur\"]:\n rolesDead.append(\"Loup-Garou\")\n elif dead.lastRole in [\"Tanneur\"]:\n await self.textChannel.send(\"```Fix\\n#LE TANNEUR A GAGNÉ#```\")\n else:\n rolesDead.append(\"Villageois\")\n print(\"rolesDead :\", rolesDead)\n rolesDead = list(dict.fromkeys(rolesDead))\n 
print(\"rolesDead unique :\", rolesDead)\n if \"Loup-Garou\" in rolesDead:\n await self.textChannel.send(\"```css\\nLES VILLAGEOIS ONT GAGNÉ```\")\n else:\n await self.textChannel.send(\"```diff\\n-LES LOUPS-GAROUS ONT GAGNÉ-```\")",
"def test_primary_election_case6(elections_done, txnPoolNodeSet, looper,\n sdk_pool_handle,\n sdk_wallet_steward):\n sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,\n sdk_wallet_steward, 5)",
"async def create_random_vote(self):\n \"\"\"This is done for prevent automatic or robotic voting.\"\"\"\n random_number = random.randrange(0, 100000)\n current_time = int(time.time())\n result = await self.db.fetchrow(\n # 'select * from stickers order by random() limit 1;'\n 'SELECT * FROM stickers TABLESAMPLE SYSTEM_ROWS(1);'\n )\n random_sticker = json.loads(result[1])\n token = await self.db.fetchval(\n \"select md5('{}');\".format(random_number))\n await self.db.fetch(\n \"insert into secret (data) values\"\n \"('{}')\".format(json.dumps([\n token,\n current_time,\n random_sticker[0],\n random_sticker[2]\n ])))\n return (random_sticker[2], token)",
"def addable(self):\n for dn in self.dim:\n cval = getattr(self, dn)\n if self.dim[dn].vote(cval) is False:\n CrawlConfig.log(\"%s votes against %s -- skipping\" %\n (dn, self.path))\n return False\n randval = random.random()\n if self.probability < randval:\n CrawlConfig.log(\"random votes against %s -- skipping (%g < %g)\" %\n (self.path, self.probability, randval))\n return False\n return True",
"def test_vote_view_allows_to_vote(self):\n self.client.login(username=\"John\", password=\"newpass1234\")\n votes_len = len(PostVotes.objects.all())\n response = self.client.post('/posts/2/vote/', {\"vote\": \"-1\"})\n self.assertEqual(len(PostVotes.objects.all()), votes_len + 1)",
"def verify_winner(self):\r\n return self.count_pegs() == 1",
"def has_voted(self, user):\n return user.choice_set.filter(vote=self).exists()",
"def test_n_volunteers(self):\r\n\r\n app = self.create_app_with_contributors(anonymous=2, registered=3, two_tasks=True)\r\n total_volunteers = cached_apps.n_volunteers(app.id)\r\n\r\n err_msg = \"Volunteers is %s, it should be 5\" % total_volunteers\r\n assert total_volunteers == 5, err_msg",
"def get_voters():",
"def get_voters():",
"def test_basic(self):\n with build_video(self.user, votes=0) as video:\n votes = video.votes\n add_vote(video)\n video = Video.objects.get(pk=video.pk)\n eq_(video.votes, votes + 1)",
"def all_votesp(self, game_key):\n participants = models.Participant.query(\n models.Participant.playing == True,\n models.Participant.vote == None,\n ancestor=game_key).fetch()\n logging.info(\n \"participants who have not voted: %s\", \n [p.plus_id for p in participants])\n if participants:\n return False\n else:\n return True",
"def count_upvotes(self):\n return self.filter(value=1).count()",
"def test_vote_submission(self):\n starting_count = Vote.objects.count()\n data = { \"candidate\":3,\n \"student\":2}\n response = self.client.post(\"/vote/\", data, format='json')\n print(response.data)\n assert response.status_code == status.HTTP_201_CREATED\n assert Vote.objects.count() - starting_count == 1",
"def multishot(attacker_schema, victim_schema):\n\n multishot = attacker_schema.multishot.get(victim_schema.name, 0)\n return multishot > 0 and (multishot - 1.0) / multishot > random.random()",
"def test_15(self):\n\t self.assertTrue(prime_generator(15), [2, 3, 5, 7, 11, 13])",
"def test_fav_6(self):\n\t\tplayer_list = [Player(\"Blake Base\", 1, 300000, 10), Player(\"Corey Catcher\", 2, 500000, 20), Player(\"Dexter Dugout\", 3, 200000, 50)]\n\t\tself.assertEqual( free_agent_vorp(player_list, 100000, 4), (0, 0, []) )",
"def vote_exists(self):\n con = psycopg2.connect(**self.config)\n cur = con.cursor(cursor_factory=RealDictCursor)\n try:\n query = \"SELECT user_id, vote_id FROM votes WHERE answer_id=%s AND user_id=%s\"\n cur.execute(query, (self.answer_id, self.user_id))\n queryset_list = cur.fetchall()\n con.close()\n if len(queryset_list) < 1:\n return False\n return True\n except Exception as e:\n print(e)\n con.close()\n return False",
"def user_interaction(uv, recommended_News, ranked=True):\n\n iv = recommended_News[\"topical_vector\"]\n\n product = simple_doct_product(uv, iv)\n\n epsilon = 10e-5\n\n if (product + epsilon) > 1.0:\n vui = 0.99\n else:\n vui = beta_distribution(product)\n\n # Awared preference\n ita = beta_distribution(0.98)\n pui = vui * ita\n\n rand_num = np.random.random()\n\n if rand_num < pui:\n return True\n else:\n return False"
]
| [
"0.6676807",
"0.6540372",
"0.630108",
"0.6265139",
"0.61997306",
"0.6176548",
"0.6131795",
"0.60507333",
"0.6007827",
"0.5923374",
"0.5891573",
"0.5854174",
"0.5840629",
"0.5838884",
"0.5776376",
"0.57043755",
"0.5663822",
"0.5648873",
"0.5608658",
"0.5601602",
"0.5601602",
"0.5591745",
"0.5590041",
"0.5586774",
"0.5582956",
"0.5534607",
"0.5526226",
"0.5507976",
"0.5470689",
"0.54632425"
]
| 0.77197534 | 0 |
Unit test case for kingdom_result method. Used in Problem1 | def test_kingdom_result(self):
voting_machine = VotingMachine()
vote_ballot = []
all_kingdoms = ['land', 'air', 'ice']
competing_kingdoms = {'space': []}
for name in all_kingdoms:
sender = Kingdom.get_kingdom('space')
receiver = Kingdom.get_kingdom(name)
message_txt = 'oaaawaalaa1d22n333a4444pzmzmzmzaztzozh'
message_obj = Message(sender, receiver, message_txt)
vote_ballot.append(message_obj)
voting_machine.execute(vote_ballot, competing_kingdoms)
ruler, allies = voting_machine.kingdom_result('space', 2)
self.assertEqual(ruler, 'Space')
self.assertEqual(allies, 'Land, Air, Ice') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_result(self):\n result = compute()\n self.assertEqual(result, '4782')\n print(\"eulpy25Test passed\")",
"def test_initialization_of_homework_result_solution():\n assert result_1.solution == \"I have done this hw\"",
"def test_goldbach(n, result):\n from goldbach import goldbach\n assert goldbach(n) == result",
"def test_teacher_check_homework_positive():\n assert opp_teacher.check_homework(result_1)",
"def test_check_cost():",
"def main():\r\n test = TesterNeighbour()\r\n test.setUp()\r\n test.test_result_n()\r\n print(\"result_of_algorithm_test - passed\")",
"def getTestResults():",
"def test_should_return_correct_data(self, mocked_get_data_loader):\n\n mocked_get_data_loader.return_value = mock_data_loader\n\n correct_kingdoms = {\n 'SPACE': Kingdom('SPACE', 'Gorilla'),\n 'LAND': Kingdom('LAND', 'Panda'),\n 'WATER': Kingdom('WATER', 'Octopus'),\n 'ICE': Kingdom('ICE', 'Mammoth'),\n 'AIR': Kingdom('AIR', 'Owl'),\n 'FIRE': Kingdom('FIRE', 'Dragon')\n }\n\n result_kingdoms = KingdomRepositoryServiceCsvImpl().get_all_kingdoms()\n\n for i in correct_kingdoms:\n\n self.assertEqual(correct_kingdoms[i], result_kingdoms[i])",
"def test_get_solution(self):\n pass",
"def test_initialization_of_homework_result_homework():\n assert result_1.homework == oop_hw",
"def test_get_results(self):\n pass",
"def test_kyc_get_legal(self):\n pass",
"def test_part_2(arguments, distance, output):\n assert part_2.solution(arguments, distance) == output",
"def test_arc_smear(self):",
"def test_teacher_check_homework_negative_if_solution_is_not_ok():\n assert not opp_teacher.check_homework(result_3)",
"def test_households_in_admin_unit(self):",
"def test_josephus_survivor(self):\n\n allure.dynamic.title(\"Testing josephus_survivor function\")\n allure.dynamic.severity(allure.severity_level.NORMAL)\n allure.dynamic.description_html('<h3>Codewars badge:</h3>'\n '<img src=\"https://www.codewars.com/users/myFirstCode'\n '/badges/large\">'\n '<h3>Test Description:</h3>'\n \"<p>In this kata you have to verify that the function \"\n \"correctly returns who is the \\\"survivor\\\", ie: the \"\n \"last element of a Josephus permutation.</p>\")\n\n test_data = [\n ((7, 3), 4),\n ((11, 19), 10),\n ((1, 300), 1),\n ((14, 2), 13),\n ((100, 1), 100)\n ]\n\n for test_data, expected in test_data:\n n = test_data[0]\n k = test_data[1]\n result = josephus_survivor(n, k)\n\n with allure.step(\"Enter test data (n: {}, k: {}) and verify \"\n \"the output ({}) vs expected ({})\".format(n,\n k,\n result,\n expected)):\n print_log(n=n,\n k=k,\n result=result,\n expected=expected)\n\n self.assertEqual(expected,\n result)",
"def test_resultingGoose_outside(self):\n goose = coordinate.Coordinate(5, 5)\n actual_result = rules.resultingGoose(types.GOOSE, goose)\n expected_result = types.GOOSE\n self.assertEqual(actual_result, expected_result)",
"def test_sum_three_five(upper, result):\n from sum_three_five import solution\n assert solution(upper) == result",
"def test_resultingGoose_inside(self):\n goose = coordinate.Coordinate(4, 2)\n actual_result = rules.resultingGoose(types.GOOSE, goose)\n expected_result = types.SUPERGOOSE\n self.assertEqual(actual_result, expected_result)",
"def test_student_do_homework_positive():\n assert isinstance(result_1, HomeworkResult)",
"def test_katsuura(self):\n fun = get_problem('katsuura', self.dimension, -100, 100)\n self.assertAlmostEqual(fun(self.array10), 3837.4739882594373, delta=4000)",
"def test_apply_endorsements(self):",
"def test_get_war_result_tie(self):\n five = Card.objects.create(suit=Card.CLUB, rank=\"five\")\n five2 = Card.objects.create(suit=Card.HEART, rank=\"five\")\n self.assertEqual(five.get_war_result(five2), 0)",
"def test_zakharov(self):\n fun = get_problem('zakharov', 2, -5, 10)\n self.assertAlmostEqual(fun(np.zeros(2)), 0.0)",
"def test_wip(self):\n self.assertTrue(not return_true())",
"def test_unit(self):\n self.assertTrue(return_true())",
"def test_2():\n results = base_tests()\n correct = {\n \"Consequence\": \"synonymous_variant\",\n \"Codons\": \"tgC/tgT\",\n \"Amino_acids\": \"C\",\n \"Gene\": \"ENSG00000130164\",\n \"SYMBOL\": \"LDLR\",\n \"Feature\": \"ENST00000558013\",\n \"EXON\": \"2/18\",\n \"PolyPhen\": \"\",\n \"SIFT\": \"\",\n \"Protein_position\": \"27/858\",\n 'BIOTYPE\"': \"protein_coding\",\n }\n assert results[0] == correct",
"def test_calculation(self):\n expected = [['William Gates, III', \"$\", 653784.49, 2, \"$\", 326892.24],\n['Mark Zuckerberg', \"$\", 16396.10, 3, \"$\", 5465.37],\n['Jeff Bezos', \"$\", 877.33, 1, \"$\", 877.33],\n['Paul Allen', \"$\", 708.42, 3, \"$\", 236.14]]\n actual = mailroom4.calculation()\n self.assertEqual(expected,actual)",
"def test_4():"
]
| [
"0.64322424",
"0.62916774",
"0.6290475",
"0.6263768",
"0.6191611",
"0.617668",
"0.6162487",
"0.6086435",
"0.60824585",
"0.60282415",
"0.60280466",
"0.59648955",
"0.59591895",
"0.5949394",
"0.59443086",
"0.59283173",
"0.591891",
"0.5888462",
"0.5880813",
"0.5860383",
"0.5826834",
"0.582008",
"0.58148026",
"0.5808331",
"0.57436615",
"0.5743359",
"0.5743124",
"0.57384336",
"0.57246643",
"0.57180685"
]
| 0.7432416 | 0 |
Test to ensure we can retrieve the leaders list from leaderboard.json | def test_get_leaders(self):
leaders = app.get_leaders()
self.assertEqual(len(leaders), 1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list(self, request):\n team_leaders = self.controller.retrieve_all_teams_leaders()\n serializer = data_serializers.TeamLeaderPresenterSerializer(team_leaders, many=True)\n return Response(serializer.data)",
"def test_03_leaderboard(self):\r\n # As Anonymou user\r\n url = \"/leaderboard\"\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Leaderboard page should not be shown to anonymous users\"\r\n assert dom.find(id='enforce_privacy') is not None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Leaderboard page should not be shown to authenticated users\"\r\n assert dom.find(id='enforce_privacy') is not None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Leaderboard page should be shown to admin users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout()",
"def test_03_leaderboard(self):\r\n # As Anonymou user\r\n url = \"/leaderboard\"\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Leaderboard page should be shown to anonymous users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Leaderboard page should be shown to authenticated users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Leaderboard page should be shown to admin users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout()",
"def test_teams_read(self):\n pass",
"def test_get(self):\r\n resp = self.client.get_json(self.url + '/0')\r\n self.assertEqual(resp.status_code, 200)\r\n obj = json.loads(resp.content)\r\n self.assertEqual(self.starting_graders[0], obj)",
"def test_get_learners(self):\n pass",
"def test_get_candidates(self):\n self.populate_database()\n response = self.client.get(\"/api/races/2/candidates\",\n headers=[(\"Accept\", \"application/json\")])\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.mimetype, \"application/json\")\n\n candidates = json.loads(response.data.decode(\"ascii\"))\n self.assertEqual(len(candidates), 4)\n self.assertEqual(candidates[0][\"title\"], \"Candidate BA\")",
"def test_get_leader(self):\n self._mock_api(200, 'foo.example.com')\n self.assertEquals(self.client.election.get('/mysql'), 'foo.example.com')\n self._mock_api(200,'')\n self.assertRaises(etcd.EtcdException, self.client.election.get, '/mysql')",
"def test_get_races(self):\n self.populate_database()\n response = self.client.get(\"/api/elections/1/races\",\n headers=[(\"Accept\", \"application/json\")])\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.mimetype, \"application/json\")\n\n races = json.loads(response.data.decode(\"ascii\"))\n self.assertEqual(len(races), 1)\n self.assertEqual(races[0][\"id\"], 1)",
"def test_get_elections(self):\n self.populate_database()\n # electionB = session.query(models.Election).filter(\n # models.Election.title == \"Election B\")\n response = self.client.get(\"/api/elections/\",\n headers=[(\"Accept\", \"application/json\")])\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.mimetype, \"application/json\")\n\n elections = json.loads(response.data.decode(\"ascii\"))\n self.assertEqual(len(elections), 2)\n self.assertEqual(elections[0][\"id\"], 1)",
"def test_retrieve_team(self):\n pass",
"def test_get_teams(self):\n pass",
"def test_get_teams(self):\n pass",
"def test_get_league_leaders___goaltending(self):\n msg = \"Response status is not 200\"\n response = self.api.get_league_leaders___goaltending(self.season, self.nhl_season)\n self.assertEqual(response.status_code, 200, msg)",
"def test_load_draft(league):\n draft = league.draft_results()\n assert(len(draft) == 144)\n #mcdavid 1st\n assert(draft[0]['player_key'] == '396.p.6743')\n # carter hart 67th\n assert(draft[66]['player_key'] == '396.p.7156')\n # zadorov last\n assert(draft[-1]['player_key'] == '396.p.5995')",
"def test_get_league_hierarchy(self):\n msg = \"Response status is not 200\"\n response = self.api.get_league_hierarchy()\n self.assertEqual(response.status_code, 200, msg)",
"async def load_state(self):\n\n\t\twith open(os.path.join(\"config\", \"leaderboards.json\"), \"r+\") as leaderboards:\n\t\t\tself.leaderboards = json.loads(leaderboards.read())",
"def test_candidates_retrieve(self):\n pass",
"def test_length_of_teammates_list(self):\n response = self.client.get(\"/team/all/\", format='json')\n self.assertEqual(response.data, {'status': 200, 'data': []})",
"def test_teams_list(self):\n pass",
"def test_get_election(self):\n self.populate_database()\n # electionB = session.query(models.Election).filter(\n # models.Election.title == \"Election B\")\n response = self.client.get(\"/api/elections/{}\".format(self.electionB.id),\n headers=[(\"Accept\", \"application/json\")])\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.mimetype, \"application/json\")\n\n election = json.loads(response.data.decode(\"ascii\"))\n self.assertEqual(election[\"title\"], \"Election B\")\n self.assertEqual(election[\"admin_id\"], self.electionB.admin_id)",
"def test_set_leader(self):\n self._mock_api(200, u'234')\n #set w/o a TTL or a name\n self.assertEquals(self.client.election.set('/mysql'), u'234')\n self.assertEquals(self.client.election.set(\n '/mysql',\n name='foo.example.com',\n ttl=60), u'234')\n self._mock_api(500, 'leader name required')\n self.assertRaises(etcd.EtcdException, self.client.election.set,'/mysql')",
"def test_get_team_history(self):\n pass",
"def do_test_we_are_the_leader(self, h_is_leader, h_leader_set):\n states = r_state.r_get_states()\n r_state.remove_state(LEADER_STATE)\n no_leader = r_state.r_get_states()\n r_state.set_state(LEADER_STATE)\n leader = r_state.r_get_states()\n self.assertNotEquals(no_leader, leader)\n self.assertEquals(no_leader.union(set([LEADER_STATE])), leader)\n\n is_leader_call_count = h_is_leader.call_count\n leader_set_call_count = h_leader_set.call_count\n # is_leader() fails\n h_is_leader.return_value = False\n testee.we_are_the_leader()\n self.assertEquals(no_leader, r_state.r_get_states())\n self.assertEquals(is_leader_call_count + 1, h_is_leader.call_count)\n self.assertEquals(leader_set_call_count + 0, h_leader_set.call_count)\n\n def raise_fail(*args, **kwargs):\n \"\"\"\n Simulate a leader_set() failure.\n \"\"\"\n raise Exception(\"oops\")\n\n # is_leader() succeeds, but leader_set() fails\n h_is_leader.return_value = True\n h_leader_set.side_effect = raise_fail\n testee.we_are_the_leader()\n self.assertEquals(no_leader, r_state.r_get_states())\n self.assertEquals(is_leader_call_count + 2, h_is_leader.call_count)\n self.assertEquals(leader_set_call_count + 1, h_leader_set.call_count)\n\n self.lset_args = None\n self.lset_kwargs = None\n\n def record_leader_set_args(*args, **kwargs):\n \"\"\"\n Make sure leader_set() was invoked with the correct parameters.\n \"\"\"\n self.lset_args = args\n self.lset_kwargs = kwargs\n\n # ...and now it all works out\n h_is_leader.return_value = True\n h_leader_set.side_effect = record_leader_set_args\n testee.we_are_the_leader()\n self.assertEquals(leader, r_state.r_get_states())\n self.assertEquals(is_leader_call_count + 3, h_is_leader.call_count)\n self.assertEquals(leader_set_call_count + 2, h_leader_set.call_count)\n self.assertEquals((), self.lset_args)\n self.assertEquals(\n {\"charm_storpool_block_unit\": sputils.MACHINE_ID}, self.lset_kwargs\n )\n\n r_state.r_set_states(states)",
"def test_kyc_get_legal_board_members(self):\n pass",
"def test_get_league_leaders___skaters(self):\n msg = \"Response status is not 200\"\n response = self.api.get_league_leaders___skaters(self.season, self.nhl_season)\n self.assertEqual(response.status_code, 200, msg)",
"def test_retrieve_l_organizations(self):\n pass",
"def test_candidate(self):\n url = reverse('candidates-list')\n print(url, type(url))\n response = self.client.get(url, format='json')\n assert response.status_code == status.HTTP_200_OK\n assert Candidate.objects.count()== len(response.json())",
"def detect_leader(self):\n # Should this be moved to the AF script?\n lfs = self.read_db_logfile()\n\n became_leader = lfs.find(\"Became leader in\") >= 0\n took_over = lfs.find(\"Successful leadership takeover:\" + \" All your base are belong to us\") >= 0\n self.is_leader = became_leader or took_over\n if self.is_leader:\n url = self.get_frontend().get_local_url(\"\")\n reply = requests.get(url, auth=requests.auth.HTTPBasicAuth(\"root\", self.passvoid), timeout=120)\n print(f\"{url} => {str(reply)}\")\n if reply.status_code == 503:\n self.is_leader = False\n return self.is_leader",
"def test_time_league(self):\n result = self.test_client.league\n\n assert isinstance(result, dict)"
]
| [
"0.6381545",
"0.6360777",
"0.63352084",
"0.6237816",
"0.6234864",
"0.6102163",
"0.6041341",
"0.5974139",
"0.59478533",
"0.5893005",
"0.58824646",
"0.58673745",
"0.58673745",
"0.58460206",
"0.58338076",
"0.58337706",
"0.5821925",
"0.5797607",
"0.579719",
"0.5778626",
"0.5751442",
"0.57207054",
"0.5712118",
"0.5704819",
"0.5695931",
"0.56803924",
"0.56716543",
"0.5666807",
"0.5657217",
"0.5649384"
]
| 0.7366307 | 0 |
For now, the string method simply returns the topology of the network. | def __str__(self):
return "Network: {0}".format(self.topology) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def network(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"network\")",
"def __str__(self):\n return ('NodesNetwork('\n f'uris: {self.uris.array}, '\n f'sockets: {self.sockets.array})')",
"def network(self) -> str:\n return pulumi.get(self, \"network\")",
"def topology_name(self):\n return self._topology_name",
"def __str__(self):\n built_string = \"Graph(\"\n built_string += str(self.get_nodes())\n built_string += \", \"\n built_string += str(self.get_edges())\n built_string += \")\"\n return built_string",
"def __str__(self):\n built_string = \"Graph(\"\n built_string += str(self.get_nodes())\n built_string += \", \"\n built_string += str(self.get_edges())\n built_string += \")\"\n return built_string",
"def __str__(self):\n stringRepresentation = []\n for node in self.getNodes():\n stringRepresentation.append(\"->\".join(\n (str(node), str(self.graph[node]))))\n\n return str(stringRepresentation)",
"def __str__(self):\n s = ''\n for node in self.nodes:\n s += '\\n\\n'+str(node)+'\\n\\t'\n edges = node.getChildren()\n keys = edges.keys()\n keys.sort()\n for key in keys:\n bounds = edges[key].getSuffix()\n s += str(edges[key])+' '\n for i in xrange(bounds[0], bounds[1]):\n s += self.target[i]\n s += '\\n\\t'\n return s",
"def topology(self):\n return self._topology",
"def __repr__(self):\n string = self.__class__.__name__\n string += f'\\n\\tNetwork: {self.network.__repr__()}'\n string += f'\\n\\tRecurrent: {self.recurrent}'\n \n return string",
"def to_string(self):\n return \"{}.{}[{}]->{}\".format(self.driver, self.port, self.pin, self.interconnect)",
"def network_format(self):\n network_format = \"d{}{}{}{}{}{}{}\".format(fixed_length_hex(self.block_number, 6), fixed_length_hex(self.timestamp, 8),\n fixed_length_hex(self.difficulty, 2), fixed_length_hex(self.nonce, 64), self.prev_hash,\n self.merkle_root_hash, fixed_length_hex(len(self.transactions), 2))\n for t in self.transactions:\n if isinstance(t, str):\n t = Transaction.parse(t)\n network_format += fixed_length_hex(len(t.network_format()), 5)\n network_format += t.network_format()\n return network_format",
"def __str__(self):\n\t\treturn str(self.graph)",
"def build_topology(self):\n# errstr = \"build_topology() is not implemented.\\n\"\n# errstr += textwrap.dedent(self.build_topology.__doc__)\n# raise NotImplementedError(errstr)\n pass # May be a 1-compartment neuron. No need to abstract. ",
"def dump_topology_to_string(shape, level=0, buffer=\"\"):\n brt = BRep_Tool()\n s = shape.ShapeType()\n if s == TopAbs_VERTEX:\n pnt = brt.Pnt(topods_Vertex(shape))\n print(\"..\" * level + \"<Vertex %i: %s %s %s>\\n\" % (\n hash(shape), pnt.X(), pnt.Y(), pnt.Z()))\n else:\n print(\"..\" * level, end=\"\")\n print(shape_type_string(shape))\n it = TopoDS_Iterator(shape)\n while it.More() and level < 5: # LEVEL MAX\n shp = it.Value()\n it.Next()\n print(dump_topology_to_string(shp, level + 1, buffer))",
"def __str__(self):\n ret_str = \"\"\n for element_type in ('nodes', 'edges', 'layers'):\n elements = getattr(self, element_type)\n subtype_counts = defaultdict(int)\n ret_str += \"{0} {1}:\\n\".format(len(elements), element_type)\n for element in elements:\n subtype_counts[type(element).__name__] += 1\n for subtype in subtype_counts:\n ret_str += \"\\t{0}: {1}\\n\".format(subtype,\n subtype_counts[subtype])\n if element_type == 'layers':\n layer_names = [layer.name for layer in self.layers]\n ret_str += \"\\t\\t\" + \", \".join(layer_names)\n ret_str += \"\\n\"\n return ret_str",
"def network(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"network\")",
"def __str__(self):\n return 'InteractingNetworks:\\n' + Network.__str__(self)",
"def __str__(self):\n weight = self.weight * self.connectivity\n strio = io.StringIO()\n for i in range(self.dim_in, self.dim_node):\n if i<self.dim_in+self.dim_hid and not self.hidden[i-self.dim_in]: # no such hidden node\n continue\n # strio.write('{:6.1f}'.format(weight[i][0]))\n strio.write('{:.6f}'.format(weight[i][0]))\n for j in range(1, self.dim_node-1):\n if self.dim_in<=j<self.dim_in+self.dim_hid and not self.hidden[j-self.dim_in]: # this node is not connected\n continue\n # strio.write(' {:6.1f}'.format(weight[i][j]))\n strio.write(' {:.6f}'.format(weight[i][j]))\n if i < self.dim_node - 1:\n strio.write('\\n')\n return strio.getvalue()",
"def __str__(self):\n return self.socket.__str__()",
"def __str__(self):\n text = (\"EventSynchronizationClimateNetwork:\\n%s\\n%s\\n\"\n \"Type of event synchronization to construct the network: %s\")\n return text % (EventSynchronization.__str__(self),\n ClimateNetwork.__str__(self), self.__eventsynctype)",
"def __repr__(self):\n output = \"\"\n output += \"--- geounit Node ---\\n\"\n output += \"geocode: \" + str(self.geocode) + \", geolevel \" + str(self.geolevel) + \"\\n\"\n output += \"parent geocode: \" + str(self.parentGeocode) + \"\\n\"\n if self.congDistGeocode:\n output += \"congressional districts geocode: \" + str(self.congDistGeocode) + \"\\n\"\n if self.sldlGeocode:\n output += \"state lower chambers geocode: \" + str(self.sldlGeocode) + \"\\n\"\n if self.slduGeocode:\n output += \"state upper chambers geocode: \" + str(self.slduGeocode) + \"\\n\"\n if self.raw is not None:\n output += \"raw.shape: \" + str(self.raw.toDense().shape) + \"\\n\"\n output += \"raw: \" + str(self.raw.toDense()) + \"\\n\"\n else:\n output += \"raw: None\\n\"\n if self.raw_housing is not None:\n output += \"raw_housing\" + str(self.raw_housing.toDense()) + \"\\n\"\n else:\n output += \"raw_housing: None\\n\"\n output += \"dp: \" + str(self.dp) + \"\\n\"\n output += \"cons: \" + str(self.cons) + \"\\n\"\n output += \"invar: \" + str(self.invar) + \"\\n\"\n output += \"syn: \" + str(self.syn) + \"\\n\"\n output += \"syn_unrounded: \" + str(self.syn_unrounded) + \"\\n\"\n output += \"dp_queries: \" + str(self.dp_queries) + \"\\n\"\n return output",
"def __str__(self):\n return np.array2string(self.graph.toarray())",
"def __repr__(self):\n return f\"{self.host}/{self.iface}\"",
"def as_str(self):\n connectivity_str = '_'.join(map(str, self.values))\n return connectivity_str",
"def get_network(self) -> Optional[str]:\n return self.get_value(self._network_attribute)",
"def __str__(self):\n return ('PartialCorrelationClimateNetwork:\\n'\n f'{TsonisClimateNetwork.__str__(self)}')",
"def __str__(self):\n \n return self._addr[0] + \":\" + str(self._addr[1])",
"def __str__(self, printODData = False):\n networkStr = \"Link\\tFlow\\tCost\\n\"\n for ij in sorted(self.link, key=lambda ij : self.link[ij].sortKey):\n networkStr += \"%s\\t%f\\t%f\\n\" % (ij, self.link[ij].flow, self.link[ij].cost)\n if printODData == True:\n networkStr += \"\\n\"\n networkStr += \"OD pair\\tDemand\\tLeastCost\\n\"\n for ODpair in self.ODpair:\n networkStr += \"%s\\t%f\\t%f\\n\" % (ODpair, self.ODpair[ODpair].demand, self.ODpair[ODpair].leastCost)\n return networkStr",
"def topology(self):\n return self._h5[TOPOLOGY][()]"
]
| [
"0.70776266",
"0.7034348",
"0.6924842",
"0.67171836",
"0.6608755",
"0.6608755",
"0.65742755",
"0.6554554",
"0.6553372",
"0.6523976",
"0.64513",
"0.641888",
"0.6392012",
"0.638203",
"0.636719",
"0.63357216",
"0.6312321",
"0.63068455",
"0.6295409",
"0.6251271",
"0.62105703",
"0.6209774",
"0.62081313",
"0.62006915",
"0.6199994",
"0.6187077",
"0.61840487",
"0.6159987",
"0.6108517",
"0.6103776"
]
| 0.8206173 | 0 |
Returns the number of features or synapses (connections) present in the network. | def get_num_connections(self):
synapses = 0
for mat in self.weights:
synapses += mat.size
return synapses | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getNeuronCount(self):\n\t\treturn self.loader.getNeuronCount()",
"def num_node_features(self):\n return self[0].num_node_features",
"def num_features(self) -> Dict[NodeType, int]:\n return self.num_node_features",
"def get_num_features(self):\r\n \r\n return len(self[0]['x'])",
"def count(self):\n\t\treturn len(list(self.nodes))",
"def getNrFeatures(self):\n return self.featureNames.size",
"def count_nodes(self):\n\t\treturn self.__count_nodes(self)",
"def _num_conn_comp(graph):\n\n return nx.number_connected_components(graph)",
"def return_num_edges(self):\n return sum(map(lambda x: len(x),self.__adj))",
"def num_node_features(self) -> int:\n data, _, _ = self[0]\n if hasattr(data, 'num_node_features'):\n return data.num_node_features\n raise AttributeError(f\"'{data.__class__.__name__}' object has no \"\n f\"attribute 'num_node_features'\")",
"def _number_of_edges(self):\n if self._edges is None:\n return 0\n return len(self._edges)",
"def countEdges(self):\n n = 0\n for (hub, table) in self.totsupport.iteritems():\n n += len(table)\n return n",
"def NodesCount(self):\n return len(self.nodes)",
"def get_network_stats(net):\n return net.get_num_connections(), net.num_neurons, len(net.neurons_in_layer)",
"def num_layers(self):\n return self._num_layers",
"def node_count(self):\n return self._node_count",
"def GetNumberOfNetworks(self):\n return len(self.LastScan)",
"def num_edge_features(self):\n return self[0].num_edge_features",
"def numNodes(self):\n res = 0\n for n in self.iternodes():\n res += 1\n return res",
"def num_feature(self):\n if self.handle is None:\n raise AttributeError('Model not loaded yet')\n out = ctypes.c_size_t()\n _check_call(_LIB.TreeliteQueryNumFeature(self.handle, ctypes.byref(out)))\n return out.value",
"def numnems(self):\n count = 0\n for o in self._objs.values():\n count += len(o.netifs())\n return count",
"def get_num_nodes(self):\n\n return sum(self.topology)",
"def get_num_nodes(self):\n return len(self._nodes)",
"def get_num_nodes(self):\n return len(self._nodes)",
"def nr_features(self):\n if self.is_predict_only:\n return clib.xlinear_get_int_attr(self.model_chain, \"nr_features\")\n else:\n return self.model_chain[0].nr_features",
"def num_edges(self):\r\n return len(self.__generate_edges())",
"def get_num_edges(self):\n\n return self._graph_state.get_num_edges()",
"def size_in(self):\n if isinstance(self.ensemble.neuron_type, Direct):\n # This will prevent users from connecting/probing Direct neurons\n # (since there aren't actually any neurons being simulated).\n return 0\n return self.ensemble.n_neurons",
"def num_nodes(self):\n\n return self.num_input_nodes + self.num_hidden_layers * self.num_nodes_per_hidden_layer + self.num_output_nodes",
"def num_nodes(self):\n return len(self.nodes)"
]
| [
"0.7547738",
"0.73359764",
"0.7307265",
"0.72956717",
"0.7176172",
"0.712794",
"0.7111971",
"0.71068484",
"0.7103375",
"0.70746815",
"0.7061183",
"0.7060888",
"0.7045565",
"0.7018414",
"0.7011767",
"0.698772",
"0.6983539",
"0.6975411",
"0.6970141",
"0.6951382",
"0.6929856",
"0.6925664",
"0.6903497",
"0.6903497",
"0.68889016",
"0.6875",
"0.68594104",
"0.6856395",
"0.68419206",
"0.6840077"
]
| 0.7790741 | 0 |
Like the feedforward function but reversed. It takes an output or target vector, and returns the corresponding input vector. Nothing is stored by this function. | def reversed_feed(self, outIn):
I = np.array(outIn)
for W in self.weights[::-1]: # We traverse backwards through the weight matrices
I = np.dot(W,I)[:-1] #The dot product of the two numpy arrays will have one extra element, corresponding to the bias node, but we do not need it, so we slice it off
return I | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def backward(self, x_out, x_target):\r\n return 2*(x_out - x_target)",
"def reverse(self, y, *args, **kwargs):\n if self.memory_free:\n if isinstance(y, list):\n n_args = len(y)\n y = list(itertools.chain(*y))\n else:\n n_args = 1\n y = list(y)\n\n tensors = list(y) + list(self.parameters())\n for arg in args:\n if torch.is_tensor(arg) and arg.requires_grad:\n tensors.append(arg)\n for arg in kwargs.values():\n if torch.is_tensor(arg) and arg.requires_grad:\n tensors.append(arg)\n\n reverse_fun = self._forward\n forward_fun = self._reverse\n x = InvertToLearnFunction.apply(n_args, self, forward_fun, reverse_fun, args, kwargs, *tensors)\n if len(x) > 2:\n x = list(zip(x[::2], x[1::2]))\n else:\n x = self._reverse(y, *args, **kwargs)\n return x",
"def reverse(input):\n return input[::-1]",
"def backward_tensor(self, x):\n pass",
"def reverse(self,v):\n return np.tensordot(self._inverseTransform,\n v-self._translation,axes=([1],[0]))",
"def reverse(input=''):\n return input[::-1]",
"def reverse_forward(self, z: torch.Tensor):\n W = self.conv.weight.squeeze()\n\n # reverse forward computation, cache W_inverse for improved speed\n if not hasattr(self, 'W_inverse'):\n # Reverse computation\n W_inverse = W.float().inverse()\n W_inverse = W_inverse[..., None]\n if z.dtype == torch.half:\n W_inverse = W_inverse.half()\n self.W_inverse = W_inverse\n z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)\n return z",
"def reverse(x):\n return x[::-1]",
"def backward_transform(self):\n try:\n backward = self.forward_transform.inverse\n except NotImplementedError as err:\n raise NotImplementedError(\"Could not construct backward transform. \\n{0}\".format(err))\n return backward",
"def forward(self, input):\n return input.view(input.size(0), -1)",
"def backward(self, z):\n return self.forward(z) * (1 - self.forward(z))",
"def backward(self, dout):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n for l in range(len(self.layers)-1,-1,-1):\n act_dout = self.activations[l].backward(dout)\n dout = self.layers[l].backward(act_dout)\n ########################\n # END OF YOUR CODE #\n #######################\n\n return",
"def backward_sample(self, target):\n return self.flow.bijector.inverse(target)",
"def backward(self, out_tensors: List[Tensor], in_tensors: List[Tensor]):\n pass",
"def reversed(x) -> List:\n pass",
"def forward_backward(self, x):\n raise NotImplementedError()",
"def reverse_args(self, /, *args, **kwargs):\n return self._func(*args[::-1], **kwargs)",
"def backward(self, y):\n pass",
"def forward(self, x, y):\n y_summary = self.summary_net(y)\n return self.invertible_net(x, y_summary, inverse=False)",
"def reverse(self):\n return self[::-1]",
"def inverse(self, x):\n return self.mul(self.weights, x.unsqueeze(-1)).squeeze(-1) + self.shift\n #return self.mul(torch.inverse(self.weights), (x - self.shift).unsqueeze(-1)).squeeze(-1)",
"def reverse(self, *args, **kwargs):\n return reverse(*args, **kwargs)",
"def backward(self):\n if self.d_out_d_in is None:\n raise Exception(\"Haven't computed the loss!\")\n return self.d_out_d_in",
"def reverse(self):\n cdef StdVectorFst result = StdVectorFst(isyms=self.isyms, osyms=self.osyms)\n openfst.Reverse(self.fst[0], result.fst)\n return result",
"def __reversed__(self):\n return reverse(self)",
"def backward(ctx, grad_output):\n diff, = ctx.saved_tensors\n grad_input = grad_output.clone()\n grad_input = grad_input + diff\n return grad_input",
"def __invert__(self):\n \n return Vector(-self.y, self.x)",
"def reversed(self):\n return LINE(*self.elems,**{'reverse':(not self.reverse)})",
"def get_reconstructed_input(self):\n\n\t\treturn self.activation(\n\t\t\ttheano.tensor.dot(self.get_hidden_output(), self.reverse_weights) +\n\t\t\tself.reverse_bias)",
"def reversed(self):\n ops = {Eq: Eq, Gt: Lt, Ge: Le, Lt: Gt, Le: Ge, Ne: Ne}\n a, b = self.args\n return Relational.__new__(ops.get(self.func, self.func), b, a)"
]
| [
"0.7287039",
"0.67654866",
"0.6600392",
"0.65811574",
"0.654743",
"0.64547324",
"0.63930595",
"0.6372981",
"0.6336604",
"0.6294984",
"0.62903523",
"0.62878835",
"0.6214021",
"0.6207465",
"0.6194195",
"0.6163077",
"0.612815",
"0.6098429",
"0.60826397",
"0.6078447",
"0.60556835",
"0.6049296",
"0.60305554",
"0.6030407",
"0.60165167",
"0.59984887",
"0.59975445",
"0.59853506",
"0.59829044",
"0.59736115"
]
| 0.68784493 | 1 |
Computes the error for the network output. | def _compute_error(self,expected_out,actual_out,error_func):
error = error_func(expected_out,actual_out)
return error | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def error(input_, output):\n global number_of_neurons_by_layer\n if len(output) != number_of_neurons_by_layer[-1]:\n raise IndexError(\n f\"\\033[91mDesired output length is incorrect. It must be {number_of_neurons_by_layer[-1]}.\\033[m\")\n output = np.array(output).reshape(len(output), 1)\n flow(input_)\n layers[-1][\"error\"] = output - layers[-1][\"v\"]",
"def _computeError(self, inputs, targets):\n return .5*np.sum((targets-self._pcnfwd(inputs))**2)",
"def calculate_error(self):\n self.network.index_nodes()\n self._calculate_dist()\n _, relative_error = self._relative_error()\n _, absolute_error = self._absolute_error()\n\n return absolute_error, relative_error",
"def calculate_error(self):\n \n delta = self.Y - self.T\n error = delta.dot(delta) / self.N\n error = format(error, '.5f')\n \n self.errors.append(error)",
"def calc_error_dist(self):\n pass",
"def error_compute(self):\n self.tt_error = np.linalg.norm(self.rel_error)\n if self.global_rank==0:print('Overall error is::',self.tt_error)\n return {'NMF': self.rel_error, 'tt': self.tt_error}",
"def __error(self,node_set):\n error=0\n for n in node_set:\n if(n.seq_num!=0):\n error+=LA.norm(n.node_vol-node_set[n.neighbor.parent].node_vol-n.impedance*n.branch_cur)\n #print n.node_vol, '\\n', node_set[n.neighbor.parent].node_vol\n \n return error",
"def error2(input_, output):\n error(input_, output)\n layers[-1][\"error2\"] = layers[-1][\"error\"].T @ layers[-1][\"error\"]",
"def calcError(self, inherited_error):\r\n\t\tif inherited_error == None:\t\t# output neurons\r\n\t\t\tself.error = (self.target - self.value) * self.activate_der()\r\n\t\telse:\r\n\t\t\tself.error = inherited_error * self.activate_der()",
"def get_error(self, params):\n return self.endog - self.predict(params)",
"def get_error(self, output,target):\n return [target[i]-output[i] for i in range(len(output))]",
"def error(self):\n self.mean_error = tf.reduce_mean(self.errors, name=\"mean_error\")\n return(self.mean_error)",
"def train_error(self):\n self.prediction = self.predict()\n pred = self.prediction.reshape(-1)\n self.error = np.sum(pred != self.label) / self.train_data.shape[0]\n return(self.error)",
"def _delta(self, output, err, neuron):\n return neuron._g_prime(output) * err",
"def getError(outputVector, targetVector):\r\n return np.sum((outputVector-targetVector)**2)",
"def check_error(gluon_output, k_model, input_np, epsilon=1e-4):\n gluon_output = gluon_output.asnumpy()\n keras_output = k_model.predict(input_np)\n\n error = np.max(gluon_output - keras_output)\n print('Error:', error)\n\n assert error < epsilon\n return error",
"def get_error(self, err_type):\n self.tmp = self.location_errors[0].errors[err_type] * \\\n self.location_errors[0].errors[\"N\"]\n N = self.location_errors[0].errors[\"N\"]\n\n for lerr in self.location_errors[1:]:\n self.tmp = np.add(self.tmp, lerr.errors[err_type] *\n lerr.errors[\"N\"])\n N += lerr.errors[\"N\"]\n\n # print(self.tmp, N, self.tmp/ N)\n return self.tmp / N",
"def _wer_compute(errors: Tensor, total: Tensor) ->Tensor:\n return errors / total",
"def error(self): \n if not self.terminal:\n err = sum([v**2 for v in self.state + self.q[:-1]])\n else:\n err = sum([v**2 for v in LIMITS[:9]] + [1.0 - LIMITS[9]**2])\n err *= (self.max_steps - self.steps)\n return err",
"def _ms_err(self):\n return self._ss_err / self._df_err",
"def compute_error(self):\n \n self.error = pd.DataFrame()\n \n for name in self.conf[\"w_sizes\"].keys():\n \n self.error[f\"mae {name}\"] = self.predict[[name, \"test\"]].apply(lambda x: mae(x), axis=1)\n self.error[f\"mape {name}\"] = self.predict[[name, \"test\"]].apply(lambda x: MAPE(x[0], x[1]), axis=1)\n \n self.predict['error'] = self.error.filter(like='mae').apply(lambda r: tuple(r), axis=1).apply(np.array)",
"def error(self, trainset: ([], [])):\n # MSE = Σ | d – y |^2 / n\n error_sum = 0.0\n for index, example in enumerate(trainset):\n # | d – y |^2\n output = self.activate(example[0])\n\n target = example[1][0]\n\n error = target - output\n error_sum += error ** 2\n\n # Σ |error_sum| / n\n error_sum = error_sum / len(trainset)\n return error_sum",
"def calculate_energy_conservation_error(self):\n assert self.data is not None\n # calculate total energy at start and end of simulation\n energy_start = self.measure_total_energy(self.t0)\n energy_end = self.measure_total_energy(self.t1)\n \n # calculate accuracy\n error = abs(1.0 - energy_start/energy_end)\n \n return error",
"def tracking_error(port_returns, market_returns):\n\n return np.std(port_returns - market_returns)",
"def tracking_error(port_returns, market_returns):\n\n return np.std(port_returns - market_returns)",
"def _compute_errors(self):\n self.errors = np.sqrt(self.data)\n self.errors[self.errors == 0.] = 1.",
"def _cer_compute(errors: Tensor, total: Tensor) ->Tensor:\n return errors / total",
"def get_errors(self):\n return {'loss': self.loss.data[0]}",
"def getBatchError(self, inputMatrix, targetMatrix):\r\n outputmatrix = self.forwardPropagate(inputMatrix)\r\n return np.sum((outputmatrix-targetMatrix)**2, axis=(1, 2))",
"def error_rate(self):\n\n\t\treturn theano.tensor.mean(theano.tensor.neq(\n\t\t\tself.get_symbolic_predicted_labels(),\n\t\t\tself.symbolic_output))"
]
| [
"0.7844051",
"0.7378653",
"0.70772296",
"0.7048775",
"0.700349",
"0.6880271",
"0.687721",
"0.6814743",
"0.6809733",
"0.67768127",
"0.6671029",
"0.6610069",
"0.65781933",
"0.6498636",
"0.6493377",
"0.6410907",
"0.6401569",
"0.6319782",
"0.62156636",
"0.62088066",
"0.6189162",
"0.6188788",
"0.61835873",
"0.61541116",
"0.61541116",
"0.6131142",
"0.6123296",
"0.6109908",
"0.60932624",
"0.60873944"
]
| 0.7385006 | 1 |
Prints the current state of the training process, such as the epoch, current error. | def print_training_state(self,epoch,error,finished=False):
#print("Epoch:",iterCount)
if finished:
print("Network has reached a state of minimum error.")
#print("Error: {0}\tEpoch {1}".format(error,iterCount))
print("""Epoch {0} completed""".format(epoch),'Error:',error) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_status(self, epoch, iteration, prefix=\"\",\n mode=\"train\", is_main_net=True):\n if mode == \"train\":\n log = getattr(self.logger[\"train\"], \"info\")\n else:\n log = getattr(self.logger[\"train\"], \"debug\")\n\n if is_main_net:\n # prepare txt to print\n jump = 3\n txt = \"epoch {} step {} \".format(epoch, iteration)\n for i, (k,v) in enumerate(self.status.items()):\n if (i+1) % jump == 0:\n txt += \", {} = {:.3f}\".format(k, v)\n log(txt)\n txt = \"\"\n elif (i+1) % jump == 1:\n txt += \"{} = {:.3f}\".format(k, v)\n else:\n txt += \", {} = {:.3f}\".format(k, v)\n\n txt += \" ({}->{}/{})\".format(\"|\".join(pred for pred in self.basenet_pred),\n utils.label2string(self.itoa, self.top1_predictions[0]), self.top1_gt)\n\n # print learning information\n log(txt)",
"def see_evaluation(epoch, training_acc, test_acc):\n print (\"Epoch \", epoch, \"Training acc: \", training_acc*100, \"Test acc: \", test_acc*100)",
"def print_statistics(self) -> None:\n e = self.current_epoch\n if len(self.loss_history[\"test_loss\"]) > 0:\n template = 'Epoch: {} Training loss: {:.4f}, Test loss: {:.4f}'\n print(template.format(e, self.loss_history[\"training_loss\"][-1],\n self.loss_history[\"test_loss\"][-1]))\n else:\n template = 'Epoch: {} Training loss: {:.4f}'\n print(template.format(e, self.loss_history[\"training_loss\"][-1]))",
"def log_training_results(engine: Engine):\n train_evaluator.run(self.train_dl)\n metrics: Dict[str, float] = train_evaluator.state.metrics\n avg_accuracy: float = metrics['accuracy']\n avg_bce: float = metrics['bce']\n pbar.log_message(\n f'Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.4f} Avg loss: {avg_bce:.4f}')",
"def print_end_epoch(self, msg=\"\"):\n print(self.last_train, end='')\n print(msg, end='\\r')",
"def _print_progress(self):\n if self.current_training_size % 1000 == 0:\n print(self.current_training_size, end='')\n elif self.current_training_size % 100 == 0:\n print('.', end='')",
"def print_state(self):\n raise AIError(\"Must be implemented in child class!\")",
"def show_training(history: tf.keras.callbacks.History) -> None:\n hist = history.history\n\n if \"loss\" not in hist:\n print(\"Error: 'loss' values not found in the history\")\n return\n\n # plot training\n plt.figure(figsize=(14, 4))\n plt.subplot(121)\n plt.plot(hist[\"loss\"], label=\"Training\")\n if \"val_loss\" in hist:\n plt.plot(hist[\"val_loss\"], label=\"Validation\")\n plt.xlabel(\"epoch\")\n plt.ylabel(\"loss\")\n plt.legend()\n\n if \"accuracy\" in hist:\n plt.subplot(122)\n plt.plot(hist[\"accuracy\"], label=\"Training\")\n if \"val_accuracy\" in hist:\n plt.plot(hist[\"val_accuracy\"], label=\"Validation\")\n plt.xlabel(\"epoch\")\n plt.ylabel(\"accuracy\")\n plt.legend()\n\n plt.suptitle(\"Training history\")\n plt.show()\n\n # show final results\n print(\"\\nTraining loss: \\t{:.4f}\".format(hist[\"loss\"][-1]))\n if \"val_loss\" in hist:\n print(\"Validation loss: \\t{:.4f}\".format(hist[\"val_loss\"][-1]))\n if \"accuracy\" in hist:\n print(\"\\nTraining accuracy: \\t{:.3f}\".format(hist[\"accuracy\"][-1]))\n if \"val_accuracy\" in hist:\n print(\"Validation accuracy:\\t{:.3f}\".format(hist[\"val_accuracy\"][-1]))",
"def print_state(self):\n print('\\nthe current state is: ' + str(self.state) + '\\n')",
"def print_training_update(epoch: int, duration: float, lr_index: int,\n losses: Tuple[float], true_pred: dict,\n period=0):\n if period == 0 or epoch % period == 0:\n train_loss, val_loss = losses\n acc, sens, spec = get_con_stats(true_pred)\n balance = 100*sum(true_pred[\"true\"])/len(true_pred[\"true\"])\n if epoch == 0:\n update = (\"======================================================\\n\"\n \"Validation Category Balance: {:0.2f}%\\n\").format(balance)\n else:\n update = \"\"\n update += (\n \"Learning Rate Index: {}\\n\"\n \"Epoch: {}\\n\"\n \"Time Elapsed: {:0.2f} minutes\\n\"\n \"Train Loss: {:0.2f}\\n\"\n \"Validation Loss: {:0.2f}\\n\"\n \"Validation Accuracy: {:0.2f}\\n\"\n \"Validation Sensitivity: {:0.2f}\\n\"\n \"Validation Specificity: {:0.2f}\\n\"\n ).format(\n lr_index, epoch, duration, train_loss, val_loss, acc, sens, spec\n )\n print(update)\n print(confusion_matrix(true_pred[\"true\"], true_pred[\"pred\"]), \"\\n\")",
"def train(self, epochs):\n print('Starting training...')\n print('\\n{:13} '\n '{:>17} '\n '{:^38}'\n ''.format('', '--- Training ---', '--- Validation ---'))\n print('{:4} {:>8} '\n '{:>8} {:>8} '\n '{:>8} {:>8} {:>8} {:>8}'\n ''.format('', '', 'Loss', 'Acc', 'Loss', 'Prc', 'Rec', 'Acc'))\n training_time = 0\n for epoch in range(1, epochs + 1):\n start_time = time.time()\n trn_stats = self.__progress(self.training, self.__train_fn)\n val_stats = self.__progress(self.validation, self.__val_fn)\n elapsed_time = time.time() - start_time\n training_time += elapsed_time\n print('{:>4} {:>7.2f}s '\n '{:>8.3f} {:>8.1%} '\n '{:>8.3f} {:>8.1%} {:>8.1%} {:>8.1%}'\n ''.format(epoch, elapsed_time,\n trn_stats[0], trn_stats[-1],\n *val_stats))\n self.history.append([epoch] + list(trn_stats) + list(val_stats))\n self.report['epochs'] = epochs\n self.report['time_per_epoch'] = training_time / epochs",
"def _print(self):\n self.i += 1\n np.set_printoptions(precision=3, suppress=True)\n if self.i%40 == 0:\n self.i = 0\n print self.ekf.current_state_estimate[4:7]",
"def training_info(self):\n pass",
"def printStep(self):\n\n\t\tprint '\\nConfiguracao da fita: ',\n\n\t\tcount = 0\n\t\twhile count < len(self.tape):\n\t\t\tif count == self.currentPos:\n\t\t\t\tprint '_',\n\n\t\t\tprint self.tape[count],\n\t\t\tcount += 1\n\n\t\tprint '\\nEstado atual: ', self.currentState",
"def print_state():\n global simulator\n if simulator is None:\n print \"program is not started\"\n return\n print simulator.state()",
"def Train(self):\n self.init_epoch = self.epoch\n if self.epoch >= self.params.num_epoch:\n WARNING('Num_epoch should be smaller than current epoch. Skip training......\\n')\n else:\n for _ in range(self.epoch, self.params.num_epoch):\n self.epoch += 1\n print('-' * 20 + 'Epoch.' + str(self.epoch) + '-' * 20)\n\n # train one epoch\n self.train_one_epoch()\n\n # should display\n if self.epoch % self.params.display == 0:\n print('\\tTrain loss: %.4f' % self.train_loss[-1])\n\n # should save\n if self.params.should_save:\n if self.epoch % self.params.save_every == 0:\n self.save_checkpoint()\n\n # test every params.test_every epoch\n if self.params.should_val:\n if self.epoch % self.params.val_every == 0:\n self.val_one_epoch()\n print('\\tVal loss: %.4f' % self.val_loss[-1])\n\n # adjust learning rate\n self.adjust_lr()\n self.train_one_epoch_Image_display() \n \n # save the last network state\n if self.params.should_save:\n self.save_checkpoint()\n\n # train visualization\n self.plot_curve()",
"def train(self, num_iterations):\n for t in range(num_iterations):\n self._step()\n # Maybe print training loss\n if (t != 0) and (t % self.print_every == 0):\n print('(Iteration %d / %d) loss: %f' % (\n t + 1, num_iterations, self.loss_history[-1]))",
"def print_info(self):\n\n print \"\\nALGORITHM INFO\"\n print \"modelnumber:\", self.modelnumber\n print \"restart:\", self.restart\n print \"particles:\", self.particles\n print \"beta:\", self.beta\n print \"dt:\", self.dt\n if self.mode != 1:\n if len(self.final_epsilon) == 0:\n print \"manual epsilon:\"\n for i in range(self.epsilon.shape[0]):\n print \"\\t\",\n for j in range(self.epsilon.shape[1]):\n print \"\", self.epsilon[i, j],\n print \"\"\n else:\n print \"auto epsilon:\"\n print \"\\t\", self.final_epsilon\n print \"\\talpha:\", self.alpha\n\n print \"kernel:\", self.kernel\n print \"model kernel:\", self.modelkernel\n print \"model prior:\", self.modelprior\n\n print \"DATA:\"\n print \"\\ttimes:\", self.times\n if self.mode == 0:\n print \"\\tvars:\"\n for i in range(len(self.data[0, :])):\n print \"\\t\",\n for j in range(self.ntimes):\n print \"\", self.data[j, i],\n print \"\"\n\n print \"MODELS:\", self.nmodels\n for i in range(self.nmodels):\n print \"\\t\", \"npar:\", self.nparameters[i]\n print \"\\t\", \"nspecies:\", self.nspecies[i]\n print \"\\t\", \"name:\", self.name[i]\n print \"\\t\", \"source:\", self.source[i]\n print \"\\t\", \"type:\", self.type[i]\n print \"\\t\", \"fit:\", self.fit[i]\n print \"\\t\", \"init:\", self.x0prior[i]\n print \"\\t\", \"prior:\", self.prior[i]\n print \"\\t\", \"logp:\", self.logp[i]\n print \"\\n\"",
"def print_info(self):\r\n self.system.print_to_log(\r\n f\"{self.__class__.__name__} model: Infection probability: {self.p}, Infectious period: {self.i}, Recovery period: {self.r}.\")",
"def summary(self):\n\n print(\n \"\\nModel trained with dataset %s that has maxlen=%d and charset=%s for %d epochs.\"\n % (self.dataset_name, self.maxlen, self.charset, self.epochs)\n )\n\n print(\n \"noise_std: %.6f, lstm_dim: %d, dec_layers: %d, td_dense_dim: %d, batch_size: %d, codelayer_dim: %d, lr: %.6f.\"\n % (\n self.noise_std,\n self.lstm_dim,\n self.dec_layers,\n self.td_dense_dim,\n self.batch_size,\n self.codelayer_dim,\n self.lr,\n )\n )",
"def print_info(self):\n print(\"Num samples (train/test/val): {} tot: {}\\n\"\n \"Samples per class: {}\\n\"\n \"Sample type {}\\n\"\n \"Sample shape: {}\\n\"\n \"Label type {}\\n\"\n \"Label shape: {}\\n\"\n \"Root dirs: {}\".format([int(np.floor(frac * len(self.__labels))) for frac in self.split_fraction],\n len(self.__labels),\n self.__samples_per_class,\n self.train.output_types[0], self.train.output_shapes[0][1:],\n self.train.output_types[1], self.train.output_shapes[1][1:],\n self.__root_directory_list))",
"def state(self):\n msg = f\"Procs: {self.running_procs} / {self.procs_no}\"\n if self.gpus:\n msg += f\" | {len(self.gpus):d} GPUS:\"\n for gpu in self.gpus:\n msg += f\" {gpu}:{self.gpu_running_procs[gpu]}/{self.per_gpu[gpu]};\"\n return msg",
"def print_network(self):\n #plot_model(self.model, to_file='model.png', show_shapes=True)\n logging.info(\"\")\n logging.info(self.network)\n logging.info(\"Network accuracy: %.2f%%\" % (self.accuracy * 100))\n logging.info(\"Network loss: %.2f%%\" % (self.loss))",
"def show_learning_stats(track, train_loss, train_acc, valid_acc, test_acc):\n\n if track[\"valid\"] and track[\"test\"]:\n print(\"Train loss: {:.4f} -- Train acc: {:.4f} -- Val acc: {:.4f} -- Test acc: {:.4f}\".format(\n train_loss, train_acc, valid_acc, test_acc))\n\n if track[\"valid\"] and not track[\"test\"]:\n print(\"Train loss: {:.4f} -- Train acc: {:.4f} -- Val acc: {:.4f}\".format(\n train_loss, train_acc, valid_acc))\n\n if not track[\"valid\"] and track[\"test\"]:\n print(\"Train loss: {:.4f} -- Train acc: {:.4f} -- Test acc: {:.4f}\".format(\n train_loss, train_acc, test_acc))\n\n if not track[\"valid\"] and not track[\"test\"]:\n print(\"Train loss: {:.4f} -- Train acc: {:.4f} \".format(\n train_loss, train_acc))",
"def show_training_history(self):\n hist = [i.history[\"loss\"][0] for i in self.history]\n plt.plot(hist)",
"def write_epoch_summary_text(self):\n self._write_text_to_tensorboard(\n tag=\"MLRun\",\n text=self._generate_epoch_text(),\n step=self._training_iterations,\n )",
"def print_state(self):\n\t\tprint self.time, len(self.state['s']), len(self.state['p']), len(self.state['c'])",
"def status_print(optim_result):\n \n # Get all the models tested so far in DataFrame format\n all_models = pd.DataFrame(bayes_cv_tuner.cv_results_) \n \n # Get current parameters and the best parameters \n best_params = pd.Series(bayes_cv_tuner.best_params_)\n print('Model #{}\\nBest mse: {}\\nBest params: {}\\n'.format(\n len(all_models),\n np.round(bayes_cv_tuner.best_score_, 4),\n bayes_cv_tuner.best_params_\n ))\n \n # Save all model results\n clf_name = bayes_cv_tuner.estimator.__class__.__name__\n all_models.to_csv(clf_name+\"_cv_results.csv\")",
"def print_cpu_state(self):\n print(\"PC:\", hex(self.pc))\n print(\"SP:\", hex(self.sp))\n print(\"A:\", hex(self.a))\n print(\"X:\", hex(self.x))\n print(\"Y:\", hex(self.y))\n print(\"P:\", bin(self.p))",
"def log_training(self, batch, total_batches, result):\n metrics = [\"loss\", \"accuracy\"]\n for metric in metrics:\n if metric not in self.logs:\n self.logs[metric] = []\n self.logs[metric].append(result[metric])\n if batch % self.log_frequency == 0 or batch + 1 == total_batches:\n print(\"Batch {} / {} = {:.2f} %\".format(batch, total_batches, 100 * batch / total_batches))\n print(\"{:20}: {}\".format(\"Global step\", result[\"global_step\"]))\n print(\"{:20}: {:.4e}\".format(\"Learning rate\", result[\"learning_rate\"]))\n for metric in metrics:\n metric_logs = self.logs[metric]\n average = sum(metric_logs) / len(metric_logs)\n print(\"{:20}: {:.4}\".format(\"Training \" + metric, average))\n self.logs[metric] = []\n val_metrics = self.evaluate(self.batches_valid)\n for k, v in val_metrics.items():\n print(\"{:20}: {:.4}\".format(\"Validation \" + k, v))"
]
| [
"0.72262985",
"0.7183004",
"0.7071686",
"0.6913451",
"0.6806319",
"0.6769184",
"0.6690559",
"0.65998715",
"0.6593775",
"0.65109324",
"0.6402722",
"0.6387626",
"0.63702345",
"0.6350585",
"0.62587076",
"0.6226373",
"0.62072736",
"0.6168022",
"0.6164347",
"0.61623514",
"0.61486083",
"0.6144908",
"0.611477",
"0.609658",
"0.60727483",
"0.60684425",
"0.6063308",
"0.6061152",
"0.6056013",
"0.60534734"
]
| 0.7943574 | 0 |
Will create a new EclGrid instance from a grdecl file. This function will scan the input file and look for the keywords required to build a grid. The following keywords are required: SPECGRID, ZCORN and COORD; the ACTNUM and MAPAXES keywords will also be used if they are present. | def loadFromGrdecl(cls , filename):
if os.path.isfile(filename):
with open(filename) as f:
specgrid = EclKW.read_grdecl(f, "SPECGRID", ecl_type=EclTypeEnum.ECL_INT_TYPE, strict=False)
zcorn = EclKW.read_grdecl(f, "ZCORN")
coord = EclKW.read_grdecl(f, "COORD")
try:
actnum = EclKW.read_grdecl(f, "ACTNUM", ecl_type=EclTypeEnum.ECL_INT_TYPE)
except ValueError:
actnum = None
try:
mapaxes = EclKW.read_grdecl(f, "MAPAXES")
except ValueError:
mapaxes = None
return EclGrid.create( specgrid , zcorn , coord , actnum , mapaxes )
else:
raise IOError("No such file:%s" % filename) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_from_file(self,grd_fn):\n self.grd_fn = grd_fn\n self.fp = open(self.grd_fn,'rt')\n hdr = self.fp.readline().strip() #header &GRD_2008 or &LISTGRD\n\n if hdr == self.hdr_08:\n print( \"Will read 2008 format for grid\" )\n n_parms = 11\n elif hdr == self.hdr_old:\n print( \"Will read old UnTRIM grid format\" )\n n_parms = 10\n\n for i in range(n_parms): # ignore TNE and TNS in new format files\n l = self.fp.readline()\n lhs,rhs = l.split('=')\n val = rhs.strip().strip(',')\n varname = lhs.strip()\n print( \"%s=%s\"%(varname,val) )\n\n if varname=='NV':\n Nvertices = int(val)\n elif varname=='NE':\n Npolys = int(val)\n elif varname=='NS':\n Nsides = int(val)\n elif varname=='NBC':\n Nboundary_poly = int(val)\n elif varname=='NSI':\n Ninternal_sides = int(val)\n elif varname=='NSF':\n Nflow_sides = int(val)\n elif varname=='NBC':\n Nbc = int(val)\n elif varname=='ANGLE':\n self.angle = float(val)\n elif varname=='LOCATION':\n self.location = val\n elif varname=='NR': ## these are read, but not used\n Nred = int(val)\n elif varname=='TNE':\n TNE=int(val)\n elif varname=='TNS':\n TNS=int(val)\n # others: HLAND for older fmt.\n \n while 1:\n s = self.fp.readline().strip() # header: /\n if s == '/':\n break\n\n # We know the size of everything, and can ask UnstructuredGrid to allocate\n # arrays now, with the 'special' meaning that passing an integer means allocate\n # the array of that size, full of zeros.\n # this allocates\n # self.nodes, self.edges, self.cells\n self.from_simple_data(points = Nvertices,edges = Nsides, cells = Npolys)\n\n for v in range(Nvertices):\n Cv = self.fp.readline().split()\n if hdr == self.hdr_08:\n vertex_num = int(Cv.pop(0))\n if vertex_num != v+1:\n print( \"Mismatched vertex numbering: %d != %d\"%(vertex_num,v+1) )\n self.nodes['x'][v,0] = float(Cv[0])\n self.nodes['x'][v,1] = float(Cv[1])\n \n print( \"Npolys\",Npolys )\n self.cells['edges'] = self.UNKNOWN # initialize all\n self.cells['nodes'] = self.UNKNOWN\n \n for c in range(Npolys):\n l = self.fp.readline()\n Cp = l.split()\n if hdr == self.hdr_08:\n poly_num = int(Cp.pop(0))\n if poly_num-1 != c:\n print( \"Mismatched polygon id: %fd != %d\"%(poly_num,c+1) )\n \n numsides = int(Cp[0])\n\n self.cells['_center'][c,0] = float(Cp[1])\n self.cells['_center'][c,1] = float(Cp[2])\n\n if hdr == self.hdr_old:\n # vertex index is Cp[3,5,7,9]\n # the others, 4,6,8,10, are edges, right?\n # convert to 0 based indices here\n\n # This is probably wrong! 
I think it's actually reading the\n # sides\n self.cells['edges'][c,0] = int(Cp[4]) - 1\n self.cells['edges'][c,1] = int(Cp[6]) - 1 \n self.cells['edges'][c,2] = int(Cp[8]) - 1\n if numsides == 4:\n self.cells['edges'][c,3] = int(Cp[10]) - 1 \n else:\n self.cells['edges'][c,3]=self.UNDEFINED\n #HERE - need to copy that to self.cells['nodes']\n else:\n for ei in range(numsides):\n self.cells['nodes'][c,ei] = int(Cp[3+ei]) - 1\n self.cells['edges'][c,ei] = int(Cp[3+numsides+ei]) - 1\n self.cells['nodes'][c,numsides:]=self.UNDEFINED\n self.cells['edges'][c,numsides:]=self.UNDEFINED\n \n # choose some large, above-sea-level depth\n self.cells['depth_mean'] = -1000 # not sure this is doing anything...\n\n for e in range(Nsides):\n Cs = self.fp.readline().split()\n if hdr == self.hdr_08:\n # side num = int(Cs.pop(0))\n Cs.pop(0)\n elif hdr == self.hdr_old:\n # side depth?\n edge_depth = self.edges['depth_mean'][e] = float(Cs.pop(0))\n \n self.edges['nodes'][e,0] = int(Cs[0])-1 # vertex indices\n self.edges['nodes'][e,1] = int(Cs[1])-1\n \n self.edges['cells'][e,0] = int(Cs[2])-1 # cell neighbors\n self.edges['cells'][e,1] = int(Cs[3])-1\n\n if hdr == self.hdr_old:\n for nc in self.edges['cells'][e]:\n if nc >= 0 and edge_depth > self.cells['depth_mean'][nc]:\n self.cells['depth_mean'][nc] = edge_depth\n\n if hdr==self.hdr_old:\n # old format - have to infer cell nodes from edges\n self.make_cell_nodes_from_edge_nodes()\n\n # Try to make sense of the marks and red/black:\n self.cells['red'][:Nred] = True\n self.cells['mark'][:Nboundary_poly] = self.BOUNDARY\n self.edges['mark'][:Ninternal_sides] = 0\n self.edges['mark'][Ninternal_sides:Nflow_sides] = self.FLOW\n self.edges['mark'][Nflow_sides:] = self.LAND\n\n # Bathymetry:\n if hdr == self.hdr_08:\n # make a cheap tokenizer to read floats across lines\n # note that it's up to the user to know that all values from\n # the line are read, and not to get the iterator until you're\n # ready for some values to be read\n def tokenizer():\n while True:\n for item in self.fp.readline().split():\n yield item\n for c in range(Npolys):\n check_c,nis = [int(s) for s in self.fp.readline().split()]\n if check_c != c+1:\n print(\"ERROR: while reading cell subgrid, cell index mismatch: %s vs. %d\"%(c+1,check_c))\n \n next_token = tokenizer().next\n areas = np.array( [float(next_token()) for sg in range(nis)] )\n depths = np.array( [float(next_token()) for sg in range(nis)] )\n \n self.cells['depth_mean'][c] = np.sum(areas*depths) / np.sum(areas)\n self.cells['_area'][c] = np.sum(areas)\n self.cells['depth_max'][c] = depths.max()\n self.cells['subgrid'][c] = (areas,depths)\n for e in range(Nflow_sides):\n l = self.fp.readline()\n # print \"%d/%d - Read line: %s\"%(e,self.Nsides,l)\n check_e,nis = [int(s) for s in l.split()]\n if check_e != e+1:\n print( \"ERROR: While reading edge subgrid, edge index mismatch: %s vs. %s\"%(e+1,check_e) )\n next_token = tokenizer().next\n lengths = np.array( [float(next_token()) for sg in range(nis)] )\n depths = np.array( [float(next_token()) for sg in range(nis)] )\n if sum(lengths)<=0:\n print( \"edge %d has bad lengths\"%e )\n self.edges['depth_mean'][e] = np.sum(lengths*depths) / sum(lengths)\n self.edges['depth_max'][e] = depths.max()\n self.edges['subgrid'][e] = (lengths,depths)\n # and land boundaries get zeros.\n for e in range(Nflow_sides,Nsides):\n self.edges['depth_mean'][e] = 0.0\n self.edges['depth_max'][e] = 0.0\n self.edges['subgrid'][e] = ([],[])",
"def load_grd(filename):\n with open(filename, 'r') as f:\n meta = {}\n meta['header'] = []\n meta['header'].append(f.readline().rstrip('\\n'))\n while meta['header'][-1] != '++++':\n meta['header'].append(f.readline().rstrip('\\n'))\n # These determine the type of grid and the field format.\n meta['KTYPE'] = int(f.readline().split()[0])\n if meta['KTYPE'] != 1:\n raise ValueError(\"Not implemented.\")\n meta['NSET'], meta['ICOMP'], meta['NCOMP'], meta['IGRID'] = [int(s) for s in f.readline().split()]\n # The grid center in units of the x and y grid spacing.\n meta['IX'], meta['IY'] = [int(s) for s in f.readline().split()]\n # These are the x and y grid limits: S is lower, and E is upper.\n meta['XS'], meta['YS'], meta['XE'], meta['YE'] = [float(s) for s in f.readline().split()]\n # These are the numbers of grid points in x and y.\n meta['NX'], meta['NY'], meta['KLIMIT'] = [int(s) for s in f.readline().split()]\n # Implement this to read elliptically truncated grids.\n if meta['KLIMIT'] != 0:\n raise ValueError(\"Not implemented.\")\n # Load the field data. This returns an array with shape (NX * NY, 2 * NCOMP).\n conv = dict([(column, string_to_float) for column in range(2 * meta['NCOMP'])])\n data = np.loadtxt(f, dtype=float, converters=conv)\n # Determine the grid spacing and center values.\n meta['DX'] = (meta['XE'] - meta['XS']) / (meta['NX'] - 1)\n meta['DY'] = (meta['YE'] - meta['YS']) / (meta['NY'] - 1)\n meta['XCEN'] = meta['DX'] * meta['IX']\n meta['YCEN'] = meta['DY'] * meta['IY']\n # Reshape the data.\n map = np.empty((meta['NX'], meta['NY'], meta['NCOMP']),\n dtype=np.complex)\n for component in range(meta['NCOMP']):\n column = data[:, 2 * component] + 1j * data[:, 2 * component + 1]\n map[:, :, component] = column.reshape(meta['NX'], meta['NY'], order='F')\n return meta, map",
"def loadFromFile(cls , filename):\n if FortIO.isFortranFile( filename ):\n return EclGrid( filename )\n else:\n return EclGrid.loadFromGrdecl( filename )",
"def ReadGrid(self, grdfile):\n nc = Dataset(grdfile,'r')\n \n self.xv = nc.variables['xv'][:]\n self.yv = nc.variables['yv'][:]\n self.xp = nc.variables['xp'][:]\n self.yp = nc.variables['yp'][:]\n self.xe = nc.variables['xe'][:]\n self.ye = nc.variables['ye'][:]\n self.dz = nc.variables['dz'][:] \n self.dv = nc.variables['dv'][:]\n self.Ac = nc.variables['Ac'][:]\n self.Nk = nc.variables['Nk'][:]\n self.face = nc.variables['face'][:]\n self.mark = nc.variables['mark'][:]\n\tself.cells = nc.variables['cells'][:]\n \n self.Nc = len(self.xv)\n self.Np = len(self.xp)\n self.Ne = len(self.xe)\n self.Nk = len(self.dz)\n self.numsides = self.face.shape[1]",
"def exportECL(self, fname):\r\n\r\n # TODO add consistency of dimensions across the inputs\r\n self.ne, self.nn, self.nz = np.array(self.Grid.GetDimensions()) - 1 # ECLIPSE\r\n filename, ext = os.path.splitext(fname)\r\n if self.GridType == \"vtkStructuredGrid\":\r\n with io.open(filename + \".GRDECL\", 'w', newline='\\r\\n') as f:\r\n f.write('-- Generated [\\n')\r\n f.write('-- Format : ECLIPSE keywords (grid geometry and properties) (ASCII)\\n')\r\n # f.write('-- Exported by : Petrel 2013.7 (64-bit) Schlumberger\\n'\r\n f.write('-- Exported by : ReGrid v.' + version + \"\\n\")\r\n f.write('-- User name : ' + getpass.getuser() + \"\\n\")\r\n f.write('-- Date : ' + datetime.now().strftime(\"%A, %B %d %Y %H:%M:%S\") + \"\\n\")\r\n f.write('-- Project : ' + \"ReGrid project\\n\")\r\n f.write('-- Grid : ' + \"Description\\n\")\r\n f.write('-- Generated ]\\n\\n')\r\n\r\n f.write('SPECGRID -- Generated : ReGrid\\n')\r\n f.write(' %i %i %i 1 F /\\n\\n' % (self.ne, self.nn, self.nz))\r\n f.write('COORDSYS -- Generated : ReGrid\\n')\r\n f.write(' 1 4 /\\n\\n') # what is this line?\r\n\r\n f.write('COORD -- Generated : ReGrid\\n')\r\n nz = self.nz\r\n fstr = str(\" \")\r\n\r\n for iy in range(self.nn):\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, 0).GetPoints().GetPoint(0)\r\n fstr = self.printCOORDS(f, p0, fstr)\r\n p1 = self.Grid.GetCell(ix, iy, nz - 1).GetPoints().GetPoint(4)\r\n fstr = self.printCOORDS(f, p1, fstr)\r\n # outside edge on far x\r\n p2 = self.Grid.GetCell(ix, iy, 0).GetPoints().GetPoint(1)\r\n fstr = self.printCOORDS(f, p2, fstr)\r\n p3 = self.Grid.GetCell(ix, iy, nz - 1).GetPoints().GetPoint(5)\r\n fstr = self.printCOORDS(f, p3, fstr)\r\n # outside edge on far y\r\n for ix in range(self.ne):\r\n p8 = self.Grid.GetCell(ix, iy, 0).GetPoints().GetPoint(3)\r\n fstr = self.printCOORDS(f, p8, fstr)\r\n p9 = self.Grid.GetCell(ix, iy, nz - 1).GetPoints().GetPoint(7)\r\n fstr = self.printCOORDS(f, p9, fstr)\r\n # outside edge on far northeast\r\n p14 = self.Grid.GetCell(ix, iy, 0).GetPoints().GetPoint(2)\r\n fstr = self.printCOORDS(f, p14, fstr)\r\n p15 = self.Grid.GetCell(ix, iy, nz - 1).GetPoints().GetPoint(6)\r\n fstr = self.printCOORDS(f, p15, fstr)\r\n f.write(fstr)\r\n fstr = \" \"\r\n f.write(\" /\")\r\n f.write(\"\\n\")\r\n f.write(\"\\n\")\r\n\r\n f.write('ZCORN -- Generated : ReGrid\\n')\r\n for iz in range(self.nz):\r\n for iy in range(self.nn):\r\n # front face\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(0)\r\n p1 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(1)\r\n fstr = self.printCOORDS(f, [p0[2]], fstr)\r\n fstr = self.printCOORDS(f, [p1[2]], fstr)\r\n # back face\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(3)\r\n p1 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(2)\r\n fstr = self.printCOORDS(f, [p0[2]], fstr)\r\n fstr = self.printCOORDS(f, [p1[2]], fstr)\r\n # bottom layer\r\n for iy in range(self.nn):\r\n # front face\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(4)\r\n p1 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(5)\r\n fstr = self.printCOORDS(f, [p0[2]], fstr)\r\n fstr = self.printCOORDS(f, [p1[2]], fstr)\r\n # back face\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(7)\r\n p1 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(6)\r\n fstr = self.printCOORDS(f, [p0[2]], fstr)\r\n fstr = self.printCOORDS(f, [p1[2]], fstr)\r\n f.write(fstr)\r\n 
fstr = \" \"\r\n f.write(\" /\")\r\n f.write(\"\\n\")\r\n f.write(\"\\n\")\r\n f.write('ACTNUM -- Generated : ReGrid\\n')\r\n\r\n c = -999\r\n N = 0\r\n for iac in self.ActiveCells.flatten(order='F'):\r\n if iac == c:\r\n N += 1\r\n else:\r\n if c != -999:\r\n fstr = self.printAC(f, c, N, fstr)\r\n c = iac\r\n N = 1\r\n fstr = self.printAC(f, c, N, fstr)\r\n f.write(fstr)\r\n f.write(\" /\")\r\n f.write(\"\\n\")\r\n f.write(\"\\n\")\r\n else:\r\n print(\"Only structured grids can be converted to ECLIPSE files\")",
"def import_grid(file_name):\n\n return FileReader(file_name=file_name).grid",
"def _load_grdfile(casename=None):\n \n data={} \n\n if casename==None:\n print('_load_grdfile requires a filename to load.')\n return\n try:\n fp=open(casename+'_grd.dat','r')\n except IOError:\n print('_load_grdfiles: invalid case name.')\n return data\n\n nodes_str=fp.readline().split('=')\n elements_str=fp.readline().split('=')\n nnodes=int(nodes_str[1])\n nele=int(elements_str[1])\n t_data1=np.genfromtxt(casename+'_grd.dat',skip_header=2, skip_footer=nnodes,dtype='int64')\n t_data2=np.genfromtxt(casename+'_grd.dat',skip_header=2+nele,dtype='float64')\n fp.close()\n\n data['nnodes']=nnodes\n data['nele']=nele\n data['nodexy']=t_data2[:,1:3]\n data['x']=t_data2[:,1]\n data['y']=t_data2[:,2]\n data['nv']=t_data1[:,1:4].astype(int)-1\n data['trigridxy'] = mplt.Triangulation(data['x'], data['y'],data['nv'])\n \n return data",
"def loadfile(filename):\n with open(filename,'r') as fin:\n lines = fin.readlines()\n\n fixnames = []\n freenames = []\n\n config_list = []\n for line in [L.strip() for L in lines if L.strip() != '']:\n if not line[0]=='#':\n if 'Ebase' in line:\n Ebase = float(line.split(':')[1].strip())\n elif 'fixnames' in line.lower():\n fixnames = line.split(':')[1].strip().split()\n elif 'freenames' in line.lower():\n freenames = line.split(':')[1].strip().split()\n elif 'fixed' in line.lower():\n fixed = [float(val) for val in\n line.split(':')[1].strip().split()]\n elif 'free' in line.lower():\n free = [float(val) for val in\n line.split(':')[1].strip().split()]\n else:\n name = ''\n entry = line.split(':')\n if len(entry)==3:\n name = entry[0].strip()\n Eref = float(entry[-2])\n values = [int(val) for val in entry[-1].split()]\n nvector = values[:len(fixed)]\n mvector = values[len(fixed):]\n config_list.append(LGHconfig(nvector=nvector,mvector=mvector,\n Eref=Eref,name=name))\n return LGH(base=Ebase,fixed = fixed, free = free,\n config_list=config_list,\n fixnames=fixnames,\n freenames=freenames)",
"def read_grid(filename_grid, dim=2, slc=None):\n ## get shape and slice\n fid = h5py.File(filename_grid, 'r')\n if dim==2:\n varnames = ['x', 'y', 'ep']\n if slc is None: slc = np.s_[0,:,:]\n if dim==3:\n varnames = ['x', 'y', 'z', 'ep']\n if slc is None: slc = np.s_[:,:,:]\n\n dset = fid.get(varnames[0])\n shape = dset[slc].shape\n (nx,ny,nz) = dset.shape\n ## read variables\n grid = {}\n for varname in varnames:\n try:\n dset = fid.get(varname)\n grid[varname] = np.zeros(shape)\n dset.read_direct(grid[varname], source_sel=slc)\n grid[varname] = grid[varname].transpose()\n except:\n pass\n fid.close()\n return grid, nx, ny, nz",
"def __init__(self, filename: str):\n\n # init\n self._filename = filename\n self._n_of_dim = 0\n self._n_p = []\n self._labels = []\n self._llimits = []\n self._steps = []\n self._npix = None\n self._wave = []\n self._logw = None\n self._spec_start = None\n self._line_length = None\n self._line_pos = []\n self._params_line = {}\n\n # load grid\n self._load_grid()\n self._map_lines()\n\n # build axes\n axes = []\n for i in range(self._n_of_dim):\n values = [self._llimits[i] + k * self._steps[i] for k in range(self._n_p[i])]\n axes.append(GridAxis(name=self._labels[i],\n values=values,\n min=values[0],\n max=values[-1]))\n\n # init grid\n Grid.__init__(self, axes)",
"def CART(self, fname):\r\n print('Building cartesian grid')\r\n self.iWidths = []\r\n self.jWidths = []\r\n with open(fname, \"r\") as fp:\r\n\r\n # Read header\r\n for line in fp:\r\n item = line.split()\r\n if len(item) > 0:\r\n # Searches for line of format *GRID *CART I J K\r\n if item[0] == \"GRID\" or item[0] == \"*GRID\":\r\n self.gridType = item[1]\r\n self.size = np.array(item[2:5], dtype=int)\r\n break\r\n\r\n k_spacing = 0\r\n # Assumes DEPTH is the final keyword describing grid structure (move 'break' if not)\r\n for line in fp:\r\n item = [ i.strip('*') for i in line.split() ]\r\n # Read Z-axis orientation\r\n if item[0] == \"KDIR\" and item[1] == \"DOWN\": # K=1 is top layer\r\n kdir = 1\r\n elif item[0] == \"KDIR\" and item[1] == \"UP\": # K=1 is bottom layer\r\n kdir = -1\r\n # Read X-axis spacing\r\n elif item[0] == \"DI\":\r\n if item[1] == \"CON\":\r\n self.X = np.linspace(0, float(item[2]) * self.size[0], self.size[0]+1)\r\n # Read Y-axis spacing\r\n elif item[0] == \"DJ\":\r\n if item[1] == \"CON\":\r\n self.Y = np.linspace(0, float(item[2]) * (self.size[1]), self.size[1]+1)\r\n # Read Z-axis spacing\r\n elif item[0] == \"DK\":\r\n if item[1] == \"CON\":\r\n k_spacing = float(item[2])\r\n # Read DEPTH (assumes of form *DEPTH *TOP I J K depth)\r\n elif item[0] == \"DEPTH\":\r\n depth = float(item[5])\r\n self.Z = np.linspace(depth, depth + (k_spacing * kdir * float(self.size[2])), self.size[2]+1)\r\n break\r\n\r\n # Write cell vertex coordinates\r\n XX, YY, ZZ = ([] for el in range(3))\r\n for k in range(self.size[2] + 1):\r\n for j in range(self.size[1] + 1):\r\n XX.extend(self.X)\r\n YY.extend([self.Y[j]] * (self.size[0] + 1))\r\n ZZ.extend([self.Z[k]] * (self.size[0] + 1) * (self.size[1] + 1))\r\n self.structured_grid(XX, YY, ZZ)",
"def __init__(self, path, grid_path=\"./\", grids=['T', 'U', 'V', 'W'],\n decode_times=True,\n\t chunks=None, autoclose=False):\n\t\tself.open_grid_files(grid_path)\n\t\tdef open_files(filenames):\n\t\t\tds = (xr.open_mfdataset(filenames,\n\t\t\t decode_times=decode_times,\n\t\t\t autoclose=autoclose,\n\t\t\t data_vars='minimal')\n .set_coords(['nav_lon', 'nav_lat'])\n\t\t\t )\n\t\t\tds = ds.chunk(chunks=chunks)\n\t\t\treturn ds\n\t\tif glob.glob(path + \"/*gridT*.nc\") and ('T' in grids):\n\t\t\tself.gridT = open_files(path + \"/*gridT*.nc\")\n\t\tif glob.glob(path + \"/*gridU*.nc\") and ('U' in grids):\n\t\t\tself.gridU = open_files(path + \"/*gridU*.nc\")\n\t\tif glob.glob(path + \"/*gridV*.nc\") and ('V' in grids):\n\t\t\tself.gridV = open_files(path + \"/*gridV*.nc\")\n\t\tif glob.glob(path + \"/*gridW*.nc\") and ('W' in grids):\n\t\t\tself.gridW = open_files(path + \"/*gridW*.nc\")\n\t\tif glob.glob(path + \"/*flxT*.nc\") and ('T' in grids):\n\t\t\tself.flxT = open_files(path + \"/*flxT*.nc\")",
"def _load_grid(self):\n\n grid_metrics = ['nbe', 'ntsn', 'nbsn', 'ntve', 'nbve', 'art1', 'art2', 'a1u', 'a2u']\n grid_variables = ['lon', 'lat', 'x', 'y', 'lonc', 'latc', 'xc', 'yc',\n 'h', 'siglay', 'siglev']\n\n # Get the grid data.\n for grid in grid_variables:\n try:\n setattr(self.grid, grid, self.ds.variables[grid][:])\n # Save the attributes.\n attributes = type('attributes', (object,), {})()\n for attribute in self.ds.variables[grid].ncattrs():\n setattr(attributes, attribute, getattr(self.ds.variables[grid], attribute))\n setattr(self.atts, grid, attributes)\n except KeyError:\n # Make zeros for this missing variable so we can convert from the non-missing data below.\n if grid.endswith('c'):\n setattr(self.grid, grid, np.zeros(self.dims.nele).T)\n else:\n setattr(self.grid, grid, np.zeros(self.dims.node).T)\n except ValueError as value_error_message:\n warn('Variable {} has a problem with the data. Setting value as all zeros.'.format(grid))\n print(value_error_message)\n setattr(self.grid, grid, np.zeros(self.ds.variables[grid].shape))\n\n # Load the grid metrics data separately as we don't want to set a bunch of zeros for missing data.\n for metric in grid_metrics:\n if metric in self.ds.variables:\n setattr(self.grid, metric, self.ds.variables[metric][:])\n # Save the attributes.\n attributes = type('attributes', (object,), {})()\n for attribute in self.ds.variables[metric].ncattrs():\n setattr(attributes, attribute, getattr(self.ds.variables[metric], attribute))\n setattr(self.atts, metric, attributes)\n\n # Fix the indexing and shapes of the grid metrics variables. Only transpose and offset indexing for nbe.\n try:\n if metric == 'nbe':\n setattr(self.grid, metric, getattr(self.grid, metric).T - 1)\n else:\n setattr(self.grid, metric, getattr(self.grid, metric))\n except AttributeError:\n # We don't have this variable, so just pass by silently.\n pass\n\n try:\n self.grid.nv = self.ds.variables['nv'][:].astype(int) # force integers even though they should already be so\n self.grid.triangles = copy.copy(self.grid.nv.T - 1) # zero-indexed for python\n except KeyError:\n # If we don't have a triangulation, make one.\n triangulation = tri.Triangulation(self.grid.lon, self.grid.lat)\n self.grid.triangles = triangulation.triangles\n self.grid.nv = self.grid.triangles.T + 1\n\n # Fix broken triangulations if necessary.\n if self.grid.nv.min() != 1:\n if self._debug:\n print('Fixing broken triangulation. Current minimum for nv is {} and for triangles is {} but they '\n 'should be 1 and 0, respectively.'.format(self.grid.nv.min(), self.grid.triangles.min()))\n self.grid.nv = (self.ds.variables['nv'][:].astype(int) - self.ds.variables['nv'][:].astype(int).min()) + 1\n self.grid.triangles = copy.copy(self.grid.nv.T) - 1\n\n # If we've been given an element dimension to subsample in, fix the triangulation here. We should really do\n # this for the nodes too.\n if 'nele' in self._dims:\n if self._debug:\n print('Fix triangulation table as we have been asked for only specific elements.')\n print('Triangulation table minimum/maximum: {}/{}'.format(self.grid.nv[:, self._dims['nele']].min(),\n self.grid.nv[:, self._dims['nele']].max()))\n # Redo the triangulation here too.\n new_nv = copy.copy(self.grid.nv[:, self._dims['nele']])\n for i, new in enumerate(np.unique(new_nv)):\n new_nv[new_nv == new] = i\n self.grid.nv = new_nv + 1\n self.grid.triangles = new_nv.T\n\n # Update dimensions to match those we've been given, if any. 
Omit time here as we shouldn't be touching that\n # dimension for any variable in use in here.\n for dim in self._dims:\n if dim != 'time':\n setattr(self.dims, dim, len(self._dims[dim]))\n\n # Add compatibility for FVCOM3 (these variables are only specified on the element centres in FVCOM4+ output\n # files). Only create the element centred values if we have the same number of nodes as in the triangulation.\n # This does not occur if we've been asked to extract an incompatible set of nodes and elements, for whatever\n # reason (e.g. testing). We don't add attributes for the data if we've created it as doing so is a pain.\n for var in 'h_center', 'siglay_center', 'siglev_center':\n try:\n setattr(self.grid, var, self.ds.variables[var][:])\n # Save the attributes.\n attributes = type('attributes', (object,), {})()\n for attribute in self.ds.variables[var].ncattrs():\n setattr(attributes, attribute, getattr(self.ds.variables[var], attribute))\n setattr(self.atts, var, attributes)\n except KeyError:\n if self.grid.nv.max() == len(self.grid.x):\n try:\n setattr(self.grid, var, nodes2elems(getattr(self.grid, var.split('_')[0]), self.grid.triangles))\n except IndexError:\n # Maybe the array's the wrong way around. Flip it and try again.\n setattr(self.grid, var, nodes2elems(getattr(self.grid, var.split('_')[0]).T, self.grid.triangles))\n\n # Convert the given W/E/S/N coordinates into node and element IDs to subset.\n if self._bounding_box:\n self._dims['node'] = np.argwhere((self.grid.lon > self._dims['wesn'][0]) &\n (self.grid.lon < self._dims['wesn'][1]) &\n (self.grid.lat > self._dims['wesn'][2]) &\n (self.grid.lat < self._dims['wesn'][3])).flatten()\n self._dims['nele'] = np.argwhere((self.grid.lonc > self._dims['wesn'][0]) &\n (self.grid.lonc < self._dims['wesn'][1]) &\n (self.grid.latc > self._dims['wesn'][2]) &\n (self.grid.latc < self._dims['wesn'][3])).flatten()\n\n # If we've been given dimensions to subset in, do that now. Loading the data first and then subsetting\n # shouldn't be a problem from a memory perspective because if you don't have enough memory for the grid data,\n # you probably won't have enough for actually working with the outputs. 
Also update dimensions to match the\n # given dimensions.\n if 'node' in self._dims:\n self.dims.node = len(self._dims['node'])\n for var in 'x', 'y', 'lon', 'lat', 'h', 'siglay', 'siglev':\n try:\n node_index = self.ds.variables[var].dimensions.index('node')\n var_shape = [i for i in np.shape(self.ds.variables[var])]\n var_shape[node_index] = self.dims.node\n if 'siglay' in self._dims and 'siglay' in self.ds.variables[var].dimensions:\n var_shape[self.ds.variables[var].dimensions.index('siglay')] = self.dims.siglay\n elif 'siglev' in self._dims and 'siglev' in self.ds.variables[var].dimensions:\n var_shape[self.ds.variables[var].dimensions.index('siglev')] = self.dims.siglev\n _temp = np.empty(var_shape)\n if 'siglay' in self.ds.variables[var].dimensions:\n for ni, node in enumerate(self._dims['node']):\n if 'siglay' in self._dims:\n _temp[..., ni] = self.ds.variables[var][self._dims['siglay'], node]\n else:\n _temp[..., ni] = self.ds.variables[var][:, node]\n elif 'siglev' in self.ds.variables[var].dimensions:\n for ni, node in enumerate(self._dims['node']):\n if 'siglev' in self._dims:\n _temp[..., ni] = self.ds.variables[var][self._dims['siglev'], node]\n else:\n _temp[..., ni] = self.ds.variables[var][:, node]\n else:\n for ni, node in enumerate(self._dims['node']):\n _temp[..., ni] = self.ds.variables[var][..., node]\n except KeyError:\n if 'siglay' in var:\n _temp = np.empty((self.dims.siglay, self.dims.node))\n elif 'siglev' in var:\n _temp = np.empty((self.dims.siglev, self.dims.node))\n else:\n _temp = np.empty(self.dims.node)\n setattr(self.grid, var, _temp)\n if 'nele' in self._dims:\n self.dims.nele = len(self._dims['nele'])\n for var in 'xc', 'yc', 'lonc', 'latc', 'h_center', 'siglay_center', 'siglev_center':\n try:\n nele_index = self.ds.variables[var].dimensions.index('nele')\n var_shape = [i for i in np.shape(self.ds.variables[var])]\n var_shape[nele_index] = self.dims.nele\n if 'siglay' in self._dims and 'siglay' in self.ds.variables[var].dimensions:\n var_shape[self.ds.variables[var].dimensions.index('siglay')] = self.dims.siglay\n elif 'siglev' in self._dims and 'siglev' in self.ds.variables[var].dimensions:\n var_shape[self.ds.variables[var].dimensions.index('siglev')] = self.dims.siglev\n _temp = np.empty(var_shape)\n if 'siglay' in self.ds.variables[var].dimensions:\n for ni, nele in enumerate(self._dims['nele']):\n if 'siglay' in self._dims:\n _temp[..., ni] = self.ds.variables[var][self._dims['siglay'], nele]\n else:\n _temp[..., ni] = self.ds.variables[var][:, nele]\n elif 'siglev' in self.ds.variables[var].dimensions:\n for ni, nele in enumerate(self._dims['nele']):\n if 'siglev' in self._dims:\n _temp[..., ni] = self.ds.variables[var][self._dims['siglev'], nele]\n else:\n _temp[..., ni] = self.ds.variables[var][:, nele]\n else:\n for ni, nele in enumerate(self._dims['nele']):\n _temp[..., ni] = self.ds.variables[var][..., nele]\n except KeyError:\n # FVCOM3 files don't have h_center, siglay_center and siglev_center, so make var_shape manually.\n if var.startswith('siglev'):\n var_shape = [self.dims.siglev, self.dims.nele]\n elif var.startswith('siglay'):\n var_shape = [self.dims.siglay, self.dims.nele]\n else:\n var_shape = self.dims.nele\n _temp = np.zeros(var_shape)\n setattr(self.grid, var, _temp)\n\n # Check if we've been given vertical dimensions to subset in too, and if so, do that. 
Check we haven't\n # already done this if the 'node' and 'nele' sections above first.\n for var in 'siglay', 'siglev', 'siglay_center', 'siglev_center':\n short_dim = copy.copy(var)\n # Assume we need to subset this one unless 'node' or 'nele' are missing from self._dims. If they're in\n # self._dims, we've already subsetted in the 'node' and 'nele' sections above, so doing it again here\n # would fail.\n subset_variable = True\n if 'node' in self._dims or 'nele' in self._dims:\n subset_variable = False\n # Strip off the _center to match the dimension name.\n if short_dim.endswith('_center'):\n short_dim = short_dim.split('_')[0]\n if short_dim in self._dims:\n if short_dim in self.ds.variables[var].dimensions and subset_variable:\n _temp = getattr(self.grid, var)[self._dims[short_dim], ...]\n setattr(self.grid, var, _temp)\n\n # Check ranges and if zero assume we're missing that particular type, so convert from the other accordingly.\n self.grid.lon_range = np.ptp(self.grid.lon)\n self.grid.lat_range = np.ptp(self.grid.lat)\n self.grid.lonc_range = np.ptp(self.grid.lonc)\n self.grid.latc_range = np.ptp(self.grid.latc)\n self.grid.x_range = np.ptp(self.grid.x)\n self.grid.y_range = np.ptp(self.grid.y)\n self.grid.xc_range = np.ptp(self.grid.xc)\n self.grid.yc_range = np.ptp(self.grid.yc)\n\n # Only do the conversions when we have more than a single point since the relevant ranges will be zero with\n # only one position.\n if self.dims.node > 1:\n if self.grid.lon_range == 0 and self.grid.lat_range == 0:\n self.grid.lon, self.grid.lat = lonlat_from_utm(self.grid.x, self.grid.y, zone=self._zone)\n if self.grid.lon_range == 0 and self.grid.lat_range == 0:\n self.grid.x, self.grid.y, _ = utm_from_lonlat(self.grid.lon, self.grid.lat)\n if self.dims.nele > 1:\n if self.grid.lonc_range == 0 and self.grid.latc_range == 0:\n self.grid.lonc, self.grid.latc = lonlat_from_utm(self.grid.xc, self.grid.yc, zone=self._zone)\n if self.grid.lonc_range == 0 and self.grid.latc_range == 0:\n self.grid.xc, self.grid.yc, _ = utm_from_lonlat(self.grid.lonc, self.grid.latc)",
"def create_object(self, confM2R, grd_filename):\n ds = xr.open_dataset(grd_filename)\n\n if self.type == 'FORCINGDATA':\n\n logging.info(\"[M2R_grd] ---> Assuming {} grid type for {}\".format(confM2R.grd_type, self.type))\n logging.info(\"[M2R_grd] ---> Using dimension names {} and {} and {}\".format(confM2R.lon_name,\n confM2R.lat_name,\n confM2R.depth_name))\n\n self.lon = ds[str(confM2R.lon_name)][:]\n self.lat = ds[str(confM2R.lat_name)][:]\n self.h = ds[str(confM2R.depth_name)][:]\n self.nlevels = len(self.h)\n self.fillval = -9.99e+33\n self.hc = None\n\n if self.lon.ndim == 1:\n self.lon, self.lat = np.meshgrid(self.lon, self.lat)\n\n # Create grid for ESMF interpolation\n\n self.esmfgrid = ESMF.Grid(filename=grd_filename, filetype=ESMF.FileFormat.GRIDSPEC,\n is_sphere=True, coord_names=[str(confM2R.lon_name), str(confM2R.lat_name)],\n add_mask=False)\n self.esmfgrid_u = ESMF.Grid(filename=grd_filename, filetype=ESMF.FileFormat.GRIDSPEC,\n is_sphere=True,\n coord_names=[str(confM2R.lon_name_u), str(confM2R.lat_name_u)],\n add_mask=False)\n self.esmfgrid_v = ESMF.Grid(filename=grd_filename, filetype=ESMF.FileFormat.GRIDSPEC,\n is_sphere=True,\n coord_names=[str(confM2R.lon_name_v), str(confM2R.lat_name_v)],\n add_mask=False)\n\n if confM2R.ocean_indata_type == 'SODA3':\n self.fillval = -1.e+20\n if confM2R.ocean_indata_type == 'SODA3_5DAY':\n self.fillval = -1.e+20\n if confM2R.ocean_indata_type == 'GLORYS':\n self.fillval = 9.96921e+36\n\n if confM2R.ocean_indata_type == 'NORESM':\n # self.h = ds[\"depth\"][:]\n self.h = np.asarray([0, 5, 10, 15, 20, 25, 30, 40, 50, 62.5, 75, 87.5, 100, 112.5, 125,\n 137.5, 150, 175, 200, 225, 250, 275, 300, 350, 400, 450, 500, 550, 600,\n 650, 700, 750, 800, 850, 900, 950, 1000, 1050, 1100, 1150, 1200, 1250,\n 1300, 1350, 1400, 1450, 1500, 1625, 1750, 1875, 2000, 2250, 2500, 2750,\n 3000, 3250, 3500, 3750, 4000, 4250, 4500, 4750, 5000, 5250, 5500, 5750,\n 6000, 6250, 6500, 6750])\n self.fillval = 32768\n self.nlevels = len(self.h)\n\n IOverticalGrid.get_z_levels(self)\n\n if self.type == 'STATION':\n self.lon = ds[confM2R.lon_name][:]\n self.lat = ds[confM2R.lat_name][:]\n self.h = ds[confM2R.depth_name][:]\n self.time = ds[confM2R.time_name][:]\n\n self.Lp = 1\n self.Mp = 1\n self.fillval = -9.99e+33\n\n if self.type in ['ROMS']:\n\n self.write_clim = True\n self.write_bry = True\n self.write_init = True\n self.write_stations = False\n\n self.lonname = 'lon_rho'\n self.latname = 'lat_rho'\n\n \"\"\"\n Set initTime to 1 if you dont want the first time-step to be\n the initial field (no ubar and vbar if time=0)\n \"\"\"\n\n self.inittime = 0\n self.ocean_time = 0\n self.NT = 2\n self.tracer = self.NT\n\n self.message = None # Used to store the date for printing to screen (IOwrite.py)\n self.time = 0\n self.reftime = 0\n self.grdtype = 'regular'\n self.mask_rho = ds[\"mask_rho\"][:, :]\n self.lon_rho = ds[\"lon_rho\"][:, :]\n self.lat_rho = ds[\"lat_rho\"][:, :]\n self.h = ds[\"h\"][:, :]\n\n masked_h = np.where(self.h > 0, self.h, self.h.max())\n\n self.hmin = masked_h.min()\n if \"Vtransform\" in ds.variables:\n self.vtransform = ds[\"Vtransform\"].values\n else:\n self.vtransform = confM2R.vtransform\n\n if \"s_rho\" in ds.variables:\n self.s_rho = ds[\"s_rho\"].values\n self.nlevels = len(self.s_rho)\n else:\n self.nlevels = confM2R.nlevels\n\n if \"Vstretching\" in ds.variables:\n self.vstretching = ds[\"Vstretching\"].values\n if \"theta_s\" in ds.variables:\n self.theta_s = ds[\"theta_s\"].values\n else:\n self.theta_s = confM2R.theta_s\n if 
\"theta_b\" in ds.variables:\n self.theta_b = ds[\"theta_b\"].values\n else:\n self.theta_b = confM2R.theta_b\n if \"Tcline\" in ds.variables:\n self.tcline = ds[\"Tcline\"].values\n else:\n self.tcline = confM2R.tcline\n if \"hc\" in ds.variables:\n self.hc = ds[\"hc\"].values\n else:\n self.hc = confM2R.hc\n\n if self.vtransform == 1:\n self.hc = min(self.hmin, self.tcline)\n self.hc = self.tcline\n if self.tcline > self.hmin:\n print('Vertical transformation parameters are not defined correctly in either gridid.txt '\n 'or in the history files: \\n Tc\\\n line = %d and hmin = %d. \\n You need to make sure that '\n 'tcline <= hmin when using transformation 1.' % (\n self.tcline, self.hmin))\n else:\n self.hc = self.tcline\n\n zeta = None\n if zeta is None:\n self.zeta = np.zeros(self.h.shape)\n else:\n self.zeta = zeta\n\n # for findvar in ds:\n # if findvar==\"hraw\":\n # self.hraw = ds[\"hraw\"][:,:,:]\n\n self.lon_u = ds[\"lon_u\"][:, :]\n self.lat_u = ds[\"lat_u\"][:, :]\n self.mask_u = ds[\"mask_u\"][:, :]\n for findvar in ds:\n if findvar == \"lon_vert\":\n self.lon_vert = ds[\"lon_vert\"][:, :]\n self.lat_vert = ds[\"lat_vert\"][:, :]\n\n for findvar in ds:\n if findvar == \"x_rho\":\n self.x_rho = ds[\"x_rho\"][:, :]\n self.y_rho = ds[\"y_rho\"][:, :]\n\n for findvar in ds:\n if findvar == \"x_u\":\n self.x_u = ds[\"x_u\"][:, :]\n self.y_u = ds[\"y_u\"][:, :]\n\n for findvar in ds:\n if findvar == \"x_v\":\n self.x_v = ds[\"x_v\"][:, :]\n self.y_v = ds[\"y_v\"][:, :]\n\n for findvar in ds:\n if findvar == \"x_psi\":\n self.x_psi = ds[\"x_psi\"][:, :]\n self.y_psi = ds[\"y_psi\"][:, :]\n\n for findvar in ds:\n if findvar == \"x_vert\":\n self.x_vert = ds[\"x_vert\"][:, :]\n self.y_vert = ds[\"y_vert\"][:, :]\n\n for findvar in ds:\n if findvar == \"xl\":\n self.xl = ds[\"xl\"]\n self.el = ds[\"el\"]\n\n for findvar in ds:\n if findvar == \"dmde\":\n self.dmde = ds[\"dmde\"][:, :]\n self.dndx = ds[\"dndx\"][:, :]\n\n self.lon_v = ds[\"lon_v\"][:, :]\n self.lat_v = ds[\"lat_v\"][:, :]\n self.mask_v = ds[\"mask_v\"][:, :]\n\n # self.spherical = ds[\"spherical\"][:]\n\n self.lon_psi = self.lon_u[:-1, :]\n self.lat_psi = self.lat_v[:, :-1]\n self.mask_psi = self.mask_v[:, :-1]\n\n # self.f = ds[\"f\"][:, :]\n self.angle = ds[\"angle\"][:, :]\n\n self.pm = ds[\"pm\"][:, :]\n self.invpm = 1.0 / np.asarray(ds[\"pm\"][:, :])\n self.pn = ds[\"pn\"][:, :]\n self.invpn = 1.0 / np.asarray(ds[\"pn\"][:, :])\n\n self.Lp = len(self.lat_rho[1, :])\n self.Mp = len(self.lat_rho[:, 1])\n\n self.fillval = -9.99e33\n\n self.eta_rho = self.Mp\n self.eta_u = self.Mp\n self.eta_v = self.Mp - 1\n self.eta_psi = self.Mp - 1\n self.xi_rho = self.Lp\n self.xi_u = self.Lp - 1\n self.xi_v = self.Lp\n self.xi_psi = self.Lp - 1\n\n # Boolean to check if we need to initialize the CLIM file before writing\n self.ioClimInitialized = False\n self.ioInitInitialized = False\n\n if self.lon_rho.ndim == 1:\n self.lon_rho, self.lat_rho = np.meshgrid(self.lon_rho, self.lat_rho)\n self.lon_u, self.lat_u = np.meshgrid(self.lon_u, self.lat_u)\n self.lon_v, self.lat_v = np.meshgrid(self.lon_v, self.lat_v)\n\n # Setup the vertical coordinate system\n IOverticalGrid.calculateVgrid(self)\n\n self.esmfgrid_u = ESMF.Grid(filename=grd_filename, filetype=ESMF.FileFormat.GRIDSPEC,\n coord_names=['lon_u', 'lat_u'],\n is_sphere=True,\n add_mask=False)\n self.esmfgrid_v = ESMF.Grid(filename=grd_filename, filetype=ESMF.FileFormat.GRIDSPEC,\n is_sphere=True,\n coord_names=['lon_v', 'lat_v'],\n add_mask=False)\n self.esmfgrid = 
ESMF.Grid(filename=grd_filename, filetype=ESMF.FileFormat.GRIDSPEC,\n is_sphere=True,\n coord_names=[self.lonname, self.latname],\n add_mask=False)",
"def main(fpath, kwds=True):\n\n gcmd_keywords_path = os.path.join(os.path.dirname(__file__), 'gcmd_contents.json')\n suggested_keywords = {'suggested_keywords': []}\n\n with open(gcmd_keywords_path) as fp:\n gcmd_keywords = json.load(fp)\n fp.close()\n\n with netCDF4.Dataset(fpath) as nc:\n # Add GCMD keywords\n for cf_var in nc.variables:\n cf_var = nc.variables[cf_var]\n standard_name = getattr(cf_var, 'standard_name', None)\n if standard_name is None:\n continue\n if standard_name in gcmd_keywords:\n for keyword in gcmd_keywords[standard_name]:\n if keyword:\n suggested_keywords['suggested_keywords'].append(keyword)\n\n # Add cf standard names\n standard_name_table = util.StandardNameTable()\n for cf_var in nc.variables:\n cf_var = nc.variables[cf_var]\n standard_name = getattr(cf_var, 'standard_name', None)\n if standard_name is None:\n continue\n if standard_name in ['time', 'latitude', 'longitude']:\n continue\n if standard_name in standard_name_table:\n suggested_keywords['suggested_keywords'].append(standard_name)\n\n # make dict to unpack in the get_geo_extents() function\n geo_cfg = {\n \"lat\": {\n \"possible_units\": (\n 'degrees_east',\n 'degree_east',\n 'degrees_E',\n 'degree_E',\n 'degreesE',\n 'degreeE'\n ),\n \"std_name\": \"latitude\",\n \"axis_name\": \"X\",\n \"short_name\": \"lat\"\n },\n\n \"lon\": {\n \"possible_units\": (\n 'degrees_north',\n 'degree_north',\n 'degrees_N',\n 'degree_N',\n 'degreesN',\n 'degreeN'\n ),\n \"std_name\": \"longitude\",\n \"axis_name\": \"Y\",\n \"short_name\": \"lon\"\n\n }\n }\n\n # print\n for g in geo_cfg.values():\n get_geo_extents(nc, **g)\n\n if kwds:\n print(','.join(suggested_keywords['suggested_keywords']))\n\n return",
"def from_cdo_griddes(griddes):\n\n with open(griddes) as grid_file:\n grid_file_lines = grid_file.readlines()\n\n grid_dic = {}\n\n for line in grid_file_lines:\n words = line.split()\n if words[0] == '#':\n continue\n else:\n length = len(words)\n if length == 3:\n grid_dic[words[0]] = words[2]\n else:\n value_string = ' '.join(words[2:length-1])\n grid_dic[words[0]] = value_string\n\n if grid_dic['gridtype'] != 'lonlat':\n print(('Gridtype {0} not supported'.format(grid_dic['gridtype'])))\n return ''\n\n lon = np.zeros(int(grid_dic['xsize']))\n lat = np.zeros(int(grid_dic['ysize']))\n\n for i in range(len(lon)):\n lon[i] = float(grid_dic['xfirst']) + i * float(grid_dic['xinc'])\n for j in range(len(lat)):\n lat[j] = float(grid_dic['yfirst']) + j * float(grid_dic['yinc'])\n\n if grid_dic['xname'] == 'rlon':\n pol_lon = float(grid_dic['xnpole'])\n pol_lat = float(grid_dic['ynpole'])\n grid = RotGrid(lon, lat, pol_lon, pol_lat)\n else:\n grid = Grid(lon, lat)\n\n return grid",
"def read_gds(\n self,\n infile,\n units=\"skip\",\n rename={},\n rename_template=\"{name}\",\n layers={},\n datatypes={},\n texttypes={},\n ):\n self._references = []\n close = True\n if hasattr(infile, \"__fspath__\"):\n infile = open(infile.__fspath__(), \"rb\")\n elif isinstance(infile, (basestring, Path)):\n infile = open(infile, \"rb\")\n else:\n close = False\n emitted_warnings = []\n kwargs = {}\n create_element = None\n factor = 1\n cell = None\n properties = {}\n attr = -1\n for record in _record_reader(infile):\n # LAYER\n if record[0] == 0x0D:\n kwargs[\"layer\"] = layers.get(record[1][0], record[1][0])\n # DATATYPE or BOXTYPE\n elif record[0] == 0x0E or record[0] == 0x2E:\n kwargs[\"datatype\"] = datatypes.get(record[1][0], record[1][0])\n # TEXTTYPE\n elif record[0] == 0x16:\n kwargs[\"texttype\"] = texttypes.get(record[1][0], record[1][0])\n # XY\n elif record[0] == 0x10:\n if \"xy\" in kwargs:\n kwargs[\"xy\"] = numpy.concatenate((kwargs[\"xy\"], factor * record[1]))\n else:\n kwargs[\"xy\"] = factor * record[1]\n # WIDTH\n elif record[0] == 0x0F:\n kwargs[\"width\"] = factor * abs(record[1][0])\n if record[1][0] < 0:\n kwargs[\"width_transform\"] = False\n # ENDEL\n elif record[0] == 0x11:\n if create_element is not None:\n el = create_element(**kwargs)\n if len(properties) > 0:\n el.properties = properties\n properties = {}\n cell.add(el)\n create_element = None\n kwargs = {}\n # BOUNDARY\n elif record[0] == 0x08:\n create_element = self._create_polygon\n # PATH\n elif record[0] == 0x09:\n create_element = self._create_path\n # BOX\n elif record[0] == 0x2D:\n create_element = self._create_polygon\n if record[0] not in emitted_warnings:\n warnings.warn(\n \"[GDSPY] GDSII elements of type BOX are imported as polygons.\",\n stacklevel=2,\n )\n emitted_warnings.append(record[0])\n # TEXT\n elif record[0] == 0x0C:\n create_element = self._create_label\n # SNAME\n elif record[0] == 0x12:\n if record[1] in rename:\n name = rename[record[1]]\n else:\n name = rename_template.format(name=record[1])\n kwargs[\"ref_cell\"] = name\n # COLROW\n elif record[0] == 0x13:\n kwargs[\"columns\"] = record[1][0]\n kwargs[\"rows\"] = record[1][1]\n # STRANS\n elif record[0] == 0x1A:\n kwargs[\"x_reflection\"] = (int(record[1][0]) & 0x8000) > 0\n if (int(record[1][0]) & 0x0006) and record[0] not in emitted_warnings:\n warnings.warn(\n \"[GDSPY] Absolute magnification or rotation of \"\n \"references is not supported. 
Transformations \"\n \"will be interpreted as relative.\",\n stacklevel=2,\n )\n emitted_warnings.append(record[0])\n # MAG\n elif record[0] == 0x1B:\n kwargs[\"magnification\"] = record[1][0]\n # ANGLE\n elif record[0] == 0x1C:\n kwargs[\"rotation\"] = record[1][0]\n # SREF\n elif record[0] == 0x0A:\n create_element = self._create_reference\n # AREF\n elif record[0] == 0x0B:\n create_element = self._create_array\n # STRNAME\n elif record[0] == 0x06:\n if record[1] in rename:\n name = rename[record[1]]\n else:\n name = rename_template.format(name=record[1])\n cell = Cell(name, exclude_from_current=True)\n if name in self.cells:\n raise ValueError(\"[GDSPY] Multiple cells with name: {0} in GDSII file\".format(name))\n self.cells[name] = cell\n # STRING\n elif record[0] == 0x19:\n kwargs[\"text\"] = record[1]\n # ENDSTR\n elif record[0] == 0x07:\n cell = None\n # UNITS\n elif record[0] == 0x03:\n if units == \"skip\":\n factor = record[1][0]\n elif units == \"import\":\n self.unit = record[1][1] / record[1][0]\n self.precision = record[1][1]\n factor = record[1][0]\n elif units == \"convert\":\n factor = record[1][1] / self.unit\n else:\n raise ValueError(\n \"[GDSPY] units must be one of 'convert', 'import' or 'skip'.\"\n )\n # LIBNAME\n elif record[0] == 0x02:\n self.name = record[1]\n # PRESENTATION\n elif record[0] == 0x17:\n kwargs[\"anchor\"] = GdsLibrary._import_anchors[\n int(record[1][0]) & 0x000F\n ]\n # PATHTYPE\n elif record[0] == 0x21:\n kwargs[\"ends\"] = GdsLibrary._pathtype_dict.get(record[1][0], \"extended\")\n # BGNEXTN\n elif record[0] == 0x30:\n kwargs[\"bgnextn\"] = factor * record[1][0]\n # ENDEXTN\n elif record[0] == 0x31:\n kwargs[\"endextn\"] = factor * record[1][0]\n # ENDLIB\n elif record[0] == 0x04:\n for ref in self._references:\n if ref.ref_cell in self.cells:\n ref.ref_cell = self.cells[ref.ref_cell]\n # PROPATTR\n elif record[0] == 0x2B:\n attr = record[1][0]\n # PROPVALUE\n elif record[0] == 0x2C:\n properties[attr] = record[1]\n # Not supported\n elif (\n record[0] not in emitted_warnings\n and record[0] not in GdsLibrary._unused_records\n ):\n warnings.warn(\n \"[GDSPY] Record type {0} ({1:02X}) is not supported.\".format(\n GdsLibrary._record_name[record[0]], record[0]\n ),\n stacklevel=2,\n )\n emitted_warnings.append(record[0])\n if close:\n infile.close()\n return self",
"def read_grid(self, file_path=None):\n print('[info] reading the grid ...')\n if not file_path:\n file_path = os.path.join(self.directory, 'grid.dat')\n if not os.path.exists(file_path):\n file_path = os.path.join(self.directory, 'grid.txt')\n # test if file written in binary format\n textchars = bytearray({7, 8, 9, 10, 12, 13, 27}\n | set(range(0x20, 0x100)) - {0x7f})\n is_binary_string = lambda bytes: bool(bytes.translate(None, textchars))\n infile = open(file_path, 'rb')\n binary_format = is_binary_string(infile.read(1024))\n infile.close()\n if binary_format:\n with open(file_path, 'rb') as infile:\n # x-direction\n nx = struct.unpack('i', infile.read(4))[0]\n x = numpy.array(struct.unpack('d' * (nx + 1),\n infile.read(8 * (nx + 1))))\n # y-direction\n ny = struct.unpack('i', infile.read(4))[0]\n y = numpy.array(struct.unpack('d' * (ny + 1),\n infile.read(8 * (ny + 1))))\n self.grid = numpy.array([x, y])\n else:\n with open(file_path, 'r') as infile:\n n_cells = numpy.array([int(n)\n for n in infile.readline().strip().split()])\n coords = numpy.loadtxt(infile, dtype=numpy.float64)\n self.grid = numpy.array(numpy.split(coords,\n numpy.cumsum(n_cells[:-1] + 1)))\n if self.grid.size == 2:\n print('\\tgrid-size: {}x{}'.format(self.grid[0].size - 1,\n self.grid[1].size - 1))\n elif self.grid.size == 3:\n print('\\tgrid-size: {}x{}x{}'.format(self.grid[0].size - 1,\n self.grid[1].size - 1,\n self.grid[2].size - 1))",
"def ParseFEM(self, use_cython=True, raw=None):\n if not vtk_loaded:\n raise Exception('Unable to load VTK module. Cannot parse raw cdb data')\n return\n \n if self.CheckRaw():\n raise Exception('Missing key data. Cannot parse into unstructured grid.') \n \n # Convert to vtk style arrays\n if use_cython and cython_loaded:\n self.data = CDBparser.ParseForFEM(self.raw)\n else:\n self.data = PythonParser.ParseForFEM(self.raw)\n \n # Create unstructured grid\n self.uGrid = Utilities.MakeuGrid(self.data['offset'], self.data['cells'], \n self.data['cell_type'],\n self.data['nodes'][:, :3])\n\n # Store original ANSYS cell and node numbering\n Utilities.AddPointScalars(self.uGrid, self.data['orignode'], 'ANSYSnodenum')\n\n # Extract ANSYS element numbering and store\n ansyselem = self.raw['enum'].compress(self.data['elemused'])\n Utilities.AddCellScalars(self.uGrid, ansyselem, 'ANSYSelemnum')\n\n # Add node components to unstructured grid\n ibool = np.empty(self.uGrid.GetNumberOfPoints(), dtype=np.int8)\n for comp in self.data['node_comps']:\n ibool[:] = 0 # reset component array\n ibool[self.data['node_comps'][comp]] = 1 \n Utilities.AddPointScalars(self.uGrid, ibool, comp)\n \n # Add tracker for original node numbering\n Utilities.AddPointScalars(self.uGrid,\n np.arange(self.uGrid.GetNumberOfPoints()),\n 'VTKorigID')\n \n return self.data, self.uGrid, self.data['cellarr'], self.data['ncellpts']",
"def readGR3File(inputFilename):\n print 'Reading ' + inputFilename + ' ...'\n infile = open(inputFilename, 'r')\n description = infile.readline().strip() # remove leading/trailing whitespace\n tmpStr = infile.readline()\n nTriangles, nNodes = (int(s) for s in tmpStr.split())\n print ' nTriangles={0:d} nNodes={1:d}'.format(nTriangles, nNodes)\n\n # nodes\n nodeArray = readNodeBlock(infile, nNodes)\n nodenum = np.array(nodeArray[:, 0].flatten(), dtype=int)\n nodexyz = np.zeros((nNodes, 3))\n nodexyz[:, :2] = nodeArray[:, 1:3]\n nodalValues = nodeArray[:, 3]\n\n print ' Nodal values min={0:g} max={1:g}'.format(min(nodalValues), max(nodalValues))\n\n # triangular elements\n triArray = readElemBlock(infile, nTriangles)\n\n trinum = triArray[:, 0].flatten()\n tritype = triArray[0, 1]\n trinodes = triArray[:, -3:] - 1 # three last columns, 0-based indexing\n #triangles = meshElements(trinodes,trinum,tritype)\n\n x = nodexyz[:, 0]\n y = nodexyz[:, 1]\n\n tmpStr = infile.readline()\n boundaries = []\n if len(tmpStr) > 0:\n # boundary information, if not end of file\n nOpenBndSegments = int(tmpStr.split()[0])\n nOpenBndNodesTot = int(infile.readline().split()[0])\n print ' nOpenBndSegments={0:d} nOpenBndNodesTot={1:d}'.format(nOpenBndSegments, nOpenBndNodesTot)\n for iBnd in range(nOpenBndSegments):\n bndHeader = infile.readline().split()\n nBndNodes = int(bndHeader[0])\n tag = bndHeader[-1]\n if tag.isdigit():\n tag = 'open' + tag\n print ' open bnd {0:d} {1:s}: {2:d} nodes'.format(iBnd + 1, tag, nBndNodes)\n tmpList = []\n for iN in range(nBndNodes):\n tmpList.append(int(infile.readline()))\n nodes = np.array(tmpList, dtype=int) - 1\n boundaries.append(meshContainer.meshBoundary('open', tag, nodes))\n nLandBndSegments = int(infile.readline().split()[0])\n nLandBndNodesTot = int(infile.readline().split()[0])\n landBndTags = range(\n nOpenBndSegments + 1,\n nOpenBndSegments + nLandBndSegments + 1)\n print ' nLandBndSegments={0:d} nLandBndNodesTot={1:d}'.format(nLandBndSegments, nLandBndNodesTot)\n for iBnd in range(nLandBndSegments):\n bndHeader = infile.readline().split()\n nBndNodes = int(bndHeader[0])\n try:\n landType = int(bndHeader[1])\n except:\n print \"\"\"Land boundary type missing in gr3 file. Add 0/1 (land/island) after number of nodes in each land boudary, e.g.\n 1002 = Total number of closed boundary nodes\n 501 0 = Number of nodes in closed boundary 1\"\"\"\n raise Exception(\n 'Could not parse land boundary type (0/1 - land/island)\\n')\n landType = 'island' if landType == 1 else 'land'\n tag = landType + bndHeader[-1]\n print ' land bnd {0:d} {1:s}: {2:d} nodes'.format(iBnd + 1, tag, nBndNodes)\n tmpList = []\n for iN in range(nBndNodes):\n tmpList.append(int(infile.readline()))\n #tmpList = fromfile(infile,dtype=int,count=nBndNodes,sep=' ')\n nodes = np.array(tmpList, dtype=int) - 1\n boundaries.append(meshContainer.meshBoundary(landType, tag, nodes))\n\n infile.close()\n\n # for better interpolation, round coordinates to 1e-4\n nDig = 4\n x = np.round(x, nDig)\n y = np.round(y, nDig)\n\n return x, y, nodalValues, trinodes, boundaries, description",
"def read_gds(self,\n infile,\n units='skip',\n rename={},\n layers={},\n datatypes={},\n texttypes={}):\n self._references = []\n if isinstance(infile, basestring):\n infile = open(infile, 'rb')\n close = True\n else:\n close = False\n emitted_warnings = []\n record = self._read_record(infile)\n kwargs = {}\n create_element = None\n factor = 1\n cell = None\n while record is not None:\n # LAYER\n if record[0] == 0x0d:\n kwargs['layer'] = layers.get(record[1][0], record[1][0])\n # DATATYPE\n elif record[0] == 0x0e:\n kwargs['datatype'] = datatypes.get(record[1][0], record[1][0])\n # TEXTTYPE\n elif record[0] == 0x16:\n kwargs['texttype'] = texttypes.get(record[1][0], record[1][0])\n # XY\n elif record[0] == 0x10:\n kwargs['xy'] = factor * record[1]\n # WIDTH\n elif record[0] == 0x0f:\n kwargs['width'] = factor * abs(record[1][0])\n if record[1][0] < 0 and record[0] not in emitted_warnings:\n warnings.warn(\n \"[GDSPY] Paths with absolute width value are not \"\n \"supported. Scaling these paths will also scale \"\n \"their width.\",\n stacklevel=2)\n emitted_warnings.append(record[0])\n # ENDEL\n elif record[0] == 0x11:\n if create_element is not None:\n cell.add(create_element(**kwargs))\n create_element = None\n kwargs = {}\n # BOUNDARY\n elif record[0] == 0x08:\n create_element = self._create_polygon\n # PATH\n elif record[0] == 0x09:\n create_element = self._create_path\n # TEXT\n elif record[0] == 0x0c:\n create_element = self._create_label\n # SNAME\n elif record[0] == 0x12:\n kwargs['ref_cell'] = rename.get(record[1], record[1])\n # COLROW\n elif record[0] == 0x13:\n kwargs['columns'] = record[1][0]\n kwargs['rows'] = record[1][1]\n # STRANS\n elif record[0] == 0x1a:\n kwargs['x_reflection'] = ((int(record[1][0]) & 0x8000) > 0)\n if (int(record[1][0]) &\n 0x0006) and record[0] not in emitted_warnings:\n warnings.warn(\n \"[GDSPY] Absolute magnification or rotation of \"\n \"references is not supported. 
Transformations will \"\n \"be interpreted as relative.\",\n stacklevel=2)\n emitted_warnings.append(record[0])\n # MAG\n elif record[0] == 0x1b:\n kwargs['magnification'] = record[1][0]\n # ANGLE\n elif record[0] == 0x1c:\n kwargs['rotation'] = record[1][0]\n # SREF\n elif record[0] == 0x0a:\n create_element = self._create_reference\n # AREF\n elif record[0] == 0x0b:\n create_element = self._create_array\n # STRNAME\n elif record[0] == 0x06:\n name = rename.get(record[1], record[1])\n cell = Cell(name, exclude_from_current=True)\n self.cell_dict[name] = cell\n # STRING\n elif record[0] == 0x19:\n kwargs['text'] = record[1]\n # ENDSTR\n elif record[0] == 0x07:\n cell = None\n # UNITS\n elif record[0] == 0x03:\n if units == 'skip':\n factor = record[1][0]\n elif units == 'import':\n self.unit = record[1][1] / record[1][0]\n self.precision = record[1][1]\n factor = record[1][0]\n elif units == 'convert':\n factor = record[1][1] / self.unit\n else:\n raise ValueError(\"[GDSPY] units must be one of 'convert', \"\n \"'import' or 'skip'.\")\n # LIBNAME\n elif record[0] == 0x02:\n self.name = record[1]\n # PRESENTATION\n elif record[0] == 0x17:\n kwargs['anchor'] = GdsLibrary._import_anchors[int(record[1][0])\n & 0x000f]\n # PATHTYPE\n elif record[0] == 0x21:\n if record[1][0] > 2:\n if 0x21 not in emitted_warnings:\n warnings.warn(\n \"[GDSPY] Path ends with custom size are not \"\n \"supported.\",\n RuntimeWarning,\n stacklevel=2)\n emitted_warnings.append(0x21)\n else:\n kwargs['ends'] = record[1][0]\n # ENDLIB\n elif record[0] == 0x04:\n for ref in self._references:\n if ref.ref_cell in self.cell_dict:\n ref.ref_cell = self.cell_dict[ref.ref_cell]\n elif ref.ref_cell in current_library.cell_dict:\n ref.ref_cell = current_library.cell_dict[ref.ref_cell]\n # Not supported\n elif (record[0] not in emitted_warnings and\n record[0] not in GdsLibrary._unused_records):\n warnings.warn(\n \"[GDSPY] Record type {0} ({1:02X}) is not \"\n \"supported.\".format(GdsLibrary._record_name[record[0]],\n record[0]),\n RuntimeWarning,\n stacklevel=2)\n emitted_warnings.append(record[0])\n record = self._read_record(infile)\n if close:\n infile.close()\n return self",
"def loadNodes(self, fname):\r\n with open(fname, \"r\") as fp:\r\n\r\n # Read in the header\r\n for line in fp:\r\n item = line.split()\r\n if len(item) > 0:\r\n if item[0] == \"SPECGRID\":\r\n self.SPECGRID = np.array(fp.readline().split()[0:3], dtype=int)\r\n if item[0] == \"COORDSYS\":\r\n self.COORDSYS = fp.readline().split()\r\n if item[0] == \"COORD\":\r\n break\r\n\r\n # Read in the coordinates\r\n self.coords = []\r\n for line in fp:\r\n if line.split()[-1] != \"/\":\r\n item = line.split()\r\n for c in item:\r\n if '*' in c:\r\n cc = c.split('*')\r\n for i in range(int(cc[0])):\r\n self.coords.append(cc[-1])\r\n else:\r\n self.coords.append(c)\r\n else:\r\n if len(line.split()) > 1:\r\n item = line.split()\r\n for i in range(len(item) - 1):\r\n cc = item[i]\r\n if '*' in cc:\r\n ccc = cc.split('*')\r\n for j in range(int(ccc[0])):\r\n self.coords.append(ccc[-1])\r\n else:\r\n self.coords.append(c)\r\n break\r\n else:\r\n break\r\n\r\n # Read in ZCORN\r\n self.zcorn = []\r\n i = 0\r\n for line in fp:\r\n item = line.split()\r\n if len(item) > 0:\r\n if item[0] == \"ZCORN\":\r\n for line in fp:\r\n if line.split():\r\n if line.split()[-1] != \"/\":\r\n self.zcorn += line.split()\r\n else:\r\n self.zcorn += line.split()[0:-1]\r\n break\r\n if len(self.zcorn) > 0:\r\n break\r\n\r\n # Read in (in)active cells\r\n self.active = []\r\n for line in fp:\r\n item = line.split()\r\n if len(item) > 0:\r\n if item[0] == \"ACTNUM\":\r\n for line in fp:\r\n if line.split():\r\n if line.split()[-1] != \"/\":\r\n c = line.split()\r\n if '*' in c:\r\n cc = c.split('*')\r\n for i in range(float(cc[0])):\r\n self.active += cc[-1]\r\n else:\r\n self.active += c\r\n else:\r\n self.active += line.split()[0:-1]\r\n break\r\n\r\n self.coords = np.array(self.coords, dtype=float)\r\n print(self.coords)\r\n\r\n # In Petrel...\r\n self.ne = self.SPECGRID[0] # x i\r\n self.nn = self.SPECGRID[1] # y j\r\n self.nz = self.SPECGRID[2] # z k\r\n\r\n # build grid\r\n self.buildGrid(plot=False)\r\n self.buildActiveCells(plot=False)\r\n self.buildZGrid(plot=False)\r\n # self.calculateVolumes(plot=False)\r\n #\r\n # Convert to VTK\r\n self.GridType = \"vtkStructuredGrid\"\r\n self.Grid = vtk.vtkStructuredGrid()\r\n self.Grid.SetDimensions(self.ne+1, self.nn+1, self.nz+1)\r\n vtk_points = vtk.vtkPoints()\r\n ve = 1.\r\n\r\n for iz in range(self.nz):\r\n if iz == 0:\r\n for iy in range(self.nn+1):\r\n for ix in range(self.ne+1):\r\n vtk_points.InsertNextPoint( self.X0[ix,iy], \\\r\n self.Y0[ix,iy], \\\r\n ve * self.ZZT[iz][ix,iy] )\r\n for iy in range(self.nn+1):\r\n for ix in range(self.ne+1):\r\n vtk_points.InsertNextPoint( self.X0[ix,iy], \\\r\n self.Y0[ix,iy], \\\r\n ve * self.ZZB[iz][ix,iy] )\r\n self.Grid.SetPoints(vtk_points)\r\n\r\n # Add in active cells\r\n ac = vtk.vtkIntArray()\r\n ac.SetName( \"ActiveCells\" )\r\n for iac in self.ActiveCells.flatten( order='F' ):\r\n ac.InsertNextTuple1( iac )\r\n self.Grid.GetCellData().AddArray(ac)",
"def __init__(self, file_pattern, validate=True, **nucleus_kwargs):\n\n super(ReadGenomicsFile, self).__init__()\n self._source = self._source_class(\n file_pattern, validate=validate, **nucleus_kwargs)",
"def __init__(self, filename, topology_file=None, grid_type=1,\n extrapolate=False, time_offset=0,\n **kwargs):\n self._grid_type = grid_type\n\n self.filename = filename\n self.topology_file = topology_file\n\n if self._grid_type == 1:\n self.grid = CyTimeGridWindRect(filename)\n elif self._grid_type == 2:\n self.grid = CyTimeGridWindCurv(filename, topology_file)\n else:\n raise Exception('grid_type not implemented ')\n\n self.grid.load_data(filename, topology_file)\n\n super(Grid, self).__init__(**kwargs)",
"def __init__(self, path, coordkeys = \"time time_bounds TFLAG ETFLAG latitude latitude_bounds longitude longitude_bounds lat lat_bnds lon lon_bnds etam_pressure etai_pressure layer_bounds layer47 layer\".split(), delimiter = ',', names = True, **kwds):\n kwds['names'] = names\n kwds['delimiter'] = delimiter\n data = np.recfromtxt(path, **kwds)\n dimkeys = [dk for dk in data.dtype.names if dk in coordkeys]\n varkeys = [vk for vk in data.dtype.names if not vk in coordkeys]\n for dk in dimkeys:\n dv = np.unique(data[dk])\n dv.sort()\n self.createDimension(dk, len(dv))\n dvar = self.createVariable(dk, dv.dtype.char, (dk,))\n dvar[:] = dv\n \n for vk in varkeys:\n vv = data[vk]\n var = self.createVariable(vk, vv.dtype.char, tuple(dimkeys))\n for idx in np.ndindex(var.shape):\n thisidx = np.sum([data[dk] == self.variables[dk][di] for di, dk in zip(idx, dimkeys)], axis = 0) == len(dimkeys)\n if thisidx.any():\n var[idx] = vv[thisidx]",
"def read( self, Filename ):\n try:\n self.name = Filename\n Filedata = open(self.name,'r').readlines()\n self.ncols = string.atoi( Filedata[0].strip().split()[-1] )\n self.nrows = string.atoi( Filedata[1].strip().split()[-1] )\n self.xllcorner = string.atof( Filedata[2].strip().split()[-1] )\n self.yllcorner = string.atof( Filedata[3].strip().split()[-1] )\n self.cellsize = string.atof( Filedata[4].strip().split()[-1] )\n self.nodata = string.atof( Filedata[5].strip().split()[-1] )\n self.data = numpy.ones( (self.nrows, self.ncols ) ) *1.0\n row = self.nrows\n for t in Filedata[6:]:\n row -= 1\n col = -1\n values = map(string.atof, t.strip().split())\n for x in values:\n col += 1\n self.data[(row,col)] = x\n except:\n print \"Error opening grid ::\", Filename\n raise",
"def read(self, file):\n text = open(file, 'r')\n text.readline() # header crap\n text.readline()\n text.readline()\n self.__xmin = float(text.readline().rstrip().split()[2])\n self.__xmax = float(text.readline().rstrip().split()[2])\n text.readline()\n m = int(text.readline().rstrip().split()[2]) # will be self.__n soon\n text.readline()\n for i in range(m): # loop over grids\n text.readline()\n if text.readline().rstrip().split()[2] == '\"IntervalTier\"': \n # inam = text.readline().rstrip().split()[2][1:-1]\n inam = text.readline().split('=')[1].strip().strip('\"') # Joseph Keshet: handle space in the tier name\n imin = float(text.readline().rstrip().split()[2])\n imax = float(text.readline().rstrip().split()[2])\n itie = IntervalTier(inam, imin, imax) # redundant FIXME\n n = int(text.readline().rstrip().split()[3])\n for j in range(n):\n text.readline().rstrip().split() # header junk\n jmin = float(text.readline().rstrip().split()[2])\n jmax = float(text.readline().rstrip().split()[2])\n # MS changed, to account for intervals where label\n # begins with spacing\n #jmrk = text.readline().rstrip().split()[2][1:-1]\n jmrk = getMark(text)\n #\n itie.append(Interval(jmin, jmax, jmrk))\n \n self.append(itie) \n else: # pointTier\n # inam = text.readline().rstrip().split()[2][1:-1]\n inam = text.readline().split('=')[1].strip().strip('\"') # Joseph Keshet: handle space in the tier name\n imin = float(text.readline().rstrip().split()[2])\n imax = float(text.readline().rstrip().split()[2])\n itie = PointTier(inam, imin, imax) # redundant FIXME\n n = int(text.readline().rstrip().split()[3])\n for j in range(n):\n text.readline().rstrip() # header junk\n jtim = float( text.readline().rstrip().split()[2])\n jmrk = text.readline().rstrip().split()[2][1:-1]\n itie.append(Point(jtim, jmrk))\n self.append(itie)\n text.close()",
"def __init__(self):\r\n self.label = \"ProcessGeogridFile\"\r\n self.description = \"This tool takes an input WRF Geogrid file in NetCDF format\" + \\\r\n \" and uses the HGT_M grid and an input high-resolution elevation grid\" + \\\r\n \"to produce a high-resolution hydrologically processed output.\"\r\n #self.canRunInBackground = False\r\n self.canRunInBackground = True\r\n self.category = \"Processing\"",
"def read_from_file(self,fn):\n fh = open(fn,\"r\")\n labels = []\n xyz = []\n sizes = []\n colors = []\n for line in fh.readlines():\n try:\n if not line.startswith(\"#\"):\n label,x,y,z,size,r,g,b = line.split(\",\")\n labels.append(label)\n xyz.append([x,y,z])\n sizes.append(size)\n colors.append((float(r),float(g),float(b)))\n except IOError, ioe:\n print \"IOError:\", ioe\n self._labels = np.array(labels)\n self._xyz = np.array(xyz).astype(\"f\")\n self._sizes = np.array(sizes).astype(\"f\")\n self._colors = np.array(colors)",
"def read_gro(filename):\n top = Topology()\n\n with open(filename, \"r\") as gro_file:\n top.name = str(gro_file.readline().strip())\n n_atoms = int(gro_file.readline())\n coords = u.nm * np.zeros(shape=(n_atoms, 3))\n for row, _ in enumerate(coords):\n line = gro_file.readline()\n content = line.split()\n if not line:\n msg = (\n \"Incorrect number of lines in .gro file. Based on the \"\n \"number in the second line of the file, {} rows of\"\n \"atoms were expected, but at least one fewer was found.\"\n )\n raise ValueError(msg.format(n_atoms))\n\n res = content[0]\n atom_name = content[1]\n atom_id = content[2]\n coords[row] = u.nm * np.array(\n [\n float(content[3]),\n float(content[4]),\n float(content[5]),\n ]\n )\n site = Atom(name=atom_name, position=coords[row])\n\n r = re.compile(\"([0-9]+)([a-zA-Z]+)\")\n m = r.match(res)\n site.molecule = (m.group(2), int(m.group(1)))\n site.residue = (m.group(2), int(m.group(1)))\n top.add_site(site, update_types=False)\n top.update_topology()\n\n # Box information\n line = gro_file.readline().split()\n top.box = Box(u.nm * np.array([float(val) for val in line[:3]]))\n\n # Verify we have read the last line by ensuring the next line in blank\n line = gro_file.readline()\n if line:\n msg = (\n \"Incorrect number of lines in input file. Based on the \"\n \"number in the second line of the file, {} rows of atoms \"\n \"were expected, but at least one more was found.\"\n )\n raise ValueError(msg.format(n_atoms))\n\n return top"
]
| [
"0.6322648",
"0.62795305",
"0.6265699",
"0.6259004",
"0.59612995",
"0.58936375",
"0.5617548",
"0.56006193",
"0.55665076",
"0.54528695",
"0.5436702",
"0.5436639",
"0.5416839",
"0.53553003",
"0.5331694",
"0.5330871",
"0.53176844",
"0.5292191",
"0.5279908",
"0.52744323",
"0.52644247",
"0.5249434",
"0.51793706",
"0.5156529",
"0.5156251",
"0.5147808",
"0.5104796",
"0.5085934",
"0.5075352",
"0.5055295"
]
| 0.7024199 | 0 |
Will convert either active_index or (i,j,k) to global index. | def global_index( self , active_index = None, ijk = None):
return self.__global_index( active_index = active_index , ijk = ijk ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __global_index( self , active_index = None , global_index = None , ijk = None):\n\n set_count = 0\n if not active_index is None:\n set_count += 1\n\n if not global_index is None:\n set_count += 1\n\n if ijk:\n set_count += 1\n \n if not set_count == 1:\n raise ValueError(\"Exactly one of the kewyord arguments active_index, global_index or ijk must be set\")\n \n if not active_index is None:\n global_index = self._get_global_index1A( active_index )\n elif ijk:\n nx = self.getNX()\n ny = self.getNY()\n nz = self.getNZ()\n \n i,j,k = ijk\n\n if not 0 <= i < nx:\n raise IndexError(\"Invalid value i:%d Range: [%d,%d)\" % (i , 0 , nx)) \n\n if not 0 <= j < ny:\n raise IndexError(\"Invalid value j:%d Range: [%d,%d)\" % (j , 0 , ny)) \n \n if not 0 <= k < nz:\n raise IndexError(\"Invalid value k:%d Range: [%d,%d)\" % (k , 0 , nz)) \n\n global_index = self._get_global_index3( i,j,k)\n else:\n if not 0 <= global_index < self.getGlobalSize():\n raise IndexError(\"Invalid value global_index:%d Range: [%d,%d)\" % (global_index , 0 , self.getGlobalSize())) \n return global_index",
"def get_global_index( self , ijk = None , active_index = None):\n gi = self.__global_index( active_index = active_index , ijk = ijk)\n return gi",
"def get_active_index( self , ijk = None , global_index = None):\n gi = self.__global_index( global_index = global_index , ijk = ijk)\n return self._get_active_index1( gi)",
"def global_index(self):\n raise NotImplementedError",
"def local_to_global(local_index):\n return global_index.value.get(tokens.value[local_index], -1)",
"def get_ijk( self, active_index = None , global_index = None):\n i = ctypes.c_int()\n j = ctypes.c_int()\n k = ctypes.c_int()\n\n gi = self.__global_index( active_index = active_index , global_index = global_index)\n self._get_ijk1( gi , ctypes.byref(i) , ctypes.byref(j) , ctypes.byref(k))\n\n return (i.value , j.value , k.value)",
"def edit_index(state):\n node = state\n for key in (\"layers\", \"mode\"):\n node = node.get(key, {})\n return node.get(\"index\", 0)",
"def get_active_fracture_index( self , ijk = None , global_index = None):\n gi = self.__global_index( global_index = global_index , ijk = ijk)\n return self._get_active_fracture_index1( gi )",
"def _create_idx(self):\n self._idx = {}\n for idx, (L, M, N) in enumerate(self.modes):\n if L not in self._idx:\n self._idx[L] = {}\n if M not in self._idx[L]:\n self._idx[L][M] = {}\n self._idx[L][M][N] = idx",
"def get_current_index(self, index):\n\n if self.method == 1:\n current_idx = np.where((self.unassigned_data[0,:]==self.unassigned_data_relax[0,index]) & \\\n (self.unassigned_data[1,:]==self.unassigned_data_relax[1,index]) & \\\n (self.unassigned_data[2,:]==self.unassigned_data_relax[2,index]) & \\\n (self.unassigned_data[3,:]==self.unassigned_data_relax[3,index]) & \\\n (self.unassigned_data[4,:]==self.unassigned_data_relax[4,index]))\n else:\n current_idx = np.where((self.unassigned_data[0,:]==self.unassigned_data_relax[0,index]) & \\\n (self.unassigned_data[1,:]==self.unassigned_data_relax[1,index]) & \\\n (self.unassigned_data[2,:]==self.unassigned_data_relax[2,index]) & \\\n (self.unassigned_data[3,:]==self.unassigned_data_relax[3,index]))\n\n current_idx = current_idx[0][0]\n\n return current_idx",
"def __get_index_pair__(self, target_tile:Union[StaticTile, DynamicTile]) -> tuple:\n for colNum, col in enumerate(self.map):\n for rowNum, tile in enumerate(col):\n if tile == target_tile:\n return (colNum, rowNum)",
"def get_global_index1F( self , active_fracture_index):\n return self._get_global_index1F( active_fracture_index )",
"def _get_index(self):\n\n return self.index\n\n # to avoid having differences bigger than 2pi",
"def mainIndices(self):\n return self.i1, self.i2",
"def grid_to_index(mapdata, x, y):\n i = (y * mapdata.info.width) + x\n return int (i)",
"def __return_feature_index__(self, tup):\n index = self._features_index.get(tup, False)\n return index",
"def index(self):\n if hasattr(self, '_m_index'):\n return self._m_index if hasattr(self, '_m_index') else None\n\n self._m_index = (self.index_separate if self.is_index_separate else self.index_in_tag)\n return self._m_index if hasattr(self, '_m_index') else None",
"def tree_idx(tree,j1,J1,J2):\n j = j1\n for k in np.arange(J1+1,J2+1,1):\n j = tree[k]['IDX'][j]\n \n j2 = j\n return j2",
"def _get_ea_index():\n ea_index_temp = {'Address': 5, 'Agency': 10, 'City': 4, 'Country': 3,\n 'Datacenter': 7, 'Division': 8, 'Interface Name': 13,\n 'Region_List': 2, 'Requester Email': 9, 'Site': 6,\n 'VLAN Description': 11, 'IPR Designation': 16}\n return ea_index_temp",
"def getbaraidxij(self,idx_):\n i_ = ctypes.c_int32()\n j_ = ctypes.c_int32()\n res = __library__.MSK_XX_getbaraidxij(self.__nativep,idx_,ctypes.byref(i_),ctypes.byref(j_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n i_ = i_.value\n _i_return_value = i_\n j_ = j_.value\n _j_return_value = j_\n return (_i_return_value,_j_return_value)",
"def update_id2idx(self):\n self._id2idx = {}\n for n, cell in enumerate(self._cell_list):\n self._id2idx[cell.id()] = n",
"def sub2ind(self, ix, iy):\n idx = np.ravel_multi_index((ix, iy), self.shape)\n return idx",
"def getbaraidxij(self,idx_): # 3\n res,resargs = self.__obj.getbaraidxij(idx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _i_return_value,_j_return_value = resargs\n return _i_return_value,_j_return_value",
"def getGlobalIdxVals( self, i : int ):\n return range(self._layout.starts[i],self._layout.ends[i])",
"def get_indexed_param(self):\n switcher_index = self.input_param(\"switch_index\").value \n indexed_param = self.input_param(\"index_%s\" % switcher_index)\n if indexed_param is None:\n raise Exception(\"Switch index value for %s is out of bouned.\" % self)\n return indexed_param",
"def index_object(idxs=None):",
"def compute_binary_set_mappings(indexing, counts): \n ret = np.zeros_like(indexing)-1\n for vertex,index in enumerate(indexing):\n if counts[index] == 2:\n if ret[index] == -1:\n ret[index] = vertex\n return ret",
"def _index_lookup(self, key: int) -> str:\n if key in self.ind2tok:\n return self.ind2tok[key]\n else:\n return self.unk_token",
"def fixIndexes(self,ind1,ind2):\n if ind1 > 0:\n ind1 -= 1\n else:\n ind2 += 1\n return ind1,ind2",
"def find_index(self):\n current = self.from_grid\n #find index of \"*\"\n for x in range(len(current)):\n for y in range(len(current[x])):\n if current[x][y] == \"*\":\n index = (x,y)\n return index"
]
| [
"0.75835437",
"0.70099026",
"0.6881584",
"0.6451329",
"0.6263909",
"0.62555516",
"0.61454946",
"0.602932",
"0.6009992",
"0.5978144",
"0.59321",
"0.5818314",
"0.57944906",
"0.57812554",
"0.57411104",
"0.5684214",
"0.56785935",
"0.5664866",
"0.5656509",
"0.5655089",
"0.5635291",
"0.5619243",
"0.56096214",
"0.5601828",
"0.5583168",
"0.5572672",
"0.5563013",
"0.55511504",
"0.5546422",
"0.55262756"
]
| 0.7180106 | 1 |
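The `global_index` record above converts grid coordinates into a single flat index. As a hedged illustration (not the EclGrid implementation, which delegates to a C library), the self-contained sketch below assumes the common reservoir-grid ordering with i varying fastest, then j, then k, so the round trip between (i, j, k) and a global index can be checked by hand. The function names and the ordering are assumptions made for the example.

```python
def ijk_to_global(i, j, k, nx, ny, nz):
    """Flatten an (i, j, k) coordinate into a single global index.

    Assumes the conventional ordering where i varies fastest, then j,
    then k (an assumption for this sketch, not taken from the record).
    """
    if not (0 <= i < nx and 0 <= j < ny and 0 <= k < nz):
        raise IndexError("(%d, %d, %d) is outside the %dx%dx%d grid" % (i, j, k, nx, ny, nz))
    return i + j * nx + k * nx * ny


def global_to_ijk(global_index, nx, ny, nz):
    """Inverse of ijk_to_global for the same ordering."""
    if not 0 <= global_index < nx * ny * nz:
        raise IndexError("global_index %d out of range" % global_index)
    k, rest = divmod(global_index, nx * ny)
    j, i = divmod(rest, nx)
    return i, j, k


if __name__ == "__main__":
    nx, ny, nz = 4, 3, 2
    gi = ijk_to_global(2, 1, 1, nx, ny, nz)          # 2 + 1*4 + 1*12 = 18
    assert global_to_ijk(gi, nx, ny, nz) == (2, 1, 1)
    print(gi)
```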
Will convert active_index or ijk to global_index. This method will convert active_index or ijk to a global index. Exactly one of the arguments active_index, global_index or ijk must be supplied. The method is used extensively internally in the EclGrid class; most methods which take coordinate input pass through this method to normalize the coordinate representation. | def __global_index( self , active_index = None , global_index = None , ijk = None):
set_count = 0
if not active_index is None:
set_count += 1
if not global_index is None:
set_count += 1
if ijk:
set_count += 1
if not set_count == 1:
raise ValueError("Exactly one of the kewyord arguments active_index, global_index or ijk must be set")
if not active_index is None:
global_index = self._get_global_index1A( active_index )
elif ijk:
nx = self.getNX()
ny = self.getNY()
nz = self.getNZ()
i,j,k = ijk
if not 0 <= i < nx:
raise IndexError("Invalid value i:%d Range: [%d,%d)" % (i , 0 , nx))
if not 0 <= j < ny:
raise IndexError("Invalid value j:%d Range: [%d,%d)" % (j , 0 , ny))
if not 0 <= k < nz:
raise IndexError("Invalid value k:%d Range: [%d,%d)" % (k , 0 , nz))
global_index = self._get_global_index3( i,j,k)
else:
if not 0 <= global_index < self.getGlobalSize():
raise IndexError("Invalid value global_index:%d Range: [%d,%d)" % (global_index , 0 , self.getGlobalSize()))
return global_index | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def global_index(self):\n raise NotImplementedError",
"def global_index( self , active_index = None, ijk = None):\n return self.__global_index( active_index = active_index , ijk = ijk )",
"def position_to_index(self, position, grid_size):\n x, y = position\n return x * grid_size + y",
"def local_to_global(local_index):\n return global_index.value.get(tokens.value[local_index], -1)",
"def get_global_index( self , ijk = None , active_index = None):\n gi = self.__global_index( active_index = active_index , ijk = ijk)\n return gi",
"def grid_to_index(mapdata, x, y):\n i = (y * mapdata.info.width) + x\n return int (i)",
"def ego_pos_to_global_pos(self, ego_pos):\n\t\tif ego_pos.ndim == 1:\n\t\t\tego_pos_ = np.array([ego_pos[0], ego_pos[1], 1])\n\t\t\tglobal_pos = np.dot(self.T_global_ego, ego_pos_)\n\t\t\treturn global_pos[:2]\n\t\telse:\n\t\t\tego_pos_ = np.hstack([ego_pos, np.ones((ego_pos.shape[0], 1))])\n\t\t\tglobal_pos = np.dot(self.T_global_ego, ego_pos_.T).T\n\t\t\treturn global_pos[:, :2]",
"def xy_to_index(x, y):\n index = y * columns + x\n return index",
"def get_index(self, *args, **dargs):\n pass",
"def global_pos_to_ego_pos(self, global_pos):\n\t\tego_pos = np.dot(np.linalg.inv(self.T_global_ego), np.array([global_pos[0], global_pos[1], 1]))\n\t\treturn ego_pos[:2]",
"def i_index(self, coord):\n return coord + 1 if coord + 1 > self.dimensions - 1 else 0",
"def get_active_index( self , ijk = None , global_index = None):\n gi = self.__global_index( global_index = global_index , ijk = ijk)\n return self._get_active_index1( gi)",
"def position_to_index(position, grid_size):\n return position[0]*grid_size+position[1]",
"def index_to_obs(a,b, grid_x, grid_y):\n position = grid_x[a]\n velocity = grid_y[b]\n return position, velocity",
"def get_global_index1F( self , active_fracture_index):\n return self._get_global_index1F( active_fracture_index )",
"def index(self, x):\n if isinstance(x, str):\n return self.string_to_index[x]\n elif isinstance(x, tuple):\n return self.tuple_to_index[x]\n else:\n raise ValueError('x should be tuple or string; received {}'.format(\n x))",
"def global_coords(self) -> GlobalCoordsABC:",
"def index_mapping_from_parameter(self, index, *param):\n pass",
"def get_index(self, point, cell_size):\n return (point[1] / cell_size, point[0] / cell_size)",
"def get_indices(data, coarse_grid_path, proj_str, use_saved_indices=False, index_path=None):\n if use_saved_indices:\n return joblib.load(index_path)\n else:\n coarse_grid = xr.open_dataset(coarse_grid_path)\n lat, lon = data['lat'].values, data['lon'].values\n coarse_lat, coarse_lon = coarse_grid['lat'].values, coarse_grid['lon'].values\n indices = find_coord_indices(coarse_lon, coarse_lat, lon.ravel(), lat.ravel(), proj_str)\n\n return indices",
"def get_index(self, row, col):\n return (row * self.cols) + col",
"def local2global(self, local_coord, start, end, strand):\n local2global(local_coord, start, end, strand)",
"def cal_globalIndexH(self):\n h_local = self.cal_localIndexH()\n h_global = np.sum(h_local)\n\n return h_global",
"def _resolve_index(self, cls):\n # If we have just a string, it's a simple index\n if isinstance(self.index, basestring):\n return self._resolve_name(cls, self.index)\n\n # Otherwise it must be an iterable\n for i in xrange(len(self.index)):\n # Of 2-tuples\n pair = self.index[i]\n if len(pair) != 2:\n raise TypeError(\"Invalid index: {!r}\".format(self.index))\n # Where the first is the key, and the second the direction\n self.index[i] = (self._resolve_name(cls, pair[0]), pair[1])\n\n return self.index",
"def local2global(local_coord, start, end, strand):\n\n # swap if strands disagree\n if strand == 1:\n return local_coord + start\n else:\n return end - local_coord",
"def _get_indexer_level_0(self, target) -> npt.NDArray[np.intp]:\n lev = self.levels[0]\n codes = self._codes[0]\n cat = Categorical.from_codes(codes=codes, categories=lev, validate=False)\n ci = Index(cat)\n return ci.get_indexer_for(target)",
"def d_index(self, coord):\n return coord - 1 if coord - 1 < 0 else self.dimensions - 1",
"def index_to_single_index(a,b, resolution):\n return a*resolution+b",
"def _get_index(self):\n\n return self.index\n\n # to avoid having differences bigger than 2pi",
"def getIndicesGlobCurrent(lons, lats):\n if np.size(lons) == 1:\n lon_0, lon_1 = int(np.floor(lons-5)), int(np.ceil(lons+5))\n else:\n lon_0, lon_1 = int(np.round(np.min(lons))), int(np.round(np.max(lons)))\n\n if np.size(lats) == 1:\n lat_0, lat_1 = int(np.floor(lats-5)), int(np.ceil(lats+5))\n else:\n lat_0, lat_1 = int(np.round(np.min(lats))), int(np.round(np.max(lats)))\n\n lon_range = range((lon_0-5+180)*4-1, (lon_1+5+180)*4+1)\n lat_range = range((lat_0-5+80)*4-1, (lat_1+5+80)*4+1)\n\n indices = {\"lon\": lon_range,\n \"lat\": lat_range}\n\n print \"getIndicesGlobCurrent(): Success! Indices created.\"\n return indices"
]
| [
"0.64048535",
"0.58219904",
"0.5818594",
"0.5818252",
"0.5786247",
"0.5762174",
"0.5650124",
"0.55458665",
"0.5530507",
"0.54503554",
"0.54221517",
"0.5288694",
"0.5283449",
"0.5277573",
"0.52770066",
"0.52442706",
"0.5210543",
"0.5206184",
"0.51815325",
"0.51768124",
"0.5117256",
"0.5109168",
"0.5104239",
"0.5101869",
"0.5101635",
"0.5099524",
"0.5092153",
"0.5069768",
"0.5058351",
"0.5052773"
]
| 0.6640061 | 0 |
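The `__global_index` document above centres on one validation idea: exactly one of the optional keyword arguments may be supplied. The small sketch below isolates that pattern; the helper name `pick_exactly_one` is invented for the example and only the counting logic mirrors the record.

```python
def pick_exactly_one(**kwargs):
    """Return the (name, value) pair of the single keyword that was set.

    Raises ValueError when zero or more than one keyword is non-None,
    mirroring the set_count check in the record above.
    """
    supplied = [(name, value) for name, value in kwargs.items() if value is not None]
    if len(supplied) != 1:
        raise ValueError(
            "Exactly one of the keyword arguments %s must be set"
            % ", ".join(sorted(kwargs))
        )
    return supplied[0]


if __name__ == "__main__":
    print(pick_exactly_one(active_index=None, global_index=7, ijk=None))  # ('global_index', 7)
```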
Lookup active index based on ijk or global index. Will determine the active_index of a cell, based on either ijk = (i,j,k) or global_index. If the cell specified by the input arguments is not active the function will return -1. | def get_active_index( self , ijk = None , global_index = None):
gi = self.__global_index( global_index = global_index , ijk = ijk)
return self._get_active_index1( gi) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __global_index( self , active_index = None , global_index = None , ijk = None):\n\n set_count = 0\n if not active_index is None:\n set_count += 1\n\n if not global_index is None:\n set_count += 1\n\n if ijk:\n set_count += 1\n \n if not set_count == 1:\n raise ValueError(\"Exactly one of the kewyord arguments active_index, global_index or ijk must be set\")\n \n if not active_index is None:\n global_index = self._get_global_index1A( active_index )\n elif ijk:\n nx = self.getNX()\n ny = self.getNY()\n nz = self.getNZ()\n \n i,j,k = ijk\n\n if not 0 <= i < nx:\n raise IndexError(\"Invalid value i:%d Range: [%d,%d)\" % (i , 0 , nx)) \n\n if not 0 <= j < ny:\n raise IndexError(\"Invalid value j:%d Range: [%d,%d)\" % (j , 0 , ny)) \n \n if not 0 <= k < nz:\n raise IndexError(\"Invalid value k:%d Range: [%d,%d)\" % (k , 0 , nz)) \n\n global_index = self._get_global_index3( i,j,k)\n else:\n if not 0 <= global_index < self.getGlobalSize():\n raise IndexError(\"Invalid value global_index:%d Range: [%d,%d)\" % (global_index , 0 , self.getGlobalSize())) \n return global_index",
"def cell_contains( self , x , y , z , active_index = None , global_index = None , ijk = None):\n gi = self.__global_index( ijk = ijk , active_index = active_index , global_index = global_index)\n return self._cell_contains( gi , x,y,z)",
"def get_ijk( self, active_index = None , global_index = None):\n i = ctypes.c_int()\n j = ctypes.c_int()\n k = ctypes.c_int()\n\n gi = self.__global_index( active_index = active_index , global_index = global_index)\n self._get_ijk1( gi , ctypes.byref(i) , ctypes.byref(j) , ctypes.byref(k))\n\n return (i.value , j.value , k.value)",
"def get_global_index( self , ijk = None , active_index = None):\n gi = self.__global_index( active_index = active_index , ijk = ijk)\n return gi",
"def active( self , ijk = None , global_index = None):\n gi = self.__global_index( global_index = global_index , ijk = ijk)\n active_index = self._get_active_index1( gi)\n if active_index >= 0:\n return True\n else:\n return False",
"def find_cell( self , x , y , z , start_ijk = None):\n\n if start_ijk:\n start_index = self.__global_index( ijk = start_ijk )\n else:\n start_index = 0\n global_index = self._get_ijk_xyz( x , y , z , start_index)\n if global_index >= 0:\n i = ctypes.c_int()\n j = ctypes.c_int()\n k = ctypes.c_int()\n self._get_ijk1( global_index , ctypes.byref(i) , ctypes.byref(j) , ctypes.byref(k)) \n return (i.value , j.value , k.value)\n else:\n return None",
"def global_index( self , active_index = None, ijk = None):\n return self.__global_index( active_index = active_index , ijk = ijk )",
"def get_current_index(self, index):\n\n if self.method == 1:\n current_idx = np.where((self.unassigned_data[0,:]==self.unassigned_data_relax[0,index]) & \\\n (self.unassigned_data[1,:]==self.unassigned_data_relax[1,index]) & \\\n (self.unassigned_data[2,:]==self.unassigned_data_relax[2,index]) & \\\n (self.unassigned_data[3,:]==self.unassigned_data_relax[3,index]) & \\\n (self.unassigned_data[4,:]==self.unassigned_data_relax[4,index]))\n else:\n current_idx = np.where((self.unassigned_data[0,:]==self.unassigned_data_relax[0,index]) & \\\n (self.unassigned_data[1,:]==self.unassigned_data_relax[1,index]) & \\\n (self.unassigned_data[2,:]==self.unassigned_data_relax[2,index]) & \\\n (self.unassigned_data[3,:]==self.unassigned_data_relax[3,index]))\n\n current_idx = current_idx[0][0]\n\n return current_idx",
"def find_index(self):\n current = self.from_grid\n #find index of \"*\"\n for x in range(len(current)):\n for y in range(len(current[x])):\n if current[x][y] == \"*\":\n index = (x,y)\n return index",
"def __get_index_pair__(self, target_tile:Union[StaticTile, DynamicTile]) -> tuple:\n for colNum, col in enumerate(self.map):\n for rowNum, tile in enumerate(col):\n if tile == target_tile:\n return (colNum, rowNum)",
"def getbaraidxij(self,idx_):\n i_ = ctypes.c_int32()\n j_ = ctypes.c_int32()\n res = __library__.MSK_XX_getbaraidxij(self.__nativep,idx_,ctypes.byref(i_),ctypes.byref(j_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n i_ = i_.value\n _i_return_value = i_\n j_ = j_.value\n _j_return_value = j_\n return (_i_return_value,_j_return_value)",
"def get_lookup(self, cell_status, num_neighbors):\n return self.lookup[cell_status,num_neighbors]",
"def get_active_fracture_index( self , ijk = None , global_index = None):\n gi = self.__global_index( global_index = global_index , ijk = ijk)\n return self._get_active_fracture_index1( gi )",
"def findRepIdx(self, rep, cell = 1):\n\n match = self.findRep(rep = rep, cell = cell)\n return np.arange(self.atoms.shape[0])[match]",
"def getbaraidxij(self,idx_): # 3\n res,resargs = self.__obj.getbaraidxij(idx_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _i_return_value,_j_return_value = resargs\n return _i_return_value,_j_return_value",
"def cell_regular(self, active_index = None , global_index = None , ijk = None):\n gi = self.__global_index( ijk = ijk , active_index = active_index , global_index = global_index)\n return self._cell_regular( gi )",
"def cell_index(self, coord):\n\n for x in range(len(self.cell_x)):\n if coord[0] >= self.cell_x[x] and coord[0] <= self.cell_x[x] + self.cell_size:\n i = x\n\n for y in range(len(self.cell_y)):\n if coord[1] >= self.cell_y[y] and coord[1] <= self.cell_y[y] + self.cell_size:\n j = y\n\n return [i, j]",
"def get_index(self, u):\n if u == self.grid[-1]: # check if u equals last knot\n# index = len(self.grid) - 2 # pick next to last index\n index = (self.grid < u).argmin() - 1\n else:\n index = (self.grid > u).argmax() - 1\n return index",
"def _find_index(self, index, iimin=None, iimax=None):\n if iimin is None:\n aa = 0\n else:\n aa = iimin\n\n if iimax is not None:\n bb = iimax\n else:\n bb = len(self.index)-1\n \n # Check to see if the index is even in the range\n if bb < aa:\n return (False, aa)\n elif index <= self.index[aa]:\n return (index == self.index[aa], aa)\n elif index == self.index[bb]:\n return (True, bb)\n elif index > self.index[bb]:\n return (False, bb+1)\n \n # the value definitely lies inside the list, and it is neither aa\n # nor bb.\n while bb-aa>1:\n ii = (aa+bb)//2\n # Eventually, we'll hit the value\n if index == self.index[ii]:\n return (True, ii) \n elif index < self.index[ii]:\n bb = ii\n else:\n aa = ii\n # Unless the value isn't in the list.\n return (False, bb)",
"def cell_containing(self,xy,neighbors_to_test=4): \n hit = self.select_cells_nearest(xy, count=neighbors_to_test, inside=True)\n if hit is None:\n return -1\n else:\n return hit",
"def coordLookup_l(i, j, k, I, J):\n return i + j*I + k*J*I",
"def get_cell_idx(max_coord, min_coord, separator, x_current):\n lenght = max_coord - min_coord\n return max(0, min(int((x_current - min_coord) * separator / lenght), separator - 1))",
"def netInputIndex(s, current, x, y):\n # if the piece is an ally\n if s[0]:\n # if the piece is the one selected by Environment\n if current is not None and (x, y) == current:\n return 5 if s[1] else 4\n # if it's a normal piece\n else:\n return 1 if s[1] else 0\n else:\n return 3 if s[1] else 2",
"def calc_index(i, j):\n if i == j:\n print 'i and j must not be the same!'\n return -1\n if i < j:\n i, j = j, i\n\n return (i*(i-1))/2+j",
"def neighbour(t, i, j):\n v0 = t[i][(j + 1) % 3]\n v1 = t[i][(j + 2) % 3]\n\n for k in range(len(t)):\n if k != i:\n if v0 in t[k] and v1 in t[k]:\n return k\n\n return None",
"def get_idx(velocity, coord):\n d = velocity.node_intervals\n dx=d[0]\n dz=d[1]\n dy=d[2]\n mn = velocity.min_coords\n mnx=mn[0]\n mnz=mn[1]\n mny=mn[2]\n ix = int((coord[0] - mnx)/dx)\n iz = int((coord[1] - mnz)/dz)\n iy = int((coord[2] - mny)/dy)\n return (ix, iz, iy)",
"def coordLookup_ijk(l, I, J):\n k = (l // (I*J)) + 1\n j = (l - k*J*I) // I + 1\n i = l - (j*I + k*J*I)-1\n return i,j,k",
"def _get_cand_index(signature):\n\n # This map translates between the last \"I<n>\" field value and the\n # actual CAND cell index.\n INDEX_MAP = {\n 10: 0,\n 9: 1,\n 8: 2,\n 7: 3,\n 6: 4,\n }\n\n # Split the signature\n parts = signature.split(\".\")\n\n # Get the last \"I<n>\" field\n for i, word in enumerate(parts):\n if word in [\"I_hilojoint\", \"I_enjoint\"]:\n part = parts[i-1]\n break\n else:\n assert False, signature\n\n # Decode the index\n idx = int(part[1:])\n\n # Remap the index\n assert idx in INDEX_MAP, (signature, idx)\n return INDEX_MAP[idx]",
"def get_index(band_nums,chan_num):\n ch_index=np.searchsorted(band_nums,chan_num)\n return int(ch_index)",
"def get_index(self, point, cell_size):\n return (point[1] / cell_size, point[0] / cell_size)"
]
| [
"0.72220266",
"0.67399526",
"0.6588938",
"0.657614",
"0.65294176",
"0.64901483",
"0.6479824",
"0.6127703",
"0.59284616",
"0.59080535",
"0.5897826",
"0.58777493",
"0.5840648",
"0.5834276",
"0.57921964",
"0.5760958",
"0.57434833",
"0.57338476",
"0.5721414",
"0.5609708",
"0.5569182",
"0.5527598",
"0.55032235",
"0.54963094",
"0.5484124",
"0.54687285",
"0.5467487",
"0.545446",
"0.54266226",
"0.5411145"
]
| 0.7030846 | 1 |
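To make the -1 sentinel in the `get_active_index` record concrete, the sketch below builds a global-to-active lookup table from a boolean activity mask, similar in spirit to an ACTNUM array. This is an illustrative model of the mapping, not the library's internal data structure.

```python
import numpy as np


def build_active_lookup(actnum):
    """Map each global index to an active index, or -1 if the cell is inactive.

    actnum: 1-D array of 0/1 flags in global-index order (an assumed input,
    analogous to an ACTNUM keyword).
    """
    actnum = np.asarray(actnum, dtype=bool)
    lookup = np.full(actnum.size, -1, dtype=int)
    lookup[actnum] = np.arange(actnum.sum())
    return lookup


if __name__ == "__main__":
    lookup = build_active_lookup([1, 0, 1, 1, 0, 1])
    print(lookup.tolist())   # [0, -1, 1, 2, -1, 3]
    print(int(lookup[1]))    # -1: inactive cell, same convention as the record
```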
For dual porosity get the active fracture index. | def get_active_fracture_index( self , ijk = None , global_index = None):
gi = self.__global_index( global_index = global_index , ijk = ijk)
return self._get_active_fracture_index1( gi ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_global_index1F( self , active_fracture_index):\n return self._get_global_index1F( active_fracture_index )",
"def getNumActiveFracture(self):\n return self._get_active_fracture( )",
"def get_index(self):\n return (np.sqrt(self.dielectric))",
"def get_spectral_index(self):\n try:\n return self.alpha\n except AttributeError:\n return None",
"def index_of_refraction(self):\n return self.microsphere.index_of_refraction(self.wavelength)",
"def _get_index(self):\n\n return self.index\n\n # to avoid having differences bigger than 2pi",
"def focn(self):\n return self.table[1, 1] / (self.table[1, 0] + self.table[1, 1])",
"def bias_index(self):\n return _div(abs(self.FN - self.FP), self.grand_total)",
"def get_index(self):\n return self.inverted_index",
"def _getFIdx(self, featureName):\n return np.where(self.featureNames == featureName)[0][0]",
"def get_current_index(self, index):\n\n if self.method == 1:\n current_idx = np.where((self.unassigned_data[0,:]==self.unassigned_data_relax[0,index]) & \\\n (self.unassigned_data[1,:]==self.unassigned_data_relax[1,index]) & \\\n (self.unassigned_data[2,:]==self.unassigned_data_relax[2,index]) & \\\n (self.unassigned_data[3,:]==self.unassigned_data_relax[3,index]) & \\\n (self.unassigned_data[4,:]==self.unassigned_data_relax[4,index]))\n else:\n current_idx = np.where((self.unassigned_data[0,:]==self.unassigned_data_relax[0,index]) & \\\n (self.unassigned_data[1,:]==self.unassigned_data_relax[1,index]) & \\\n (self.unassigned_data[2,:]==self.unassigned_data_relax[2,index]) & \\\n (self.unassigned_data[3,:]==self.unassigned_data_relax[3,index]))\n\n current_idx = current_idx[0][0]\n\n return current_idx",
"def getUVIndex(self):\n\t\tval = grovepi.analogRead(self.uv_sensor)\n\t\tillumination_intensity = val*307\n\t\tuv_index = illumination_intensity/float(200)\n\t\treturn uv_index",
"def index(self):\n return prod([p**e + p**(e-1) for (p,e) in self.level().factor()])",
"def idx(self):\n if self._idx is None:\n self._idx = list(np.where(self.polar_angle < self.polar_max)[0])\n return self._idx",
"def _forest_field_indices(self):\n return self._ffi",
"def get_index_of_surface_gate(data, setup={}):\n alts = data['alt']\n return np.argmin(np.abs(alts), 1)",
"def get_ref_index(self):\n total_pol = self.get_compound_pol()\n molar_volume = self.get_molar_volume()\n if not total_pol:\n return None\n ref_index = np.sqrt((4 * np.pi * total_pol) / ((2.26 - 4 * np.pi / 3) * total_pol + molar_volume) + 1)\n return ref_index",
"def refractive_index(self):\n wd = np.arange(80,820,10)\n nd = self.boundary.imat.refractive_index(wd) \n\n plt.plot(wd, nd)\n\n return wd, nd",
"def get_current_s(self):\n return 1 if self.ff_states[0] else 0",
"def get_fx(self):\n return self.fx[:self.nump, :]",
"def get_fr_index(self):\n return int(self.get('fresp_index'))",
"def get_closest_fiber(self, coords, exp=None):\n if exp in [1, 2, 3]:\n sel = self.expnum = exp\n fib_idx = coords.match_to_catalog_sky(self.coords[sel])[0]\n else:\n fib_idx = coords.match_to_catalog_sky(self.coords)[0]\n return fib_idx",
"def get_closest_fiber(self, coords, exp=None):\n if exp in [1, 2, 3]:\n sel = self.expnum = exp\n fib_idx = coords.match_to_catalog_sky(self.coords[sel])[0]\n else:\n fib_idx = coords.match_to_catalog_sky(self.coords)[0]\n return fib_idx",
"def index(self) -> int:",
"def get_active_coeff(self):\r\n num_active_coeff = 0\r\n for coefficient in self.model_.coef_:\r\n if abs(coefficient) > 0:\r\n num_active_coeff += 1\r\n return num_active_coeff",
"def get_days_index(self):\n return np.where(self.np_image_matrix()[3] == 3)[0]",
"def calculateTrafoIndex(self):\n if self.S <= TRAFO_LIMIT:\n # trafoIndex = (-1 / 24200) * self.S + 5\n trafoIndex = (-10 / 403333) * self.S + 3\n if (trafoIndex >= 0.0) & (trafoIndex <= 1.0):\n return trafoIndex\n elif trafoIndex > 1:\n return 1.0\n return 0.0",
"def getNumActive(self):\n return self._get_active( )",
"def get_active_index( self , ijk = None , global_index = None):\n gi = self.__global_index( global_index = global_index , ijk = ijk)\n return self._get_active_index1( gi)",
"def activeVal(selforcls, val, index = None):\n if index is None:\n return selforcls.activeValues()\n else:\n return selforcls.activeValues()[\n index%len(selforcls.activeValues())]"
]
| [
"0.6997283",
"0.69108033",
"0.64579755",
"0.63483775",
"0.6228953",
"0.61389655",
"0.5921288",
"0.5858171",
"0.58559376",
"0.58210796",
"0.5810789",
"0.58045286",
"0.5769987",
"0.5764283",
"0.56984854",
"0.5679604",
"0.5670899",
"0.566244",
"0.5644965",
"0.56232315",
"0.56101114",
"0.56028855",
"0.56028855",
"0.55815643",
"0.5570716",
"0.555232",
"0.5531642",
"0.5531142",
"0.55123186",
"0.5501133"
]
| 0.69569004 | 1 |
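In a dual-porosity model each cell may have both a matrix and a fracture representation, and `get_active_fracture_index` answers the fracture side of that question. The toy sketch below tracks the two activity flags per cell and hands out consecutive fracture indices, falling back to -1 where no fracture is active; the data layout and the -1 convention for fractures are assumptions chosen to illustrate the idea.

```python
from dataclasses import dataclass


@dataclass
class DualPorosityCell:
    """Toy model of one grid cell in a dual-porosity run (illustrative only)."""
    matrix_active: bool
    fracture_active: bool


def active_fracture_indices(cells):
    """Assign consecutive active-fracture indices; -1 where no fracture is active."""
    indices, next_index = [], 0
    for cell in cells:
        if cell.fracture_active:
            indices.append(next_index)
            next_index += 1
        else:
            indices.append(-1)
    return indices


if __name__ == "__main__":
    cells = [DualPorosityCell(True, True),
             DualPorosityCell(True, False),
             DualPorosityCell(False, True)]
    print(active_fracture_indices(cells))  # [0, -1, 1]
```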
Will return the global index corresponding to active fracture index. | def get_global_index1F( self , active_fracture_index):
return self._get_global_index1F( active_fracture_index ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_active_fracture_index( self , ijk = None , global_index = None):\n gi = self.__global_index( global_index = global_index , ijk = ijk)\n return self._get_active_fracture_index1( gi )",
"def global_index( self , active_index = None, ijk = None):\n return self.__global_index( active_index = active_index , ijk = ijk )",
"def get_global_index( self , ijk = None , active_index = None):\n gi = self.__global_index( active_index = active_index , ijk = ijk)\n return gi",
"def global_index(self):\n raise NotImplementedError",
"def get_active_index( self , ijk = None , global_index = None):\n gi = self.__global_index( global_index = global_index , ijk = ijk)\n return self._get_active_index1( gi)",
"def __global_index( self , active_index = None , global_index = None , ijk = None):\n\n set_count = 0\n if not active_index is None:\n set_count += 1\n\n if not global_index is None:\n set_count += 1\n\n if ijk:\n set_count += 1\n \n if not set_count == 1:\n raise ValueError(\"Exactly one of the kewyord arguments active_index, global_index or ijk must be set\")\n \n if not active_index is None:\n global_index = self._get_global_index1A( active_index )\n elif ijk:\n nx = self.getNX()\n ny = self.getNY()\n nz = self.getNZ()\n \n i,j,k = ijk\n\n if not 0 <= i < nx:\n raise IndexError(\"Invalid value i:%d Range: [%d,%d)\" % (i , 0 , nx)) \n\n if not 0 <= j < ny:\n raise IndexError(\"Invalid value j:%d Range: [%d,%d)\" % (j , 0 , ny)) \n \n if not 0 <= k < nz:\n raise IndexError(\"Invalid value k:%d Range: [%d,%d)\" % (k , 0 , nz)) \n\n global_index = self._get_global_index3( i,j,k)\n else:\n if not 0 <= global_index < self.getGlobalSize():\n raise IndexError(\"Invalid value global_index:%d Range: [%d,%d)\" % (global_index , 0 , self.getGlobalSize())) \n return global_index",
"def local_to_global(local_index):\n return global_index.value.get(tokens.value[local_index], -1)",
"def global_start_index(self):\n return self._global_start_index",
"def _get_index(self):\n\n return self.index\n\n # to avoid having differences bigger than 2pi",
"def _getFIdx(self, featureName):\n return np.where(self.featureNames == featureName)[0][0]",
"def get_current_index(self):\n assert(self.is_started())\n return self.currIndex",
"def get_index(self):\n return self.index",
"def get_index(self):\n return self.index",
"def get_fr_index(self):\n return int(self.get('fresp_index'))",
"def get_index(self):\n return self.inverted_index",
"def get_index(self, _quals):\n return self._options['index']",
"def cal_globalIndexH(self):\n h_local = self.cal_localIndexH()\n h_global = np.sum(h_local)\n\n return h_global",
"def cloud_index():\n import alltheitems.cloud\n return alltheitems.cloud.index()",
"def index(self):\n return self._index",
"def currentSubIndex(self):\n logger.debug(\"Func: currentSubIndex/getter\")\n return self._currentsDict[\"currentSubIndex\"]",
"def index(self) -> int:",
"def current_index(self):\n return self._current_index",
"def edit_index(state):\n node = state\n for key in (\"layers\", \"mode\"):\n node = node.get(key, {})\n return node.get(\"index\", 0)",
"def getIndex(self):\n return self.index",
"def __return_feature_index__(self, tup):\n index = self._features_index.get(tup, False)\n return index",
"def index(self):\n return self.container['index']",
"def get_ijk( self, active_index = None , global_index = None):\n i = ctypes.c_int()\n j = ctypes.c_int()\n k = ctypes.c_int()\n\n gi = self.__global_index( active_index = active_index , global_index = global_index)\n self._get_ijk1( gi , ctypes.byref(i) , ctypes.byref(j) , ctypes.byref(k))\n\n return (i.value , j.value , k.value)",
"def get_frame_index(self, global_idx):\n vid_idx_idx = np.searchsorted(self.num_frames_array, global_idx, side='right')-1\n frame_idx = global_idx - self.num_frames_array[vid_idx_idx]\n vid_idx = self.task_ids[int(vid_idx_idx)]\n return vid_idx, frame_idx",
"def get_current_index(self, index):\n\n if self.method == 1:\n current_idx = np.where((self.unassigned_data[0,:]==self.unassigned_data_relax[0,index]) & \\\n (self.unassigned_data[1,:]==self.unassigned_data_relax[1,index]) & \\\n (self.unassigned_data[2,:]==self.unassigned_data_relax[2,index]) & \\\n (self.unassigned_data[3,:]==self.unassigned_data_relax[3,index]) & \\\n (self.unassigned_data[4,:]==self.unassigned_data_relax[4,index]))\n else:\n current_idx = np.where((self.unassigned_data[0,:]==self.unassigned_data_relax[0,index]) & \\\n (self.unassigned_data[1,:]==self.unassigned_data_relax[1,index]) & \\\n (self.unassigned_data[2,:]==self.unassigned_data_relax[2,index]) & \\\n (self.unassigned_data[3,:]==self.unassigned_data_relax[3,index]))\n\n current_idx = current_idx[0][0]\n\n return current_idx",
"def default_index(self):\n return self._default_index"
]
| [
"0.81322634",
"0.7880399",
"0.78281593",
"0.7697947",
"0.7535059",
"0.71836853",
"0.67078066",
"0.6659681",
"0.63780624",
"0.62409",
"0.6091238",
"0.59864235",
"0.59864235",
"0.5970467",
"0.5953205",
"0.5949563",
"0.5941727",
"0.5938283",
"0.5936778",
"0.59265053",
"0.5893494",
"0.5866823",
"0.584684",
"0.5844228",
"0.57977015",
"0.57811654",
"0.5765508",
"0.57011604",
"0.5693001",
"0.56593585"
]
| 0.84458816 | 0 |
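`get_global_index1F` is the inverse lookup of the previous record: given a position in the compact list of active fracture cells, recover that cell's global index. The sketch below reproduces the same inverse with a plain flag array; the flag layout is an assumption made for the example.

```python
import numpy as np


def global_index_from_fracture_index(fracture_actnum, active_fracture_index):
    """Return the global index of the cell holding this active fracture index."""
    globals_with_fracture = np.flatnonzero(np.asarray(fracture_actnum, dtype=bool))
    if not 0 <= active_fracture_index < globals_with_fracture.size:
        raise IndexError("active_fracture_index %d out of range" % active_fracture_index)
    return int(globals_with_fracture[active_fracture_index])


if __name__ == "__main__":
    print(global_index_from_fracture_index([1, 0, 0, 1, 1], 2))  # -> 4
```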
Tries to check if a cell is invalid. Cells which are used to represent numerical aquifers are typically located in UTM position (0,0); these cells have completely whacked up shape and size, and should NOT be used in calculations involving real world coordinates. To protect against this a heuristic is used to identify such cells and mark them as invalid. There might be other sources of this problem than numerical aquifers. | def cell_invalid( self , ijk = None , global_index = None , active_index = None):
gi = self.__global_index( global_index = global_index , ijk = ijk , active_index = active_index)
return self._invalid_cell( gi ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _is_valid(self):\n for cell in self._cells_iterable():\n if cell not in self._valid_elements:\n return False\n return True",
"def verify_cell_details(app, style, color, cell):\n error = ''\n found_cell = find_cell(app, cell)\n if found_cell is None:\n return 'Cell not found. may be invalid column and row in block or cell'\n # verify state: raised or normal\n if found_cell.get_style() != style:\n return f'Cell invalid style: expected {style}, actual: {found_cell.get_style()}'\n if color is not None and found_cell.get_cell_color() != color:\n return f'Cell not expected color: expected: {color}, actual: {found_cell.get_cell_color()}'\n\n return error",
"def isvalid(board, num, cell):\r\n x, y = cell\r\n row = board[x]\r\n col = transpose(board)[y]\r\n\r\n # check row if num already present\r\n if any(row[i] == num for i in range(9)):\r\n return False\r\n # check col if num already present\r\n if any(col[i] == num for i in range(9)):\r\n return False\r\n \r\n # get start position of box\r\n Xbox = (x//3) * 3\r\n Ybox = (y//3) * 3\r\n for i in range(Xbox, Xbox+3):\r\n for j in range(Ybox, Ybox+3):\r\n if board[i][j] == num:\r\n return False\r\n \r\n return True",
"def cell_invalidation(_frame, _val, row, col, grid, _col_dets):\n if col == 0:\n return _invalid_fldname(row, grid)\n elif col == 1:\n return _invalid_fldtype(row, grid)\n else:\n raise Exception('Two many columns for default cell invalidation test')",
"def is_valid(problem, i, j, e):\n row_map = row_maps[i]\n column_map = column_maps[j]\n sector_map = sector_maps[get_sector_number(i, j)]\n not_in_row = row_map[e-1] == 0\n not_in_column = column_map[e-1] == 0\n not_in_sector = sector_map[e-1] == 0\n\n return not_in_row and not_in_column and not_in_sector",
"def is_cell_valid(board, r, c):\n return is_cell_row_valid(board, r, c) or is_cell_col_valid(board, r, c)",
"def _assert_cell_no_errors(c):\n if c['cell_type'] != 'code':\n return\n errors = [\"Error name: {}, Error Value: {}\".format(o[\"ename\"], o[\"evalue\"])\n for o in c['outputs']\n if o['output_type'] == 'error']\n\n assert not errors, errors",
"def _checkCells(self):\r\n if(self.startCell.isEmpty()):\r\n raise IllegalMoveException(\"No pawn in start cell\")\r\n if(self.endCell.isOccupied()):\r\n raise IllegalMoveException(\"Targeted cell is already occupied\")\r\n return True",
"def isValid(self):\n for ir in range(self.nRow): # Check rows for duplicates\n row = ir + 1\n vals = {}\n for ic in range(self.nCol):\n col = ic + 1\n val = self.getCellVal(row=row, col=col)\n if not self.isEmpty(val):\n if val in vals:\n SlTrace.lg(f\"doing row {row} at col={col} val={val} vals={vals} invalid\")\n SlTrace.lg(f\"row:{row} vals: {self.getRowVals(row)} col:{col} vals: {self.getColVals(col)}\")\n return False\n vals[val] = val\n \n for ic in range(self.nCol): # Check cols for duplicates\n col = ic + 1\n vals = {}\n for ir in range(self.nRow):\n row = ir + 1\n val = self.getCellVal(row=row, col=col)\n if not self.isEmpty(val):\n if val in vals:\n SlTrace.lg(f\"at row={row} doing col={col} val={val} vals={vals} invalid\")\n SlTrace.lg(f\"row:{row} vals: {self.getRowVals(row)} col:{col} vals: {self.getColVals(col)}\")\n return False\n vals[val] = val\n return True",
"def validCellGeometry(self, ijk = None , global_index = None , active_index = None):\n gi = self.__global_index( global_index = global_index , ijk = ijk , active_index = active_index)\n return self._valid_cell( gi )",
"def check_if_solved(self):\n for cell in self.board.values():\n if not cell.value:\n return False\n return True",
"def _assert_cell_no_errors(c):\n if c[\"cell_type\"] != \"code\":\n return\n errors = [\n \"Error name: {}, Error Value: {}, trace: {}\".format(\n o[\"ename\"], o[\"evalue\"], \"\\n\".join(o.get(\"traceback\"))\n )\n for o in c[\"outputs\"]\n if o[\"output_type\"] == \"error\"\n ]\n\n if errors:\n pytest.fail(\"Found notebook errors: {}\".format(\"\\n\".join(errors)))",
"def is_valid(self):\n if self.get_row() != -1 and self.get_column() != -1:\n return True\n else:\n return False",
"def is_valid_number(self):\n for condition in [self.game.getRow(self.pos), self.game.getCol(self.pos), self.game.getSquare(self.pos)]:\n if not self.check_alignement_condition(condition):\n return False\n return True",
"def is_solved(self, grid: list):\n # Iterates over rows\n for i in range(9):\n\n if 0 in grid[i]: # Looks for 0s\n return False\n for j in range(9):\n if not self.validate_cell(grid, i, j): # validates each cell\n return False\n return True",
"def has_invalid_characters(self):\n return has_invalid_characters(text=self._sample_sheet.show())",
"def is_valid_position(self, somerow, somecol):\n valid_row = 0 <= somerow <= (self.size-1)\n valid_col = 0 <= somecol <= (self.size-1)\n #need to use self.size - 1 because while we're thinking of an 8x8 chess board, the computer is thinking of a 0x7 chess board\n return valid_row and valid_col",
"def is_valid_board(self):\n for (row, col), value in np.ndenumerate(self.final_values): # Iterate through each position\n if not self.__is_valid_value(row, col, value): # Check that the value is valid\n return False # An invalid (duplicate) value was found\n return True",
"def check_position_is_legal(grid, num, i, j):\n args = (grid, num, i, j)\n return (not check_row(*args)) and (not check_col(*args)) and (not check_local_square(*args))",
"def is_valid(gr, pos, num):\n \n row = pos[0]\n col = pos[1]\n \n for i in range(0, 9):\n # test row\n if(i != col and gr[row][i] == num):\n return False\n # test col\n if(i != row and gr[i][col] == num):\n return False\n\n # test 3x3 square\n small_row = floor(row / 3) * 3\n small_col = floor(col / 3) * 3\n\n for i in range(small_row, small_row + 3):\n for j in range(small_col, small_col + 3):\n if((i != row and j != col) and gr[i][j] == num):\n return False\n \n return True",
"def valid_tile(self, i, j):\n if (i >= 0 and i < self.rows) and (j >= 0 and j < self.cols):\n return True\n return False",
"def is_valid(row, peg):\n return (\n (row < TRI_SIZE) and\n (row >= 0) and\n (peg < TRI_SIZE) and\n (peg >= 0) and\n (peg <= row)\n )",
"def color_invalid(self):\n for i in self.invalid:\n self.color_cell(i, INVALID)",
"def validate_cell(self, board: list, x: int, y: int):\n # Empty dictionaries, these will contain the counts of numbers\n row_counts = {}\n col_counts = {}\n group_counts = {}\n\n for i in range(9):\n # Registers if numbers for given row and column (if they are not 0)\n if not board[y][i] == 0:\n row_counts[str(board[y][i])] = row_counts.get(\n str(board[y][i]), 0) + 1\n if not board[i][x] == 0:\n col_counts[str(board[i][x])] = col_counts.get(\n str(board[i][x]), 0) + 1\n # Finds group for both positions\n col_group = self.find_group(y)\n row_group = self.find_group(x)\n\n # Registers number in the cell's group (if they are not 0)\n for y_pos in col_group:\n for x_pos in row_group:\n if not board[y_pos][x_pos] == 0:\n group_counts[str(board[y_pos][x_pos])] = group_counts.get(\n str(board[y_pos][x_pos]), 0) + 1\n\n # Checks whether there are duplicates\n if 2 in row_counts.values() or 2 in col_counts.values() or \\\n 2 in group_counts.values():\n return False\n\n return True",
"def is_valid(self,row,col) -> bool:\n if(row >=0 and col>=0 and row<self.row and col<self.col and self.array[row][col]==-1 ):\n return True\n return False",
"def update_invalid(self):\n self.invalid = []\n for i in range(9):\n for j in range(9):\n if not self.check_if_locked((i, j)) and not self.check_entered((i, j)) and self.grid[i][j] != 0:\n self.invalid.append((i, j))",
"def is_invalid(self):\n return self._is_invalid",
"def testEmptyCell(self, row, column, gameGrid=None, emptyValue=0):\n if not gameGrid:\n gameGrid = self.gameGrid\n row = self.limitValue(row, 0, self.rows-1)\n column = self.limitValue(column, 0, self.columns-1)\n if gameGrid.getItem(row, column) == emptyValue:\n return True\n else:\n return False",
"def is_valid(self, num, position):\n\n num_rows = len(self.board)\n num_cols = len(self.board[0])\n \n # Check row for other numbers\n for i in range(num_cols):\n if self.board[position[0]][i] == num and position[1] != i:\n return False\n\n # Check column for other numbers\n for i in range(num_rows):\n if self.board[i][position[1]] == num and position[0] != i:\n return False\n \n # Check 3z3 subsquare\n box_x = position[1] // 3\n box_y = position[0] // 3\n\n for i in range(box_y * 3, box_y*3 + 3):\n for j in range(box_x * 3, box_x*3 + 3):\n if self.board[i][j] == num and (i, j) != position:\n return False\n \n return True",
"def is_valid_position(self, x, y):\n if (x > self.n_cols-1 or y > self.n_rows-1) or (x < 0 or y < 0):\n return False\n\n elif self.grid[x][y] == 3:\n return False\n\n return True"
]
| [
"0.7039616",
"0.6568949",
"0.6549772",
"0.65140575",
"0.6419735",
"0.6371772",
"0.63522184",
"0.6340446",
"0.63166225",
"0.62470424",
"0.6218878",
"0.62010264",
"0.61809516",
"0.6105728",
"0.60955465",
"0.60486126",
"0.60447174",
"0.6013415",
"0.6009598",
"0.600179",
"0.5979722",
"0.59629375",
"0.59461886",
"0.5942513",
"0.59376395",
"0.59105396",
"0.58681554",
"0.58425564",
"0.58378285",
"0.58300567"
]
| 0.69550496 | 1 |
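The `cell_invalid` record describes a heuristic rather than an exact rule: numerical-aquifer cells tend to sit at UTM position (0, 0) with degenerate geometry. The sketch below applies a comparable, made-up heuristic to corner coordinates, flagging cells whose footprint collapses to a point or sits at the origin; the corner layout and tolerance are assumptions for the example and not the library's actual test.

```python
import numpy as np


def looks_invalid(corner_xy, tol=1e-6):
    """Heuristically flag a degenerate cell.

    corner_xy: array of shape (8, 2) with the (x, y) of the cell's corners
    (assumed layout). A cell whose corners all sit at the origin, or whose
    footprint has essentially zero extent, is treated as invalid.
    """
    corners = np.asarray(corner_xy, dtype=float)
    at_origin = np.all(np.abs(corners) < tol)
    extent = corners.max(axis=0) - corners.min(axis=0)
    collapsed = np.all(extent < tol)
    return bool(at_origin or collapsed)


if __name__ == "__main__":
    aquifer_like = np.zeros((8, 2))  # all corners at (0, 0)
    normal = np.array([[0, 0], [100, 0], [0, 100], [100, 100]] * 2, dtype=float)
    print(looks_invalid(aquifer_like), looks_invalid(normal))  # True False
```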