Column            Type        Lengths / values
query             string      9 to 9.05k characters
document          string      10 to 222k characters
metadata          dict        -
negatives         sequence    30 items each
negative_scores   sequence    30 items each
document_score    string      4 to 10 characters
document_rank     string      2 distinct values
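Each row's metadata (shown below) marks it as a (query, document, negatives) triplet, so a typical consumer pairs the docstring-style query with its positive code document and the 30 mined negatives. A minimal loading sketch follows, assuming the Hugging Face `datasets` library; the dataset path `user/code-search-triplets` is a placeholder, not the real identifier, and the column names are taken from the schema above.

```python
from datasets import load_dataset  # Hugging Face `datasets` library

# Placeholder path -- substitute the actual dataset identifier.
ds = load_dataset("user/code-search-triplets", split="train")

for row in ds.select(range(2)):
    query = row["query"]             # natural-language docstring
    positive = row["document"]       # matching code snippet
    negatives = row["negatives"]     # 30 mined hard negatives
    scores = row["negative_scores"]  # similarity scores for the negatives
    print(query[:60], "->", len(negatives), "negatives,",
          "top negative score:", max(float(s) for s in scores))
```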
Sets the color at the specified cells on the gameboard.
def set_color(self, color, filled):
    for cell in filled:
        self.board[cell[0], cell[1]] = color
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_color(board, tile, color):\n for el in tile:\n el_x = el[0]\n el_y = el[1]\n board[el_x,el_y] = color", "def set(self, row: int, col: int, color: Color) -> None:\n super(ColorGrid, self).set(row, col, color)", "def change_cell_bgcolor(self, cell: tuple, color: str = \"#fefefe\") -> None:\n self.cells[cell].set_background(color)", "def setCellColor(self, row, column, color = \"CCCCCC\"):\n\n\t\t\t\tfillObject = openpyxl.styles.PatternFill(start_color = color, end_color = color, fill_type = \"solid\")\n\t\t\t\tcell = self.getCell(row = row, column = column)\n\t\t\t\tcell.fill = fillObject", "def set_color(self, c, color, draw=True):\n \n if c == self.maze.get_start_cell() or c == self.maze.get_end_cell():\n return\n self.cvs.itemconfig(self.cvs_cells[c], fill=color)\n\n if draw: self.draw()", "def set_tile_color(self, x, y, color):\n self.__tile_grid[y][x].configure(bg=color)", "def set_cells(self, val=None):\t\r\n self._cells = \\\r\n (self.nx-1 if self.nx>1 else 1)* \\\r\n (self.ny-1 if self.ny>1 else 1)* \\\r\n (self.nz-1 if self.nz>1 else 1)", "def set_cell(self, x, y, val):\n pass", "def set_cell(self, x, y, val):\n pass", "def red2blue(self):\r\n for x in range(self.xspan):\r\n for y in range(self.yspan):\r\n if (self.cells[x][y] == 1):\r\n self.cells[x][y] = 2", "def set_color(self, background_color, color):\n self.background_color = background_color\n self.tile_color = color\n self.controller.refresh_board()", "def set_all(self, color):\n for x in range(self.width):\n for y in range(self.height):\n self.set([x,y], color)", "def paint_cell(self, col, row, color):\r\n if isinstance(color, Number):\r\n self.A[row, col] = color\r\n else:\r\n self.A[row, col] = self.cdict[color]\r\n self.plot()", "def set_cell(self, cell, val):\n a = b = 0\n try:\n a, b = self.__ret_cell(cell)\n self._grid[a][b] = val\n except IndexError as e:\n self.perror(\"Error: '%s'.\" % e, cell, a, b, 5)\n self.perror(\"Error.\", cell, a, b, 5)\n sys.exit()", "def set_board(board):", "def setColour(self, col):\n\t\tself.colour = col", "def set(self, coords, colors):\n if all(isinstance(e, list) for e in coords):\n # unpack list of coordinates\n for e, c in zip(coords, colors):\n self.set(e, c)\n else:\n led_nr = self.pos_to_led_nr(coords)\n #print \"Setting LED at [%d, %d] (nr. 
%d) to color %s\" % (coords[0], coords[1], led_nr, colors)\n self.strip.setPixelColor(led_nr, colors)", "def setLeds(number: int, red: int, green: int, blue: int):\n pass", "def change_color(self, x, y, state):\n if state == 1:\n color = self.tile_color\n else:\n color = self.background_color\n self.canvas.itemconfig(self.board[(x, y)], fill=color)", "def paint_cells(self, data):\r\n if len(data) == 0: return\r\n col, row = zip(*data.keys())\r\n colors = tuple(data.values())\r\n if not isinstance(colors[0], Number):\r\n colors = [self.cdict[color] for color in colors] \r\n self.A[row, col] = colors\r\n self.plot()", "def set_tile(self, row, col, value):\r\n # replace with your code\r\n self._cells[row][col] = value", "def set_tile(self, row, col, value):\n # replace with your code\n self._cells[row][col] = value", "def change_cell(self, event):\n try:\n (x, y) = self.get_id_from_coor(event.x, event.y)\n if self._board[x][y]:\n self._board[x][y] = False\n else:\n self._board[x][y] = True\n if self._board[x][y]:\n self.canvas.itemconfig(self.rect[y,x], fill=self._secondary_color)\n else:\n self.canvas.itemconfig(self.rect[y,x], fill=self._primary_color)\n except KeyError:\n pass # tkinter bug", "def set_color(self, color):\n pass", "def set_cell(self, point, cell):\n self._grid[point.x][point.y] = cell", "def set_color(self, r=0, g=0, b=0):\n r = clamp(r)\n g = clamp(g)\n b = clamp(b)\n self._state.color = (r, g, b)\n self.send_command(Command.SET_COLOR, [int(r), int(g), int(b)])", "def set_tile(self, row, col, value):\n # replace with your code\n self.grid[row][col] = value", "def set(self, pixels):\n rgb_array = [int(x) for pixel in pixels for x in pixel.rgb]\n if self._are_rgb_arrays_equal(rgb_array, self.current_rgb_vals):\n # if led values are the same, don't bother sending. This stops\n # spamming the serial port when nothing's happening... dunno if that's necessary,\n # but it keeps me up at night.\n return\n self.current_rgb_vals = rgb_array\n self._check_values(rgb_array)\n self._set_leds(rgb_array)", "def setColor(self,value):\n\t\tself.politics = value if(type(value) is int)else int(value[1:],16)\n\t\tself.canvas.itemconfig('node_'+self.identifier,fill=self.toRGB())", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value;" ]
[ "0.66752845", "0.6670882", "0.6661175", "0.6591124", "0.6471529", "0.6251041", "0.61291766", "0.61236733", "0.61236733", "0.60985804", "0.60878223", "0.60815275", "0.60369647", "0.6032888", "0.6024978", "0.60133576", "0.60054994", "0.59898573", "0.5964465", "0.59419614", "0.593346", "0.58977455", "0.58050346", "0.5783768", "0.5776713", "0.57735705", "0.57678854", "0.5761031", "0.5759232", "0.57540256" ]
0.7248661
0
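The positive document in this row indexes `self.board` with a `(row, col)` tuple, which suggests a NumPy-backed board; the negatives are near-miss implementations against other board and grid APIs. A minimal self-contained sketch of how the positive might be used is below, assuming a NumPy array board; the `GameBoard` wrapper and its dimensions are illustrative, not taken from the source.

```python
import numpy as np

class GameBoard:
    """Illustrative wrapper; only the board storage is assumed here."""

    def __init__(self, height, width):
        self.board = np.zeros((height, width), dtype=int)

    def set_color(self, color, filled):
        # Positive document from this row: color every (row, col) cell in `filled`.
        for cell in filled:
            self.board[cell[0], cell[1]] = color


board = GameBoard(3, 3)
board.set_color(5, [(0, 0), (1, 2)])
print(board.board)  # cells (0, 0) and (1, 2) are now 5
```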
Checks if the new cell has the same color. If so, then it will be added to filled_edges.
def check_if_filled(self, new_cell, cell_color, filled_edges, filled_surrounded):
    new_cell_color = self.get_color(new_cell)
    if new_cell_color != cell_color:
        return False
    if new_cell not in filled_edges + filled_surrounded:
        filled_edges.append(new_cell)
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_filled(self, filled_edges, filled_surrounded):\n surrounded_cells = []\n for cell in filled_edges:\n coord_x = cell[1]\n coord_y = cell[0]\n color = self.get_color(cell)\n surrounded = True\n\n # up\n if coord_y - 1 >= 0:\n surrounded &= self.check_if_filled((coord_y-1, coord_x), color, filled_edges, filled_surrounded)\n\n # down\n if coord_y + 1 < self.height:\n surrounded &= self.check_if_filled((coord_y+1, coord_x), color, filled_edges, filled_surrounded)\n\n # left\n if coord_x - 1 >= 0:\n surrounded &= self.check_if_filled((coord_y, coord_x-1), color, filled_edges, filled_surrounded)\n\n # right\n if coord_x + 1 < self.width:\n surrounded &= self.check_if_filled((coord_y, coord_x+1), color, filled_edges, filled_surrounded)\n\n if surrounded:\n surrounded_cells.append(cell)\n\n for cell in surrounded_cells:\n filled_surrounded.append(cell)\n filled_edges.remove(cell)", "def add_new_cell(self, x, y, color):\n # if the origin changes then we are going to need to update all of the cells in the grid with new relative\n # positions.\n self.num_colored_cells += 1\n if color != self.color:\n self.color = -1\n x_origin_change = 0\n y_origin_change = 0\n bounding_box_change = False\n if x < self.top_left_x:\n x_origin_change = self.top_left_x - x\n self.top_left_x = x\n self.bounding_box_x_len += x_origin_change\n bounding_box_change = True\n elif x > self.top_left_x + self.bounding_box_x_len:\n self.bounding_box_x_len = x - self.top_left_x\n bounding_box_change = True\n if y < self.top_left_y:\n y_origin_change = self.top_left_y - y\n self.top_left_y = y\n self.bounding_box_y_len += y_origin_change\n bounding_box_change = True\n elif y > self.top_left_y + self.bounding_box_y_len:\n self.bounding_box_y_len = y - self.top_left_y\n bounding_box_change = True\n\n if bounding_box_change:\n new_cells = np.zeros((self.bounding_box_x_len + 1, self.bounding_box_y_len + 1), dtype=np.int32)\n new_cells[x_origin_change:len(self.cells) + x_origin_change,\n y_origin_change:len(self.cells[0]) + y_origin_change] = self.cells\n self.cells = new_cells\n self.cells[x - self.top_left_x][y - self.top_left_y] = color", "def cell(x, y):\n try:\n if cells[y][x]['filled'] == 1:\n return # this has already been processed\n except IndexError:\n return\n cells[y][x]['filled'] = 1 # this cell is now filled\n\n nn = []\n for nx, ny in neighbours(x, y):\n try:\n if cells[ny][nx]['filled']:\n nn.append(cells[ny][nx])\n except IndexError:\n continue\n \n c = 0 # colour weighting\n \n #------ Flippedness\n flipped = sum([i['inverted'] for i in nn if i['inverted']])\n cells[y][x]['inverted'] = (randint(0, 3) + flipped) % 4\n \n #------- Colour calculation\n avg_colour = sum([i['colour'][0] for i in nn]) / len(nn)\n avg_sat = sum([i['colour'][1] for i in nn]) / len(nn)\n avg_bri = sum([i['colour'][2] for i in nn]) / len(nn)\n \n # small chance of going totally random otherwise small variation from neighbours\n if random(100) > 90:\n h = randint(0, 100)\n s = randint(0, 100)\n b = randint(0, 100)\n else:\n h = (avg_colour + randint(-15, 15)) % 100\n s = (avg_sat + randint(-15, 15)) % 100\n b = (avg_bri + randint(-15, 15)) % 100\n cells[y][x]['colour'] = (h, s, b)\n \n #------- Alpha calculation\n d = sqrt((x*cell_size - rx)**2 + (y*cell_size - ry)**2) # distance from epicenter\n mx = sqrt((w-rx*cell_size)**2 + (h-ry*cell_size)**2)\n a = d/sqrt(w**2+h**2)*255\n cells[y][x]['alpha'] = a\n \n for cx,cy in neighbours(x, y):\n cell(cx, cy)", "def set_color(self, color, filled):\n for cell in filled:\n self.board[cell[0], 
cell[1]] = color", "def fill(self, color):", "def same_color(self, other: 'Piece') -> bool:\n\n return self.color == other.color", "def red(self, new_value):\r\n if self.empty is True and self.yellow is False and self.red is False and new_value is True:\r\n self._red = new_value\r\n self.empty = False\r\n else:\r\n raise DomainError('Square already full! ')", "def red2blue(self):\r\n for x in range(self.xspan):\r\n for y in range(self.yspan):\r\n if (self.cells[x][y] == 1):\r\n self.cells[x][y] = 2", "def get_color_count(self, color, filled):\n count = 0\n for cell in filled:\n coord_x = cell[1]\n coord_y = cell[0]\n\n # up\n if coord_y - 1 >= 0:\n new_cell = (coord_y-1, coord_x)\n cell_up_color = self.get_color(new_cell)\n if cell_up_color == color and new_cell not in filled:\n count += 1\n filled.append(new_cell)\n\n # down\n if coord_y + 1 < self.height:\n new_cell = (coord_y+1, coord_x)\n cell_down_color = self.get_color(new_cell)\n if cell_down_color == color and new_cell not in filled:\n count += 1\n filled.append(new_cell)\n\n # left\n if coord_x - 1 >= 0:\n new_cell = (coord_y, coord_x-1)\n cell_left_color = self.get_color(new_cell)\n if cell_left_color == color and new_cell not in filled:\n count += 1\n filled.append(new_cell)\n\n # right\n if coord_x + 1 < self.width:\n new_cell = (coord_y, coord_x+1)\n cell_right_color = self.get_color(new_cell)\n if cell_right_color == color and new_cell not in filled:\n count += 1\n filled.append(new_cell)\n\n return count", "def color_invalid(self):\n for i in self.invalid:\n self.color_cell(i, INVALID)", "def yellow(self, new_value):\r\n if self.empty is True and self.yellow is False and self.red is False and new_value is True:\r\n self._yellow = new_value\r\n self.empty = False\r\n else:\r\n raise DomainError('Square already full! 
')", "def paint_fill(image, col, row, color, orig_color):\n\n if image[row][col] != orig_color:\n return\n if row < 0 or row >= len(image) or col < 0 or col >= len(image[0]):\n return\n\n image[row][col] = color\n\n paint_fill(image, col - 1, row, color, orig_color)\n paint_fill(image, col + 1, row, color, orig_color)\n paint_fill(image, col, row - 1, color, orig_color)\n paint_fill(image, col, row + 1, color, orig_color)\n\n return", "def update_colourin(self):\n if self.fill1:\n self.ax.collections.remove(self.fill1)\n if self.fill2:\n self.ax.collections.remove(self.fill2)\n\n strengths = [np.array([1, 1, 1, 0, 0]), np.array([0, 0, 1, 1, 1])]\n edges = [[], []]\n for s in range(2):\n edges[s] = np.array(\n self.straight.p_beam_range(strengths[s]))[:, [0, 2]]\n\n beam1max = edges[0][0]\n beam1min = edges[1][0]\n beam2max = edges[1][1]\n beam2min = edges[0][1]\n\n self.fill1 = self.ax.fill_between(\n self.straight.data.photon_coordinates[0],\n beam1min, beam1max, facecolor='blue', alpha=0.2)\n self.fill2 = self.ax.fill_between(\n self.straight.data.photon_coordinates[1],\n beam2min, beam2max, facecolor='green', alpha=0.2)", "def paint_cell(self, col, row, color):\r\n if isinstance(color, Number):\r\n self.A[row, col] = color\r\n else:\r\n self.A[row, col] = self.cdict[color]\r\n self.plot()", "def update1(self):\r\n tmp = [row.copy() for row in self.grid]\r\n changed = False\r\n for y in range(self.height):\r\n for x in range(self.width):\r\n if self.grid[y][x] == '#' and 5 <= sum(\r\n self.is_occupied((x + i, y + j)) for i in [-1, 0, 1] for j in [-1, 0, 1]):\r\n # >= 5, because we also count (x,y)\r\n tmp[y][x] = 'L'\r\n changed = True\r\n elif self.grid[y][x] == 'L' and self.is_available(x, y):\r\n tmp[y][x] = '#'\r\n changed = True\r\n else:\r\n tmp[y][x] = self.grid[y][x]\r\n self.grid = tmp\r\n return changed", "def fill(self, colour: int, /) -> None:", "def CheckProperColoring(G):\r\n coloring_proper = True\r\n\r\n for vertex in G._color:\r\n #print('Vertex',vertex)\r\n #print('G._color',G._color[vertex])\r\n #print('G._adj[vertex]', G._adj[vertex])\r\n for adj_vertex in G._adj[vertex]:\r\n if G._color[vertex] == G._color[adj_vertex]:\r\n coloring_proper = False\r\n #end\r\n #end\r\n #end\r\n\r\n return coloring_proper", "def setCellColor(self, row, column, color = \"CCCCCC\"):\n\n\t\t\t\tfillObject = openpyxl.styles.PatternFill(start_color = color, end_color = color, fill_type = \"solid\")\n\t\t\t\tcell = self.getCell(row = row, column = column)\n\t\t\t\tcell.fill = fillObject", "def change_cell_bgcolor(self, cell: tuple, color: str = \"#fefefe\") -> None:\n self.cells[cell].set_background(color)", "def is_on_the_edge(self):\r\n if self.for_color == 1:\r\n new_sum = np.count_nonzero(self.result_board.state[:, 0] == 1) + np.count_nonzero(\r\n self.result_board.state[:, 4] == 1) + np.count_nonzero(self.result_board.state[0, 1:4] == 1) +\\\r\n np.count_nonzero(self.result_board.state[4, 1:4] == 1)\r\n else:\r\n new_sum = np.count_nonzero(self.result_board.state[:, 0] == 2) + np.count_nonzero(\r\n self.result_board.state[:, 4] == 2) + np.count_nonzero(\r\n self.result_board.state[0, 1:4] == 2) + np.count_nonzero(\r\n self.result_board.state[4, 1:4] == 2)\r\n\r\n self.priority += (-new_sum) * 0.1", "def solve(arr, pos, color):\n i = 0\n same_color = [pos]\n while i < len(same_color):\n for j in get_neighbors(arr, same_color[i], arr[pos[0]][pos[1]]):\n if j not in same_color:\n same_color.append(j)\n i += 1\n for i in same_color:\n arr[i[0]][i[1]] = color\n return arr", "def fill(self, 
x, y, color):\n raise NotImplementedError # Override this function in the Solution classes", "def change_cell(self, event):\n try:\n (x, y) = self.get_id_from_coor(event.x, event.y)\n if self._board[x][y]:\n self._board[x][y] = False\n else:\n self._board[x][y] = True\n if self._board[x][y]:\n self.canvas.itemconfig(self.rect[y,x], fill=self._secondary_color)\n else:\n self.canvas.itemconfig(self.rect[y,x], fill=self._primary_color)\n except KeyError:\n pass # tkinter bug", "def shade_locked_cells(self):\n for i in range(9):\n for j in range(9):\n if self.grid_check[i][j] != 0:\n self.color_cell(pos=(i, j), color=LOCKED_CELL)", "def region_growing_from_input(self, color, bone_from_scan=None):\n collect()\n # initilize\n if not bone_from_scan:\n self.load_original_data()\n else:\n self.copy_original_from_bone(bone_from_scan)\n checked = zeros(self._original_img_data.shape)\n seg = zeros(self._original_img_data.shape)\n need_to_check = []\n # Color the seeds and check for neighbors\n for seed in self._seeds_points:\n seg[seed] = color\n checked[seed] = 1\n neighbors = self._get_neighbors(seed, checked, self.\n _original_img_data.shape)\n for neighbor in neighbors:\n if self._get_threshold(self._original_img_data[neighbor],\n VOID_VALUES[0],\n VOID_VALUES[1]):\n need_to_check.append(neighbor)\n # Region Growing - while there's a neighbor, color it and keep going\n bone_to_check = []\n while need_to_check:\n pt = need_to_check.pop()\n if checked[pt] == 1:\n continue\n else:\n checked[pt] = 1\n neighbors = self._get_neighbors(pt, checked, self.\n _original_img_data.shape)\n for neighbor in neighbors:\n if self._get_threshold(\n self._original_img_data[neighbor],\n VOID_VALUES[0], VOID_VALUES[1]):\n need_to_check.append(neighbor)\n if self._get_threshold(\n self._original_img_data[neighbor],\n BONE_BOUND_VALUES[0], BONE_BOUND_VALUES[1]):\n bone_to_check.append(neighbor)\n seg[pt] = color\n # Closing holes\n del need_to_check\n # check for Bone value - edge of the radius\n while bone_to_check:\n pt = bone_to_check.pop()\n if checked[pt] == 1:\n continue\n else:\n checked[pt] = 1\n neighbors = self._get_neighbors(pt, checked, self.\n _original_img_data.shape)\n for neighbor in neighbors:\n if self._get_threshold(\n self._original_img_data[neighbor],\n RADIUS_VALUES[0], RADIUS_VALUES[1]):\n bone_to_check.append(neighbor)\n seg[pt] = color\n del checked, bone_to_check\n for i in range(self._dilation):\n seg = dilation(seg, cube(3, uint8))\n for i in range(self._dilation - 1):\n seg = erosion(seg, cube(3, uint8))\n self._segmentation_data = seg\n del seg\n collect()", "def test_exist_and_change(self):\n colorList = ColorList()\n prev = colorList.pickColor()\n self.assertIsNotNone(prev)\n for i in range(100):\n color = colorList.pickColor()\n self.assertIsNotNone(color)\n self.assertTrue(color.r != prev.r or color.g != prev.g or color.b != prev.b)\n prev = color", "def push_color(self, color):\n self[color.name] = color\n # for every added new color, set the map as colored\n self.black_and_white = False", "def _greedy_color(self, source):\n for target in self.graph.iteradjacent(source):\n if self.color[target] is not None:\n self._color_list[self.color[target]] = True\n for c in xrange(self.graph.v()): # check colors\n if not self._color_list[c]:\n self.color[source] = c\n break\n for target in self.graph.iteradjacent(source):\n if self.color[target] is not None:\n self._color_list[self.color[target]] = False\n return c", "def test_nan_color_copy():\n\n data = np.zeros((16, 16))\n\n f1 = 
FITSFigure(data)\n f1.show_grayscale()\n f1.set_nan_color('blue')\n\n f2 = FITSFigure(data)\n f2.show_grayscale()\n f2.set_nan_color('red')\n\n assert f1.image.get_cmap()._rgba_bad == (0.0, 0.0, 1.0, 1.0)\n assert f2.image.get_cmap()._rgba_bad == (1.0, 0.0, 0.0, 1.0)", "def to_fill(self, color:list): \n return {\n 'bbox' : list(self.bbox), \n 'color': rgb_value(color)\n }" ]
[ "0.68136656", "0.6298156", "0.62293226", "0.61843455", "0.60629505", "0.6019774", "0.5996726", "0.59899193", "0.59833056", "0.59146136", "0.58846736", "0.58377993", "0.57722366", "0.5716285", "0.5706663", "0.56588453", "0.5620642", "0.5616831", "0.56144696", "0.56116164", "0.55945003", "0.558724", "0.55178183", "0.5516886", "0.5515216", "0.54962546", "0.5494036", "0.5485145", "0.54759216", "0.5471445" ]
0.8191558
0
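The positive document in this row reads a cell's color through `self.get_color` and records same-colored cells in `filled_edges`, which the first negative (`update_filled`) then uses to decide whether a cell is fully surrounded. A minimal sketch of that contract, with `get_color` backed by a plain list-of-lists grid, is given below; the `Grid` class itself is an assumption made for illustration.

```python
class Grid:
    """Illustrative holder for a 2-D color grid (assumption, not from the source)."""

    def __init__(self, rows):
        self.grid = rows

    def get_color(self, cell):
        return self.grid[cell[0]][cell[1]]

    def check_if_filled(self, new_cell, cell_color, filled_edges, filled_surrounded):
        # Positive document from this row, unchanged in behavior.
        new_cell_color = self.get_color(new_cell)
        if new_cell_color != cell_color:
            return False
        if new_cell not in filled_edges + filled_surrounded:
            filled_edges.append(new_cell)
        return True


g = Grid([[1, 1], [2, 1]])
edges, surrounded = [(0, 0)], []
g.check_if_filled((0, 1), 1, edges, surrounded)  # same color -> appended
g.check_if_filled((1, 0), 1, edges, surrounded)  # different color -> returns False
print(edges)  # [(0, 0), (0, 1)]
```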
Counts the number of adjacent cells of the specified color.
def get_color_count(self, color, filled):
    count = 0
    for cell in filled:
        coord_x = cell[1]
        coord_y = cell[0]

        # up
        if coord_y - 1 >= 0:
            new_cell = (coord_y-1, coord_x)
            cell_up_color = self.get_color(new_cell)
            if cell_up_color == color and new_cell not in filled:
                count += 1
                filled.append(new_cell)

        # down
        if coord_y + 1 < self.height:
            new_cell = (coord_y+1, coord_x)
            cell_down_color = self.get_color(new_cell)
            if cell_down_color == color and new_cell not in filled:
                count += 1
                filled.append(new_cell)

        # left
        if coord_x - 1 >= 0:
            new_cell = (coord_y, coord_x-1)
            cell_left_color = self.get_color(new_cell)
            if cell_left_color == color and new_cell not in filled:
                count += 1
                filled.append(new_cell)

        # right
        if coord_x + 1 < self.width:
            new_cell = (coord_y, coord_x+1)
            cell_right_color = self.get_color(new_cell)
            if cell_right_color == color and new_cell not in filled:
                count += 1
                filled.append(new_cell)

    return count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_colors(board, color):\n n = 0\n for cell in board:\n if cell == color:\n n += 1\n elif cell == cinv(color):\n n -= 1\n return n", "def countDiff(self, color):\n count = 0\n for y in range(self.n):\n for x in range(self.n):\n if self[x][y]==color:\n count += 1\n if self[x][y]==-color:\n count -= 1\n return count", "def count_pixels_of_certain_color(\n self, color: Tuple[int, int, int]\n ) -> int:\n image = self.image\n\n color = CVUtils.rgb_to_bgr(color)\n\n mask = cv2.inRange(image, color, color)\n return cv2.countNonZero(mask)", "def count(self,color):\n count = 0\n for y in range(0,HEIGHT):\n for x in range(0,WIDTH):\n if(self.gameState[x,y]==color):\n count+=1\n return count", "def sum_color(self, board, color):\n sum_of_color = 0\n for i in range(board.size):\n for j in range(board.size):\n if board.board[i][j].name == color:\n sum_of_color += 1\n return sum_of_color", "def rec_count(color : str) -> int:\n return sum(\n (1 + rec_count(child)) * count\n for child, count in contents[color].items()\n )", "def get_disk_count(self, self_color, board):\r\n count = 0\r\n for r in range(8):\r\n for c in range(8):\r\n if board[r][c] == self_color:\r\n count += 1\r\n return count", "def currentScore(self, playerColor):\n total = 0\n for col in range(0, 8):\n for row in range(0, 8):\n if self.board[col][row].color == playerColor:\n total+=1\n return total", "def count_neighbors(self, row, col):\n neighbors = 0\n neighbors += self.get_cell_value(row - 1, col - 1)\n neighbors += self.get_cell_value(row - 1, col)\n neighbors += self.get_cell_value(row - 1, col + 1)\n neighbors += self.get_cell_value(row, col - 1)\n neighbors += self.get_cell_value(row, col + 1)\n neighbors += self.get_cell_value(row + 1, col - 1)\n neighbors += self.get_cell_value(row + 1, col)\n neighbors += self.get_cell_value(row + 1, col + 1)\n\n return neighbors", "def _count_adj_occupied(grid: List[List[str]], row: int, col: int) -> int:\n count = 0\n if row - 1 >= 0:\n if col - 1 >= 0:\n count += 1 if grid[row - 1][col - 1] == '#' else 0\n if col + 1 < len(grid[0]):\n count += 1 if grid[row - 1][col + 1] == '#' else 0\n count += 1 if grid[row - 1][col] == '#' else 0\n if row + 1 < len(grid):\n if col - 1 >= 0:\n count += 1 if grid[row + 1][col - 1] == '#' else 0\n if col + 1 < len(grid[0]):\n count += 1 if grid[row + 1][col + 1] == '#' else 0\n count += 1 if grid[row + 1][col] == '#' else 0\n if col - 1 >= 0:\n count += 1 if grid[row][col - 1] == '#' else 0\n if col + 1 < len(grid[0]):\n count += 1 if grid[row][col + 1] == '#' else 0\n return count", "def G_colour_count(self, r, b=-1):\n counts = [0 for _ in range(r)]\n for row in self.G_colour_tableau(r,b):\n for cell_colour in row:\n counts[cell_colour] += 1\n assert sum(counts) == self.size()\n return counts", "def getAdjacentCount(grid, x, y, X, Y, char):\n count = 0\n try{\n if x == 0:\n\n if y == 0:\n\n if x == X-1:\n\n if y == Y-1:\n }", "def get_neighbours_count(self, cell: Position) -> int:\n possible_neighbours = self.get_neighbours(cell)\n return sum(self.is_alive(n) for n in possible_neighbours)", "def get_neighbors_of(cell, board):\n count = 0\n (x, y) = cell\n for cell in board:\n if cell == (x - 1, y - 1):\n count += 1\n elif cell == (x, y - 1):\n count += 1\n elif cell == (x + 1, y - 1):\n count += 1\n elif cell == (x - 1, y):\n count += 1\n elif cell == (x + 1, y):\n count += 1\n elif cell == (x - 1, y + 1):\n count += 1\n elif cell == (x, y + 1):\n count += 1\n elif cell == (x + 1, y + 1):\n count += 1\n return count", "def 
count_neighbors(lights, r, c):\n neighbors = 0\n\n if r > 0 and c > 0: # 1\n neighbors += 1 if lights[r - 1][c - 1] == \"#\" else 0\n\n if r > 0: # 2\n neighbors += 1 if lights[r - 1][c] == \"#\" else 0\n\n if r > 0 and c < GRID_SIZE - 1: # 3\n neighbors += 1 if lights[r - 1][c + 1] == \"#\" else 0\n\n if c < GRID_SIZE - 1: # 4\n neighbors += 1 if lights[r][c + 1] == \"#\" else 0\n\n if r < GRID_SIZE - 1 and c < GRID_SIZE - 1: # 5\n neighbors += 1 if lights[r + 1][c + 1] == \"#\" else 0\n\n if r < GRID_SIZE - 1: # 6\n neighbors += 1 if lights[r + 1][c] == \"#\" else 0\n\n if r < GRID_SIZE - 1 and c > 0: # 7\n neighbors += 1 if lights[r + 1][c - 1] == \"#\" else 0\n\n if c > 0: # 8\n neighbors += 1 if lights[r][c - 1] == \"#\" else 0\n\n return neighbors", "def count_legal_moves(board, color):\n return len(legal_moves(board, color))", "def _adjacent_blob_size(self, pos, board, visited) -> int:\n col, row = pos[0], pos[1]\n total = 0\n total += self._undiscovered_blob_size((col - 1, row), board, visited)\n total += self._undiscovered_blob_size((col, row - 1), board, visited)\n total += self._undiscovered_blob_size((col + 1, row), board, visited)\n total += self._undiscovered_blob_size((col, row + 1), board, visited)\n return total", "def countNeighbors(row, col, A):\n h = len(A)\n w = len(A[0])\n count = 0\n for x in range(-1, 2, 1):\n for y in range(-1, 2, 1):\n if abs(x) + abs(y) != 0:\n count += A[row+x][col+y]\n return count", "def countOccupied(data):\n\tcounter = 0\n\n\t# loop through rows and columns and\n\t# count the number of '#'s\n\tfor r in range(len(data)):\n\t\tfor c in range(len(data[r])):\n\t\t\tif data[r][c] == '#':\n\t\t\t\tcounter += 1\n\n\treturn counter", "def _count_living_neighbors(self, cell: Cell) -> int:\n count = 0\n # borders of the area in which we are trying to find neighbors\n # Let's assume y axis directs downside and x axis directs to the left\n \n for x in range(cell.x - 1, cell.x + 2):\n for y in range(cell.y - 1, cell.y + 2):\n if cell.x == x and cell.y == y:\n continue\n if (x, y) in self.living_cells.keys():\n count += 1\n \n return count", "def distict_color_count(img):\n return Counter([tuple(colors) for i in img for colors in i])", "def distict_color_count(img):\n return Counter([tuple(colors) for i in img for colors in i])", "def count_islands(matrix):\n visited = init_visited(matrix)\n num_islands = 0\n for i in range(len(matrix)):\n for j in range(len(matrix)):\n if matrix[i][j] and not visited[i][j]:\n check_neighbours(matrix, (i, j), visited)\n num_islands += 1\n # print(visited)\n return num_islands", "def _count_seen_occupied(grid: List[List[str]], row: int, col: int) -> int:\n count = 0\n for dx in [-1, 0, 1]:\n for dy in [-1, 0, 1]:\n if not (dx == 0 and dy == 0):\n count += 1 if _is_occupied(grid, row, col, dx, dy) else 0\n return count", "def count_winning_blocks(self, gameboard):\r\n count = {'red':0.1, 'blue':0.1}\r\n for x in range(gameboard.height):\r\n for y in range(gameboard.width):\r\n position = (x, y)\r\n h = gameboard.check_horizontal_state(position)\r\n v = gameboard.check_vertical_state(position)\r\n d1 = gameboard.check_diag_1_state(position)\r\n d2 = gameboard.check_diag_2_state(position)\r\n for state in [h, v, d1, d2]:\r\n if ((state.count('red') + state.count('x') == 5)\r\n and (state.count('red') > 0)):\r\n count['red'] += np.power(3, (state.count('red') - 1))\r\n elif ((state.count('blue') + state.count('x') == 5)\r\n and (state.count('blue') > 0)):\r\n count['blue'] += np.power(3, (state.count('blue') - 1))\r\n return 
count", "def neighbor(board, x, y, n, m):\n deltas = (\n (-1, -1), (-1, 0), (-1, 1),\n (0, -1), (0, 1),\n (1, -1), (1, 0), (1, 1),\n )\n count = 0\n for dx, dy in deltas:\n xx = x + dx\n yy = y + dy\n if xx >= 0 and xx < n and yy >= 0 and yy < m and board[xx][yy] % 2 == 1:\n count += 1\n\n return count", "def count_island(row, col, island):\n count = 0\n for i in range(row):\n for j in range(col):\n count = count + floodfill(i, j, row, col, island)\n return count", "def get_neighbors(self, line, col):\n neighbors = 0\n for line_shift in [-1, 0, 1]:\n for col_shift in [-1, 0, 1]:\n if line_shift == 0 and col_shift == 0:\n continue # Do not count given cell\n # % connects left/right and up/down\n i = (line + line_shift) % self.lines\n j = (col + col_shift) % self.cols\n if self[i][j] == self.cell_state['alive']:\n neighbors += 1\n return neighbors", "def on_the_edge_without_neighbors(self, board, color):\n disks_on_the_edge_without_neighbors = 0\n disks_on_the_edge = self.get_on_edge(board, color)\n for disk_on_edge in disks_on_the_edge:\n if not self.get_opposite_neighbors_on_edge(board, disk_on_edge):\n disks_on_the_edge_without_neighbors += 1\n return disks_on_the_edge_without_neighbors", "def neighbor_count(A):\n sum2 = lambda A, B: map2(add, A, B)\n neighbors = ((-1, -1), (-1, 0), (-1, 1),\n (0, -1), (0, 1),\n (1, -1), (1, 0), (1, 1))\n return reduce(sum2,\n map(lambda d: rotate2(A, d[0], d[1]),\n neighbors))" ]
[ "0.7808628", "0.7358172", "0.7177182", "0.6915037", "0.6702927", "0.66539484", "0.634662", "0.6150625", "0.60990244", "0.6096208", "0.6083482", "0.59975606", "0.5963964", "0.59438294", "0.5927667", "0.5881134", "0.58247036", "0.58202237", "0.5811849", "0.5797079", "0.57578915", "0.57578915", "0.56839573", "0.5674444", "0.5657674", "0.5631109", "0.5623052", "0.5595438", "0.5498465", "0.54774517" ]
0.74115384
1
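Note that the positive document in this row mutates the `filled` list it is given: every matching neighbor is both counted and appended, so the walk spreads through the list as it is iterated and no neighbor is counted twice. The four up/down/left/right blocks differ only in offset and bounds check, so a condensed but behaviorally equivalent sketch that loops over the four offsets is shown below; the `Board` wrapper, its `height`/`width` fields, and the grid contents are assumptions made for the example.

```python
class Board:
    """Illustrative board; only height, width and get_color are assumed by the document."""

    def __init__(self, grid):
        self.grid = grid
        self.height = len(grid)
        self.width = len(grid[0])

    def get_color(self, cell):
        return self.grid[cell[0]][cell[1]]

    def get_color_count(self, color, filled):
        # Behaviorally equivalent to the positive document: the four
        # up/down/left/right blocks are collapsed into one loop over offsets.
        count = 0
        for cell in filled:
            coord_y, coord_x = cell
            for dy, dx in ((-1, 0), (1, 0), (0, -1), (0, 1)):
                new_cell = (coord_y + dy, coord_x + dx)
                if not (0 <= new_cell[0] < self.height and 0 <= new_cell[1] < self.width):
                    continue
                if self.get_color(new_cell) == color and new_cell not in filled:
                    count += 1
                    filled.append(new_cell)  # side effect: the caller's list grows
        return count


b = Board([[1, 1, 2],
           [2, 1, 2]])
filled = [(0, 0)]
print(b.get_color_count(1, filled))  # 2: cells (0, 1) and (1, 1) are reached
print(filled)                        # [(0, 0), (0, 1), (1, 1)]
```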
Find appropriate files for the subject and download them. Search through all files for all acquisitions for all sessions for this subject and download only the T1 NIfTI files. If a file name is repeated, a number is prepended. Troublesome characters in the file name are replaced with "_". The file's original name, full path, and creation date are logged.
def find_and_download_files(context):

    input_path = 'input/'
    if os.path.isdir(input_path):
        log.debug('Path already exists: ' + input_path)
    else:
        log.debug('Creating: ' + input_path)
        os.mkdir(input_path)

    fw = context.client

    if 'classification_measurement' in context.config:
        class_meas = context.config['classification_measurement'].split()
    else:
        class_meas = ['T1']

    # session and acquisition include/exclude lists can come from:
    #   project info metadata,
    #   subject info metadata, and
    #   config options
    # The last one wins (how about getting it from an input file also, eh?)
    ses_exclude_list = None
    ses_include_list = None
    acq_exclude_list = None
    acq_include_list = None

    fs = 'freesurfer_longitudinal_'

    where = 'Found in project info'
    # check for exclude/include lists of regexs for sessions in project info
    sel = context.gear_dict['project'].info.get(fs + 'session_excludelist')
    if sel:
        ses_exclude_list = sel.split()
        log.info(where+' '+fs+'session_excludelist: "'+sel+'"')
    sil = context.gear_dict['project'].info.get(fs + 'session_includelist')
    if sil:
        ses_include_list = sil.split()
        log.info(where+' '+fs+'session_includelist: "'+sil+'"')
    # check for exclude/include lists of regexs for acquisitions in project info
    ael = context.gear_dict['project'].info.get(fs + 'acquisition_excludelist')
    if ael:
        acq_exclude_list = ael.split()
        log.info(where+' '+fs+'acquisition_excludelist: "'+ael+'"')
    ail = context.gear_dict['project'].info.get(fs + 'acquisition_includelist')
    if ail:
        acq_include_list = ail.split()
        log.info(where+' '+fs+'acquisition_includelist: "'+ail+'"')

    where = 'Found in subject info'
    # check for exclude/include lists of regexs for sessions in subject info
    sel = context.gear_dict['subject'].info.get(fs + 'session_excludelist')
    if sel:
        ses_exclude_list = sel.split()
        log.info(where+' '+fs+'session_excludelist: "'+sel+'"')
    sil = context.gear_dict['subject'].info.get(fs + 'session_includelist')
    if sil:
        ses_include_list = sil.split()
        log.info(where+' '+fs+'session_includelist: "'+sil+'"')
    # check for exclude/include lists of regexs for acquisitions in subject info
    ael = context.gear_dict['subject'].info.get(fs + 'acquisition_excludelist')
    if ael:
        acq_exclude_list = ael.split()
        log.info(where+' '+fs+'acquisition_excludelist: "'+ael+'"')
    ail = context.gear_dict['subject'].info.get(fs + 'acquisition_includelist')
    if ail:
        acq_include_list = ail.split()
        log.info(where+' '+fs+'acquisition_includelist: "'+ail+'"')

    where = 'Found in config'
    # set up exclude/include lists of regexs for sessions in config
    if 'session_excludelist' in context.config:
        ses_exclude_list = context.config['session_excludelist'].split()
        log.info(where+' session_excludelist: "'+str(ses_exclude_list)+'"')
    if 'session_includelist' in context.config:
        ses_include_list = context.config['session_includelist'].split()
        log.info(where+' session_includelist: "'+str(ses_include_list)+'"')
    # set up exclude/include lists of regexs for acquisitions in config
    if 'acquisition_excludelist' in context.config:
        acq_exclude_list = context.config['acquisition_excludelist'].split()
        log.info(where+' acquisition_excludelist: "'+str(acq_exclude_list)+'"')
    if 'acquisition_includelist' in context.config:
        acq_include_list = context.config['acquisition_includelist'].split()
        log.info(where+' acquisition_includelist: "'+str(acq_include_list)+'"')

    # go through all sessions, acquisitions to find files
    for session in context.gear_dict['subject'].sessions():

        lemme_out = False
        if ses_exclude_list:
            for regex in ses_exclude_list:
                if re.search(regex, session.label):  # if excluded, skip
                    log.info('Session "' + session.label + '" matches ' + \
                             'exclusion regex, skipping it')
                    lemme_out = True
                    continue
        if lemme_out:
            continue

        if ses_include_list:
            match = False
            for regex in ses_include_list:
                if not re.search(regex, session.label):
                    match = True
            if match:
                continue  # if not included (matches any regex), skip
            else:
                log.info('Session "' + session.label + '" matches ' \
                         'an inclusion regex, keeping it')

        for acquisition in fw.get_session_acquisitions(session.id):

            lemme_out = False
            if acq_exclude_list:
                for regex in acq_exclude_list:
                    if re.search(regex, acquisition.label):  # if excluded, skip
                        log.info('Acquisition "' + acquisition.label + \
                                 '" matches exclusion regex, skipping it')
                        lemme_out = True
                        continue
            if lemme_out:
                continue

            if acq_include_list:
                match = False
                for regex in acq_include_list:
                    if not re.search(regex, acquisition.label):
                        match = True
                if match:
                    continue  # if not included (matches any regex), skip
                else:
                    log.info('Acquisition "' + acquisition.label + '" ' + \
                             'matches an inclusion regex, keeping it')

            for afile in acquisition.files:

                # Scan must be nifti
                if afile.type == 'nifti':

                    found_one = False
                    for cm in class_meas:
                        if 'Measurement' in afile.classification:
                            if cm in afile.classification['Measurement']:
                                found_one = True
                                log.info('Found ' + cm + ' file')

                    if found_one:
                        download_it(fw, acquisition, afile.name, input_path)
                        context.gear_dict['visits'].append(
                            make_file_name_safe(session.label, '_'))
                    else:
                        log.info('Ignoring ' + afile.name)
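The function above leans on two helpers that are not shown in this row: download_it, which fetches the NIfTI file into input_path, and make_file_name_safe, which the query says replaces troublesome characters with "_". A hypothetical sketch of the latter is given below purely for illustration; the name matches the call site, but the signature and regex are assumptions, not the gear's actual code.

```python
import re

def make_file_name_safe(label, replace_char='_'):
    # Hypothetical helper: keep letters, digits, '.', '-' and '_', and
    # replace everything else (spaces, slashes, parentheses, ...) with replace_char.
    safe = re.sub(r'[^A-Za-z0-9._-]+', replace_char, label)
    return safe.strip(replace_char)

print(make_file_name_safe('Session 01 / baseline (T1)'))  # Session_01_baseline_T1
```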
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_subject(self, subject_ID):\r\n hcp_data_path = os.path.join(self.hcp_directory, str(subject_ID))\r\n\r\n if not os.path.exists(hcp_data_path):\r\n os.makedirs(hcp_data_path)\r\n\r\n print('Downloading data to {}'.format(hcp_data_path))\r\n\r\n counter = 0\r\n for key in self.s3_bucket.list(\"HCP_1200\"):\r\n path = pathlib.Path(key.name)\r\n if (\r\n len(path.parts) == 5 and\r\n subject_ID == int(path.parts[1]) and\r\n path.parts[-2] == \"Diffusion\"\r\n ):\r\n if (\r\n 'bval' in path.parts[-1] or\r\n 'bvec' in path.parts[-1] or\r\n 'data' in path.parts[-1] or\r\n 'nodif' in path.parts[-1]\r\n ):\r\n print('Downloading {}'.format(path.parts[-1]))\r\n filepath = os.path.join(hcp_data_path, path.parts[-1])\r\n with open(filepath, 'wb') as f:\r\n key.get_contents_to_file(f)\r\n counter += 1\r\n if counter == 4:\r\n break", "def download_files(self):", "def file_names(acqfolder):\n log.info('anonymizer.py file_names {0}'.format(acqfolder))\n\n subj_path = path(acqfolder)\n\n done = -1\n for ext in dicom_file_extensions:\n file_lst = subj_path.glob('*' + ext)\n if file_lst:\n rename_file_group_to_serial_nums(file_lst)\n done = 0\n\n return done", "def download_files(self, inpDate):\n # construct day of year from date\n inpDoY = inpDate.timetuple().tm_yday\n strDoY = str(inpDoY)\n if inpDoY < 10:\n strDoY = \"00\" + str(inpDoY)\n if ( inpDoY > 10) & (inpDoY < 100):\n strDoY = \"0\" + str(inpDoY)\n\n dwnldUrl = self.baseUrl +\\\n \"data_fetch_l1c_imaging_v013?y=\"+\\\n str(inpDate.year) + \"&d=\"+strDoY\n driver = webdriver.Chrome()\n driver.get(dwnldUrl)\n\n try:\n element = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.ID, 'output')))\n filesDiv = driver.find_element_by_id(\"output\")\n fileLinks = filesDiv.find_elements_by_css_selector('a')\n for uEl in fileLinks:\n fUrl = uEl.get_attribute('href')\n if \"L1C-2-disk\" not in fUrl:\n continue\n print \"currently downloading-->\", fUrl\n rf = requests.get( fUrl, verify=False )\n currFName = rf.url.split(\"/\")[-1]\n outDir = self.outBaseDir + inpDate.strftime( \"%Y%m%d\" ) + \"/\"\n if not os.path.exists(outDir):\n os.makedirs(outDir)\n with open( outDir + currFName, \"wb\" ) as ssusiData:\n ssusiData.write( rf.content )\n finally:\n driver.quit()", "def get_files(self):\n # self.folder= +str(int(time.time()))\n if not os.path.exists(self.folder):\n os.mkdir(self.folder)\n while len(self.url_queue): # If we have URLs to crawl - we crawl\n href = self.url_queue.popleft() # We grab a URL from the left of the list\n filename = href.rsplit('/', 1)[-1]\n print(\"Downloading %s to %s...\" % (href, filename))\n fullname = os.path.join(self.folder, filename)\n urlretrieve(href, fullname)\n self.xlfnames.append(filename)", "def _get_subject_files(self):\n from itertools import chain\n\n subjsf = fetch_one_file(self.ica_dir, self._subjects_fname)\n mat_file = sio.loadmat(subjsf)['files']\n return [f.strip() for f in list(chain.from_iterable(chain.from_iterable(chain.from_iterable(mat_file))))]", "def downloadFiles(ftpConn, pmcUrl, theirPmc, missingIds, outDir, connCount):\n # special case for lftp parallel download\n if connCount:\n assert(missingIds!=None) # can't do parallel download with on-disk checking\n fileNames = [theirPmc[pmcId] for pmcId in missingIds]\n pubGeneric.lftpGet(pmcUrl, outDir, fileNames, connCount)\n return\n\n downloadCount = 0\n alreadyExistCount = 0\n errCount = 0\n\n downloadIds = missingIds\n\n if downloadIds==None:\n downloadIds = theirPmc.keys()\n logging.info(\"scheduling all %d ids for 
download\" % len(downloadIds))\n\n for pmcId in downloadIds:\n fname = theirPmc[pmcId]\n locPath = join(outDir, fname)\n locDir = dirname(locPath)\n if not isdir(locDir):\n logging.info(\"Making dir %s\" % locDir)\n os.makedirs(locDir)\n if isfile(locPath):\n if missingIds!=None:\n logging.info(\"File %s already exists\" % locPath)\n else:\n logging.debug(\"File %s already exists\" % locPath)\n alreadyExistCount += 1\n else:\n downloadCount += 1\n downloadOk = util.ftpDownload(ftpConn, fname, locPath)\n if not downloadOk:\n logging.error(\"could not download file %s\" % fname)\n errCount +=1\n pubGeneric.appendLog(outDir, \"add\", pmcId+\":\"+fname)\n logging.info(\"%d PMC-IDs: %d already here, downloaded %d, %d skipped due to error\" % \\\n (len(downloadIds), alreadyExistCount, downloadCount, errCount))", "def download_file(year: str, month: str, career: str, kind: str = \"ativo\") -> str:\n\n career = career.lower()\n downloaded_files = []\n existing_files = []\n res = \"\"\n for file_ in CGU_FILES[career][kind]:\n url = f\"{URL_CGU_DOWNLOADS}/{year}{month}_{file_}\"\n try:\n division = file_.split(\"_\")[-1] if career == \"civil\" else None\n if not file_exists(year, month, career, kind, division):\n print(f\"Downloading {url}\")\n sleep(10)\n req = requests.get(url, stream=True, timeout=90)\n req.raise_for_status()\n filename = (\n req.headers.get(\"Content-Disposition\")\n .split(\"=\")[-1]\n .replace('\"', \"\")\n )\n saved = save_file(filename, req.content)\n unzipped_file = unzip_salary_file(saved, year, month, career, kind)\n downloaded_files.append(unzipped_file)\n else:\n print(f\"Arquivo {url} já existe\")\n existing_files.append(file_)\n res = f\"Arquivos baixados: {', '.join(downloaded_files)} \\nArquivos existentes: {', '.join(existing_files)}\"\n except requests.exceptions.ConnectionError as err:\n res = f\"Erro de conexão: {err}\"\n # pylint: disable=line-too-long\n # Erro de conexão: HTTPConnectionPool(host='www.portaltransparencia.gov.br', port=80): Max retries\n # exceeded with url: /download-de-dados/servidores/202202_Reserva_Reformas_Militares (Caused by\n # NewConnectionError('<urllib3.connection.HTTPConnection object at 0x7f9cd019cd90>: Failed to establish\n # a new connection: [Errno -3] Temporary failure in name resolution')\n except requests.exceptions.HTTPError as err:\n res = f\"Arquivo inexistente: {url.split('/')[-1]}.zip - {err}\"\n # pylint: disable=line-too-long\n # Erro no download: 404 Client Error: Not Found for url:\n # https://www.portaltransparencia.gov.br/download-de-dados/servidores/202202_Reserva_Reformas_Militares\n\n print(res)\n return res", "def download_file(session_requests, file_url, job_num, file_num, ext):\n \n filename = \"job_\" + str(job_num) + \"_file_\" + str(file_num) + ext\n pathname = Path(OUTPUT_PDF_PATH + filename) \n response = session_requests.get(file_url)\n pathname.write_bytes(response.content)\n \n return filename", "def download_data(self):\r\n \r\n for file in self.__files:\r\n file_to_download = os.path.join(self.__folder, os.path.basename(file))\r\n if not os.path.isfile(file_to_download):\r\n self.__download_file(file)", "def download(self):\r\n \r\n # RAR Files names\r\n if self.debug==0:\r\n rar_files_name = [\"K001.rar\",\"K002.rar\",\"K003.rar\",\"K004.rar\",\"K005.rar\",\"K006.rar\",\r\n \"KA01.rar\", \"KA03.rar\", \"KA04.rar\", \"KA05.rar\", \"KA06.rar\", \"KA07.rar\", \r\n \"KA08.rar\", \"KA09.rar\", \"KA15.rar\", \"KA16.rar\", \"KA22.rar\", \"KA30.rar\", \r\n \"KB23.rar\", \"KB24.rar\", \"KB27.rar\", \r\n 
\"KI01.rar\", \"KI03.rar\", \"KI04.rar\", \"KI05.rar\", \"KI07.rar\", \"KI08.rar\", \r\n \"KI14.rar\", \"KI16.rar\", \"KI17.rar\", \"KI18.rar\", \"KI21.rar\"]\r\n else:\r\n rar_files_name = [\"K002.rar\", \"KA01.rar\", \"KI01.rar\"]\r\n\r\n url = self.url\r\n \r\n dirname = self.rawfilesdir\r\n dir_rar = \"rar_files\"\r\n if not os.path.isdir(dirname):\r\n os.mkdir(dirname)\r\n if not os.path.isdir(os.path.join(dirname, dir_rar)):\r\n os.mkdir(os.path.join(dirname, dir_rar))\r\n \r\n\r\n print(\"Downloading RAR files:\")\r\n for i in rar_files_name:\r\n file_name = i\r\n if not os.path.exists(os.path.join(dirname, dir_rar, file_name)):\r\n urllib.request.urlretrieve(url+file_name, os.path.join(dirname, dir_rar, file_name))\r\n print(file_name)\r\n \r\n print(\"Extracting files:\")\r\n for i in rar_files_name:\r\n if not os.path.exists(os.path.join(dirname, i[:4])):\r\n file_name = os.path.join(dirname, dir_rar, i)\r\n Archive(file_name).extractall(dirname) \r\n print(i)\r\n\r\n if self.debug==0:\r\n files_path = self.files\r\n else:\r\n files_path = files_debug(self.rawfilesdir)\r\n\r\n print(files_path)\r\n self.files = files_path", "def download_extracted_files(a1000):\n hash_value = demisto.getArg('hash')\n try:\n response = a1000.download_extracted_files(hash_value)\n except Exception as e:\n return_error(str(e))\n\n filename = hash_value + '.zip'\n command_results = CommandResults(\n readable_output=f\"## ReversingLabs A1000 download extraced files \\nExtracted files are available for download \"\n f\"under the name {filename}\"\n )\n\n file_result = fileResult(filename, response.content, file_type=EntryType.FILE)\n\n return [command_results, file_result]", "def download_files(self) -> None:\n\n for name, url in self.files.items():\n print(f\"Download {name.split('/')[-1]}\")\n wget.download(url, os.path.join(\"data\", name))", "def download_templates(self):\n filename1 = self._download_data('CecchiG_LB_s1_ok.txt', 'syn4538204')\n filename2 = self._download_data('CecchiG_LB_s2_ok.txt', 'syn4538216')\n return filename1, filename2", "def download_test_files(request):\n\n # Log the start of the function\n logger.info(\"=========== returns ms1 test files from code directory input/ms1\")\n\n # create an absolute path to the 'example_data_dir' containing the test data files, then create\n # absolute paths to each test data file. 
Note the test data files are located in this code base.\n example_data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','input/ms1')\n pos_input = os.path.join(example_data_dir, example_pos_filename)\n neg_input = os.path.join(example_data_dir, example_neg_filename)\n tracer_file = os.path.join(example_data_dir, example_tracer_filename)\n run_sequence_pos_file = os.path.join(example_data_dir, example_run_sequence_pos_filename)\n run_sequence_neg_file = os.path.join(example_data_dir, example_run_sequence_neg_filename)\n\n # create filenames\n filename1 = 'ms1_pos_input_test_data.csv'\n filename2 = 'ms1_neg_input_test_data.csv'\n filename3 = 'ms1_tracer_test_data.csv'\n filename4 = 'ms1_run_sequence_pos_test_data.csv'\n filename5 = 'ms1_run_sequence_neg_test_data.csv'\n\n # List of files to be zipped\n files_to_zip = {filename1: pos_input, filename2: neg_input, filename3: tracer_file, filename4: run_sequence_pos_file, filename5: run_sequence_neg_file}\n\n # Create an in-memory zip file\n in_memory_zip = BytesIO()\n with ZipFile(in_memory_zip, 'w', ZIP_DEFLATED) as zipf:\n # Add each file to the zipfile\n for filename in files_to_zip:\n logger.info('filename: {}'.format(filename))\n file_path = files_to_zip[filename]\n with open(file_path, 'rb') as file:\n file_content = file.read()\n zipf.writestr(filename, file_content)\n # The ZipFile object is automatically closed when exiting the 'with' block\n\n zip_filename = \"ms1_test_data_files.zip\"\n # Create an HTTP response with the zip file attached for download\n response = HttpResponse(in_memory_zip.getvalue(),content_type='application/zip')\n response['Content-Disposition'] = 'attachment; filename=' + zip_filename\n response['Content-length'] = in_memory_zip.tell()\n\n # Return the HTTP response\n return response", "def get_files(self, subj_id, study, modality, series):\n if type(series) is int:\n series = str(series)\n\n url = 'files?' 
+ self._login_code + '&projectCode=' + \\\n self.proj_code + '&subjectNo=' + subj_id + '&study=' + \\\n study + '&modality=' + modality + '&serieNo=' + series\n output = self._send_request(url)\n\n # Split at '\\n'\n file_list = output.split('\\n')\n # Remove any empty entries!\n file_list = [x for x in file_list if x]\n\n return(file_list)", "def download1():\n #t=request.vars.arg(0)\n response.flash=request\n #print request.wsgi.environ['HTTP_REFERER']\n #print 'yghklo=',request.args[0]\n a=db(db.Project.Project_File==request.args[0]).select(db.Project.ALL)\n #a=db(db.Project.id==38).select(db.Project.ALL)\n #if a == None:\n#\t print 'silent'\n # print 'a= aabhas download',a[0].no_of_download, a[0].Project_File\n # if a[0].no_of_download==None:\n#\t a[0].no_download=0\n db(db.Project.Project_File==a[0].Project_File).update(no_of_download=(a[0].no_of_download or 0)+1)\n print 'a.id=',a[0].id\n # print len(a),'\\n'\n #print \"\\n\\n\\n\\n\"\n return response.download(request, db)", "def get_drms_files(self):\n import drms\n client = drms.Client(email=self.email,verbose=True)\n fmt = '%Y.%m.%d_%H:%M'\n self.t_qstr = self.series+'[{0}_TAI-{1}_TAI@{2}]'.format(self.start.strftime(fmt),self.end.strftime(fmt),self.cadence) \n\n\n #create wavelength query string\n self.w_qstr = '[' \n for i in self.wav: self.w_qstr = self.w_qstr+'{0},'.format(int(i.value))\n #remove last , and add bracket\n self.w_qstr = self.w_qstr[:-1]+']'\n \n #make the series string\n self.s_qstr = '{'+self.segment+'}'\n\n #the full query\n self.qstr = self.t_qstr+self.w_qstr+self.s_qstr\n\n #IF ERRORS WITH URL ERROR IT IS BECAUSE THE DOWNLOAD FILE SIZE IS TOO LARGE\n #export the data file list \n self.expt = client.export(self.qstr)\n#create an array of indexes to download\n index = np.arange(np.size(self.expt.urls.url))\n# get file from JSOC\n #set directory to current if no path set\n outf = self.expt.download(self.odir,index,fname_from_rec=True)", "def extract_details( session_requests, job_id ):\n \n url_prefix = CONFIG[\"url_prefix\"]\n \n #Extract html from web\n url = CONFIG[\"url_jobno\"] + str(job_id)\n tree = scrape_html(session_requests, url)\n \n #Extact description\n title = \"; \".join(tree.xpath(\"//p[@class='listheader']/text()\"))\n description = \"; \".join(tree.xpath(\"//p//text()\")) #more than one element\n \n #Extract files\n num_file = int(tree.xpath(\"count(//p[contains(text(),'Job Description Document :')]//a)\"))\n loop_range = min(num_file, (MAX_NUM_OF_FILES - 1))\n \n file_link = [\"NA\"] * MAX_NUM_OF_FILES\n file_name = [\"NA\"] * MAX_NUM_OF_FILES\n down_file_name = [\"NA\"] * MAX_NUM_OF_FILES\n \n if (num_file > (MAX_NUM_OF_FILES - 1)):\n file_link[(MAX_NUM_OF_FILES - 1)] = \"More than 9 files\"\n file_name[(MAX_NUM_OF_FILES - 1)] = \"More than 9 files\"\n \n for i in range(loop_range):\n file_link[i] = url_prefix + tree.xpath(\"//p[contains(text(),'Job Description Document :')]//a/@href\")[i]\n file_name[i] = tree.xpath(\"//p[contains(text(),'Job Description Document :')]//a/text()\")[i]\n \n ext = find_file_extention(file_name[i])\n down_file_name[i] = download_file(session_requests, file_link[i], job_id, i, ext)\n \n # dataframe\n row_names_link = init_file_dataframe()[1]\n row_names_name = init_file_dataframe()[2]\n row_names_down = init_file_dataframe()[3]\n \n df_link = np.transpose(pd.DataFrame(file_link, row_names_link))\n df_name = np.transpose(pd.DataFrame(file_name, row_names_name))\n df_down = np.transpose(pd.DataFrame(down_file_name, row_names_down))\n \n df_file = 
pd.DataFrame(data = {\"job_title\": [title], \"description\": [description], \"num_of_file\": [loop_range]})\n df_file = pd.concat([df_file.reset_index(drop=True), df_link], axis=1, sort=False)\n df_file = pd.concat([df_file.reset_index(drop=True), df_name], axis=1, sort=False)\n df_file = pd.concat([df_file.reset_index(drop=True), df_down], axis=1, sort=False)\n \n return df_file", "def download_file():\n for lines in urls:\n try:\n req.urlretrieve(lines, '{0}/{1}'.format(folder_path, lines.split('/')[-1]))\n time.sleep(1)\n print ('File - {} - downloaded successfully'.format(lines.split('/')[-1]))\n except urllib.error.HTTPError:\n print('File is missing or not reachable')\n print('Download Complete & Successful!')", "def initial_processing(subject_dir):\n # get subject name\n subject_name = subject_dir.parts[-1]\n\n # create ${subject_dir}/ASL and ${subject_dir}/T1w/Results/ASL \n # directories\n asl_dir = subject_dir / 'ASL'\n tis_dir = asl_dir / 'TIs'\n calib_dir = asl_dir / 'Calib'\n calib0_dir = calib_dir / 'Calib0'\n calib1_dir = calib_dir / 'Calib1'\n strucasl_dir = subject_dir / 'T1w/ASL'\n create_dirs([asl_dir, tis_dir, calib0_dir, calib1_dir, strucasl_dir])\n\n # find sub-directories\n # structural\n t1_dir = subject_dir / 'T1w'\n t1_name = t1_dir / 'T1w_acpc_dc_restore.nii.gz'\n t1_brain_name = t1_dir / 'T1w_acpc_dc_restore_brain.nii.gz'\n\n # asl\n b_dir = subject_dir / f'{subject_name}_V1_B'\n try:\n mbpcasl_dir = list(b_dir.glob('**/scans/*mbPCASLhr'))[0]\n # if no files match this format, it throws an IndexError\n except IndexError as e:\n print(e)\n mbpcasl = mbpcasl_dir / 'resources/NIFTI/files' / f'{subject_name}_V1_B_mbPCASLhr_PA.nii.gz'\n \n # output names\n tis_name = tis_dir / 'tis.nii.gz'\n calib0_name = calib0_dir / 'calib0.nii.gz'\n calib1_name = calib1_dir / 'calib1.nii.gz'\n # get tis\n fslroi(str(mbpcasl), tis_name, 0, 86)\n # get calibration images\n fslroi(str(mbpcasl), calib0_name, 88, 1)\n fslroi(str(mbpcasl), calib1_name, 89, 1)\n\n # get surface names\n surfaces_dir = t1_dir / 'fsaverage_LR32k'\n L_mid = surfaces_dir / f'{subject_name}_V1_MR.L.midthickness.32k_fs_LR.surf.gii'\n R_mid = surfaces_dir / f'{subject_name}_V1_MR.R.midthickness.32k_fs_LR.surf.gii'\n L_pial = surfaces_dir / f'{subject_name}_V1_MR.L.pial.32k_fs_LR.surf.gii'\n R_pial = surfaces_dir / f'{subject_name}_V1_MR.R.pial.32k_fs_LR.surf.gii'\n L_white = surfaces_dir / f'{subject_name}_V1_MR.L.white.32k_fs_LR.surf.gii'\n R_white = surfaces_dir / f'{subject_name}_V1_MR.R.white.32k_fs_LR.surf.gii'\n\n # add filenames to a dictionary to be saved to a json\n json_name = asl_dir / 'ASL.json'\n fields = [\n \"T1w_dir\",\n \"T1w_acpc\",\n \"T1w_acpc_brain\",\n \"ASL_seq\",\n \"ASL_dir\",\n \"TIs_dir\",\n \"structasl\",\n \"calib_dir\",\n \"calib0_dir\",\n \"calib1_dir\",\n \"calib0_img\",\n \"calib1_img\",\n \"L_mid\",\n \"R_mid\",\n \"L_pial\",\n \"R_pial\",\n \"L_white\",\n \"R_white\",\n \"json_name\"\n ]\n field_values = [\n t1_dir,\n t1_name,\n t1_brain_name,\n tis_name,\n asl_dir,\n tis_dir,\n strucasl_dir,\n calib_dir,\n calib0_dir,\n calib1_dir,\n calib0_name,\n calib1_name,\n L_mid,\n R_mid,\n L_pial,\n R_pial,\n L_white,\n R_white,\n json_name\n ]\n names_dict = {}\n for key, value in zip(fields, field_values):\n names_dict[key] = str(value)\n with open(json_name, 'w') as fp:\n json.dump(names_dict, fp, sort_keys=True, indent=4)", "def get_download_file_name(self):\n # Use 'unknown' if the course instance does not have a term\n if self.course_instance.term:\n term = 
self.course_instance.term.get_url_name()\n else:\n term = 'unknown'\n\n return '{course}-{term}-{number}-{instructors}-{type}{ext}'.format(\n course=self.course_instance.course.get_url_name(),\n term=term,\n number=self.exam_number,\n instructors='_'.join([i.last_name for i in self.instructors]),\n type=self.exam_type,\n ext=self.file_ext)", "def __getFile_requests(self, _src, _dst):\n\n #-------------------- \n # Get the content size from scan json\n #-------------------- \n self.downloadTracker['downloadedSize']['bytes'] = 0 \n self.downloadTracker['totalDownloadSize'] = self.getFileSize(_src)\n\n #-------------------- \n # Pre-download callbacks\n #-------------------- \n size = self.downloadTracker['totalDownloadSize']['bytes'] \\\n if self.downloadTracker['totalDownloadSize']['bytes'] else -1\n self.runEventCallbacks('downloadStarted', _src, size)\n self.runEventCallbacks('downloading', _src, 0)\n\n #-------------------- \n # Open the local destination file \n # so that it can start reading in the buffers.\n #-------------------- \n try:\n dstFile = _dst\n dstDir = os.path.dirname(_dst) \n if not os.path.exists(dstDir):\n os.makedirs(dstDir)\n # print(\"dstFile: {}\".format(dstFile))\n except Exception as e:\n print(e)\n self.__downloadFailed(_src, _dst, dstFile, str(e))\n self.exceptionPopup.setText(str(e))\n return\n\n #-------------------- \n # Construct the request\n #-------------------- \n url = Xnat.path.makeXnatUrl(self.host, _src)\n r = self.__httpsRequest('GET', url, stream=True)\n f = open(dstFile, 'wb')\n\n for chunk in r.iter_content(chunk_size=1024*1024):\n # Check for cancel event\n if not self.inDownloadQueue(_src):\n f.close()\n os.remove(f.name)\n self.runEventCallbacks('downloadCancelled', _src)\n break\n\n f.write(chunk)\n\n self.downloadTracker['downloadedSize']['bytes'] += len(chunk)\n self.runEventCallbacks('downloading', _src, \n self.downloadTracker['downloadedSize']['bytes'])\n\n r.close()\n f.close()\n\n #-------------------- \n # Post-download callbacks\n #-------------------- \n self.removeFromDownloadQueue(_src)\n self.runEventCallbacks('downloadFinished', _src)", "def download_files(path):\n return edgar.download_index(path,2019,skip_all_present_except_last=False)", "def get_subjects_info(data_folder, dataset_id, format=\"dict\"):\r\n subjects_info = {} # build of dictionnary of all session for each subject\r\n\r\n if dataset_id == \"raw_clean_32\":\r\n \"\"\" High Versus Low inhibitory Stimuli of Tinnitus and control patients\r\n \"\"\"\r\n patient = 2 # patient group (static for a given dataset)\r\n session = 9 # 6 = 1 old remplacer apres (session 'high')\r\n ses2 = 8 # (session 'low')\r\n names = os.listdir(os.path.join(data_folder, dataset_id, str(patient) + \"_\" + str(session)))\r\n names2 = os.listdir(os.path.join(data_folder, dataset_id, str(patient) + \"_\" + str(ses2)))\r\n\r\n pat = []\r\n pat2 = []\r\n for name in names:\r\n # print name.split('_')[0]\r\n pat.append(name.split('_')[0]) # all subjects ID from names\r\n for name in names2:\r\n # print name.split('_')[0]\r\n pat2.append(name.split('_')[0]) # all subjects ID from names2\r\n\r\n for name in names2:\r\n if pat.__contains__(name.split('_')[0]):\r\n if subjects_info.keys().__contains__(name.split('_')[0]):\r\n subjects_info[name.split('_')[0]].append(name) # add file to the list\r\n else:\r\n subjects_info[name.split('_')[0]] = [name] # add first file to the list\r\n for name in names:\r\n if pat2.__contains__(name.split('_')[0]):\r\n 
subjects_info[name.split('_')[0]].append(name)\r\n\r\n elif dataset_id == \"Distress2010\":\r\n \"\"\" High Versus Low Distress patients (1, 2, 3, 4 Distress)\r\n \"\"\"\r\n sub_high = 'high distress'\r\n sub_low = 'low distress'\r\n filenames = os.listdir(os.path.join(data_folder, dataset_id, sub_high)) + \\\r\n os.listdir(os.path.join(data_folder, dataset_id, sub_low))\r\n\r\n # get all subjects ID\r\n valid_id = [\"1\", \"2\", \"3\", \"4\"] # Distress group (file begin with)\r\n\r\n for filename in filenames:\r\n if filename[0] in valid_id:\r\n symptoms, subjectname = _sparse_info_from_file(filename.split(\".\")[0], separator=\"_\")\r\n symptoms.append({\"distress\": int(filename[0])})\r\n paradigm = \"rest\"\r\n session_info = {\"paradigm\": paradigm, \"symptoms\": symptoms}\r\n\r\n try:\r\n subjects_info[subjectname].update(\r\n {filename: session_info} # add new session\r\n )\r\n\r\n except KeyError:\r\n subjects_info[subjectname] = {filename: session_info} # create session`\r\n elif dataset_id == \"Tinnitus_EEG\":\r\n \"\"\" extended Distress2010 dataset with more than 310 patients\r\n \"\"\"\r\n filenames = os.listdir(os.path.join(data_folder, dataset_id))\r\n subjects_csv = pd.read_csv(os.path.join(data_folder, dataset_id,\"labels_name_cat_TQ_vas.csv\"),\r\n names=[\"session\", \"distress\", \"TQ\", \"VAS\"], index_col=\"session\")\r\n\r\n for filename in filenames:\r\n if filename.split(\".\")[1] == \"txt\":\r\n if np.any(subjects_csv.index.str.match(filename)):\r\n symptoms, subjectname = _sparse_info_from_file(filename.split(\".\")[0], separator=\"_\")\r\n distress_val = int(subjects_csv[subjects_csv.index.str.match(filename)][\"distress\"].values[0])\r\n TQ_val = int(subjects_csv[subjects_csv.index.str.match(filename)][\"TQ\"].values[0])\r\n VAS_val = int(subjects_csv[subjects_csv.index.str.match(filename)][\"VAS\"].values[0])\r\n\r\n symptoms.append({\"distress\": distress_val})\r\n paradigm = \"rest\"\r\n session_info = {\"paradigm\": paradigm, \"symptoms\": symptoms, \"TQ\": TQ_val, \"VAS\": VAS_val}\r\n\r\n try:\r\n subjects_info[subjectname].update(\r\n {filename: session_info} # add new session\r\n )\r\n except KeyError:\r\n subjects_info[subjectname] = {filename: session_info} # create session`\r\n else:\r\n print(\"file \" + filename + \" not listed in labels_name_cat_TQ_vas.csv, subject rejected\")\r\n\r\n elif dataset_id == \"NormativeDB\":\r\n \"\"\" Control subjects in resting state\r\n \"\"\"\r\n filenames = os.listdir(os.path.join(data_folder, dataset_id, \"clean-up\", \"M\")) + \\\r\n os.listdir(os.path.join(data_folder, dataset_id, \"clean-up\", \"F\"))\r\n\r\n # get all subjects ID\r\n valid_id = [\"1\", \"2\", \"3\", \"4\"] # Distress group (file begin with)\r\n\r\n for filename in filenames:\r\n if not (filename.split(\".\")[0][-2:] == \"EC\"): # remove eyes closed\r\n symptoms, subjectname = _sparse_info_from_file(filename.split(\".\")[0], separator=\"_\")\r\n symptoms.append(\"Control\")\r\n symptoms.append({\"distress\": int(0)})\r\n paradigm = \"rest\"\r\n session_info = {\"paradigm\": paradigm, \"symptoms\": symptoms, \"gender\": filename[2]}\r\n\r\n try:\r\n subjects_info[subjectname].update(\r\n {filename: session_info} # add new session\r\n )\r\n except KeyError:\r\n subjects_info[subjectname] = {filename: session_info} # create session\r\n\r\n else:\r\n print(\"get_subjects_info: unknown dataset\")\r\n if format == \"DataFrame\":\r\n subjects_info = _subjects_dict_to_pandas(subjects_info)\r\n\r\n return subjects_info", "def 
extract_list_of_patients(self, subjects):\n\n print('\\nExtracting the brain of {} subject(s)'.format(len(subjects)))\n allfiles = FileOperations.get_filelist_as_tuple(f\"{FILEDIR}\", subjects, subdir='')\n strings2exclude = ['bcorr', 'reg_run', '_ep2d', 'bc_', 'diff_', 'reg_']\n\n start_extraction = time.time()\n sequences = {'t1': '_MDEFT3D', 't2': '_t2_'}\n list_of_files = {k: [] for k in sequences.keys()}\n\n # print(allfiles)\n template_folder = os.path.join(ROOTDIR, 'data', 'template', 'icbm152')\n\n for seq, keyword in sequences.items():\n list_of_files[seq] = [x for x in allfiles if x[0].endswith('.gz') and\n any(re.search(r'\\w+(?!_).({})|^({}[\\-])\\w+.|^({})[a-z\\-\\_0-9].'.format(z, z, z),\n os.path.basename(x[0]),\n re.IGNORECASE) for z in [keyword] * 3) and not\n any(re.search(r'\\w+(?!_).({})|^({}[\\-])\\w+.|^({})[a-z\\-\\_0-9].'.format(z, z, z),\n os.path.basename(x[0]),\n re.IGNORECASE) for z in strings2exclude)]\n\n for file in list_of_files['t1']:\n output_folder = os.path.join(FILEDIR, file[1], 'output')\n FileOperations.create_folder(output_folder)\n\n print(f\"creating mask for {file[0]}\")\n filename2save = os.path.join(output_folder, 'brainmask_' + os.path.split(file[0])[1])\n modality = 't1combined' if seq == 't1' else 't2'\n\n template = os.path.join(template_folder, 'mni_icbm152_t1_tal_nlin_asym_09b_hires.nii')\n\n preprocess_imaged, mask = self.create_brainmask(file[0])\n self.skullstrip(image=preprocess_imaged, mask=mask,\n output_file=os.path.join(output_folder, 'noskull_' + os.path.split(file[0])[1]))\n\n\n print(\"mask created... ok\\n\")\n return_dict = {'preprocessed_image': preprocessed_image}\n\n print('\\nIn total, a list of {} file(s) was processed \\nOverall, brain_extraction took '\n '{:.1f} secs.'.format(len(subjects), time.time() - start_extraction))", "def podcast_download(self):\r\n warnings.filterwarnings(\"ignore\", category=UnicodeWarning)\r\n now = datetime.datetime.now()\r\n\r\n for podcast_file in self.podcast_list:\r\n published, name, link, title = podcast_file\r\n if self.podcast_list != []:\r\n line_file = (published + ';' + title + ';' + name + ';' + link).encode(\"utf-8\") \r\n if line_file in open(self.download_log).read():\r\n pass\r\n else:\r\n title = unicodedata.normalize('NFKD', title).encode('ascii', 'ignore')\r\n download_folder = os.path.join('downloads', title)\r\n if not os.path.exists(download_folder): \r\n os.makedirs(download_folder)\r\n try:\r\n published = str(parser.parse(published))[:10]\r\n except IOError as error:\r\n print 'Error' + (error) + ': File - ' + str(title)\r\n download_folder = os.path.join(download_folder, published)\r\n if not os.path.exists(download_folder): \r\n os.makedirs(download_folder)\r\n namefile_unicode = link[link.rfind('/')+1:]\r\n namefile_str = unicodedata.normalize('NFKD', \r\n namefile_unicode).encode('ascii', 'ignore')\r\n namefile_str = namefile_str.decode('utf-8', 'ignore').encode(\"utf-8\")\r\n if '.mp3' in namefile_str:\r\n len_name = namefile_str.index('.mp3')\r\n elif '.MP3' in namefile_str:\r\n len_name = namefile_str.index('.MP3')\r\n namefile_str = namefile_str[:len_name + 4]\r\n fileoutput = os.path.join(download_folder, namefile_str)\r\n name = unicodedata.normalize('NFKD', name).encode('ascii', 'ignore')\r\n print str(published) + '; ' + name\r\n ## downlink\r\n download_file(link, fileoutput) \r\n ## tagging\r\n mp3_tagging(fileoutput, podcast_file)\r\n ## write log\r\n write_file(self.download_log, line_file)\r\n end = datetime.datetime.now()\r\n print '\\r' + 'Download 
Time = ' + str(end-now) + '\\r'\r\n return None", "def download_optional_inputs(flywheel_basedir, sub_dir, ses_dir, rootdir):\n print('Looking for manifest-defined anatomical files')\n t1_anat_dir = os.path.join(flywheel_basedir, 'input', 't1w_anatomy')\n if os.path.isdir(t1_anat_dir):\n t1_file = os.listdir(t1_anat_dir)\n if t1_file:\n t1_file = os.path.join(t1_anat_dir, t1_file[0])\n anat_dir = os.path.join(rootdir, sub_dir, ses_dir, 'anat')\n if not os.path.isdir(anat_dir):\n os.mkdir(anat_dir)\n dest_file = os.path.join(anat_dir, sub_dir + '_' + ses_dir + '_T1w.nii.gz')\n if os.path.exists(dest_file):\n print('Found downloaded T1 file - overwriting!')\n os.remove(dest_file)\n os.remove(dest_file.replace('.nii.gz', '.json'))\n shutil.copyfile(t1_file, dest_file)\n\n t2_anat_dir = os.path.join(flywheel_basedir, 'input', 't2w_anatomy')\n if os.path.isdir(t2_anat_dir):\n t2_file = os.listdir(t2_anat_dir)\n if t2_file:\n anat_dir = os.path.join(rootdir, sub_dir, ses_dir, 'anat')\n if not os.path.isdir(anat_dir):\n os.mkdir(anat_dir)\n t2_file = os.path.join(t2_anat_dir, t2_file[0])\n dest_file = os.path.join(anat_dir, sub_dir + '_' + ses_dir + '_T2w.nii.gz')\n if os.path.exists(dest_file):\n print('Found downloaded T2 file - overwriting!')\n os.remove(dest_file)\n os.remove(dest_file.replace('.nii.gz', '.json'))\n shutil.copyfile(t2_file, dest_file)", "def download_fits_files(self, observation_id, *, verbose=False):\n results = self.get_associated_files(observation_id=observation_id, verbose=verbose)\n for file in [i['filename'] for i in results if i['filename'].endswith('.fits')]:\n if verbose:\n print(f\"Downloading {file} ...\")\n self.download_file(file=file, filename=file, verbose=verbose)", "def download_assignment_student(request, pk, i):\n evalassignment = Evalassignment.objects.\\\n filter(pk=pk, evaluator=request.user).first()\n if evalassignment:\n eval_name = '%s_%s' % (evalassignment.assignment.assignmentype.title.\n replace(\" \", \"\"), i)\n filename = 'assign_%s.%s' % (eval_name, evalassignment.assignment.\n document.name.split('.')[-1])\n response = HttpResponse(evalassignment.assignment.document,\n content_type='application/force_download')\n response['Content-Disposition'] = 'attachment; filename=%s' % filename\n return response\n else:\n return redirect('gradapp:dashboard_student')" ]
[ "0.6075932", "0.5923535", "0.58468294", "0.5837011", "0.57766956", "0.5741573", "0.5726924", "0.5726823", "0.56845653", "0.56502473", "0.556752", "0.55647373", "0.5562809", "0.55589074", "0.5542313", "0.55303603", "0.5507099", "0.54877055", "0.54732233", "0.5440725", "0.5436211", "0.5430703", "0.5425105", "0.5413553", "0.5404179", "0.5370694", "0.5331673", "0.5330227", "0.5318344", "0.53044355" ]
0.64693093
0
Set final status to last line of recon-all-status.log.
def set_recon_all_status(subject_dir): path = context.gear_dict['output_analysisid_dir'] + '/' + \ subject_dir + '/scripts/recon-all-status.log' if os.path.exists(path): with open(path, 'r') as fh: for line in fh: pass last_line = line else: last_line = 'recon-all-status.log is missing' update_gear_status(subject_dir, last_line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def final_status(self, final_status):\n\n self._final_status = final_status", "def after_epoch(self):\n line = ' '.join([str(k) + ': ' + str(v) for k, v in self.trainer.status.items()])\n with open(os.path.join(self.root_path, 'log.txt'), 'a+') as fout:\n fout.write(line + '\\n')", "def updateLastObs(self):\n result = self.robot['SCRIPTOBS_STATUS'].read()\n with open('/u/rjhanson/master/lastObs.txt','w') as f:\n f.write(\"%s\\n\" % self.ucam('OBSNUM').read())\n apflog(\"Recording last ObsNum as %d\" % int(self.ucam[\"OBSNUM\"].read()))\n if result == 'Exited/Failure':\n # Last observation failed, so no need to update files\n return\n elif result == 'Exited/Success': \n try:\n f = open(\"/u/rjhanson/master/apf_sched.txt\",'r')\n except IOError:\n pass\n else:\n for line in f:\n if line.strip() != '':\n with open('/u/rjhanson/master/hit_list','a') as o:\n o.write(line + '\\n')\n f.close()", "async def last(self, update: Update, _: ContextTypes.DEFAULT_TYPE) -> None:\n self.door_status.update_status()\n await update.message.reply_text(text=f\"🐤 {self.door_status.last_line}\")", "async def last_lines(self, update: Update, _: ContextTypes.DEFAULT_TYPE) -> None:\n self.door_status.update_status()\n await update.message.reply_text(text=self.door_status.last_lines)", "def writeCompletelog(self, locallog, remotelog):\n\n # pause the bot from parsing, because we don't\n # want to parse the log from the beginning\n if self.console._paused is False:\n self.console.pause()\n self.debug('Pausing')\n # Remove last line if not complete\n i = remotelog.rfind ('\\r\\n')\n remotelog = remotelog[:i + 2]\n # remove any blank lines\n while remotelog[-4:-2] == '\\r\\n':\n remotelog = remotelog[:-2]\n \n # use Just a baka's lazy cursor\n self.lastlines = remotelog[-1000:]\n\n # create or open the local log file\n if self._logAppend:\n output = open(locallog, 'ab')\n else:\n output = open(locallog, 'wb')\n\n output.write('\\r\\n')\n output.write('B3 has restarted writing the log file\\r\\n')\n output.write('\\r\\n')\n output.close()\n\n self.info('remote log downloaded successfully')\n\n # we can now start parsing again\n if self.console._paused:\n self.console.unpause()\n self.debug('unpausing')", "def update_status(status):\n global _current_line\n if _current_line is not None:\n _current_line.update(status)", "def unmoving_update_log(self):\n self.log.append(self.log[-1])", "def state_finalStatus(self, fileObj):\n if self.fillResponseBuffer(fileObj, 4):\n status = struct.unpack(\"<I\", self.responseBuffer)[0]\n self.responseBuffer = ''\n self.decodeStatus(status)\n self.result.callback(self.reader.transferred)", "def print_status(self, current_interval_end: datetime, force_status: bool=False):\n\n # Only update if we've reached the next threshold:\n if self.num_events_since_last_status > self.status_num_events_interval or force_status:\n first_stamp_str = self.first_event_stamp.strftime(\"%Y-%m-%d %H:%M:%S.%f\") \\\n if self.first_event_stamp is not None else \"?\"\n current_stamp_str = current_interval_end.strftime(\"%Y-%m-%d %H:%M:%S.%f\")\n\n # Print status over previous status:\n print(\n f\"Processed {self.num_input_events_processed} events ({first_stamp_str} to {current_stamp_str}) \",\n end='\\r'\n )\n\n self.num_events_since_last_status = 0", "def set_status(self, msg):\n if self.msg[:5] != \"ERROR\":\n self.msg = msg\n else:\n if msg[:5] == \"ERROR\":\n self.msg = \"\\n\" + msg", "def log(self):\n lines = tailer.tail(open('logs/status.log'), 10)\n\n statement = \"\"\n\n for line in lines:\n 
statement += (line + \"<br />\")\n return statement", "def reset(self):\n self.last_line_was_empty = True", "def resetlast(self):\n self.last = None\n self.dlast = 0", "def log_success(self):\n with open(self.logfile, 'a+') as f:\n f.write(self.BEGIN + self.message + '\\n' + self.END)\n self.message = ''", "def _update_status(self, message):\n message = \"[{}] {}\".format(strftime(\"%H:%M:%S\", localtime()), message)\n self.tracker.write_log(message)\n self.ui.update_status(message)", "def end_logging(self):\n self.append_to_logfile()", "def log_server_status(self):\n cmd = '{}logServerStatus'.format(self.console)\n self.write_command(cmd)", "def _write_status(self, status, cls=MySQLStatus):", "def endprogress():\n global title_global\n sys.stdout.write(\"\\r\" + title_global + \": [\" +\"#\" * 40 + \"]100% -- Done! \\n\")\n sys.stdout.flush()", "def outputStatus(self, line):\r\n for l in line.strip('\\r\\n').split('\\n'):\r\n self.output('%s: %s' % (ctime(), l), 0)", "def setstatus(self, text):\n if type(text) in (bytes, str):\n T = text\n else:\n # list probably:\n T = '\\n'.join(text)\n print(('-'*60))\n print(T)\n print(('='*60))", "def end(self):\n if not self.logfile:\n return\n\n self.logfile.close()\n self.logfile = None\n\n # FIXME: it's a little hack here\n # delete color code\n os.system(\"sed -i 's/\\x1b\\[[0-9]*m//g' %s\" % self.logname)\n os.system(\"sed -i 's/\\x1b\\[[0-9]*K//g' %s\" % self.logname)", "def saveLogFile(self, fname = \"data/status.txt\"):\n with open(fname, 'w') as f:\n f.write(\"<br>\\n\".join(self.logLines))\n self.log(\"wrote \"+fname)", "def after(self, status):\n return", "def redo(self):\n pass", "def finalize(self):\n self.clear()\n sys.stderr.write(f\"{self._message} finished after {(time.time()-self._startTime):.1f}s \"\n \"at \"+time.strftime(\"%H:%M:%S\", time.localtime())+\" \\n\")", "def refresh_status(self):\n\n pass", "def setLastRepOffset(self):\n self.lastRepOffset = self.firstRepOffset + \\\n (self.numberOfSamples * self.numberOfTechReps)", "def on_R1(self):\r\n self.log()" ]
[ "0.6525308", "0.63573575", "0.61293614", "0.5867392", "0.58495337", "0.5713314", "0.56992143", "0.5678644", "0.5619614", "0.5558952", "0.5548329", "0.55442154", "0.54692274", "0.54651254", "0.5439889", "0.54174083", "0.53965306", "0.5389551", "0.53609776", "0.53462833", "0.5333738", "0.5324261", "0.52501124", "0.52449393", "0.52407336", "0.52358323", "0.52296823", "0.52275056", "0.52268565", "0.5224085" ]
0.71194637
0
Set final status to last line of recon-all-status.log.
def set_recon_all_status(subject_dir): path = context.gear_dict['output_analysisid_dir'] + '/' + \ subject_dir + '/scripts/recon-all-status.log' if os.path.exists(path): with open(path, 'r') as fh: for line in fh: pass last_line = line else: last_line = 'recon-all-status.log is missing' update_gear_status(subject_dir, last_line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def final_status(self, final_status):\n\n self._final_status = final_status", "def after_epoch(self):\n line = ' '.join([str(k) + ': ' + str(v) for k, v in self.trainer.status.items()])\n with open(os.path.join(self.root_path, 'log.txt'), 'a+') as fout:\n fout.write(line + '\\n')", "def updateLastObs(self):\n result = self.robot['SCRIPTOBS_STATUS'].read()\n with open('/u/rjhanson/master/lastObs.txt','w') as f:\n f.write(\"%s\\n\" % self.ucam('OBSNUM').read())\n apflog(\"Recording last ObsNum as %d\" % int(self.ucam[\"OBSNUM\"].read()))\n if result == 'Exited/Failure':\n # Last observation failed, so no need to update files\n return\n elif result == 'Exited/Success': \n try:\n f = open(\"/u/rjhanson/master/apf_sched.txt\",'r')\n except IOError:\n pass\n else:\n for line in f:\n if line.strip() != '':\n with open('/u/rjhanson/master/hit_list','a') as o:\n o.write(line + '\\n')\n f.close()", "async def last(self, update: Update, _: ContextTypes.DEFAULT_TYPE) -> None:\n self.door_status.update_status()\n await update.message.reply_text(text=f\"🐤 {self.door_status.last_line}\")", "async def last_lines(self, update: Update, _: ContextTypes.DEFAULT_TYPE) -> None:\n self.door_status.update_status()\n await update.message.reply_text(text=self.door_status.last_lines)", "def writeCompletelog(self, locallog, remotelog):\n\n # pause the bot from parsing, because we don't\n # want to parse the log from the beginning\n if self.console._paused is False:\n self.console.pause()\n self.debug('Pausing')\n # Remove last line if not complete\n i = remotelog.rfind ('\\r\\n')\n remotelog = remotelog[:i + 2]\n # remove any blank lines\n while remotelog[-4:-2] == '\\r\\n':\n remotelog = remotelog[:-2]\n \n # use Just a baka's lazy cursor\n self.lastlines = remotelog[-1000:]\n\n # create or open the local log file\n if self._logAppend:\n output = open(locallog, 'ab')\n else:\n output = open(locallog, 'wb')\n\n output.write('\\r\\n')\n output.write('B3 has restarted writing the log file\\r\\n')\n output.write('\\r\\n')\n output.close()\n\n self.info('remote log downloaded successfully')\n\n # we can now start parsing again\n if self.console._paused:\n self.console.unpause()\n self.debug('unpausing')", "def update_status(status):\n global _current_line\n if _current_line is not None:\n _current_line.update(status)", "def unmoving_update_log(self):\n self.log.append(self.log[-1])", "def state_finalStatus(self, fileObj):\n if self.fillResponseBuffer(fileObj, 4):\n status = struct.unpack(\"<I\", self.responseBuffer)[0]\n self.responseBuffer = ''\n self.decodeStatus(status)\n self.result.callback(self.reader.transferred)", "def print_status(self, current_interval_end: datetime, force_status: bool=False):\n\n # Only update if we've reached the next threshold:\n if self.num_events_since_last_status > self.status_num_events_interval or force_status:\n first_stamp_str = self.first_event_stamp.strftime(\"%Y-%m-%d %H:%M:%S.%f\") \\\n if self.first_event_stamp is not None else \"?\"\n current_stamp_str = current_interval_end.strftime(\"%Y-%m-%d %H:%M:%S.%f\")\n\n # Print status over previous status:\n print(\n f\"Processed {self.num_input_events_processed} events ({first_stamp_str} to {current_stamp_str}) \",\n end='\\r'\n )\n\n self.num_events_since_last_status = 0", "def set_status(self, msg):\n if self.msg[:5] != \"ERROR\":\n self.msg = msg\n else:\n if msg[:5] == \"ERROR\":\n self.msg = \"\\n\" + msg", "def log(self):\n lines = tailer.tail(open('logs/status.log'), 10)\n\n statement = \"\"\n\n for line in lines:\n 
statement += (line + \"<br />\")\n return statement", "def reset(self):\n self.last_line_was_empty = True", "def resetlast(self):\n self.last = None\n self.dlast = 0", "def log_success(self):\n with open(self.logfile, 'a+') as f:\n f.write(self.BEGIN + self.message + '\\n' + self.END)\n self.message = ''", "def _update_status(self, message):\n message = \"[{}] {}\".format(strftime(\"%H:%M:%S\", localtime()), message)\n self.tracker.write_log(message)\n self.ui.update_status(message)", "def end_logging(self):\n self.append_to_logfile()", "def log_server_status(self):\n cmd = '{}logServerStatus'.format(self.console)\n self.write_command(cmd)", "def _write_status(self, status, cls=MySQLStatus):", "def endprogress():\n global title_global\n sys.stdout.write(\"\\r\" + title_global + \": [\" +\"#\" * 40 + \"]100% -- Done! \\n\")\n sys.stdout.flush()", "def outputStatus(self, line):\r\n for l in line.strip('\\r\\n').split('\\n'):\r\n self.output('%s: %s' % (ctime(), l), 0)", "def setstatus(self, text):\n if type(text) in (bytes, str):\n T = text\n else:\n # list probably:\n T = '\\n'.join(text)\n print(('-'*60))\n print(T)\n print(('='*60))", "def end(self):\n if not self.logfile:\n return\n\n self.logfile.close()\n self.logfile = None\n\n # FIXME: it's a little hack here\n # delete color code\n os.system(\"sed -i 's/\\x1b\\[[0-9]*m//g' %s\" % self.logname)\n os.system(\"sed -i 's/\\x1b\\[[0-9]*K//g' %s\" % self.logname)", "def saveLogFile(self, fname = \"data/status.txt\"):\n with open(fname, 'w') as f:\n f.write(\"<br>\\n\".join(self.logLines))\n self.log(\"wrote \"+fname)", "def after(self, status):\n return", "def redo(self):\n pass", "def finalize(self):\n self.clear()\n sys.stderr.write(f\"{self._message} finished after {(time.time()-self._startTime):.1f}s \"\n \"at \"+time.strftime(\"%H:%M:%S\", time.localtime())+\" \\n\")", "def refresh_status(self):\n\n pass", "def setLastRepOffset(self):\n self.lastRepOffset = self.firstRepOffset + \\\n (self.numberOfSamples * self.numberOfTechReps)", "def on_R1(self):\r\n self.log()" ]
[ "0.65249884", "0.6357503", "0.61292785", "0.58676416", "0.5849602", "0.57127684", "0.56986594", "0.56790066", "0.56193364", "0.55592257", "0.5547861", "0.5544232", "0.54690313", "0.5465183", "0.543945", "0.54176474", "0.53966546", "0.5389639", "0.5360609", "0.5345862", "0.53339916", "0.53241867", "0.5249969", "0.5244675", "0.5240985", "0.5236185", "0.5230011", "0.52279085", "0.52272683", "0.5224508" ]
0.71203095
1
Check if the given value is "close enough" to the desired value, because sometimes MagneticFieldStrength can be provided in mT (3000, 1500) or as something like 2.9721T
def field_strength_close_enough(field_strength, desired_value): if field_strength > 100: # assume it is in mT instead of Teslas field_strength /= 1000 # and turn it into Teslas diff = abs(field_strength - desired_value) if diff < 0.2: return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def above_threshold(self, value):\n # We use floating point number here so we have to take care\n return finf(value,self.min) or finf(self.max,value)", "def check_value(value, sensor):\n if not GraphModel.check_if_int(value):\n return False\n\n return (sensor == 't' and ba.min_temp < int(value) < ba.max_temp) or \\\n (sensor == 'l' and ba.min_light < int(value) < ba.max_light)", "def threshold(self, value):\r\n threshold = 0.5\r\n if value >= threshold:\r\n return 1\r\n else:\r\n return 0", "def _check_within_tolerance(value, tolerance):\n return tf.norm(tensor=value, ord=np.inf) <= tolerance", "def check_for_float(check):", "def limitsExsess(topic, value):\n\n if isNotifyTime(topic):\n if \"temperature\" in topic:\n val = float(value)\n if val < MIN_TEMPERATURE or val > MAX_TEMPERATURE:\n notifyTelegram(\"Temperature out of bounds: \"+value+\"degC\")\n return True\n if \"CO\" in topic:\n val = float(value)\n if warmedUp and val > CARBON_MONOXIDE_ADC_THRESH:\n notifyTelegram(\"Carbon Monoxide level above threshold: \"+value)\n return True\n if \"All_Gas\" in topic:\n val = float(value)\n if warmedUp and val > GAS_ALL_ADC_THRESH:\n notifyTelegram(\"Poison gas level above threshold: \"+value)\n return True\n if \"alarm\" in topic:\n val = float(value)\n if int(val) == 1:\n notifyTelegram(\"ALARM in Living room is On!\")\n return True\n if \"MotionHUE\" in topic:\n val = float(value)\n if int(val) == 1:\n notifyTelegram(\"HUE Motion sensor detected movement!\")\n return True\n return False", "def check(self, value):\n\t\t\n\t\tif value <= self.current_rate:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def calc_tolerance(wt):\n return 1 - wt", "def constrainSpeed(self, speedRPM):\n\n if speedRPM > self.motorMaxRPM:\n speedRPM = self.motorMaxRPM\n\n if speedRPM < 0.0:\n speedRPM = 0.0\n\n# print ( \"motorSpeed RPM adjusted: \", speedRPM )\n\n return speedRPM", "def distance_tolerance(distance: float) -> float:\n ret = 10.0\n if distance < 0:\n ret += distance * (100 - ret) / -2500.0\n return ret", "def ge(value, limit):\n return value >= limit", "def check_min_value(self, tracked_price):\n if tracked_price < self.min_value and self.warning_calls <= 2:\n print(f'Warning! Price dropeed under {self.min_value} pln {tracked_price}')\n self.make_phone_call()\n self.warning_calls += 1\n elif tracked_price < self.min_value and self.warning_calls == 3:\n self.send_a_message(\n f'This is a warning message. Price of EUR/PLN dropped under critical value!'\n f' {self.min_value} pln')\n print(f'Called 3 times! 
Price dropeed under {self.min_value} pln {tracked_price}')\n self.warning_calls = 0\n else:\n print(f\"Current price for Euro in PLN is {tracked_price}\")", "def if_value_higher_3000():\n res = requests.get(\"https://www.nasdaq.com/\")\n SnP500_value = extractor.findall(res.text)[0]\n # You can see this result from the log\n print(SnP500_value)\n if float(SnP500_value) > 3000:\n return 'send_email'\n else:\n return 'do_nothing'", "def voltageHighEnough(self, Vm):\n if Vm > 230 * 0.88:\n return True\n else:\n return False", "def _validate_value(self, value):\n if self.limits[0] <= value <= self.limits[1]:\n return True\n else:\n return False", "def validate(c_name, val):\n n = 80\n threshold = 4\n while (threshold >= 0):\n if ((len(channels[c_name]) > n) and (val <= threshold)):\n return True\n else:\n n -= 20\n threshold -= 1\n\n return False", "def supports(self, x):\n return 0.0 < x", "def supports(self, x):\n return 0.0 < x", "def check(self):\n self.lower_bound(5e-4)\n self.upper_bound(5e2)", "def constrain(value):\n size = 2**m\n return (value%size)", "def is_acceptable_multiplier(m):\n return 1 < m < (2 ** 61 - 1)", "def Catch(X,Tolerance=0):\n if X < (.5-(Tolerance/2)):\n return(0)\n elif X > (.5+(Tolerance/2)):\n return(1)\n else:\n return(.5)", "def test_optimal_thickness():\n structure = Material(input)\n assert (structure.calc_optimal_thickness() == 1.9552936422413782)", "def test_c(self):\n self.failIf(cgs.speed_of_light/mks.speed_of_light!=100)", "def approximate(val):\r\n if val >=2 or val == 3:\r\n return 250\r\n elif val >=1:\r\n return 150\r\n elif val >=0:\r\n return 50", "def check_convergency(self):\n if self.vars['ema_trace'][self.vars['step']] <= self.settings[\"emaSpeedTol\"]:\n return True\n else:\n return False", "def is_high(speed: dict) -> dict:\n if speed['speed'] >= 100 or speed['speed'] <= 90:\n speed['exceeds'] = True\n else: \n speed['exceeds'] = False\n return speed", "def checkFloat(comment, value, expected, tol=1e-10, update=True):\n if np.isnan(value) and np.isnan(expected):\n res = True\n elif np.isnan(value) or np.isnan(expected):\n res = False\n else:\n res = abs(value - expected) <= tol\n if update:\n if not res:\n print(\"checking float\",comment,'|',value,\"!=\",expected)\n results[\"fail\"] += 1\n else:\n results[\"pass\"] += 1\n return res", "def _validate(self, instance, value):\n\n if not isinstance(value, Real):\n raise TypeError(f\"Value for {self.prop_name} shoulde be real numbers.\")\n\n if (\n self.min_val is not None\n and value < self.min_val\n and not isclose(value, self.min_val)\n ):\n raise ValueError(\n f\"Value should be greater than or equal to {self.min_val}.\"\n )\n\n if (\n self.max_val is not None\n and value > self.max_val\n and not isclose(value, self.max_val)\n ):\n raise ValueError(f\"Value should be less than or equal to {self.max_val}.\")", "def wilight_trigger(value: Any) -> str | None:\n step = 1\n err_desc = \"Value is None\"\n result_128 = False\n result_24 = False\n result_60 = False\n result_2 = False\n\n if value is not None:\n step = 2\n err_desc = \"Expected a string\"\n\n if (step == 2) & isinstance(value, str):\n step = 3\n err_desc = \"String should only contain 8 decimals character\"\n if re.search(r\"^([0-9]{8})$\", value) is not None:\n step = 4\n err_desc = \"First 3 character should be less than 128\"\n result_128 = int(value[0:3]) < 128\n result_24 = int(value[3:5]) < 24\n result_60 = int(value[5:7]) < 60\n result_2 = int(value[7:8]) < 2\n\n if (step == 4) & result_128:\n step = 5\n err_desc = \"Hour 
part should be less than 24\"\n\n if (step == 5) & result_24:\n step = 6\n err_desc = \"Minute part should be less than 60\"\n\n if (step == 6) & result_60:\n step = 7\n err_desc = \"Active part should be less than 2\"\n\n if (step == 7) & result_2:\n return value\n\n raise vol.Invalid(err_desc)" ]
[ "0.6649637", "0.6314969", "0.6307575", "0.6263273", "0.621036", "0.61276513", "0.61120135", "0.60805684", "0.6008309", "0.59797776", "0.59608775", "0.59385926", "0.59351903", "0.5876801", "0.5854414", "0.5851238", "0.58508086", "0.58508086", "0.58364314", "0.58322096", "0.58270305", "0.5821932", "0.5807647", "0.58010083", "0.57969", "0.57900244", "0.57808346", "0.57776856", "0.57740617", "0.57681596" ]
0.788576
0
Powers or unpowers the bells.
def power_bells(state): if not pinlessMode: if state: for pin in bellPins: GPIO.output(pin, GPIO.HIGH) elif not state: for pin in bellPins: GPIO.output(pin, GPIO.LOW) else: logging.debug("Bell state: " + str(state))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handlePowers(self):\n self.player.sprite.handlePowers(self.get_remaining_time())", "def poweroff(self) -> None:\n pass", "def poweroff(self):\n raise NotImplementedError()", "def set_powers(self, power_1, power_2):\n pass", "def power_off(self):\n raise NotImplementedError", "def power(self, on_off, bulb_index=-1):\n\n # TODO: Throw an error if value not in range\n\n on_off = int(on_off)\n if bulb_index == -1:\n for bulb in self.bulbs:\n if on_off == PowerStates.ON:\n bulb.turn_on(effect=\"smooth\", duration=1000)\n if on_off == PowerStates.OFF:\n bulb.turn_off(effect=\"smooth\", duration=1000)\n if on_off == PowerStates.SWITCH:\n bulb.toggle(effect=\"smooth\", duration=1000)\n elif bulb_index <= len(self.bulbs):\n if on_off == PowerStates.ON:\n self.bulbs[bulb_index].turn_on(effect=\"smooth\", duration=1000)\n if on_off == PowerStates.OFF:\n self.bulbs[bulb_index].turn_off(effect=\"smooth\", duration=1000)\n if on_off == PowerStates.SWITCH:\n self.bulbs[bulb_index].toggle(effect=\"smooth\", duration=1000)", "def injure(self, amount):\n self.health -= amount\n if self.health < 0.0:\n self.health = 0.0\n if self.health > 1.0:\n self.health = 1.0", "def turn_effects(self):\n if self.side_effects[\"shield\"] > 0:\n self.side_effects[\"shield\"] -= 1", "def set_power(self, dbm=-30):\r\n self.write(\"POW \"+str(dbm))", "def consume_by(self, character):\n power = self.options[self.power]\n if power == 'bomb':\n self.increase_bombs(character)\n\n elif power == 'radius':\n self.increase_radius(character)\n\n elif power == 'speed':\n self.increase_speed(character)\n\n self.physics.blocks['powerup'].remove(self.rects[0])\n self.pjs.powerups.remove(self)", "def _buy(self, units=1):\n self.quantity -= units", "def test_weight_decrease(self):\n new_weight = (1 - 0.05) * self.herb.weight\n self.herb.weightloss()\n nt.assert_equal(round(self.herb.weight, 7), round(new_weight, 7))", "def power_on(self):\n pass", "def set_power(self, dbm=-30):\r\n self.write(\"SOURce1:POWer:POWer \"+str(dbm))", "def set_power(self, dbm=-30):\r\n self.write(\"SOURce1:POWer:POWer \"+str(dbm))", "def power(self):\r\n return self.model * self.percent / 100", "def _doPowerState(self, state=False):\n if state:\n self._cmdPowerOn()\n else:\n self._cmdPowerOff()", "def power_on(self):\n raise NotImplementedError", "def do_damage(self) -> float:\n sum = 0\n for operator in self.__operators:\n if operator.is_alive:\n operator.experience += 1\n sum += operator.experience / 100\n return 0.1 + sum", "def set_power(self, dbm=-30):\r\n _debug('simq03b_api.set_power')\r\n \r\n self.write(\"POWer \"+str(dbm))", "def get_setPower(self):\n self.read(\":POW?\")", "def simulate_power(self):\n if self.p_treatment - self.p_control < 0:\n thresh = 1 - self.alpha\n else:\n thresh = self.alpha\n\n try:\n p_crit = self.norm_null.ppf(1 - thresh)\n beta = self.norm_alt.cdf(p_crit)\n except:\n self.norm_distribution()\n p_crit = self.norm_null.ppf(1 - thresh)\n beta = self.norm_alt.cdf(p_crit)\n\n power = (1 - beta) if self.p_treatment > self.p_control else beta\n self.power = power\n\n return power", "def decrease_bullet_power(screen, player):\n\n #If the player bullet power is greater than 1\n if player.get_bullet_power() > 1:\n\n #Decrease the player bullet power\n player.increase_bullet_power(-1)", "def powerWeapons(self, interval, availPower):\n if self.allWeaponsPowered == 0:\n weaponList = []\n for position, myQuad in self.quads.iteritems():\n weaponIDList = []\n weaponIDList.extend(funcs.sortStringList(myQuad.weapons.keys()))\n for wID 
in weaponIDList:\n weaponList.append(myQuad.weapons[wID])\n\n while availPower > 0 and self.allWeaponsPowered == 0:\n toCharge = []\n toChargeAMS = []\n # go through each quadrant looking for weapons to power\n for myWeapon in weaponList:\n if myWeapon.operational == 1 and myWeapon.currentPower < myWeapon.myWeaponData.maxPower:\n if 1 == myWeapon.myWeaponData.AMS:\n toChargeAMS.append(myWeapon)\n else:\n toCharge.append(myWeapon)\n\n if len(toChargeAMS) == 0 and len(toCharge) == 0:\n self.allWeaponsPowered = 1\n return availPower\n\n #AMS are charged first and sequentially\n if len(toChargeAMS) != 0:\n if availPower !=0:\n for myW in toChargeAMS:\n defecit=myW.myWeaponData.maxPower - myW.currentPower\n if defecit >= availPower:\n myW.currentPower+=availPower\n availPower=0\n break\n else:\n myW.currentPower=myW.myWeaponData.maxPower\n availPower-=defecit\n\n #non-AMS weapons are charged concurrently; each gets an equal share of the available power \n if len(toCharge) != 0:\n kW=availPower/len(toCharge)\n if kW !=0:\n #print \"tT:\",len(toCharge),\"aP:\",availPower,\"kW each:\",kW\n for myW in toCharge:\n defecit=myW.myWeaponData.maxPower - myW.currentPower\n if defecit >= kW:\n myW.currentPower+=kW\n availPower-=kW\n else:\n myW.currentPower=myW.myWeaponData.maxPower\n availPower-=kW-defecit\n else:\n availPower=0\n\n return availPower", "def sprinkler_tick(self, water_available=-1):\n # idk if Python uses pass by reference, so I'm copying this variable just to be safe\n available_water = water_available\n for room in self.room_list:\n if room.sprinkling and room.fire_level > 0 and available_water != 0:\n room.fire_level -= 1\n available_water -= 1\n ## decrement num_onfire if fire was fully extinguished\n if room.fire_level == 0:\n self.num_onfire -= 1", "def stop(self):\n self.change_power(0)", "def run(self):\n self.coffee_machine.water_tank.decrease_weight(self.coffee_machine.chosen_coffee_data.get('water_weight'))", "def power():\n request_command(tv_command=TVCommand.power)", "def heal(self):\n self.infected = False", "def run(self):\n self.coffee_machine.beans_tank.decrease_weight(self.coffee_machine.chosen_coffee_data.get('beans_weight'))" ]
[ "0.64014935", "0.6041298", "0.5899135", "0.5873446", "0.58522767", "0.5775622", "0.5713468", "0.5671503", "0.5662006", "0.56604326", "0.56582326", "0.5653668", "0.56442696", "0.5575584", "0.5575584", "0.55735165", "0.55625665", "0.5539788", "0.55348086", "0.5527003", "0.55260134", "0.5512618", "0.55040425", "0.5503478", "0.548852", "0.54747856", "0.54724145", "0.54581887", "0.5446831", "0.5444172" ]
0.6172118
1
Rings the school bells in a pattern for the given schedule/time.
def ring_bells(): # Need to get the pattern for this time slot and apply it. curTime = time.strftime("%H:%M") if curTime not in jsonConfig["schedules"][curSchedule]: logging.error("Couldn't find time record for time " + curTime + " in schedule " + curSchedule) return # Obtain the pattern to use. pattern = jsonConfig["schedules"][curSchedule][curTime] if pattern not in jsonConfig["patterns"]: logging.error("Could not find pattern '" + pattern + "'.") return # Play the pattern. logging.debug("Playing bell: " + pattern) bellRings = jsonConfig["patterns"][pattern]["rings"] bellDuration = jsonConfig["patterns"][pattern]["duration"] bellSpacing = jsonConfig["patterns"][pattern]["spacing"] for _ in range(bellRings): power_bells(True) time.sleep(bellDuration) power_bells(False) time.sleep(bellSpacing)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tick(self) -> None:\n\n bell = self._row[self._place]\n user_controlled = self._user_assigned_bell(bell)\n\n self._rhythm.wait_for_bell_time(time.time(), bell, self._row_number, self._place,\n user_controlled, self.stroke)\n\n if not user_controlled:\n self._tower.ring_bell(bell, self.stroke)\n\n self._place += 1\n\n if self._place >= self.number_of_bells:\n # Determine if we're finishing a handstroke\n has_just_rung_rounds = self._row == self._rounds\n\n # Generate the next row and update row indices\n self._row_number += 1\n self._place = 0\n self.start_next_row()\n\n next_stroke = Stroke.from_index(self._row_number)\n\n # ===== SET FLAGS FOR HANDBELL-STYLE RINGING =====\n\n # Implement handbell-style 'up down in'\n if self._do_up_down_in and self._is_ringing_rounds and self._row_number == 2:\n self._should_start_method = True\n\n # Implement handbell-style stopping at rounds\n if self._stop_at_rounds and has_just_rung_rounds and not self._is_ringing_rounds:\n self._should_stand = False\n self._is_ringing = False\n\n # ===== CONVERT THE FLAGS INTO ACTIONS =====\n\n if self._should_start_method and self._is_ringing_rounds \\\n and next_stroke == self.row_generator.start_stroke():\n self._should_start_method = False\n self._is_ringing_rounds = False\n self.start_method()\n\n # If we're starting a handstroke, we should convert all the flags into actions\n if next_stroke.is_hand():\n if self._should_stand:\n self._should_stand = False\n self._is_ringing = False\n\n if self._should_start_ringing_rounds and not self._is_ringing_rounds:\n self._should_start_ringing_rounds = False\n self._is_ringing_rounds = True", "def fillSchedule(self, schedule):\n\n self.rooster = schedule\n\n # select courses from zaalrooster\n courses2 = []\n for key, value in self.rooster.items():\n if key == self.room:\n value = value\n for courses in value:\n for course in courses:\n course = str(course)\n courses2.append(course)\n\n # fill schedule with courses from zaalrooster\n for i in range(5):\n for j in range(5):\n self.w.create_text(100 + i, 150 + j, text = courses2[i], width = 80)\n self.w.create_text(100 + i, 250 + j, text = courses2[i+1], width = 80)\n self.w.create_text(100 + i, 350 + j, text = courses2[i+2], width = 80)\n self.w.create_text(100 + i, 450 + j, text = courses2[i+3], width = 80)\n self.w.create_text(300 + i, 150 + j, text = courses2[i+4], width = 80)\n self.w.create_text(300 + i, 250 + j, text = courses2[i+5], width = 80)\n self.w.create_text(300 + i, 350 + j, text = courses2[i+6], width = 80)\n self.w.create_text(300 + i, 450 + j, text = courses2[i+7], width = 80)\n self.w.create_text(500 + i, 150 + j, text = courses2[i+8], width = 80)\n self.w.create_text(500 + i, 250 + j, text = courses2[i+9], width = 80)\n self.w.create_text(500 + i, 350 + j, text = courses2[i+10], width = 80)\n self.w.create_text(500 + i, 450 + j, text = courses2[i+11], width = 80)\n self.w.create_text(700 + i, 150 + j, text = courses2[i+12], width = 80)\n self.w.create_text(700 + i, 250 + j, text = courses2[i+13], width = 80)\n self.w.create_text(700 + i, 350 + j, text = courses2[i+14], width = 80)\n self.w.create_text(700 + i, 450 + j, text = courses2[i+15], width = 80)\n self.w.create_text(900 + i, 150 + j, text = courses2[i+16], width = 80)\n self.w.create_text(900 + i, 250 + j, text = courses2[i+17], width = 80)\n self.w.create_text(900 + i, 350 + j, text = courses2[i+18], width = 80)\n self.w.create_text(900 + i, 450 + j, text = courses2[i+19], width = 80)\n\n\n mainloop()", "def __init__(self):\n 
self.usablerooms = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 22, 23, 24, 25, 26,\n 27, 28, 29, 30, 33, 34, 35, 36, 39, 40, 41, 42]\n\n buzztimes = [110, 110.5, 111, 111.5, 113, 113.5, 114, 114.5,\n 211, 211.5, 212, 212.5, 214, 214.5, 215, 215.5, ]\n self.buzzerschedule = list(map(lambda time: interval([time, time + .5]), buzztimes))\n\n # fill exam schedule\n examtimes = [110, 111, 114, 115, 118, 119, 120, 209, 211, 213, 214, 215]\n self.examschedule = list(map(lambda time: interval([time, time + 1]), examtimes))\n self.militaryschedule = list(map(lambda time: interval([time, time + 1]), [112, 117, 217]))\n self.geographyschedule = list(map(lambda time: interval([time, time + 1]), [112, 117, 217]))\n self.csaexamschedule = list(map(lambda time: interval([time, time + 1]), [116, 210]))\n\n # fill side schedule\n self.citizenschedule = list(map(lambda time: interval([time, time + .5]), [115, 209]))\n self.sandeschedule = list(map(lambda time: interval([time, time + .5]), [115.5, 209.5]))\n self.anniversaryschedule = list(map(lambda time: interval([time, time + .5]), [213, 213.5]))\n\n # fill bowl schedule\n self.bowlschedule = list(map(lambda time: interval([time, time + 3]), [118, 218]))\n\n # fill fqn schedule\n self.fqnschedule = [interval([118, 118 + 2])]\n\n \"\"\" Initialize rooms. \"\"\"\n # start with buzzer rooms\n self.buzzerrooms = []\n for i, item in enumerate(self.buzzerschedule):\n roundrooms = list(map(lambda j: BuzzerRoom(self.buzzerschedule, i, j), ROOM_RANGE))\n self.buzzerrooms.append(roundrooms)\n\n # anniversary rooms\n self.anniversaryrooms = []\n for i, item in enumerate(self.anniversaryschedule):\n roundrooms = list(map(lambda j: SideEventRoom(\"anniversary\", self.anniversaryschedule, i, j), ROOM_RANGE))\n self.anniversaryrooms.append(roundrooms)\n\n # sports and enterinament rooms\n self.sanderooms = []\n for i, item in enumerate(self.sandeschedule):\n roundrooms = list(map(lambda j: SideEventRoom(\"sande\", self.sandeschedule, i, j), ROOM_RANGE))\n self.sanderooms.append(roundrooms)\n\n # citizenship bee rooms\n self.citizenrooms = []\n for i, item in enumerate(self.citizenschedule):\n roundrooms = list(map(lambda j: SideEventRoom(\"citizen\", self.citizenschedule, i, j), ROOM_RANGE))\n self.citizenrooms.append(roundrooms)\n\n # regular exam rooms\n k = xrange(len(self.examschedule))\n self.examrooms = list(map(lambda j: ExamRoom(\"exam\", self.examschedule, j), k))\n\n # military exam rooms\n k = xrange(len(self.militaryschedule))\n self.militaryrooms = list(map(lambda j: ExamRoom(\"military\", self.militaryschedule, j), k))\n\n # geography subject exam rooms\n k = xrange(len(self.geographyschedule))\n self.geographyrooms = list(map(lambda j: ExamRoom(\"geography\", self.geographyschedule, j), k))\n\n # csa exam rooms\n self.csarooms = []\n for i in xrange(len(self.csaexamschedule)):\n cit = ExamRoom(\"cit\", self.csaexamschedule, i)\n sport = ExamRoom(\"sports\", self.csaexamschedule, i)\n self.csarooms.append((cit, sport))", "def test_rocket():\n ring = [(0,0), (10, 0), (15,5), (10,9), (1,7), (6,4), (0,0)]\n conv = ToPointsAndSegments()\n conv.add_polygon([ring])\n skel = calc_skel(conv, output=True, pause=True)\n print \"DONE\"", "def arrr_starrr_graph(self):\n\n plt.figure()\n total_cost = 0\n\n # plot batteries\n counter = 0\n for batt in self.grid.batteries:\n plt.plot(batt.x, batt.y, marker='x',\n color=colors[counter], markersize=10)\n counter += 1\n\n # iterate over houses and path\n for house in self.grid.houses:\n battery = 
self.grid.batteries[house.connection]\n\n # get path coordinates\n path_data = house.path\n\n # plot path and house\n plt.plot(path_data[0][0], path_data[0][1],\n color=colors[house.connection], linewidth=.3)\n plt.plot(house.x, house.y, marker='p',\n color=colors[house.connection])\n total_cost += path_data[1]\n plt.draw()\n plt.pause(0.000000001)\n\n plt.title(f\"total cost = {total_cost}\")", "def schedulebuzzerrooms(self, field):\n divisions = ['8', '7', 'Elementary']\n field = list(filter(lambda stu: stu.bee, field))\n for player in field:\n player.schedule = list(sorted(player.schedule, key=itemgetter(1)))\n\n # create a list of players in each round (1 - 16)\n playersperround = [[] for _ in range(len(self.buzzerschedule))]\n for i, time in enumerate(self.buzzerschedule, 0):\n for player in field:\n for event in player.schedule:\n if time in event:\n playersperround[i].append(player)\n\n # create a list of rooms being used to try and spread across hotels\n \"\"\"\n totrooms = [[] for _ in range(len(self.buzzerschedule))]\n for i, rnd in enumerate(totrooms):\n k = len(list(filter(lambda stu: stu.division == '8' and stu.seed == 'a', playersperround[i])))\n k += len(list(filter(lambda stu: stu.division == '7' and stu.seed == 'a', playersperround[i])))\n k += len(list(filter(lambda stu: stu.division == 'Elementary' and stu.seed == 'a', playersperround[i])))\n rnd = range(1, MAX_ROOMS + 1)\n numtoremove = MAX_ROOMS - k\n toremove = sample(rnd, numtoremove)\n rnd = [x for x in rnd if x not in toremove]\n shuffle(rnd)\n totrooms[i] = rnd\n \"\"\"\n\n totrooms = [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 22, 23, 24, 25, 26, 27, 28, 29, 30, 33, 34, 35, 36, 39, 40, 41],\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 22, 23, 24, 25, 26, 27, 28, 29, 30, 33, 34, 35, 36, 39, 40, 41],\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 22, 23, 24, 25, 26, 27, 28, 29, 30, 33, 34, 35, 36, 39, 40, 41],\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 22, 23, 24, 25, 26, 27, 28, 29, 30, 33, 34, 35, 36, 39, 40, 41],\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 22, 23, 24, 25, 26, 27, 28, 29, 30, 33, 34, 35, 36, 39, 40, 41, 42],\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 22, 23, 24, 25, 26, 27, 28, 29, 30, 33, 34, 35, 36, 39, 40, 41, 42],\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 22, 23, 24, 25, 26, 27, 28, 29, 30, 33, 34, 35, 36, 39, 40, 41, 42],\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 22, 23, 24, 25, 26, 27, 28, 29, 30, 33, 34, 35, 36, 39, 40, 41, 42],\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 22, 23, 24, 25, 26, 27, 28, 29, 30, 33, 34, 35, 36, 39, 40, 41],\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 22, 23, 24, 25, 26, 27, 28, 29, 30, 33, 34, 35, 36, 39, 40, 41],\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 22, 23, 24, 25, 26, 27, 28, 29, 30, 33, 34, 35, 36, 39, 40, 41],\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 22, 23, 24, 25, 26, 27, 28, 29, 30, 33, 34, 35, 36, 39, 40, 41],\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 22, 23, 24, 25, 26, 27, 28, 29, 30, 33, 34, 35, 36, 39, 40, 41, 42],\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 22, 23, 24, 25, 26, 27, 28, 29, 30, 33, 34, 35, 36, 39, 40, 41, 42],\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 22, 23, 24, 25, 26, 27, 28, 29, 30, 33, 34, 35, 36, 39, 40, 41, 42],\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 22, 23, 24, 25, 26, 27, 28, 29, 30, 33, 34, 35, 36, 39, 40, 41, 42]]\n\n for rnd in totrooms:\n shuffle(rnd)\n\n # for each round, assign the players in totrooms to a room\n count = 0\n for rnd, rooms in enumerate(totrooms):\n tosched = playersperround[rnd]\n for div in divisions:\n divforround = list(filter(lambda stu: stu.division == div, tosched))\n for i in range(\n len(list(filter(lambda stu: stu.division == div 
and stu.seed == 'a', playersperround[rnd])))):\n room = sample(rooms, 1)[0]\n roomplayers = self.pickten(divforround)\n rooms.remove(room)\n for player in roomplayers:\n self.buzzerrooms[rnd][room - 1].addplayer(player)\n for ev in player.schedule:\n if ev[0] == \"History Bee Buzzer Round\" and ev[1] == self.buzzerschedule[rnd]:\n ev[2] = \"ACE Room \" + str(room)\n count += 1", "def _on_bell_ring(self, bell: Bell, stroke: Stroke) -> None:\n if self._user_assigned_bell(bell):\n # This will give us the stroke _after_ the bell rings, we have to invert it, because\n # otherwise this will always expect the bells on the wrong stroke and no ringing will\n # ever happen\n self._rhythm.on_bell_ring(bell, stroke.opposite(), time.time())", "def findRings(graph):\n # TODO add a planarity check?\n rings5 = []\n rings6 = []\n if DEBUG: print \"- starting ring detection...\"\n for head in graph.keys():\n tier1 = graph[head]\n tier2 = []\n tier3 = []\n # populate tier2 \n for node1 in tier1:\n for tmp in graph[node1]:\n if not tmp == head and not tmp in tier2 and (not tmp in tier1) :\n tier2.append(tmp)\n # populate tier3\n for node2 in tier2:\n for tmp in graph[node2]:\n if (not tmp == head) and (not tmp in tier2) and (not tmp in tier1) and (not tmp in tier3):\n tier3.append(tmp)\n # 6 member rings\n for x in tier3:\n candidate = []\n for c in tier2:\n if x in graph[c]:\n if not c in candidate:\n candidate.append(c)\n if len(candidate) >1:\n r6 = [ head ] \n r6.append(x)\n r6 += candidate\n for c in candidate:\n r6 += intersect( graph[head], graph[c])\n r6.sort()\n if not r6 in rings6:\n rings6.append( r6 )\n if DEBUG: print \" 6member!\", r6\n break\n # 5 member rings\n for c1 in tier2:\n for c2 in tier2:\n if not c1 == c2:\n if (c2 in graph[c1]) and (c1 in graph[c2]):\n is_3_ring = False\n for k in graph[c1]:\n if k in graph[c2]: \n is_3_ring =True\n if DEBUG: print \" [ ...catched a cycle_3... 
]\"\n break\n if not is_3_ring :\n r5 = [ head ] \n r5.append(c1)\n r5.append(c2)\n r5 += intersect( graph[head], graph[c1])\n r5 += intersect( graph[head], graph[c2])\n r5.sort()\n if not r5 in rings5:\n if DEBUG: print \" 5member ring!\",r5\n rings5.append(r5)\n break\n return rings5, rings6", "def mock_recurring_another_day_schedule() \\\n -> Generator[SwitcherV2Schedule, Any, None]:\n schedule_patch = patch(\n 'aioswitcher.schedules.SwitcherV2Schedule',\n recurring=True,\n start_time=create_random_time(),\n days=[WEEKDAY_TUP[get_weekday_for_day_delta(3)]])\n\n schedule = schedule_patch.start()\n yield schedule\n schedule_patch.stop()", "def dumbSnake_burst(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime,windowList,startgrid):#for burst mode\n self.sam_x.umv(xStart)\n self.sam_y.umv(windowList[startgrid])\n daq.connect()\n daq.begin()\n sleep(2)\n print('Reached horizontal start position')\n # looping through n round trips\n \n for j in range(len(windowList)-startgrid):\n self.sam_y.umv(windowList[startgrid+j])\n self.sam_y.wait()\n print('Windos position %f'%(self.sam_y.wm()))\n\n for i in range(nRoundTrips):\n try:\n print('starting round trip %d' % (i+1))\n self.sam_x.mv(xEnd)\n sleep(0.1)\n seq.start()#start sequence Need to be set \n #sleep(sweepTime)\n #pp.close()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n print('yposition',self.sam_y.wm())\n sleep(1.2)#wait for turning around \n self.sam_x.mv(xStart)\n sleep(0.1)\n #pp.open()\n seq.start()#start sequence \n #sleep(sweepTime)\n #pp.close()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n print('yposition',self.sam_y.wm())\n sleep(1.2)\n except:\n print('round trip %d didn not end happily' % i)\n \n daq.end_run()\n daq.disconnect()\n\n\n #daq.end()", "def hillclimb_roomlocks2(times, chambers, allcourses, student_list, schedule):\n\n\t# amount of steps hillclimber\n\tfor i in range(0, times):\n\n\t\t# calculate score before swap\n\t\tpoints = calc_score(allcourses, student_list, chambers)\n\n\t\t# perform swap\n\t\troomlock1, roomlock2, chambers, allcourses, student_list, schedule = swap_course2(chambers, allcourses, student_list, schedule)\n\n\t\t# calculate new scores\n\t\tnewpoints = calc_score(allcourses, student_list, chambers)\n\n\t\t# if new score lower than old score\n\t\tif newpoints < points:\n\n\t\t\t# swap back\n\t\t\troomlock1, roomlock2, chambers, allcourses, student_list, schedule = swap_course2(chambers, allcourses, student_list, schedule, roomlock2, roomlock1)\n\n\t\t\t# calculate new score\n\t\t\tnewpoints = calc_score(allcourses, student_list, chambers)\n\n\t\t\t# if back-swap didn't go well\n\t\t\tif points != newpoints:\n\n\t\t\t\t# print courses and break loop\n\t\t\t\tprint(roomlock2, roomlock1)\n\t\t\t\tprint(\"ERROR\")\n\t\t\t\tbreak", "def week_schedule(year, stype, week):\n url = schedule_url(year, stype, week)\n try:\n dom = xml.parse(urllib.request.urlopen(url))\n except urllib.error.HTTPError:\n print >> sys.stderr, 'Could not load %s' % url\n return []\n\n games = []\n for g in dom.getElementsByTagName(\"g\"):\n gsis_id = g.getAttribute('eid')\n games.append({\n 'eid': gsis_id,\n 'wday': g.getAttribute('d'),\n 'year': year,\n 'month': int(gsis_id[4:6]),\n 'day': int(gsis_id[6:8]),\n 'time': g.getAttribute('t'),\n 'meridiem': None,\n 'season_type': stype,\n 'week': week,\n 'home': g.getAttribute('h'),\n 'away': g.getAttribute('v'),\n 'gamekey': g.getAttribute('gsis'),\n })\n\n for game in games:\n h = int(game['time'].split(':')[0])\n m = int(game['time'].split(':')[1])\n if 0 < h <= 5: # All games 
before \"6:00\" are PM until proven otherwise\n game['meridiem'] = 'PM'\n\n if game['meridiem'] is None:\n\n days_games = [g for g in games if g['wday'] == game['wday']]\n preceeding = [g for g in days_games if g['eid'] < game['eid']]\n proceeding = [g for g in days_games if g['eid'] > game['eid']]\n\n # If any games *after* this one are AM then so is this\n if any(g['meridiem'] == 'AM' for g in proceeding):\n game['meridiem'] = 'AM'\n # If any games *before* this one are PM then so is this one\n elif any(g['meridiem'] == 'PM' for g in preceeding):\n game['meridiem'] = 'PM'\n # If any games *after* this one have an \"earlier\" start it's AM\n elif any(h > t for t in [int(g['time'].split(':')[0]) for g in proceeding]):\n game['meridiem'] = 'AM'\n # If any games *before* this one have a \"later\" start time it's PM\n elif any(h < t for t in [int(g['time'].split(':')[0]) for g in preceeding]):\n game['meridiem'] = 'PM'\n\n if game['meridiem'] is None:\n if game['wday'] not in ['Sat', 'Sun']:\n game['meridiem'] = 'PM'\n if game['season_type'] == 'POST':\n game['meridiem'] = 'PM'\n\n return games", "def plot_rings(self,x_shift,y_shift):\n\n if not self.rings: return # Bounce if option not selected\n\n patches = []\n colours = []\n for ring in self.ring_crds:\n ring[:,0] += x_shift*self.pbc[0]\n ring[:,1] += y_shift*self.pbc[1]\n patches.append(Polygon(np.array(ring), True))\n colours.append(self.ring_colours[ring[:,0].size])\n ring[:,0]-=x_shift*self.pbc[0]\n ring[:,1]-=y_shift*self.pbc[1]\n self.ax.add_collection(PatchCollection(patches,facecolor=colours,linewidths=self.lw,edgecolor=\"k\",zorder=0))\n\n patches = []\n for ring in self.perimeter_ring_crds:\n ring[:,0] += x_shift*self.pbc[0]\n ring[:,1] += y_shift*self.pbc[1]\n patches.append(Polygon(np.array(ring), True))\n ring[:,0]-=x_shift*self.pbc[0]\n ring[:,1]-=y_shift*self.pbc[1]\n self.ax.add_collection(PatchCollection(patches,facecolor=(0,0,0,0),linewidths=self.lw*3,edgecolor=\"orange\",zorder=0))", "def hillclimb_roomlocks(times, chambers, allcourses, student_list, schedule):\n\n\t# amount of steps hillclimber\n\tfor i in range(0, times):\n\n\t\t# calculate score before swap\n\t\tpoints = calc_score(allcourses, student_list, chambers)\n\n\t\t# perform swap\n\t\tcourse1, activity1, course2, activity2, schedule = swap_course(chambers, allcourses, student_list, schedule)\n\n\t\t# calculate new scores\n\t\tnewpoints = calc_score(allcourses, student_list, chambers)\n\n\t\t# if new score lower than old score\n\t\tif newpoints < points:\n\n\t\t\t# swap back\n\t\t\tswap_course(chambers, allcourses, student_list, schedule, course1, activity1, course2, activity2)\n\n\t\t\t# calculate new score\n\t\t\tnewpoints = calc_score(allcourses, student_list, chambers)\n\n\t\t\t# if back-swap didn't go well\n\t\t\tif points != newpoints:\n\n\t\t\t\t# print courses and break loop\n\t\t\t\tprint(course2, course1)\n\t\t\t\tprint(\"ERROR\")\n\t\t\t\tbreak\n\n\treturn newpoints", "def reload_schedule():\n global jsonConfig\n global curSchedule\n\n jsonConfig = None\n curSchedule = None\n\n # Clear currently scheduled bells.\n schedule.clear(\"current\")\n\n logging.debug(\"Reloading schedule...\")\n with open(jsonFile) as jsonFileHandle:\n jsonConfig = json.load(jsonFileHandle)\n\n # Check that default structure for json config is respected.\n if \"calendar\" not in jsonConfig or \"default\" not in jsonConfig[\"calendar\"]:\n logging.error(\"Malformed json config. 
Invalid calendar table.\")\n return\n elif \"schedules\" not in jsonConfig:\n logging.error(\"Malformed json config. Invalid schedules table.\")\n return\n elif \"patterns\" not in jsonConfig:\n logging.error(\"Malformed json config. Invalid patterns table.\")\n return\n\n # Check to see if this date has a specific schedule.\n curDate = datetime.datetime.today().strftime(\"%Y-%m-%d\")\n if curDate in jsonConfig[\"calendar\"]:\n curSchedule = jsonConfig[\"calendar\"][curDate]\n else:\n # If this isn't a special day, we look up the schedule by day of the week.\n curDayOfWeek = datetime.datetime.now().strftime(\"%A\")\n if curDayOfWeek in jsonConfig[\"calendar\"][\"default\"]:\n curSchedule = jsonConfig[\"calendar\"][\"default\"][curDayOfWeek]\n else:\n logging.debug(\"No schedule found for date.\")\n return\n\n # Now that we have the schedule to use, does it exist?\n if curSchedule not in jsonConfig[\"schedules\"]:\n logging.error(\"Schedule\" + curSchedule + \" not found in json config. Aborting.\")\n return\n\n # Add bells for this schedule.\n for bellTime in jsonConfig[\"schedules\"][curSchedule]:\n schedule.every().day.at(bellTime).do(ring_bells).tag(\"current\")\n logging.debug(\"Scheduled bells using pattern '\" + jsonConfig[\"schedules\"][curSchedule][bellTime] + \"' at \" + bellTime)", "def create_rent_schedule_landlord(self):\n rent_obj = self.env['tenancy.rent.schedule']\n for tenancy_rec in self:\n amount = tenancy_rec.landlord_rent\n if tenancy_rec.rent_type_id.renttype == 'Weekly':\n d1 = tenancy_rec.date_start\n d2 = tenancy_rec.date\n interval = int(tenancy_rec.rent_type_id.name)\n if d2 < d1:\n raise Warning(\n _('End date must be greater than start date.'))\n wek_diff = (d2 - d1)\n wek_tot1 = (wek_diff.days) / (interval * 7)\n wek_tot = (wek_diff.days) % (interval * 7)\n if wek_diff.days == 0:\n wek_tot = 1\n if wek_tot1 > 0:\n for wek_rec in range(wek_tot1):\n rent_obj.create(\n {\n 'start_date': d1,\n 'amount': amount * interval or 0.0,\n 'property_id': tenancy_rec.property_id and\n tenancy_rec.property_id.id or False,\n 'tenancy_id': tenancy_rec.id,\n 'currency_id': tenancy_rec.currency_id.id or\n False,\n 'rel_tenant_id': tenancy_rec.tenant_id.id\n })\n d1 = d1 + relativedelta(days=(7 * interval))\n if wek_tot > 0:\n one_day_rent = 0.0\n if amount:\n one_day_rent = (amount) / (7 * interval)\n rent_obj.create({\n 'start_date': d1.strftime(\n DEFAULT_SERVER_DATE_FORMAT),\n 'amount': (one_day_rent * (wek_tot)) or 0.0,\n 'property_id': tenancy_rec.property_id and\n tenancy_rec.property_id.id or False,\n 'tenancy_id': tenancy_rec.id,\n 'currency_id': tenancy_rec.currency_id.id or False,\n 'rel_tenant_id': tenancy_rec.tenant_id.id\n })\n elif tenancy_rec.rent_type_id.renttype != 'Weekly':\n if tenancy_rec.rent_type_id.renttype == 'Monthly':\n interval = int(tenancy_rec.rent_type_id.name)\n if tenancy_rec.rent_type_id.renttype == 'Yearly':\n interval = int(tenancy_rec.rent_type_id.name) * 12\n d1 = tenancy_rec.date_start\n d2 = tenancy_rec.date\n diff = abs((d1.year - d2.year) * 12 + (d1.month - d2.month))\n tot_rec = diff / interval\n tot_rec2 = diff % interval\n if abs(d1.month - d2.month) >= 0 and d1.day < d2.day:\n tot_rec2 += 1\n if diff == 0:\n tot_rec2 = 1\n if tot_rec > 0:\n tot_rec = int(tot_rec)\n for rec in range(tot_rec):\n rent_obj.create({\n 'start_date': d1.strftime(\n DEFAULT_SERVER_DATE_FORMAT),\n 'amount': amount * interval or 0.0,\n 'property_id': tenancy_rec.property_id and\n tenancy_rec.property_id.id or False,\n 'tenancy_id': tenancy_rec.id,\n 
'currency_id': tenancy_rec.currency_id.id or\n False,\n 'rel_tenant_id': tenancy_rec.tenant_id.id\n })\n d1 = d1 + relativedelta(months=interval)\n if tot_rec2 > 0:\n rent_obj.create({\n 'start_date': d1.strftime(DEFAULT_SERVER_DATE_FORMAT),\n 'amount': amount * tot_rec2 or 0.0,\n 'property_id': tenancy_rec.property_id and\n tenancy_rec.property_id.id or False,\n 'tenancy_id': tenancy_rec.id,\n 'currency_id': tenancy_rec.currency_id.id or False,\n 'rel_tenant_id': tenancy_rec.tenant_id.id\n })\n return self.write({'rent_entry_chck': True})", "def gen_rhombus(width):\n for row in range(1, width +1, 2):\n yield f\"{(STAR * row).center(width)}\"\n\n for row in range(width -2, 0, -2):\n yield f\"{(STAR * row).center(width)}\"", "def seasonal_pattern(season_time):\n\treturn np.where(season_time < 0.4,\n\t\t\t\t\tnp.cos(season_time * 2 * np.pi),\n\t\t\t\t\t1 / np.exp(3* season_time))", "def timinggrid(self):\n\n gelem = Element(\"g\") # create a group\n for i in range(int(self.cycles)):\n\n lelem = Element(\"line\")\n lelem.attrib['x1'] = str(i*self.period + self.period/2.0 + self.xzero)\n lelem.attrib['y1'] = str(0);\n lelem.attrib['x2'] = str(i*self.period + self.period/2.0 + self.xzero)\n lelem.attrib['y2'] = str(self.signalcnt*(self.height + self.signalspacing) + self.signalspacing)\n lelem.attrib['stroke'] = \"grey\"\n lelem.attrib['stroke-width'] = \"0.5\"\n gelem.append(lelem)\n\n \n self.svgelem.append(gelem)\n self.svgelem.append(self.signalselem)", "def a_star(start, end, board):\n board_n = board\n closed_set = deque()\n open_set = deque()\n open_set.append(start)\n\n path = list()\n\n while open_set:\n lowest_f_index = 0\n for i, node in enumerate(open_set):\n if open_set[i].f < open_set[lowest_f_index].f:\n lowest_f_index = i\n # Adds an additional check in case the f values are similar. 
Then we compare the g score instead\n # and find the lowest\n if open_set[i].f == open_set[lowest_f_index].f:\n if open_set[i].g < open_set[lowest_f_index].g:\n lowest_f_index = i\n\n current_node = open_set[lowest_f_index]\n\n if current_node == end:\n tmp = current_node\n path.append(tmp)\n while tmp.previous:\n path.append(tmp.previous)\n tmp = tmp.previous\n for elem in path[1:-1]: \n elem.symbol = '▪'\n draw_4k(board_n, wait = True)\n\n open_set.remove(current_node)\n closed_set.append(current_node)\n\n neighbors = current_node.neighbors\n for nb in neighbors:\n if nb in closed_set: #Doesnt check walls here since there is no walls\n continue\n \n tmp_g = current_node.g + nb.cost # Adds the cost of the neighbor cell to the tentative g score instead of just 1\n\n if nb not in open_set:\n open_set.append(nb)\n \n elif tmp_g >= nb.g:\n continue\n\n nb.previous = current_node \n nb.g = tmp_g \n nb.h = calculate_manhattan(nb, end)\n nb.f = nb.g + nb.h", "def ggpl_spiral_staircase(dx,dy,dz):\n\tnstep = int(dy*2.7)+1\n\t\"\"\" steps parameters \"\"\"\n\triserHeight = (0.50*dy)/nstep\n\ttreadDept = (0.6300-riserHeight)/2.0\n\t\"\"\" number of steps and length of landing for each side \"\"\"\n\tlandingLengthY=dy-((nstep+1)*treadDept)\n\tif dx>dy:\n\t\tstepWidth = landingLengthY\n\telse:\n\t\tstepWidth = dx/2.5\n\t\tlandingLengthY = stepWidth\n\tnsteplatox = int(((dx-2*stepWidth)/treadDept)+0.5) \n\tlandingLengthX=stepWidth\n\tnsteplatoy = int(((dy-stepWidth-landingLengthY)/treadDept)+0.5)\n\t\"\"\" skeleton of the box that contains the stair \"\"\"\n\tbox = SKEL_1(CUBOID([dx,dy,dz]))\n\t\"\"\" total steps \"\"\"\n\ttotalSteps = int((dz/riserHeight))\n\t\"\"\" number and height of floor \"\"\"\n\tnfloor = int(round(dz/2)+1)\n\theightfloor = (nsteplatoy)*riserHeight\n\t\"\"\" first stair \"\"\"\n\tstair=make_stair(nsteplatoy,treadDept,riserHeight,landingLengthY+treadDept,stepWidth,1)\n\tstair = T(2)([dy-((nsteplatoy+2)*treadDept)-landingLengthY]) (stair)\n\t\"\"\" variable that takes into account the number of steps made \"\"\"\n\trealizedStep = nsteplatoy\n\tr =4\n\n\t\"\"\" realization of the stairs \"\"\"\n\tfor j in range(int(nfloor)*2):\n\t\t\"\"\" condition for the realization of the final stair \"\"\"\n\t\tif (totalSteps-realizedStep<=nsteplatox) or (totalSteps-realizedStep<=nsteplatoy):\n\t\t\tif (totalSteps-realizedStep<=nsteplatox) and r%2==1:\n\t\t\t\tfinalStair = make_stair((totalSteps-realizedStep-1),treadDept,riserHeight,dy-stepWidth-(totalSteps-realizedStep-1)*treadDept,stepWidth,2)\n\t\t\telse:\n\t\t\t\tfinalStair = make_stair((totalSteps-realizedStep-1),treadDept,riserHeight,dx-stepWidth-(totalSteps-realizedStep-1)*treadDept,stepWidth,2)\n\t\t\t\t\"\"\" rotation and translation of the scale in the correct position \"\"\"\n\t\t\tif r==4:\n\t\t\t\tfinalStair=R([1,2])(3*PI/2)(finalStair)\n\t\t\t\tfinalStair = T([1,2,3])([stepWidth-treadDept,dy,heightfloor])(finalStair)\n\t\t\t\tstair = STRUCT([stair,finalStair])\n\t\t\t\tbreak\n\t\t\tif r==1:\n\t\t\t\tfinalStair = R([1,2])(PI)(finalStair)\n\t\t\t\tfinalStair = T([1,2,3])([dx,dy-landingLengthY+treadDept ,heightfloor])(finalStair)\n\t\t\t\tstair = STRUCT([stair,finalStair])\n\t\t\t\tbreak\n\t\t\tif r==2:\n\t\t\t\tfinalStair = R([1,2])(PI/2)(finalStair)\n\t\t\t\tfinalStair = T([1,2,3])([dx-landingLengthY+treadDept,0,heightfloor])(finalStair)\n\t\t\t\tstair = STRUCT([stair,finalStair])\n\t\t\t\tbreak\n\t\t\tif r==3:\n\t\t\t\tfinalStair = T([1,2,3])([0,stepWidth-treadDept,heightfloor])(finalStair)\n\t\t\t\tstair = 
STRUCT([stair,finalStair])\n\t\t\t\tbreak\n\n\t\telse:\n\t\t\tif j%4== 0:\n\t\t\t\tstepsX = make_stair(nsteplatox,treadDept,riserHeight,landingLengthX,stepWidth,1)\n\t\t\t\tstepsX = R([1,2])(3*PI/2)(stepsX)\n\t\t\t\tstepsX = T([1,2,3])([stepWidth-treadDept,dy,heightfloor])(stepsX)\n\t\t\t\tstair = STRUCT([stair,stepsX])\n\t\t\t\theightfloor += (nsteplatox+1)*riserHeight \n\t\t\t\trealizedStep += nsteplatox+1\n\t\t\t\tr=1\n\t\t\tif j%4== 1:\n\t\t\t\tstepsY = make_stair(nsteplatoy,treadDept,riserHeight,dy-nsteplatoy*treadDept-stepWidth,stepWidth,1)\n\t\t\t\tstepsY = R([1,2])(PI)(stepsY)\n\t\t\t\tstepsY = T([1,2,3])([dx,dy-landingLengthY+treadDept ,heightfloor])(stepsY)\n\t\t\t\tstair = STRUCT([stair,stepsY])\n\t\t\t\theightfloor += (nsteplatoy+1)*riserHeight \n\t\t\t\trealizedStep += nsteplatoy+1\n\t\t\t\tr=2\n\t\t\tif j%4== 2:\n\t\t\t\tstepsX = make_stair(nsteplatox,treadDept,riserHeight,landingLengthX,stepWidth,1)\n\t\t\t\tstepsX = R([1,2])(PI/2)(stepsX)\n\t\t\t\tstepsX = T([1,2,3])([dx-landingLengthY+treadDept,0,heightfloor])(stepsX)\n\t\t\t\tstair = STRUCT([stair,stepsX])\n\t\t\t\theightfloor += (nsteplatox+1)*riserHeight \n\t\t\t\trealizedStep += nsteplatox+1\n\t\t\t\tr=3\n\t\t\tif j%4== 3:\n\t\t\t\tstepsY = make_stair(nsteplatoy,treadDept,riserHeight,landingLengthY,stepWidth,1)\n\t\t\t\tstepsY = T([1,2,3])([0,stepWidth-treadDept,heightfloor])(stepsY)\n\t\t\t\tstair = STRUCT([stair,stepsY])\n\t\t\t\theightfloor += (nsteplatoy+1)*riserHeight \n\t\t\t\trealizedStep += nsteplatoy+1\n\t\t\t\tr=4\n\t\"\"\"floor of the stair\"\"\"\n\tfloor = CUBOID([dx,dy,0.05])\n\tfloor = TEXTURE(\"texture/floorStair.jpg\")(floor)\n\n\treturn STRUCT([stair,floor,box])", "def test_burst_loop(self):\n chans, gains, scans, rate = (10,10,10,10), (1,2,4,5), 1024, 2048\n v = [v[0] for v in self.l.burst_sync(\n channels=chans, gains=gains,\n num_scans=scans, rate=rate)]\n for vi in v:\n for r in vi:\n self.assertTrue(abs(r-2.5) < .1,\n \"%s should be cal, 2.5v\" % vi[0])", "def selectBestSchedule(self, remainder):\n # gas boiler? 
no schedules available!\n if self.getTER1() == 0:\n return -1\n\n\n #load_sched = [[0 for x in range(len(self.schedules[0])-1)] for y in range(self.noOfSchedules)]\n abs_sum = [0 for x in range(self.noOfSchedules)]\n max_min_diff = [0 for x in range(self.noOfSchedules)]\n #remainder_average = [0 for x in range(self.noOfSchedules)]\n #NO_worse_slots = [0 for x in range(self.noOfSchedules)] # saves number of timeslots in which the remainder is worse for each schedule\n\n min_diff = 0\n idx_min_diff = -1\n child_load = [0 for x in range(len(self.schedules[0])-1)]\n\n\n #if self.Children: # if not a leave node: use local knowledge about child loads\n # for c in range(len(self.Children)):\n # for t in range(len(child_load)):\n # child_load[t] += self.EConsumptionChildCurves[c][t]\n\n for s in range(self.noOfSchedules):\n\n current_remainder = [0 for x in range(len(remainder))]\n current_remainder_abs = [0 for x in range(len(remainder))]\n\n for t in range(len(remainder)):\n # add schedule load curve to compensation curve\n current_remainder[t] = remainder[t] + self.EConsumptionScheduleCurves[s][t] #- child_load[t]\n\n # as currently chosen schedule is included in remainder, subtract it (if not in first round)\n if self.chosenScheduleIndex != -1:\n current_remainder[t] -= self.EConsumptionChosenSchedule[t]\n\n current_remainder_abs[t] = abs(current_remainder[t])\n #if current_remainder_abs[t] > remainder[t]:\n # NO_worse_slots[s] += 1\n\n\n # accumulated absolute gradients as measure for similarity of curves\n abs_sum[s] = sum(current_remainder_abs)\n max_min_diff[s] = max(current_remainder)- min(current_remainder)\n #remainder_average[s] = sum(current_remainder_abs)/len(current_remainder_abs)\n\n #print 'abs_grad_sum: {0}'.format(abs_grad_sum[s])\n\n # new minimal abs difference?\n if self.OPTcriterion == 'maxmindiff':\n if idx_min_diff == -1 or min_diff - max_min_diff[s] > 0.001 : # min difference is 0.001 Watt to avoid oscillations\n idx_min_diff = s\n min_diff = max_min_diff[s]\n elif self.OPTcriterion == 'absremainder':\n if idx_min_diff == -1 or min_diff - abs_sum[s] > 0.001 : # min difference is 0.001 Watt to avoid oscillations\n idx_min_diff = s\n min_diff = abs_sum[s]\n\n if (idx_min_diff != self.chosenScheduleIndex):\n self.chosenSchedule = copy.deepcopy(self.schedules[idx_min_diff])\n if self.chosenScheduleIndex != -1:\n self.prevChosenScheduleIndex = self.chosenScheduleIndex # remember previously chosen schedule\n self.chosenScheduleIndex = idx_min_diff\n self.EConsumptionChosenSchedule = copy.deepcopy(self.EConsumptionScheduleCurves[idx_min_diff])\n #print 'ID {0}: new schedule has index {1}'.format(self.CommID, idx_min_diff)\n return 1\n else:\n if self.chosenScheduleIndex != -1:\n self.prevChosenScheduleIndex = self.chosenScheduleIndex\n #print 'ID {0}: new schedule = old schedule with index {1}'.format(self.CommID, self.chosenScheduleIndex)\n return 0", "def beamPattern(freq, fD, D, efficiency, FFBW, dB_at_bw, feed_type, defects={},lw=2,plotbp=None,plotgp=102,plot_label_prefix='',plot_color=None):\n\n wavelength = 300.0/freq\n # compute diffraction pattern\n dtheta = 0.1\n T = np.arange(-90.0,90.0+dtheta,dtheta)\n G = []\n F = fD*D\n dr = 0.001\n r = np.arange(0.0,1.0+dr,dr)\n tt = (2.0*180.0/math.pi)*np.arctan(r/(4.0*fD))\n g = np.sqrt(feedPattern(freq, tt, FFBW, dB_at_bw=dB_at_bw, feed_type=feed_type)*illuminationFactor(r,fD))\n for theta in T:\n u = (math.pi*D/wavelength)*math.sin(theta*math.pi/180.0)\n kern=[]\n for ii,rint in enumerate(r):\n 
kern.append(g[ii]*spec.jn(0,u*rint)*rint)\n fu = integ.trapz(kern,dx=dr)*math.pi*(D**2)/2.0\n G.append( 10.0*math.log10(fu**2.) )\n G = np.array(G)\n \n # compute error pattern(s) -- see Baars 86-90 (ruze/block)\n if 'ruze_rms' in defects.keys():\n if 'ruze_corr' in defects.keys():\n C = defects['ruze_corr']\n else:\n C = D/25.0 #assume fairly small correlation length\n sigma = 4.0*math.pi*defects['ruze_rms']/wavelength\n if sigma > 1.0:\n ferr = \"Doesn't do anything yet\"\n \n # compute normalized pattern and FWHM\n bp = G - max(G)\n FWHM = calcBW(T,bp,-3.0)\n taper = freespaceTaper(fD) + feedTaper(freq, fD, FFBW, dB_at_bw=dB_at_bw, feed_type=feed_type)\n #print 'FWHM (f/D=%.2f, taper=%.2f) = %.4f' % (fD,taper,FWHM)\n\n # plot beam pattern\n if plotbp is not None:\n plt.figure(plotbp)\n s = '%s%.1f-m: %.1f$^o$' % (plot_label_prefix,D,FWHM)\n if plot_color is not None:\n plt.plot(T,bp,color=plot_color,label=s)\n else:\n plt.plot(T,bp,label=s)\n plt.grid()\n\n # gain pattern: compute, plot and write\n Do = 4.0*np.pi*efficiency*(np.pi*D**2.0/4.0)/(wavelength**2)\n Do = 10.0*np.log10(Do)\n gp = bp + Do\n if plotgp is not None:\n plt.figure(plotgp)\n s = '%s:%.1f: %.1f$^o$' % (plot_label_prefix,D,FWHM)\n if plot_color is not None:\n plt.plot(T,gp,color=plot_color,label=s,lw=lw)\n else:\n plt.plot(T,gp,label=s,lw=lw)\n plt.legend()\n plt.grid()\n bpfn = \"beamPattern%.2f\" % (freq)\n bpfn = bpfn.replace('.','_') + '.dat'\n print \"Writing \",bpfn\n fp = open(bpfn,'w')\n for i,v in enumerate(T):\n s = '%.1f\\t%f\\n' % (v,gp[i])\n fp.write(s)\n fp.close()\n\n return FWHM", "def rainbow(self):\r\n\r\n # hidden feature of speed=0: makes one cycle per ~hour\r\n if self.speed == 0:\r\n self.hidden += 1\r\n if self.hidden > 336:\r\n self.counter += 1\r\n self.hidden = 0\r\n\r\n block = []\r\n if self.cycles == 3:\r\n for i in range(Const.LED_COUNT):\r\n block.append(RainbowModule.wheel((i + self.counter)))\r\n self.counter += self.speed\r\n while self.counter < 0:\r\n self.counter += 306\r\n else:\r\n for i in range(Const.LED_COUNT):\r\n block.append( RainbowModule.wheel((i + self.counter) / 3));\r\n self.counter += self.speed\r\n while self.counter < 0:\r\n self.counter += 918\r\n\r\n\r\n return block", "def seasonal_pattern(season_time):\r\n return np.where(season_time < 0.4,\r\n np.cos(season_time * 2 * np.pi),\r\n 1 / np.exp(3 * season_time))", "def seasonal_pattern(season_time):\r\n return np.where(season_time < 0.4,\r\n np.cos(season_time * 2 * np.pi),\r\n 1 / np.exp(3 * season_time))", "def seasonal_pattern(season_time):\n return np.where(season_time < 0.4,\n np.cos(season_time * 2 * np.pi),\n 1 / np.exp(3 * season_time))", "def seasonal_pattern(season_time):\n return np.where(season_time < 0.4,\n np.cos(season_time * 2 * np.pi),\n 1 / np.exp(3 * season_time))", "def seasonal_pattern(season_time):\n return np.where(season_time < 0.4,\n np.cos(season_time * 2 * np.pi),\n 1 / np.exp(3 * season_time))" ]
[ "0.5695601", "0.5432977", "0.5410973", "0.5381197", "0.53569686", "0.51683617", "0.5059934", "0.50112706", "0.49925143", "0.49726513", "0.49589247", "0.49199778", "0.49188292", "0.4897901", "0.48784587", "0.48409376", "0.48267853", "0.4821929", "0.4812405", "0.47980654", "0.47935113", "0.4748392", "0.47440615", "0.4722927", "0.47096047", "0.47029015", "0.47029015", "0.47019088", "0.47019088", "0.47019088" ]
0.72606444
0
Reloads the schedule from our json file.
def reload_schedule():
    global jsonConfig
    global curSchedule

    jsonConfig = None
    curSchedule = None

    # Clear currently scheduled bells.
    schedule.clear("current")

    logging.debug("Reloading schedule...")
    with open(jsonFile) as jsonFileHandle:
        jsonConfig = json.load(jsonFileHandle)

    # Check that default structure for json config is respected.
    if "calendar" not in jsonConfig or "default" not in jsonConfig["calendar"]:
        logging.error("Malformed json config. Invalid calendar table.")
        return
    elif "schedules" not in jsonConfig:
        logging.error("Malformed json config. Invalid schedules table.")
        return
    elif "patterns" not in jsonConfig:
        logging.error("Malformed json config. Invalid patterns table.")
        return

    # Check to see if this date has a specific schedule.
    curDate = datetime.datetime.today().strftime("%Y-%m-%d")
    if curDate in jsonConfig["calendar"]:
        curSchedule = jsonConfig["calendar"][curDate]
    else:
        # If this isn't a special day, we look up the schedule by day of the week.
        curDayOfWeek = datetime.datetime.now().strftime("%A")
        if curDayOfWeek in jsonConfig["calendar"]["default"]:
            curSchedule = jsonConfig["calendar"]["default"][curDayOfWeek]
        else:
            logging.debug("No schedule found for date.")
            return

    # Now that we have the schedule to use, does it exist?
    if curSchedule not in jsonConfig["schedules"]:
        logging.error("Schedule" + curSchedule + " not found in json config. Aborting.")
        return

    # Add bells for this schedule.
    for bellTime in jsonConfig["schedules"][curSchedule]:
        schedule.every().day.at(bellTime).do(ring_bells).tag("current")
        logging.debug("Scheduled bells using pattern '" + jsonConfig["schedules"][curSchedule][bellTime] + "' at " + bellTime)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reload(self):\n with open(self._config) as f:\n self.data = json.load(f)", "def reload(self):\n try:\n with open(self.__file_path, 'r') as f:\n for key, value in json.load(f).items():\n self.__objects[key] = eval(key.split('.')[0])(**value)\n except FileNotFoundError:\n pass", "def reload(self):\n try:\n with open(self.__file_path, 'r') as f:\n dicts = json.load(f)\n for key, value in dicts.items():\n obj1 = eval(value['__class__'])(**value)\n self.__objects[key] = obj1\n except FileNotFoundError:\n pass", "def reload(self):\n try:\n with open(self.__file_path, mode=\"r\", encoding='UTF-8') as f:\n readit = json.load(f)\n for v in readit.values():\n from ..base_model import BaseModel\n from ..user import User\n from ..state import State\n from ..city import City\n from ..amenity import Amenity\n from ..place import Place\n from ..review import Review\n\n a = eval(\"{}(**v)\".format(v[\"__class__\"]))\n self.new(a)\n\n except FileNotFoundError:\n \"\"\"\n No file has been found so pass\n \"\"\"\n pass", "def reload(self):\n if os.path.exists(FileStorage.__file_path):\n with open(FileStorage.__file_path, \"r\", encoding=\"utf-8\") as f:\n loaded = json.load(f)\n for _id, v in loaded.items():\n cls = loaded[_id].pop(\"__class__\", None)\n try:\n loaded[_id][\"created_at\"] = datetime.strptime(\n loaded[_id][\"created_at\"], dt_format)\n loaded[_id][\"updated_at\"] = datetime.strptime(\n loaded[_id][\"updated_at\"], dt_format)\n except:\n pass\n FileStorage.__objects[_id] = FileStorage.class_models[cls](**v)", "def reload(self):\n try:\n with open(FileStorage.__file_path) as f:\n objs = json.load(f)\n for obj in objs.values():\n name = obj['__class__']\n del obj['__class__']\n self.new(eval(name)(**obj))\n except FileNotFoundError:\n return", "def reload(self):\n from ..base_model import BaseModel\n from ..user import User\n from ..place import Place\n from ..state import State\n from ..city import City\n from ..amenity import Amenity\n from ..review import Review\n\n if exists(self.__file_path):\n with open(self.__file_path) as jsonfile:\n deserialized = json.load(jsonfile)\n\n cls = {\"BaseModel\": BaseModel, \"User\": User, \"Place\": Place,\n \"State\": State, \"City\": City, \"Amenity\": Amenity,\n \"Review\": Review}\n\n for keys in deserialized.keys():\n for cls_key in cls.keys():\n if deserialized[keys]['__class__'] == cls_key:\n self.__objects[keys] = cls[cls_key\n ](**deserialized[keys])\n break", "def reload(self):\n from models.base_model import BaseModel\n from models.user import User\n from models.amenity import Amenity\n from models.city import City\n from models.place import Place\n from models.review import Review\n from models.state import State\n dict_reload = {}\n try:\n with open(FileStorage.__file_path) as file:\n dict_reload = json.load(file)\n for key, value in dict_reload.items():\n obj = value[\"__class__\"]\n self.__objects[key] = locals()[obj](**value)\n except:\n pass", "def reload(self):\n\n dict_of_dicts = {}\n classes = {\n \"BaseModel\": BaseModel,\n \"User\": User,\n \"Amenity\": Amenity,\n \"City\": City,\n \"Place\": Place,\n \"Review\": Review,\n \"State\": State}\n\n try:\n temp_dict = {}\n with open(self.__file_path, \"r\") as r:\n dict_of_dicts = json.load(r)\n for k, v in dict_of_dicts.items():\n if v['__class__'] in classes:\n temp_dict[k] = classes[v['__class__']](**v)\n self.__objects = temp_dict\n except Exception:\n pass", "def reload(self):\n if not os.path.exists(FileStorage.__file_path):\n return\n with open(FileStorage.__file_path, 'rt') 
as file:\n toLoad = json.load(file)\n if not isinstance(toLoad, Mapping):\n raise ValueError('value in JSON file is not an object')\n FileStorage.__objects = {\n key: models.classes[key.partition('.')[0]](**obj)\n for key, obj in toLoad.items()\n }", "def reload(self):\n try:\n with open(FileStorage.__file_path) as json_file:\n dict_from_json = json.load(json_file)\n for key, value in dict_from_json.items():\n tmp = eval(value['__class__'])(**value)\n FileStorage.__objects[key] = tmp\n except(FileNotFoundError):\n pass", "def reloadfile(self, ):\n self.loadfile()", "def Load(self, filename):\n if os.path.exists(filename):\n\n norm_file_path = os.path.normpath(filename)\n\n if self.verbose:\n\n print \"Loading schedule '%s'\" % norm_file_path\n \n try:\n \n self._schedule_data = yaml.load(open(norm_file_path,'rb'))\n \n except yaml.YAMLError, exc: \n\n raise errors.ScheduleError(\"Failed to load schedule '%s' from file: %s\" % (filename, exc))\n\n else:\n\n self._schedule_loaded = False\n \n raise errors.ScheduleError(\"Schedule file '%s' doesn't exist\" % filename)\n\n try:\n\n self.ParseSchedule(self._schedule_data)\n\n except errors.ScheduleError, e:\n\n print \"%s\" % e\n\n self._schedule_loaded = False\n\n self._schedule_loaded = True", "def load(self):\n if not self.data_path.exists():\n raise ValueError(f'Invalid path - it does not exist: {self.data_path}')\n elif not self.data_path.is_file():\n raise ValueError(f'Invalid path - it is not a file: {self.data_path}')\n\n import json\n\n with self.data_path.open('r') as f:\n data = json.load(f)\n\n if self.only_played:\n data = {k: v for k, v in data.items() if v}\n\n to_update = {}\n for movie, path, is_played in self.iter_movies():\n if (was_played := data.get(path.name)) is not None and was_played != is_played:\n to_update[movie] = was_played\n\n for movie, played in sorted(to_update.items(), key=lambda kv: kv[0].title):\n log.info(f'{self.lp.update} key={movie._int_key} year={movie.year} title={movie.title!r} {played=}')\n if not self.dry_run:\n if played:\n movie.markPlayed()\n else:\n movie.markUnplayed()", "def _load(self):\n if self.file_path.exists():\n with open(self.file_path) as fid:\n self.data = json.load(fid)", "def load_irrigation_schedule():\n global irrigation_schedule\n\n # Read and parse the properties file.\n if not os.path.exists(FILE_PROPERTIES):\n return\n\n f = open(FILE_PROPERTIES)\n try:\n data = json.loads(f.read())\n except JSONDecodeError:\n data = {}\n finally:\n f.close()\n\n if PROP_SCHEDULE in data:\n irrigation_schedule = data[PROP_SCHEDULE]", "def _use_existing_schedule(self):\n sh = shelve.open(os.path.expanduser('~/.config/scheduler/schedule'))\n self.schedule = sh['schedule']\n sh.close()", "def Reload(self, data):\n self.__dict__ = json.loads(data, encoding='utf-8-sig')", "def reload_from_json(self, json):\n if json:\n self.__dict__ = json", "def reload(self):\n try:\n # if os.path.isfile(FileStorage.__file_path):\n with open(FileStorage.__file_path, 'r', encoding='UTF-8') as f:\n context2 = json.load(f)\n\n for key in context2.keys():\n new_value = context2[key]\n clss = new_value['__class__']\n# self.new(eval(clss)(**value))\n\n except Exception as e:\n pass", "def refresh(self) -> None:\n self.data = {}\n self.load_settings_file(self.default_settings_path / \"settings.yaml\", file_key=\"internal\")\n self.load_systems(self.default_settings_path / \"systems\")\n self.load_settings_file(self.personal_dir / \"settings.yaml\", file_key=\"user\")\n self.load_systems(self.personal_dir / 
\"systems\")", "def _load_schedule(self, filename):\n with open(filename, 'rt', encoding='utf-8') as f:\n xml = f.read()\n\n # Compose the message for the controller.\n message = ScheduleDefinitionMessage(xml, os.path.realpath(filename))\n\n # Send the message to the controller.\n self._zmq_controller.send_pyobj(message)\n\n # Await the response from the controller.\n response = self._zmq_controller.recv_json()\n\n if response['ret'] == 0:\n self._io.log_verbose(response['message'])\n else:\n self._io.error(response['message'])\n\n return response['ret'] == 0", "def load(self):\n basepath = os.path.dirname(os.path.abspath(__file__))\n filename = os.sep.join([basepath, c.FOLDER_JSON, c.FILE_GAME_VERSIONS])\n Handler.ALL_VERS_DATA = {} # reset known data; do not retain defunct information\n with open(filename, \"r\") as f:\n data = json.loads( f.read() )\n self.update(data)\n self._updated = False\n #for v,record in iteritems(Handler.ALL_VERS_DATA):\n # print(type(v), v)\n #for k,v in iteritems(record): ", "def load(self):\n if not self.exist:\n self.create()\n\n with open(self.file_path, encoding=Config.ENCODING) as file:\n self.data = json.load(file)", "def load(self, loadpath=None):\n\n if loadpath:\n with open(loadpath, mode='r') as f:\n self.update(json.load(f))", "def set_irrigation_schedule(schedule):\n global irrigation_schedule\n\n # Read and parse the properties file.\n f = open(FILE_PROPERTIES)\n try:\n data = json.loads(f.read())\n except JSONDecodeError:\n data = {}\n finally:\n f.close()\n\n # Parse the given schedule.\n try:\n sch_json = json.loads(schedule)\n except JSONDecodeError:\n sch_json = {}\n\n data[PROP_SCHEDULE] = sch_json[PROP_SCHEDULE]\n\n # Write the file with the new schedule.\n f = open(FILE_PROPERTIES, \"w\")\n f.write(json.dumps(data))\n f.close()\n\n irrigation_schedule = data[PROP_SCHEDULE]\n\n print_log(\"Changed the irrigation schedule: {}\".format(irrigation_schedule))", "def reload(self):\n\n pass", "def reset_dict():\n global COURSE_DICT\n with open(abs_file_path) as f:\n COURSE_DICT = json.load(f)", "def ReloadSettings(self, data):\n self.__dict__ = json.loads(data, encoding='utf-8-sig')\n return", "def reloadData(self):\n self.dto.readFromData()\n print(\"Record reloaded.\")" ]
[ "0.719504", "0.69318056", "0.6864976", "0.67585194", "0.6752321", "0.6690034", "0.66177434", "0.6605925", "0.65952766", "0.6545089", "0.64971936", "0.6459408", "0.6386498", "0.632974", "0.62427646", "0.62313604", "0.62149954", "0.61784226", "0.61701703", "0.6152167", "0.61414576", "0.608478", "0.6084596", "0.6053076", "0.6039288", "0.60355526", "0.60338736", "0.6023157", "0.5985824", "0.59699297" ]
0.7724108
0
Undistort the image using distortion coefficients
def undistort_image(mtx_, dist_, img_):
    dst = cv2.undistort(img_, mtx_, dist_, None, mtx_)
    return dst
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def undistort(self, image):\n return cv2.undistort(image, self.camera_matrix, self.distortion_coeffs, None, self.camera_matrix)", "def undistort(self, image):\n dst = cv2.undistort(image, self.mtx, self.dist_coeff, None)\n\n if self.args.is_test:\n self.image_logger.save_image(dst, 'undistorted')\n images = [[{'title': 'Original', 'data': image},\n {'title': 'Undistorted', 'data': dst}]]\n self.image_logger.plot_results(images)\n return dst", "def undistort(self, image):\n return cv2.undistort(image, self.mtx, self.dst, None, self.mtx)", "def undistort(img, mtx, dist):\n return cv2.undistort(img, mtx, dist, None, mtx)", "def __call__(self, img):\n if self.camera_matrix is not None and self.distortion_coef is not None:\n return cv2.undistort(\n img, self.camera_matrix, self.distortion_coef, None, self.camera_matrix)\n else:\n print(\"You should calculate Camera Matrix and Distortion coefficient first!\")\n return img", "def undistort_img(img, mtx, dist, debug=False):\n undist = cv2.undistort(img, mtx, dist, None, mtx)\n if (debug):\n window_name = \"Undistorted Image\"\n cv2.imshow('Undistorted Image', undist)\n cv2.moveWindow(\"Undistorted Image\", 10, 50);\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n return undist", "def un_distort_image(image):\n global _remap_x, _remap_y\n image = cv2.UMat(image)\n res = cv2.remap(image, _remap_x, _remap_y, cv2.INTER_LINEAR) # 进行remap\n res = res.get()\n return res", "def undistort(self,src):\n # note: no check over src.shape and self.size\n return cv2.undistort(src,self.K,self.dist)", "def image_undistort():\n # read test images\n all_test_images = os.listdir('test_images')\n test_images = []\n for name in all_test_images:\n if name.endswith(\".jpg\"):\n test_images.append(name)\n # apply distortion correction on test images\n undistort_images(test_images, './camera_calib_dist_pickle.p')\n print(\"DONE: undistorted test-images saved\")", "def undistort(basedir, img_extension, output_dir, output_prefix, calibration, distortion, output_image_shape=(640, 480), scaling_param=1):\n search = os.path.join(basedir, '*'+img_extension)\n img_paths = glob.glob(search)\n img_paths.sort()\n print(\"Number of Images: \", len(img_paths))\n maxlen = len(img_paths)\n if maxlen == 0:\n raise IOError(\n 'No images were found (maybe wrong \\'image extension\\' parameter?)')\n\n if not os.path.exists(os.path.dirname(output_dir)):\n os.makedirs(os.path.dirname(output_dir))\n\n for img_idx, img_path in enumerate(img_paths):\n img = cv2.imread(img_path, 1)\n height, width, _ = img.shape\n new_camera_matrix = calibration\n\n # scaling parameter between 0 (when all the pixels in the undistorted image are valid)\n # and 1 (when all the source image pixels are retained in the undistorted image)\n new_camera_mtx, roi = cv2.getOptimalNewCameraMatrix(\n calibration, distortion, (width, height), scaling_param, output_image_shape)\n print(\"calibration\", calibration)\n print(\"new_camera_matrix\", new_camera_matrix)\n\n # undistort\n mapx, mapy = cv2.initUndistortRectifyMap(\n calibration, distortion, None, new_camera_mtx, output_image_shape, 5)\n dst = cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR)\n\n # crop the image\n x, y, w, h = roi\n dst = dst[y:y+h, x:x+w]\n\n output_path = output_dir+output_prefix+'_%d' % img_idx+img_extension\n print(output_path)\n cv2.imwrite(output_path, dst)\n return True", "def undistort_image(self, img, calibration_dict: dict):\n if img is None:\n return None\n\n if 'mtx' not in calibration_dict or 'dist' not in calibration_dict:\n raise 
ValueError('Missing mtx or dist in calibration dictionary.')\n\n return cv2.undistort(img, calibration_dict['mtx'], calibration_dict['dist'], None, calibration_dict['mtx'])", "def undistort_image(frame, mtx, dist, display=True):\r\n frame_undistorted = cv2.undistort(frame, mtx, dist, newCameraMatrix=mtx)\r\n\r\n if display:\r\n fig, ax = plt.subplots(nrows=1, ncols=2)\r\n # fig.suptitle('Undistort Image Before & After')\r\n ax[0].set_title('Before calibration')\r\n ax[1].set_title('After calibration')\r\n ax[0].imshow(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\r\n ax[1].imshow(cv2.cvtColor(frame_undistorted, cv2.COLOR_BGR2RGB))\r\n \r\n #for comparing camera undistorted\r\n plt.savefig('../output_images/undistort_image_before_to_after.jpg',dpi=300)\r\n plt.show()\r\n\r\n return frame_undistorted", "def cal_undist(self, img = None):\n return cv2.undistort(img, self.mtx, self.dist, None, self.mtx)", "def un_distort_point(point):\n points = np.array([[(point.x, point.y)]], np.float32)\n temp = cv2.undistortPoints(points, _camera_matrix, _camera_distortion)\n fx, fy = _camera_tuned_matrix[0][0], _camera_tuned_matrix[1][1]\n cx, cy = _camera_tuned_matrix[0][2], _camera_tuned_matrix[1][2]\n x = temp[0][0][0] * fx + cx\n y = temp[0][0][1] * fy + cy\n return ge.Point(x, y)", "def undistort_points(points, K, dist):\n return cv2.undistortPoints(points, K, dist)", "def distort_img(input_img, d_limit=4):\n if d_limit == 0:\n return input_img\n rows, cols, ch = input_img.shape\n pts2 = np.float32([[0, 0], [rows - 1, 0], [0, cols - 1], [rows - 1, cols - 1]])\n pts1 = np.float32(pts2 + np.random.uniform(-d_limit, d_limit, pts2.shape))\n M = cv2.getPerspectiveTransform(pts1, pts2)\n dst = cv2.warpPerspective(input_img, M, (cols, rows), borderMode=1)\n return dst", "def distort_images(self, images, seed):\n if self.mode == \"train\":\n images = image_processing.distort_image(images, seed)\n\n # Rescale to [-1,1] instead of [0, 1]\n images = tf.subtract(images, 0.5)\n images = tf.multiply(images, 2.0)\n return images", "def _undistort_regulafalsi(\n self, xy: np.ndarray, iterations: int = 100, tolerance: Number = 0\n ) -> np.ndarray:\n # Start at center of image (distortion free)\n x1 = np.zeros(xy.shape, dtype=float)\n y1 = -xy\n # Then try halfway towards distorted coordinate\n # (more stable to approach solution from image center)\n x2 = xy / 2\n y2 = self._distort(x2) - xy\n uxy = np.full(xy.shape, np.nan)\n for n in range(iterations):\n dy = y2 - y1\n not_converged = np.all(dy != 0, axis=1)\n if tolerance > 0:\n not_converged &= np.any(np.abs(y2) > tolerance / self.f.mean())\n if n == 0:\n mask = np.ones(len(xy), dtype=bool)\n converged = np.zeros(mask.shape, dtype=bool)\n converged[mask] = ~not_converged\n uxy[converged] = x2[~not_converged]\n mask[mask] = not_converged\n x1 = x1[not_converged]\n y1 = y1[not_converged]\n x2 = x2[not_converged]\n y2 = y2[not_converged]\n if not np.any(not_converged):\n break\n x3 = (x1 * y2 - x2 * y1) / dy[not_converged]\n y3 = self._distort(x3) - xy[mask]\n x1 = x2\n y1 = y2\n x2 = x3\n y2 = y3\n uxy[mask] = x2\n return uxy", "def _undistort_lookup(self, xy: np.ndarray, density: Number = 1) -> np.ndarray:\n # Estimate undistorted camera coordinate bounds\n uv_edges = self.imgsz * np.array(\n [[0, 0], [0.5, 0], [1, 0], [1, 0.5], [1, 1], [0.5, 1], [0, 1], [0, 0.5]]\n )\n xyu_edges = (uv_edges - (self.imgsz / 2 + self.c)) / self.f\n xyd_edges = self._distort(xyu_edges)\n # Build undistorted camera coordinates on regular grid\n ux = np.linspace(\n min(xyu_edges[:, 0].min(), 
xyd_edges[:, 0].min()),\n max(xyu_edges[:, 0].max(), xyd_edges[:, 0].max()),\n int(density * self.imgsz[0]),\n )\n uy = np.linspace(\n min(xyu_edges[:, 1].min(), xyd_edges[:, 1].min()),\n max(xyu_edges[:, 1].max(), xyd_edges[:, 1].max()),\n int(density * self.imgsz[1]),\n )\n UX, UY = np.meshgrid(ux, uy)\n uxy = np.column_stack((UX.flatten(), UY.flatten()))\n # Distort grid\n dxy = self._distort(uxy)\n # Interpolate distortion removal from gridded results\n # NOTE: Cannot use faster grid interpolation because dxy is not regular\n return scipy.interpolate.griddata(dxy, uxy, xy, method=\"linear\")", "def normalise(image):", "def optical_distortion(\n img: np.ndarray,\n k: int = 0,\n dx: int = 0,\n dy: int = 0,\n interpolation: int = cv2.INTER_LINEAR,\n border_mode: int = cv2.BORDER_REFLECT_101,\n value: Optional[ImageColorType] = None,\n) -> np.ndarray:\n height, width = img.shape[:2]\n\n fx = width\n fy = height\n\n cx = width * 0.5 + dx\n cy = height * 0.5 + dy\n\n camera_matrix = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]], dtype=np.float32)\n\n distortion = np.array([k, k, 0, 0, 0], dtype=np.float32)\n map1, map2 = cv2.initUndistortRectifyMap(camera_matrix, distortion, None, None, (width, height), cv2.CV_32FC1)\n return cv2.remap(img, map1, map2, interpolation=interpolation, borderMode=border_mode, borderValue=value)", "def warp_to_overhead(self, undistorted_img):\n return cv2.warpPerspective(undistorted_img, self.overhead_transform, dsize=(self.img_width, self.img_height))", "def initialize_undistortion_maps(self):\n\n new_camera_matrix, valid_roi = cv2.getOptimalNewCameraMatrix(\n self.camera_matrix, self.distortion_coefficients, self.image_size,\n 0)\n\n self.map1, self.map2 = cv2.initUndistortRectifyMap(\n self.camera_matrix, self.distortion_coefficients, None,\n new_camera_matrix, self.image_size, cv2.CV_16SC2)", "def distortion(im1, im2, mask=None):\n im1, mask = check_image_mask_single_channel(im1, mask)\n im2 = check_image_single_channel(im2)\n if im1.shape != im2.shape: raise ValueError('im1 and im2 must be the same shape')\n if mask is not None: im1, im2 = im1[mask], im2[mask]\n # Need to avoid divide-by-zero\n mask = im2 != 0\n im1, im2 = im1[mask], im2[mask]\n return (im1 / im2).var()", "def _distort(self, xy: np.ndarray) -> np.ndarray:\n # X' = dr * X + dt\n if not any(self.k) and not any(self.p):\n return xy\n dxy = xy.copy()\n r2 = np.sum(xy ** 2, axis=1)\n if any(self.k):\n dxy *= self._radial_distortion(r2)\n if any(self.p):\n dxy += self._tangential_distortion(xy, r2)\n return dxy", "def __distance_trans(self):\n self.img = cv2.distanceTransform(self.img, 1, 5) + 1\n self.img[self.sure_bg == 0] -= 1\n # plt.figure()\n # plt.hist(self.img.flatten(), bins=100)\n # plt.show()\n # self.img = np.power(self.img/float(np.max(self.img)), 0.6) * 255\n # remove too small region\n # self.img[self.img < 50] = 0\n\n self.distance_img = self.img.copy()\n if self.plot_mode:\n # self.plot_gray(self.img, \"dist image\")\n plt.figure()\n plt.title(\"distance transform\")\n plt.imshow(self.img, cmap='jet')\n plt.colorbar()\n plt.show()\n\n plt.figure()\n plt.hist(self.img.flatten(), bins=100)\n plt.show()", "def deconvolute(args):\n prism.deconvolute.run(\n input_fps=args.input,\n output_fp=args.output,\n full_pattern_proportion=args.full_pattern_proportion,\n merge_cutoff=args.merge_cutoff,\n outlier_dispersion_cutoff=args.outlier_dispersion_cutoff,\n intersection_method=args.intersection_method,\n copynumber=args.copynumber,\n cn_prior=args.cn_prior,\n 
num_max_cluster=args.num_max_cluster,\n seed=args.seed,\n verbose=args.verbose,\n )", "def _undistort_oulu(\n self, xy: np.ndarray, iterations: int = 20, tolerance: Number = 0\n ) -> np.ndarray:\n # Initial guess\n uxy = xy\n for _ in range(iterations):\n r2 = np.sum(uxy ** 2, axis=1)\n if any(self.p) and not any(self.k):\n uxy = xy - self._tangential_distortion(uxy, r2)\n elif any(self.k) and not any(self.k):\n uxy = xy * (1 / self._radial_distortion(r2))\n else:\n uxy = (xy - self._tangential_distortion(uxy, r2)) * (\n 1 / self._radial_distortion(r2)\n )\n if tolerance > 0 and np.all(\n (np.abs(self._distort(uxy) - xy)) < tolerance / self.f.mean()\n ):\n break\n return uxy", "def decompose(self, *args, **kwargs):\n return _image.image_decompose(self, *args, **kwargs)", "def perspective_transform(self, undistorted, direction='forward'):\n\t\t# Source image points\n\t\tsrc = np.float32([[255, 695], [585, 455], [700, 455], [1060, 690]])\n\t\t# Destination image points\n\t\tdst = np.float32([[305, 695], [305, 0], [1010, 0], [1010, 690]])\n\t\t# Perform forward or inverse perspective transform\n\t\tif direction == 'forward':\n\t\t\t# Compute the perspective transform, M\n\t\t\tM = cv2.getPerspectiveTransform(src, dst)\n\t\t\t# Create warped image - uses linear interpolation\n\t\t\treturn cv2.warpPerspective(undistorted, M, (undistorted.shape[1], undistorted.shape[0]), flags=cv2.INTER_LINEAR)\n\t\telif direction == 'inverse':\n\t\t\t# Compute the inverse also by swapping the input parameters\n\t\t\tMinv = cv2.getPerspectiveTransform(dst, src)\n\t\t\treturn cv2.warpPerspective(undistorted, Minv, (undistorted.shape[1], undistorted.shape[0]), flags=cv2.INTER_LINEAR)" ]
[ "0.7884479", "0.7551686", "0.7374037", "0.7349525", "0.7250263", "0.72426575", "0.71945417", "0.7146802", "0.7065099", "0.671219", "0.66244733", "0.6621529", "0.64361244", "0.6337158", "0.6207002", "0.6132985", "0.6115165", "0.59107697", "0.59106004", "0.5851052", "0.5788503", "0.57583916", "0.5752569", "0.5726207", "0.5710327", "0.56791097", "0.56178623", "0.5551219", "0.55197036", "0.54672265" ]
0.7565127
1
Calculate Perspective and Inverse Perspective Transform Matrices
def calc_transform(src_, dst_):
    M_ = cv2.getPerspectiveTransform(src_, dst_)
    Minv_ = cv2.getPerspectiveTransform(dst_, src_)
    return M_, Minv_
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def perspective_transform():\n src = np.float32([(220,720), (1110, 720), (570, 470), (722, 470)]) # Manually get these numbers from plot\n dst = np.float32([[320, 720], [920, 720], [320, 1], [920, 1]])\n M = cv2.getPerspectiveTransform(src, dst)\n Minv = cv2.getPerspectiveTransform(dst, src)\n\n return M, Minv", "def myPerspectiveTransform(pts, H):\n\n # Clone and reshape the list of points\n new_pts = np.reshape(pts, (-1, 2))\n # Allocate a vector filled with one with size (-1, 1)\n one_vector = np.zeros((pts.shape[0], 1)) + 1\n # Concatenate the one vector to the list of points to form the homogenious coordiniate system\n new_pts = np.concatenate((new_pts, one_vector), axis=len(new_pts.shape)-1)\n\n # Perform transformation and transform results into the pixel coord. system\n # i.e., x' = x/w, and y' = y/w\n for i, pt in enumerate(new_pts):\n new_pts[i] = H.dot(pt.T)\n new_pts[i] /= new_pts[i, -1]\n\n # Return results with the same shape as the input has\n return new_pts[:, :-1].reshape(pts.shape)", "def __set_perspective(self):\n\n src = np.float32([[(.42 * self.img_shape[1],.65 * self.img_shape[0] ),\n (.58 * self.img_shape[1], .65 * self.img_shape[0]),\n (0 * self.img_shape[1],self.img_shape[0]),\n (1 * self.img_shape[1], self.img_shape[0])]])\n\n dst = np.float32([[0,0],\n [self.img_shape[1],0],\n [0,self.img_shape[0]],\n [self.img_shape[1],self.img_shape[0]]])\n\n self.M = cv2.getPerspectiveTransform(src, dst)\n self.M_inv = cv2.getPerspectiveTransform(dst, src)", "def perspectiveNormalizationXform(self):\n return np.array([[1.0/np.tan(self.view_angle_h), 0, 0, 0],\n [0, 1.0/np.tan(self.view_angle_v), 0, 0],\n [0, 0, (self.far + self.near)/(self.far - self.near),\n 2*self.far*self.near/(self.far - self.near)],\n [0, 0, -1, 0]])", "def __init__(self, before, after):\r\n self.M = cv2.getPerspectiveTransform(before, after)\r\n self.inverse_M = cv2.getPerspectiveTransform(after, before)", "def compute_perspective_transform(corner_points, width, height, image):\n # Create an array out of the 4 corner points\n corner_points_array = np.float32(corner_points)\n # Create an array with the parameters (the dimensions) required to build the matrix\n img_params = np.float32([[0, 0], [width, 0], [0, height], [width, height]])\n # Compute and return the transformation matrix\n matrix = cv2.getPerspectiveTransform(corner_points_array, img_params)\n img_transformed = cv2.warpPerspective(image, matrix, (width, height))\n return matrix, img_transformed", "def compute_perspective_transform(self, binary_image):\r\n transform_src = np.float32([[300, 309], [500, 315], [120, 381], [685, 392]])\r\n transform_dst = np.float32([ [0,0], [800, 0], [0,600], [800,600]])\r\n perspective_transform = cv2.getPerspectiveTransform(transform_src, transform_dst)\r\n inverse_perspective_transform = cv2.getPerspectiveTransform(transform_dst, transform_src)\r\n warped_image = cv2.warpPerspective(binary_image, perspective_transform, \r\n (binary_image.shape[1], binary_image.shape[0]), \r\n flags=cv2.INTER_NEAREST)\r\n\r\n return warped_image, inverse_perspective_transform", "def computeMVP(self):\n projMat = self.converterYUR\n modelViewMat = self.transforMat.invertCompose(\n Globals.render.getTransform(self.cameraNode)).getMat()\n return UnalignedLMatrix4f(modelViewMat * projMat)", "def perspective_transform(self, undistorted, direction='forward'):\n\t\t# Source image points\n\t\tsrc = np.float32([[255, 695], [585, 455], [700, 455], [1060, 690]])\n\t\t# Destination image points\n\t\tdst = np.float32([[305, 695], [305, 
0], [1010, 0], [1010, 690]])\n\t\t# Perform forward or inverse perspective transform\n\t\tif direction == 'forward':\n\t\t\t# Compute the perspective transform, M\n\t\t\tM = cv2.getPerspectiveTransform(src, dst)\n\t\t\t# Create warped image - uses linear interpolation\n\t\t\treturn cv2.warpPerspective(undistorted, M, (undistorted.shape[1], undistorted.shape[0]), flags=cv2.INTER_LINEAR)\n\t\telif direction == 'inverse':\n\t\t\t# Compute the inverse also by swapping the input parameters\n\t\t\tMinv = cv2.getPerspectiveTransform(dst, src)\n\t\t\treturn cv2.warpPerspective(undistorted, Minv, (undistorted.shape[1], undistorted.shape[0]), flags=cv2.INTER_LINEAR)", "def myWarpPerspective(img, H, output_shapes):\n c, r = output_shapes\n \n # Create an output canvas according to the parameter \"output_shapes\"\n if len(img.shape) == 3:\n output = np.zeros((r, c, 3))\n else:\n output = np.zeros((r, c, 1))\n\n # List of pixel coordinates in canvas\n inverse_map = [[i, j] for i in range(c) for j in range(r)]\n\n # Covert the coordinates in the system of img2 back to the system of img1 \n # to find out the reference points\n inverse_map = np.asarray(inverse_map)\n inverse_map = myPerspectiveTransform(inverse_map, np.linalg.inv(H))\n \n \n for i in range(c):\n for j in range(r):\n index = i*r + j\n ix, iy = inverse_map[index]\n \n # Because the converted coords. are float, \n # we need to find out four ref. points to do bilinear interpolation\n tix, bix = np.ceil(ix), np.floor(ix)\n tiy, biy = np.ceil(iy), np.floor(iy)\n\n x_ratio = ix - bix\n y_ratio = iy - biy\n\n # Indexing does not allow float indices\n tix, bix, tiy, biy = np.int32(tix), np.int32(bix), np.int32(tiy), np.int32(biy)\n \n # Boundary checking: each ref point should locate within the input image\n if bix < 0 or biy < 0 or tix >= img.shape[1] or tiy >= img.shape[0]:\n continue\n else:\n # Bilinear interpolation\n output[j, i] = x_ratio*y_ratio*img[tiy, tix] \\\n + x_ratio*(1-y_ratio)*img[biy, tix] \\\n + (1-x_ratio)*y_ratio*img[tiy, bix] \\\n + (1-x_ratio)*(1-y_ratio)*img[biy, bix]\n output[j, i] = np.round(output[j, i])\n\n # Cast back to uint8 because of displaying and return results\n return np.uint8(output)", "def get_transform_matrix(theta, phi = None, invert_rot = False, invert_focal = False):\n\n if phi is None:\n phi = const.PHI_IDX * 10.0\n\n #extrinsic x intrinsic\n camera_matrix = np.zeros((4, 4), dtype=np.float32)\n\n intrinsic_matrix = np.eye(4, dtype=np.float32)\n extrinsic_matrix = np.eye(4, dtype=np.float32)\n\n sin_phi = np.sin(float(phi) / 180.0 * np.pi)\n cos_phi = np.cos(float(phi) / 180.0 * np.pi)\n sin_theta = np.sin(float(-theta) / 180.0 * np.pi)\n cos_theta = np.cos(float(-theta) / 180.0 * np.pi)\n\n #theta rotation\n rotation_azimuth = np.zeros((3, 3), dtype=np.float32)\n rotation_azimuth[0, 0] = cos_theta\n rotation_azimuth[2, 2] = cos_theta\n rotation_azimuth[0, 2] = -sin_theta\n rotation_azimuth[2, 0] = sin_theta\n rotation_azimuth[1, 1] = 1.0\n\n #phi rotation\n rotation_elevation = np.zeros((3, 3), dtype=np.float32)\n rotation_elevation[0, 0] = cos_phi\n rotation_elevation[0, 1] = sin_phi\n rotation_elevation[1, 0] = -sin_phi\n rotation_elevation[1, 1] = cos_phi\n rotation_elevation[2, 2] = 1.0\n\n #rotate phi, then theta\n rotation_matrix = np.matmul(rotation_azimuth, rotation_elevation)\n if invert_rot:\n rotation_matrix = np.linalg.inv(rotation_matrix)\n\n displacement = np.zeros((3, 1), dtype=np.float32)\n displacement[0, 0] = const.DIST_TO_CAM\n displacement = np.matmul(rotation_matrix, 
displacement)\n\n #assembling 4x4 from R + T\n extrinsic_matrix[0:3, 0:3] = rotation_matrix\n extrinsic_matrix[0:3, 3:4] = -displacement\n\n if invert_focal:\n intrinsic_matrix[2, 2] = float(const.focal_length)\n intrinsic_matrix[1, 1] = float(const.focal_length)\n else:\n intrinsic_matrix[2, 2] = 1.0 / float(const.focal_length)\n intrinsic_matrix[1, 1] = 1.0 / float(const.focal_length)\n\n camera_matrix = np.matmul(extrinsic_matrix, intrinsic_matrix)\n return camera_matrix", "def get_perspective_matrix(fov_degrees, aspect, near, far):\n radians = math.radians(fov_degrees)\n\n zoom = 1 / math.tan(radians / 2)\n y_zoom = zoom\n x_zoom = y_zoom / aspect\n\n z_clip_a = (far + near) / (far - near)\n z_clip_b = (-2 * near * far) / (far - near)\n\n return np.matrix([[x_zoom, 0, 0, 0],\n [0, y_zoom, 0, 0],\n [0, 0, z_clip_a, z_clip_b],\n [0, 0, 1, 0]])", "def projection_matrix(self):\n scene = self.figure.scene\n scene_size = tuple(scene.get_size())\n aspect_ratio = float(scene_size[0]) / float(scene_size[1])\n p = scene.camera.get_perspective_transform_matrix(\n aspect_ratio, -1, 1).to_array().astype(np.float32)\n return p", "def _derive_transformation_matrices(self):\n\n if hasattr(self, '_primaries') and hasattr(self, '_whitepoint'):\n if self._primaries is not None and self._whitepoint is not None:\n npm = normalised_primary_matrix(self._primaries,\n self._whitepoint)\n\n self._derived_RGB_to_XYZ_matrix = npm\n self._derived_XYZ_to_RGB_matrix = np.linalg.inv(npm)", "def homogenous_transformation(x,y):\n y_start = 0.3\n y_stop = 1\n src = np.float32([[0.45,y_start],[0.55, y_start],[0.1,y_stop],[0.9, y_stop]])\n dst = np.float32([[0.45, y_start],[0.55, y_start],[0.45, y_stop],[0.55,y_stop]])\n M_inv = cv2.getPerspectiveTransform(dst,src)\n \n ones = np.ones((1,len(y)))\n coordinates = np.vstack((x, y, ones))\n trans = np.matmul(M_inv, coordinates)\n \n x_vals = trans[0,:]/trans[2,:]\n y_vals = trans[1,:]/trans[2,:]\n return x_vals, y_vals", "def perspective_matrix(self) -> TransformationMatrixType:\n z_near, z_far = self._clipping[self.projection_mode.value]\n return perspective_matrix(\n math.radians(self.fov), self.aspect_ratio, z_near, z_far\n )", "def adjustPerspectiveX(img, idx=-1, fac=0.15, scale=(1.0, 1.0)):\n h1, w1, _ = img.shape\n\n w, h = int(w1 * scale[0]), int(h1 * scale[1])\n aw = (w1 - w) // 2\n ah = (h1 - h) // 2\n\n dh = int(fac * w)\n dw = int(fac * h)\n pts1 = np.float32([[0, 0], [w1, 0], [0, h1], [w1, h1]])\n\n views = []\n #1. from left to right\n #pts2 = np.float32([[0, 0], [w-dw, dh], [0, h], [w-dw, h-dh]])\n pts2 = np.float32([[aw, ah], [w - dw, dh], [aw, h - ah], [w - dw, h - dh]])\n views.append(pts2)\n\n #2. from right to left\n pts2 = np.float32([[dw, dh], [w, 0], [dw, h - dh], [w, h]])\n views.append(pts2)\n\n #3. from bottom to head\n pts2 = np.float32([[dw, dh], [w - dw, dh], [0, h], [w, h]])\n views.append(pts2)\n\n #4. from header to bottom\n pts2 = np.float32([[0, 0], [w, 0], [dw, h - dh], [w - dw, h - dh]])\n views.append(pts2)\n\n ##5. from top-left to bottom-right\n pts2 = np.float32([[0, 0], [w - dw/2, dh/2], [dw/2, h-dh/2], [w-dw, h-dh]])\n views.append(pts2)\n\n #6. from bottom-right to top-left\n pts2 = np.float32([[dw, dh], [w-dw/2, dh/2], [dw/2, h-dh/2], [w, h]])\n views.append(pts2)\n pts2 = np.float32([[0, 0], [w-dw/2, dh/2], [dw/2, h-dh/2], [w, h]])\n views.append(pts2)\n\n #7. from top-right to bottom-left\n pts2 = np.float32([[dw/2, dh/2], [w, 0], [dw, h-dh], [w-dw/2, h-dh/2]])\n views.append(pts2)\n\n #8. 
from bottom-left to top-right\n pts2 = np.float32([[dw/2, dh/2], [w-dw, dh], [0, h], [w-dw/2, h-dh/2]])\n views.append(pts2)\n pts2 = np.float32([[dw/2, dh/2], [w, 0], [0, h], [w-dw/2, h-dh/2]])\n views.append(pts2)\n\n if idx < 0:\n idx = random.randint(0, len(views) - 1)\n else:\n idx = idx % len(views)\n\n pts2 = views[idx]\n fcolor = _genRandomColor()\n M = cv2.getPerspectiveTransform(pts1, pts2)\n img2 = cv2.warpPerspective(img, M, (w, h),\n borderMode=cv2.BORDER_CONSTANT, borderValue=fcolor)\n\n ## get it back\n #M = cv2.getPerspectiveTransform(pts2, pts1)\n #img3 = cv2.warpPerspective(img2, M, (w, h))\n\n if w != w1 or h != h1:\n bg_img = _genRandomImg(img.shape)\n img2 = randomPaste(bg_img, img2)\n \n return img2", "def get_perspective_transform(points_src: Tensor, points_dst: Tensor) -> Tensor:\n KORNIA_CHECK_SHAPE(points_src, [\"B\", \"4\", \"2\"])\n KORNIA_CHECK_SHAPE(points_dst, [\"B\", \"4\", \"2\"])\n KORNIA_CHECK(points_src.shape == points_dst.shape, \"Source data shape must match Destination data shape.\")\n KORNIA_CHECK(points_src.dtype == points_dst.dtype, \"Source data type must match Destination data type.\")\n\n # we build matrix A by using only 4 point correspondence. The linear\n # system is solved with the least square method, so here\n # we could even pass more correspondence\n\n # create the lhs tensor with shape # Bx8x8\n B: int = points_src.shape[0] # batch_size\n\n A = torch.empty(B, 8, 8, device=points_src.device, dtype=points_src.dtype)\n\n # we need to perform in batch\n _zeros = zeros(B, device=points_src.device, dtype=points_src.dtype)\n _ones = torch.ones(B, device=points_src.device, dtype=points_src.dtype)\n\n for i in range(4):\n x1, y1 = points_src[..., i, 0], points_src[..., i, 1] # Bx4\n x2, y2 = points_dst[..., i, 0], points_dst[..., i, 1] # Bx4\n\n A[:, 2 * i] = stack([x1, y1, _ones, _zeros, _zeros, _zeros, -x1 * x2, -y1 * x2], -1)\n A[:, 2 * i + 1] = stack([_zeros, _zeros, _zeros, x1, y1, _ones, -x1 * y2, -y1 * y2], -1)\n\n # the rhs tensor\n b = points_dst.view(-1, 8, 1)\n\n # solve the system Ax = b\n X: Tensor = _torch_solve_cast(A, b)\n\n # create variable to return the Bx3x3 transform\n M = torch.empty(B, 9, device=points_src.device, dtype=points_src.dtype)\n M[..., :8] = X[..., 0] # Bx8\n M[..., -1].fill_(1)\n\n return M.view(-1, 3, 3) # Bx3x3", "def get_perspective_transform(src, dst):\n if not isinstance(src, torch.Tensor):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\".format(type(src)))\n\n if not isinstance(dst, torch.Tensor):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\".format(type(dst)))\n\n if not src.shape[-2:] == (4, 2):\n raise ValueError(\"Inputs must be a Bx4x2 tensor. Got {}\".format(src.shape))\n\n if not src.shape == dst.shape:\n raise ValueError(\"Inputs must have the same shape. Got {}\".format(dst.shape))\n\n if not (src.shape[0] == dst.shape[0]):\n raise ValueError(\n \"Inputs must have same batch size dimension. Expect {} but got {}\".format(src.shape, dst.shape)\n )\n\n # we build matrix A by using only 4 point correspondence. 
The linear\n # system is solved with the least square method, so here\n # we could even pass more correspondence\n p = []\n for i in [0, 1, 2, 3]:\n p.append(_build_perspective_param(src[:, i], dst[:, i], 'x'))\n p.append(_build_perspective_param(src[:, i], dst[:, i], 'y'))\n\n # A is Bx8x8\n A = torch.stack(p, dim=1)\n\n # b is a Bx8x1\n b = torch.stack(\n [\n dst[:, 0:1, 0],\n dst[:, 0:1, 1],\n dst[:, 1:2, 0],\n dst[:, 1:2, 1],\n dst[:, 2:3, 0],\n dst[:, 2:3, 1],\n dst[:, 3:4, 0],\n dst[:, 3:4, 1],\n ],\n dim=1,\n )\n\n # solve the system Ax = b\n X, LU = _torch_solve_cast(b, A)\n\n # create variable to return\n batch_size = src.shape[0]\n M = torch.ones(batch_size, 9, device=src.device, dtype=src.dtype)\n M[..., :8] = torch.squeeze(X, dim=-1)\n\n return M.view(-1, 3, 3) # Bx3x3", "def getPerspectiveProjectionMatrix(l, r, b, t, n, f):\n e11 = 2 * n / (r - l)\n e13 = (r + l) / (r - l)\n e22 = (2 * n) / (t - b)\n e23 = (t + b) / (t - b)\n e33 = -1 * (f + n) / (f - n)\n e34 = (-2 * f * n) / (f - n)\n\n return MatrixExtended([\n [e11, 0, e13, 0],\n [0, e22, e23, 0],\n [0, 0, e33, e34],\n [0, 0, -1, 0]])", "def imageTransform(self):\n ims = self.imageShape\n acs = self.activeShape\n dx = self.colVector\n dy = self.rowVector\n\n p0 = self.activeOrigin\n p1 = p0 + acs[2] * dx\n p2 = p0 + acs[1] * dy\n\n # print p0, p1, p2\n # print acs, dx, dy\n\n localPts = list(map(pg.Vector, [[0,0], [ims[2],0], [0,ims[1]], [0,0,1]])) # w and h of data of image in pixels.\n globalPts = list(map(pg.Vector, [p0, p1, p2, [0,0,1]]))\n m = pg.solve3DTransform(localPts, globalPts)\n m[:,2] = m[:,3]\n m[2] = m[3]\n m[2,2] = 1\n tr = Qt.QTransform(*m[:3,:3].transpose().reshape(9))\n return tr", "def get_perspective_transform3d(src: Tensor, dst: Tensor) -> Tensor:\n if not isinstance(src, (Tensor)):\n raise TypeError(f\"Input type is not a Tensor. Got {type(src)}\")\n\n if not isinstance(dst, (Tensor)):\n raise TypeError(f\"Input type is not a Tensor. Got {type(dst)}\")\n\n if not src.shape[-2:] == (8, 3):\n raise ValueError(f\"Inputs must be a Bx8x3 tensor. Got {src.shape}\")\n\n if not src.shape == dst.shape:\n raise ValueError(f\"Inputs must have the same shape. Got {dst.shape}\")\n\n if not (src.shape[0] == dst.shape[0]):\n raise ValueError(f\"Inputs must have same batch size dimension. Expect {src.shape} but got {dst.shape}\")\n\n if not (src.device == dst.device and src.dtype == dst.dtype):\n raise AssertionError(\n f\"Expect `src` and `dst` to be in the same device (Got {src.dtype}, {dst.dtype}) \"\n f\"with the same dtype (Got {src.dtype}, {dst.dtype}).\"\n )\n\n # we build matrix A by using only 4 point correspondence. 
The linear\n # system is solved with the least square method, so here\n # we could even pass more correspondence\n p = []\n\n # 000, 100, 110, 101, 011\n for i in [0, 1, 2, 5, 7]:\n p.append(_build_perspective_param3d(src[:, i], dst[:, i], 'x'))\n p.append(_build_perspective_param3d(src[:, i], dst[:, i], 'y'))\n p.append(_build_perspective_param3d(src[:, i], dst[:, i], 'z'))\n\n # A is Bx15x15\n A = stack(p, 1)\n\n # b is a Bx15x1\n b = stack(\n [\n dst[:, 0:1, 0],\n dst[:, 0:1, 1],\n dst[:, 0:1, 2],\n dst[:, 1:2, 0],\n dst[:, 1:2, 1],\n dst[:, 1:2, 2],\n dst[:, 2:3, 0],\n dst[:, 2:3, 1],\n dst[:, 2:3, 2],\n # dst[:, 3:4, 0], dst[:, 3:4, 1], dst[:, 3:4, 2],\n # dst[:, 4:5, 0], dst[:, 4:5, 1], dst[:, 4:5, 2],\n dst[:, 5:6, 0],\n dst[:, 5:6, 1],\n dst[:, 5:6, 2],\n # dst[:, 6:7, 0], dst[:, 6:7, 1], dst[:, 6:7, 2],\n dst[:, 7:8, 0],\n dst[:, 7:8, 1],\n dst[:, 7:8, 2],\n ],\n 1,\n )\n\n # solve the system Ax = b\n X: Tensor = _torch_solve_cast(A, b)\n\n # create variable to return\n batch_size: int = src.shape[0]\n M = torch.empty(batch_size, 16, device=src.device, dtype=src.dtype)\n M[..., :15] = X[..., 0]\n M[..., -1].fill_(1)\n\n return M.view(-1, 4, 4) # Bx4x4", "def get_projection_matrix(left, right, bottom, top):\r\n zNear = -25.0\r\n zFar = 25.0\r\n inv_z = 1.0 / (zFar - zNear)\r\n inv_y = 1.0 / (top - bottom)\r\n inv_x = 1.0 / (right - left)\r\n mat = [[(2.0 * inv_x), 0.0, 0.0, (-(right + left) * inv_x)],\r\n [0.0, (2.0 * inv_y), 0.0, (-(top + bottom) * inv_y)],\r\n [0.0, 0.0, (-2.0 * inv_z), (-(zFar + zNear) * inv_z)],\r\n [0.0, 0.0, 0.0, 1.0]]\r\n return mat", "def get_warp_perspective(transpose_image, h_matrix, dimension):\n warped_image = np.zeros((dimension[0], dimension[1], 3))\n for index1 in range(0, transpose_image.shape[0]):\n for index2 in range(0, transpose_image.shape[1]):\n new_vec = np.dot(h_matrix, [index1, index2, 1])\n new_row, new_col, _ = (new_vec / new_vec[2] + 0.4).astype(int)\n if 5 < new_row < (dimension[0] - 5):\n if 5 < new_col < (dimension[1] - 5):\n warped_image[new_row, new_col] = transpose_image[index1, index2]\n warped_image[new_row - 1, new_col - 1] = transpose_image[index1, index2]\n warped_image[new_row - 2, new_col - 2] = transpose_image[index1, index2]\n warped_image[new_row - 3, new_col - 3] = transpose_image[index1, index2]\n warped_image[new_row + 1, new_col + 1] = transpose_image[index1, index2]\n warped_image[new_row + 2, new_col + 2] = transpose_image[index1, index2]\n warped_image[new_row + 3, new_col + 3] = transpose_image[index1, index2]\n\n return np.array(warped_image, dtype=np.uint8)", "def problem3():\n t = np.array([-27.1, -2.9, -3.2])\n principal_point = np.array([8, -10])\n focal_length = 8\n\n # model transformations\n T = gettranslation(t)\n Ry = getyrotation(135)\n Rx = getxrotation(-30)\n Rz = getzrotation(90)\n print(T)\n print(Ry)\n print(Rx)\n print(Rz)\n\n K = getcentralprojection(principal_point, focal_length)\n\n P,M = getfullprojection(T, Rx, Ry, Rz, K)\n print(P)\n print(M)\n\n points = loadpoints()\n displaypoints2d(points)\n\n z = loadz()\n Xt = invertprojection(K, points, z)\n\n Xh = inverttransformation(M, Xt)\n\n worldpoints = hom2cart(Xh)\n displaypoints3d(worldpoints)\n\n points2 = projectpoints(P, worldpoints)\n displaypoints2d(points2)\n\n plt.show()", "def get_proj(self):\n relev, razim = np.pi * self.elev/180, np.pi * self.azim/180\n\n xmin, xmax = self.get_xlim3d()/self.pbaspect[0]\n ymin, ymax = self.get_ylim3d()/self.pbaspect[1]\n zmin, zmax = self.get_zlim3d()/self.pbaspect[2]\n\n # transform to uniform 
world coordinates 0-1.0,0-1.0,0-1.0 \n worldM = proj3d.world_transformation(xmin, xmax,\n ymin, ymax,\n zmin, zmax)\n\n # look into the middle of the new coordinates \n R = np.array([0.5, 0.5, 0.5])\n\n xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist\n yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist\n zp = R[2] + np.sin(relev) * self.dist\n E = np.array((xp, yp, zp))\n\n self.eye = E\n self.vvec = R - E\n self.vvec = self.vvec / proj3d.mod(self.vvec)\n\n if abs(relev) > np.pi/2:\n # upside down \n V = np.array((0, 0, -1))\n else:\n V = np.array((0, 0, 1))\n\n zfront, zback = -self.dist, self.dist\n\n viewM = proj3d.view_transformation(E, R, V)\n perspM = proj3d.persp_transformation(zfront, zback)\n M0 = np.dot(viewM, worldM)\n M = np.dot(perspM, M0)\n return M", "def decompose_essential_matrix(E, x1, x2):\n\n # Fix left camera-matrix\n Rl = np.eye(3)\n tl = np.array([[0, 0, 0]]).T\n Pl = np.concatenate((Rl, tl), axis=1)\n\n # TODO: Compute possible rotations and translations\n \n # s must be [1, 1, 0]\n u, s, vh = np.linalg.svd(E)\n E = u @ np.diag([1, 1, 0]) @ vh\n u, s, vh = np.linalg.svd(E)\n\n w = np.array([[ 0, 1, 0], \n [-1, 0, 0], \n [ 0, 0, 1]]) \n \n z = np.array([[ 0, -1, 0], \n [ 1, 0, 0],\n [ 0, 0, 0]])\n \n R1 = u @ w.T @ vh\n s1 = -u @ z @ u.T\n R2 = u @ w @ vh\n s2 = u @ z @ u.T\n\n t1 = np.array([[s1[2, 1]], \n [s1[0, 2]],\n [s1[1, 0]]])\n \n t2 = np.array([[s2[2, 1]], \n [s2[0, 2]], \n [s2[1, 0]]]) \n\n # Four possibilities\n Pr = [np.concatenate((R1, t1), axis=1),\n np.concatenate((R1, t2), axis=1),\n np.concatenate((R2, t1), axis=1),\n np.concatenate((R2, t2), axis=1)]\n\n # Compute reconstructions for all possible right camera-matrices\n X3Ds = [infer_3d(x1[:, 0:1], x2[:, 0:1], Pl, x) for x in Pr]\n\n # Compute projections on image-planes and find when both cameras see point\n test = [np.prod(np.hstack((Pl @ np.vstack((X3Ds[i], [[1]])), Pr[i] @ np.vstack((X3Ds[i], [[1]])))) > 0, 1) for i in\n range(4)]\n test = np.array(test)\n idx = np.where(np.hstack((test[0, 2], test[1, 2], test[2, 2], test[3, 2])) > 0.)[0][0]\n\n # Choose correct matrix\n Pr = Pr[idx]\n\n return Pl, Pr", "def compute_point_perspective_transformation(matrix, list_downoids):\n # Compute the new coordinates of our points\n list_points_to_detect = np.float32(list_downoids).reshape(-1, 1, 2)\n transformed_points = cv2.perspectiveTransform(list_points_to_detect, matrix)\n # Loop over the points and add them to the list that will be returned\n transformed_points_list = list()\n for i in range(0, transformed_points.shape[0]):\n transformed_points_list.append([transformed_points[i][0][0], transformed_points[i][0][1]])\n return transformed_points_list", "def modelview_matrix(self):\n camera = self.figure.scene.camera\n return camera.view_transform_matrix.to_array().astype(np.float32)", "def inv(transform_matrix):\n\n r = transform_matrix[0:3, 0:3]\n t = transform_matrix[0:3, 3]\n t_inv = -1 * r.T.dot(t)\n transform_inv = np.eye(4)\n transform_inv[0:3, 0:3] = r.T\n transform_inv[0:3, 3] = t_inv\n\n return transform_inv" ]
[ "0.77655315", "0.6772391", "0.67308986", "0.669059", "0.6618192", "0.65587133", "0.64216304", "0.6264239", "0.6257005", "0.62168366", "0.6215676", "0.6160854", "0.6142326", "0.6108185", "0.6086129", "0.6077356", "0.60574424", "0.6047167", "0.6012863", "0.599367", "0.5987706", "0.5972167", "0.5967639", "0.59607065", "0.5937446", "0.59313554", "0.58857006", "0.5872851", "0.58446836", "0.5837853" ]
0.7251248
1
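The homography code above ends in a batched linear solve followed by a reshape to Bx3x3; a generic, hedged illustration of that final step (random placeholder tensors, with torch.linalg.solve standing in for the internal solver):

import torch

A = torch.randn(2, 8, 8)          # two stacked 8x8 DLT systems (batch size B = 2), placeholder values
b = torch.randn(2, 8, 1)
X = torch.linalg.solve(A, b)      # Bx8x1 solution: the first eight homography entries
M = torch.cat([X.squeeze(-1), torch.ones(2, 1)], dim=1).view(-1, 3, 3)  # append h33 = 1 -> Bx3x3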
Extract all Non Zero Pixels and return X, Y Coordinates
def extract_pixels(img_):
    non_zero_pixels = np.argwhere(0 < img_)
    x = non_zero_pixels.T[0].astype(np.float32)
    y = non_zero_pixels.T[1].astype(np.float32)
    return x, y
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __create_xyz_points(raster, no_data=-9999):\n y, x = np.where(raster != no_data)\n z = np.extract(raster != no_data, raster)\n\n return x, y, z", "def find_pixels(self):\n ref_image=Image.open('sample0000.png')\n imarray=np.array(ref_image)\n ref_image.close()\n self.number_of_pix=imarray.shape\n print self.number_of_pix\n ref_image=None\n imarray=None", "def get_scanner_xy(points, proj):\n\n # Find the pixel corresponding to (x=0,y=0)\n res_x = proj.projector.res_x # 5 pixels / m, 1 px = 20 cm\n res_y = proj.projector.res_y # 5 pixels / m , 1 px = 20 cm\n\n min_x, min_y, min_z = points.min(0)\n\n # the first coordinate is associated to the row coordinate of the image\n y0 = int(np.floor((0 - min_y) * res_y).astype(np.int))\n # the second coordinate is associated to the column coordinate of the image\n x0 = int(np.floor((0 - min_x) * res_x).astype(np.int))\n\n return x0, y0", "def getPixel(data,x,y):\n d0= data[y,x*2]\n \n if ( (d0[0]==255) and (d0[1]==127)):\n return [0.0,0.0,0.0]\n d1= data[y,x*2+1]\n test=_U()\n test.data=(c_ubyte * 6)(d0[0],d0[1],d0[2],d1[0],d1[1],d1[2])\n X=hex (test.DistXYZ.x)\n Y=hex (test.DistXYZ.y)\n Z=hex (test.DistXYZ.z)\n \n X=float(int(X,16)-int(\"0x7FFF\",16))/1000.0\n Y=float(int(Y,16)-int(\"0x7FFF\",16))/1000.0\n Z=float(int(Z,16)-int(\"0x7FFF\",16))/1000.0\n return [X,Y,Z]", "def pixelPoints(img, cnt):\n\tm = np.zeros(grayscale(img).shape, np.uint8)\n\tcv2.drawContours(m, [cnt], 0, 255, -1)\n\tpixelpoints = cv2.findNonZero(m)\n\treturn pixelpoints", "def neighbour_pixels(x, y):\n return [\n (x - 1, y - 1), (x, y - 1), (x + 1, y - 1),\n (x - 1, y ), (x, y ), (x + 1, y ),\n (x - 1, y + 1), (x, y + 1), (x + 1, y + 1)\n ]", "def pixel_to_coords(self, x, y):\n rx, ry = self.size\n nx = (x / rx - 0.5) * self.scale + self.center[0]\n ny = ((ry - y) / ry - 0.5) * self.scale + self.center[1]\n nz = self.center[2]\n return [nx, ny, nz]", "def get_empty_tiles(self) -> List[Point]:\n\t\tempty_tiles = []\n\t\tfor x in range(self.size):\n\t\t\tfor y in range(self.size):\n\t\t\t\tif self.tiles[x][y] == 0:\n\t\t\t\t\tempty_tiles.append(Point(x,y))\n\t\treturn empty_tiles", "def getpixels(self,x,y,dx,dy,Nx,Ny):\n \n Dx = (Nx*dx)\n Dy = (Ny*dy)\n\n # Not Nx + 1 to account for rounding\n pX = (x/dx + (Nx + 2)/2.).astype(int)\n pY = (y/dy + (Ny + 2)/2.).astype(int)\n pixels = pX + pY*Nx\n pixels[((pX < 0) | (pX >= Nx)) | ((pY < 0) | (pY >= Ny))] = -1\n\n # here we do use Nx + 1 as you want the precise float value of the pixel.\n return pixels,x/dx + (Nx + 1)/2., y/dx + (Nx + 1.)/2.", "def getPixels(self):\n self._logger.debug(\"getPixels\")", "def coordinates(self, mask):\n y,x = mask.nonzero()\n return list(zip(x,y))", "def findImmediateNeighbours(self):\n immediateNeighbours = []\n\n if self.xCoordinate - 1 > 0:\n immediateNeighbours.append(PixelPosition(self.xCoordinate - 1, self.yCoordinate))\n\n if self.xCoordinate + 1 < 395:\n immediateNeighbours.append(PixelPosition(self.xCoordinate + 1, self.yCoordinate))\n\n if self.yCoordinate + 1 < 500:\n immediateNeighbours.append(PixelPosition(self.xCoordinate, self.yCoordinate + 1))\n\n if self.yCoordinate - 1 > 0:\n immediateNeighbours.append(PixelPosition(self.xCoordinate, self.yCoordinate - 1))\n\n return immediateNeighbours", "def remove_blank_pixels(self,pixels,non_zero=None):\n self.uni2pix= np.unique(pixels).astype(int)\n self.pix2uni = {u:k for k,u in enumerate(self.uni2pix)}\n\n gb, gl = hp.pix2ang(self.nside, self.uni2pix)\n\n self.npix = self.uni2pix.size\n if isinstance(non_zero,type(None)):\n non_zero = 
np.where(self.wei != 0)[0]\n\n self.output = self.output[self.uni2pix]\n self.sigwei = self.sigwei[self.uni2pix]\n self.wei = self.wei[self.uni2pix]\n\n print('SIZE CHECK', self.wei.size, self.npix)", "def findNeighbours(self):\n neighbours = []\n\n for i in range(self.xCoordinate - 1, self.xCoordinate + 2):\n for j in range(self.yCoordinate - 1, self.yCoordinate + 2):\n if (not (i == self.xCoordinate and j == self.yCoordinate)) and (0 <= i <= 394 and 0 <= j <= 499):\n neighbours.append(PixelPosition(i, j))\n\n return neighbours", "def extract(pixels, rmin, rmax, cmin, cmax):\n copy = blank_image(rmax-rmin, cmax -cmin) \n for r in range(rmin, rmax):\n for c in range(cmin, cmax):\n copy[r-rmin][c-cmin] = pixels[r][c]\n return copy", "def get_pixel_locations(self, pixels):\n if self._cached_point_cloud is None:\n self._cached_point_cloud = self.as_point_cloud()\n pixel_locations = [\n self._cached_point_cloud[pixel.y * self.camera_setup.width +\n pixel.x] for pixel in pixels\n ]\n return [\n pylot.utils.Location(loc[0], loc[1], loc[2])\n for loc in pixel_locations\n ]", "def get_point(img, threshold):\n binary = np.zeros_like(img)\n binary[\n (img > threshold)\n ] = 1\n\n nonzero = binary.nonzero()\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n\n return nonzeroy, nonzerox", "def image_to_points(numpy_image):\r\n res = []\r\n for i in range(numpy_image.shape[0]):\r\n for j in range(numpy_image.shape[1]):\r\n if numpy_image[i,j]==0:\r\n res.append([i,j])\r\n return res", "def numZeroesAround(imgArray, (x, y)):\n\tnum = 0\n\tfor x_r in range(-1, 2):\n\t\tfor y_r in range(-1, 2):\n\t\t\tif x_r != 0 or y_r != 0:\n\t\t\t\tif imgArray[x + x_r][y + y_r] == (0, 0, 0, 255):\n\t\t\t\t\tnum += 1\n\n\treturn num", "def extract_pixels(self, pixel_geometry, strategy=strategies.nearest):\n\t\tif pixel_geometry.area == 0:\n\t\t\treturn strategy(self.get_array(),pixel_geometry)\n\t\telse:\n\t\t\treturn self.extract_area(pixel_geometry)", "def findEmpty(grid):\n for x in range(len(grid.board)):\n for y in range(len(grid.board[0])):\n if grid.board[x][y] == 0:\n return [x,y]", "def process_coords():\n split_coords = row[\"map_coord\"].split(',')\n map_x, map_y = [int(i) for i in split_coords]\n map_x_normed = ((map_x*2) / self.MINIMAP_DIM) - 1\n map_y_normed = -(((map_y*2) / self.MINIMAP_DIM) - 1)\n return map_x_normed, map_y_normed", "def get_none_zero_region(im, margin):\n input_shape = im.shape\n if(type(margin) is int ):\n margin = [margin]*len(input_shape)\n assert(len(input_shape) == len(margin))\n indxes = np.nonzero(im)\n idx_min = []\n idx_max = []\n for i in range(len(input_shape)):\n idx_min.append(indxes[i].min())\n idx_max.append(indxes[i].max())\n\n for i in range(len(input_shape)):\n idx_min[i] = max(idx_min[i] - margin[i], 0)\n idx_max[i] = min(idx_max[i] + margin[i], input_shape[i] - 1)\n return idx_min, idx_max", "def get_pixels(self):\n\n # pygame board needs to be initialized the first time\n if not self.board:\n self.setup_display(render_gui=False)\n\n self.draw_window(draw_leaderboard=False)\n pixels = pygame.surfarray.array3d(self.window)\n return np.moveaxis(pixels, 1, 0)", "def pixelsizex(self) -> ErrorValue:\n return ErrorValue(self._data['XPixel'], self._data.setdefault('XPixelError',0.0))", "def _get_coordinates(x,y,z):\n\t\ttemp = Board.board\n\t\ttemp1 = temp=='M'\n\t\tfor i in range(6,x):\n\t\t\tfor j in range(y,z):\n\t\t\t\tif(temp1[i][j]==True):\n\t\t\t\t\tcurrent_x = i\n\t\t\t\t\tcurrent_y = j\n\n\t\treturn current_x,current_y", "def 
get_pixels(surface):\n pixels = []\n for y in range(surface.get_height()):\n for x in range(surface.get_width()):\n pixels.append(surface.get_at((x,y))[:3])\n return pixels", "def _build_list_of_excluded_pixels(self, exclude_zones):\n \n pixels = []\n for x, y, width, height in exclude_zones:\n for row in range(height):\n for col in range(width):\n pixels.append(Pixel(col + x, row + y))\n \n return pixels", "def board_empty_positions(self, x, y):\n board = self.boards[x][y]\n coords = [(x, y, i, j) for (i, j) in board.empty_squares]\n return self.coords_to_positions(coords)", "def _xy_locs(mask):\n y, x = mask.nonzero()\n return list(zip(x, y))" ]
[ "0.655391", "0.64457804", "0.6414161", "0.63365555", "0.6304538", "0.62975013", "0.6216841", "0.62149465", "0.6204824", "0.61987865", "0.6195701", "0.6187824", "0.61301506", "0.60819465", "0.6074533", "0.60721606", "0.60360277", "0.6013827", "0.59923506", "0.597244", "0.59497803", "0.59497446", "0.5945289", "0.594088", "0.59152704", "0.5906459", "0.5903735", "0.58962256", "0.5813987", "0.58030176" ]
0.81292105
0
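A minimal usage sketch for the extract_pixels document above, assuming NumPy and a made-up 4x4 binary image (the array values below are illustrative, not part of the dataset):

import numpy as np

binary = np.zeros((4, 4), dtype=np.uint8)
binary[1, 2] = 255
binary[3, 0] = 255

x, y = extract_pixels(binary)
# np.argwhere scans row-major, so x (row indices) == [1., 3.] and y (column indices) == [2., 0.]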
Get x intercepts for given y value
def get_intercepts(fit, y):
    x = fit[0] * (y * y) + fit[1] * y + fit[2]
    return x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_x_y_for_line(bounds, y_intercept, slope): \n\n x = np.sort(bounds)\n\n y = y_intercept + (slope * x)\n\n return x, y", "def intercept(x1, y1, x2, y2):\r\n m = slope(x1, y1, x2, y2)\r\n return y1 - m*x1", "def get_fit_x(self, y):\n if self.line_fit_m.size == 0:\n return np.empty(y.shape)\n fit = self.line_fit\n return np.array(fit[0] * y ** 2 + fit[1] * y + fit[2]).astype(\"int\")", "def get_outliers_inliers(X, y):\n X_outliers = X[np.where(y == 1)]\n X_inliers = X[np.where(y == 0)]\n return X_outliers, X_inliers", "def intercept(self) -> np.ndarray:\n return self._intercept", "def intercept_(self):\n return self.regression.intercept_", "def getX(self, y=0):\n if y is not None:\n a = self.coefficients[\"x2\"]\n b = self.coefficients[\"x1\"]\n c = (\n self.coefficients[\"c\"]\n + self.coefficients[\"y2\"] * y ** 2.0\n + self.coefficients[\"y1\"] * y\n )\n\n return self.quadratic(a, b, c)\n else:\n return [None]", "def get_index(self, x, y):\n i = (y - self.y0) // self.dy\n j = (x - self.x0) // self.dx\n i = min(max(i, 0), self.n-1)\n j = min(max(j, 0), self.m-1)\n return [i, j]", "def index ( self, x, y ):\n if x < self.x_min or x > self.x_max:\n raise ValueError(\"x-value out of range\")\n if y < self.y_min or y > self.y_max:\n raise ValueError(\"y-value out of range\")\n xi = int((x-self.x_min)/self.increment+0.5)\n yi = int((y-self.y_min)/self.increment+0.5)\n return xi, yi", "def index ( self, x, y ):\n if x < self.x_min or x > self.x_max:\n raise ValueError(\"x-value out of range\")\n if y < self.y_min or y > self.y_max:\n raise ValueError(\"y-value out of range\")\n xi = int((x-self.x_min)/self.increment+0.5)\n yi = int((y-self.y_min)/self.increment+0.5)\n return xi, yi", "def xintercept(self):\n if self.slope() == 0:\n return None\n else:\n return self.c/self.a", "def get_intercept(self, independent, dependent):\n\n try:\n x = self.df_input[[independent]]\n y = self.df_input[[dependent]]\n\n lm = LinearRegression()\n lm.fit(x, y)\n b = lm.intercept_\n return round(b[0], 4)\n except Exception as e:\n print(e)", "def get_x_and_theta_with_intercept(self):\n intercept = np.ones((self.test_x_matrix.shape[0], 1))\n x_with_intercept = np.concatenate((intercept, self.test_x_matrix), axis=1)\n theta_with_intercept = np.zeros(x_with_intercept.shape[1])\n return [x_with_intercept, theta_with_intercept]", "def yintercept(self):\n if self.slope() is None:\n return None\n else:\n return self.c/self.b", "def find_at(self, x, y):\n return list(self.ifind_at(x, y))", "def regress_residuals(x, y):\r\n slope, intercept = regress(x, y)\r\n coords = zip(x, y)\r\n residuals = []\r\n for x, y in coords:\r\n e = y - (slope * x) - intercept\r\n residuals.append(e)\r\n return residuals", "def xy2ind(self, x, y):\n return self.sub2ind(*self.xy2sub(x, y))", "def y(self, x):\n return x", "def get_poly_intercept(self, independent, dependent):\n\n try:\n x = self.df_input[[independent]]\n y = self.df_input[[dependent]]\n\n poly = PolynomialFeatures(degree = 2)\n x_poly = poly.fit_transform(x) \n\n model = LinearRegression()\n model.fit(x_poly, y)\n intercept_arr = model.intercept_\n return round(intercept_arr[0], 4)\n except Exception as e:\n print(e)", "def getPointValues(a, Y, x):\n raise NotImplementedError('getPoint not implemented')", "def intercept(self):\n return self.core.fmlayer.b.numpy()", "def get_Xy(self, latents=False):\n if latents:\n return (self.latent, self.y)\n else:\n if not hasattr(self, \"Xs\"):\n raise NameError(\"sample_views has not been called yet\")\n return (self.Xs, self.y)", 
"def _fit_intercept(self, X, y):\n if self.fit_intercept:\n mu = np.exp(np.dot(X, self.coef_))\n self.intercept_ = np.log(np.mean(y)/np.mean(mu))\n else:\n self.intercept_ = np.zeros(1)", "def _get_xy_lims(self):\n \n x = self.datapos[0] - 1\n y = self.datapos[1] - 1\n\n return x, y", "def y(df,x):\r\n x_p=np.array(df['Vertices'])\r\n y_p=np.array(df['DIxPRE 252'])\r\n cs = scipy.interpolate.splrep(x_p,y_p)\r\n return scipy.interpolate.splev(x,cs)", "def findCurvePoints(self, x, y, c):\n\t\tyCurve = []\n\t\tfor xi in x:\n\t\t\tyi = self.polynomialFunct(c, xi)\n\t\t\t\n\t\t\tyCurve.append( yi )\n\t\t\n\t\treturn np.asarray(yCurve)", "def y(x,xi):\n return np.exp(-xi)-np.exp(-xi)*(x-xi)", "def getxy(hist):\n y_vals = hist['h']\n x_vals = hist['bins']\n return x_vals, np.r_[y_vals, y_vals[-1]]", "def get_intercept(self):\n return self.intercept", "def add_intercept(self, x):\n\t\tif x.ndim == 1:\n\t\t\treturn np.array([[1.0, elem] for elem in x])\n\t\telse:\n\t\t\tlst = []\n\t\t\tfor elem in x:\n\t\t\t\ttmp = elem.tolist()\n\t\t\t\ttmp.insert(0, 1.0)\n\t\t\t\tlst.append(tmp)\n\t\t\treturn np.array(lst)" ]
[ "0.63922507", "0.6389413", "0.6266955", "0.6237579", "0.62286144", "0.609001", "0.60876924", "0.6066772", "0.605182", "0.605182", "0.6022421", "0.59712934", "0.5959732", "0.5946199", "0.5945855", "0.5922003", "0.59173673", "0.5792059", "0.57911325", "0.57743794", "0.57610834", "0.5734935", "0.5727229", "0.57152706", "0.57118404", "0.56842184", "0.5674705", "0.5667412", "0.56664705", "0.5661578" ]
0.7766451
0
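A short, hedged sketch pairing get_intercepts above with np.polyfit, fitting x as a second-order polynomial of y; the sample points are invented for illustration:

import numpy as np

y_pts = np.array([0.0, 100.0, 200.0, 300.0])
x_pts = np.array([50.0, 60.0, 75.0, 95.0])

fit = np.polyfit(y_pts, x_pts, 2)                           # [a, b, c] for x = a*y**2 + b*y + c
x_at = get_intercepts(fit, np.array([0.0, 150.0, 300.0]))   # x positions at the chosen y values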
Get Left_x, Right_x, Left_y, Right_y, Image , return Image with Polygon
def draw_polygon(left_x, right_x, left_y, right_y, img_):
    pts_left = np.array([np.flipud(np.transpose(np.vstack([left_x, left_y])))])
    pts_right = np.array([np.transpose(np.vstack([right_x, right_y]))])
    pts = np.hstack((pts_left, pts_right))
    img_ = cv2.polylines(img_, np.int_([pts]), isClosed=False, color=(60, 200, 60), thickness=10, lineType=cv2.LINE_AA)
    img_ = cv2.fillPoly(img_, np.int_(pts), (50, 90, 50))
    return img_
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_image(self):\n dir_ship = self.direction\n dir_cannon=self.cannon_dir\n temp_image = self.base_image.copy()\n pygame.draw.polygon(temp_image, (0,0,0), [(2,2),(2,3),(3,3),(3,2)])\n if dir_cannon == dir_ship:\n pygame.draw.polygon(temp_image, (60,60,60), [(4,3),(4,2), (5,3),(5,2)])\n if (dir_ship - dir_cannon)%4 ==1:#-90° angle\n pygame.draw.polygon(temp_image, (60,60,60), [(2,4),(3,4), (2,5),(3,5)])\n if (dir_ship - dir_cannon)%4 ==3:#+90° angle\n pygame.draw.polygon(temp_image, (60,60,60), [(2,1),(3,1), (2,0),(3,0)])\n if (dir_ship - dir_cannon)%4 ==2:#180° angle\n pygame.draw.polygon(temp_image, (60,60,60), [(1,2),(1,3), (0,2),(0,3)])\n temp_image=pygame.transform.rotate(temp_image,dir_ship*90)\n return temp_image", "def generatePolygons():", "def get_boundary_as_polygon(self, do_geo=True):\n xhor, yhor = self.get_coordinates()\n dimensions = xhor.shape\n xbottom = xhor[0, :]\n xright = xhor[:, dimensions[1]-1]\n xtop = xhor[dimensions[0]-1, :][::-1]\n xleft = xhor[:, 0][::-1]\n\n ybottom = yhor[0, :]\n yright = yhor[:, dimensions[1]-1]\n ytop = yhor[dimensions[0]-1, :][::-1]\n yleft = yhor[:, 0][::-1]\n\n lon_square = np.concatenate((xbottom, xright, xtop, xleft))\n lat_square = np.concatenate((ybottom, yright, ytop, yleft))\n\n return lon_square, lat_square", "def geotif_image(self, tile_bounds, image_bounds, imagepath,image_gdal):\n i_srid=3857\n s_srid=\"WGS 84 / Pseudo-Mercator\"\n # i_srid=3395\n # s_srid=\"WGS 84 / World Mercator\"\n # 4326 Wsg84\n # Upper Left ( -8.4375000, 77.1571625) ( 8d26'15.00\"W, 77d 9'25.79\"N)\n # Lower Left ( -8.4375000, 35.4606700) ( 8d26'15.00\"W, 35d27'38.41\"N)\n # Upper Right ( 80.1562500, 77.1571625) ( 80d 9'22.50\"E, 77d 9'25.79\"N)\n # Lower Right ( 80.1562500, 35.4606700) ( 80d 9'22.50\"E, 35d27'38.41\"N)\n # Center ( 35.8593750, 56.3089162) ( 35d51'33.75\"E, 56d18'32.10\"N)\n # 3857 'WGS 84 / Pseudo-Mercator'\n # Upper Left ( -939258.204,13932330.020) ( 8d26'15.00\"W, 77d 9'25.79\"N)\n # Lower Left ( -939258.204, 4226661.916) ( 8d26'15.00\"W, 35d27'38.41\"N)\n # Upper Right ( 8922952.934,13932330.020) ( 80d 9'22.50\"E, 77d 9'25.79\"N)\n # Lower Right ( 8922952.934, 4226661.916) ( 80d 9'22.50\"E, 35d27'38.41\"N)\n # Center ( 3991847.365, 9079495.968) ( 35d51'33.75\"E, 62d54'54.84\"N)\n # 3395 'WGS 84 / World Mercator'\n # Upper Left ( -939258.204,13932330.020) ( 8d26'15.00\"W, 77d14'24.81\"N)\n # Lower Left ( -939258.204, 4226661.916) ( 8d26'15.00\"W, 35d38'33.56\"N)\n # Upper Right ( 8922952.934,13932330.020) ( 80d 9'22.50\"E, 77d14'24.81\"N)\n # Lower Right ( 8922952.934, 4226661.916) ( 80d 9'22.50\"E, 35d38'33.56\"N)\n # Center ( 3991847.365, 9079495.968) ( 35d51'33.75\"E, 63d 4'14.87\"N)\n bounds_west,bounds_south,bounds_east,bounds_north=tile_bounds\n bounds_wsg84=\"bounds_wsg84: %f,%f,%f,%f\"% (bounds_west,bounds_south,bounds_east,bounds_north)\n mercator = GlobalMercator()\n tile_bounds=mercator.BoundsToMeters(tile_bounds)\n mbtiles_name=\"\";\n mbtiles_description=\"\"\n s_TIFFTAG_DOCUMENTNAME=\"\"\n s_TIFFTAG_IMAGEDESCRIPTION=\"\"\n s_TIFFTAG_SOFTWARE=\"\"\n s_TIFFTAG_DATETIME=\"\"\n s_TIFFTAG_ARTIST=\"\"\n s_TIFFTAG_HOSTCOMPUTER=\"\"\n s_TIFFTAG_COPYRIGHT=\"\"\n if self.metadata_input:\n metadata=dict(self.metadata_input)\n mbtiles_name=metadata.get('name','')\n mbtiles_description=metadata.get('description','')\n if self._metadata:\n for metadata_list in self._metadata:\n metadata=dict(metadata_list[0])\n mbtiles_name=metadata.get('name',mbtiles_name)\n mbtiles_description=metadata.get('description',mbtiles_description)\n 
s_TIFFTAG_DOCUMENTNAME=metadata.get('TIFFTAG_DOCUMENTNAME',mbtiles_name)\n s_TIFFTAG_IMAGEDESCRIPTION=metadata.get('TIFFTAG_IMAGEDESCRIPTION',mbtiles_description)\n s_TIFFTAG_SOFTWARE=metadata.get('TIFFTAG_SOFTWARE','')\n s_TIFFTAG_DATETIME=metadata.get('TIFFTAG_DATETIME','')\n s_TIFFTAG_ARTIST=metadata.get('TIFFTAG_ARTIST','')\n s_TIFFTAG_HOSTCOMPUTER=metadata.get('TIFFTAG_HOSTCOMPUTER','')\n s_TIFFTAG_COPYRIGHT=metadata.get('TIFFTAG_COPYRIGHT','')\n if s_TIFFTAG_DOCUMENTNAME == \"\":\n s_TIFFTAG_DOCUMENTNAME=mbtiles_name\n if s_TIFFTAG_IMAGEDESCRIPTION == \"\":\n s_TIFFTAG_IMAGEDESCRIPTION=mbtiles_description\n tiff_metadata=[]\n if s_TIFFTAG_DOCUMENTNAME != \"\":\n tiff_metadata.append(('TIFFTAG_DOCUMENTNAME',s_TIFFTAG_DOCUMENTNAME))\n if s_TIFFTAG_IMAGEDESCRIPTION != \"\":\n tiff_metadata.append(('TIFFTAG_IMAGEDESCRIPTION',s_TIFFTAG_IMAGEDESCRIPTION))\n if s_TIFFTAG_SOFTWARE != \"\":\n tiff_metadata.append(('TIFFTAG_SOFTWARE',s_TIFFTAG_SOFTWARE))\n else:\n tiff_metadata.append(('TIFFTAG_SOFTWARE',bounds_wsg84))\n if s_TIFFTAG_DATETIME != \"\":\n tiff_metadata.append(('TIFFTAG_DATETIME',s_TIFFTAG_DATETIME))\n if s_TIFFTAG_ARTIST != \"\":\n tiff_metadata.append(('TIFFTAG_ARTIST',s_TIFFTAG_ARTIST))\n if s_TIFFTAG_HOSTCOMPUTER != \"\":\n tiff_metadata.append(('TIFFTAG_HOSTCOMPUTER',s_TIFFTAG_HOSTCOMPUTER))\n if s_TIFFTAG_COPYRIGHT != \"\":\n tiff_metadata.append(('TIFFTAG_COPYRIGHT',s_TIFFTAG_COPYRIGHT))\n # this assumes the projection is Geographic lat/lon WGS 84\n xmin,ymin,xmax,ymax=tile_bounds\n image_width,image_height=image_bounds\n # Upper Left ( 20800.000, 22000.000)\n # Lower Right ( 24000.000, 19600.000)\n # Size is 15118, 11339\n # (24000-20800)/15118 = 3200 = 0,21166821 [xres]\n # (19600-22000)/11339 = 2400 = −0,211658876 [yres]\n # geo_transform = (20800.0, 0.2116682100807, 0.0, 22000.0, 0.0, -0.21165887644413)\n geo_transform = [xmin, (xmax-xmin)/image_width, 0, ymax, 0, (ymin-ymax)/image_height ]\n spatial_projection = osr.SpatialReference()\n spatial_projection.ImportFromEPSG(i_srid)\n logger.info(_(\"-I-> geotif_image: Saving as GeoTiff - image[%s] compression[%s]\") % (imagepath,self.tiff_compression))\n image_dataset = gdal.Open(image_gdal, gdal.GA_Update )\n image_dataset.SetProjection(spatial_projection.ExportToWkt())\n image_dataset.SetGeoTransform(geo_transform)\n driver = gdal.GetDriverByName(\"GTiff\")\n output_dataset = driver.CreateCopy(imagepath,image_dataset, 0, self.tiff_compression )\n if tiff_metadata:\n logger.info(_(\"-I-> geotif_image: tiff_metadata[%s]\") % tiff_metadata)\n output_dataset.SetMetadata(dict(tiff_metadata))\n # Once we're done, close properly the dataset\n output_dataset = None\n image_dataset = None\n os.remove(image_gdal)\n logger.info(_(\"-I-> geotif_image: Saved resulting image to '%s' as GeoTiff- bounds[%s]\") % (imagepath,tile_bounds))", "def boundary_polygon(self):\n try:\n return self.boundary_polygon_by_edges()\n except Exception as exc:\n self.log.warning('Warning, boundary_polygon() failed using edges! 
Trying polygon union method')\n self.log.warning(exc,exc_info=True)\n return self.boundary_polygon_by_union()", "def Hexagon(image):\n return x, y", "def generate_image(self):\n\t\tcenters = self.generate_centers()\n\t\timg = Image.new('RGB', (self.config.image_size, self.config.image_size), color=(0,0,0))\n\t\tshapes = np.random.randint(2, size=len(centers))\n\t\tdrawer = ImageDraw.Draw(img)\n\t\tr = int(0.05 * self.config.image_size)\n\t\tR = []\n\t\tfor i in range(len(centers)):\n\t\t\tcoor = (centers[i][0] - r , centers[i][1] - r, centers[i][0] + r, centers[i][1] + r)\n\t\t\tif shapes[i] < 0.5:\n\t\t\t\tdrawer.rectangle(coor, fill=COLOR[i])\n\t\t\telse:\n\t\t\t\tdrawer.ellipse(coor, fill=COLOR[i])\n\t\t\tR.append([centers[i], i, shapes[i]])\n\t\treturn np.array(img), R", "def boundary_polygon_by_edges(self):\n lines=self.boundary_linestrings()\n polys=join_features.lines_to_polygons(lines,close_arc=False)\n if len(polys)>1:\n raise GridException(\"somehow there are multiple boundary polygons\")\n return polys[0]", "def get_polygons(annotation):\n print(f\"Loadding: {annotation}\")\n tree = ET.parse(annotation)\n root = tree.getroot()\n polygons = {}\n for obj in root.findall('object'):\n name = obj.find('name').text\n id_ = obj.find('id').text\n polygon = []\n for pt in obj.find('polygon').findall('pt'):\n polygon.append([pt.find('x').text, pt.find('y').text])\n if name in polygons:\n x_ref= int(polygons[name]['left'][0][0])\n x = int(polygon[0][0])\n if x > x_ref:\n polygons[name]['right'] = polygons[name]['left']\n id_ = 'left'\n else:\n id_ = 'right'\n else:\n polygons[name] = {}\n id_ = 'left'\n polygons[name][id_] = polygon\n for i in list(polygons.keys()):\n if not('right' in polygons[i]):\n print(i,' only has one polygon: ',polygons[i]['left'])\n y = input('Do you wish to label it as \\'right\\'? 
(leave empy if No): ')\n if (y):\n polygons[i]['right'] = polygons[i]['left']\n polygons[i].pop('left')\n return polygons", "def get_zone_pixels(feat, input_zone_polygon, input_value_raster, band, coords=[]): #, raster_band\n \n \n \n # Open data\n raster = gdal.Open(input_value_raster)\n shp = ogr.Open(input_zone_polygon)\n lyr = shp.GetLayer()\n \n # Get raster georeference info\n transform = raster.GetGeoTransform()\n xOrigin = transform[0]\n yOrigin = transform[3]\n pixelWidth = transform[1]\n pixelHeight = transform[5]\n \n sizeX = raster.RasterXSize\n sizeY = raster.RasterYSize\n lrx = xOrigin + (sizeX * pixelWidth)\n lry = yOrigin + (sizeY * pixelHeight)\n \n \n \n # Reproject vector geometry to same projection as raster\n #sourceSR = lyr.GetSpatialRef()\n #targetSR = osr.SpatialReference()\n #targetSR.ImportFromWkt(raster.GetProjectionRef())\n #coordTrans = osr.CoordinateTransformation(sourceSR,targetSR)\n #feat = lyr.GetNextFeature()\n #geom = feat.GetGeometryRef()\n #geom.Transform(coordTrans)\n \n # Get extent of feat\n geom = feat.GetGeometryRef()\n if (geom.GetGeometryName() == 'MULTIPOLYGON'):\n count = 0\n pointsX = []; pointsY = []\n for polygon in geom:\n geomInner = geom.GetGeometryRef(count)\n ring = geomInner.GetGeometryRef(0)\n numpoints = ring.GetPointCount()\n for p in range(numpoints):\n lon, lat, z = ring.GetPoint(p)\n pointsX.append(lon)\n pointsY.append(lat)\n count += 1\n elif (geom.GetGeometryName() == 'POLYGON'):\n ring = geom.GetGeometryRef(0)\n numpoints = ring.GetPointCount()\n pointsX = []; pointsY = []\n for p in range(numpoints):\n lon, lat, z = ring.GetPoint(p)\n pointsX.append(lon)\n pointsY.append(lat)\n\n else:\n sys.exit(\"ERROR: Geometry needs to be either Polygon or Multipolygon\")\n\n #xmin = min(pointsX) \n #xmax = max(pointsX)\n #ymin = min(pointsY)\n #ymax = max(pointsY)\n \n \n if len(coords) == 0: \n xmin = xOrigin if (min(pointsX) < xOrigin) else min(pointsX)\n xmax = lrx if (max(pointsX) > lrx) else max(pointsX)\n ymin = lry if (min(pointsY) < lry) else min(pointsY)\n ymax = yOrigin if (max(pointsY) > yOrigin) else max(pointsY)\n else:\n xmin = coords[0] if (min(pointsX) < coords[0]) else min(pointsX)\n xmax = coords[1] if (max(pointsX) > coords[1]) else max(pointsX)\n ymin = coords[2] if (min(pointsY) < coords[2]) else min(pointsY)\n ymax = coords[3] if (max(pointsY) > coords[3]) else max(pointsY)\n \n # Specify offset and rows and columns to read\n xoff = int((xmin - xOrigin)/pixelWidth)\n yoff = int((yOrigin - ymax)/pixelWidth)\n xcount = int((xmax - xmin)/pixelWidth) #+1 !!!!!!!!!!!!!!!!!!!!! This adds a pixel to the right side\n ycount = int((ymax - ymin)/pixelWidth) #+1 !!!!!!!!!!!!!!!!!!!!! 
This adds a pixel to the bottom side\n \n #print(xoff, yoff, xcount, ycount)\n \n # Create memory target raster\n target_ds = gdal.GetDriverByName('MEM').Create('', xcount, ycount, 1, gdal.GDT_Byte)\n target_ds.SetGeoTransform((\n xmin, pixelWidth, 0,\n ymax, 0, pixelHeight,\n ))\n\n # Create for target raster the same projection as for the value raster\n raster_srs = osr.SpatialReference()\n raster_srs.ImportFromWkt(raster.GetProjectionRef())\n target_ds.SetProjection(raster_srs.ExportToWkt())\n\n # Rasterize zone polygon to raster\n gdal.RasterizeLayer(target_ds, [1], lyr, burn_values=[1])\n\n # Read raster as arrays\n dataBandRaster = raster.GetRasterBand(band)\n data = dataBandRaster.ReadAsArray(xoff, yoff, xcount, ycount).astype(np.float)\n bandmask = target_ds.GetRasterBand(1)\n datamask = bandmask.ReadAsArray(0, 0, xcount, ycount).astype(np.float)\n\n # data zone of raster\n dataZone = np.ma.masked_array(data, np.logical_not(datamask))\n\n raster_srs = None\n raster = None\n shp = None\n lyr = None\n return [dataZone, [xmin,xmax,ymin,ymax]]", "def _createpoly(self):\n return self.cv.create_polygon((0, 0, 0, 0, 0, 0), fill=\"\", outline=\"\")", "def fill_lane(self,img_shape):\n\n\t\tbinary_l = np.zeros(img_shape, dtype=np.uint8)\n\n\t\tploty = np.linspace(0, img_shape[0]-1, img_shape[0])\n\t\tplotx_l = self.leftLine.fit[0]*ploty**2 + self.leftLine.fit[1]*ploty + self.leftLine.fit[2]\n\t\tplotx_r = self.rightLine.fit[0]*ploty**2 + self.rightLine.fit[1]*ploty + self.rightLine.fit[2]\n\n\t\tline_points_l = np.column_stack((plotx_l,ploty))\n\t\tline_points_r = np.column_stack((plotx_r,ploty))\n\t\tline_points = np.concatenate((line_points_l,line_points_r[::-1],line_points_l[:1]))\n\n\t\tcv2.fillPoly(binary_l, np.int32([line_points]),color=255)\n\n\t\tpolygon = np.dstack((np.zeros(img_shape),binary_l,np.zeros(img_shape))).astype('uint8')\n\t\t\n\t\treturn polygon\n\t\tunwarped_polygon = self.cam.unwarp(polygon)\n\t\treturn unwarped_polygon", "def region_points(x, y, width, xmin, xmax):\n right = (x, y + width / 2)\n top = (xmax, y)\n left = (x, y - width / 2)\n bottom = (xmin, y)\n return (right, top, left, bottom)", "def get_image(self):\n image = Image.new('1', (8, 16))\n draw = ImageDraw.Draw(image)\n for x in xrange(8):\n for y in xrange(16):\n draw.point((x,y),self.get_pixel(x, y))\n return image", "def __draw_polygon(self, event, klick):\n global creating_polygon\n curX = self.canvas.canvasx(event.x)\n curY = self.canvas.canvasy(event.y)\n if not klick and len(self.polygon_points) >= 2:\n c_r_x, c_r_y = self.get_canvas_relative_coords((self.polygon_points[-2], self.polygon_points[-1]))\n distanceX = curX - c_r_x\n distanceY = curY - c_r_y\n if pow(pow(distanceX, 2) + pow(distanceY, 2), 1 / 2) <= 15:\n return\n image_relative_x, image_relative_y = self.get_image_relative_coords((curX, curY))\n self.polygon_points.extend((image_relative_x, image_relative_y))\n self.polygon_groundstructure.append(self.canvas.create_rectangle(curX - 2, curY - 2, curX + 2, curY + 2,\n outline='magenta', width=1,\n activewidth=2))\n if not creating_polygon: # start with a new polygon\n creating_polygon = True\n return\n else: # draw a line between the last points\n c_r_x1, c_r_y1 = self.get_canvas_relative_coords((self.polygon_points[-4], self.polygon_points[-3]))\n c_r_x2, c_r_y2 = self.get_canvas_relative_coords((self.polygon_points[-2], self.polygon_points[-1]))\n self.polygon_groundstructure.append(self.canvas.create_line([c_r_x1, c_r_y1, c_r_x2, c_r_y2],\n fill='magenta', width=2))", "def 
draw_shape(self, image, shape, p, color):\n # Get the center x, y and the size s\n p = p['shape_attributes']\n if shape == 'rect':\n image = cv2.rectangle(\n image, (p['x'], p['y']), (p['x'] + p['width'], p['y'] + p['height']), color, -1)\n elif shape == \"circle\":\n #image = cv2.circle(image, (p['cx'], p['cy']), np.int(p['r']), color, -1)\n image = cv2.rectangle(image, (p['cx']-np.int32(p['r']/2.0), p['cy']-np.int32(\n p['r']/2.0)), (p['cx'] + np.int32(p['r']), p['cy'] + np.int32(p['r'])), color, -1)\n elif shape == \"point\":\n #image = cv2.circle(image, (p['cx'], p['cy']), 15, color, -1)\n image = cv2.rectangle(\n image, (p['cx']-8, p['cy']-8), (p['cx']+16, p['cy']+16), color, -1)\n elif shape == \"polygon\":\n pts = np.zeros((len(p['all_points_x']), 2), np.int32)\n for i in range(len(p['all_points_x'])):\n pts[i] = [p['all_points_x'][i], p['all_points_y'][i]]\n if (self.config.MODE == \"Combined\"):\n pts = pts.reshape((-1, 1, 2))\n elif (self.config.MODE == \"Separate\"):\n pts = pts.reshape((1, -1, 2))\n image = cv2.fillPoly(image, pts, color, lineType=cv2.LINE_AA)\n\n return image", "def _getshapepoly(self, polygon, compound=False):\n if self._resizemode == \"user\" or compound:\n t11, t12, t21, t22 = self._shapetrafo\n elif self._resizemode == \"auto\":\n l = max(1, self._pensize/5.0)\n t11, t12, t21, t22 = l, 0, 0, l\n elif self._resizemode == \"noresize\":\n return polygon\n return tuple((t11*x + t12*y, t21*x + t22*y) for (x, y) in polygon)", "def draw(self, binary, leftx, rightx):\n\t\tfilled = np.zeros_like(binary)\n\t\tploty = np.linspace(0, filled.shape[0] - 1, filled.shape[0])\n\t\t# Recast the x and y points into usable format for cv2.fillpoly()\n\t\tpts_left = np.array([np.transpose(np.vstack([leftx, ploty]))])\n\t\tpts_right = np.array([np.flipud(np.transpose(np.vstack([rightx, ploty])))])\n\t\tpts = np.hstack((pts_left, pts_right))\n\t\t# Draw the lane onto the warped blank image\n\t\tcv2.fillPoly(filled, np.int_([pts]), (0, 255, 0))\n\t\treturn filled", "def bilinear(upperleft, upperright, lowerright, lowerleft, side = 'middle'):\r\n\tx1, y1, x2, y2, x3, y3, x4, y4 = upperleft[0], upperleft[1], upperright[0], upperright[1], lowerright[0], lowerright[1], lowerleft[0], lowerleft[1]\r\n\ty14 = (y1 + y4) / 2.\r\n\ty23 = (y2 + y3) / 2.\r\n\ty = (y14 + y23) / 2.\r\n\tx12 = (x1 + x2) / 2.\r\n\tx43 = (x4 + x3) / 2.\r\n\tx = (x12 + x43) / 2.\r\n\r\n\tif side == 'middle':\r\n\t\treturn (x, y)\r\n\telif side == 'left':\r\n\t\treturn ((x1 + x4) / 2., y4)\r\n\telif side == 'right':\r\n\t\treturn ((x2 + x3) / 2., y3)# get middle right coordinate\r\n\telse:\r\n\t\traise Exception('No other sides!')", "def draw_boxes(image, bounds):\n draw = ImageDraw.Draw(image)\n if bounds[0].normalized_vertices:\n width = image.width\n height = image.height\n for i in range(len(bounds)):\n draw.polygon([\n bounds[i].normalized_vertices[0].x * width, bounds[i].normalized_vertices[0].y * height,\n bounds[i].normalized_vertices[1].x * width, bounds[i].normalized_vertices[1].y * height,\n bounds[i].normalized_vertices[2].x * width, bounds[i].normalized_vertices[2].y * height,\n bounds[i].normalized_vertices[3].x * width, bounds[i].normalized_vertices[3].y * height],\n None, colors[i % len(colors)])\n return image\n else:\n for i in range(len(bounds)):\n draw.polygon([\n bounds[i].vertices[0].x, bounds[i].vertices[0].y,\n bounds[i].vertices[1].x, bounds[i].vertices[1].y,\n bounds[i].vertices[2].x, bounds[i].vertices[2].y,\n bounds[i].vertices[3].x, bounds[i].vertices[3].y],\n None, colors[i % 
len(colors)])\n return image", "def draw_obstacle(start, end, img):\n # start, end, top_right, top_left = generate_obstacle_point(start, (start[0] + _OBSTACLE_SIZE, start[1] ))\n cv2.fillPoly(img, np.array([[[start[0] - 25, start[1] - 25],\n [start[0] + 25, start[1] - 25],\n [start[0] + 25, start[1] + 25],\n [start[0] - 25, start[1] + 25]]]), _RED)\n # cv2.rectangle(img, (start[0] - 25, start[1] - 25), (start[0] + 25, start[1] + 25), (0, 255, 0), 3)\n return img", "def polygon(self):\n return self._polygon", "def _get_polygon(element):\n polygon = element.find('%s/%s/%s/%s/%s' %\n (NRML04_AREA_GEOMETRY, gml.GML_POLYGON,\n gml.GML_EXTERIOR, gml.GML_LINEAR_RING,\n gml.GML_POS_LIST)).text\n\n polygon = gml._get_polygon_from_2DLinestring(polygon)\n\n return polygon", "def rig_matte((height, width), vectors, dtype=np.float_):\n img = Image.new('L', (width, height), 1)\n ImageDraw.Draw(img).polygon(vectors, outline=0, fill=0)\n return np.array(img, dtype=dtype)", "def to_shapely_polygon(self):\n # load shapely lazily, which makes the dependency more optional\n import shapely.geometry\n return shapely.geometry.Polygon([(point[0], point[1]) for point in self.exterior])", "def shape(self):\n return (self.xres, self.yres)", "def draw(self):\n\t\tif self.image != None:\n\t\t\treturn\n\n\t\tself.image = Image.new(\"RGBA\", \n\t\t\t(self.__class__._width, self.__class__._height), (0,0,0,0))\n\t\tdr = aggdraw.Draw(self.image)\n\n\t\tzIndexList = [] # list of z-indexes\n\t\tgIndexList = [] # list of gene indexes\n\n\t\tgeneIdx = 0\n\t\tfor gene in self.chromosome: \n\t\t\tzIndexList.append(gene[0])\n\t\t\tgIndexList.append(geneIdx)\n\t\t\tgeneIdx+=1\n\n\t\twhile len(zIndexList) > 0:\n\t\t\tzIndexMin = zIndexList.index(min(zIndexList))\n\t\t\tgeneIdx = gIndexList[zIndexMin]\n\t\t\tz = self.chromosome[geneIdx,0]\n\t\t\tcolor = self.chromosome[geneIdx,1]\n\t\t\tpoly = self.chromosome[geneIdx,2]\n\n\t\t\tcol = color.getColor()\n\t\t\tcords = poly.getCords()\n\t\t\tbrush = aggdraw.Brush(col)\n\n\t\t\t#dr.ellipse((x, y, x+rad, y+rad), None, brush)\n\t\t\tdr.polygon(cords, None, brush)\n\t\t\tdr.flush()\n\n\t\t\tif 0: # Debug - draw dot in center of polygon\n\t\t\t\tcenter = (poly.xAnchor, poly.yAnchor, poly.xAnchor+5, poly.yAnchor+5)\n\t\t\t\tdr.ellipse(center, None, aggdraw.Brush((255,0,0,255)))\n\t\t\t\tdr.flush()\n\n\t\t\tzIndexList.pop(zIndexMin)\n\t\t\tgIndexList.pop(zIndexMin)\n\n\t\tself.image = self.image.convert(\"RGB\")", "def img_roi(img, vertices):\n #defining a blank mask to start with\n mask = np.zeros_like(img) \n \n #defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 
3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n \n #filling pixels inside the polygon defined by \"vertices\" with the fill color \n cv2.fillPoly(mask, vertices, ignore_mask_color)\n \n #returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image", "def region_of_interest(self,img):\r\n #defining a blank mask\r\n mask = np.zeros_like(img) \r\n #checking number of image channel(color/grayscale) and applying mask\r\n if len(img.shape) > 2:\r\n ignore_mask_color = (255,255,255)\r\n else:\r\n ignore_mask_color = 255\r\n #filling color to pixels inside the polygon \r\n cv2.fillPoly(mask, self.vertices_img, ignore_mask_color)\r\n #image where mask pixels are nonzero\r\n masked_image = cv2.bitwise_and(img, mask)\r\n #cv2.imshow('',masked_image)\r\n return masked_image", "def get_polygon(img, gradx, grady, seed):\n rays = get_rays(NRAY, RHO, RHO_SKIP)\n # minCellSize = np.pi * MINCELLRADIUS**2\n # maxCellSize = np.pi * MAXCELLRADIUS**2\n assert 0<seed[0]<img.shape[0] and 0<seed[1]<img.shape[1]\n (cr,cc) = seed # cr, cc is the coordinates of the seed\n [ac, ar] = np.meshgrid(np.array(range(img.shape[0])), np.array(range(img.shape[1])))\n cac = (ac-cc).astype(np.float32) # cac,car represent the distance of each pixel on the image to the seed\n car = (ar-cr).astype(np.float32)\n with np.errstate(all='ignore'):\n unitx = np.cos(np.arctan(np.abs(car/cac))) * np.sign(cac) # unitx,unity represent cosine value of each pixel on the image to the seed\n unity = np.cos(np.arctan(np.abs(cac/car))) * np.sign(car)\n dirslopes = gradx * unitx + grady * unity # dirslopes is the gradient map which consider the seed points as the center\n\n tab = np.zeros((RHO - RHO_SKIP, NRAY))\n gxtab = np.zeros((RHO - RHO_SKIP, NRAY))\n gytab = np.zeros((RHO - RHO_SKIP, NRAY))\n pixtab = np.zeros((RHO - RHO_SKIP, NRAY))\n for i in range(NRAY):\n for j in range(RHO-RHO_SKIP):\n pr = int(cr + rays[i][j, 0])\n pc = int(cc + rays[i][j, 1])\n tab[j, i] = dirslopes[pr, pc]\n gxtab[j, i] = gradx[pr, pc]\n gytab[j, i] = grady[pr, pc]\n pixtab[j, i] = img[pr, pc]\n\n minpath = findminpath(tab, gxtab, gytab, pixtab) # get the minpath\n\n polygon = np.zeros((NRAY, 2))\n for i in range(NRAY):\n polygon[i, 0] = cr + rays[i][minpath[i], 0]\n polygon[i, 1] = cc + rays[i][minpath[i], 1]\n #hull = ConvexHull(polygon)\n #polygon = polygon[hull.vertices]\n #print(polygon.shape[0])\n return polygon" ]
[ "0.65409863", "0.6449552", "0.62613237", "0.61531603", "0.61507094", "0.6134605", "0.5918344", "0.5892853", "0.58924294", "0.58919007", "0.58887494", "0.58701646", "0.58659583", "0.5839686", "0.58155435", "0.5813019", "0.581092", "0.5798064", "0.57906735", "0.5779135", "0.57643324", "0.5759361", "0.5755348", "0.57522845", "0.5746816", "0.5746685", "0.5742059", "0.57363904", "0.5733292", "0.569904" ]
0.7396713
0
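A minimal sketch of calling the draw_polygon document above on a blank BGR canvas; the canvas size and the toy lane curves are assumptions, not dataset values:

import numpy as np
import cv2

canvas = np.zeros((720, 1280, 3), dtype=np.uint8)
left_y = np.linspace(0, 719, 720)
right_y = left_y.copy()
left_x = 300 + 0.0001 * left_y ** 2     # toy left-lane curve (pixel columns)
right_x = 900 + 0.0001 * right_y ** 2   # toy right-lane curve (pixel columns)

overlay = draw_polygon(left_x, right_x, left_y, right_y, canvas)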
Draw Polylines for points with given thickness specified by Window Size
def draw_polylines(input_img, pts, window_size):
    return cv2.polylines(input_img, np.int_([pts]), isClosed=False, color=(255, 255, 255), thickness=2 * window_size)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_lines(self, color, points, width = 1, closed = False):\n color = spyral.color._determine(color)\n pygame.draw.aalines(self._surf, color, closed, points)", "def drawLines(self):\n\t\tintersections = [[], []]\n\t\tfor l in self.lines:\n\t\t\tif l.direction == 'v':\n\t\t\t\tif l.rtc:\n\t\t\t\t\tposition = l.coordinate + int((self.width - 1) / 2)\n\t\t\t\telse:\n\t\t\t\t\tposition = int((l.coordinate * self.width / 100) if type(l.coordinate) == float else l.coordinate)\n\t\t\t\tintersections[0].append(position)\n\t\t\t\tfor yPos in range(1, self.height - 2):\n\t\t\t\t\tself.wts(yPos, position, '│', self._borderColor)\n\t\t\t\t# endpoints\n\t\t\t\tself.wts(0, position, '┬',self._borderColor)\n\t\t\t\tself.wts(self.height - 2, position, '┴', self._borderColor)\n\t\t\telif l.direction == 'h':\n\t\t\t\tif l.rtc:\n\t\t\t\t\tposition = l.coordinate + ((self.height - 1) / 2)\n\t\t\t\telse:\n\t\t\t\t\tposition = int((l.coordinate * self.height / 100) - 1 if type(l.coordinate) == float else l.coordinate)\n\t\t\t\tintersections[1].append(position)\n\t\t\t\tself.wts(position, 1, '─' * (self.width - 2), self._borderColor)\n\t\t\t\t# endpoints\n\t\t\t\tself.wts(position, 0, '├', self._borderColor)\n\t\t\t\tself.wts(position, self.width - 1, '┤', self._borderColor)\n\t\t# draw intersections\n\t\tfor x in intersections[1]:\n\t\t\tfor y in intersections[0]:\n\t\t\t\tself.wts(x, y, '┼', self._borderColor)\n\t\tself.verticalBoundaries = intersections[0]\n\t\tif self.screenBorder:\n\t\t\tself.verticalBoundaries.append(self.width)", "def drawPaths(points, lines, height, lineWidth, pointRadius):\r\n\r\n\tlineArraySize = len(lines)\r\n\tpointArraySize = len(points)\r\n\tlineArrayItems = lineArraySize / 4\r\n\tpointArrayItems = pointArraySize / 2\r\n\r\n\r\n\tglLineWidth(lineWidth)\r\n\tglPointSize(pointRadius)\r\n\r\n\tglColor4f(0.0, 0.0, 1.0, 1.0)\r\n\tglNormal3f(0.0, 0.0, 1.0)\r\n\r\n\tglDisable(GL_TEXTURE_2D)\r\n\r\n\tglBegin(GL_LINES)\r\n\r\n#\tglLoadIdentity()\r\n\r\n\tfor i in range(lineArrayItems):\r\n\t\tglVertex3f(lines[i * 4], height - lines[i * 4 + 1], 0.1)\r\n\t\tglVertex3f(lines[i * 4 + 2], height - lines[i * 4 + 3], 0.1)\r\n\r\n\tglEnd()\r\n\r\n\tglBegin(GL_POINTS)\r\n\r\n#\tglLoadIdentity()\r\n\r\n\tfor i in range(pointArrayItems):\r\n\t\tglVertex3f(points[i * 2], height - points[i * 2 + 1], 0.11)\r\n\r\n\tglEnd()\r\n\r\n\tglEnable(GL_TEXTURE_2D)", "def wdraw_polyline(self, wcoords, color):\r\n dpoints = []\r\n for i in range(0, len(wcoords), 2):\r\n dpoints += self.w_to_d(wcoords[i], wcoords[i+1])\r\n self.canvas.create_line(dpoints, fill=color)", "def linewidth(self, size: float):\n for line in self.ax.lines:\n line.set_linewidth(size)\n self.canvas.draw()", "def draw_lines(self, color, points, width=1, closed=False):\n if width == 1:\n pygame.draw.aalines(self._surf, color, closed, points)\n else:\n pygame.draw.lines(self._surf, color, closed, points, width)\n self._version += 1\n spyral.util.scale_surface.clear(self._surf)\n return self", "def draw_lines(point_list, color, border_width=1):\n GL.glEnable(GL.GL_BLEND)\n GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)\n GL.glEnable(GL.GL_LINE_SMOOTH)\n GL.glHint(GL.GL_LINE_SMOOTH_HINT, GL.GL_NICEST)\n GL.glHint(GL.GL_POLYGON_SMOOTH_HINT, GL.GL_NICEST)\n\n GL.glLoadIdentity()\n\n # Set line width\n GL.glLineWidth(border_width)\n\n # Set color\n if len(color) == 4:\n GL.glColor4ub(color[0], color[1], color[2], color[3])\n elif len(color) == 3:\n GL.glColor4ub(color[0], color[1], color[2], 255)\n\n GL.glBegin(GL.GL_LINES)\n for 
point in point_list:\n GL.glVertex3f(point[0], point[1], 0.5)\n GL.glEnd()", "def drawcutline(f,layernamelist,cutline_entities_count): \r\n \r\n #layernamelist=[layernamelist[0]] \r\n layercount=0\r\n ringlist=[[[-0.215+globalconfig.CUTLINE_X_OFFSET,0.0+globalconfig.CUTLINE_Y_OFFSET],[0.215+globalconfig.CUTLINE_X_OFFSET,0.0+globalconfig.CUTLINE_Y_OFFSET]],\r\n [[-0.215+globalconfig.CUTLINE_X_OFFSET,171.68+globalconfig.CUTLINE_Y_OFFSET],[0.215+globalconfig.CUTLINE_X_OFFSET,171.68+globalconfig.CUTLINE_Y_OFFSET]],\r\n [[-0.215+globalconfig.CUTLINE_X_OFFSET,175.68+globalconfig.CUTLINE_Y_OFFSET],[0.215+globalconfig.CUTLINE_X_OFFSET,175.68+globalconfig.CUTLINE_Y_OFFSET]],\r\n [[171.4650+globalconfig.CUTLINE_X_OFFSET,0.0+globalconfig.CUTLINE_Y_OFFSET],[171.8950+globalconfig.CUTLINE_X_OFFSET,0.0+globalconfig.CUTLINE_Y_OFFSET]],\r\n [[171.4650+globalconfig.CUTLINE_X_OFFSET,171.68+globalconfig.CUTLINE_Y_OFFSET],[171.8950+globalconfig.CUTLINE_X_OFFSET,171.68+globalconfig.CUTLINE_Y_OFFSET]]]\r\n flashlist=buildflashlist()\r\n cutlineset=buildcutlineset() \r\n \r\n f.write(\"0\\nSECTION\\n2\\nENTITIES\\n\")\r\n \r\n for layername in layernamelist:\r\n layercount=layercount+1\r\n for polyline in cutlineset:\r\n cutline_entities_count=cutline_entities_count+1\r\n f.write(\"0\\nPOLYLINE\\n8\\n\"+layername+\"\\n5\\n\"+hex(cutline_entities_count)[2:]) # begin writing a polyline\r\n f.write(\"\\n66\\n1\\n10\\n0.0\\n20\\n0.0\\n30\\n0.0\\n40\\n0.08\\n41\\n0.08\\n\")\r\n cutline_entities_count=drawwidthpolyline(polyline, cutline_entities_count, f,layername)\r\n cutline_entities_count=drawring(ringlist, cutline_entities_count, f, layername)\r\n cutline_entities_count=drawflash(flashlist, cutline_entities_count, f, layername)\r\n cutline_entities_count=drawtext(cutline_entities_count, f, layername,layercount)\r\n \r\n return cutline_entities_count", "def draw_lines(self):\n for x_cord in range(0, Dimension.SCREEN_WIDTH.value, Dimension.SQUARE_WIDTH.value):\n pg.draw.line(self.window, Colors.BLACK.value, (x_cord, 0), (x_cord, Dimension.SCREEN_HEIGHT.value))\n\n for y_cord in range(0, Dimension.SCREEN_HEIGHT.value, Dimension.SQUARE_HEIGHT.value):\n pg.draw.line(self.window, Colors.BLACK.value, (0, y_cord), (Dimension.SCREEN_WIDTH.value, y_cord))\n\n pg.display.update()", "def draw_line():\n\n # Small Size Line\n glLineWidth(0.1)\n glColor3f(0.5, 1.0, 0.9)\n wid = 0\n while wid <= width:\n length = 0\n while length <= height:\n glBegin(GL_LINES)\n glVertex3f(0.0, length, 0.0)\n glVertex3f(wid, length, 0)\n glEnd()\n glBegin(GL_LINES)\n glVertex3f(length, 0, 0.0)\n glVertex3f(length, wid, 0)\n glEnd()\n length += 10\n wid += 50\n # Medium Size Line\n glLineWidth(2.0)\n wid = 0\n while wid <= width:\n length = 0\n while length <= height:\n glBegin(GL_LINES)\n glVertex3f(0.0, length, 0.0)\n glVertex3f(wid, length, 0)\n glEnd()\n length += 50\n glBegin(GL_LINES)\n glVertex3f(length, 0, 0.0)\n glVertex3f(length, wid, 0)\n glEnd()\n wid += 50\n # Main Line\n # ordinat\n glLineWidth(1.5)\n glColor3f(0.5, 0.4, 0.8)\n glBegin(GL_LINES)\n glVertex3f(height / 2, 0, 0.0)\n glVertex3f(height / 2, width, 0)\n glEnd()\n # absis\n glBegin(GL_LINES)\n glVertex3f(0, width / 2, 0.0)\n glVertex3f(height, width / 2, 0)\n glEnd()", "def draw_line_strip(point_list, color, border_width=1):\n GL.glEnable(GL.GL_BLEND)\n GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)\n GL.glEnable(GL.GL_LINE_SMOOTH)\n GL.glHint(GL.GL_LINE_SMOOTH_HINT, GL.GL_NICEST)\n GL.glHint(GL.GL_POLYGON_SMOOTH_HINT, GL.GL_NICEST)\n\n # Set line width\n 
GL.glLineWidth(border_width)\n\n GL.glLoadIdentity()\n\n # Set color\n if len(color) == 4:\n GL.glColor4ub(color[0], color[1], color[2], color[3])\n elif len(color) == 3:\n GL.glColor4ub(color[0], color[1], color[2], 255)\n\n GL.glBegin(GL.GL_LINE_STRIP)\n for point in point_list:\n GL.glVertex3f(point[0], point[1], 0.5)\n GL.glEnd()", "def drawPoints(self, qp):\n\n# pen = self.pen\n\n\n size = self.size()\n self.yOffset = [size.height()*0.2 + size.height()*0.618/self.NUM_CHANNEL * y for y in xrange(self.NUM_CHANNEL) ]\n\n for ix in xrange(self.NUM_CHANNEL):\n self.pen.setStyle(Qt.SolidLine)\n self.pen.setWidth(2)\n self.pen.setBrush(self.PEN_COLOR[ix])\n self.pen.setCapStyle(Qt.RoundCap)\n self.pen.setJoinStyle(Qt.RoundJoin)\n qp.setPen(self.pen)\n\n qp.drawLine(self.x - 2, self.yOffset[ix] - \\\n self.data_1[ix] * self.DISPLAY_SCALING[ix],\\\n self.x , self.yOffset[ix] - \\\n self.data[ix] * self.DISPLAY_SCALING[ix])", "def draw_poly(t, n, sz):\r\n\r\n\tfor i in range(n):\r\n\t\tt.forward(sz)\r\n\t\tt.left(360/n)", "def L(width = 1, size = (10, 20), layer = 0):\n D = Device(name = 'L')\n w = width/2\n s1, s2 = size\n points = [(-w, -w), (s1, -w), (s1, w), (w, w), (w, s2), (-w, s2), (-w, -w)]\n D.add_polygon(points, layer = layer)\n D.add_port(name = 1, midpoint = (0, s2), width = width, orientation = 90)\n D.add_port(name = 2, midpoint = (s1, 0), width = width, orientation = 0)\n return D", "def draw_polygon_outline(point_list, color, border_width=1):\n GL.glEnable(GL.GL_BLEND)\n GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)\n GL.glEnable(GL.GL_LINE_SMOOTH)\n GL.glHint(GL.GL_LINE_SMOOTH_HINT, GL.GL_NICEST)\n GL.glHint(GL.GL_POLYGON_SMOOTH_HINT, GL.GL_NICEST)\n\n # Set line width\n GL.glLineWidth(border_width)\n\n GL.glLoadIdentity()\n\n # Set color\n if len(color) == 4:\n GL.glColor4ub(color[0], color[1], color[2], color[3])\n elif len(color) == 3:\n GL.glColor4ub(color[0], color[1], color[2], 255)\n\n GL.glBegin(GL.GL_LINE_LOOP)\n for point in point_list:\n GL.glVertex3f(point[0], point[1], 0.5)\n GL.glEnd()", "def draw_polyline(self, points, line_width, line_color):\n line_color = check_color(line_color)\n STline.line(self.canvas, points, line_width, line_color)", "def draw_equitriangle(t,sz):\r\n\r\n\tdraw_poly(t, 3, sz)", "def drawWarpLines(self):\n # draw warp lines\n for item in self.game.warpLines:\n anwp.sl.engine.drawLine(item[0]+self.bufferX, item[1]+self.bufferY, item[2]+self.bufferX, item[3]+self.bufferY, pyui.colors.blue)", "def draw_polyline(*points):\r\n global _canvas\r\n if _canvas == None:\r\n raise RuntimeError(\"Canvas is not open yet.\")\r\n else:\r\n #print(points)\r\n #print(len(points))\r\n newpoints = []\r\n for x in range(0, len(points), 2):\r\n #print(x)\r\n pt = Point(points[x], points[x+1])\r\n newpoints += [ pt ]\r\n #print(newpoints)\r\n path = Path(*newpoints)\r\n path.setBorderWidth(_current_line_thickness)\r\n path.setBorderColor(_current_color)\r\n _canvas.add(path)", "def graphicsDraw(self, win, center):\n\t\tlastPoint = None\n\t\tfor p in self.points:\n\t\t\tthisPoint = Point(p[0] + center.x, p[1] + center.y)\n\t\t\tif lastPoint is not None:\n\t\t\t\tline = Line(lastPoint, thisPoint)\n\t\t\t\tline.draw(win)\n\t\t\tlastPoint = thisPoint", "def draw_lines(img, lines, color=[0, 0, 255], thickness=10):\n \n yFinal = 540 # tweak these values as per the frame size\n yIni = 350\n xPlus = []\n yPlus = []\n xMinus = []\n yMinus= []\n slope_range = 0.2\n\n if lines is not None:\n for line in lines:\n if line is not None:\n for x1,y1,x2,y2 in 
line:\n # check slope \n slope = (y2-y1)/(x2-x1)\n\t\t \n \t\t # Collect all points with + ve slope (right lane)\n if (slope > slope_range):\n xPlus.append(x1)\n xPlus.append(x2)\n yPlus.append(y1)\n yPlus.append(y2)\n\n # Collect all points with - ve slope (left lane)\n elif ((slope) < (-slope_range)):\n xMinus.append(x1)\n xMinus.append(x2)\n yMinus.append(y1)\n yMinus.append(y2)\n # If out of range, lists defined in beginning of this function will be empty \n else:\n continue\n \n # draw right lane\n x1,y1,x2,y2 = fit_line(xPlus, yPlus, yIni, yFinal)\n cv2.line(img,(x1,y1),(x2,y2),color, thickness) \n\n # draw left lane\n x1,y1,x2,y2 = fit_line(xMinus, yMinus, yIni, yFinal)\n cv2.line(img,(x1,y1),(x2,y2),color,thickness)", "def set_spines(plot):\n for spines in plot.spines.values():\n spines.set_linewidth(.2)", "def line_layer(self):\n screen_origin = self.ids.mapview.get_window_xy_from(lat1, lon1, self.ids.mapview.zoom)\n screen_destination = self.ids.mapview.get_window_xy_from(lat2, lon2, self.ids.mapview.zoom)\n point_list = [screen_origin[0], screen_origin[1], screen_destination[0], screen_destination[1]]\n\n with self.ids.line.canvas:\n self.ids.line.canvas.clear()\n\n Color(0, 0, 0, .6)\n Line(points=point_list, width=3, joint=\"bevel\")", "def setLineWidth(w=1):\n dislin.linwid(w)", "def to_strokes(self, width:float, color:list):\n # corner points\n # NOTE: center line of path without stroke width considered\n x0, y0, x1, y1 = self.rect\n points = [\n (x0, y0), (x1, y0), (x1, y1), (x0, y1), (x0, y0)\n ]\n # connect each line\n strokes = []\n for i in range(len(points)-1):\n strokes.append({\n 'start': points[i],\n 'end' : points[i+1],\n 'width': width * 2.0, # seems need adjustment by * 2.0\n 'color': rgb_value(color)\n })\n return strokes", "def plot(self, frame, fit, thickness=2):\n if not fit:\n return frame\n X,Y = fit.linspace()\n X = np.array(X,dtype=np.int32)\n Y = np.array(Y,dtype=np.int32)\n curve = np.column_stack((X,Y))\n cv2.polylines(frame, [curve], False, color=(0,0,255),thickness=thickness)\n return frame", "def draw_lines(display, coord, box_size, color, bg_color):\n left, top = coord\n stroke = 6\n half_stroke = int(stroke / 2)\n left = left + half_stroke\n top = top + half_stroke\n box_size = box_size - stroke\n for i in range(0, box_size, int(stroke + 2)):\n pygame.draw.line(\n display, color,\n (left, top + i),\n (left + i, top),\n stroke,\n )\n pygame.draw.line(\n display, color,\n (left + i, top + box_size - 1),\n (left + box_size - 1, top + i),\n stroke,\n )\n return", "def draw_lines(self):\n # draw x lines\n y = self.step_y\n while y <= self.height:\n x = 0\n while x <= self.width:\n self.canvas.create_line(x, y, x+3.5, y)\n self.canvas.update()\n x += 3.5\n y += self.step_y\n \n # draw y lines\n x = self.step_x\n while x <= self.width:\n y = 0\n while y <= self.height:\n self.canvas.create_line(x, y, x, y+3.5)\n self.canvas.update()\n y += 3.5\n x += self.step_x\n \n self.is_operating = False", "def plot_insertsize():", "def line(canvas, points, line_width, line_color):\n \n # duplicate first point in case only one point was given\n points = points[0], points\n canvas.create_line(points, width = int(line_width), fill = line_color)" ]
[ "0.6230124", "0.61896694", "0.61215425", "0.6083533", "0.5963582", "0.5954477", "0.58733976", "0.58443934", "0.5826821", "0.58263594", "0.58262116", "0.5811678", "0.57939386", "0.573556", "0.5720824", "0.5711357", "0.57111347", "0.57054645", "0.56073576", "0.557913", "0.55431724", "0.5536334", "0.553583", "0.5531654", "0.5529746", "0.55269706", "0.55130357", "0.5499437", "0.54823595", "0.54584074" ]
0.69677657
0
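A hedged usage sketch for draw_polylines above; the grayscale canvas, the toy curve, and window_size=25 are illustrative assumptions:

import numpy as np

canvas = np.zeros((720, 1280), dtype=np.uint8)
ys = np.linspace(0, 719, 720)
xs = 640 + 0.0002 * (ys - 360) ** 2      # toy lane centre line (pixel columns)
pts = np.transpose(np.vstack([xs, ys]))  # Nx2 array of (x, y) points

mask = draw_polylines(canvas, pts, window_size=25)   # thick white band, thickness = 2 * 25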
Use polyfit from the mask points for smoothening them
def smoothen_masks(fit, img_, window_size):
    img_size = img_.shape
    mask_poly = np.zeros_like(img_)
    # Get top to Bottom for refactoring #
    mask_y = np.linspace(0, img_size[0] - 1, img_size[0])
    mask_x = get_intercepts(fit, mask_y)
    # Smoothen the mask #
    pts = coordinates_to_imgpts(mask_x, mask_y)
    mask_poly_smooth = draw_polylines(mask_poly, pts, window_size)
    return mask_poly_smooth
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sigclip_polyfit(p, xx, yy, degree, weight = None):\n # read constants from p\n sigclip = p['WAVE_SIGCLIP']\n # initialise the while loop\n sigmax = sigclip + 1\n # initialise mask\n mask = np.ones_like(xx, dtype='Bool')\n while sigmax > sigclip:\n # Need to mask weight here if not None\n if weight is not None:\n weight2 = weight[mask]\n else:\n weight2 = None\n # fit on masked values\n coeff = nanpolyfit(xx[mask], yy[mask], deg=degree, w=weight2)\n # get residuals (not masked or dimension breaks)\n res = yy - np.polyval(coeff, xx)\n # normalise the residuals\n res = np.abs(res / np.nanmedian(np.abs(res[mask])))\n # get the max residual in sigmas\n sigmax = np.max(res[mask])\n # mask all outliers\n if sigmax > sigclip:\n mask[res >= sigclip] = False\n # return the coefficients and mask\n return coeff, mask", "def fit_clip(x, y, clip=0.4, index_fit = 2, kernel = 19, mask =\"\", \n xmin=\"\",xmax=\"\",ymin=\"\",ymax=\"\",percentile_min=2, percentile_max=98,\n ptitle=None, xlabel=None, ylabel = None, label=\"\", \n hlines=[], vlines=[],chlines=[], cvlines=[], axvspan=[[0,0]], hwidth =1, vwidth =1,\n plot=True, verbose=True):\n \n # Preparing the data. Trim edges and remove nans\n \n \n \n \n if kernel != 0:\n x = np.array(x)\n y = np.array(y)\n \n y_smooth = signal.medfilt(y, kernel)\n residuals = y - y_smooth\n residuals_std = np.std(residuals)\n \n y_nan = [np.nan if np.abs(i) > residuals_std*clip else 1. for i in residuals ] \n y_clipped = y * y_nan\n \n idx = np.isfinite(x) & np.isfinite(y_clipped)\n \n fit = np.polyfit(x[idx], y_clipped[idx], index_fit) \n pp=np.poly1d(fit)\n y_fit=pp(x)\n y_fit_clipped =pp(x[idx])\n \n if verbose: \n print(\"\\n> Fitting a polynomium of degree\",index_fit,\"using clip =\",clip,\"* std ...\")\n print(\" Eliminated\",len(x)-len(x[idx]),\"outliers, the solution is: \",fit)\n \n if plot:\n if ylabel is None: ylabel = \"y (x)\"\n \n if ptitle is None:\n ptitle = \"Polyfit of degree \"+np.str(index_fit)+\" using clip = \"+np.str(clip)+\" * std\"\n plot_plot(x, [y,y_smooth, y_clipped, y_fit], psym=[\"+\",\"-\", \"+\",\"-\"],\n alpha=[0.5,0.5,0.8,1], color=[\"r\",\"b\",\"g\",\"k\"], label=label,\n xlabel=xlabel, ylabel=ylabel, ptitle=ptitle, \n xmin=xmin,xmax=xmax,ymin=ymin,ymax=ymax,percentile_min=percentile_min, percentile_max=percentile_max,\n hlines=hlines, vlines=vlines,chlines=chlines, cvlines=cvlines, \n axvspan=axvspan, hwidth =hwidth, vwidth =vwidth)\n\n return fit, pp, y_fit, y_fit_clipped, x[idx], y_clipped[idx] \n else:\n fit = np.polyfit(x, y, index_fit) \n pp=np.poly1d(fit)\n y_fit=pp(x)\n return fit, pp, y_fit, y_fit, x, y", "def poly_scale(self,p,ind=5,groupby=None):\n\n time = self.data.index.values[:ind]\n\n if groupby is None:\n #group = {(None,self.meta.index)}\n for i in range(self.data.shape[1]):\n temp = self.data.iloc[:,i]\n od = temp.values[:ind]\n\n coeff = np.polyfit(time,od,p)\n\n temp = temp - np.polyval(coeff,self.data.index.values[0])\n self.data.iloc[:,i] = temp\n else:\n group = self.meta.groupby(groupby)\n for k,index in group.groups.items():\n temp = self.data.loc[:,index]\n od = temp.values[:ind,:].ravel()\n\n coeff = np.polyfit(time.tolist()*temp.shape[1],od,p)\n\n temp = temp - np.polyval(coeff,self.data.index.values[0])\n self.data.loc[:,index] = temp", "def linear_regression(self, x_data, y_data, mask = None, ax = None):\n if mask is None:\n mask = full(len(y_data), True, dtype=bool)\n poly = poly1d(polyfit(x_data[mask], y_data[mask], 1))\n\n if ax is not None:\n ax.plot(x_data, polyval(poly, x_data), \"--r\",\\\n 
label = \"Slope: %.2f\" %(poly[1]))\n return poly", "def poly2mask(self):\n self.x_gridnum = int((self.x_range[1] - self.x_range[0]) / self.x_gridsize)\n self.y_gridnum = int((self.y_range[1] - self.y_range[0]) / self.y_gridsize)\n img = Image.new(\"L\", (self.x_gridnum, self.y_gridnum), 0)\n\n self.perimeter = 0.0\n for ii in self.polygons:\n pp = np.array(ii) * self.CD # polygon\n polygonlen = len(pp)\n self.perimeter += np.sum(np.abs(pp[0:-1] - pp[1:polygonlen]))\n pp[:, 0] = (pp[:, 0] - self.x_range[0]) / self.x_gridsize\n pp[:, 1] = (pp[:, 1] - self.y_range[0]) / self.y_gridsize\n vetex_list = list(pp)\n polygon = [tuple(y) for y in vetex_list]\n ImageDraw.Draw(img).polygon(polygon, outline=1, fill=1)\n\n self.data = np.array(img)\n self.data = np.float64(self.data)\n\n self.spat_part = pyfftw.empty_aligned(\n (self.y_gridnum, self.x_gridnum), dtype=\"complex128\"\n )\n self.freq_part = pyfftw.empty_aligned(\n (self.y_gridnum, self.x_gridnum), dtype=\"complex128\"\n )\n self.fft_mask = pyfftw.FFTW(self.spat_part, self.freq_part, axes=(0, 1))", "def test_linear_fit_2d_model_set_masked_values(self):\n init_model = models.Polynomial2D(1, n_models=2)\n x, y = np.mgrid[0:5, 0:5]\n z = np.ma.masked_array(\n [2 * x + 3 * y + 1, x - 0.5 * y - 2], mask=np.zeros_like([x, x])\n )\n\n z[0, 3, 1] = -1000.0 # throw off fit coefficients if unmasked\n z.mask[0, 3, 1] = True\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, y, z)\n\n assert_allclose(fitted_model.c0_0, [1.0, -2.0], atol=1e-14)\n assert_allclose(fitted_model.c1_0, [2.0, 1.0], atol=1e-14)\n assert_allclose(fitted_model.c0_1, [3.0, -0.5], atol=1e-14)", "def fitdata(X, Y, func, mask, p0 = None, filename = \"fit\", sigma = None,\\\r\n xlabel = None, ylabel = None, scatter = False):\r\n \r\n #gather indices that are within the masking region \r\n index = (X>mask[0]) & (X<mask[1])\r\n \r\n #get parameters of the fitting of the function using scipy\r\n (p, er) = optimize.curve_fit(func, X[index], Y[index], p0 = p0,\\\r\n sigma = sigma)\r\n \r\n #print out the parameter values and the errors\r\n print(\"###################\")\r\n print(p)\r\n print(np.sqrt(np.diag(er)))\r\n print(\"###################\")\r\n \r\n #create scatter plot if required\r\n if scatter:\r\n f = plt.figure()\r\n ax1 = f.add_subplot(111)\r\n ax1.scatter(X, Y, linewidth = 1, label = \"data\", marker = 'x')\r\n ax1.set_xlabel(xlabel)\r\n ax1.set_ylabel(ylabel)\r\n else:\r\n f = makeplot(X, [Y], [\"data\"], xlabel, ylabel, plainlines = True)\r\n f.axes[0].plot(np.linspace(X[index][0],X[index][-1],1000), \\\r\n func(np.linspace(X[index][0],X[index][-1],1000), p[0],p[1],p[2]), \\\r\n 'r-', label = \"fit\")\r\n f.axes[0].errorbar(X, Y, yerr = sigma, fmt = 'bx', elinewidth = 1,\\\r\n ecolor = 'black', capsize = 2)\r\n f.axes[0].legend(loc = 'best')\r\n f.axes[0].grid()\r\n f.savefig(filename+\".svg\")", "def fit_polynomial_regression(self, x_train, y_train):\n x_poly = self.poly_reg.fit_transform(x_train)\n self.lin_reg.fit(x_poly, y_train)", "def slopemap(inr,insp,dims): \n slope,intercept = np.polyfit(inr,insp, 1)\n slopemap = slope.reshape(dims)\n\n return slopemap", "def fit(self, X, y):", "def fit(self, X, y):", "def fit(self, X, y):", "def fit_1d_solution_sigclip(p, loc):\n func_name = __NAME__ + '.fit_1d_solution_sigclip()'\n # read constants from p\n n_init = p['WAVE_N_ORD_START']\n n_fin = p['WAVE_N_ORD_FINAL']\n\n # set up storage arrays\n xpix = np.arange(loc['NBPIX'])\n wave_map_final = np.zeros((n_fin - n_init, loc['NBPIX']))\n 
poly_wave_sol_final = np.zeros((n_fin - n_init, p['IC_LL_DEGR_FIT'] + 1))\n\n # fit x v wavelength w/sigma-clipping\n # we remove modulo 1 pixel errors in line centers - 3 iterations\n n_ite_mod_x = 3\n for ite in range(n_ite_mod_x):\n # set up storage\n wsumres = 0.0\n wsumres2 = 0.0\n sweight = 0.0\n fp_x_final_clip = []\n fp_x_in_clip = []\n fp_ll_final_clip = []\n fp_ll_in_clip = []\n fp_ord_clip = []\n res_clip = []\n wei_clip = []\n scale = []\n res_modx = np.zeros_like(loc['FP_XX_NEW'])\n # loop over the orders\n for onum in range(n_fin - n_init):\n # order mask\n ord_mask = np.where(loc['FP_ORD_NEW'] == onum +\n n_init)\n # get FP line pixel positions for the order\n fp_x_ord = loc['FP_XX_NEW'][ord_mask]\n # get new FP line wavelengths for the order\n fp_ll_new_ord = np.asarray(loc['FP_LL_NEW'])[ord_mask]\n # get weights for the order\n wei_ord = np.asarray(loc['FP_WEI'])[ord_mask]\n # fit solution for the order w/sigma-clipping\n coeffs, mask = sigclip_polyfit(p, fp_x_ord, fp_ll_new_ord,\n p['IC_LL_DEGR_FIT'], wei_ord)\n # store the coefficients\n poly_wave_sol_final[onum] = coeffs[::-1]\n # get the residuals modulo x\n res_modx[ord_mask] = speed_of_light * (fp_ll_new_ord /\n np.polyval(coeffs,\n fp_x_ord) - 1)\n # mask input arrays for statistics\n fp_x_ord = fp_x_ord[mask]\n fp_ll_new_ord = fp_ll_new_ord[mask]\n wei_ord = wei_ord[mask]\n # get final wavelengths\n fp_ll_final_ord = np.polyval(coeffs, fp_x_ord)\n # save wave map\n wave_map_final[onum] = np.polyval(coeffs, xpix)\n # save masked arrays\n fp_x_final_clip.append(fp_x_ord)\n fp_x_in_clip.append(loc['FP_XX_INIT'][ord_mask][mask])\n fp_ll_final_clip.append(fp_ll_final_ord)\n fp_ll_in_clip.append(fp_ll_new_ord)\n fp_ord_clip.append(loc['FP_ORD_NEW'][ord_mask][mask])\n wei_clip.append(wei_ord)\n # residuals in km/s\n # calculate the residuals for the final masked arrays\n res = fp_ll_final_ord - fp_ll_new_ord\n res_clip.append(res * speed_of_light / fp_ll_new_ord)\n # save stats\n # get the derivative of the coefficients\n poly = np.poly1d(coeffs)\n dldx = np.polyder(poly)(fp_x_ord)\n # work out conversion factor\n convert = speed_of_light * dldx / fp_ll_final_ord\n scale.append(convert)\n # sum the weights (recursively)\n sweight += np.nansum(wei_clip[onum])\n # sum the weighted residuals in km/s\n wsumres += np.nansum(res_clip[onum] * wei_clip[onum])\n # sum the weighted squared residuals in km/s\n wsumres2 += np.nansum(wei_clip[onum] * res_clip[onum] ** 2)\n\n # we construct a sin/cos model of the error in line center position\n # and fit it to the residuals\n cos = np.cos(2 * np.pi * (loc['FP_XX_NEW'] % 1))\n sin = np.sin(2 * np.pi * (loc['FP_XX_NEW'] % 1))\n\n # find points that are not residual outliers\n # We fit a zeroth order polynomial, so it returns\n # outliers to the mean value.\n outl_fit, mask_all = sigclip_polyfit(p, loc['FP_XX_NEW'],\n res_modx, 0)\n # create model\n acos = np.nansum(cos[mask_all] * res_modx[mask_all]) / \\\n np.nansum(cos[mask_all] ** 2)\n asin = np.nansum(sin[mask_all] * res_modx[mask_all]) / \\\n np.nansum(sin[mask_all] ** 2)\n model_sin = (cos * acos + sin * asin)\n # update the xpeak positions with model\n loc['FP_XX_NEW'] += model_sin / 2.2\n\n # calculate the final var and mean\n total_lines = len(np.concatenate(fp_ll_in_clip))\n final_mean = wsumres / sweight\n final_var = (wsumres2 / sweight) - (final_mean ** 2)\n # log the global stats\n wmsg1 = 'On fiber {0} fit line statistic:'.format(p['FIBER'])\n wargs2 = [final_mean * 1000.0, np.sqrt(final_var) * 1000.0,\n total_lines, 
1000.0 * np.sqrt(final_var / total_lines)]\n wmsg2 = ('\\tmean={0:.3f}[m/s] rms={1:.1f} {2} lines (error on mean '\n 'value:{3:.4f}[m/s])'.format(*wargs2))\n WLOG(p, 'info', [wmsg1, wmsg2])\n\n # save final (sig-clipped) arrays to loc\n loc['FP_ORD_CL'] = np.array(np.concatenate(fp_ord_clip).ravel())\n loc['FP_LLIN_CL'] = np.array(np.concatenate(fp_ll_in_clip).ravel())\n loc['FP_XIN_CL'] = np.array(np.concatenate(fp_x_in_clip).ravel())\n loc['FP_XOUT_CL'] = np.array(np.concatenate(fp_x_final_clip).ravel())\n loc['FP_WEI_CL'] = np.array(np.concatenate(wei_clip).ravel())\n loc['RES_CL'] = np.array(np.concatenate(res_clip).ravel())\n loc['LL_OUT_2'] = wave_map_final\n loc['LL_PARAM_2'] = poly_wave_sol_final\n loc['X_MEAN_2'] = final_mean\n loc['X_VAR_2'] = final_var\n loc['TOTAL_LINES_2'] = total_lines\n loc['SCALE_2'] = scale\n\n # set up x_details and ll_details structures for line list table:\n # X_DETAILS_i: list, [lines, xfit, cfit, weight] where\n # lines= original wavelength-centers used for the fit\n # xfit= original pixel-centers used for the fit\n # cfit= fitted pixel-centers using fit coefficients\n # weight=the line weights used\n # LL_DETAILS_i: numpy array (1D), the [nres, wei] where\n # nres = normalised residuals in km/s\n # wei = the line weights\n x_details = []\n ll_details = []\n for ord_num in range(n_init, n_fin):\n omask = loc['FP_ORD_CL'] == ord_num\n x_details.append([loc['FP_LLIN_CL'][omask], loc['FP_XIN_CL'][omask],\n loc['FP_XOUT_CL'][omask], loc['FP_WEI_CL'][omask]])\n ll_details.append([loc['RES_CL'][omask], loc['FP_WEI_CL'][omask]])\n loc['X_DETAILS_2'] = x_details\n loc['LL_DETAILS_2'] = ll_details\n\n # return\n return loc", "def test_linear_fit_model_set_masked_values(self):\n # NB. For single models, there is an equivalent doctest.\n\n init_model = models.Polynomial1D(degree=1, n_models=2)\n x = np.arange(10)\n y = np.ma.masked_array([2 * x + 1, x - 2], mask=np.zeros_like([x, x]))\n\n y[0, 7] = 100.0 # throw off fit coefficients if unmasked\n y.mask[0, 7] = True\n y[1, 1:3] = -100.0\n y.mask[1, 1:3] = True\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, y)\n\n assert_allclose(fitted_model.c0, [1.0, -2.0], atol=1e-14)\n assert_allclose(fitted_model.c1, [2.0, 1.0], atol=1e-14)", "def fit():\n pass", "def fit_data(x, y, flag_thr, deg=9):\n # Flag sorted data\n xf, yf = diff_xy(x, y, flag_thr)\n\n # Fit polynomial to flagged data\n p = np.polyfit(xf, yf, deg)\n p = np.poly1d(p)\n pg = p(x)\n\n return pg", "def smoothing(data, mask):\n smooth_data = gaussian_filter(data, [2, 2, 2, 0])\n\n Y = smooth_data[mask].T\n\n return Y", "def polyfit(x, y, yerror, pinit=[0,0,0,0]):\n x = np.array(x)\n y = np.array(y)\n pinit[2] = np.mean(y)\n pinit[3] = x[len(x)/2]\n if (type(yerror) != list and type(yerror) != np.ndarray):\n yerror = np.ones(len(x)) * yerror\n fitfunc = lambda p, x: p[2] + p[1]*(x-p[3]) + p[0]*(x-p[3])**2\n errfunc = lambda p,x,y,err: (y-fitfunc(p,x))/(err**2)\n out = scipy.optimize.leastsq(errfunc, pinit, args=(x,y,yerror/y), full_output=1)\n p = out[0]\n covar = out[1]\n return(p)", "def poly_regression_cubic(X, Y, Xs_test, Ys_test):\n ## YOUR CODE HERE\n #################\n return 0", "def linearfit(x,y):\n fit = np.polyfit(x,y,1)\n fit_fn = np.poly1d(fit)\n yy = fit_fn(x) \n \n return yy", "def polyfitr(x, y, N, s, fev=100, w=None, diag=False, clip='both', \\\n verbose=False, plotfit=False, plotall=False, eps=1e-13, catchLinAlgError=False):\n # 2008-10-01 13:01 IJC: Created & completed\n # 2009-10-01 10:23 IJC: 1 year later! 
Moved \"import\" statements within func.\n # 2009-10-22 14:01 IJC: Added 'clip' options for continuum fitting\n # 2009-12-08 15:35 IJC: Automatically clip all non-finite points\n # 2010-10-29 09:09 IJC: Moved pylab imports inside this function\n # 2012-08-20 16:47 IJMC: Major change: now only reject one point per iteration!\n # 2012-08-27 10:44 IJMC: Verbose < 0 now resets to 0\n # 2013-05-21 23:15 IJMC: Added catchLinAlgError\n\n from CARSMath import polyfitw\n from numpy import polyfit, polyval, isfinite, ones\n from numpy.linalg import LinAlgError\n from pylab import plot, legend, title\n\n if verbose < 0:\n verbose = 0\n\n xx = array(x, copy=False)\n yy = array(y, copy=False)\n noweights = (w==None)\n if noweights:\n ww = ones(xx.shape, float)\n else:\n ww = array(w, copy=False)\n\n ii = 0\n nrej = 1\n\n if noweights:\n goodind = isfinite(xx)*isfinite(yy)\n else:\n goodind = isfinite(xx)*isfinite(yy)*isfinite(ww)\n \n xx2 = xx[goodind]\n yy2 = yy[goodind]\n ww2 = ww[goodind]\n\n while (ii<fev and (nrej<>0)):\n if noweights:\n p = polyfit(xx2,yy2,N)\n residual = yy2 - polyval(p,xx2)\n stdResidual = std(residual)\n clipmetric = s * stdResidual\n else:\n if catchLinAlgError:\n try:\n p = polyfitw(xx2,yy2, ww2, N)\n except LinAlgError:\n p = np.zeros(N+1, dtype=float)\n else:\n p = polyfitw(xx2,yy2, ww2, N)\n\n p = p[::-1] # polyfitw uses reverse coefficient ordering\n residual = (yy2 - polyval(p,xx2)) * np.sqrt(ww2)\n clipmetric = s\n\n if clip=='both':\n worstOffender = abs(residual).max()\n if worstOffender <= clipmetric or worstOffender < eps:\n ind = ones(residual.shape, dtype=bool)\n else:\n ind = abs(residual) <= worstOffender\n elif clip=='above':\n worstOffender = residual.max()\n if worstOffender <= clipmetric:\n ind = ones(residual.shape, dtype=bool)\n else:\n ind = residual < worstOffender\n elif clip=='below':\n worstOffender = residual.min()\n if worstOffender >= -clipmetric:\n ind = ones(residual.shape, dtype=bool)\n else:\n ind = residual > worstOffender\n else:\n ind = ones(residual.shape, dtype=bool)\n \n xx2 = xx2[ind]\n yy2 = yy2[ind]\n if (not noweights):\n ww2 = ww2[ind]\n ii = ii + 1\n nrej = len(residual) - len(xx2)\n if plotall:\n plot(x,y, '.', xx2,yy2, 'x', x, polyval(p, x), '--')\n legend(['data', 'fit data', 'fit'])\n title('Iter. 
#' + str(ii) + ' -- Close all windows to continue....')\n\n if verbose:\n print str(len(x)-len(xx2)) + ' points rejected on iteration #' + str(ii)\n\n if (plotfit or plotall):\n plot(x,y, '.', xx2,yy2, 'x', x, polyval(p, x), '--')\n legend(['data', 'fit data', 'fit'])\n title('Close window to continue....')\n\n if diag:\n chisq = ( (residual)**2 / yy2 ).sum()\n p = (p, chisq, ii)\n\n return p", "def ApplyMask(data,mask):\n \n # loop through portions\n for portion in data.keys():\n # match data keys and apply mask \n for key in data[portion].keys():\n if key in 'xyerr':\n if mask != 'UnMasked':\n data[portion][key].mask = data[portion]['UnMasked']\n data[portion][key].mask = data[portion][mask]\n\t\n return data", "def fit_circle_func():\n pass", "def fit(x, a, p, b):\n return a * (p ** x) + b", "def fit_linreg_robust(x, y, mask=None, intercept=False, r2=True, est_method=\"rlm\"):\n\n x = x.A if issparse(x) else x\n y = y.A if issparse(y) else y\n\n _mask = np.logical_and(~np.isnan(x), ~np.isnan(y))\n if mask is not None:\n _mask &= mask\n xx = x[_mask]\n yy = y[_mask]\n\n try:\n if est_method.lower() == \"rlm\":\n xx_ = sm.add_constant(xx) if intercept else xx\n res = sm.RLM(yy, xx_).fit()\n k, b = res.params[::-1] if intercept else (res.params[0], 0)\n elif est_method.lower() == \"ransac\":\n reg = RANSACRegressor(LinearRegression(fit_intercept=intercept), random_state=0)\n reg.fit(xx.reshape(-1, 1), yy.reshape(-1, 1))\n k, b = reg.estimator_.coef_[0, 0], (reg.estimator_.intercept_[0] if intercept else 0)\n else:\n raise ImportError(\n f\"estimation method {est_method} is not implemented. \"\n f\"Currently supported linear regression methods include `rlm` and `ransac`.\"\n )\n except:\n if intercept:\n ym = np.mean(yy)\n xm = np.mean(xx)\n\n cov = np.mean(xx * yy) - xm * ym\n var_x = np.mean(xx * xx) - xm * xm\n k = cov / var_x\n b = ym - k * xm\n # # assume b is always positive\n # if b < 0:\n # k, b = np.mean(xx * yy) / np.mean(xx * xx), 0\n else:\n # use uncentered cov and var_x\n cov = np.mean(xx * yy)\n var_x = np.mean(xx * xx)\n k = cov / var_x\n b = 0\n\n if r2:\n SS_tot_n, all_SS_tot_n = np.var(yy), np.var(y)\n SS_res_n, all_SS_res_n = (\n np.mean((yy - k * xx - b) ** 2),\n np.mean((y - k * x - b) ** 2),\n )\n r2, all_r2 = 1 - SS_res_n / SS_tot_n, 1 - all_SS_res_n / all_SS_tot_n\n\n return k, b, r2, all_r2\n else:\n return k, b", "def fit_slope_with_zero_intercept_residue(X,Y):\n X = np.array(X)\n Y = np.array(Y)\n slope = np.sum(Y*X)/np.sum(np.power(X,2))\n return slope*X - Y", "def fit_poly(img_shape, leftx, lefty, rightx, righty):\n left_fit = np.polyfit(lefty, leftx, deg=2)\n right_fit = np.polyfit(righty, rightx, deg=2)\n # Generate x and y values for plotting\n ploty = np.linspace(0, img_shape[0]-1, img_shape[0])\n ### TO-DO: Calc both polynomials using ploty, left_fit and right_fit ###\n left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]\n right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]\n \n return left_fitx, right_fitx, ploty", "def fit(self, X, Y):\n ...", "def scatter_singlet_fit(self):\n return self._scatter_singlet_fit", "def partial_fit(self, X, y=...):\n ..." ]
[ "0.62117845", "0.62038964", "0.60165256", "0.5831612", "0.5827311", "0.57904327", "0.572582", "0.5706521", "0.56874967", "0.56852", "0.56852", "0.56852", "0.56538486", "0.5650198", "0.5639281", "0.5617409", "0.5605478", "0.5585841", "0.557275", "0.55547965", "0.5492608", "0.54752845", "0.5471376", "0.5469833", "0.5465314", "0.5464438", "0.5464387", "0.542986", "0.54242533", "0.5408492" ]
0.7063888
0
Get the mean value of fit "Left" and "Right" based on flag
def get_mean_fit(flag='L'):
    if flag == 'L':
        return np.mean(np.vstack(l_coeff_queue), axis=0) if len(l_coeff_queue) > 1 else l_coeff_queue[-1]
    else:
        return np.mean(np.vstack(r_coeff_queue), axis=0) if len(r_coeff_queue) > 1 else r_coeff_queue[-1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _mask_and_avg(values, padding_mask):\n\tdec_lens = torch.sum(padding_mask,dim=1)\n\tlosses = torch.stack(values, dim=1)\n\tlosses = losses * padding_mask\n\tvalues_per_ex = torch.sum(losses, dim=1)/dec_lens\n\treturn torch.sum(values_per_ex)", "def mean(self):\n\n\t\tif not self._masked:\n\t\t\t\n\t\t\treturn self.data.mean()\n\t\t\n\t\telse:\n\t\t\t\n\t\t\tif not hasattr(self,\"_full_mask\"):\n\t\t\t\tself.maskBoundaries()\n\t\t\t\n\t\t\treturn self.data[self._full_mask].mean()", "def _get_mean(self):\n return self._get_conditional_negative_energy()", "def right_or_left(self): \n right_sum = 0\n right_avg = 0\n left_sum = 0\n left_avg = 0\n self.scan()\n for angle in self.scan_data:\n if angle < self.MIDPOINT:\n right_sum += self.scan_data[angle]\n right_avg += 1\n else: \n left_avg += self.scan_data[angle]\n left_avg += 1\n\n left_avg = left_sum / left_avg \n right_avg = right_sum / right_avg\n\n if left_avg > right_avg: \n return 'l' \n else:\n return 'r'", "def get_mean(self):\n self.meanval = np.mean(self.adulist)", "def mean_baseline(d, mode='test'):\n m = d.trainY.mean()\n y = getattr(d, mode + \"Y\")\n preds = np.array([m] * y.shape[0])\n return (get_mse(d, preds, mode), get_mae(d, preds, mode),\n get_mape(d, preds, mode))", "def measure(mode, x, y, x0, x1):\n xm = ma.masked_outside(x, x0, x1)\n ym = ma.array(y, mask = ma.getmask(xm))\n if mode == 'mean':\n r1 = ma.mean(ym)\n r2 = ma.std(ym)\n if mode == 'max':\n r1 = ma.max(ym)\n r2 = 0\n if mode == 'min':\n r1 = ma.min(ym)\n r2 = 0\n if mode == 'median':\n r1 = ma.median(ym)\n r2 = 0\n if mode == 'p2p': # peak to peak\n r1 = ma.ptp(ym)\n r2 = 0\n return(r1, r2)", "def _get_average(self):\n norm = 1.0\n for pos, idx in enumerate(self.idx):\n norm *= (self.high[pos] - self.low[pos])\n return 1.0/norm", "def _get_mean(self):\n mu = self._get_conditional_negative_energy()\n return sigmoid(mu)", "def _mask_and_avg(values, padding_mask):\n\n dec_lens = tf.reduce_sum(padding_mask, axis=1) # shape batch_size. 
float32\n values_per_step = [v * padding_mask[:,dec_step] for dec_step,v in enumerate(values)]\n values_per_ex = sum(values_per_step)/dec_lens # shape (batch_size); normalized value for each batch member\n return tf.reduce_mean(values_per_ex) # overall average", "def update_average_best_fit(self):\r\n # print(result_buffer)\r\n total = len(self.result_buffer)\r\n left_fit, right_fit = np.empty((0, 3)), np.empty((0, 3))\r\n left_curve_rad, right_curve_rad = [], []\r\n vehicle_offset = []\r\n for i in range(total):\r\n calc_fit_dict = self.result_buffer[i]\r\n left_fit = np.append(left_fit, [calc_fit_dict['left_fit']], axis=0)\r\n right_fit = np.append(right_fit, [calc_fit_dict['right_fit']], axis=0)\r\n left_curve_rad.append(calc_fit_dict['left_curve_rad'])\r\n right_curve_rad.append(calc_fit_dict['right_curve_rad'])\r\n vehicle_offset.append(calc_fit_dict['vehicle_offset'])\r\n self.avg_fit_dict['left_fit'] = np.mean(left_fit, axis=0)\r\n self.avg_fit_dict['right_fit'] = np.mean(right_fit, axis=0)\r\n self.avg_fit_dict['left_curve_rad'] = np.mean(left_curve_rad)\r\n self.avg_fit_dict['right_curve_rad'] = np.mean(right_curve_rad)\r\n self.avg_fit_dict['vehicle_offset'] = np.mean(vehicle_offset)\r\n # print(avg_fit_dict)\r\n return", "def mean_average_position():\n pass", "def _get_reference_fit(self, img):\n bw_img = 255 * (img >= self.contrast)\n fit = [center_on_box(bw_img, self.radius, self.min_ref, *ref) for ref in self.refzone]\n meanfit = num.mean(num.ma.masked_array(fit, fit == -9999), axis=0).astype('i')\n if meanfit[0] is num.ma.masked:\n raise StandardError('At least one reference box match required')\n\n return meanfit, fit", "def _ave(self):\n return np.asarray(np.mean(self.model_estim.x, axis=0)).flatten()", "def mean(vals):", "def mean(self, weight_by_area=True):\n if weight_by_area:\n return self.integral() / self.indicator.integral()\n else:\n return self.sum() / self.indicator.sum()", "def calculate_mean(self) -> float:\n\n if self.data:\n return np.mean(self.data)\n else:\n return self.mu", "def _ave(self):\n\n return np.asarray(np.mean(self.model_estim.x, axis=0)).flatten()", "def _get_mean(self):\n return (0.485, 0.456, 0.406)", "def measure(mode, x, y, x0, x1, thresh = 0):\n xt = x.view(numpy.ndarray) # strip Metaarray stuff -much faster!\n v = y.view(numpy.ndarray)\n \n xm = ma.masked_outside(xt, x0, x1).T\n ym = ma.array(v, mask = ma.getmask(xm))\n if mode == 'mean':\n r1 = ma.mean(ym)\n r2 = ma.std(ym)\n if mode == 'max' or mode == 'maximum':\n r1 = ma.max(ym)\n r2 = xm[ma.argmax(ym)]\n if mode == 'min' or mode == 'minimum':\n r1 = ma.min(ym)\n r2 = xm[ma.argmin(ym)]\n if mode == 'median':\n r1 = ma.median(ym)\n r2 = 0\n if mode == 'p2p': # peak to peak\n r1 = ma.ptp(ym)\n r2 = 0\n if mode == 'std': # standard deviation\n r1 = ma.std(ym)\n r2 = 0\n if mode == 'var': # variance\n r1 = ma.var(ym)\n r2 = 0\n if mode == 'cumsum': # cumulative sum\n r1 = ma.cumsum(ym) # Note: returns an array\n r2 = 0\n if mode == 'anom': # anomalies = difference from averge\n r1 = ma.anom(ym) # returns an array\n r2 = 0\n if mode == 'sum':\n r1 = ma.sum(ym)\n r2 = 0\n if mode == 'area' or mode == 'charge':\n r1 = ma.sum(ym)/(ma.max(xm)-ma.min(xm))\n r2 = 0\n if mode == 'latency': # return first point that is > threshold\n sm = ma.nonzero(ym > thresh)\n r1 = -1 # use this to indicate no event detected\n r2 = 0\n if ma.count(sm) > 0:\n r1 = sm[0][0]\n r2 = len(sm[0])\n if mode == 'count':\n r1 = ma.count(ym)\n r2 = 0\n if mode == 'maxslope':\n return(0,0)\n slope = numpy.array([])\n win = 
ma.flatnotmasked_contiguous(ym)\n st = int(len(win)/20) # look over small ranges\n for k in win: # move through the slope measurementwindow\n tb = range(k-st, k+st) # get tb array\n newa = numpy.array(self.dat[i][j, thisaxis, tb])\n ppars = numpy.polyfit(x[tb], ym[tb], 1) # do a linear fit - smooths the slope measures\n slope = numpy.append(slope, ppars[0]) # keep track of max slope\n r1 = numpy.amax(slope)\n r2 = numpy.argmax(slope)\n return(r1, r2)", "def mean(self):\r\n\t\treturn sum(self.sample)/len(self.sample)", "def mean(image, selem, out=None, mask=None, shift_x=False, shift_y=False):\n\n return _apply(_crank8.mean, _crank16.mean, image, selem, out=out,\n mask=mask, shift_x=shift_x, shift_y=shift_y)", "def mean(self):\n return self.cond_proba.mean", "def meanEnergy(self, count=False):\n \n _E = (self.E).groupby(pd.cut(self.E, self.binEdges))\n try:\n def wm(x):\n try:\n return np.average(x, weights=self.Weight.loc[x.index])\n except ZeroDivisionError:\n return np.nan\n wc = lambda x: np.sum(self.Weight.loc[x.index])\n if count:\n E_count = _E.apply(wc)\n E_mean = _E.apply(wm)\n except AttributeError:\n if count:\n E_count = _E.count()\n E_mean = _E.mean()\n \n E_masked = np.ma.masked_invalid(E_mean)\n \n if count:\n C_masked = np.ma.masked_array(E_count, mask=E_masked.mask)\n return C_masked, E_masked\n return E_masked", "def masked_mean(x: torch.FloatTensor, m: torch.BoolTensor):\n if m.bool().sum() == len(m):\n return torch.full((1, ), fill_value=float('inf'), device=x.device)\n return x[m.bool()].mean()", "def compute_means(opts, train_data, sampler):\n exp_names = train_data[\"exp_names\"].value\n means = []\n stds = []\n if opts[\"flags\"].normalize is True:\n running_stats = []\n # a running stat for each channel\n running_stats = RunningStats(3)\n # loop over the experiments\n\n # for exp_name in exp_names:\n for j in range(0, len(exp_names), 2):\n batch = sampler.get_minibatch()\n exp_name = batch[2][0]\n print(exp_name)\n # loop over the keys\n\n seq_len = train_data[\"exps\"][exp_name][\"labels\"].shape[0]\n temp_feat = batch[0].cpu().numpy()\n temp_feat = temp_feat[:seq_len, :, :, :]\n\n channel_feats = []\n for i in range(3):\n # channel_feat = temp_feat[0, :, i, :]\n # sample frames\n channel_feat = temp_feat[::100, i, :]\n channel_feat = channel_feat.reshape(-1, 1)\n channel_feats.append(channel_feat)\n\n channel_feats = np.concatenate(channel_feats, axis=1)\n running_stats.add_data(\n channel_feat\n )\n\n means = running_stats.mean.tolist()\n stds = running_stats.compute_std().tolist()\n else:\n means = [.5, .5, .5]\n stds = [1, 1, 1]\n # for key in opts[\"flags\"].feat_keys:\n # temp_feat = train_data[\"exps\"][exp_names[0]][key].value\n # mean = np.zeros((temp_feat.shape[2], ))\n # std = np.ones((temp_feat.shape[2], ))\n # means.append(mean)\n # stds.append(std)\n normalize = transforms.Normalize(mean=means,\n std=stds)\n\n return normalize", "def getMean(self):\n return self.mean", "def get_fitness_mean(self):\n return self.get_fitness_stat(mean)", "def get_mean(self):\n mean = np.array(np.zeros((4,8)))\n for i,c in enumerate(self.cellLines):\n for j,l in enumerate(self.ligands):\n mean[i][j] = self.aucs[c][l]['mean']\n return mean", "def mean_absolute_percentage_error(y_true, y_pred, sample_weight=..., multioutput=...):\n ..." ]
[ "0.60307616", "0.59903556", "0.5989535", "0.5927027", "0.58250964", "0.58250463", "0.58083445", "0.5792237", "0.57348174", "0.57280666", "0.57189894", "0.571298", "0.56587595", "0.5591607", "0.5536695", "0.55357367", "0.5512962", "0.55089694", "0.5496177", "0.5463986", "0.54556686", "0.5437334", "0.5435072", "0.5426911", "0.541448", "0.54117227", "0.53799963", "0.5377896", "0.5362354", "0.53585285" ]
0.7186115
0
Gets the Last Fit depending on the flag
def get_last_fit(flag='L'):
    if flag == 'L':
        return l_coeff_queue[-1]
    else:
        return r_coeff_queue[-1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getFit(self):\n if self.fits.has_key('default'):\n return self.fits['default']\n else:\n return None", "def last_fmeasure(self):\n return self.get_fvalue(self.last_position())", "def get_last_saved_estimation(self):\n return None", "def get_fit(self, space=False):\n\n if space:\n model_type = 'space'\n else:\n model_type = 'risk'\n\n fit_type = 'likelihood'\n\n fitter = self.fits[model_type].linear_fits[fit_type]\n\n return fitter", "def get_last_save(self) -> Optional[float]:\n return None if self._save_marker is None else self._save_marker + self._offset", "def get_last_measurement(self, param):\n return self.__buffer[param][-1]", "def restore_last(self, points_x = None, points_y = None):\n\n # when calling with no parameter, we just keep the last state activated\n # and keep signaling a detected line to absorb small flaws of a few\n # frames - otherwise calculate with new input data\n\n # on new input points, reset and update lane\n if (points_x is not None) and (points_y is not None):\n #self.reset(self.roi_warped_points)\n self.update(points_x, points_y)\n\n return self.detected\n\n elif len(self.recent_fit) >= 2:\n # ensure detected\n self.detected = True\n\n # remove current broken fit from recent fits\n self.recent_fit = self.recent_fit[:-1]\n\n # make last valid recent fit to current fit\n self.current_fit = self.recent_fit[-1]\n\n # calculate new best fit\n sum = [np.array([False])]\n \"\"\"\n current_weight = self.max_n + 1\n divisor = 0\n\n for r in self.recent_fit:\n current_weight = current_weight - 1\n sum = sum + current_weight * r\n divisor = divisor + current_weight\n\n self.best_fit = (sum / divisor)[0]\n \"\"\"\n\n for r in self.recent_fit:\n sum = sum + r\n\n self.best_fit = (sum / len(self.recent_fit))[0]\n\n # re calculate diffs\n self.riffs = self.current_fit - self.best_fit\n\n # we need to re-calculate the metrics\n self.calculate_metrics()\n\n return True\n else:\n # if not, there's currently no way out\n return False", "def getBestOption(self):\n if len(self.Data) < 1:\n return None\n else:\n bestR = max(self.Data.items(), key=lambda x: x[1]['SPat'].I)\n return bestR[1]", "def get_lip_best(self) -> float:\n if self._fitted:\n if self._ready_lip_best:\n return self._lip_best\n else:\n lip_best = self._get_lip_best()\n self._set(\"_lip_best\", lip_best)\n self._set(\"_ready_lip_best\", True)\n return lip_best\n else:\n raise ValueError(\"call ``fit`` before calling ``get_lip_best``\")", "def last_value(self):\n return self.samples[-1]", "def best_step(self):\r\n return self._best_value_step", "def is_fitted(self):\n\n return self.isFitted", "def scatter_back_fit(self):\n return self._scatter_back_fit", "def max_log_likelihood_fit(self) -> FitInterferometer:\r\n return self.analysis.fit_interferometer_via_instance_from(\r\n instance=self.instance\r\n )", "def get_fit(self) -> np.poly1d:\n if self.log_fits is not None:\n return next(self.log_fits)\n x = (\n Enumerable(self.mlc_meas)\n .select_many(lambda m: [line.point1.y for line in m.marker_lines])\n .to_list()\n )\n y = (\n Enumerable(self.mlc_meas)\n .select_many(lambda m: [line.point1.x for line in m.marker_lines])\n .to_list()\n )\n if self.orientation == Orientation.UP_DOWN:\n fit = np.polyfit(x, y, 1)\n else:\n fit = np.polyfit(y, x, 1)\n return np.poly1d(fit)", "def stop_fit(self):\n self._stop_fit = True", "def get_last_solution(self):\n return self.last_result", "def _get_max_estimated_bandit(self)->Bandit:\n # print(\"mus - \", self.mu)\n # print(\"actions - \", np.argmax(self.mu))\n unique, 
counts = np.unique(self.mu, return_counts=True)\n lens = counts[np.argmax(unique)] \n if lens>1: # if two actions have same argmax\n # then return arbitrarily from those max ones\n maxs = list(np.array(self.bandits)[self.mu==unique[np.argmax(unique)]])\n return np.random.choice(maxs)\n # otherwise return the max one\n return self.bandits[np.argmax(self.mu)]", "def max_log_likelihood_fit(self) -> ag.FitQuantity:\r\n\r\n return self.analysis.fit_quantity_for_instance(instance=self.instance)", "def last_percept(self):\n return self.percept", "def _get_lip_best(self) -> float:\n pass", "def best_value(self):\n return np.max(self.y.numpy())", "def has_been_fit(self):\n return self.predictor.has_been_fit", "def get_bestparameter(self):\n if self._df_test is None:\n raise RuntimeError('get_bestparameter: please the '\n 'train model first')\n mean = self._df_test.mean(axis=1)\n if len(mean) == 1:\n result = mean.idxmax()\n elif len(mean) == 2:\n result = mean.loc[mean.index > 1].idxmax()\n else:\n result = mean.loc[mean.index > 2].idxmax()\n return result", "def get_bestparameter(self):\n if self._df_test is None:\n raise RuntimeError('get_bestparameter: please the '\n 'train model first')\n mean = self._df_test.mean(axis=1)\n if len(mean) == 1:\n result = mean.idxmax()\n elif len(mean) == 2:\n result = mean.loc[mean.index > 1].idxmax()\n else:\n result = mean.loc[mean.index > 2].idxmax()\n return result", "def get_last_save(self) -> Optional[int]:\n return self._bin_iter.get_last_save()", "def last_value(self):\n return self._stop", "def get_score(self):\n return np.max(self._scores) if self._scores is not None else self._score_history[-1]", "def last_hit(self):\n return self._last_hit", "def is_fitted(self):\n return self.__fdata is not None" ]
[ "0.6239705", "0.6228102", "0.6141779", "0.6008824", "0.59334695", "0.5871821", "0.5755246", "0.5743263", "0.5716989", "0.57042426", "0.5698672", "0.56794965", "0.56396884", "0.56118524", "0.55922616", "0.55794775", "0.55546254", "0.55316406", "0.55230576", "0.5493914", "0.54734224", "0.5470537", "0.5467277", "0.5463756", "0.5463756", "0.54407865", "0.54403305", "0.5427564", "0.5399268", "0.5394421" ]
0.8070429
0
Use The current values of Curvature and Offset from Left and Right Lanes to decide if Lanes are sane
def curvature_sanity(left_curvature, left_offset, right_curvature, right_offset):
    if return_queue_len(flag='L') >= 1 and return_queue_len(flag='R') >= 1:
        offset = center_position - (left_offset + right_offset) / 2.
        offset_measure = np.abs(overall_offset - offset)
        return True if offset_measure < 0.2 else False
    else:
        return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _curvature(self):\n y_eval = self.left_fitx.shape[0] - 10\n left_curverad = (((1 + (2 * self.left_fit[0] * y_eval + self.left_fit[1]) ** 2) ** 1.5) /\n np.absolute(2 * self.left_fit[0]))\n right_curverad = (((1 + (2 * self.right_fit[0] * y_eval + self.right_fit[1]) ** 2) ** 1.5) /\n np.absolute(2 * self.right_fit[0]))\n return left_curverad, right_curverad", "def sanity_check(self):\n score = 0\n curvatures = self._curvature()\n if abs(curvatures[0] - curvatures[1]) / max(curvatures) > 0.15:\n # difference in curvature is more than 15%\n score -= 1\n\n diff_std = np.std(self.right_fitx - self.left_fitx)\n if diff_std > 30:\n # std of the difference between the right lane and left lane is more than 30 pixel\n score -= 1\n\n # roughly parallel\n if abs(self.left_fit[0] - self.right_fit[0]) / max(self.left_fit[0], self.right_fit[0]) > 0.15:\n # difference in slope is more than 15%\n score -= 1\n\n return score", "def GetLoCorner(self):\n ...", "def fix_curvature(self) -> None:\n self.n1.fix = True\n self.n2.fix = True", "def calculate_leg_xy_limits(self, visualize=False):\n \n #Find the fixed plate position at the \"0\" point\n gonio_zero = copy.copy(self)\n gonio_zero.relative_sample_position = column([0.0, 0.0, 0.0]) #Tell the sample to be centered well.\n gonio_zero.getplatepos(0.0, 0.0, 0.0)\n fixed_plate_zero = np.copy(gonio_zero.fixed_plate)\n #This defines the center of the following matrices\n self.fixed_plate_zero = fixed_plate_zero\n \n #Now we generate a matrix of allowed positions around those points.\n self.leg_safe_xaxis = np.arange(-self.travel, self.travel, self.leg_safe_resolution)\n self.leg_safe_zaxis = np.copy(self.leg_safe_xaxis)\n\n #Create the \"safe zone\" array, initialized to False\n self.leg_safe_zone = np.zeros( (3, self.leg_safe_xaxis.size, self.leg_safe_zaxis.size), dtype=bool ) \n\n #Now make a reasonable approximation\n real_travel_x = 12.5\n real_travel_z = real_travel_x\n for leg in range(3):\n for i_x in range(self.leg_safe_xaxis.size):\n x = self.leg_safe_xaxis[i_x]\n if abs(x)<real_travel_x:\n for i_z in range(self.leg_safe_zaxis.size):\n z = self.leg_safe_zaxis[i_z]\n if abs(z)<real_travel_z:\n self.leg_safe_zone[leg, i_x, i_z] = True\n# #Upper left corner of leg A (0)\n# center = int(len(self.leg_safe_xaxis)/2)\n# self.leg_safe_zone[0, :, :] = False\n# self.leg_safe_zone[0, :center, :center] = True\n# self.leg_safe_zone[1, :, :] = False\n# self.leg_safe_zone[1, center:, 0:center] = True\n# self.leg_safe_zone[2, :, :center] = False\n\n\n if visualize:\n pylab.figure(0)\n pylab.hold(True)\n for leg in range(3):\n pylab.pcolor(self.leg_safe_xaxis+fixed_plate_zero[COORD_X, leg], self.leg_safe_zaxis+fixed_plate_zero[COORD_Z, leg], self.leg_safe_zone[leg, :, :].transpose())\n pylab.xlabel(\"x\")\n pylab.ylabel(\"z\")\n pylab.title(\"Allowable XZ leg positions for the 3 legs.\")\n pylab.draw()\n pylab.axis('equal')\n #pylab.show()", "def check_borders(self):\n # Go Homer!\n # https://en.wikipedia.org/wiki/Torus#Flat_torus\n if self._posn.x < 0:\n self._posn.x += self._win_w\n elif self._posn.x > self._win_w:\n self._posn.x -= self._win_w\n if self._posn.y < 0:\n self._posn.y += self._win_h\n elif self._posn.y > self._win_h:\n self._posn.y -= self._win_h", "def check_boundary(self):\n\n\t\tif self.Bubble_initial_pos[0] <= self.Bubble_radius or self.Bubble_initial_pos[0] >= self.tk_pic.width - self.Bubble_radius:\n\t\t\tself.Bubble_vel[0] = -self.Bubble_vel[0]", "def is_valid(self):\n posit1 = (self.mean_v > 0) & (self.kappa_y > 0) & (self.eta_y > 0)\n 
posit2 = (self.kappa_s > 0) & (self.eta_s > 0)\n return posit1 & posit2 & self.feller()", "def detectBorders(self, points):\n lane1 = []; lane2 = []\n self.leftLane = [None for _ in range(int(np.floor(self.BIRDVIEW_HEIGHT / self.slideThickness)))]\n self.rightLane = [None for _ in range(int(np.floor(self.BIRDVIEW_HEIGHT / self.slideThickness)))]\n\n pointMap = np.zeros((points.shape[0], 20))\n prePoint = np.zeros((points.shape[0], 20))\n postPoint = np.zeros((points.shape[0], 20))\n\n dis = 10\n max1 = -1; max2 = -1\n\n ##\n ## /!\\ UNSAFE LOOP, TODO: FIX\n ##\n for i in range(points.shape[0]):\n for j in range(len(points[i])):\n pointMap[i][j] = 1\n prePoint[i][j] = -1\n postPoint[i][j] = -1\n\n for i in reversed(range(points.shape[0] - 2)):\n\n for j in range(len(points[i])):\n\n err = 320\n for m in range(1, min(points.shape[0] - 1 - i, 5)):\n check = False ## TODO: why unused ?\n\n for k in range(len(points[i + 1])):\n\n (x_m, y_m) = points[i + m][k].pt\n (x, y) = points[i][j].pt\n\n if (abs(x_m - x) < dis and abs(y_m - y) < err):\n err = abs(x_m - x)\n\n pointMap[i][j] = pointMap[i + m][k] + 1\n prePoint[i][j] = k\n postPoint[i + m][k] = j\n check = True\n\n break ## breaks out of the m loop. Why is it not conditioned by check ? TODO: ???\n\n if (pointMap[i][j] > max1):\n max1 = pointMap[i][j]\n posMax = cv2.KeyPoint(i, j, _size=0)\n \n else:\n posMax = None\n\n for i in range(points.shape[0]):\n for j in range(len(points[i])):\n if posMax:\n if (pointMap[i][j] > max2 and (i != posMax.pt[0] or j != posMax.pt[1]) and postPoint[i][j] == -1): #FIXME \"local variable 'posMax' referenced before assignment\" possible\n max2 = pointMap[i][j]\n posMax2 = cv2.KeyPoint(i, j, _size=0)\n\n\n\n if max1 == -1:\n return\n\n # DEFINES LANE 1 POINTS\n while (max1 >= 1):\n (x,y) = points[int(posMax.pt[0])][int(posMax.pt[1])].pt\n lane1.append(\n [x,y]\n )\n if (max1 == 1):\n break\n\n posMax = cv2.KeyPoint(\n posMax.pt[0]+1,\n prePoint[int(posMax.pt[0])][int(posMax.pt[1])],\n _size=0\n )\n\n max1 -= 1\n\n # DEFINES LANE 2 POINTS\n while (max2 >= 1):\n (x,y) = points[int(posMax2.pt[0])][int(posMax2.pt[1])].pt\n lane2.append(\n [x, y]\n )\n if (max2 == 1):\n break\n\n posMax2 = cv2.KeyPoint(\n posMax2.pt[0]+1,\n prePoint[int(posMax2.pt[0])][int(posMax2.pt[1])],\n _size=0\n )\n\n max2-= 1\n\n subLane1 = np.array(lane1[0:5])\n subLane2 = np.array(lane2[0:5])\n\n # checking if sublane has an empty value\n\n line1 = cv2.fitLine(subLane1, 2, 0, 0.01, 0.01)\n line2 = cv2.fitLine(subLane2, 2, 0, 0.01, 0.01)\n\n try:\n lane1X = (self.BIRDVIEW_WIDTH - line1[3]) * line1[0] / line1[1] + line1[2]\n except:\n lane1X = 0\n\n try:\n lane2X = (self.BIRDVIEW_WIDTH - line2[3]) * line2[0] / line2[1] + line2[2]\n except:\n lane2X = 0\n \n if (lane1X < lane2X):\n for i in range(len(lane1)):\n self.leftLane[int(np.floor(lane1[i][1] / self.slideThickness ))] = lane1[i]\n\n for i in range(len(lane2)):\n self.rightLane[int(np.floor(lane2[i][1] / self.slideThickness ))] = lane2[i]\n\n else:\n\n for i in range(len(lane1)):\n self.rightLane[int(np.floor(lane1[i][1] / self.slideThickness ))] = lane1[i]\n\n for i in range(len(lane2)):\n self.leftLane[int(np.floor(lane2[i][1] / self.slideThickness ))] = lane2[i]", "def curvature(self):\n if self.x[0] == self.x[-1] and self.y[0] == self.y[-1]:\n x_looped=self.x\n y_looped=self.y\n else:\n x_looped=np.append(self.x,self.x[0])\n y_looped=np.append(self.y,self.y[0])\n\n dsx=np.diff(x_looped)\n dsy=np.diff(y_looped)\n ds=np.sqrt(dsx**2+dsy**2)\n Tx=dsx/ds\n Ty=dsy/ds\n 
ds2=0.5*(np.append(ds[-1],ds[:-1])+ds)\n if self.test:\n print('x_looped', x_looped)\n print('y_looped', y_looped)\n print('dsx', dsx)\n print('dsy', dsy)\n print('ds', ds)\n print('ds2', ds2)\n Hx=np.diff(np.append(Tx[-1],Tx))/ds2\n Hy=np.diff(np.append(Ty[-1],Ty))/ds2\n self._curvature_vector=np.asarray([Hx,Hy]).transpose()\n curvature=np.sqrt(Hx**2+Hy**2)\n if self.test:\n print('curvature', curvature)\n return curvature", "def check_point_left(nodeL, nodeR, city):\n A = get_city_points(city)\n B = get_node_points(nodeR)\n C = get_node_points(nodeL)\n slope = _slope(A, B)\n (F, G) = calibrator(A, B, slope)\n sign = math.copysign(1, ((G[0] - F[0]) * (C[1] - F[1]) - (G[1] - F[1]) * (C[0] - F[0])))\n\n if slope == \"horizontal\":\n if sign == -1:\n if A[0] > B[0]:\n return True\n else:\n return False\n else:\n if A[0] < B[0]:\n return True\n else:\n return False\n\n if slope == \"vertical\":\n if sign == -1:\n if A[1] < B[1]:\n return True\n else:\n return False\n else:\n if A[1] > B[1]:\n return True\n else:\n return False\n\n if slope == \"inclined\":\n if sign == -1:\n if A[1] < B[1]:\n return True\n else:\n return False\n else:\n if A[1] > B[1]:\n return True\n else:\n return False\n\n if slope == \"declined\":\n if sign == -1:\n if A[1] < B[1]:\n return True\n else:\n return False\n else:\n if A[1] > B[1]:\n return True\n else:\n return False", "def boundaries_and_initialize():\n greenLower = (29, 86, 6) # define the lower and upper boundaries of the \"green\"\n greenUpper = (64, 255, 255)\n pts = [((200,300),(255,255,255), 0)]\n blanks = []\n linecolor = (0,0,0)\n counter = 1\n radius = 11\n return greenLower, greenUpper, pts, linecolor, counter, blanks, radius", "def _calc_side(self):\n\n # Calculation of the side of the car with respect to the trajectory\n next_index = self.index + 1\n\n if next_index == len(self.x_trajectory):\n next_index = self.index\n\n trajectory_vector = ((self.x_trajectory[next_index]\n - self.x_trajectory[self.index]),\n (self.y_trajectory[next_index]\n - self.y_trajectory[self.index]))\n\n x_diff = self.x - self.x_trajectory[self.index]\n y_diff = self.y - self.y_trajectory[self.index]\n\n ugv_vector = (x_diff, y_diff)\n\n vector_z = ugv_vector[0] * trajectory_vector[1] \\\n - ugv_vector[1] * trajectory_vector[0]\n\n if vector_z >= 0:\n\n # It is in the right side\n self.sign = 1\n\n else:\n\n # It is in the left side\n self.sign = -1\n\n return self.sign", "def getVisualFieldOrigin(self):\r\n\r\n if not hasattr(self, 'finalPatchesMarked'):\r\n raise LookupError('Please mark the final patches first!!')\r\n\r\n if not hasattr(self, 'altPosMapf'):\r\n _ = self._getSignMap()\r\n\r\n try:\r\n V1 = self.finalPatchesMarked['V1'].array.astype(np.float)\r\n LM = self.finalPatchesMarked['LM'].array.astype(np.float)\r\n RL = self.finalPatchesMarked['RL'].array.astype(np.float)\r\n\r\n overlap = 0 # number of overlaping pixels\r\n iterNum = 1 # number of iteration\r\n while overlap < 1:\r\n # print 'Iteration number for finding overlapping pixel:', iterNum\r\n V1 = ni.morphology.binary_dilation(V1, iterations=1).astype(np.float)\r\n LM = ni.morphology.binary_dilation(LM, iterations=1).astype(np.float)\r\n RL = ni.morphology.binary_dilation(RL, iterations=1).astype(np.float)\r\n totalField = V1 + LM + RL\r\n # plt.imshow(totalField)\r\n overlap = len(np.argwhere(totalField == 3))\r\n iterNum += 1\r\n # print 'Number of overlapping pixels:', overlap\r\n # plt.show()\r\n\r\n altPosOrigin = np.mean(self.altPosMapf[totalField == 3], axis=0)\r\n aziPosOrigin = 
np.mean(self.aziPosMapf[totalField == 3], axis=0)\r\n\r\n except KeyError:\r\n print('Can not find necessary visual areas (V1, LM, RL) for normalization. \\nSetting origins to 0 ...')\r\n altPosOrigin = 0.\r\n aziPosOrigin = 0.\r\n\r\n return altPosOrigin, aziPosOrigin", "def lefton(hedge, point):\r\n\r\n return area2(hedge, point) >= 0", "def test_velocity_boundaries(self):\n L_x = self.x_edge[-1]\n np.testing.assert_array_almost_equal(self.v_box(self.t, 0), 0, decimal=4)\n np.testing.assert_array_almost_equal(self.v_box(self.t, L_x), 0, decimal=4)", "def detect_boundary(self, x, l_old):\n pass", "def measure_curvature(self, warped, leftx, rightx):\n\t\t# Define conversions in x and y from pixels space to meters\n\t\t#xm_per_pix = 3.7/warped.shape[1] # meters per pixel in x dimension\n\t\t#ym_per_pix = 30.0/warped.shape[0] # meters per pixel in y dimension\n\t\txm_per_pix = 3.7/700 # meters per pixel in x dimension\n\t\tym_per_pix = 30.0/720 # meters per pixel in y dimension\n\t\t# Generate some fake data to represent lane-line pixels\n\t\tploty = np.linspace(0, 719, num=720) # to cover same y-range as image\n\t\t# Fit second order polynomials to x, y in world space\n\t\tleft_fit_cr = np.polyfit(ploty * ym_per_pix, leftx * xm_per_pix, 2)\n\t\tright_fit_cr = np.polyfit(ploty * ym_per_pix, rightx * xm_per_pix, 2)\n\t\t# Define y-value where we want radius of curvature\n\t\t# Choose the maximum y-value, corresponding to the bottom of the image\n\t\ty_eval = np.max(ploty)\n\t\t# Calculate radius of fitted curvature\n\t\tleft_curverad = ((1 + (2 * left_fit_cr[0] * y_eval * ym_per_pix + left_fit_cr[1]) ** 2) ** 1.5) / np.absolute(2 * left_fit_cr[0])\n\t\tright_curverad = ((1 + (2 * right_fit_cr[0] * y_eval * ym_per_pix + right_fit_cr[1]) ** 2) ** 1.5) / np.absolute(2 * right_fit_cr[0])\n\t\t# Calculate the lane deviation\n\t\tlane_deviation = self.lane_deviation(warped, xm_per_pix)\n\n\t\treturn left_curverad, right_curverad, lane_deviation", "def near_way(self):\r\n\r\n prey_position = np.array(self.prey.position)\r\n actual_position = np.array(self.previous_data[-1])\r\n previous_position = np.array(self.previous_data[-2])\r\n\r\n difference_actual = np.linalg.norm(prey_position - actual_position)\r\n difference_previous = np.linalg.norm(prey_position - previous_position)\r\n\r\n if difference_actual < difference_previous:\r\n return True\r\n else:\r\n return False", "def offset(x, y, L):\n length = x.size\n offsetx = np.zeros((length, 2))\n offsety = np.zeros((length, 2))\n dx = np.zeros(length-1)\n dy = np.zeros(length-1)\n dxL = np.zeros(length-1)\n dyL = np.zeros(length-1)\n xl = np.zeros(length) # counterclockwise\n xr = np.zeros(length) # clockwise\n yl = np.zeros(length)\n yr = np.zeros(length)\n xl0 = np.zeros(length)\n xr0 = np.zeros(length)\n yl0 = np.zeros(length)\n yr0 = np.zeros(length) \n for i in range(0, length-1):\n dx[i] = x[i+1]-x[i]\n dy[i] = y[i+1]-y[i]\n for i in range(0, length-1):\n r = np.sqrt(dx[i]**2 + dy[i]**2)\n dxL[i] = dx[i]*L/r\n dyL[i] = dy[i]*L/r\n xl0[i] = -dyL[i] + x[i]\n yl0[i] = dxL[i] + y[i]\n xr0[i] = dyL[i] + x[i]\n yr0[i] = -dxL[i] + y[i]\n xl0[length-1] = xl0[length-2] + dx[length-2]\n yl0[length-1] = yl0[length-2] + dy[length-2]\n xr0[length-1] = xr0[length-2] + dx[length-2]\n yr0[length-1] = yr0[length-2] + dy[length-2]\n xl[0] = xl0[0]\n yl[0] = yl0[0]\n xl[length-1] = xl0[length-1]\n yl[length-1] = yl0[length-1]\n xr[0] = xr0[0]\n yr[0] = yr0[0]\n xr[length-1] = xr0[length-1]\n yr[length-1] = yr0[length-1]\n for i in range(1, length-1):\n a = 
np.array([[dy[i-1], -dx[i-1]], [dy[i], -dx[i]]])\n bl = np.array([dy[i-1]*xl0[i-1]-dx[i-1]*yl0[i-1], dy[i]*xl0[i]-dx[i]*yl0[i]])\n br = np.array([dy[i-1]*xr0[i-1]-dx[i-1]*yr0[i-1], dy[i]*xr0[i]-dx[i]*yr0[i]])\n theta = (dx[i-1]*dx[i]+dy[i-1]*dy[i])/(dx[i-1]**2+dy[i-1]**2)**0.5/(dx[i]**2+dy[i]**2)**0.5\n if theta > 1 - 1e-10:\n xl[i] = xl0[i]\n yl[i] = yl0[i]\n xr[i] = xr0[i]\n yr[i] = yr0[i]\n else:\n pl = np.linalg.solve(a, bl)\n xl[i] = pl[0]\n yl[i] = pl[1]\n pr = np.linalg.solve(a, br)\n xr[i] = pr[0]\n yr[i] = pr[1]\n offsetx[:, 0], offsetx[:, 1] = xl, xr\n offsety[:, 0], offsety[:, 1] = yl, yr\n return offsetx, offsety", "def maybe_rectal(self):\n return bool(set(self.locations) & set(StandardTerminology.RECTAL_LOCATIONS))", "def find_field_angle(self) -> None:\r\n\r\n # Gather required info\r\n size = self.picture.get_size()\r\n \r\n # Make a center box in the field\r\n # Make sure there are no more than 50 plants in this box\r\n smol = 5\r\n while len([i\r\n for i in self.plants\r\n if within(i.get_center(), (smol//2*size[0]//smol,\r\n smol//2*size[1]//smol,\r\n (smol//2+1)*size[0]//smol,\r\n (smol//2+1)*size[1]//smol))]\r\n ) > 50:\r\n \r\n smol += 1\r\n\r\n small_box = (2*size[0]//smol,\r\n 2*size[1]//smol,\r\n 3*size[0]//smol,\r\n 3*size[1]//smol)\r\n\r\n slopes = []\r\n\r\n # iterate throught the centers of all plants in the box \r\n for pivot in [i for i in self.centers if within(i, small_box)]:\r\n\r\n # Get a pivot point within the small_box\r\n pivot = (-1, -1)\r\n while not within(pivot, small_box):\r\n pivot = choice(self.centers)\r\n\r\n # Initialize variables\r\n best_slope = 0\r\n percent_green = 0\r\n\r\n # Go throught all of the plants within the small_box\r\n for i in self.centers:\r\n if within(i, small_box) and i != pivot:\r\n\r\n # Determine how much green a line draw\r\n # from the pivot to the new point hits\r\n try:\r\n slope = (pivot[1] - i[1]) / (pivot[0] - i[0])\r\n except ZeroDivisionError:\r\n continue\r\n PGOL = self.picture.percent_plant_on_line(i, slope)\r\n\r\n # If the green hit is more than the current green,\r\n # replace it\r\n if PGOL > percent_green:\r\n best_slope = slope\r\n percent_green = PGOL\r\n\r\n # Save the best slope\r\n slopes.append(best_slope)\r\n\r\n # Find and set the median of all the best slopes\r\n self.slope = median(slopes)", "def _ul_lr(self):\n ulx, xres, xskew, uly, yskew, yres = self.geotransform\n # Index from the end - GDal usually orders bands-first:\n lrx = ulx + (self.array.shape[-2] * xres)\n lry = uly + (self.array.shape[-1] * yres)\n return ulx, uly, lrx, lry", "def increase_left_boundary(self):\n self.L = self.L - 1.0\n self.Ne = self.Ne + 1", "def detectWallCollision(self):\n if self.right >= GAME_WIDTH or self.left <= 0:\n self._vx = -1.0 * self._vx\n if self.top >= GAME_HEIGHT:\n self._vy = -1.0 * self._vy", "def __isFarFromLevel(self, l):\n\n s = np.mean(self.df['high'] - self.df['low'])\n return np.sum([abs(l-x) < s for x in self.levels]) == 0", "def check_extent(self):\n if self.lower_left.x > self.upper_right.x:\n dlx = self.lower_left.x\n self.lower_left.x = self.upper_right.x\n self.upper_right.y = dlx\n\n if self.lower_left.y > self.upper_right.y:\n dly = self.lower_left.y\n self.lower_left.y = self.upper_right.y\n self.upper_right.y = dly", "def test_non_euclidean_scale_curvature(self):\n\n magic = 77773.333773777773733\n for kdir in (1, -1):\n for mul in (2, 5, 1/3, 1/11, magic, 1/magic):\n for name, dim in (\n ('sphere_s1', 1),\n ('sphere_v2', 2),\n ('sphere_s2', 2),\n ('sphere_v3', 3)\n ):\n s1 = 
space(fake_curvature=kdir)\n s2 = space(fake_curvature=kdir / mul)\n self.assertTrue(isclose(\n getattr(s1, name)(1) * mul**dim,\n getattr(s2, name)(mul)\n ))", "def check(self):\n\n # get values\n ystart, xleft, xright, nx, ny = self.get()\n\n # are they all OK individually?\n ok = self.ystart.ok() and self.xleft.ok() and self.xright.ok() and \\\n self.nx.ok() and self.ny.ok()\n\n # now come cross-value checks:\n\n # is rightmost pixel of lefthand window within range\n if xleft is None or xleft + nx - 1 > 512:\n self.xleft.config(bg=COL_WARN)\n ok = False\n\n # is rightmost pixel of righthand window within range\n if xright is None or xright + nx - 1 > 1024:\n self.xright.config(bg=COL_WARN)\n ok = False\n\n # is top pixel within range\n if ystart is None or ystart + ny - 1 > 1024:\n self.ystart.config(bg=COL_WARN)\n ok = False\n\n if ok:\n # set all OK is everything checks\n self.ystart.config(bg=COL_TEXT_BG)\n self.xleft.config(bg=COL_TEXT_BG)\n self.xright.config(bg=COL_TEXT_BG)\n self.nx.config(bg=COL_TEXT_BG)\n self.ny.config(bg=COL_TEXT_BG)\n \n return ok", "def find_reference_radials(azi, vel, debug=False):\n pos_valid = get_valid_rays(vel)\n pos_static = get_static_rays(vel)\n\n # Finding intersects of criteria 1 to 3.\n weight_valid = np.arange(0, len(pos_valid), 1)\n weight_static = np.arange(0, len(pos_static), 1)\n\n total_weight = np.zeros(len(pos_valid)) + np.NaN\n for cnt, (one_valid, one_valid_weight) in enumerate(zip(pos_valid, weight_valid)):\n try:\n one_static_weight = weight_static[one_valid == pos_static][0]\n except IndexError:\n one_static_weight = 9999\n\n total_weight[cnt] = one_static_weight + one_valid_weight\n\n pos1 = pos_valid[np.argmin(total_weight)]\n\n# # Finding the 2nd radial of reference\n# pos2 = pos1 + len(azi) // 2\n# if pos2 >= len(azi):\n# pos2 -= len(azi)\n\n try:\n ref2_range_min, ref2_range_max = get_opposite_azimuth(azi[pos1])\n if ref2_range_min < ref2_range_max:\n goodpos = np.where((azi >= ref2_range_min) & (azi <= ref2_range_max))[0]\n else:\n goodpos = np.where((azi >= ref2_range_min) | (azi <= ref2_range_max))[0]\n\n rslt = [(a, total_weight[a == pos_valid][0]) for a in goodpos if a in pos_valid]\n opposite_pos, opposite_weight = zip(*rslt)\n pos2 = opposite_pos[np.argmin(opposite_weight)]\n except Exception:\n pos2 = pos1 + len(azi) // 2\n if pos2 > len(azi):\n pos2 -= len(azi)\n if debug:\n print(f\"References are azimuths {azi[pos1]} and {azi[pos2]}, i.e. azimuthal positions {pos1} and {pos2}.\")\n\n return pos1, pos2" ]
[ "0.63404727", "0.6133634", "0.6060749", "0.58401936", "0.56271565", "0.5616536", "0.5516699", "0.5483635", "0.54565936", "0.5450176", "0.5428849", "0.54229635", "0.540363", "0.54026043", "0.53971386", "0.5384905", "0.53581065", "0.5353302", "0.53430974", "0.5340143", "0.532937", "0.53236294", "0.53084755", "0.5306773", "0.5305699", "0.5302767", "0.5294419", "0.5294159", "0.5280441", "0.52649295" ]
0.70147413
0
Use the left and right fit
def update_lanewidth(left_fit, right_fit, img_):
    img_size = img_.shape
    y_eval = np.linspace(0, img_size[0], 20)
    left_x = get_intercepts(left_fit, y_eval)
    right_x = get_intercepts(right_fit, y_eval)
    return np.clip(right_x - left_x, 400, 800)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, left, mode, right, size=None, **kwargs):\n return super().__call__(left, mode, right, size=size, **kwargs)", "def hflip(self):\n self.leftimg, self.rightimg = self.rightimg, self.leftimg", "def rightbox(self):\r\n pass", "def check_extent(self):\n if self.lower_left.x > self.upper_right.x:\n dlx = self.lower_left.x\n self.lower_left.x = self.upper_right.x\n self.upper_right.y = dlx\n\n if self.lower_left.y > self.upper_right.y:\n dly = self.lower_left.y\n self.lower_left.y = self.upper_right.y\n self.upper_right.y = dly", "def update(self):\n left_height = self.left.height if self.left else -1\n right_height = self.right.height if self.right else -1\n self.height = 1 + max(left_height, right_height)\n self.bf = right_height - left_height", "def shrink(self, right=0, bottom=0, left=0, top=0):\n self.min_col += left\n self.min_row += top\n self.max_col -= right\n self.max_row -= bottom", "def reshape(self, bottom, top):\n\t\tpass", "def reshape(self, bottom, top):\r\n pass", "def _change_shape(self,x,y,w,h):\n top = y \n left = x\n right = x + w\n bottom = y + h\n return top,right,bottom,left", "def increase_right_boundary(self):\n self.R = self.R + 1.0\n self.Ne = self.Ne + 1", "def reshape(self,bottom,top):\n pass", "def fit_lanes(self, points_left, points_right, fit_globally=False) -> dict:\n xl, yl = points_left\n xr, yr = points_right\n\n fit_vals = {}\n if fit_globally:\n # Define global model to fit\n x_left, y_left, x_right, y_right = symfit.variables('x_left, y_left, x_right, y_right')\n a, b, x0_left, x0_right = symfit.parameters('a, b, x0_left, x0_right')\n\n model = symfit.Model({\n x_left: a * y_left ** 2 + b * y_left + x0_left,\n x_right: a * y_right ** 2 + b * y_right + x0_right\n })\n\n # Apply fit\n xl, yl = points_left\n xr, yr = points_right\n fit = symfit.Fit(model, x_left=xl, y_left=yl, x_right=xr, y_right=yr)\n fit = fit.execute()\n fit_vals.update({'ar': fit.value(a), 'al': fit.value(a), 'bl': fit.value(b), 'br': fit.value(b),\n 'x0l': fit.value(x0_left), 'x0r': fit.value(x0_right)})\n\n else:\n # Fit lines independently\n x, y = symfit.variables('x, y')\n a, b, x0 = symfit.parameters('a, b, x0')\n\n model = symfit.Model({\n x: a * y ** 2 + b * y + x0,\n })\n\n # Apply fit on left\n fit = symfit.Fit(model, x=xl, y=yl)\n fit = fit.execute()\n fit_vals.update({'al': fit.value(a), 'bl': fit.value(b), 'x0l': fit.value(x0)})\n\n # Apply fit on right\n fit = symfit.Fit(model, x=xr, y=yr)\n fit = fit.execute()\n fit_vals.update({'ar': fit.value(a), 'br': fit.value(b), 'x0r': fit.value(x0)})\n\n return fit_vals", "def setHeuristics(self, fitwid, fitoff, bothpass, relht, relwid, reloff):\n\tself.fwidth = fitwid\n\tself.foffset = fitoff\n\tself.bothpass = bothpass\n\tself.rheight = relht\n\tself.rwidth = relwid\n\tself.roffset = reloff", "def expand(self, right=0, down=0, left=0, up=0):\n self.min_col -= left\n self.min_row -= up\n self.max_col += right\n self.max_row += down", "def expanded_boundaries(self):\n width = self._points[0][3][0] - self._points[0][1][0]\n height = self._points[0][3][1] - self._points[0][1][1]\n factor = np.multiply((width, height), Window.BORDER)\n return (\n np.subtract(self._points[0][1], factor),\n np.add(self._points[0][3], factor))", "def reshape(self, bottom, top):\n pass", "def reshape(self, bottom, top):\n pass", "def reshape(self, bottom, top):\n pass", "def reshape(self, bottom, top):\n pass", "def reshape(self, bottom, top):\n pass", "def reshape(self, bottom, top):\n pass", "def reshape(self, bottom, top):\n pass", 
"def reshape(self, bottom, top):\n pass", "def reshape(self, bottom, top):\n pass", "def reshape(self, bottom, top):\n pass", "def reshape(self, bottom, top):\n pass", "def __call__(self, axes, renderer):\n bbox0 = self.get_original_position(axes, renderer)\n bbox = bbox0\n\n x1, y1, w, h = bbox.bounds\n extesion_fraction = self.extesion_fraction\n dw, dh = w*extesion_fraction, h*extesion_fraction\n\n if self.extend in [\"min\", \"both\"]:\n if self.orientation == \"horizontal\":\n x1 = x1 + dw\n else:\n y1 = y1+dh\n\n if self.extend in [\"max\", \"both\"]:\n if self.orientation == \"horizontal\":\n w = w-2*dw\n else:\n h = h-2*dh\n\n return Bbox.from_bounds(x1, y1, w, h)", "def _rect_right(self):\n\treturn max(self.x, self.x + self.w)", "def right(self):\n return self.left + self.width", "def sidebyside(w1, w2, buffer=20):\n import re\n import warnings\n # regex to extract the geometry\n geore = re.compile(r'(\\d+)x(\\d+)([\\+-]\\d+)([\\+-]\\d+)')\n\n # First, get the geometry of w1\n rematch = geore.match(w1.winfo_geometry())\n if not rematch:\n # We tried...\n warnings.warn('Could not determine geometry of parent window!')\n return\n\n w1_width, w1_height, w1_offsetx, w1_offsety = rematch.groups()\n\n # Now get w2's geometry\n rematch = geore.match(w2.winfo_geometry())\n if not rematch:\n # We tried...\n warnings.warn('Could not determine geometry of slave window!')\n return\n\n w2_width, w2_height, w2_offsetx, w2_offsety = rematch.groups()\n\n # If the original x offset is - (i.e., closer to RHS of screen), put w2\n # on the left. Otherwise, put it on the right\n if w1_offsetx[0] is '-':\n w2_offsetx = '-%d' % (int(w1_offsetx) - int(w1_width) - 20)\n w2.geometry('%sx%s%s%s' % (w2_width, w2_height, w2_offsetx, w2_offsety))\n else:\n w2_offsetx = '+%d' % (int(w1_offsetx) + int(w1_width) + 20)\n w2.geometry('%sx%s%s%s' % (w2_width, w2_height, w2_offsetx, w2_offsety))" ]
[ "0.5895596", "0.5836528", "0.5623411", "0.55394584", "0.5525405", "0.5424819", "0.53840476", "0.53774345", "0.53398556", "0.53164613", "0.52830184", "0.5280889", "0.5272965", "0.52709484", "0.52354926", "0.51937866", "0.51937866", "0.51937866", "0.51937866", "0.51937866", "0.51937866", "0.51937866", "0.51937866", "0.51937866", "0.51937866", "0.51937866", "0.51778024", "0.51444316", "0.5143091", "0.51209366" ]
0.6062885
0
Create an ISO 6346 shipping container code.
def create(owner_code, serial, category='U'):
    if not (len(owner_code) == 3 and owner_code.isalpha()):
        raise ValueError("Invalid ISO 6346 owner code '{}'".format(owner_code))
    if category not in ('U', 'J', 'Z', 'R'):
        raise ValueError("Invalid ISO 6346 category identifier '{}'".format(category))
    if not (len(serial) == 6 and serial.isdigit()):
        raise ValueError("Invalid ISO 6346 serial number")
    raw_code = owner_code + category + serial
    full_code = raw_code + str(check_digit(raw_code))
    return full_code
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def USCode(self, short):\n states = {\n 'AK': 'Alaska',\n 'AL': 'Alabama',\n 'AR': 'Arkansas',\n 'AS': 'American Samoa',\n 'AZ': 'Arizona',\n 'CA': 'California',\n 'CO': 'Colorado',\n 'CT': 'Connecticut',\n 'DC': 'District of Columbia',\n 'DE': 'Delaware',\n 'FL': 'Florida',\n 'GA': 'Georgia',\n 'GU': 'Guam',\n 'HI': 'Hawaii',\n 'IA': 'Iowa',\n 'ID': 'Idaho',\n 'IL': 'Illinois',\n 'IN': 'Indiana',\n 'KS': 'Kansas',\n 'KY': 'Kentucky',\n 'LA': 'Louisiana',\n 'MA': 'Massachusetts',\n 'MD': 'Maryland',\n 'ME': 'Maine',\n 'MI': 'Michigan',\n 'MN': 'Minnesota',\n 'MO': 'Missouri',\n 'MP': 'Northern Mariana Islands',\n 'MS': 'Mississippi',\n 'MT': 'Montana',\n 'NA': 'National',\n 'NC': 'North Carolina',\n 'ND': 'North Dakota',\n 'NE': 'Nebraska',\n 'NH': 'New Hampshire',\n 'NJ': 'New Jersey',\n 'NM': 'New Mexico',\n 'NV': 'Nevada',\n 'NY': 'New York',\n 'OH': 'Ohio',\n 'OK': 'Oklahoma',\n 'OR': 'Oregon',\n 'PA': 'Pennsylvania',\n 'PR': 'Puerto Rico',\n 'RI': 'Rhode Island',\n 'SC': 'South Carolina',\n 'SD': 'South Dakota',\n 'TN': 'Tennessee',\n 'TX': 'Texas',\n 'UT': 'Utah',\n 'VA': 'Virginia',\n 'VI': 'Virgin Islands',\n 'VT': 'Vermont',\n 'WA': 'Washington',\n 'WI': 'Wisconsin',\n 'WV': 'West Virginia',\n 'WY': 'Wyoming'\n }\n return states.get(short)", "def getCurrencyIsoCode(id=None):", "def create_currency_from_ISO(isocode):\n from .commodity import Commodity\n\n # if self.get_session().query(Commodity).filter_by(isocode=isocode).first():\n # raise GncCommodityError(\"Currency '{}' already exists\".format(isocode))\n\n from .currency_ISO import ISO_currencies\n\n cur = ISO_currencies.get(isocode)\n\n if cur is None:\n raise ValueError(\n \"Could not find the ISO code '{}' in the ISO table\".format(isocode)\n )\n\n # create the currency\n cdty = Commodity(\n mnemonic=cur.mnemonic,\n fullname=cur.currency,\n fraction=10 ** int(cur.fraction),\n cusip=cur.cusip,\n namespace=\"CURRENCY\",\n quote_flag=1,\n )\n\n # self.gnc_session.add(cdty)\n return cdty", "def country(alpha_2_code: str) -> None:", "def create_code(root_node):\r\n huff_list = [\"\"] * 256\r\n code = \"\"\r\n return create_code_helper(root_node, code, huff_list)", "def add_city(g, code, name, country, continent, timezone, coordinates, population, region):\n port = Ports(code, name, country, continent, timezone, coordinates, population, region)\n g.city_dict[code] = port\n g.convert[name] = code \n return g", "def getAAAZZZSId(self):\n if self.element.symbol == \"C\":\n return \"120060\"\n elif self.element.symbol == \"V\":\n return \"510230\"\n else:\n return None", "def create_symbol(self, base_ccy, asset):\n asset = self.user_to_exchange(self.name, asset)\n return (asset+base_ccy).upper()", "def encode_v33_ce(geometry: GeomDict, identifier: str) -> Element:\n\n return GML33CE_ENCODER.encode(geometry, identifier)", "def build_cart(self) -> str:\n return \"bc {} {}\".format(self.pos.x, self.pos.y)", "def getSerpentId(self):\n return \"{}-nat\".format(self.element.symbol.capitalize())", "def card_factory(value: str, base: str, is_hidden: bool = False) -> str:\n if 1 <= len(value) <= 2:\n card = list(base)\n card[13:15] = f\"{value} \" if len(value) == 1 else f\"{value}\"\n card[74:76] = f\" {value}\" if len(value) == 1 else f\"{value}\"\n else:\n raise Exception(\"Invalid value lenght. 
Must be 1 or 2 charaters\")\n\n return hidden_face if is_hidden else \"\".join(card)", "def create_dgp_contract(self):\n contract_data = self.node.createcontract(\"6060604052601e6003556000600460006101000a81548160ff02191690831515021790555060d8600555341561003457600080fd5b5b613183806100446000396000f30060606040523615610126576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680630c83ebac1461012b578063153417471461019757806319971cbd146101fa5780631ec28e0f1461022657806327e357461461025d57806330a79873146102865780633a32306c146102e95780634364725c146103575780634afb4f111461038e5780634cc0e2bc146103fc5780635f302e8b1461043e5780636b102c49146104825780636fb81cbb146104d35780637b993bf3146104e8578063850d9758146105395780638a5a9d07146105b2578063aff125f6146105e9578063bec171e51461064c578063bf5f1e83146106ba578063e9944a81146106fc578063f769ac481461078d578063f9f51401146107f0575b600080fd5b341561013657600080fd5b6101556004808035906020019091908035906020019091905050610872565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101a257600080fd5b6101b86004808035906020019091905050610928565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561020557600080fd5b6102246004808035906020019091908035906020019091905050610976565b005b341561023157600080fd5b6102476004808035906020019091905050610f95565b6040518082815260200191505060405180910390f35b341561026857600080fd5b610270610fed565b6040518082815260200191505060405180910390f35b341561029157600080fd5b6102a76004808035906020019091905050610ffa565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156102f457600080fd5b61034160048080359060200190820180359060200190808060200260200160405190810160405280939291908181526020018383602002808284378201915050505050509190505061103a565b6040518082815260200191505060405180910390f35b341561036257600080fd5b61037860048080359060200190919050506110a9565b6040518082815260200191505060405180910390f35b341561039957600080fd5b6103e66004808035906020019082018035906020019080806020026020016040519081016040528093929190818152602001838360200280828437820191505050505050919050506110db565b6040518082815260200191505060405180910390f35b341561040757600080fd5b61043c600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035906020019091905050611172565b005b341561044957600080fd5b61046860048080359060200190919080359060200190919050506118b0565b604051808215151515815260200191505060405180910390f35b341561048d57600080fd5b6104b9600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050611977565b604051808215151515815260200191505060405180910390f35b34156104de57600080fd5b6104e6611a1d565b005b34156104f357600080fd5b61051f600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050611ab9565b604051808215151515815260200191505060405180910390f35b341561054457600080fd5b61055a6004808035906020019091905050611b5f565b6040518080602001828103825283818151815260200191508051906020019060200280838360005b8381101561059e5780820151818401525b602081019050610582565b505050509050019250505060405180910390f35b34156105bd57600080fd5b6105d36004808035906020019091905050611ca9565b6040518082815260200191505060405180910390f35b34156105f457600080fd5b61060a6004808035906020019091905050611cd7565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561065757600080fd5b6106a46
00480803590602001908201803590602001908080602002602001604051908101604052809392919081815260200183836020028082843782019150505050505091905050611d17565b6040518082815260200191505060405180910390f35b34156106c557600080fd5b6106fa600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035906020019091905050611dae565b005b341561070757600080fd5b610773600480803573ffffffffffffffffffffffffffffffffffffffff169060200190919080359060200190820180359060200190808060200260200160405190810160405280939291908181526020018383602002808284378201915050505050509190505061285f565b604051808215151515815260200191505060405180910390f35b341561079857600080fd5b6107ae60048080359060200190919050506128de565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156107fb57600080fd5b61081a6004808035906020019091908035906020019091905050612a18565b6040518080602001828103825283818151815260200191508051906020019060200280838360005b8381101561085e5780820151818401525b602081019050610842565b505050509050019250505060405180910390f35b600060018311806108835750600282115b1561088d57600080fd5b60008314156108d7576006600001600083815260200190815260200160002060020160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff169050610922565b6001831415610921576006600201600083815260200190815260200160002060020160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff169050610922565b5b92915050565b6000808281548110151561093857fe5b906000526020600020906002020160005b5060010160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1690505b919050565b61097f33611977565b151561098a57600080fd5b600281111561099857600080fd5b60008114806109a75750600281145b8015610a405750610a3d6001805480602002602001604051908101604052809291908181526020018280548015610a3357602002820191906000526020600020905b8160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190600101908083116109e9575b505050505061103a565b82115b15610a4a57600080fd5b600181148015610ae75750610ae46002805480602002602001604051908101604052809291908181526020018280548015610ada57602002820191906000526020600020905b8160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019060010190808311610a90575b505050505061103a565b82115b15610af157600080fd5b6000811415610b0d57816009600001541415610b0c57600080fd5b5b6001811415610b2957816009600101541415610b2857600080fd5b5b6002811415610b4557816009600201541415610b4457600080fd5b5b6006600101600082815260200190815260200160002060000160009054906101000a900460ff161515610c875760016006600101600083815260200190815260200160002060000160006101000a81548160ff02191690831515021790555081600660010160008381526020019081526020016000206002018190555043600660010160008381526020019081526020016000206003018190555060006006600101600083815260200190815260200160002060010181610c069190613046565b50600660010160008281526020019081526020016000206001018054806001018281610c329190613072565b916000526020600020900160005b33909190916101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050610e0e565b600554600660010160008381526020019081526020016000206003015443031115610cba57610cb581612c4a565b610f90565b816006600101600083815260200190815260200160002060020154141515610ce157600080fd5b610d883360066001016000848152602001908152602001600020600101805480602002602001604051908101604052809291908181526020018280548015610d7e57602002820191906000526020600020905b8160009054906101000a900473ffffffffffffffffffffff
ffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019060010190808311610d34575b505050505061285f565b15610d9257600080fd5b600660010160008281526020019081526020016000206001018054806001018281610dbd9190613072565b916000526020600020900160005b33909190916101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550505b600960020154610eba60066001016000848152602001908152602001600020600101805480602002602001604051908101604052809291908181526020018280548015610eb057602002820191906000526020600020905b8160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019060010190808311610e66575b5050505050611d17565b101515610f8f576000811480610ed05750600181145b15610ee057610edf6002612ce5565b5b6000811415610f0d5760066001016000828152602001908152602001600020600201546009600001819055505b6002811415610f2b57610f206000612ce5565b610f2a6001612ce5565b5b6001811415610f585760066001016000828152602001908152602001600020600201546009600101819055505b6002811415610f855760066001016000828152602001908152602001600020600201546009600201819055505b610f8e81612c4a565b5b5b5b5050565b60006002821115610fa557600080fd5b6000821415610fbb576009600001549050610fe8565b6001821415610fd1576009600101549050610fe8565b6002821415610fe7576009600201549050610fe8565b5b919050565b6000808054905090505b90565b60028181548110151561100957fe5b906000526020600020900160005b915054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000806000809050600091505b835182101561109e576000848381518110151561106057fe5b9060200190602002015173ffffffffffffffffffffffffffffffffffffffff161415156110905780806001019150505b5b8180600101925050611047565b8092505b5050919050565b600060028211156110b957600080fd5b600660010160008381526020019081526020016000206002015490505b919050565b6000806000809050600091505b8351821015611167576000848381518110151561110157fe5b9060200190602002015173ffffffffffffffffffffffffffffffffffffffff161415801561114b575061114a848381518110151561113b57fe5b90602001906020020151611ab9565b5b156111595780806001019150505b5b81806001019250506110e8565b8092505b5050919050565b60008061117e33611977565b151561118957600080fd5b60008473ffffffffffffffffffffffffffffffffffffffff1614156111ad57600080fd5b60018311156111bb57600080fd5b600083141561128b57611253600180548060200260200160405190810160405280929190818152602001828054801561124957602002820191906000526020600020905b8160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190600101908083116111ff575b505050505061103a565b915060096000015482148061126c575060096002015482145b1561127657600080fd5b61127f84611977565b151561128a57600080fd5b5b600183141561134957600960010154611329600280548060200260200160405190810160405280929190818152602001828054801561131f57602002820191906000526020600020905b8160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190600101908083116112d5575b505050505061103a565b141561133457600080fd5b61133d84611ab9565b151561134857600080fd5b5b6006600201600084815260200190815260200160002060000160009054906101000a900460ff1615156114c55760016006600201600085815260200190815260200160002060000160006101000a81548160ff021916908315150217905550836006600201600085815260200190815260200160002060020160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550436006600201600085815260200190815260200160002060030181905550600060066002016000858152602001908152602001600020600101816114449
190613046565b506006600201600084815260200190815260200160002060010180548060010182816114709190613072565b916000526020600020900160005b33909190916101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050611698565b6005546006600201600085815260200190815260200160002060030154430311156114f8576114f383612dba565b6118a9565b8373ffffffffffffffffffffffffffffffffffffffff166006600201600085815260200190815260200160002060020160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1614151561156b57600080fd5b611612336006600201600086815260200190815260200160002060010180548060200260200160405190810160405280929190818152602001828054801561160857602002820191906000526020600020905b8160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190600101908083116115be575b505050505061285f565b1561161c57600080fd5b6006600201600084815260200190815260200160002060010180548060010182816116479190613072565b916000526020600020900160005b33909190916101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550505b6009600201546117446006600201600086815260200190815260200160002060010180548060200260200160405190810160405280929190818152602001828054801561173a57602002820191906000526020600020905b8160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190600101908083116116f0575b5050505050611d17565b1015156118a85760008314801561179957506117986006600201600085815260200190815260200160002060020160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16611977565b5b156117e2576117e1836006600201600086815260200190815260200160002060020160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16612e8f565b5b600183148015611830575061182f6006600201600085815260200190815260200160002060020160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16611ab9565b5b1561187957611878836006600201600086815260200190815260200160002060020160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16612e8f565b5b600090505b600381101561189e5761189081612ce5565b5b808060010191505061187e565b6118a783612dba565b5b5b5b50505050565b600060028311806118c15750600282115b156118cb57600080fd5b6000831415611902576006600001600083815260200190815260200160002060000160009054906101000a900460ff169050611971565b6001831415611939576006600101600083815260200190815260200160002060000160009054906101000a900460ff169050611971565b6002831415611970576006600201600083815260200190815260200160002060000160009054906101000a900460ff169050611971565b5b92915050565b600080600090505b600180549050811015611a12578273ffffffffffffffffffffffffffffffffffffffff166001828154811015156119b257fe5b906000526020600020900160005b9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff161415611a045760019150611a17565b5b808060010191505061197f565b600091505b50919050565b600460009054906101000a900460ff1615611a3757600080fd5b60018054806001018281611a4b9190613072565b916000526020600020900160005b33909190916101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550506001600460006101000a81548160ff0219169083151502179055505b565b600080600090505b600280549050811015611b54578273ffffffffffffffffffffffffffffffffffffffff16600282815481101515611af457fe5b906000526020600020900160005b9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffff
ffffffffffffff161415611b465760019150611b59565b5b8080600101915050611ac1565b600091505b50919050565b611b6761309e565b6001821115611b7557600080fd5b6000821415611c0c576001805480602002602001604051908101604052809291908181526020018280548015611c0057602002820191906000526020600020905b8160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019060010190808311611bb6575b50505050509050611ca4565b6001821415611ca3576002805480602002602001604051908101604052809291908181526020018280548015611c9757602002820191906000526020600020905b8160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019060010190808311611c4d575b50505050509050611ca4565b5b919050565b60008082815481101515611cb957fe5b906000526020600020906002020160005b506000015490505b919050565b600181815481101515611ce657fe5b906000526020600020900160005b915054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000806000809050600091505b8351821015611da35760008483815181101515611d3d57fe5b9060200190602002015173ffffffffffffffffffffffffffffffffffffffff1614158015611d875750611d868483815181101515611d7757fe5b90602001906020020151611977565b5b15611d955780806001019150505b5b8180600101925050611d24565b8092505b5050919050565b611db733611977565b158015611dca5750611dc833611ab9565b155b15611dd457600080fd5b600081148015611e745750600354611e716001805480602002602001604051908101604052809291908181526020018280548015611e6757602002820191906000526020600020905b8160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019060010190808311611e1d575b505050505061103a565b10155b15611e7e57600080fd5b600181148015611f1e5750600354611f1b6002805480602002602001604051908101604052809291908181526020018280548015611f1157602002820191906000526020600020905b8160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019060010190808311611ec7575b505050505061103a565b10155b15611f2857600080fd5b60008273ffffffffffffffffffffffffffffffffffffffff161415611f4c57600080fd5b6002811115611f5a57600080fd5b6000811480611f695750600181145b8015611f8a5750611f7982611977565b80611f895750611f8882611ab9565b5b5b15611f9457600080fd5b6006600001600082815260200190815260200160002060000160009054906101000a900460ff16151561212357611fca33611ab9565b15611fd457600080fd5b60016006600001600083815260200190815260200160002060000160006101000a81548160ff021916908315150217905550816006600001600083815260200190815260200160002060020160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550436006600001600083815260200190815260200160002060030181905550600060066000016000838152602001908152602001600020600101816120a29190613046565b506006600001600082815260200190815260200160002060010180548060010182816120ce9190613072565b916000526020600020900160005b33909190916101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550506122f6565b6005546006600001600083815260200190815260200160002060030154430311156121565761215181612ce5565b61285a565b8173ffffffffffffffffffffffffffffffffffffffff166006600001600083815260200190815260200160002060020160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff161415156121c957600080fd5b612270336006600001600084815260200190815260200160002060010180548060200260200160405190810160405280929190818152602001828054801561226657602002820191906000526020600020905b816000905
4906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001906001019080831161221c575b505050505061285f565b1561227a57600080fd5b6006600001600082815260200190815260200160002060010180548060010182816122a59190613072565b916000526020600020900160005b33909190916101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550505b60008114806123055750600181145b156125ab576009600201546123b6600660000160008481526020019081526020016000206001018054806020026020016040519081016040528092919081815260200182805480156123ac57602002820191906000526020600020905b8160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019060010190808311612362575b5050505050611d17565b1015156125aa576123ff6006600001600083815260200190815260200160002060020160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16611977565b8061244857506124476006600001600083815260200190815260200160002060020160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16611ab9565b5b1561245257600080fd5b60008114156124f9576001805480600101828161246f9190613072565b916000526020600020900160005b6006600001600085815260200190815260200160002060020160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16909190916101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550505b60018114156125a057600280548060010182816125169190613072565b916000526020600020900160005b6006600001600085815260200190815260200160002060020160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16909190916101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550505b6125a981612ce5565b5b5b6002811415612859576009600001546126606006600001600084815260200190815260200160002060010180548060200260200160405190810160405280929190818152602001828054801561265657602002820191906000526020600020905b8160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001906001019080831161260c575b5050505050611d17565b1015801561271857506009600101546127156006600001600084815260200190815260200160002060010180548060200260200160405190810160405280929190818152602001828054801561270b57602002820191906000526020600020905b8160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190600101908083116126c1575b50505050506110db565b10155b15612858576000808054905011801561275f575060014301600060016000805490500381548110151561274757fe5b906000526020600020906002020160005b5060000154145b1561276957600080fd5b6000805480600101828161277d91906130b2565b916000526020600020906002020160005b60408051908101604052806001430181526020016006600001600087815260200190815260200160002060020160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681525090919091506000820151816000015560208201518160010160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050505061285781612ce5565b5b5b5b5b5050565b600080600090505b82518110156128d2578373ffffffffffffffffffffffffffffffffffffffff16838281518110151561289557fe5b9060200190602002015173ffffffffffffffffffffffffffffffffffffffff1614156128c457600191506128d7565b5b8080600101915050612867565b600091505b5092915050565b6000806000808054905014156128f75760009150612a12565b60016000805490500390505b600081111561299457826000828154
8110151561291c57fe5b906000526020600020906002020160005b50600001541115156129855760008181548110151561294857fe5b906000526020600020906002020160005b5060010160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff169150612a12565b5b808060019003915050612903565b826000808154811015156129a457fe5b906000526020600020906002020160005b5060000154111515612a0d576000808154811015156129d057fe5b906000526020600020906002020160005b5060010160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff169150612a12565b600091505b50919050565b612a2061309e565b6002831180612a2f5750600282115b15612a3957600080fd5b6000831415612ae75760066000016000838152602001908152602001600020600101805480602002602001604051908101604052809291908181526020018280548015612adb57602002820191906000526020600020905b8160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019060010190808311612a91575b50505050509050612c44565b6001831415612b955760066001016000838152602001908152602001600020600101805480602002602001604051908101604052809291908181526020018280548015612b8957602002820191906000526020600020905b8160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019060010190808311612b3f575b50505050509050612c44565b6002831415612c435760066002016000838152602001908152602001600020600101805480602002602001604051908101604052809291908181526020018280548015612c3757602002820191906000526020600020905b8160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019060010190808311612bed575b50505050509050612c44565b5b92915050565b6000600660010160008381526020019081526020016000206002018190555060006006600101600083815260200190815260200160002060010181612c8f9190613046565b506000600660010160008381526020019081526020016000206003018190555060006006600101600083815260200190815260200160002060000160006101000a81548160ff0219169083151502179055505b50565b60006006600001600083815260200190815260200160002060020160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555060006006600001600083815260200190815260200160002060010181612d649190613046565b506000600660000160008381526020019081526020016000206003018190555060006006600001600083815260200190815260200160002060000160006101000a81548160ff0219169083151502179055505b50565b60006006600201600083815260200190815260200160002060020160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555060006006600201600083815260200190815260200160002060010181612e399190613046565b506000600660020160008381526020019081526020016000206003018190555060006006600201600083815260200190815260200160002060000160006101000a81548160ff0219169083151502179055505b50565b600080831415612f6857600090505b600180549050811015612f67578173ffffffffffffffffffffffffffffffffffffffff16600182815481101515612ed157fe5b906000526020600020900160005b9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff161415612f5957600181815481101515612f2957fe5b906000526020600020900160005b6101000a81549073ffffffffffffffffffffffffffffffffffffffff02191690555b5b8080600101915050612e9e565b5b600183141561304057600090505b60028054905081101561303f578173ffffffffffffffffffffffffffffffffffffffff16600282815481101515612fa957fe5b906000526020600020900160005b9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1614156130315760028181548110151561300157fe5b90600
0526020600020900160005b6101000a81549073ffffffffffffffffffffffffffffffffffffffff02191690555b5b8080600101915050612f76565b5b5b505050565b81548183558181151161306d5781836000526020600020918201910161306c91906130e4565b5b505050565b8154818355818115116130995781836000526020600020918201910161309891906130e4565b5b505050565b602060405190810160405280600081525090565b8154818355818115116130df576002028160020283600052602060002091820191016130de9190613109565b5b505050565b61310691905b808211156131025760008160009055506001016130ea565b5090565b90565b61315491905b80821115613150576000808201600090556001820160006101000a81549073ffffffffffffffffffffffffffffffffffffffff02191690555060020161310f565b5090565b905600a165627a7a723058203193cc570fd198d6b9da1b2fcac2a6332e140446e820454c1ac4467f811341e30029\", 4000000)\n self.contract_address = contract_data['address']\n self.node.generate(1)", "def create_proposal_contract(self):\n contract_data = self.node.createcontract(\"60806040526104e060405190810160405280600a62ffffff168152602001600a62ffffff168152602001600a62ffffff168152602001600a62ffffff168152602001600a62ffffff168152602001600a62ffffff168152602001600a62ffffff168152602001600a62ffffff168152602001600a62ffffff168152602001603262ffffff168152602001601e62ffffff168152602001600662ffffff16815260200160c862ffffff16815260200160c962ffffff16815260200161138862ffffff168152602001613a9862ffffff168152602001600162ffffff1681526020016205b4f062ffffff168152602001600862ffffff16815260200161017762ffffff168152602001617d0062ffffff1681526020016102bc62ffffff1681526020016108fc62ffffff16815260200161232862ffffff1681526020016161a862ffffff168152602001615dc062ffffff168152602001600362ffffff16815260200161020062ffffff16815260200160c862ffffff16815260200161520862ffffff16815260200161cf0862ffffff168152602001600462ffffff168152602001604462ffffff168152602001600362ffffff1681526020016102bc62ffffff1681526020016102bc62ffffff16815260200161019062ffffff16815260200161138862ffffff16815260200161600062ffffff168152506000906027610206929190610219565b5034801561021357600080fd5b506102ee565b8260276007016008900481019282156102aa5791602002820160005b8382111561027857835183826101000a81548163ffffffff021916908362ffffff1602179055509260200192600401602081600301049283019260010302610235565b80156102a85782816101000a81549063ffffffff0219169055600401602081600301049283019260010302610278565b505b5090506102b791906102bb565b5090565b6102eb91905b808211156102e757600081816101000a81549063ffffffff0219169055506001016102c1565b5090565b90565b610160806102fd6000396000f300608060405260043610610041576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806326fadbe214610046575b600080fd5b34801561005257600080fd5b5061005b610099565b6040518082602760200280838360005b8381101561008657808201518184015260208101905061006b565b5050505090500191505060405180910390f35b6100a1610110565b6000602780602002604051908101604052809291908260278015610106576020028201916000905b82829054906101000a900463ffffffff1663ffffffff16815260200190600401906020826003010492830192600103820291508084116100c95790505b5050505050905090565b6104e0604051908101604052806027906020820280388339808201915050905050905600a165627a7a723058205e249731b14c9492ca6a161a7342bd0796c89a7eea6a30255be7fe5c0ee8995a0029\", 10000000)\n self.proposal_address = contract_data['address']\n self.abiGetSchedule = \"26fadbe2\"", "def build(self, definition_id, to_format, container_name):\n url = f\"{self.base_url}/build\"\n payload = {\"definition_id\": definition_id, \"to_format\": to_format, \"container_name\": container_name}\n response = requests.post(url, json=payload, headers=self.headers)\n 
build_id = response.text\n\n return build_id", "def _mk_coref_id():\n num, alpha = int(_mk_coref_id.id[:-1]), _mk_coref_id.id[-1]\n if alpha == 'Z':\n alpha = 'A'\n num += 1\n else:\n alpha = chr(ord(alpha) + 1)\n\n _mk_coref_id.id = '%s%s' % (num, alpha)\n return _mk_coref_id.id", "def uCSIsLatin1Supplement(code):\n ret = libxml2mod.xmlUCSIsLatin1Supplement(code)\n return ret", "def getAAAZZZSId(self):\n return f\"{self.a}{self.z:>03d}{self.state}\"", "def card_html_id(card):\n return f'c{card:02d}'", "def to_code(self, ipt_args_in_construct: str, variable_name: str, output_var: str, code_fragment):", "def COUNTRY_CODE():\n COUNTRY_CODE = \"us/90210\"\n return COUNTRY_CODE", "def reformat_tract_code(tract: str, state_code: str, county_code: str) -> str:\n \n # If the tract code contains a period, remove it, then prepend zeroes until length is 6\n if \".\" in tract:\n tract = tract.replace(\".\", \"\")\n num_zeroes = 6 - len(tract)\n tract = (\"0\" * num_zeroes) + tract\n # Else prepend zeroes until the length is 4, then add 2 zeroes to the end\n else:\n num_zeroes = 4 - len(tract)\n tract = (\"0\" * num_zeroes) + tract + \"00\"\n \n # Prepend state and county FIPS codes\n geoid = state_code + county_code + tract\n \n return geoid", "def getSerpentId(self):\n symbol = self.element.symbol.capitalize()\n return \"{}-{}{}\".format(symbol, self.a, \"m\" if self.state else \"\")", "def gee_ic_id(self):\n toa = 'COPERNICUS/S2'\n sr = 'COPERNICUS/S2_SR'\n return toa if self._isTOA() else sr", "def test_create_shipping_address(self):\n self.cim.create_shipping_address(\n customer_profile_id=100,\n ship_phone=u'415-415-4154',\n ship_first_name=u'valentino'\n )", "def generate_new_code():\n code = ''.join(random.choice(string.digits) for i in range(6))\n return code", "def set_code_to_printings_key(printing):\n return (\n printing.set_integer or 0,\n str(printing.set_variant),\n printing.multiverseid or 0,\n printing.card_name,\n )", "def make_quantity(string):\n pass", "def _generate_cart_id():\n cart_id = ''\n characters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()'\n cart_id_length = 50\n cart_id = ''.join([ _generate(characters) for y in range(cart_id_length)])\n\n return cart_id", "def _format_intermediary_institution_56D(self, val):\n account = val.get('ACCOUNT')\n name = val.get('NAME')\n address = val.get('ADDRESS')\n if name and address:\n name = FSwiftWriterUtils.split_text_and_prefix(name, 35)\n address = FSwiftWriterUtils.split_text_and_prefix(address, 35)\n val = FSwiftWriterUtils.allocate_space_for_name_address_without_constraint(name, address)\n if account:\n val = \"/\" + str(account) + \"\\n\" + str(val)\n return val" ]
[ "0.5175448", "0.5125011", "0.50932604", "0.49715528", "0.49012083", "0.48026282", "0.48011735", "0.47937286", "0.47771695", "0.47705403", "0.47160998", "0.47139278", "0.47057864", "0.47034556", "0.46948606", "0.46944666", "0.4689189", "0.4687901", "0.46823987", "0.46695173", "0.46665677", "0.46463287", "0.463197", "0.46269512", "0.4620182", "0.46178904", "0.45858875", "0.45800152", "0.45725527", "0.4569992" ]
0.5852458
0
Compute the check digit for an ISO 6346 code without that digit
def check_digit(raw_code):
    s = sum(code(char) * 2**index for index, char in enumerate(raw_code))
    return s % 11 % 10
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checkdigit(code):\n check = sum((i+1)*int(code[i]) for i in range(9)) % 11\n return 'X' if check == 10 else str(check)", "def calc_check_digit_issn(issn):\n\n total = 0\n lissn = list(issn.replace('-', ''))\n\n for i, v in enumerate(lissn[:-1]):\n total = total + ((8-i) * int(v))\n\n remainder = total % 11\n\n if not remainder:\n check_digit = 0\n else:\n check_digit = 11 - remainder\n\n return 'X' if check_digit == 10 else str(check_digit)", "def calculate_issn_checkdigit(s):\n if len(s) != 7:\n raise ValueError('seven digits required')\n ss = sum([int(digit) * f for digit, f in zip(s, range(8, 1, -1))])\n _, mod = divmod(ss, 11)\n checkdigit = 0 if mod == 0 else 11 - mod\n if checkdigit == 10:\n checkdigit = 'X'\n return '{}'.format(checkdigit)", "def evalute_number(dialed):\n if (len(dialed) == 11 or len(dialed) == 10) and str(dialed).startswith(\"0\"):\n # UK Number\n return \"+44%s\" % (dialed[1:])\n elif len(dialed) == 6:\n # Local Fishguard numbers\n return \"+441348%s\" % (dialed)\n return None", "def calc_check_digit(number):\n number = compact(number)\n alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n weights = (29, 23, 19, 17, 13, 7, 5, 3)\n if not isdigits(number):\n number = number[0] + str('ABCEHKMOPT'.index(number[1])) + number[2:]\n c = sum(w * alphabet.index(n) for w, n in zip(weights, number)) % 11\n if c > 9:\n raise InvalidChecksum()\n return str(c)", "def issuer(number):\n res = \"unknown\"\n num = str(number)\n if num[:1]==\"4\":\n res = \"Visa\"\n elif num[:2] in (\"34\",\"37\"):\n res = \"American Express\"\n elif num[:2] in (\"51\",\"55\"):\n res = \"MasterCard\"\n elif num[:4]==\"6011\":\n res = \"Discover/Novus\"\n return res", "def mpc2internal(self,Code):\n if (Code.isdigit()):\n internal_code=int(Code)\n else:\n internal_code=(ord(Code[0])-55)*100+int(Code[1:])\n internal_code = -internal_code\n return (internal_code)", "def _orcid_checksum_digit(orcid):\n total = 0\n digits = [int(ch) for ch in orcid.replace(\"-\", \"\")]\n for digit in digits:\n total = (total + digit) * 2\n remainder = total % 11\n result = (12 - remainder) % 11\n\n if result == 10:\n return \"X\"\n\n return str(result)", "def _select_market_code(code):\n code = str(code)\n if code[0] in ['5', '6', '9'] or code[:3] in [\"009\", \"126\", \"110\", \"201\", \"202\", \"203\", \"204\"]:\n return 1\n return 0", "def mpc2internal(self,Code):\n \n if (Code.isdigit()):\n internal_code=int(Code)\n\n else:\n internal_code=(ord(Code[0])-55)*100+int(Code[1:])\n\n internal_code = -internal_code\n return (internal_code)", "def check_digits_cpf(x: str, n: int) -> int:\n check_vec = np.flip(np.arange(2, 10 + n))\n digits = np.array(list(x[: 8 + n])).astype(\"int\")\n result = np.dot(check_vec, digits) % 11\n\n return 0 if result < 2 else 11 - result", "def code_format(self):\n return \"^\\\\d{%s}$\" % self._digits", "def test_luhn_checksum(self):\n check_digit = calculate_luhn(\"7992739871\")\n assert check_digit == 3", "def calculate_luhn_check_digit(partial_card_number):\n checksum = luhn_checksum(int(partial_card_number) * 10)\n if checksum == 0:\n check_digit = 0\n else:\n check_digit = 10 - checksum\n return check_digit", "def safe_number(self):\n mask = '*' * (len(self.card_number) - 4)\n return '{0}{1}'.format(mask, self.card_number[-4:])", "def check_converted_account(converted_account):\r\n checksum = 0\r\n if \"?\" in converted_account:\r\n return str(converted_account) + \" ILL\"\r\n else:\r\n for i,j in zip(converted_account, range(1,10)):\r\n checksum += int(i) * j\r\n if (checksum % 
11) == 0:\r\n return str(converted_account)\r\n else:\r\n return str(converted_account) + \" ERR\"", "def check_and_repair_right_format(self, digit):\n if digit in one_digit_number:\n digit = '0' + digit\n return digit", "def ean_check_digit(ean):\n return (10 - (sum((2 + (-1) ** x) * y for (x, y) in enumerate(ean,\n start=1)) % 10)) % 10", "def istele(number):\n if number[:3] == '140':\n return True\n return False", "def _validate_bank_operation_code_23B(self, val):\n return val", "def _validate_intermediary_institution_56D(self, val):\n return val", "def valid_checkdigit(ccnum):\n\n sum = 0\n num_digits = len(ccnum)\n oddeven = num_digits & 1\n\n for count in range(0, num_digits):\n digit = int(ccnum[count])\n\n if not ((count & 1) ^ oddeven):\n digit = digit * 2\n if digit > 9:\n digit = digit - 9\n\n sum = sum + digit\n\n return ((sum % 10) == 0)", "def is_istc(val):\n val = val.replace(\"-\", \"\").replace(\" \", \"\").upper()\n if len(val) != 16:\n return False\n sequence = [11, 9, 3, 1]\n try:\n r = sum([int(x, 16) * sequence[i % 4] for i, x in enumerate(val[:-1])])\n ck = hex(r % 16)[2:].upper()\n return ck == val[-1]\n except ValueError:\n return False", "def is_code_has_unknown_digit(processed_code):\n return True if list(processed_code).count(\"?\") == 0 else False", "def test_valid_luhn(self):\n assert luhn_checksum(\"79927398713\") == 0", "def fn(c):\n ans = 0\n for k in range(1, 16): \n ans = min(ans, k*16+k, key=lambda x: abs(x - int(c, 16)))\n return hex(ans)[2:].zfill(2)", "def check_digit(tracking_number):\n check_digit = 10 - ((sum(itertools.starmap(operator.mul, zip(itertools.cycle((3, 1)), map(int, str(tracking_number))))) + 1) % 10)\n if check_digit == 10:\n check_digit = 0\n return check_digit", "def get_four_digit_code(self):\n return (\n self.subhead[\"New_Code\"].str[0:2] + \".\" + self.subhead[\"New_Code\"].str[2:4]\n )", "def luhn_checksum(num: str) -> str:\n check = 0\n for i, s in enumerate(reversed(num)):\n sx = int(s)\n if i % 2 == 0:\n sx *= 2\n if sx > 9:\n sx -= 9\n check += sx\n return str(check * 9 % 10)", "def isbn_10_check_digit(nine_digits):\r\n if len(nine_digits) != 9: return None\r\n try: int(nine_digits)\r\n except: return None\r\n remainder = int(sum((i + 2) * int(x) for i, x in enumerate(reversed(nine_digits))) % 11)\r\n if remainder == 0: tenth_digit = 0\r\n else: tenth_digit = 11 - remainder\r\n if tenth_digit == 10: tenth_digit = 'X'\r\n return str(tenth_digit)" ]
[ "0.6888133", "0.6631145", "0.6620832", "0.6535597", "0.6298015", "0.6270086", "0.6102347", "0.6022895", "0.6003835", "0.59928644", "0.5986576", "0.59091353", "0.58925956", "0.5891122", "0.5886155", "0.5840528", "0.58109224", "0.5803279", "0.5776782", "0.5743615", "0.5738659", "0.57323015", "0.5727749", "0.5703101", "0.56719273", "0.56675", "0.56660366", "0.56646174", "0.5654082", "0.56520844" ]
0.697806
0
Determine the ISO 6346 numeric code for a letter.
def letter_code(letter):
    value = ord(letter.lower()) - ord('a') + 10
    return value + value // 11
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def alpha_number(alpha):\r\n if alpha.isupper() == False:\r\n num = ord(alpha) - 96\r\n return num\r\n elif alpha.isupper() == True:\r\n num = ord(alpha) - 64\r\n return num", "def code(char):\n return int(char) if char.isdigit() else letter_code(char)", "def letter_num(num: int):\n if abs(num) > 26 or num == 0:\n let = ord('a') + 26 - 1\n else:\n let = ord('a') + abs(num) - 1\n return chr(let)", "def get_alphabet(number):\n return chr(number + 96)", "def get_alphabet(number):\n return chr(number + 96)", "def convert_letter_to_int(letter):\n\treturn ord(letter.lower())-ord('a')", "def letter_to_index(letter):\r\n return ord(letter.lower()) - CHAR_A", "def letter_to_index(letter):\r\n return ord(letter.lower()) - CHAR_A", "def char_to_number(char):\n if not char.isalpha():\n return\n elif char.isupper():\n return (ord(char) - ord(\"A\"))\n else:\n return (ord(char) - ord(\"a\"))", "def _above128char_to_keycode(self, char: str) -> int:\n if ord(char) in self.HIGHER_ASCII:\n return self.HIGHER_ASCII[ord(char)]\n if char in self.HIGHER_ASCII:\n return self.HIGHER_ASCII[char]\n return 0", "def encode_identifier(alphabet, n):\r\n c = alphabet[n & 0b1111]\r\n n>>=4\r\n while n > 0:\r\n c = c + alphabet[n & 0b111111]\r\n n>>=6\r\n return c", "def get_letter(value, IC_type):\n\tif value > 10 or value < 0:\n\t\t# Invalid value (must be between 0 to 10)\n\t\treturn None\n\telif IC_type == 'S' or IC_type == 'T':\n\t\tindex_of_value = code_ST.index(value)\n\t\treturn code_ST[index_of_value + 1] # Letter is always after number.\n\telif IC_type == 'F' or IC_type == 'G':\n\t\tindex_of_value = code_FG.index(value)\n\t\treturn code_FG[index_of_value + 1]\n\telse:\n\t\t# IC_type is invalid\n\t\treturn None", "def CODE(string):\n return ord(string[0])", "def country_letter_code(self):\n if \"countryLetterCode\" in self._prop_dict:\n return self._prop_dict[\"countryLetterCode\"]\n else:\n return None", "def base26(w):\n val = 0\n for ch in w.lower():\n next_digit = ord(ch) - ord('a')\n val = 26*val + next_digit\n return val", "def getFENtileLetter(fen,letter,number):\n l2i = lambda l: ord(l)-ord('A') # letter to index\n piece_letter = fen[(8-number)*8+(8-number) + l2i(letter)]\n return ' KQRBNPkqrbnp'.find(piece_letter)", "def get_ordinal(inChar):\n if inChar in SPECIAL_CHARS:\n index = SPECIAL_CHARS.index(inChar) + 38\n elif ord(inChar) in range (ord('a'), ord('z')):\n index = ord(inChar) - ord('a')\n elif inChar == ' ':\n index = 26\n elif (int(inChar) >= 0 & int(inChar) <= 9):\n index = int(inChar)+27\n else:\n print(inChar + \"** Unsupported **\")\n index = 27\n return index", "def _hexchar(c):\n if c == '1': return 1\n if c == '2': return 2\n if c == '3': return 3\n if c == '4': return 4\n if c == '5': return 5\n if c == '6': return 6\n if c == '7': return 7\n if c == '8': return 8\n if c == '9': return 9\n if c == 'A' or c == 'a': return 10\n if c == 'B' or c == 'b': return 11\n if c == 'C' or c == 'c': return 12\n if c == 'D' or c == 'd': return 13\n if c == 'E' or c == 'e': return 14\n if c == 'F' or c == 'f': return 15\n return 0", "def ord(char: str) -> int:\n if len(char) > 1:\n raise ValueError(\"Expected a single character\")\n if char[0] not in Base64._CHARSET:\n raise ValueError(\"Invalid char value\")\n\n if \"a\" <= char <= \"z\":\n return ord(char) - ord(\"a\")\n\n if \"A\" <= char <= \"Z\":\n return ord(char) - ord(\"A\") + 26\n\n if \"0\" <= char <= \"9\":\n return ord(char) - ord(\"0\") + 52\n\n if char == \"-\":\n return 62\n\n return 63 # char is equal to '_'", "def 
prefer_alphabet(i):\n if 0 <= i <= 25:\n return chr(i + 65)\n if 26 <= i <= 51:\n return chr(i + 97 - 26)\n return str(i)", "def random_alpha_num_char():\n num = random.randint(0, 26 + 26 + 10)\n if num < 26:\n return chr(num + 65)\n num -= 26\n if num < 26:\n return chr(num + 97)\n return chr(num + 48)", "def letter2num(letters, zbase=True):\n\n letters = letters.upper()\n res = 0\n weight = len(letters) - 1\n for i, ch in enumerate(letters):\n res += (ord(ch) - 64) * 26 ** (weight - i)\n if not zbase:\n return res\n return res - 1", "def letter_to_num(self, string, dict_):\n #dict_= {'A': '0', 'C': '1', 'D': '2', 'E': '3', 'F': '4', 'G': '5', 'H': '6', 'I': '7', 'K': '8', 'L': '9', 'M': '10', 'N': '11', 'P': '12', 'Q': '13', 'R': '14', 'S': '15', 'T': '16', 'V': '17', 'W': '18', 'Y': '19'}\n patt = re.compile('[' + ''.join(dict_.keys()) + ']')\n num_string = patt.sub(lambda m: dict_[m.group(0)] + ' ', string)\n #print(num_string)\n #print(type(num_string))\n num = [int(i) for i in num_string.split()]\n return num", "def convert_char(char):\n if char == 'F' or char == 'L':\n return 0\n \n return 1", "def getCode1Letter(self):\n dataDict = self.__dict__\n cc = self.stdChemComp\n if cc is None:\n result = None\n else:\n result = cc.code1Letter\n return result", "def return_index(character: str) -> int:\n if character.islower():\n return ord(character) - ord(\"a\")\n else:\n return ord(character) - ord(\"A\")", "def ordChar(self, char):\n char = char.upper()\n num = ord(char) - 65\n return num", "def GetCharByNum(self, inputNum):\n # ROTOR USE ONLY\n for char, num in self.alphabet.items():\n if num == inputNum: \n return char", "def letter_to_base40(letter):\n letters = {'C': 3, 'D': 9, 'E': 15, 'F': 20, 'G': 26, 'A': 32, 'B': 38}\n if letter not in letters.keys():\n raise ValueError('invalid letter \\'{}\\''.format(letter))\n return letters[letter]", "def GetNumByChar(self, inputChar):\n # ROTOR USE ONLY\n for char, num in self.alphabet.items():\n if char == inputChar:\n return num" ]
[ "0.7418329", "0.7332461", "0.6975198", "0.6789075", "0.6789075", "0.67152506", "0.65995985", "0.65995985", "0.65719616", "0.6529032", "0.64173263", "0.6383725", "0.63769406", "0.63268995", "0.6257811", "0.62500453", "0.62329525", "0.6205183", "0.61922914", "0.6171451", "0.6063202", "0.6057931", "0.6057364", "0.60467124", "0.60017097", "0.5996392", "0.5989963", "0.5955221", "0.5938658", "0.5926492" ]
0.73669213
1
Insert item into dynamic vp tree by first adding to pool, and then building a tree from the pool if min size reached. Then merge trees of equal sizes so that there are at most log(log(n)) trees, with the largest tree having roughly n/2 nodes
def insert(self, item):
    self.pool.append(item)
    if len(self.pool) == self.min_tree_size:
        self.trees.append(_ExtendedVPTree(self.pool, self.dist_fn))
        self.pool = []
    while len(self.trees) > 1 and self.trees[-1].size == self.trees[-2].size:
        a = self.trees.pop()
        b = self.trees.pop()
        self.trees.append(_ExtendedVPTree(a.points + b.points, self.dist_fn))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bst_insert(sizes):\n tree = rbTree_main.BinarySearchTree();\n for i in range(sizes):\n tree.insert(random.random())", "def rbt_insert(sizes):\n tree = rbTree_main.RBTree();\n for i in range(sizes):\n tree.rb_insert(random.random());\n pass", "def _insort(self, node):\n lo = 0\n hi = len(self._pool)\n while lo < hi:\n mid = (lo+hi)//2\n if node.getFScore() < self._pool[mid].getFScore(): hi = mid\n else: lo = mid + 1\n self._pool.insert(lo, node)", "def expand_tree(self, N=1):\n # type: (int) -> None\n assert self._initialized, 'Search not initialized.'\n for _ in range(N): \n x_rand = self.sample_free()\n x_nearest = self.nearest(x_rand)\n x_new = self.steer(x_nearest, x_rand)\n if self.coll_free(x_nearest, x_new):\n self.index+=1\n X_near = [x for x in self.near(x_new) if self.coll_free(x, x_new)]\n cost_min = self.costs[self.research_index(self.nodes,x_nearest)][1] + self.dist(x_nearest, x_new)\n x_min = x_nearest\n for x in X_near:\n cost = self.costs[self.research_index(self.nodes,x)][1] + self.dist(x, x_new)\n if cost < cost_min:\n cost_min = cost\n x_min = x\n \n self.nodes.append(x_new)\n j=self.research_index(self.nodes,x_min)\n self.parents[self.index,j]=1\n self.costs[self.index] = (x_new,self.costs[j][1] + self.dist(x_min, x_new))\n for x in X_near:\n k=self.research_index(self.nodes,x)\n if self.costs[self.index][1] + self.dist(x_new, x) < self.costs[k][1]:\n self.parents[self.index]=np.zeros(self.N)\n self.parents[self.index,k] = 1\n self.costs[k] = (self.costs[k][0],self.costs[self.index][1] + self.dist(x_new, x))", "def nocache_create_equal_size_subtrees():\n N = len(self)\n subTrees = [set(range(i, N, numSubTrees)) for i in range(numSubTrees)]\n totalCost = N\n return subTrees, totalCost", "def insert(self, element):\n if self.size >= self.maxsize : \n return\n self.size+= 1\n self.Heap[self.size] = element \n \n current = self.size \n \n while self.Heap[current] < self.Heap[self.parent(current)]: \n self.swap(current, self.parent(current)) \n current = self.parent(current)", "def greedy_variable_order(primal_graph:PrimalGraph, pvo:List[List[int]]=None, pool_size=8, cutoff=INF):\n def fill_count(nid):\n \"\"\"\n count number of fill-in edges after removing nid\n number of combinations of nhd - existing edges (nodes in the subgraph of nhd)\n \"\"\"\n n_edges = G.subgraph(G.neighbors(nid)).number_of_edges()\n deg = G.degree[nid]\n n_fill = deg*(deg-1)//2 - n_edges\n return n_fill\n\n def remove_fill_in_edges(nid):\n G.add_edges_from(itertools.combinations(G.neighbors(nid), 2)) # adding edge twice? 
no effect\n G.remove_node(nid)\n\n G = primal_graph.copy() # G = copy.deepcopy(primal_graph)\n if pvo is None:\n pvo = [list(G.nodes())] #[ [all in one block] ]\n ordering = []\n induced_width = 0\n for each_block in pvo:\n processing_nodes = SortedList( [(fill_count(nid), nid) for nid in each_block] ) # ascending order\n while processing_nodes:\n fill, selected_nid = processing_nodes[0]\n if fill != 0: # don't add any edge\n # pick a node in random from a pool of best nodes; each node has prob 1/(fill_in edges)\n scores, candidates = zip(*processing_nodes[:pool_size])\n probs = np.power(np.array(scores), -1.0)\n selected_ind = np.random.choice(len(probs), p=probs/(np.sum(probs)))\n selected_nid = candidates[selected_ind]\n ordering.append(selected_nid)\n # current_width = len(G.neighbors(selected_nid))\n current_width = G.degree[selected_nid]\n if current_width > cutoff:\n return None, induced_width\n if current_width > induced_width:\n induced_width = current_width\n remove_fill_in_edges(selected_nid)\n # recompute score after removing the selected node from primal graph\n processing_nodes = SortedList( [(fill_count(nid), nid) for _, nid in processing_nodes if nid != selected_nid] )\n return ordering, induced_width", "def _insert(self, key):\n if self.min > key:\n self.min = key\n if self.max < key:\n self.max = key\n if key == self.key:\n return self\n self.size += 1\n if key < self.key:\n if self.left is None:\n self.left = self._create_new(key)\n self.left.parent = self\n return self\n self.left = self.left._insert(key)\n else:\n if self.right is None:\n self.right = self._create_new(key)\n self.right.parent = self\n return self\n self.right = self.right._insert(key)\n return self", "def test_insert_increases_size(sample_priorityq):\n assert len(sample_priorityq.heap_list) == 0\n sample_priorityq.insert([5, 1])\n assert len(sample_priorityq.heap_list) == 1\n sample_priorityq.insert([6, 2])\n assert len(sample_priorityq.heap_list) == 2", "def _insort(self, node):\n lo = 0\n hi = len(self._pool)\n f_score = node.get_f_score()\n while lo < hi:\n mid = (lo+hi)//2\n if f_score < self._pool[mid].get_f_score(): hi = mid\n else: lo = mid + 1\n self._pool.insert(lo, node)", "def add_node(p, nodes, retval, size=0):\n if p.id not in nodes:\n nodes[p.id] = len(nodes)\n retval[\"nodes\"].append({\"id\": str(p.id), \"title\": p.title, \"size\": size})", "def capacity_enlarge(self, k):\n count = 0\n idx = self.capacity - 1\n while count < k:\n left = self.tree[idx]\n right = priorityNode(0, None)\n insert_pos = self.tree.shape[0]\n self.tree = np.insert(self.tree, insert_pos, [left,right])\n idx += 1\n count += 1\n\n self.last_capacity = self.capacity # mark down the last capacity for adding operation\n self.capacity += k # Update the value of capacity", "def __init__(self, size):\n\n self._root = Node()\n size_left = int(size/2)\n # Initialization of the tree\n self._root.left = self._createSubtree(self._root, 0, size_left) # [a,b[\n self._root.right = self._createSubtree(self._root, size_left, size)\n self._max_priority = 1", "def resize(self):\n load_factor = self._get_load_factor()\n if load_factor < 0.2:\n self.capacity //= 2\n elif load_factor > 0.7:\n self.capacity *= 2\n else:\n print(f'Resizing unnecessary due to a load factor of {load_factor}:.2f')\n return\n temp_storage = [None] * self.capacity\n for i in range(len(self.storage)):\n node = self.storage[i]\n while node is not None:\n index = self._hash_mod(node.key)\n node_to_add = temp_storage[index]\n if node_to_add is None:\n 
temp_storage[index] = LinkedPair(node.key, node.value)\n else:\n while node_to_add is not None:\n if node_to_add.next is None:\n node_to_add.next = LinkedPair(node.key, node.value)\n break\n node_to_add = node_to_add.next\n node = node.next\n self.storage = temp_storage", "def insert(self, k): \r\n self.heap_array.append(k)\r\n\r\n current_index = len(self.heap_array) - 1\r\n while (current_index > 0):\r\n parent_index = ((current_index-1)//2)\r\n\r\n if int(self.heap_array[current_index]) > int(self.heap_array[parent_index]): # if no vialation of the min heap property \r\n return\r\n else: # if heap property is broken then swap the parent and child that are breaking the prop \r\n self.heap_array[parent_index], self.heap_array[current_index] = self.heap_array[current_index], self.heap_array[parent_index]\r\n current_index = parent_index", "def allocate(self):\n index = 0\n if self.bool_array[index] == True:\n raise CannotAllocateException(\"No ids available\")\n while index < max_val:\n left_child_index = 2 * index + 1\n right_child_index = 2 * index + 2\n if self.bool_array[left_child_index] == False: #There's an unallocated id in the subtree\n index = left_child_index\n elif self.bool_array[right_child_index] == False: #... in the right subtree\n index = right_child_index\n else: #Both subtrees are allocated, this actually means you broke your tree\n raise CannotAllocateException(\"No ids available\")\n id = self.get_index_from_id(index)\n self.update_tree(id)", "def __init__(self, capacity):\n assert isinstance(capacity, int)\n if capacity <= 0:\n raise ValueError(\n 'Sum tree capacity should be positive. Got: {}'.format(capacity))\n\n self.nodes = []\n self.depth = int(np.ceil(np.log2(capacity)))\n self.low_idx = (2**self.depth) - 1 # pri_idx + low_idx -> tree_idx\n self.high_idx = capacity + self.low_idx\n self.nodes = np.zeros(2**(self.depth + 1) - 1) # Double precision.\n self.capacity = capacity\n\n self.highest_set = 0\n\n self.max_recorded_priority = 1.0", "def compute_pool(in_size):\n return (in_size - 2) // 2 + 1", "def insert(self, item):\n # Handle the case where the tree is empty\n if self.is_empty():\n # if self.root is None:\n # TODO: Create a new root node\n self.root = ...\n # TODO: Increase the tree size\n self.size ...\n return\n # Find the parent node of where the given item should be inserted\n parent = self._find_parent_node(item)\n # TODO: Check if the given item should be inserted left of the parent node\n if ...:\n # TODO: Create a new node and set the parent's left child\n parent.left = ...\n # TODO: Check if the given item should be inserted right of the parent node\n elif ...:\n # TODO: Create a new node and set the parent's right child\n parent.right = ...\n # TODO: Increase the tree size\n self.size ...", "def insert(pq):\n\ti = r.randint(0, bound-1)\n\tpq.put(i)\n\tlogging.info(\"insert %s\", i)", "def push(self, node):\n try:\n self._load(True)\n\n # Stow the new node at our head and increment it\n self.db[self.head] = node\n self.head = self.head + 1\n if self.head >= self.size:\n self.head -= self.size\n self.db['head'] = self.head\n\n # If we haven't just also pushed out an old item,\n # increment the count of items in our db.\n if self.count < self.size:\n self.count += 1\n self.db['count'] = self.count\n except KeyError:\n # HACK\n self.clear()", "def insert(self, item):\n index = self.insert_at_next_index(item)\n self.items[index] = item\n while index > 1:\n parent_index = index / 2 # Truncate, e.g. 
4 and 5 have parent 2.\n if self.is_heap_order(self.items[parent_index], self.items[index]):\n # The item does not need to bubble up anymore. Done.\n return\n else:\n # Swap items at index and parent_index\n temp = self.items[index]\n self.items[index] = self.items[parent_index]\n self.items[parent_index] = temp\n index = parent_index\n # The item bubbled all the way to the root. Done.\n return", "def test_insert_will_not_duplicate_value(bst_balanced):\n bst_balanced.insert(6)\n assert bst_balanced.size() == 6", "def insert(self, val):\n if type(val) not in [int, float]:\n raise TypeError('This tree accepts numbers only.')\n if self.contains(val):\n raise ValueError('Node already in tree.')\n new_node = Node(val)\n if self._size == 0:\n self._root = new_node\n self._max_depth = 1\n self._rbal = 1\n self._lbal = 1\n else:\n current_depth = 1\n current_node = self._root\n while val is not current_node._data:\n current_depth += 1\n if val < current_node._data:\n if current_node._lkid:\n current_node = current_node._lkid\n else:\n current_node._lkid = new_node\n new_node._parent = current_node\n self._get_new_max()\n elif val > current_node._data:\n if current_node._rkid:\n current_node = current_node._rkid\n else:\n current_node._rkid = new_node\n new_node._parent = current_node\n self._get_new_max()\n self._size += 1", "def insert(self, p, elem):\n node = self._validate(p)\n new_node = self._Node(elem, idx=self._curr_idx, parent=node._parent)\n self._curr_idx += 1\n node._parent = new_node\n new_node._children.append(node)\n self._size += 1\n\n # Invalidate depths and heights after modifying the tree.\n self._depths, self._heights = None, None\n return self._make_position(new_node)", "def ExpandTopInto(src_queue, trg_queue, cached_states, min_bound=1.0):\n _, best_state = src_queue[0]\n # Produce more candidate items.\n new_states = best_state.ProduceNewStates()\n for new_state in new_states:\n if new_state.state_id not in cached_states:\n score = new_state.score * min_bound\n heapq.heappush(trg_queue, (score, new_state))\n cached_states.add(new_state.state_id)", "def union(self, p, q):\n root_p = self.find(p)\n root_q = self.find(q)\n if root_p == root_q:\n return\n \n #if the sites were isolated make them un-isolated\n if self._nodes[p] == 1:\n self._nodes[p] = 0\n self._iso -= 1\n if self._nodes[q] == 1:\n self._nodes[q] = 0\n self._iso -= 1\n\n # make root of smaller rank point to root of larger rank\n if self._rank[root_p] < self._rank[root_q]:\n self._parent[root_p] = root_q\n #add the small root size to the big root size\n self._size[root_q] += self._size[root_p]\n #check if the big root size is now the biggest\n if self._size[root_q] > self._max:\n self._max = self._size[root_q]\n elif self._rank[root_p] > self._rank[root_q]:\n self._parent[root_q] = root_p\n self._size[root_p] += self._size[root_q]\n if self._size[root_p] > self._max:\n self._max = self._size[root_p]\n else:\n self._parent[root_q] = root_p\n self._size[root_p] += self._size[root_q]\n if self._size[root_p] > self._max:\n self._max = self._size[root_p]\n self._rank[root_p] += 1\n\n self._count -= 1", "def push(self, value):\n idx = self.__capacity - 1 + self.__size\n self.__tree[idx] = value\n self.__update(idx)\n self.__size += 1", "def update(self, probs: torch.Tensor):\n tree, capacity = self._create_tree(probs, self.tree)\n self.tree = tree\n self.capacity = capacity", "def _place_nodes(self, i, j, step, max_nodes):\n points = []\n for k in range(max_nodes):\n while(True):\n t = Point(random.randint(i,i+step), 
random.randint(j,j+step)) \n if all([point.get_distance(t) > self.min_distance for point in points]):\n points.append(t)\n break\n \n for point in points:\n n=Node(self.counter, point)\n self.nodes.append(n)\n self.counter+=1" ]
[ "0.65738744", "0.59414244", "0.58873856", "0.5721595", "0.5673607", "0.5613784", "0.5607082", "0.5599457", "0.5549631", "0.54778767", "0.5471518", "0.53812885", "0.5376035", "0.53730154", "0.53416336", "0.5339837", "0.5338522", "0.5326516", "0.5284125", "0.52726436", "0.5268878", "0.52648264", "0.52543294", "0.52194184", "0.5195453", "0.51873296", "0.5184968", "0.51747066", "0.51698154", "0.51681185" ]
0.7945118
0
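Several of the negative snippets above maintain an A*-style open list by hand, bisecting on each node's f-score (the _insort helpers). As an illustrative sketch only — assuming nodes expose a get_f_score() method, taken from those snippets rather than from this dataset's schema — the same ordered insertion can be done with Python's standard bisect module by storing (f_score, tie, node) tuples, where the tie counter keeps nodes with equal scores from ever being compared directly:

import bisect
import itertools

_tie = itertools.count()  # hypothetical tie-breaker, not part of the original snippets

def insort_by_fscore(pool, node):
    # Tuples compare by f_score first, then by insertion order, so the node
    # object itself never needs to define comparison operators.
    bisect.insort(pool, (node.get_f_score(), next(_tie), node))

def pop_best(pool):
    # The lowest f-score sits at the front of the sorted list.
    return pool.pop(0)[2]

Keeping the list sorted this way gives O(log n) lookup of the best node but O(n) insertion; heapq is the usual alternative when only the minimum is ever popped.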
Return node nearest to query by finding nearest node in each tree and returning the global minimum (including nodes in pool)
def nearest(self, query): nearest_trees = list(map(lambda t: t.get_nearest_neighbor(query), self.trees)) distances_pool = list(zip(map(lambda x: self.dist_fn(x, query), self.pool), self.pool)) best = None best_cost = np.inf for cost, near in nearest_trees + distances_pool: if cost <= best_cost: best = near best_cost = cost return best
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nearest_neighbor(self, xRand):\n # TODO: Make this more efficient?\n #within a neighborhood of XRand, determine the lowest cost to go\n minCost = np.inf\n minNode = None\n\n for node in self.Tree:\n\n cost = self.compute_dist(node.state_time[0:6], xRand)\n\n if cost < minCost:\n minNode = node\n minCost = cost\n\n return minNode", "def nearest_neighbor(self, xRand):\n # TODO: Make this more efficient?\n #within a neighborhood of XRand, determine the lowest cost to go\n minCost = np.inf\n minNode = None\n\n for node in self.Tree:\n\n cost = self.compute_dist(node.state_time[0:6], xRand)\n\n if cost < minCost:\n minNode = node\n minCost = cost\n\n return minNode", "def min_distance(distance, spt_set, self_nodes):\n minimum = sys.maxsize\n minimum_node = None\n for curr_node in self_nodes.values():\n if distance[curr_node.id] < minimum and not spt_set[curr_node.id]:\n minimum = distance[curr_node.id]\n minimum_node = curr_node\n return minimum_node", "def nearest(self, value):\n coords = value[:2] # value only has 2 coords (x, y) right now, but it may have theta in the future\n hits = self.idx.nearest(self.make_bounding_box(coords), 1, objects=False)\n for hit in hits:\n # take the first index in the event of any ties\n return self.nodes[hit]\n \n \n \n #assert that value is valid here\n \"\"\"def recur(node, depth=0):\n closest, distance = node, self.cost(node.value, value)\n if depth < self.max_size:\n for child in node.children:\n (child_closest, child_distance) = recur(child, depth+1)\n if child_distance < distance:\n closest = child_closest\n distance = child_distance \n return closest, distance\n return recur(self.root)[0]\"\"\"", "def find_min(self):\n return min(self.nodes, key=int)", "def _findMinNode(self, s):\n\n minNode = None\n minVal = self.inf\n for vertex in s:\n if self.dist[vertex] < minVal:\n minVal = self.dist[vertex]\n minNode = vertex\n return minNode", "def nearest_sparse(self, query):\n self.best_dist = float(\"inf\")\n self.best_element = None\n self._register_best_element = self._register_best_element_single \n self._nearest_sparse_recursive(self._sparse2seq(query), self.root, 0.0)\n return self.best_element,self.best_dist", "def getMinNode(self):\n currentNode = self.openList[0]\n for node in self.openList:\n if node.g + node.h < currentNode.g + currentNode.h:\n currentNode = node\n return currentNode", "def find_min(self) -> TreeNode:\n node = self.root\n while True:\n if not node.left:\n return node\n node = node.left", "def nodeAtMinimumDistance(self, notFoundYet, distances):\n # found minimal\n minimal = None\n for node in notFoundYet:\n if (distances[node] >= 0): \n if minimal == None or (distances[minimal] > distances[node]):\n minimal = node\n\n # return\n if minimal == -1: return None\n else: return minimal", "def get_nearest_node(self, nodes, pick):\n\n dl = [distance(node.pos[:2], pick) for node in nodes]\n \n return nodes[dl.index(min(dl))]", "def nearest(node):\n count = 0\n distance = 100000\n while count != node_count[0]:\n city = d_list[node.value - 1]\n if city != []:\n if city[0][1] < distance:\n distance = city[0][1]\n new_city = city[0][0]\n closest_city = node.value\n node = node.left\n count = count + 1\n return (closest_city, new_city, distance)", "def find_smallest(self):\n return self._find_smallest(self.root)", "def _find_lowest_cost_node(self) -> str:\n lowest_cost = float(\"inf\")\n lowest_cost_node = None\n for node in self.costs:\n cost = self.costs[node]\n if cost < lowest_cost and node not in self.closed_nodes:\n lowest_cost = 
cost\n lowest_cost_node = node\n return lowest_cost_node", "def find_min(self):\n current = self\n while current.left is not None:\n current = current.left\n return current", "def find_min(self):\n\n if self.left:\n return self.left.find_min()\n\n return self.data", "def get_closest_node(data, loc):\n min_dist = None\n closest = None\n for i in data:\n # Standard min-value search loop\n dist = great_circle_distance(get_coords(data, i), loc)\n if closest is None or dist < min_dist:\n closest = i\n min_dist = dist\n return closest", "def find_min_node(self):\n min_energy = 10 ** 10\n min_id = -1\n for node in self.node:\n if node.energy < min_energy:\n min_energy = node.energy\n min_id = node.id\n return min_id", "def get_min_distance(distances, unvisited_nodes):\n min_value = None\n node = None\n for city, distance in distances.items():\n if city not in unvisited_nodes:\n continue\n if min_value is None:\n node = city\n min_value = distance\n elif distance < min_value:\n node = city\n min_value = distance\n return node", "def _get_nearest_neighbor(self, sample):\n d_min=float('inf') #minimum distance\n node_neighbor=self.start\n\n for iter in self.start:\n d=0 #distance between sample and each node in the node tree\n for j in range(sample.size):\n d+=(iter.state[j]-sample[j])**2\n if(d<d_min):\n d_min=d\n node_neighbor=iter\n\n return node_neighbor", "def closest_node(node, nodes):\n nodes = np.asarray(nodes)\n deltas = nodes - node\n dist_2 = np.einsum(\"ij,ij->i\", deltas, deltas)\n return np.argmin(dist_2), np.min(dist_2)", "def prim_solve(self):\n\n\t\tmin_span_tree = Graph([self.graph.vertices[0]], [])\n\t\tdup_graph = self.graph.duplicate()\n\n\t\tfor i in range(len(self.graph.vertices) - 1):\n\t\t\tneighbour_edges = []\n\t\t\tfor cur in min_span_tree.vertices:\n\t\t\t\tneighbour_edges += dup_graph.get_neighbour_edges(cur)\n\n\t\t\tneighbour_edges.sort(key=lambda x: x[2])\n\t\t\tshortest_edge = neighbour_edges[0]\n\t\t\tnew_node = shortest_edge[0] if shortest_edge[1] in min_span_tree.vertices else shortest_edge[1]\n\n\t\t\tmin_span_tree.edges.append(shortest_edge)\n\t\t\tmin_span_tree.vertices.append(new_node)\n\t\t\tdup_graph.edges.remove(shortest_edge)\n\n\t\treturn min_span_tree", "def min_avail_match_index(sorted_nodes: list[Node], query_node: Node) -> int:\n low = 0\n high = len(sorted_nodes) - 1\n guess = (low + high) // 2\n while True:\n if sorted_nodes[guess].avail == query_node.used:\n return guess\n\n avail_too_low = sorted_nodes[guess].avail < query_node.used\n if low == high:\n if avail_too_low:\n return -1\n else:\n return guess\n else:\n if avail_too_low:\n low = guess + 1\n else:\n high = guess\n guess = (low + high) // 2", "def _find_smallest(node):\n if node.left:\n return BinarySearchTree._find_smallest(node.left)\n else:\n return node", "def node_with_min_fscore(open_set, f_cost): # open_set is a set (of cell) and f_cost is a dict (with cells as keys)\n f_cost_open = dict([a for a in f_cost.items() if a[0] in open_set])\n return min(f_cost_open, key=f_cost_open.get)", "def node_with_min_fscore(open_set, f_cost): # open_set is a set (of cell) and f_cost is a dict (with cells as keys)\n f_cost_open = dict([a for a in f_cost.items() if a[0] in open_set])\n return min(f_cost_open, key=f_cost_open.get)", "def getNearestNode(self, point):\n nodes = list(mm.nodeidx.nearest((point.getPoint().x, point.getPoint().y)))\n return self.node_counter__node.get(nodes[0])", "def get_nearest_node_index(node_list, random_node):\n\n dist_list = [\n (node.x - random_node.x) ** 2 + 
(node.y - random_node.y) ** 2\n for node in node_list\n ]\n minind = dist_list.index(min(dist_list))\n\n return minind", "def _update_min(self):\n tmp = self\n while tmp.left is not None:\n tmp = tmp.left\n return tmp.parent.key", "def min_neighbor_node(g):\r\n return min(g.degree_iter(),key = lambda item:item[1])[0]" ]
[ "0.73361427", "0.73361427", "0.73157585", "0.72876966", "0.7200594", "0.71982765", "0.7060152", "0.70410466", "0.69066334", "0.6884978", "0.68516064", "0.6803381", "0.67801213", "0.6773021", "0.67226744", "0.6675192", "0.6634299", "0.66336787", "0.66082364", "0.65941596", "0.65608114", "0.6542622", "0.6522033", "0.6511728", "0.64980567", "0.64980567", "0.6495254", "0.64136046", "0.6380218", "0.63776535" ]
0.80384624
0
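The positive document for the query above probes every internal tree and a flat pool of loose points, then keeps the global minimum. For reference, that pattern can be sketched standalone as follows, assuming — as in the record — that each tree's get_nearest_neighbor(query) returns a (distance, point) pair and that dist_fn is a plain Euclidean distance:

import numpy as np

def dist_fn(a, b):
    # Euclidean distance between two coordinate sequences; the record leaves dist_fn abstract.
    return float(np.linalg.norm(np.asarray(a) - np.asarray(b)))

def pooled_nearest(trees, pool, query):
    # Candidates: the best hit from each tree plus every loose point in the pool.
    candidates = [t.get_nearest_neighbor(query) for t in trees]
    candidates += [(dist_fn(p, query), p) for p in pool]
    best, best_cost = None, float("inf")
    for cost, point in candidates:
        if cost < best_cost:
            best_cost, best = cost, point
    return best

The pool holds loose points kept outside the trees, so both sources have to be checked on every query.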
Return all nodes within distance radius of the given query, by collating neighbourhoods for each internal tree (and pool)
def neighbourhood(self, query, radius): tree_neighbourhood = lambda tree: list(map(lambda x: x[1], tree.get_all_in_range(query, radius))) neighbourhood_trees = list(itertools.chain.from_iterable(map(tree_neighbourhood, self.trees))) return neighbourhood_trees + list(filter(lambda x: self.dist_fn(x, query) < radius, self.pool))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def neighbours(self):\n\n neighbours = []\n root = self.root\n if self == root:\n return neighbours\n\n ########################\n # IMMEDIATELY ADJACENT #\n sizes = [self.maxs[0] - self.mins[0], self.maxs[1] - self.mins[1]]\n coords = [(self.mins[0] + sizes[0] / 2, self.maxs[1] + sizes[1] / 2,),\n (self.maxs[0] + sizes[0] / 2, self.mins[1] + sizes[1] / 2,),\n (self.mins[0] + sizes[0] / 2, self.mins[1] - sizes[1] / 2,),\n (self.maxs[0] - sizes[0] / 2, self.mins[1] + sizes[1] / 2,),]\n # loop through top, right, bottom, left\n for i in range(4):\n x, y = coords[i]\n query_quad = root.query_xy(x, y)\n if query_quad is not None:\n same_size_idx = query_quad.location[: self.tree_depth]\n same_size_quad = root[same_size_idx]\n neighbours += list(self._get_border_children(same_size_quad, i))\n\n #############\n # DIAGONALS #\n root_sizes = [root.maxs[0] - root.mins[0], root.maxs[1] - root.mins[1]]\n xs, ys = (root_sizes / 2 ** root.max_tree_depth) / 2\n neighbours += [\n root.query_xy(self.mins[0] - xs, self.mins[1] - ys), # TL\n root.query_xy(self.maxs[0] + xs, self.mins[1] - ys), # TR\n root.query_xy(self.mins[0] - xs, self.maxs[1] + ys), # BL\n root.query_xy(self.maxs[0] + xs, self.maxs[1] + ys), # BR\n ]\n\n unique_neighbours = list(set(neighbours))\n try:\n unique_neighbours.remove(self)\n except ValueError:\n pass\n\n return unique_neighbours", "def nearest(self, query):\n nearest_trees = list(map(lambda t: t.get_nearest_neighbor(query), self.trees))\n distances_pool = list(zip(map(lambda x: self.dist_fn(x, query), self.pool), self.pool))\n best = None\n best_cost = np.inf\n for cost, near in nearest_trees + distances_pool:\n if cost <= best_cost:\n best = near\n best_cost = cost\n return best", "def get_n_nearest_neighbors(self, query, n_neighbors):\n if not isinstance(n_neighbors, int) or n_neighbors < 1:\n raise ValueError('n_neighbors must be strictly positive integer')\n neighbors = vptree._AutoSortingList(max_size=n_neighbors)\n nodes_to_visit = [(self, 0)]\n furthest_d = np.inf\n while len(nodes_to_visit) > 0:\n node, d0 = nodes_to_visit.pop(0)\n if node is None or d0 > furthest_d:\n continue\n d = self.dist_fn(query, node.vp)\n if d <= furthest_d: #Replaced < with <=\n neighbors.append((d, node.vp))\n furthest_d, _ = neighbors[-1]\n if node._is_leaf():\n continue\n if node.left_min <= d <= node.left_max:\n nodes_to_visit.insert(0, (node.left, 0))\n elif node.left_min - furthest_d <= d <= node.left_max + furthest_d:\n nodes_to_visit.append((node.left,\n node.left_min - d if d < node.left_min\n else d - node.left_max))\n if node.right_min <= d <= node.right_max:\n nodes_to_visit.insert(0, (node.right, 0))\n elif node.right_min - furthest_d <= d <= node.right_max + furthest_d:\n nodes_to_visit.append((node.right,\n node.right_min - d if d < node.right_min\n else d - node.right_max))\n if len(neighbors) == 0:\n neighbors = [(np.nan, point) for point in self.points[:n_neighbors]] #Return any point(s) if query contains np.nan\n return list(neighbors)", "def _get_node_neighbors(\n self, node: Tuple[int, int], radius: int = 1\n ) -> List[Tuple[int, int]]:\n row_range = range(\n max(node[0] - radius, 0),\n min(node[0] + radius, self.n_rows - 1) + 1,\n )\n column_range = range(\n max(node[1] - radius, 0),\n min(node[1] + radius, self.n_columns - 1) + 1,\n )\n return list(itertools.product(row_range, column_range))", "def get_nodes(self, latlon=False):\n ids = np.where(np.isnan(self.data[:,:,:]))\n i_nan = ids[0][0] ; j_nan = ids[1][0]\n \n def area_neighbours(Area, i_nan, j_nan):\n rows = 
np.array(Area)[:,0]\n cols = np.array(Area)[:,1]\n rows_m = rows-1\n cols_m = cols-1\n rows_p = rows+1\n cols_p = cols+1\n \n p1 = np.array([rows_m,cols]).ravel().reshape(len(rows),2,order='F')\n p2 = np.array([rows_p,cols]).ravel().reshape(len(rows),2,order='F')\n p3 = np.array([rows,cols_m]).ravel().reshape(len(rows),2,order='F')\n p4 = np.array([rows,cols_p]).ravel().reshape(len(rows),2,order='F')\n cond1 = p1[:,0]<0\n cond2 = p2[:,0]>self.dimX-1\n cond3 = p3[:,1]<0\n cond4 = p4[:,1]>self.dimY-1\n if latlon:\n p3[:,1][cond3] = self.dimY-1\n p4[:,1][cond4] = 0\n else:\n p3[:,0][cond3] = i_nan\n p3[:,1][cond3] = j_nan\n p4[:,0][cond4] = i_nan\n p4[:,1][cond4] = j_nan\n p1[:,0][cond1] = i_nan\n p1[:,1][cond1] = j_nan\n p2[:,0][cond2] = i_nan\n p2[:,1][cond2] = j_nan\n p = np.concatenate((p1,p2,p3,p4)).tolist()\n return [i for i in p if i not in self.unavail]\n\n def area_max_correlation(Area, neighbours):\n Rmean = [] ; X = []\n for cell in neighbours:\n R = []\n new_cell = cell[0]*self.dimY + cell[1]\n if new_cell in self.gridcells:\n X.append(cell)\n IDm = np.where(self.gridcells==new_cell)\n Rmean.append(np.nanmean(self.corrs[cells_in_k,IDm]))\n try:\n Rmax = np.nanmax(Rmean)\n except ValueError:\n Rmax = np.nan\n return np.array(X), Rmean, Rmax\n \n def diag_indices(a, k):\n rows, cols = np.diag_indices_from(a)\n if k < 0:\n return rows[-k:], cols[:k]\n elif k > 0:\n return rows[:-k], cols[k:]\n else:\n return rows, cols\n\n #S T E P 1 (C R E A T E N O D E S)\n\n self.nodes = {}\n self.unavail = []\n if latlon:\n neighbour_corrs1 = self.corrs.diagonal(offset=1)\n neighbour_corrs2 = self.corrs.diagonal(offset=self.dimY-1)\n subset = np.arange(0,len(neighbour_corrs2),self.dimY)\n neighbour_corrs2 = neighbour_corrs2[subset]\n neighbour_corrs = np.concatenate((neighbour_corrs1,neighbour_corrs2))\n\n cellIDs1 = diag_indices(self.corrs,1)\n cellIDs2 = diag_indices(self.corrs,self.dimY-1)\n\n cellIDs = (np.concatenate((cellIDs1[0],cellIDs2[0][subset])),\\\n np.concatenate((cellIDs1[1],cellIDs2[1][subset])))\n else:\n neighbour_corrs = self.corrs.diagonal(offset=1)\n cellIDs = diag_indices(self.corrs,1)\n \n cellIDs = (self.gridcells[cellIDs[0]],self.gridcells[cellIDs[1]])\n k = 0\n neighbour_corrs,cellIDs1,cellIDs2 = list(zip(*sorted(zip(neighbour_corrs,cellIDs[0],cellIDs[1]),reverse=True)))\n cell_IDs = (cellIDs1,cellIDs2)\n np.random.seed(2)\n for it in range(len(neighbour_corrs)):\n cells_in_k = []\n i = cell_IDs[0][it]\n j = cell_IDs[1][it]\n r = neighbour_corrs[it]\n \n row_i = int(np.floor(i/self.dimY)) ; col_i = int(i % self.dimY)\n row_j = int(np.floor(j/self.dimY)) ; col_j = int(j % self.dimY)\n \n if ([row_i,col_i] not in self.unavail) & ([row_j,col_j] not in self.unavail):\n if r>self.tau:\n self.nodes.setdefault(k, []).append([row_i,col_i])\n self.nodes.setdefault(k, []).append([row_j,col_j])\n self.unavail.append([row_i,col_i])\n self.unavail.append([row_j,col_j])\n cells_in_k.extend(np.where(self.gridcells==i)[0])\n cells_in_k.extend(np.where(self.gridcells==j)[0])\n\n while True: #expand\n neighbours = area_neighbours(self.nodes[k], i_nan, j_nan)\n X, Rmean, Rmax = area_max_correlation(Area=self.nodes[k], neighbours=neighbours)\n if Rmax > self.tau:\n m = X[Rmean==Rmax].tolist()\n if len(m)>1:\n m = m[np.random.randint(low=0,high=len(m))]\n else:\n m = m[0]\n self.nodes.setdefault(k, []).append(m)\n self.unavail.append(m)\n cells_in_k.extend(np.where(self.gridcells==m[0]*self.dimY+m[1])[0])\n else:\n break\n if len(self.nodes[k]) <= 2:\n del self.nodes[k]\n k += 1\n else:\n 
break\n \n #S T E P 2 (M E R G E N O D E S)\n \n self.unavail = []\n while True:\n Rs = {}\n unavail_neighbours = {}\n num_cells = dict([(area,len(self.nodes[area])) if self.nodes[area] not in self.unavail else (area,np.inf) for area in self.nodes.keys()])\n maxID = min(num_cells.items(), key=operator.itemgetter(1))[0]\n if num_cells[maxID] > 175: #arbitrary choice?\n break\n else:\n cells_in_k = [np.where(self.gridcells==cell[0]*self.dimY+cell[1])[0] for cell in self.nodes[maxID]]\n neighbours = area_neighbours(self.nodes[maxID], i_nan, j_nan)\n for cell in neighbours:\n gcell = cell[0]*self.dimY + cell[1]\n Rmean = []\n cond1 = gcell in self.gridcells\n cond2 = cell not in self.nodes[maxID]\n cond3 = cell not in [k for k, g in itertools.groupby(sorted(itertools.chain(*unavail_neighbours.values())))]\n cond4 = len([area for area, cells in self.nodes.items() if cell in cells]) > 0\n if (cond1) & (cond2) & (cond3) & (cond4):\n nID = [area for area, cells in self.nodes.items() if cell in cells][0]\n unavail_neighbours[nID] = self.nodes[nID]\n X, Rmean, Rmax = area_max_correlation(Area=self.nodes[nID]+self.nodes[maxID], neighbours=self.nodes[nID]+self.nodes[maxID])\n if nID not in Rs: \n Rs[nID] = np.nanmean(Rmean)\n try:\n Rs_maxID = max(Rs.items(), key=operator.itemgetter(1))[0]\n if Rs[Rs_maxID] > self.tau:\n for cell in self.nodes.pop(Rs_maxID, None):\n self.nodes.setdefault(maxID, []).append([cell[0],cell[1]])\n else:\n self.unavail.append(self.nodes[maxID])\n except ValueError:\n self.unavail.append(self.nodes[maxID])", "def _tree_query_radius_parallel_helper(tree, *args, **kwargs):\n return tree.query_radius(*args, **kwargs)", "def test_get_neighborhood_radius_consistent():\r\n grid_spacing = random.uniform(1e-6, 10.0)\r\n center = numpy.random.random(random.randint(1, 3))\r\n\r\n # Find points with radius neighborhood\r\n radius = random.uniform(_distance_to_nearest(grid_spacing, center), grid_spacing*5)\r\n points = ill.get_neighborhood_radius(grid_spacing, center, radius)\r\n\r\n # Every points found within this radius, should be in the points of a larger radius\r\n outer_points = ill.get_neighborhood_radius(grid_spacing, center,\r\n radius+random.uniform(0.0, grid_spacing*5))\r\n\r\n for point in points:\r\n assert point in outer_points", "def expand2(self, network, radius):\n expansion = []\n neighbors = frozenset().union(*[set(network.getrow(g).indices) for g in self.genes]) \n for neighb in neighbors: \n if neighb in self.genes:\n continue\n dist_from_pattern = self.dist[network.getrow(neighb).indices] \n dist_of_added = dist_from_pattern[dist_from_pattern > -1].min() + 1\n if dist_of_added > radius:\n continue\n \n next_pattern = copy.deepcopy(self)\n next_pattern.genes.add(neighb)\n #next_pattern.edges.add((pred, neighb))\n next_pattern.added = neighb \n next_pattern.dist[neighb] = dist_of_added\n expansion += [next_pattern]\n\n return expansion if len(expansion) > 0 else [self]", "def query(self, query: str) -> List[str]:\n nodes_to_explore: Deque[Node] = deque()\n nodes_to_explore.append(self.root)\n\n out: List[str] = []\n while nodes_to_explore:\n current = nodes_to_explore.popleft()\n total_kmers_found = 0\n total_kmers = 0\n for kmer in kmers_in_string(query, self.k):\n if current.filter.contains(kmer):\n total_kmers_found += 1\n total_kmers += 1\n if total_kmers_found >= self.theta * total_kmers:\n for child in current.children:\n nodes_to_explore.append(child)\n if current.num_children() == 0:\n out.append(current.dataset_id)\n return out", "def classify_treeNN(self, 
query_name):\n # 1) Find set of closest neighbors & their class names\n # ie. leaves with at most neighborhood_max_edges edges between itself \n # and the query node\n neighborhood_classes = self.getNeighborhoodClasses(query_name)\n print \"neighborhood \" , neighborhood_classes\n\n # 2) Find aggregate similarity score for each class\n # Use minimum operator for distance measure & maximum for similarity measure\n # EQ 6.1 in Chapt 6, Busa-Fekete et al\n R = {}\n for c,ids in neighborhood_classes.iteritems():\n sim_score = min([nx.shortest_path_length(self.tree, source=query_name, \n target=i, weight='length') for i in ids])\n if DEBUG: print \"\\tCLASS / SIM_SCORE: \", c, sim_score\n R[sim_score] = c # distance measure\n\n min_score = min(R.keys())\n if DEBUG: print \"MIN_SCORE: \", min_score\n\n return R[min_score] #class of minimum distance score", "def nodes_near_point(x, y, kdtree, kd_idx_dic, x_coord='x', y_coord='y',\n n_neighbors=-1,\n radius_m=150,\n verbose=False):\n\n point = [x, y]\n\n # query kd tree for nodes of interest\n if n_neighbors > 0:\n node_names, idxs_refine, dists_m_refine = _query_kd_nearest(\n kdtree, kd_idx_dic, point, n_neighbors=n_neighbors)\n else:\n node_names, idxs_refine, dists_m_refine = _query_kd_ball(\n kdtree, kd_idx_dic, point, radius_m)\n\n if verbose:\n print((\"subgraph node_names:\", node_names))\n\n # get subgraph\n # G_sub = G_.subgraph(node_names)\n\n return node_names, dists_m_refine # G_sub", "def construct_fast_graph_connection(coord_list, radie):\n\n connection_distance = []\n connection = []\n coord_list_tree = scipy.spatial.cKDTree(coord_list)\n for j, data in enumerate(coord_list):\n '''save nodes which are in range'''\n connections_ckd = coord_list_tree.query_ball_point(data, radie)\n for i in connections_ckd:\n #only save upper half of the matrix\n if i > j:\n #save the connection\n connection.append([j, i])\n #save the relative distance of the nodes\n connection_distance.append(np.hypot(coord_list[i,0]-data[0], coord_list[i,1]-data[1]))\n\n connection_distance = np.array(connection_distance)\n connection = np.array(connection)\n\n\n return connection, connection_distance", "def nearest_neighbor_search_radius_modified(tree, target_point, hr, distance, nearest=None, depth=0):\r\n \r\n global nearest_nn\r\n global distance_nn\r\n \r\n if tree is None:\r\n return \r\n # at the end the whole tree is pruned - None\r\n \r\n k = len(target_point.position) - 1 # k = 2\r\n \r\n cur_node = tree.location # current tree's node\r\n left_branch = tree.left_child # its left branch\r\n right_branch = tree.right_child # its right branch\r\n \r\n nearer_kd = further_kd = None\r\n nearer_hr = further_hr = None\r\n left_hr = right_hr = None\r\n \r\n # Select axis based on depth so that axis cycles through all valid values\r\n axis_pom = depth % k\r\n axis = 'x' if axis_pom == 0 else 'y'\r\n \r\n # hr = [(min_val-delta, max_val+delta), (max_val+delta, min_val-delta)] # initial splitting plane\r\n # = [(-2, 22), (22, -2)]\r\n \r\n # split the hyperplane depending on the axis\r\n if axis == 'x':\r\n left_hr = [hr[0], (cur_node.position[0], hr[1][1])]\r\n right_hr = [(cur_node.position[0],hr[0][1]), hr[1]]\r\n \r\n if axis == 'y':\r\n left_hr = [(hr[0][0], cur_node.position[1]), hr[1]]\r\n right_hr = [hr[0], (hr[1][0], cur_node.position[1])]\r\n \r\n # check which hyperplane the target point belongs to\r\n # if the target_point is on the left/bottom side\r\n if target_point.position[axis_pom] <= cur_node.position[axis_pom]:\r\n nearer_kd = left_branch # closer 
sub-tree is the left/bottom_branch\r\n further_kd = right_branch # further sub-tree is the right/top_branch\r\n nearer_hr = left_hr # closer hyperplane is the left/bottom_hyperplane\r\n further_hr = right_hr # futher hyperplane is the right/top_hyperplane\r\n \r\n # if the target_point is on the right/top side\r\n if target_point.position[axis_pom] > cur_node.position[axis_pom]:\r\n nearer_kd = right_branch\r\n further_kd = left_branch\r\n nearer_hr = right_hr\r\n further_hr = left_hr\r\n \r\n # check whether the current node is closer\r\n # print(\"curr node\", cur_node) #test\r\n # print(\"targ node\", target_point)\r\n dist = (cur_node.position[0] - target_point.position[0])**2 + (cur_node.position[1] - target_point.position[1])**2\r\n \r\n if dist < distance:\r\n nearest = cur_node\r\n distance = dist\r\n\r\n if dist < radius: # and all([i != j for i, j in zip(cur_node, target_point)]):\r\n in_range.append(cur_node)\r\n \r\n # go deeper in the tree, pass the sub-tree and hyperplane in which the target_point bellow,\r\n # pass current best distance and closest node, increase the depth \r\n nearest_neighbor_search_radius_modified(nearer_kd, target_point, nearer_hr, distance, nearest, depth+1)\r\n \r\n # once we reached the leaf node we check whether whether we found closer points inside the hypersphere\r\n if distance < distance_nn:\r\n nearest_nn = nearest\r\n distance_nn = distance\r\n \r\n # a nearer point (px,py) could only be in further_kd (further_hr) -> explore it\r\n px = compute_closest_coordinate(target_point.position[0], further_hr[0][0], further_hr[1][0])\r\n py = compute_closest_coordinate(target_point.position[1], further_hr[1][1], further_hr[0][1])\r\n \r\n # check whether it is closer than the current nearest neighbor => whether a hypersphere crosses the hyperplane\r\n dist = (px - target_point.position[0])**2 + (py - target_point.position[1])**2\r\n \r\n # explore the further kd-tree / hyperplane if necessary\r\n if radius > distance_nn: \r\n check_dist = radius\r\n else:\r\n check_dist = distance_nn\r\n \r\n if dist < check_dist:\r\n nearest_neighbor_search_radius_modified(further_kd, target_point, further_hr, distance, nearest, depth+1)\r\n \r\n return in_range", "def expand(self, network, radius):\n expansion = []\n neighbors = frozenset().union(*[set(network.getrow(g).indices) for g in self.genes]) \n for neighb in neighbors: \n if neighb in self.genes:\n continue\n preds = list(set(network.getrow(neighb).indices) & self.genes) \n if len(preds)>2:\n pass\n dist_seed = self.dist[preds].min() + 1\n if dist_seed > radius:\n continue\n \n next_pattern = copy.deepcopy(self)\n next_pattern.genes.add(neighb)\n next_pattern.edges |= set((pred, neighb) for pred in preds) \n next_pattern.added = neighb \n next_pattern.dist[neighb] = dist_seed\n expansion += [next_pattern]\n\n return expansion if len(expansion) > 0 else [self]", "def _neighbours(self, query):\n\n # Because the query and memory keys are aready normalized, cosine\n # similarity can be calculated through a single matrix multiplication.\n similarity = T.dot(query, self.K.T)\n\n # Find the k-nearest neighbours\n k_nbrs = T.argsort(similarity, axis=1)[:, ::-1][:, :self.k_nbrs]\n k_nbrs_y = self.V[k_nbrs.flatten()].reshape(k_nbrs.shape)\n\n # Make a pseude row index via repeat\n idx = T.extra_ops.repeat(T.arange(query.shape[0]), self.k_nbrs)\n k_nbrs_sim = similarity[idx, k_nbrs.flatten()].reshape(k_nbrs.shape)\n\n return k_nbrs, k_nbrs_y, k_nbrs_sim", "def spatial_planner():\n from scipy.spatial import KDTree\n # 
KDTree", "def neighbors(districts, r, c):\r\n n_list = []\r\n if r>0:\r\n n_list += [districts[r-1,c]]\r\n if r<4:\r\n n_list += [districts[r+1,c]]\r\n if c>0:\r\n n_list += [districts[r,c-1]]\r\n if c<4:\r\n n_list += [districts[r,c+1]]\r\n return n_list", "def make_neighbor_tree(population):\n positions = np.array([person.position[:-1] for person in population])\n return KDTree(positions)", "def _neighbors_in_repositories(pool, transitive=False):\n package_ids = set(pool.iter_package_ids())\n neighbors = _compute_dependency_dict(pool, package_ids, transitive)\n return neighbors", "def locate(self, query, return_empty=False, limit=-1):\n query_address = query[0:1]\n query_name = query_address[0]\n pool = self._search(query_name, query_address)\n lc = limit\n if pool:\n query = query[1:]\n if len(query) == 0:\n # Nothing more to query.\n res = [ResultNode(pool.node, rank=0)]\n else:\n # Partial result match found. Now time to dip inside its pool/slice.\n res = pool.locate(query, return_empty, lc)\n if limit != -1:\n lc = lc - len(res)\n else: # No match found. We need to dip in all pools now!\n res = []\n for key in self.data:\n r = self.data[key].locate(query, return_empty, lc) # Sending full query.\n if r:\n res = res + r\n if limit != -1:\n lc = lc - len(r)\n if lc <= 0:\n break\n\n if res or return_empty:\n for rn in res:\n rn.add_parent_address(self.node.name) # Prepedning cuurent name to children ResultNodes' addresses.\n return res\n else:\n return []", "def get_neighbourhood(self, radius: int = 1) -> set:\n if radius == 0:\n return set()\n result = self.neighbours.copy()\n if radius > 1:\n # Recursively get neighbours of neighbours.\n for neighbour in self.neighbours:\n result |= neighbour.get_neighbourhood(radius - 1)\n return result - {self}", "def get_neighbours(self, business, num=5, add_self=False):\n\n def radius_step(radius, num_longtidues, num_latitudes, time):\n \"\"\"expand the search-radius exponentially\"\"\"\n step = int(exp(time))\n radius['long_down'] = radius['long_down'] - step\n if radius['long_down'] <= 0:\n radius['long_down'] = 0\n radius['long_up'] = radius['long_up'] + step\n if radius['long_up'] >= num_longtidues - 1:\n radius['long_up'] = num_longtidues - 1\n radius['lat_down'] = radius['lat_down'] - step\n if radius['lat_down'] <= 0:\n radius['lat_down'] = 0\n radius['lat_up'] = radius['lat_up'] + step\n if radius['lat_up'] >= num_latitudes - 1:\n radius['lat_up'] = num_latitudes - 1\n\n cell = self.get_cell(business)\n b_long = business.longitude\n b_lat = business.latitude\n radius = {'long_down': cell[0], 'long_up': cell[0] + 1,\n 'lat_down': cell[1], 'lat_up': cell[1] + 1}\n ret = []\n time = 0\n inner_radius = 0\n while len(ret) < num and inner_radius < 100:\n found = []\n radius_step(radius, self.longitudes.size, self.latitudes.size,\n time)\n time = time + 1\n for row in range(radius['long_down'], radius['long_up']):\n for col in range(radius['lat_down'], radius['lat_up']):\n if row in self.cells and col in self.cells[row]:\n for item in self.cells[row][col]:\n if item not in ret:\n found.append(item)\n if (len(found) + len(ret)) < num:\n continue\n # We approximate the in-radius of the search-rectangle by half of\n # the distance between the centers of left and right border\n # (Not exactly the in-radius on the surface of a sphereoid, but\n # easier to calculate)\n inner_radius = haversine((self.longitudes[radius['long_down']],\n self.latitudes[cell[1]]),\n (self.longitudes[radius['long_up']],\n self.latitudes[cell[1]])) / 2\n for neighbour in 
found:\n n_long = neighbour['longitude']\n n_lat = neighbour['latitude']\n dist = haversine((b_long, b_lat), (n_long, n_lat))\n # make sure we only include businesses in the in-circle of the\n # search-rectangle\n if dist <= inner_radius and \\\n (add_self or neighbour['index'] != business.name):\n neighbour['distance'] = dist\n ret.append(neighbour)\n return sorted(ret, key=itemgetter('distance'))[:num]", "def insert_nodes(self):\n neighbour_max_distance = 5\n new_nodes = []\n for node in self.nodes:\n left_distance = node.get_distance(node.neighbour1)\n right_distance = node.get_distance(node.neighbour2)\n if left_distance > neighbour_max_distance:\n # halfway\n half_point = (\n node.x + (node.neighbour1.x - node.x) / 2,\n node.y + (node.neighbour1.y - node.y) / 2\n )\n new_node = Node(half_point)\n node.neighbour1.connect(node.neighbour1.neighbour1, new_node)\n new_node.connect(node.neighbour1, node)\n node.connect(new_node, node.neighbour2)\n new_nodes.append(new_node)\n new_nodes.append(node)\n\n if right_distance > neighbour_max_distance:\n # halfway\n half_point = (\n node.x + (node.neighbour2.x - node.x) / 2,\n node.y + (node.neighbour2.y - node.y) / 2\n )\n new_node = Node(half_point)\n node.neighbour2.connect(new_node, node.neighbour2.neighbour2)\n new_node.connect(node, node.neighbour2)\n node.connect(node.neighbour1, new_node)\n new_nodes.append(new_node)\n\n return new_nodes", "def fetchNodes(tree):\n if tree.results is None: #Check if the node is a branch\n condItems = {} #Initialize a container for the node conditions from lower branches\n v = [\"true\", \"false\"] #\"Veracity values\"\n for branch, veracity in [(tree.tb, v[0]), (tree.fb, v[1])]: #iterate over this node's true and false child nodes\n lower_results = fetchNodes(branch)\n if len(lower_results) == 1: #Check if child node is actually a leaf. 
If so,\n lower_results.insert(0, (tree.col, tree.value, veracity))\n condItems[veracity] = [lower_results] #Initialize the condition needed to reach that leaf\n else:\n condItems[veracity] = [] #If the child is not a leaf, initialize an empty list to contain its updated conditions\n for item in lower_results: #Iterate over each set of node conditions that stem from this branch\n new_descriptor = deepcopy(item) #make a deep copy of the list of node conditions from the lower level nodes\n #insert this node's condition at the beginning of each of the node conditions from the lower levels\n new_descriptor.insert(0, (tree.col, tree.value, veracity)) \n condItems[veracity].append(new_descriptor) #append the updated set of node conditions to the branches items\n node_conditions = deepcopy(condItems[v[0]]) #Initialize the complete list of node conditions that stem from this node\n node_conditions.extend(deepcopy(condItems[v[1]])) #Add the node conditions from the second branch of this node\n return node_conditions #Send the full set of node conditions from this node up to the higher nodes.\n else: #If the node is a leaf, return the dictionary of results\n return [tree.results]", "def iter_dist(self):\n self.makeTree()\n coords = self.coords\n sd = selfdistance\n for i in self.loopindices:\n dists, inds = self.nntree.query(coords[i], self.nnmaxcount,\n distance_upper_bound=self.nncutoff)\n yield coords[i], dists.compress((dists > sd) & ~np.isinf(dists))", "def get_neighbours_round(self, cell, radius):\n\t\tx,y = cell.find_id()\n\t\tlength = self.space.shape[1]\n\t\twidth = self.space.shape[0]\n\t\tif (length == 0 or width == 0 or x < 0 or x >= length or y < 0 or y >= width or radius < 2):\n\t\t\treturn []\n\t\tneighs = [(i,j) for i in range(y-radius,y+radius+1) if 0<=i<width for j in range(x-radius,x+radius+1) if (0<=j<length)]\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\ti , j = neigh\n\t\t\tif round(math.sqrt((j-x)**2+(i-y)**2),4) < round(radius,4):\n\t\t\t\tneighbours.append(self.space[neigh[0],neigh[1]])\n\t\treturn neighbours", "def build_rdn(coords, r, **kwargs):\n \n tree = BallTree(coords, **kwargs)\n ind = tree.query_radius(coords, r=r)\n # clean arrays of neighbors from self referencing neighbors\n # and aggregate at the same time\n source_nodes = []\n target_nodes = []\n for i, arr in enumerate(ind):\n neigh = arr[arr != i]\n source_nodes.append([i]*(neigh.size))\n target_nodes.append(neigh)\n # flatten arrays of arrays\n source_nodes = np.fromiter(itertools.chain.from_iterable(source_nodes), int).reshape(-1,1)\n target_nodes = np.fromiter(itertools.chain.from_iterable(target_nodes), int).reshape(-1,1)\n # remove duplicate pairs\n pairs = np.hstack((source_nodes, target_nodes))\n pairs = remove_duplicate_pairs(pairs)\n return pairs", "def get_neighbouring_nodes(node) :\r\n\r\n connected_nodes = [] #A list of the connected nodes\r\n\r\n #Checking if the node belongs to the 1st row\r\n if(node.coords[0] != 0) :\r\n connected_node = Node((node.coords[0] - 1, node.coords[1]), goal_pos, node.gn_value - 1)\r\n #Checking if the node is an obstacle\r\n if(not connected_node.coords in obstacle_coords) :\r\n connected_nodes.append(connected_node)\r\n\r\n #Checking if the node belongs to the last row\r\n if(node.coords[0] != grid_dims[0] - 1) :\r\n connected_node = Node((node.coords[0] + 1, node.coords[1]), goal_pos, node.gn_value - 1)\r\n #Checking if the node is an obstacle\r\n if(not connected_node.coords in obstacle_coords) :\r\n connected_nodes.append(connected_node)\r\n\r\n #Checking 
if the node belongs to the 1st column\r\n if(node.coords[1] != 0) :\r\n connected_node = Node((node.coords[0], node.coords[1] - 1), goal_pos, node.gn_value - 1)\r\n #Checking if the node is an obstacle\r\n if(not connected_node.coords in obstacle_coords) :\r\n connected_nodes.append(connected_node)\r\n\r\n #Checking if the node belongs to the 1st column\r\n if(node.coords[1] != grid_dims[1] - 1) :\r\n connected_node = Node((node.coords[0], node.coords[1] + 1), goal_pos, node.gn_value - 1)\r\n #Checking if the node is an obstacle\r\n if(not connected_node.coords in obstacle_coords) :\r\n connected_nodes.append(connected_node)\r\n\r\n return connected_nodes", "def NN_finder_all(initial_config_data, cut_off_distance, box_dim, path_to_test_dir, atom_list = None, save_results = False, re_calc = False):\n\t# set up path_to_file and check results out of this function before calling it\n\t# if check_results is True: \n\t# if path_to_file is None or os.path.exists(path_to_file):\n\t# raise Exception(\"NN results file not found, please specify the correct path to the file\")\n\t\t\n\tpath_to_nn_results = path_to_test_dir + \"/nn_results_dict.pkl\"\n\t\n\tif re_calc is False:\n\t\tif os.path.exists(path_to_nn_results):\n\t\t\tprint \"nn results dictionary already calculated and saved in pkl file, skip calculation\"\n\t\t\treturn pickle.load(open(path_to_nn_results,'r'))\n\tnn = dict()\n\t\t\n\t# if there is no atom_list specified, use all atoms in initial_config_data\n\tif atom_list is None:\n\t\tatom_list = (initial_config_data[\"item\"]).tolist()\n\t\n\t_data = initial_config_data\n\t\n\tgroups = Atom.classify_df(_data)\n\t\n\t#_atom_data = initial_config_data[['x','y','z']]\n\t\n\t_interested_data = _data.loc[_data['item'].isin(atom_list)]\n\t\n\tinterested_groups = Atom.classify_df(_interested_data)\n\t\n\t#_interested_atom = _interested_data[['x','y','z']]\n\t\n\t\n\t# build the efficient nearest neighbor KDTree algorithm\n\t# default distance metric Euclidian norm p = 2\n\t# create tree object using the larger points array\n\tfor (i, int_group) in interested_groups.items():\n\t\tfor (j, atom_group) in groups.items():\n\t\t\t# comparing atom_type_i and atom_type_j\n\t\t\tfor pair in [(i,j),(j,i)]:\n\t\t\t\tif pair in cut_off_distance:\n\t\t\t\t\t curr_cut_off = cut_off_distance[pair]\n\t\t\t\n\t\t\t# iterate over each row seems inefficient for (index, curr_atom) in int_group.iterrows()\n\t\t\tresult_tree = PeriodicCKDTree(box_dim, atom_group[['x','y','z']].values)\n\t\t\tresult_groups = result_tree.query_ball_point(int_group[['x','y','z']].values, curr_cut_off)\n\t\t\t#indices = np.unique(IT.chain.from_iterable(result_groups))\n\t\t\t\n\t\t\t#for (int_NN,(index,int_atom)) in (result_groups,int_group.iterrows()):\n\t\t\tk = 0\n\t\t\tfor index,int_atom in int_group.iterrows():\n\t\t\t\t# int_NN is a list of index of NN, index is according to the order\n\t\t\t\t# in atom_group \n\t\t\t\t# curr_NN is a dataframe storing NN found for current atom_group\n\t\t\t\tint_NN = result_groups[k]\n\t\t\t\tcurr_NN = atom_group.iloc[int_NN]\n\t\t\t\tif int_atom[\"item\"] not in nn:\n\t\t\t\t\tnn[int_atom[\"item\"]] = curr_NN\n\t\t\t\telif int_atom[\"item\"] in nn:\n\t\t\t\t\tnn[int_atom[\"item\"]] = nn[int_atom[\"item\"]].append(curr_NN)\t\t\t\t\n\t\t\t\tk = k + 1\t\n\t# it is best practice to save this NN dictionary results into a pkl file \n\t# to prevent rerun, if this file exists, let user know that\n\t# the file_of_nearest_neighbor exists before calling it\n\tif save_results is True:\n\t\twith 
open(path_to_nn_results, 'w') as f:\n\t\t\tpickle.dump(nn,f)\n\t\t\tf.close()\n\treturn nn", "def find_all_nearest_neighbours(point_cloud:np.ndarray) -> np.ndarray:\n pass" ]
[ "0.6189898", "0.6067114", "0.5967871", "0.5918009", "0.5853346", "0.5850977", "0.57527864", "0.5742722", "0.57373124", "0.57318693", "0.5728062", "0.5716135", "0.5708966", "0.569612", "0.5669883", "0.5667582", "0.5654472", "0.5650004", "0.5642539", "0.56394213", "0.5612371", "0.560518", "0.5586471", "0.5562986", "0.55626965", "0.55538255", "0.5531566", "0.5522793", "0.550757", "0.5492565" ]
0.79401344
0
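Like the previous record, the positive document here collates per-tree range queries with a brute-force scan of the loose pool. A minimal sketch, assuming each tree's get_all_in_range(query, radius) yields (distance, point) pairs as in the record:

import itertools

def pooled_neighbourhood(trees, pool, query, radius, dist_fn):
    # Points returned by each tree's range query; the distance component is dropped.
    from_trees = itertools.chain.from_iterable(
        (point for _, point in t.get_all_in_range(query, radius)) for t in trees
    )
    # Brute-force filter over the loose pool, matching the strict '<' used in the record.
    from_pool = (p for p in pool if dist_fn(p, query) < radius)
    return list(from_trees) + list(from_pool)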
Add a new pair of products with times purchased together; if the pair existed, just increase the times purchased, otherwise just add the new pair
def add(self, prod1_name, prod2_name, times): if prod1_name == prod2_name: return try: self._purchased.update({PROD1: prod1_name, PROD2: prod2_name, TIMES: {'$exists': True}}, {'$inc': {TIMES: times}}, True ) self._purchased.update({PROD1: prod2_name, PROD2: prod1_name, TIMES: {'$exists': True}}, {'$inc': {TIMES: times}}, True ) print('add: succeeded') return True except pyerrors.OperationFailure as ex: print(ex.value) except pyerrors.PyMongoError as ex: print(ex.value) print('add: failed') return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add(self, product, order_type, pickup, drop, quantity=1, update_quantity=False):\n\n #calculate duratin\n start_date = datetime.strptime(pickup, \"%Y-%m-%d %H:%M\")\n end_date = datetime.strptime(drop, \"%Y-%m-%d %H:%M\")\n duration = end_date-start_date\n\n #end of calculation\n\n product_id = str(product.id)\n product_category = str(product.category)\n if product_id not in self.cart:\n if order_type == 'HR':\n self.cart[product_id] = {'quantity': 1, 'price': str(product.price_hour), 'order_type':order_type, 'pickup':pickup, 'drop':drop, 'duration':duration.seconds/3600, 'total_price':str(Decimal(product.price_hour) * Decimal(duration.seconds/3600)), 'category': product_category}\n elif order_type == 'DY':\n if duration.days == 0 and duration.seconds != 0 and duration.seconds/3600>15:\n self.cart[product_id] = {'quantity': 1, 'price': str(product.price_day), 'order_type':order_type, 'pickup':pickup, 'drop':drop, 'duration':duration.days+1, 'total_price':str(Decimal(product.price_day) * Decimal(duration.days+1)), 'category': product_category}\n elif duration.days>0 and duration.seconds>0:\n self.cart[product_id] = {'quantity': 1, 'price': str(product.price_day), 'order_type':order_type, 'pickup':pickup, 'drop':drop, 'duration':duration.days+1, 'total_price':str(Decimal(product.price_day) * Decimal(duration.days+1)), 'category': product_category}\n else:\n self.cart[product_id] = {'quantity': 1, 'price': str(product.price_day), 'order_type':order_type, 'pickup':pickup, 'drop':drop, 'duration':duration.days, 'total_price':str(Decimal(product.price_day) * Decimal(duration.days)), 'category': product_category}\n elif order_type == 'WK':\n self.cart[product_id] = {'quantity': 1, 'price': str(product.price_week), 'order_type':order_type, 'pickup':pickup, 'drop':drop, 'duration':math.ceil(duration.days/7), 'total_price':str(Decimal(product.price_week) * Decimal(math.ceil(duration.days/7))), 'category': product_category}\n else:\n self.cart[product_id] = {'quantity': 1, 'price': str(product.price_hour), 'order_type':order_type, 'pickup':pickup, 'drop':drop}\n\n if update_quantity and (product_category!='BIKES' or product_category!='CARS') :\n # if update_quantity:\n # self.cart[product_id]['quantity'] = quantity\n self.cart[product_id]['quantity'] += quantity\n else:\n # self.cart[product_id]['quantity'] += quantity\n self.cart[product_id]['quantity'] = quantity\n self.save()", "def _append_pairs(new_pairs):\n desired_pairs = restore_pairs() or []\n desired_pairs += new_pairs\n print(\"Adding {} new pairs, queue has {} pairs\".format(len(new_pairs), len(desired_pairs)))\n save_pairs(desired_pairs)", "def handle(self, *args, **options):\n\n ProductPair.objects.all().delete()\n person = Person.objects.get(name='klapshov')\n meals = Meal.objects.filter(person=person)\n for m in meals:\n products = [i.product for i in m.intake_set.all()]\n print(products)\n pairs = []\n for n in range(0, len(products)-1):\n for nn in range(n+1, len(products)):\n if n != nn:\n pairs.append([products[n], products[nn]])\n print(pairs)\n print(len(products), len(pairs))\n\n # REWRITE!!!!!!!!\n for p in pairs:\n try:\n pp = ProductPair.objects.get(product1=p[0], product2=p[1])\n except ProductPair.DoesNotExist:\n pp = ProductPair()\n pp.product1 = p[0]\n pp.product2 = p[1]\n pp.count = 1\n p2p = ProductToPerson.objects.get(\n person=person, product=p[0]\n )\n pp.ratio = round(pp.count/p2p.intakes_count*100)\n pp.save()\n else:\n pp.count += 1\n p2p = ProductToPerson.objects.get(\n person=person,\n product=p[0]\n )\n 
pp.ratio = round(pp.count/p2p.intakes_count*100)\n pp.save()\n\n for p in pairs:\n p.reverse()\n try:\n pp = ProductPair.objects.get(product1=p[0], product2=p[1])\n except ProductPair.DoesNotExist:\n pp = ProductPair()\n pp.product1 = p[0]\n pp.product2 = p[1]\n pp.count = 1\n p2p = ProductToPerson.objects.get(\n person=person,\n product=p[0]\n )\n pp.ratio = round(pp.count/p2p.intakes_count*100)\n pp.save()\n else:\n pp.count += 1\n p2p = ProductToPerson.objects.get(\n person=person,\n product=p[0]\n )\n pp.ratio = round(pp.count/p2p.intakes_count*100)\n pp.save()", "def add(self, product):\n pass", "def add(self, product):\n product_id = str(product.id)\n self.wishlist[product_id] = {'price': str(product.price)}\n self.save()", "async def prepare_trades(self, pair: str):\n\n if pair not in self.trades:\n self.trades[pair] = {\n 'last_open_time': 0.0,\n 'rebuy_count': 0,\n 'open': [],\n 'closed': []\n }", "def add_item(product, price):\n ADD_PRODUCTS[product] = price", "def add_product(self, name, energy_points):\n now = datetime.datetime.now()\n date = \"{}-{}-{}\".format(now.year, now.month, now.day)\n Product(productName=name, energyPoints=energy_points, date=date)", "def add_pairing(self, pairing): \n \n self.pairings.append(pairing)\n\n\n # Fill in the rest", "def new_product(self, product_price=None, lead_time=None):\n self.is_sold = False\n\n if not lead_time:\n lead_time = self.default_lead_time\n\n if not product_price:\n product_price = self.default_product_price\n\n self.remaining_slots = lead_time\n self.product_price = product_price", "def test_multiple_creates_do_not_increase_products(self):\n for i in xrange(0, 10):\n modified_po = copy.deepcopy(base_purchase_order)\n self.assertEqual(Supply.objects.get(pk=1).quantity, 10)\n \n resp = self.client.post('/api/v1/purchase-order/', format='json', data=modified_po)\n \n self.assertEqual(resp.status_code, 201, msg=resp)\n \n po_data = resp.data\n self.assertEqual(po_data['status'], 'AWAITING APPROVAL')\n\n item1 = po_data['items'][0]\n #self.assertEqual(item1['supply']['id'], 1)\n self.assertEqual(item1['status'], u'Ordered')\n\n item2 = po_data['items'][1]\n #self.assertEqual(item1['supply']['id'], 2)\n self.assertEqual(item1['status'], u'Ordered')\n \n #Test database values\n po = PurchaseOrder.objects.get(pk=resp.data['id'])\n self.assertEqual(po.status, 'AWAITING APPROVAL')\n for item in po.items.all():\n self.assertEqual(item.status, u\"Ordered\")\n \n supplier = Supplier.objects.get(pk=1)\n\n supply = Supply.objects.get(pk=1)\n self.assertEqual(supply.quantity, 10)\n self.assertEqual(supply.products.filter(supplier=supplier).count(), 1)\n\n supply = Supply.objects.get(pk=2)\n self.assertEqual(supply.quantity, 10)\n self.assertEqual(supply.products.filter(supplier=supplier).count(), 1)", "def add_item(self, product, price):\n if not product in self.items_in_cart:\n self.items_in_cart[product] = price\n print (product + \" added.\")\n else:\n print (product + \" is already in the cart.\")", "def __init__(self, pair, offers=None):\n self.book = {}\n self.book[Trade.WAY_BUY] = []\n self.book[Trade.WAY_SELL] = []\n self.pair = pair\n self.timestamp = 0\n\n if offers:\n for offer in offers:\n self.add(offer)\n self.sum_up()", "def shopping_in_store(goods_list) -> tuple:\n result = 0\n for product in goods_list:\n if stock[product] > 0:\n result += prices[product]\n stock[product] -= 1\n if stock[product] == 0:\n print('Note! Some of your needed products are out of stock. 
Please come later or pre-order them.')\n return f'Price is: {result}', '\\n'.join(f'{k}: {v}' for k, v in stock.items())", "def insert(self, product):\n pass", "def test_adding_item_to_list(create_shopping_item, create_shopping_list):\n shopping_list = create_shopping_list\n items_before = shopping_list.items.values_list().count()\n new_item = create_shopping_item\n shopping_list.items.add(new_item)\n items_after = shopping_list.items.values_list().count()\n assert items_after > items_before\n assert items_before == 0\n assert items_after == 1", "def insert_product_list(self, prod_list):\n try:\n json_list = []\n for item in prod_list:\n json_list.append({PROD1: item[0], PROD2: item[1], TIMES: item[2]})\n json_list.append({PROD1: item[1], PROD2: item[0], TIMES: item[2]})\n if len(json_list) > 20000:\n self._purchased.insert(json_list)\n del json_list[:]\n print('just inserted another 20000 items')\n print('insert_product_list: succeeded')\n return True\n except pyerrors.OperationFailure as ex:\n print(ex.value)\n except pyerrors.PyMongoError as ex:\n print(ex.value)\n print('insert_product_list: failed')\n return False", "def add_pair(self, pair: StudentPair, back_mode: bool = False) -> None:\n week_day = pair[\"dates\"].get_week_day()\n pairs_day = self._schedule_list[week_day]\n\n pairs_day.add_pair(pair)\n\n self._indexes[week_day.index_of()] = pairs_day.rows()\n self._rows = sum(self._indexes)\n\n if not back_mode:\n self._change = True", "def add_item(self, product, price):\r\n if not product in self.items_in_cart:\r\n self.items_in_cart[product] = price\r\n print(product + \" added.\")\r\n else:\r\n print(product + \" is already in the cart.\")", "def recommend_next_product(self, prod_list):\n scores = defaultdict(float)\n for prod in prod_list:\n for item in self._purchased.find({PROD1: prod}):\n if not item[PROD2] in prod_list:\n scores[item[PROD2]] += math.log(item[TIMES])\n if len(scores) == 0:\n return None\n max_tuple = max(scores.items(), key = operator.itemgetter(1))\n return max_tuple[0]", "def assign(self, prod1_name, prod2_name, times):\n try:\n self._purchased.update({PROD1: prod1_name, PROD2: prod2_name},\n {'$set': {TIMES: times}},\n True\n )\n self._purchased.update({PROD1: prod2_name, PROD2: prod1_name},\n {'$set': {TIMES: times}},\n True\n )\n print('assign: succeeded')\n return True\n except pyerrors.OperationFailure as ex:\n print(ex.value)\n except pyerrors.PyMongoError as ex:\n print(ex.value)\n print('assign: failed')\n return False", "def add_pair(self, new_pair: StudentPair) -> None:\n self.check_possible_added(new_pair)\n self._buffer.append(new_pair)\n self.reallocate()", "def test_add_with_end_shelf_life(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"-30\", \"40\", \"2020-12-1\", \n \"3\", \"2020-12-1\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def new_purchase_row(row):\n actual = row[\"products\"][1:-1] # Products purchased currently\n actual = set([int(p.strip()) for p in actual.strip().split(\",\")])\n liked = set([p_dict[i] for i in user_product_matrix[u_dict[row[\"user_id\"]]].indices]) # User's purchase history\n new_purchase = actual - liked\n return new_purchase", "async def add_payment(self, values):\n pipe = self.redis.pipeline()\n result = dict()\n record_max_id = int(await self.redis.get('record_max_id'))\n for item in values:\n record_max_id += 1\n value_data = values[item]\n await self.build_key(attribute=item, value=value_data['value'])\n value = value_data.pop('value')\n search_key_result = 
await self.redis.keys(self.key)\n if len(search_key_result) == 0:\n await self.build_key(attribute=item, value=value, record_id=record_max_id)\n ttl = value_data.pop('ttl')\n pipe.hmset_dict(self.key, values[item])\n result[item] = value_data\n if ttl is not None:\n pipe.expire(self.key, int(ttl)*3600)\n result[item]['value'] = value\n result[item]['record_id'] = record_max_id\n else:\n result[item] = {'message': \"Item with this value is already existed\"}\n pipe.set('record_max_id', record_max_id)\n await pipe.execute()\n return result", "def add_product():\n name = input(\"\\nPlease enter the name of the new product: \")\n\n quantity = input(\"Please enter the quantity of the new product: \")\n while quantity.isdigit() == False:\n print(\"Please enter a valid number.\")\n quantity = input(\"Please enter the quantity of the new product: \")\n quantity = int(quantity)\n\n price = input(\"Please enter the price of the new product(in dollars): \").strip(\"$\")\n while True:\n try:\n price = float(price)\n break\n except ValueError:\n print(\"Please enter a valid price\")\n price = input(\"Please enter the price of the new product: \")\n\n price = price * 100\n\n try:\n Product.create(product_name=name,\n product_price=price,\n product_quantity=quantity)\n latest_item = Product.select().order_by(Product.product_id.desc()).get()\n print(f\"You just added {latest_item.product_name} as the {latest_item.product_id}th item in the inventory.\\n\")\n\n except IntegrityError:\n to_update = Product.get(product_name=name)\n to_update.product_name = name\n to_update.product_price = price\n to_update.product_quantity = quantity\n to_update.date_updated = datetime.datetime.now()\n to_update.save()\n print(f\"You just updated {to_update.product_name}\\n\")\n input(\"\\nPress ENTER to continue\")\n clear()", "def add(self, product, qty):\n product_id = str(product.id)\n\n if product_id in self.basket:\n self.basket[product_id]['qty'] = qty\n else:\n self.basket[product_id] = {'price': str(product.price), 'qty': qty}\n\n self.save()", "def add(self, product, product_qty):\n product_id = str(product.id)\n if product_id in self.cart:\n self.cart[product_id][\"qty\"] = product_qty\n else:\n self.cart[product_id] = {'price': str(product.price), 'qty':int(product_qty)}\n self.save()", "def add_prod_to_cart():\n prod_id = int(request.vars.prod_id)\n logger.info(\"adding prod_id {%s} to cart\" %prod_id)\n prod_quant = request.vars.prod_quant\n logger.info(\"quantity {%s}\" %prod_quant)\n db.cart.update_or_insert(\n # try to find a record of the user/prod_id combo or insert\n (db.cart.user_email == auth.user.email) & (db.cart.prod_id == prod_id) ,\n prod_id = prod_id,\n user_email = auth.user.email,\n quantity = prod_quant\n )\n return \"ok\" # Might be useful in debugging.", "def _commuting_products(q_1: Q, q_2: Q) -> Dict:\n\n s_t, s_x, s_y, s_z = q_1.t, q_1.x, q_1.y, q_1.z\n q_2_t, q_2_x, q_2_y, q_2_z = q_2.t, q_2.x, q_2.y, q_2.z\n\n product_dict = {\n \"tt\": s_t * q_2_t,\n \"xx+yy+zz\": s_x * q_2_x + s_y * q_2_y + s_z * q_2_z,\n \"tx+xt\": s_t * q_2_x + s_x * q_2_t,\n \"ty+yt\": s_t * q_2_y + s_y * q_2_t,\n \"tz+zt\": s_t * q_2_z + s_z * q_2_t,\n }\n\n return product_dict" ]
[ "0.6113706", "0.6062423", "0.6022032", "0.57448155", "0.5658234", "0.5507529", "0.54974794", "0.5474968", "0.5401568", "0.5374355", "0.5366406", "0.5358892", "0.5353692", "0.5345051", "0.53433794", "0.5340769", "0.53113306", "0.529849", "0.529575", "0.5261819", "0.52604914", "0.52561927", "0.5214009", "0.5198811", "0.5181825", "0.51763374", "0.5167796", "0.51582766", "0.51497823", "0.51493853" ]
0.65889096
0
Assign a fixed times of the given pair of products with times purchased together
def assign(self, prod1_name, prod2_name, times): try: self._purchased.update({PROD1: prod1_name, PROD2: prod2_name}, {'$set': {TIMES: times}}, True ) self._purchased.update({PROD1: prod2_name, PROD2: prod1_name}, {'$set': {TIMES: times}}, True ) print('assign: succeeded') return True except pyerrors.OperationFailure as ex: print(ex.value) except pyerrors.PyMongoError as ex: print(ex.value) print('assign: failed') return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def given_a_series_of_prices(self, prices):\n timestamps = [datetime(2015, 5, 28), datetime(2015, 5, 29),\n datetime(2015, 5, 30)]\n for timestamp, price in zip(timestamps, prices):\n self.goog.update(timestamp, price)", "def promotion(time, sum_price):\n time = second_to_minute(time)\n for (pro, price) in [(24*60, 150), (12*60, 100), (8*60, 80), (3*60, 40), (60, 15), (20, 10)]:\n sum_price = sum_price + (time//pro)*price\n time = time % pro\n oneminute = time - 3\n return sum_price + oneminute if oneminute > 0 else sum_price", "def set_times(self):\n if self.anchor == \"P\":\n # specified pickup time, 5 minutes early.\n self.earliestPickup = tools.time_to_seconds(str(self.times)) - 300\n # given pickup time, we are 15 minutes late.\n self.latestPickup = tools.time_to_seconds(str(self.times)) + 900\n # We are given pickup time, caluclate pickup time, and are 5 min early\n self.earliestDropoff = tools.time_to_seconds(self.times) - 300 + self.time_for_travel()\n # we are given pickup time, add travel time, and are 20 minutes\n self.latestDropoff = tools.time_to_seconds(self.times) + self.time_for_travel() + 900\n else:\n # this means the dropoff time is given. calculate the time it takes to drive, and then 5 minutes early\n self.earliestPickup = tools.time_to_seconds(str(self.times)) - self.time_for_travel() - 1200\n # given dropoff time, we calucate when to arrive, and then are 15 minutes late.\n self.latestPickup = tools.time_to_seconds(str(self.times)) - self.time_for_travel()\n # we are given dropoff time. It's earliest pickup time + travel time\n self.earliestDropoff = tools.time_to_seconds(self.times) - 1200\n self.latestDropoff = tools.time_to_seconds(self.times)", "def get_timed_product(fromday, endday, shop):\n queryset = OrderDetail.objects.filter(shop=shop).filter(start_time__gte=fromday).filter(start_time__lte=endday)\n return queryset", "def add(self, product, order_type, pickup, drop, quantity=1, update_quantity=False):\n\n #calculate duratin\n start_date = datetime.strptime(pickup, \"%Y-%m-%d %H:%M\")\n end_date = datetime.strptime(drop, \"%Y-%m-%d %H:%M\")\n duration = end_date-start_date\n\n #end of calculation\n\n product_id = str(product.id)\n product_category = str(product.category)\n if product_id not in self.cart:\n if order_type == 'HR':\n self.cart[product_id] = {'quantity': 1, 'price': str(product.price_hour), 'order_type':order_type, 'pickup':pickup, 'drop':drop, 'duration':duration.seconds/3600, 'total_price':str(Decimal(product.price_hour) * Decimal(duration.seconds/3600)), 'category': product_category}\n elif order_type == 'DY':\n if duration.days == 0 and duration.seconds != 0 and duration.seconds/3600>15:\n self.cart[product_id] = {'quantity': 1, 'price': str(product.price_day), 'order_type':order_type, 'pickup':pickup, 'drop':drop, 'duration':duration.days+1, 'total_price':str(Decimal(product.price_day) * Decimal(duration.days+1)), 'category': product_category}\n elif duration.days>0 and duration.seconds>0:\n self.cart[product_id] = {'quantity': 1, 'price': str(product.price_day), 'order_type':order_type, 'pickup':pickup, 'drop':drop, 'duration':duration.days+1, 'total_price':str(Decimal(product.price_day) * Decimal(duration.days+1)), 'category': product_category}\n else:\n self.cart[product_id] = {'quantity': 1, 'price': str(product.price_day), 'order_type':order_type, 'pickup':pickup, 'drop':drop, 'duration':duration.days, 'total_price':str(Decimal(product.price_day) * Decimal(duration.days)), 'category': product_category}\n elif order_type == 'WK':\n 
self.cart[product_id] = {'quantity': 1, 'price': str(product.price_week), 'order_type':order_type, 'pickup':pickup, 'drop':drop, 'duration':math.ceil(duration.days/7), 'total_price':str(Decimal(product.price_week) * Decimal(math.ceil(duration.days/7))), 'category': product_category}\n else:\n self.cart[product_id] = {'quantity': 1, 'price': str(product.price_hour), 'order_type':order_type, 'pickup':pickup, 'drop':drop}\n\n if update_quantity and (product_category!='BIKES' or product_category!='CARS') :\n # if update_quantity:\n # self.cart[product_id]['quantity'] = quantity\n self.cart[product_id]['quantity'] += quantity\n else:\n # self.cart[product_id]['quantity'] += quantity\n self.cart[product_id]['quantity'] = quantity\n self.save()", "def simulate_share_price_change(share_prices, t):\n time = 1\n for name in share_prices:\n Wt = share_prices[name]\n vola = VOLATILITY[name]\n exp_f = exp( (MARKET_INTEREST_RATE - vola**2/2)*time + vola * random_std(0,1) * time**(1/2) )\n share_prices[name] = max(Wt * exp_f, 0)", "def get_times(self, prod1_name, prod2_name):\n try:\n item = self._purchased.find_one({PROD1: prod1_name, PROD2: prod2_name})\n if item == None: return None\n else: return item[TIMES]\n except pyerrors.OperationFailure as ex:\n print(ex.value)\n except pyerrors.PyMongoError as ex:\n print(ex.value)", "def make_times(self, start, end, delta):\n assert type(start) is float or type(start) is int, \\\n 'Start time not specified with float'\n assert type(end) is float or type(end) is int, \\\n 'End time not specified with a number'\n assert type(delta) is float or type(delta) is int, \\\n 'Time increment not specified with a number'\n assert end >= start, 'End time is before start time'\n self.target_times = []\n step = start\n while step <= end:\n self.target_times.append(step)\n step += delta", "def create_timestructured(self, good, quantity):\n length = len(self._haves[good].time_structure)\n for i in range(length):\n qty = quantity[i] if type(quantity) == list else quantity / length\n self._haves[good].time_structure[i] += qty", "def test_save_slot_time_frames(self):\n coupon = COUPON_FACTORY.create_coupon()\n slot = Slot.objects.create(site_id=2,\n business_id=coupon.offer.business.id,\n start_date=datetime.datetime.today(),\n end_date = datetime.datetime.today() + datetime.timedelta(1))\n now = datetime.datetime.now()\n datetime_list = [\n # test iteration 1: Cannot start time frame before the slot starts.\n (now - datetime.timedelta(1), now, False),\n # 2. Cannot end time frame before starting it.\n (now + datetime.timedelta(1), now, False), \n # 3. Cannot start and end at the same time.\n (now, now, False), \n (now, now + datetime.timedelta(1), True), # Becomes slot id = 1.\n # 5. Cannot begin at same time.\n (now, now + datetime.timedelta(1), False), \n (now + datetime.timedelta(1), now + datetime.timedelta(2), True), #2\n (now + datetime.timedelta(2), now + datetime.timedelta(3), True), #3\n (now + datetime.timedelta(5), now + datetime.timedelta(8), True), #4\n # 9. Cannot be a subset of a timeframe.\n (now + datetime.timedelta(6), now + datetime.timedelta(7), False),\n # 10. Cannot wholly include a timeframe.\n (now + datetime.timedelta(4), now + datetime.timedelta(9), False),\n # 11. Cannot straddle a start datetime.\n (now + datetime.timedelta(4), now + datetime.timedelta(6), False),\n # 12. Cannot straddle an end datetime.\n (now + datetime.timedelta(7), now + datetime.timedelta(9), False),\n # 13. 
Cannot end at the same time.\n (now + datetime.timedelta(6), now + datetime.timedelta(8), False),\n (now + datetime.timedelta(10), None, True), # Becomes slot id = 5.\n (now + datetime.timedelta(9), None, True), # Becomes slot id = 6\n (now + datetime.timedelta(11), None, True), # Becomes slot id = 7\n (now + datetime.timedelta(12), now + datetime.timedelta(15), True),\n ]\n counter = 0\n for start_datetime, end_datetime, is_valid in datetime_list:\n counter += 1\n LOG.debug('test iteration: %s' % counter)\n LOG.debug('test start_datetime: %s' % start_datetime)\n LOG.debug('test end_datetime: %s' % end_datetime)\n slot_time_frame = SlotTimeFrame(slot=slot, coupon_id=coupon.id,\n start_datetime=start_datetime, end_datetime=end_datetime)\n if is_valid:\n slot_time_frame.save()\n self.assertTrue(slot_time_frame.id)\n else:\n with self.assertRaises(ValidationError) as context_manager:\n slot_time_frame.save()\n self.fail('Invalid time frame saved.')\n LOG.debug(context_manager.exception)", "def approximate_delivery_time(self):\n # this is set to seconds just of demonstration\n approx_delivery_time = random.randrange(300, 600)\n approx_time_to_main_station = approx_delivery_time / 2\n\n return {\n \"delivery_time\": approx_delivery_time,\n \"time_to_station\": approx_time_to_main_station,\n }", "def stock_times(mul, work_on):\n for i in work_on:\n pink = i[1] / mul\n blue = i[2] / mul\n red = i[3] / mul\n i[1] = float(\"{0:.2f}\".format(pink))\n i[2] = float(\"{0:.2f}\".format(blue))\n i[3] = float(\"{0:.2f}\".format(red))\n print(i)", "def _commuting_products(q_1: Q, q_2: Q) -> Dict:\n\n s_t, s_x, s_y, s_z = q_1.t, q_1.x, q_1.y, q_1.z\n q_2_t, q_2_x, q_2_y, q_2_z = q_2.t, q_2.x, q_2.y, q_2.z\n\n product_dict = {\n \"tt\": s_t * q_2_t,\n \"xx+yy+zz\": s_x * q_2_x + s_y * q_2_y + s_z * q_2_z,\n \"tx+xt\": s_t * q_2_x + s_x * q_2_t,\n \"ty+yt\": s_t * q_2_y + s_y * q_2_t,\n \"tz+zt\": s_t * q_2_z + s_z * q_2_t,\n }\n\n return product_dict", "def compute_time_price(supplier_with_transaction):\n supplier_item = supplier_with_transaction.get('supplier_detail')\n transaction_item = supplier_with_transaction.get('supplier_transaction')\n # Check if there is time prices or not\n if supplier_with_transaction.get('time_price'):\n # Check if we will compute in complex or simple\n if not supplier_item.get('has_complex_minute_price'):\n # start to calculate the simple version for time price\n charging_start = transaction_item.get('charging_start')\n charging_end = transaction_item.get('charging_end')\n if charging_start and charging_end:\n charging_start_obj = datetime.strptime(charging_start, '%Y-%m-%dT%H:%M:%S')\n charging_end_obj = datetime.strptime(charging_end, '%Y-%m-%dT%H:%M:%S')\n duration_in_minutes = (charging_end_obj - charging_start_obj).total_seconds() / 60\n # Check for min duration\n if supplier_item.get('min_duration') and duration_in_minutes < supplier_item.get('min_duration'):\n duration_in_minutes = supplier_item.get('min_duration')\n price = supplier_item.get('simple_minute_price')\n total_price = price * duration_in_minutes\n return total_price\n else:\n # start calculate the complex version for time price\n total_price = 0\n if supplier_item.get('interval') == 'start':\n for start_rec in supplier_item.get('time_price'):\n timeframe = start_rec.get('billing_each_timeframe') * 60\n if start_rec.get('hour_from', 0) > start_rec.get('hour_to', 0):\n duration = (start_rec.get('hour_to') - start_rec.get('hour_from')) * 60\n else:\n duration = (start_rec.get('hour_to') - (24 - 
start_rec.get('hour_from'))) * 60\n duration_after_timeframe = duration % timeframe\n total_duration = duration + duration_after_timeframe\n total_price += total_duration * start_rec.get('minute_price')\n else:\n for end_rec in supplier_item.get('time_price'):\n timeframe = end_rec.get('billing_each_timeframe') * 60\n if end_rec.get('hour_from', 0) > end_rec.get('hour_to', 0):\n duration = (end_rec.get('hour_to') - end_rec.get('hour_from')) * 60\n else:\n duration = (end_rec.get('hour_to') - (24 - end_rec.get('hour_from'))) * 60\n duration_after_timeframe = duration % timeframe\n total_duration = duration - (timeframe - duration_after_timeframe)\n total_price += total_duration * end_rec.get('minute_price')\n\n return total_price\n else:\n total_price = 0\n return total_price", "def stepify(times, values):\n new_times = np.empty((2*times.size - 1,))\n new_values = np.empty_like(new_times)\n new_times[::2] = times\n new_times[1::2] = times[1:]\n new_values[::2] = values\n new_values[1::2] = values[:-1]\n return new_times, new_values", "def multiplyTimeBy(self, *args):\n return _libsbml.ASTNode_multiplyTimeBy(self, *args)", "async def prepare_trades(self, pair: str):\n\n if pair not in self.trades:\n self.trades[pair] = {\n 'last_open_time': 0.0,\n 'rebuy_count': 0,\n 'open': [],\n 'closed': []\n }", "def add(self, prod1_name, prod2_name, times):\n if prod1_name == prod2_name:\n return\n try:\n self._purchased.update({PROD1: prod1_name, PROD2: prod2_name, TIMES: {'$exists': True}},\n {'$inc': {TIMES: times}},\n True\n )\n self._purchased.update({PROD1: prod2_name, PROD2: prod1_name, TIMES: {'$exists': True}},\n {'$inc': {TIMES: times}},\n True\n )\n print('add: succeeded')\n return True\n except pyerrors.OperationFailure as ex:\n print(ex.value)\n except pyerrors.PyMongoError as ex:\n print(ex.value)\n print('add: failed')\n return False", "def addTimes(time1, time2):\n t1 = timedelta(hours=time1.hour, minutes=time1.minute, seconds=time1.second)\n t2 = timedelta(hours=time2.hour, minutes=time2.minute, seconds=time2.second)\n t3 = t1 + t2\n return (datetime.min + t3).time()", "def tt(obs_time, *whatevers):\n n = whatevers[0].size\n return tuple(\n [obs_time[:n], ] +\n list(whatevers)\n )", "def interpolate_triplet(r,g,b,timestamps, start, stop):\n numdays = (stop-start).days\n hires_dates = [start + datetime.timedelta(days=x) for x in range(0,numdays)]\n hires_time = [time.mktime(date.timetuple()) for date in hires_dates]\n \n # interpolate r,g,b\n R = np.clip(np.interp(hires_time, timestamps, r),0,1)\n G = np.clip(np.interp(hires_time, timestamps, g),0,1)\n B = np.clip(np.interp(hires_time, timestamps, b),0,1)\n \n return list(zip(R,G,B))", "def gen_start_end_times(start_time=[6, 0, 0], end_time=[23, 0, 0]):\n\n now = datetime.now()\n year = now.year\n month = now.month\n day = now.day\n\n start_time = datetime(\n year, month, day, start_time[0], start_time[1], start_time[2], 0\n )\n\n end_time = datetime(year, month, day, end_time[0], end_time[1], end_time[2], 0)\n\n if end_time < now:\n end_time += timedelta(days=1)\n start_time += timedelta(days=1)\n\n return start_time, end_time", "def get_available_time(times: list):\n available_time = datetime.timedelta(0)\n for time in times:\n if time:\n t = datetime_from_time(time)\n if times.index(time) > 0 and t < times[times.index(time)-1]:\n t += datetime.timedelta(1)\n times[times.index(time)] = t\n for i in range(len(times) // 2):\n try:\n available_time += times[2*i+1] - times[2*i]\n except TypeError:\n pass\n return available_time", "def 
replicate(self, rng_list):\n # Designate separate random number generators.\n arr_rng = rng_list[0]\n\n total_revenue = 0\n b = list(self.factors[\"booking_limits\"])\n A = np.array(self.factors[\"product_incidence\"])\n # Vector of next arrival time per product.\n # (Starts at time = -1*time_before, e.g., t = -168.)\n arrival = np.zeros(self.factors[\"num_products\"]) - self.factors[\"time_before\"]\n # Upper bound on number of arrivals over the time period.\n arr_bound = 10 * round(168 * np.sum(self.factors[\"lambda\"]))\n arr_time = np.zeros((self.factors[\"num_products\"], arr_bound))\n # Index of which arrival time to use next for each product.\n a = np.zeros(self.factors[\"num_products\"], dtype=int)\n # Generate all interarrival times in advance.\n for i in range(self.factors[\"num_products\"]):\n arr_time[i] = np.array([arr_rng.expovariate(self.factors[\"lambda\"][i]) for _ in range(arr_bound)])\n # Extract first arrivals.\n for i in range(self.factors[\"num_products\"]):\n arrival[i] = arrival[i] + arr_time[i, a[i]]\n a[i] = 1\n min_time = 0 # Keeps track of minimum time of the orders not yet received.\n while min_time <= self.factors[\"runlength\"]:\n min_time = self.factors[\"runlength\"] + 1\n for i in range(self.factors[\"num_products\"]):\n if ((arrival[i] < min_time) and (arrival[i] <= self.factors[\"time_limit\"][i])):\n min_time = arrival[i]\n min_idx = i\n if min_time > self.factors[\"runlength\"]:\n break\n if b[min_idx] > 0:\n if min_idx % 2 == 0: # Rack_rate.\n total_revenue += sum(self.factors[\"rack_rate\"] * A[:, min_idx])\n else: # Discount_rate.\n total_revenue += sum(self.factors[\"discount_rate\"] * A[:, min_idx])\n # Reduce the inventory of products sharing the same resource.\n for i in range(self.factors[\"num_products\"]):\n if np.dot(A[:, i].T, A[:, min_idx]) >= 1:\n if b[i] != 0:\n b[i] -= 1\n arrival[min_idx] += arr_time[min_idx, a[min_idx]]\n a[min_idx] = a[min_idx] + 1\n # Compose responses and gradients.\n responses = {\"revenue\": total_revenue}\n gradients = {response_key: {factor_key: np.nan for factor_key in self.specifications} for response_key in responses}\n return responses, gradients", "def get_bruteforce_time(combo, speed=1000000000):\n return (combo // speed) // (24 * 3600)", "def __set_time_elements(*args):\n args[0].TimeState.delay_elements = args[1]\n args[0].TimeState.set_delay_elements()", "def time_settime(currenttime):\r\n\r\n time_query_times.append((getruntime(), currenttime))", "def several(self, times: int):\n return BagOfGoods({g: self[g] * times for g in self.keys()})", "def handle(self, *args, **options):\n\n ProductPair.objects.all().delete()\n person = Person.objects.get(name='klapshov')\n meals = Meal.objects.filter(person=person)\n for m in meals:\n products = [i.product for i in m.intake_set.all()]\n print(products)\n pairs = []\n for n in range(0, len(products)-1):\n for nn in range(n+1, len(products)):\n if n != nn:\n pairs.append([products[n], products[nn]])\n print(pairs)\n print(len(products), len(pairs))\n\n # REWRITE!!!!!!!!\n for p in pairs:\n try:\n pp = ProductPair.objects.get(product1=p[0], product2=p[1])\n except ProductPair.DoesNotExist:\n pp = ProductPair()\n pp.product1 = p[0]\n pp.product2 = p[1]\n pp.count = 1\n p2p = ProductToPerson.objects.get(\n person=person, product=p[0]\n )\n pp.ratio = round(pp.count/p2p.intakes_count*100)\n pp.save()\n else:\n pp.count += 1\n p2p = ProductToPerson.objects.get(\n person=person,\n product=p[0]\n )\n pp.ratio = round(pp.count/p2p.intakes_count*100)\n pp.save()\n\n for 
p in pairs:\n p.reverse()\n try:\n pp = ProductPair.objects.get(product1=p[0], product2=p[1])\n except ProductPair.DoesNotExist:\n pp = ProductPair()\n pp.product1 = p[0]\n pp.product2 = p[1]\n pp.count = 1\n p2p = ProductToPerson.objects.get(\n person=person,\n product=p[0]\n )\n pp.ratio = round(pp.count/p2p.intakes_count*100)\n pp.save()\n else:\n pp.count += 1\n p2p = ProductToPerson.objects.get(\n person=person,\n product=p[0]\n )\n pp.ratio = round(pp.count/p2p.intakes_count*100)\n pp.save()", "def test_product_available_by_time(self):\n product = ProductFactory()\n # The factory defines the timeframe as now and 31 days forward.\n self.assertTrue(product.is_time_available)\n self.assertTrue(product.is_available())" ]
[ "0.56735665", "0.5642687", "0.5597733", "0.55516917", "0.55381095", "0.54918313", "0.5367134", "0.53548855", "0.53330797", "0.5317737", "0.5311256", "0.5310855", "0.5292983", "0.5248051", "0.5220131", "0.52120817", "0.5160664", "0.51471895", "0.51454437", "0.51453066", "0.51409274", "0.5071789", "0.5064369", "0.50589055", "0.50283885", "0.50185454", "0.50148135", "0.5008455", "0.4997568", "0.4977013" ]
0.5754572
0
Recommend the next best relevant product
def recommend_next_product(self, prod_list): scores = defaultdict(float) for prod in prod_list: for item in self._purchased.find({PROD1: prod}): if not item[PROD2] in prod_list: scores[item[PROD2]] += math.log(item[TIMES]) if len(scores) == 0: return None max_tuple = max(scores.items(), key = operator.itemgetter(1)) return max_tuple[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _choose_best_option(self):", "def popular_recommend(row):\n actual = new_purchase_row(row)\n return f1(actual, popular_products)", "def step(self):\n highest_offer = None\n\n if self.manager is None:\n highest_rep = 0\n\n else:\n highest_rep = self.manager.reputation\n\n for offer in self.offers:\n if offer.manager.reputation > highest_rep:\n highest_offer = offer\n\n if highest_offer is not None:\n highest_offer.accept()\n\n self.offers = []", "def proposal_product(self, user_prod):\n # Give the bests product, selected by nutriscore value.\n prod0 = orm_imp.list_prod(user_prod)\n prod = self.show_proposal(prod0)\n user_choice = int(input(\n \"==============================\\n\"\n \"= Que souhaitez-vous faire ? =\\n\"\n \"==============================\\n\"\n \"- Sauvegarder un produit proposé : taper 1\\n\"\n \"- Faire une autre recherche produit : taper 2\\n\"\n \"- Retourner à l'écran principal : taper 3\\n\"))\n if user_choice == 1: # save product, go to main section\n self.save_product(prod[0], prod[1], prod[2], user_prod)\n self.controle_point = 1\n elif user_choice == 2: # goback to product research section\n self.controle_point = 2\n elif user_choice == 3: # go to main section\n self.controle_point = 1\n else:\n print(\"Veuillez entrer un chiffre compris entre 1 et 3.\")\n self.controle_point = 4\n return self.controle_point", "def make_recommendation(fav_product,model_knn=model_knn,\n data=csr_matrix(df_product_features.values),\n\n mapper=products_to_idx,\n n_recommendations=6):\n # fit\n model_knn.fit(data)\n # get input movie index\n #print('You have input product:', fav_product)\n idx = fuzzy_matching(mapper, fav_product, verbose=True)\n if idx is None:\n return []\n #print('Recommendation system start to make inference')\n #print('......\\n')\n distances, indices = model_knn.kneighbors(data[idx], n_neighbors=n_recommendations + 1)\n\n raw_recommends = sorted(list(zip(indices.squeeze().tolist(), distances.squeeze().tolist())), key=lambda x: x[1])[\n :0:-1]\n\n # get reverse mapper\n #print(raw_recommends)\n reverse_mapper = {v: k for k, v in mapper.items()}\n # print recommendations\n #print('Recommendations for {}:'.format(fav_product))\n filter = []\n for i, (idx, dist) in enumerate(raw_recommends):\n #print('{0}: {1}, with distance of {2}'.format(i + 1, reverse_mapper[idx], dist))\n filter.append(reverse_mapper[idx])\n\n\n newproduct = pd.read_sql_query(\"\"\"SELECT p.*\n ,(SELECT img.url FROM image img WHERE p.id=img.product_id limit 1) as image\n ,(SELECT cate.cate_name FROM categories cate WHERE p.category_id=cate.id) as cateName\n FROM products p where p.name IN %s \"\"\", conn,params=(tuple(filter),))\n\n return newproduct.reset_index().to_json(orient='records')", "def products_list(request, product):\n product_found = get_object_or_404(Products, product=product)\n\n nut = product_found.nutrition_grade_fr\n\n query_set_product = (\n Products.objects.filter(category=product_found.category)\n .filter(\n Q(nutrition_grade_fr__lte=nut) \n ) # propose products with value less or equal at the search product\n .exclude(product=product_found.product)\n )\n\n if len(query_set_product) >= 6:\n random_six_products = random.sample(\n list(query_set_product), 6\n ) # select 6 products randomly\n \n else:\n query_set_product = Products.objects.filter(\n Q(nutrition_grade_fr__lte=nut) \n ).exclude(product=product_found.product)\n\n random_six_products = random.sample(\n list(query_set_product), 6\n ) # select 6 products randomly \n\n\n if \"submit\" in request.POST: # do 
something with interview_HTML button is clicked\n save_product = request.POST.get(\"submit\")\n save_product = Products.objects.get(product=save_product)\n if not request.user.is_authenticated:\n return redirect(\"%s?next=%s\" % (settings.LOGIN_URL, request.path))\n user = request.user\n\n user = CustomUser.objects.get(email=user)\n\n save = History(\n user=user,\n chosen_product=product_found,\n remplacement_product=save_product,\n )\n save.save()\n\n context = {\n \"proposed_product\": product_found,\n \"products\": random_six_products,\n }\n\n return render(request, \"products/products.html\", context)", "def recommend(self, user_id, N=10):\n scores = self.user_factors[user_id] @ self.product_factors.T\n best = np.argpartition(scores, -N)[-N:]\n return sorted(zip(best, scores[best]), key=lambda x: -x[1])", "def svd_recommend_new(row):\n actual = new_purchase_row(row)\n recommended = svd_rec.recommend_new(u_dict[row[\"user_id\"]], N=10)\n recommended = [p_dict[r[0]] for r in recommended]\n return f1(actual, recommended)", "def buy_one_get_one(products):\n if 'p1' in products and products['p1'] >= 2:\n return -20\n else:\n return 0", "def _next_action(self) -> SingleBriberyAction:\n self._current_rating = self.get_graph().eval_graph(self.get_briber_id())\n if self._previous_rating is None:\n self._previous_rating = self._current_rating\n next_act = SingleBriberyAction(self)\n try:\n self._next_node = self.get_graph().get_random_customer(excluding=self._info_gained | self._bribed)\n except IndexError:\n print(f\"WARNING: {self.__class__.__name__} found no influential nodes, not acting...\", file=sys.stderr)\n return next_act\n if self._current_rating - self._previous_rating > self._max_rating_increase:\n self._best_node = self._last_node\n self._max_rating_increase = self._current_rating - self._previous_rating\n maximum_bribe = min(self.get_resources(), self._bribe_to_max())\n if self._c >= self._i and self._best_node is not None and maximum_bribe > 0:\n next_act.add_bribe(self._best_node, maximum_bribe)\n self._bribed.add(self._best_node)\n self._info_gained = set()\n self._c = 0\n self._max_rating_increase = 0\n self._best_node = 0\n else:\n if self._c >= self._i:\n print(f\"WARNING: {self.__class__.__name__} has not found an influential node in {self._c} tries \"\n f\"(intended maximum tries {self._i}), continuing search...\",\n file=sys.stderr)\n # Bid an information gaining bribe, which is at most k, but is\n # smaller if you need to bribe less to get to the full bribe\n # or don't have enough money to bid k.\n next_act.add_bribe(self._next_node, min(self._bribe_to_max(), min(self.get_resources(), self._k)))\n self._info_gained.add(self._next_node)\n self._c = self._c + 1\n self._last_node = self._next_node\n self._previous_rating = self._current_rating\n return next_act", "def go_product_reviews_next(self, driver, website):\n paginator = driver.find_element_by_class_name(\"BVRRPager\")\n next_link = paginator.find_element_by_class_name(\"BVRRNextPage\")\n next_link.find_element_by_name(\"BV_TrackingTag_Review_Display_NextPage\").click()\n time.sleep(1)", "def go_product_search_next(self, driver):\n try:\n pagination = driver.find_element_by_class_name(\"divPageLinks\")\n pagination.find_element_by_class_name(\"next\").click()\n except NoSuchElementException:\n raise NoSuchElementException", "def recommend_new(self, user_id, N=10):\n scores = self.user_factors[user_id] @ self.product_factors.T\n bought_indices = self.product_user_matrix.T[user_id].nonzero()[1]\n count = N + 
len(bought_indices)\n ids = np.argpartition(scores, -count)[-count:]\n best = sorted(zip(ids, scores[ids]), key=lambda x: -x[1])\n return list(itertools.islice((rec for rec in best if rec[0] not in bought_indices), N))", "def test_get_rate_plan_by_product(self):\n pass", "def next_choice(self, opponent: 'Player') -> str:\n\n if self.adaptive_ai:\n # this is an adaptive_ai player, so see if it has collected\n # enough stats about the current opponent yet:\n if sum(self.opponent_choices[opponent.name].values()) > 5:\n # has enough samples to start adapting to the opponent\n print(' {} is trying to guess the opponent\\'s choice...'.format(self.name))\n\n # AI algorithm 1:\n # simply find the most-frequent selection by the opponent and\n # choose its killer.\n\n guess = self.opponent_choices[opponent.name].most_common(1)[0][0]\n ai_choice = weapon_to_beat(guess)\n print(' ', opponent.name, 'most often chose', guess, 'so he/she chose', ai_choice)\n return ai_choice\n\n # use the standard tendency distribution to choose a weapon:\n n = randint(1, self.randmax)\n if n <= self.tendency[0]:\n return 'rock'\n elif n <= self.tendency[0] + self.tendency[1]:\n return 'paper'\n else:\n return 'scissors'", "def search_next(self):\n self._raise_not_supported()", "def recommend_by_event(self, event = None):\n pass", "def strategy_expensive(cookies, cps, history, time_left, build_info):\n # Select the most expensive item you can afford\n _selected_item = None\n _max = 0\n _available = cookies + ( time_left * cps )\n _item_list = build_info.build_items()\n for _item in _item_list:\n _item_cost = build_info.get_cost(_item)\n if (_max < _item_cost) and (_item_cost <= _available):\n _max = _item_cost\n _selected_item = _item\n \n return _selected_item", "def _find_next_item_to_process(items, user, random_order=False):\n user_results = RankingResult.objects.filter(user=user)\n \n processed_items = user_results.values_list('item__pk', flat=True)\n \n # cfedermann: this might be sub optimal wrt. 
performance!\n unprocessed_items = list(items.exclude(pk__in=processed_items))\n\n if random_order:\n shuffle(unprocessed_items)\n \n if unprocessed_items:\n return unprocessed_items[0]\n \n return None", "def recommendations_similarity(aData, needed_param, user, products, n = 10, simfunc = sim_cosine):\n table_CF = preproc.make_CF_table(aData, needed_param)\n sim_measures_table = simfunc(table_CF) \n \n scores = sim_measures_table.dot(table_CF)\n mean_scores = np.array(np.sum(sim_measures_table, axis=1).T)\n mean_scores = pd.DataFrame(np.tile(mean_scores, (scores.shape[1],1))).T\n predicted_ratings = np.divide(scores, np.absolute(mean_scores))\n \n ratings = predicted_ratings[user].order(ascending= False)\n ratings = ratings[0:n]\n \n return (ratings.index[ratings.index.isin(products)==False])", "def _determine_next_eval_point(self):\n anc_data = self._get_ancillary_data_for_acquisition()\n acq_to_use = getattr(acquisitions.asy, self.options.acq.lower())\n next_eval_point = acq_to_use(self.gp, self.acq_optimise, anc_data)\n return next_eval_point", "def best_buy(self):\n return Library.functions.best_buy(self._book)", "def next_question(self):\n self.user_answers = []\n self.curent_question = choice(self.to_ask)", "def choice_product(self):\n self.first_number = 0\n self.leave_choice_product = 1\n while self.leave_choice_product:\n print(fr.FR[11])\n self.display_product(self.products)\n self.input_product = input(fr.FR[12])\n self.choice_product_input()", "def get_most_and_least_expensive_high_review_product(df):\n try:\n df3 = merge_metadata(df)\n product_filter = df3['overall'] >= 4.0\n high_reviewed_products = df3[product_filter]\n # print high_reviewed_products[:10]\n # The data contained NaN so we use the nanmax/min funtions to get max/min\n most_exp = round(np.nanmax(high_reviewed_products['price'])[0], 2)\n least_exp = round(np.nanmin(high_reviewed_products['price'])[0], 2)\n\n most_exp_prod = df3.loc[df3['price'] == most_exp, 'asin'].iloc[0]\n least_exp_prod = df3.loc[df3['price'] == least_exp, 'asin'].iloc[0]\n write_text_tofile(\"Most Expensive Product: \" + str(most_exp_prod) + \", Price: \" + str(most_exp))\n write_text_tofile(\"Least Expensive Product: \" + str(least_exp_prod) + \", Price: \" + str(least_exp))\n return {most_exp_prod: most_exp, least_exp_prod: least_exp}\n except Exception as e:\n print \"Error getting most and least expensive high review product\"\n print str(e)\n pass", "def determine_offer_tuple_NN(nn, ti, ca, eps, eps_ran, revenues, A, arrival_probabilities, preference_weights, no_purchase_preference):\n\n\n # opportunity costs\n opp_costs = 1.0*np.zeros_like(revenues)\n for pro in products:\n if functools.reduce(operator.or_, ca - A[:, pro] < 0):\n # set opp costs to infty if not enough capacity for product\n opp_costs[pro] = np.inf\n else:\n # calculate opportunity costs via Bellman: V(t+1, c) - V(t+1, c-A[i])\n t_df = pd.DataFrame([np.zeros(T + 1)] * 1)\n t_df.columns = [\"t\" + str(i) for i in t_df.columns]\n t_df.iloc[0, ti+1] = 1\n\n cs_unsold = {}\n for h in resources:\n c_df = pd.DataFrame([np.zeros(T +1)] * 1)\n c_df.columns = [\"c-h\" + str(h) + \"-t\" + str(i) for i in c_df.columns]\n c_df.iloc[0, ti+1] = ca[h]\n cs_unsold[h] = c_df\n\n cs_sold = {}\n for h in resources:\n c_df = pd.DataFrame([np.zeros(T + 1)] * 1)\n c_df.columns = [\"c-h\" + str(h) + \"-t\" + str(i) for i in c_df.columns]\n c_df.iloc[0, ti + 1] = ca[h] - A[h, pro]\n cs_sold[h] = c_df\n\n X_unsold = pd.concat([t_df, *[cs_unsold[h] for h in resources]], axis=1)\n X_sold = 
pd.concat([t_df, *[cs_sold[h] for h in resources]], axis=1)\n\n opp_costs[pro] = nn.predict(X_unsold) - nn.predict(X_sold)\n\n # epsilon greedy strategy - offer no products\n if eps_ran < eps / 2:\n return tuple(np.zeros_like(revenues))\n\n # epsilon greedy strategy - offer all products\n if eps_ran < eps:\n offer_tuple = np.ones_like(revenues)\n offer_tuple[opp_costs == np.inf] = 0 # one resource not available => don't offer product\n return tuple(offer_tuple)\n\n # setup\n offer_tuple = np.zeros_like(revenues)\n\n # line 1\n s_prime = revenues - opp_costs > 0\n if all(np.invert(s_prime)):\n return tuple(offer_tuple)\n\n # line 2-3\n # offer_sets_to_test has in each row an offer set, we want to test\n offer_sets_to_test = np.zeros((sum(s_prime), len(revenues)))\n offer_sets_to_test[np.arange(sum(s_prime)), np.where(s_prime)] = 1\n offer_sets_to_test += offer_tuple\n offer_sets_to_test = (offer_sets_to_test > 0)\n\n value_marginal = np.apply_along_axis(calc_value_marginal_nn, 1, offer_sets_to_test, opp_costs, revenues,\n arrival_probabilities, preference_weights, no_purchase_preference)\n\n offer_tuple = offer_sets_to_test[np.argmax(value_marginal)]*1\n s_prime = s_prime & offer_tuple == 0\n v_s = np.amax(value_marginal)\n\n # line 4\n while True:\n # 4a\n # offer_sets_to_test has in each row an offer set, we want to test\n offer_sets_to_test = np.zeros((sum(s_prime), len(revenues)))\n offer_sets_to_test[np.arange(sum(s_prime)), np.where(s_prime)] = 1\n offer_sets_to_test += offer_tuple\n offer_sets_to_test = (offer_sets_to_test > 0)\n\n # 4b\n value_marginal = np.apply_along_axis(calc_value_marginal_nn, 1, offer_sets_to_test, opp_costs, revenues,\n arrival_probabilities, preference_weights, no_purchase_preference)\n\n if np.amax(value_marginal) >= v_s:\n v_s = np.amax(value_marginal)\n offer_tuple = offer_sets_to_test[np.argmax(value_marginal)]*1 # to get 1 for product offered\n s_prime = (s_prime - offer_tuple) == 1 # only those products remain, that are neither in the offer_tuple\n if all(offer_tuple == 1):\n break\n else:\n break\n return tuple(offer_tuple)", "def best_sell(self):\n return Library.functions.best_sell(self._book)", "def find_substitute(self):\n\n products_list = None\n\n while not products_list:\n self.get_targeted_category()\n\n db.connect()\n db.execute(\"\"\"\n SELECT product_id, nutriscore_id\n FROM Product_per_category\n INNER JOIN Product\n ON Product.id = product_id\n WHERE category_id = %s AND nutriscore_id < %s\n ORDER BY nutriscore_id\n \"\"\", (self.category_id, self.nutriscore,))\n products_list = db.fetch(True)\n db.disconnect()\n self.category_concordance += 1\n\n return products_list[0][0]", "def get_n_best(self):\n pass", "def ap(self, result, next_item):\n if next_item in result.index:\n rank = result.index.get_loc(next_item) + 1\n return 1.0 / rank\n else:\n return 0" ]
[ "0.6451906", "0.6248928", "0.6203764", "0.6144406", "0.610878", "0.60792506", "0.59292257", "0.579324", "0.57634926", "0.5680954", "0.56799036", "0.56428087", "0.56206936", "0.55952084", "0.55937326", "0.5577743", "0.5574243", "0.5536686", "0.55234545", "0.55225116", "0.547927", "0.5477826", "0.5472022", "0.54233736", "0.5417448", "0.53982127", "0.539349", "0.5389771", "0.53891206", "0.53849655" ]
0.69993323
0
Trains the classifier model on the training set stored in file trainfile
def train(self, trainfile): sentences_emb,labels=self.read_data(trainfile) logReg = LogisticRegression(penalty="l2",C = 10, multi_class='auto',solver='newton-cg') logReg.fit(sentences_emb,labels) self.clf=logReg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train(self, trainfile):", "def trainModel( self, featureTrain, classTrain):", "def train(self):\n self.log(f\"{self.cur_file_path}\\t\\tInfo: train method invoked!\")\n self.log(f\"{self.cur_file_path}\\t\\tInfo: training {self.model.__class__.__name__} model!\")\n\n self.model.fit(self.trainX, self.trainY)", "def train(self, trainfile):\r\n\r\n # We load the data and lower the text\r\n data_train = pd.read_csv(trainfile, sep = \"\\t\", names = [\"polarity\", \"category\", \"word\", \"offsets\", \"sentence\"])\r\n data_train['sentence_l'] = data_train['sentence'].apply(str.lower)\r\n data_train['word'] = data_train['word'].apply(str.lower)\r\n \r\n # We try to keep all the no/nor/not words as this changes radically the sentiment analysis\r\n data_train['sentence_l'] = data_train[\"sentence_l\"].apply(lambda sentence: sentence.replace(\"can\\'t\", \"can not\"))\r\n data_train['sentence_l'] = data_train[\"sentence_l\"].apply(lambda sentence: sentence.replace(\"n\\'t\", \" not\"))\r\n self.stopwords = stopwords.words(\"english\")\r\n self.stopwords.remove('nor')\r\n self.stopwords.remove('no')\r\n self.stopwords.remove('not')\r\n \r\n # We clean the train data and stem the words\r\n self.stemmer = nltk.porter.PorterStemmer()\r\n clean_sentences = []\r\n for row in data_train['sentence_l']:\r\n tokens = word_tokenize(row)\r\n tokens = [word for word in tokens if word.isalpha()]\r\n tokens = [w for w in tokens if not w in self.stopwords] \r\n tokens = [self.stemmer.stem(word) for word in tokens]\r\n clean_sentences.append(tokens)\r\n data_train['stems'] = clean_sentences\r\n \r\n # We also stem the target words to be coherent with the stemmed words in the sentences\r\n data_train['word'] = [self.stemmer.stem(word) for word in data_train['word']]\r\n \r\n # We recreate the sentences with the selected and cleaned words\r\n Classifier.create_sentence = staticmethod(Classifier.create_sentence)\r\n data_train.clean_sentence = Classifier.create_sentence(data_train.stems)\r\n \r\n # We create a BOW vector\r\n self.restaurant_vect = CountVectorizer(min_df=1, tokenizer=nltk.word_tokenize)\r\n reviews_counts = self.restaurant_vect.fit_transform(data_train.clean_sentence)\r\n \r\n # We transform the BOW vector with the tfidf scores\r\n self.tfidf_transformer = TfidfTransformer()\r\n reviews_tfidf = self.tfidf_transformer.fit_transform(reviews_counts)\r\n \r\n polarities = []\r\n for row in data_train['polarity']:\r\n if row == 'positive':\r\n polarities.append(1)\r\n if row == 'neutral':\r\n polarities.append(0)\r\n if row == 'negative':\r\n polarities.append(-1)\r\n data_train['polarity_floats'] = polarities\r\n \r\n # Split data into training and test sets\r\n test_size = 10\r\n X_train, X_test, y_train, y_test = train_test_split(reviews_tfidf, data_train.polarity_floats,\r\n test_size = test_size/100, random_state = None)\r\n \r\n ############# CNN MODEL ##############\r\n \r\n from keras.layers import Input, Dense, Embedding, Conv2D, MaxPool2D\r\n from keras.layers import Reshape, Flatten, Dropout, Concatenate\r\n from keras.callbacks import ModelCheckpoint\r\n from keras.optimizers import Adam\r\n from keras.models import Model\r\n \r\n sequence_length = X_train.shape[1] # 7\r\n vocabulary_size = X_train.shape[0] # 1503\r\n embedding_dim = 256\r\n filter_sizes = [3,4,5]\r\n num_filters = 512\r\n drop = 0.5\r\n \r\n epochs = 10\r\n batch_size = 50\r\n \r\n # this returns a tensor\r\n print(\"Creating Model...\")\r\n inputs = Input(shape=(sequence_length,), dtype='int32')\r\n embedding = 
Embedding(input_dim=vocabulary_size, output_dim=embedding_dim, input_length=sequence_length)(inputs)\r\n reshape = Reshape((sequence_length,embedding_dim,1))(embedding)\r\n \r\n conv_0 = Conv2D(num_filters, kernel_size=(filter_sizes[0], embedding_dim), padding='valid', kernel_initializer='normal', activation='relu')(reshape)\r\n conv_1 = Conv2D(num_filters, kernel_size=(filter_sizes[1], embedding_dim), padding='valid', kernel_initializer='normal', activation='relu')(reshape)\r\n conv_2 = Conv2D(num_filters, kernel_size=(filter_sizes[2], embedding_dim), padding='valid', kernel_initializer='normal', activation='relu')(reshape)\r\n \r\n maxpool_0 = MaxPool2D(pool_size=(sequence_length - filter_sizes[0] + 1, 1), strides=(1,1), padding='valid')(conv_0)\r\n maxpool_1 = MaxPool2D(pool_size=(sequence_length - filter_sizes[1] + 1, 1), strides=(1,1), padding='valid')(conv_1)\r\n maxpool_2 = MaxPool2D(pool_size=(sequence_length - filter_sizes[2] + 1, 1), strides=(1,1), padding='valid')(conv_2)\r\n \r\n concatenated_tensor = Concatenate(axis=1)([maxpool_0, maxpool_1, maxpool_2])\r\n flatten = Flatten()(concatenated_tensor)\r\n dropout = Dropout(drop)(flatten)\r\n output = Dense(units=1, activation='softmax')(dropout)\r\n \r\n # this creates a model that includes\r\n model = Model(inputs=inputs, outputs=output)\r\n \r\n checkpoint = ModelCheckpoint('weights.{epoch:03d}-{val_acc:.4f}.hdf5', monitor='val_acc', verbose=1, save_best_only=True, mode='auto')\r\n adam = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\r\n \r\n model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])\r\n print(\"Training Model...\")\r\n model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, callbacks=[checkpoint], validation_data=(X_test, y_test)) # starts training\r", "def retrain(self):\n thread = Thread(target=self.trainer.train_classifier)\n thread.start()", "def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)", "def train():\n import training\n\n # Ensure output directories exist\n os.makedirs(os.path.dirname(cfg.scaler_path), exist_ok=True)\n os.makedirs(cfg.model_path, exist_ok=True)\n os.makedirs(cfg.log_path, exist_ok=True)\n\n # Load (standardized) input data and target values\n tr_x, tr_y, _ = _load_data(cfg.training_set, is_training=True)\n val_x, val_y, _ = _load_data(cfg.validation_set)\n\n # Try to create reproducible results\n np.random.seed(cfg.initial_seed)\n\n # Save free parameters to disk\n utils.log_parameters(cfg.training, os.path.join(cfg.model_path,\n 'parameters.json'))\n\n training.train(tr_x, tr_y, val_x, val_y)", "def fit(self, train_file_path: str):\n # TODO write code to extract features from train_file_path and \n # train the model\n return self._model", "def train(self):\n # 1. 
Extracting details of attributes\n\n self.get_attribute_data()\n if self.train_data is None and self.train_data_file is None:\n raise ValueError(\"Neither training data not training file provided\")\n\n self.get_train_data()\n self.classifier = self.build_tree(rows=self.train_data, attribute_list=self.attribute_names)", "def train(self, train_set) -> None:\n super().train(train_set)\n # split into data and target\n xlist, y = zip(*train_set)\n x = sparse.vstack(xlist)\n self._classifier.fit(x, y)", "def train():\n pass", "def train(self, training_data):\n pass", "def train_classifier(self, class_id):\n raise NotImplementedError(\"Classifier training must be implemented first.\")", "def train(self, absList, modelFilename):\n pass", "def set_train(self):\n self.model.train()", "def train(self, absList, modelFilename):\n pass", "def train():\n # YOUR TRAINING CODE GOES HERE", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self, train_loader):\n pass", "def train(self, trainData):\n pass", "def train_classifier(train_faces, train_faces_ids):\n recognizer_lbph = cv2.face.LBPHFaceRecognizer_create()\n print('Training model in progress...')\n recognizer_lbph.train(train_faces, np.array(train_faces_ids))\n print('Saving...')\n recognizer_lbph.save('trainner.yml')\n print('Model training complete!')", "def train(self,training_file,rare_thresh=100,clf_params=None,model_path=None,chosen_feats=None,tune_mode=None,size=None,as_text=False,multitrain=True,chosen_clf=None):\n\n\t\tif tune_mode is not None and size is None and tune_mode != \"hyperopt\":\n\t\t\tsize = 5000\n\t\t\tsys.stderr.write(\"o No sample size set - setting size to 5000\\n\")\n\n\t\tif not as_text:\n\t\t\ttrain = io.open(training_file,encoding=\"utf8\").read().strip().replace(\"\\r\",\"\") + \"\\n\"\n\t\telse:\n\t\t\ttrain = training_file\n\n\t\tif size is not None:\n\t\t\ttrain = shuffle_cut_conllu(train,size)\n\t\t#tagged = udpipe_tag(train,self.udpipe_model)\n\t\ttagged = tt_tag(train,self.lang,preserve_sent=True)\n\n\t\tif model_path is None: # Try default model location\n\t\t\tmodel_path = script_dir + os.sep + \"models\" + os.sep + self.corpus + \"_ensemble_sent.pkl\"\n\n\t\tif clf_params is None:\n\t\t\t# Default classifier parameters\n\t\t\t#clf_params = {\"n_estimators\":125,\"min_samples_leaf\":1, \"max_depth\":15, \"max_features\":None, \"n_jobs\":4, \"random_state\":42, \"oob_score\":True, \"bootstrap\":True}\n\t\t\tclf_params = {\"n_estimators\":100,\"min_samples_leaf\":1, \"min_samples_split\":5, \"max_depth\":10, \"max_features\":None, \"n_jobs\":4, \"random_state\":42, \"oob_score\":True, \"bootstrap\":True}\n\n\t\tif chosen_clf is None:\n\t\t\tchosen_clf = RandomForestClassifier(n_jobs=4,oob_score=True, bootstrap=True)\n\t\t\tchosen_clf.set_params(**clf_params)\n\n\t\tcat_labels = [\"word\",\"first\",\"last\",\"genre\",\"pos\",\"cpos\"]\n\t\tnum_labels = [\"tok_len\",\"tok_id\"]\n\n\t\ttrain_feats, vocab, toks, firsts, lasts = read_conll(tagged,genre_pat=self.genre_pat,mode=\"sent\",as_text=True,char_bytes=self.lang==\"zho\")\n\t\tgold_feats, _, _, _, _ = read_conll(train,mode=\"sent\",as_text=True)\n\t\tgold_feats = [{\"wid\":0}] + gold_feats + [{\"wid\":0}] # Add dummies to gold\n\n\t\t# Ensure that \"_\" is in the possible values of first/last for OOV chars at test time\n\t\toov_item = train_feats[-1]\n\t\toov_item[\"first\"] = \"_\"\n\t\toov_item[\"last\"] = \"_\"\n\t\toov_item[\"lemma\"] = 
\"_\"\n\t\toov_item[\"word\"] = \"_\"\n\t\toov_item[\"pos\"] = \"_\"\n\t\toov_item[\"cpos\"] = \"_\"\n\t\toov_item[\"genre\"] = \"_\"\n\t\ttrain_feats.append(oov_item)\n\t\ttrain_feats = [oov_item] + train_feats\n\t\ttoks.append(\"_\")\n\t\ttoks = [\"_\"] + toks\n\n\t\tvocab = Counter(vocab)\n\t\ttop_n_words = vocab.most_common(rare_thresh)\n\t\ttop_n_words, _ = zip(*top_n_words)\n\n\t\theaders = sorted(list(train_feats[0].keys()))\n\t\tdata = []\n\n\t\tpreds = {}\n\n\t\tfor e in self.estimators:\n\t\t\tif multitrain and e.name in [\"LRSentencer\",\"DNNSentencer\"]:\n\t\t\t\tpred = e.predict_cached(tagged)\n\t\t\telse:\n\t\t\t\tpred = e.predict(tagged)\n\t\t\t_, preds[e.name + \"_prob\"] = [list(x) for x in zip(*pred)]\n\t\t\tpreds[e.name + \"_prob\"] = [0.0] + preds[e.name + \"_prob\"] + [0.0] # Add dummy wrap for items -1 and +1\n\t\t\theaders.append(e.name + \"_prob\")\n\t\t\tnum_labels.append(e.name + \"_prob\")\n\n\t\tfor i, item in enumerate(train_feats):\n\t\t\tif item[\"word\"] not in top_n_words:\n\t\t\t\titem[\"word\"] = item[\"pos\"]\n\t\t\tfor e in self.estimators:\n\t\t\t\titem[e.name + \"_prob\"] = preds[e.name + \"_prob\"][i]\n\n\t\t\tfeats = []\n\t\t\tfor k in headers:\n\t\t\t\tfeats.append(item[k])\n\n\t\t\tdata.append(feats)\n\n\t\tdata, headers, cat_labels, num_labels = self.n_gram(data, headers, cat_labels, num_labels)\n\t\t# No need for n_gram feats for the following:\n\t\tif \"NLTKSentencer_prob_min1\" in num_labels:\n\t\t\tnum_labels.remove(\"NLTKSentencer_prob_min1\")\n\t\t\tnum_labels.remove(\"NLTKSentencer_prob_pls1\")\n\t\tif \"UDPipeSentencer_prob_min1\" in num_labels:\n\t\t\tnum_labels.remove(\"UDPipeSentencer_prob_min1\")\n\t\t\tnum_labels.remove(\"UDPipeSentencer_prob_pls1\")\n\t\tif \"LRSentencer_prob_min1\" in num_labels:\n\t\t\tnum_labels.remove(\"LRSentencer_prob_min1\")\n\t\t\tnum_labels.remove(\"LRSentencer_prob_pls1\")\n\t\tif \"RuleBasedSplitter_prob_min1\" in num_labels:\n\t\t\tnum_labels.remove(\"RuleBasedSplitter_prob_min1\")\n\t\t\tnum_labels.remove(\"RuleBasedSplitter_prob_pls1\")\n\t\tif \"DNNSentencer_prob_min1\" in num_labels:\n\t\t\tnum_labels.remove(\"DNNSentencer_prob_min1\")\n\t\t\tnum_labels.remove(\"DNNSentencer_prob_pls1\")\n\t\tif \"tok_id_min1\" in num_labels:\n\t\t\tnum_labels.remove(\"tok_id_min1\")\n\t\t\tnum_labels.remove(\"tok_id_pls1\")\n\t\tif \"genre_min1\" in cat_labels:\n\t\t\tcat_labels.remove(\"genre_min1\")\n\t\t\tcat_labels.remove(\"genre_pls1\")\n\n\t\t# Use specific feature subset\n\t\tif chosen_feats is not None:\n\t\t\tnew_cat = []\n\t\t\tnew_num = []\n\t\t\tfor feat in chosen_feats:\n\t\t\t\tif feat in cat_labels:\n\t\t\t\t\tnew_cat.append(feat)\n\t\t\t\telif feat in num_labels:\n\t\t\t\t\tnew_num.append(feat)\n\t\t\tcat_labels = new_cat\n\t\t\tnum_labels = new_num\n\n\t\tdata = pd.DataFrame(data, columns=headers)\n\t\tdata_encoded, multicol_dict = self.multicol_fit_transform(data, pd.Index(cat_labels))\n\n\t\tdata_x = data_encoded[cat_labels+num_labels].values\n\t\tdata_y = [int(t['wid'] == 1) for t in gold_feats]\n\n\t\tsys.stderr.write(\"o Learning...\\n\")\n\n\t\tif tune_mode is not None:\n\t\t\t# Randomize samples for training\n\t\t\tdata_x = data_encoded[cat_labels+num_labels+[\"label\"]].sample(frac=1,random_state=42)\n\t\t\tdata_y = np.where(data_x['label'] == \"_\", 0, 1)\n\t\t\tdata_x = data_x[cat_labels+num_labels]\n\n\t\t\t# Reserve 10% for validation\n\t\t\tval_x = data_x[int(len(data_y)/9):]\n\t\t\tval_y = data_y[int(len(data_y)/9):]\n\t\t\tdata_x = data_x[:int(len(data_y)/9)]\n\t\t\tdata_y = 
data_y[:int(len(data_y)/9)]\n\n\t\tif tune_mode == \"importances\":\n\t\t\tsys.stderr.write(\"o Measuring correlation of categorical variables\\n\")\n\t\t\ttheil_implications = report_theils_u(val_x,cat_labels)\n\t\t\tfor (var1, var2) in theil_implications:\n\t\t\t\tif var1 in cat_labels and var2 in cat_labels:\n\t\t\t\t\tdrop_var = var2\n\t\t\t\t\tu = theil_implications[(var1, var2)]\n\t\t\t\t\tsys.stderr.write(\"o Removed feature \" + drop_var + \" due to Theil's U \" + str(u)[:6] + \" of \" + var1 + \"->\" + var2 + \"\\n\")\n\t\t\t\t\tcat_labels.remove(drop_var)\n\n\t\t\tsys.stderr.write(\"o Measuring correlation of numerical variables\\n\")\n\t\t\tcor_mat = report_correlations(val_x[num_labels],thresh=0.95)\n\t\t\tfor (var1, var2) in cor_mat:\n\t\t\t\tif var1 in num_labels and var2 in num_labels:\n\t\t\t\t\tdrop_var = var2\n\t\t\t\t\tcorr_level = cor_mat[(var1, var2)]\n\t\t\t\t\tsys.stderr.write(\"o Removed feature \" + drop_var + \" due to correlation \" + str(corr_level) + \" of \" + var1 + \":\" + var2 + \"\\n\")\n\t\t\t\t\tnum_labels.remove(drop_var)\n\n\t\t\treturn cat_labels, num_labels\n\n\t\tif tune_mode in [\"paramwise\",\"full\"]:\n\t\t\tbest_params = {}\n\t\t\t# Tune individual params separately for speed, or do complete grid search if building final model\n\t\t\tparams_list = [{\"n_estimators\":[100,125,150]},\n\t\t\t\t\t\t {'max_depth': [10,15,20,None]},\n\t\t\t\t\t\t {\"min_samples_split\": [5, 10, 15]},\n\t\t\t\t\t\t {\"min_samples_leaf\":[1,2,3]},\n\t\t\t\t\t\t {\"max_features\":[None,\"sqrt\",\"log2\"]}]\n\t\t\tif tune_mode == \"full\":\n\t\t\t\t# Flatten dictionary if doing full CV\n\t\t\t\tparams_list = [{k: v for d in params_list for k, v in d.items()}]\n\t\t\tfor params in params_list:\n\t\t\t\tbase_params = copy.deepcopy(clf_params) # Copy default params\n\t\t\t\tfor p in params:\n\t\t\t\t\tif p in base_params: # Ensure base_params don't conflict with grid search params\n\t\t\t\t\t\tbase_params.pop(p)\n\t\t\t\tgrid = GridSearchCV(RandomForestClassifier(**base_params),params,cv=3,n_jobs=4,error_score=\"raise\",refit=False)\n\t\t\t\tgrid.fit(data_x,data_y)\n\t\t\t\tfor param in params:\n\t\t\t\t\tbest_params[param] = grid.best_params_[param]\n\t\t\twith io.open(\"best_params.tab\",'a',encoding=\"utf8\") as bp:\n\t\t\t\tcorpus = os.path.basename(training_file).split(\"_\")[0]\n\t\t\t\tbest_clf = RandomForestClassifier(**best_params)\n\t\t\t\tclf_name = best_clf.__class__.__name__\n\t\t\t\tfor k, v in best_params.items():\n\t\t\t\t\tbp.write(\"\\t\".join([corpus, clf_name, k, str(v)]))\n\t\t\t\tbp.write(\"\\n\")\n\t\t\treturn best_clf, best_params\n\t\telif tune_mode == \"hyperopt\":\n\t\t\tfrom hyperopt import hp\n\t\t\tfrom hyperopt.pyll.base import scope\n\t\t\tspace = {\n\t\t\t\t'n_estimators': scope.int(hp.quniform('n_estimators', 50, 150, 10)),\n\t\t\t\t'max_depth': scope.int(hp.quniform('max_depth', 5, 30, 1)),\n\t\t\t\t'min_samples_split': scope.int(hp.quniform('min_samples_split', 2, 10, 1)),\n\t\t\t\t'min_samples_leaf': scope.int(hp.quniform('min_samples_leaf', 1, 10, 1)),\n\t\t\t\t'max_features': hp.choice('max_features', [\"sqrt\", None, 0.5, 0.7, 0.9]),\n\t\t\t\t'clf': hp.choice('clf', [\"rf\",\"et\",\"gbm\"])\n\t\t\t}\n\t\t\t#space = {\n\t\t\t#\t'n_estimators': scope.int(hp.quniform('n_estimators', 50, 150, 10)),\n\t\t\t#\t'max_depth': scope.int(hp.quniform('max_depth', 3, 30, 1)),\n\t\t\t#\t'eta': scope.float(hp.quniform('eta', 0.01, 0.2, 0.01)),\n\t\t\t#\t'gamma': scope.float(hp.quniform('gamma', 0.01, 0.2, 0.01)),\n\t\t\t#\t'colsample_bytree': 
hp.choice('colsample_bytree', [0.4,0.5,0.6,0.7,1.0]),\n\t\t\t#\t'subsample': hp.choice('subsample', [0.5,0.6,0.7,0.8,1.0]),\n\t\t\t#\t'clf': hp.choice('clf', [\"xgb\"])\n\t\t\t#}\n\n\t\t\tbest_clf, best_params = hyper_optimize(data_x,data_y,cat_labels=cat_labels,space=space,max_evals=50)\n\t\t\treturn best_clf, best_params\n\t\telse:\n\t\t\tclf = chosen_clf\n\t\t\tclf.set_params(**clf_params)\n\t\t\tif clf.__class__.__name__ in [\"RandomForestClassifier\",\"ExtraTreesClassifier\",\"XGBClassifier\"]:\n\t\t\t\tclf.set_params(**{\"n_jobs\":3,\"random_state\":42,\"oob_score\":True,\"bootstrap\":True})\n\t\t\telse:\n\t\t\t\tclf.set_params(**{\"random_state\":42})\n\t\t\tclf.fit(data_x,data_y)\n\n\t\tfeature_names = cat_labels + num_labels\n\n\t\tzipped = zip(feature_names, clf.feature_importances_)\n\t\tsorted_zip = sorted(zipped, key=lambda x: x[1], reverse=True)\n\t\tsys.stderr.write(\"o Feature importances:\\n\\n\")\n\t\tfor name, importance in sorted_zip:\n\t\t\tsys.stderr.write(name + \"=\" + str(importance) + \"\\n\")\n\n\t\tif hasattr(clf, \"oob_score_\"):\n\t\t\tsys.stderr.write(\"\\no OOB score: \" + str(clf.oob_score_)+\"\\n\")\n\n\t\tsys.stderr.write(\"\\no Serializing model...\\n\")\n\n\t\tjoblib.dump((clf, num_labels, cat_labels, multicol_dict, top_n_words, firsts, lasts), model_path, compress=3)", "def train(self, absList, modelFilename):\n raise NotImplementedError(\"Need to implement train()\")", "def train(self):\n if self.input_col is None:\n raise Exception(\"Preprocessing not specified\")\n self.classifier_model.train(self.input_col, self.output_col)", "def main(training_file_name):\n attribute, inverse = build_classifier(training_file_name)\n trained_file = open(TRAINED_FILE_NAME, mode='w')\n prolog(trained_file)\n write_body(trained_file, attribute, inverse)\n epilog(trained_file)", "def model(self):\n filePath = self.config['data_path']['train_data']\n data = self.loadCSV(filePath)\n cleandata = self.preprocess(data)\n X, y = self.dataSplit(cleandata)\n X = self.CountVect(X, self.config['transform_path']['transform_model_path'])\n X_train, X_test, y_train, y_test = self.TrainTestSplit(X, y)\n self.MultinomialNB(X_train, X_test, y_train, y_test, self.config['nlp_path']['model_path'])" ]
[ "0.8426095", "0.74116445", "0.73872614", "0.72419417", "0.71222615", "0.7112658", "0.7041241", "0.70203024", "0.7007973", "0.69176805", "0.69125766", "0.6865418", "0.6846726", "0.68443483", "0.68333745", "0.68271935", "0.6823557", "0.6811905", "0.6811905", "0.6811905", "0.6811905", "0.6811905", "0.6790479", "0.67879564", "0.67731106", "0.6748485", "0.67464244", "0.6734396", "0.67244947", "0.6715167" ]
0.7566852
1
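The record above pairs a feature-selection and tuning docstring with a training routine that grid-searches RandomForest hyperparameters one group at a time ("paramwise" mode) before fitting and serializing a final model. A minimal sketch of that per-parameter tuning pattern, run on synthetic data rather than the record's own features (the dataset, grids, and variable names below are illustrative assumptions, not the record's code):

import copy
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV

# Synthetic stand-in for the record's (data_x, data_y)
data_x, data_y = make_classification(n_samples=300, n_features=10, random_state=42)

# Tune each hyperparameter group separately for speed, as the "paramwise" branch does
params_list = [{"n_estimators": [100, 125, 150]},
               {"max_depth": [10, 15, 20, None]},
               {"min_samples_split": [5, 10, 15]}]

best_params = {}
base_params = {"random_state": 42}
for params in params_list:
    fixed = copy.deepcopy(base_params)
    for p in params:              # drop fixed params that would clash with the searched grid
        fixed.pop(p, None)
    grid = GridSearchCV(RandomForestClassifier(**fixed), params, cv=3, refit=False)
    grid.fit(data_x, data_y)
    for p in params:
        best_params[p] = grid.best_params_[p]

print(best_params)  # combined winners from each independent search

Searching each group independently trades a little optimality for a far smaller search space than the full cross-product grid the record reserves for its "full" mode.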
Convenience split function for inverted index attributes. Useful for attributes that contain filenames. Splits the given string s into component parts (directories, filename), discarding the extension and all but the last two directories. What's remaining is split into words and the result is returned.
def split_path(s): dirname, filename = os.path.split(s) fname_noext, ext = os.path.splitext(filename) levels = dirname.strip('/').split(os.path.sep)[2:][-2:] return PATH_SPLIT.split(' '.join(levels + [fname_noext]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_path(s):\n dirname, filename = os.path.split(s)\n fname_noext, ext = os.path.splitext(filename)\n for part in dirname.strip('/').split(os.path.sep)[2:][-2:] + [fname_noext]:\n for match in PATH_SPLIT.split(part):\n if match:\n yield match", "def tokenize(\n s: str, exclude: list = None, sep: str = \"_\", take_basename: bool = False\n) -> str:\n\n if exclude is None:\n exclude = []\n\n if take_basename:\n s = os.path.basename(s)\n\n # Split words in s\n words = re.findall(r\"[\\w]+\", \" \".join(s.split(sep)))\n words = [sp.normalize(word, lower=True) for word in words]\n words = [word for word in words if word not in exclude]\n\n return words", "def inner_split(s):\n\n return s.split(split_string)", "def filenameSplit (p):\n\tfrom os.path import split as splitPath, splitdrive, splitext\n\t\n\tsplt = splitPath (p)\n\tdisk,dir_ = splitdrive(splt[0])\n\ttry:\n\t\tif disk[1] != \":\":\n\t\t\traise IndexError\n\texcept IndexError:\n\t\tdisk,dir_ = \"\", splt[0]\n\tname,ext = splitext(splt[1])\n\treturn disk,dir_,name,ext", "def split_at(words, verb):\n if verb in words:\n i = words.index(verb)\n first_half = words[0:i]\n second_half = words[i+1:]\n return [first_half, second_half]\n else:\n return -1", "def split3 (filename):\n directory, basename = os.path.split (filename)\n basename, extension = os.path.splitext (basename)\n return directory, basename, extension", "def split_string_path(base, path):\n for i in range(len(path)):\n if isinstance(base, string_types):\n return path[:i], path[i:]\n base = base[path[i]]\n return path, ()", "def filename_split(path):\n\tdirectory = os.path.dirname(path)\n\tfilename, extension = os.path.splitext(os.path.basename(path))\n\treturn directory, filename, extension", "def split(self, s):\r\n l = [self._split(x) for x in _SPLIT_RE.split(s)]\r\n return [item for sublist in l for item in sublist]", "def explode(part):\n if isinstance(part, str):\n ans = []\n while len(part) > 0:\n parts = part.partition(\"/\")\n ans.append(parts[0])\n if parts[1] != \"\":\n ans.append(SLASH)\n part = parts[2]\n return ans\n\n return [part]", "def string_to_index(s):\n s = Unquote(s)\n if s == \".\":\n return ()\n return tuple(s.split(\"/\"))", "def word_split_by_char(s):\n old_words = []\n old_words.append(s)\n result = []\n while len(old_words) > 0:\n new_words = []\n for s in old_words:\n if '-' in s: # Case: ab-cd-ef\n new_words+=s.split('-')\n elif '.' 
in s: # Case: ab.cd.ef\n new_words+=s.split('.')\n elif '_' in s: # Case: ab_cd_ef\n new_words+=s.split('_')\n elif '/' in s: # Case: ab/cd/ef\n new_words+=s.split('/')\n elif '\\\\' in s: # Case: ab\\cd\\ef\n new_words+=s.split('\\\\')\n else:\n t = camel_case_split(s)\n if len(t) > 1:\n new_words += t\n result.append(s)\n old_words = new_words\n return result", "def mysplit(string):\n result = []\n last_split = 0\n for i in range(len(string)-3):\n if( string[i] == \"a\" and\n string[i+1] == \"n\" and\n string[i+2] == \"d\"):\n partial = string[last_split:i]\n last_split = i+3\n result.append(partial)\n rest = string[last_split:]\n result.append(rest)\n return result", "def pathsplit(path):\n stem, basename = os.path.split(path)\n if stem == '':\n return (basename,)\n if stem == path: # fixed point, likely '/'\n return (path,)\n return pathsplit(stem) + (basename,)", "def split_file_name(file, dataset_type='ycb'):\n dirname, filename = osp.split(file)\n filename_without_ext, ext = osp.splitext(filename)\n\n if dataset_type == 'ObjectNet3D':\n category_name = dirname.split(\"/\")[-2]\n idx = dirname.split(\"/\")[-1]\n else: # ycb\n category_name = dirname.split(\"/\")[-1]\n idx = None\n return dirname, filename, category_name, idx", "def split_half(str):\n split_pairs = str[:len(str)//2], str[len(str)//2:]\n return split_pairs", "def split(history: str) -> list:\n return [his[1:] if his[0:1] == '/' else his for his in history.split('-')]", "def splitpath(path):\n\n # FIXME perhaps call op.split repetitively would be better.\n #s = string.split( path, '/' ) # we work with fwd slash only inside.\n\n#We have decided to use all kind of separator\n s = []\n while True:\n first, second = op.split(path)\n s.append(second)\n if first == \"\":\n break\n else:\n path = first\n s.reverse()\n if len(s) == 1 and s[0] == \"\":\n s = []\n return s", "def split_preserve_tokens(s):\n return re.split(r'(\\W)', s)", "def split(s, posix=True):\n if isinstance(s, bytes):\n s = s.decode(\"utf-8\")\n return shlex.split(s, posix=posix)", "def file_splitter(filename):\n filename_pieces = filename.split(delimiter)\n\n # Remove the last file piece and split file extension\n new_values = filename_pieces[-1].split('.')\n filename_pieces.pop(-1)\n for value in new_values:\n filename_pieces.append(value)\n\n return filename_pieces", "def split(test_name):\n recipe, simple_test_name = test_name.split('.', 1)\n return recipe, simple_test_name", "def splitPath(self, path):\n return os.path.split(path)", "def split_word(word):\n return [(word[:i], word[i:]) for i in range(len(word) + 1)]", "def split_file(document: str):\n class_name, sep, assignment_name = document.partition(\"-\")\n try:\n assignment_name = assignment_name.split('.')[0].split('_')[0]\n except TypeError:\n pass\n return class_name, assignment_name", "def splitFn(fn):\n\n (dir, bn) = op.split(fn)\n\n fidx = bn.find(opts.separator)\n if fidx != -1:\n # found separator, add as an alt repn\n base = bn[ :fidx ]\n (repn, ext) = splitext(bn[ fidx + len(opts.separator): ])\n\n else:\n # didn't find separator, split using extension\n (base, ext) = splitext(bn)\n repn = ''\n return (dir, base, repn, ext)", "def _split(self, uri):\n if '/' in uri:\n return uri.split('/', 1)\n return [uri, None]", "def splitexts(path, exts=None):\n exts = []\n ext = os.path.splitext(path)\n while True:\n if len(ext[1]) < 1:\n break\n else:\n exts.append(ext[1])\n ext = os.path.splitext(ext[0])\n exts.reverse()\n return (path, exts)", "def _split_url(url):\n return url[1:].split('/')", 
"def split_name(fullname):" ]
[ "0.63968426", "0.6263762", "0.61884594", "0.58648413", "0.5765297", "0.5761506", "0.5726431", "0.56870097", "0.56813276", "0.56165", "0.55843884", "0.5512251", "0.5427378", "0.5423563", "0.54086035", "0.5384905", "0.5383309", "0.5346063", "0.53299356", "0.52875656", "0.52805877", "0.5278812", "0.52366304", "0.52231526", "0.5220838", "0.52179813", "0.5206913", "0.51948756", "0.5187476", "0.5182388" ]
0.7065876
0
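For reference, the split_path record above reduces a path to its last two directory levels plus the extension-less basename, then tokenizes the result. A self-contained sketch of the same idea (the PATH_SPLIT regex is an assumption here; the record relies on one defined elsewhere in its source, and empty tokens are filtered the way the second negative does):

import os
import re

# Assumed word splitter; the record's PATH_SPLIT is defined outside the shown snippet.
PATH_SPLIT = re.compile(r"[\W_]+", re.U)

def split_path(s):
    dirname, filename = os.path.split(s)
    fname_noext, _ext = os.path.splitext(filename)
    levels = dirname.strip("/").split(os.path.sep)[2:][-2:]  # keep at most the last two dirs
    return [t for t in PATH_SPLIT.split(" ".join(levels + [fname_noext])) if t]

print(split_path("/home/user/media/music/some_album/track-01.mp3"))
# -> ['music', 'some', 'album', 'track', '01']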
Registers one or more object attributes and/or multi-column indexes for the given type name. This function modifies the database as needed to accommodate new indexes and attributes, either by creating the object's tables (in the case of a new object type) or by altering the object's tables to add new columns or indexes. Previously registered attributes may be updated in limited ways (e.g. by adding an index to the attribute). If the attributes and indexes specified have not changed from previous invocations, no changes will be made to the database.
def register_object_type_attrs(self, type_name, indexes = [], **attrs): if len(indexes) == len(attrs) == 0: raise ValueError, "Must specify indexes or attributes for object type" table_name = "objects_%s" % type_name # First pass over the attributes kwargs, sanity-checking provided values. for attr_name, attr_defn in attrs.items(): # We allow attribute definition to be either a 2- to 4-tuple (last two # are optional), so pad the tuple with None if a 2- or 3-tuple was specified. attrs[attr_name] = attr_defn = tuple(attr_defn) + (None,) * (4-len(attr_defn)) if len(attr_defn) != 4: raise ValueError, "Definition for attribute '%s' is not a 2- to 4-tuple." % attr_name # Verify the attribute flags contain either ATTR_SEARCHABLE or ATTR_SIMPLE; # it can't contain both as that doesn't make sense. if attr_defn[1] & (ATTR_SIMPLE | ATTR_SEARCHABLE) not in (ATTR_SIMPLE, ATTR_SEARCHABLE): raise ValueError, "Flags for attribute '%s' must contain exactly one " \ "of ATTR_SIMPLE or ATTR_SEARCHABLE" % attr_name # Attribute name can't conflict with reserved names. if attr_name in RESERVED_ATTRIBUTES: raise ValueError, "Attribute name '%s' is reserved." % attr_name elif attr_name in self._inverted_indexes: if not attr_defn[1] & ATTR_INVERTED_INDEX or attr_defn[2] != attr_name: # Attributes can be named after inverted indexes, but only if # ATTR_INVERTED_INDEX is specified and the attribute name is the # same as its ivtidx name. raise ValueError, "Attribute '%s' conflicts with inverted index of same name, " \ "but ATTR_INVERTED_INDEX not specified in flags." % attr_name if attr_defn[1] & ATTR_INVERTED_INDEX: # Attributes with ATTR_INVERTED_INDEX can only be certain types. if attr_defn[0] not in (str, unicode, tuple, list, set): raise TypeError, "Type for attribute '%s' must be string, unicode, list, tuple, or set " \ "because it is ATTR_INVERTED_INDEX" % attr_name # Make sure inverted index name is valid. if attr_defn[2] is None: raise ValueError, "Attribute '%s' flags specify inverted index, " \ "but no inverted index name supplied." % attr_name elif attr_defn[2] not in self._inverted_indexes: raise ValueError, "Attribute '%s' specifies undefined interverted index '%s'" % \ (attr_name, attr_defn[2]) # Compile split regexp if it was given. if attr_defn[3] is not None and not callable(attr_defn[3]): attrs[attr_name] = attr_defn[:3] + (re.compile(attr_defn[3]),) if type_name in self._object_types: # This type already exists. Compare given attributes with # existing attributes for this type to see what needs to be done # (if anything). cur_type_id, cur_type_attrs, cur_type_idx = self._object_types[type_name] new_attrs = {} table_needs_rebuild = False changed = False for attr_name, attr_defn in attrs.items(): attr_type, attr_flags, attr_ivtidx, attr_split = attr_defn # TODO: converting an attribute from SIMPLE to SEARCHABLE or vice # versa isn't supported yet. Raise exception here to prevent # potential data loss. if attr_name in cur_type_attrs and attr_flags & (ATTR_SEARCHABLE | ATTR_SIMPLE) != \ cur_type_attrs[attr_name][1] & (ATTR_SEARCHABLE | ATTR_SIMPLE): raise ValueError, "Unsupported attempt to convert attribute '%s' " \ "between ATTR_SIMPLE and ATTR_SEARCHABLE" % attr_name if attr_name not in cur_type_attrs or cur_type_attrs[attr_name] != attr_defn: # There is a new attribute specified for this type, or an # existing one has changed. new_attrs[attr_name] = attr_defn changed = True if attr_flags & ATTR_SEARCHABLE: # New attribute isn't simple, needs to alter table. 
table_needs_rebuild = True elif attr_flags & ATTR_INVERTED_INDEX: # TODO: there is no need to rebuild the table when adding/modifying # an ATTR_SIMPLE | ATTR_INVERTED_INDEX attribute, we just need to # recreate the delete trigger (and remove any rows from the # inverted index's map for this object type if we're removing # an association with that ivtidx). For now we will force a # rebuild since I'm too lazy to implement the proper way. table_needs_rebuild = True if attr_name in cur_type_attrs and not cur_type_attrs[attr_name][1] & ATTR_INVERTED_INDEX: # FIXME: if we add an inverted index to an existing attribute, we'd # need to reparse that attribute in all rows to populate the inverted # map. Right now just log a warning. log.warning("Adding inverted index '%s' to existing attribute '%s' not fully " \ "implemented; index may be out of sync.", attr_ivtidx, attr_name) if not changed: return # Update the attr list to merge both existing and new attributes. attrs = cur_type_attrs.copy() attrs.update(new_attrs) new_indexes = set(indexes).difference(cur_type_idx) indexes = set(indexes).union(cur_type_idx) self._register_check_indexes(indexes, attrs) if not table_needs_rebuild: # Only simple (i.e. pickled only) attributes are being added, # or only new indexes are added, so we don't need to rebuild the # table. if len(new_attrs): self._db_query("UPDATE types SET attrs_pickle=? WHERE id=?", (buffer(cPickle.dumps(attrs, 2)), cur_type_id)) if len(new_indexes): self._register_create_multi_indexes(new_indexes, table_name) self._db_query("UPDATE types SET idx_pickle=? WHERE id=?", (buffer(cPickle.dumps(indexes, 2)), cur_type_id)) self.commit() self._load_object_types() return # We need to update the database now ... else: # New type definition. Populate attrs with required internal # attributes so they get created with the table. new_attrs = cur_type_id = None # Merge standard attributes with user attributes for this new type. attrs.update({ 'id': (int, ATTR_SEARCHABLE, None, None), 'parent_type': (int, ATTR_SEARCHABLE, None, None), 'parent_id': (int, ATTR_SEARCHABLE, None, None), 'pickle': (buffer, ATTR_SEARCHABLE, None, None) }) self._register_check_indexes(indexes, attrs) create_stmt = 'CREATE TABLE %s_tmp (' % table_name # Iterate through type attributes and append to SQL create statement. sql_types = {int: 'INTEGER', float: 'FLOAT', buffer: 'BLOB', unicode: 'TEXT', str: 'BLOB', bool: 'INTEGER'} for attr_name, (attr_type, attr_flags, attr_ivtidx, attr_split) in attrs.items(): if attr_flags & ATTR_SEARCHABLE: # Attribute needs to be a column in the table, not a pickled value. if attr_type not in sql_types: raise ValueError, "Type '%s' not supported" % str(attr_type) create_stmt += '%s %s' % (attr_name, sql_types[attr_type]) if attr_name == 'id': # Special case, these are auto-incrementing primary keys create_stmt += ' PRIMARY KEY AUTOINCREMENT' create_stmt += ',' create_stmt = create_stmt.rstrip(',') + ')' self._db_query(create_stmt) # Add this type to the types table, including the attributes # dictionary. self._db_query('INSERT OR REPLACE INTO types VALUES(?, ?, ?, ?)', (cur_type_id, type_name, buffer(cPickle.dumps(attrs, 2)), buffer(cPickle.dumps(indexes, 2)))) # Sync self._object_types with the object type definition we just # stored to the db. self._load_object_types() if new_attrs: # Migrate rows from old table to new temporary one. Here we copy only # ATTR_SEARCHABLE columns that exist in both old and new definitions. 
columns = filter(lambda x: cur_type_attrs[x][1] & ATTR_SEARCHABLE and \ x in attrs and attrs[x][1] & ATTR_SEARCHABLE, cur_type_attrs.keys()) columns = ','.join(columns) self._db_query('INSERT INTO %s_tmp (%s) SELECT %s FROM %s' % \ (table_name, columns, columns, table_name)) # Delete old table. self._db_query('DROP TABLE %s' % table_name) # Rename temporary table. self._db_query('ALTER TABLE %s_tmp RENAME TO %s' % (table_name, table_name)) # Create a trigger that reduces the objectcount for each applicable # inverted index when a row is deleted. inverted_indexes = self._get_type_inverted_indexes(type_name) if inverted_indexes: sql = 'CREATE TRIGGER delete_object_%s DELETE ON %s BEGIN ' % (type_name, table_name) for idx_name in inverted_indexes: sql += "UPDATE inverted_indexes SET value=value-1 WHERE name='%s' AND attr='objectcount';" % idx_name sql += 'END' self._db_query(sql) # Create index for locating all objects under a given parent. self._db_query("CREATE INDEX %s_parent_idx on %s (parent_id, "\ "parent_type)" % (table_name, table_name)) # If any of these attributes need to be indexed, create the index # for that column. for attr_name, (attr_type, attr_flags, attr_ivtidx, attr_split) in attrs.items(): if attr_flags & ATTR_INDEXED: self._db_query("CREATE INDEX %s_%s_idx ON %s (%s)" % \ (table_name, attr_name, table_name, attr_name)) # Create multi-column indexes; indexes value has already been verified. self._register_create_multi_indexes(indexes, table_name) self.commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def register_object_type_attrs(self, type_name, indexes = [], **attrs):\n if len(indexes) == len(attrs) == 0:\n raise ValueError(\"Must specify indexes or attributes for object type\")\n\n table_name = \"objects_%s\" % type_name\n\n # First pass over the attributes kwargs, sanity-checking provided values.\n for attr_name, attr_defn in attrs.items():\n # We allow attribute definition to be either a 2- to 4-tuple (last two\n # are optional), so pad the tuple with None if a 2- or 3-tuple was specified.\n attrs[attr_name] = attr_defn = tuple(attr_defn) + (None,) * (4-len(attr_defn))\n if len(attr_defn) != 4:\n raise ValueError(\"Definition for attribute '%s' is not a 2- to 4-tuple.\" % attr_name)\n\n # Verify the attribute flags contain either ATTR_SEARCHABLE or ATTR_SIMPLE;\n # it can't contain both as that doesn't make sense.\n if attr_defn[1] & (ATTR_SIMPLE | ATTR_SEARCHABLE) not in (ATTR_SIMPLE, ATTR_SEARCHABLE):\n raise ValueError(\"Flags for attribute '%s' must contain exactly one \" \\\n \"of ATTR_SIMPLE or ATTR_SEARCHABLE\" % attr_name)\n\n # Attribute name can't conflict with reserved names.\n if attr_name in RESERVED_ATTRIBUTES:\n raise ValueError(\"Attribute name '%s' is reserved.\" % attr_name)\n elif attr_name in self._inverted_indexes:\n if not attr_defn[1] & ATTR_INVERTED_INDEX or attr_defn[2] != attr_name:\n # Attributes can be named after inverted indexes, but only if\n # ATTR_INVERTED_INDEX is specified and the attribute name is the\n # same as its ivtidx name.\n raise ValueError(\"Attribute '%s' conflicts with inverted index of same name, \" \\\n \"but ATTR_INVERTED_INDEX not specified in flags.\" % attr_name)\n\n if attr_defn[1] & ATTR_INVERTED_INDEX:\n # Attributes with ATTR_INVERTED_INDEX can only be certain types.\n if attr_defn[0] not in (str, bytes, tuple, list, set):\n raise TypeError(\"Type for attribute '%s' must be string, bytes, list, tuple, or set \" \\\n \"because it is ATTR_INVERTED_INDEX\" % attr_name)\n\n # Make sure inverted index name is valid.\n if attr_defn[2] is None:\n raise ValueError(\"Attribute '%s' flags specify inverted index, \" \\\n \"but no inverted index name supplied.\" % attr_name)\n elif attr_defn[2] not in self._inverted_indexes:\n raise ValueError(\"Attribute '%s' specifies undefined interverted index '%s'\" % \\\n (attr_name, attr_defn[2]))\n\n # Compile split regexp if it was given.\n if attr_defn[3] is not None and not callable(attr_defn[3]):\n attrs[attr_name] = attr_defn[:3] + (re.compile(attr_defn[3]),)\n\n\n if type_name in self._object_types:\n # This type already exists. Compare given attributes with\n # existing attributes for this type to see what needs to be done\n # (if anything).\n cur_type_id, cur_type_attrs, cur_type_idx = self._object_types[type_name]\n new_attrs = {}\n table_needs_rebuild = False\n changed = False\n for attr_name, attr_defn in attrs.items():\n attr_type, attr_flags, attr_ivtidx, attr_split = attr_defn\n # TODO: converting an attribute from SIMPLE to SEARCHABLE or vice\n # versa isn't supported yet. 
Raise exception here to prevent\n # potential data loss.\n if attr_name in cur_type_attrs and attr_flags & (ATTR_SEARCHABLE | ATTR_SIMPLE) != \\\n cur_type_attrs[attr_name][1] & (ATTR_SEARCHABLE | ATTR_SIMPLE):\n raise ValueError(\"Unsupported attempt to convert attribute '%s' \" \\\n \"between ATTR_SIMPLE and ATTR_SEARCHABLE\" % attr_name)\n\n if attr_name not in cur_type_attrs or cur_type_attrs[attr_name] != attr_defn:\n # There is a new attribute specified for this type, or an\n # existing one has changed.\n new_attrs[attr_name] = attr_defn\n changed = True\n if attr_flags & ATTR_SEARCHABLE:\n # New attribute isn't simple, needs to alter table.\n table_needs_rebuild = True\n elif attr_flags & ATTR_INVERTED_INDEX:\n # TODO: there is no need to rebuild the table when adding/modifying\n # an ATTR_SIMPLE | ATTR_INVERTED_INDEX attribute, we just need to\n # recreate the delete trigger (and remove any rows from the\n # inverted index's map for this object type if we're removing\n # an association with that ivtidx). For now we will force a\n # rebuild since I'm too lazy to implement the proper way.\n table_needs_rebuild = True\n\n if attr_name in cur_type_attrs and not cur_type_attrs[attr_name][1] & ATTR_INVERTED_INDEX:\n # FIXME: if we add an inverted index to an existing attribute, we'd\n # need to reparse that attribute in all rows to populate the inverted\n # map. Right now just log a warning.\n log.warning(\"Adding inverted index '%s' to existing attribute '%s' not fully \" \\\n \"implemented; index may be out of sync.\", attr_ivtidx, attr_name)\n\n if not changed:\n return\n if self._readonly:\n raise DatabaseReadOnlyError('upgrade_to_py3() must be called before database can be modified')\n\n # Update the attr list to merge both existing and new attributes.\n attrs = cur_type_attrs.copy()\n attrs.update(new_attrs)\n new_indexes = set(indexes).difference(cur_type_idx)\n indexes = set(indexes).union(cur_type_idx)\n self._register_check_indexes(indexes, attrs)\n\n if not table_needs_rebuild:\n # Only simple (i.e. pickled only) attributes are being added,\n # or only new indexes are added, so we don't need to rebuild the\n # table.\n if len(new_attrs):\n self._db_query(\"UPDATE types SET attrs_pickle=? WHERE id=?\", (self._pickle(attrs), cur_type_id))\n\n if len(new_indexes):\n self._register_create_multi_indexes(new_indexes, table_name)\n self._db_query(\"UPDATE types SET idx_pickle=? WHERE id=?\", (self._pickle(indexes), cur_type_id))\n\n self.commit()\n self._load_object_types()\n return\n\n # We need to update the database now ...\n\n else:\n # New type definition. 
Populate attrs with required internal\n # attributes so they get created with the table.\n\n new_attrs = cur_type_id = None\n # Merge standard attributes with user attributes for this new type.\n attrs.update({\n 'id': (int, ATTR_SEARCHABLE, None, None),\n 'parent_type': (int, ATTR_SEARCHABLE, None, None),\n 'parent_id': (int, ATTR_SEARCHABLE, None, None),\n 'pickle': (bytes, ATTR_SEARCHABLE, None, None)\n })\n self._register_check_indexes(indexes, attrs)\n\n create_stmt = 'CREATE TABLE %s_tmp (' % table_name\n\n # Iterate through type attributes and append to SQL create statement.\n sql_types = {int: 'INTEGER', float: 'FLOAT', bytes: 'BLOB',\n str: 'TEXT', bool: 'INTEGER', str: 'TEXT'}\n for attr_name, (attr_type, attr_flags, attr_ivtidx, attr_split) in attrs.items():\n if attr_flags & ATTR_SEARCHABLE:\n # Attribute needs to be a column in the table, not a pickled value.\n if attr_type not in sql_types:\n raise ValueError(\"Type '%s' not supported\" % str(attr_type))\n create_stmt += '%s %s' % (attr_name, sql_types[attr_type])\n if attr_name == 'id':\n # Special case, these are auto-incrementing primary keys\n create_stmt += ' PRIMARY KEY AUTOINCREMENT'\n create_stmt += ','\n\n create_stmt = create_stmt.rstrip(',') + ')'\n self._db_query(create_stmt)\n\n\n # Add this type to the types table, including the attributes\n # dictionary.\n self._db_query('INSERT OR REPLACE INTO types VALUES(?, ?, ?, ?)',\n (cur_type_id, type_name, self._pickle(attrs), self._pickle(indexes)))\n\n # Sync self._object_types with the object type definition we just\n # stored to the db.\n self._load_object_types()\n\n if new_attrs:\n # Migrate rows from old table to new temporary one. Here we copy only\n # ATTR_SEARCHABLE columns that exist in both old and new definitions.\n columns = filter(lambda x: cur_type_attrs[x][1] & ATTR_SEARCHABLE and \\\n x in attrs and attrs[x][1] & ATTR_SEARCHABLE, cur_type_attrs.keys())\n columns = ','.join(columns)\n self._db_query('INSERT INTO %s_tmp (%s) SELECT %s FROM %s' % \\\n (table_name, columns, columns, table_name))\n\n # Delete old table.\n self._db_query('DROP TABLE %s' % table_name)\n\n # Rename temporary table.\n self._db_query('ALTER TABLE %s_tmp RENAME TO %s' % (table_name, table_name))\n\n # Increase the objectcount for new inverted indexes, and create a\n # trigger that reduces the objectcount for each applicable inverted\n # index when a row is deleted.\n inverted_indexes = self._get_type_inverted_indexes(type_name)\n if inverted_indexes:\n n_rows = self._db_query_row('SELECT COUNT(*) FROM %s' % table_name)[0]\n sql = 'CREATE TRIGGER delete_object_%s DELETE ON %s BEGIN ' % (type_name, table_name)\n for idx_name in inverted_indexes:\n sql += \"UPDATE inverted_indexes SET value=MAX(0, value-1) WHERE name='%s' AND attr='objectcount';\" % idx_name\n # Add to objectcount (both in db and cached value)\n self._db_query(\"UPDATE inverted_indexes SET value=value+? WHERE name=? 
and attr='objectcount'\",\n (n_rows, idx_name))\n self._inverted_indexes[idx_name]['objectcount'] += n_rows\n sql += 'END'\n self._db_query(sql)\n\n # Create index for locating all objects under a given parent.\n self._db_query(\"CREATE INDEX %s_parent_idx on %s (parent_id, \"\\\n \"parent_type)\" % (table_name, table_name))\n\n # If any of these attributes need to be indexed, create the index\n # for that column.\n for attr_name, (attr_type, attr_flags, attr_ivtidx, attr_split) in attrs.items():\n if attr_flags & ATTR_INDEXED:\n self._db_query(\"CREATE INDEX %s_%s_idx ON %s (%s)\" % \\\n (table_name, attr_name, table_name, attr_name))\n\n # Create multi-column indexes; indexes value has already been verified.\n self._register_create_multi_indexes(indexes, table_name)\n self.commit()", "def add(self, object_type, parent=None, **attrs):\n if self._readonly:\n raise DatabaseReadOnlyError('upgrade_to_py3() must be called before database can be modified')\n\n type_attrs = self._get_type_attrs(object_type)\n if parent:\n attrs['parent_type'], attrs['parent_id'] = self._to_obj_tuple(parent, numeric=True)\n\n # Increment objectcount for the applicable inverted indexes.\n inverted_indexes = self._get_type_inverted_indexes(object_type)\n if inverted_indexes:\n self._db_query(\"UPDATE inverted_indexes SET value=value+1 WHERE attr='objectcount' AND name IN %s\" % \\\n _list_to_printable(inverted_indexes))\n\n\n # Process inverted index maps for this row\n ivtidx_terms = []\n for ivtidx in inverted_indexes:\n # Sync cached objectcount with the DB (that we just updated above)\n self._inverted_indexes[ivtidx]['objectcount'] += 1\n terms_list = []\n split = self._inverted_indexes[ivtidx]['split']\n for name, (attr_type, flags, attr_ivtidx, attr_split) in type_attrs.items():\n if attr_ivtidx == ivtidx and name in attrs:\n terms_list.append((attrs[name], 1.0, attr_split or split, ivtidx))\n\n if ivtidx in attrs and ivtidx not in type_attrs:\n # Attribute named after an inverted index is given in kwagrs,\n # but that ivtidx is not a registered attribute (which would be\n # handled in the for loop just above).\n terms_list.append((attrs[ivtidx], 1.0, split, ivtidx))\n\n terms = self._score_terms(terms_list)\n if terms:\n ivtidx_terms.append((ivtidx, terms))\n # If there are no terms for this ivtidx, we don't bother storing\n # an empty list in the pickle.\n if ivtidx in type_attrs:\n # Registered attribute named after ivtidx; store ivtidx\n # terms in object.\n attrs[ivtidx] = list(terms.keys())\n\n query, values = self._make_query_from_attrs(\"add\", attrs, object_type)\n self._db_query(query, values)\n\n # Add id given by db, as well as object type.\n attrs['id'] = self._cursor.lastrowid\n attrs['type'] = str(object_type)\n attrs['parent'] = self._to_obj_tuple(parent) if parent else (None, None)\n\n for ivtidx, terms in ivtidx_terms:\n self._add_object_inverted_index_terms((object_type, attrs['id']), ivtidx, terms)\n\n # Populate dictionary with keys for this object type not specified in kwargs.\n attrs.update(dict.fromkeys([k for k in type_attrs if k not in list(attrs.keys()) + ['pickle']]))\n\n self._set_dirty()\n return ObjectRow(None, None, attrs)", "def add(self, object_type, parent = None, **attrs):\n type_attrs = self._get_type_attrs(object_type)\n if parent:\n attrs[\"parent_type\"] = self._get_type_id(parent[0])\n attrs[\"parent_id\"] = parent[1]\n\n # Increment objectcount for the applicable inverted indexes.\n inverted_indexes = self._get_type_inverted_indexes(object_type)\n if 
inverted_indexes:\n self._db_query(\"UPDATE inverted_indexes SET value=value+1 WHERE attr='objectcount' AND name IN %s\" % \\\n _list_to_printable(inverted_indexes))\n\n\n # Process inverted index maps for this row\n ivtidx_terms = []\n for ivtidx in inverted_indexes:\n # Sync cached objectcount with the DB (that we just updated above)\n self._inverted_indexes[ivtidx]['objectcount'] += 1\n terms_list = []\n split = self._inverted_indexes[ivtidx]['split']\n for name, (attr_type, flags, attr_ivtidx, attr_split) in type_attrs.items():\n if attr_ivtidx == ivtidx and name in attrs:\n terms_list.append((attrs[name], 1.0, attr_split or split, ivtidx))\n\n if ivtidx in attrs and ivtidx not in type_attrs:\n # Attribute named after an inverted index is given in kwagrs,\n # but that ivtidx is not a registered attribute (which would be\n # handled in the for loop just above).\n terms_list.append((attrs[ivtidx], 1.0, split, ivtidx))\n\n terms = self._score_terms(terms_list)\n ivtidx_terms.append((ivtidx, terms))\n if ivtidx in type_attrs:\n # Registered attribute named after ivtidx; store ivtidx\n # terms in object.\n attrs[ivtidx] = terms.keys()\n\n query, values = self._make_query_from_attrs(\"add\", attrs, object_type)\n self._db_query(query, values)\n\n # Add id given by db, as well as object type.\n attrs[\"id\"] = self._cursor.lastrowid\n attrs[\"type\"] = unicode(object_type)\n if parent:\n attrs['parent'] = (attrs['parent_type'], attrs['parent_id'])\n else:\n attrs['parent'] = (None, None)\n\n for ivtidx, terms in ivtidx_terms:\n self._add_object_inverted_index_terms((object_type, attrs['id']), ivtidx, terms)\n\n # Populate dictionary with keys for this object type not specified in kwargs.\n attrs.update(dict.fromkeys([k for k in type_attrs if k not in attrs.keys() + ['pickle']]))\n\n return ObjectRow(None, None, attrs)", "def update(cls, type_obj, name):\n cls.validate_name(name)\n type_obj.name = name\n DB.session.add(type_obj)\n DB.session.commit()\n return type_obj", "def retype(self, name, **attributes):\r\n self._retype_dictionary[name] = attributes", "def contribute_to_class(self, cls):\n if self.db_index:\n new_index = (self.name,)\n if new_index not in cls._meta.indexes:\n cls._meta.indexes = tuple(list(cls._meta.indexes) + [new_index])", "def update(self, obj, parent=None, **attrs):\n if self._readonly:\n raise DatabaseReadOnlyError('upgrade_to_py3() must be called before database can be modified')\n object_type, object_id = self._to_obj_tuple(obj)\n\n type_attrs = self._get_type_attrs(object_type)\n get_pickle = False\n\n # Determine which inverted indexes need to be regenerated for this\n # object. 
Builds a dictionary of ivtidxes with a dirty flag and\n # a list of sql columns needed for reindexing.\n ivtidx_columns = {}\n for name, (attr_type, flags, attr_ivtidx, attr_split) in type_attrs.items():\n if flags & ATTR_INVERTED_INDEX:\n if attr_ivtidx not in ivtidx_columns:\n ivtidx_columns[attr_ivtidx] = [ False, [] ]\n if flags & ATTR_SEARCHABLE:\n ivtidx_columns[attr_ivtidx][1].append(name)\n if flags & (ATTR_SIMPLE | ATTR_IGNORE_CASE):\n get_pickle = True\n if name in attrs:\n ivtidx_columns[attr_ivtidx][0] = True\n\n # If the updated attribute is stored in the pickle (either a simple attr\n # or an case-insensitive indexed attr in which __foo is in the pickle)\n # then we must first retrieve the pickle for this object from the db.\n if (flags & ATTR_SIMPLE or flags & ATTR_INDEXED_IGNORE_CASE == ATTR_INDEXED_IGNORE_CASE) and \\\n name in attrs:\n get_pickle = True\n\n # TODO: if ObjectRow is supplied, don't need to fetch columns\n # that are available in the ObjectRow. (Of course this assumes\n # the object wasn't changed via elsewhere during the life of the\n # ObjectRow object, so maybe we don't want to do that.)\n reqd_columns = ['pickle'] if get_pickle else []\n for dirty, searchable_attrs in ivtidx_columns.values():\n if dirty:\n reqd_columns.extend(searchable_attrs)\n\n if reqd_columns:\n q = 'SELECT %s FROM objects_%s WHERE id=?' % (','.join(reqd_columns), object_type)\n row = self._db_query_row(q, (object_id,))\n if not row:\n raise ValueError(\"Can't update unknown object (%s, %d)\" % (object_type, object_id))\n if reqd_columns[0] == 'pickle' and row[0]:\n # One of the attrs we're updating is in the pickle, so we\n # have fetched it; now convert it to a dict.\n row_attrs = self._unpickle(row[0])\n for key, value in row_attrs.items():\n # Rename all __foo to foo for ATTR_IGNORE_CASE columns\n if key.startswith('__') and type_attrs[key[2:]][1] & ATTR_IGNORE_CASE:\n row_attrs[key[2:]] = value\n del row_attrs[key]\n # Update stored pickle data with new ATTR_SIMPLE attribute values\n row_attrs.update(attrs)\n attrs = row_attrs\n\n\n if parent:\n attrs['parent_type'], attrs['parent_id'] = self._to_obj_tuple(parent, numeric=True)\n attrs['id'] = object_id\n # Make copy of attrs for later query, since we're now about to mess with it.\n orig_attrs = attrs.copy()\n\n # Merge the ivtidx columns we grabbed above into attrs dict.\n for n, name in enumerate(reqd_columns):\n if name not in attrs and name != 'pickle':\n attrs[name] = row[n]\n\n for ivtidx, (dirty, searchable_attrs) in ivtidx_columns.items():\n if not dirty:\n # No attribute for this ivtidx changed.\n continue\n split = self._inverted_indexes[ivtidx]['split']\n # Remove existing indexed words for this object.\n self._delete_object_inverted_index_terms((object_type, object_id), ivtidx)\n\n # TODO: code duplication from add()\n # Need to reindex all columns in this object using this ivtidx.\n terms_list = []\n for name, (attr_type, flags, attr_ivtidx, attr_split) in type_attrs.items():\n if attr_ivtidx == ivtidx and name in attrs:\n if attr_type == BYTES_TYPE and isinstance(attrs[name], RAW_TYPE):\n # We store string objects in the db as buffers, in\n # order to prevent any unicode issues. 
So we need\n # to convert the buffer we got from the db back to\n # a string before parsing the attribute into terms.\n attrs[name] = BYTES_TYPE(attrs[name])\n terms_list.append((attrs[name], 1.0, attr_split or split, ivtidx))\n\n if ivtidx in attrs and ivtidx not in type_attrs:\n # Attribute named after an inverted index is given, but\n # that ivtidx is not a named attribute (which would be handled\n # in the for loop just above).\n terms_list.append((attrs[ivtidx], 1.0, split, ivtidx))\n\n terms = self._score_terms(terms_list)\n self._add_object_inverted_index_terms((object_type, object_id), ivtidx, terms)\n if ivtidx in type_attrs:\n # Registered attribute named after ivtidx; store ivtidx\n # terms in object.\n if not terms and ivtidx in orig_attrs:\n # Update removed all terms for this ivtidx, remove from pickle.\n orig_attrs[ivtidx] = None\n elif terms:\n # There are terms for this ivtidx, store in pickle.\n orig_attrs[ivtidx] = list(terms.keys())\n\n query, values = self._make_query_from_attrs(\"update\", orig_attrs, object_type)\n self._db_query(query, values)\n self._set_dirty()\n # TODO: if an objectrow was given, return an updated objectrow", "def add_attribute(self, attr_type, name, components):\n self.attributes[attr_type] = {\"name\": name, \"components\": components}", "def update(self, obj, parent=None, **attrs):\n if isinstance(obj, ObjectRow):\n object_type, object_id = obj['type'], obj['id']\n else:\n object_type, object_id = obj\n\n type_attrs = self._get_type_attrs(object_type)\n get_pickle = False\n\n # Determine which inverted indexes need to be regenerated for this\n # object. Builds a dictionary of ivtidxes with a dirty flag and\n # a list of sql columns needed for reindexing.\n ivtidx_columns = {}\n for name, (attr_type, flags, attr_ivtidx, attr_split) in type_attrs.items():\n if flags & ATTR_INVERTED_INDEX:\n if attr_ivtidx not in ivtidx_columns:\n ivtidx_columns[attr_ivtidx] = [ False, [] ]\n if flags & ATTR_SEARCHABLE:\n ivtidx_columns[attr_ivtidx][1].append(name)\n if flags & (ATTR_SIMPLE | ATTR_IGNORE_CASE):\n get_pickle = True\n if name in attrs:\n ivtidx_columns[attr_ivtidx][0] = True\n\n if flags & ATTR_SIMPLE and name in attrs:\n # Simple attribute needs pickle\n get_pickle = True\n\n # TODO: if ObjectRow is supplied, don't need to fetch columns\n # that are available in the ObjectRow. (Of course this assumes\n # the object wasn't changed via elsewhere during the life of the\n # ObjectRow object, so maybe we don't want to do that.)\n reqd_columns = ['pickle'] if get_pickle else []\n for dirty, searchable_attrs in ivtidx_columns.values():\n if dirty:\n reqd_columns.extend(searchable_attrs)\n\n if reqd_columns:\n q = 'SELECT %s FROM objects_%s WHERE id=?' 
% (','.join(reqd_columns), object_type)\n row = self._db_query_row(q, (object_id,))\n if not row:\n raise ValueError, \"Can't update unknown object (%s, %d)\" % (object_type, object_id)\n if reqd_columns[0] == 'pickle' and row[0]:\n # One of the attrs we're updating is in the pickle, so we\n # have fetched it; now convert it to a dict.\n row_attrs = cPickle.loads(str(row[0]))\n for key, value in row_attrs.items():\n # Rename all __foo to foo for ATTR_IGNORE_CASE columns\n if key.startswith('__') and type_attrs[key[2:]][1] & ATTR_IGNORE_CASE:\n row_attrs[key[2:]] = value\n del row_attrs[key]\n # Update stored pickle data with new ATTR_SIMPLE attribute values\n row_attrs.update(attrs)\n attrs = row_attrs\n\n if isinstance(parent, ObjectRow):\n attrs['parent_type'], attrs['parent_id'] = parent['type'], parent['id']\n elif parent:\n attrs['parent_type'], attrs['parent_id'] = self._get_type_id(parent[0]), parent[1]\n\n attrs['id'] = object_id\n # Make copy of attrs for later query, since we're now about to mess with it.\n orig_attrs = attrs.copy()\n\n # Merge the ivtidx columns we grabbed above into attrs dict.\n for n, name in enumerate(reqd_columns):\n if name not in attrs and name != 'pickle':\n attrs[name] = row[n]\n\n for ivtidx, (dirty, searchable_attrs) in ivtidx_columns.items():\n if not dirty:\n # No attribute for this ivtidx changed.\n continue\n split = self._inverted_indexes[ivtidx]['split']\n # Remove existing indexed words for this object.\n self._delete_object_inverted_index_terms((object_type, object_id), ivtidx)\n\n # FIXME: code duplication from add()\n # Need to reindex all columns in this object using this ivtidx.\n terms_list = []\n for name, (attr_type, flags, attr_ivtidx, attr_split) in type_attrs.items():\n if attr_ivtidx == ivtidx and name in attrs:\n if attr_type == str and type(attrs[name]) == buffer:\n # We store string objects in the db as buffers, in\n # order to prevent any unicode issues. 
So we need\n # to convert the buffer we got from the db back to\n # a string before parsing the attribute into terms.\n attrs[name] = str(attrs[name])\n terms_list.append((attrs[name], 1.0, attr_split or split, ivtidx))\n\n if ivtidx in attrs and ivtidx not in type_attrs:\n # Attribute named after an inverted index is given, but\n # that ivtidx is not a named attribute (which would be handled\n # in the for loop just above).\n terms_list.append((attrs[ivtidx], 1.0, split, ivtidx))\n\n terms = self._score_terms(terms_list)\n self._add_object_inverted_index_terms((object_type, object_id), ivtidx, terms)\n if ivtidx in type_attrs:\n # Registered attribute named after ivtidx; store ivtidx\n # terms in object.\n orig_attrs[ivtidx] = terms.keys()\n\n query, values = self._make_query_from_attrs(\"update\", orig_attrs, object_type)\n self._db_query(query, values)", "def _updateOwner(self, index=None, register=True):\n if not index is None:\n if not index in self._items:\n raise ValueError, \\\n \"Attribute %s is not known to %s\" % (index, self)\n indexes = [ index ]\n else:\n indexes = self.names\n\n ownerdict = self.owner.__dict__\n selfdict = self.__dict__\n owner_known = ownerdict['_known_attribs']\n for index_ in indexes:\n if register:\n if index_ in ownerdict:\n raise RuntimeError, \\\n \"Cannot register attribute %s within %s \" % \\\n (index_, self.owner) + \"since it has one already\"\n ownerdict[index_] = self._items[index_]\n if index_ in selfdict:\n raise RuntimeError, \\\n \"Cannot register attribute %s within %s \" % \\\n (index_, self) + \"since it has one already\"\n selfdict[index_] = self._items[index_]\n owner_known[index_] = self.__name\n else:\n if index_ in ownerdict:\n # yoh doesn't think that we need to complain if False\n ownerdict.pop(index_)\n owner_known.pop(index_)\n if index_ in selfdict:\n selfdict.pop(index_)", "def define_attribute(self, name, atype, data=None):\n self.attributes.append(name)\n self.attribute_types[name] = atype\n self.attribute_data[name] = data", "def extend_type(self, type_name, instances):\n if type_name not in self._symtab:\n self._symtab[type_name] = symbol.SymbolTable()\n for entity_name in instances:\n self._symtab[type_name].insert(entity_name)", "def register_inverted_index(self, name, min = None, max = None, split = None, ignore = None):\n # Verify specified name doesn't already exist as some object attribute.\n for object_name, object_type in self._object_types.items():\n if name in object_type[1] and name != object_type[1][name][2]:\n raise ValueError(\"Inverted index name '%s' conflicts with registered attribute in object '%s'\" % \\\n (name, object_name))\n\n if split is None:\n # Default split regexp is to split words on\n # alphanumeric/digits/underscore boundaries.\n split = re.compile(u\"(\\d+)|[_\\W]\", re.U)\n elif isinstance(split, str):\n split = re.compile(tostr(split), re.U)\n\n if name not in self._inverted_indexes and not self._readonly:\n self._db_query('INSERT INTO inverted_indexes VALUES(?, \"objectcount\", 0)', (name,))\n # Create the tables needed by the inverted index.\n with self._lock:\n self._db.executescript(CREATE_IVTIDX_TEMPLATE.replace('%IDXNAME%', name))\n elif name in self._inverted_indexes:\n defn = self._inverted_indexes[name]\n if min == defn['min'] and max == defn['max'] and split == defn['split'] and \\\n ignore == defn['ignore']:\n # Definition unchanged, nothing to do.\n return\n\n if self._readonly:\n raise DatabaseReadOnlyError('upgrade_to_py3() must be called before database can be modified')\n\n 
defn = {\n 'min': min,\n 'max': max,\n 'split': split,\n 'ignore': ignore,\n }\n\n self._db_query(\"INSERT OR REPLACE INTO inverted_indexes VALUES(?, 'definition', ?)\",\n (name, self._pickle(defn)))\n\n defn['objectcount'] = 0\n self._inverted_indexes[name] = defn\n self.commit()", "def register_inverted_index(self, name, min = None, max = None, split = None, ignore = None):\n # Verify specified name doesn't already exist as some object attribute.\n for object_name, object_type in self._object_types.items():\n if name in object_type[1] and name != object_type[1][name][2]:\n raise ValueError, \"Inverted index name '%s' conflicts with registered attribute in object '%s'\" % \\\n (name, object_name)\n\n if split is None:\n # Default split regexp is to split words on\n # alphanumeric/digits/underscore boundaries.\n split = re.compile(\"[\\W_\\d]+\", re.U)\n elif isinstance(split, basestring):\n split = re.compile(split, re.U)\n\n if name not in self._inverted_indexes:\n self._db_query('INSERT INTO inverted_indexes VALUES(?, \"objectcount\", 0)', (name,))\n # Create the tables needed by the inverted index.\n self._lock.acquire()\n self._db.executescript(CREATE_IVTIDX_TEMPLATE.replace('%IDXNAME%', name))\n self._lock.release()\n else:\n defn = self._inverted_indexes[name]\n if min == defn['min'] and max == defn['max'] and split == defn['split'] and \\\n ignore == defn['ignore']:\n # Definition unchanged, nothing to do.\n return\n\n defn = {\n 'min': min,\n 'max': max,\n 'split': split,\n 'ignore': ignore,\n }\n\n self._db_query(\"INSERT OR REPLACE INTO inverted_indexes VALUES(?, 'definition', ?)\",\n (name, buffer(cPickle.dumps(defn, 2))))\n\n defn['objectcount'] = 0\n self._inverted_indexes[name] = defn", "def addType(self, name):\n setattr(self, name, name)\n self._type_names[name] = name\n if name in self._pending_type_names:\n del self._pending_type_names[name]", "def _alter_table(self, names, types) :\n\n cur = self.con.cursor()\n for i in range(min(len(names), len(types))) :\n alter_sql = 'ALTER TABLE \"%s\" ADD COLUMN \"%s\" %s' % (self.name, names[i], types[i])\n cur.execute(alter_sql)", "def set_index(self, idx, rel, attrs):\n\n query = 'CREATE INDEX {} ON {} ({})'.format(idx, rel, ','.join(attrs))\n\n with self.tpch_cxn.cursor() as curs:\n try:\n curs.execute(query)\n except pg.ProgrammingError as e:\n print(e)", "async def add_metadata(dbcon: DBConnection, object_type: str, object_id: int, metadict: Dict[str, str]):\n\n async def _run(cur: Cursor) -> None:\n q = \"\"\"insert into object_metadata (object_type, object_id, `key`, value) values (%s, %s, %s, %s)\"\"\"\n for key, value in metadict.items():\n q_args = (object_type, object_id, str(key), str(value))\n await cur.execute(q, q_args)\n\n await dbcon.transact(_run)", "def register_orm_base(self, base):\n for model in utils.searchable_sqlalchemy_models(base):\n self.register_type(model.es_type_name, model.es_properties, model)", "def batch_add(self, *args, **kwargs):\n new_attrobjs = []\n strattr = kwargs.get(\"strattr\", False)\n for tup in args:\n if not is_iter(tup) or len(tup) < 2:\n raise RuntimeError(\"batch_add requires iterables as arguments (got %r).\" % tup)\n ntup = len(tup)\n keystr = str(tup[0]).strip().lower()\n new_value = tup[1]\n category = str(tup[2]).strip().lower() if ntup > 2 and tup[2] is not None else None\n lockstring = tup[3] if ntup > 3 else \"\"\n\n attr_objs = self._getcache(keystr, category)\n\n if attr_objs:\n attr_obj = attr_objs[0]\n # update an existing attribute object\n attr_obj.db_category = 
category\n attr_obj.db_lock_storage = lockstring or \"\"\n attr_obj.save(update_fields=[\"db_category\", \"db_lock_storage\"])\n if strattr:\n # store as a simple string (will not notify OOB handlers)\n attr_obj.db_strvalue = new_value\n attr_obj.save(update_fields=[\"db_strvalue\"])\n else:\n # store normally (this will also notify OOB handlers)\n attr_obj.value = new_value\n else:\n # create a new Attribute (no OOB handlers can be notified)\n kwargs = {\n \"db_key\": keystr,\n \"db_category\": category,\n \"db_model\": self._model,\n \"db_attrtype\": self._attrtype,\n \"db_value\": None if strattr else to_pickle(new_value),\n \"db_strvalue\": new_value if strattr else None,\n \"db_lock_storage\": lockstring or \"\",\n }\n new_attr = Attribute(**kwargs)\n new_attr.save()\n new_attrobjs.append(new_attr)\n self._setcache(keystr, category, new_attr)\n if new_attrobjs:\n # Add new objects to m2m field all at once\n getattr(self.obj, self._m2m_fieldname).add(*new_attrobjs)", "def add_type(self, typename, db):\n self._dbs[typename] = db\n return None", "def _initIndexes(self):\n class Record:\n \"\"\" a moron simple object for carrying the 'extra'-payload to index\n constructors\n \"\"\"\n def __init__(self, **kw):\n self.__dict__.update(kw)\n\n addIndex = self.addIndex\n addColumn = self.addColumn\n\n # Content indexes\n self._catalog.indexes.clear()\n for (index_name, index_type, extra) in self.enumerateIndexes():\n if extra is None:\n addIndex( index_name, index_type)\n else:\n if isinstance(extra, StringTypes):\n p = Record(indexed_attrs=extra)\n elif isinstance(extra, DictType):\n p = Record(**extra)\n else:\n p = Record()\n addIndex( index_name, index_type, extra=p )\n\n # Cached metadata\n self._catalog.names = ()\n self._catalog.schema.clear()\n for column_name in self.enumerateColumns():\n addColumn( column_name )", "def _add_type(self, production, index, m_type):\n fully_qualified_name = None\n current_namespace = self._get_current_namespace()\n if current_namespace is not None:\n fully_qualified_name = current_namespace.fully_qualified_name()\n namespace_types = self._get_type_or_namespace_from_fully_qualified_name(fully_qualified_name)\n if m_type.name in namespace_types:\n raise ParseError(self.production_to_coord(production, index),\n \"Name '{0}' already exists\".format(m_type.fully_qualified_name()))\n namespace_types[m_type.name] = m_type", "def addAttr(*args, attributeType: Union[AnyStr, bool]=\"\", binaryTag: Union[AnyStr, bool]=\"\",\n cachedInternally: bool=True, category: Union[AnyStr, List[AnyStr], bool]=\"\",\n dataType: Union[AnyStr, List[AnyStr], bool]=\"\", defaultValue: Union[float,\n bool]=0.0, disconnectBehaviour: Union[int, bool]=0, enumName: Union[AnyStr,\n bool]=\"\", exists: bool=True, fromPlugin: bool=True, hasMaxValue: bool=True,\n hasMinValue: bool=True, hasSoftMaxValue: bool=True, hasSoftMinValue: bool=True,\n hidden: bool=True, indexMatters: bool=True, internalSet: bool=True, keyable:\n bool=True, longName: Union[AnyStr, bool]=\"\", maxValue: Union[float, bool]=0.0,\n minValue: Union[float, bool]=0.0, multi: bool=True, niceName: Union[AnyStr,\n bool]=\"\", numberOfChildren: Union[int, bool]=0, parent: Union[AnyStr, bool]=\"\",\n proxy: Union[AnyStr, bool]=\"\", readable: bool=True, shortName: Union[AnyStr,\n bool]=\"\", softMaxValue: Union[float, bool]=0.0, softMinValue: Union[float,\n bool]=0.0, storable: bool=True, usedAsColor: bool=True, usedAsFilename: bool=True,\n usedAsProxy: bool=True, writable: bool=True, q=True, query=True, e=True, edit=True,\n 
**kwargs)->Union[None, Any]:\n pass", "def retype(self, dictionary):\r\n\r\n for name, retype in dictionary.items():\r\n field = self._field_dict[name]\r\n for key, value in retype.items():\r\n if key in _valid_retype_attributes:\r\n field.__setattr__(key, value)\r\n else:\r\n raise Exception(\"Should not use retype to change field attribute '%s'\", key)", "def registerAgentType(self, agtype, handler, renderfunc, fields):\n iMaxTypeId = 0\n for i in agent_types.keys():\n if agent_types[i]['__TypeId__'] >= iMaxTypeId:\n iMaxTypeId = agent_types[i]['__TypeId__']\n \n \n newobj = {}\n newobj['__minId__'] = 1\n newobj['__maxId__'] = 1 + 99999999\n newobj['__lstDeletedIds__'] = []\n newobj['__TypeId__'] = iMaxTypeId + 1\n newobj['__type__'] = agtype\n newobj['__handler__'] = handler\n newobj['__renderfunc__'] = renderfunc\n \n newobj['__properties__'] = fields\n newobj['__properties__']['id'] = TYPE_INTEGER\n newobj['__properties__']['x'] = TYPE_FLOAT\n newobj['__properties__']['y'] = TYPE_FLOAT\n newobj['__properties__']['size'] = TYPE_INTEGER\n newobj['__properties__']['theta'] = TYPE_FLOAT\n newobj['__properties__']['shape'] = TYPE_INTEGER\n newobj['__properties__']['color'] = TYPE_INTEGER\n \n newobj['__cache__'] = {}\n newobj['__cache__']['__valid__'] = 0\n \n for i in CACHE_PERTYPE:\n newobj['__cache__'][i] = None\n\n for i in newobj['__properties__'].keys():\n newobj['__cache__'][i] = {}\n for j in CACHE_PERFIELD:\n newobj['__cache__'][i][j] = None\n\n agent_types[agtype] = newobj\n # This does not create table in the DB for it\n # is done by the server.\n return S_OK", "def create_index(cls, engine):\n\n reg_imei = db.Index('reg_imei_index', cls.imei, postgresql_concurrently=True)\n reg_imei.create(bind=engine)\n\n reg_normalized_imei = db.Index('reg_normalized_imei_index', cls.normalized_imei, postgresql_concurrently=True)\n reg_normalized_imei.create(bind=engine)", "def setAttributes(self, args):\n for atr in self.defaultAttributes:\n if args.has_key(atr):\n # convert atr to proper type\n objAttr = getattr(self, atr)\n myType = type(args[atr])\n if type(objAttr) == types.IntType and myType <> types.IntType:\n args[atr] = int(args[atr])\n elif type(objAttr) == types.StringType and myType <> types.StringType:\n args[atr] = str(args[atr])\n elif type(objAttr) == types.ListType and myType <> types.ListType:\n args[atr] = eval(args[atr])\n elif type(objAttr) == types.DictType and myType <> types.DictType:\n args[atr] = eval(args[atr])\n elif type(objAttr) == types.FloatType and myType <> types.FloatType:\n args[atr] = float(args[atr])\n setattr(self, atr, args[atr])", "def update(self, *args, **kwargs):\n if args is not () and args is not None:\n attr_names = [\"id\", \"size\", \"x\", \"y\"]\n for index, attr in enumerate(args):\n setattr(self, attr_names[index], attr)\n else:\n for key, value in kwargs.items():\n if hasattr(self, key):\n setattr(self, key, value)", "def setName(self, attributeIndex, newName) -> None:\n ..." ]
[ "0.77066755", "0.6361328", "0.6264673", "0.61884093", "0.5833767", "0.57452655", "0.57315993", "0.56269467", "0.54736567", "0.544051", "0.54389435", "0.5431951", "0.53999454", "0.5295605", "0.52132535", "0.5177261", "0.5129444", "0.51156485", "0.5099704", "0.50790924", "0.50552803", "0.5051657", "0.50420964", "0.50361454", "0.5011238", "0.49837422", "0.49755013", "0.49553072", "0.49453864", "0.4944989" ]
0.7726764
0
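One detail worth pulling out of the register_object_type_attrs record above: attribute definitions arrive as 2- to 4-tuples and are padded with None up to length four before validation. A standalone sketch of that normalization step (the flag constants and names here are illustrative, not the library's actual values):

ATTR_SIMPLE, ATTR_SEARCHABLE = 0x01, 0x02  # assumed flag values for illustration

def normalize_attr(attr_defn):
    # Pad a (type, flags[, ivtidx[, split]]) tuple out to exactly four slots.
    attr_defn = tuple(attr_defn) + (None,) * (4 - len(attr_defn))
    if len(attr_defn) != 4:
        raise ValueError("Definition must be a 2- to 4-tuple.")
    return attr_defn

print(normalize_attr((int, ATTR_SEARCHABLE)))          # (<class 'int'>, 2, None, None)
print(normalize_attr((str, ATTR_SIMPLE, "keywords")))  # (<class 'str'>, 1, 'keywords', None)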
Registers a new inverted index with the database. An inverted index maps arbitrary terms to objects and allows you to query based on one or more terms. If the inverted index already exists with the given parameters, no action is performed. name is the name of the inverted index and must be alphanumeric. min and max specify the minimum and maximum length of terms to index. Any terms of length smaller than min or larger than max will not be indexed. If neither is specified, terms of all sizes will be indexed. split is either a callable or a regular expression (or a string, in which case it is compiled as a regexp) and is used to parse string-based attributes using this inverted index into individual terms. If split is not specified, the default is to split words at non-alphanumeric/underscore/digit boundaries. If split is a callable, it will receive a string of text and must return a sequence, and each item in the sequence will be indexed as an individual term. ignore is a list of terms that will not be indexed. If it is specified, each indexed term for this inverted index will first be checked against this list. If it exists, the term is discarded. This is useful to ignore typical 'stop' words, such as 'the', 'at', 'to', etc.
def register_inverted_index(self, name, min = None, max = None, split = None, ignore = None): # Verify specified name doesn't already exist as some object attribute. for object_name, object_type in self._object_types.items(): if name in object_type[1] and name != object_type[1][name][2]: raise ValueError, "Inverted index name '%s' conflicts with registered attribute in object '%s'" % \ (name, object_name) if split is None: # Default split regexp is to split words on # alphanumeric/digits/underscore boundaries. split = re.compile("[\W_\d]+", re.U) elif isinstance(split, basestring): split = re.compile(split, re.U) if name not in self._inverted_indexes: self._db_query('INSERT INTO inverted_indexes VALUES(?, "objectcount", 0)', (name,)) # Create the tables needed by the inverted index. self._lock.acquire() self._db.executescript(CREATE_IVTIDX_TEMPLATE.replace('%IDXNAME%', name)) self._lock.release() else: defn = self._inverted_indexes[name] if min == defn['min'] and max == defn['max'] and split == defn['split'] and \ ignore == defn['ignore']: # Definition unchanged, nothing to do. return defn = { 'min': min, 'max': max, 'split': split, 'ignore': ignore, } self._db_query("INSERT OR REPLACE INTO inverted_indexes VALUES(?, 'definition', ?)", (name, buffer(cPickle.dumps(defn, 2)))) defn['objectcount'] = 0 self._inverted_indexes[name] = defn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def register_inverted_index(self, name, min = None, max = None, split = None, ignore = None):\n # Verify specified name doesn't already exist as some object attribute.\n for object_name, object_type in self._object_types.items():\n if name in object_type[1] and name != object_type[1][name][2]:\n raise ValueError(\"Inverted index name '%s' conflicts with registered attribute in object '%s'\" % \\\n (name, object_name))\n\n if split is None:\n # Default split regexp is to split words on\n # alphanumeric/digits/underscore boundaries.\n split = re.compile(u\"(\\d+)|[_\\W]\", re.U)\n elif isinstance(split, str):\n split = re.compile(tostr(split), re.U)\n\n if name not in self._inverted_indexes and not self._readonly:\n self._db_query('INSERT INTO inverted_indexes VALUES(?, \"objectcount\", 0)', (name,))\n # Create the tables needed by the inverted index.\n with self._lock:\n self._db.executescript(CREATE_IVTIDX_TEMPLATE.replace('%IDXNAME%', name))\n elif name in self._inverted_indexes:\n defn = self._inverted_indexes[name]\n if min == defn['min'] and max == defn['max'] and split == defn['split'] and \\\n ignore == defn['ignore']:\n # Definition unchanged, nothing to do.\n return\n\n if self._readonly:\n raise DatabaseReadOnlyError('upgrade_to_py3() must be called before database can be modified')\n\n defn = {\n 'min': min,\n 'max': max,\n 'split': split,\n 'ignore': ignore,\n }\n\n self._db_query(\"INSERT OR REPLACE INTO inverted_indexes VALUES(?, 'definition', ?)\",\n (name, self._pickle(defn)))\n\n defn['objectcount'] = 0\n self._inverted_indexes[name] = defn\n self.commit()", "def create_index():", "def add_index(self, name, func):\n assert name not in self.indices\n info_name = 'index:%s:%s' % (self.info['name'], name)\n info = self.store._get_info(info_name, index_for=self.info['name'])\n index = Index(self, info, func)\n self.indices[name] = index\n if IndexKeyBuilder:\n self._index_keys = IndexKeyBuilder(self.indices.values()).build\n return index", "def create_index(self, indexname, table, columns, unique=False):\n if not isinstance(columns, list) and not isinstance(columns, tuple):\n columns = [columns]\n\n if \".\" in table:\n prefix = table.split(\".\")[0] + \".\"\n table = table.split(\".\")[1]\n else:\n prefix = \"\"\n # table = table\n\n self.LOG(\"index create \", indexname, table, columns, unique)\n if unique:\n sql = \"CREATE UNIQUE INDEX %s%s ON %s (%s);\" % (\n prefix, indexname, table, \",\".join(columns))\n else:\n sql = \"CREATE INDEX %s%s ON %s (%s);\" % (\n prefix, indexname, table, \",\".join(columns))\n self.execute(sql)", "def add(self, name, index = None):\n if index is None:\n while self.indexDict.has_key(self.count):\n self.count += 1\n index = self.count\n self.fieldDict[name] = index\n self.indexDict[index] = name", "def add_index_operation(self, name, operations):\n if name not in self._index_operations:\n self._add_io(name, operations)\n else:\n raise AttributeError(\"An index operation with the name {} was already taken\".format(name))", "def create_index(cls, engine):\n\n reg_imei = db.Index('reg_imei_index', cls.imei, postgresql_concurrently=True)\n reg_imei.create(bind=engine)\n\n reg_normalized_imei = db.Index('reg_normalized_imei_index', cls.normalized_imei, postgresql_concurrently=True)\n reg_normalized_imei.create(bind=engine)", "def solr_index(self, **kwargs):\n solr_dict = self.solr_dict()\n solr_dict['uuid'] = str(self.uuid)\n if kwargs.get('solrconn'):\n solrconn = kwargs.get('solrconn')\n else:\n solrconn = 
solr.SolrConnection(settings.SOLR_SERVER)\n solrconn.add(**solr_dict)\n\n if kwargs.get('commit', True):\n solrconn.commit()", "def index_document(self, text: str, name: str):\n tokens = self.tokenize(text)\n term_frequencies = Counter(tokens) # Calculate term frequencies\n doc_id = len(self.documents) # Get document id as newest document\n\n for term in term_frequencies:\n if term not in self.index:\n self.index[term] = {}\n self.index[term][doc_id] = term_frequencies[term]\n\n self.documents[doc_id] = {\n \"name\": name,\n \"mag\": self.magnitude(term_frequencies.values())\n }", "def indices_of_split(self, split_name='train'):\n return self.indices_of('split', split_name)", "def create_index(args, client):\n policy = {}\n client.index_geo2dsphere_create(args.nspace, args.set,\n LOCBIN, LOCNDX, policy)\n client.index_integer_create(args.nspace, args.set,\n HSHBIN, HSHNDX, policy)", "def create_index(self, *columns):\n self._assert_columns_exist(columns)\n\n # Build index name.\n whitelist = lambda col: ''.join(x for x in col if x.isalnum())\n idx_name = '_'.join(whitelist(col) for col in columns)\n idx_name = 'idx_{0}_{1}'.format(self._table, idx_name)\n\n # Build column names.\n col_names = [self._normalize_column(x) for x in columns]\n col_names = ', '.join(col_names)\n\n # Prepare statement.\n statement = 'CREATE INDEX IF NOT EXISTS {0} ON {1} ({2})'\n statement = statement.format(idx_name, self._table, col_names)\n\n # Create index.\n cursor = self._connection.cursor()\n cursor.execute(statement)", "def create_key_index(name):\r\n global _existing_indices\r\n _existing_indices = _existing_indices or execute_query('g.getIndexedKeys(Vertex.class)')\r\n if name not in _existing_indices:\r\n execute_query(\r\n \"g.createKeyIndex(keyname, Vertex.class); g.stopTransaction(SUCCESS)\",\r\n {'keyname':name}, transaction=False)\r\n _existing_indices = None", "def create_index(self):\n self.send_robust(self.es_index, data=self.es_meta)\n self.set_index_normal_settings()", "def create(\n self,\n index: IO,\n request_options: Optional[_models.RequestOptions] = None,\n *,\n content_type: str = \"application/json\",\n **kwargs: Any\n ) -> _models.SearchIndex:", "def solr_index(serializer, instances):\n connection = __solr_prepare(instances)\n serialized = serializer(instances, many=True)\n data = serialized.data\n connection.add(data)\n connection.commit()", "def _es_push_indexes(self, content):\n for c in self.es_clients:\n c.create_index(content)", "def create_index(self, *columns):\n self._assert_columns_exist(columns)\n\n # Build index name.\n whitelist = lambda col: ''.join(x for x in col if x.isalnum())\n idx_name = '_'.join(whitelist(col) for col in columns)\n idx_name = 'idx_{0}_{1}'.format(self._table, idx_name)\n\n # Build column names.\n col_names = [self._normalize_column(x) for x in columns]\n col_names = ', '.join(col_names)\n\n # Prepare statement.\n statement = 'CREATE INDEX IF NOT EXISTS {0} ON {1} ({2})'\n statement = statement.format(idx_name, self._table, col_names)\n\n # Create index.\n cursor = self._connection.cursor()\n cursor.execute('PRAGMA synchronous=OFF')\n cursor.execute(statement)", "def add_word(self,word,index):\r\n\r\n #with shelf\r\n if self.using_shelf:\r\n\r\n if word in self.word_dict:\r\n\r\n self.word_dict[word].add(str(index))\r\n else:\r\n self.word_dict[word] = {str(index)}\r\n\r\n\r\n #with database\r\n if self.using_database:\r\n\r\n value_tuple = (notebookname, word,)\r\n db_cursor.execute(\"INSERT OR REPLACE \"\r\n +\"INTO all_words \"\r\n +\"(word, 
notebook)\"\r\n +\" VALUES (?,?);\",value_tuple)\r\n value_tuple = (notebookname, word, str(index))\r\n db_cursor.execute(\"INSERT OR REPLACE\"\r\n +\" INTO word_to_indexes \"\r\n +\"(notebook, word, note_index)\"\r\n +\" VALUES (?,?,?);\",\r\n value_tuple)", "def addCatalogIndexes(portal):\n catalog = getToolByName(portal, 'portal_catalog')\n indexes = catalog.indexes()\n wanted = (('standardTags', 'KeywordIndex'),\n ('iamTags', 'KeywordIndex'),\n ('isearchTags', 'KeywordIndex'),\n ('hiddenTags', 'KeywordIndex'))\n indexables = []\n for name, meta_type in wanted:\n if name not in indexes:\n catalog.addIndex(name, meta_type)\n indexables.append(name)\n logger.info(\"Added %s for field %s.\", meta_type, name)\n if len(indexables) > 0:\n logger.info(\"Indexing new indexes %s.\", ', '.join(indexables))\n catalog.manage_reindexIndex(ids=indexables)", "def index(data, names=None, indices=None, mask=None, **kwargs):\n return Component(\n \"Index\",\n arguments={\n 'data': Component.of(data),\n 'names': Component.of(names),\n 'indices': Component.of(indices),\n 'mask': Component.of(mask)\n },\n options={\n \n },\n constraints=kwargs)", "def _make_index(self, fname, sents, words):\n for w in words:\n # word index for this file only\n findex = []\n\n for ixS, s in enumerate(sents):\n # iterate over each word in the sentencep\n for ixT, token in enumerate(s):\n # could use regex for substring matching instead\n if w == token.lower():\n findex.append((ixS, ixT))\n # keep track of word use frequency\n self._freq[w] += 1\n\n # grow the main index \n self._index[w][fname]= findex", "async def _get_name_index(self, name, splits_list=None):\n\n # Generates a list if not provided \n if splits_list is None:\n splits_list = await self._get_all_splits()\n\n # Returns a name match with a fuzzy search\n fuzz_name = await self._fuzzy_search(name, splits_list)\n\n # Returns exact index if a matching name was found\n if fuzz_name is None:\n return -1\n else:\n index = await self._exact_search(fuzz_name, splits_list)\n return index", "def __init__(\n self,\n name: str,\n type: str,\n indexing: Optional[List[str]] = None,\n index: Optional[str] = None,\n ) -> None:\n self.name = name\n self.type = type\n self.indexing = indexing\n self.index = index", "def to_index(self, index_type, index_name, includes=None):\n return IndexField(self.name, self.data_type, index_type, index_name,\n includes)", "def create_new_index(self, index_name, value, is_cluster, check=False):\n print(f\"Creating {index_name} index started \\n\")\n add_index = \"/html//i[@id='addIndex']\"\n self.locator_finder_by_xpath(add_index).click()\n time.sleep(2)\n\n print(f\"selecting {index_name} from the list\\n\")\n self.locator_finder_by_select(self.select_index_type_id, value)\n\n if index_name == \"Persistent\":\n self.select_persistent_fields_id = self.locator_finder_by_hover_item_id(self.select_persistent_fields_id)\n time.sleep(1)\n self.select_persistent_fields_id.send_keys(\"pfields\").perform()\n self.select_persistent_name_id = self.locator_finder_by_hover_item_id(self.select_persistent_name_id)\n self.select_persistent_fields_id.send_keys(\"Persistent\").perform()\n time.sleep(1)\n\n if not is_cluster:\n self.select_persistent_unique_id = self.locator_finder_by_hover_item_id(\n self.select_persistent_unique_id\n )\n\n self.select_persistent_sparse_id = self.locator_finder_by_hover_item_id(self.select_persistent_sparse_id)\n self.select_persistent_duplicate_id = self.locator_finder_by_hover_item_id(\n self.select_persistent_duplicate_id\n 
)\n self.select_persistent_background_id = self.locator_finder_by_hover_item_id(self.select_persistent_background_id)\n time.sleep(1)\n\n elif index_name == \"Geo\":\n self.select_geo_fields_id = self.locator_finder_by_hover_item_id(self.select_geo_fields_id)\n self.select_geo_fields_id.send_keys(\"gfields\").perform()\n time.sleep(1)\n self.select_geo_name_id = self.locator_finder_by_hover_item_id(self.select_geo_name_id)\n self.select_geo_name_id.send_keys(\"Geo\").perform()\n time.sleep(1)\n self.select_geo_json_id = self.locator_finder_by_hover_item_id(self.select_geo_json_id)\n self.select_geo_background_id = self.locator_finder_by_hover_item_id(self.select_geo_background_id)\n time.sleep(1)\n self.wait_for_ajax()\n\n elif index_name == \"Fulltext\":\n self.select_fulltext_field_id = self.locator_finder_by_hover_item_id(self.select_fulltext_field_id)\n self.select_fulltext_field_id.send_keys(\"ffields\").perform()\n time.sleep(1)\n self.select_fulltext_name_id = self.locator_finder_by_hover_item_id(self.select_fulltext_name_id)\n self.select_fulltext_name_id.send_keys(\"Fulltext\").perform()\n time.sleep(1)\n self.select_fulltext_length_id = self.locator_finder_by_hover_item_id(self.select_fulltext_length_id)\n self.select_fulltext_length_id.send_keys(100)\n self.select_fulltext_background_id = self.locator_finder_by_hover_item_id(\n self.select_fulltext_background_id\n )\n time.sleep(1)\n self.wait_for_ajax()\n\n elif index_name == \"TTL\":\n self.select_ttl_field_id = self.locator_finder_by_hover_item_id(self.select_ttl_field_id)\n self.select_ttl_field_id.send_keys(\"tfields\").perform()\n time.sleep(1)\n self.select_ttl_name_id = self.locator_finder_by_hover_item_id(self.select_ttl_name_id)\n self.select_ttl_name_id.send_keys(\"TTL\").perform()\n time.sleep(1)\n self.select_ttl_expiry_id = self.locator_finder_by_hover_item_id(self.select_ttl_expiry_id)\n self.select_ttl_expiry_id.send_keys(1000)\n self.select_ttl_background_id = self.locator_finder_by_hover_item_id(self.select_ttl_background_id)\n time.sleep(1)\n self.wait_for_ajax()\n\n # experimental feature\n elif index_name == 'ZKD':\n if check:\n self.navbar_goto(\"collections\")\n print(\"Selecting computed values collections. 
\\n\")\n col = '//*[@id=\"collection_ComputedValueCol\"]/div/h5'\n self.locator_finder_by_xpath(col).click()\n self.select_index_menu()\n\n print(f\"Creating {index_name} index started \\n\")\n self.locator_finder_by_xpath(add_index).click()\n time.sleep(2)\n\n print(f\"selecting {index_name} from the list\\n\")\n self.locator_finder_by_select(self.select_index_type_id, 5)\n\n time.sleep(1)\n\n select_zkd_field_sitem = self.locator_finder_by_id('newZkdFields')\n select_zkd_field_sitem.click()\n select_zkd_field_sitem.clear()\n select_zkd_field_sitem.send_keys('x,y')\n time.sleep(1)\n else:\n select_zkd_field_sitem = self.locator_finder_by_id('newZkdFields')\n select_zkd_field_sitem.click()\n select_zkd_field_sitem.clear()\n select_zkd_field_sitem.send_keys('zkdfileds')\n time.sleep(1)\n\n select_zkd_name_sitem = self.locator_finder_by_id('newZkdName')\n select_zkd_name_sitem.click()\n select_zkd_name_sitem.clear()\n select_zkd_name_sitem.send_keys('ZKD')\n time.sleep(1)\n\n select_create_index_btn_id = \"createIndex\"\n self.locator_finder_by_id(select_create_index_btn_id).click()\n time.sleep(10)\n self.webdriver.refresh()\n\n if check:\n self.navbar_goto(\"collections\")\n self.select_collection(\"TestDoc\")\n self.select_index_menu()\n\n print(f\"Creating {index_name} index completed \\n\")", "def index(self, name, file, passages, index_name=\"default\"):\n raise NotImplementedError()", "def add_split(self, split_dim, value, split_size=\"\", num_split=0, name=None):\n if num_split > 0:\n return self._build_op(\n 'Split', [split_dim, value], name=name, attr={'num_split': num_split})\n else:\n return self._build_op('SplitV', [value, split_size, split_dim], name=name)", "def init_index(self, index_name):\n return Index(self, index_name)", "def _add_object_inverted_index_terms(self, obj, ivtidx, terms):\n object_type, object_id = obj\n if not terms:\n return\n\n # Resolve object type name to id\n object_type = self._get_type_id(object_type)\n\n # Holds any of the given terms that already exist in the database\n # with their id and count.\n db_terms_count = {}\n\n terms_list = _list_to_printable([ t.lower() for t in terms.keys() ])\n q = \"SELECT id,term,count FROM ivtidx_%s_terms WHERE term IN %s\" % (ivtidx, terms_list)\n rows = self._db_query(q)\n for row in rows:\n db_terms_count[row[1]] = row[0], row[2]\n\n # For executemany queries later.\n update_list, map_list = [], []\n\n for term, score in terms.items():\n term = term.lower()\n if term not in db_terms_count:\n # New term, so insert it now.\n self._db_query('INSERT OR REPLACE INTO ivtidx_%s_terms VALUES(NULL, ?, 1)' % ivtidx, (term,))\n db_id, db_count = self._cursor.lastrowid, 1\n db_terms_count[term] = db_id, db_count\n else:\n db_id, db_count = db_terms_count[term]\n update_list.append((db_count + 1, db_id))\n\n map_list.append((int(score*10), db_id, object_type, object_id, score))\n\n self._db_query('UPDATE ivtidx_%s_terms SET count=? WHERE id=?' % ivtidx, update_list, many = True)\n self._db_query('INSERT INTO ivtidx_%s_terms_map VALUES(?, ?, ?, ?, ?)' % ivtidx, map_list, many = True)" ]
[ "0.8106289", "0.5157404", "0.513424", "0.49950093", "0.4974171", "0.48789895", "0.48250076", "0.47591364", "0.47572297", "0.4660967", "0.46581295", "0.46503413", "0.464838", "0.46435705", "0.46280825", "0.46089694", "0.46081924", "0.4605972", "0.4580362", "0.4576036", "0.45739207", "0.45736903", "0.45642275", "0.45414624", "0.45266637", "0.4522669", "0.45087412", "0.45032254", "0.44956696", "0.44870025" ]
0.81983155
0
Adds an object of type 'object_type' to the database. Parent is a (type, id) tuple which refers to the object's parent. 'object_type' and 'type' are type names as given to register_object_type_attrs(). attrs kwargs will vary based on object type. ATTR_SIMPLE attributes which are None are not added. This method returns the dict that would be returned if this object were queried by query(). The "id" key of this dict refers to the id number assigned to this object.
def add(self, object_type, parent = None, **attrs): type_attrs = self._get_type_attrs(object_type) if parent: attrs["parent_type"] = self._get_type_id(parent[0]) attrs["parent_id"] = parent[1] # Increment objectcount for the applicable inverted indexes. inverted_indexes = self._get_type_inverted_indexes(object_type) if inverted_indexes: self._db_query("UPDATE inverted_indexes SET value=value+1 WHERE attr='objectcount' AND name IN %s" % \ _list_to_printable(inverted_indexes)) # Process inverted index maps for this row ivtidx_terms = [] for ivtidx in inverted_indexes: # Sync cached objectcount with the DB (that we just updated above) self._inverted_indexes[ivtidx]['objectcount'] += 1 terms_list = [] split = self._inverted_indexes[ivtidx]['split'] for name, (attr_type, flags, attr_ivtidx, attr_split) in type_attrs.items(): if attr_ivtidx == ivtidx and name in attrs: terms_list.append((attrs[name], 1.0, attr_split or split, ivtidx)) if ivtidx in attrs and ivtidx not in type_attrs: # Attribute named after an inverted index is given in kwagrs, # but that ivtidx is not a registered attribute (which would be # handled in the for loop just above). terms_list.append((attrs[ivtidx], 1.0, split, ivtidx)) terms = self._score_terms(terms_list) ivtidx_terms.append((ivtidx, terms)) if ivtidx in type_attrs: # Registered attribute named after ivtidx; store ivtidx # terms in object. attrs[ivtidx] = terms.keys() query, values = self._make_query_from_attrs("add", attrs, object_type) self._db_query(query, values) # Add id given by db, as well as object type. attrs["id"] = self._cursor.lastrowid attrs["type"] = unicode(object_type) if parent: attrs['parent'] = (attrs['parent_type'], attrs['parent_id']) else: attrs['parent'] = (None, None) for ivtidx, terms in ivtidx_terms: self._add_object_inverted_index_terms((object_type, attrs['id']), ivtidx, terms) # Populate dictionary with keys for this object type not specified in kwargs. attrs.update(dict.fromkeys([k for k in type_attrs if k not in attrs.keys() + ['pickle']])) return ObjectRow(None, None, attrs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Add(self, obj_type, name, node=None, obj=None):\n print \"Adding object %s, node: %s\" % (name, node)\n #check for duplicate object\n # also raise error if no such object type\n if self.ObjectExists(obj_type, name):\n raise DuplicateObjectError(name)\n \n #find out where we need to put it and stick it in there\n idx = bisect.bisect(self.objects[obj_type], name)\n if not node:\n node = game_objects.ObjectUtilities.ObjectNode(self, name, self.object_modules[obj_type])\n if obj:\n node.CopyObject(obj)\n self.objects[obj_type].insert(idx, node)\n \n #let our listeners know we added a new object and let them\n # know the parent in terms of alphabetical order\n if idx == 0:\n #if we're inserting at the start there is no preceding element\n self.sendODBEvent(ODBAdd(node, obj_type, None))\n else:\n self.sendODBEvent(ODBAdd(node, obj_type, self.objects[obj_type][idx-1].name))\n \n node.SetModified(True)\n self.MarkModified(node)", "def add_object(self, object_type, data=None, read_from_netbox=False, source=None):\n\n # create new object\n new_object = object_type(data, read_from_netbox=read_from_netbox, inventory=self, source=source)\n\n # add to inventory\n self.base_structure[object_type.name].append(new_object)\n\n if read_from_netbox is False:\n log.info(f\"Created new {new_object.name} object: {new_object.get_display_name()}\")\n\n return new_object", "def retype(self, obj, new_type):\n\n if new_type not in self._object_types:\n raise ValueError('Parent type %s not registered in database' % new_type)\n\n # Reload and force pickled attributes into the dict.\n try:\n attrs = dict(self.get(obj))\n except TypeError:\n raise ValueError('Object (%s, %s) is not found in database' % (obj['type'], obj['id']))\n\n parent = attrs.get('parent')\n # Remove all attributes that aren't also in the destination type. 
Also\n # remove type, id, and parent attrs, which get regenerated when we add().\n for attr_name in list(attrs.keys()):\n # TODO: check src and dst attr types and try to coerce, and if\n # not possible, raise an exception.\n if attr_name not in self._object_types[new_type][1] or attr_name in ('type', 'id', 'parent'):\n del attrs[attr_name]\n\n new_obj = self.add(new_type, parent, **attrs)\n # Reparent all current children to the new id.\n for child in self.query(parent=obj):\n # TODO: if this raises, delete new_obj (to rollback) and reraise.\n self.reparent(child, new_obj)\n\n self.delete(obj)\n return new_obj", "def add_update_object(self, object_type, data=None, read_from_netbox=False, source=None):\n\n if data is None:\n log.error(f\"Unable to find {object_type.name} object, parameter 'data' is 'None'\")\n return None\n\n # try to find exiting object based on submitted data\n this_object = self.get_by_data(object_type, data=data)\n\n if this_object is None:\n this_object = self.add_object(object_type, data=data, read_from_netbox=read_from_netbox, source=source)\n\n else:\n this_object.update(data, read_from_netbox=read_from_netbox, source=source)\n log.debug(\"Updated %s object: %s\" % (this_object.name, this_object.get_display_name()))\n\n return this_object", "def add(self, object_type, parent=None, **attrs):\n if self._readonly:\n raise DatabaseReadOnlyError('upgrade_to_py3() must be called before database can be modified')\n\n type_attrs = self._get_type_attrs(object_type)\n if parent:\n attrs['parent_type'], attrs['parent_id'] = self._to_obj_tuple(parent, numeric=True)\n\n # Increment objectcount for the applicable inverted indexes.\n inverted_indexes = self._get_type_inverted_indexes(object_type)\n if inverted_indexes:\n self._db_query(\"UPDATE inverted_indexes SET value=value+1 WHERE attr='objectcount' AND name IN %s\" % \\\n _list_to_printable(inverted_indexes))\n\n\n # Process inverted index maps for this row\n ivtidx_terms = []\n for ivtidx in inverted_indexes:\n # Sync cached objectcount with the DB (that we just updated above)\n self._inverted_indexes[ivtidx]['objectcount'] += 1\n terms_list = []\n split = self._inverted_indexes[ivtidx]['split']\n for name, (attr_type, flags, attr_ivtidx, attr_split) in type_attrs.items():\n if attr_ivtidx == ivtidx and name in attrs:\n terms_list.append((attrs[name], 1.0, attr_split or split, ivtidx))\n\n if ivtidx in attrs and ivtidx not in type_attrs:\n # Attribute named after an inverted index is given in kwagrs,\n # but that ivtidx is not a registered attribute (which would be\n # handled in the for loop just above).\n terms_list.append((attrs[ivtidx], 1.0, split, ivtidx))\n\n terms = self._score_terms(terms_list)\n if terms:\n ivtidx_terms.append((ivtidx, terms))\n # If there are no terms for this ivtidx, we don't bother storing\n # an empty list in the pickle.\n if ivtidx in type_attrs:\n # Registered attribute named after ivtidx; store ivtidx\n # terms in object.\n attrs[ivtidx] = list(terms.keys())\n\n query, values = self._make_query_from_attrs(\"add\", attrs, object_type)\n self._db_query(query, values)\n\n # Add id given by db, as well as object type.\n attrs['id'] = self._cursor.lastrowid\n attrs['type'] = str(object_type)\n attrs['parent'] = self._to_obj_tuple(parent) if parent else (None, None)\n\n for ivtidx, terms in ivtidx_terms:\n self._add_object_inverted_index_terms((object_type, attrs['id']), ivtidx, terms)\n\n # Populate dictionary with keys for this object type not specified in kwargs.\n attrs.update(dict.fromkeys([k 
for k in type_attrs if k not in list(attrs.keys()) + ['pickle']]))\n\n self._set_dirty()\n return ObjectRow(None, None, attrs)", "def append( self, obj ):\n self[obj.getType()] = obj\n obj.setParent( self.parent )\n return obj", "def _add_object(self, object_dict):\n # Attempt to map the object first. This will raise an\n # ItemExistsError if a named object of the same type already\n # exists.\n self._add_object_to_map(self.append_key, object_dict)\n\n # Add the object to the end of the model.\n # TODO: which objects need added to the beginning?\n self.model_dict[self.append_key] = object_dict\n\n # Update append key.\n self._update_append_key()", "def _add_non_object(self, item_type, item_dict):\n\n # Map item.\n if item_type == 'clock':\n # Map clock.\n self._add_clock_to_map(self.prepend_key, item_dict)\n\n elif item_type == 'module':\n # Map module.\n self._add_module_to_map(self.prepend_key, item_dict)\n\n elif item_type == 'class':\n # Map class.\n self._add_class_to_map(self.prepend_key, item_dict)\n\n elif item_type in self.NO_MAP:\n # No mapping.\n pass\n\n else:\n s = 'No add method for {} item type.'.format(item_type)\n raise TypeError(s)\n\n # Add to beginning of model.\n self.model_dict[self.prepend_key] = item_dict\n\n # Update prepend key.\n self._update_prepend_key()", "def create_object_type(self, object_type=None):\n # Return Value\n # ------------\n # {object_type_id: ''}\n #\n if not is_basic_identifier(object_type.name):\n raise BadRequest(\"Invalid object_type name: %s\" % object_type.name)\n if not is_yaml_string_valid(object_type.definition):\n raise BadRequest(\"Invalid YAML definition\")\n object_type_id, version = self.clients.resource_registry.create(object_type)\n return object_type_id", "def add(self, obj):\n self._pkcache[obj.pk] = obj\n for ctype in obj._content_types:\n self._typecache[ctype][obj.pk] = True", "def update(self, obj, parent=None, **attrs):\n if isinstance(obj, ObjectRow):\n object_type, object_id = obj['type'], obj['id']\n else:\n object_type, object_id = obj\n\n type_attrs = self._get_type_attrs(object_type)\n get_pickle = False\n\n # Determine which inverted indexes need to be regenerated for this\n # object. Builds a dictionary of ivtidxes with a dirty flag and\n # a list of sql columns needed for reindexing.\n ivtidx_columns = {}\n for name, (attr_type, flags, attr_ivtidx, attr_split) in type_attrs.items():\n if flags & ATTR_INVERTED_INDEX:\n if attr_ivtidx not in ivtidx_columns:\n ivtidx_columns[attr_ivtidx] = [ False, [] ]\n if flags & ATTR_SEARCHABLE:\n ivtidx_columns[attr_ivtidx][1].append(name)\n if flags & (ATTR_SIMPLE | ATTR_IGNORE_CASE):\n get_pickle = True\n if name in attrs:\n ivtidx_columns[attr_ivtidx][0] = True\n\n if flags & ATTR_SIMPLE and name in attrs:\n # Simple attribute needs pickle\n get_pickle = True\n\n # TODO: if ObjectRow is supplied, don't need to fetch columns\n # that are available in the ObjectRow. (Of course this assumes\n # the object wasn't changed via elsewhere during the life of the\n # ObjectRow object, so maybe we don't want to do that.)\n reqd_columns = ['pickle'] if get_pickle else []\n for dirty, searchable_attrs in ivtidx_columns.values():\n if dirty:\n reqd_columns.extend(searchable_attrs)\n\n if reqd_columns:\n q = 'SELECT %s FROM objects_%s WHERE id=?' 
% (','.join(reqd_columns), object_type)\n row = self._db_query_row(q, (object_id,))\n if not row:\n raise ValueError, \"Can't update unknown object (%s, %d)\" % (object_type, object_id)\n if reqd_columns[0] == 'pickle' and row[0]:\n # One of the attrs we're updating is in the pickle, so we\n # have fetched it; now convert it to a dict.\n row_attrs = cPickle.loads(str(row[0]))\n for key, value in row_attrs.items():\n # Rename all __foo to foo for ATTR_IGNORE_CASE columns\n if key.startswith('__') and type_attrs[key[2:]][1] & ATTR_IGNORE_CASE:\n row_attrs[key[2:]] = value\n del row_attrs[key]\n # Update stored pickle data with new ATTR_SIMPLE attribute values\n row_attrs.update(attrs)\n attrs = row_attrs\n\n if isinstance(parent, ObjectRow):\n attrs['parent_type'], attrs['parent_id'] = parent['type'], parent['id']\n elif parent:\n attrs['parent_type'], attrs['parent_id'] = self._get_type_id(parent[0]), parent[1]\n\n attrs['id'] = object_id\n # Make copy of attrs for later query, since we're now about to mess with it.\n orig_attrs = attrs.copy()\n\n # Merge the ivtidx columns we grabbed above into attrs dict.\n for n, name in enumerate(reqd_columns):\n if name not in attrs and name != 'pickle':\n attrs[name] = row[n]\n\n for ivtidx, (dirty, searchable_attrs) in ivtidx_columns.items():\n if not dirty:\n # No attribute for this ivtidx changed.\n continue\n split = self._inverted_indexes[ivtidx]['split']\n # Remove existing indexed words for this object.\n self._delete_object_inverted_index_terms((object_type, object_id), ivtidx)\n\n # FIXME: code duplication from add()\n # Need to reindex all columns in this object using this ivtidx.\n terms_list = []\n for name, (attr_type, flags, attr_ivtidx, attr_split) in type_attrs.items():\n if attr_ivtidx == ivtidx and name in attrs:\n if attr_type == str and type(attrs[name]) == buffer:\n # We store string objects in the db as buffers, in\n # order to prevent any unicode issues. 
So we need\n # to convert the buffer we got from the db back to\n # a string before parsing the attribute into terms.\n attrs[name] = str(attrs[name])\n terms_list.append((attrs[name], 1.0, attr_split or split, ivtidx))\n\n if ivtidx in attrs and ivtidx not in type_attrs:\n # Attribute named after an inverted index is given, but\n # that ivtidx is not a named attribute (which would be handled\n # in the for loop just above).\n terms_list.append((attrs[ivtidx], 1.0, split, ivtidx))\n\n terms = self._score_terms(terms_list)\n self._add_object_inverted_index_terms((object_type, object_id), ivtidx, terms)\n if ivtidx in type_attrs:\n # Registered attribute named after ivtidx; store ivtidx\n # terms in object.\n orig_attrs[ivtidx] = terms.keys()\n\n query, values = self._make_query_from_attrs(\"update\", orig_attrs, object_type)\n self._db_query(query, values)", "def add_ldap_object(l, filter, objectname, nametemplate, objecttype, base, attributes, scope=ldap.SCOPE_SUBTREE, DEBUG=False):\r\n try:\r\n assert not '*' in objectname\r\n except:\r\n e = ldap.INVALID_DN_SYNTAX('no wild cards is allowed in the object name')\r\n raise e\r\n obj_list = []\r\n try:\r\n obj_list = search_ldap(l, filter, objectname, base, attributes=attributes.keys())\r\n except:\r\n pass\r\n finally:\r\n if obj_list and len(obj_list):\r\n e = ldap.ALREADY_EXISTS('object exists %s' % obj_list[0][nametemplate])\r\n raise e\r\n\r\n try:\r\n## dn = template % (objectname)\r\n dn = '%s=%s,%s' % (nametemplate, objectname, base)\r\n attrs = {}\r\n attrs['objectclass'] = objecttype\r\n for k in attributes:\r\n attrs[k] = attributes[k]\r\n ldif = modlist.addModlist(attrs)\r\n print dn\r\n print ldif\r\n l.add_s(dn, ldif)\r\n except ldap.LDAPError, e:\r\n print e\r\n return None\r\n except Exception as e:\r\n return None", "def object_type(self, object_type):\n\n self._object_type = object_type", "def object_type(self, object_type):\n\n self._object_type = object_type", "def object_type(self, object_type):\n\n self._object_type = object_type", "def object_type(self, object_type):\n\n self._object_type = object_type", "def object_type(self, object_type):\n\n self._object_type = object_type", "def get_object(self, pid=None, type=None, create=None):\n objtype = type or self.default_object_type\n\n if pid is None:\n if create is None:\n create = True\n else:\n if create is None:\n create = False\n\n return objtype(self.api, pid, create,\n default_pidspace=self.default_pidspace)", "def update_object_type(self, object_type=None):\n # Return Value\n # ------------\n # {success: true}\n #\n if not is_basic_identifier(object_type.name):\n raise BadRequest(\"Invalid object_type name: %s\" % object_type.name)\n if not is_yaml_string_valid(object_type.definition):\n raise BadRequest(\"Invalid YAML definition\")\n object_id, version = self.clients.resource_registry.update(object_type)\n return object_id", "def post(self, obj):\n\n\t\tmodelobj = (not obj.get('parent_type')) and model.get(obj) or None\n\t\tmodelobj and modelobj.before_post()\n\t\tmodelobj and modelobj.validate()\n\t\t\t\t\n\t\tobj_single, is_vector = self._get_single(obj)\n\t\t# save the parent\n\t\tself.post_single(obj_single)\n\t\tif is_vector:\t\n\t\t\tfor k in obj:\n\t\t\t\td = {\"type\":k, \"parent\":obj[\"name\"], \"parent_type\":obj[\"type\"]}\n\t\t\t\t# dict, one child only\n\t\t\t\tif type(obj[k]) is dict:\n\t\t\t\t\tobj[k].update(d)\n\t\t\t\t\tself.post(obj[k])\n\t\t\t\t\n\t\t\t\t# multiple children\n\t\t\t\tif type(obj[k]) in (list, tuple):\n\t\t\t\t\tidx = 
0\n\t\t\t\t\tfor child in obj[k]:\n\t\t\t\t\t\td['idx'] = idx\n\t\t\t\t\t\tidx += 1\n\t\t\t\t\t\t\n\t\t\t\t\t\t# child is a dict\n\t\t\t\t\t\tif type(child) is dict:\n\t\t\t\t\t\t\tchild.update(d)\n\t\t\t\t\t\t\tself.post(child)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t# child is literal (only names)\n\t\t\t\t\t\telif type(child) in (str, int, float):\n\t\t\t\t\t\t\tc = {\"value\":child}\n\t\t\t\t\t\t\tc.update(d)\n\t\t\t\t\t\t\tself.post_single(c)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\traise Exception, \"child %s must be dict or literal\" % str(child)\t\n\t\tmodelobj and modelobj.after_post()", "def add_object(self, object):\n if isinstance(object, DSSDataset):\n data = {\"reference\": {\"projectKey\": object.project_key, \"type\": \"DATASET\", \"id\": object.dataset_name}}\n elif isinstance(object, DSSWikiArticle):\n data = {\"reference\": {\"projectKey\": object.project_key, \"type\": \"ARTICLE\", \"id\": object.article_id}}\n elif isinstance(object, DSSApp):\n data = {\"appId\": object.app_id}\n elif isinstance(object, DSSWorkspaceHtmlLinkObject):\n data = {\"htmlLink\": {\"name\": object.name, \"url\": object.url, \"description\": object.description}}\n elif isinstance(object, dict):\n data = object\n else:\n raise ValueError(\"Unsupported object type\")\n self.client._perform_json(\"POST\", \"/workspaces/%s/objects\" % self.workspace_key, body=data)", "def add_object(self, object):\n object.save()", "def _add_object_to_map(self, model_key, object_dict):\n # Grab reference to the object sub-dict.\n object_map = self.model_map['object']\n\n # Get type of object.\n obj_type = object_dict['object']\n\n # Define key object pair\n key_obj = [model_key, object_dict]\n\n # If this type isn't in the map, add it. NOTE: this can lead to\n # empty entries if the object isn't named.\n if obj_type not in object_map:\n object_map[obj_type] = {}\n\n try:\n # Never try to map an already existing named object.\n if object_dict['name'] in object_map[obj_type]:\n s = '{} already exists in the {} map!'\n raise ItemExistsError(s.format(object_dict['name'], obj_type))\n\n except KeyError:\n # Unnamed object. 
Add it to the unnamed list.\n self.model_map['object_unnamed'].append(key_obj)\n\n else:\n # Named object, map it.\n object_map[obj_type][object_dict['name']] = key_obj\n\n # No need to return; we're directly updating self.model_map", "def create(self, objecttype, under, **kwargs):\n self.LogCommand()\n tclcode = \"stc::create \" + objecttype + \" -under \" + under\n\n for key in kwargs:\n tclcode = tclcode + \" \" + \"-\" + key + \" \" + str(kwargs[key])\n\n objecthandle = self.Exec(tclcode)\n logging.debug(\" - Python result - \" + str(objecthandle))\n return objecthandle", "def add(self, object):\n if not object:\n self.save()\n return\n if not hasattr(object, 'id') or not object.id:\n raise ValueError(\"The model must be saved before add\")\n if not self.object_type:\n self.object_type = str(object._meta.object_name)\n elif str(object._meta.object_name) != self.object_type:\n raise ValueError(\"Model type don't match\")\n if self.objects_id:\n already_objects = self.objects_id.split(';')\n else:\n already_objects = []\n if str(object.id) in already_objects:\n return\n already_objects.append(str(object.id))\n self.objects_id = self._convertListToString(already_objects)\n self.save()", "def findOrCreate(cls, data):\n if \"type\" in data: # If we know the object type\n if not data[\"type\"] in cls.__objects_store: # New type collectore\n cls.__objects_store[data[\"type\"]] = weakref.WeakValueDictionary()\n obj = cls()\n cls.__objects_store[data[\"type\"]][str(data['id'])] = obj\n else:\n if str(data['id']) in cls.__objects_store[data[\"type\"]]:\n obj = cls.__objects_store[data[\"type\"]][str(data['id'])]\n else:\n obj = cls()\n cls.__objects_store[data[\"type\"]][str(data['id'])] = obj\n else:\n obj = cls()\n return obj._load_attributes_from_response(**data)", "def add_object(self, object_to_be_added):\n new_mapping = Map.add_object(self.id, object_to_be_added)\n if new_mapping:\n object_to_be_added.save()\n new_mapping.ref_id = object_to_be_added.id\n return True\n else:\n return False", "def create_ion_object(self, object_params):\n new_obj = IonObject(object_params[\"type_\"])\n\n # Iterate over the parameters to add to object; have to do this instead\n # of passing a dict to get around restrictions in object creation on setting _id, _rev params\n for param in object_params:\n self.set_object_field(new_obj, param, object_params.get(param))\n\n new_obj._validate() # verify that all of the object fields were set with proper types\n return new_obj", "def insert_object(self, object: ObjectHandle):\n # Serialize the object descriptor and data part. 
Both items are stored\n # as separate objects.\n descriptor, data = self.factory.serialize(object)\n object_id = self.store.write_object(descriptor)\n data_id = self.store.write_object(data)\n # Add the object information to the index and write the modified index\n # to the data store.\n self.index[object.namespace][object.name] = StoredObject(\n object_id=object_id,\n data_id=data_id,\n name=object.name,\n descriptor=descriptor\n )\n self._write_index()\n # If the object refers to a default object that object is removed since\n # it has been overwritten by the new object.\n try:\n del self.defaults.get(object.namespace, {})[object.name]\n except KeyError:\n pass", "def update(self, obj, parent=None, **attrs):\n if self._readonly:\n raise DatabaseReadOnlyError('upgrade_to_py3() must be called before database can be modified')\n object_type, object_id = self._to_obj_tuple(obj)\n\n type_attrs = self._get_type_attrs(object_type)\n get_pickle = False\n\n # Determine which inverted indexes need to be regenerated for this\n # object. Builds a dictionary of ivtidxes with a dirty flag and\n # a list of sql columns needed for reindexing.\n ivtidx_columns = {}\n for name, (attr_type, flags, attr_ivtidx, attr_split) in type_attrs.items():\n if flags & ATTR_INVERTED_INDEX:\n if attr_ivtidx not in ivtidx_columns:\n ivtidx_columns[attr_ivtidx] = [ False, [] ]\n if flags & ATTR_SEARCHABLE:\n ivtidx_columns[attr_ivtidx][1].append(name)\n if flags & (ATTR_SIMPLE | ATTR_IGNORE_CASE):\n get_pickle = True\n if name in attrs:\n ivtidx_columns[attr_ivtidx][0] = True\n\n # If the updated attribute is stored in the pickle (either a simple attr\n # or an case-insensitive indexed attr in which __foo is in the pickle)\n # then we must first retrieve the pickle for this object from the db.\n if (flags & ATTR_SIMPLE or flags & ATTR_INDEXED_IGNORE_CASE == ATTR_INDEXED_IGNORE_CASE) and \\\n name in attrs:\n get_pickle = True\n\n # TODO: if ObjectRow is supplied, don't need to fetch columns\n # that are available in the ObjectRow. (Of course this assumes\n # the object wasn't changed via elsewhere during the life of the\n # ObjectRow object, so maybe we don't want to do that.)\n reqd_columns = ['pickle'] if get_pickle else []\n for dirty, searchable_attrs in ivtidx_columns.values():\n if dirty:\n reqd_columns.extend(searchable_attrs)\n\n if reqd_columns:\n q = 'SELECT %s FROM objects_%s WHERE id=?' 
% (','.join(reqd_columns), object_type)\n row = self._db_query_row(q, (object_id,))\n if not row:\n raise ValueError(\"Can't update unknown object (%s, %d)\" % (object_type, object_id))\n if reqd_columns[0] == 'pickle' and row[0]:\n # One of the attrs we're updating is in the pickle, so we\n # have fetched it; now convert it to a dict.\n row_attrs = self._unpickle(row[0])\n for key, value in row_attrs.items():\n # Rename all __foo to foo for ATTR_IGNORE_CASE columns\n if key.startswith('__') and type_attrs[key[2:]][1] & ATTR_IGNORE_CASE:\n row_attrs[key[2:]] = value\n del row_attrs[key]\n # Update stored pickle data with new ATTR_SIMPLE attribute values\n row_attrs.update(attrs)\n attrs = row_attrs\n\n\n if parent:\n attrs['parent_type'], attrs['parent_id'] = self._to_obj_tuple(parent, numeric=True)\n attrs['id'] = object_id\n # Make copy of attrs for later query, since we're now about to mess with it.\n orig_attrs = attrs.copy()\n\n # Merge the ivtidx columns we grabbed above into attrs dict.\n for n, name in enumerate(reqd_columns):\n if name not in attrs and name != 'pickle':\n attrs[name] = row[n]\n\n for ivtidx, (dirty, searchable_attrs) in ivtidx_columns.items():\n if not dirty:\n # No attribute for this ivtidx changed.\n continue\n split = self._inverted_indexes[ivtidx]['split']\n # Remove existing indexed words for this object.\n self._delete_object_inverted_index_terms((object_type, object_id), ivtidx)\n\n # TODO: code duplication from add()\n # Need to reindex all columns in this object using this ivtidx.\n terms_list = []\n for name, (attr_type, flags, attr_ivtidx, attr_split) in type_attrs.items():\n if attr_ivtidx == ivtidx and name in attrs:\n if attr_type == BYTES_TYPE and isinstance(attrs[name], RAW_TYPE):\n # We store string objects in the db as buffers, in\n # order to prevent any unicode issues. So we need\n # to convert the buffer we got from the db back to\n # a string before parsing the attribute into terms.\n attrs[name] = BYTES_TYPE(attrs[name])\n terms_list.append((attrs[name], 1.0, attr_split or split, ivtidx))\n\n if ivtidx in attrs and ivtidx not in type_attrs:\n # Attribute named after an inverted index is given, but\n # that ivtidx is not a named attribute (which would be handled\n # in the for loop just above).\n terms_list.append((attrs[ivtidx], 1.0, split, ivtidx))\n\n terms = self._score_terms(terms_list)\n self._add_object_inverted_index_terms((object_type, object_id), ivtidx, terms)\n if ivtidx in type_attrs:\n # Registered attribute named after ivtidx; store ivtidx\n # terms in object.\n if not terms and ivtidx in orig_attrs:\n # Update removed all terms for this ivtidx, remove from pickle.\n orig_attrs[ivtidx] = None\n elif terms:\n # There are terms for this ivtidx, store in pickle.\n orig_attrs[ivtidx] = list(terms.keys())\n\n query, values = self._make_query_from_attrs(\"update\", orig_attrs, object_type)\n self._db_query(query, values)\n self._set_dirty()\n # TODO: if an objectrow was given, return an updated objectrow" ]
[ "0.6409624", "0.62771475", "0.60935414", "0.6087404", "0.60587406", "0.5914331", "0.5884166", "0.55695456", "0.5552869", "0.53761256", "0.53064", "0.52908915", "0.5275135", "0.5275135", "0.5275135", "0.5275135", "0.5275135", "0.52567697", "0.514278", "0.51280934", "0.5127361", "0.5116803", "0.5080401", "0.50677496", "0.5059826", "0.50176394", "0.49867213", "0.49799523", "0.4967514", "0.49589297" ]
0.6481042
0
Update an object in the database. For updating, the object is identified by a (type, id) tuple or an ObjectRow instance. Parent is a (type, id) tuple or ObjectRow instance, which refers to the object's parent. If specified, the object is reparented; otherwise the parent remains the same as when it was added with add(). attrs kwargs will vary based on object type. If an ATTR_SIMPLE attribute is set to None, it will be removed from the pickled dictionary.
def update(self, obj, parent=None, **attrs): if isinstance(obj, ObjectRow): object_type, object_id = obj['type'], obj['id'] else: object_type, object_id = obj type_attrs = self._get_type_attrs(object_type) get_pickle = False # Determine which inverted indexes need to be regenerated for this # object. Builds a dictionary of ivtidxes with a dirty flag and # a list of sql columns needed for reindexing. ivtidx_columns = {} for name, (attr_type, flags, attr_ivtidx, attr_split) in type_attrs.items(): if flags & ATTR_INVERTED_INDEX: if attr_ivtidx not in ivtidx_columns: ivtidx_columns[attr_ivtidx] = [ False, [] ] if flags & ATTR_SEARCHABLE: ivtidx_columns[attr_ivtidx][1].append(name) if flags & (ATTR_SIMPLE | ATTR_IGNORE_CASE): get_pickle = True if name in attrs: ivtidx_columns[attr_ivtidx][0] = True if flags & ATTR_SIMPLE and name in attrs: # Simple attribute needs pickle get_pickle = True # TODO: if ObjectRow is supplied, don't need to fetch columns # that are available in the ObjectRow. (Of course this assumes # the object wasn't changed via elsewhere during the life of the # ObjectRow object, so maybe we don't want to do that.) reqd_columns = ['pickle'] if get_pickle else [] for dirty, searchable_attrs in ivtidx_columns.values(): if dirty: reqd_columns.extend(searchable_attrs) if reqd_columns: q = 'SELECT %s FROM objects_%s WHERE id=?' % (','.join(reqd_columns), object_type) row = self._db_query_row(q, (object_id,)) if not row: raise ValueError, "Can't update unknown object (%s, %d)" % (object_type, object_id) if reqd_columns[0] == 'pickle' and row[0]: # One of the attrs we're updating is in the pickle, so we # have fetched it; now convert it to a dict. row_attrs = cPickle.loads(str(row[0])) for key, value in row_attrs.items(): # Rename all __foo to foo for ATTR_IGNORE_CASE columns if key.startswith('__') and type_attrs[key[2:]][1] & ATTR_IGNORE_CASE: row_attrs[key[2:]] = value del row_attrs[key] # Update stored pickle data with new ATTR_SIMPLE attribute values row_attrs.update(attrs) attrs = row_attrs if isinstance(parent, ObjectRow): attrs['parent_type'], attrs['parent_id'] = parent['type'], parent['id'] elif parent: attrs['parent_type'], attrs['parent_id'] = self._get_type_id(parent[0]), parent[1] attrs['id'] = object_id # Make copy of attrs for later query, since we're now about to mess with it. orig_attrs = attrs.copy() # Merge the ivtidx columns we grabbed above into attrs dict. for n, name in enumerate(reqd_columns): if name not in attrs and name != 'pickle': attrs[name] = row[n] for ivtidx, (dirty, searchable_attrs) in ivtidx_columns.items(): if not dirty: # No attribute for this ivtidx changed. continue split = self._inverted_indexes[ivtidx]['split'] # Remove existing indexed words for this object. self._delete_object_inverted_index_terms((object_type, object_id), ivtidx) # FIXME: code duplication from add() # Need to reindex all columns in this object using this ivtidx. terms_list = [] for name, (attr_type, flags, attr_ivtidx, attr_split) in type_attrs.items(): if attr_ivtidx == ivtidx and name in attrs: if attr_type == str and type(attrs[name]) == buffer: # We store string objects in the db as buffers, in # order to prevent any unicode issues. So we need # to convert the buffer we got from the db back to # a string before parsing the attribute into terms. 
attrs[name] = str(attrs[name]) terms_list.append((attrs[name], 1.0, attr_split or split, ivtidx)) if ivtidx in attrs and ivtidx not in type_attrs: # Attribute named after an inverted index is given, but # that ivtidx is not a named attribute (which would be handled # in the for loop just above). terms_list.append((attrs[ivtidx], 1.0, split, ivtidx)) terms = self._score_terms(terms_list) self._add_object_inverted_index_terms((object_type, object_id), ivtidx, terms) if ivtidx in type_attrs: # Registered attribute named after ivtidx; store ivtidx # terms in object. orig_attrs[ivtidx] = terms.keys() query, values = self._make_query_from_attrs("update", orig_attrs, object_type) self._db_query(query, values)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self, obj, parent=None, **attrs):\n if self._readonly:\n raise DatabaseReadOnlyError('upgrade_to_py3() must be called before database can be modified')\n object_type, object_id = self._to_obj_tuple(obj)\n\n type_attrs = self._get_type_attrs(object_type)\n get_pickle = False\n\n # Determine which inverted indexes need to be regenerated for this\n # object. Builds a dictionary of ivtidxes with a dirty flag and\n # a list of sql columns needed for reindexing.\n ivtidx_columns = {}\n for name, (attr_type, flags, attr_ivtidx, attr_split) in type_attrs.items():\n if flags & ATTR_INVERTED_INDEX:\n if attr_ivtidx not in ivtidx_columns:\n ivtidx_columns[attr_ivtidx] = [ False, [] ]\n if flags & ATTR_SEARCHABLE:\n ivtidx_columns[attr_ivtidx][1].append(name)\n if flags & (ATTR_SIMPLE | ATTR_IGNORE_CASE):\n get_pickle = True\n if name in attrs:\n ivtidx_columns[attr_ivtidx][0] = True\n\n # If the updated attribute is stored in the pickle (either a simple attr\n # or an case-insensitive indexed attr in which __foo is in the pickle)\n # then we must first retrieve the pickle for this object from the db.\n if (flags & ATTR_SIMPLE or flags & ATTR_INDEXED_IGNORE_CASE == ATTR_INDEXED_IGNORE_CASE) and \\\n name in attrs:\n get_pickle = True\n\n # TODO: if ObjectRow is supplied, don't need to fetch columns\n # that are available in the ObjectRow. (Of course this assumes\n # the object wasn't changed via elsewhere during the life of the\n # ObjectRow object, so maybe we don't want to do that.)\n reqd_columns = ['pickle'] if get_pickle else []\n for dirty, searchable_attrs in ivtidx_columns.values():\n if dirty:\n reqd_columns.extend(searchable_attrs)\n\n if reqd_columns:\n q = 'SELECT %s FROM objects_%s WHERE id=?' % (','.join(reqd_columns), object_type)\n row = self._db_query_row(q, (object_id,))\n if not row:\n raise ValueError(\"Can't update unknown object (%s, %d)\" % (object_type, object_id))\n if reqd_columns[0] == 'pickle' and row[0]:\n # One of the attrs we're updating is in the pickle, so we\n # have fetched it; now convert it to a dict.\n row_attrs = self._unpickle(row[0])\n for key, value in row_attrs.items():\n # Rename all __foo to foo for ATTR_IGNORE_CASE columns\n if key.startswith('__') and type_attrs[key[2:]][1] & ATTR_IGNORE_CASE:\n row_attrs[key[2:]] = value\n del row_attrs[key]\n # Update stored pickle data with new ATTR_SIMPLE attribute values\n row_attrs.update(attrs)\n attrs = row_attrs\n\n\n if parent:\n attrs['parent_type'], attrs['parent_id'] = self._to_obj_tuple(parent, numeric=True)\n attrs['id'] = object_id\n # Make copy of attrs for later query, since we're now about to mess with it.\n orig_attrs = attrs.copy()\n\n # Merge the ivtidx columns we grabbed above into attrs dict.\n for n, name in enumerate(reqd_columns):\n if name not in attrs and name != 'pickle':\n attrs[name] = row[n]\n\n for ivtidx, (dirty, searchable_attrs) in ivtidx_columns.items():\n if not dirty:\n # No attribute for this ivtidx changed.\n continue\n split = self._inverted_indexes[ivtidx]['split']\n # Remove existing indexed words for this object.\n self._delete_object_inverted_index_terms((object_type, object_id), ivtidx)\n\n # TODO: code duplication from add()\n # Need to reindex all columns in this object using this ivtidx.\n terms_list = []\n for name, (attr_type, flags, attr_ivtidx, attr_split) in type_attrs.items():\n if attr_ivtidx == ivtidx and name in attrs:\n if attr_type == BYTES_TYPE and isinstance(attrs[name], RAW_TYPE):\n # We store string objects in the db as buffers, in\n # order 
to prevent any unicode issues. So we need\n # to convert the buffer we got from the db back to\n # a string before parsing the attribute into terms.\n attrs[name] = BYTES_TYPE(attrs[name])\n terms_list.append((attrs[name], 1.0, attr_split or split, ivtidx))\n\n if ivtidx in attrs and ivtidx not in type_attrs:\n # Attribute named after an inverted index is given, but\n # that ivtidx is not a named attribute (which would be handled\n # in the for loop just above).\n terms_list.append((attrs[ivtidx], 1.0, split, ivtidx))\n\n terms = self._score_terms(terms_list)\n self._add_object_inverted_index_terms((object_type, object_id), ivtidx, terms)\n if ivtidx in type_attrs:\n # Registered attribute named after ivtidx; store ivtidx\n # terms in object.\n if not terms and ivtidx in orig_attrs:\n # Update removed all terms for this ivtidx, remove from pickle.\n orig_attrs[ivtidx] = None\n elif terms:\n # There are terms for this ivtidx, store in pickle.\n orig_attrs[ivtidx] = list(terms.keys())\n\n query, values = self._make_query_from_attrs(\"update\", orig_attrs, object_type)\n self._db_query(query, values)\n self._set_dirty()\n # TODO: if an objectrow was given, return an updated objectrow", "def update(self, collection_id, parent_id, object_id, object,\n unique_fields=None, id_field=DEFAULT_ID_FIELD,\n modified_field=DEFAULT_MODIFIED_FIELD,\n auth=None):\n obj = Session.query(self.collection).get(object_id)\n # TODO: verify permissions\n if obj is None:\n obj = self.create(collection_id=collection_id, parent_id=parent_id,\n record=object, unique_fields=unique_fields,\n id_field=id_field, modified_field=modified_field,\n auth=None)\n else:\n for k, v in object.items():\n setattr(obj, k, v)\n return obj.deserialize()", "def retype(self, obj, new_type):\n\n if new_type not in self._object_types:\n raise ValueError('Parent type %s not registered in database' % new_type)\n\n # Reload and force pickled attributes into the dict.\n try:\n attrs = dict(self.get(obj))\n except TypeError:\n raise ValueError('Object (%s, %s) is not found in database' % (obj['type'], obj['id']))\n\n parent = attrs.get('parent')\n # Remove all attributes that aren't also in the destination type. 
Also\n # remove type, id, and parent attrs, which get regenerated when we add().\n for attr_name in list(attrs.keys()):\n # TODO: check src and dst attr types and try to coerce, and if\n # not possible, raise an exception.\n if attr_name not in self._object_types[new_type][1] or attr_name in ('type', 'id', 'parent'):\n del attrs[attr_name]\n\n new_obj = self.add(new_type, parent, **attrs)\n # Reparent all current children to the new id.\n for child in self.query(parent=obj):\n # TODO: if this raises, delete new_obj (to rollback) and reraise.\n self.reparent(child, new_obj)\n\n self.delete(obj)\n return new_obj", "def update(cls, row_id, **kwargs):\n cls.delete(row_id)\n # obj = cls.query.filter_by(id=row_id).first()\n # for k, v in kwargs.items():\n # obj[k] = v\n # obj = cls.query.filter_by(id=row_id).update(kwargs)\n kwargs[\"id\"] = row_id\n obj = cls(**kwargs)\n #print(\"the type of updated object is\", type(obj))\n return commit(obj)", "def _update(self, model_obj):\n conn = self._get_session()\n db_item = None\n\n # Fetch the record from database\n try:\n identifier = getattr(model_obj, id_field(self.entity_cls).attribute_name)\n db_item = conn.query(self.model_cls).get(\n identifier\n ) # This will raise exception if object was not found\n except DatabaseError as exc:\n logger.error(f\"Database Record not found: {exc}\")\n raise\n\n if db_item is None:\n conn.rollback()\n conn.close()\n raise ObjectNotFoundError(\n {\n \"_entity\": f\"`{self.entity_cls.__name__}` object with identifier {identifier} \"\n f\"does not exist.\"\n }\n )\n\n # Sync DB Record with current changes. When the session is committed, changes are automatically synced\n try:\n for attribute in attributes(self.entity_cls):\n if attribute != id_field(self.entity_cls).attribute_name and getattr(\n model_obj, attribute\n ) != getattr(db_item, attribute):\n setattr(db_item, attribute, getattr(model_obj, attribute))\n except DatabaseError as exc:\n logger.error(f\"Error while updating: {exc}\")\n raise\n finally:\n if not current_uow:\n conn.commit()\n conn.close()\n\n return model_obj", "def update(self, openid=None, **kwargs):\n assert openid\n\n with db.session.begin_nested():\n for key, value in kwargs.items():\n if hasattr(self, key):\n setattr(self, key, value)\n db.session.merge(self)\n db.session.commit()\n return self", "def update(self, **kwargs):\n return self.parent.update_instance(self.name, kwargs)", "def update(self, commit=True, **kwargs):\n for attr, value in kwargs.items():\n setattr(self, attr, value)\n return commit and self.save() or self", "def update_object(self, label, data):\n self._validate_can_write()\n self._validate_labels(label, must_exist=True)\n\n cls = self._registry.get_inserter(data)\n if cls is None:\n msg = \"{!r} is not a supported type\".format(data)\n raise ValueError(msg)\n\n record_type = cls.record_type\n if record_type != 'structure':\n raise ValueError(\"Input data is not a dictionary\")\n\n with self._h5file('r+') as h5file:\n # Check the general structure of the data and file\n grp = h5file[label]\n attrs = get_decoded(grp.attrs)\n if not attrs['RecordType'] == 'object':\n raise ValueError(\"Record '{}' is not an object\".format(label))\n if attrs['Empty'] == 'yes':\n raise ValueError(\"Cannot update an empty record\")\n record_sig = unnest_record(grp)\n data_sig = unnest(data, self._registry)\n if not are_signatures_equivalent(record_sig, data_sig):\n msg = \"Data is not compatible with record '{}'\"\n raise ValueError(msg.format(label))\n\n del h5file[label]\n\n 
self.insert(label, data, attrs['Description'], int(attrs['Deflate']))\n\n # Fix the record type and update the header\n with self._h5file('r+') as h5file:\n grp = h5file[label]\n set_encoded(\n grp.attrs,\n RecordType='object',\n Class=attrs['Class'],\n )\n update_header(h5file.attrs)", "def update(self, commit=True, **kwargs):\n for attr, value in kwargs.items():\n setattr(self, attr, value)\n return commit and self.save() or self", "def update(self, commit=True, **kwargs):\n for attr, value in kwargs.items():\n setattr(self, attr, value)\n return commit and self.save() or self", "def update(self, commit=True, **kwargs):\n for attr, value in kwargs.iteritems():\n setattr(self, attr, value)\n return commit and self.save() or self", "def update_obj(obj, attributes, params):\n for key in params.keys():\n if key in attributes:\n try:\n set_attribute(obj, key, params[key])\n except:\n abort(400)\n \n Session.flush()\n Session.commit()", "def update(self, commit=True, **kwargs):\n # Prevent changing IDS\n kwargs.pop('id', None)\n for attr, value in kwargs.iteritems():\n # Flask-restful makes everything None by default\n if value is not None:\n setattr(self, attr, value)\n return commit and self.save() or self", "def update(self, **values):\r\n if self.__abstract__:\r\n raise ThunderdomeException('cant update abstract elements')\r\n self.pre_update(**values)\r\n for key in values.keys():\r\n if key not in self._columns:\r\n raise TypeError(\"unrecognized attribute name: '{}'\".format(key))\r\n\r\n for k,v in values.items():\r\n setattr(self, k, v)\r\n\r\n return self.save()", "def _update_object(self, data_dict):\r\n pass", "def update(self, obj, data):\n self.get(obj[self.model.pk_field.name])\n self.validate_fields(data)\n\n fields = []\n values = []\n\n for k, v in data.iteritems():\n if k in self.model.get_fields_name():\n fields.append(k)\n values.append(v)\n\n conn = self.get_connector()\n cursor = conn.cursor()\n update = \" ,\".join([\"{0}='{1}'\".format(f, v) for f, v in zip(fields,\n values)])\n query = \"update {0} set {1} WHERE {2}={3}\".format(\n self.ressource_config[\"table\"],\n update,\n self.model.pk_field.name,\n obj[self.model.pk_field.name]\n )\n\n cursor.execute(query)\n conn.commit()\n conn.close()\n\n return self.get(obj[self.model.pk_field.name])", "def update(self):\n data = self.serialize()\n\n self.validate(data)\n\n saved_data = DATABASE_CONNECTION.update(self.__class__.__name__, data['id'], data)\n\n self.__dict__.update(saved_data)", "def update_object(self, instance, using=None, **kwargs):\n # Check to make sure we want to index this first.\n if self.should_update(instance, **kwargs):\n backend = self.get_backend(using)\n\n if backend is not None:\n backend.update(self, [instance])", "def _update(self, model_obj: Any):\n conn = self.provider.get_connection()\n\n identifier = model_obj.meta.id\n\n # Fetch the record from database\n try:\n # Calling `get` will raise `NotFoundError` if record was not found\n self.model_cls.get(\n id=identifier, using=conn, index=self.model_cls._index._name\n )\n except NotFoundError as exc:\n logger.error(f\"Database Record not found: {exc}\")\n raise ObjectNotFoundError(\n {\n \"_entity\": f\"`{self.entity_cls.__name__}` object with identifier {identifier} \"\n f\"does not exist.\"\n }\n )\n\n try:\n model_obj.save(\n refresh=True,\n index=self.model_cls._index._name,\n using=conn,\n )\n except Exception as exc:\n logger.error(f\"Error while creating: {exc}\")\n raise\n\n return model_obj", "def update_object(self, name: str) -> 
None:", "def do_update(self, *args):\n if len(args) == 1:\n args = [ele for ele in args[0].split(' ')]\n if args[0] == '':\n print(\"** class name missing **\")\n return\n if args[0] not in self.list_classes:\n print(\"** class doesn't exist **\")\n return\n if len(args) < 2:\n print(\"** instance id missing **\")\n return\n elif len(args) < 3:\n print(\"** attribute name missing **\")\n return\n elif len(args) < 4:\n print(\"** value missing **\")\n return\n\n storage.reload()\n dict_objs = storage.all()\n if dict_objs is None or dict_objs == []:\n print(\"** no instance found **\")\n return\n\n key = \"{}.{}\".format(args[0], args[1])\n if key in dict_objs.keys():\n obj = dict_objs[key]\n if args[2] in obj.__class__.__dict__:\n obj.__dict__[args[2]] =\\\n type(obj.__class__.__dict__[args[2]])(args[3])\n else:\n obj.__dict__[args[2]] = args[3]\n storage.save()\n else:\n print(\"** no instance found **\")", "def update(self, *args, **kwargs):\n if args is not () and args is not None:\n attr_names = [\"id\", \"size\", \"x\", \"y\"]\n for index, attr in enumerate(args):\n setattr(self, attr_names[index], attr)\n else:\n for key, value in kwargs.items():\n if hasattr(self, key):\n setattr(self, key, value)", "def post(self, obj):\n\n\t\tmodelobj = (not obj.get('parent_type')) and model.get(obj) or None\n\t\tmodelobj and modelobj.before_post()\n\t\tmodelobj and modelobj.validate()\n\t\t\t\t\n\t\tobj_single, is_vector = self._get_single(obj)\n\t\t# save the parent\n\t\tself.post_single(obj_single)\n\t\tif is_vector:\t\n\t\t\tfor k in obj:\n\t\t\t\td = {\"type\":k, \"parent\":obj[\"name\"], \"parent_type\":obj[\"type\"]}\n\t\t\t\t# dict, one child only\n\t\t\t\tif type(obj[k]) is dict:\n\t\t\t\t\tobj[k].update(d)\n\t\t\t\t\tself.post(obj[k])\n\t\t\t\t\n\t\t\t\t# multiple children\n\t\t\t\tif type(obj[k]) in (list, tuple):\n\t\t\t\t\tidx = 0\n\t\t\t\t\tfor child in obj[k]:\n\t\t\t\t\t\td['idx'] = idx\n\t\t\t\t\t\tidx += 1\n\t\t\t\t\t\t\n\t\t\t\t\t\t# child is a dict\n\t\t\t\t\t\tif type(child) is dict:\n\t\t\t\t\t\t\tchild.update(d)\n\t\t\t\t\t\t\tself.post(child)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t# child is literal (only names)\n\t\t\t\t\t\telif type(child) in (str, int, float):\n\t\t\t\t\t\t\tc = {\"value\":child}\n\t\t\t\t\t\t\tc.update(d)\n\t\t\t\t\t\t\tself.post_single(c)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\traise Exception, \"child %s must be dict or literal\" % str(child)\t\n\t\tmodelobj and modelobj.after_post()", "def updateItem(self, object):\n pass", "def update(self, **kwargs):\n return self._object.update(meta=kwargs)", "async def modify(\n self, item: T, update: Union[dict, MotycQuery], *,\n inject_default_id: bool = None,\n ) -> T:\n\n assert isinstance(item, BaseModel), \"Can only handle BaseModel, not dict i.g.\"\n\n document = item.dict(by_alias=True)\n\n assert document.get(self.identity) is not None, f\"Need identity ({self.identity}) to update model.\"\n\n return await self.update_one(\n {self.identity: document[self.identity]},\n update,\n inject_default_id=inject_default_id\n )", "def reparent(self, obj, parent):\n return self.update(obj, parent=parent)", "def update(self, attributes, type=None, name=None, identity=None):\n attributes = attributes or {}\n type = type or attributes.get('type')\n name = name or attributes.get('name')\n identity = identity or attributes.get('identity')\n if name and identity:\n name = None # Only send one\n request = self.request(operation='UPDATE', type=type, name=name,\n identity=identity, body=self.clean_attrs(attributes))\n return Entity(self, 
self.call(request).body)", "def do_update(self, arg):\n args = arg.split()\n object_dict = storage.all()\n if len(args) == 0:\n print(\"** class name missing **\")\n return\n if args[0] in self.class_dict:\n if len(args) == 1:\n print(\"** instance id missing **\")\n return\n elif len(args) == 2:\n print(\"** attribute name missing **\")\n return\n elif len(args) == 3:\n print(\"** value missing **\")\n return\n else:\n print(\"** class doesn't exist **\")\n return\n\n for i in range(len(args)):\n if args[i].startswith('\"') and args[i].endswith('\"'):\n args[i] = args[i][1:-1]\n\n for full_key in object_dict.keys():\n key = full_key.split('.')\n key_id = key[1]\n if args[0] in self.class_dict:\n if args[1] == object_dict[full_key].id:\n setattr(object_dict[full_key], args[2], args[3])\n setattr(object_dict[full_key], \"updated_at\",\n datetime.now())\n storage.save()\n return\n else:\n print(\"** class doesn't exist **\")\n return\n print(\"** no instance found **\")" ]
[ "0.74381167", "0.6614651", "0.6315451", "0.61962694", "0.614676", "0.6144914", "0.6054113", "0.6040945", "0.5997564", "0.5952653", "0.5952653", "0.59390825", "0.59053296", "0.5887963", "0.5873624", "0.5767497", "0.57389915", "0.57276726", "0.5707913", "0.5680127", "0.5647404", "0.5640354", "0.55636173", "0.55448896", "0.55375713", "0.5528828", "0.55248845", "0.5514031", "0.5497346", "0.5493412" ]
0.786734
0
Scores the terms given in terms_list, which is a list of tuples (terms, coeff, split, ivtidx), where terms is the string or sequence of terms to be scored, coeff is the weight to give each term in this part (1.0 is normal), split is the function or regular expression used to split terms (only used if a string is given for terms), and ivtidx is the name of the inverted index we're scoring for. Terms are either unicode objects or strings, or sequences of unicode or string objects. In the case of strings, they are passed through str_to_unicode() to try to decode them intelligently.
def _score_terms(self, terms_list): terms_scores = {} total_terms = 0 for terms, coeff, split, ivtidx in terms_list: if not terms: continue # Swap ivtidx name for inverted index definition dict ivtidx = self._inverted_indexes[ivtidx] if not isinstance(terms, (basestring, list, tuple)): raise ValueError, "Invalid type (%s) for ATTR_INVERTED_INDEX attribute. " \ "Only sequence, unicode or str allowed." % str(type(terms)) if isinstance(terms, (list, tuple)): parsed = terms else: if callable(split): parsed = split(terms) else: parsed = split.split(terms) for term in parsed: if not term or (ivtidx['max'] and len(term) > ivtidx['max']) or \ (ivtidx['min'] and len(term) < ivtidx['min']): continue term = str_to_unicode(term) lower_term = term.lower() if ivtidx['ignore'] and lower_term in ivtidx['ignore']: continue if lower_term not in terms_scores: terms_scores[lower_term] = [term, coeff] else: terms_scores[lower_term][1] += coeff total_terms += 1 # Score based on term frequency in document. (Add weight for # non-dictionary terms? Or longer terms?) for lower_term, score in terms_scores.items(): terms_scores[lower_term][1] = math.sqrt(terms_scores[lower_term][1] / total_terms) return dict(terms_scores.values())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _score_terms(self, terms_list):\n terms_scores = {}\n total_terms = 0\n\n for terms, coeff, split, ivtidx in terms_list:\n if not terms:\n continue\n # Swap ivtidx name for inverted index definition dict\n ivtidx = self._inverted_indexes[ivtidx]\n if not isinstance(terms, (str, list, tuple)):\n raise ValueError(\"Invalid type (%s) for ATTR_INVERTED_INDEX attribute. \" \\\n \"Only sequence, unicode or str allowed.\" % str(type(terms)))\n\n if isinstance(terms, (list, tuple)):\n terms = [tostr(term) for term in terms]\n parsed = terms\n else:\n terms = tostr(terms)\n if callable(split):\n parsed = list(split(terms))\n else:\n parsed = split.split(terms)\n\n for term in parsed:\n if not term or (ivtidx['max'] and len(term) > ivtidx['max']) or \\\n (ivtidx['min'] and len(term) < ivtidx['min']):\n continue\n\n lower_term = term.lower()\n\n if ivtidx['ignore'] and lower_term in ivtidx['ignore']:\n continue\n if lower_term not in terms_scores:\n terms_scores[lower_term] = [term, coeff]\n else:\n terms_scores[lower_term][1] += coeff\n total_terms += 1\n\n # Score based on term frequency in document. (Add weight for\n # non-dictionary terms? Or longer terms?)\n for lower_term, score in terms_scores.items():\n terms_scores[lower_term][1] = math.sqrt(terms_scores[lower_term][1] / total_terms)\n return dict(terms_scores.values())", "def _add_object_inverted_index_terms(self, (object_type, object_id), ivtidx, terms):\n if not terms:\n return\n\n # Resolve object type name to id\n object_type = self._get_type_id(object_type)\n\n # Holds any of the given terms that already exist in the database\n # with their id and count.\n db_terms_count = {}\n\n terms_list = _list_to_printable([ t.lower() for t in terms.keys() ])\n q = \"SELECT id,term,count FROM ivtidx_%s_terms WHERE term IN %s\" % (ivtidx, terms_list)\n rows = self._db_query(q)\n for row in rows:\n db_terms_count[row[1]] = row[0], row[2]\n\n # For executemany queries later.\n update_list, map_list = [], []\n\n for term, score in terms.items():\n term = term.lower()\n if term not in db_terms_count:\n # New term, so insert it now.\n self._db_query('INSERT OR REPLACE INTO ivtidx_%s_terms VALUES(NULL, ?, 1)' % ivtidx, (term,))\n db_id, db_count = self._cursor.lastrowid, 1\n db_terms_count[term] = db_id, db_count\n else:\n db_id, db_count = db_terms_count[term]\n update_list.append((db_count + 1, db_id))\n\n map_list.append((int(score*10), db_id, object_type, object_id, score))\n\n self._db_query('UPDATE ivtidx_%s_terms SET count=? WHERE id=?' 
% ivtidx, update_list, many = True)\n self._db_query('INSERT INTO ivtidx_%s_terms_map VALUES(?, ?, ?, ?, ?)' % ivtidx, map_list, many = True)", "def query_score(terms, title):\n\n def term_score(term, word):\n # print (term, word)\n if word.startswith(term):\n return float(len(term)) / len(word)\n else:\n return 0.0\n\n words = list(self._clean_words(title))\n return sum(term_score(t, w) for t, w in product(terms, words))", "def _add_object_inverted_index_terms(self, obj, ivtidx, terms):\n object_type, object_id = obj\n if not terms:\n return\n\n # Resolve object type name to id\n object_type = self._get_type_id(object_type)\n\n # Holds any of the given terms that already exist in the database\n # with their id and count.\n db_terms_count = {}\n\n terms_list = _list_to_printable([ t.lower() for t in terms.keys() ])\n q = \"SELECT id,term,count FROM ivtidx_%s_terms WHERE term IN %s\" % (ivtidx, terms_list)\n rows = self._db_query(q)\n for row in rows:\n db_terms_count[row[1]] = row[0], row[2]\n\n # For executemany queries later.\n update_list, map_list = [], []\n\n for term, score in terms.items():\n term = term.lower()\n if term not in db_terms_count:\n # New term, so insert it now.\n self._db_query('INSERT OR REPLACE INTO ivtidx_%s_terms VALUES(NULL, ?, 1)' % ivtidx, (term,))\n db_id, db_count = self._cursor.lastrowid, 1\n db_terms_count[term] = db_id, db_count\n else:\n db_id, db_count = db_terms_count[term]\n update_list.append((db_count + 1, db_id))\n\n map_list.append((int(score*10), db_id, object_type, object_id, score))\n\n self._db_query('UPDATE ivtidx_%s_terms SET count=? WHERE id=?' % ivtidx, update_list, many = True)\n self._db_query('INSERT INTO ivtidx_%s_terms_map VALUES(?, ?, ?, ?, ?)' % ivtidx, map_list, many = True)", "def rankDocuments_itp(terms, docs, index, tf, itp): \n\n\tdocVectors=defaultdict(lambda: [0]*len(terms)) \t\n\tqueryVector=[0]*len(terms) \n\n\t# compute the norm for the query tf\n\tquery_terms_count = collections.Counter(terms) \n\t\n\tquery_norm = la.norm(list(query_terms_count.values()))\n\t\n\tfor termIndex, term in enumerate(terms): #termIndex is the index of the term in the query\n\t\t\tif term not in index:\n\t\t\t\t\tcontinue \n\t\t\t## Compute tf*idf(normalize tf as done with documents)\n\t\t\tqueryVector[termIndex]=query_terms_count[term]/query_norm * itp[term] \n\t\t\t# Generate docVectors for matching docs\n\t\t\tfor docIndex, (doc, postings) in enumerate(index[term]):\n \n\t\t\t\t\tif doc in docs:\n\t\t\t\t\t\t\tdocVectors[doc][termIndex]=tf[term][docIndex] * itp[term] \n\t\n\tdocScores=[ [np.dot(curDocVec, queryVector), doc] for doc, curDocVec in docVectors.items() ]\n\tdocScores.sort(reverse=True)\n\tresultDocs=[x[1] for x in docScores]\n\n\treturn resultDocs", "def score_doc_list(docList):\n return [(vectorize_doc(doc), doc) for doc in docList]", "def rankDocuments(terms, docs, index, idf, tf, rt, likes, score):\n \n # init docvectors and queryvector to dict and array of 0, to be filled later\n docVectors=collections.defaultdict(lambda: [0]*len(terms)) \n queryVector=[0]*len(terms) \n\n if score == \"1\":\n # compute the norm for the query tf\n query_terms_count = collections.Counter(terms) # get the frequency of each term in the query. 
\n \n query_norm = np.linalg.norm(list(query_terms_count.values()))\n \n for termIndex, term in enumerate(terms): #termIndex is the index of the term in the query\n if term not in index:\n continue\n \n ## Compute tf*idf(normalize tf as done with documents)\n queryVector[termIndex] = query_terms_count[term] / query_norm * idf[term]\n\n # Generate docVectors for matching docs\n for docIndex, (doc, postings) in enumerate(index[term]):\n # in form of [docIndex, (doc, postings)] \n if doc in docs:\n docVectors[doc][termIndex]=tf[term][docIndex] * idf[term]\n # calculate the score of each doc\n # compute the cosine similarity between queyVector and each docVector:\n docScores=[ [np.dot(curDocVec, queryVector), doc] for doc, curDocVec in docVectors.items() ]\n else:\n # as we just want cosine similarity but not use tf-idf, we're using the term frequency as a weight\n # in our custom ranking\n # compute the norm for the query tf\n query_terms_count = collections.Counter(terms) # get the frequency of each term in the query. \n \n query_norm = np.linalg.norm(list(query_terms_count.values()))\n \n for termIndex, term in enumerate(terms): #termIndex is the index of the term in the query\n if term not in index:\n continue\n \n ## Compute tf (normalize tf as done with documents)\n queryVector[termIndex] = query_terms_count[term] / query_norm \n\n # Generate docVectors for matching docs\n for docIndex, (doc, postings) in enumerate(index[term]):\n # in form of [docIndex, (doc, postings)] \n if doc in docs:\n docVectors[doc][termIndex]=tf[term][docIndex]\n # calculate the score of each doc\n # compute the cosine similarity and add rt and fav score\n # rt brings to more visibility than a like, hence a higher score\n docScores=[ [np.dot(curDocVec, queryVector) + 1.5*rt[doc] + likes[doc], doc] for doc, curDocVec in docVectors.items() ]\n docScores.sort(reverse=True)\n resultDocs=[x[1] for x in docScores]\n if len(resultDocs) == 0:\n print(\"No results found, try again\")\n return None \n return resultDocs", "def compute_doc_scores(self, query_terms, inverted_indexes,\n doc_lengths, parameters):\n \n doc_scores = dict() # This is to contain each document's score\n for term in query_terms: # For each query term ...\n \n # Retrieve information regarding the current term\n term_info = inverted_indexes[term]\n n_docs_containing_term = len(term_info)\n \n # For each document that contains the term ...\n for cord_uid in term_info.keys():\n tf = term_info[cord_uid] # Retrieve the term frequency\n doc_length = doc_lengths[cord_uid] # Retrieve the document length\n \n # Compute document's score for this term\n score = self.compute_term_BM25(term, tf, n_docs_containing_term,\n Constants.doc_count,\n Constants.avg_doc_length, doc_length,\n parameters.k, parameters.b)\n \n # Store or increment the score\n if cord_uid in doc_scores:\n doc_scores[cord_uid] += score\n else:\n doc_scores[cord_uid] = score\n \n return doc_scores", "def perform_indexing(self, words_list):\n\n indexer_table = {}\n\n for word in words_list:\n hash_value = self.calculate_weighted_hash(word)\n freq_table = calculate_frequency_table(word)\n\n if hash_value not in indexer_table:\n indexer_table[hash_value] = {}\n indexer_table[hash_value][as_set(freq_table)] = [word]\n else:\n if as_set(freq_table) not in indexer_table[hash_value]:\n indexer_table[hash_value][as_set(freq_table)] = [word]\n else:\n indexer_table[hash_value][as_set(freq_table)].append(word)\n\n return indexer_table", "def _query_inverted_index(self, ivtidx, terms, limit = 100, 
object_type = None):\n t0 = time.time()\n # Fetch number of files the inverted index applies to. (Used in score\n # calculations.)\n objectcount = self._inverted_indexes[ivtidx]['objectcount']\n\n if not isinstance(terms, (list, tuple)):\n split = self._inverted_indexes[ivtidx]['split']\n if callable(split):\n terms = split(str_to_unicode(terms).lower())\n else:\n terms = split.split(str_to_unicode(terms).lower())\n else:\n terms = [ str_to_unicode(x).lower() for x in terms ]\n\n # Remove terms that aren't indexed (words less than minimum length\n # or and terms in the ignore list for this ivtidx).\n if self._inverted_indexes[ivtidx]['min']:\n terms = [ x for x in terms if len(x) >= self._inverted_indexes[ivtidx]['min'] ]\n if self._inverted_indexes[ivtidx]['ignore']:\n terms = [ x for x in terms if x not in self._inverted_indexes[ivtidx]['ignore'] ]\n\n terms_list = _list_to_printable(terms)\n nterms = len(terms)\n\n if nterms == 0:\n return []\n\n # Find term ids and order by least popular to most popular.\n rows = self._db_query('SELECT id,term,count FROM ivtidx_%s_terms WHERE ' \\\n 'term IN %s ORDER BY count' % (ivtidx, terms_list))\n save = map(lambda x: x.lower(), terms)\n terms = {}\n ids = []\n for row in rows:\n if row[2] == 0:\n return []\n\n # Give terms weight according to their order\n order_weight = 1 + len(save) - list(save).index(row[1])\n terms[row[0]] = {\n 'term': row[1],\n 'count': row[2],\n 'idf_t': math.log(objectcount / row[2] + 1) + order_weight,\n 'ids': {}\n }\n ids.append(row[0])\n\n # Not all the terms we requested are in the database, so we return\n # 0 results.\n if len(ids) < nterms:\n return []\n\n if object_type:\n # Resolve object type name to id\n object_type = self._get_type_id(object_type)\n\n results, state = {}, {}\n for id in ids:\n results[id] = {}\n state[id] = {\n 'offset': [0]*11,\n 'more': [True]*11,\n 'count': 0,\n 'done': False\n }\n\n all_results = {}\n if limit == None:\n limit = objectcount\n\n if limit <= 0 or objectcount <= 0:\n return {}\n\n sql_limit = min(limit*3, 200)\n finished = False\n nqueries = 0\n\n # Keep a dict keyed on object_id that we can use to narrow queries\n # once we have a full list of all objects that match a given term.\n id_constraints = None\n t1 = time.time()\n while not finished:\n for rank in range(10, -1, -1):\n for id in ids:\n if not state[id]['more'][rank] or state[id]['done']:\n # If there's no more results at this rank, or we know\n # we've already seen all the results for this term, we\n # don't bother with the query.\n continue\n\n q = 'SELECT object_type,object_id,frequency FROM ivtidx_%s_terms_map ' % ivtidx + \\\n 'WHERE term_id=? AND rank=? %s %%s LIMIT ? OFFSET ?'\n\n if object_type == None:\n q %= ''\n v = (id, rank, sql_limit, state[id][\"offset\"][rank])\n else:\n q %= 'AND object_type=?'\n v = (id, rank, object_type, sql_limit, state[id][\"offset\"][rank])\n\n if id_constraints:\n # We know about all objects that match one or more of the other\n # search terms, so we add the constraint that all rows for this\n # term match the others as well. 
Effectively we push the logic\n # to generate the intersection into the db.\n # XXX: This can't benefit from the index if object_type\n # is not specified.\n q %= ' AND object_id IN %s' % _list_to_printable(tuple(id_constraints))\n else:\n q %= ''\n\n rows = self._db_query(q, v)\n nqueries += 1\n state[id]['more'][rank] = len(rows) == sql_limit\n state[id]['count'] += len(rows)\n\n for row in rows:\n results[id][row[0], row[1]] = row[2] * terms[id]['idf_t']\n terms[id]['ids'][row[1]] = 1\n\n if state[id]['count'] >= terms[id]['count'] or \\\n (id_constraints and len(rows) == len(id_constraints)):\n # If we've now retrieved all objects for this term, or if\n # all the results we just got now intersect with our\n # constraints set, we're done this term and don't bother\n # querying it at other ranks.\n #print 'Done term '%s' at rank %d' % (terms[id]['term'], rank)\n state[id]['done'] = True\n if id_constraints is not None:\n id_constraints = id_constraints.intersection(terms[id]['ids'])\n else:\n id_constraints = set(terms[id]['ids'])\n #\n # end loop over terms\n\n\n for r in reduce(lambda a, b: set(a).intersection(b), results.values()):\n all_results[r] = 0\n for id in ids:\n if r in results[id]:\n all_results[r] += results[id][r]\n\n # If we have enough results already, no sense in querying the\n # next rank.\n if limit > 0 and len(all_results) > limit*2:\n finished = True\n #print \"Breaking at rank:\", rank\n break\n #\n # end loop over ranks\n\n if finished:\n break\n\n finished = True\n for index in range(len(ids)):\n id = ids[index]\n\n if index > 0:\n last_id = ids[index-1]\n a = results[last_id]\n b = results[id]\n intersect = set(a).intersection(b)\n\n if len(intersect) == 0:\n # Is there any more at any rank?\n a_more = b_more = False\n for rank in range(11):\n a_more = a_more or state[last_id]['more'][rank]\n b_more = b_more or state[id]['more'][rank]\n\n if not a_more and not b_more:\n # There's no intersection between these two search\n # terms and neither have more at any rank, so we\n # can stop the whole query.\n finished = True\n break\n\n # There's still hope of a match. Go through this term and\n # see if more exists at any rank, increasing offset and\n # unsetting finished flag so we iterate again.\n for rank in range(10, -1, -1):\n if state[id]['more'][rank] and not state[id]['done']:\n state[id]['offset'][rank] += sql_limit\n finished = False\n\n # If we haven't found enough results after this pass, grow our\n # limit so that we expand our search scope. (XXX: this value may\n # need empirical tweaking.)\n sql_limit *= 10\n\n # end loop while not finished\n log.info('%d results, did %d subqueries, %.04f seconds (%.04f overhead)',\n len(all_results), nqueries, time.time()-t0, t1-t0)\n return all_results", "def update_terms(self, terms):\n with transaction.atomic():\n self.phenotype_terms.all().delete()\n for name, lst in terms.items():\n self.phenotype_terms.create(individual=name, terms=lst)", "def _query_inverted_index(self, ivtidx, terms, limit = 100, object_type = None):\n t0 = time.time()\n # Fetch number of files the inverted index applies to. 
(Used in score\n # calculations.)\n objectcount = self._inverted_indexes[ivtidx]['objectcount']\n\n if not isinstance(terms, (list, tuple)):\n split = self._inverted_indexes[ivtidx]['split']\n if callable(split):\n terms = [term for term in split(tostr(terms).lower()) if term]\n else:\n terms = [term for term in split.split(tostr(terms).lower()) if term]\n else:\n terms = [ tostr(x).lower() for x in terms ]\n\n # Remove terms that aren't indexed (words less than minimum length\n # or and terms in the ignore list for this ivtidx).\n if self._inverted_indexes[ivtidx]['min']:\n terms = [ x for x in terms if len(x) >= self._inverted_indexes[ivtidx]['min'] ]\n if self._inverted_indexes[ivtidx]['ignore']:\n terms = [ x for x in terms if x not in self._inverted_indexes[ivtidx]['ignore'] ]\n\n terms_list = _list_to_printable(terms)\n nterms = len(terms)\n\n if nterms == 0:\n return []\n\n # Find term ids and order by least popular to most popular.\n rows = self._db_query('SELECT id,term,count FROM ivtidx_%s_terms WHERE ' \\\n 'term IN %s ORDER BY count' % (ivtidx, terms_list))\n save = [x.lower() for x in terms]\n terms = {}\n ids = []\n for row in rows:\n if row[2] == 0:\n return []\n\n # Give terms weight according to their order\n order_weight = 1 + len(save) - list(save).index(row[1])\n terms[row[0]] = {\n 'term': row[1],\n 'count': row[2],\n 'idf_t': math.log(objectcount / row[2] + 1) + order_weight,\n 'ids': {}\n }\n ids.append(row[0])\n\n # Not all the terms we requested are in the database, so we return\n # 0 results.\n if len(ids) < nterms:\n return []\n\n if object_type:\n # Resolve object type name to id\n object_type = self._get_type_id(object_type)\n\n results, state = {}, {}\n for id in ids:\n results[id] = {}\n state[id] = {\n 'offset': [0]*11,\n 'more': [True]*11,\n 'count': 0,\n 'done': False\n }\n\n all_results = {}\n if limit == None:\n limit = objectcount\n\n if limit <= 0 or objectcount <= 0:\n return {}\n\n sql_limit = min(limit*3, 200)\n finished = False\n nqueries = 0\n\n # Keep a dict keyed on object_id that we can use to narrow queries\n # once we have a full list of all objects that match a given term.\n id_constraints = None\n t1 = time.time()\n while not finished:\n for rank in range(10, -1, -1):\n for id in ids:\n if not state[id]['more'][rank] or state[id]['done']:\n # If there's no more results at this rank, or we know\n # we've already seen all the results for this term, we\n # don't bother with the query.\n continue\n\n q = 'SELECT object_type,object_id,frequency FROM ivtidx_%s_terms_map ' % ivtidx + \\\n 'WHERE term_id=? AND rank=? %s %%s LIMIT ? OFFSET ?'\n\n if object_type == None:\n q %= ''\n v = [id, rank, sql_limit, state[id][\"offset\"][rank]]\n else:\n q %= 'AND object_type=?'\n v = [id, rank, object_type, sql_limit, state[id][\"offset\"][rank]]\n\n if id_constraints:\n # We know about all objects that match one or more of the other\n # search terms, so we add the constraint that all rows for this\n # term match the others as well. 
Effectively we push the logic\n # to generate the intersection into the db.\n # XXX: This can't benefit from the index if object_type\n # is not specified.\n q %= ' AND object_id IN %s' % _list_to_printable(tuple(id_constraints))\n # But since we're specifying a list of ids to search for with this\n # term, we can't use limit/offset, since the constraints might be\n # different since the last iteration.\n v[-2:] = [-1, 0]\n else:\n q %= ''\n\n rows = self._db_query(q, v)\n nqueries += 1\n state[id]['more'][rank] = len(rows) == sql_limit\n state[id]['count'] += len(rows)\n\n for row in rows:\n results[id][row[0], row[1]] = row[2] * terms[id]['idf_t']\n terms[id]['ids'][row[1]] = 1\n\n if state[id]['count'] >= terms[id]['count'] or \\\n (id_constraints and len(rows) == len(id_constraints)):\n # If we've now retrieved all objects for this term, or if\n # all the results we just got now intersect with our\n # constraints set, we're done this term and don't bother\n # querying it at other ranks.\n #print \"Done term '%s' at rank %d\" % (terms[id]['term'], rank)\n state[id]['done'] = True\n if id_constraints is not None:\n id_constraints = id_constraints.intersection(terms[id]['ids'])\n else:\n id_constraints = set(terms[id]['ids'])\n #\n # end loop over terms\n\n\n for r in functools.reduce(lambda a, b: set(a).intersection(b), results.values()):\n all_results[r] = 0\n for id in ids:\n if r in results[id]:\n all_results[r] += results[id][r]\n\n # If we have enough results already, no sense in querying the\n # next rank.\n if limit > 0 and len(all_results) > limit*2:\n finished = True\n #print \"Breaking at rank:\", rank\n break\n #\n # end loop over ranks\n\n if finished:\n break\n\n finished = True\n for index in range(len(ids)):\n id = ids[index]\n\n if index > 0:\n last_id = ids[index-1]\n a = results[last_id]\n b = results[id]\n intersect = set(a).intersection(b)\n\n if len(intersect) == 0:\n # Is there any more at any rank?\n a_more = b_more = False\n for rank in range(11):\n a_more = a_more or state[last_id]['more'][rank]\n b_more = b_more or state[id]['more'][rank]\n\n if not a_more and not b_more:\n # There's no intersection between these two search\n # terms and neither have more at any rank, so we\n # can stop the whole query.\n finished = True\n break\n\n # There's still hope of a match. Go through this term and\n # see if more exists at any rank, increasing offset and\n # unsetting finished flag so we iterate again.\n for rank in range(10, -1, -1):\n if state[id]['more'][rank] and not state[id]['done']:\n state[id]['offset'][rank] += sql_limit\n finished = False\n\n # If we haven't found enough results after this pass, grow our\n # limit so that we expand our search scope. 
(XXX: this value may\n # need empirical tweaking.)\n sql_limit *= 10\n\n # end loop while not finished\n log.debug('%d results, did %d subqueries, %.04f seconds (%.04f overhead)',\n len(all_results), nqueries, time.time()-t0, t1-t0)\n return all_results", "def boostScore(self, result: str, words:set ):\n found = 0;\n for word in words:\n if result in self.invertedIndex[word]:\n found += 1\n return found/len(words)", "def evaluate_terms(terms):\n expr_terms = [x for x in terms]\n\n while expr_terms.count('^') != 0:\n expr_terms = eval_expon(expr_terms)\n\n while MUL_DIV_RE.search(str(expr_terms)) is not None:\n expr_terms = eval_a_op_b(expr_terms, 'md')\n\n while len(expr_terms) != 1:\n expr_terms = eval_a_op_b(expr_terms, 'pm')\n\n return expr_terms[0]", "def rankResults(self, results:set, queryVec: dict):\n rankedResults = {}\n for result in results:\n cosineSim = 0.0\n for word in queryVec.keys():\n if result in self.invertedIndex[word]:\n cosineSim += queryVec[word]*self.invertedIndex[word][result]\n cosineSim += self.boostScore(result, queryVec.keys())\n rankedResults[result] = cosineSim\n\n return rankedResults", "def rank_results(keywords, query_terms_info, results_list, collection_info, docset_info):\n # Get relevant collection info\n total_doc_count = collection_info.document_count\n avg_doc_length = collection_info.avg_doc_length\n\n doc_rankings = dict()\n\n for doc_id in results_list:\n doc_length = docset_info.get_doc_info(doc_id).doc_length\n\n # Keep sum of ranking values for all terms ranking values for the given document\n doc_ranking_val = 0\n for term in keywords:\n if term in query_terms_info:\n # Get term frequency & document frequency for each term x document cross product\n doc_freq = query_terms_info[term].get_document_frequency()\n term_freq = query_terms_info[term].get_term_frequency(doc_id)\n\n doc_ranking_val += compute_term_doc_rank(total_doc_count, avg_doc_length, doc_length, doc_freq, term_freq)\n\n doc_rankings[doc_id] = doc_ranking_val\n\n return sorted(doc_rankings.items(), key=operator.itemgetter(1), reverse=True)", "def compute_doc_scores_BM25F(self, query_terms, \n inverted_indexes, \n doc_length_info_bm25f,\n parameters):\n \n doc_scores = dict() # This is to contain each document's score\n for term in query_terms: # For each query term ...\n \n # Retrieve information regarding the current term\n term_info = inverted_indexes[term]\n n_docs_containing_term = len(term_info)\n \n # For each document that contains the term ...\n for cord_uid in term_info.keys():\n \n tf_field_dict = term_info[cord_uid]\n length_info = doc_length_info_bm25f[cord_uid]\n \n # Compute document's score for this term\n score = self.compute_term_BM25F(term, tf_field_dict, n_docs_containing_term, Constants.doc_count,\n length_info,\n parameters.k,\n parameters.weight_title, parameters.weight_author, parameters.weight_abstract, parameters.weight_sections,\n parameters.b_title, parameters.b_author, parameters.b_abstract, parameters.b_sections,\n Constants.avg_title_length, Constants.avg_author_length, Constants.avg_abstract_length, Constants.avg_sections_length)\n \n # Store or increment the score\n if cord_uid in doc_scores:\n doc_scores[cord_uid] += score\n else:\n doc_scores[cord_uid] = score\n \n return doc_scores", "def score_help(chain, res_list, score_table):\n from pro_angle import calc_dihedral\n from math import floor\n \n score = float(0)\n for res in res_list:\n try:\n (phi,psi) = calc_dihedral(chain, res) \n indx = int(floor(phi/10)+18)\n indy = int(floor(psi/10)+18)\n 
temp = float(score_table[indy][indx])\n score = score + temp\n except ValueError:\n pass\n# print \"ValueError: asked for score of non-scorable residue\"\n return score", "def answer(document, search_terms):\n idx = {k: [] for k in search_terms}\n doc = document.split()\n [idx[term].append(i) for i, term in enumerate(doc, start=1) if term in search_terms]\n min_score = sys.maxint\n winning_slice = None\n for term in idx.keys(): # ignore duplicate terms\n for position in idx[term]:\n positions = [position]\n for other_term in idx.keys():\n distances = \\\n [int(math.fabs(position - x)) for x in idx[other_term]]\n positions.append(\n idx[other_term][distances.index(min(distances))])\n score = max(positions) - min(positions) + 1\n if score < min_score:\n winning_slice = (min(positions) - 1, max(positions),)\n min_score = score\n return \" \".join(doc[slice(*winning_slice)])", "def score_candidates(self,\n cand_list: List[Union[CandidateEntry, Tuple[str, float]]],\n query_info_obj_or_dict: Union[DataEntryFields, dict]) -> Dict[str, float]:\n query_text = self.get_query_text(query_info_obj_or_dict)\n\n if self.text_proc_obj_query is not None:\n query_text = self.text_proc_obj_query(query_text)\n\n query_text = self.handle_case(query_text)\n query_toks = query_text.split()\n query_terms_idfs = {w: self.calc_idf(w) for w in set(query_toks)}\n\n res = {}\n\n for doc_id, score in cand_list:\n doc_text = self.fwd_indx.get_doc_text(doc_id)\n if self.text_proc_obj_doc is not None:\n doc_text = self.text_proc_obj_doc(doc_text)\n doc_text = self.handle_case(doc_text)\n doc_toks = doc_text.split()\n doc_len = len(doc_toks)\n counts = Counter(doc_toks)\n score = 0\n for qterm in query_toks:\n tf = counts[qterm]\n if tf > 0:\n qidf = query_terms_idfs[qterm]\n norm_tf = (tf * (self.k1 + 1)) / \\\n (tf + self.k1 * (1 - self.b + self.b * doc_len * self.inv_avg_doc_len))\n score += qidf * norm_tf\n\n res[doc_id] = score\n\n return res", "def test_scoring(self):\n scores = score_words(['foo', 'far', 'has', 'car'])\n expected = [(7, 'far'), (6, 'car'), (5, 'has'), (4 , 'foo')]\n self.assertEqual(scores, expected)", "def bulk_score(cls, csv_list, errors):\n # Split the list by namespace\n csv_dict = dict()\n for entry in csv_list:\n if len(entry) != 4:\n if len(entry) >= 1:\n errors.append('Invalid row %s' % ','.join(entry))\n continue\n\n namespace = entry[0].strip()\n if not namespace:\n errors.append('Invalid row %s' % ','.join(entry))\n continue\n\n score_list = csv_dict.get(namespace, [])\n score_list.append(entry[1:])\n csv_dict[namespace] = score_list\n\n # Call bulk score by course\n for namespace, score_list in csv_dict.iteritems():\n course_errors = []\n app_context = sites.get_app_context_for_namespace(namespace)\n if not app_context:\n errors.append('Course not found %s ' % namespace)\n continue\n course = courses.Course.get(app_context)\n with Namespace(namespace):\n cls.bulk_score_by_course(course, score_list, course_errors)\n if course_errors:\n errors.append('Errors for course %s: %s' %\n (namespace, transforms.dumps(course_errors)))", "def cosineSimilarity(index, nPages, query): \n scores = defaultdict(int)\n terms = query.split()\n qw = {t: tf_idf(1, nPages, len(index[t])) for t in terms if t in index}\n query_len = np.linalg.norm(list(qw.values()))\n for term in qw:\n query_weight = qw[term] / query_len\n for url, weight in index[term]:\n scores[url] += weight * query_weight\n return sorted(scores.items(), key=lambda x: x[1], reverse=True)", "def rank_results(result_list, search_title, 
search_artist, uploader_list):\n #scores = []\n #search_artist = search_artist.replace(\"+\", \" \").lower()\n search_title = search_title.replace(\"+\", \" \")\n #search_terms = search_title.split() + search_artist.split()\n\n ## Give score to each result\n #for index, title in enumerate(result_list):\n # title = title.lower()\n # score = 0\n\n # # One point for each word in result title\n # for term in search_terms:\n # if term in title:\n # score += 1\n\n # # 2 points if whole title in result, 2 points for whole artist, 4 points for both\n # if search_title in title:\n # score += 2\n # if search_artist in title:\n # score += 2\n # if search_title in title and search_artist in title:\n # score += 4\n # if search_title == title and (uploader_list[index] == search_artist+\" - topic\" or uploader_list[index] == 'various artists - topic' or uploader_list[index] == search_artist or uploader_list[index] == search_artist+'\\\\xa0'):\n # score += 100\n # if 'karaoke' in title:\n # score-=1000\n\n # scores.append(score)\n\n # return scores.index(max(scores))\n for index, title in enumerate(result_list):\n title = title\n if search_title == title:\n return index\n\n return 0", "def vectorize(self, terms):\n features = {}\n\n if self.parameters[LexiconFeature.PARAM_ENABLED] == 'false':\n return features\n\n tones = []\n if (self.terms_used == 'all'):\n tones = [self.get_tone(term) for term in terms]\n elif (self.used_terms == 'hashtags_only'):\n tones = [self.get_tone(term) for term in terms\n if len(term) > 0 and term[0] == '#']\n\n if (len(tones) == 0):\n tones.append(0)\n\n for function_name in self.functions:\n if (function_name == 'sum'):\n value = (sum(tones))\n elif (function_name == 'max'):\n value = max(tones)\n elif (function_name == 'min'):\n value = min(tones)\n else:\n raise ValueError(\n \"unexpected function: '{}'\".format(function_name))\n\n feature_name = \"{}_{}\".format(self.get_name(), function_name)\n features[feature_name] = utils.normalize(value)\n\n #\n # Calculate sum of cluster scores\n #\n # for cluster in self.bag_of_clusters_features:\n # cluster_tones = [self.get_cluster_tone(\n # cluster, cluster.get_cluster_id(word))\n # for word in terms if cluster.contains_word(word)]\n # if len(cluster_tones) == 0:\n # cluster_tones.append(0)\n\n # feature_name = \"{}_score_sum\".format(cluster.get_name())\n # value = sum(cluster_tones)\n # features[feature_name] = utils.normalize(value)\n\n return features", "def tf_idf_score():\n\n global final_doc_set\n global final_dictionary\n final_score = []\n\n for doc_id in final_doc_set:\n score = 0\n for query_term in final_dictionary.keys():\n if final_dictionary[query_term][1].get(doc_id):\n tf = final_dictionary[query_term][1][doc_id][0]\n df = final_dictionary[query_term][0]\n\n score += ((1 + log10(tf)) * log10(TOTAL_DOCS / df))\n\n final_score.append([doc_id, score])\n\n return final_score", "def score(self, urlids, wordids):\r\n\t\tself.urlids = urlids\r\n\t\tself.wordids = wordids\r\n\t\tself.scores = self.tf_score()\r\n\t\treturn self.scores", "def fashion_similarity(input_txt, features, keys):\n feature_index = keys.index(input_txt)\n input_vector = features[feature_index]\n\n scores = [similarity_function(input_vector, partner) for partner in features]\n return scores", "def calculate_score(self, edge_list):\n embs = np.array(\n [[self.emb[source], self.emb[target]] for source, target in edge_list]\n )\n\n if self.proximity_function == \"dot\":\n score_list = [\n np.dot(source_emb, target_emb) for source_emb, target_emb in 
embs\n ]\n elif self.proximity_function == \"cos\":\n score_list = cosine_similarity(embs[:, 0], embs[:, 1])\n\n return score_list", "def score(self, searcher, fieldnum, text, docnum, weight, QTF = 1):\n raise NotImplementedError" ]
[ "0.8230414", "0.54937506", "0.54905194", "0.5479236", "0.5440691", "0.5430716", "0.54254556", "0.5392035", "0.53528565", "0.51504266", "0.5112394", "0.510215", "0.5097729", "0.50681865", "0.5062038", "0.4980558", "0.49433053", "0.49381447", "0.49258786", "0.49234137", "0.49174914", "0.4907989", "0.4843224", "0.48411313", "0.4840773", "0.48385045", "0.47786513", "0.47754273", "0.4767628", "0.4765168" ]
0.82383937
0
Removes all indexed terms under the specified inverted index for the given object. This function must be called when an object is removed from the database, or when an ATTR_INVERTED_INDEX attribute of an object is being updated (and therefore that inverted index must be reindexed).
def _delete_object_inverted_index_terms(self, (object_type, object_id), ivtidx): self._delete_multiple_objects_inverted_index_terms({object_type: ((ivtidx,), (object_id,))})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _delete_object_inverted_index_terms(self, obj, ivtidx):\n object_type, object_id = obj\n self._delete_multiple_objects_inverted_index_terms({object_type: ((ivtidx,), (object_id,))})", "def _delete_multiple_objects_inverted_index_terms(self, objects):\n for type_name, (ivtidxes, object_ids) in objects.items():\n # Resolve object type name to id\n type_id = self._get_type_id(type_name)\n\n for ivtidx in ivtidxes:\n # Remove all terms for the inverted index associated with this\n # object. A trigger will decrement the count column in the\n # terms table for all term_id that get affected.\n self._db_query(\"DELETE FROM ivtidx_%s_terms_map WHERE object_type=? AND object_id IN %s\" % \\\n (ivtidx, _list_to_printable(object_ids)), (type_id,))\n self._inverted_indexes[ivtidx]['objectcount'] -= len(object_ids)", "def _delete_multiple_objects_inverted_index_terms(self, objects):\n for type_name, (ivtidxes, object_ids) in objects.items():\n # Resolve object type name to id\n type_id = self._get_type_id(type_name)\n\n for ivtidx in ivtidxes:\n # Remove all terms for the inverted index associated with this\n # object. A trigger will decrement the count column in the\n # terms table for all term_id that get affected.\n self._db_query(\"DELETE FROM ivtidx_%s_terms_map WHERE object_type=? AND object_id IN %s\" % \\\n (ivtidx, _list_to_printable(object_ids)), (type_id,))\n self._inverted_indexes[ivtidx]['objectcount'] -= len(object_ids)", "def remove_objects(self, indexes):\n fields = [\n \"object_position\",\n \"object_velocity\",\n \"object_radius\",\n \"object_rotation\",\n \"object_type\",\n \"object_steps\",\n ]\n for field in fields:\n setattr(\n self,\n field,\n [x for i, x in enumerate(getattr(self, field)) if i not in indexes],\n )", "def delete_terms(self, *terms):\n result = self.sequence\n for term in ANCOVA(*terms).sequence:\n result.remove(term)\n return ANCOVA(*result)", "def unindexReverseIndex(alphabet,reverseIndex,path):\n\tdef _deleteDocumentTermCounterString(docCount,termCount):\n\t\tdeleteString = \"[Document %8d Terms %8d]\" % (docCount,termCount)\n\t\tsys.stdout.write(\"\\b\" * len(deleteString))\n\tdef _writeDocumentTermCounterString(docCount,termCount):\n\t\tsys.stdout.write(\"[Document %8d Terms %8d]\" % (docCount,termCount))\n\toutputFileHash = dict()\n\tfor termWord,termId in alphabet.iteritems():\n\t\tdocCounter = 0\n\t\tdisplayTermWord = termWord[0:14]\n\t\tif len(displayTermWord) == 14: displayTermWord = \"\".join([\"<\",displayTermWord[:-2],\">\"])\n\t\tsys.stdout.write(\"Unindexing term %14s \" % displayTermWord)\n\t\t_writeDocumentTermCounterString(0,0)\n\t\tfor docIdTermInstanceVector in reverseIndex.lookupTermId(termId):\n\t\t\ttermCounter = 0\n\t\t\t_deleteDocumentTermCounterString(docCounter,termCounter)\n\t\t\tdocCounter += 1\n\t\t\t_writeDocumentTermCounterString(docCounter,termCounter)\n\t\t\tdocId = docIdTermInstanceVector.docId\n\t\t\tif docId not in outputFileHash:\n\t\t\t\toutputFileName = os.sep.join([path,str(docId) + \".fwd\"])\n\t\t\t\toutputFileHash[docId] = outputFileName\n\t\t\tfp = open(outputFileHash[docId],\"ab\")\n\n\t\t\tfor termInstance in docIdTermInstanceVector.termInstancesGenerator:\n\t\t\t\t_deleteDocumentTermCounterString(docCounter,termCounter)\n\t\t\t\ttermCounter += 1\n\t\t\t\t_writeDocumentTermCounterString(docCounter,termCounter)\n\t\t\t\tprint >> fp, \"%d %s\" % (termInstance.position,termWord)\n\t\t\tfp.close()\n\n\t\tsys.stdout.write(\" DONE\\n\")\n\t\n\tfor fileName in outputFileHash.values():\n\t\tfp = 
open(fileName,\"rb\")\n\t\tfileTerms = sorted([(int(position),word[:-1]) for position,word in [line.split(\" \",1) for line in fp]])\n\t\tfp.close()\n\t\tprint >> sys.stdout, \"Reorganizing: %s\" % fileName\n\t\tfp = open(fileName,\"wb\")\n\t\tfor termPosition,termWord in fileTerms:\n\t\t\tfp.write(termWord + \" \")\n\t\tfp.close()", "def remove_index_from_word(self,word,index):\r\n\r\n # with shelf\r\n if self.using_shelf:\r\n\r\n if word in self.word_dict:\r\n\r\n self.word_dict[word].remove(str(index))\r\n\r\n\r\n #with database\r\n if self.using_database:\r\n\r\n value_tuple = (notebookname,word,str(index),)\r\n db_cursor.execute(\"DELETE FROM\"\r\n +\" word_to_indexes\"\r\n +\" WHERE notebook=?\"\r\n +\" AND word=?\"\r\n +\" AND note_index=?;\",\r\n value_tuple)\r\n\r\n db_cursor.execute(\"SELECT * FROM word_to_indexes\"\r\n +\" WHERE notebook=?\"\r\n +\" and word=?;\",\r\n value_tuple[0:2])\r\n if db_cursor.fetchone():\r\n db_cursor.execute(\"DELETE FROM\"\r\n +\" all_words\"\r\n +\" WHERE notebook=?\"\r\n +\" AND word=?;\",\r\n value_tuple[0:2])", "def cleanup(self):\n index_id = self.params[\"index_id\"]\n\n # Remove the index document from the database.\n self.db.indexes.delete_one({\"_id\": index_id})\n\n self.dispatch(\"indexes\", \"delete\", [index_id])\n\n query = {\n \"_id\": {\n \"$in\": self.db.history.distinct(\"_id\", {\"index.id\": index_id})\n }\n }\n\n # Set all the otus included in the build to \"unbuilt\" again.\n self.db.history.update_many(query, {\n \"$set\": {\n \"index\": {\n \"id\": \"unbuilt\",\n \"version\": \"unbuilt\"\n }\n }\n })\n\n id_list = self.db.history.distinct(\"_id\", query)\n\n self.dispatch(\"history\", \"update\", id_list)\n\n virtool.utils.rm(self.params[\"index_path\"], True)", "def _add_object_inverted_index_terms(self, obj, ivtidx, terms):\n object_type, object_id = obj\n if not terms:\n return\n\n # Resolve object type name to id\n object_type = self._get_type_id(object_type)\n\n # Holds any of the given terms that already exist in the database\n # with their id and count.\n db_terms_count = {}\n\n terms_list = _list_to_printable([ t.lower() for t in terms.keys() ])\n q = \"SELECT id,term,count FROM ivtidx_%s_terms WHERE term IN %s\" % (ivtidx, terms_list)\n rows = self._db_query(q)\n for row in rows:\n db_terms_count[row[1]] = row[0], row[2]\n\n # For executemany queries later.\n update_list, map_list = [], []\n\n for term, score in terms.items():\n term = term.lower()\n if term not in db_terms_count:\n # New term, so insert it now.\n self._db_query('INSERT OR REPLACE INTO ivtidx_%s_terms VALUES(NULL, ?, 1)' % ivtidx, (term,))\n db_id, db_count = self._cursor.lastrowid, 1\n db_terms_count[term] = db_id, db_count\n else:\n db_id, db_count = db_terms_count[term]\n update_list.append((db_count + 1, db_id))\n\n map_list.append((int(score*10), db_id, object_type, object_id, score))\n\n self._db_query('UPDATE ivtidx_%s_terms SET count=? WHERE id=?' % ivtidx, update_list, many = True)\n self._db_query('INSERT INTO ivtidx_%s_terms_map VALUES(?, ?, ?, ?, ?)' % ivtidx, map_list, many = True)", "def _query_inverted_index(self, ivtidx, terms, limit = 100, object_type = None):\n t0 = time.time()\n # Fetch number of files the inverted index applies to. 
(Used in score\n # calculations.)\n objectcount = self._inverted_indexes[ivtidx]['objectcount']\n\n if not isinstance(terms, (list, tuple)):\n split = self._inverted_indexes[ivtidx]['split']\n if callable(split):\n terms = split(str_to_unicode(terms).lower())\n else:\n terms = split.split(str_to_unicode(terms).lower())\n else:\n terms = [ str_to_unicode(x).lower() for x in terms ]\n\n # Remove terms that aren't indexed (words less than minimum length\n # or and terms in the ignore list for this ivtidx).\n if self._inverted_indexes[ivtidx]['min']:\n terms = [ x for x in terms if len(x) >= self._inverted_indexes[ivtidx]['min'] ]\n if self._inverted_indexes[ivtidx]['ignore']:\n terms = [ x for x in terms if x not in self._inverted_indexes[ivtidx]['ignore'] ]\n\n terms_list = _list_to_printable(terms)\n nterms = len(terms)\n\n if nterms == 0:\n return []\n\n # Find term ids and order by least popular to most popular.\n rows = self._db_query('SELECT id,term,count FROM ivtidx_%s_terms WHERE ' \\\n 'term IN %s ORDER BY count' % (ivtidx, terms_list))\n save = map(lambda x: x.lower(), terms)\n terms = {}\n ids = []\n for row in rows:\n if row[2] == 0:\n return []\n\n # Give terms weight according to their order\n order_weight = 1 + len(save) - list(save).index(row[1])\n terms[row[0]] = {\n 'term': row[1],\n 'count': row[2],\n 'idf_t': math.log(objectcount / row[2] + 1) + order_weight,\n 'ids': {}\n }\n ids.append(row[0])\n\n # Not all the terms we requested are in the database, so we return\n # 0 results.\n if len(ids) < nterms:\n return []\n\n if object_type:\n # Resolve object type name to id\n object_type = self._get_type_id(object_type)\n\n results, state = {}, {}\n for id in ids:\n results[id] = {}\n state[id] = {\n 'offset': [0]*11,\n 'more': [True]*11,\n 'count': 0,\n 'done': False\n }\n\n all_results = {}\n if limit == None:\n limit = objectcount\n\n if limit <= 0 or objectcount <= 0:\n return {}\n\n sql_limit = min(limit*3, 200)\n finished = False\n nqueries = 0\n\n # Keep a dict keyed on object_id that we can use to narrow queries\n # once we have a full list of all objects that match a given term.\n id_constraints = None\n t1 = time.time()\n while not finished:\n for rank in range(10, -1, -1):\n for id in ids:\n if not state[id]['more'][rank] or state[id]['done']:\n # If there's no more results at this rank, or we know\n # we've already seen all the results for this term, we\n # don't bother with the query.\n continue\n\n q = 'SELECT object_type,object_id,frequency FROM ivtidx_%s_terms_map ' % ivtidx + \\\n 'WHERE term_id=? AND rank=? %s %%s LIMIT ? OFFSET ?'\n\n if object_type == None:\n q %= ''\n v = (id, rank, sql_limit, state[id][\"offset\"][rank])\n else:\n q %= 'AND object_type=?'\n v = (id, rank, object_type, sql_limit, state[id][\"offset\"][rank])\n\n if id_constraints:\n # We know about all objects that match one or more of the other\n # search terms, so we add the constraint that all rows for this\n # term match the others as well. 
Effectively we push the logic\n # to generate the intersection into the db.\n # XXX: This can't benefit from the index if object_type\n # is not specified.\n q %= ' AND object_id IN %s' % _list_to_printable(tuple(id_constraints))\n else:\n q %= ''\n\n rows = self._db_query(q, v)\n nqueries += 1\n state[id]['more'][rank] = len(rows) == sql_limit\n state[id]['count'] += len(rows)\n\n for row in rows:\n results[id][row[0], row[1]] = row[2] * terms[id]['idf_t']\n terms[id]['ids'][row[1]] = 1\n\n if state[id]['count'] >= terms[id]['count'] or \\\n (id_constraints and len(rows) == len(id_constraints)):\n # If we've now retrieved all objects for this term, or if\n # all the results we just got now intersect with our\n # constraints set, we're done this term and don't bother\n # querying it at other ranks.\n #print 'Done term '%s' at rank %d' % (terms[id]['term'], rank)\n state[id]['done'] = True\n if id_constraints is not None:\n id_constraints = id_constraints.intersection(terms[id]['ids'])\n else:\n id_constraints = set(terms[id]['ids'])\n #\n # end loop over terms\n\n\n for r in reduce(lambda a, b: set(a).intersection(b), results.values()):\n all_results[r] = 0\n for id in ids:\n if r in results[id]:\n all_results[r] += results[id][r]\n\n # If we have enough results already, no sense in querying the\n # next rank.\n if limit > 0 and len(all_results) > limit*2:\n finished = True\n #print \"Breaking at rank:\", rank\n break\n #\n # end loop over ranks\n\n if finished:\n break\n\n finished = True\n for index in range(len(ids)):\n id = ids[index]\n\n if index > 0:\n last_id = ids[index-1]\n a = results[last_id]\n b = results[id]\n intersect = set(a).intersection(b)\n\n if len(intersect) == 0:\n # Is there any more at any rank?\n a_more = b_more = False\n for rank in range(11):\n a_more = a_more or state[last_id]['more'][rank]\n b_more = b_more or state[id]['more'][rank]\n\n if not a_more and not b_more:\n # There's no intersection between these two search\n # terms and neither have more at any rank, so we\n # can stop the whole query.\n finished = True\n break\n\n # There's still hope of a match. Go through this term and\n # see if more exists at any rank, increasing offset and\n # unsetting finished flag so we iterate again.\n for rank in range(10, -1, -1):\n if state[id]['more'][rank] and not state[id]['done']:\n state[id]['offset'][rank] += sql_limit\n finished = False\n\n # If we haven't found enough results after this pass, grow our\n # limit so that we expand our search scope. (XXX: this value may\n # need empirical tweaking.)\n sql_limit *= 10\n\n # end loop while not finished\n log.info('%d results, did %d subqueries, %.04f seconds (%.04f overhead)',\n len(all_results), nqueries, time.time()-t0, t1-t0)\n return all_results", "def delete_index(self):\n es = self.get_es()\n if es.head(self.es_index):\n es.delete(self.es_index)", "def _query_inverted_index(self, ivtidx, terms, limit = 100, object_type = None):\n t0 = time.time()\n # Fetch number of files the inverted index applies to. 
(Used in score\n # calculations.)\n objectcount = self._inverted_indexes[ivtidx]['objectcount']\n\n if not isinstance(terms, (list, tuple)):\n split = self._inverted_indexes[ivtidx]['split']\n if callable(split):\n terms = [term for term in split(tostr(terms).lower()) if term]\n else:\n terms = [term for term in split.split(tostr(terms).lower()) if term]\n else:\n terms = [ tostr(x).lower() for x in terms ]\n\n # Remove terms that aren't indexed (words less than minimum length\n # or and terms in the ignore list for this ivtidx).\n if self._inverted_indexes[ivtidx]['min']:\n terms = [ x for x in terms if len(x) >= self._inverted_indexes[ivtidx]['min'] ]\n if self._inverted_indexes[ivtidx]['ignore']:\n terms = [ x for x in terms if x not in self._inverted_indexes[ivtidx]['ignore'] ]\n\n terms_list = _list_to_printable(terms)\n nterms = len(terms)\n\n if nterms == 0:\n return []\n\n # Find term ids and order by least popular to most popular.\n rows = self._db_query('SELECT id,term,count FROM ivtidx_%s_terms WHERE ' \\\n 'term IN %s ORDER BY count' % (ivtidx, terms_list))\n save = [x.lower() for x in terms]\n terms = {}\n ids = []\n for row in rows:\n if row[2] == 0:\n return []\n\n # Give terms weight according to their order\n order_weight = 1 + len(save) - list(save).index(row[1])\n terms[row[0]] = {\n 'term': row[1],\n 'count': row[2],\n 'idf_t': math.log(objectcount / row[2] + 1) + order_weight,\n 'ids': {}\n }\n ids.append(row[0])\n\n # Not all the terms we requested are in the database, so we return\n # 0 results.\n if len(ids) < nterms:\n return []\n\n if object_type:\n # Resolve object type name to id\n object_type = self._get_type_id(object_type)\n\n results, state = {}, {}\n for id in ids:\n results[id] = {}\n state[id] = {\n 'offset': [0]*11,\n 'more': [True]*11,\n 'count': 0,\n 'done': False\n }\n\n all_results = {}\n if limit == None:\n limit = objectcount\n\n if limit <= 0 or objectcount <= 0:\n return {}\n\n sql_limit = min(limit*3, 200)\n finished = False\n nqueries = 0\n\n # Keep a dict keyed on object_id that we can use to narrow queries\n # once we have a full list of all objects that match a given term.\n id_constraints = None\n t1 = time.time()\n while not finished:\n for rank in range(10, -1, -1):\n for id in ids:\n if not state[id]['more'][rank] or state[id]['done']:\n # If there's no more results at this rank, or we know\n # we've already seen all the results for this term, we\n # don't bother with the query.\n continue\n\n q = 'SELECT object_type,object_id,frequency FROM ivtidx_%s_terms_map ' % ivtidx + \\\n 'WHERE term_id=? AND rank=? %s %%s LIMIT ? OFFSET ?'\n\n if object_type == None:\n q %= ''\n v = [id, rank, sql_limit, state[id][\"offset\"][rank]]\n else:\n q %= 'AND object_type=?'\n v = [id, rank, object_type, sql_limit, state[id][\"offset\"][rank]]\n\n if id_constraints:\n # We know about all objects that match one or more of the other\n # search terms, so we add the constraint that all rows for this\n # term match the others as well. 
Effectively we push the logic\n # to generate the intersection into the db.\n # XXX: This can't benefit from the index if object_type\n # is not specified.\n q %= ' AND object_id IN %s' % _list_to_printable(tuple(id_constraints))\n # But since we're specifying a list of ids to search for with this\n # term, we can't use limit/offset, since the constraints might be\n # different since the last iteration.\n v[-2:] = [-1, 0]\n else:\n q %= ''\n\n rows = self._db_query(q, v)\n nqueries += 1\n state[id]['more'][rank] = len(rows) == sql_limit\n state[id]['count'] += len(rows)\n\n for row in rows:\n results[id][row[0], row[1]] = row[2] * terms[id]['idf_t']\n terms[id]['ids'][row[1]] = 1\n\n if state[id]['count'] >= terms[id]['count'] or \\\n (id_constraints and len(rows) == len(id_constraints)):\n # If we've now retrieved all objects for this term, or if\n # all the results we just got now intersect with our\n # constraints set, we're done this term and don't bother\n # querying it at other ranks.\n #print \"Done term '%s' at rank %d\" % (terms[id]['term'], rank)\n state[id]['done'] = True\n if id_constraints is not None:\n id_constraints = id_constraints.intersection(terms[id]['ids'])\n else:\n id_constraints = set(terms[id]['ids'])\n #\n # end loop over terms\n\n\n for r in functools.reduce(lambda a, b: set(a).intersection(b), results.values()):\n all_results[r] = 0\n for id in ids:\n if r in results[id]:\n all_results[r] += results[id][r]\n\n # If we have enough results already, no sense in querying the\n # next rank.\n if limit > 0 and len(all_results) > limit*2:\n finished = True\n #print \"Breaking at rank:\", rank\n break\n #\n # end loop over ranks\n\n if finished:\n break\n\n finished = True\n for index in range(len(ids)):\n id = ids[index]\n\n if index > 0:\n last_id = ids[index-1]\n a = results[last_id]\n b = results[id]\n intersect = set(a).intersection(b)\n\n if len(intersect) == 0:\n # Is there any more at any rank?\n a_more = b_more = False\n for rank in range(11):\n a_more = a_more or state[last_id]['more'][rank]\n b_more = b_more or state[id]['more'][rank]\n\n if not a_more and not b_more:\n # There's no intersection between these two search\n # terms and neither have more at any rank, so we\n # can stop the whole query.\n finished = True\n break\n\n # There's still hope of a match. Go through this term and\n # see if more exists at any rank, increasing offset and\n # unsetting finished flag so we iterate again.\n for rank in range(10, -1, -1):\n if state[id]['more'][rank] and not state[id]['done']:\n state[id]['offset'][rank] += sql_limit\n finished = False\n\n # If we haven't found enough results after this pass, grow our\n # limit so that we expand our search scope. 
(XXX: this value may\n # need empirical tweaking.)\n sql_limit *= 10\n\n # end loop while not finished\n log.debug('%d results, did %d subqueries, %.04f seconds (%.04f overhead)',\n len(all_results), nqueries, time.time()-t0, t1-t0)\n return all_results", "def _add_object_inverted_index_terms(self, (object_type, object_id), ivtidx, terms):\n if not terms:\n return\n\n # Resolve object type name to id\n object_type = self._get_type_id(object_type)\n\n # Holds any of the given terms that already exist in the database\n # with their id and count.\n db_terms_count = {}\n\n terms_list = _list_to_printable([ t.lower() for t in terms.keys() ])\n q = \"SELECT id,term,count FROM ivtidx_%s_terms WHERE term IN %s\" % (ivtidx, terms_list)\n rows = self._db_query(q)\n for row in rows:\n db_terms_count[row[1]] = row[0], row[2]\n\n # For executemany queries later.\n update_list, map_list = [], []\n\n for term, score in terms.items():\n term = term.lower()\n if term not in db_terms_count:\n # New term, so insert it now.\n self._db_query('INSERT OR REPLACE INTO ivtidx_%s_terms VALUES(NULL, ?, 1)' % ivtidx, (term,))\n db_id, db_count = self._cursor.lastrowid, 1\n db_terms_count[term] = db_id, db_count\n else:\n db_id, db_count = db_terms_count[term]\n update_list.append((db_count + 1, db_id))\n\n map_list.append((int(score*10), db_id, object_type, object_id, score))\n\n self._db_query('UPDATE ivtidx_%s_terms SET count=? WHERE id=?' % ivtidx, update_list, many = True)\n self._db_query('INSERT INTO ivtidx_%s_terms_map VALUES(?, ?, ?, ?, ?)' % ivtidx, map_list, many = True)", "def clear(self):\n self.solr.delete_query(\"%s:%s\"\n % (self.index_uuid_field, self.index_uuid))\n self.solr.commit()", "def _clear_document(self, docid):\n doc = self.get_document(docid)\n for term, count in doc.get_terms():\n term_entry = self.sql_session.query(Term).get(term)\n term_entry.count -= abs(count)\n term_entry.distinct_docs -= 1\n any_term = self.sql_session.query(Term).get(ANY)\n any_term.distinct_docs -= 1\n doc.delete()", "def wipe_index(self, index):\n url = f'{self.host}{index}/_delete_by_query?conflicts=proceed'\n data = {'query': {'match_all': {}}}\n resp = requests.post(url, json=data)\n self.flush(index)\n return resp.json()", "def vacuum(self):\n # We need to do this eventually, but there's no index on count, so\n # this could potentially be slow. It doesn't hurt to leave rows\n # with count=0, so this could be done intermittently.\n for ivtidx in self._inverted_indexes:\n self._db_query('DELETE FROM ivtidx_%s_terms WHERE count=0' % ivtidx)\n self._db_query(\"VACUUM\")", "def discard_index_from_word(self,word,index):\r\n\r\n # with shelf\r\n if self.using_shelf:\r\n\r\n if word in self.word_dict:\r\n\r\n self.word_dict[word].discard(str(index))\r\n\r\n\r\n #with database\r\n if self.using_database:\r\n\r\n value_tuple = (notebookname,word,str(index),)\r\n db_cursor.execute(\"DELETE FROM word_to_indexes \"\r\n +\"WHERE notebook=? AND word=? \"\r\n +\"AND note_index=?;\",\r\n value_tuple)\r\n\r\n db_cursor.execute(\"SELECT * FROM word_to_indexes\"\r\n +\" WHERE notebook=? 
and word=?;\",\r\n value_tuple[0:2])\r\n if db_cursor.fetchone():\r\n db_cursor.execute(\"DELETE FROM all_words\"\r\n +\" WHERE notebook=?\"\r\n +\" AND word=?;\",\r\n value_tuple[0:2])", "def deindex(self):\n self.deindex_value(self.proxy_get())", "def delete_object(self, object_id):\n if (len(\"%s\" % object_id) == 0):\n raise AlgoliaException(\"object_id is required\")\n return AlgoliaUtils_request(self.client.headers, self.write_hosts, \"DELETE\", \"/1/indexes/%s/%s\" % (self.url_index_name, quote((\"%s\" % object_id).encode('utf8'), safe='')), self.client.timeout)", "def get_inverted_index_terms(self, ivtidx, associated = None, prefix = None):\n if ivtidx not in self._inverted_indexes:\n raise ValueError(\"'%s' is not a registered inverted index.\" % ivtidx)\n\n if prefix:\n where_clause = 'WHERE terms.term >= ? AND terms.term <= ?'\n where_values = (prefix, prefix + 'z')\n else:\n where_clause = ''\n where_values = ()\n\n if not associated:\n return self._db_query('''SELECT term, count\n FROM ivtidx_%s_terms AS terms\n %s\n ORDER BY count DESC''' % (ivtidx, where_clause), where_values)\n\n\n rows = self._db_query('SELECT id FROM ivtidx_%s_terms WHERE term IN %s ORDER BY count' % \\\n (ivtidx, _list_to_printable(associated)))\n term_ids = [ x[0] for x in rows ]\n if len(term_ids) < len(associated):\n return []\n\n query = '''SELECT term, COUNT(*) AS total\n FROM ivtidx_%s_terms_map AS t0''' % ivtidx\n for n, term_id in enumerate(term_ids):\n query += ''' JOIN ivtidx_%s_terms_map t%d\n ON t%d.object_type = t%d.object_type AND\n t%d.object_id = t%d.object_id AND\n t%d.term_id = %d''' % \\\n (ivtidx, n + 1, n, n + 1, n, n + 1, n + 1, term_id)\n query += ''' JOIN ivtidx_%s_terms AS terms\n ON t0.term_id = terms.id AND\n t0.term_id NOT IN %s\n %s\n GROUP BY t0.term_id\n ORDER BY total DESC ''' % \\\n (ivtidx, _list_to_printable(term_ids), where_clause)\n return self._db_query(query, where_values)", "def clear_indexes(self):\n for keypoints in self:\n keypoints.clear_index()", "def get_inverted_index_terms(self, ivtidx, associated = None, prefix = None):\n if ivtidx not in self._inverted_indexes:\n raise ValueError, \"'%s' is not a registered inverted index.\" % ivtidx\n\n if prefix:\n where_clause = 'WHERE terms.term >= ? 
AND terms.term <= ?'\n where_values = (prefix, prefix + 'z')\n else:\n where_clause = ''\n where_values = ()\n\n if not associated:\n return self._db_query('''SELECT term, count\n FROM ivtidx_%s_terms AS terms\n %s\n ORDER BY count DESC''' % (ivtidx, where_clause), where_values)\n\n\n rows = self._db_query('SELECT id FROM ivtidx_%s_terms WHERE term IN %s ORDER BY count' % \\\n (ivtidx, _list_to_printable(associated)))\n term_ids = [ x[0] for x in rows ]\n if len(term_ids) < len(associated):\n return []\n\n query = '''SELECT term, COUNT(*) AS total\n FROM ivtidx_%s_terms_map AS t0''' % ivtidx\n for n, term_id in enumerate(term_ids):\n query += ''' JOIN ivtidx_%s_terms_map t%d\n ON t%d.object_type = t%d.object_type AND\n t%d.object_id = t%d.object_id AND\n t%d.term_id = %d''' % \\\n (ivtidx, n + 1, n, n + 1, n, n + 1, n + 1, term_id)\n query += ''' JOIN ivtidx_%s_terms AS terms\n ON t0.term_id = terms.id AND\n t0.term_id NOT IN %s\n %s\n GROUP BY t0.term_id\n ORDER BY total DESC ''' % \\\n (ivtidx, _list_to_printable(term_ids), where_clause)\n return self._db_query(query, where_values)", "def unindex_later(self):\n return", "def delete_index(self):\n if self.index_module:\n self.index_module = None\n gc.collect()", "def remove(self, index):\n raise NotImplementedError()", "def remove_extra_index(actions_structure, type_object):\n for i, action_dict in enumerate(actions_structure):\n for obj_dict in action_dict['context'][type_object]:\n obj_dict.pop('main_index')", "def clear_index(self):\n return AlgoliaUtils_request(self.client.headers, self.write_hosts, \"POST\", \"/1/indexes/%s/clear\" % self.url_index_name, self.client.timeout)", "def remove_document_from_index(self, doc_name):\n\t\tif not doc_name:\n\t\t\treturn\n\n\t\tix = self.get_index()\n\t\twith ix.searcher():\n\t\t\twriter = AsyncWriter(ix)\n\t\t\twriter.delete_by_term(self.id, doc_name)\n\t\t\twriter.commit(optimize=True)", "def removeOntoIndex(ontology_id):\n # print('removeOntoIndex() =>', ontology_id)\n url = cfg.ontology_sim + '/delete'\n body = {\n \"ontologyId\": ontology_id\n }\n try:\n res = requests.post(url, json=body)\n return res.json()\n except:\n print(\"Could not remove details for ontology with id \" + ontology_id)\n return False" ]
[ "0.81035614", "0.75411546", "0.75411546", "0.6139424", "0.6045582", "0.603692", "0.6007786", "0.60001457", "0.59316987", "0.584059", "0.5829414", "0.5817072", "0.5789952", "0.57787883", "0.5730306", "0.56770205", "0.56318223", "0.5610385", "0.55822736", "0.5505812", "0.54964715", "0.5493718", "0.5487194", "0.5445244", "0.5432679", "0.54321873", "0.5427672", "0.54243386", "0.54183644", "0.5399062" ]
0.80202085
1
Adds the dictionary of terms (as computed by _score_terms()) to the specified inverted index database for the given object.
def _add_object_inverted_index_terms(self, (object_type, object_id), ivtidx, terms): if not terms: return # Resolve object type name to id object_type = self._get_type_id(object_type) # Holds any of the given terms that already exist in the database # with their id and count. db_terms_count = {} terms_list = _list_to_printable([ t.lower() for t in terms.keys() ]) q = "SELECT id,term,count FROM ivtidx_%s_terms WHERE term IN %s" % (ivtidx, terms_list) rows = self._db_query(q) for row in rows: db_terms_count[row[1]] = row[0], row[2] # For executemany queries later. update_list, map_list = [], [] for term, score in terms.items(): term = term.lower() if term not in db_terms_count: # New term, so insert it now. self._db_query('INSERT OR REPLACE INTO ivtidx_%s_terms VALUES(NULL, ?, 1)' % ivtidx, (term,)) db_id, db_count = self._cursor.lastrowid, 1 db_terms_count[term] = db_id, db_count else: db_id, db_count = db_terms_count[term] update_list.append((db_count + 1, db_id)) map_list.append((int(score*10), db_id, object_type, object_id, score)) self._db_query('UPDATE ivtidx_%s_terms SET count=? WHERE id=?' % ivtidx, update_list, many = True) self._db_query('INSERT INTO ivtidx_%s_terms_map VALUES(?, ?, ?, ?, ?)' % ivtidx, map_list, many = True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _add_object_inverted_index_terms(self, obj, ivtidx, terms):\n object_type, object_id = obj\n if not terms:\n return\n\n # Resolve object type name to id\n object_type = self._get_type_id(object_type)\n\n # Holds any of the given terms that already exist in the database\n # with their id and count.\n db_terms_count = {}\n\n terms_list = _list_to_printable([ t.lower() for t in terms.keys() ])\n q = \"SELECT id,term,count FROM ivtidx_%s_terms WHERE term IN %s\" % (ivtidx, terms_list)\n rows = self._db_query(q)\n for row in rows:\n db_terms_count[row[1]] = row[0], row[2]\n\n # For executemany queries later.\n update_list, map_list = [], []\n\n for term, score in terms.items():\n term = term.lower()\n if term not in db_terms_count:\n # New term, so insert it now.\n self._db_query('INSERT OR REPLACE INTO ivtidx_%s_terms VALUES(NULL, ?, 1)' % ivtidx, (term,))\n db_id, db_count = self._cursor.lastrowid, 1\n db_terms_count[term] = db_id, db_count\n else:\n db_id, db_count = db_terms_count[term]\n update_list.append((db_count + 1, db_id))\n\n map_list.append((int(score*10), db_id, object_type, object_id, score))\n\n self._db_query('UPDATE ivtidx_%s_terms SET count=? WHERE id=?' % ivtidx, update_list, many = True)\n self._db_query('INSERT INTO ivtidx_%s_terms_map VALUES(?, ?, ?, ?, ?)' % ivtidx, map_list, many = True)", "def add_to_index(self, term_, doc_id_):\n\n if(term_ not in self.inverted_index.keys()):\n postingsList=LinkedList()\n postingsList.insert_at_end(doc_id_)\n #Doc freq\n postingsList.length=postingsList.length+1\n self.inverted_index[term_]=postingsList\n# self.inverted_index[term_].start_node.term_frequency += 1\n elif(not self.is_doc_id_in_posting_list(self.inverted_index[term_],doc_id_,term_)):\n self.inverted_index[term_].insert_at_end(doc_id_)\n self.inverted_index[term_].length=self.inverted_index[term_].length+1", "def index_terms(self):\n [[self.set_postings(term, id) for term in NLProcessor.process(doc)] for id, doc in\n self.doc_store.dict.iteritems()]", "def add_object(self, content, object_id = None):\n if object_id is None:\n return AlgoliaUtils_request(self.client.headers, self.write_hosts, \"POST\", \"/1/indexes/%s\" % self.url_index_name, self.client.timeout, content)\n else:\n return AlgoliaUtils_request(self.client.headers, self.write_hosts, \"PUT\", \"/1/indexes/%s/%s\" % (self.url_index_name, quote((\"%s\" % object_id).encode('utf8'), safe='')), self.client.timeout, content)", "def add(self, object_type, parent = None, **attrs):\n type_attrs = self._get_type_attrs(object_type)\n if parent:\n attrs[\"parent_type\"] = self._get_type_id(parent[0])\n attrs[\"parent_id\"] = parent[1]\n\n # Increment objectcount for the applicable inverted indexes.\n inverted_indexes = self._get_type_inverted_indexes(object_type)\n if inverted_indexes:\n self._db_query(\"UPDATE inverted_indexes SET value=value+1 WHERE attr='objectcount' AND name IN %s\" % \\\n _list_to_printable(inverted_indexes))\n\n\n # Process inverted index maps for this row\n ivtidx_terms = []\n for ivtidx in inverted_indexes:\n # Sync cached objectcount with the DB (that we just updated above)\n self._inverted_indexes[ivtidx]['objectcount'] += 1\n terms_list = []\n split = self._inverted_indexes[ivtidx]['split']\n for name, (attr_type, flags, attr_ivtidx, attr_split) in type_attrs.items():\n if attr_ivtidx == ivtidx and name in attrs:\n terms_list.append((attrs[name], 1.0, attr_split or split, ivtidx))\n\n if ivtidx in attrs and ivtidx not in type_attrs:\n # Attribute named after an inverted index is given 
in kwagrs,\n # but that ivtidx is not a registered attribute (which would be\n # handled in the for loop just above).\n terms_list.append((attrs[ivtidx], 1.0, split, ivtidx))\n\n terms = self._score_terms(terms_list)\n ivtidx_terms.append((ivtidx, terms))\n if ivtidx in type_attrs:\n # Registered attribute named after ivtidx; store ivtidx\n # terms in object.\n attrs[ivtidx] = terms.keys()\n\n query, values = self._make_query_from_attrs(\"add\", attrs, object_type)\n self._db_query(query, values)\n\n # Add id given by db, as well as object type.\n attrs[\"id\"] = self._cursor.lastrowid\n attrs[\"type\"] = unicode(object_type)\n if parent:\n attrs['parent'] = (attrs['parent_type'], attrs['parent_id'])\n else:\n attrs['parent'] = (None, None)\n\n for ivtidx, terms in ivtidx_terms:\n self._add_object_inverted_index_terms((object_type, attrs['id']), ivtidx, terms)\n\n # Populate dictionary with keys for this object type not specified in kwargs.\n attrs.update(dict.fromkeys([k for k in type_attrs if k not in attrs.keys() + ['pickle']]))\n\n return ObjectRow(None, None, attrs)", "def _query_inverted_index(self, ivtidx, terms, limit = 100, object_type = None):\n t0 = time.time()\n # Fetch number of files the inverted index applies to. (Used in score\n # calculations.)\n objectcount = self._inverted_indexes[ivtidx]['objectcount']\n\n if not isinstance(terms, (list, tuple)):\n split = self._inverted_indexes[ivtidx]['split']\n if callable(split):\n terms = split(str_to_unicode(terms).lower())\n else:\n terms = split.split(str_to_unicode(terms).lower())\n else:\n terms = [ str_to_unicode(x).lower() for x in terms ]\n\n # Remove terms that aren't indexed (words less than minimum length\n # or and terms in the ignore list for this ivtidx).\n if self._inverted_indexes[ivtidx]['min']:\n terms = [ x for x in terms if len(x) >= self._inverted_indexes[ivtidx]['min'] ]\n if self._inverted_indexes[ivtidx]['ignore']:\n terms = [ x for x in terms if x not in self._inverted_indexes[ivtidx]['ignore'] ]\n\n terms_list = _list_to_printable(terms)\n nterms = len(terms)\n\n if nterms == 0:\n return []\n\n # Find term ids and order by least popular to most popular.\n rows = self._db_query('SELECT id,term,count FROM ivtidx_%s_terms WHERE ' \\\n 'term IN %s ORDER BY count' % (ivtidx, terms_list))\n save = map(lambda x: x.lower(), terms)\n terms = {}\n ids = []\n for row in rows:\n if row[2] == 0:\n return []\n\n # Give terms weight according to their order\n order_weight = 1 + len(save) - list(save).index(row[1])\n terms[row[0]] = {\n 'term': row[1],\n 'count': row[2],\n 'idf_t': math.log(objectcount / row[2] + 1) + order_weight,\n 'ids': {}\n }\n ids.append(row[0])\n\n # Not all the terms we requested are in the database, so we return\n # 0 results.\n if len(ids) < nterms:\n return []\n\n if object_type:\n # Resolve object type name to id\n object_type = self._get_type_id(object_type)\n\n results, state = {}, {}\n for id in ids:\n results[id] = {}\n state[id] = {\n 'offset': [0]*11,\n 'more': [True]*11,\n 'count': 0,\n 'done': False\n }\n\n all_results = {}\n if limit == None:\n limit = objectcount\n\n if limit <= 0 or objectcount <= 0:\n return {}\n\n sql_limit = min(limit*3, 200)\n finished = False\n nqueries = 0\n\n # Keep a dict keyed on object_id that we can use to narrow queries\n # once we have a full list of all objects that match a given term.\n id_constraints = None\n t1 = time.time()\n while not finished:\n for rank in range(10, -1, -1):\n for id in ids:\n if not state[id]['more'][rank] or 
state[id]['done']:\n # If there's no more results at this rank, or we know\n # we've already seen all the results for this term, we\n # don't bother with the query.\n continue\n\n q = 'SELECT object_type,object_id,frequency FROM ivtidx_%s_terms_map ' % ivtidx + \\\n 'WHERE term_id=? AND rank=? %s %%s LIMIT ? OFFSET ?'\n\n if object_type == None:\n q %= ''\n v = (id, rank, sql_limit, state[id][\"offset\"][rank])\n else:\n q %= 'AND object_type=?'\n v = (id, rank, object_type, sql_limit, state[id][\"offset\"][rank])\n\n if id_constraints:\n # We know about all objects that match one or more of the other\n # search terms, so we add the constraint that all rows for this\n # term match the others as well. Effectively we push the logic\n # to generate the intersection into the db.\n # XXX: This can't benefit from the index if object_type\n # is not specified.\n q %= ' AND object_id IN %s' % _list_to_printable(tuple(id_constraints))\n else:\n q %= ''\n\n rows = self._db_query(q, v)\n nqueries += 1\n state[id]['more'][rank] = len(rows) == sql_limit\n state[id]['count'] += len(rows)\n\n for row in rows:\n results[id][row[0], row[1]] = row[2] * terms[id]['idf_t']\n terms[id]['ids'][row[1]] = 1\n\n if state[id]['count'] >= terms[id]['count'] or \\\n (id_constraints and len(rows) == len(id_constraints)):\n # If we've now retrieved all objects for this term, or if\n # all the results we just got now intersect with our\n # constraints set, we're done this term and don't bother\n # querying it at other ranks.\n #print 'Done term '%s' at rank %d' % (terms[id]['term'], rank)\n state[id]['done'] = True\n if id_constraints is not None:\n id_constraints = id_constraints.intersection(terms[id]['ids'])\n else:\n id_constraints = set(terms[id]['ids'])\n #\n # end loop over terms\n\n\n for r in reduce(lambda a, b: set(a).intersection(b), results.values()):\n all_results[r] = 0\n for id in ids:\n if r in results[id]:\n all_results[r] += results[id][r]\n\n # If we have enough results already, no sense in querying the\n # next rank.\n if limit > 0 and len(all_results) > limit*2:\n finished = True\n #print \"Breaking at rank:\", rank\n break\n #\n # end loop over ranks\n\n if finished:\n break\n\n finished = True\n for index in range(len(ids)):\n id = ids[index]\n\n if index > 0:\n last_id = ids[index-1]\n a = results[last_id]\n b = results[id]\n intersect = set(a).intersection(b)\n\n if len(intersect) == 0:\n # Is there any more at any rank?\n a_more = b_more = False\n for rank in range(11):\n a_more = a_more or state[last_id]['more'][rank]\n b_more = b_more or state[id]['more'][rank]\n\n if not a_more and not b_more:\n # There's no intersection between these two search\n # terms and neither have more at any rank, so we\n # can stop the whole query.\n finished = True\n break\n\n # There's still hope of a match. Go through this term and\n # see if more exists at any rank, increasing offset and\n # unsetting finished flag so we iterate again.\n for rank in range(10, -1, -1):\n if state[id]['more'][rank] and not state[id]['done']:\n state[id]['offset'][rank] += sql_limit\n finished = False\n\n # If we haven't found enough results after this pass, grow our\n # limit so that we expand our search scope. 
(XXX: this value may\n # need empirical tweaking.)\n sql_limit *= 10\n\n # end loop while not finished\n log.info('%d results, did %d subqueries, %.04f seconds (%.04f overhead)',\n len(all_results), nqueries, time.time()-t0, t1-t0)\n return all_results", "def sort_terms(self):\n sorted_index = OrderedDict({})\n for k in sorted(self.inverted_index.keys()):\n sorted_index[k] = self.inverted_index[k]\n self.inverted_index = sorted_index", "def _query_inverted_index(self, ivtidx, terms, limit = 100, object_type = None):\n t0 = time.time()\n # Fetch number of files the inverted index applies to. (Used in score\n # calculations.)\n objectcount = self._inverted_indexes[ivtidx]['objectcount']\n\n if not isinstance(terms, (list, tuple)):\n split = self._inverted_indexes[ivtidx]['split']\n if callable(split):\n terms = [term for term in split(tostr(terms).lower()) if term]\n else:\n terms = [term for term in split.split(tostr(terms).lower()) if term]\n else:\n terms = [ tostr(x).lower() for x in terms ]\n\n # Remove terms that aren't indexed (words less than minimum length\n # or and terms in the ignore list for this ivtidx).\n if self._inverted_indexes[ivtidx]['min']:\n terms = [ x for x in terms if len(x) >= self._inverted_indexes[ivtidx]['min'] ]\n if self._inverted_indexes[ivtidx]['ignore']:\n terms = [ x for x in terms if x not in self._inverted_indexes[ivtidx]['ignore'] ]\n\n terms_list = _list_to_printable(terms)\n nterms = len(terms)\n\n if nterms == 0:\n return []\n\n # Find term ids and order by least popular to most popular.\n rows = self._db_query('SELECT id,term,count FROM ivtidx_%s_terms WHERE ' \\\n 'term IN %s ORDER BY count' % (ivtidx, terms_list))\n save = [x.lower() for x in terms]\n terms = {}\n ids = []\n for row in rows:\n if row[2] == 0:\n return []\n\n # Give terms weight according to their order\n order_weight = 1 + len(save) - list(save).index(row[1])\n terms[row[0]] = {\n 'term': row[1],\n 'count': row[2],\n 'idf_t': math.log(objectcount / row[2] + 1) + order_weight,\n 'ids': {}\n }\n ids.append(row[0])\n\n # Not all the terms we requested are in the database, so we return\n # 0 results.\n if len(ids) < nterms:\n return []\n\n if object_type:\n # Resolve object type name to id\n object_type = self._get_type_id(object_type)\n\n results, state = {}, {}\n for id in ids:\n results[id] = {}\n state[id] = {\n 'offset': [0]*11,\n 'more': [True]*11,\n 'count': 0,\n 'done': False\n }\n\n all_results = {}\n if limit == None:\n limit = objectcount\n\n if limit <= 0 or objectcount <= 0:\n return {}\n\n sql_limit = min(limit*3, 200)\n finished = False\n nqueries = 0\n\n # Keep a dict keyed on object_id that we can use to narrow queries\n # once we have a full list of all objects that match a given term.\n id_constraints = None\n t1 = time.time()\n while not finished:\n for rank in range(10, -1, -1):\n for id in ids:\n if not state[id]['more'][rank] or state[id]['done']:\n # If there's no more results at this rank, or we know\n # we've already seen all the results for this term, we\n # don't bother with the query.\n continue\n\n q = 'SELECT object_type,object_id,frequency FROM ivtidx_%s_terms_map ' % ivtidx + \\\n 'WHERE term_id=? AND rank=? %s %%s LIMIT ? 
OFFSET ?'\n\n if object_type == None:\n q %= ''\n v = [id, rank, sql_limit, state[id][\"offset\"][rank]]\n else:\n q %= 'AND object_type=?'\n v = [id, rank, object_type, sql_limit, state[id][\"offset\"][rank]]\n\n if id_constraints:\n # We know about all objects that match one or more of the other\n # search terms, so we add the constraint that all rows for this\n # term match the others as well. Effectively we push the logic\n # to generate the intersection into the db.\n # XXX: This can't benefit from the index if object_type\n # is not specified.\n q %= ' AND object_id IN %s' % _list_to_printable(tuple(id_constraints))\n # But since we're specifying a list of ids to search for with this\n # term, we can't use limit/offset, since the constraints might be\n # different since the last iteration.\n v[-2:] = [-1, 0]\n else:\n q %= ''\n\n rows = self._db_query(q, v)\n nqueries += 1\n state[id]['more'][rank] = len(rows) == sql_limit\n state[id]['count'] += len(rows)\n\n for row in rows:\n results[id][row[0], row[1]] = row[2] * terms[id]['idf_t']\n terms[id]['ids'][row[1]] = 1\n\n if state[id]['count'] >= terms[id]['count'] or \\\n (id_constraints and len(rows) == len(id_constraints)):\n # If we've now retrieved all objects for this term, or if\n # all the results we just got now intersect with our\n # constraints set, we're done this term and don't bother\n # querying it at other ranks.\n #print \"Done term '%s' at rank %d\" % (terms[id]['term'], rank)\n state[id]['done'] = True\n if id_constraints is not None:\n id_constraints = id_constraints.intersection(terms[id]['ids'])\n else:\n id_constraints = set(terms[id]['ids'])\n #\n # end loop over terms\n\n\n for r in functools.reduce(lambda a, b: set(a).intersection(b), results.values()):\n all_results[r] = 0\n for id in ids:\n if r in results[id]:\n all_results[r] += results[id][r]\n\n # If we have enough results already, no sense in querying the\n # next rank.\n if limit > 0 and len(all_results) > limit*2:\n finished = True\n #print \"Breaking at rank:\", rank\n break\n #\n # end loop over ranks\n\n if finished:\n break\n\n finished = True\n for index in range(len(ids)):\n id = ids[index]\n\n if index > 0:\n last_id = ids[index-1]\n a = results[last_id]\n b = results[id]\n intersect = set(a).intersection(b)\n\n if len(intersect) == 0:\n # Is there any more at any rank?\n a_more = b_more = False\n for rank in range(11):\n a_more = a_more or state[last_id]['more'][rank]\n b_more = b_more or state[id]['more'][rank]\n\n if not a_more and not b_more:\n # There's no intersection between these two search\n # terms and neither have more at any rank, so we\n # can stop the whole query.\n finished = True\n break\n\n # There's still hope of a match. Go through this term and\n # see if more exists at any rank, increasing offset and\n # unsetting finished flag so we iterate again.\n for rank in range(10, -1, -1):\n if state[id]['more'][rank] and not state[id]['done']:\n state[id]['offset'][rank] += sql_limit\n finished = False\n\n # If we haven't found enough results after this pass, grow our\n # limit so that we expand our search scope. 
(XXX: this value may\n # need empirical tweaking.)\n sql_limit *= 10\n\n # end loop while not finished\n log.debug('%d results, did %d subqueries, %.04f seconds (%.04f overhead)',\n len(all_results), nqueries, time.time()-t0, t1-t0)\n return all_results", "def add_word(self,word,index):\r\n\r\n #with shelf\r\n if self.using_shelf:\r\n\r\n if word in self.word_dict:\r\n\r\n self.word_dict[word].add(str(index))\r\n else:\r\n self.word_dict[word] = {str(index)}\r\n\r\n\r\n #with database\r\n if self.using_database:\r\n\r\n value_tuple = (notebookname, word,)\r\n db_cursor.execute(\"INSERT OR REPLACE \"\r\n +\"INTO all_words \"\r\n +\"(word, notebook)\"\r\n +\" VALUES (?,?);\",value_tuple)\r\n value_tuple = (notebookname, word, str(index))\r\n db_cursor.execute(\"INSERT OR REPLACE\"\r\n +\" INTO word_to_indexes \"\r\n +\"(notebook, word, note_index)\"\r\n +\" VALUES (?,?,?);\",\r\n value_tuple)", "def index_document(self, document):\n \n # Remove punctuation from the text.\n clean_text = re.sub(r'[\\n,*().\\-\\:]',' ', document['text'])\n \n terms = word_tokenize(clean_text.lower())\n appearances_dict = dict()\n # Dictionary with each term and the frequency it appears in the text.\n for term in terms:\n term_frequency = appearances_dict[term].frequency if term in appearances_dict else 0\n appearances_dict[term] = Appearance(document['id'], term_frequency + 1)\n \n # Update the inverted index\n update_dict = { key: [appearance]\n if key not in self.index\n else self.index[key] + [appearance]\n for (key, appearance) in appearances_dict.items() }\n self.index.update(update_dict)\n # Add the document into the database\n self.db.add(document)\n return document", "def _add_to_index( env, meta_dict, file_str, logger ):\n global adapter_glob\n if adapter_glob is not None:\n adapter = adapter_glob\n else:\n logger.warning( u\"Connecting to index...\" )\n adapter = adapter_file.adapter(env)\n adapter_glob = adapter\n doc = document(\n env[\"metadata\"][\"known_keys\"].keys(),\n meta_dict,\n env,\n )\n return adapter.add(doc, boosts=env[\"metadata\"][\"boosts\"])\n #logger.info(u\"Added to index [%s]\", file_str)", "def _delete_object_inverted_index_terms(self, obj, ivtidx):\n object_type, object_id = obj\n self._delete_multiple_objects_inverted_index_terms({object_type: ((ivtidx,), (object_id,))})", "def __set_data_to_db(term, data):\n data_copy = copy.deepcopy(data)\n instances = []\n for app_data in data_copy:\n instance, created = AppData.objects.get_or_create(\n uid=app_data.pop('uid'), **app_data)\n instances.append(instance)\n index, created = AppSearchIndex.objects.get_or_create(query=term)\n index.apps.add(*instances)", "def add(self, object_type, parent=None, **attrs):\n if self._readonly:\n raise DatabaseReadOnlyError('upgrade_to_py3() must be called before database can be modified')\n\n type_attrs = self._get_type_attrs(object_type)\n if parent:\n attrs['parent_type'], attrs['parent_id'] = self._to_obj_tuple(parent, numeric=True)\n\n # Increment objectcount for the applicable inverted indexes.\n inverted_indexes = self._get_type_inverted_indexes(object_type)\n if inverted_indexes:\n self._db_query(\"UPDATE inverted_indexes SET value=value+1 WHERE attr='objectcount' AND name IN %s\" % \\\n _list_to_printable(inverted_indexes))\n\n\n # Process inverted index maps for this row\n ivtidx_terms = []\n for ivtidx in inverted_indexes:\n # Sync cached objectcount with the DB (that we just updated above)\n self._inverted_indexes[ivtidx]['objectcount'] += 1\n terms_list = []\n split = 
self._inverted_indexes[ivtidx]['split']\n for name, (attr_type, flags, attr_ivtidx, attr_split) in type_attrs.items():\n if attr_ivtidx == ivtidx and name in attrs:\n terms_list.append((attrs[name], 1.0, attr_split or split, ivtidx))\n\n if ivtidx in attrs and ivtidx not in type_attrs:\n # Attribute named after an inverted index is given in kwagrs,\n # but that ivtidx is not a registered attribute (which would be\n # handled in the for loop just above).\n terms_list.append((attrs[ivtidx], 1.0, split, ivtidx))\n\n terms = self._score_terms(terms_list)\n if terms:\n ivtidx_terms.append((ivtidx, terms))\n # If there are no terms for this ivtidx, we don't bother storing\n # an empty list in the pickle.\n if ivtidx in type_attrs:\n # Registered attribute named after ivtidx; store ivtidx\n # terms in object.\n attrs[ivtidx] = list(terms.keys())\n\n query, values = self._make_query_from_attrs(\"add\", attrs, object_type)\n self._db_query(query, values)\n\n # Add id given by db, as well as object type.\n attrs['id'] = self._cursor.lastrowid\n attrs['type'] = str(object_type)\n attrs['parent'] = self._to_obj_tuple(parent) if parent else (None, None)\n\n for ivtidx, terms in ivtidx_terms:\n self._add_object_inverted_index_terms((object_type, attrs['id']), ivtidx, terms)\n\n # Populate dictionary with keys for this object type not specified in kwargs.\n attrs.update(dict.fromkeys([k for k in type_attrs if k not in list(attrs.keys()) + ['pickle']]))\n\n self._set_dirty()\n return ObjectRow(None, None, attrs)", "def index(self):\n print(\"Indexing...\")\n # ------------------------------------------------------------------\n # TODO: Create an inverted, positional index.\n # Granted this may not be a linked list as in a proper\n # implementation.\n # This index should allow easy access to both \n # 1) the documents in which a particular word is contained, and \n # 2) for every document, the positions of that word in the document \n # Some helpful instance variables:\n # * self.docs = List of documents\n # * self.titles = List of titles\n inv_index = defaultdict(set)\n self.tf = defaultdict(Counter)\n \n for word in self.vocab:\n inv_index[word] = {} # create dictionary with words in V\n\n # Generate inverted index here\n for doc in range(len(self.docs)):\n for word in self.docs[doc]:\n self.tf[doc][word] += 1 # represents how many times word 'word' is mentioned in document 'i'\n \n for doc, title in zip(self.docs, self.titles):\n for word in self.vocab:\n inv_index[word][title] = [] # list for each word in vocabulary for all titles\n for pos, word in enumerate(doc):\n inv_index[word][title].append(pos)\n\n self.inv_index = inv_index\n # ------------------------------------------------------------------\n\n # turn self.docs into a map from ID to bag of words\n id_to_bag_of_words = {}\n for d, doc in enumerate(self.docs):\n bag_of_words = set(doc)\n id_to_bag_of_words[d] = bag_of_words\n self.docs = id_to_bag_of_words", "def _score_terms(self, terms_list):\n terms_scores = {}\n total_terms = 0\n\n for terms, coeff, split, ivtidx in terms_list:\n if not terms:\n continue\n # Swap ivtidx name for inverted index definition dict\n ivtidx = self._inverted_indexes[ivtidx]\n if not isinstance(terms, (basestring, list, tuple)):\n raise ValueError, \"Invalid type (%s) for ATTR_INVERTED_INDEX attribute. 
\" \\\n \"Only sequence, unicode or str allowed.\" % str(type(terms))\n\n if isinstance(terms, (list, tuple)):\n parsed = terms\n else:\n if callable(split):\n parsed = split(terms)\n else:\n parsed = split.split(terms)\n\n for term in parsed:\n if not term or (ivtidx['max'] and len(term) > ivtidx['max']) or \\\n (ivtidx['min'] and len(term) < ivtidx['min']):\n continue\n\n term = str_to_unicode(term)\n lower_term = term.lower()\n\n if ivtidx['ignore'] and lower_term in ivtidx['ignore']:\n continue\n if lower_term not in terms_scores:\n terms_scores[lower_term] = [term, coeff]\n else:\n terms_scores[lower_term][1] += coeff\n total_terms += 1\n\n # Score based on term frequency in document. (Add weight for\n # non-dictionary terms? Or longer terms?)\n for lower_term, score in terms_scores.items():\n terms_scores[lower_term][1] = math.sqrt(terms_scores[lower_term][1] / total_terms)\n return dict(terms_scores.values())", "def _score_terms(self, terms_list):\n terms_scores = {}\n total_terms = 0\n\n for terms, coeff, split, ivtidx in terms_list:\n if not terms:\n continue\n # Swap ivtidx name for inverted index definition dict\n ivtidx = self._inverted_indexes[ivtidx]\n if not isinstance(terms, (str, list, tuple)):\n raise ValueError(\"Invalid type (%s) for ATTR_INVERTED_INDEX attribute. \" \\\n \"Only sequence, unicode or str allowed.\" % str(type(terms)))\n\n if isinstance(terms, (list, tuple)):\n terms = [tostr(term) for term in terms]\n parsed = terms\n else:\n terms = tostr(terms)\n if callable(split):\n parsed = list(split(terms))\n else:\n parsed = split.split(terms)\n\n for term in parsed:\n if not term or (ivtidx['max'] and len(term) > ivtidx['max']) or \\\n (ivtidx['min'] and len(term) < ivtidx['min']):\n continue\n\n lower_term = term.lower()\n\n if ivtidx['ignore'] and lower_term in ivtidx['ignore']:\n continue\n if lower_term not in terms_scores:\n terms_scores[lower_term] = [term, coeff]\n else:\n terms_scores[lower_term][1] += coeff\n total_terms += 1\n\n # Score based on term frequency in document. (Add weight for\n # non-dictionary terms? Or longer terms?)\n for lower_term, score in terms_scores.items():\n terms_scores[lower_term][1] = math.sqrt(terms_scores[lower_term][1] / total_terms)\n return dict(terms_scores.values())", "def add_terms_data(self, terms: Dict[datetime, List[dict]]):\n raise NotImplementedError()", "def _delete_object_inverted_index_terms(self, (object_type, object_id), ivtidx):\n self._delete_multiple_objects_inverted_index_terms({object_type: ((ivtidx,), (object_id,))})", "def add(self, term, count=1):\n term = term.lower() if self.lower else term\n if term in self.term2id:\n idx = self.term2id[term]\n else:\n idx = len(self.id2term)\n self.id2term[idx] = term\n self.term2id[term] = idx\n if count > 0:\n if term in self.term_frequent:\n self.term_frequent[term] += count\n else:\n self.term_frequent[term] = count\n return idx", "def get_inverted_index_terms(self, ivtidx, associated = None, prefix = None):\n if ivtidx not in self._inverted_indexes:\n raise ValueError(\"'%s' is not a registered inverted index.\" % ivtidx)\n\n if prefix:\n where_clause = 'WHERE terms.term >= ? 
AND terms.term <= ?'\n where_values = (prefix, prefix + 'z')\n else:\n where_clause = ''\n where_values = ()\n\n if not associated:\n return self._db_query('''SELECT term, count\n FROM ivtidx_%s_terms AS terms\n %s\n ORDER BY count DESC''' % (ivtidx, where_clause), where_values)\n\n\n rows = self._db_query('SELECT id FROM ivtidx_%s_terms WHERE term IN %s ORDER BY count' % \\\n (ivtidx, _list_to_printable(associated)))\n term_ids = [ x[0] for x in rows ]\n if len(term_ids) < len(associated):\n return []\n\n query = '''SELECT term, COUNT(*) AS total\n FROM ivtidx_%s_terms_map AS t0''' % ivtidx\n for n, term_id in enumerate(term_ids):\n query += ''' JOIN ivtidx_%s_terms_map t%d\n ON t%d.object_type = t%d.object_type AND\n t%d.object_id = t%d.object_id AND\n t%d.term_id = %d''' % \\\n (ivtidx, n + 1, n, n + 1, n, n + 1, n + 1, term_id)\n query += ''' JOIN ivtidx_%s_terms AS terms\n ON t0.term_id = terms.id AND\n t0.term_id NOT IN %s\n %s\n GROUP BY t0.term_id\n ORDER BY total DESC ''' % \\\n (ivtidx, _list_to_printable(term_ids), where_clause)\n return self._db_query(query, where_values)", "def terms(self, terms):\n\n self._terms = terms", "def get_inverted_index_terms(self, ivtidx, associated = None, prefix = None):\n if ivtidx not in self._inverted_indexes:\n raise ValueError, \"'%s' is not a registered inverted index.\" % ivtidx\n\n if prefix:\n where_clause = 'WHERE terms.term >= ? AND terms.term <= ?'\n where_values = (prefix, prefix + 'z')\n else:\n where_clause = ''\n where_values = ()\n\n if not associated:\n return self._db_query('''SELECT term, count\n FROM ivtidx_%s_terms AS terms\n %s\n ORDER BY count DESC''' % (ivtidx, where_clause), where_values)\n\n\n rows = self._db_query('SELECT id FROM ivtidx_%s_terms WHERE term IN %s ORDER BY count' % \\\n (ivtidx, _list_to_printable(associated)))\n term_ids = [ x[0] for x in rows ]\n if len(term_ids) < len(associated):\n return []\n\n query = '''SELECT term, COUNT(*) AS total\n FROM ivtidx_%s_terms_map AS t0''' % ivtidx\n for n, term_id in enumerate(term_ids):\n query += ''' JOIN ivtidx_%s_terms_map t%d\n ON t%d.object_type = t%d.object_type AND\n t%d.object_id = t%d.object_id AND\n t%d.term_id = %d''' % \\\n (ivtidx, n + 1, n, n + 1, n, n + 1, n + 1, term_id)\n query += ''' JOIN ivtidx_%s_terms AS terms\n ON t0.term_id = terms.id AND\n t0.term_id NOT IN %s\n %s\n GROUP BY t0.term_id\n ORDER BY total DESC ''' % \\\n (ivtidx, _list_to_printable(term_ids), where_clause)\n return self._db_query(query, where_values)", "def index_document(self, text: str, name: str):\n tokens = self.tokenize(text)\n term_frequencies = Counter(tokens) # Calculate term frequencies\n doc_id = len(self.documents) # Get document id as newest document\n\n for term in term_frequencies:\n if term not in self.index:\n self.index[term] = {}\n self.index[term][doc_id] = term_frequencies[term]\n\n self.documents[doc_id] = {\n \"name\": name,\n \"mag\": self.magnitude(term_frequencies.values())\n }", "def save_object(self, obj):\n return AlgoliaUtils_request(self.client.headers, self.write_hosts, \"PUT\", \"/1/indexes/%s/%s\" % (self.url_index_name, quote((\"%s\" % obj[\"objectID\"]).encode('utf8'), safe='')), self.client.timeout, obj)", "def add_terms_to_graph(graph, cursor, docid, nr_terms):\n # Retrieve n terms from database\n terms = db_utils.get_entities_from_docid(cursor, docid, \"tfidf_terms\")[:nr_terms]\n\n # Create node for each term\n for term in terms:\n term_name = term[0]\n term_positions = json.loads(term[1])\n term_tf = int(term[2])\n 
graph.add_node(Node(term_name, \"term\", term_positions, term_tf))", "def add_terms(self, project_id, data):\n data = self._run(\n url_path=\"terms/add\",\n id=project_id,\n data=json.dumps(data)\n )\n return data['result']['terms']", "def create_or_update_term(self, term, doc_id, hits):\n term_id = self.get_or_create_term(term)\n postings_table = 'term_%d' % term_id\n\n self.db.execute('''INSERT INTO %s(document_id, hits) VALUES(?, ?)''' % postings_table, (doc_id, hits))", "def update_terms(self, terms):\n with transaction.atomic():\n self.phenotype_terms.all().delete()\n for name, lst in terms.items():\n self.phenotype_terms.create(individual=name, terms=lst)", "def index_terms(self, terms):\n index = dict()\n for term in terms:\n links = [cell.metadata[\"nbpages\"][\"link\"] for nb in self.notebooks\n for cell in nb.content.cells if re.search(term, cell.source) if \"nbpages\" in cell.metadata.keys()]\n index[term] = list(dict.fromkeys(links))\n return index" ]
[ "0.8023277", "0.61256963", "0.5922523", "0.57880175", "0.5704644", "0.56919354", "0.56858873", "0.56835586", "0.56740934", "0.56609404", "0.56487274", "0.5621855", "0.56036234", "0.55900854", "0.5573914", "0.5557396", "0.5548084", "0.5533701", "0.55311424", "0.53814083", "0.5378356", "0.53717214", "0.5369809", "0.53632617", "0.5321292", "0.5316626", "0.5291472", "0.5267835", "0.526185", "0.52368426" ]
0.7867361
1
Queries the inverted index ivtidx for the terms supplied in the terms argument. If terms is a string, it is parsed into individual terms based on the split for the given ivtidx. The terms argument may also be a list or tuple, in which case no parsing is done. The search algorithm tries to optimize for the common case. When terms are scored (_score_terms()), each term is assigned a score that is stored in the database (as a float) and also as an integer in the range 0-10, called rank. (So a term with score 0.35 has a rank 3.) Multiple passes are made over the terms map table for the given ivtidx, first starting at the highest rank fetching a certain number of rows, and progressively drilling down to lower ranks, trying to find enough results to fill our limit that intersects on all supplied terms. If our limit isn't met and all ranks have been searched but there are still more possible matches (because we use LIMIT on the SQL statement), we expand the LIMIT (currently by an order of 10) and try again, specifying an OFFSET in the query. The worst case scenario is: given two search terms, each term matches 50% of all rows but there is only one intersecting row. (Or, more generally, given N terms, each term matches (1/N)*100 percent of rows with only 1 row intersection between all N terms.) This could be improved by avoiding the OFFSET/LIMIT technique as described above, but that approach provides a big performance win in more common cases. This case can be mitigated by caching common term combinations, but it is an extremely difficult problem to solve. object_type specifies a type name to search (for example we can search type "image" with keywords "2005 vacation"), or if object_type is None (default), then all types are searched. This function returns a dictionary mapping (object_type, object_id) -> score for the objects which match the query.
def _query_inverted_index(self, ivtidx, terms, limit = 100, object_type = None): t0 = time.time() # Fetch number of files the inverted index applies to. (Used in score # calculations.) objectcount = self._inverted_indexes[ivtidx]['objectcount'] if not isinstance(terms, (list, tuple)): split = self._inverted_indexes[ivtidx]['split'] if callable(split): terms = split(str_to_unicode(terms).lower()) else: terms = split.split(str_to_unicode(terms).lower()) else: terms = [ str_to_unicode(x).lower() for x in terms ] # Remove terms that aren't indexed (words less than minimum length # or and terms in the ignore list for this ivtidx). if self._inverted_indexes[ivtidx]['min']: terms = [ x for x in terms if len(x) >= self._inverted_indexes[ivtidx]['min'] ] if self._inverted_indexes[ivtidx]['ignore']: terms = [ x for x in terms if x not in self._inverted_indexes[ivtidx]['ignore'] ] terms_list = _list_to_printable(terms) nterms = len(terms) if nterms == 0: return [] # Find term ids and order by least popular to most popular. rows = self._db_query('SELECT id,term,count FROM ivtidx_%s_terms WHERE ' \ 'term IN %s ORDER BY count' % (ivtidx, terms_list)) save = map(lambda x: x.lower(), terms) terms = {} ids = [] for row in rows: if row[2] == 0: return [] # Give terms weight according to their order order_weight = 1 + len(save) - list(save).index(row[1]) terms[row[0]] = { 'term': row[1], 'count': row[2], 'idf_t': math.log(objectcount / row[2] + 1) + order_weight, 'ids': {} } ids.append(row[0]) # Not all the terms we requested are in the database, so we return # 0 results. if len(ids) < nterms: return [] if object_type: # Resolve object type name to id object_type = self._get_type_id(object_type) results, state = {}, {} for id in ids: results[id] = {} state[id] = { 'offset': [0]*11, 'more': [True]*11, 'count': 0, 'done': False } all_results = {} if limit == None: limit = objectcount if limit <= 0 or objectcount <= 0: return {} sql_limit = min(limit*3, 200) finished = False nqueries = 0 # Keep a dict keyed on object_id that we can use to narrow queries # once we have a full list of all objects that match a given term. id_constraints = None t1 = time.time() while not finished: for rank in range(10, -1, -1): for id in ids: if not state[id]['more'][rank] or state[id]['done']: # If there's no more results at this rank, or we know # we've already seen all the results for this term, we # don't bother with the query. continue q = 'SELECT object_type,object_id,frequency FROM ivtidx_%s_terms_map ' % ivtidx + \ 'WHERE term_id=? AND rank=? %s %%s LIMIT ? OFFSET ?' if object_type == None: q %= '' v = (id, rank, sql_limit, state[id]["offset"][rank]) else: q %= 'AND object_type=?' v = (id, rank, object_type, sql_limit, state[id]["offset"][rank]) if id_constraints: # We know about all objects that match one or more of the other # search terms, so we add the constraint that all rows for this # term match the others as well. Effectively we push the logic # to generate the intersection into the db. # XXX: This can't benefit from the index if object_type # is not specified. 
q %= ' AND object_id IN %s' % _list_to_printable(tuple(id_constraints)) else: q %= '' rows = self._db_query(q, v) nqueries += 1 state[id]['more'][rank] = len(rows) == sql_limit state[id]['count'] += len(rows) for row in rows: results[id][row[0], row[1]] = row[2] * terms[id]['idf_t'] terms[id]['ids'][row[1]] = 1 if state[id]['count'] >= terms[id]['count'] or \ (id_constraints and len(rows) == len(id_constraints)): # If we've now retrieved all objects for this term, or if # all the results we just got now intersect with our # constraints set, we're done this term and don't bother # querying it at other ranks. #print 'Done term '%s' at rank %d' % (terms[id]['term'], rank) state[id]['done'] = True if id_constraints is not None: id_constraints = id_constraints.intersection(terms[id]['ids']) else: id_constraints = set(terms[id]['ids']) # # end loop over terms for r in reduce(lambda a, b: set(a).intersection(b), results.values()): all_results[r] = 0 for id in ids: if r in results[id]: all_results[r] += results[id][r] # If we have enough results already, no sense in querying the # next rank. if limit > 0 and len(all_results) > limit*2: finished = True #print "Breaking at rank:", rank break # # end loop over ranks if finished: break finished = True for index in range(len(ids)): id = ids[index] if index > 0: last_id = ids[index-1] a = results[last_id] b = results[id] intersect = set(a).intersection(b) if len(intersect) == 0: # Is there any more at any rank? a_more = b_more = False for rank in range(11): a_more = a_more or state[last_id]['more'][rank] b_more = b_more or state[id]['more'][rank] if not a_more and not b_more: # There's no intersection between these two search # terms and neither have more at any rank, so we # can stop the whole query. finished = True break # There's still hope of a match. Go through this term and # see if more exists at any rank, increasing offset and # unsetting finished flag so we iterate again. for rank in range(10, -1, -1): if state[id]['more'][rank] and not state[id]['done']: state[id]['offset'][rank] += sql_limit finished = False # If we haven't found enough results after this pass, grow our # limit so that we expand our search scope. (XXX: this value may # need empirical tweaking.) sql_limit *= 10 # end loop while not finished log.info('%d results, did %d subqueries, %.04f seconds (%.04f overhead)', len(all_results), nqueries, time.time()-t0, t1-t0) return all_results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _query_inverted_index(self, ivtidx, terms, limit = 100, object_type = None):\n t0 = time.time()\n # Fetch number of files the inverted index applies to. (Used in score\n # calculations.)\n objectcount = self._inverted_indexes[ivtidx]['objectcount']\n\n if not isinstance(terms, (list, tuple)):\n split = self._inverted_indexes[ivtidx]['split']\n if callable(split):\n terms = [term for term in split(tostr(terms).lower()) if term]\n else:\n terms = [term for term in split.split(tostr(terms).lower()) if term]\n else:\n terms = [ tostr(x).lower() for x in terms ]\n\n # Remove terms that aren't indexed (words less than minimum length\n # or and terms in the ignore list for this ivtidx).\n if self._inverted_indexes[ivtidx]['min']:\n terms = [ x for x in terms if len(x) >= self._inverted_indexes[ivtidx]['min'] ]\n if self._inverted_indexes[ivtidx]['ignore']:\n terms = [ x for x in terms if x not in self._inverted_indexes[ivtidx]['ignore'] ]\n\n terms_list = _list_to_printable(terms)\n nterms = len(terms)\n\n if nterms == 0:\n return []\n\n # Find term ids and order by least popular to most popular.\n rows = self._db_query('SELECT id,term,count FROM ivtidx_%s_terms WHERE ' \\\n 'term IN %s ORDER BY count' % (ivtidx, terms_list))\n save = [x.lower() for x in terms]\n terms = {}\n ids = []\n for row in rows:\n if row[2] == 0:\n return []\n\n # Give terms weight according to their order\n order_weight = 1 + len(save) - list(save).index(row[1])\n terms[row[0]] = {\n 'term': row[1],\n 'count': row[2],\n 'idf_t': math.log(objectcount / row[2] + 1) + order_weight,\n 'ids': {}\n }\n ids.append(row[0])\n\n # Not all the terms we requested are in the database, so we return\n # 0 results.\n if len(ids) < nterms:\n return []\n\n if object_type:\n # Resolve object type name to id\n object_type = self._get_type_id(object_type)\n\n results, state = {}, {}\n for id in ids:\n results[id] = {}\n state[id] = {\n 'offset': [0]*11,\n 'more': [True]*11,\n 'count': 0,\n 'done': False\n }\n\n all_results = {}\n if limit == None:\n limit = objectcount\n\n if limit <= 0 or objectcount <= 0:\n return {}\n\n sql_limit = min(limit*3, 200)\n finished = False\n nqueries = 0\n\n # Keep a dict keyed on object_id that we can use to narrow queries\n # once we have a full list of all objects that match a given term.\n id_constraints = None\n t1 = time.time()\n while not finished:\n for rank in range(10, -1, -1):\n for id in ids:\n if not state[id]['more'][rank] or state[id]['done']:\n # If there's no more results at this rank, or we know\n # we've already seen all the results for this term, we\n # don't bother with the query.\n continue\n\n q = 'SELECT object_type,object_id,frequency FROM ivtidx_%s_terms_map ' % ivtidx + \\\n 'WHERE term_id=? AND rank=? %s %%s LIMIT ? OFFSET ?'\n\n if object_type == None:\n q %= ''\n v = [id, rank, sql_limit, state[id][\"offset\"][rank]]\n else:\n q %= 'AND object_type=?'\n v = [id, rank, object_type, sql_limit, state[id][\"offset\"][rank]]\n\n if id_constraints:\n # We know about all objects that match one or more of the other\n # search terms, so we add the constraint that all rows for this\n # term match the others as well. 
Effectively we push the logic\n # to generate the intersection into the db.\n # XXX: This can't benefit from the index if object_type\n # is not specified.\n q %= ' AND object_id IN %s' % _list_to_printable(tuple(id_constraints))\n # But since we're specifying a list of ids to search for with this\n # term, we can't use limit/offset, since the constraints might be\n # different since the last iteration.\n v[-2:] = [-1, 0]\n else:\n q %= ''\n\n rows = self._db_query(q, v)\n nqueries += 1\n state[id]['more'][rank] = len(rows) == sql_limit\n state[id]['count'] += len(rows)\n\n for row in rows:\n results[id][row[0], row[1]] = row[2] * terms[id]['idf_t']\n terms[id]['ids'][row[1]] = 1\n\n if state[id]['count'] >= terms[id]['count'] or \\\n (id_constraints and len(rows) == len(id_constraints)):\n # If we've now retrieved all objects for this term, or if\n # all the results we just got now intersect with our\n # constraints set, we're done this term and don't bother\n # querying it at other ranks.\n #print \"Done term '%s' at rank %d\" % (terms[id]['term'], rank)\n state[id]['done'] = True\n if id_constraints is not None:\n id_constraints = id_constraints.intersection(terms[id]['ids'])\n else:\n id_constraints = set(terms[id]['ids'])\n #\n # end loop over terms\n\n\n for r in functools.reduce(lambda a, b: set(a).intersection(b), results.values()):\n all_results[r] = 0\n for id in ids:\n if r in results[id]:\n all_results[r] += results[id][r]\n\n # If we have enough results already, no sense in querying the\n # next rank.\n if limit > 0 and len(all_results) > limit*2:\n finished = True\n #print \"Breaking at rank:\", rank\n break\n #\n # end loop over ranks\n\n if finished:\n break\n\n finished = True\n for index in range(len(ids)):\n id = ids[index]\n\n if index > 0:\n last_id = ids[index-1]\n a = results[last_id]\n b = results[id]\n intersect = set(a).intersection(b)\n\n if len(intersect) == 0:\n # Is there any more at any rank?\n a_more = b_more = False\n for rank in range(11):\n a_more = a_more or state[last_id]['more'][rank]\n b_more = b_more or state[id]['more'][rank]\n\n if not a_more and not b_more:\n # There's no intersection between these two search\n # terms and neither have more at any rank, so we\n # can stop the whole query.\n finished = True\n break\n\n # There's still hope of a match. Go through this term and\n # see if more exists at any rank, increasing offset and\n # unsetting finished flag so we iterate again.\n for rank in range(10, -1, -1):\n if state[id]['more'][rank] and not state[id]['done']:\n state[id]['offset'][rank] += sql_limit\n finished = False\n\n # If we haven't found enough results after this pass, grow our\n # limit so that we expand our search scope. 
(XXX: this value may\n # need empirical tweaking.)\n sql_limit *= 10\n\n # end loop while not finished\n log.debug('%d results, did %d subqueries, %.04f seconds (%.04f overhead)',\n len(all_results), nqueries, time.time()-t0, t1-t0)\n return all_results", "def _add_object_inverted_index_terms(self, (object_type, object_id), ivtidx, terms):\n if not terms:\n return\n\n # Resolve object type name to id\n object_type = self._get_type_id(object_type)\n\n # Holds any of the given terms that already exist in the database\n # with their id and count.\n db_terms_count = {}\n\n terms_list = _list_to_printable([ t.lower() for t in terms.keys() ])\n q = \"SELECT id,term,count FROM ivtidx_%s_terms WHERE term IN %s\" % (ivtidx, terms_list)\n rows = self._db_query(q)\n for row in rows:\n db_terms_count[row[1]] = row[0], row[2]\n\n # For executemany queries later.\n update_list, map_list = [], []\n\n for term, score in terms.items():\n term = term.lower()\n if term not in db_terms_count:\n # New term, so insert it now.\n self._db_query('INSERT OR REPLACE INTO ivtidx_%s_terms VALUES(NULL, ?, 1)' % ivtidx, (term,))\n db_id, db_count = self._cursor.lastrowid, 1\n db_terms_count[term] = db_id, db_count\n else:\n db_id, db_count = db_terms_count[term]\n update_list.append((db_count + 1, db_id))\n\n map_list.append((int(score*10), db_id, object_type, object_id, score))\n\n self._db_query('UPDATE ivtidx_%s_terms SET count=? WHERE id=?' % ivtidx, update_list, many = True)\n self._db_query('INSERT INTO ivtidx_%s_terms_map VALUES(?, ?, ?, ?, ?)' % ivtidx, map_list, many = True)", "def _add_object_inverted_index_terms(self, obj, ivtidx, terms):\n object_type, object_id = obj\n if not terms:\n return\n\n # Resolve object type name to id\n object_type = self._get_type_id(object_type)\n\n # Holds any of the given terms that already exist in the database\n # with their id and count.\n db_terms_count = {}\n\n terms_list = _list_to_printable([ t.lower() for t in terms.keys() ])\n q = \"SELECT id,term,count FROM ivtidx_%s_terms WHERE term IN %s\" % (ivtidx, terms_list)\n rows = self._db_query(q)\n for row in rows:\n db_terms_count[row[1]] = row[0], row[2]\n\n # For executemany queries later.\n update_list, map_list = [], []\n\n for term, score in terms.items():\n term = term.lower()\n if term not in db_terms_count:\n # New term, so insert it now.\n self._db_query('INSERT OR REPLACE INTO ivtidx_%s_terms VALUES(NULL, ?, 1)' % ivtidx, (term,))\n db_id, db_count = self._cursor.lastrowid, 1\n db_terms_count[term] = db_id, db_count\n else:\n db_id, db_count = db_terms_count[term]\n update_list.append((db_count + 1, db_id))\n\n map_list.append((int(score*10), db_id, object_type, object_id, score))\n\n self._db_query('UPDATE ivtidx_%s_terms SET count=? WHERE id=?' % ivtidx, update_list, many = True)\n self._db_query('INSERT INTO ivtidx_%s_terms_map VALUES(?, ?, ?, ?, ?)' % ivtidx, map_list, many = True)", "def get_inverted_index_terms(self, ivtidx, associated = None, prefix = None):\n if ivtidx not in self._inverted_indexes:\n raise ValueError(\"'%s' is not a registered inverted index.\" % ivtidx)\n\n if prefix:\n where_clause = 'WHERE terms.term >= ? 
AND terms.term <= ?'\n where_values = (prefix, prefix + 'z')\n else:\n where_clause = ''\n where_values = ()\n\n if not associated:\n return self._db_query('''SELECT term, count\n FROM ivtidx_%s_terms AS terms\n %s\n ORDER BY count DESC''' % (ivtidx, where_clause), where_values)\n\n\n rows = self._db_query('SELECT id FROM ivtidx_%s_terms WHERE term IN %s ORDER BY count' % \\\n (ivtidx, _list_to_printable(associated)))\n term_ids = [ x[0] for x in rows ]\n if len(term_ids) < len(associated):\n return []\n\n query = '''SELECT term, COUNT(*) AS total\n FROM ivtidx_%s_terms_map AS t0''' % ivtidx\n for n, term_id in enumerate(term_ids):\n query += ''' JOIN ivtidx_%s_terms_map t%d\n ON t%d.object_type = t%d.object_type AND\n t%d.object_id = t%d.object_id AND\n t%d.term_id = %d''' % \\\n (ivtidx, n + 1, n, n + 1, n, n + 1, n + 1, term_id)\n query += ''' JOIN ivtidx_%s_terms AS terms\n ON t0.term_id = terms.id AND\n t0.term_id NOT IN %s\n %s\n GROUP BY t0.term_id\n ORDER BY total DESC ''' % \\\n (ivtidx, _list_to_printable(term_ids), where_clause)\n return self._db_query(query, where_values)", "def get_inverted_index_terms(self, ivtidx, associated = None, prefix = None):\n if ivtidx not in self._inverted_indexes:\n raise ValueError, \"'%s' is not a registered inverted index.\" % ivtidx\n\n if prefix:\n where_clause = 'WHERE terms.term >= ? AND terms.term <= ?'\n where_values = (prefix, prefix + 'z')\n else:\n where_clause = ''\n where_values = ()\n\n if not associated:\n return self._db_query('''SELECT term, count\n FROM ivtidx_%s_terms AS terms\n %s\n ORDER BY count DESC''' % (ivtidx, where_clause), where_values)\n\n\n rows = self._db_query('SELECT id FROM ivtidx_%s_terms WHERE term IN %s ORDER BY count' % \\\n (ivtidx, _list_to_printable(associated)))\n term_ids = [ x[0] for x in rows ]\n if len(term_ids) < len(associated):\n return []\n\n query = '''SELECT term, COUNT(*) AS total\n FROM ivtidx_%s_terms_map AS t0''' % ivtidx\n for n, term_id in enumerate(term_ids):\n query += ''' JOIN ivtidx_%s_terms_map t%d\n ON t%d.object_type = t%d.object_type AND\n t%d.object_id = t%d.object_id AND\n t%d.term_id = %d''' % \\\n (ivtidx, n + 1, n, n + 1, n, n + 1, n + 1, term_id)\n query += ''' JOIN ivtidx_%s_terms AS terms\n ON t0.term_id = terms.id AND\n t0.term_id NOT IN %s\n %s\n GROUP BY t0.term_id\n ORDER BY total DESC ''' % \\\n (ivtidx, _list_to_printable(term_ids), where_clause)\n return self._db_query(query, where_values)", "def advanced_search(self, terms, relation=None, index=0, limit=25, **kwargs):\n if not isinstance(terms, dict):\n raise TypeError(\"terms must be a dict\")\n # terms are sorted (for consistent tests between Python < 3.7 and >= 3.7)\n query = \" \".join(sorted(f'{k}:\"{v}\"' for (k, v) in terms.items()))\n return self.get_object(\n \"search\", relation=relation, q=query, index=index, limit=limit, **kwargs\n )", "def myhtable_index_search(files, index, terms):\n res_file = []\n count = 0\n if len(terms) == 0:\n print('empty terms')\n return\n for term in terms:\n term = term.lower()\n count += 1\n if count == 1:\n s = htable_get(index, term)\n if s == None:\n s = {-1}\n else:\n s = s.intersection(htable_get(index, term))\n for id in s:\n if id != -1:\n res_file.append(files[id])\n return res_file", "def index_search(files, index, terms):\n\n\n termlist = set()\n\n for i in range(len(terms)):\n for j in range(len(terms[i].split(\" \"))):\n\n termlist.add(terms[i].split(\" \")[j])\n\n indexlist = [index[w] for w in termlist]\n\n intersect = list(set.intersection(*indexlist))\n\n 
return [files[x] for x in intersect]", "def _score_terms(self, terms_list):\n terms_scores = {}\n total_terms = 0\n\n for terms, coeff, split, ivtidx in terms_list:\n if not terms:\n continue\n # Swap ivtidx name for inverted index definition dict\n ivtidx = self._inverted_indexes[ivtidx]\n if not isinstance(terms, (str, list, tuple)):\n raise ValueError(\"Invalid type (%s) for ATTR_INVERTED_INDEX attribute. \" \\\n \"Only sequence, unicode or str allowed.\" % str(type(terms)))\n\n if isinstance(terms, (list, tuple)):\n terms = [tostr(term) for term in terms]\n parsed = terms\n else:\n terms = tostr(terms)\n if callable(split):\n parsed = list(split(terms))\n else:\n parsed = split.split(terms)\n\n for term in parsed:\n if not term or (ivtidx['max'] and len(term) > ivtidx['max']) or \\\n (ivtidx['min'] and len(term) < ivtidx['min']):\n continue\n\n lower_term = term.lower()\n\n if ivtidx['ignore'] and lower_term in ivtidx['ignore']:\n continue\n if lower_term not in terms_scores:\n terms_scores[lower_term] = [term, coeff]\n else:\n terms_scores[lower_term][1] += coeff\n total_terms += 1\n\n # Score based on term frequency in document. (Add weight for\n # non-dictionary terms? Or longer terms?)\n for lower_term, score in terms_scores.items():\n terms_scores[lower_term][1] = math.sqrt(terms_scores[lower_term][1] / total_terms)\n return dict(terms_scores.values())", "def _score_terms(self, terms_list):\n terms_scores = {}\n total_terms = 0\n\n for terms, coeff, split, ivtidx in terms_list:\n if not terms:\n continue\n # Swap ivtidx name for inverted index definition dict\n ivtidx = self._inverted_indexes[ivtidx]\n if not isinstance(terms, (basestring, list, tuple)):\n raise ValueError, \"Invalid type (%s) for ATTR_INVERTED_INDEX attribute. \" \\\n \"Only sequence, unicode or str allowed.\" % str(type(terms))\n\n if isinstance(terms, (list, tuple)):\n parsed = terms\n else:\n if callable(split):\n parsed = split(terms)\n else:\n parsed = split.split(terms)\n\n for term in parsed:\n if not term or (ivtidx['max'] and len(term) > ivtidx['max']) or \\\n (ivtidx['min'] and len(term) < ivtidx['min']):\n continue\n\n term = str_to_unicode(term)\n lower_term = term.lower()\n\n if ivtidx['ignore'] and lower_term in ivtidx['ignore']:\n continue\n if lower_term not in terms_scores:\n terms_scores[lower_term] = [term, coeff]\n else:\n terms_scores[lower_term][1] += coeff\n total_terms += 1\n\n # Score based on term frequency in document. (Add weight for\n # non-dictionary terms? Or longer terms?)\n for lower_term, score in terms_scores.items():\n terms_scores[lower_term][1] = math.sqrt(terms_scores[lower_term][1] / total_terms)\n return dict(terms_scores.values())", "def _delete_object_inverted_index_terms(self, (object_type, object_id), ivtidx):\n self._delete_multiple_objects_inverted_index_terms({object_type: ((ivtidx,), (object_id,))})", "def find(terms):\n terms = ' '.join(terms)\n searcher = IndexSearcher(STORE)\n\n SHOULD = BooleanClause.Occur.SHOULD\n\n query = MultiFieldQueryParser.parse(terms, \n ['name_', 'full_text'], [SHOULD, SHOULD], StandardAnalyzer())\n hits = searcher.search(query)\n\n ret = []\n for i, hit in enumerate(hits):\n doc = Hit.cast_(hit).getDocument()\n ret.append(MyHit(doc, hits, i))\n if i == 10:\n break\n\n return ret", "def search(terms):\n indexdir = data_folder\n try:\n ix = windex.open_dir(indexdir)\n except EmptyIndexError as e:\n print('No Index found! 
Clone some repos or run index!')\n exit(0)\n\n with ix.searcher() as searcher:\n query = QueryParser(\"body\", schema).parse(' '.join(terms))\n results = searcher.search(query, terms=True)\n results.formatter = TermFormatter()\n #hi = whighlight.Highlighter(fragmenter=PinpointFragmenter)\n results.fragmenter = ContextFragmenter()\n for result in results:\n print('{0:-<40}'.format(term.bold(result['path'])))\n print(term.bold(\"[\" + result['type'] + \"]\") + '--preview:')\n print(result.highlights('body'))\n print('\\n')", "def index_terms(self, terms):\n index = dict()\n for term in terms:\n links = [cell.metadata[\"nbpages\"][\"link\"] for nb in self.notebooks\n for cell in nb.content.cells if re.search(term, cell.source) if \"nbpages\" in cell.metadata.keys()]\n index[term] = list(dict.fromkeys(links))\n return index", "def rankDocuments_itp(terms, docs, index, tf, itp): \n\n\tdocVectors=defaultdict(lambda: [0]*len(terms)) \t\n\tqueryVector=[0]*len(terms) \n\n\t# compute the norm for the query tf\n\tquery_terms_count = collections.Counter(terms) \n\t\n\tquery_norm = la.norm(list(query_terms_count.values()))\n\t\n\tfor termIndex, term in enumerate(terms): #termIndex is the index of the term in the query\n\t\t\tif term not in index:\n\t\t\t\t\tcontinue \n\t\t\t## Compute tf*idf(normalize tf as done with documents)\n\t\t\tqueryVector[termIndex]=query_terms_count[term]/query_norm * itp[term] \n\t\t\t# Generate docVectors for matching docs\n\t\t\tfor docIndex, (doc, postings) in enumerate(index[term]):\n \n\t\t\t\t\tif doc in docs:\n\t\t\t\t\t\t\tdocVectors[doc][termIndex]=tf[term][docIndex] * itp[term] \n\t\n\tdocScores=[ [np.dot(curDocVec, queryVector), doc] for doc, curDocVec in docVectors.items() ]\n\tdocScores.sort(reverse=True)\n\tresultDocs=[x[1] for x in docScores]\n\n\treturn resultDocs", "def _delete_object_inverted_index_terms(self, obj, ivtidx):\n object_type, object_id = obj\n self._delete_multiple_objects_inverted_index_terms({object_type: ((ivtidx,), (object_id,))})", "def getTerms(vocabulary_id, terms_id):\n return [getTerm(vocabulary_id, term_id) for term_id in terms_id]", "def search_tf_idf(query, index, tf, idf):\n\tquery = getTerms(query)\n\tdocs = set()\n\tfor term in query:\n\t\t\ttry:\n\t\t\t\t\t# store in termDocs the ids of the docs that contain \"term\"\n\t\t\t\t\ttermDocs = [posting[0] for posting in index[term]]\n\n\t\t\t\t\t# docs = docs Union termDocs\n\t\t\t\t\tdocs |= set(termDocs)\n\t\t\texcept:\n\t\t\t\t\t# term is not in index\n\t\t\t\t\tpass\n\tdocs = list(docs)\n\tranked_docs = rankDocuments_tf_idf(query, docs, index, idf, tf)\n\treturn ranked_docs", "def answer(document, search_terms):\n idx = {k: [] for k in search_terms}\n doc = document.split()\n [idx[term].append(i) for i, term in enumerate(doc, start=1) if term in search_terms]\n min_score = sys.maxint\n winning_slice = None\n for term in idx.keys(): # ignore duplicate terms\n for position in idx[term]:\n positions = [position]\n for other_term in idx.keys():\n distances = \\\n [int(math.fabs(position - x)) for x in idx[other_term]]\n positions.append(\n idx[other_term][distances.index(min(distances))])\n score = max(positions) - min(positions) + 1\n if score < min_score:\n winning_slice = (min(positions) - 1, max(positions),)\n min_score = score\n return \" \".join(doc[slice(*winning_slice)])", "def findTerms(self, text, terms, scope=50, includeAll=True):\n\t\tlistOfResults = list()\n\t\tlistOfMatchesMain = list()\n\t\tlistOfMatchesSecondary = list()\n\n\t\tappend = 
listOfResults.append\n\t\treplace\t= str.replace\n\n\t\tkeywordIndices = self.find(text, terms[0])\n\n\t\t# loop through the indices and check for dependencies if terms list has more than 1 term\n\t\tfor indices in keywordIndices:\n\n\t\t\tleading = text[indices[0]-scope:indices[0]]\n\t\t\ttrailing = text[indices[0]:indices[0]+scope]\n\n\t\t\tleading = replace(replace(leading, '\\n', '_'), '\\t', ' ') \n\t\t\ttrailing = replace(replace(trailing, '\\n', '_'), '\\t', ' ') \n\n\t\t\t# if terms list has more than 1 term (i.e., contextual terms), see if present within scope\n\t\t\tif len(terms) > 1:\n\n\t\t\t\t# loop through the contextual terms and check for presence within scope\n\t\t\t\tfor term in terms[1:]:\n\n\t\t\t\t\t# if term in either leading or trailing\n\t\t\t\t\tif (replace(term, '*', '') in leading.lower()) or (replace(term, '*', '') in trailing.lower()):\n\n\t\t\t\t\t\t# if '*' in term, do not add this context\n\t\t\t\t\t\tif '*' in term:\n\t\t\t\t\t\t\tpass\n\n\t\t\t\t\t\t# if '*' not indicated, add this context\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\texcerpt = leading + trailing\n\n\t\t\t\t\t\t\tif excerpt not in listOfResults:\n\t\t\t\t\t\t\t\tif includeAll==True:\n\t\t\t\t\t\t\t\t\tappend(excerpt+'\\t'+text[indices[0]:indices[1]]+'\\t'+term)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tappend(excerpt)\n\n\t\t\t# if terms list has 1 term, just append the excerpt\n\t\t\telse:\n\n\t\t\t\texcerpt = leading + trailing\n\n\t\t\t\tif excerpt not in listOfResults:\n\t\t\t\t\tif includeAll==True:\n\t\t\t\t\t\tappend(excerpt+'\\t'+text[indices[0]:indices[1]]+'\\t')\n\t\t\t\t\telse:\n\t\t\t\t\t\tappend(excerpt)\n\n\t\treturn listOfResults", "def search_from_terms(api, term, **kwargs):\n tweets=api.GetSearch(term=term)\n return {\"tweets\":tweets}", "def rankDocuments(terms, docs, index, idf, tf, rt, likes, score):\n \n # init docvectors and queryvector to dict and array of 0, to be filled later\n docVectors=collections.defaultdict(lambda: [0]*len(terms)) \n queryVector=[0]*len(terms) \n\n if score == \"1\":\n # compute the norm for the query tf\n query_terms_count = collections.Counter(terms) # get the frequency of each term in the query. \n \n query_norm = np.linalg.norm(list(query_terms_count.values()))\n \n for termIndex, term in enumerate(terms): #termIndex is the index of the term in the query\n if term not in index:\n continue\n \n ## Compute tf*idf(normalize tf as done with documents)\n queryVector[termIndex] = query_terms_count[term] / query_norm * idf[term]\n\n # Generate docVectors for matching docs\n for docIndex, (doc, postings) in enumerate(index[term]):\n # in form of [docIndex, (doc, postings)] \n if doc in docs:\n docVectors[doc][termIndex]=tf[term][docIndex] * idf[term]\n # calculate the score of each doc\n # compute the cosine similarity between queyVector and each docVector:\n docScores=[ [np.dot(curDocVec, queryVector), doc] for doc, curDocVec in docVectors.items() ]\n else:\n # as we just want cosine similarity but not use tf-idf, we're using the term frequency as a weight\n # in our custom ranking\n # compute the norm for the query tf\n query_terms_count = collections.Counter(terms) # get the frequency of each term in the query. 
\n \n query_norm = np.linalg.norm(list(query_terms_count.values()))\n \n for termIndex, term in enumerate(terms): #termIndex is the index of the term in the query\n if term not in index:\n continue\n \n ## Compute tf (normalize tf as done with documents)\n queryVector[termIndex] = query_terms_count[term] / query_norm \n\n # Generate docVectors for matching docs\n for docIndex, (doc, postings) in enumerate(index[term]):\n # in form of [docIndex, (doc, postings)] \n if doc in docs:\n docVectors[doc][termIndex]=tf[term][docIndex]\n # calculate the score of each doc\n # compute the cosine similarity and add rt and fav score\n # rt brings to more visibility than a like, hence a higher score\n docScores=[ [np.dot(curDocVec, queryVector) + 1.5*rt[doc] + likes[doc], doc] for doc, curDocVec in docVectors.items() ]\n docScores.sort(reverse=True)\n resultDocs=[x[1] for x in docScores]\n if len(resultDocs) == 0:\n print(\"No results found, try again\")\n return None \n return resultDocs", "def with_terms(model: Model, terms: Iterable[Term]):\n program: SWIProgram = model.solver.program\n if isinstance(program, SWIProgram):\n # cdb = ClauseDB(builtins={})\n # for c in terms:\n # cdb.add_statement(c)\n identifiers = list(x[0:2] for x in program.add_program(terms))\n model.solver.cache.invalidate()\n try:\n yield\n finally:\n for type_, idx in identifiers:\n if type_ == \"cl\":\n program.retract_clause(idx)\n elif type_ == \"fa\":\n program.retract_fact(idx)\n else:\n raise NotImplementedError(\n \"with_terms is currently only implemented for ApproximateEngine\"\n )", "def find_similar_terms(term, path_to_model, n=10):\n model = Word2Vec.load(path_to_model)\n similar_terms = model.wv.most_similar(term, topn=n)\n return similar_terms", "def index(self,\n path_in: str,\n path_out: str,\n # path_terms: str\n ) -> Tuple[Dict[str, int], Dict[int, str]]:\n self._docs_processed = 0\n self._start_time = time.time()\n\n # terms = set()\n # with open(path_terms, 'r', encoding='utf8') as fin:\n # for line in fin:\n # terms.add(line.strip('\\n'))\n\n word_to_idx = {}\n idx_to_word = {}\n i = 0\n corpus_idx = []\n for doc in get_docs(path_in):\n doc_idx = []\n for sent in doc:\n for word in sent:\n if word not in word_to_idx:\n word_to_idx[word] = i\n idx_to_word[i] = word\n i += 1\n idx_sent = [word_to_idx[word] for word in sent]\n doc_idx.append(idx_sent)\n corpus_idx.append(doc_idx)\n # doc_idx = []\n self._docs_processed += 1\n self._update_cmd_counter()\n\n if self._docs_processed % self._file_write_threshhold == 0:\n self._update_cmd_time_info()\n self.write_corpus(corpus_idx, path_out)\n corpus_idx = []\n\n self._update_cmd_time_info(end=True)\n self.write_corpus(corpus_idx, path_out)\n self._already_written_to_file = False\n return word_to_idx, idx_to_word", "def find_objects(self, terms=None, type=None, chunksize=None, **kwargs):\n type = type or self.default_object_type\n\n find_opts = {'chunksize' : chunksize}\n\n search_operators = {\n 'exact': '=',\n 'gt': '>',\n 'gte': '>=',\n 'lt': '<',\n\n 'lte': '<=',\n 'contains': '~'\n }\n\n if terms is not None:\n find_opts['terms'] = terms\n else:\n conditions = []\n for field, value in six.iteritems(kwargs):\n if '__' in field:\n field, filtr = field.split('__')\n if filtr not in search_operators:\n raise Exception(\"Unsupported search filter '%s'\" % filtr)\n op = search_operators[filtr]\n else:\n op = search_operators['contains'] # default search mode\n\n if field in self.search_fields_aliases:\n field = self.search_fields_aliases[field]\n if field not in 
self.search_fields:\n raise Exception(\"Error generating Fedora findObjects query: unknown search field '%s'\" \\\n % field)\n if ' ' in value:\n # if value contains whitespace, it must be delimited with single quotes\n value = \"'%s'\" % value\n conditions.append(\"%s%s%s\" % (field, op, value))\n\n query = ' '.join(conditions)\n find_opts['query'] = query\n\n r = self.api.findObjects(**find_opts)\n chunk = parse_xml_object(SearchResults, r.content, r.url)\n while True:\n for result in chunk.results:\n yield type(self.api, result.pid)\n\n if chunk.session_token:\n r = self.api.findObjects(session_token=chunk.session_token, **find_opts)\n chunk = parse_xml_object(SearchResults, r.content, r.url)\n else:\n break", "def _delete_multiple_objects_inverted_index_terms(self, objects):\n for type_name, (ivtidxes, object_ids) in objects.items():\n # Resolve object type name to id\n type_id = self._get_type_id(type_name)\n\n for ivtidx in ivtidxes:\n # Remove all terms for the inverted index associated with this\n # object. A trigger will decrement the count column in the\n # terms table for all term_id that get affected.\n self._db_query(\"DELETE FROM ivtidx_%s_terms_map WHERE object_type=? AND object_id IN %s\" % \\\n (ivtidx, _list_to_printable(object_ids)), (type_id,))\n self._inverted_indexes[ivtidx]['objectcount'] -= len(object_ids)", "def _delete_multiple_objects_inverted_index_terms(self, objects):\n for type_name, (ivtidxes, object_ids) in objects.items():\n # Resolve object type name to id\n type_id = self._get_type_id(type_name)\n\n for ivtidx in ivtidxes:\n # Remove all terms for the inverted index associated with this\n # object. A trigger will decrement the count column in the\n # terms table for all term_id that get affected.\n self._db_query(\"DELETE FROM ivtidx_%s_terms_map WHERE object_type=? AND object_id IN %s\" % \\\n (ivtidx, _list_to_printable(object_ids)), (type_id,))\n self._inverted_indexes[ivtidx]['objectcount'] -= len(object_ids)", "def search(query, idx):\n\n if len(query) == 0:\n return []\n ordered = {}\n for e in query:\n ordered[e] = len(idx[e])\n ordered = sorted(ordered.items(), key = lambda d: d[1])\n results = idx[ordered[0][0]]\n i = 1\n while i < len(ordered):\n results = intersect(results, idx[ordered[i][0]])\n i += 1\n return results", "def top_terms(self, nterms):\n return self.sql_session.query(Term)\\\n .filter(Term.term != '*')\\\n .order_by(desc(Term.relevance))[:nterms]" ]
[ "0.7995925", "0.75903076", "0.74195915", "0.64581156", "0.6457221", "0.59554505", "0.5865238", "0.5861748", "0.58322316", "0.5811512", "0.5693761", "0.5638802", "0.5617676", "0.5603782", "0.55951315", "0.55166435", "0.530402", "0.5279322", "0.52687097", "0.52252996", "0.51968676", "0.51794475", "0.5149694", "0.5143542", "0.51100653", "0.5042453", "0.5037883", "0.5037883", "0.50057054", "0.50052285" ]
0.8012516
0
Obtains terms for the given inverted index name. If associated is None, all terms for the inverted index are returned. The return value is a list of 2-tuples, where each tuple is (term, count). Count is the total number of objects that term is mapped to. Otherwise, associated is a specified list of terms, and only those terms which are mapped to objects in addition to the given associated terms will be returned. The return value is as above, except that count reflects the number of objects which have that term plus all of the given associated terms. For example, given an otherwise empty database, if you have an object with terms ['vacation', 'hawaii'] and two other objects with terms ['vacation', 'spain'] and the associated list passed is ['vacation'], the return value will be [('spain', 2), ('hawaii', 1)]. If prefix is not None, only those terms that begin with the specified prefix will be returned. This is useful, for example, for autocompletion while a user is typing a query. The returned lists are sorted with the highest counts appearing first.
def get_inverted_index_terms(self, ivtidx, associated = None, prefix = None): if ivtidx not in self._inverted_indexes: raise ValueError, "'%s' is not a registered inverted index." % ivtidx if prefix: where_clause = 'WHERE terms.term >= ? AND terms.term <= ?' where_values = (prefix, prefix + 'z') else: where_clause = '' where_values = () if not associated: return self._db_query('''SELECT term, count FROM ivtidx_%s_terms AS terms %s ORDER BY count DESC''' % (ivtidx, where_clause), where_values) rows = self._db_query('SELECT id FROM ivtidx_%s_terms WHERE term IN %s ORDER BY count' % \ (ivtidx, _list_to_printable(associated))) term_ids = [ x[0] for x in rows ] if len(term_ids) < len(associated): return [] query = '''SELECT term, COUNT(*) AS total FROM ivtidx_%s_terms_map AS t0''' % ivtidx for n, term_id in enumerate(term_ids): query += ''' JOIN ivtidx_%s_terms_map t%d ON t%d.object_type = t%d.object_type AND t%d.object_id = t%d.object_id AND t%d.term_id = %d''' % \ (ivtidx, n + 1, n, n + 1, n, n + 1, n + 1, term_id) query += ''' JOIN ivtidx_%s_terms AS terms ON t0.term_id = terms.id AND t0.term_id NOT IN %s %s GROUP BY t0.term_id ORDER BY total DESC ''' % \ (ivtidx, _list_to_printable(term_ids), where_clause) return self._db_query(query, where_values)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_inverted_index_terms(self, ivtidx, associated = None, prefix = None):\n if ivtidx not in self._inverted_indexes:\n raise ValueError(\"'%s' is not a registered inverted index.\" % ivtidx)\n\n if prefix:\n where_clause = 'WHERE terms.term >= ? AND terms.term <= ?'\n where_values = (prefix, prefix + 'z')\n else:\n where_clause = ''\n where_values = ()\n\n if not associated:\n return self._db_query('''SELECT term, count\n FROM ivtidx_%s_terms AS terms\n %s\n ORDER BY count DESC''' % (ivtidx, where_clause), where_values)\n\n\n rows = self._db_query('SELECT id FROM ivtidx_%s_terms WHERE term IN %s ORDER BY count' % \\\n (ivtidx, _list_to_printable(associated)))\n term_ids = [ x[0] for x in rows ]\n if len(term_ids) < len(associated):\n return []\n\n query = '''SELECT term, COUNT(*) AS total\n FROM ivtidx_%s_terms_map AS t0''' % ivtidx\n for n, term_id in enumerate(term_ids):\n query += ''' JOIN ivtidx_%s_terms_map t%d\n ON t%d.object_type = t%d.object_type AND\n t%d.object_id = t%d.object_id AND\n t%d.term_id = %d''' % \\\n (ivtidx, n + 1, n, n + 1, n, n + 1, n + 1, term_id)\n query += ''' JOIN ivtidx_%s_terms AS terms\n ON t0.term_id = terms.id AND\n t0.term_id NOT IN %s\n %s\n GROUP BY t0.term_id\n ORDER BY total DESC ''' % \\\n (ivtidx, _list_to_printable(term_ids), where_clause)\n return self._db_query(query, where_values)", "def enumerate_match(self, prefix: List[str]) -> List[str]:\n matched_terms = []\n cur = self._root\n for i, token in enumerate(prefix):\n if token not in cur.children:\n break\n cur = cur.children[token]\n if cur.is_term:\n item = \"\".join(prefix[:i+1])\n if item in self._masked_items:\n continue\n else:\n matched_terms.append(item)\n\n return matched_terms", "def autocomplete(trie, prefix, max_count=None):\n\n if type(prefix) != trie.type:\n raise TypeError\n\n result = []\n\n if max_count == 0:\n return result\n\n if prefix in trie:\n result.append((prefix, trie[prefix]))\n\n for i, elt in enumerate(prefix):\n if isinstance(prefix, tuple):\n elt = (elt,)\n\n if elt in trie.children:\n trie = trie.children[elt]\n else:\n return result\n\n if i == len(prefix) - 1:\n break\n\n for key, _ in trie:\n result.append((prefix + key, trie[key]))\n\n sorted_result = sorted(result, key=lambda x: x[1], reverse=True)\n\n if max_count is None or len(result) <= max_count:\n return [key for key, _ in sorted_result]\n\n return [key for key, _ in sorted_result[:max_count]]", "def advanced_search(self, terms, relation=None, index=0, limit=25, **kwargs):\n if not isinstance(terms, dict):\n raise TypeError(\"terms must be a dict\")\n # terms are sorted (for consistent tests between Python < 3.7 and >= 3.7)\n query = \" \".join(sorted(f'{k}:\"{v}\"' for (k, v) in terms.items()))\n return self.get_object(\n \"search\", relation=relation, q=query, index=index, limit=limit, **kwargs\n )", "def get_terms(self, documents=None, sortby=['TERM', 'COUNT'], ascending=[True, True], as_dict=False):\n terms = {}\n if documents == None:\n docs = self.vocab\n else:\n docs = documents\n for doc in docs:\n for term, count in doc['term_counts'].items():\n if term in terms.keys():\n terms[term] = terms[term] + count\n else:\n terms[term] = count\n # Create a dataframe\n terms = [(k, v) for k, v in terms.items()]\n df = pd.DataFrame(terms, columns=['TERM', 'COUNT']) \n df.sort_values(by=sortby, ascending=ascending, inplace=True)\n if as_dict:\n term_list = df.to_dict(orient='records')\n terms = {}\n for term in term_list:\n terms[term['TERM']] = term['COUNT']\n return terms\n else:\n return df", 
"def suggest_terms(self, fields, prefix, handler=\"terms\", **kwargs):\n params = {\"terms.fl\": fields, \"terms.prefix\": prefix}\n params.update(kwargs)\n response = self._suggest_terms(params, handler=handler)\n result = self.decoder.decode(response)\n terms = result.get(\"terms\", {})\n res = {}\n\n # in Solr 1.x the value of terms is list of elements with the field name\n # and a flat list of value, count pairs:\n # [\"field_name\", [\"dance\", 23, \"dancers\", 10, …]]\n #\n # in Solr 3+ the value of terms is a dict of field name and a flat list of\n # value, count pairs: {\"field_name\": [\"dance\", 23, \"dancers\", 10, …]}\n if isinstance(terms, (list, tuple)):\n terms = dict(zip(terms[0::2], terms[1::2]))\n\n for field, values in terms.items():\n tmp = []\n\n while values:\n tmp.append((values.pop(0), values.pop(0)))\n\n res[field] = tmp\n\n self.log.debug(\n \"Found '%d' Term suggestions results.\", sum(len(j) for i, j in res.items())\n )\n return res", "def _query_inverted_index(self, ivtidx, terms, limit = 100, object_type = None):\n t0 = time.time()\n # Fetch number of files the inverted index applies to. (Used in score\n # calculations.)\n objectcount = self._inverted_indexes[ivtidx]['objectcount']\n\n if not isinstance(terms, (list, tuple)):\n split = self._inverted_indexes[ivtidx]['split']\n if callable(split):\n terms = split(str_to_unicode(terms).lower())\n else:\n terms = split.split(str_to_unicode(terms).lower())\n else:\n terms = [ str_to_unicode(x).lower() for x in terms ]\n\n # Remove terms that aren't indexed (words less than minimum length\n # or and terms in the ignore list for this ivtidx).\n if self._inverted_indexes[ivtidx]['min']:\n terms = [ x for x in terms if len(x) >= self._inverted_indexes[ivtidx]['min'] ]\n if self._inverted_indexes[ivtidx]['ignore']:\n terms = [ x for x in terms if x not in self._inverted_indexes[ivtidx]['ignore'] ]\n\n terms_list = _list_to_printable(terms)\n nterms = len(terms)\n\n if nterms == 0:\n return []\n\n # Find term ids and order by least popular to most popular.\n rows = self._db_query('SELECT id,term,count FROM ivtidx_%s_terms WHERE ' \\\n 'term IN %s ORDER BY count' % (ivtidx, terms_list))\n save = map(lambda x: x.lower(), terms)\n terms = {}\n ids = []\n for row in rows:\n if row[2] == 0:\n return []\n\n # Give terms weight according to their order\n order_weight = 1 + len(save) - list(save).index(row[1])\n terms[row[0]] = {\n 'term': row[1],\n 'count': row[2],\n 'idf_t': math.log(objectcount / row[2] + 1) + order_weight,\n 'ids': {}\n }\n ids.append(row[0])\n\n # Not all the terms we requested are in the database, so we return\n # 0 results.\n if len(ids) < nterms:\n return []\n\n if object_type:\n # Resolve object type name to id\n object_type = self._get_type_id(object_type)\n\n results, state = {}, {}\n for id in ids:\n results[id] = {}\n state[id] = {\n 'offset': [0]*11,\n 'more': [True]*11,\n 'count': 0,\n 'done': False\n }\n\n all_results = {}\n if limit == None:\n limit = objectcount\n\n if limit <= 0 or objectcount <= 0:\n return {}\n\n sql_limit = min(limit*3, 200)\n finished = False\n nqueries = 0\n\n # Keep a dict keyed on object_id that we can use to narrow queries\n # once we have a full list of all objects that match a given term.\n id_constraints = None\n t1 = time.time()\n while not finished:\n for rank in range(10, -1, -1):\n for id in ids:\n if not state[id]['more'][rank] or state[id]['done']:\n # If there's no more results at this rank, or we know\n # we've already seen all the results for this term, 
we\n # don't bother with the query.\n continue\n\n q = 'SELECT object_type,object_id,frequency FROM ivtidx_%s_terms_map ' % ivtidx + \\\n 'WHERE term_id=? AND rank=? %s %%s LIMIT ? OFFSET ?'\n\n if object_type == None:\n q %= ''\n v = (id, rank, sql_limit, state[id][\"offset\"][rank])\n else:\n q %= 'AND object_type=?'\n v = (id, rank, object_type, sql_limit, state[id][\"offset\"][rank])\n\n if id_constraints:\n # We know about all objects that match one or more of the other\n # search terms, so we add the constraint that all rows for this\n # term match the others as well. Effectively we push the logic\n # to generate the intersection into the db.\n # XXX: This can't benefit from the index if object_type\n # is not specified.\n q %= ' AND object_id IN %s' % _list_to_printable(tuple(id_constraints))\n else:\n q %= ''\n\n rows = self._db_query(q, v)\n nqueries += 1\n state[id]['more'][rank] = len(rows) == sql_limit\n state[id]['count'] += len(rows)\n\n for row in rows:\n results[id][row[0], row[1]] = row[2] * terms[id]['idf_t']\n terms[id]['ids'][row[1]] = 1\n\n if state[id]['count'] >= terms[id]['count'] or \\\n (id_constraints and len(rows) == len(id_constraints)):\n # If we've now retrieved all objects for this term, or if\n # all the results we just got now intersect with our\n # constraints set, we're done this term and don't bother\n # querying it at other ranks.\n #print 'Done term '%s' at rank %d' % (terms[id]['term'], rank)\n state[id]['done'] = True\n if id_constraints is not None:\n id_constraints = id_constraints.intersection(terms[id]['ids'])\n else:\n id_constraints = set(terms[id]['ids'])\n #\n # end loop over terms\n\n\n for r in reduce(lambda a, b: set(a).intersection(b), results.values()):\n all_results[r] = 0\n for id in ids:\n if r in results[id]:\n all_results[r] += results[id][r]\n\n # If we have enough results already, no sense in querying the\n # next rank.\n if limit > 0 and len(all_results) > limit*2:\n finished = True\n #print \"Breaking at rank:\", rank\n break\n #\n # end loop over ranks\n\n if finished:\n break\n\n finished = True\n for index in range(len(ids)):\n id = ids[index]\n\n if index > 0:\n last_id = ids[index-1]\n a = results[last_id]\n b = results[id]\n intersect = set(a).intersection(b)\n\n if len(intersect) == 0:\n # Is there any more at any rank?\n a_more = b_more = False\n for rank in range(11):\n a_more = a_more or state[last_id]['more'][rank]\n b_more = b_more or state[id]['more'][rank]\n\n if not a_more and not b_more:\n # There's no intersection between these two search\n # terms and neither have more at any rank, so we\n # can stop the whole query.\n finished = True\n break\n\n # There's still hope of a match. Go through this term and\n # see if more exists at any rank, increasing offset and\n # unsetting finished flag so we iterate again.\n for rank in range(10, -1, -1):\n if state[id]['more'][rank] and not state[id]['done']:\n state[id]['offset'][rank] += sql_limit\n finished = False\n\n # If we haven't found enough results after this pass, grow our\n # limit so that we expand our search scope. 
(XXX: this value may\n # need empirical tweaking.)\n sql_limit *= 10\n\n # end loop while not finished\n log.info('%d results, did %d subqueries, %.04f seconds (%.04f overhead)',\n len(all_results), nqueries, time.time()-t0, t1-t0)\n return all_results", "def load_lst_terms(file_path, tag_name, result_dic=None, prefix=None):\n n_term = 0\n result_dic = terms if result_dic is None else result_dic\n with codecs.open(file_path, 'r', encoding='utf-8-sig') as fp:\n for line in fp:\n line = line.strip(' \\t\\r\\n')\n if len(line)<1 or line[0] == '%': continue #以%开头的行作为字典文件的注释\n\n t = '%s/%s' % (prefix,t) if prefix is not None else tag_name\n result_dic[line].add(t.lower())\n n_term += 1\n\n if debug:\n info = \"%6d terms with %6d tags loaded from dictionary [%s].\" % (n_term, 1, file_path)\n print info\n return (n_term, 1)", "def complete(index, prefix, text, field='form_suggest', size=100):\n response = { 'prefix': prefix, 'text':text, 'length': 0, 'complete': [] }\n \n key = \"word_completion\"\n body = {\n key: {\n \"text\": text,\n \"completion\": {\n \"field\": field,\n \"size\": size,\n \"context\": {\n \"prefix\": prefix\n }\n }\n }\n }\n res = index.suggest(body=body)\n #return res\n if key in res and res[key][0].get('length', 0) :\n complete = []\n \n options = res[key][0]['options']\n max_score = 0\n for opt in options:\n complete.append( {\n \"graph\": opt['payload']['graph'],\n \"lang\": opt['payload']['lang'],\n \"pos\": opt['payload']['pos'],\n \"form\": opt['payload']['form'],\n \"score\": opt['score'],\n \"output\": opt['text']\n })\n max_score = max(max_score, opt['score'])\n\n for v in complete:\n score = v['score']/max_score\n if text == v['form']:\n score +=1\n v['score'] = score\n\n complete.sort(key=lambda x : x['score'], reverse=True)\n \n response['length'] = len(complete)\n response['complete'] = complete\n response['size'] = size\n \n return response", "def _query_inverted_index(self, ivtidx, terms, limit = 100, object_type = None):\n t0 = time.time()\n # Fetch number of files the inverted index applies to. 
(Used in score\n # calculations.)\n objectcount = self._inverted_indexes[ivtidx]['objectcount']\n\n if not isinstance(terms, (list, tuple)):\n split = self._inverted_indexes[ivtidx]['split']\n if callable(split):\n terms = [term for term in split(tostr(terms).lower()) if term]\n else:\n terms = [term for term in split.split(tostr(terms).lower()) if term]\n else:\n terms = [ tostr(x).lower() for x in terms ]\n\n # Remove terms that aren't indexed (words less than minimum length\n # or and terms in the ignore list for this ivtidx).\n if self._inverted_indexes[ivtidx]['min']:\n terms = [ x for x in terms if len(x) >= self._inverted_indexes[ivtidx]['min'] ]\n if self._inverted_indexes[ivtidx]['ignore']:\n terms = [ x for x in terms if x not in self._inverted_indexes[ivtidx]['ignore'] ]\n\n terms_list = _list_to_printable(terms)\n nterms = len(terms)\n\n if nterms == 0:\n return []\n\n # Find term ids and order by least popular to most popular.\n rows = self._db_query('SELECT id,term,count FROM ivtidx_%s_terms WHERE ' \\\n 'term IN %s ORDER BY count' % (ivtidx, terms_list))\n save = [x.lower() for x in terms]\n terms = {}\n ids = []\n for row in rows:\n if row[2] == 0:\n return []\n\n # Give terms weight according to their order\n order_weight = 1 + len(save) - list(save).index(row[1])\n terms[row[0]] = {\n 'term': row[1],\n 'count': row[2],\n 'idf_t': math.log(objectcount / row[2] + 1) + order_weight,\n 'ids': {}\n }\n ids.append(row[0])\n\n # Not all the terms we requested are in the database, so we return\n # 0 results.\n if len(ids) < nterms:\n return []\n\n if object_type:\n # Resolve object type name to id\n object_type = self._get_type_id(object_type)\n\n results, state = {}, {}\n for id in ids:\n results[id] = {}\n state[id] = {\n 'offset': [0]*11,\n 'more': [True]*11,\n 'count': 0,\n 'done': False\n }\n\n all_results = {}\n if limit == None:\n limit = objectcount\n\n if limit <= 0 or objectcount <= 0:\n return {}\n\n sql_limit = min(limit*3, 200)\n finished = False\n nqueries = 0\n\n # Keep a dict keyed on object_id that we can use to narrow queries\n # once we have a full list of all objects that match a given term.\n id_constraints = None\n t1 = time.time()\n while not finished:\n for rank in range(10, -1, -1):\n for id in ids:\n if not state[id]['more'][rank] or state[id]['done']:\n # If there's no more results at this rank, or we know\n # we've already seen all the results for this term, we\n # don't bother with the query.\n continue\n\n q = 'SELECT object_type,object_id,frequency FROM ivtidx_%s_terms_map ' % ivtidx + \\\n 'WHERE term_id=? AND rank=? %s %%s LIMIT ? OFFSET ?'\n\n if object_type == None:\n q %= ''\n v = [id, rank, sql_limit, state[id][\"offset\"][rank]]\n else:\n q %= 'AND object_type=?'\n v = [id, rank, object_type, sql_limit, state[id][\"offset\"][rank]]\n\n if id_constraints:\n # We know about all objects that match one or more of the other\n # search terms, so we add the constraint that all rows for this\n # term match the others as well. 
Effectively we push the logic\n # to generate the intersection into the db.\n # XXX: This can't benefit from the index if object_type\n # is not specified.\n q %= ' AND object_id IN %s' % _list_to_printable(tuple(id_constraints))\n # But since we're specifying a list of ids to search for with this\n # term, we can't use limit/offset, since the constraints might be\n # different since the last iteration.\n v[-2:] = [-1, 0]\n else:\n q %= ''\n\n rows = self._db_query(q, v)\n nqueries += 1\n state[id]['more'][rank] = len(rows) == sql_limit\n state[id]['count'] += len(rows)\n\n for row in rows:\n results[id][row[0], row[1]] = row[2] * terms[id]['idf_t']\n terms[id]['ids'][row[1]] = 1\n\n if state[id]['count'] >= terms[id]['count'] or \\\n (id_constraints and len(rows) == len(id_constraints)):\n # If we've now retrieved all objects for this term, or if\n # all the results we just got now intersect with our\n # constraints set, we're done this term and don't bother\n # querying it at other ranks.\n #print \"Done term '%s' at rank %d\" % (terms[id]['term'], rank)\n state[id]['done'] = True\n if id_constraints is not None:\n id_constraints = id_constraints.intersection(terms[id]['ids'])\n else:\n id_constraints = set(terms[id]['ids'])\n #\n # end loop over terms\n\n\n for r in functools.reduce(lambda a, b: set(a).intersection(b), results.values()):\n all_results[r] = 0\n for id in ids:\n if r in results[id]:\n all_results[r] += results[id][r]\n\n # If we have enough results already, no sense in querying the\n # next rank.\n if limit > 0 and len(all_results) > limit*2:\n finished = True\n #print \"Breaking at rank:\", rank\n break\n #\n # end loop over ranks\n\n if finished:\n break\n\n finished = True\n for index in range(len(ids)):\n id = ids[index]\n\n if index > 0:\n last_id = ids[index-1]\n a = results[last_id]\n b = results[id]\n intersect = set(a).intersection(b)\n\n if len(intersect) == 0:\n # Is there any more at any rank?\n a_more = b_more = False\n for rank in range(11):\n a_more = a_more or state[last_id]['more'][rank]\n b_more = b_more or state[id]['more'][rank]\n\n if not a_more and not b_more:\n # There's no intersection between these two search\n # terms and neither have more at any rank, so we\n # can stop the whole query.\n finished = True\n break\n\n # There's still hope of a match. Go through this term and\n # see if more exists at any rank, increasing offset and\n # unsetting finished flag so we iterate again.\n for rank in range(10, -1, -1):\n if state[id]['more'][rank] and not state[id]['done']:\n state[id]['offset'][rank] += sql_limit\n finished = False\n\n # If we haven't found enough results after this pass, grow our\n # limit so that we expand our search scope. 
(XXX: this value may\n # need empirical tweaking.)\n sql_limit *= 10\n\n # end loop while not finished\n log.debug('%d results, did %d subqueries, %.04f seconds (%.04f overhead)',\n len(all_results), nqueries, time.time()-t0, t1-t0)\n return all_results", "def lookup_idf(self) -> list:\n self.__process()\n prox_by_doc = {}\n\n for token in self._query_vector:\n for token_info in self._index.get_token_search(token):\n doc = token_info.doc\n if doc not in prox_by_doc:\n prox_by_doc[doc] = 0\n prox_by_doc[doc] += self._query_vector[token] * token_info.weight\n\n return sorted(prox_by_doc.items(), key=lambda t: t[1], reverse=True)", "def facet_terms(facet):\n facetterms = []\n results = elasticsearch.facet_terms(settings.ELASTICSEARCH_HOST_PORT,\n settings.DOCUMENT_INDEX, facet['name'], order='term')\n if facet.get('terms', []):\n # precoordinate\n # IMPORTANT: topics and facility term IDs are int. All others are str.\n term_counts = {}\n for t in results['terms']:\n term_id = extract_term_id(t['term'])\n term_count = t['count']\n if term_id and term_count:\n term_counts[term_id] = term_count\n # make URLs for terms\n for term in facet['terms']:\n term['url'] = reverse('ui-search-term-query', args=(facet['id'], term['id']))\n # add counts to terms\n for term in facet['terms']:\n term_id = term['id']\n if isinstance(term_id, int):\n term_id = str(term_id)\n term['count'] = term_counts.get(term_id, 0)\n facetterms.append(term)\n else:\n # postcoordinate\n for t in results['terms']:\n t['title'] = t['term']\n t['description'] = ''\n t['url'] = '/search/%s:%s/' % (facet['id'], t['term'])\n facetterms.append(t)\n return facetterms", "def _add_object_inverted_index_terms(self, obj, ivtidx, terms):\n object_type, object_id = obj\n if not terms:\n return\n\n # Resolve object type name to id\n object_type = self._get_type_id(object_type)\n\n # Holds any of the given terms that already exist in the database\n # with their id and count.\n db_terms_count = {}\n\n terms_list = _list_to_printable([ t.lower() for t in terms.keys() ])\n q = \"SELECT id,term,count FROM ivtidx_%s_terms WHERE term IN %s\" % (ivtidx, terms_list)\n rows = self._db_query(q)\n for row in rows:\n db_terms_count[row[1]] = row[0], row[2]\n\n # For executemany queries later.\n update_list, map_list = [], []\n\n for term, score in terms.items():\n term = term.lower()\n if term not in db_terms_count:\n # New term, so insert it now.\n self._db_query('INSERT OR REPLACE INTO ivtidx_%s_terms VALUES(NULL, ?, 1)' % ivtidx, (term,))\n db_id, db_count = self._cursor.lastrowid, 1\n db_terms_count[term] = db_id, db_count\n else:\n db_id, db_count = db_terms_count[term]\n update_list.append((db_count + 1, db_id))\n\n map_list.append((int(score*10), db_id, object_type, object_id, score))\n\n self._db_query('UPDATE ivtidx_%s_terms SET count=? WHERE id=?' 
% ivtidx, update_list, many = True)\n self._db_query('INSERT INTO ivtidx_%s_terms_map VALUES(?, ?, ?, ?, ?)' % ivtidx, map_list, many = True)", "def docTermCountReducer(docname, values):\n values = list(values)\n # Total count of term across all docs\n N = sum(n for (term, payload, n) in values)\n for (term, payload, n) in values:\n yield (term, docname), (payload, n, N)", "def _score_terms(self, terms_list):\n terms_scores = {}\n total_terms = 0\n\n for terms, coeff, split, ivtidx in terms_list:\n if not terms:\n continue\n # Swap ivtidx name for inverted index definition dict\n ivtidx = self._inverted_indexes[ivtidx]\n if not isinstance(terms, (str, list, tuple)):\n raise ValueError(\"Invalid type (%s) for ATTR_INVERTED_INDEX attribute. \" \\\n \"Only sequence, unicode or str allowed.\" % str(type(terms)))\n\n if isinstance(terms, (list, tuple)):\n terms = [tostr(term) for term in terms]\n parsed = terms\n else:\n terms = tostr(terms)\n if callable(split):\n parsed = list(split(terms))\n else:\n parsed = split.split(terms)\n\n for term in parsed:\n if not term or (ivtidx['max'] and len(term) > ivtidx['max']) or \\\n (ivtidx['min'] and len(term) < ivtidx['min']):\n continue\n\n lower_term = term.lower()\n\n if ivtidx['ignore'] and lower_term in ivtidx['ignore']:\n continue\n if lower_term not in terms_scores:\n terms_scores[lower_term] = [term, coeff]\n else:\n terms_scores[lower_term][1] += coeff\n total_terms += 1\n\n # Score based on term frequency in document. (Add weight for\n # non-dictionary terms? Or longer terms?)\n for lower_term, score in terms_scores.items():\n terms_scores[lower_term][1] = math.sqrt(terms_scores[lower_term][1] / total_terms)\n return dict(terms_scores.values())", "def _score_terms(self, terms_list):\n terms_scores = {}\n total_terms = 0\n\n for terms, coeff, split, ivtidx in terms_list:\n if not terms:\n continue\n # Swap ivtidx name for inverted index definition dict\n ivtidx = self._inverted_indexes[ivtidx]\n if not isinstance(terms, (basestring, list, tuple)):\n raise ValueError, \"Invalid type (%s) for ATTR_INVERTED_INDEX attribute. \" \\\n \"Only sequence, unicode or str allowed.\" % str(type(terms))\n\n if isinstance(terms, (list, tuple)):\n parsed = terms\n else:\n if callable(split):\n parsed = split(terms)\n else:\n parsed = split.split(terms)\n\n for term in parsed:\n if not term or (ivtidx['max'] and len(term) > ivtidx['max']) or \\\n (ivtidx['min'] and len(term) < ivtidx['min']):\n continue\n\n term = str_to_unicode(term)\n lower_term = term.lower()\n\n if ivtidx['ignore'] and lower_term in ivtidx['ignore']:\n continue\n if lower_term not in terms_scores:\n terms_scores[lower_term] = [term, coeff]\n else:\n terms_scores[lower_term][1] += coeff\n total_terms += 1\n\n # Score based on term frequency in document. (Add weight for\n # non-dictionary terms? 
Or longer terms?)\n for lower_term, score in terms_scores.items():\n terms_scores[lower_term][1] = math.sqrt(terms_scores[lower_term][1] / total_terms)\n return dict(terms_scores.values())", "def count_doc_frequencies(self, docs):\n frequencyIndex = {}\n doc_id = 0\n for doc in docs:\n for term in doc:\n if term not in frequencyIndex:\n frequencyIndex[term] = [doc_id]\n else:\n for id in frequencyIndex[term]:\n if doc_id == id:\n break\n else:\n frequencyIndex[term].append(doc_id)\n doc_id+=1\n\n for term in frequencyIndex:\n occurences = len(frequencyIndex[term])\n frequencyIndex[term] = occurences\n\n return frequencyIndex", "def complete(self, prefix):\n words = list(set([word for word in self._vocabulary if word.startswith(prefix)]))\n if len(words) <= self._max_completions:\n return words\n else:\n return words[:self._max_completions]", "def count_terms(self, tokens):\n\n terms = [self.term_match(t) for t in tokens ]\n \n terms = [t for t in terms if t != None]\n\n #print terms\n lf = dict(Counter(terms))\n for k in lf:\n lf[k] /= float(len(tokens))\n #lf[k] = 1 # binarize?\n pass\n return lf", "def getNumberTerms(content): \n return Counter(getTerms(content))", "def lookup_in_taxonomy(results):\n from unidecode import unidecode\n\n base_url = \"http://taxonomy.projectchronos.eu/space/dbpediadocs/{}\"\n labels = []\n resource = None\n for res in results:\n res = unidecode(res)\n try:\n # print base_url.format(res)\n resource = retrieve_json(base_url.format(res))\n except Exception as e:\n print Exception('Cannot fetch taxonomy: ' + res.encode('ascii', 'replace') + ' ' + str(e))\n\n if resource and 'relatedConcepts' in resource.keys():\n for c in resource['relatedConcepts']:\n if c:\n label = c[c.rfind('/') + 1:].replace('+', ' ')\n # print 'Found! ' + label\n labels.append(str(label))\n return set(labels)", "def index_terms(self):\n [[self.set_postings(term, id) for term in NLProcessor.process(doc)] for id, doc in\n self.doc_store.dict.iteritems()]", "def _add_object_inverted_index_terms(self, (object_type, object_id), ivtidx, terms):\n if not terms:\n return\n\n # Resolve object type name to id\n object_type = self._get_type_id(object_type)\n\n # Holds any of the given terms that already exist in the database\n # with their id and count.\n db_terms_count = {}\n\n terms_list = _list_to_printable([ t.lower() for t in terms.keys() ])\n q = \"SELECT id,term,count FROM ivtidx_%s_terms WHERE term IN %s\" % (ivtidx, terms_list)\n rows = self._db_query(q)\n for row in rows:\n db_terms_count[row[1]] = row[0], row[2]\n\n # For executemany queries later.\n update_list, map_list = [], []\n\n for term, score in terms.items():\n term = term.lower()\n if term not in db_terms_count:\n # New term, so insert it now.\n self._db_query('INSERT OR REPLACE INTO ivtidx_%s_terms VALUES(NULL, ?, 1)' % ivtidx, (term,))\n db_id, db_count = self._cursor.lastrowid, 1\n db_terms_count[term] = db_id, db_count\n else:\n db_id, db_count = db_terms_count[term]\n update_list.append((db_count + 1, db_id))\n\n map_list.append((int(score*10), db_id, object_type, object_id, score))\n\n self._db_query('UPDATE ivtidx_%s_terms SET count=? WHERE id=?' 
% ivtidx, update_list, many = True)\n self._db_query('INSERT INTO ivtidx_%s_terms_map VALUES(?, ?, ?, ?, ?)' % ivtidx, map_list, many = True)", "def index_search(files, index, terms):\n\n\n termlist = set()\n\n for i in range(len(terms)):\n for j in range(len(terms[i].split(\" \"))):\n\n termlist.add(terms[i].split(\" \")[j])\n\n indexlist = [index[w] for w in termlist]\n\n intersect = list(set.intersection(*indexlist))\n\n return [files[x] for x in intersect]", "def getNamespaceIndexByPrefix(self, *args):\n return _libsbml.XMLToken_getNamespaceIndexByPrefix(self, *args)", "def add(self, term, count=1):\n term = term.lower() if self.lower else term\n if term in self.term2id:\n idx = self.term2id[term]\n else:\n idx = len(self.id2term)\n self.id2term[idx] = term\n self.term2id[term] = idx\n if count > 0:\n if term in self.term_frequent:\n self.term_frequent[term] += count\n else:\n self.term_frequent[term] = count\n return idx", "def get_concept_occurrences(corpus_file, concepts_file):\n # Load corpus and concept vectors\n concept_taxonomy, concepts = load_taxonomy(concepts_file)\n corpus = load_corpus(corpus_file, concepts)\n # Process corpus and return only concept terms on a per-document-sentence\n # level\n doc_concepts = detect_corpus_concepts(corpus, concept_taxonomy)\n # Get all sentence-level concept pairs for each document\n p = Pool(4)\n doc_pairs = map(detect_sent_concept_pairs, doc_concepts)\n p.close()\n p.join()\n # Get sentence-level pair-wise occurrence matrix\n sent_pairs = reduce(list.__add__, doc_pairs)\n pair_counts = combine_weights(sent_pairs)\n concepts = [c for c in concept_taxonomy]\n pair_matrix = []\n for concept1 in concepts:\n row = []\n for concept2 in concepts:\n pair_count = 0\n for count in pair_counts:\n if (concept1 in count[0]) and (concept2 in count[0]):\n pair_count += count[1]\n row.append(pair_count)\n pair_matrix.append(row)\n # Write results\n write_concept_results(doc_pairs, pair_matrix, concept_taxonomy)\n # Save results\n with open('../work/doc_pairs.pickle', 'w') as f:\n pickle.dump(doc_pairs, f)\n # Save results\n with open('../work/pair_matrix.pickle', 'w') as f:\n pickle.dump(pair_matrix, f)\n # Save results\n with open('../work/concept_taxonomy.pickle', 'w') as f:\n pickle.dump(concept_taxonomy, f)\n\n return doc_pairs, pair_matrix", "def _get_ngrams_with_counter(segment, max_order):\n ngram_counts = collections.Counter()\n for order in xrange(1, max_order + 1):\n for i in xrange(0, len(segment) - order + 1):\n ngram = tuple(segment[i:i + order])\n ngram_counts[ngram] += 1\n return ngram_counts", "def count_terms_in_doc(pii, terms):\n\n # We can't pass the trie as an argument when using multiprocessing.\n trie = NoAho()\n for term in terms:\n trie.add(term)\n\n file_path = os.path.join(outdir, 'sd-download', pii + '-full.xml')\n text = io.open(file_path, 'r', encoding='utf8').read().lower()\n matches = [text[x[0]:x[1]] for x in trie.findall_long(text)]\n\n return [pii, len(matches), len(set(matches))]", "def corpusTermCountReducer(term, values):\n values = list(values)\n d = sum(c for (docname, payload, n, N, c) in values)\n for (docname, payload, n, N) in (v[:4] for v in values):\n yield docname, (term, payload, n, N, d)" ]
[ "0.72814137", "0.5312216", "0.51168823", "0.50760156", "0.5040851", "0.49835056", "0.48447284", "0.4833117", "0.48034608", "0.47988817", "0.4782228", "0.46925923", "0.46841052", "0.4615586", "0.461366", "0.45966607", "0.45822227", "0.45699117", "0.45678702", "0.45583963", "0.45541042", "0.4553358", "0.4535828", "0.45008442", "0.44907507", "0.4487746", "0.44839835", "0.4482122", "0.44683346", "0.44662422" ]
0.7293714
0
Test that trailing @ used for extracting does not interfere with untag.
def test_untag_with_trailing_extract(self): fields_to_test = { 'foo@': 'bar-base', 'foo@de@': 'bar-de', 'foo@(.*_FR|.*_SG)@': 'bar-fr', 'nested': { 'nested@': 'nested-base', 'nested@de_AT@': 'nested-de', 'nested@(.*_FR|.*_SG)@': 'nested-fr', }, } fields = copy.deepcopy(fields_to_test) self.assertDictEqual({ 'foo': 'bar-base', 'nested': { 'nested': 'nested-base', }, }, document_fields.DocumentFields.untag(fields, locale='fr')) self.assertDictEqual({ 'foo': 'bar-fr', 'nested': { 'nested': 'nested-fr', }, }, document_fields.DocumentFields.untag(fields, locale='fr_FR')) self.assertDictEqual({ 'foo': 'bar-base', 'nested': { 'nested': 'nested-base', }, }, document_fields.DocumentFields.untag(fields, locale='fr_CA')) self.assertDictEqual({ 'foo': 'bar-de', 'nested': { 'nested': 'nested-base', }, }, document_fields.DocumentFields.untag(fields, locale='de')) self.assertDictEqual({ 'foo': 'bar-base', 'nested': { 'nested': 'nested-de', }, }, document_fields.DocumentFields.untag(fields, locale='de_AT'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mention(result):\n return result.text.find('@') != -1", "def testUnindentedFields(self):\n self.checkParse(\"\"\"\n This is a paragraph.\n \n @foo: This is a field.\"\"\")\n \n self.checkParse(\"\"\"\n This is a paragraph.\n @foo: This is a field.\"\"\")\n \n self.checkParse(\"\"\"\n This is a paragraph.\n @foo: This is a field.\n Hello.\"\"\")\n \n self.checkParse(\"\"\"\n This is a paragraph.\n @foo: This is a field.\n Hello.\"\"\")\n self.checkParse(\"\"\"Paragraph\\n@foo: field\"\"\")\n self.checkParse(\"\"\"Paragraph\\n\\n@foo: field\"\"\")\n self.checkParse(\"\"\"\\nParagraph\\n@foo: field\"\"\")", "def test_trailing_data(self):", "def testLeadingAndTrailingText(self):\n self.assertEqual([\"http://123.123.123.123\"], grab('fooasdf asdf a http://123.123.123.123 asdfasdf', self.needScheme))", "def test_missing_delim(self):", "def clean_chunk(chunk):\n return '\\n'.join([x[1:] for x in chunk.split('\\n')\n if x and x[0] not in ('-', '@')])", "def test_drop_emails():\n cleaner = TextCleaner()\n assert cleaner.transform([[\"[email protected]\"]])[\"corpus\"][0] == \"\"\n assert not cleaner.drops[\"email\"].dropna().empty", "def username_test(self):\n text = 'test @username'\n html = 'test <a href=\"https://www.instagram.com/username/\">@username</a>'\n self.assertEqual(linkify_text(text), html)", "def _handle_ellipsis(value: Any, annotation: Any) -> bool:\n return value == ...", "def mixed_symbols(self):\n \n arg = '@twe!et test case'\n actual = tweets.extract_mentions(arg)\n expected = ['twe']\n msg = \"Expected {}, but returned {}\".format(expected, actual)\n self.assertEqual(actual, expected, msg)", "def test_parse_quotes_no_author(self):\n with self.assertRaisesRegexp(Exception, re.escape('an author was not included with the quote. Expecting '\n 'quote in the format \\\"<quote> - <author>\\\".')):\n api.parse_quote(\"This is a quote. 
| | Publication | tag1, tag2 , tag3 \", simple_format=False)", "def removeAtUser(text):\n text = re.sub('@[^\\s]+','',text)\n return text", "def testTrailingSpaces(self):\n self.assertEqual([\"http://tomtom.foobar.org/\"], grab('http://tomtom.foobar.org/ ', self.needScheme))\n self.assertEqual([\"http://www.foobi.org/saatoimia\"], grab('http://www.foobi.org/saatoimia ', self.needScheme))", "def test_drop_trailing_questionmark():\n assert normalize_url(\"http://example.com/?\") == \"http://example.com/\"\n assert normalize_url(\"http://example.com?\") == \"http://example.com/\"\n assert normalize_url(\"http://example.com/a?\") == \"http://example.com/a\"\n assert normalize_url(\"http://example.com/a/?\") == \"http://example.com/a\"", "def test_drop_punctuation():\n assert TextCleaner().transform([[\"'test!?\"]])[\"corpus\"][0] == \"test\"", "def bpe_postprocess(string) -> str:\n return string.replace(\"@@ \", \"\")", "def parse_for_query(query):\n index = query.find('@')\n if index == -1:\n return \"\"\n elif index == len(query)-1:\n # Make sure the final return doesn't index outside the list.\n return \"\"\n else:\n return query[index+1:]", "def remove_mentions(text):\r\n text = re.sub(r'@\\S+', '', text)\r\n text = re.sub(r'@', \"at\", text)\r\n return text", "def remove_mentions(self,text):\n return re.sub(r'@\\w+', '', text)", "def test_remove_extra_slash():\n # TODO: Should we actually do this?\n # TODO: See https://webmasters.stackexchange.com/questions/8354/what-does-the-double-slash-mean-in-urls/8381#8381\n assert (normalize_url(\"http://www.example.com/foo//bar.html\") ==\n \"http://www.example.com/foo/bar.html\")\n assert(normalize_url(\"http://example.com///abc\") ==\n \"http://example.com/abc\")", "def test_extra_suffix(self):\n nt = NewickTokenizer(newick='(a,(b,c));suffix')\n self.assertRaises(ValueError, nt.tokens)", "def remove_at_symbols(text):\n return text.replace('@', '')", "def have_at_symbol(l):\r\n if \"@\" in str(l):\r\n return 1\r\n else:\r\n return 0", "def is_attname(name):\n test = re.search(\"^@[a-z]+\", name)\n if test:\n return True\n else:\n return False", "def clean_mentions(self, tweet):\n self.mentions = [tag.strip('@') for tag in tweet.split() if tag.startswith('@')]\n\n for mention in self.mentions:\n tweet = tweet.replace('@'+mention, '')\n\n tweet = self.clean_unnecessary_whitespaces(tweet)\n\n return tweet", "def test_strip_bad(self):\n # have to turn off check to get bad data in; no longer preserves case\n self.assertEqual(\n self.RNA(\"UCxxxAGwsnyrHBNzzzD-D\", check=False).strip_bad(),\n \"UCAGWSNYRHBND-D\",\n )\n self.assertEqual(self.RNA(\"@#^*($@!#&()!@QZX\", check=False).strip_bad(), \"\")\n self.assertEqual(\n self.RNA(\"aaaxggg---!ccc\", check=False).strip_bad(), \"AAAGGG---CCC\"\n )", "def test_extract_twitter_description_meta_00(input_, expected):\n actual = regex.match_twitter_description_meta(input_)\n assert actual == expected", "def filter_mentions(text):\n return re.sub(\"@\\S+\", \"\", text)", "def postparse_hook_undeclared_return_annotation(self, data: cmd2.plugin.PostparsingData):\n pass", "def test_parse_quotes_no_quote(self):\n with self.assertRaisesRegexp(Exception, 'a quote was not found'):\n api.parse_quote(\"| Author | Publication | tag1, tag2 , tag3 \", simple_format=False)" ]
[ "0.6034205", "0.59515476", "0.59357536", "0.58285934", "0.57458454", "0.54263383", "0.5389915", "0.5322321", "0.53162247", "0.527235", "0.5249418", "0.5217515", "0.5167631", "0.5140492", "0.5128733", "0.5124771", "0.5098799", "0.5088662", "0.50590724", "0.5033587", "0.50318456", "0.5030856", "0.5026558", "0.49942648", "0.49876288", "0.49843183", "0.49755862", "0.49752042", "0.49514446", "0.49496225" ]
0.6260769
0
Test that not having a base key does not interfere with untag and locales.
def test_untag_with_no_base(self): fields_to_test = { 'foo@de': 'bar-de', 'baz@de': { 'fum@de': 'boo-de' }, } fields = copy.deepcopy(fields_to_test) self.assertDictEqual({}, document_fields.DocumentFields.untag(fields)) self.assertDictEqual({ 'foo': 'bar-de', 'baz': { 'fum': 'boo-de', }, }, document_fields.DocumentFields.untag(fields, locale='de'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_unset_key(self):\n context = {'help_key': 'unused-key'}\n self.assertRaises(\n ImproperlyConfigured,\n tags.madcap_flare_help,\n context)", "def __missing__(self, key):\n global MISSING\n MISSING = key # For debugging - save name of missing key\n return INVALID", "def test_missingKey(self):\n self.assertIsNone(self.users.key(\"mystery domain\", \"mystery user\"))", "def test_strip_unnecessary_keys():\n assert len(strip_unnecessary_keys(EXAMPLE_GOOD_OFFER)) == len(OUTPUT_KEYS)\n assert strip_unnecessary_keys(EXAMPLE_BAD_OFFER) is None", "def test_map_missing_key_encountered():\n with pytest.raises(KeyError):\n Map().read_key(10, b\"\")", "def testMissingKeys(self):\n self.assertRaises(ValueError,\n self.unauth.table,\n self.dataset,\n self.table)", "def _check_key(self, key):\n raise NotImplementedError", "def test_throws_base_price_missing_key(self):\n with pytest.raises(marshmallow.ValidationError):\n BasePrice.Schema().loads(json.dumps(base_price_missing_key))", "def test_invalid_key(self):\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar('221b=\"starts with number\"')\n assert 'Invalid key' in str(err.value)\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar('_=\"not assignable\"')\n assert 'Invalid key' in str(err.value)\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar('o-o=\"invalid character\"')\n assert 'Invalid key' in str(err.value)", "def test_get_key_not_defined_yet(self):\n storage = SessionStorage()\n\n self.assertNotIn('key1', storage)\n s1 = storage['key1']\n self.assertIn('key1', storage)\n\n self.assertNotIn('key2', storage)\n s2 = storage['key2']\n self.assertIn('key2', storage)\n\n self.assertIsNot(s1, s2)", "def isKeyEmpty(k):\r\n if k=={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}:\r\n return True\r\n return False", "def test_missing_entry_raises_KeyError(self):\n with self.assertRaises(KeyError):\n self.phonebook.lookup(\"missing\")", "def test_templatetags_search_tags_rfc_5646_locale_absent(self):\n with self.assertRaises(ImproperlyConfigured):\n rfc_5646_locale(\"fr_BE\")\n\n with self.assertRaises(ImproperlyConfigured):\n rfc_5646_locale(\"it\")", "def _KeyMissing(side):\n return 'Key missing from %s' % side", "def test_invalid_chars_location(self):\r\n course_key = SlashSeparatedCourseKey(u'org.dept-1%2', u'course.sub-2%3', u'run.faster-4%5')\r\n valid_base = course_key.make_usage_key('tomato-again%9', 'block-head:sub-4%9')\r\n for key in SlashSeparatedCourseKey.KEY_FIELDS:\r\n with self.assertRaises(InvalidKeyError):\r\n # this ends up calling the constructor where the legality check should occur\r\n valid_base.replace(**{key: u'funny thing'})", "def test_key_not_found(self):\n self.expect_datatore_lookup('SomeBlobKey', False)\n self.mox.ReplayAll()\n self.assertResponse('404 %s' % httplib.responses[404], [], '', self.app,\n self._environ)", "def test_bad_with_no_prior_key(self):\n # config seems to be shared across tests, so we have to specifically set\n # it to None.\n config.set(xsrf_token_key=None)\n tool = utils.XsrfTool()\n timestamp = utils.get_timestamp(XsrfToolTests.TEST_NOW)\n self.assertFalse(\n tool.verify_token('NotTheRightDigest/%f' % timestamp, 12345,\n 'test_action'))", "def test_key_no_data(self):\n key = 
Key({})\n\n assert key.warning is None\n assert key.in_car is None", "def __missing__(self, key):\n raise KeyNotInContextError(f\"{key} not found in the pypyr context.\")", "def test_untag_none(self):\n untag = document_fields.DocumentFields.untag\n fields_to_test = {\n 'foo': 'base',\n '[email protected]': None,\n }\n fields = copy.deepcopy(fields_to_test)\n self.assertDictEqual({\n 'foo': 'base',\n }, untag(fields, locale=None, params={'env': None}))\n self.assertDictEqual({\n 'foo': None,\n }, untag(fields, locale=None, params={'env': 'prod'}))\n\n fields_to_test = {\n 'nested': {\n 'foo': 'nested-base',\n },\n 'nested@de': {\n 'foo': 'nested-de-base',\n '[email protected]': None,\n }\n }\n fields = copy.deepcopy(fields_to_test)\n self.assertDictEqual({\n 'nested': {\n 'foo': 'nested-base',\n },\n }, untag(fields, locale=None, params={'env': None}))\n self.assertDictEqual({\n 'nested': {\n 'foo': 'nested-base',\n },\n }, untag(fields, locale=None, params={'env': 'dev'}))\n self.assertDictEqual({\n 'nested': {\n 'foo': None,\n },\n }, untag(fields, locale='de', params={'env': 'prod'}))", "def __missing__(self, key):\n return key", "def test_meta_data_is_not_inherited(self):", "def test_key_none(self):\n try:\n AlphaVantage()\n self.fail(msg='A None api key must raise an error')\n except ValueError:\n self.assertTrue(True)", "def test_key_none(self):\n try:\n AlphaVantage()\n self.fail(msg='A None api key must raise an error')\n except ValueError:\n self.assertTrue(True)", "def test_process_dict_false(self):\n\n self.assertNotIn('userB@domain', self.temp_set)", "def test_nonexisting_string_not_contained(tricky_trie):\n assert not tricky_trie.contains('no')", "def test_invalid_chars_ssck(self):\r\n valid_base = SlashSeparatedCourseKey(u'org.dept-1%2', u'course.sub-2%3', u'run.faster-4%5')\r\n for key in SlashSeparatedCourseKey.KEY_FIELDS:\r\n with self.assertRaises(InvalidKeyError):\r\n # this ends up calling the constructor where the legality check should occur\r\n valid_base.replace(**{key: u'funny thing'})", "def test_blank_key(self):\n with self.assertRaises(ConfigError) as cm:\n imageroller.main.read_authconfig(\n imageroller.test.get_config_parser(self._blank_key))\n self.assertEqual(str(cm.exception), \"AuthConfig must contain ApiKey\")", "def test_remove_word_raises_key_error_when_word_not_in_trie(full_trie):\n with pytest.raises(KeyError):\n full_trie.remove(\"tool\")", "def test_get_invalid_key(test_file):\n md = OSXMetaData(test_file.name)\n with pytest.raises(KeyError):\n md[\"invalid_key\"]" ]
[ "0.61089957", "0.60943186", "0.6040597", "0.60356236", "0.58925235", "0.58480316", "0.5847725", "0.5817884", "0.5728378", "0.5715855", "0.57137895", "0.57015187", "0.5691672", "0.5673796", "0.56599385", "0.56495875", "0.5617581", "0.56079537", "0.5602262", "0.55924207", "0.55764776", "0.5566246", "0.55577964", "0.55577964", "0.5554854", "0.5553501", "0.55407214", "0.55180836", "0.5513944", "0.5495078" ]
0.6586597
0
Untag when there is a none value for the tagged value.
def test_untag_none(self): untag = document_fields.DocumentFields.untag fields_to_test = { 'foo': 'base', '[email protected]': None, } fields = copy.deepcopy(fields_to_test) self.assertDictEqual({ 'foo': 'base', }, untag(fields, locale=None, params={'env': None})) self.assertDictEqual({ 'foo': None, }, untag(fields, locale=None, params={'env': 'prod'})) fields_to_test = { 'nested': { 'foo': 'nested-base', }, 'nested@de': { 'foo': 'nested-de-base', '[email protected]': None, } } fields = copy.deepcopy(fields_to_test) self.assertDictEqual({ 'nested': { 'foo': 'nested-base', }, }, untag(fields, locale=None, params={'env': None})) self.assertDictEqual({ 'nested': { 'foo': 'nested-base', }, }, untag(fields, locale=None, params={'env': 'dev'})) self.assertDictEqual({ 'nested': { 'foo': None, }, }, untag(fields, locale='de', params={'env': 'prod'}))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove(self, value):\n tags = self.__all_tags()\n if value in tags:\n tags.remove(value)\n self.__post_changes(tags)", "def testNoneValue(self):\n objectID = uuid4()\n user = createUser(u'username', u'password', u'User',\n u'[email protected]')\n namespace = createNamespace(user, u'name')\n tag = createTag(user, namespace, u'tag')\n self.store.add(TagValue(user.id, tag.id, objectID, None))", "def _issingleton(self, tagname):\n return self.shortempty", "def pop_tag(data):\n if data and is_tag(data[0]):\n return data.pop(0)", "def remove_tag(self, key, value=None):\r\n if value:\r\n tags = {key : value}\r\n else:\r\n tags = [key]\r\n status = self.connection.delete_tags([self.id], tags)\r\n if key in self.tags:\r\n del self.tags[key]", "def delval(self):\r\n self.value = None", "def test_remove_tag_from_derived_metric(self):\n pass", "def _remove_none(self, data):\r\n for key, value in data.items():\r\n if value is None or isinstance(value, forms.Field):\r\n del data[key]\r\n if isinstance(value, dict):\r\n self._remove_none(data[key])", "def untag(self, tag):\n if isinstance(tag, six.integer_types):\n try:\n tag = Tag.objects.get(pk=tag, owner=self.owner)\n except Tag.DoesNotExist:\n return\n \n if isinstance(tag, six.string_types):\n try:\n tag = Tag.objects.get(slug=makeslug(tag), owner=self.owner)\n except Tag.DoesNotExist:\n return\n \n self.tags.remove(tag)", "def clean_value(tag):\n value = tag.get('v')\n if is_street_name(tag):\n return update_name(value, mapping)\n elif is_state(tag):\n return update_state(value)\n elif is_phone(tag):\n return update_phone(value)\n elif is_postcode(tag):\n return update_postcode(value)\n else:\n return value", "def NULL(self, t):\n t.value = None\n return t", "def test_removal_does_not_raise_on_nonexistent_tag(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'untag',\n 'tags': ['tag-does-not-exist']},\n ],\n })\n\n # verify initial tag set is empty\n s = Session()\n client = s.client('azure.mgmt.compute.ComputeManagementClient')\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'testtag': 'testvalue'})\n\n raised = False\n try:\n p.run()\n except KeyError:\n raised = True\n\n # verify no exception raised and no changes to tags on resource\n self.assertFalse(raised)\n self.assertEqual(vm.tags, {'testtag': 'testvalue'})", "def clearValue(self, d, name):\r\n \r\n if name in d:\r\n d[name] = None", "def test_avoids_bombing_on_none(self):\r\n test_value = None\r\n self.assertEqual(set(), suggest_tags(test_value))", "def testRemoveTagValueRemovesLink(self):\n user = createUser(u'name', u'password', u'User', u'[email protected]')\n user.namespaceID = createNamespace(user, user.username, None).id\n tag = createTag(user, user.namespace, u'tag')\n value = TagValue(user.id, tag.id, uuid4(), None)\n self.store.add(value)\n self.store.flush()\n fileID = 'f' * 64\n self.store.add(OpaqueValue(fileID, 'content'))\n self.store.add(OpaqueValueLink(value.id, fileID))\n self.store.remove(value)\n self.store.flush()\n result = self.store.find(OpaqueValueLink,\n OpaqueValueLink.fileID == fileID).one()\n self.assertIdentical(None, result)", "def remove_tag(args):", "def unset(self) -> None:\n self.val = None\n self.notes = []", "def untag():\n version = git.prompt_tag('Which tag to delete?')\n if not version:\n abort('No available version tag')\n 
git.delete_tag(version)", "def unhideValue(self, value: Any) -> None:\r\n if value in self._hiddenValue:\r\n self._hiddenValue.remove(value)\r\n else:\r\n raise VariableError", "def clean_nodes_no_names(tag, data):\n\tif not isinstance(tag, tuple):\n\t\tfor each in data:\n\t\t\tif each['k'] != [] and each['v'] != []:\n\t\t\t\tif tag in each['k'] and 'name' not in each['k']:\n\t\t\t\t\teach['removed'] = 'true'\n\t\t\t\t\ttagValueData = dict(zip(each['k'], each['v']))\n\t\t\t\t\tif tagValueData.get('amenity') == 'atm':\n\t\t\t\t\t\teach['removed'] = 'false'\n\t\t\tyield each\n\telse:\n\t\tfor each in data:\n\t\t\tif each['k'] != [] and each['v'] != []:\n\t\t\t\tif tag[0] in each['k'] and tag[1] in each['v'] and 'name' not in each['k']:\n\t\t\t\t\teach['removed'] = 'true'\n\t\t\tyield each", "def discard(self, value):\r\n raise NotImplementedError", "def untag_element(self,tag_name,element):\n pass", "def remove_tag(self, tag):\n _tag_entity('task', self.task_id, tag, untag=True)", "def remove_tag(self, tag):\n if tag in self.tags:\n index = self.tags.index(tag)\n self.tags[index:index + 1] = []\n self.stop_times[index:index + 1] = []", "def strip_tags(value):\n if value:\n return re.sub(r'<[^>]*?>', '', value)\n return \"\"", "def strip_tags(value):\n if value:\n return re.sub(r'<[^>]*?>', '', value)\n return \"\"", "def untag(self, uuid, tags=None):\n if isinstance(tags, basestring):\n tags = [tags]\n\n self._backend.untag(uuid, tags)", "def remove(self, value):\n pass", "def unsetInitialValue(self):\n return _libsbml.Trigger_unsetInitialValue(self)", "def strip_tags(cls, value):\n if value == None:\n return None\n\n s = re.sub(r'<\\/?p>', ' ', '%s' % value)\n s = re.sub(r'<[^>]*?>', '', s)\n return cls.compress_whitespace(s)" ]
[ "0.6135915", "0.5967588", "0.59159297", "0.580774", "0.57716626", "0.57520264", "0.56370187", "0.56107867", "0.5567301", "0.5549925", "0.55275375", "0.55266446", "0.54774225", "0.5460869", "0.54421735", "0.54349834", "0.54182965", "0.54049665", "0.5401856", "0.5396873", "0.5364972", "0.5294987", "0.5217467", "0.52120376", "0.5203987", "0.5203987", "0.5163513", "0.51609254", "0.5140107", "0.51287735" ]
0.64906526
0
Summary of the time series. include mean, std, max, min and range
def summaryone(x): print 'mean and std are ',np.mean(x), np.std(x) print 'max and min are ',np.max(x), np.min(x) print 'the range is ',np.max(x)-np.min(x)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_time_series_stats(time_series):\n return pd.Series([np.mean(time_series), np.std(time_series), get_frequency(time_series)])", "def show_stats(x):\n print(\"min =\", x.min())\n print(\"max =\", x.max())\n print(\"median =\", np.median(x))\n print(\"average =\", x.mean())\n print(\"std =\", x.std())", "def test_analyze_time_series_std():\n\n statistics = analyze_time_series(np.ones(10))\n\n assert statistics.n_total_points == 10\n assert statistics.n_uncorrelated_points == 1\n assert np.isclose(statistics.statistical_inefficiency, 10.0)\n assert statistics.equilibration_index == 0", "def get_summary_stats(self):\r\n n = len(self.results)\r\n\r\n if n == 0:\r\n mean = None\r\n stdev = None\r\n\r\n elif n == 1:\r\n mean = numpy.mean(self.results)\r\n stdev = None\r\n\r\n else:\r\n mean = numpy.mean(self.results)\r\n stdev = numpy.std(self.results)\r\n\r\n sum_stats = {'n': n, 'mean': mean, 'stdev': stdev}\r\n\r\n return sum_stats", "def temp_range_stats(start, end):\n \n # Create our session (link) from Python to the DB\n session = Session(engine)\n \n dates_ = session.query(Measurement.date)\n dates = [x[0] for x in dates_]\n if start not in dates or end not in dates:\n session.close()\n return jsonify({\"error\": f\"Date {start} or {end} not found.\"}), 404\n \n else:\n results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start).filter(Measurement.date <= end).all()\n \n temp_stats = [\n {\"tmin\": results[0][0]},\n {\"tavg\": results[0][1]},\n {\"tavg\": results[0][2]}\n ]\n\n session.close()\n \n return jsonify(temp_stats)", "def _printSummary(self):\n\t\t### COP OUT\n\t\tif self.params['background'] is True:\n\t\t\tself.stats['count'] += 1\n\t\t\treturn\n\n\t\t### THIS NEEDS TO BECOME MUCH MORE GENERAL, e.g. 
Peaks\n\t\ttdiff = time.time()-self.stats['startseries']\n\t\tif not self.params['continue'] or tdiff > 0.1:\n\t\t\tcount = self.stats['count']\n\t\t\t#if(count != self.stats['lastcount']):\n\t\t\tsys.stderr.write(\"\\n\\tSUMMARY: \"+self.functionname+\"\\n\")\n\t\t\tself._printLine()\n\t\t\tsys.stderr.write(\"\\tTIME: \\t\"+apDisplay.timeString(tdiff)+\"\\n\")\n\t\t\tself.stats['timesum'] = self.stats['timesum'] + tdiff\n\t\t\tself.stats['timesumsq'] = self.stats['timesumsq'] + (tdiff**2)\n\t\t\ttimesum = self.stats['timesum']\n\t\t\ttimesumsq = self.stats['timesumsq']\n\t\t\tif(count > 1):\n\t\t\t\ttimeavg = float(timesum)/float(count)\n\t\t\t\ttimestdev = math.sqrt(float(count*timesumsq - timesum**2) / float(count*(count-1)))\n\t\t\t\ttimeremain = (float(timeavg)+float(timestdev))*self.stats['seriesleft']\n\t\t\t\tsys.stderr.write(\"\\tAVG TIME: \\t\"+apDisplay.timeString(timeavg,timestdev)+\"\\n\")\n\t\t\t\t#print \"\\t(- TOTAL:\",apDisplay.timeString(timesum),\" -)\"\n\t\t\t\tif(self.stats['seriesleft'] > 0):\n\t\t\t\t\tsys.stderr.write(\"\\t(- REMAINING TIME: \"+apDisplay.timeString(timeremain)+\" for \"\n\t\t\t\t\t\t+str(self.stats['seriesleft'])+\" series -)\\n\")\n\t\t\t#print \"\\tMEM: \",(mem.active()-startmem)/1024,\"M (\",(mem.active()-startmem)/(1024*count),\"M)\"\n\t\t\tself.stats['count'] += 1\n\t\t\tself._printLine()", "def summary(self, tmin=None, tmax=None, stats='basic'):\n output = {\n 'basic': {\n 'evp': 'Explained variance percentage',\n 'rmse': 'Root mean squared error',\n 'avg_dev': 'Average Deviation',\n 'rsq': 'Pearson R^2',\n 'bic': 'Bayesian Information Criterion',\n 'aic': 'Akaike Information Criterion'},\n }\n\n # get labels and method names for stats output\n if stats == 'all':\n # sort by key, label, method name\n selected_output = sorted([(k, l, f) for k, d in output.items()\n for f, l in d.items()])\n else:\n # sort by name, method name\n selected_output = sorted([(0, l, f) for f, l in\n output[stats].items()])\n\n # compute statistics\n labels_and_values = [(l, getattr(self, f)(tmin=tmin, tmax=tmax))\n for _, l, f in selected_output]\n labels, values = zip(*labels_and_values)\n\n stats = pd.DataFrame(index=list(labels), data=list(values),\n columns=['Value'])\n stats.index.name = 'Statistic'\n return stats", "def summary(self) -> Dict[str, Dict[str, float]]:\n vals: Dict[str, List[float]] = defaultdict(list)\n if not self.steps: # pragma: no cover\n return {}\n\n for timing_dict in self._timings:\n for step in self.steps:\n if step in timing_dict:\n vals[step].append(timing_dict[step])\n summary = {}\n for step in self.steps:\n if vals[step]:\n summary[step] = {\n \"cnt\": len(vals[step]),\n \"sum\": sum(vals[step]),\n \"min\": min(vals[step]),\n \"max\": max(vals[step]),\n \"avg\": sum(vals[step]) / len(vals[step]),\n }\n return summary", "def calc_stats(start=None, end=None):\n \n # Query all the stations and for the given range of dates. \n results = session.query(func.min(Measurement.tobs), func.max(Measurement.tobs),func.avg(Measurement.tobs)). 
filter(Measurement.date >= start).filter(Measurement.date <= end).all()\n\n # Create a dictionary from the row data and append to a list of for the temperature data.\n start_end_dict = []\n \n for Tmin, Tmax, Tavg in results:\n start_end = {}\n start_end[\"Minimum Temp\"] = Tmin\n start_end[\"Maximum Temp\"] = Tmax\n start_end[\"Average Temp\"] = Tavg\n start_end_dict.append(start_end)\n \n return jsonify(start_end_dict)", "def summary(data, digits=2, name=None):\n logger = logging.getLogger(__name__)\n if data.ndim!=1:\n raise ValueError(\"invest.calculation.summary only takes pandas Series as input data\")\n\n if name is None:\n name = data.name\n if name is None:\n name = 0\n\n df = pd.DataFrame()\n df.at[name, 'Start'] = data.index[0]\n df.at[name, 'End'] = data.index[-1]\n n = data.shape[0]\n mean = np.mean(data)\n df.at[name, 'Size'] = n\n df.at[name, 'Mean'] = np.round(mean, digits)\n df.at[name, 'Std'] = np.round(np.sqrt( np.mean((data-mean)**2) * n / (n-1) ), digits)\n df.at[name, 'Skew'] = np.round( np.mean((data-mean)**3) / np.mean((data-mean)**2)**1.5, digits)\n df.at[name, 'Kurtosis'] = np.round( np.mean((data-mean)**4) / np.mean((data-mean)**2)**2 - 3, digits)\n data = np.sort(data.values).flatten()\n df.at[name, 'min'] = data[0]\n for p in [0.25, 0.5, 0.75]:\n i = int(n*p)\n ratio = np.abs(n*p - i - p)\n df.at[name, \"{:.0%}\".format(p)] = ratio * data[i-1] + (1-ratio) * data[i]\n df.at[name, 'max'] = data[n-1]\n df = df.astype({'Size':int})\n return df", "def stats(start=None, end=None):\n # Select statement\n sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]\n \n if not end:\n # Calculate TMIN, TAVG, TMAX for dates greater than start\n results = session.query(*sel).\\\n filter(Measurement.date >= start).all()\n # Unravel the results into a ID array and convert into a list\n temps = list(np.ravel(results))\n return jsonify(temps)\n \n # Calculate TMIN, TAVG, TMAX with start and stop\n results = session.query(*sel).\\\n filter(Measurement.date >= start).\\\n filter(Measurement.date <= end).all()\n # Unravel the results into a ID array and convert into a list\n temps = list(np.ravel(results))\n return jsonify(temps)", "def meanstd(self):\n\t\tmean = [125.3, 123.0, 113.9] # R,G,B\n\t\tstddev = [63.0, 62.1, 66.7] # R,G,B\n\t\treturn [mean, stddev]", "def std(self):\n return self._summarize(lambda c: c.std)", "def statistic_series(self, resource=None, resource_type=None,\n meter_name=None, start_time=None, end_time=None,\n granularity=300):\n\n pass", "def stat_summary(username, n, precision, start_date=None):\n n = int(n)\n precision = int(precision)\n return [{\"stat_name\" : \"mean\", \"stat\" : round(last_n_average(username, n, start_date=start_date), precision)},\n {\"stat_name\" : \"std\", \"stat\" : round(last_n_std(username, n, start_date=start_date), precision)},\n {\"stat_name\" : \"sum\", \"stat\" : round(last_n_sum(username, n, start_date=start_date), precision)}]", "def stats(start=None, end=None):\n\n # Select statement\n sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]\n\n if not end:\n # calculate TMIN, TAVG, TMAX for dates greater than start\n Tempstart = session.query(*sel).filter(Measurement.date >= prevyeardates).all()\n session.close()\n # Unravel results into a 1D array and convert to a list\n \n temps = [] \n for temp in Tempstart:\n datedic = {}\n \n datedic[\"Low Temp\"] = temp[0]\n datedic[\"Avg Temp\"] = temp[1]\n datedic[\"High Temp\"] = temp[2]\n 
temps.append(datedic)\n\n return jsonify(temps)\n\n # calculate TMIN, TAVG, TMAX with start and stop\n Tempsend = session.query(*sel).filter(Measurement.date >= prevyeardates).filter(Measurement.date <= curyeardate).all()\n session.close()\n # Unravel results into a 1D array and convert to a list\n temps = [] \n for temp in Tempsend:\n datedic = {}\n \n datedic[\"Low Temp\"] = temp[0]\n datedic[\"Avg Temp\"] = temp[1]\n datedic[\"High Temp\"] = temp[2]\n temps.append(datedic)\n \n\n return jsonify(temps=temps)", "def value_stats(values):\n stats = describe(values)\n mean = stats.mean\n std = np.sqrt(stats.variance)\n t_stat = t.ppf(1 - 0.025, len(values) - 1)\n dev = t_stat * (std / np.sqrt(len(values)))\n trim_mean_v = trim_mean(values, 0.25)\n upper_val = mean + dev\n lower_val = mean - dev\n\n return mean, trim_mean_v, std, upper_val, lower_val", "def get_rolling_stats(self):\n rolling = self.data.rolling(window=20, center=False)\n\n rm = rolling.mean().dropna()\n rstd = rolling.std().dropna()\n\n rolling_mean = rm[self.symb]\n rolling_std = rstd[self.symb]\n return rolling_mean, rolling_std", "def summary_stats(self):\n capital_gains = self.df['values'].iloc[-1].sum() - self.tc.starting_cash\n total_return = capital_gains / self.tc.starting_cash\n days_invested = (self.df.index[-1] - self.df.index[0]).days\n annualized_returns = (total_return + 1) ** (365 / days_invested) - 1\n annualized_volatility = self.df['returns'].std() * (252 ** 0.5)\n sharpe = annualized_returns / annualized_volatility\n num_trades = self.trades.shape[0]\n stats = pd.Series(\n data=[capital_gains, total_return, annualized_returns, annualized_volatility, sharpe, num_trades],\n index=['Capital Gains', 'Total Return', 'Annualized Return', 'Annualized Volatility', 'Sharpe Ratio',\n 'Number of Trades']\n )\n return stats", "def _calculate_stats(values, factor=1):\n result = {'min': min(values) * factor,\n 'max': max(values) * factor,\n 'sum': sum(values) * factor,\n 'mean': 0,\n 'stddev': 0}\n\n if values:\n mean = sum(values) / float(len(values))\n result['mean'] = factor * mean\n result['stddev'] = (\n factor * math.sqrt((1.0 / (len(values) - 1))\n * sum((x - mean) ** 2 for x in values)))\n\n return result", "def summarize(dataset):\n summaries = [(np.mean(attribute), np.std(attribute)) for attribute in zip(*dataset)]\n\n return summaries", "def time_stats(df):", "def _get_tads_mean_std(self, experiments):\n norm_tads = []\n for tad in experiments:\n for brk in self.experiments[tad]['tads'].values():\n if not brk['brk']:\n continue\n norm_tads.append(log((brk['end'] - brk['start']) * self.resolution))\n length = len(norm_tads)\n mean = sum(norm_tads)/length\n std = sqrt(sum([(t-mean)**2 for t in norm_tads])/length)\n return mean, std", "def get_forecast_summary(self):\n sum = {\"prior_var\":[], \"post_var\":[], \"percent_reduction\":[]}\n for forecast in self.prior_forecast.keys():\n pr = self.prior_forecast[forecast]\n pt = self.posterior_forecast[forecast]\n ur = 100.0 * (1.0 - (pt/pr))\n sum[\"prior_var\"].append(pr)\n sum[\"post_var\"].append(pt)\n sum[\"percent_reduction\"].append(ur)\n return pd.DataFrame(sum,index=self.prior_forecast.keys())", "def aggregate_data(tdata):\n # extract the unique mean and daily pair\n unique_pair = np.unique(np.vstack((tdata.mean_temp,\n tdata.daily_temp)).T, axis=0)\n mean_temp = unique_pair[:, 0]\n daily_temp = unique_pair[:, 1]\n\n obs_mean = []\n obs_std = []\n\n for p in unique_pair:\n valid_id = (tdata.mean_temp == p[0]) &\\\n (tdata.daily_temp == p[1]) &\\\n 
(tdata.trimming_weights > 0.5)\n obs_mean_atp = tdata.obs_mean[valid_id]\n obs_std_atp = tdata.obs_std[valid_id]\n\n ivar = 1.0/obs_std_atp**2\n obs_mean_atp = obs_mean_atp.dot(ivar)/np.sum(ivar)\n obs_std_atp = np.sqrt(1.0/np.sum(ivar))\n # obs_std_atp = np.mean(obs_std_atp)\n\n obs_mean.append(obs_mean_atp)\n obs_std.append(obs_std_atp)\n\n obs_mean = np.array(obs_mean)\n obs_std = np.array(obs_std)\n\n study_id = np.arange(obs_mean.size)\n data_id = None\n\n return utils.TempData(mean_temp,\n daily_temp,\n obs_mean,\n obs_std,\n study_id,\n data_id)", "def variable_summaries(self, var):\n if not self.MP.DISABLE_SUMMARY:\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)", "def mean_STD(self,counter):\n \n \n pass", "def SeriesStandard(series):\n mean = np.mean(series)\n variance = np.var(series)\n series = (series-mean)/variance\n return series", "def summary(trace, vars=None, alpha=0.05, start=0, batches=100, roundto=3):\n if vars is None:\n vars = trace.varnames\n if isinstance(trace, MultiTrace):\n trace = trace.combined()\n\n stat_summ = _StatSummary(roundto, batches, alpha)\n pq_summ = _PosteriorQuantileSummary(roundto, alpha)\n\n for var in vars:\n # Extract sampled values\n sample = trace[var][start:]\n if sample.ndim == 1:\n sample = sample[:, None]\n elif sample.ndim > 2:\n ## trace dimensions greater than 2 (variable greater than 1)\n warnings.warn('Skipping {} (above 1 dimension)'.format(var))\n continue\n\n print('\\n%s:' % var)\n print(' ')\n\n stat_summ.print_output(sample)\n pq_summ.print_output(sample)", "def get_series(self,year):\n year_dates, year_dc = self.year_data(year)\n mean_dc = []\n std_dc = []\n for date in year_dates:\n day = date.day\n month = date.month\n idx = [i for i in range(self.dates.shape[0]) \\\n if (self.dates[i].month == month and \\\n self.dates[i].day == day)]\n mean_dc.append(np.ma.mean(self.dc[idx]))\n std_dc.append(np.ma.std(self.dc[idx]))\n\n return np.array(mean_dc), np.array(std_dc)" ]
[ "0.6649429", "0.6533868", "0.6494436", "0.64276904", "0.63289535", "0.6307185", "0.62566227", "0.6218539", "0.6188114", "0.6148697", "0.6121931", "0.60502344", "0.6021406", "0.60148054", "0.5986059", "0.59573406", "0.5942241", "0.5935031", "0.5930992", "0.5924357", "0.5903259", "0.5884795", "0.58805346", "0.5879726", "0.58582", "0.58563143", "0.5844927", "0.58330923", "0.58284414", "0.5827436" ]
0.7294873
0
Creates and returns a MySQL database engine.
def create_mysql_engine(dbname, prod=True, driver="pymysql"): db_config = toolbox.open_system_config(prod=prod, config_type="DB_CONFIG")[dbname] db_url = URL( drivername="mysql+{}".format(driver), username=db_config.get("username"), password=db_config.get("password"), host=db_config.get("hostname"), database=db_config.get("db"), ) engine = create_engine(db_url) return engine
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_mysql_engine():\n\n return sa.create_engine(\n sa.engine.url.URL(\n drivername=\"mysql+pymysql\",\n username=\"username\", # Change that!!\n password=\"password\", # Change that!!\n host=\"host\", # Change that!!\n port=c.PORT,\n database=c.DATABASE,\n ),\n encoding=\"utf-8\", # Since there will be some japanse chars\n )", "def get_db_engine():\n # get database connection url\n connection_url = get_db_connection_url()\n\n # Create engine from connection url\n engine = create_engine(connection_url)\n\n return engine", "def get_engine(username, password, ipaddress, database):\n #TODO(rnirmal):Based on permissions issues being resolved we may revert\n #url = URL(drivername='mysql', host='localhost',\n # query={'read_default_file': '/etc/mysql/my.cnf'})\n global ENGINE\n if ENGINE:\n return ENGINE\n if database:\n ENGINE = sqlalchemy.create_engine(\"mysql://%s:%s@%s:3306/%s\" %\n (username, password, ipaddress,database),\n pool_recycle=7200,\n listeners=[KeepAliveConnection()])\n else:\n ENGINE = sqlalchemy.create_engine(\"mysql://%s:%s@%s:3306\" %\n (username, password, ipaddress),\n pool_recycle=7200,\n listeners=[KeepAliveConnection()])\n return ENGINE", "def get_database_engine() -> Engine:\n return engine", "def setup_engine():\n print(\"Setting up engine\")\n engine = create_engine('mysql+pymysql://{}:{}@{}/govhack2015'.format(\n username, password, ip_address))\n\n return engine", "def _get_engine(**kwargs):\n engine_name = 'MySQL'\n return engine_name", "def setup_mysql(host, database, user, pw):\n engine = db.create_engine('mysql+pymysql://{}:{}@{}:3306/{}'.format(user, pw, host, database))\n tables.create_tables(engine)\n return engine.connect()", "def get_sql_engine(cls, db_uri: str) -> Engine:\n return create_engine(db_uri)", "def create_db(args, engine=None):\n if engine is None:\n if args.RDS:\n engine_string = get_engine_string()\n else:\n engine_string = args.local_URI\n logger.info(\"RDS:%s\" % args.RDS)\n engine = sql.create_engine(engine_string)\n\n Base.metadata.create_all(engine)\n logging.info(\"database created\")\n\n return engine", "def create_engine(self):\n return create_engine('sqlite:///' + self.database_name, echo=True)", "def Engine_Connection(self):\n try:\n # Engine Connection\n engine = create_engine('mysql+mysqlconnector://{}:{}@{}/{}'.format(self.user,self.password,self.host,self.database))\n return['Engine created', engine]\n except engine.closed():\n return print(\"Failed to create engine\")", "def create_engine(self):\n connection_string = f'postgresql://{self.user}:{self.password}@{self.host}/{self.database_name}'\n return create_engine(connection_string)", "def engine(db_url=None):\n db_url = db_url or os.getenv(\"DB_URL\")\n if not db_url:\n raise ValueError(\"database URL is required\")\n print(f\"Returning an engine for {db_url}\")\n return create_engine(db_url)", "def db_connect():\n return create_engine(get_project_settings().get(\"CONNECTION_STRING\"))", "def db_connect():\n return create_engine(get_project_settings().get(\"CONNECTION_STRING\"))", "def db_connect():\n return create_engine(get_project_settings().get(\"CONNECTION_STRING\"))", "def db_connect():\n return create_engine(get_project_settings().get(\"CONNECTION_STRING\"))", "def mysql_create(name=None, user=None, password=None, host=None,\n db_password=None, port=''):\n name = check(name, 'name: the database name to create.')\n user = check(user, 'user: the user to grant privileges')\n password = check(password, 'password: user\\'s password')\n host = check(host, 'host: 
machine ', default='localhost')\n db_password = check(db_password, 'db_password: mysql password.')\n port == ':'+port if port is not '' else ''\n\n mysql_run((\n \"CREATE DATABASE IF NOT EXISTS {name}\",\n \"GRANT ALL PRIVILEGES ON {name}.* TO '{user}'@'{host}{port}' \" +\n \"IDENTIFIED BY '{password}'\",\n ), {'name': name, 'user': user, 'password': password, 'host': host,\n 'port': port}, db_password=db_password)", "def get_engine(db_params: Dict[str, str]) -> sa.engine:\r\n db_uri = get_uri(db_params)\r\n return sa.create_engine(db_uri)", "def get_engine(db_url):\n check_db_url(db_url)\n return create_engine(db_url)", "def create_db():\n\n db_url = os.environ.get('HEROKU_POSTGRESQL_GRAY_URL', 'postgresql://postgres:[email protected]:5432/nextbus')\n\n return create_engine(db_url)", "def _set_database_engine(self, config):\n confi = config.copy()\n superuse = confi.pop(\"supdatabase\"), confi.pop(\"supusername\"), confi.pop(\"suppassword\")\n self.__engine = create_engine(URL(**confi))\n try:\n try:\n if self.__engine is not None:\n conn = self.__engine.connect()\n conn.close()\n except OperationalError:\n configdef = confi.copy()\n configdef[\"database\"] = superuse[0]\n self.__engine.dispose()\n self.__engine = create_engine(URL(**configdef))\n try:\n conn = self.__engine.connect()\n try:\n conn.execute(\"commit\")\n conn.execute(\"CREATE DATABASE %s;\" % config[\"database\"])\n finally:\n conn.close()\n except OperationalError:\n self.__engine.dispose()\n raise\n self.__engine.dispose()\n self.__engine = create_engine(URL(**confi))\n except ProgrammingError:\n raise", "def create_engine(self, base):\n try:\n engine = create_engine(\n \"postgresql+psycopg2://%s:%s@%s:%s/%s\" % (\n self._db_settings['DATABASE_USER'],\n self._db_settings['DATABASE_PASS'],\n self._db_settings['DATABASE_IP'],\n self._db_settings['DATABASE_PORT'],\n self._db_settings['DATABASE_NAME']),\n poolclass=NullPool)\n #pool_size=5,\n #max_overflow=10)\n base.metadata.create_all(engine)\n # Fix for forking\n #register_after_fork(engine, engine.dispose)\n return engine\n except ValueError as e: # Potentially corrupted DB config.\n self.error_handler.abort_framework(\n \"Database configuration file is potentially corrupted. Please check %s\\n[DB] %s\" %\n (self.config.get_val('DATABASE_SETTINGS_FILE'), str(e)))\n except KeyError: # Indicates incomplete db config file\n self.error_handler.abort_framework(\"Incomplete database configuration settings in %s\" %\n self.config.get_val('DATABASE_SETTINGS_FILE'))\n except exc.OperationalError as e:\n self.error_handler.abort_framework(\"[DB] %s\\nRun 'make db-run' to start/setup db\" % str(e))", "def create_db(user,password,database, conn_type, host, port):\n logger.info(\"Creating RDS database\")\n\n # create engine string with details from yaml file\n engine_string = \"{}://{}:{}@{}:{}/{}\". 
\\\n format(conn_type, user, password, host, port, database)\n\n engine = create_engine(engine_string)\n Base.metadata.create_all(engine)\n logger.info(\"Database created with tables\")\n return engine", "def db_connect():\n return create_engine(URL(**product_crawlers.settings.DATABASE))", "def __my_create_engine(self, config):\n return {\n 'mysql': lambda c: create_engine(\n \"mysql://\" + c[\"user\"] + \":\" + c[\"password\"] +\n \"@\" + c[\"host\"] + \"/\" + c[\"database\"],\n encoding=\"utf-8\",\n isolation_level=\"READ UNCOMMITTED\")\n }[config[\"type\"]](config)", "def db_connect():\n return create_engine(URL(**DATABASE))", "def init_database(cls):\n conn = config.db_connection_string(Settings)\n cls.Engine = create_engine(conn, echo=Settings.get('DEBUG'))\n cls.Session = sessionmaker(bind=cls.Engine)\n return cls", "def db_connect():\n return create_engine(URL(**settings.DATABASE))", "def db_connect():\n return create_engine(URL(**settings.DATABASE))" ]
[ "0.77028465", "0.75408655", "0.740179", "0.73909706", "0.71847886", "0.7134698", "0.7087348", "0.70744014", "0.7018489", "0.6846384", "0.679343", "0.6783817", "0.676985", "0.670791", "0.670791", "0.670791", "0.670791", "0.66787875", "0.66645664", "0.6635707", "0.6618998", "0.6602763", "0.65987355", "0.65959615", "0.651421", "0.6512671", "0.649563", "0.6485515", "0.647456", "0.647456" ]
0.7968073
0
Creates and returns a connection to a Microsoft SQL Server database.
def create_mssql_connection( dbname, prod=True, driver="{ODBC Driver 17 for SQL Server}", driver_type="pyodbc" ): db_config = toolbox.open_system_config(prod=prod, config_type="DB_CONFIG")[dbname] if driver_type == "pyodbc": connection = pyodbc.connect( driver=driver, server=db_config["server"], database=db_config["database"], uid=db_config["username"], pwd=db_config["password"], ) else: raise ValueError("Invalid driver type: {}".format(driver_type)) return connection
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createConnection(self):\n comp_name = os.environ['COMPUTERNAME']\n conn = py.connect('Driver=ODBC Driver 11 for SQL Server;SERVER=' +\n comp_name + '\\HAZUSPLUSSRVR; UID=SA;PWD=Gohazusplus_02')\n self.conn = conn\n return conn", "def create_connection():\r\n try:\r\n conn = sq.connect(DBClass.db_name)\r\n except sq.Error as e:\r\n raise e\r\n \r\n return conn", "def connect(self):\n\n self.logger.debug(\"creating DB connection\")\n conn = sql.connect(**self.connection_arguments)\n self.logger.debug(\"DB connection ready: %r\", conn.get_host_info())\n return conn", "def create_connection():\r\n # Establishing SQL Connection \r\n con = pyodbc.connect('DSN=ZipAnalyticsADW;UID=zipcode_analytics_app;PWD=DECZr91@cF')\r\n return con", "def create_connection(connection_name: str, **kwargs) -> SnowflakeConnection:\n ret = get_db_parameters(connection_name)\n ret.update(kwargs)\n connection = snowflake.connector.connect(**ret)\n return connection", "def connect(username=None, password=None):\n\n# server = \"cfcsql17.gs.umt.edu\"\n server = \"fcfc-sql.cfc.umt.edu\"\n database = 'MCOMesonet'\n\n params = urllib.parse.quote_plus('DRIVER={ODBC Driver 17 for SQL Server};SERVER=' + server +\n ';DATABASE=' + database +\n ';UID=' + username +\n ';PWD=' + password)\n\n return sqlalchemy.create_engine(\"mssql+pyodbc:///?odbc_connect=%s\" % params,\n fast_executemany=True)", "def createConnection(self):\r\n conn_string = \"host='{}' dbname='{}' user='{}' password='{}' port={}\".format(\r\n self.host, self.database, self.user, self.password, self.port)\r\n return psycopg2.connect(conn_string)", "def connect_to_db():\n with open(r'omppackage\\\\server_config.cfg', 'r') as f:\n conn_string = f.read()\n return pyodbc.connect(conn_string)", "def getDbConnection(self, **kwargs):\r\n \r\n con = sql.connect(self._filename, **kwargs)\r\n con.row_factory = sql.Row\r\n return con", "def createWriteConnection(self, databaseName):\n engine = create_engine('mssql+pyodbc://hazuspuser:Gohazusplus_02@.\\\\HAZUSPLUSSRVR/'+\n databaseName+'?driver=SQL+Server')\n writeConn = engine.connect()\n self.writeConn = writeConn\n return writeConn", "def connection(config: dict) -> pyodbc.connect:\n try:\n return pyodbc.connect(\"DSN={}\".format(config['dsn']))\n except pyodbc.Error as e:\n raise Exception(e)", "def get_new_connection(self, conn_params):\r\n self.__connection_string = conn_params.get('connection_string', '')\r\n conn = Database.connect(**conn_params)\r\n return conn", "def create_connection(db_file):\n conn = None\n try:\n conn = connect(db_file)\n except Error as e:\n print(e)\n\n return conn", "def _GetNewConnection(self):\n l_logger.debug(\"Creating a db connection\")\n return mdb.connect(host=self.host,\n user=self.user, \n password=self.passwd, \n database=self.db_name,\n port=self.port\n )", "def create_connection(conn_string):\n try:\n conn = psycopg2.connect(conn_string, sslmode='require')\n logging.debug(\"Db connection established.\")\n return conn\n except DatabaseError as e:\n logging.error(\"psycopg2 error: {}\", e)\n return None", "def __create_connection(db_file: Path) -> sqlite3.Connection:\n\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n except sqlite3.Error as e:\n logging.error(e)\n logging.info('Connection could be created. 
Return sqlite3.Connection object.')\n return conn", "def _CreateConnection(db_file):\r\n conn = None\r\n try:\r\n conn = sqlite3.connect(db_file)\r\n except Error as e:\r\n print(e)\r\n\r\n return conn", "def get_db_conn(server, database, version='sde.DEFAULT'):\n scratch_work = arcpy.env.scratchFolder\n conn_name = 'temp__{}_{}'.format(server, database)\n conn_path = '{}//{}.sde'.format(scratch_work, conn_name)\n\n with TempOverwrite():\n arcpy.CreateDatabaseConnection_management(\n scratch_work,\n conn_name,\n database_platform='SQL_SERVER',\n instance=server,\n account_authentication='OPERATING_SYSTEM_AUTH',\n database=database,\n version=version\n )\n\n return conn_path", "def create_connection(db_file):\n conn = sqlite3.connect(db_file)\n return conn", "def create_connection(self):\n try:\n conn = sqlite3.connect(self.db_path)\n return conn\n except Error as e:\n print(e)\n raise e", "def connect_db():\n\n # use nullpool because pooling breaks unit tests and we don't need the performance\n return sqlalchemy.create_engine(\n 'postgresql://' +\n app.config[ 'DATABASE_USER' ] + ':' +\n app.config[ 'DATABASE_PASSWORD' ] + '@' +\n app.config[ 'DATABASE_HOST' ] + '/' +\n app.config[ 'DATABASE' ],\n poolclass = sqlalchemy.pool.NullPool\n )", "def getDatabaseConnection(self):\n \n strConn = \"dbname='\" + self.__dbname + \"' user='\" + self.__user + \"' host='\" + self.__host + \"' port='\" + self.__port + \"' password='\" + self.__password + \"'\"\n return strConn", "def create_connection(path='sqlite:///database.db'):\n engine = sql.create_engine(path, encoding='utf8')\n conn = engine.connect()\n metadata = sql.MetaData()\n return conn, engine, metadata", "def get_connection(dsn):\n try:\n db_url = make_url(dsn)\n engine = create_engine(db_url)\n return engine.connect()\n except exc.OperationalError:\n raise RuntimeError(\"Database %s does not exist\" % db_url.database)", "def openConnection():\n connection = nj.GraphDatabase.driver(\n uri=URI, auth=nj.basic_auth(USER, PASSWORD))\n return connection", "def create_connection():\n db_file = \"data/data.db\"\n conn = None\n \n try:\n conn = sqlite3.connect(db_file)\n return conn\n except Exception as e:\n pass", "def connect(db, username=None, password=None, **kwargs):\n global _connection_settings, _db_name, _db_username, _db_password, _db\n _connection_settings = dict(_connection_defaults, **kwargs)\n _db_name = db\n _db_username = username\n _db_password = password\n return _get_db(reconnect=True)", "def create_connection(sqlite_db_file):\n try:\n connection_db = sqlite3.connect(sqlite_db_file)\n return connection_db\n except Exception:\n pass", "def get_connection(self):\n\n\t\treturn dbapi.connect(credentials.SERVER,\\\n\t\t\t\t\t\t\t credentials.PORT,\\\n\t\t\t\t\t\t\t credentials.USER,\\\n\t\t\t\t\t\t\t credentials.PASSWORD)", "def create_connection():\n try:\n return sqlite3.connect(database=os.environ[\"DATABASE_NAME\"])\n except Exception as e:\n print(e)\n return None" ]
[ "0.69744205", "0.6950537", "0.67107075", "0.64784396", "0.6422242", "0.6395865", "0.638747", "0.638506", "0.6319929", "0.6319132", "0.6210808", "0.6189856", "0.6167106", "0.61432165", "0.6121374", "0.6109558", "0.61014706", "0.60880446", "0.60853744", "0.60759133", "0.6063821", "0.6053781", "0.6031984", "0.602608", "0.60231733", "0.6010567", "0.60067034", "0.6006666", "0.6002516", "0.5995992" ]
0.72884434
0
Fix the dates for the CEMS data Three date/datetime changes (not all implemented) Make op_date a DATE type Make an appropriate INTERVAL type (not implemented) Add a UTC timestamp (not implemented)
def fix_up_dates(df): # Convert to interval: # df = convert_time_to_interval(df) # Convert op_date and op_hour from string and integer to datetime: # Note that doing this conversion, rather than reading the CSV with # `parse_dates=True` is >10x faster. # Make an operating timestamp df["operating_datetime"] = ( pd.to_datetime(df["op_date"], format=r"%m-%d-%Y", exact=True, cache=True) + pd.to_timedelta(df["op_hour"], unit="h") ) del df["op_hour"], df["op_date"] # Add UTC timestamp ... not done. return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def datefixer(ds):\n\n\n\t# ========== create the new dates ==========\n\tyear = ds.Year\n\n\t# +++++ set up the list of dates +++++\n\tdates = OrderedDict()\n\ttm = [dt.datetime(int(year) , int(np.floor(tm)), int(tm%1*30+1)) for tm in ds.time]\n\tdates[\"time\"] = pd.to_datetime(tm)\n\n\tdates[\"calendar\"] = 'standard'\n\tdates[\"units\"] = 'days since 1900-01-01 00:00'\n\t\n\tdates[\"CFTime\"] = date2num(\n\t\ttm, calendar=dates[\"calendar\"], units=dates[\"units\"])\n\n\treturn dates", "def modify_datetime_train(df):\n\n df['pickup_hour'] = pd.to_datetime(df['pickup_datetime']).dt.hour\n\n df['dropoff_hour'] = pd.to_datetime(df['dropoff_datetime']).dt.hour\n\n df['pickup_minute'] = pd.to_datetime(df['pickup_datetime']).dt.minute\n\n df['dropoff_minute'] = pd.to_datetime(df['dropoff_datetime']).dt.minute\n\n df['pickup_hour_sin'], df['pickup_hour_cos'] = convert_time_sin_cos(df, 'pickup_hour')\n\n df['dropoff_hour_sin'], df['dropoff_hour_cos'] = convert_time_sin_cos(df, 'dropoff_hour')\n\n #split datetime between dates and time\n #using normalize even though it gives us 0:00 time, but the resulting column is a datetime object,\n #which allows us to further process for day of week\n df['pickup_date'] = pd.to_datetime(df['pickup_datetime']).dt.date\n\n df['dropoff_date'] = pd.to_datetime(df['dropoff_datetime']).dt.date\n\n #create day of the week for both pickup date and dropoff dates\n df['pickup_day'] = pd.to_datetime(df['pickup_datetime']).dt.weekday\n\n df['dropoff_day'] = pd.to_datetime(df['dropoff_datetime']).dt.weekday\n\n #get week of year to capture effects of holidays\n df['pickup_weekofyear'] = pd.to_datetime(df['pickup_datetime']).dt.weekofyear\n\n df[\"month\"] = pd.to_datetime(df['pickup_datetime']).dt.month\n\n df[\"year\"] = pd.to_datetime(df['pickup_datetime']).dt.year\n #one hot encode day of the week for both pickup and dropoff\n df = pd.get_dummies(df, columns=['pickup_day', 'dropoff_day'])\n\n return df", "def fix_dates(self, row):\r\n for field in self.date_fields:\r\n if field in row:\r\n if not type(row[field]) is datetime:\r\n try:\r\n row[field] = datetime.fromtimestamp(float(row[field]))\r\n except Exception as e:\r\n row[field] = None", "def format_data(self, raw_data):\n opz = raw_data.copy()\n opz['datetime'] = pd.to_datetime(opz['Datum-tijd'], format='%Y-%m-%dT%H:%M:%SZ')\n opz.drop(['Datum-tijd'],axis=1, inplace=True)\n opz['dag']=opz['datetime'].dt.day\n opz['tijd'] = opz['datetime'].dt.time\n #voeg open/dicht data toe en bepaal momenten waarop dit wisselt\n opz['Opzetstuk Noord (°)'] = opz['Opzetstuk Noord (°)'].str.replace(',', '.').astype(float)\n opz['Opzetstuk Zuid (°)'] = opz['Opzetstuk Zuid (°)'].str.replace(',', '.').astype(float)\n opz['Opzetstuk Noord (°)'].fillna(opz['Opzetstuk Zuid (°)'], inplace=True)\n opz['Opzetstuk Zuid (°)'].fillna(opz['Opzetstuk Noord (°)'], inplace=True)\n return opz", "def add_time_delta(time_offset_value, date_time, dataset): \n \n if 'minutes' in time_offset:\n date_time_delta = [ timedelta(minutes = float(i) ) + time_offset_value for i in date_time ]\n elif 'hours' in time_offset:\n date_time_delta = [ timedelta(hours = float(i) ) + time_offset_value for i in date_time ] \n elif 'seconds' in time_offset: \n date_time_delta = [ timedelta(seconds = float(i) ) + time_offset_value for i in date_time ] \n \n \n '''\n if 'era' not in dataset:\n \n if 'minutes' in time_offset:\n date_time_delta = [ timedelta(minutes = float(i) ) + time_offset_value for i in date_time ]\n elif 'hours' in time_offset:\n date_time_delta = [ 
timedelta(hours = float(i) ) + time_offset_value for i in date_time ] \n elif 'seconds' in time_offset: \n date_time_delta = [ timedelta(seconds = float(i) ) + time_offset_value for i in date_time ] \n else:\n date_time = np.array( [ datetime.strptime(str(int(i)), '%Y%m%d%H') for i in date_time ] )# convert to datetime object \n ''' \n \n #else:\n # print('check if time is wrong !!!! (should never happen)')\n # sys.exit() \n #unique_dt = [i for i in [ time_offset_value + j for j in delta ] ] \n #unique_dt = [ i +0 ]\n date_time_delta = [ i.replace(minute=0, second=0) for i in date_time_delta ] \n \n return date_time_delta", "def make_all_datetime(self):\n \n logging.info('\\n *** Running make_all_datetime ' )\n \n all_uniques = [] # storing a list with all the unique date_tmes \n which_k_in_dt = {} # list of avilable dataset for each unique date_time, so that when looping over the distinct date_times, only the proper dataset will be read and compared \n\n def add_time_delta(time_offset_value, date_time, dataset):\n \"\"\" Converting to proper date_time adding the time_delta. \n Removes minutes rounding to closest integer hour. \"\"\" \n \n if 'minutes' in time_offset:\n date_time_delta = [ timedelta(minutes = float(i) ) + time_offset_value for i in date_time ]\n elif 'hours' in time_offset:\n date_time_delta = [ timedelta(hours = float(i) ) + time_offset_value for i in date_time ] \n elif 'seconds' in time_offset: \n date_time_delta = [ timedelta(seconds = float(i) ) + time_offset_value for i in date_time ] \n \n \n '''\n if 'era' not in dataset:\n \n if 'minutes' in time_offset:\n date_time_delta = [ timedelta(minutes = float(i) ) + time_offset_value for i in date_time ]\n elif 'hours' in time_offset:\n date_time_delta = [ timedelta(hours = float(i) ) + time_offset_value for i in date_time ] \n elif 'seconds' in time_offset: \n date_time_delta = [ timedelta(seconds = float(i) ) + time_offset_value for i in date_time ] \n else:\n date_time = np.array( [ datetime.strptime(str(int(i)), '%Y%m%d%H') for i in date_time ] )# convert to datetime object \n ''' \n \n #else:\n # print('check if time is wrong !!!! 
(should never happen)')\n # sys.exit() \n #unique_dt = [i for i in [ time_offset_value + j for j in delta ] ] \n #unique_dt = [ i +0 ]\n date_time_delta = [ i.replace(minute=0, second=0) for i in date_time_delta ] \n \n return date_time_delta \n\n\n for k,v in self.datasets.items() : \n self.unique_dates[k] = {}\n \n self.unique_dates[k]['indices'] = {} \n #self.unique_dates[k]['indices_low'] = {} \n #self.unique_dates[k]['index_up'] = {} \n \n \"\"\" recordtimestamp from the input file \"\"\"\n \n \"\"\" Convert to proper date_time using the add_time_delta funtion \"\"\"\n logging.debug(' Calculating the time_delta for : %s', k )\n \n File = nc.Dataset(self.datasets[k]) \n unique = File.variables['recordtimestamp']\n \n self.data[k]['recordtimestamp'] = File.variables['recordtimestamp'][:].data\n self.data[k]['recordindex'] = File.variables['recordindex'][:].data\n \n time_offset = File.groups['observations_table']['date_time'].units\n time_offset_value = time_offset.split('since ') [1] \n time_offset_value = datetime.strptime(time_offset_value, '%Y-%m-%d %H:%M:%S')\n \n #unique = self.data[k]['recordtimestamp']\n \n unique_dt = add_time_delta (time_offset_value, unique, k ) \n \n all_uniques += unique_dt # adding to the total unique date_times \n \n \"\"\" Extracting the recordindex low and up from the input file \"\"\"\n indices = self.data[k]['recordindex']\n \n \"\"\" Loop over all the date_times of each dataset \"\"\"\n for dt, index_low, count in zip (unique_dt, indices, range(len(unique_dt)) ):\n \n try: \n which_k_in_dt[dt].append(k)\n except:\n which_k_in_dt[dt] = []\n which_k_in_dt[dt].append(k) \n \n self.unique_dates[k]['indices'][dt] = {}\n self.unique_dates[k]['indices'][dt]['low'] = index_low \n try:\n index_up = indices[ count + 1 ] # works until the last available recordindex\n except: \n #index_up = len(indices-1) \n index_up = len(indices)-1 \n \n self.unique_dates[k]['indices'][dt]['up'] = index_up\n \n #self.unique_dates[k]['indices'].append(index) \n #self.unique_dates[k]['indices_up'].append(index_up) \n \n self.dataset_per_dt = which_k_in_dt \n self.merged_unique_dates = np.unique(np.array(all_uniques) ) # storing the set of all distinct dt values \n logging.debug('make_all_datetime finished ')", "def OPCtimetransformOld(data, to):\n outtimes = []\n \n times = {\n 'ms':[],\n 'SS':[],\n 'MM':[],\n 'HH':[]\n }\n for i in range(0, len(data)):\n item = data[i]\n try: \n times['HH'].append(int(item[0:2]))\n times['MM'].append(int(item[2:4]))\n times['SS'].append(int(item[4:6]))\n times['ms'].append(int(item[7:9]))\n except ValueError:\n # strange value 2319010.00 in 201129 file...\n olditem = item\n newitem = item[:4] + item[4+1:]\n print( ('Repairing strange value %s into %s')%(olditem, newitem) )\n try:\n times['HH'].append(int(newitem[0:2]))\n times['MM'].append(int(newitem[2:4]))\n times['SS'].append(int(newitem[4:6]))\n times['ms'].append(int(newitem[7:9]))\n except ValueError:\n print(newitem)\n\n # OPC times go up to 60 minutes. 
This is corrected by moving one minute\n times['MM'] = [max(0,x-1) for x in times['MM']]\n times['SS'] = [max(0,x-1) for x in times['SS']]\n\n for i in range(0, len(data)):\n md = dt.datetime(1900,1,1,times['HH'][i], times['MM'][i], times['SS'][i]) \n outtimes.append( dt.datetime.strftime(md, to))\n\n return outtimes", "def __preproc_covid(self):\n\n # processar datas\n \n self.covidbr['data'] = pd.to_datetime(self.covidbr['data'], format='%d/%m/%Y')\n self.covidbr['data'] = self.covidbr['data'].astype(pd.DatetimeTZDtype(tz='America/Sao_Paulo'))\n\n # 2020-06-02: O Ministério da Saúde cagou os dados de população na planilha divulgada diariamente.\n # devemos consertá-lo\n # regex: ^((?:\\d{1,3}\\.)*\\d*)\n\n # preencher populacao de areas dos estados que não estão em nenhum município\n\n pop_estados = self.covidbr[(self.covidbr.estado.notnull()) & (self.covidbr.codmun.isnull())].\\\n groupby('coduf').populacaoTCU2019.first()\n df = self.covidbr.merge(right=pop_estados, on='coduf', how='left')\n populacaoTCU2019 = df.populacaoTCU2019_x.where(df.populacaoTCU2019_x.notnull(), other=df.populacaoTCU2019_y)\n\n # filtrar dados espúrios de população\n\n populacaoTCU2019 = populacaoTCU2019.str.extract(r'^((?:\\d{1,3}\\.)*\\d*)', expand=False).\\\n str.replace('.','').astype(float)\n\n # converter tipos\n\n df['populacaoTCU2019'] = populacaoTCU2019\n df.drop(columns=[ 'populacaoTCU2019_' + mergedir for mergedir in ['x', 'y'] ], inplace=True)\n self.covidbr = df\n\n self.covidbr = self.covidbr.astype(\n { converter: 'Int64' for converter in ['coduf', 'codmun', 'codRegiaoSaude', 'populacaoTCU2019',\n 'Recuperadosnovos', 'emAcompanhamentoNovos'] }\n )", "def define_secdate(self):\r\n \r\n # Since 2017\r\n self.start_date = datetime.datetime(2017,1,1) + (datetime.datetime(2017,12,31) - datetime.datetime(2017,1,1))/2 \r\n self.end_date = datetime.datetime(2050,1,1)\r\n self.ktime = (self.end_date - self.start_date).days + 1\r\n self.date = np.zeros(self.ktime,dtype=datetime.datetime)\r\n self.t = np.zeros(self.ktime)\r\n self.dt = 1/365.25\r\n \r\n for k in range(0,self.ktime):\r\n \r\n self.date[k] = self.start_date + datetime.timedelta(days=self.t[k]*365.25)\r\n\r\n if k < self.ktime-1:\r\n \r\n self.t[k+1] = self.t[k] + self.dt\r\n \r\n # Since 1990\r\n self.start_date_hist = datetime.datetime(1990,1,1) + (datetime.datetime(1990,12,31) - datetime.datetime(1990,1,1))/2 \r\n self.ktime_1990_2050 = (self.end_date - self.start_date_hist).days + 1\r\n self.date_1990_2050 = np.zeros(self.ktime_1990_2050,dtype=datetime.datetime)\r\n self.t_1990_2050 = np.zeros(self.ktime_1990_2050)\r\n \r\n for k in range(0,self.ktime_1990_2050):\r\n \r\n self.date_1990_2050[k] = self.start_date_hist + datetime.timedelta(days=self.t_1990_2050[k]*365.25)\r\n \r\n if (self.date_1990_2050[k].year == self.start_date.year and self.date_1990_2050[k].month == self.start_date.month and self.date_1990_2050[k].day == self.start_date.day):\r\n \r\n self.ktime_proj_crossing = k\r\n \r\n \r\n if k < self.ktime-1:\r\n \r\n self.t_1990_2050[k+1] = self.t_1990_2050[k] + self.dt \r\n \r\n return", "def extract_dates(self,dates, tol= 0.05 , in_place=False, verbose=True):\n###################################################################\n\n # import \n import inspect\n import numpy as np\n import pyacs.gts\n\n # check data is not None\n from pyacs.gts.lib.errors import GtsInputDataNone\n \n try:\n if self.data is None:\n # raise exception\n raise GtsInputDataNone(inspect.stack()[0][3],__name__,self)\n except GtsInputDataNone as error:\n # 
print PYACS WARNING\n print( error )\n return( self )\n\n # working gts\n new_gts = self.copy()\n \n # case .data_xyz is None\n \n if new_gts.data_xyz is None:\n new_gts.neu2xyz(corr=True)\n\n else:\n # check data/data_xyz consistency\n try:\n if not new_gts.cdata(data=True):\n # raise exception\n from pyacs.gts.lib.errors import GtsCDataError\n raise GtsCDataError( inspect.stack()[0][3],__name__,self )\n except GtsCDataError as error:\n print( error )\n return( self )\n \n \n new_data=None\n \n # extract dates\n \n index = np.array( pyacs.gts.Gts.get_index_from_dates(dates, self.data, tol=tol) )\n \n if verbose:\n print('-- Extracting ',index.shape[0],' entries from Gts or code: ',self.code)\n \n if index.shape[0] > 0:\n new_data_xyz= self.data_xyz[index,:]\n new_sigma = self.data[index,4:]\n else:\n new_data=None\n if verbose:\n print(\"-- time series \",self.code,\" does not have dates at the requested dates \")\n\n \n # handles outliers\n\n if new_data is not None: \n ldate_outliers=self.data[:,0][self.outliers]\n lupdated_outliers=pyacs.gts.Gts.get_index_from_dates(ldate_outliers, new_data, tol=tol)\n else:\n lupdated_outliers = []\n\n if verbose:\n print('-- Transmitting ',len(lupdated_outliers),' outliers to the extracted Gts ')\n \n # case observations\n \n new_gts.data_xyz = new_data_xyz\n \n # handle outliers\n\n ldate_outliers=self.data[:,0][self.outliers]\n lupdated_outliers=pyacs.gts.Gts.get_index_from_dates(ldate_outliers, new_data_xyz, tol=0.05)\n \n # handles offsets_date\n \n upd_offsets=[]\n for offset_date in self.offsets_dates:\n if offset_date>=new_data_xyz[0,0] and offset_date<=new_data_xyz[-1,0]:\n upd_offsets.append(offset_date)\n \n # handles X0,Y0,Z0\n \n new_gts.X0 = new_data_xyz[0,1]\n new_gts.Y0 = new_data_xyz[0,2]\n new_gts.Z0 = new_data_xyz[0,3]\n \n # re-generate NEU time series\n new_gts.xyz2neu(corr=False)\n\n # re-populate the uncertainties columns\n new_gts.data[:,4:] = new_sigma\n \n # offsets & outliers\n \n new_gts.offsets_dates=upd_offsets\n new_gts.outliers=lupdated_outliers\n \n if in_place:\n self = new_gts\n return(self)\n else:\n return(new_gts)", "def ru_date_time_table_set(host_id, date_time_fields, date_time_param, user_name):\n global sqlalche_obj\n sqlalche_obj.sql_alchemy_db_connection_open()\n result = \"\"\n param = []\n form_name = ['Year', 'Month', 'Day', 'Hour', 'Minute', 'Second']\n dictarr = []\n resultarray = {}\n err1 = [0, 0, 0, 0, 0, 0]\n param.append('year.1')\n param.append('month.1')\n param.append('day.1')\n param.append('hour.1')\n param.append('min.1')\n param.append('sec.1')\n odu16_date_time_table = []\n device_param_list = sqlalche_obj.session.query(Hosts.snmp_version_id, Hosts.snmp_write_community, Hosts.ip_address, Hosts.snmp_port, Hosts.config_profile_id).\\\n filter(Hosts.host_id == host_id).all()\n odu16_date_time_table = sqlalche_obj.session.query(SetOdu16RUDateTimeTable).filter(\n SetOdu16RUDateTimeTable.config_profile_id == device_param_list[0][4]).all()\n for i in range(len(date_time_fields)):\n oidname = oid_name[date_time_fields[i]]\n oidtype = oid_type[date_time_fields[i]]\n oidvalue = date_time_param[i]\n result += snmp_set(\n device_param_list[0][0], device_param_list[0][\n 1], device_param_list[0][2],\n device_param_list[0][3], oidname, oidtype, oidvalue)\n err = error_odu16(result, param, err1)\n try:\n el = EventLog()\n if 1 in err1:\n el.log_event(\n \"Values Updated in UBR RU Date Time Form\", \"%s\" % (user_name))\n for j in range(0, len(date_time_fields)):\n dict = {}\n dict[\"name\"] = 
form_name[j]\n dict[\"value\"] = date_time_param[j]\n dict[\"textbox\"] = date_time_fields[j]\n dict[\"status\"] = err1[j]\n dictarr.append(dict)\n if err1[0] == 1:\n odu16_date_time_table[0].year = date_time_param[0]\n if err1[1] == 1:\n odu16_date_time_table[0].month = date_time_param[1]\n if err1[2] == 1:\n odu16_date_time_table[0].day = date_time_param[2]\n if err1[3] == 1:\n odu16_date_time_table[0].hour = date_time_param[3]\n if err1[4] == 1:\n odu16_date_time_table[0].min = date_time_param[4]\n if err1[5] == 1:\n odu16_date_time_table[0].sec = date_time_param[5]\n sqlalche_obj.session.commit()\n sqlalche_obj.sql_alchemy_db_connection_close()\n if err != '':\n raise Set_exception\n except Set_exception as e:\n resultarray[\"result\"] = dictarr\n resultarray[\"tableName\"] = 'SetOdu16RUDateTimeTable'\n resultarray[\"formAction\"] = 'RU_Date_Time.py'\n sqlalche_obj.sql_alchemy_db_connection_close()\n return str(resultarray)", "def ec_data_processor_precip(path, x='TIMESTAMP_END', y='LE', daily=True):\n\n\n # Get the data from the path and turn the path into a data frame\n # ec_dataset = pd.read_csv(path, header=2)\n\n ec_dataset = pd.read_csv(path, header=2, engine='python')\n\n # print ec_dataset.head()\n print ec_dataset['LE'].head()\n print ec_dataset[ec_dataset[y] != -9999].head()\n # === get rid of no data values in any category of the energy balance ===\n precip_dataset = ec_dataset[ec_dataset['P'] != -9999]\n ec_dataset = ec_dataset[ec_dataset[y] != -9999]\n ec_dataset = ec_dataset[ec_dataset['NETRAD'] != -9999]\n ec_dataset = ec_dataset[ec_dataset['H'] != -9999]\n ec_dataset = ec_dataset[ec_dataset['LE'] != -9999]\n # # You probably won't need these because Marcy Doesn't think they are valid for her towers\n # ec_dataset = ec_dataset[ec_dataset['SH'] != -9999]\n # ec_dataset = ec_dataset[ec_dataset['SLE'] != -9999]\n\n if x.startswith(\"TIMESTAMP\"):\n a = ec_dataset[x].apply(lambda b: dt.strptime(str(b), '%Y%m%d%H%M'))\n aa = precip_dataset[x].apply(lambda d: dt.strptime(str(d), '%Y%m%d%H%M'))\n\n # # TODO - if converting PRISM to MTN time.\n # # Convert to PRISM time (Mtn Standard + 5 hours) PRISM midnight is 12:00 UTC - 7 hours for mountain. Net +5 hrs\n # a = [i + timedelta(hours=19) for i in a]\n # aa = [i + timedelta(hours=19) for i in aa]\n\n\n else:\n a = ec_dataset[x]\n\n # ===== Time Series Processing =====\n\n timeseries = a\n p_timeseries = aa\n # print 'timeseries\\n', timeseries\n Rn = ec_dataset['NETRAD'].values\n H = ec_dataset['H'].values\n LE = ec_dataset['LE'].values\n P = precip_dataset['P']\n print 'P \\n', P\n # indexed_datetimes = pd.DataFrame(pd.DatetimeIndex(timeseries))\n\n # # testing\n # plt.plot(timeseries, P, color='black')\n # plt.show()\n\n # recreate a dataframe of the variables you want to time average on a monthly timestep\n halfhour_data = pd.DataFrame({'timeseries': timeseries, 'Rn': Rn, 'LE': LE, 'H': H}) # took out precip. no good vals? 
'P': P\n\n halfhour_precip = pd.DataFrame({'timeseries': p_timeseries, 'P': P})\n # set the timeseries column to the index so groupby function can group by year and month of the index.\n halfhour_data = halfhour_data.set_index(pd.DatetimeIndex(halfhour_data['timeseries']))\n halfhour_precip = halfhour_precip.set_index(pd.DatetimeIndex(halfhour_precip['timeseries']))\n # convert latent heat to mmH2O by dividing by latent heat of vaporization.\n halfhour_data['mmh20'] = halfhour_data['LE'] * 7.962e-4\n\n if daily:\n\n daily_cum_data = halfhour_data.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum()\n daily_cum_precip = halfhour_precip.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum()\n\n # get each day in the timeseries. there are duplicates from the groupby function, so use set() to get rid of\n # duplicates\n daily_cum_time = daily_time_parse(timeseries)\n daily_cum_precip_time = daily_time_parse(p_timeseries)\n\n # # testing\n # daily_cum_data.to_csv('/Users/dcadol/Desktop/daily_cumulative_df.csv')\n\n # format daily_cum_data to have datetimes\n daily_cum_data['date'] = daily_cum_time\n daily_cum_precip['date'] = daily_cum_precip_time\n\n return daily_cum_data, daily_cum_precip", "def modify_datetime_test(df):\n\n df['pickup_hour'] = pd.to_datetime(df['pickup_datetime']).dt.hour\n df['pickup_minute'] = pd.to_datetime(df['pickup_datetime']).dt.minute\n df['pickup_hour_sin'], df['pickup_hour_cos'] = convert_time_sin_cos(df, 'pickup_hour')\n df['pickup_date'] = pd.to_datetime(df['pickup_datetime']).dt.date\n df['pickup_day'] = pd.to_datetime(df['pickup_datetime']).dt.weekday\n df['pickup_day'] = pd.to_datetime(df['pickup_datetime']).dt.weekday\n df['pickup_weekofyear'] = pd.to_datetime(df['pickup_datetime']).dt.weekofyear\n df[\"month\"] = pd.to_datetime(df['pickup_datetime']).dt.month\n df[\"year\"] = pd.to_datetime(df['pickup_datetime']).dt.year\n return df", "def test_fill_data_with_one_date(self):\n # date = pd.to_datetime('2015-06-30')\n date = pd.to_datetime('2011-05-09')\n print 'testing date: %s' % date.strftime('%Y-%m-%d')\n self.full_iv.get_data()\n\n # df_date = self.full_iv.df_all.query('date == %r' % date)\n # df_date = df_date[['date', 'dte', 'mark', 'strike', 'impl_vol']]\n # print df_date.sort_values(['dte', 'strike']).to_string(line_width=1000)\n\n self.full_iv.df_stock = self.full_iv.df_stock[date:date]\n df_iv = self.full_iv.calc_iv()\n\n print df_iv\n\n self.assertTrue(len(df_iv))", "def OPCtimetransform(data, to):\n \n remove_times = []\n outtimes = []\n times = {'ms':[],'SS':[],'MM':[],'HH':[]}\n\n for i in range(0, len(data)):\n times['HH'] = 0\n times['MM'] = 0\n times['SS'] = 0\n times['ms'] = 0\n\n item = data[i]\n \n try:\n if len(item.split('.')[1]) < 2:\n item += '0'\n except IndexError:\n item += '.00'\n if len(item) < 9:\n item = item.zfill(9)\n if int(item[:2]) > 23:\n item = '0' + item\n \n # remove items with extra zero (2319010.00 to 231910)\n if len(item) > 9:\n olditem = item\n newitem = item[:4] + item[5:]\n print( ('Repairing strange value %s into %s')%(olditem, newitem) )\n item = newitem\n else:\n pass\n try:\n md = dt.datetime.strptime(item, \"%H%M%S.%f\")\n \n # round off items which exceed 59 minutes or 59 seconds \n # (i.e. 
146001 to 150001.)\n except ValueError:\n \n try:\n times['HH'] = int(item[0:2])\n times['MM'] = int(item[2:4])\n times['SS'] = int(item[4:6])\n times['ms'] = int(item[7:9])\n except ValueError:\n print(i, item)\n\n if times['SS'] > 59:\n times['MM'] += 1\n times['SS'] = 0\n if times['MM'] > 59:\n times['HH'] += 1\n times['MM'] = 0\n # discard items which exceed 23 hours\n if times['HH'] > 23:\n times['HH'] = 23\n print( ('resetting value %s')%(item) )\n \n\n md = dt.datetime(1900,1,1,times['HH'], times['MM'], times['SS']) \n\n \n outtimes.append( dt.datetime.strftime(md, to) )\n\n return outtimes", "def execute_cast_datetime_to_integer(op, data, type, **kwargs):\n return pd.Timestamp(data).value", "def manual_930_adjust(raw: pd.DataFrame):\n # SC offset = UTC <-> Eastern offset\n sc_offsets = (\n raw.index.tz_convert(\"US/Eastern\").to_series().apply(lambda s: s.utcoffset())\n )\n # After Dec 31, 2020, the offset is 0\n sc_offsets[\"2020-12-31 00:00:00+00\":] = timedelta(0)\n # make new data so we don't mess up other data indexing\n sc_dat = raw[get_columns(\"SC\", raw.columns)].copy()\n sc_idx = pd.DatetimeIndex(sc_dat.index + sc_offsets) # make shifted dates\n sc_dat.index = sc_idx # use shifted dates\n sc_dat = sc_dat[~sc_dat.index.duplicated(keep=\"first\")]\n # exchange old rows with new\n raw = raw.drop(columns=sc_dat.columns)\n raw = pd.concat([raw, sc_dat], axis=\"columns\")\n\n # PJM, CISO, TEPC: shift by one hour\n for ba in [\"PJM\", \"CISO\", \"TEPC\"]:\n cols = get_columns(ba, raw.columns)\n new = raw[cols].shift(1, freq=\"H\")\n raw = raw.drop(columns=cols)\n raw = pd.concat([raw, new], axis=\"columns\")\n\n # Interchange sign. Do before we change interchange time for PJM, because\n # identification of sign shift is based on raw data\n cols = get_int_columns(\n \"PJM\", raw.columns, [\"CPLE\", \"CPLW\", \"DUK\", \"LGEE\", \"MISO\", \"NYIS\", \"TVA\"]\n )\n raw.loc[raw.index < \"2019-10-31 04:00:00+00\", cols] = (\n raw.loc[raw.index < \"2019-10-31 04:00:00+00\", cols] * -1\n )\n\n # Interchange AZPS - SRP is wonky before 6/1/2020 7:00 UTC. 
Use SRP - AZPS (inverted)\n azps_srp = get_int_columns(\"AZPS\", raw.columns, [\"SRP\"])\n srp_azps = get_int_columns(\"SRP\", raw.columns, [\"AZPS\"])\n replacement = (raw.loc[:, srp_azps] * (-1)).rename(\n columns={srp_azps[0]: azps_srp[0]} # rename so Pandas will do the right thing\n )\n raw.loc[:\"2020-06-01 07:00:00+00\", azps_srp] = replacement[\n :\"2020-06-01 07:00:00+00\"\n ]\n # Update total interchange\n all_cols = [c for c in get_int_columns(\"AZPS\", raw.columns) if \"ALL\" not in c]\n total_col = \"EBA.AZPS-ALL.TI.H\"\n raw.loc[:\"2020-06-01 07:00:00+00\", total_col] = raw.loc[\n :\"2020-06-01 07:00:00+00\", all_cols\n ].sum(axis=1)\n\n # Interchange TEPC is uniformly lagged\n cols = get_int_columns(\"TEPC\", raw.columns)\n new = raw[cols].shift(-7, freq=\"H\")\n raw = raw.drop(columns=cols)\n raw = pd.concat([raw, new], axis=\"columns\")\n\n # Interchange PJM is lagged differently across DST boundary\n is_dst = raw.index.tz_convert(\"US/Eastern\").to_series().apply(\n lambda s: s.utcoffset()\n ) == timedelta(hours=-4)\n pjm_offset = [\n timedelta(hours=-3) if is_d else timedelta(hours=-4) for is_d in is_dst\n ]\n\n # make new data so we don't mess up other data indexing\n pjm_dat = raw[\n get_int_columns(\n \"PJM\",\n raw.columns,\n [\"CPLE\", \"CPLW\", \"DUK\", \"LGEE\", \"MISO\", \"NYIS\", \"TVA\", \"ALL\"],\n )\n ].copy()\n # make shifted dates\n pjm_idx = pd.DatetimeIndex(pjm_dat.index + pd.Series(pjm_offset))\n pjm_dat.index = pjm_idx # use shifted dates\n # delete duplicates\n pjm_dat = pjm_dat[~pjm_dat.index.duplicated(keep=\"first\")]\n # exchange old rows with new\n raw = raw.drop(columns=pjm_dat.columns)\n raw = pd.concat([raw, pjm_dat], axis=\"columns\")\n\n # Shift all -1 hour to make start-of-hour\n return raw.shift(-1, freq=\"H\")", "def batch_process_dt():\r\n caseno_list = []\r\n for acc in acc_col.find({'n_acc_date':{'$exists':False}}):\r\n #for acc in acc_col.find():\r\n ws = acc['acc_date'].split('/')\r\n MM = int(ws[0])\r\n DD = int(ws[1])\r\n YYYY = int(ws[2])\r\n\r\n time_str = acc['time']\r\n colon_pos = time_str.index(\":\")\r\n hour = int( time_str[0:colon_pos] )\r\n minute = int( time_str[colon_pos+1:time_str.index(' ', colon_pos)])\r\n\r\n if acc['time'].count('AM') > 0:\r\n if hour == 12:\r\n hour = 0\r\n else:\r\n if hour != 12:\r\n hour += 12\r\n\r\n seconds = hour * 60*60 + minute*60\r\n #print acc, YYYY, MM, DD, hour, minute\r\n\r\n timestamp = (datetime( YYYY, MM, DD, hour, minute ) - \\\r\n datetime(1970, 1, 1, )).total_seconds()+1\r\n #print acc, timestamp, seconds, date.fromtimestamp(timestamp)\r\n #break\r\n acc_col.update(\r\n {'caseno': acc['caseno']},\r\n {'$set':{\r\n 'n_acc_date': timestamp,\r\n 'n_time': seconds\r\n }})", "def date_cleaner(dataset):\n dataset['document_last_edition'] = dataset['meta_lastEdition']\n dataset = dataset.drop(['meta_lastEdition'], axis=1)\n \n \n \"\"\"\n Get column to correct date format\n \"\"\"\n dataset['document_last_edition'] = dataset['document_last_edition'].apply(lambda x: str(unify_date_format(x))[:10]) \n \n \n \"\"\"\n meta_lastPublication renaming\n \"\"\"\n dataset['document_last_publication'] = dataset['meta_lastPublication']\n dataset = dataset.drop(['meta_lastPublication'], axis=1)\n\n # DROP HOURS/M/S\n dataset['document_last_publication'] = dataset['document_last_publication'].apply(lambda x: str(unify_date_format(x))[:10]) \n \n \n # META CREATED DATE\n dataset['meta_created_date'] = dataset['meta_created_date'].str.replace('_', '-')\n dataset['meta_created_date'] = 
dataset['meta_created_date'].apply(lambda x: str(unify_date_format(x))[:10])\n dataset['document_created_at'] = dataset['meta_created_date']\n dataset = dataset.drop(['meta_created_date'], axis=1)\n\n # META_REVISED_MODIFIED\n dataset['document_revised_modified'] = dataset['meta_revised_modified']\n dataset = dataset.drop(['meta_revised_modified'], axis=1) \n \n \n date_column_list = ['document_created_at','document_last_edition', 'document_last_publication', 'document_revised_modified']\n \n \"\"\"\n \n THE PLAN IS TO FIRST REPLACE EMPTY SPOTS IN META_CREATED_DATE WITH CREATED_AT\n THEN WE DROP CREATED_AT\n THEN WE REPLACE EMPTY SPOTS IN OTHER COLUMNS WITH document_created_at\n \"\"\" \n \n dataset[date_column_list] = dataset[date_column_list].replace('Not Specified', np.nan)\n dataset[date_column_list] = dataset[date_column_list].replace('Not Specif', np.nan)\n dataset[date_column_list] = dataset[date_column_list].replace('nan', np.nan) \n dataset['document_created_at'].fillna(dataset['created_at'], inplace=True) \n dataset = dataset.drop(['created_at'], axis=1)\n \n dataset['document_last_edition'].fillna(dataset['document_created_at'], inplace=True)\n dataset['document_last_publication'].fillna(dataset['document_created_at'], inplace=True)\n dataset['document_revised_modified'].fillna(dataset['document_created_at'], inplace=True)\n \n \n\n \n \"\"\"\n FIXING NON-EXISTING DATES IN DATASET\n \"\"\"\n \n dataset = dataset.replace(['2020-1-29'], ['2020-01-29'])\n \n \n \n created_at_unique = list(dataset['document_created_at'].unique())\n last_edition_unique = list(dataset['document_last_edition'].unique())\n last_publication_unique = list(dataset['document_last_publication'].unique())\n revised_modified_unique = list(dataset['document_revised_modified'].unique())\n \n \n # IF LIST NEED TO GET UPDATED\n invalid_created_at = is_valid_date(created_at_unique)\n invalid_last_edition_unique = is_valid_date(last_edition_unique)\n invalid_last_publication_unique = is_valid_date(last_publication_unique)\n invalid_revised_modified_unique = is_valid_date(revised_modified_unique) \n invalid_dates = list(set(itertools.chain(invalid_created_at, invalid_last_edition_unique, invalid_last_publication_unique, invalid_revised_modified_unique)))\n \n \n \n \n # Non-existing dates from the list\n dataset = dataset.replace(['2019-04-31', '2016-11-31', '2019-09-31', '2015-02-31', '2017-04-31', '2015-11-31', '2015-09-31', '2017-02-29', '2018-09-31', '2017-06-31', '2018-04-31', '2015-04-31', '2018-11-31', '2017-09-31', '2015-02-29', '2019-02-29', '2019-06-31', '2018-02-29', '2016-02-30', '2016-06-31', '2016-09-31', '2018-06-31', '2019-18-03', '2020-02-31', '9999-12-31'], \n ['2019-04-30', '2016-11-30', '2019-09-30', '2015-02-28', '2017-04-30', '2015-11-30', '2015-09-30', '2017-02-28', '2018-09-30', '2017-06-30', '2018-04-30', '2015-04-30', '2018-11-30', '2017-09-30', '2015-02-28', '2019-02-28', '2019-06-30', '2018-02-28', '2016-02-28', '2016-06-30', '2016-09-30', '2018-06-30', '2019-03-18', '2020-02-28', '1999-12-31'])\n\n\n \n \n\n\n return dataset", "def main():\n date_time_conversion('2018-12-30T09:37:56.000001Z', '2020-07-12T07:56:43.000001Z', 0, 0, 0, 0)", "def to_datetime(data, keep_dates=False):\n # Date series\n dates_listen = pd.to_datetime(data['ts_listen'], unit='s')\n dates_media = pd.to_datetime(data['release_date'], format=\"%Y%m%d\")\n\n # When the listen dates are before the release dates\n idx_wrong_dates = np.where(dates_listen < dates_media)[0]\n for idx in idx_wrong_dates:\n dates_listen[idx] 
= dates_listen[idx].replace(\n day=dates_media[idx].day, \n month=dates_media[idx].month, \n year=dates_media[idx].year)\n\n # Replace the year of the date before the creation of deezer...\n tmp = dates_listen.dt.year < 2016\n dates_listen[tmp] = dates_listen[tmp].apply(lambda dt: dt.replace(year=2016))\n\n # Add the new features and drop the previous ones\n data.drop(['ts_listen', 'release_date'], axis=1, inplace=True)\n data['day_listen'] = dates_listen.dt.weekday\n data['hour_listen'] = dates_listen.dt.hour\n data['year_media'] = dates_media.dt.year\n\n if keep_dates:\n data['dt_listen'] = dates_listen\n data['dt_media'] = dates_media", "def f_precios_masivos(p0_fini, p1_ffin, p2_gran, p3_inst, p4_oatk, p5_ginc):\n\n def f_datetime_range_fx(p0_start, p1_end, p2_inc, p3_delta):\n \"\"\"\n Parameters\n ----------\n p0_start\n p1_end\n p2_inc\n p3_delta\n Returns\n -------\n ls_resultado\n Debugging\n ---------\n \"\"\"\n\n ls_result = []\n nxt = p0_start\n\n while nxt <= p1_end:\n ls_result.append(nxt)\n if p3_delta == 'minutes':\n nxt += timedelta(minutes=p2_inc)\n elif p3_delta == 'hours':\n nxt += timedelta(hours=p2_inc)\n elif p3_delta == 'days':\n nxt += timedelta(days=p2_inc)\n\n return ls_result\n\n # inicializar api de OANDA\n\n api = API(access_token=p4_oatk)\n\n gn = {'S30': 30, 'S10': 10, 'S5': 5, 'M1': 60, 'M5': 60 * 5, 'M15': 60 * 15,\n 'M30': 60 * 30, 'H1': 60 * 60, 'H4': 60 * 60 * 4, 'H8': 60 * 60 * 8,\n 'D': 60 * 60 * 24, 'W': 60 * 60 * 24 * 7, 'M': 60 * 60 * 24 * 7 * 4}\n\n # -- para el caso donde con 1 peticion se cubran las 2 fechas\n if int((p1_ffin - p0_fini).total_seconds() / gn[p2_gran]) < 4999:\n\n # Fecha inicial y fecha final\n f1 = p0_fini.strftime('%Y-%m-%dT%H:%M:%S')\n f2 = p1_ffin.strftime('%Y-%m-%dT%H:%M:%S')\n\n # Parametros pra la peticion de precios\n params = {\"granularity\": p2_gran, \"price\": \"M\", \"dailyAlignment\": 16, \"from\": f1,\n \"to\": f2}\n\n # Ejecutar la peticion de precios\n a1_req1 = instruments.InstrumentsCandles(instrument=p3_inst, params=params)\n a1_hist = api.request(a1_req1)\n\n # Para debuging\n # print(f1 + ' y ' + f2)\n lista = list()\n\n # Acomodar las llaves\n for i in range(len(a1_hist['candles']) - 1):\n lista.append({'TimeStamp': a1_hist['candles'][i]['time'],\n 'Open': a1_hist['candles'][i]['mid']['o'],\n 'High': a1_hist['candles'][i]['mid']['h'],\n 'Low': a1_hist['candles'][i]['mid']['l'],\n 'Close': a1_hist['candles'][i]['mid']['c']})\n\n # Acomodar en un data frame\n r_df_final = pd.DataFrame(lista)\n r_df_final = r_df_final[['TimeStamp', 'Open', 'High', 'Low', 'Close']]\n r_df_final['TimeStamp'] = pd.to_datetime(r_df_final['TimeStamp'])\n r_df_final['Open'] = pd.to_numeric(r_df_final['Open'], errors='coerce')\n r_df_final['High'] = pd.to_numeric(r_df_final['High'], errors='coerce')\n r_df_final['Low'] = pd.to_numeric(r_df_final['Low'], errors='coerce')\n r_df_final['Close'] = pd.to_numeric(r_df_final['Close'], errors='coerce')\n\n return r_df_final\n\n # -- para el caso donde se construyen fechas secuenciales\n else:\n\n # hacer series de fechas e iteraciones para pedir todos los precios\n fechas = f_datetime_range_fx(p0_start=p0_fini, p1_end=p1_ffin, p2_inc=p5_ginc,\n p3_delta='minutes')\n\n # Lista para ir guardando los data frames\n lista_df = list()\n\n for n_fecha in range(0, len(fechas) - 1):\n\n # Fecha inicial y fecha final\n f1 = fechas[n_fecha].strftime('%Y-%m-%dT%H:%M:%S')\n f2 = fechas[n_fecha + 1].strftime('%Y-%m-%dT%H:%M:%S')\n\n # Parametros pra la peticion de precios\n params = {\"granularity\": 
p2_gran, \"price\": \"M\", \"dailyAlignment\": 16, \"from\": f1,\n \"to\": f2}\n\n # Ejecutar la peticion de precios\n a1_req1 = instruments.InstrumentsCandles(instrument=p3_inst, params=params)\n a1_hist = api.request(a1_req1)\n\n # Para debuging\n print(f1 + ' y ' + f2)\n lista = list()\n\n # Acomodar las llaves\n for i in range(len(a1_hist['candles']) - 1):\n lista.append({'TimeStamp': a1_hist['candles'][i]['time'],\n 'Open': a1_hist['candles'][i]['mid']['o'],\n 'High': a1_hist['candles'][i]['mid']['h'],\n 'Low': a1_hist['candles'][i]['mid']['l'],\n 'Close': a1_hist['candles'][i]['mid']['c']})\n\n # Acomodar en un data frame\n pd_hist = pd.DataFrame(lista)\n pd_hist = pd_hist[['TimeStamp', 'Open', 'High', 'Low', 'Close']]\n pd_hist['TimeStamp'] = pd.to_datetime(pd_hist['TimeStamp'])\n\n # Ir guardando resultados en una lista\n lista_df.append(pd_hist)\n\n # Concatenar todas las listas\n r_df_final = pd.concat([lista_df[i] for i in range(0, len(lista_df))])\n\n # resetear index en dataframe resultante porque guarda los indices del dataframe pasado\n r_df_final = r_df_final.reset_index(drop=True)\n r_df_final['Open'] = pd.to_numeric(r_df_final['Open'], errors='coerce')\n r_df_final['High'] = pd.to_numeric(r_df_final['High'], errors='coerce')\n r_df_final['Low'] = pd.to_numeric(r_df_final['Low'], errors='coerce')\n r_df_final['Close'] = pd.to_numeric(r_df_final['Close'], errors='coerce')\n\n return r_df_final", "def update_dates(start_date, end_date, freq):\n if (freq == \"MS\") or (freq == \"M\"):\n try:\n start_date = start_date.split(\"/\")\n end_date = end_date.split(\"/\")\n except AttributeError:\n start_date = [start_date.month, start_date.day, start_date.year]\n end_date = [end_date.month, end_date.day, end_date.year]\n if int(end_date[1]) < 22:\n\n if int(end_date[0]) == 1:\n end_month = 12\n end_year = int(end_date[2]) - 1\n else:\n end_month = int(end_date[0]) - 1\n end_year = end_date[2]\n\n end_date[0] = end_month\n end_date[2] = end_year\n\n start_date = pd.to_datetime(f\"{start_date[0]}/01/{start_date[2]}\")\n\n end_date = pd.to_datetime(\n f\"{end_date[0]}/{calendar.monthrange(int(end_date[2]),int(end_date[0]))[1]}/{end_date[2]}\"\n )\n\n if (freq == \"QS\") or (freq == \"Q\"):\n start_date = (pd.to_datetime(start_date) + pd.tseries.offsets.DateOffset(days=1)) - pd.offsets.QuarterBegin(\n startingMonth=1\n )\n end_date = (pd.to_datetime(end_date) + pd.tseries.offsets.DateOffset(days=1)) - pd.offsets.QuarterEnd()\n\n return (start_date, end_date)", "def _dataframe_preprocess(self):\n # 1. add baisc feature like date, time in day, ....\n if self.data_type != 'porto':\n self.df['TIMESTAMP'] = self.df.apply(lambda df: df['TIMESTAMPS'][0], axis=1)\n self.df['TIME'] = pd.to_datetime(self.df['TIMESTAMP'], unit='s', utc=True)\n \n self.df.TIME = self.df.TIME.dt.tz_convert(self.timezone)\n # 2. 
group df for specific driver analysis\n self.grouped_df = self.df.groupby('LABEL')\n if self.count_od_info:\n if 'SD' not in self.df.columns:\n self._add_OD_info()\n self.grouped_od = self.df.groupby('SD')", "def up_date(dte, r_quant, str_unit, bln_post_colon):\n if str_unit == 'w':\n dte += timedelta(weeks=r_quant)\n elif str_unit == 'd':\n dte += timedelta(days=r_quant)\n elif str_unit == 'h':\n dte += timedelta(hours=r_quant)\n elif str_unit == 'm':\n dte += timedelta(minutes=r_quant)\n elif str_unit in ('Y', 'y'):\n if r_quant > 500: # jul 2019 vs jul 17\n r_year = r_quant\n else:\n r_year = datetime.now().year + r_quant\n try:\n dte = datetime.replace(dte, year=int(r_year))\n except ValueError:\n dte = datetime.replace(dte, day=28, month=2,\n year=int(datetime.now().year + r_quant))\n elif str_unit == 'H':\n dte = datetime.replace(dte, hour=int(r_quant), second=0, microsecond=0)\n elif str_unit == 'M':\n dte = datetime.replace(dte, minute=int(r_quant),\n second=0, microsecond=0)\n elif str_unit == 'a':\n if not bln_post_colon:\n dte = datetime.replace(dte, hour=int(r_quant), minute=0,\n second=0, microsecond=0)\n elif str_unit == 'p':\n if bln_post_colon: # adjust by 12 hours if necessary\n if dte.hour < 12:\n dte = datetime.replace(dte, hour=dte.hour+12)\n else:\n p_quant = r_quant\n if p_quant < 12:\n p_quant += 12\n dte = datetime.replace(dte, hour=int(p_quant), minute=0,\n second=0, microsecond=0)\n elif (len(str_unit) >= 3) and (STR_MONTHS.find(str_unit) != -1):\n dte = datetime.replace(dte, month=(STR_MONTHS.index(str_unit) + 3)/3,\n day=int(r_quant), second=0, microsecond=0)\n # refers to this year or next year ? (assume not past)\n dte_today = datetime.today().replace(hour=0, minute=0, \\\n second=0, microsecond=0)\n if dte < dte_today:\n dte = dte.replace(year=(dte_today.year+1))\n return dte", "def main(argv):\n\n inputfile = ''\n outputfile = ''\n try:\n opts, args = getopt.getopt(argv,\"hi:o:\",[\"ifile=\",\"ofile=\"])\n except getopt.GetoptError:\n print( 'usage: fix_icar_time.py -i <inputfile> -o <outputfile>' )\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print( 'use: fix_icar_time.py -i <inputfile> -o <outputfile>' )\n sys.exit()\n elif opt in (\"-i\", \"--ifile\"):\n inputfile = arg\n elif opt in (\"-o\", \"--ofile\"):\n outputfile = arg\n\n print( 'Input file is ', inputfile)\n print( 'Output file is ', outputfile)\n\n\n #### Option A: less elegant, but more robust: ####\n #_______ open the icar file: ______\n FIX = xr.open_dataset( inputfile )\n \n\n #_______ create the correct times: ______\n tstring = inputfile[inputfile.find('out_')+4:inputfile.rfind(\".nc\")]\n\n times2 = pd.date_range(tstring[:10], periods=len(FIX.time), freq='H')\n \n FIX['time'] = times2\n \n \n #_______ Write the fixed Dataset to nc file: _________\n \n if outputfile == '':\n out_path = inputfile\n else:\n out_path = outputfile \n \n FIX.to_netcdf( path=out_path, mode='w', encoding={'time': {'dtype': 'i4'}}) \n\n\n\n ########### Option B: only modify units, but this doesnt always work as sometimes it is only the first hour, sometimes more hours. 
########\n # #_______ open the icar file: ______\n # FIX = xr.open_dataset( inputfile , decode_times=False) \n \n # #_______ create the correct times: ______\n # units = FIX.time.units\n # tstring = inputfile[inputfile.find('out_')+4:inputfile.rfind(\".nc\")]\n # # create right time based on file name:\n # FIX['time'].attrs['units'] = units[:units.find('since')+6] + tstring[:10]", "def csv_handle_changedate(self,col_name,col_type):\n table = self.csv_dataframe\n if col_type == 'date':\n table[col_name] = pd.to_datetime(table[col_name]).dt.date\n elif col_type == 'datetime':\n table[col_name] = pd.to_datetime(table[col_name]).dt.to_pydatetime()\n elif col_type == 'year':\n table[col_name] = pd.to_datetime(table[col_name].apply(lambda x: str(x)+'/1/1')).dt.date", "def convertData(data):\n for candle in data['candles']:\n candle['date'],candle['time'] = convertToEST(candle['datetime'])\n\n return data", "def updateDateValues(self):\n kwargs = {\"cdb_project_id\": self.project.cdb_project_id}\n cca = Project.MakeChangeControlAttributes()\n kwargs.update(cdb_mdate=sqlapi.SQLdbms_date(cca[u\"cdb_mdate\"]))\n kwargs.update(cdb_mpersno=cca[u\"cdb_mpersno\"])\n\n update_gap_by_view = \"\"\"cdbpcs_taskrel_gaps_v SET gap = new_gap\n WHERE pred_pid = '%(cdb_project_id)s'\n OR succ_pid = '%(cdb_project_id)s'\"\"\" % kwargs\n update_gap_by_select = \"\"\"cdbpcs_taskrel SET gap =\n (SELECT CASE\n WHEN cdbpcs_taskrel.rel_type = 'AA' THEN b.start_time_fcast_offset - a.start_time_fcast_offset\n WHEN cdbpcs_taskrel.rel_type = 'AE' THEN b.end_time_fcast_offset - a.start_time_fcast_offset\n WHEN cdbpcs_taskrel.rel_type = 'EA' THEN b.start_time_fcast_offset - a.end_time_fcast_offset\n WHEN cdbpcs_taskrel.rel_type = 'EE' THEN b.end_time_fcast_offset - a.end_time_fcast_offset\n ELSE 0 END +\n CASE\n WHEN a.milestone = 1 AND b.milestone = 1 AND a.early_position = 0 AND b.early_position = 1 THEN -1\n WHEN a.milestone = 1 AND b.milestone = 0 AND a.early_position = 0 AND cdbpcs_taskrel.rel_type IN ('EA', 'AA') THEN -1\n WHEN a.milestone = 0 AND b.milestone = 1 AND b.early_position = 1 AND cdbpcs_taskrel.rel_type IN ('EA', 'EE') THEN -1\n WHEN a.milestone = 0 AND b.milestone = 0 AND cdbpcs_taskrel.rel_type IN ('EA') THEN -1\n ELSE 0 END +\n CASE\n WHEN a.status = 180 THEN a.days_fcast\n ELSE 0 END\n FROM cdbpcs_task a, cdbpcs_task b\n WHERE cdbpcs_taskrel.cdb_project_id2 = a.cdb_project_id\n AND cdbpcs_taskrel.task_id2 = a.task_id\n AND cdbpcs_taskrel.cdb_project_id = b.cdb_project_id\n AND cdbpcs_taskrel.task_id = b.task_id)\n WHERE cdb_project_id2 = '%(cdb_project_id)s'\n OR cdb_project_id = '%(cdb_project_id)s'\n \"\"\" % kwargs\n\n update_gap_stmt = {\n sqlapi.DBMS_SQLITE: update_gap_by_select,\n sqlapi.DBMS_MSSQL: update_gap_by_view,\n sqlapi.DBMS_ORACLE: update_gap_by_view,\n }\n\n updates = [\n \"\"\"cdbpcs_project\n SET start_time_plan = (SELECT CASE\n WHEN MIN(cdbpcs_task.start_time_fcast) < MIN(cdbpcs_task.start_time_plan)\n THEN MIN(cdbpcs_task.start_time_fcast)\n ELSE MIN(cdbpcs_task.start_time_plan)\n END\n FROM cdbpcs_task\n WHERE cdbpcs_task.cdb_project_id = '%(cdb_project_id)s'\n AND cdbpcs_task.parent_task = ''\n ), end_time_plan = (SELECT CASE\n WHEN MAX(cdbpcs_task.end_time_fcast) > MAX(cdbpcs_task.end_time_plan)\n THEN MAX(cdbpcs_task.end_time_fcast)\n ELSE MAX(cdbpcs_task.end_time_plan)\n END\n FROM cdbpcs_task\n WHERE cdbpcs_task.cdb_project_id = '%(cdb_project_id)s'\n AND cdbpcs_task.parent_task = ''\n ), cdb_mdate = %(cdb_mdate)s,\n cdb_mpersno = '%(cdb_mpersno)s'\n WHERE cdb_project_id = 
'%(cdb_project_id)s'\n \"\"\" % kwargs,\n (\"\"\"cdbpcs_project\n SET start_time_fcast = start_time_plan,\n end_time_fcast = end_time_plan,\n days_fcast = days,\n duration_fcast = duration,\n cdb_mdate = %(cdb_mdate)s,\n cdb_mpersno = '%(cdb_mpersno)s'\n WHERE cdb_project_id = '%(cdb_project_id)s'\n \"\"\" % kwargs) if self.project.auto_update_time else None,\n \"\"\"cdbpcs_task\n SET total_float = late_finish_offset - end_time_fcast_offset,\n cdb_mdate = %(cdb_mdate)s,\n cdb_mpersno = '%(cdb_mpersno)s'\n WHERE cdb_project_id = '%(cdb_project_id)s'\n \"\"\" % kwargs,\n update_gap_stmt[sqlapi.SQLdbms()],\n \"\"\"cdbpcs_task\n SET cdb_mdate = %(cdb_mdate)s,\n cdb_mpersno = '%(cdb_mpersno)s'\n WHERE cdb_project_id = '%(cdb_project_id)s'\n AND task_id IN (SELECT task_id FROM cdbpcs_taskrel\n WHERE cdb_project_id = '%(cdb_project_id)s'\n AND (violation = 0 AND minimal_gap > gap\n OR violation = 1 AND minimal_gap <= gap)\n UNION\n SELECT task_id2 FROM cdbpcs_taskrel\n WHERE cdb_project_id2 = '%(cdb_project_id)s'\n AND (violation = 0 AND minimal_gap > gap\n OR violation = 1 AND minimal_gap <= gap))\n \"\"\" % kwargs,\n \"\"\"cdbpcs_taskrel\n SET violation = CASE\n WHEN minimal_gap <= gap\n THEN 0\n ELSE 1\n END\n WHERE cdb_project_id = '%(cdb_project_id)s'\n OR cdb_project_id2 = '%(cdb_project_id)s'\n \"\"\" % kwargs,\n ]\n for upd in updates:\n if upd:\n sqlapi.SQLupdate(upd)", "def process(self, inputs):\n df = cudf.read_csv(self.conf['path'])\n # extract the year, month, day\n ymd = df['DTE'].astype('str').str.extract(r'(\\d\\d\\d\\d)(\\d\\d)(\\d\\d)')\n # construct the standard datetime str\n df['DTE'] = ymd[0].str.cat(ymd[1],\n '-').str.cat(ymd[2],\n '-').astype('datetime64[ms]')\n df = df[['DTE', 'OPEN', 'CLOSE', 'HIGH', 'LOW', 'SM_ID', 'VOLUME']]\n df['VOLUME'] /= 1000\n # change the names\n df.columns = ['datetime', 'open', 'close',\n 'high', 'low', \"asset\", 'volume']\n return df" ]
[ "0.6125357", "0.5819586", "0.5773256", "0.57689685", "0.57060605", "0.5502692", "0.55019426", "0.5471219", "0.54283524", "0.54059875", "0.54014325", "0.53956264", "0.53769857", "0.5311041", "0.5306806", "0.5298646", "0.52898556", "0.52759445", "0.5245434", "0.5235692", "0.5184599", "0.51796985", "0.51618207", "0.5158366", "0.5157592", "0.51211655", "0.5119529", "0.5109756", "0.50704736", "0.5057026" ]
0.6865202
0
Test whether every element in the series is either missing or in values. This is fiddly because isin() changes behavior if the series is totally NaN (because of type issues)
def _all_na_or_values(series, values): series_excl_na = series[series.notna()] if not len(series_excl_na): out = True elif series_excl_na.isin(values).all(): out = True else: out = False return out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nan_value(data):\n return data.isnull().any()", "def is_empty(series):\n return series.isna().all()", "def checkNaN(data):\n if data.isnull().values.any():\n N = data.isnull().sum().sum()\n print(\"There are {} missing values.\".format(N))", "def pd_isnan(val):\n return val is None or val != val", "def has_nan(a):\n return np.any(np.isnan(a))", "def is_in(self, e):\n return e in self.vals", "def is_isin(value):\n return True", "def has_nan(self) -> bool:\n\n return any([x is np.NaN for x in self.values])", "def isnan(self):\n return self.isAny( (lambda x: np.isnan(x)) )", "def checkfornan(chosen_df):\n if not chosen_df.isnull().values.any():\n raise ValueError('NaN in DataFrame')", "def check_missing_values(col):\n return np.sum(np.isnan(col))", "def is_nan(x):\n return (x is np.nan or x != x)", "def all_finite(x):\n return np.isfinite(np.min(x)) and np.isfinite(np.max(x))", "def isin(self, values, ignore_indices=False, **kwargs): # noqa: PR02\n shape_hint = kwargs.pop(\"shape_hint\", None)\n if isinstance(values, type(self)) and ignore_indices:\n # Pandas logic is that it ignores indexing if 'values' is a 1D object\n values = values.to_pandas().squeeze(axis=1)\n if shape_hint == \"column\":\n return SeriesDefault.register(pandas.Series.isin)(self, values, **kwargs)\n else:\n return DataFrameDefault.register(pandas.DataFrame.isin)(\n self, values, **kwargs\n )", "def gdx_isnan(val,gdxf):\n return val in [SPECIAL_VALUES[0], SPECIAL_VALUES[1]]", "def asin_array(values): # pragma: no cover\n result = np.empty_like(values, dtype=nb.float64)\n flat_result = result.flat\n flat_values = values.flat\n for i in range(values.size):\n flat_result[i] = asin(flat_values[i])\n return result", "def isnan(x):\n return False", "def pd_val_equal(val1, val2):\n return pd_isnan(val1) and pd_isnan(val2) or val1 == val2", "def _check_nan(self, vector):\n return np.isnan(vector).sum() > 0", "def isfinite(self):\n return not self.isAny( (lambda x: not np.isfinite(x)) )", "def ISNA(value):\n return isinstance(value, float) and math.isnan(value)", "def is_continuous(series: List) -> bool:\n\n if series.dtype in [\n np.int16,\n np.int32,\n np.int64,\n np.float16,\n np.float32,\n np.float64,\n int,\n float,\n ]:\n if (\n len(series.astype(int).unique()) / len(series) == 1\n or \"id\" == series.name.lower()\n ):\n return False\n\n elif sorted(series.unique()) == [0, 1]:\n return False\n elif len(series.unique()) == 1:\n return False\n\n else:\n return True\n else:\n\n return False", "def handle_missing_values(dataset, missing_values_header, missing_label):\n\n return dataset[dataset[missing_values_header] != missing_label]", "def isin(self, values: Union[list, dict, IColumn]):\n if isinstance(values, list):\n return self._fromdata(\n {\n self.dtype.fields[i]\n .name: ColumnFromVelox.from_velox(\n self.device,\n self.dtype.fields[i].dtype,\n self._data.child_at(i),\n True,\n )\n .isin(values)\n for i in range(self._data.children_size())\n },\n self._mask,\n )\n if isinstance(values, dict):\n self._check_columns(values.keys())\n return self._fromdata(\n {n: c.isin(values[n]) for n, c in self._field_data.items()}\n )\n if isinstance(values, IDataFrame):\n self._check_columns(values.columns)\n return self._fromdata(\n {n: c.isin(values=list(values[n])) for n, c in self._field_data.items()}\n )\n else:\n raise ValueError(\n f\"isin undefined for values of type {type(self).__name__}.\"\n )", "def isinf(data):\n return _make.isinf(data)", "def handel_missing_values(dataset, missing_values_header, missing_label):\n \n 
return dataset[dataset[missing_values_header] != missing_label]", "def verify_value_occurence_in_series(value, series):\n \n series_values_occurence = series.value_counts()\n if value in series_values_occurence:\n return series_values_occurence[value]", "def isfinite ( x ) : \n y = float ( x ) \n return ( not math.isinf ( y ) ) and ( not math.isnan ( y ) )", "def edge_case(values):\r\n for val in values:\r\n if val is True:\r\n return False\r\n return True", "def na_complain(X):\n na_values_present = np.isnan(X).sum()\n if na_values_present:\n raise ValueError(\"Na's found in data matrix.\")" ]
[ "0.6702152", "0.65366334", "0.64191544", "0.6405477", "0.6321939", "0.63141245", "0.61905783", "0.6163543", "0.61403567", "0.6065991", "0.60013145", "0.59662825", "0.592186", "0.5896812", "0.5895072", "0.58945686", "0.5826522", "0.5777309", "0.57611376", "0.57524323", "0.57523555", "0.5731586", "0.5722735", "0.5686982", "0.567296", "0.5622523", "0.56002426", "0.55787015", "0.55108297", "0.5491747" ]
0.73976064
0
Drop these calculated rates because they don't provide any information. If you want these, you can just use a view.
def drop_calculated_rates(df): if not _all_na_or_values(df["so2_rate_measure_flg"], {"Calculated"}): raise AssertionError() if not _all_na_or_values(df["co2_rate_measure_flg"], {"Calculated"}): raise AssertionError() del df["so2_rate_measure_flg"], df["so2_rate_lbs_mmbtu"] del df["co2_rate_measure_flg"], df["co2_rate_tons_mmbtu"] return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rates(self):\n return self._rates", "def rates(self):\n raise NotImplementedError(\"Must be implemented by subclass.\")", "def get_zero_rates(self):\r\n self.__bootstrap_zero_coupons__()\r\n self.__get_bond_spot_rates__()\r\n return [self.zero_rates[T] for T in self.get_maturities()]", "def test_get_rate_article_not_found(self):\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token)\n response = self.client.get(\n self.view_rates_url + str(2) + \"/\",\n format='json')\n self.assertEqual(\n 0,\n response.data[\"rates\"])\n self.assertEqual(204, status.HTTP_204_NO_CONTENT)", "def cull_uninformative_rates(rates, inform):\n return rates * inform", "def show_calculations(self):\n self.view.set_sum(self.model.get_sum())\n self.view.set_diff(self.model.get_diff())", "def calculateDataRate(self):\n pass", "def get_rates(table_id):\n fields = [\"0\",\"0\",\"0\",\"0\",\"0\",\"0\"]\n for pos, name in enumerate(rates_key_list):\n full_table_id = RATES_TABLE_PREFIX + table_id\n counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, name)\n if counter_data is None:\n fields[pos] = STATUS_NA\n elif fields[pos] != STATUS_NA:\n fields[pos] = float(counter_data)\n cntr = RateStats._make(fields)\n return cntr", "def exchange_rate(self):\n res = r.get(self.url + self.current_rate)\n return self.execute(res)", "def _retention_rate(self):\n res = {}\n for ret_line in self.browse(self.ids):\n if ret_line.invoice_id:\n pass\n else:\n res[ret_line.id] = 0.0\n return res", "def test_remove_taxation_strategy_from_rate_plan(self):\n pass", "def return_rates(self):\n return TimeSeriesRDD(None, None, self._jtsrdd.returnRates(), self.ctx)", "def MR_rate_clean(self, mech_rates):\n for rate_tuple in self['MR_rate']:\n \n if rate_tuple not in mech_rates.keys():\n self['MR_rate'].remove(rate_tuple)\n print (\"Removed \" + str(rate_tuple) + \" from MR_rate\")\n \n #check for rate to change in MR params\n for _rtc in self['rate_to_change']:\n rtc_tuple = r_tuple_from_r_name(mech_rates, _rtc)\n \n if rtc_tuple not in self['MR_avoid'] and not self['MR_avoid_preserve']:\n #this blanket hack will remove any special info in MR_avoid\n #flag can be used to make MR_avoid invulnerable\n \n self['MR_avoid'].append(rtc_tuple)\n print (\"Adding \"+str(rtc_tuple)+\" to MR_avoid (now: \"+ str(self['MR_avoid'])+\" )\\n\")\n \n #take the rate to change out of MR use \n if rtc_tuple in self['MR_rate']:\n self['MR_rate'].remove(rtc_tuple)", "def doNotUsePatternSpecificRates(self):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n TreeLikelihoodBase.setUsePatternSpecificRates(self, False)", "def test_error_no_rate(self):\n self.ocp_data[\"rates\"][0][\"tiered_rates\"] = []\n\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.ocp_data, context=self.request_context)\n with self.assertRaises(serializers.ValidationError):\n if serializer.is_valid(raise_exception=True):\n serializer.save()", "def trend_none(self):\n raise NotImplementedError()", "def getTransferListSummaryWithoutPrices(self):\n players = self.getAllPlayerInfoTransferlist()\n\n num_p_sold = 0\n num_p_expired = 0\n num_p_unlisted = 0\n num_p_listed = 0\n sold_p_value = 0\n\n for p in players:\n p_bidstatus = p[1]\n p_id = p[8]\n p_soldprice = p[5] # is 0 if unlisted\n\n if \"won\" in p_bidstatus:\n num_p_sold += 1\n sold_p_value += p_soldprice\n if \"expired\" in p_bidstatus:\n num_p_expired += 1\n if (p_bidstatus == \"listFUTItem\"):\n num_p_unlisted += 1\n if (p_bidstatus == \"listFUTItem 
has-auction-data\"):\n num_p_listed += 1\n\n # TODO subtract bought price\n return num_p_sold, num_p_expired, num_p_unlisted, num_p_listed", "def get_current_rate(self):\n pass", "def trend_price_down(self):\n raise NotImplementedError()", "def get_prices(self):\n pass", "def GetOpsRates():\n return GetDataFromCsvFile('ops_rates.csv')", "def preprocess_rates(self):\n # the rates from fixar.io are almost exactly in the required common format\n # as requested ccode is not in the request respond\n # we add it => e.g 1 EUR = 1 EUR => needed for further pandas extrapolation\n self.rates.update({self.in_ccode: float(1)})", "def update_rates(request):\n if request.method == 'GET':\n obj = requests.get(request_address).json()\n curr_inst = Currencies()\n curr_inst.timestamp = obj['timestamp']\n curr_inst.usd = obj['rates']['USD']\n curr_inst.eur = obj['rates']['EUR']\n curr_inst.czk = obj['rates']['CZK']\n curr_inst.pln = obj['rates']['PLN']\n curr_inst.save()\n serializer = CurrenciesSerializer(curr_inst)\n return Response(serializer.data)", "def disp_all_rates(user_email):\r\n check_email = Check_For_User(user_email)\r\n if check_email.user_exists is False:\r\n return jsonify(str(user_email) + \" not found\"), 400\r\n raise LookupError(str(user_email) + \" was not found. Please re-enter\")\r\n heart_rate_list = get_all_rates(user_email)\r\n date_list = get_all_times(user_email)\r\n return_dict = {\r\n \"user\": user_email,\r\n \"all_heart_rates\": heart_rate_list,\r\n \"all_times\": date_list\r\n }\r\n return jsonify(return_dict), 200", "def getActiveCurrencies():", "def rates(self, rates):\n\n self._rates = rates", "def rates(self, rates):\n\n self._rates = rates", "def test_error_on_negative_rate(self):\n self.ocp_data[\"rates\"][0][\"tiered_rates\"][0][\"value\"] = float(round(Decimal(random.random()), 6) * -1)\n\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.ocp_data, context=self.request_context)\n with self.assertRaises(serializers.ValidationError):\n if serializer.is_valid(raise_exception=True):\n serializer.save()", "def recalc_for_bird(self, early_bird):\n \n if early_bird.meal_discount and early_bird.meal_expanding:\n rates_ml = self.get_rates()\n if early_bird.discount_type == 1:\n if rates_ml:\n rates = []\n for rate in rates_ml:\n if early_bird.meal_discount > 0:\n rate *= Decimal(str(early_bird.meal_discount)) / 100\n else:\n rate += rate * Decimal(str(early_bird.meal_discount)) / 100\n rates.append(str(rate))\n self.rates = u';'.join(rates)\n\n if early_bird.discount_type == 0:\n if rates_ml:\n rates = []\n for rate in rates_ml:\n rate += Decimal(str(early_bird.meal_discount))\n rates.append(str(rate))\n self.rates = u';'.join(rates)\n return self.rates", "def __get_bond_spot_rates__(self):\r\n for T in self.get_maturities():\r\n instrument=self.instruments[T]\r\n (par,coup,price,freq)=instrument\r\n\r\n if coup!=0:\r\n self.zero_rates[T]=self.__calculate_bond_spot_rate__(T,instrument)" ]
[ "0.61796653", "0.6036099", "0.5962428", "0.57073414", "0.565551", "0.5630784", "0.56072664", "0.5545332", "0.55377626", "0.5521308", "0.5488782", "0.5419411", "0.5387603", "0.5350166", "0.53407055", "0.53198713", "0.5319001", "0.52960646", "0.52811706", "0.5280485", "0.5267656", "0.52611125", "0.52567077", "0.52557635", "0.5239544", "0.52333784", "0.52333784", "0.5226912", "0.52260053", "0.5225245" ]
0.635766
0
Calculates the bins used in the Riemann sum over metallicities
def calculateMetallicityBinEdges(self): if self.binInLogSpace: logMetallicities = np.log10(self.metallicityGrid) b= logMetallicities[:-1] + (logMetallicities[1:] - logMetallicities[:-1])/2. b = 10.**b #the boundaries for integration are not in log space so #convert to "normal" numbers. else: b= (self.metallicityGrid[1:] - self.metallicityGrid[:-1])/2. \ + self.metallicityGrid[:-1] self.metallicityBinEdges = np.zeros(len(b)+2) #the lowest/highest metallicity bin edge are set in options #the calculated b edges are all in between self.metallicityBinEdges[0] = self.metallicityLowerLimit self.metallicityBinEdges[-1] = self.metallicityUpperLimit self.metallicityBinEdges[1:-1] = b
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_energy_bins(image, num_bins=4):\r\n h, _ = image.shape\r\n C = np.linspace(0, h//2, num=num_bins+1, dtype=\"int\")\r\n # Create a list of dense square mask\r\n mask_list = [square_mask(image, c) for c in C[1:]]\r\n # Extract a list of hollow mask\r\n square_zones = [~mask_list[k]*mask_list[k+1] for k in range(len(mask_list)-1)]\r\n # add center square and whole image square to the list\r\n square_zones = [mask_list[0]] + square_zones + [mask_list[-1]]\r\n # Apply these masks to the image\r\n image_bins = []\r\n for zone in square_zones:\r\n img = image.copy()\r\n img[~zone] = np.nan\r\n image_bins.append(img)\r\n # Compute the energy of each masked image\r\n energy_bins = [np.nansum(_bin) for _bin in image_bins]\r\n total_energy = energy_bins[-1]\r\n # normalize the energy repartition\r\n energy_bins /= total_energy\r\n return energy_bins[:-1], image_bins[:-1]", "def correlation_bins(shred):\n return 0", "def _get_integrals_fast(bins):\n\n if min(bins.shape) == 1: \n flatsum = np.cumsum(bins.flat[::-1])[::-1]\n sums = np.subtract(*np.meshgrid(flatsum,flatsum))\n assert sums.shape[0] == sums.shape[1]\n return sums\n\n else: \n y_sums = np.cumsum(bins[:,::-1], axis = 1)[:,::-1]\n sums = np.cumsum(y_sums[::-1,:], axis = 0)[::-1,:]\n return sums", "def bincalc(nbin=0.1,bmin=5,bmax=2000):\n\n logbmin=np.log10(bmin)\n logbmax=np.log10(bmax)\n\n logbins=np.arange(logbmin,logbmax,nbin)\n\n bins=10**logbins\n\n #bins=np.linspace(bmin,bmax,60)\n return (bins)", "def get_srr_bins(p_data):\n \n n_data = len(p_data)\n \n n_bins = np.sqrt(n_data)\n \n return int(n_bins)", "def n_particles_bins(DG, bins=[0, 0.5, 3, 10, 100]):\n radii = fid.rss(DG.gas['Coordinates'][()])\n hist, bin_edges = np.histogram(radii, bins)\n\n return hist, bin_edges", "def test_bins(self):\n\n for filename in ['%s/population_padang_1.asc' % TESTDATA,\n '%s/test_grid.asc' % TESTDATA]:\n\n R = read_layer(filename)\n rmin, rmax = R.get_extrema()\n\n for N in [2, 3, 5, 7, 10, 16]:\n linear_intervals = R.get_bins(N=N, quantiles=False)\n\n assert linear_intervals[0] == rmin\n assert linear_intervals[-1] == rmax\n\n d = (rmax - rmin) / N\n for i in range(N):\n assert numpy.allclose(linear_intervals[i], rmin + i * d)\n\n quantiles = R.get_bins(N=N, quantiles=True)\n A = R.get_data(nan=True).flat[:]\n\n mask = numpy.logical_not(numpy.isnan(A)) # Omit NaN's\n l1 = len(A)\n A = A.compress(mask)\n l2 = len(A)\n\n if filename == '%s/test_grid.asc' % TESTDATA:\n # Check that NaN's were removed\n assert l1 == 35\n assert l2 == 30\n\n # Assert that there are no NaN's\n assert not numpy.alltrue(numpy.isnan(A))\n\n number_of_elements = len(A)\n average_elements_per_bin = number_of_elements / N\n\n # Count elements in each bin and check\n i0 = quantiles[0]\n for i1 in quantiles[1:]:\n count = numpy.sum((i0 < A) & (A < i1))\n if i0 == quantiles[0]:\n refcount = count\n\n if i1 < quantiles[-1]:\n # Number of elements in each bin must vary by no\n # more than 1\n assert abs(count - refcount) <= 1\n assert abs(count - average_elements_per_bin) <= 3\n else:\n # The last bin is allowed vary by more\n pass\n\n i0 = i1", "def getAbsNormalizationFactor(deltaE_wkspace,min,max):\n global reducer\n van_mass=reducer.get_default_parameter('vanadium-mass') \n \n Integration(InputWorkspace=deltaE_wkspace,OutputWorkspace='van_int',RangeLower=min,RangeUpper=max,IncludePartialBins='1')\n input_ws = mtd[deltaE_wkspace]\n ei_monovan = input_ws.getRun().getLogData(\"Ei\").value\n data_ws=mtd['van_int']\n nhist = data_ws.getNumberHistograms()\n #print 
nhist\n\n signal1_sum = 0.0\n weight1_sum = 0.0 \n signal2_sum = 0.0\n weight2_sum = 0.0 \n signal3_sum = 0.0\n weight3_sum = 0.0 \n signal4_sum = 0.0\n weight4_sum = 0.0 \n\n \n ic=0;\n izerc=0;\n for i in range(nhist):\n try:\n det = data_ws.getDetector(i)\n except Exception:\n continue\n if det.isMasked():\n continue\n\n signal = data_ws.readY(i)[0]\n error = data_ws.readE(i)[0]\n \n if signal != signal: #ignore NaN\n continue\n if ((error<=0) or (signal<=0)): # ignore Inf (0 in error are probably 0 in sign\n izerc+=1\n continue\n # Guess which minimizes the value sum(n_i-n)^2/Sigma_i -- this what Libisis had\n weight = 1.0/error\n signal1_sum += signal * weight\n weight1_sum += weight \n # Guess which minimizes the value sum(n_i-n)^2/Sigma_i^2\n weight2 = 1.0/(error*error)\n signal2_sum += signal * weight2\n weight2_sum += weight2 \n # Guess which assumes puassonian distribution with Err=Sqrt(signal) and calculates \n # the function: N_avrg = 1/(DetEfficiency_avrg^-1)*sum(n_i*DetEfficiency_i^-1)\n # where the DetEfficiency = WB_signal_i/WB_average WB_signal_i is the White Beam Vanadium \n # signal on i-th detector and the WB_average -- average WB vanadium signal. \n # n_i is the modified signal \n err_sq = error*error\n weight = err_sq/signal\n signal3_sum += err_sq\n weight3_sum += weight\n # Guess which estimatnes value sum(n_i^2/Sigma_i^2)/sum(n_i/Sigma_i^2) TGP suggestion from 12-2012\n signal4_sum += signal*signal/err_sq\n weight4_sum += signal/err_sq\n \n ic += 1 \n #print 'signal value =' ,signal\n #print 'error value =' ,error \n #print 'average ',signal_sum \n #---------------- Loop finished\n \n if( weight1_sum==0.0 or weight2_sum == 0.0 or weight3_sum == 0.0 or weight4_sum == 0.0) :\n print \"WB integral has been calculated incorrectrly, look at van_int workspace and input workspace: \",deltaE_wkspace\n raise IOError(\" divided by 0 weight\")\n \n integral_monovanLibISIS=signal1_sum / weight1_sum\n integral_monovanSigSq =signal2_sum / weight2_sum \n integral_monovanPuason =signal3_sum / weight3_sum \n integral_monovanTGP =signal4_sum / weight4_sum\n #integral_monovan=signal_sum /(wbVan_sum)\n van_multiplier = (float(reducer.van_rmm)/float(van_mass))\n absnorm_factorLibISIS = integral_monovanLibISIS * van_multiplier\n absnorm_factorSigSq = integral_monovanSigSq * van_multiplier \n absnorm_factorPuason = integral_monovanPuason * van_multiplier \n absnorm_factorTGP = integral_monovanTGP * van_multiplier \n #print 'Monovan integral :' ,integral_monovan \n \n if ei_monovan >= 210.0: \n xsection = 421 # vanadium cross-section in mBarn/sR (402 mBarn/Sr) (!!!modified to fit high energy limit?!!!)\n else: # old textbook cross-section for vanadium for ei=20mEv\n xsection = 400 + (ei_monovan/10) \n\n absnorm_factorLibISIS /= xsection\n absnorm_factorSigSq /= xsection \n absnorm_factorPuason /= xsection \n absnorm_factorTGP /= xsection \n \n sample_multiplier = (float(reducer.sample_mass)/float(reducer.sample_rmm))\n absnorm_factorLibISIS= absnorm_factorLibISIS *sample_multiplier\n absnorm_factorSigSq = absnorm_factorSigSq *sample_multiplier\n absnorm_factorPuason = absnorm_factorPuason *sample_multiplier\n absnorm_factorTGP = absnorm_factorTGP *sample_multiplier\n \n if (absnorm_factorLibISIS !=absnorm_factorLibISIS)|(izerc!=0): # It is an error, print diagnostics:\n if (absnorm_factorLibISIS !=absnorm_factorLibISIS):\n print '--------> Absolute normalization factor is NaN <----------------------------------------------'\n else:\n print '--------> Warning, Monovanadium has zero spectra 
<--------------------------------------------' \n print '--------> Processing workspace: ',deltaE_wkspace\n print '--------> Monovan Integration range : min=',min,' max=',max\n print '--------> Summarized: ',ic,' spectra with total value: ',signal2_sum, 'and total weight: ',weight2_sum\n print '--------> Dropped: ',izerc,' empty spectra'\n print '--------> Van multiplier: ',van_multiplier,' sample multiplier: ',sample_multiplier, 'and xsection: ',xsection \n print '--------> Abs norm factors: LibISIS: ',absnorm_factorLibISIS,' Sigma^2: ',absnorm_factorSigSq\n print '--------> Abs norm factors: Puasonian: ',absnorm_factorPuason, ' TGP: ',absnorm_factorTGP\n print '----------------------------------------------------------------------------------------------' \n else:\n DeleteWorkspace(Workspace=deltaE_wkspace)\n DeleteWorkspace(Workspace=data_ws)\n return (absnorm_factorLibISIS,absnorm_factorSigSq,absnorm_factorPuason,absnorm_factorTGP)", "def eqw_binning(t, n_bins):\n \n t_diff= (np.max(t) - np.min(t))/n_bins\n t_bins= np.hstack([np.array([np.min(t) + t_diff*i for i in range(1, n_bins)]), [np.max(t) + 0.01]])\n t_binning= np.digitize(t, t_bins)\n return t_binning", "def kinetic_energy_bins(v, bins, Mm=1.0):\n speed_squared = v[:, 0] ** 2 + v[:, 1] ** 2\n ke = 0.5 * Mm * speed_squared\n # for each bin bins[kk], sum kinetic energies of particles in bin\n ke_bins = np.zeros(len(bins), dtype=float)\n kk = 0\n for bin in bins:\n ke_bins[kk] = np.sum(ke[bin])\n kk += 1\n return ke_bins", "def block_sum(i, bins, C, n_u):\n s= 0.0\n for j in range(bins[i], bins[i+1]):\n for k in range(bins[i], bins[i+1]):\n s+= C[j][k]*n_u[j]*n_u[k]\n return s", "def bins(self):\n return self._bins", "def bins (self):\n return self._bins", "def bins (self):\n return self._bins", "def count_r_bins(self, rmax, Nr, zmin=None, rmin=0., zmax=None, plotfig=False):\n rArr = np.mgrid[rmin:rmax:Nr*1j]\n if zmin != None:\n if zmax == None: zmax = zmin + 10.\n ind = (self.z >= zmin)*(self.z <= zmax)\n xin = self.x[ind]; yin = self.y[ind]; zin = self.z[ind]\n else:\n xin = self.x.copy();yin = self.y.copy();zin = self.z.copy()\n R = np.sqrt(xin**2+yin**2)\n self.RR = R\n self.rbins = np.zeros(rArr.size-1)\n for ir in xrange(Nr-1):\n r0 = rArr[ir]; r1 = rArr[ir+1]\n print r0, r1\n N = np.where((R>=r0)*(R<r1))[0].size\n self.rbins[ir] = N#/np.pi/(r1**2-r0**2)\n self.rArr = rArr[:-1]\n if plotfig:\n plt.plot(self.rArr, self.rbins, 'o', ms=3)\n plt.show()\n self.area = np.pi*((rArr[1:])**2-(rArr[:-1])**2)\n self.rbins_norm = self.rbins / self.area\n return", "def _calcBins(self, contribs, parValues, fraction, minReq):\n # single set of R for this calculation\n bins = np.zeros(self.binCount)\n binObs = np.zeros(self.binCount)\n for bi in range(self.binCount):\n val, obs = self._calcBin(\n self._binMask(bi, parValues),\n fraction, minReq)\n bins[bi] = val\n binObs[bi] = obs\n cdf = self._calcCDF(bins)\n return bins, binObs, cdf", "def bin_discretize(self, variables=[], bins=3,\n min_const_samples_bin_size=1.0/3):\n self.edges=np.zeros((self.arity.size,bins+1))\n for i in variables:\n un_cnt=np.unique(self.data[:,i],return_counts=True)\n constvals=un_cnt[0][un_cnt[1]>self.data.shape[0]*min_const_samples_bin_size]\n mask=np.ones(self.data.shape[0],dtype=bool)\n if constvals.size>0:\n for j,cv in enumerate(constvals):\n mask*=(self.data[:,i]!=cv)\n self.data[self.data[:,i]==cv,i]=j\n\n size=np.sum(mask)/bins\n sorted_i=np.argsort(self.data[mask,i])\n edges=[self.data[mask,i][sorted_i[int(size*num)-1]] for num in range(1,bins)]\n 
self.edges[i]=[self.data[mask,i][sorted_i[0]]]+edges+[self.data[mask,i][sorted_i[-1]]]\n self.data[mask,i]=np.searchsorted(edges,self.data[mask,i])+constvals.size\n self.arity[i]=len(edges)+1+constvals.size", "def greedy_binning(t, C, n_bins, maxit= 1000):\n b= n_bins\n n_u= generate_n_u(t)\n d= len(n_u)\n cum_n_u= np.hstack([[0], np.cumsum(n_u)])\n tau= np.unique(t)\n tau= np.hstack([tau, [np.max(tau) + 0.1]])\n \n splits= sorted(np.random.randint(1, d, b-1))\n while len(np.unique(splits)) < b-1:\n splits= sorted(np.random.randint(1, d, b-1)) \n bins= np.array([0] + splits + [d])\n \n sums= np.repeat(0.0, n_bins)\n\n for i in range(n_bins):\n sums[i]= block_sum(i, bins, C, n_u)\n \n ns= np.repeat(0.0, n_bins)\n for i in range(n_bins):\n ns[i]= cum_n_u[bins[i+1]] - cum_n_u[bins[i]]\n \n objective= 0.0\n \n for i in range(n_bins):\n objective+= sums[i]/ns[i]\n\n cum_n_u= np.hstack([[0], np.cumsum(n_u)])\n \n it= 0\n while True and it < maxit:\n it+= 1\n \n change_obj, change_idx, step_, new_sum_i, new_sum_im1, new_ns_i, new_ns_im1= 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0\n \n for i in range(1, n_bins):\n for step in [-1, 0]:\n if ns[i + step] > n_u[bins[i] + step]:\n change, sum_i, sum_im1, ns_i, ns_im1 = changes(i, step*2 + 1, bins, C, n_u, ns, sums)\n if change > change_obj:\n change_obj, change_idx, step_, new_sum_i, new_sum_im1, new_ns_i, new_ns_im1= change, i, step*2 + 1, sum_i, sum_im1, ns_i, ns_im1\n \n if change_obj > 0.0:\n objective= objective + change_obj\n bins[change_idx]+= step_\n sums[change_idx]= new_sum_i\n sums[change_idx-1]= new_sum_im1\n ns[change_idx]= new_ns_i\n ns[change_idx-1]= new_ns_im1\n else:\n break\n \n t_binning= []\n for i in range(len(t)):\n for j in range(len(bins)):\n if t[i] >= tau[bins[j]] and t[i] < tau[bins[j+1]]:\n t_binning.append(j)\n \n return np.array(t_binning)", "def eqf_binning(t, n_bins):\n t_bins= []\n t= sorted(t)\n n_items= int(len(t)/n_bins)\n\n for i in range(1, n_bins):\n t_bins.append(t[int(i*n_items)])\n t_bins.append(np.max(t) + 0.01)\n t_binning= np.digitize(t, t_bins)\n return t_binning", "def pz_weight_mcal(cat,mask,bins,binnum=100,pdf=False):\n\n if pdf:\n print 'transfer pdf support'\n return\n else:\n if hasattr(cat,'pzstore'):\n nz = cat.pzstore.pz_full\n else:\n nz = cat.pz_full\n mask1=catalog.CatalogMethods.get_cuts_mask(cat,full=True)\n e1,e2,w,m1,m2=lin.linear_methods.get_lin_e_w_ms(cat,mask=mask1,xi=True)\n weights = (m1+m2)/2.*np.ones(len(cat.coadd))\n w0 = []\n for i in range(len(mask1)):\n if i==0:\n mask = mask1[0]\n else:\n mask = np.append(mask1[i],mask1[5])\n h0,b0=np.histogram(nz[mask],bins=binnum,weights=weights[mask])\n w=np.ones(len(nz))\n print 'w0',len(w)\n for j in range(cat.sbins):\n binmask=bins[j]\n h,b=np.histogram(nz[binmask],bins=b0,weights=weights[binmask])\n for k in range(binnum):\n binmask2=(nz>b[k])&(nz<=b[k+1])\n mask_=binmask[np.in1d(binmask,np.where(binmask2)[0])]\n if h[k]<0.01*h0[k]:\n w[mask_]=0.\n else:\n w[mask_]=0.5*h0[k]/h[k]\n w0.append(w)\n\n print 'max/min/mean weight', k,np.max(w),np.min(w),np.mean(w[binmask])\n\n return w0,weights", "def compute_histogram(self):\n # compute distance between points \n distmatrix = np.sqrt(pdist(self.points))\n if not self.mean_dist:\n self.mean_dist = np.mean(distmatrix)\n distmatrix = distmatrix/self.mean_dist\n distmatrix = squareform(distmatrix)\n #compute angles between points\n angles = compute_angles(self.points)\n #quantize angles to a bin\n tbins = np.floor(angles / (2 * pi / self.nbins_theta))\n lg = np.logspace(self.r1, self.r2, num=5)\n 
#quantize radious to bins\n rbins = np.ones(angles.shape) * -1\n for r in lg:\n counts = (distmatrix < r) \n rbins = rbins + counts.astype(int) \n return rbins, tbins", "def bin_centers(radial_bins):\n\n outer = radial_bins[1:]\n inner = radial_bins[:-1]\n return 0.5 * (outer + inner)", "def binning_axis(self) -> int:\r\n return 0", "def rebin(flux, ivar, w_grid):\n new_grid, w = regrid(w_grid)\n\n fl_iv = flux * ivar\n\n # len(flux) will give number of spectra,\n # len(new_grid) will give number of output bins\n flux_out = np.zeros((len(flux), nbins))\n ivar_out = np.zeros_like(flux_out)\n\n # These lines are necessary for SDSS spectra. For DESI\n # spectra nothing will change here, since the entire DESI grid is contained\n # within the QuasarNET one, but for BOSS/eBOSS the grid can extend out\n # past the QuasarNET grid and give negative bin values. I have tests that\n # confirm this still works on DESI data, don't worry.\n fl_iv = fl_iv[:, w]\n new_grid = new_grid[w]\n ivar_temp = ivar[:, w]\n\n for i in range(len(flux)):\n c = np.bincount(new_grid, weights=fl_iv[i, :])\n flux_out[i, :len(c)] += c\n c = np.bincount(new_grid, weights=ivar_temp[i, :])\n ivar_out[i, :len(c)] += c\n\n return flux_out, ivar_out", "def computation_gr(particles,p_types,dist,i,j,nbins, rmax):\n i=np.where(p_types == i)[0][0]\n j=np.where(p_types == j)[0][0]\n\n\n if len(p_types)>1:\n #indexes to delete if there is more than one type of particles\n i_axis0=[]\n i_axis1=[]\n for k in range(len(p_types)):\n if k!=i:\n i_axis0.append(particles[k])\n if k!=j:\n i_axis1.append(particles[k])\n dist = np.delete(dist,np.hstack(i_axis0), axis=0)\n dist = np.delete(dist,np.hstack(i_axis1), axis=1)\n\n\n\n bin_count = np.zeros((nbins,3))\n bin_ends = -rmax*np.cos(np.linspace(np.pi/2,np.pi,num=nbins+1))\n\n vol_old=0\n for i in range(nbins):\n bin_count[i,0]=0.5*(bin_ends[i+1]+bin_ends[i]) #Count position in the middle of the bin only needed in the first\n rmax_bin=bin_ends[i+1]\n indexes=np.where(dist<=rmax_bin)\n dist[indexes]=1000\n bin_count[i,1]=len(indexes[0])/len(particles[j])\n print(len(particles[j]))\n vol_new=4/3*np.pi*rmax_bin**3\n bin_count[i,2]=bin_count[i,1]/(vol_new-vol_old)\n\n rho_ave=256/6.71838**3 #np.sum(bin_count[:,1])/(4/3*np.pi*rmax**3)\n\n print(rho_ave)\n\n bin_count[:,2]=bin_count[:,2]/rho_ave**2 #g(r)=rho(r)/rho_ave\n\n return bin_count", "def R(self):\n\t\treturn (arange(self.rbins) + 0.5) * (self.cbins - 0.5) / self.rbins", "def __len__(self):\n return 9 # logsfr_ratios has 6 bins", "def freedman_diaconis_bins(self, arr):\n # From https://stats.stackexchange.com/questions/798/\n if len(arr) < 2:\n return 1\n # Calculate the iqr ranges.\n self.iqr(arr)\n # Calculate the h\n h = 2 * (self.q3 - self.q1) / (len(arr) ** (1 / 3))\n # fall back to sqrt(a) bins if iqr is 0\n if h == 0:\n return int(np.sqrt(arr.size))\n else:\n return int(np.ceil((arr.max() - arr.min()) / h))", "def bin_definition(n_bins_gammaness, n_bins_theta2):\n max_gam = 0.9\n max_th2 = 0.05 * u.deg * u.deg\n min_th2 = 0.005 * u.deg * u.deg\n\n gammaness_bins = np.linspace(0, max_gam, n_bins_gammaness)\n theta2_bins = np.linspace(min_th2, max_th2, n_bins_theta2)\n\n return gammaness_bins, theta2_bins", "def histogram_function(r_min, a, N, nbins):\n r_min_no_selfdistance = r_min - np.identity(len(r_min)) #remove identitity matrix so own distance will be treated as 0\n histarray = np.array(r_min_no_selfdistance).flatten()\n histogram = np.histogram(histarray,bins=np.linspace(0,a,nbins))\n histogram[0][0] = histogram[0][0] - N # 
remove N from the first bin to prevent it counting particle distance to itself\n return histogram[0]" ]
[ "0.63174194", "0.6299061", "0.62940705", "0.6222577", "0.6208876", "0.6017929", "0.598964", "0.59230375", "0.59167486", "0.58886397", "0.58843166", "0.5880696", "0.58755875", "0.58755875", "0.58666515", "0.5855976", "0.58558095", "0.584502", "0.5839998", "0.5780967", "0.577036", "0.57609797", "0.57373387", "0.57202417", "0.5715982", "0.5715856", "0.5713434", "0.5687484", "0.56832355", "0.5681004" ]
0.6595891
0
Make component fields, other info into dict for template context
def make_context( container: ServiceContainer, component_name: str, **kwargs ) -> Dict[str, Any]: from wired_components.component import IWrapComponents, IComponent # Start with all the wrapped components context: Dict[str, Any] = container.get(IWrapComponents) # We get the component again in case there are multiple components # registered with the same name, but for more specific contexts. component_factory = container.get(IComponent, name=component_name) # TODO Try to replace this part with DI+props in wired.components # (see above in component_factory) component_instance = component_factory(**kwargs) # Copy all the fields into the context dict for field in dataclasses.fields(component_instance): context[field.name] = getattr(component_instance, field.name) return context
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _template_data(self):\n return {\"form\": self.form.render()}", "def _get_render_context(self):\r\n context = {\r\n 'id': self.input_id,\r\n 'value': self.value,\r\n 'status': Status(self.status, self.capa_system.i18n.ugettext),\r\n 'msg': self.msg,\r\n 'STATIC_URL': self.capa_system.STATIC_URL,\r\n }\r\n context.update(\r\n (a, v) for (a, v) in self.loaded_attributes.iteritems() if a in self.to_render\r\n )\r\n context.update(self._extra_context())\r\n return context", "def get_context(self):\r\n return {\r\n 'module': self,\r\n 'editable_metadata_fields': self.editable_metadata_fields\r\n }", "def create_template_dict(name, cat, boilerplate_name=None, is_common=False):\r\n return {\r\n \"display_name\": name,\r\n \"category\": cat,\r\n \"boilerplate_name\": boilerplate_name,\r\n \"is_common\": is_common\r\n }", "def dict(self):\n d = {}\n d['template_id'] = self.id\n d['name'] = self.name\n d['cpu'] = self.cpu\n d['memory'] = self.memory\n d['points'] = self.points\n d['description'] = self.description\n d['ec2name'] = self.ec2name\n # state is not put in dictionary\n return d", "def context(template):\n\n return {\n v.key: v.read()\n for v in [Variable(name) for name in extract_variables(template)]\n }", "def _extra_context(self):\r\n return {\r\n 'input_type': self.html_input_type,\r\n 'choices': self.choices\r\n }", "def make_entity_dict(class_reference, template, partial_dict): \n _data = class_reference.properties()\n for _key in _data:\n _data[_key] = partial_dict.get(_key, template.get(_key, '')) \n return _data", "def get_dashmanager_field_components(doctype):\n\tfields_list, fields_component_list = get_fields_component_list(doctype)\n\treturn {\n\t\t\"fields\" : json.dumps(fields_list),\n\t\t\"fields_components\" : json.dumps(fields_component_list)\n\t}", "def prepare_template(self, rest_handler, key=''):\n template_values = {}\n template_values['page_title'] = self.format_title('Edit Question')\n template_values['main_content'] = self.get_form(rest_handler, key=key)\n\n return template_values", "def output_format_template_context(self, app: BaseConfig):\n # Android requires an integer \"version code\". 
If a version code\n # isn't explicitly provided, generate one from the version number.\n # The build number will also be appended, if provided.\n try:\n version_code = app.version_code\n except AttributeError:\n parsed = parsed_version(app.version)\n\n v = (list(parsed.release) + [0, 0])[:3] # version triple\n build = int(getattr(app, \"build\", \"0\"))\n version_code = f\"{v[0]:d}{v[1]:02d}{v[2]:02d}{build:02d}\".lstrip(\"0\")\n\n return {\n \"version_code\": version_code,\n \"safe_formal_name\": safe_formal_name(app.formal_name),\n }", "def render_custom_fields(form):\n return {\n 'form': form,\n }", "def get_template_render_context(self):\n return {\n \"distribution\": self,\n \"distribution_numbers\": self.numbers,\n \"distribution_times\": self.times.all(),\n }", "def to_canvas_properties(component: Component) -> Dict:\n if ComponentCache.get_generic_component(component.id) is not None:\n template = ComponentCache.load_jinja_template(\"generic_properties_template.jinja2\")\n else:\n template = ComponentCache.load_jinja_template(\"canvas_properties_template.jinja2\")\n\n template_vars = {\n \"elyra_owned_properties\": component.get_elyra_properties(),\n \"render_property_details\": ComponentProperty.render_property_details,\n }\n template.globals.update(template_vars)\n canvas_properties = template.render(component=component)\n return json.loads(canvas_properties)", "def get_context_data(self):\n return {\"form\": self.get_form()}", "def hydrate_arguments(cls, view_io: ViewIO) -> Dict:\n return {\n **super().hydrate_arguments(view_io),\n # TODO: should we add this here? probably not: \"software_system\"\n \"paper_size\": view_io.paper_size,\n \"automatic_layout\": AutomaticLayout.hydrate(view_io.automatic_layout)\n if view_io.automatic_layout\n else None,\n \"element_views\": map(ElementView.hydrate, view_io.element_views),\n \"relationship_views\": map(\n RelationshipView.hydrate, view_io.relationship_views\n ),\n }", "def _extra_context(self):\r\n return {}", "def createFormatMap(self, form, renderable, **extras):\n\n fmtmap = renderable.__dict__.copy()\n fmtmap.update(extras)\n\n def replaceVars(match):\n\n try:\n var = match.group()[2:-1]\n if var and var.endswith(\":lexical\"):\n var = var[:-len(\":lexical\")]\n value = form.getFieldValue(var, lexical=True) or ''\n else:\n value = form.getFieldValue(var) or ''\n\n if not isinstance(value, str):\n if not hasattr(value, \"decode\"):\n value = str(value)\n value = value.decode('utf-8')\n return value\n except:\n return match.group()\n\n # process labels and hints\n if 'label' in fmtmap and fmtmap['label'] != None:\n fmtmap['label'] = VAREXP.sub(replaceVars, fmtmap['label'])\n if 'hint' in fmtmap and fmtmap['hint'] != None:\n fmtmap['hint'] = VAREXP.sub(replaceVars, fmtmap['hint'])\n if 'text' in fmtmap and fmtmap['text'] != None:\n fmtmap['text'] = VAREXP.sub(replaceVars, fmtmap['text'])\n if 'placeholder' in fmtmap and fmtmap['placeholder'] != None:\n fmtmap['placeholder'] = VAREXP.sub(replaceVars,\n fmtmap['placeholder'])\n\n # defaults\n extra_classes = {'relevant': True, 'required': False,\n 'readonly': False, 'error': False}\n\n # Let's see whether we got properties here...\n try:\n if hasattr(renderable, 'bind') and renderable.bind:\n # Requiredness\n if form.model.isRequired(renderable.bind, form.data):\n extra_classes[\"required\"] = True\n\n if not form.model.isRelevant(renderable.bind, form.data):\n extra_classes[\"relevant\"] = False\n\n # Read only\n if form.model.isReadonly(renderable.bind, form.data):\n 
extra_classes[\"readonly\"] = True\n\n elif hasattr(renderable, 'getRenderables') and \\\n callable(renderable.getRenderables):\n\n # Group relevance\n if not form.model.isGroupRelevant(renderable, form.data):\n extra_classes[\"relevant\"] = False\n\n except:\n pass\n\n if extras.get(\"errors\", None) and \\\n hasattr(renderable, 'bind') and renderable.bind and \\\n extras['errors'].get(renderable.bind, None):\n\n extra_classes['error'] = True\n\n if getattr(renderable, 'alert', ''):\n fmtmap['alert'] = renderable.alert\n else:\n fmtmap['alert'] = \"; \".join(extras['errors'][renderable.bind])\n\n else:\n\n fmtmap['alert'] = ''\n\n if \"extra_classes\" in fmtmap:\n fmtmap['extra_classes'] = \" \".join([fmtmap['extra_classes']] + \\\n [key for key in\n list(extra_classes.keys())\n if extra_classes[key]])\n else:\n fmtmap['extra_classes'] = \" \".join([key for key in\n list(extra_classes.keys()) if\n extra_classes[key]])\n\n fmtmap['type'] = self.getType(renderable)\n\n return fmtmap", "def get_context(context, **dict_params):\n\tcontext['content'] = [\"item1\", \"item2\"]", "def _component_specs(self):\n specs = dict(pretransformed_input=self._input_spec)\n if self._transform_is_composite:\n specs['transform_fn'] = self.transform_or_spec\n if self._also_track_spec is not None:\n specs['also_track'] = self._also_track_spec\n return specs", "def _ks_prepare_odoo_product_tag_data(self, record):\n data = {\n \"name\": record.ks_name,\n \"slug\": record.ks_slug or '',\n \"description\": record.ks_description or ''\n }\n return data", "def base_data(self):\n return {\"context\": self.context}", "def field_wrapper(field):\n return {'field': field}", "def generate_template_dict(self):\n # Get the existing parameters\n params = super().generate_template_dict()\n\n # Add our custom parameters\n params['job_parameter_file'] = self.job_parameter_file\n params['job_output_directory'] = self.job_output_directory\n\n # Return the updated params\n return params", "def _component_specs(self):\n specs = dict(pretransformed_input=self._input_spec)\n if self._transform_is_composite:\n specs['bijector'] = self.transform_or_spec\n return specs", "def get_context(self):\n return {\"request\": self.request, \"format\": self.format_kwarg, \"view\": self}", "def _make_context():\n return {'User': User, 'CreditCard': CreditCard, 'Transaction': Transaction, 'db': db, 'jsonify':jsonify}", "def _driver_template_data(self):\n return {\n 'driver_module': self.driver_modulename(),\n 'file': self.driver_relative_path(),\n 'author': self.metadata.author,\n 'driver_name': self.metadata.driver_name,\n 'driver_path': self.metadata.driver_path,\n 'release_notes': self.metadata.notes,\n 'constructor': self.metadata.constructor,\n 'full_instrument_lower': self.metadata.driver_name.lower(),\n 'full_instrument_camelcase': self.driver_name_camelcase(),\n }", "def get_context_data(self, **kwargs): # pylint: disable=R0201\n return {}", "def get_crud_template_dict():\n return CRUD_TEMPLATE_DICT" ]
[ "0.65645075", "0.6080614", "0.5978665", "0.58917844", "0.5774367", "0.5748968", "0.57399213", "0.57305616", "0.56950396", "0.5634534", "0.55968183", "0.55514616", "0.5549723", "0.5531995", "0.550397", "0.54978746", "0.5492822", "0.5489106", "0.54887336", "0.54638904", "0.54448843", "0.54330486", "0.5415507", "0.5412822", "0.5407736", "0.5376833", "0.53603166", "0.5348014", "0.53443104", "0.53323555" ]
0.6259585
1
Launch training of the model with a set of hyperparameters in parent_dir/job_name
def launch_training_job(model_dir,job_name, params, implementation_dir): # Create a new folder in implementation corresponding to the model implementation_dir = os.path.join(implementation_dir, os.path.basename(os.path.normpath(model_dir))) if not os.path.exists(implementation_dir): os.makedirs(implementation_dir) implementation_hyperparams_dir = os.path.join(implementation_dir, job_name) if not os.path.exists(implementation_hyperparams_dir): os.makedirs(implementation_hyperparams_dir) params.implementation_dir = implementation_hyperparams_dir + "/" # Write parameters in json file json_path = os.path.join(implementation_hyperparams_dir, 'params.json') params.save(json_path) # Launch training with this config cmd = "{python} {model_dir}/train_C3D.py --params={json_path}".format(python=PYTHON, model_dir=model_dir, json_path=json_path) #print(cmd) #NOT GENERALIZABLE -- READ IN TEMPLATE AND APPEND? f = open(os.path.join(implementation_hyperparams_dir, ('run_' + job_name + '.test')), 'w+') f.write("#!/bin/bash\n") f.write("\n") f.write("#SBATCH --job-name=iterate{}\n".format(job_name)) f.write("#SBATCH --nodes=1\n") f.write("#SBATCH --mem=100GB\n") f.write("#SBATCH --time=12:00:00\n") f.write("#SBATCH --gres=gpu:1 -c1\n") f.write("#SBATCH --cpus-per-task=1\n") f.write("#SBATCH --error={}.out\n".format(model_dir + "/" + job_name)) f.write("\n") f.write("\n") f.write("module purge\n") f.write("module load python3/intel/3.5.3\n") f.write("module load pillow/intel/4.0.0\n") f.write("module load scikit-learn/intel/0.18.1\n") f.write("module load pytorch/python3.5/0.2.0_3\n") f.write("module load numpy/intel/1.13.1 \n") f.write("module load cuda/8.0.44\n") f.write("module load jupyter-kernels/py3.5\n") f.write("module load mysql/5.7.17\n") f.write("module load zeromq/intel/4.2.0\n") f.write("module load intel/17.0.1\n") f.write("module load zlib/intel/1.2.8\n") f.write("\n") f.write("\n") f.write(cmd) f.close() file=(implementation_hyperparams_dir +'/run_' + job_name + '.test') sbatch_call = "sbatch " + file print(sbatch_call) call(sbatch_call, shell=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def launch_training_job(parent_dir, data_dir, job_name, params):\n # Create a new folder in parent_dir with unique_name \"job_name\"\n model_dir = os.path.join(parent_dir, job_name)\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n\n # Write parameters in json file\n json_path = os.path.join(model_dir, 'params.json')\n params.save(json_path)\n\n # Launch training with this config\n cmd = \"{python} train.py --model_dir {model_dir} --data_dir {data_dir}\"\n cmd = cmd.format(python=PYTHON, model_dir=model_dir, data_dir=data_dir)\n print(cmd)\n check_call(cmd, shell=True)", "def launch_job(self,\n job_id: Text,\n parent: Text,\n training_input: Dict[Text, Any],\n job_labels: Optional[Dict[Text, Text]] = None) -> None:\n pass", "def launch_training_job(dataset_name, parent_dir, job_name, params):\r\n # Create a new folder in parent_dir with unique_name \"job_name\"\r\n model_dir = os.path.join(parent_dir, job_name)\r\n if not os.path.exists(model_dir):\r\n os.makedirs(model_dir)\r\n\r\n # Write parameters in json file\r\n json_path = os.path.join(model_dir, 'params.json')\r\n params.model_dir = model_dir\r\n params.save(json_path)\r\n\r\n # Launch training with this config\r\n exrta_paras_dict = {'train_window_size':params.train_window_size,\r\n 'test_window_size': params.test_window_size}\r\n if dataset_name == \"ucsd_ped1\":\r\n dataset_train = UCSDPed1_deepSVDD_TRAIN(path=params.train_dir,\r\n exrta_paras_dict=exrta_paras_dict)\r\n dataset_eval = UCSDPed1_deepSVDD(path=params.test_dir,\r\n exrta_paras_dict=exrta_paras_dict)\r\n if dataset_name == \"ucsd_ped2\":\r\n dataset_train = UCSDPed2_deepSVDD_TRAIN(path=params.train_dir,\r\n exrta_paras_dict=exrta_paras_dict)\r\n dataset_eval = UCSDPed2_deepSVDD(path=params.test_dir,\r\n exrta_paras_dict=exrta_paras_dict)\r\n # mutli_task train,这次使用逻辑上的分阶段训练:代码实现上用权重来控制\r\n # 先给rec loss极高的权重,只训练rec loss;然后只训练 probability loss\r\n # 最后恢复权重,联合训练 rec loss + deep_SVDD loss\r\n model = LSAUCSD_deepSVDD(input_shape=dataset_train.shape,\r\n code_length=params.code_length)\r\n # Set up result helper and perform test\r\n helper = VideoAnomalyDetectionResultHelper_deepSVDD(\r\n dataset_train, dataset_eval, model, params)\r\n helper.hyperparas_search()", "def _setup_training(self, params, **kwargs):\n model_params = params.permute_training_on_top().model\n\n model_kwargs = {**model_params.fixed, **model_params.variable}\n\n model = self.model_cls(**model_kwargs)\n\n training_params = params.permute_training_on_top().training\n losses = training_params.nested_get(\"losses\")\n optimizer_cls = training_params.nested_get(\"optimizer_cls\")\n optimizer_params = training_params.nested_get(\"optimizer_params\")\n train_metrics = training_params.nested_get(\"train_metrics\", {})\n lr_scheduler_cls = training_params.nested_get(\"lr_sched_cls\", None)\n lr_scheduler_params = training_params.nested_get(\"lr_sched_params\",\n {})\n val_metrics = training_params.nested_get(\"val_metrics\", {})\n\n # necessary for resuming training from a given path\n save_path = kwargs.pop(\"save_path\", os.path.join(\n self.save_path,\n \"checkpoints\",\n \"run_%02d\" % self._run))\n\n return self.trainer_cls(\n network=model,\n save_path=save_path,\n losses=losses,\n key_mapping=self.key_mapping,\n optimizer_cls=optimizer_cls,\n optimizer_params=optimizer_params,\n train_metrics=train_metrics,\n val_metrics=val_metrics,\n lr_scheduler_cls=lr_scheduler_cls,\n lr_scheduler_params=lr_scheduler_params,\n optim_fn=self._optim_builder,\n save_freq=self.checkpoint_freq,\n 
**kwargs\n )", "def train(self, current_hyper_params):\n train_loss = 0\n train_n_iter = 0\n # Set model to train mode\n self.model.train()\n # Iterate over train data\n print(\"Iterating over training data...\")\n for i, batch in enumerate(tqdm(self.train_loader)):\n loss = self._train_batch(batch)\n # Statistics\n train_loss += loss.item()\n train_n_iter += 1\n self.stats.train_loss_history.append(train_loss / train_n_iter)", "def train(self, single=False):\n self.baseline=None\n\n dag = utils.load_dag(self.args,self.logger) if single else None\n \n if self.args.shared_initial_step > 0:\n self.train_shared(self.args.shared_initial_step)\n self.train_controller()\n\n for self.epoch in range(self.start_epoch, self.args.max_epoch):\n # 1. Training the shared parameters omega of the child models\n self.train_shared(dag=dag)\n\n # 2. Training the controller parameters theta\n if not single:\n self.train_controller()\n\n if self.epoch % self.args.save_epoch == 0 and self.epoch!=0:\n with _get_no_grad_ctx_mgr():\n best_dag = dag if dag else self.derive()\n self.evaluate(best_dag,batch_size=self.args.batch_size)\n self.save_model()\n\n if self.epoch >= self.args.shared_decay_after:\n utils.update_lr(self.shared_optim, self.shared_lr)\n self.save_model()\n self.dag_file.close()", "def run_training(self, schema_params, export_model=False, output_model_dir=None):\n # Log distributed execution context, which includes cluster configuration\n logger.info(f\"Commencing {self.effect_name} training\")\n logger.info(f\"Execution context : {self.execution_context}\")\n\n # Create partition_index_list\n partition_index_list = self._get_partition_list()\n logger.info(f\"This worker on work on the following list of partitions : {partition_index_list}\")\n\n # Sequentially train model on partitions\n for partition_index in partition_index_list:\n logger.info(f\"Commencing {self.effect_name} training for partition index : {partition_index}\")\n\n # Resolve partitioned data directory from raw path params from user\n checkpoint_path = self._anchor_directory(\n self.model.checkpoint_path,\n partition_index)\n training_data_dir = self._anchor_directory(self.model.training_data_dir,\n partition_index)\n validation_data_dir = self._anchor_directory(self.model.validation_data_dir,\n partition_index) if self.model.validation_data_dir else None\n\n if is_empty_directory(training_data_dir):\n logger.info(f\"{training_data_dir} is empty, no dataset to train on.\")\n continue\n # Train model\n self.execution_context[constants.PARTITION_INDEX] = partition_index\n self.model.train(training_data_dir=training_data_dir,\n validation_data_dir=validation_data_dir,\n metadata_file=self.model.metadata_file,\n checkpoint_path=checkpoint_path,\n execution_context=self._prepare_training_context(partition_index),\n schema_params=schema_params)\n\n # Chief should export model\n is_chief = self.execution_context[constants.IS_CHIEF]\n if export_model and is_chief:\n logger.info(f\"Exporting model to directory : {output_model_dir}\")\n self.model.export(output_model_dir=output_model_dir)", "def train(args):\n print(args)\n\n # Run a training job\n configs = LuxMatchConfigs_Default\n\n # Create a default opponent agent\n opponent = Agent()\n\n # Create a RL agent in training mode\n player = AgentPolicy(mode=\"train\")\n\n # Train the model\n num_cpu = 1\n if num_cpu == 1:\n env = LuxEnvironment(configs=configs,\n learning_agent=player,\n opponent_agent=opponent)\n else:\n env = SubprocVecEnv([make_env(LuxEnvironment(configs=configs,\n 
learning_agent=AgentPolicy(mode=\"train\"),\n opponent_agent=opponent), i) for i in range(num_cpu)])\n run_id = args.id\n print(\"Run id %s\" % run_id)\n\n if args.path:\n # by default previous model params are used (lr, batch size, gamma...)\n model = PPO.load(args.path)\n model.set_env(env=env)\n\n # Update the learning rate\n model.lr_schedule = get_schedule_fn(args.learning_rate)\n\n # TODO: Update other training parameters\n else:\n model = PPO(\"MlpPolicy\",\n env,\n verbose=1,\n tensorboard_log=\"./lux_tensorboard/\",\n learning_rate=args.learning_rate,\n gamma=args.gamma,\n gae_lambda=args.gae_lambda,\n batch_size=args.batch_size,\n n_steps=args.n_steps\n )\n\n print(\"Training model...\")\n # Save a checkpoint every 1M steps\n checkpoint_callback = CheckpointCallback(save_freq=1000000,\n save_path='./models/',\n name_prefix=f'rl_model_{run_id}')\n model.learn(total_timesteps=args.step_count,\n callback=checkpoint_callback) # 20M steps\n if not os.path.exists(f'models/rl_model_{run_id}_{args.step_count}_steps.zip'):\n model.save(path=f'models/rl_model_{run_id}_{args.step_count}_steps.zip')\n print(\"Done training model.\")\n\n # Inference the model\n print(\"Inference model policy with rendering...\")\n saves = glob.glob(f'models/rl_model_{run_id}_*_steps.zip')\n latest_save = sorted(saves, key=lambda x: int(x.split('_')[-2]), reverse=True)[0]\n model.load(path=latest_save)\n obs = env.reset()\n for i in range(600):\n action_code, _states = model.predict(obs, deterministic=True)\n obs, rewards, done, info = env.step(action_code)\n if i % 5 == 0:\n print(\"Turn %i\" % i)\n env.render()\n\n if done:\n print(\"Episode done, resetting.\")\n obs = env.reset()\n print(\"Done\")\n\n '''\n # Learn with self-play against the learned model as an opponent now\n print(\"Training model with self-play against last version of model...\")\n player = AgentPolicy(mode=\"train\")\n opponent = AgentPolicy(mode=\"inference\", model=model)\n env = LuxEnvironment(configs, player, opponent)\n model = PPO(\"MlpPolicy\",\n env,\n verbose=1,\n tensorboard_log=\"./lux_tensorboard/\",\n learning_rate = 0.0003,\n gamma=0.999,\n gae_lambda = 0.95\n )\n\n model.learn(total_timesteps=2000)\n env.close()\n print(\"Done\")\n '''", "def train_model(self, *args, **kwargs):\n self.model.train(self.training, *args, **kwargs)", "def train(self, absList, modelFilename):\n pass", "def train_model(self):\n if not self.is_exist(self.path_model_directory):\n # Then create the parent folder\n os.makedirs(self.path_model_directory)\n\n # Create a meta-data pickle for the model\n self.create_meta_data_pickle()\n\n # Necessary meta-data file must be created before starting the training. Check if the file exists\n if self.is_exist(self.path_model_metadata):\n\n # We do not need to train a model if there is already a best model for the same training exist\n try:\n self.model = load_model(self.path_best_model)\n return\n except:\n self.log_event('There is no best trained model found in the parent folder. Going with the training...')\n\n # Load the model meta-data\n self.load_model_metadata()\n self.encoding_vector_size = self.number_of_distinct_items\n\n # Iterate trough the split data for the training\n for split_number in range(self.k_split):\n split_path = f'split_{str(split_number)}/'\n split_directory = self.path_model_directory + split_path\n\n # Check the split directory is already created. 
If it is, then we can directly start the training by using the existing data\n if self.is_exist(split_directory):\n try:\n self.load_best_tuned_model(split_number)\n except (IndexError, FileNotFoundError):\n self.load_fold_k_data_and_fit(split_number=int(split_number))\n\n else:\n # Create a folder for the split data and prepare the data for the training\n os.makedirs(split_directory)\n\n # Create an array which will contain train features-labels and test features-labels\n train_array = np.full(4, fill_value=self.mask_value, dtype=object)\n train_index = 0\n for position, split_name in enumerate(['train_split_', 'test_split_']):\n training_features_directory = split_directory + f'{split_name}{str(split_number)}_all_training_features.data'\n training_targets_directory = split_directory + f'{split_name}{str(split_number)}_all_training_targets.data'\n fold_directory = self.path_shared_folds + f'{split_name}{str(split_number)}.fold'\n\n self.process_training_data(fold_directory=fold_directory)\n\n self.save_data_to_disk(data_to_save=self.all_features, path_to_save=training_features_directory)\n train_array[train_index] = self.all_features\n train_index += 1\n self.all_features = None # Memory Management\n\n self.save_data_to_disk(data_to_save=self.all_targets, path_to_save=training_targets_directory)\n train_array[train_index] = self.all_targets\n train_index += 1\n self.all_targets = None # Memory Management\n\n # Assign the input data to respective variables for the training\n self.train_features = train_array[0]\n self.train_targets = train_array[1]\n self.test_features = train_array[2]\n self.test_targets = train_array[3]\n del train_array\n\n self.start_hyper_parameter_tuning(split_number)\n\n self.retrieve_best_model(metric=self.hyper_parameters['metric'])", "def start_training(self, logdir: str, **info):\n pass", "def train_model(self, *args, **kwargs):\n raise NotImplementedError", "def train(args):\n print(args)\n\n # Run a training job\n configs = LuxMatchConfigs_Default\n\n # Create a default opponent agent\n opponent = Agent()\n\n # Create a RL agent in training mode\n player = AgentPolicy(mode=\"train\")\n\n # Train the model\n env_eval = None\n if args.n_envs == 1:\n env = LuxEnvironment(configs=configs,\n learning_agent=player,\n opponent_agent=opponent)\n else:\n env = SubprocVecEnv([make_env(LuxEnvironment(configs=configs,\n learning_agent=AgentPolicy(mode=\"train\"),\n opponent_agent=opponent), i) for i in range(args.n_envs)])\n \n run_id = args.id\n print(\"Run id %s\" % run_id)\n\n if args.path:\n # by default previous model params are used (lr, batch size, gamma...)\n model = PPO.load(args.path)\n model.set_env(env=env)\n\n # Update the learning rate\n model.lr_schedule = get_schedule_fn(args.learning_rate)\n\n # TODO: Update other training parameters\n else:\n model = PPO(\"MlpPolicy\",\n env,\n verbose=1,\n tensorboard_log=\"./lux_tensorboard/\",\n learning_rate=args.learning_rate,\n gamma=args.gamma,\n gae_lambda=args.gae_lambda,\n batch_size=args.batch_size,\n n_steps=args.n_steps\n )\n\n \n \n callbacks = []\n\n # Save a checkpoint and 5 match replay files every 100K steps\n player_replay = AgentPolicy(mode=\"inference\", model=model)\n callbacks.append(\n SaveReplayAndModelCallback(\n save_freq=100000,\n save_path='./models/',\n name_prefix=f'model{run_id}',\n replay_env=LuxEnvironment(\n configs=configs,\n learning_agent=player_replay,\n opponent_agent=Agent()\n ),\n replay_num_episodes=5\n )\n )\n \n # Since reward metrics don't work for multi-environment setups, 
we add an evaluation logger\n # for metrics.\n if args.n_envs > 1:\n # An evaluation environment is needed to measure multi-env setups. Use a fixed 4 envs.\n env_eval = SubprocVecEnv([make_env(LuxEnvironment(configs=configs,\n learning_agent=AgentPolicy(mode=\"train\"),\n opponent_agent=opponent), i) for i in range(4)])\n\n callbacks.append(\n EvalCallback(env_eval, best_model_save_path=f'./logs_{run_id}/',\n log_path=f'./logs_{run_id}/',\n eval_freq=args.n_steps*2, # Run it every 2 training iterations\n n_eval_episodes=30, # Run 30 games\n deterministic=False, render=False)\n )\n\n print(\"Training model...\")\n model.learn(total_timesteps=args.step_count,\n callback=callbacks)\n if not os.path.exists(f'models/rl_model_{run_id}_{args.step_count}_steps.zip'):\n model.save(path=f'models/rl_model_{run_id}_{args.step_count}_steps.zip')\n print(\"Done training model.\")\n\n # Inference the model\n print(\"Inference model policy with rendering...\")\n saves = glob.glob(f'models/rl_model_{run_id}_*_steps.zip')\n latest_save = sorted(saves, key=lambda x: int(x.split('_')[-2]), reverse=True)[0]\n model.load(path=latest_save)\n obs = env.reset()\n for i in range(600):\n action_code, _states = model.predict(obs, deterministic=True)\n obs, rewards, done, info = env.step(action_code)\n if i % 5 == 0:\n print(\"Turn %i\" % i)\n env.render()\n\n if done:\n print(\"Episode done, resetting.\")\n obs = env.reset()\n print(\"Done\")\n\n '''\n # Learn with self-play against the learned model as an opponent now\n print(\"Training model with self-play against last version of model...\")\n player = AgentPolicy(mode=\"train\")\n opponent = AgentPolicy(mode=\"inference\", model=model)\n env = LuxEnvironment(configs, player, opponent)\n model = PPO(\"MlpPolicy\",\n env,\n verbose=1,\n tensorboard_log=\"./lux_tensorboard/\",\n learning_rate = 0.0003,\n gamma=0.999,\n gae_lambda = 0.95\n )\n model.learn(total_timesteps=2000)\n env.close()\n print(\"Done\")\n '''", "def pretrained():\n launch_training_on_all_splits(experiment='full', splits=NAME_SPLIT, base_model='pretrained', dropout=0.7987, learning_rate=0.00009659)", "def train(self, absList, modelFilename):\n raise NotImplementedError(\"Need to implement train()\")", "def launch_training_job(master_nodes, trainset_date, opts, ec2_opts):\n # TODO: check whether HDFS is running\n # TODO: check whether YARN is running\n master = master_nodes[0].public_dns_name\n print(\"Setting up HDFS on the cluster..\")\n ssh(host=master, opts=ec2_opts, command=\"chmod u+x /root/spark-ec2/setup_pricer_data.sh\")\n ssh(host=master, opts=ec2_opts, command=\"/root/spark-ec2/setup_pricer_data.sh\")\n print(\"Running trainer with train date={d}..\".format(d=trainset_date))\n ssh(host=master, opts=ec2_opts, command=\"chmod u+x /root/spark-ec2/run_aws_trainer.sh\")\n ssh(host=master, opts=ec2_opts, command=\"nohup /root/spark-ec2/run_aws_trainer.sh {d} 2>&1 </dev/null |tee log.aws_trainer\".format(d=trainset_date))\n print(\"Trainer was launched successfully..\")", "def train_and_eval(params: flags.FlagValues) -> tf.keras.callbacks.History:\n logging.info('Run training for {} with {}'.format(params.model_name,\n params.dataset_name))\n logging.info('The CLI params are: {}'.format(params.flag_values_dict()))\n d_config = _get_dataset_config().get(params.dataset_name)()\n m_config = _get_model_config().get(params.model_name)()\n\n logging.info('Training dataset configuration:', d_config)\n logging.info('Training model configuration:', m_config)\n\n # override the model params with CLI 
params\n m_config.num_classes = d_config.num_classes\n m_config.dropout_keep_prob = 1 - params.dropout_rate\n m_config.weight_decay = params.std_weight_decay\n m_config.stddev = params.truncated_normal_stddev\n m_config.batch_norm_decay = params.batch_norm_decay\n\n strategy = tf.distribute.MirroredStrategy()\n with strategy.scope():\n # override the dataset params with CLI params\n if params.data_dir:\n d_config.data_dir = params.data_dir\n global_batch_size = params.batch_size * strategy.num_replicas_in_sync\n\n # override the dataset params with CLI params\n # for distributed training, update batch size\n d_config.batch_size = global_batch_size\n # determine whether one_hot is used based on label_smoothing\n d_config.one_hot = params.label_smoothing and params.label_smoothing > 0\n\n # build train dataset\n train_dataset = get_dataset(d_config)\n # build validation dataset\n d_config.split = 'validation'\n eval_dataset = get_dataset(d_config)\n\n # compute number iterations per epoch\n steps_per_epoch = d_config.num_examples // d_config.batch_size\n eval_steps = d_config.num_eval_examples // d_config.batch_size\n\n # build the model\n keras_model = build_model(\n model_name=params.model_name,\n dataset_config=d_config,\n model_config=m_config\n )\n\n # build the optimizer\n learning_params = defaults.LR_CONFIG_DEFAULT\n learning_params.update({'initial_lr': params.lr,\n 'decay_epochs': params.lr_decay_epochs,\n 'decay_rate': params.lr_decay_rate})\n optimizer_params = defaults.OP_CONFIG_DEFAULT\n optimizer_params.update({'decay': params.op_decay_rate,\n 'momentum': params.op_momentum})\n optimizer = _get_optimizer(\n batch_size=global_batch_size,\n steps_per_epoch=steps_per_epoch,\n lr_name=params.learning_scheduler_name,\n optimizer_name=params.optimizer_name,\n lr_params=learning_params,\n optimizer_params=optimizer_params\n )\n\n logging.info('Exponential decay rate:{}'.format(params.ma_decay_rate))\n if params.ma_decay_rate:\n optimizer = tfa.optimizers.MovingAverage(\n optimizer=optimizer,\n average_decay=params.ma_decay_rate)\n\n # compile model\n if d_config.one_hot:\n loss_obj = tf.keras.losses.CategoricalCrossentropy(\n label_smoothing=params.label_smoothing)\n else:\n loss_obj = tf.keras.losses.SparseCategoricalCrossentropy()\n\n keras_model.compile(\n optimizer=optimizer,\n loss=loss_obj,\n metrics=[_get_metrics(one_hot=d_config.one_hot)['acc']],\n )\n\n logging.info(keras_model.summary())\n\n initial_epoch = 0\n if params.resume_checkpoint:\n initial_epoch = _resume_from_checkpoint(model=keras_model,\n model_dir=params.model_dir,\n train_steps=steps_per_epoch)\n\n # Callbacks\n callbacks_to_use = _get_callback(model_dir=params.model_dir)\n\n # Train model\n history = keras_model.fit(\n train_dataset,\n steps_per_epoch=steps_per_epoch,\n epochs=params.epochs,\n validation_data=eval_dataset,\n validation_steps=eval_steps,\n initial_epoch=initial_epoch,\n verbose=1,\n callbacks=callbacks_to_use\n )\n\n return history", "def call_training_routine(self):\n training_command = \"th main.lua \"\\\n \"-GPU_id %(GPU_identifier)i \"\\\n \"-number_of_GPUs %(number_of_GPUs)i \"\\\n \"-training_dataset %(training_dataset)s \"\\\n \"-testing_dataset %(testing_dataset)s \"\\\n \"-modelFilePath %(modelFilePath)s \"\\\n \"-maxepoch %(maxepoch)i \"\\\n \"-savingDirectory %(savingDirectory)s \"\\\n \"-learningRate %(learningRate)f \"\\\n \"-batchSize %(batchSize)i \"\\\n \"-momentum %(momentum)f\" % self.training_parameters\n\n if self.training_parameters[\"presavedModelPath\"] != \"\":\n 
training_command += \" -presavedModelPath %s\" %\\\n self.training_parameters[\"presavedModelPath\"]\n\n # Call the training command\n subprocess.call(training_command, shell=True)", "def train(self):\n # Change directory to the code directory\n current_working_directory = os.getcwd()\n\n os.chdir(self.model_parameters[\"NN_code_directory\"])\n\n self.call_training_routine()\n\n # Come back to the original directory\n os.chdir(current_working_directory)", "def train( # type: ignore\n self,\n pl_trainer_args: Dict[str, Any],\n model_args: Dict[str, Union[float, str, int]],\n dataset_args: Dict[str, Union[float, str, int]],\n dataset: GFlowNetDataset,\n environment: GraphBuildingEnv,\n context: GraphBuildingEnvContext,\n task: GFlowNetTask,\n ) -> None:\n\n logger.info(f\"Trainer arguments: {pl_trainer_args}\")\n\n if pl_trainer_args[\n \"resume_from_checkpoint\"\n ] is not None and not pl_trainer_args[\"resume_from_checkpoint\"].endswith(\n \".ckpt\"\n ):\n pl_trainer_args[\"resume_from_checkpoint\"] = None\n\n pl_trainer_args[\"callbacks\"] = {\n \"model_checkpoint_callback\": {\"save_top_k\": pl_trainer_args[\"save_top_k\"]}\n }\n\n pl_trainer_args[\"callbacks\"] = self.add_callbacks(pl_trainer_args[\"callbacks\"])\n\n pl_trainer_args[\"logger\"] = TensorBoardLogger(\n pl_trainer_args[\"save_dir\"], name=pl_trainer_args[\"basename\"]\n )\n\n trainer = Trainer(\n profiler=pl_trainer_args[\"profiler\"],\n logger=pl_trainer_args[\"logger\"],\n log_every_n_steps=pl_trainer_args[\"trainer_log_every_n_steps\"],\n callbacks=pl_trainer_args[\"callbacks\"],\n max_epochs=pl_trainer_args[\"epochs\"],\n strategy=pl_trainer_args[\"strategy\"],\n fast_dev_run=pl_trainer_args[\"development_mode\"],\n )\n\n data_module, model_module = self.get_data_and_model_modules(\n model_args,\n dataset_args,\n pl_trainer_args,\n dataset,\n environment,\n context,\n task,\n )\n trainer.fit(model_module, data_module)", "def main(args, base_dir):\n for i in range(args.n_training):\n # value of the next seed\n seed = args.seed + i\n\n # The time when the current experiment started.\n now = strftime(\"%Y-%m-%d-%H:%M:%S\")\n\n # Create a save directory folder (if it doesn't exist).\n if args.log_dir is not None:\n dir_name = args.log_dir\n else:\n dir_name = os.path.join(base_dir, '{}/{}'.format(\n args.env_name, now))\n ensure_dir(dir_name)\n\n # Get the policy class.\n if args.alg == \"TD3\":\n from hbaselines.multiagent.td3 import MultiFeedForwardPolicy\n elif args.alg == \"SAC\":\n from hbaselines.multiagent.sac import MultiFeedForwardPolicy\n elif args.alg == \"PPO\":\n from hbaselines.multiagent.ppo import MultiFeedForwardPolicy\n elif args.alg == \"TRPO\":\n from hbaselines.multiagent.trpo import MultiFeedForwardPolicy\n else:\n raise ValueError(\"Unknown algorithm: {}\".format(args.alg))\n\n # Get the hyperparameters.\n hp = get_hyperparameters(args, MultiFeedForwardPolicy)\n\n # add the seed for logging purposes\n params_with_extra = hp.copy()\n params_with_extra['seed'] = seed\n params_with_extra['env_name'] = args.env_name\n params_with_extra['policy_name'] = \"MultiFeedForwardPolicy\"\n params_with_extra['algorithm'] = args.alg\n params_with_extra['date/time'] = now\n\n # Add the hyperparameters to the folder.\n with open(os.path.join(dir_name, 'hyperparameters.json'), 'w') as f:\n json.dump(params_with_extra, f, sort_keys=True, indent=4)\n\n run_exp(\n env=args.env_name,\n policy=MultiFeedForwardPolicy,\n hp=hp,\n dir_name=dir_name,\n evaluate=args.evaluate,\n seed=seed,\n eval_interval=args.eval_interval,\n 
log_interval=args.log_interval,\n save_interval=args.save_interval,\n initial_exploration_steps=args.initial_exploration_steps,\n ckpt_path=args.ckpt_path,\n )", "def setup_training(args: argparse.Namespace) -> None:\n # 1. Read hyperparameters from file\n hp = HParams.from_yaml(args.path_config)\n # check if GPU available and add it to parameters\n hp[\"device\"] = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n # 2. Create extension of the architecture of the model and timestamp for this run (use to\n # identify folders and files created for this run)\n # format: f(params_file)_t(n_tiers)_l(n_layers)_hd(hidden_size)_gmm(gmm_size).\n extension_architecture = f\"d{hp.name}_t{hp.network.n_tiers}_\" \\\n f\"l{'.'.join(map(str, hp.network.layers))}_\" \\\n f\"hd{hp.network.hidden_size}_gmm{hp.network.gmm_size}\"\n timestamp = f\"{datetime.now().strftime('%Y%m%d-%H%M%S')}\"\n\n # 3 Create directories for saving logs and model weights if they do not exist\n # 3.1 Create model weights directory for this run (the same directory will be used for different\n # runs of a model with same architecture and the difference will be in the file stored)\n hp[\"training\"][\"dir_chkpt\"] = hp.training.dir_chkpt + extension_architecture\n Path(hp.training.dir_chkpt).mkdir(parents=True, exist_ok=True)\n # 3.2 Create general log directory for this run (the same directory will be used for different\n # runs of a model with same architecture and the difference will be in the file stored)\n hp[\"logging\"][\"dir_log\"] = hp.logging.dir_log + extension_architecture\n Path(hp.logging.dir_log).mkdir(parents=True, exist_ok=True)\n\n # 4. Setup general logging (it will use the folder previously created and the filename will be:\n tier = str(args.tier) if args.tier is not None else 'ALL'\n filename = f\"{hp.logging.dir_log}/tier{tier}_{timestamp}\"\n logging.basicConfig(\n level=logging.INFO,\n format='%(asctime)s - %(levelname)s - %(message)s',\n handlers=[\n logging.FileHandler(filename=filename), # handler to save the log to a file\n logging.StreamHandler() # handler to output the log to the terminal\n ])\n logger = logging.getLogger()\n\n # 5. Show device that will be used for training: CPU or GPU\n logger.info(f\"Device for training: {hp.device}\")\n\n # 6. 
Start training of the model (or a single tier, depending on args)\n train_model(args, hp, extension_architecture, timestamp, logger)", "def train(project_id, python_module=None, package_uris=None, \n region=None, args=None, job_dir=None, python_version=None, \n runtime_version=None, master_image_uri=None, worker_image_uri=None, \n training_input=None, job_id_prefix=None, wait_interval=30):\n if not training_input:\n training_input = {}\n if python_module:\n training_input['pythonModule'] = python_module\n if package_uris:\n training_input['packageUris'] = package_uris\n if region:\n training_input['region'] = region\n if args:\n training_input['args'] = args\n if job_dir:\n training_input['jobDir'] = job_dir\n if python_version:\n training_input['pythonVersion'] = python_version\n if runtime_version:\n training_input['runtimeVersion'] = runtime_version\n if master_image_uri:\n if 'masterConfig' not in training_input:\n training_input['masterConfig'] = {}\n training_input['masterConfig']['imageUri'] = master_image_uri\n if worker_image_uri:\n if 'workerConfig' not in training_input:\n training_input['workerConfig'] = {}\n training_input['workerConfig']['imageUri'] = worker_image_uri\n job = {\n 'trainingInput': training_input\n }\n return create_job(project_id, job, job_id_prefix, wait_interval)", "def train(self):\n backend = self.config.backend.build(self.config, self.tmp_dir)\n backend.train(source_bundle_uri=self.config.source_bundle_uri)", "def train_loop(job_name,\n agent,\n save_dir,\n seed = 0,\n niter = 101,\n gamma = 0.995,\n gae_lambda = None,\n num_cpu = 1,\n sample_mode = 'trajectories',\n num_samples = None,\n save_freq = 10,\n evaluation_rollouts = None,\n plot_keys = ['stoc_pol_mean']):\n # Validate parameters.\n if not os.path.isdir(save_dir):\n raise ValueError('Save directory {} does not exist'.format(save_dir))\n if sample_mode not in ['trajectories', 'samples']:\n raise ValueError('Invalid sample mode: {}'.format(sample_mode))\n\n # Choose a default for num_samples if not specified.\n if num_samples is None:\n num_samples = 50 if sample_mode == 'trajectories' else 50000\n\n # Initialize the folders in the save directory.\n iterations_dir = os.path.join(save_dir, 'iterations')\n if not os.path.isdir(iterations_dir):\n os.mkdir(iterations_dir)\n logs_dir = os.path.join(save_dir, 'logs')\n if agent.save_logs and not os.path.isdir(logs_dir):\n os.mkdir(logs_dir)\n\n # Initialize results log file.\n results_path = os.path.join(save_dir, 'results.txt')\n open(results_path, 'w').close()\n\n # Initialize training variables.\n np.random.seed(seed)\n best_policy = copy.deepcopy(agent.policy)\n best_perf = -1e8\n train_curve = best_perf * np.ones(niter)\n mean_pol_perf = 0.0\n\n # Prefix tensorboard logs with the job name.\n # tb_logger = tensorboard.get_prefixed(job_name)\n tb_logger = []\n # print('Starting training for job: {}'.format(job_name))\n\n for i in range(niter):\n print('.' 
* 80 + '\\nITERATION : {}'.format(i))\n\n if train_curve[i-1] > best_perf:\n best_policy = copy.deepcopy(agent.policy)\n best_perf = train_curve[i-1]\n\n stats = agent.train_step(\n N=num_samples,\n sample_mode=sample_mode,\n gamma=gamma,\n gae_lambda=gae_lambda,\n num_cpu=num_cpu,\n )\n train_curve[i] = stats[0]\n\n if evaluation_rollouts is not None and evaluation_rollouts > 0:\n print('Performing evaluation rollouts ........')\n mean_pol_perf = _evaluation_rollout(agent, evaluation_rollouts, num_cpu)\n if agent.save_logs:\n agent.logger.log_kv('eval_score', mean_pol_perf)\n\n if i % save_freq == 0 and i > 0:\n _save_policy(agent.policy, 'policy_{}'.format(i), iterations_dir)\n _save_policy(agent.baseline, 'baseline_{}'.format(i), iterations_dir)\n _save_policy(best_policy, 'best_policy', iterations_dir)\n if agent.save_logs:\n agent.logger.save_log(logs_dir)\n make_train_plots(log=agent.logger.log, keys=plot_keys, save_loc=logs_dir)\n\n _log_performance(i, train_curve[i], mean_pol_perf, best_perf,\n results_path, tb_logger)\n if agent.save_logs:\n print_data = sorted(filter(lambda v: np.asarray(v[1]).size == 1,\n agent.logger.get_current_log().items()))\n print(tabulate(print_data))\n\n # Save the final best policy.\n _save_policy(best_policy, 'best_policy', iterations_dir)\n if agent.save_logs:\n agent.logger.save_log(logs_dir)\n make_train_plots(log=agent.logger.log, keys=plot_keys, save_loc=logs_dir)", "def train_model(args: argparse.Namespace, hp: HParams, extension_architecture: str, timestamp: str,\n logger: logging.Logger) -> None:\n # 1. Check if we have to train a single tier or a complete model (with several tiers)\n if args.tier is not None:\n # 1.1 Argument tier was defined. Only that tier will be trained.\n logging.info(f\"Training single tier of the model: Tier {args.tier}\")\n\n # 2. Setup tensorboard logging\n # 2.1 Create tensorboard logs directory (tensorboard requires a different folder for each\n # run of the model, in this case every run to train a tier) so we add the extension of the\n # network's architecture of this run and the timestamp to identify it completely\n tensorboard_dir = f\"{hp.logging.dir_log_tensorboard}{extension_architecture}_\" \\\n f\"{timestamp}_tier{args.tier}\"\n Path(tensorboard_dir).mkdir(parents=True, exist_ok=True)\n # 2.2 Create tensorboard writer\n tensorboardwriter = TensorboardWriter(hp, tensorboard_dir)\n\n # 3. Start training of the tier\n train_tier(args, hp, args.tier, extension_architecture, timestamp, tensorboardwriter,\n logger)\n\n tensorboardwriter.close()\n\n else:\n # 1.2 Argument tier was not defined. Train all tiers of the model.\n logging.info(\"Training all tiers of the model\")\n\n for tier in range(1, hp.network.n_tiers + 1):\n # 2. Setup tensorboard logging (one for every tier)\n # 2.1 Create tensorboard logs directory (tensorboard requires a different folder for\n # each run of the model, in this case every run to train a tier) so we add the extension\n # of the network's architecture of this run and the timestamp to identify it completely\n tensorboard_dir = hp.logging.dir_log_tensorboard + extension_architecture \\\n + f\"_{timestamp}_tier{tier}\"\n Path(tensorboard_dir).mkdir(parents=True, exist_ok=True)\n # 2.2 Create tensorboard writer\n tensorboardwriter = TensorboardWriter(hp, tensorboard_dir)\n\n # 3. 
Start training of the tier\n train_tier(args, hp, tier, extension_architecture, timestamp, tensorboardwriter, logger)\n\n tensorboardwriter.close()\n del tensorboardwriter", "def start_training(self):\n if self.task_env is None:\n rospy.logfatal(\"No task environment found for training.\")\n if self.agent is None:\n rospy.logfatal(\"No agent found for training.\")\n self.agent.start_training()", "def training(self) -> None:\n self.compile_model()\n self.train_epoch()\n self.agent.save()", "def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)" ]
[ "0.80069077", "0.7198703", "0.71838933", "0.6437247", "0.63805366", "0.629484", "0.62795806", "0.6278729", "0.62004155", "0.6189536", "0.6156923", "0.6135809", "0.61020046", "0.6097955", "0.60901624", "0.60863006", "0.60771877", "0.6063894", "0.6054576", "0.6025766", "0.60214263", "0.6018209", "0.60086375", "0.6008442", "0.59942216", "0.5992626", "0.5988499", "0.5973047", "0.59694093", "0.5961963" ]
0.7579028
1
Takes a positive list of integers along with a target and returns a subset of the list that sums to the target
def diophantine_subset_sum(number_list, target, time_limit=TIME_LIMIT): started_at = time.time() # Sort numbers list. number_list = sorted(number_list) # Build sums list. sums_list = [number_list[0]] for n in range(1, len(number_list)): sums_list.append(number_list[n] + sums_list[n-1]) # Sanity check target. if number_list[0] > target or target > sums_list[-1]: return [] # Add first subset to subset stack. subset_stack = [(len(number_list)-1, target, ())] # Process subset stack. while subset_stack: # Enforce time constraint. runtime = time.time() - started_at if runtime > time_limit: raise SummerTimeoutError('No solution found in %d seconds.' % (time_limit)) # Pop first subset off queue offset, subtarget, subset = subset_stack.pop() # Keeps only sums less than subset target. sumlist_offset = 0 while sums_list[sumlist_offset] < subtarget and sumlist_offset < len(sums_list)-1: sumlist_offset += 1 # If next sums list value matches subset target, we have a solution. if sums_list[sumlist_offset] == subtarget: return subset + tuple(number_list[0:sumlist_offset+1]) # Keep only numbers in list less than subset target. while number_list[offset] > subtarget and offset > 0: offset = offset - 1 # If next number in list matches subset target, we have a solution. if number_list[offset] == subtarget: return subset + tuple([number_list[offset]]) # Add subsets to queue for any number list values falling between sums list # offset and numbers list offset step = (sumlist_offset <= offset) and 1 or -1 for new_offset in range(sumlist_offset, offset+step, step): new_subset = subset + tuple([number_list[new_offset]]) new_subtarget = subtarget - number_list[new_offset] if number_list[0] > new_subtarget: break subset_stack.append((new_offset-1, new_subtarget, new_subset)) # Solution not found return []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _select_sublist(lst, target):\n ln = len(lst)\n\n # Generate an array that indicates the decision bit for each element in the list.\n # If an element is deterministically true, then no decision bit is needed.\n choice_bits = [None] * ln\n x = 0\n for i in range(0, ln):\n if lst[i][1] not in (target.TRUE, target.FALSE):\n choice_bits[i] = x\n x += 1\n\n # We have 2^x distinct lists. Each can be represented as a number between 0 and 2^x-1=n.\n n = (1 << x) - 1\n\n while n >= 0:\n # Generate the list of positive values and node identifiers\n # noinspection PyTypeChecker\n sublist = [lst[i] for i in range(0, ln)\n if (choice_bits[i] is None and lst[i][1] == target.TRUE) or\n (choice_bits[i] is not None and n & 1 << choice_bits[i])]\n # Generate the list of negative node identifiers\n # noinspection PyTypeChecker\n sublist_no = tuple([target.negate(lst[i][1]) for i in range(0, ln)\n if (choice_bits[i] is None and lst[i][1] == target.FALSE) or (\n choice_bits[i] is not None and not n & 1 << choice_bits[i])])\n if sublist:\n terms, nodes = zip(*sublist)\n else:\n # Empty list.\n terms, nodes = (), ()\n yield terms, nodes + sublist_no + (0,)\n n -= 1", "def _random_subset(seq,m):\n targets=set()\n while len(targets)<m:\n x=random.choice(seq)\n targets.add(x)\n return targets", "def _select_targets(y, min_threshold=10, max_threshold=None):\n c = collections.Counter(y)\n y_sel = []\n for y_id in c:\n if c[y_id] > min_threshold:\n if max_threshold:\n if c[y_id] < max_threshold:\n y_sel.append(y_id)\n else:\n y_sel.append(y_id)\n return y_sel", "def _random_subset(seq,m):\n targets=random.sample(seq,m)\n return targets", "def select_random_subset(self, input_list):\n import random\n\n random_inp_list = []\n if self.params.advanced.random_sample.number == 0:\n if len(input_list) <= 5:\n random_sample_number = len(input_list)\n elif len(input_list) <= 50:\n random_sample_number = 5\n else:\n random_sample_number = int(len(input_list) * 0.1)\n else:\n random_sample_number = self.params.advanced.random_sample.number\n\n for i in range(random_sample_number):\n random_number = random.randrange(0, len(input_list))\n if input_list[random_number] in random_inp_list:\n while input_list[random_number] in random_inp_list:\n random_number = random.randrange(0, len(input_list))\n random_inp_list.append(input_list[random_number])\n else:\n random_inp_list.append(input_list[random_number])\n\n return random_inp_list", "def subrange(x, onset=None, offset=None):\n return (y[(y >= onset) & ~(y > (offset))] for y in x)", "def _random_subset(self, pa_nodes, seq, m, rng):\n targets = set()\n while len(targets) < m:\n x = rng.choice(seq)\n # if x in pa_nodes:\n if pa_nodes.get(x, False):\n targets.add(x)\n else:\n pass\n return targets", "def _random_subset(seq, m, seed):\n targets = set()\n random.seed(seed)\n\n while len(targets) < m:\n x = random.choice(seq)\n targets.add(x)\n return targets", "def searchRange4(self, nums: List[int], target: int) -> List[int]:\n def bisearch_l() -> int:\n i = -1\n l, r = 0, len(nums) - 1\n while l <= r:\n m = (l + r) // 2\n if nums[m] >= target:\n r = m - 1\n else:\n l = m + 1\n \n if nums[m] == target:\n i = m\n \n return i\n\n def bisearch_r() -> int:\n i = -1\n l, r = 0, len(nums) - 1\n while l <= r:\n m = (l + r) // 2\n if nums[m] > target:\n r = m - 1\n else:\n l = m + 1\n \n if nums[m] == target:\n i = m\n \n return i\n\n return [bisearch_l(), bisearch_r()]", "def subset_number_to_subset(\n set_size: int,\n subset_size: int,\n subset_number: int,\n order_matters: bool = 
False,\n can_reselect: bool = False\n) -> List[int]:\n # Sets can't have negative size\n if set_size < 0:\n raise ArithmeticError(f\"Can't have a set of {set_size} items\")\n if subset_size < 0:\n raise ArithmeticError(f\"Can't have a subset of {subset_size} items\")\n\n # Start with the empty set\n subset = []\n\n # The empty set is the only possible subset of size 0, so return it\n if subset_size == 0:\n # Subset number should be 0 for a subset size of 0\n if subset_number != 0:\n raise ArithmeticError(\n f\"0 is the only valid subset number for subsets of size 0, got {subset_number}\"\n )\n\n return subset\n\n # If there are no items to select from, the empty set is the only possible selection,\n # so any subsets of greater size are impossible\n if set_size == 0:\n raise ArithmeticError(\n f\"Can't select a non-empty subset (subset size = {subset_size}) from the empty set\"\n )\n\n # Special case for order-dependent\n if order_matters:\n # Ordered with reselection is shift-encoded, so simply shift-decode\n if can_reselect:\n while len(subset) < subset_size:\n subset.append(subset_number % set_size)\n subset_number //= set_size\n\n # Without reselection, the items available for selection reduces by 1 at each iteration\n else:\n factor = set_size - subset_size + 1\n while len(subset) < subset_size:\n next = subset_number % factor\n subset_number //= factor\n for index in range(len(subset)):\n if subset[index] >= next:\n subset[index] += 1\n subset.insert(0, next)\n factor += 1\n\n return subset\n\n # If reselect is allowed, we are expecting the equivalent binomial representation of the selection\n if can_reselect:\n set_size += subset_size - 1\n subset_size = set_size - subset_size\n\n # Decode the arithmetic encoding of the binomial representation\n num_subsets = number_of_subsets(set_size - 1, subset_size)\n k = subset_size\n for n in reversed(range(set_size)):\n if subset_number >= num_subsets:\n subset_number -= num_subsets\n subset.append(n)\n if len(subset) == subset_size:\n break\n num_subsets = num_subsets * k // n\n k -= 1\n elif n != 0:\n num_subsets = num_subsets * (n - k) // n\n\n # Convert the binomial representation back to the original multinomial one if reselection was enabled\n if can_reselect:\n subset.sort()\n subset_size = set_size - subset_size\n set_size -= subset_size - 1\n counts = {}\n total = 0\n for i in range(set_size - 1):\n last = -1 if i == 0 else subset[i - 1]\n count = subset[i] - last - 1\n total += count\n if count != 0:\n counts[i] = count\n if total < subset_size:\n counts[set_size - 1] = subset_size - total\n subset = []\n for value, count in counts.items():\n for i in range(count):\n subset.append(value)\n\n return subset", "def searchRange(self, nums: List[int], target: int) -> List[int]:\n if not nums:\n return [-1, -1]\n n = len(nums)\n start, end = 0, n - 1\n while start <= end:\n mid = start + (end - start + 1 + 1)//2 - 1\n left = right = -1\n if nums[mid] == target:\n left = right = mid\n elif nums[start] == target:\n left = right = start\n elif nums[end] == target:\n left = right = end\n\n if 0 <= left and left < n:\n has_left = left - 1 >= 0 and nums[left-1] == target\n has_right = right + 1 < n and nums[right+1] == target\n while has_left or has_right:\n if has_left:\n left -= 1\n if has_right:\n right += 1\n has_left = left - 1 >= 0 and nums[left-1] == target\n has_right = right + 1 < n and nums[right+1] == target\n\n return [left, right]\n\n elif nums[mid] > target:\n # [0, mid - 1]\n end = mid - 1\n else:\n # [mid + 1, n]\n start = mid + 
1\n\n return [-1, -1]", "def stratified_subset(features, targets, examples_per_class):\n idxs = np.array([False] * len(features))\n for target in np.unique(targets):\n idxs[np.where(targets == target)[0][:examples_per_class]] = True\n return features[idxs], targets[idxs]", "def return_indices(nums, target):\n indices = []\n i = 0\n number_found = False\n while not number_found:\n my_target = nums[i]\n \n for j in range(i+1,len(nums)):\n my_target += nums[j]\n if my_target == target:\n number_found = True\n indices = [i, j]\n break\n my_target = nums[i]\n \n i+=1\n return indices", "def list_sum_range_finder(lst, target):\n for n in range(2, len(lst)):\n for sublist in zip(*[lst[x:] for x in range(n)]):\n if sum(sublist) == target:\n return min(sublist) + max(sublist)", "def find_value(lists, target):\n loc = []\n l = len(lists)\n for i in range(0, l, 1):\n if(lists[i] == target):\n loc.append(i)\n else:\n continue\n return loc", "def coding_problem_42(numbers, target):\n if target == 0:\n return []\n\n valid_numbers = [n for n in numbers if 0 < n <= target]\n for number in sorted(valid_numbers, reverse=True):\n\n remaining_numbers = copy(valid_numbers)\n remaining_numbers.remove(number)\n partial_sum = coding_problem_42(remaining_numbers, target - number)\n if partial_sum is not None:\n return [number] + partial_sum\n\n return None", "def get_top_k_indexes_of_list(target_list, k, is_max=True, min_value=None):\n indexes = sorted(range(len(target_list)), key=lambda i: target_list[i], reverse=is_max)[:k]\n result = list()\n if min_value is not None:\n for index in indexes:\n if target_list[index] <= min_value:\n break\n result.append(index)\n else:\n result = indexes\n return result", "def discard_none_targets(dataset):\r\n indices = []\r\n for (ii,sample) in enumerate(dataset):\r\n target = sample[1]\r\n if target is not None:\r\n indices.append(ii)\r\n\r\n return Subset(dataset,indices)", "def sub(x: list[int], start: int, end: int) -> list[int]:\n result: list[int] = list()\n i: int = 0\n if end > len(x):\n end = len(x)\n if len(x) == 0: \n return result\n else: \n if start > len(x):\n return result\n else: \n if end <= 0:\n return result\n while i < end:\n if i >= start:\n result.append(x[i])\n else: \n if start < 0:\n result.append(x[i])\n i += 1\n return result", "def twoSum(self, nums: List[int], target: int) -> List[int]:\n d = {}\n for i, n in enumerate(nums):\n d[n]=i\n \n for i, n in enumerate(nums):\n m = target - n\n if m in d and d[m] != i:\n return [i,d[m]]\n return []", "def subset(arr, start, end):\n return [[row_data for row_data in row[start[1]:end[1]]] for row in arr[start[0]:end[0]]]", "def recursion(self, size, target, start):\n # Base case:\n if target == 0 and size == 0:\n return [[]]\n \n result = []\n for i in range(start, 10):\n if target - i >= 0 and size - 1 >= 0:\n all_but_i = self.recursion(size - 1, target - i, i + 1)\n # 1. all the combinations in `all_but_i` has size `size` - 1\n # 2. and sums up to `target` - `i`,\n # 3. and the minimum number in each combination is `i` + 1\n for combination in all_but_i:\n result.append([i] + combination)\n # inductive assumption maintained:\n \n # Adding back the number `i`,\n # 1. all the combinations in `result` has size `size`,\n # 2. and sums up to `target`\n # 3. 
and the minimum number in each combination is `i`\n return result", "def select_for_target(self, target):\n\n return [x for x in self.objects if x.target == target]", "def compute_random_subset(values, num_values):\n shuffled = values[:]\n random.shuffle(shuffled)\n return shuffled[:num_values]", "def adapt_target(self, target):\n\n target = target.view(-1)\n new_target = [target.clone()]\n target_idxs = []\n\n for i in range(len(self.cutoff) - 1):\n mask = target.ge(self.cutoff[i]).mul(target.lt(self.cutoff[i + 1]))\n new_target[0][mask] = self.cutoff[0] + i - self.buggy_offset\n\n if mask.any():\n target_idxs.append(mask.nonzero().squeeze(1))\n new_target.append(target[mask].add(-self.cutoff[i]))\n else:\n target_idxs.append(None)\n new_target.append(None)\n\n return new_target, target_idxs", "def prepare_target_list(how_many, target_value):\n target = []\n for one in range(how_many):\n target.append([target_value])\n target = np.array(target)\n return target", "def list_validate_number(lst, target):\n elements = set(lst)\n for element in elements:\n if target - element in elements:\n return True\n return False", "def subset(mylist,mybool):\n myarray = np.array(mylist)\n return(np.squeeze(myarray.take(np.where(mybool),axis=0)))", "def sub(z: list[int], x: int, y: int) -> list[int]:\n i: int = 0\n lists = list()\n end: int = y\n start: int = x\n if len(z) == 0 or end <= 0 or len(z) < start:\n return lists\n elif len(z) < end: \n while len(z) > start:\n lists.append(z[start])\n start += 1 \n return lists\n elif start >= 0: \n while end > start: \n lists.append(z[start])\n start += 1 \n return lists\n elif start < 0: \n while end > i:\n lists.append(z[i])\n i += 1\n return lists\n return z", "def get_elements_from_list(target_list, indexes):\n elements = [target_list[i] for i in indexes]\n return elements" ]
[ "0.7094783", "0.65067834", "0.6460729", "0.63038445", "0.6270336", "0.62450624", "0.62270397", "0.61682236", "0.61059326", "0.6029637", "0.6015087", "0.5967162", "0.5875812", "0.5808211", "0.5807387", "0.57974637", "0.5757506", "0.5754575", "0.5735689", "0.5707086", "0.5698021", "0.5663836", "0.5663737", "0.5657806", "0.563444", "0.56282127", "0.56242454", "0.5615647", "0.56149155", "0.5608319" ]
0.693455
1
Convert unit conversion with custom UnitRegistry
def test_convert_unit_with_custom_registry(test_df): df = get_units_test_df(test_df).rename(unit={"EJ/yr": "foo"}) # check that conversion fails with application registry with pytest.raises(pint.UndefinedUnitError): df.convert_unit("foo", "baz") # define a custom unit registry ureg = pint.UnitRegistry() ureg.define("baz = [custom]") ureg.define("foo = 3 * baz") exp = pd.Series([1.0, 6.0, 1.5, 9, 6, 21], name="value") assert_converted_units(df, "foo", "baz", exp, registry=ureg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _convert_unit(self, unit):\n if unit in self.units:\n return self.units[unit]\n elif unit in unit_map:\n return unit_map[unit]\n else:\n raise SBMLError('Unit not recognized: ' + str(unit))", "def convertUnit(*args, fromUnit: AnyStr=\"\", toUnit: AnyStr=\"\", **kwargs)->float:\n pass", "def convert(value, units: UnitLike, registry: unyt.UnitRegistry = None):\n return process_unit_input(value, units, convert=True, registry=registry).v", "def useUnits():", "def to_unit(self, unit):\n unit = _find_unit(unit)\n self.value = _convert_value(self.value, self.unit, unit)\n self.unit = unit", "def convert(x, unit1, unit2):\r\n return conversions[unit1][unit2](x)", "def SBMLUnitsConverter_init():\n return _libsbml.SBMLUnitsConverter_init()", "def convert_units(self, units):\n self.unit_array = self.unit_array.to(units)", "def convert_units(data, units):\n # Build the dictionary of units conversions\n convert = {'m' : [1.0, 0., 'm'], \n 'meter' : [1.0, 0., 'm'], \n 'deg C' : [1.0, 273.15, 'K'], \n 'Celsius' : [1.0, 273.15, 'K'], \n 'K' : [1.0, 0., 'K'],\n 'db' : [1.e4, 101325., 'Pa'], \n 'Pa' : [1.0, 0., 'Pa'],\n 'mg/m^3': [1.e-6, 0., 'kg/m^3'], \n 'S/m': [1.0, 0., 'S/m'],\n 'mS/m' : [1.e-3, 0., 'S/m'],\n 'psu': [1.0, 0., 'psu'], \n 'salinity': [1.0, 0., 'psu'], \n 'kg/m^3': [1.0, 0., 'kg/m^3'], \n 'kilogram meter-3': [1.0, 0., 'kg/m^3'], \n 'm/s': [1.0, 0., 'm/s'], \n 'mg/l': [1.e-3, 0., 'kg/m^3'],\n 'meter second-1' : [1.0, 0., 'm/s'],\n 'm.s-1' : [1.0, 0., 'm/s'],\n 'pH units' : [1.0, 0., 'pH units'],\n 'MPa' : [1.e6, 0., 'Pa'],\n '--' : [1.0, 0., '--'],\n 'mD' : [9.869233e-16, 0., 'm^2'],\n 'um' : [1.e-6, 0., 'm'],\n 'm/s 1e-9' : [1.e-9, 0., 'm/s'],\n 'm/s 1e-7' : [1.e-7, 0., 'm/s'],\n 'wt.%' : [10., 0., 'psu'],\n '10^-15 m^2' : [1.e-15, 0., 'm^2'],\n 'm^2' : [1., 0., 'm^2'],\n 'kg/m^2/year' : [3.168808781402895e-08, 0., 'kg/m^2/s'] \n } \n \n # Make sure the data are a numpy array and the units are a list\n if isinstance(data, float) or isinstance(data, int):\n data = np.array([data])\n if isinstance(data, list):\n data = np.array(data)\n if isinstance(units, str) or isinstance(units, unicode):\n units = [units]\n if units == None:\n units = ['']\n \n # Make sure you can slice through the columns: must be two-dimensional\n sh = data.shape\n data = np.atleast_2d(data)\n \n # Allow conversion of a row of data if all of the same unit\n if len(units) == 1 and data.shape[1] > 1:\n data = data.transpose()\n \n # Create an emtpy array to hold the output\n out_data = np.zeros(data.shape)\n out_units = []\n \n # Convert the units\n for i in range(len(units)):\n try:\n out_data[:,i] = data[:,i] * convert[units[i]][0] + \\\n convert[units[i]][1]\n out_units += [convert[units[i]][2]]\n except KeyError:\n print('Do not know how to convert %s to mks units' % units[i])\n print('Continuing without converting these units...')\n out_data[:,i] = data[:,i]\n out_units += units[i]\n \n # Return the converted data in the original shape\n out_data = np.reshape(out_data, sh, 'C')\n return (out_data, out_units)", "def fits_to_units(unit_str):\n unit_lookup = {\n 'meters': 'm',\n 'meter': 'm',\n 'degrees': 'deg',\n 'degree': 'deg',\n 'hz': 'Hz',\n 'hertz': 'Hz',\n 'second': 's',\n 'sec': 's',\n 'secs': 's',\n 'days': 'd',\n 'day': 'd',\n 'steradians': 'sr',\n 'steradian': 'sr',\n 'radians': 'rad',\n 'radian': 'rad',\n 'jy': 'Jy',\n 'au': 'AU',\n }\n\n try:\n new_units = \"\"\n\n if unit_str is None:\n unit_str = ''\n unit_str = unit_str.lower()\n unit_list = unit_str.split(\"/\")\n\n for uu in unit_list:\n if 
uu.endswith(\"s\") and len(uu) > 1:\n uu = uu[:-1]\n corrected_unit = unit_lookup.get(uu, uu)\n new_units += corrected_unit\n new_units += \" / \"\n new_units = new_units[:-3]\n unit = Unit(new_units)\n return unit\n\n except ValueError:\n warnings.warn(\"Unknown unit: %s\" % new_units, UnitWarning)\n return UnrecognizedUnit(unit_str)", "def test_convert_compatible_units(self):\n result = convert_units(self.arr, 'degC')\n expected_data = np.array([[-273.15, -272.15], [-271.15, -270.15]])\n expected_units = cf_units.Unit('degC')\n self.assertEquals(result.units, expected_units)\n self.assertArrayEqual(result.data, expected_data)", "def test_convert_same_unit():\n assert pressure_util.convert(2, PRESSURE_PA, PRESSURE_PA) == 2\n assert pressure_util.convert(3, PRESSURE_HPA, PRESSURE_HPA) == 3\n assert pressure_util.convert(4, PRESSURE_MBAR, PRESSURE_MBAR) == 4\n assert pressure_util.convert(5, PRESSURE_INHG, PRESSURE_INHG) == 5", "def reload_unit_registry():\n\n import time\n t_start = time.time()\n\n global _unit_registry\n\n _unit_registry = None\n\n reg = pint.UnitRegistry()\n\n # Define some \"standard\" additional units\n reg.define('piece = 1')\n reg.define('each = 1 = ea')\n reg.define('dozen = 12 = dz')\n reg.define('hundred = 100')\n reg.define('thousand = 1000')\n\n # Allow for custom units to be defined in the database\n try:\n from common.models import CustomUnit\n\n for cu in CustomUnit.objects.all():\n try:\n reg.define(cu.fmt_string())\n except Exception as e:\n logger.error(f'Failed to load custom unit: {cu.fmt_string()} - {e}')\n\n # Once custom units are loaded, save registry\n _unit_registry = reg\n\n except Exception:\n # Database is not ready, or CustomUnit model is not available\n pass\n\n dt = time.time() - t_start\n logger.debug(f'Loaded unit registry in {dt:.3f}s')\n\n return reg", "def assign_unit(self):\n self.units = {}\n for unit in RADIAL_UNITS:\n if unit.REPR == \"2th_deg\":\n self.units[unit] = self.tth_deg\n elif unit.REPR == \"2th_rad\":\n self.units[unit] = self.tth_rad\n elif unit.REPR == \"q_nm^-1\":\n self.units[unit] = self.q_nm\n elif unit.REPR == \"q_A^-1\":\n self.units[unit] = self.q_A\n elif unit.REPR == \"r_mm\":\n self.units[unit] = self.r_mm\n else:\n logger.warning(\"Unit unknown to GUI %s\" % unit)", "def unit_registry_to_human_readable(unit_registry):\n if unit_registry is None:\n return None\n new_registry = {}\n integer_one = 1\n for k in SI_base_registry:\n if unit_registry[k] is integer_one:\n new_registry[k] = 1, 1\n else:\n dim_list = list(unit_registry[k].dimensionality)\n if len(dim_list) != 1:\n raise TypeError(\"Compound units not allowed: {}\".format(dim_list))\n u_symbol = dim_list[0].u_symbol\n new_registry[k] = float(unit_registry[k]), u_symbol\n return new_registry", "def convert(self):\n return _libsbml.SBMLUnitsConverter_convert(self)", "def convert_units(value, from_unit, to_unit, **args):\n\n # Check if units can be converted\n if not can_convert(from_unit, to_unit):\n return '[!] 
Units cannot be converted\\n'\n\n # Extract the numeric value from the string\n decimal_places = 2 # Set the default value of precision\n if \".\" in str(value):\n decimal_places = len(str(value)[str(value).index('.') + 1:])\n\n # Return the value if units are the same\n if from_unit == to_unit:\n return str(value) + \" \" + get_si(to_unit)\n\n responses = [\n check_time(value, from_unit, to_unit, decimal_places), # Time units\n check_metric_imperial(\n value,\n from_unit,\n to_unit,\n decimal_places), # Metric and Imperial units\n check_digital_storage(\n value,\n from_unit,\n to_unit,\n decimal_places\n ) # Digital storage units\n ]\n\n for response in responses:\n if response:\n return response\n\n # actually convert the units\n try:\n return str(round(convert(from_unit, to_unit, float(value), **args), decimal_places)) + \" \" + get_si(to_unit)\n except RequireAdditionalParamError as e:\n additional_unit = input(\"\\n[*] Enter an additional unit (choose between \" + str(e.additional_params) + \"): \")\n additional_value = float(input(\"\\n[*] Enter the value: \"))\n return convert_units(value, from_unit, to_unit, **{additional_unit: additional_value})\n except ConversionError as e:\n print(e.reason)", "def spice_unit_convert(valuet, restrict=[]):\n # valuet is a tuple of (unit, value), where \"value\" is numeric\n # and \"unit\" is a string. \"restrict\" may be used to require that\n # the value be of a specific class like \"time\" or \"resistance\". \n\n # Recursive handling of '/' and multiplicatioon dot in expressions\n if '/' in valuet[0]:\n parts = valuet[0].split('/', 1)\n result = numeric(spice_unit_convert([parts[0], valuet[1]], restrict))\n result /= numeric(spice_unit_convert([parts[1], \"1.0\"], restrict))\n return str(result)\n\n if '\\u22c5' in valuet[0]:\t# multiplication dot\n parts = valuet[0].split('\\u22c5')\n result = numeric(spice_unit_convert([parts[0], valuet[1]], restrict))\n result *= numeric(spice_unit_convert([parts[1], \"1.0\"], restrict))\n return str(result)\n\n if '\\u00b2' in valuet[0]:\t# squared\n part = valuet[0].split('\\u00b2')[0]\n result = numeric(spice_unit_unconvert([part, valuet[1]], restrict))\n result *= numeric(spice_unit_unconvert([part, \"1.0\"], restrict))\n return str(result)\n\n if valuet[0] == \"\":\t\t# null case, no units\n return valuet[1]\n\n for unitrec in unittypes:\t# case of no prefix\n if re.match('^' + unitrec + '$', valuet[0]):\n if restrict:\n if unittypes[unitrec] == restrict.lower():\n return valuet[1]\n else:\n return valuet[1]\n\n for prerec in prefixtypes:\n for unitrec in unittypes:\n if re.match('^' + prerec + unitrec + '$', valuet[0]):\n if restrict:\n if unittypes[unitrec] == restrict.lower():\n newvalue = numeric(valuet[1]) * prefixtypes[prerec]\n return str(newvalue)\n else:\n newvalue = numeric(valuet[1]) * prefixtypes[prerec]\n return str(newvalue)\n\n # Check for \"%\", which can apply to anything.\n if valuet[0][0] == '%':\n newvalue = numeric(valuet[1]) * 0.01\n return str(newvalue)\n \n if restrict:\n raise ValueError('units ' + valuet[0] + ' cannot be parsed as ' + restrict.lower())\n else:\n # raise ValueError('units ' + valuet[0] + ' cannot be parsed')\n # (Assume value is not in SI units and will be passed back as-is)\n return valuet[1]", "def convert_units(unt, origunits):\n if unt[0:3] == origunits[0:3] | unt[0:3] == 'ori':\n units = origunits\n convf = 1\n else:\n if 'fee' == unt[(((0:3 -1) -1) -1)]:\n if origunits[0:3] == 'met':\n units = 'feet'\n convf = 3.2808399\n else:\n units = origunits\n convf 
= 1\n else:\n if 'met' == unt[(((0:3 -1) -1) -1)]:\n if origunits[0:3] == 'fee':\n units = 'meters'\n convf = 0.3048\n else:\n units = origunits\n convf = 1\n else:\n if 'm/s' == unt[(((0:3 -1) -1) -1)]:\n if origunits[0:3] == 'kno':\n units = 'meters/sec'\n convf = 0.51444444\n else:\n units = origunits\n convf = 1\n else:\n if 'kno' == unt[(((0:3 -1) -1) -1)]:\n if origunits[0:3] == 'm/s':\n units = 'knots'\n convf = 1.9438445\n else:\n units = origunits\n convf = 1\n else:\n error('Unknown units')\n #\n return units, convf", "def init():\n return _libsbml.SBMLUnitsConverter_init()", "def test_unit_conversion(self):\n self.cube_uv_down.convert_units(\"kW m-2\")\n scale_factor = 1.0\n expected = np.full_like(\n self.cube_uv_down.data, dtype=np.float32, fill_value=0.1\n )\n result = calculate_uv_index(self.cube_uv_down, scale_factor)\n self.assertArrayEqual(result.data, expected)", "def convert_unit(self, time_unit):\r\n\r\n self.time_unit = time_unit\r\n self._conversion_factor = time_unit_conversion[time_unit]", "def unit2internal(src_unit: Union[str, float]):\n return _parse_unit(src_unit, conversion_factor=_conversion_factor_internal)", "def units_to_fits(unit):\n if unit is None:\n unit = Unit('')\n return unit.to_string(\"fits\").upper()", "def get_converted_si_unit():\n units = request.args.get('units')\n response = ConvertUnit(units).convert()\n return jsonify(response)", "def to(self, new_unit, **kwargs):\n new_unit = u.Unit(new_unit)\n return self * (self.unit.to(new_unit, **kwargs) * new_unit / self.unit)", "def unit_converter(val, from_u, to_u):\n\tconverter = {'b':0, 'k':1, 'm':2, 'g':3, 't':4}\n\tif converter[from_u] < converter[to_u]:\n\t\tval = float(val)\n\t\tfor _ in range(converter[to_u] - converter[from_u]):\n\t\t\tval = val/1024\n\telse:\n\t\tfor _ in range(converter[from_u] - converter[to_u]):\n\t\t\tval = val * 1024\n\t\t\t\n\treturn val", "def convert_units(src_unit: Union[str, float], tgt_unit: Union[str, float]):\n return _parse_unit(src_unit) / _parse_unit(tgt_unit)", "def _conversion_factor_internal(unit: str):\n return globals()[unit]", "def convert(self, value, units, newunits):\n return value * self._units[units] / self._units[newunits]" ]
[ "0.72558576", "0.7242834", "0.69972605", "0.67883354", "0.6740144", "0.6727505", "0.6723467", "0.67014116", "0.66655236", "0.6658775", "0.6603207", "0.65299666", "0.64555025", "0.64369696", "0.6419799", "0.64174867", "0.64120305", "0.6399249", "0.63895", "0.6342039", "0.6332659", "0.628787", "0.62832105", "0.62518567", "0.6233634", "0.62296045", "0.6228601", "0.62142986", "0.61970496", "0.61949044" ]
0.72553796
1
insert a column to tb. if called, all operation related to db must be fitted.
def insert_column(self, tb_name, column_name, data_type): sentences = f""" ALTER TABLE {tb_name} ADD COLUMN {column_name} {data_type}; """ print(sentences) self.commit(sentences)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _addColumn(self, table, column, init_data):\n\t\tcommand = \"ALTER TABLE \" + table + \" ADD COLUMN \" + str(column) + \" \" + getSQLiteType(init_data)\n\t\ttry:\n\t\t\tself._run_command(command)\n\t\texcept sqlite3.OperationalError:\n\t\t\tprint(\"Column \" + str(column) + \" already exists!\")", "def insert_column(self):\n try:\n lbl_name = Tk.Label(self, text='Enter a column name: ')\n lbl_name.grid(row=0, column=0, sticky=Tk.W+Tk.E)\n ent_name = Tk.Entry(self)\n ent_name.grid(row=0, column=1, sticky=Tk.W+Tk.E)\n lbl_type = Tk.Label(self, text='Enter a column type: ')\n lbl_type.grid(row=1, column=0, sticky=Tk.W+Tk.E)\n ent_type = Tk.Entry(self)\n ent_type.grid(row=1, column=1, sticky=Tk.W+Tk.E)\n\n def _insert_column():\n c_name = ent_name.get()\n c_type = ent_type.get()\n self.parent.insert_column(self.parent.table, c_name, c_type)\n self.destroy()\n self.parent.populate_display()\n b_ins = Tk.Button(self,\n text='Insert Column',\n command=_insert_column)\\\n .grid(row=2, column=1, sticky=Tk.W+Tk.E)\n except Exception, ex:\n logging.error(ex)\n traceback.print_exc()", "def add_column(self, tap_column):\r\n self.__columns.append(tap_column)", "def add_column(self, fieldname, column, align=..., valign=...):\n ...", "def insert_column(self, column_name, column_type, table, params=None, overwrite=False, after_col=None, verbose=True):\n \n assert(self.connected)\n \n try: assert(self.check_table(table, verbose=False)) \n except AssertionError: raise TableNotFoundError\n \n \n \n if self.check_column(column_name, table, verbose=False): \n \n if not overwrite:\n \n if verbose: print(\"The column '{0}' already exists in the table '{1}'.\".format(column_name, table))\n return False\n \n else:\n \n if verbose: \n print(\"The column '{0}' already exists in the table '{1}'.\".format(column_name, table))\n \n self.delete_column(column_name,table,verbose=True)\n \n self._insert_column(column_name, column_type, table, params, overwrite, after_col)\n \n else:\n \n self._insert_column(column_name, column_type, table, params, overwrite, after_col)\n \n \n if verbose: print(\"Column '{0}' added to the table '{1}' successfully.\".format(column_name, table))\n \n return True", "def _insert_column(self, column_name, column_type, table, params=None, overwrite=False, after_col=None, verbose=True):\n \n not_null = ''\n auto_increment = ''\n \n if params != None and 'not_null' in params:\n not_null = 'NOT NULL'\n \n \n if params != None and 'auto_increment' in params:\n auto_increment = \"AUTO_INCREMENT\"\n \n \n ADD_COLUMN_COMMAND = \"ALTER TABLE {0} ADD {1} {2} {3} {4}\".format(table, column_name, column_type, not_null, auto_increment)\n \n if (after_col != None and type(after_col) is str):\n ADD_COLUMN_COMMAND += \" AFTER {0} \".format(after_col)\n \n \n self.cursor.execute(ADD_COLUMN_COMMAND)\n \n if verbose: \n print(\"Adding the column '{0}' to the table '{1}'...\".format(column_name, table))\n print(\"\\t\" + ADD_COLUMN_COMMAND) \n \n \n if params != None and 'foreign_key' in params:\n \n if 'references' not in params:\n raise InvalidParameterError\n \n referenced_table = params['references'].split('(')[0]\n referenced_column = params['references'].split('(')[1][:-1] \n \n \n if (not self.check_table(referenced_table, verbose=False)):\n raise(TableNotFoundError)\n \n \n if (not self.check_column(referenced_column, referenced_table, verbose=False)):\n raise(ColumnNotFoundError)\n \n \n ADD_FOREIGN_KEY_COMMAND = \"ALTER TABLE {0} ADD FOREIGN KEY ({1}) REFERENCES {2}({3})\".format(table, 
column_name, referenced_table, referenced_column)\n \n \n if verbose: \n print(\"\\t\" + ADD_FOREIGN_KEY_COMMAND) \n \n self.cursor.execute(ADD_FOREIGN_KEY_COMMAND)", "def addColumn(self, *column):\n self.insertColumn(self._width, *column)", "def add_column_into_source(self, tap_type, table, new_column):\n run_query_method = getattr(self, f'run_query_tap_{tap_type}')\n run_query_method(\n f'ALTER TABLE {table} ADD {new_column[\"name\"]} int'\n )\n run_query_method(\n f'UPDATE {table} set {new_column[\"name\"]}={new_column[\"value\"]} where 1=1'\n )", "def create_column(self, new_column, dtype):\n self.logger.debug(\"[%u] Ready to add column %s\" %\n (os.getpid(), new_column))\n ddl = \"\"\"\n ALTER TABLE {schema}.{table}\n ADD COLUMN IF NOT EXISTS {col} {type}\n \"\"\"\n # TODO Replace by execute_ddl func and test it\n with get_sink_connection_string(self) as conn:\n with conn.cursor() as cursor:\n cursor.execute(ddl.format(schema=self.config['schema'],\n table=self.config['table'],\n col=new_column,\n type=dtype))\n self.logger.debug(\"[%u] Column %s has been added\" %\n (os.getpid(), new_column))", "def AddColumn(self, column):\n self.columns.append(column)\n self.column_dict[column.column_id] = column", "def insertColumn(self, index, *column):\n if ((len(column) == 1) and (type(column[0]) in MATRIX_VALID_COLLECTIONS)):\n column = column[0]\n if self._height:\n if not (len(column) == self._height):\n raise ValueError('Improper length for new column: %d, should be %d' % (len(column), self._height))\n else:\n self._height = len(column)\n for i in range(self._height):\n self._value.append(list())\n self._width += 1\n for i in range(self._height):\n if not (type(column[i]) in MATRIX_VALID_TYPES):\n message = \"Values must be of type \"\n for t in range(len(MATRIX_VALID_TYPENAMES)):\n if t:\n message += ' or '\n message += \"'%s'\" % MATRIX_VALID_TYPENAMES[t]\n raise TypeError(message)\n self._value[i].insert(index, column[i])", "def insert_column(self, identifier, position, name, datastore):\n # Raise ValueError if given colum name is invalid.\n if name is not None and not is_valid_name(name):\n raise ValueError(\"invalid column name '{}'\".format(name))\n # Get dataset. 
Raise exception if dataset is unknown.\n dataset = datastore.get_dataset(identifier)\n if dataset is None:\n raise ValueError(\"unknown dataset '{}'\".format(identifier))\n # Insert new column into dataset.\n df = dataset.to_dataframe()\n df = vizual.insert_column(df=df, names=[name], pos=position)\n # Store updated dataset to get new identifier.\n ds = datastore.update_dataset(\n origin=dataset,\n df=df,\n annotations=dataset.annotations\n )\n return VizualApiResult(ds)", "def addcolumn(self, colname, coldata):\n if len(coldata) != len(self):\n raise ValueError,\"Column length must match catalog length\"\n\n #Most of the bookkeeping is the same as for an empty column\n self.addemptycolumn(colname,coldata.dtype)\n\n #and then we reset the column to contain the actual data\n setattr(self,colname,coldata)", "def add_column(self, table_name: str, column) -> None:\n sql = 'ALTER TABLE ' + table_name + ' ADD COLUMN ' + column.to_sql()\n self.cursor.execute(sql)", "def setCol(self, col):\n self.column = col", "def addTableColumn(self, tablename, columnname, columntype):\n\n # Check if the table exists\n if tablename in self.getTableNames():\n\n # Check that the column does not already exist\n if columnname not in self.getColumnNames(tablename):\n\n #Allow columnames with spaces\n columnname = '`'+columnname+'`'\n\n \"\"\"# Fit characters to the allowed format if necessary\n fmt = ''\n if (self.connector == 'mysql' and\n ('TEXT' in columntype or 'VARCHAR' in columntype) and\n not ('CHARACTER SET' in columntype or\n 'utf8mb4' in columntype)):\n\n # We enforze utf8mb4 for mysql\n fmt = ' CHARACTER SET utf8mb4'\n\n\n sqlcmd = ('ALTER TABLE ' + tablename + ' ADD COLUMN ' +\n columnname + ' ' + columntype + fmt)\"\"\"\n sqlcmd = ('ALTER TABLE ' + tablename + ' ADD COLUMN ' +\n columnname + ' ' + columntype) \n self._c.execute(sqlcmd)\n\n # Commit changes\n self._conn.commit()\n\n else:\n print((\"WARNING: Column {0} already exists in table {1}.\"\n ).format(columnname, tablename))\n\n else:\n print('Error adding column to table. 
Please, select a valid ' +\n 'table name from the list')\n print(self.getTableNames())\n\n return", "def insert_column(df, colTitle, colIndex, fillValue):\n if colTitle not in df.columns:\n df.insert(colIndex, colTitle, fillValue, True)\n return df", "def add_column(self, col_name, definition):\n if not self.column_exists(col_name):\n self.execute(self.commands.add_column(self.name, col_name, definition))", "def insert(self, j, column, default=None):\n try: column = [v for v in column]\n except:\n raise TypeError, \"Table.columns.insert(x): x must be list\"\n column = column + [default] * (len(self._table) - len(column))\n if len(column) > len(self._table):\n self._table.extend([[None]] * (len(column)-len(self._table)))\n for i, row in enumerate(self._table):\n row.insert(j, column[i])\n self._table._m += 1 # Increase column count.", "def add_column_to_staging_table(cursor,table_schema,table_name,column_name):\n if not check_if_column_exists(cursor, table_schema, table_name, column_name):\n add_column = \"ALTER TABLE \" + table_schema + \".\" + table_name + \" ADD COLUMN \" + column_name + \" text;\"\n cursor.execute(add_column)", "def apply_to_table(self, table: Table):\n table.change_column(self.column)", "def add_column(self, schema):\n self[schema.name] = schema.copy()", "def add_col(self, colname, n_batch=5000, debug=False):\n\n if debug: print(\"Create new column {col}\".format(col=colname))\n # Alter table add column\n #\n alter_query = '''\n ALTER TABLE \"{tablename}\"\n ADD COLUMN \"{colname}\" {datatype};\n '''.format(tablename=self.get_carto_tablename(),\n colname=colname,\n datatype=datatype_map(str(self.dtypes[colname])))\n if debug: print(alter_query)\n\n # add column\n resp = self.carto_sql_client.send(alter_query)\n if debug: print(resp)\n\n # update all the values in that column\n #\n # NOTE: fails if colval is 'inf' or some other exceptional Python\n # or NumPy type\n n_items = len(self[colname])\n update_query = '''\n UPDATE \"{tablename}\"\n SET \"{colname}\" = {colval}\n WHERE \"cartodb_id\" = {cartodb_id};\n '''\n queries = []\n\n for row_num, item in enumerate(self[colname].iteritems()):\n # if debug: print(item)\n pgtype = dtype_to_pgtype(self[colname].dtype, colname)\n temp_query = update_query.format(\n tablename=self.get_carto_tablename(),\n colname=colname,\n colval=numpy_val_to_pg_val(item[1], pgtype),\n cartodb_id=item[0]).strip()\n queries.append(temp_query)\n if (len(queries) == n_batch) or (row_num == n_items - 1):\n output_query = '\\n'.join(queries)\n if debug: print(output_query)\n if debug: print(\"Num chars in query: {}\".format(len(output_query)))\n resp = self.carto_sql_client.send(output_query)\n queries = []\n\n return None", "def add_table_column(self, schema, column):\n if not column[\"name\"] or not constants.NAME_RX.match(column[\"name\"]):\n raise ValueError(\"invalid column name\")\n if utils.name_in_nocase(column[\"name\"], [c[\"name\"] for c in schema[\"columns\"]]):\n raise ValueError(\"non-unique column name\")\n if column[\"type\"] not in constants.COLUMN_TYPES:\n raise ValueError(\"invalid column type\")\n sql = (\n f'''ALTER TABLE \"{schema['name']}\"'''\n f\"\"\" ADD COLUMN \"{column['name']}\" {column['type']}\"\"\"\n )\n if column.get(\"notnull\"):\n notnull = [\"NOT NULL\"]\n if column[\"type\"] == constants.INTEGER:\n notnull.append(\"DEFAULT 0\")\n elif column[\"type\"] == constants.REAL:\n notnull.append(\"DEFAULT 0.0\")\n elif column[\"type\"] in (constants.TEXT, constants.BLOB):\n notnull.append(\"DEFAULT ''\")\n sql += \" \" 
+ \" \".join(notnull)\n self.dbcnx.execute(sql)\n schema[\"columns\"].append(column)\n self.update_table(schema)", "def addcolumn(self, column):\n if column not in self.headersindex:\n database = managers.database_manager.get_database(self.owner_id, self.database_id)\n cur = database.get_connection().cursor()\n cur.execute(\"ALTER TABLE \\'%s\\' ADD COLUMN %s\" % (self.name, column.to_declaration()))", "def insertData(table, column, input):\n\ttry:\n\t\tcon = sqlite3.connect('PampDb.db')\n\t\tcur = con.cursor()\n\t\tcur.execute(\"INSERT INTO '\" + table + \"' (\" + column + \") VALUES ('\" + input + \"')\")\n\t\tcon.commit()\n\t\tcon.close()\n\texcept:\n\t\tprint('Could not run function insertData from DbController')", "def add_column_into_target_sf(self, tap_type, table, new_column):\n self.run_query_target_snowflake(\n f'ALTER TABLE ppw_e2e_tap_{tap_type}{self.sf_schema_postfix}.{table} ADD {new_column[\"name\"]} int'\n )\n self.run_query_target_snowflake(\n f'UPDATE ppw_e2e_tap_{tap_type}{self.sf_schema_postfix}.{table}'\n f' SET {new_column[\"name\"]}={new_column[\"value\"]} WHERE 1=1'\n )", "def test_add_column(self):\n name_column = Varchar()\n name_column._meta.name = \"name\"\n\n genre_column = Varchar()\n genre_column._meta.name = \"genre\"\n\n schema: t.List[DiffableTable] = [\n DiffableTable(\n class_name=\"Band\",\n tablename=\"band\",\n columns=[name_column, genre_column],\n )\n ]\n schema_snapshot: t.List[DiffableTable] = [\n DiffableTable(\n class_name=\"Band\",\n tablename=\"band\",\n columns=[name_column],\n )\n ]\n\n schema_differ = SchemaDiffer(\n schema=schema, schema_snapshot=schema_snapshot, auto_input=\"y\"\n )\n\n self.assertTrue(len(schema_differ.add_columns.statements) == 1)\n self.assertEqual(\n schema_differ.add_columns.statements[0],\n \"manager.add_column(table_class_name='Band', tablename='band', column_name='genre', db_column_name='genre', column_class_name='Varchar', column_class=Varchar, params={'length': 255, 'default': '', 'null': False, 'primary_key': False, 'unique': False, 'index': False, 'index_method': IndexMethod.btree, 'choices': None, 'db_column_name': None})\", # noqa\n )", "def _add_column_and_format(self, table, label, column):\n table[label] = column\n if label in self._formats:\n table.set_format(label, self._formats[label])", "def _addcolumns(self, columnname, columndata=\"\"):\n self[columnname] = columndata" ]
[ "0.6773", "0.65841776", "0.64992654", "0.6474949", "0.6455139", "0.64373386", "0.6368149", "0.6303153", "0.629054", "0.6239281", "0.6229999", "0.622003", "0.6205193", "0.6202118", "0.6199418", "0.6196598", "0.6169299", "0.6163966", "0.613079", "0.60933846", "0.60929984", "0.6081151", "0.6055493", "0.60359025", "0.59919715", "0.59901536", "0.5931063", "0.592207", "0.5881164", "0.58758914" ]
0.76917046
0
Throw double buffer into widget drawable
def on_draw(self, widget, cr): #print "starting to draw" if self.double_buffer is not None: self.draw_tiles() cr.set_source_surface(self.double_buffer, 0.0, 0.0) cr.paint() else: print('Invalid double buffer') #print "done drawing" return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_buffer(self):\n \n self.shape.buf = [pi3d.Buffer(self.shape, self.verts, self.texcoords, self.inds, self.norms)]\n self.shape.set_draw_details(self.shader, [self.spritesheet.img])", "def draw(self):", "def draw(self, screen):", "def pre_draw(self):", "def on_configure(self, widget, event, data=None):\n print \"reconfiguring\"\n # Destroy previous buffer\n if self.double_buffer is not None:\n self.double_buffer.finish()\n self.double_buffer = None\n\n # Create a new buffer\n self.double_buffer = cairo.ImageSurface(cairo.FORMAT_ARGB32,\n widget.get_allocated_width(),\n widget.get_allocated_height())\n self.height = widget.get_allocated_height()\n self.width = widget.get_allocated_width()\n\n # Initialize the buffer\n self.draw_tiles()\n print \"config done\"\n return False", "def __init__(self, *args):\n _gdi_.BufferedDC_swiginit(self,_gdi_.new_BufferedDC(*args))\n # save a ref so the other dc will not be deleted before self\n self.__dc = args[0] \n # also save a ref to the bitmap\n if len(args) > 1: self.__bmp = args[1]", "def on_draw_overlay(self):", "def __init__(self, *args, **kwargs):\n _gdi_.AutoBufferedPaintDC_swiginit(self,_gdi_.new_AutoBufferedPaintDC(*args, **kwargs))", "def Blit(*args, **kwargs):\n return _gdi_.DC_Blit(*args, **kwargs)", "def on_draw_over_image(self):", "def _draw_widget(self, *args) -> None:\n del args\n\n if self.canvas is None:\n return\n\n # TODO: allow user to set rotation/scale origin\n center = center_of_points_list(self.points)\n self.canvas.clear()\n\n with self.canvas:\n Color(*self.color)\n Scale(self.scale, origin=center)\n Rotate(angle=self.rotation, origin=center)\n KivyPoint(points=self.points,\n pointsize=self.pointsize)", "def draw():", "def draw(self):\n\t\tpass", "def fig2buf(fig):\n # draw the renderer\n fig.canvas.draw()\n\n # Get the RGBA buffer from the figure\n w,h = fig.canvas.get_width_height()\n buf = np.fromstring ( fig.canvas.tostring_argb(), dtype=np.uint8 )\n buf.shape = (h, w, 4)\n \n # canvas.tostring_argb give pixmap in ARGB mode. 
Roll the ALPHA channel to have it in RGBA mode\n buf = np.roll(buf, 3, axis = 2 )\n buf = buf[0::1,0::1] #slice to make image 4x smaller and use only the R channel of RGBA\n buf = buf[0::1,0::1, 0:3] #slice to make image 4x smaller and use only the R channel of RGBA\n return buf", "def draw(self):\n return ImageDraw.Draw(self.buffer)", "def decorate(self):\n\n c = self.canvas\n c.rect(20, 20, 20, 20, fill=1) # bt lf\n c.rect(self.width - 40, 20, 20, 20, fill=1) # bt rt\n c.rect(20, self.height - 40, 20, 20, fill=1) # tp lf\n c.rect(self.width/2 - 10, 20, 20, 20, fill=1) # bt md\n c.rect(20, self.height/2 - 10, 20, 20, fill=1) # md lf\n c.rect(self.width - 40, self.height - 40, 20, 20, fill=1) # tp rt\n c.rect(self.width - 40, self.height/2 - 10, 20, 20, fill=1) # md rt", "def draw(self, surface):\n surface.blit(self.image, (0,0))\n for widget in self.widgets:\n widget.draw(surface)", "def draw(self) -> Any:", "def buffer(\n self,\n distance,\n resolution=...,\n quadsegs=...,\n cap_style=...,\n join_style=...,\n mitre_limit=...,\n single_sided=...,\n ): # -> BaseGeometry:\n ...", "def draw(self, x, y):\r\n for w in self.widgets:\r\n if w.visible:\r\n w.draw()\r\n self.pointer.position(x + self.p_dx, y + self.p_dy, 0.5)\r\n self.pointer.draw()", "def draw(self):\n pass", "def draw(self):\n pass", "def draw(self):\n pass", "def draw(self):\n pass", "def draw( self, **kw ):\n pass", "def update(): # (12)\n with canvas(device) as draw:\n for led_pos in range(0, len(color_buffer)):\n color = color_buffer[led_pos]\n\n ## If your LED strip's colors are are not in the expected\n ## order, uncomment the following lines and adjust the indexes\n ## in the line color = (rgb[0], rgb[1], rgb[2])\n # rgb = getrgb(color)\n # color = (rgb[0], rgb[1], rgb[2])\n # if len(rgb) == 4:\n # color += (rgb[3],) # Add in Alpha\n\n draw.point((led_pos, 0), fill=color)", "def pre_render(self) -> None:\n self.buffer = Surface((self.render_width, self.render_height), SRCALPHA)\n self.buffer.fill(list(self.halo_texture.surfaces.values())[0].get_at((0, 0)))\n\n self.buffer.fill((0, 0, 0, 0), Rect(\n (self.render_width - self.halo_texture.get_width()) // 2,\n (self.render_height - self.halo_texture.get_height()) // 2,\n self.halo_texture.get_width(),\n self.halo_texture.get_height()\n ))", "def draw_brick(self, x, y):\n pygame.draw.rect(self.main_surface, self.color, (x, y, self.width, self.height), 0)\n pygame.display.update()", "def __init__(self, *args, **kwargs):\n _gdi_.BufferedPaintDC_swiginit(self,_gdi_.new_BufferedPaintDC(*args, **kwargs))\n if len(args) > 1: self.__bmp = args[1]", "def blit(self):\n raise NotImplementedError()" ]
[ "0.5492979", "0.54373515", "0.541417", "0.5406553", "0.53597254", "0.5355659", "0.5354216", "0.5340868", "0.53357756", "0.5319597", "0.52766144", "0.5271925", "0.5259718", "0.5236922", "0.52361697", "0.5216407", "0.5194417", "0.5152858", "0.51386267", "0.51239336", "0.51079524", "0.51079524", "0.51079524", "0.51079524", "0.51078683", "0.51076776", "0.51038384", "0.509398", "0.5088107", "0.50674677" ]
0.6775266
0
Configure the double buffer based on size of the widget
def on_configure(self, widget, event, data=None): print "reconfiguring" # Destroy previous buffer if self.double_buffer is not None: self.double_buffer.finish() self.double_buffer = None # Create a new buffer self.double_buffer = cairo.ImageSurface(cairo.FORMAT_ARGB32, widget.get_allocated_width(), widget.get_allocated_height()) self.height = widget.get_allocated_height() self.width = widget.get_allocated_width() # Initialize the buffer self.draw_tiles() print "config done" return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setBufferSize(self, buffer_size):\n DPxSetDinBuffSize(buffer_size)", "def _changed_size(self, **kw):\n\t\tself._clear_matrix()\n\t\t\n\t\tself._recalc_adjustments()\n\t\t\n\t\tif self.flags() & gtk.REALIZED:\n\t\t\tif kw.get('resize', True): self.queue_resize()\n\t\t\tif kw.get('draw', True): self.queue_draw()", "def set_widget_size(self, widget_size):\n v = self.viewport\n v.projection.widget_rect = Rect(\n mins=[0, 0],\n maxes=[widget_size[0], widget_size[1]])\n v.view.widget_size = v.projection.widget_rect.sizes", "def on_size(self, event):\n size = self.GetSize()\n self.SetSize(size)\n gauge_pos, gauge_size = self.get_gauge_dimensions()\n self.gauge.SetSize(gauge_size)\n event.Skip()\n self.Update()", "def on_draw(self, widget, cr):\n #print \"starting to draw\"\n if self.double_buffer is not None:\n self.draw_tiles()\n cr.set_source_surface(self.double_buffer, 0.0, 0.0)\n cr.paint()\n else:\n print('Invalid double buffer')\n #print \"done drawing\"\n return False", "def use_buffer(self, buffer_size):\n self.__buffer_size = buffer_size\n if self.__buffer is None:\n self.__buffer = []", "def onSetToHalfSize(self, evt):\n\t\tself.halfResampleZ.Enable(1)\n\t\tif self.dataUnits:\n\t\t\tx, y, z = self.dataUnits[0].dataSource.getOriginalDimensions()\n\t\t\tzf = 1\n\t\t\t\n\t\t\tif self.halfResampleZ.GetValue():\n\t\t\t\tzf = 0.5\n\t\t\tself.currSize = int(0.5 * x), int(0.5 * y), int(zf * z)\n\t\tself.fourthResampleZ.Enable(0)\n\t\tfor obj in [self.factorLabel, self.dimLabel, self.newDimX, self.newDimY, self.newDimZ, self.factorX, self.factorY, self.factorZ]:\n\t\t\tobj.Enable(0)", "def OnSize(self, event):\r\n size = self.GetClientSizeTuple()\r\n self.gList.SetSize(size)", "def on_size(self, event):\n # Forces reconfiguration of the viewport, modelview and projection\n # matrices on the next paint event\n self.init = False", "def on_size(self, event):\n # Forces reconfiguration of the viewport, modelview and projection\n # matrices on the next paint event\n self.init = False", "def OnSize(self, event):\r\n \r\n self.UpdateHintWindowSize()\r\n event.Skip()", "def cb_size(self, event):\n if not self.size_timer.IsRunning():\n self.size_timer.StartOnce(2000)\n event.Skip(True)", "def update_display(self):\n self.lick_plot_0.setData(self.k+self.T,self.buffer[:,1]) \n self.lick_plot_1.setData(self.k+self.T,self.buffer[:,2]) \n self.breathing_plot.setData(self.k+self.T,self.buffer[:,0]) \n \n if self.settings.movie_on.value():\n self.camera_image.setImage(self.camera.read())\n if self.settings.save_movie.value():\n self.camera.write()\n \n #print(self.buffer_h5.size)", "def double_height_on(self):\n self._set_print_mode(self.DOUBLE_HEIGHT_MASK)", "def SetUniformBitmapSize(self, size):\r\n\r\n self._requested_bmp_size = wx.Size(*size)\r\n\r\n # if window is already initialized, recalculate the tab height\r\n if self._dummy_wnd:\r\n self.UpdateTabCtrlHeight()", "def onSize(self, event): \n\t\tw, h = self.GetClientSizeTuple()\n\t\tself.tree.SetDimensions(0, 0, w, h)", "def _configure_canvas(event):\n if self.internal_frame.winfo_reqwidth() != self.canvas.winfo_width():\n## print \"frame\",self.internal_frame.winfo_reqwidth()\n## print \"canvas\",self.canvas.winfo_width()\n # update the inner frame's width to fill the canvas\n## self.canvas.itemconfigure(interior_id, width=self.canvas.winfo_width())\n self.canvas.config(width=self.internal_frame.winfo_reqwidth())\n if self.internal_frame.winfo_reqheight() != self.canvas.winfo_height():\n # update the inner frame's width to fill the canvas\n## 
self.canvas.itemconfigure(interior_id, width=self.canvas.winfo_width())\n self.canvas.config(height=self.internal_frame.winfo_reqheight())", "def configure_canvas(self):\r\n self.window.update_idletasks() # this updates window size\r\n\r\n border = 10\r\n self.canvas.config(\r\n width=self.window.winfo_reqwidth() + border,\r\n height=min(350, self.window.winfo_reqheight() + border,))\r\n self.canvas.configure(scrollregion=(\r\n 0, 0,\r\n self.window.winfo_reqwidth() + border,\r\n self.window.winfo_reqheight() + border))", "def __reconfig__(self, event):\r\n x, y = event.width//2, event.height//2\r\n self.canvas.config(scrollregion=(-x, -y, x, y))", "def set_point_size(self, point_size=0.0):\r\n for b in self.buf:\r\n b.unib[8] = point_size", "def on_resize(self, *args):\n\n self.page_current.width = terminal.width # Give page new terminal width\n self.render_buffer = []\n\n self.render() # Re-render buffer", "def onSetToFourthSize(self, evt):\n\t\tself.halfResampleZ.Enable(0)\n\t\tself.fourthResampleZ.Enable(1)\n\t\tif self.dataUnits:\n\t\t\tzf = 1\n\t\t\tx, y, z = self.dataUnits[0].dataSource.getOriginalDimensions()\n\t\t\t\n\t\t\tif self.fourthResampleZ.GetValue():\n\t\t\t\tzf = 0.25\n\t\t\tself.currSize = int(0.25 * x), int(0.25 * y), int(zf * z) \n\t\tfor obj in [self.factorLabel, self.dimLabel, self.newDimX, self.newDimY, self.newDimZ, self.factorX, self.factorY, self.factorZ]:\n\t\t\tobj.Enable(0)", "def FloatingSize(self, size):\r\n \r\n self.floating_size = wx.Size(*size)\r\n return self", "def __init__(self, size: int):\n self.size = size\n self.window = []", "def OnSize(self, event):\r\n\r\n self.Layout()", "def set_mode(self, size, buffered=True, *args, **kwargs):\n self.canvas = Canvas(size, buffered)\n env.set_env('canvas', self.canvas)\n self.frame = Window.getDocumentRoot()\n env.set_env('frame', self.frame)\n panel = SimplePanel(Widget=self.canvas)\n RootPanel().add(panel)\n self.panel = panel\n self.vpanel = None\n self.textbox = None\n self.textarea = None\n self.Textbox = Textbox\n self.Textarea = Textarea\n self.surface = self.canvas.surface\n self.surface._display = self\n self._surface_rect = self.surface.get_rect()\n if not self.canvas._bufferedimage:\n self.flip = lambda: None\n self.update = lambda *arg: None\n return self.surface", "def on_canvas_resize(self, event) -> None:\r\n\r\n self.painter.adjust_to_canvas()\r\n self.painter.draw_board()", "def __init__(self, buffer_size=3000):\n self.buffer = []\n self.buffer_size = buffer_size", "def __init__(self, buffer_size=3000):\n self.buffer = []\n self.buffer_size = buffer_size", "def _set_size(self):\n if self.width_key is not None:\n width = config.get(self.width_key)\n height = config.get(self.height_key)\n self.window.resize(width, height)" ]
[ "0.610253", "0.5766414", "0.5608554", "0.56082743", "0.5588019", "0.55230093", "0.5471639", "0.54091835", "0.5401123", "0.5401123", "0.5352664", "0.53298753", "0.5326704", "0.5319585", "0.53189176", "0.53112936", "0.53038687", "0.5290001", "0.52694154", "0.5253243", "0.52476627", "0.523922", "0.52297443", "0.5229701", "0.518845", "0.51817435", "0.5160045", "0.5155831", "0.5155831", "0.5151278" ]
0.6723716
0
Shows a category item
def showItem(category_item_id): return render_template('item.html', item=db.findItem(id=category_item_id))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def showCategory(category_id):\n category = session.query(Category).\\\n filter_by(id=category_id).one()\n item = session.query(Item).\\\n filter_by(category_id=category.id)\n return render_template('item.html', category=category, item=item)", "def showItem(category_id):\n category = session.query(Category).filter_by(id=category_id).one()\n items = session.query(Item).filter_by(category_id=category_id).all()\n\n return render_template('items.html', items=items, category=category)", "def view_category(cat_id):\n session['target'] = url_for('view_category', cat_id=cat_id)\n sqlsession = SQLSESSION()\n category = sqlsession.query(Category).filter_by(id=cat_id).first()\n categories = sqlsession.query(Category).all()\n items = sqlsession.query(Item).filter_by(category_id=cat_id).all()\n return render_template(\"view_category.html\",\n category=category,\n categories=categories,\n items=items,\n item_title=category.name + \" Items\")", "def show_item(category, item):\n # Detect login status\n login_status = None\n if 'email' in login_session:\n login_status = True\n # Provide state token to enable Google Sign-In\n state = login_session['state']\n # Query database with SQLAlchemy to show selected category and item\n category = (session.query(Categories)\n .filter_by(name=category.replace('-', ' '))\n .one())\n item = (session.query(Items)\n .filter_by(name=item.replace('-', ' '), category_id=category.id)\n .one())\n # Render webpage\n return render_template('show_item.html',\n item=item,\n category=category,\n login_status=login_status,\n CLIENT_ID=CLIENT_ID,\n STATE=state)", "def show_category(category):\n # Detect login status\n login_status = None\n if 'email' in login_session:\n login_status = True\n # Provide state token to enable Google Sign-In\n state = login_session['state']\n # Query database with SQLAlchemy to show all categories\n categories = (session.query(Categories)\n .order_by(Categories.name)\n .all())\n # Query database with SQLAlchemy to show selected category and items\n category = (session.query(Categories)\n .filter_by(name=category.replace('-', ' '))\n .one())\n category_items = (session.query(Items)\n .filter_by(category_id=category.id)\n .order_by(Items.name)\n .all())\n category_items_count = (session.query(Items)\n .filter_by(category_id=category.id)\n .count())\n # Render webpage\n return render_template('show_category.html',\n categories=categories,\n category_name=category.name,\n category_items=category_items,\n category_items_count=category_items_count,\n login_status=login_status,\n CLIENT_ID=CLIENT_ID,\n STATE=state)", "def cli(ctx, category_id):\n return ctx.ti.categories.show_category(category_id)", "def showItems(category_id):\r\n # validation\r\n category = session.query(Category).filter_by(id=category_id).first()\r\n if not category:\r\n flash('Attempt to view non-existent category')\r\n return redirect(url_for('showCategories'))\r\n\r\n # authorization\r\n items = session.query(Item).filter_by(category_id=category_id).all()\r\n if 'username' not in login_session or\\\r\n login_session['user_id'] != category.user_id:\r\n return render_template(\"publicShowItems.html\", category=category,\r\n items=items)\r\n\r\n return render_template(\"showItems.html\", category=category, items=items,\r\n logged_in_user_id=login_session['user_id'])", "def items_category(request, category_slug):\n\n category = get_object_or_404(Category, slug=category_slug)\n items = Item.objects.filter(category=category)\n\n context = {\n 'category': category,\n 'items': items,\n }\n\n 
return render(request, 'category/items_category.html', context)", "def view_item(item_id):\n session['target'] = url_for('view_item', item_id=item_id)\n sqlsession = SQLSESSION()\n item = sqlsession.query(Item, Category).join(Category)\\\n .filter(Item.id == item_id).first()\n return render_template(\"view_item.html\", item=item)", "def show_category_items(category_id):\n all_categories = session.execute(\n 'SELECT category.name, category.id, count(item.id) AS item_count '\n 'FROM category LEFT JOIN item ON category.id = item.category_id '\n 'GROUP BY category.name, category.id')\n category = session.query(Category).filter(Category.id == category_id).first()\n items = session.query(Item).filter(Item.category_id == category_id)\n item_count = items.count()\n return render_template('category_items.html',\n all_categories=all_categories,\n category=category,\n items=items,\n item_count=item_count,\n login_session=login_session)", "def show(self, item_id):\n pass", "def showItemsByCategory(category_id):\n createSession()\n categories = db.getIndexCategories()\n category = db.getByCategory(category_id)\n items = db.getItemsByCategory(category_id)\n return render_template('category.html',\n main_category=category,\n categories=categories,\n category_id=category_id,\n items=items,\n user_id=login_session.get('user_id'),\n STATE=login_session.get('state'))", "def showCategoryItems(category_id):\r\n session = DBSession()\r\n category = session.query(Category).filter_by(id=category_id).one()\r\n creator = getUserInfo(category.user_id)\r\n items = session.query(Item).filter_by(\r\n category_id=category.id).order_by(asc(Item.name))\r\n # or creator.id != login_session['user_id']:\r\n if 'username' in login_session:\r\n return render_template('items.html', category=category, items=items)\r\n else:\r\n return render_template('itemspublic.html', category=category, items=items, creator=creator)", "def display_item(categories, item, item_id, initial_category_id):\n if item:\n # Item already exists - display on page\n return render_template('item_edit.html', item_id=item_id, item_name=item.Item.name,\n item_description=item.Item.description, item_category=item.Item.category,\n item_category_id=item.Item.category_id, categories=categories,\n login_session=login_session,\n csrf_token=generate_csrf_token())\n else:\n print('initial_category_id', initial_category_id)\n # Default fields for creating a new item\n return render_template('item_edit.html', item_id=0, item_name=\"\",\n item_description=\"\", item_category=\"\",\n item_category_id=initial_category_id, categories=categories,\n login_session=login_session, initial_category_id=initial_category_id,\n csrf_token=generate_csrf_token())", "def category(request):\n\n return render(request, \"core/category_list.html\", {\n \"category_list\": Category.objects.all()\n })", "def show_category(category, page):\n per_page = current_app.config['POSTS_PER_PAGE']\n category = Category.query.filter_by(name=category).first() or abort(404)\n posts = category.posts.order_by(Post.id.desc())\n if not session.get('logged_in'): posts = posts.filter_by(visible=True)\n items = posts.limit(per_page).offset((page - 1) * per_page).all()\n pagination = Pagination(posts, page=page, per_page=per_page, \n total=posts.count(), items=items)\n flash(\"Posts in category '%s'\" % category.name)\n return render_template('posts.html', pagination=pagination,\n endpoint_func=lambda x: url_for('main.show_category', \n category=category.name, page=x))", "def display_fav_cats():\n\n #display cat 
general info from db\n #have a delete button/option\n #commit changes to the db\n #if the user clicks the cat for more details, redirect to /more-details/<int:cat_id>", "def category(request, slug):\n categry = get_object_or_404(Category,slug=slug)\n story_list = Story.objects.filter(category=category)\n heading = \"Category: %s\" % category.label\n return render_to_response('cms/story_list.html', locals())", "def showItems(category_id):\n\n category = session.query(Category).filter_by(id=category_id).one()\n items = session.query(ListItem).filter_by(category_id=category_id).all()\n creator = getUserInfo(category.user_id)\n if 'user_id' in login_session:\n if category.user_id != login_session['user_id']:\n return render_template('pubitems.html',\n items=items,\n category=category,\n creator=creator,\n user=getUserInfo(login_session['user_id']))\n else:\n return render_template('items.html',\n items=items,\n category=category,\n user=getUserInfo(login_session['user_id']))\n else:\n return render_template('pubitems.html',\n items=items,\n category=category,\n creator=creator)", "def render_category_page(\r\n self, client_id, state, user_id, user_name, category_id):\r\n category = self._db_manager.get_category(category_id)\r\n if category is None:\r\n flash(\"Invalid category.\")\r\n return\r\n items = self._db_manager.get_category_items(category_id)\r\n return render_template(\r\n \"category_view.html\",\r\n client_id=client_id,\r\n state=state,\r\n is_logged_in=user_id is not None,\r\n is_creator=category[\"user_id\"] == user_id,\r\n user_name=user_name,\r\n category=category,\r\n items=items\r\n )", "def view(request, category):\r\n return render(request, 'categories/view.html', {\r\n 'title': category.name,\r\n 'category': category,\r\n 'articles': Article.objects.select_related('author','author__profile','category').filter(category=category, status=Article.PUBLISHED),\r\n })", "def show_cat(slug):\n cat = Category.query.filter_by(slug=slug).first()\n return redirect(url_for('articles.show_all') + '?c=' + str(cat.id))", "def goto_category_by_title(self,category):\n\n return self.catbrowser.goto_category_by_title(category)", "def category_items(category_id):\n\n items = Item.query.filter(\n Item.category_id == category_id,\n Item.user_id == current_user.id\n ).all()\n categories = Category.query.filter(\n Category.user_id == current_user.id).all()\n if not categories:\n flash(\"Couldn't find this category\", category='warning')\n\n return render_template(\n 'index.html',\n categories=categories,\n items=items,\n current_category_id=category_id)", "def show_category(update, context):\n query = update.callback_query\n bot = context.bot\n # loads json received from callback_data into dictionary\n ids = json.loads(query.data)\n category_id = ids['category_id']\n keyboard = []\n\n products = menu[category_id]['products']\n # iterates over all products in selected category\n for product_id in products.keys():\n product = products[product_id]\n # add each product to keyboard with id information as callback_data\n keyboard.append([InlineKeyboardButton(product['name'] + \" \" + product['price'],\n callback_data=json.dumps({\"category_id\": category_id,\n \"product_id\": product_id}))])\n\n reply_markup = InlineKeyboardMarkup(keyboard)\n # edits last message to keyboard with all products from category\n bot.edit_message_text(chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n text='Select desired food in ' + menu[category_id]['category'] + \":\",\n reply_markup=reply_markup)\n # 
notify ConversationHandler of THIRD stage\n return THIRD", "async def category(self,ctx):\n await ctx.send(\"Yes this is a category.\")", "def showCategories():\n categories = session.query(Category).order_by(asc(Category.name))\n return render_template('categories.html', categories=categories)", "def show_categories():\n categories = session.query(Category).all()\n username = login_session.get('username')\n user_id = login_session.get('user_id')\n provider = login_session.get('provider')\n if username is not None:\n username = login_session.get('username')\n return render_template(\"categories.html\", categories=categories,\n username=username, user_id=user_id,\n provider=provider)", "def showCategoryDetails(cat_id):\n\n category = Category.query.get(cat_id)\n # get all the poses under that category\n all_poses = db.session.query(Pose).join(PoseCategory).filter(PoseCategory.cat_id==cat_id).all()\n\n return render_template(\"category-details.html\", all_poses=all_poses, category=category)", "def category(request, category_id, template_name='doppler/shift/catalog/category.haml'):\n category = get_object_or_404(Category, pk=category_id, enabled=True)\n products = category.enabled_products\n subcategories = category.children.filter(enabled=True)\n return render_to_response(\n template_name,\n {\n 'category': category,\n 'products': products,\n 'subcategories': subcategories,\n },\n context_instance=RequestContext(request))" ]
[ "0.79654604", "0.7739004", "0.7643676", "0.7522781", "0.7224209", "0.702118", "0.7007901", "0.69838154", "0.6969686", "0.68813515", "0.67284054", "0.6714985", "0.67128104", "0.6640791", "0.65600896", "0.6559297", "0.65177655", "0.6493208", "0.6447697", "0.6445132", "0.6391688", "0.63806367", "0.6373013", "0.63603246", "0.6285808", "0.6166651", "0.61424804", "0.61104655", "0.60680586", "0.60586" ]
0.7802788
1
Allow user to create new catalog item
def newItem(): if request.method == 'POST': db.createItem( title=request.form['title'], description=request.form['description'], category_id=request.form['category'], user_id=login_session['user_id']) flash("New catalog item created!", 'success') return redirect(url_for('showCatalog')) return render_template('new_item.html', categories=db.getAllCategories())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_item(self, user: User, **kwargs) -> None:", "def addCatalogItem(sport_id):\n\n sport = session.query(Sport).filter_by(id=sport_id).one()\n if request.method == 'POST':\n newCatalogItem = Item(\n name=request.form['itemName'],\n description=request.form['itemDescription'],\n sport_id=sport_id,\n user_id=login_session['user_id'])\n session.add(newCatalogItem)\n session.commit()\n return redirect(url_for('showCatalog', sport_id=sport_id))\n else:\n return render_template('newcatalogitem.html', sport_id=sport_id)", "def create_item():\n name = request.form['name']\n catagory = request.form['catagory']\n description = request.form['description']\n errors = form_errors(request.form)\n if errors:\n catagories = [c.name for c in Catagory.fetch_all()]\n values = {\n 'name': name, 'catagory': catagory, 'description': description\n }\n return render_template(\n 'add_item.html',\n catagories=catagories,\n values=values,\n errors=errors\n )\n Item.create(name, catagory_name=catagory, description=description)\n return redirect(url_for(\n 'read_item', catagory_name=catagory, item_name=name\n ))", "def createItem(category_id):\r\n if 'username' not in login_session:\r\n return redirect(url_for('showLogin'))\r\n if request.method == 'POST':\r\n session = DBSession()\r\n item = Item(name=request.form['name'],\r\n description=request.form['description'],\r\n category_id=category_id,\r\n user_id=login_session['user_id'])\r\n session.add(item)\r\n session.commit()\r\n return redirect(url_for('showCategoryItems', category_id=category_id))\r\n else:\r\n return render_template('newitem.html', category_id=category_id)", "def new_item():\n if request.method == 'POST':\n new_item = Item(\n category_id=int(request.form['category']),\n name=request.form['name'],\n description=request.form['description'],\n created_date=datetime.datetime.now(),\n user_id=login_session['user_id'])\n session.add(new_item)\n session.commit()\n return redirect(\n url_for(\n 'item_details',\n category_id=new_item.category_id,\n item_id=new_item.id))\n else:\n categories = session.query(\n Category).all()\n return render_template(\n 'views/add.html',\n categories=categories)", "def newListItem(category_id):\n\n if 'username' not in login_session:\n return redirect('/login')\n\n category = session.query(Category).filter_by(id=category_id).one()\n\n if category.user_id != login_session['user_id']:\n flash('You are not the creator of %s category, and cannot modify it' %\n category.name)\n return redirect(url_for('showItems', category_id=category.id))\n if request.method == 'POST':\n if \"btn_new\" in request.form:\n newItem = ListItem(name=request.form['name'],\n description=request.form['description'],\n category_id=category_id,\n user_id=login_session['user_id'])\n session.add(newItem)\n session.commit()\n flash('New Catalog Item: %s Successfully Created' % (newItem.name))\n return redirect(url_for('showItems', category_id=category_id))\n else:\n return redirect(url_for('showItems', category_id=category_id))\n else:\n return render_template('newitem.html',\n category_id=category_id,\n user=getUserInfo(login_session['user_id']))", "def test_vault_create_new_vault_item(self):\n pass", "def add_item():\n if 'username' not in login_session:\n response = make_response(\n json.dumps({'error': 'User is logged out. 
This should not happen'}), 401\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n try:\n if request.method == 'POST':\n item = Item()\n # First we populate the new item.\n item.category_id = request.form['categoryId']\n item.picture = request.form['picture']\n item.name = request.form['name']\n item.price = request.form['price']\n item.description = request.form['description']\n item.user_id = login_session['user_id']\n # Now let's pull its category.\n category = session.query(Category).filter_by(id=item.category_id).one()\n # And make sure they're properly linked.\n item.category = category\n session.add(item)\n session.flush()\n id = item.id\n session.commit()\n response = make_response(\n json.dumps({'success': '', 'nonce': login_session['state'], 'id': id}), 200\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n except Exception as inst:\n print(type(inst))\n print(inst.args)\n print(inst)", "def insert_item():\n if 'userinfo' not in session.keys():\n session['target'] = url_for('insert_item')\n return redirect(url_for('gconnect'))\n if request.method == 'POST':\n creator_email = session['userinfo']['email']\n sqlsession = SQLSESSION()\n user = sqlsession.query(User).filter_by(email=creator_email).first()\n item = Item(name=request.form['name'],\n description=request.form['description'],\n category_id=int(request.form['category']),\n creator_id=user.id)\n sqlsession.add(item)\n sqlsession.commit()\n return redirect(\"/\")\n sqlsession = SQLSESSION()\n categories = sqlsession.query(Category).all()\n return render_template(\"new_item.html\",\n categories=categories)", "def newItem(category_id):\n category = session.query(Category).filter_by(id=category_id).one()\n if request.method == 'POST':\n newItem = Item(name=request.form['name'],\n description=request.form['description'],\n price=request.form['price'], category_id=category.id,\n user_id=login_session['user_id'])\n session.add(newItem)\n session.commit()\n flash('New Item %s Successfully Created' % (newItem.name))\n return redirect(url_for('showItem', category_id=category.id))\n else:\n return render_template('newitem.html', category_id=category.id)", "def newItem():\n if request.method == 'POST':\n if not checkLogin():\n return requests(url_for('catelog'))\n\n if request.form['name'].strip() == '':\n flash('item create failed: name is empty!')\n return redirect(url_for('newItem'))\n\n category = session.query(\n Category).filter_by(\n name=request.form['category']).one()\n\n ifCategory = session.query(Category).filter_by(\n name=request.form['category']).one()\n ifItem = session.query(Item).filter_by(\n category_id=ifCategory.id,\n name=request.form['name']).all()\n if (len(ifItem) > 0):\n flash('item create failed: item(%s) \\\n is already exist in category(%s)' % (\n ifItem[0].name,\n ifCategory.name))\n return redirect(url_for('catelog'))\n\n newItem = Item(\n name=request.form['name'],\n description=request.form['description'],\n category=category,\n auth=getLoginUser(),\n time=getIntTime())\n session.add(newItem)\n session.commit()\n\n flash('new item created: %s' % newItem.name)\n\n return redirect(url_for(\n 'itemDetail',\n category_name=category.name,\n item_name=newItem.name))\n else:\n all_category = session.query(Category).all()\n return render_template(\n 'new-item.html',\n all_category=all_category,\n isLogin=checkLogin())", "def create_item_page():\n catagories = [c.name for c in Catagory.fetch_all()]\n return render_template('add_item.html', 
catagories=catagories, values={})", "def insert_item_page(request):\n validate(instance=request.body, schema=item_schema)\n body = json.loads(request.body)\n item = Item.new_item(body['cart_id'], body['food_id'], body['count'])\n return JsonResponse(model_to_json(item))", "def new_item():\n form = ItemForm()\n user = current_user\n\n # If the form is validated, add its data to the database\n if form.validate_on_submit():\n\n # Check that an item with the same name and sport does not\n # already exist, or send a flash message and do not add the\n # new item to the database\n query = Item.query.filter_by(name=form.name.data,\n sport=form.sport.data).first()\n if query:\n flash('This sport already has an item with that name.', 'bad')\n\n # If the item does not yet exist, add all details to the\n # database, send a flash message, and redirect to 'home'\n else:\n name = form.name.data\n sport = form.sport.data\n category = form.category.data\n description = form.description.data\n private = form.private.data\n item = Item(name=name, sport=sport, category=category,\n description=description, private=private,\n user_id=user.id)\n db.session.add(item)\n db.session.commit()\n flash(f'\"{name}\" has been added!', 'good')\n return redirect(url_for('main.home'))\n\n return render_template('new_item.html', form=form, title='New Item')", "def create_item():\n #if not request.json:\n # abort(400)\n parser = reqparse.RequestParser()\n parser.add_argument('item_code', type=int, required=False, help=\"Item code missing\")\n parser.add_argument('item_name', type=str, required=True, help=\"Item name missing\")\n parser.add_argument('size', type=str, required=True, help=\"Size missing\")\n parser.add_argument('color', type=str, required=True, help=\"Color missing\")\n parser.add_argument('quality', type=str, required=True, help=\"Quality missing\")\n parser.add_argument('username', type=str, required=True, help=\"Username missing\")\n args = parser.parse_args(strict=True)\n user_code = get_user_code(args['username'])\n if user_code is None:\n return make_response(jsonify({'error': 'User does not exists'}), 400)\n new_item = dict(\n item_code = args['item_code'],\n item_name = args['item_name'],\n size_code = get_size_code( args['size']),\n color_code = get_color_code( args['color']),\n quality_code = get_quality_code( args['quality'])\n )\n try:\n u = models.Items(**new_item)\n db.session.add(u)\n db.session.commit()\n except sqlalchemy.exc.IntegrityError, e:\n return make_response(jsonify({'error': 'item code already exists.'}), 400)\n\n return make_response(jsonify({'success': True}))", "def add_item():\n\n form = ItemForm()\n # Query for select field\n form.category_id.query = Category.query.filter(\n Category.user_id == current_user.id).all()\n\n if form.validate_on_submit():\n new_item = Item(\n category_id=form.category_id.data.id,\n name=form.name.data.capitalize(),\n description=form.description.data,\n user_id=current_user.id)\n db.session.add(new_item)\n db.session.commit()\n flash(\"New item '{}' was successfully created\".format(\n form.name.data.capitalize()), category='success')\n return redirect(url_for('url.index'))\n\n return render_template(\n 'forms/form.html',\n form_title='Add Item',\n form=form,\n form_name='item',\n action=url_for('url.add_item'))", "def add(auth_context):\n print(\"Add\")\n uid = auth_context.get('uid')\n item_id = request.form.get('id')\n if item_id:\n print(\"item_id: \"+item_id)\n carts.add_to_cart(uid,item_id)\n return \"Operation Completed\", 200\n return 
\"Operation Failed\", 400", "def add_item(request):\n if request.user.is_superuser:\n if request.method == \"POST\":\n form = ProductForm(request.POST, request.FILES)\n if form.is_valid():\n new_item = form.save()\n messages.success(request, 'Your product was added to the '\n 'store successfully.')\n return redirect(reverse('item_info', args=[new_item.id]))\n else:\n messages.error(request, 'There was an issue adding the '\n 'product. Please ensure the form is valid.')\n else:\n form = ProductForm()\n else:\n messages.error(request, 'Sorry, you do not have permission to access '\n 'this page.')\n return redirect(reverse('home'))\n\n template = 'shop/add_item.html'\n context = {\n 'form': form,\n }\n\n return render(request, template, context)", "def newItem(category_id):\n editedCategory = session.query(Category). \\\n filter_by(id=category_id).one()\n if editedCategory.user_id != login_session['user_id']:\n flash('You are not authorized to edit this Category.\\\n Please create your own Category in order to edit.')\n return redirect(url_for('showCategory', category_id=category_id))\n if request.method == 'POST':\n newItem = Item(\n name=request.form['name'],\n description=request.form['description'],\n price=request.form['price'],\n category_id=category_id,\n user_id=login_session['user_id'])\n session.add(newItem)\n session.commit()\n flash('New Item created')\n return redirect(url_for('showCategory',\n category_id=category_id))\n else:\n return render_template('newItem.html', category_id=category_id)", "def put_on_sale():\n\n item = {\n \"status\": 'for_sale',\n \"category\": request.form['item-type'],\n \"name\": request.form['item-name'],\n \"price\": request.form['item-price'],\n \"description\": request.form['item-description'],\n \"mail\": request.form['seller-email']\n }\n\n put_item(item)\n\n return redirect('/')", "def catalog_create(self, args):\n try:\n if args.id and self.server.connect_ermrest(args.id).exists():\n print(\"Catalog already exists\")\n return\n owner = args.owner if args.owner else None\n catalog = self.server.create_ermrest_catalog(args.id, owner)\n if args.auto_configure:\n model = catalog.getCatalogModel()\n model.configure_baseline_catalog(**args.configure_args)\n if not args.quiet:\n print(\"Created new catalog %s with the following default configuration:\\n\" % catalog.catalog_id)\n pp(catalog.get('/').json())\n except HTTPError as e:\n if e.response.status_code == requests.codes.not_found:\n raise ResourceException('Catalog not found', e)\n elif e.response.status_code == requests.codes.conflict:\n raise ResourceException(\"Catalog already exists\", e)\n else:\n raise e", "def createNewItem(request):\n newItem = ItemSerializer(data=request.data)\n if newItem.is_valid():\n newItem.save()\n return Response(newItem.data, status=status.HTTP_201_CREATED)\n\n fail = {\n \"item\" : \"item is not valid\"\n }\n return JsonResponse(fail)", "def createItem(name, description, category_id, image, user_id):\n i = Item(name=name, description=description, category_id=category_id,\n image=image, user_id=user_id, pub_date=datetime.utcnow())\n db_session.add(i)\n db_session.commit()\n return i", "def add_item():\n # Verify user login. 
If not, redirect to login page.\n login_status = None\n if 'email' in login_session:\n login_status = True\n else:\n flash('Please log in.')\n return redirect(url_for('home'))\n if request.method == 'POST':\n # Get form fields\n name = request.form['name']\n url = request.form['url']\n photo_url = request.form['photo_url']\n description = request.form['description']\n category = request.form['item_category']\n # Retrieve the database ID of the selected category\n category_id = (session.query(Categories)\n .filter_by(name=category.replace('-', ' '))\n .one())\n # Retrieve user's database ID for the item's database entry\n user_db_id = (session.query(Users)\n .filter_by(email=login_session['email'])\n .one()).id\n print(\"Current user's database primary key id is {}.\"\n .format(user_db_id))\n print('Database ID of category is {}.'.format(category_id.id))\n # Flash messages for incomplete item info\n if not request.form['name']:\n flash('Please add item name')\n return redirect(url_for('add_item'))\n if not request.form['url']:\n flash('Please add item URL')\n return redirect(url_for('add_item'))\n if not request.form['photo_url']:\n flash('Please add item photo URL')\n return redirect(url_for('add_item'))\n if not request.form['description']:\n flash('Please add a description')\n return redirect(url_for('add_item'))\n # Query database for item name\n item_name_in_db = (session.query(Items.name)\n .filter_by(name=name)\n .all())\n # If the item name is already in the database, don't add\n if item_name_in_db:\n print('Item name \"{}\" already in database.'.format(name))\n flash('Item name \"{}\" already in database.'.format(name))\n return redirect(url_for('add_item'))\n # Create object with form field info to add to database\n new_item = Items(name=name,\n url=url,\n photo_url=photo_url,\n description=description,\n category_id=category_id.id,\n creator_db_id=user_db_id)\n session.add(new_item)\n session.commit()\n print('Item \"{}\" created.'.format(new_item.name))\n # Return to homepage\n return redirect(url_for('home'))\n else:\n # Query database with SQLAlchemy to display categories on page\n categories = session.query(Categories).all()\n # Render webpage\n return render_template('add_item.html',\n categories=categories,\n login_status=login_status)", "def _create_item(request):\r\n usage_key = UsageKey.from_string(request.json['parent_locator'])\r\n category = request.json['category']\r\n\r\n display_name = request.json.get('display_name')\r\n\r\n if not has_course_access(request.user, usage_key.course_key):\r\n raise PermissionDenied()\r\n\r\n parent = get_modulestore(category).get_item(usage_key)\r\n dest_usage_key = usage_key.replace(category=category, name=uuid4().hex)\r\n\r\n # get the metadata, display_name, and definition from the request\r\n metadata = {}\r\n data = None\r\n template_id = request.json.get('boilerplate')\r\n if template_id:\r\n clz = parent.runtime.load_block_type(category)\r\n if clz is not None:\r\n template = clz.get_template(template_id)\r\n if template is not None:\r\n metadata = template.get('metadata', {})\r\n data = template.get('data')\r\n\r\n if display_name is not None:\r\n metadata['display_name'] = display_name\r\n\r\n get_modulestore(category).create_and_save_xmodule(\r\n dest_usage_key,\r\n definition_data=data,\r\n metadata=metadata,\r\n system=parent.runtime,\r\n )\r\n\r\n # TODO replace w/ nicer accessor\r\n if not 'detached' in parent.runtime.load_block_type(category)._class_tags:\r\n parent.children.append(dest_usage_key)\r\n 
get_modulestore(parent.location).update_item(parent, request.user.id)\r\n\r\n return JsonResponse({\"locator\": unicode(dest_usage_key), \"courseKey\": unicode(dest_usage_key.course_key)})", "def create_item(self, obj):\n logger.info('ItemProduct adding item initiated')\n try:\n with Transaction().start(DBNAME, 1) as transaction:\n unit, = self.ProductUom.search([('name', '=', obj['units'])])\n template = self.ProductTemplate()\n try:\n if self.Product.search([('code', '=', obj['id']), ('description', '=', 'Stock'),\n ('type', '=', 'goods')])[-1]:\n return False\n except Exception:\n pass\n template.category = self.ProductCategory.search([('name', '=', obj['category'])])[-1]\n template.default_uom = unit\n template.purchase_uom = unit\n template.type = 'goods'\n rate = Decimal(obj['rate'])\n cost = rate / 2\n template.name = obj['name']\n template.list_price = Decimal(rate)\n template.cost_price = Decimal(cost)\n template.purchasable = True\n template.account_expense = self.accounts['expense']\n template.account_receivable = self.accounts['receivable']\n template.save()\n # transaction.cursor.commit()\n product = self.Product()\n product.template = template\n product.code = obj['id']\n product.description = 'Stock'\n product.save()\n transaction.cursor.commit()\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False", "def _create_item(self, parent_location, category, display_name, **kwargs):\n return ItemFactory.create(\n parent_location=parent_location,\n category=category,\n display_name=display_name,\n publish_item=False,\n user_id=self.user.id,\n **kwargs\n )", "def add_new_item(self, request, *a, **kw):\n item_def = request.data\n cpdoc = self.get_object()\n item_def['calendar_plan'] = cpdoc.id\n\n item_ser = self.get_serializer(data=item_def)\n item_ser.is_valid(raise_exception=True)\n item_obj = item_ser.save()\n\n headers = self.get_success_headers(item_ser.data)\n return response.Response(item_ser.data, headers=headers)", "def create_item_command(cog_href: str, destination: str) -> None:\n item = stac.create_item(cog_href)\n\n item.save_object(dest_href=destination)", "def new(self, *args, **kw):\n id_tipo_item = UrlParser.parse_id(request.url, \"tipositems\")\n url_action = \"./\"\n\n pp = PoseePermiso('redefinir tipo item', id_tipo_item=id_tipo_item)\n if not pp.is_met(request.environ):\n flash(pp.message % pp.nombre_permiso, 'warning')\n redirect(atras)\n tmpl_context.widget = self.new_form\n return dict(value=kw, \n page=u\"Nuevo Atributo\", \n action=url_action, \n atras=url_action)" ]
[ "0.73387873", "0.69759136", "0.69521016", "0.68233764", "0.6724789", "0.66220343", "0.65843236", "0.6572441", "0.6484448", "0.6453141", "0.6386233", "0.6372381", "0.6363761", "0.6352616", "0.6330959", "0.6315789", "0.63058573", "0.6267776", "0.6242766", "0.6202442", "0.61834675", "0.61821467", "0.6173522", "0.6143714", "0.6128689", "0.6123711", "0.6095438", "0.60692865", "0.6058558", "0.6057965" ]
0.76763475
0
Allows user to edit an existing category item
def editItem(category_item_id): editedItem = db.findItem(id=category_item_id) if editedItem.user_id != login_session['user_id']: return not_authorized() if request.method == 'POST': db.updateItem(editedItem, request.form) return redirect(url_for('showCatalog')) return render_template( 'edit_item.html', categories=db.getAllCategories(), item=editedItem)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def editItem(category_id, item_id):\n editedItem = session.query(Item).filter_by(id=item_id).one()\n category = session.query(Category).filter_by(id=category_id).one()\n\n if editedItem.user_id != login_session['user_id']:\n flash(\"You are authorised to edit items created by you!\")\n return redirect(url_for(\"showCatalog\"))\n\n if request.method == 'POST':\n if request.form['name']:\n editedItem.name = request.form['name']\n if request.form['description']:\n editedItem.description = request.form['description']\n if request.form['price']:\n editedItem.price = request.form['price']\n session.add(editedItem)\n session.commit()\n flash('%s Item Successfully Edited' % (editedItem.name))\n return redirect(url_for('showItem',\n category_id=editedItem.category_id))\n else:\n return render_template('edititem.html', category=category,\n item=editedItem)", "def editItem(category_id, item_id):\n editedItem = session.query(Item).filter_by(id=item_id).one()\n if editedItem.user_id != login_session['user_id']:\n flash('You are not authorized to edit this Item.\\\n Please create own Item in order to edit.')\n return redirect(url_for('showallCategories'))\n if request.method == 'POST':\n if request.form['name']:\n editedItem.name = request.form['name']\n if request.form['description']:\n editedItem.description = request.form['description']\n if request.form['price']:\n editedItem.price = request.form['price']\n session.add(editedItem)\n session.commit()\n flash('Item Edit successfull')\n return redirect(url_for('showCategory', category_id=category_id))\n else:\n return render_template(\n 'editItem.html', category_id=category_id,\n item_id=item_id, item=editedItem)", "def editListItem(category_id, item_id):\n\n if 'username' not in login_session:\n return redirect('/login')\n\n editedItem = session.query(ListItem).filter_by(id=item_id).one()\n category = session.query(Category).filter_by(id=category_id).one()\n\n if category.user_id != login_session['user_id']:\n flash('You are not the creator of %s category, and cannot modify it' %\n category.name)\n return redirect(url_for('showItems', category_id=category.id))\n\n if request.method == 'POST':\n if \"btn_edit\" in request.form:\n if request.form['name']:\n editedItem.name = request.form['name']\n if request.form['description']:\n editedItem.description = request.form['description']\n session.add(editedItem)\n session.commit()\n flash('Catalog Item Successfully Edited')\n return redirect(url_for('showItems', category_id=category_id))\n else:\n return redirect(url_for('showItems', category_id=category_id))\n else:\n return render_template('edititem.html',\n item=editedItem,\n user=getUserInfo(login_session['user_id']))", "def edit_item(category, item):\n # Verify user login. 
If not, redirect to login page.\n login_status = None\n if 'email' in login_session:\n login_status = True\n else:\n flash('Please log in.')\n return redirect(url_for('home'))\n # Query database with SQLAlchemy to display categories on page\n categories = session.query(Categories).all()\n if request.method == 'POST':\n # Query database with SQLAlchemy and store queries as objects\n category = (session.query(Categories)\n .filter_by(name=category.replace('-', ' '))\n .one())\n item = (session.query(Items)\n .filter_by(name=item.replace('-', ' '))\n .one())\n # Get form fields submitted by user, or retain item info\n name = request.form['name'] if request.form['name'] else item.name\n url = request.form['url'] if request.form['url'] else item.url\n if request.form['photo_url']:\n photo_url = request.form['photo_url']\n else:\n photo_url = item.photo_url\n if request.form['description']:\n description = request.form['description']\n else:\n description = item.description\n category = request.form['item_category']\n # Retrieve the database ID of the item's category\n category_id = (session.query(Categories)\n .filter_by(name=category.replace('-', ' '))\n .one())\n # Get user's database ID\n user_db_id = (session.query(Users)\n .filter_by(email=login_session['email'])\n .one()).id\n # Get database ID of creator\n creator_db_id = item.creator_db_id\n print(\"Current user's database primary key id is {}.\"\n .format(user_db_id))\n print(\"Item creator's database primary key id is {}.\"\n .format(creator_db_id))\n print('Item to edit is \"{}\".'.format(item.name))\n # Only allow creator to edit. If not, redirect to login.\n if user_db_id != creator_db_id:\n flash('Only the creator can edit. Please log in as creator.')\n return redirect(url_for('home'))\n # Store edits in an object\n edited_item = Items(name=name,\n url=url,\n photo_url=photo_url,\n description=description,\n category_id=category_id.id,\n creator_db_id=user_db_id)\n # Overwrite item object with new info from edited_item object\n item.name = edited_item.name\n item.url = edited_item.url\n item.photo_url = edited_item.photo_url\n item.description = edited_item.description\n item.category_id = edited_item.category_id\n session.add(item)\n session.commit()\n print('Item \"{}\" edited.'.format(edited_item.name))\n # Return to homepage\n return redirect(url_for('home'))\n else:\n # Query database with SQLAlchemy to display categories on page\n categories = session.query(Categories).all()\n # Render webpage\n return render_template('edit_item.html',\n categories=categories,\n item=item,\n login_status=login_status)", "def editItem(category_id, item_id):\r\n # authentication\r\n if 'username' not in login_session:\r\n flash('Please login to edit item')\r\n return redirect(url_for('showItems', category_id=category_id))\r\n\r\n # validation\r\n category = session.query(Category).filter_by(id=category_id).first()\r\n if not category:\r\n flash('Attempted operation on non-existent category')\r\n return redirect(url_for('showCategories'))\r\n\r\n editedItem = session.query(Item).filter_by(id=item_id,\r\n category_id=category_id).first()\r\n if not editedItem:\r\n flash('Attempt to edit non-existent item')\r\n return redirect(url_for('showItems', category_id=category_id))\r\n\r\n # authorization\r\n if login_session['user_id'] != editedItem.user_id:\r\n flash('Sorry, you are not authorized to edit the item \\'{}\\''\r\n .format(editedItem.name))\r\n return redirect(url_for('showItems', category_id=category_id))\r\n\r\n if request.method == 
'POST':\r\n # update operation\r\n if request.form['name']:\r\n editedItem.name = request.form['name']\r\n\r\n if request.form['description']:\r\n editedItem.description = request.form['description']\r\n else:\r\n editedItem.description = ''\r\n session.add(editedItem)\r\n session.commit()\r\n flash('Edited Item \\'{}\\' Successfully'.format(editedItem.name))\r\n return redirect(url_for('showItems', category_id=category_id))\r\n else:\r\n # serve GET requests with the form\r\n return render_template(\"editItem.html\",\r\n category=category, item=editedItem)", "def edit_item(self, item_id, name, description, category_id, user_id):\r\n item = self._db_manager.get_item(item_id)\r\n if item is None:\r\n flash(\"Invalid item.\")\r\n return\r\n if item[\"user_id\"] != user_id:\r\n flash(\"Only the original creator can edit an item.\")\r\n return\r\n category = self._db_manager.get_category(category_id)\r\n if category is None:\r\n flash(\"Invalid category.\")\r\n return\r\n if category[\"user_id\"] != user_id:\r\n flash(\"You can only add items to categories you created.\")\r\n return\r\n flash(self._db_manager.edit_item(\r\n item_id=item_id,\r\n name=name,\r\n description=description,\r\n category_id=category_id\r\n ))", "def editCategory(category_id):\n editedCategory = session.query(Category). \\\n filter_by(id=category_id).one()\n if editedCategory.user_id != login_session['user_id']:\n flash('You are not authorized to edit this Category.\\\n Please create your own Category in order to edit.')\n return redirect(url_for('showallCategories'))\n if request.method == 'POST':\n if request.form['name']:\n editedCategory.name = request.form['name']\n if request.form['image']:\n editedCategory.image = request.form['image']\n session.add(editedCategory)\n session.commit()\n flash('Category Edit successfull')\n return redirect(url_for('showallCategories'))\n else:\n return render_template(\n 'editCategory.html', category_id=category_id,\n category=editedCategory)", "def edit_category(category):\n # Verify user login. If not, redirect to login page.\n login_status = None\n if 'email' in login_session:\n login_status = True\n else:\n flash('Please log in.')\n return redirect(url_for('home'))\n if request.method == 'POST':\n # Query database with SQLAlchemy and store query as an object\n category = (session.query(Categories)\n .filter_by(name=category.replace('-', ' '))\n .one())\n # Get form fields\n edit_category_name = request.form['edit_category_name']\n # Get user's database ID\n user_db_id = (session.query(Users)\n .filter_by(email=login_session['email'])\n .one()).id\n # Get database ID of category creator\n creator_db_id = category.creator_db_id\n print(\"Current user's database primary key id is {}.\"\n .format(user_db_id))\n print(\"Category creator's database primary key id is {}.\"\n .format(creator_db_id))\n print('Category to edit is \"{}\".'.format(category.name))\n # Only allow creator to edit. If not, redirect to login.\n if user_db_id != creator_db_id:\n flash('Only the creator can edit. 
Please log in as creator.')\n return redirect(url_for('home'))\n # Flash messages for incomplete item info\n if not request.form['edit_category_name']:\n flash('Please identify category.')\n return redirect(url_for('edit_category'))\n # Overwrite object with new info for database\n category.name = edit_category_name\n print('Category name for database is \"{}\".'.format(category.name))\n session.add(category)\n session.commit()\n # Return to homepage\n return redirect(url_for('home'))\n else:\n # Render webpage\n return render_template('edit_category.html',\n category_name=category,\n login_status=login_status)", "def edit_item_details(item_id):\n category_id = None\n if 'category_id' in request.args:\n category_id = int(request.args['category_id'])\n if 'userid' not in login_session:\n flash('Unfortunately you need to be logged in to make changes', 'error')\n return redirect(url_for('show_homepage'))\n\n item = None\n if item_id != 0:\n item = is_user_the_creator(item_id)\n if request.method == 'GET':\n categories = session.query(Category).order_by(asc(Category.name)).all()\n return display_item(categories, item, item_id, category_id)\n else:\n return save_item(item, item_id)", "def editCategory(category_id):\n\n if 'username' not in login_session:\n return redirect('/login')\n\n editedCategory = session.query(Category).filter_by(id=category_id).one()\n if editedCategory.user_id != login_session['user_id']:\n flash('You are not the creator of %s category, and cannot modify it' %\n editedCategory.name)\n return redirect(url_for('showCategories'))\n else:\n if request.method == 'POST':\n if \"btn_edit\" in request.form:\n if request.form['name']:\n editedCategory.name = request.form['name']\n flash('Category Successfully Edited %s' %\n editedCategory.name)\n return redirect(url_for('showCategories'))\n else:\n return redirect(url_for('showCategories'))\n else:\n return redirect(url_for('showCategories'))\n else:\n return render_template('editCategory.html',\n category=editedCategory,\n user=getUserInfo(login_session['user_id']))", "def update_category_item(catalog_item_id):\n edited_item = session.query(CatalogItem). 
\\\n filter_by(id=catalog_item_id).one()\n if request.form['name']:\n edited_item.name = request.form['name']\n if request.form['description']:\n edited_item.description = request.form['description']\n if request.form['price']:\n edited_item.price = request.form['price']\n session.add(edited_item)\n session.commit()", "def edit_item(item_id):\n if 'userinfo' not in session.keys():\n session['target'] = url_for('edit_item', item_id=item_id)\n return redirect(url_for('gconnect'))\n if request.method == 'POST':\n sqlsession = SQLSESSION()\n item = sqlsession.query(Item).filter_by(id=item_id).first()\n item.name = request.form['name']\n item.category_id = request.form['category']\n item.description = request.form['description']\n sqlsession.commit()\n return redirect(url_for('view_item', item_id=item_id))\n sqlsession = SQLSESSION()\n item = sqlsession.query(Item).filter_by(id=item_id).first()\n categories = sqlsession.query(Category).all()\n return render_template(\"edit_item.html\",\n item=item,\n categories=categories)", "def editCategory(category_id):\n\n edited_category = session.query(Category).filter_by(id=category_id).first()\n if edited_category.user_id != login_session['user_id']:\n flash(\"You are authorised to Edit category created by You only!\")\n\n return redirect(url_for(\"showCatalog\"))\n\n if request.method == 'POST':\n if request.form['name'] != '':\n edited_category.name = request.form['name']\n session.add(edited_category)\n session.commit()\n flash('Category Successfully Edited %s' % edited_category.\n name)\n return redirect(url_for('showCatalog'))\n else:\n flash(\"Error editing category!\")\n return render_template('editCategory.html',\n category=edited_category)\n else:\n return render_template('editcategory.html',\n category=edited_category)", "def editCategory(category_id):\r\n # authentication\r\n if 'username' not in login_session:\r\n flash('Please login to edit category')\r\n return redirect(url_for('showCategories'))\r\n\r\n # validation\r\n editedCategory = session.query(Category).filter_by(id=category_id).first()\r\n if not editedCategory:\r\n flash('Attempt to edit non-existent category')\r\n return redirect(url_for('showCategories'))\r\n\r\n # authorization\r\n if login_session['user_id'] != editedCategory.user_id:\r\n flash('Sorry, you are not authorized to edit the category \\'{}\\''\r\n .format(editedCategory.name))\r\n return redirect(url_for('showItems', category_id=category_id))\r\n\r\n if request.method == 'POST':\r\n # update operation\r\n if request.form['name']:\r\n editedCategory.name = request.form['name']\r\n\r\n if request.form['description']:\r\n editedCategory.description = request.form['description']\r\n else:\r\n editedCategory.description = ''\r\n session.add(editedCategory)\r\n session.commit()\r\n flash('Edited Category \\'{}\\' Successfully'.format(\r\n editedCategory.name))\r\n return redirect(url_for('showItems', category_id=category_id))\r\n else:\r\n # serve GET requests with form\r\n return render_template(\"editCategory.html\", category=editedCategory)", "def edit_category(self, category_id, name, user_id):\r\n category = self._db_manager.get_category(category_id)\r\n if category is None:\r\n flash(\"Invalid category.\")\r\n return\r\n if category[\"user_id\"] != user_id:\r\n flash(\"Only the original creator can edit a category.\")\r\n return\r\n flash(self._db_manager.edit_category(category_id, name))", "def edit_category(category_id):\n\n category = Category.query.filter(\n Category.id == category_id,\n Category.user_id == 
current_user.id\n ).first()\n\n if not category:\n flash(\"Couldn't find that category\", category='warning')\n return redirect(request.referrer)\n\n form = CategoryForm()\n form.name.current_user_id = current_user.id\n\n if form.validate_on_submit():\n category.name = form.name.data.capitalize()\n db.session.commit()\n flash('Successfully updated category', 'success')\n return redirect(url_for('url.index'))\n\n elif request.method == 'GET':\n form.name.data = category.name\n\n return render_template(\n 'forms/form.html',\n form_title='Edit Category',\n form=form,\n form_name='category',\n action=url_for('url.edit_category', category_id=category_id))", "def newItem(category_id):\n editedCategory = session.query(Category). \\\n filter_by(id=category_id).one()\n if editedCategory.user_id != login_session['user_id']:\n flash('You are not authorized to edit this Category.\\\n Please create your own Category in order to edit.')\n return redirect(url_for('showCategory', category_id=category_id))\n if request.method == 'POST':\n newItem = Item(\n name=request.form['name'],\n description=request.form['description'],\n price=request.form['price'],\n category_id=category_id,\n user_id=login_session['user_id'])\n session.add(newItem)\n session.commit()\n flash('New Item created')\n return redirect(url_for('showCategory',\n category_id=category_id))\n else:\n return render_template('newItem.html', category_id=category_id)", "def edit_category(category_name):\n\n category = Category.query.filter_by(name=category_name).first_or_404()\n\n if category.owner != current_user:\n flash(\"Failed to edit category %s since you are not the owner.\" %\n category.name)\n return redirect(url_for('.index'))\n\n form = AddOrEditCategoryForm()\n if form.validate_on_submit():\n category.name = form.name.data\n try:\n db.session.commit()\n except:\n flash(\n (\"Failed to edit category \\\"%s\\\".\"\n \" Make sure that the category name is unique.\")\n % category.name)\n else:\n flash(\"Category \\\"%s\\\" has been edited.\" % category.name)\n finally:\n return redirect(url_for('.index'))\n form.name.data = category.name\n return render_template('add_or_edit.html', form=form)", "def editItem(category_id, item_id):\r\n if 'username' not in login_session:\r\n return redirect(url_for('showLogin'))\r\n session = DBSession()\r\n editedItem = session.query(Item).filter_by(id=item_id).one()\r\n if editedItem.user_id != login_session['user_id']:\r\n return \"<script>function myFunction() {alert('You are not authorized to edit this item.');}</script><body onload='myFunction()''>\"\r\n if request.method == 'POST':\r\n editedItem.name = request.form['name']\r\n session.add(editedItem)\r\n session.commit()\r\n return redirect(url_for('showCategoryItems', category_id=category_id))\r\n else:\r\n return render_template('edititem.html', category_id=category_id, item=editedItem)", "def edit_item(item_id):\n\n item = Item.query.filter(\n Item.id == item_id,\n Item.user_id == current_user.id\n ).first()\n\n if not item:\n flash(\"Couldn't find a item with that id\", category='warning')\n return redirect(request.referrer)\n\n form = ItemForm()\n form.editting_item_id = item_id\n # Query for select field\n form.category_id.query = Category.query.filter(\n Category.user_id == current_user.id).all()\n\n if form.validate_on_submit():\n item.category_id = form.category_id.data.id\n item.name = form.name.data.capitalize()\n item.description = form.description.data\n db.session.commit()\n flash('Successfully updated Item', 'success')\n return 
redirect(url_for('url.index'))\n\n elif request.method == 'GET':\n form.name.data = item.name\n form.description.data = item.description\n\n return render_template(\n 'forms/form.html',\n form_title='Edit Item',\n form=form,\n form_name='item',\n action=url_for('url.edit_item', item_id=item_id))", "def edit_item(item_name):\n\n item = Item.query.filter_by(name=item_name).first_or_404()\n\n if item.owner != current_user:\n flash(\"Failed to edit item %s since you are not the owner.\" %\n item.name)\n return redirect(url_for('.index'))\n\n form = AddOrEditItemForm(Category.query.order_by(Category.name).all())\n if form.validate_on_submit():\n\n img_upload_name = secure_filename(form.img_upload.data.filename)\n img_deletehash = None\n img_url = None\n\n # Delete uploaded image on Imgur\n if item.img_deletehash is not None \\\n and not delete_image(item.img_deletehash):\n flash(\"Failed to edit item \\\"%s\\\".\" % item.name)\n return redirect(url_for('.index'))\n\n # Upload new image on Imgur\n if img_upload_name != '':\n img_url, img_deletehash = upload_image(form.img_upload.data)\n print \"img_url: \" + img_url\n print \"img_deletehash: \" + img_deletehash\n if img_url is None or img_deletehash is None:\n flash(\"Failed to upload image.\")\n return redirect(url_for('.index'))\n\n elif form.img_url.data != '':\n img_url = form.img_url.data\n\n item.name = form.name.data\n item.description = form.description.data\n item.category = Category.query.get(form.category.data)\n item.img_url = img_url\n item.img_deletehash = img_deletehash\n\n try:\n db.session.commit()\n except:\n flash(\n (\"Failed to edit item \\\"%s\\\".\"\n \" Make sure that the item name is unique.\") % item.name)\n else:\n flash(\"Item \\\"%s\\\" has been edited.\" % item.name)\n finally:\n return redirect(url_for('.index'))\n\n form.name.data = item.name\n form.description.data = item.description\n form.category.data = item.category.id\n form.img_url.data = item.img_url\n\n return render_template('add_or_edit.html', form=form)", "def edit_recipe_category(title):\n session['recipe_category_title'] = title\n if request.method == 'POST':\n result = USERS[session['username']].edit_recipe_category(session['recipe_category_title'],\n request.form['title'])\n if result == 'recipe_category edited':\n flash(result, 'info')\n else:\n flash(result, 'warning')\n return redirect(url_for('dashboard'))\n return render_template('edit_recipe_category.html')", "def add_item():\n\n form = AddOrEditItemForm(Category.query.order_by(Category.name).all())\n img_upload_name = None\n if form.validate_on_submit():\n img_upload_name = secure_filename(form.img_upload.data.filename)\n img_deletehash = None\n img_url = None\n\n # Upload image to Imgur if FileField is specified\n if img_upload_name != '':\n img_url, img_deletehash = upload_image(form.img_upload.data)\n if img_url is None or img_deletehash is None:\n flash(\"Failed to upload image.\")\n return redirect(url_for('.index'))\n elif form.img_url.data != '':\n img_url = form.img_url.data\n\n new_item = Item(name=form.name.data, description=form.description.data,\n category=Category.query.get(form.category.data),\n img_url=img_url, img_deletehash=img_deletehash,\n owner=current_user._get_current_object())\n\n try:\n db.session.add(new_item)\n db.session.commit()\n except:\n flash(\n (\"Failed to add item \\\"%s\\\".\"\n \" Make sure that the item name is unique.\") % new_item.name)\n else:\n flash(\"A new item \\\"%s\\\" has been added.\" % new_item.name)\n finally:\n return 
redirect(url_for('.index'))\n\n # Set SelectField's default value\n category_name = request.args.get('category_name')\n if category_name is not None:\n default_category = Category.query.filter_by(name=category_name).first()\n if default_category is None:\n flash(\"Wrong parameter(s).\")\n return redirect(url_for('.index'))\n form.category.data = default_category.id\n\n return render_template('add_or_edit.html',\n form=form, filename=img_upload_name)", "def editCategory(category_id):\r\n if 'username' not in login_session:\r\n return redirect(url_for('showLogin'))\r\n session = DBSession()\r\n editedCategory = session.query(Category).filter_by(id=category_id).one()\r\n if editedCategory.user_id != login_session['user_id']:\r\n return \"<script>function myFunction() {alert('You are not authorized to edit this category.');}</script><body onload='myFunction()''>\"\r\n if request.method == 'POST':\r\n editedCategory.name = request.form['name']\r\n session.add(editedCategory)\r\n session.commit()\r\n return redirect(url_for('showCategories'))\r\n else:\r\n return render_template('editcategory.html', category=editedCategory)", "def test_edit_category(self):\n response = self.client.put('/api/v1/category/1',\n data=json.dumps(category[3]),\n content_type='application/json',\n headers=self.admin_headers)\n self.assertEqual(response.status_code, 201)\n self.assertIn('Apparels', str(response.data))", "def update(self, request, *args, **kwargs):\n response = super(CategoryViewSet).update(self, request, *args, *kwargs)\n response.data['message'] = \"Categoria ha sido editada\"", "def update_item(item_id):\n edited_item = session.query(Item).filter_by(id=item_id).one()\n\n # redirect to details page if current user does not own item\n if edited_item.user_id != login_session['user_id']:\n return redirect(\n url_for(\n 'item_details',\n category_id=edited_item.category_id,\n item_id=edited_item.id))\n\n if request.method == 'POST':\n if request.form['category']:\n edited_item.category_id = request.form['category']\n if request.form['name']:\n edited_item.name = request.form['name']\n if request.form['description']:\n edited_item.description = request.form['description']\n edited_item.updated_date = datetime.datetime.now()\n session.add(edited_item)\n session.commit()\n return redirect(\n url_for(\n 'item_details',\n category_id=edited_item.category_id,\n item_id=edited_item.id))\n else:\n categories = session.query(Category).all()\n return render_template(\n 'views/edit.html',\n edited_item=edited_item,\n categories=categories)", "def editItem(sport_id, item_id):\n\n sport = session.query(Sport).filter_by(id=sport_id).one()\n editedItem = session.query(Item).filter_by(id=item_id).one()\n if request.method == 'POST':\n if request.form['name']:\n editedItem.name = request.form['name']\n if request.form['description']:\n editedItem.description = request.form['description']\n editedItem.user_id = login_session['user_id']\n session.add(editedItem)\n session.commit()\n return redirect(url_for('showCatalog', sport_id=sport_id))\n else:\n return render_template('edititem.html', sport_id=sport_id,\n item_id=item_id, sport=sport, item=editedItem)", "def edit_category(category_id):\n if request.args.get('state') != login_session['state']:\n response = make_response(\n json.dumps({'error': 'Invalid state parameter.'}), 401\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n if 'username' not in login_session:\n response = make_response(\n json.dumps({'error': 'User is logged out. 
This should not happen'}), 401\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n try:\n if request.method == 'POST':\n category = session.query(Category).filter_by(id=category_id).all()\n if len(category) > 0:\n state = get_new_state()\n login_session['state'] = state\n category = category[0]\n category.name = request.form['name']\n session.add(category)\n session.commit()\n response = make_response(\n json.dumps({'success': '', 'nonce': login_session['state']}), 200\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n else:\n return make_response(jsonify(error=[\"No results found\"]), 404)\n except Exception as inst:\n print(type(inst))\n print(inst.args)\n print(inst)", "def render_edit_item_page(\r\n self, client_id, state, user_id, user_name, item_id):\r\n categories = self._db_manager.get_category_list(user_id)\r\n if len(categories) == 0:\r\n flash(\"You have created no categories to add items to.\")\r\n return\r\n item = self._db_manager.get_item(item_id)\r\n if item is None:\r\n flash(\"Invalid item.\")\r\n return\r\n if item[\"user_id\"] != user_id:\r\n flash(\"Only the original creator can edit an item.\")\r\n return\r\n return render_template(\r\n \"item_edit.html\",\r\n client_id=client_id,\r\n state=state,\r\n is_logged_in=True,\r\n user_name=user_name,\r\n categories=categories,\r\n item=item\r\n )" ]
[ "0.79928815", "0.7969549", "0.79305446", "0.76757014", "0.7604712", "0.7579564", "0.7493831", "0.74769086", "0.74194235", "0.7385975", "0.7354665", "0.73481214", "0.7337863", "0.7281295", "0.7241027", "0.723668", "0.71674097", "0.7164873", "0.7072671", "0.70634896", "0.70056456", "0.6998456", "0.6962231", "0.6916112", "0.6848112", "0.6818257", "0.68046737", "0.67940277", "0.6763425", "0.6731742" ]
0.82450867
0
Allows user to delete an existing category item
def deleteItem(category_item_id): itemToDelete = db.findItem(id=category_item_id) if itemToDelete.user_id != login_session['user_id']: return not_authorized() if request.method == 'POST': db.deleteItem(itemToDelete) flash('%s Successfully Deleted' % itemToDelete.title, 'success') return redirect(url_for('showCatalog')) return render_template('delete_item.html', item=itemToDelete)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def category_delete(request):\n if request.POST:\n cat = get_object_or_404(Category, pk=request.POST.get('id'))\n cat.delete()\n return HttpResponse(status=200)", "def deleteCategory():\n deletecategory = deleteCategoryForm()\n # Look for CSRF token in form, verify POST method, and validate form data.\n if deletecategory.validate_on_submit():\n deleteID = deletecategory.deleteID.data\n category = Category.query.filter_by(id=deleteID).one()\n # Check logged in user against the category creator.\n if session['user_id'] == category.user_id:\n items = Item.query.filter_by(category=category.id)\n # Delete items related to category as well as category itself.\n if items:\n for i in items:\n db.session.delete(i)\n db.session.delete(category)\n db.session.commit()\n return redirect(url_for('index'))\n else:\n return redirect(url_for('index'))\n else:\n return redirect(url_for('index'))", "def deleteCategory(category_id):\n editedCategory = session.query(Category).\\\n filter_by(id=category_id).one()\n editedItem = session.query(Item).\\\n filter_by(category_id=editedCategory.id).all()\n print editedItem\n if editedCategory.user_id != login_session['user_id']:\n flash('You are not authorized to delete this Item.\\\n Please create own Category with items in order to \\\n delete items.')\n return redirect(url_for('showallCategories'))\n else:\n if editedItem:\n flash('Category Deletion not possible. \\\n Please delete the items in Category')\n return redirect(url_for('showCategory',\n category_id=category_id))\n elif request.method == 'POST':\n session.delete(editedCategory)\n session.commit()\n flash('Category Deletion successfull')\n return redirect(url_for('showallCategories'))\n else:\n return render_template(\n 'deleteCategory.html', category_id=category_id,\n category=editedCategory)", "def deleteListItem(category_id, item_id):\n\n if 'username' not in login_session:\n return redirect('/login')\n\n category = session.query(Category).filter_by(id=category_id).one()\n itemToDelete = session.query(ListItem).filter_by(id=item_id).one()\n\n if category.user_id != login_session['user_id']:\n flash('You are not the creator of %s category, and cannot modify it' %\n category.name)\n return redirect(url_for('showItems', category_id=category.id))\n\n if request.method == 'POST':\n if \"btn_delete\" in request.form:\n session.delete(itemToDelete)\n session.commit()\n flash('Catalog Item Successfully Deleted')\n return redirect(url_for('showItems', category_id=category_id))\n else:\n return redirect(url_for('showItems', category_id=category_id))\n else:\n return render_template('deleteitem.html',\n item=itemToDelete,\n user=getUserInfo(login_session['user_id']))", "def deleteItem(category_id, item_id):\n category = session.query(Category).filter_by(id=category_id).first()\n item = session.query(Item).filter_by(id=item_id).first()\n if item.user_id != login_session['user_id']:\n flash(\"You are authorised to delete items created by you!\")\n return redirect(url_for(\"showCatalog\"))\n if request.method == \"POST\":\n session.delete(item)\n session.commit()\n flash('%s Item Successfully Deleted' % (item.name))\n return redirect(url_for('showItem', category_id=item.category_id))\n else:\n return render_template(\"deleteitem.html\", item=item,\n category=category)", "def deleteItem(category_id, item_id):\n editedItem = session.query(Item).filter_by(id=item_id).one()\n if editedItem.user_id != login_session['user_id']:\n flash('You are not authorized to delete this Item.\\\n Please create own Category with 
items in order to \\\n delete items.')\n return redirect(url_for('showallCategories'))\n if request.method == 'POST':\n session.delete(editedItem)\n session.commit()\n flash('Item Deletion successfull')\n return redirect(url_for('showCategory', category_id=category_id))\n else:\n return render_template(\n 'deleteItem.html', category_id=category_id,\n item_id=item_id, item=editedItem)", "def delete_item(category, item):\n # Verify user login. If not, redirect to login page.\n login_status = None\n if 'email' in login_session:\n login_status = True\n else:\n flash('Please log in.')\n return redirect(url_for('home'))\n if request.method == 'POST':\n # Query database with SQLAlchemy and store queries as objects\n category = (session.query(Categories)\n .filter_by(name=category.replace('-', ' '))\n .one())\n item = (session.query(Items)\n .filter_by(name=item\n .replace('-', ' '), category_id=category.id)\n .one())\n # Get user's database ID\n user_db_id = (session.query(Users)\n .filter_by(email=login_session['email'])\n .one()).id\n # Get database ID of creator\n creator_db_id = item.creator_db_id\n print(\"Current user's database primary key id is {}.\"\n .format(user_db_id))\n print(\"Item creator's database primary key id is {}.\"\n .format(creator_db_id))\n print('Item to delete is \"{}\".'.format(item.name))\n # Only allow creator to edit. If not, redirect to login.\n if user_db_id != creator_db_id:\n flash('Only the creator can edit. Please log in as creator.')\n return redirect(url_for('home'))\n session.delete(item)\n session.commit()\n # Return to homepage\n return redirect(url_for('home'))\n else:\n # Render webpage\n return render_template('delete_item.html',\n item=item,\n login_status=login_status)", "def delete_item(item_name, catagory_name):\n try:\n item = Item.fetch_by_name_and_catagory_name(item_name, catagory_name)\n except NoResultFound:\n abort(404)\n item.delete()\n return redirect(url_for('home'))", "def delete_category_item(category_item_id):\n item = session.query(CatalogItem).filter_by(id=category_item_id).one()\n session.delete(item)\n session.commit()", "def delete(self):\n key = self.request.get('key')\n\n if not self.assert_xsrf_token_or_fail(\n self.request, 'delete-category', {}):\n return\n\n if not roles.Roles.is_super_admin():\n transforms.send_json_response(\n self, 401, 'Access denied.')\n return\n\n errors = []\n course_category.CourseCategoryDAO.delete_category(key, errors)\n if not errors:\n transforms.send_json_response(self, 200, 'Deleted.')\n else:\n transforms.send_json_response(self, 412, '\\n'.join(errors))", "def delete_category(self):\n index_list = self.categoriesView.selectedIndexes()\n if index_list and index_list[0].isValid():\n index = index_list[0]\n category = index.data(Qt.UserRole)\n\n deletion = self.orm.delete_category(category)\n if not deletion:\n show_warning(\"Can't delete category\")\n else:\n self.show_categories()\n if category.parent is None:\n self.show_available_parents()", "def delete_category(category):\n # Verify user login. 
If not, redirect to login page.\n login_status = None\n if 'email' in login_session:\n login_status = True\n else:\n flash('Please log in.')\n return redirect(url_for('home'))\n if request.method == 'POST':\n # Query database with SQLAlchemy and store queries as objects\n category = (session.query(Categories)\n .filter_by(name=category.replace('-', ' '))\n .one())\n category_items = (session.query(Items)\n .filter_by(category_id=category.id)\n .order_by(Items.name)\n .all())\n # Get user's database ID\n user_db_id = (session.query(Users)\n .filter_by(email=login_session['email'])\n .one()).id\n # Get database ID of category creator\n creator_db_id = category.creator_db_id\n print(\"Current user's database primary key id is {}.\"\n .format(user_db_id))\n print(\"Category creator's database primary key id is {}.\"\n .format(creator_db_id))\n print('Category to delete is \"{}\".'.format(category.name))\n print('Items to delete:')\n for item in category_items:\n print(item.name)\n # Only allow creator to edit. If not, redirect to login.\n if user_db_id != creator_db_id:\n flash('Only the creator can edit. Please log in as creator.')\n return redirect(url_for('home'))\n session.delete(category)\n for item in category_items:\n session.delete(item)\n session.commit()\n # Return to homepage\n return redirect(url_for('home'))\n else:\n # Render webpage\n return render_template('delete_category.html',\n category_name=category,\n login_status=login_status)", "def deleteItem(category_id, item_id):\r\n # authentication\r\n if 'username' not in login_session:\r\n flash('Please login to add item')\r\n return redirect(url_for('showItems', category_id=category_id))\r\n\r\n # validation\r\n category = session.query(Category).filter_by(id=category_id).first()\r\n if not category:\r\n flash('Attempted operation on non-existent category')\r\n return redirect(url_for('showCategories'))\r\n\r\n deletedItem = session.query(Item).\\\r\n filter_by(id=item_id, category_id=category_id).first()\r\n if not deletedItem:\r\n flash('Attempt to delete non-existent item')\r\n return redirect(url_for('showItems', category_id=category_id))\r\n\r\n # authorization\r\n if login_session['user_id'] != deletedItem.user_id:\r\n flash('Sorry, you are not authorized to delete the item \\'{}\\''\r\n .format(deletedItem.name))\r\n return redirect(url_for('showItems', category_id=category_id))\r\n\r\n if request.method == 'POST':\r\n # delete operation\r\n session.delete(deletedItem)\r\n session.commit()\r\n flash('Deleted Item \\'{}\\' Successfully'.format(deletedItem.name))\r\n return redirect(url_for('showItems', category_id=category_id))\r\n else:\r\n # serve GET requests with confirmation form\r\n return render_template(\"deleteItem.html\",\r\n category=deletedItem.category, item=deletedItem)", "def delete_category(request, id):\n\n if request.method == \"POST\":\n Category.objects.get(id=id).delete()\n messages.success(request, \"Category deleted successfully.\")\n return redirect(\"category\")\n\n return render(request, \"core/delete_category.html\", {\n \"object\": Category.objects.get(id=id)\n })", "def delete_item(category, name):\r\n item_key = course_key.make_usage_key(category, name)\r\n resp = self.client.delete(get_url('xblock_handler', item_key))\r\n self.assertEqual(resp.status_code, 204)\r\n _test_no_locations(self, resp, status_code=204, html=False)", "def delete_category(category_id):\n mongo.db.categories.remove({\"_id\": ObjectId(category_id)})\n flash(\"Recipe was deleted\")\n return 
redirect(url_for(\"get_categories\"))", "def delete_category(key):\n try:\n category = Categories.objects.get(pk=key)\n except ObjectDoesNotExist:\n return Response({'status': CATEGORY_NOT_FOUND}, status=status.HTTP_404_NOT_FOUND)\n\n setattr(category, 'is_delete', True)\n category.save()\n return Response({'status': CATEGORY_DELETED}, status=status.HTTP_200_OK)", "def delete(id):\r\n\r\n category = Category.query.get_or_404(id)\r\n db.session.delete(category)\r\n db.session.commit()\r\n flash('You have successfully deleted the category.')\r\n\r\n # redirect to the category page\r\n return redirect(url_for('category.list'))", "def delete_category(request):\n try:\n categories = request.POST.getlist('category_id', 0)\n category = Category.objects.filter(pk__in=categories).delete()\n ActionLogger().log(request.user, \"deleted\", \"Knowledgebase Category %s\" % categories)\n return format_ajax_response(True, \"Knowledgebase category(s) deleted successfully.\")\n except Exception as ex:\n logger.error(\"Failed to delete_category: %s\" % ex)\n return format_ajax_response(False, \"There was an error deleting knowledgebase category(s).\")", "def deleteCategory(category_id):\r\n if 'username' not in login_session:\r\n return redirect(url_for('showLogin'))\r\n session = DBSession()\r\n deletedCategory = session.query(Category).filter_by(id=category_id).one()\r\n if deletedCategory.user_id != login_session['user_id']:\r\n return \"<script>function myFunction() {alert('You are not authorized to delete this category.');}</script><body onload='myFunction()''>\"\r\n if request.method == 'POST':\r\n session.delete(deletedCategory)\r\n deletedItems = session.query(Item).filter_by(\r\n category_id=category_id).all()\r\n for i in deletedItems:\r\n session.delete(i)\r\n session.commit()\r\n return redirect(url_for('showCategories'))\r\n else:\r\n return render_template('deletecategory.html', category=deletedCategory)", "def delete_item(item_id):\n if 'userinfo' not in session.keys():\n session['target'] = url_for('delete_item', item_id=item_id)\n return redirect(url_for('gconnect'))\n if request.method == 'POST':\n sqlsession = SQLSESSION()\n item = sqlsession.query(Item).filter_by(id=item_id).first()\n sqlsession.delete(item)\n sqlsession.commit()\n return redirect(\"/\")\n sqlsession = SQLSESSION()\n item = sqlsession.query(Item, Category).join(Category)\\\n .filter(Item.id == item_id).first()\n return render_template(\"delete_item.html\", item=item)", "def delete_recipe_category(title):\n result = USERS[session['username']].delete_recipe_category(title)\n if result == \"recipe category deleted\":\n flash(result, 'info')\n else:\n flash(result, 'warning')\n return redirect(url_for('categories'))", "def delete_item_page(item_name, catagory_name):\n return render_template(\n 'delete_item.html', item_name=item_name, catagory_name=catagory_name\n )", "def test_delete_a_category(self):\n self.test_add_category_success()\n response = self.client.delete('/categories/1',\n headers={\"Authorization\": self.token})\n self.assertEqual(response.status_code, 200)\n self.assertIn('category deleted permanently',\n response.data.decode())", "def delete(self, item):\n self._createAction(item, \"delete\")", "def deleteItem(category_id, item_id):\r\n if 'username' not in login_session:\r\n return redirect(url_for('showLogin'))\r\n session = DBSession()\r\n deletedItem = session.query(Item).filter_by(id=item_id).one()\r\n if deletedItem.user_id != login_session['user_id']:\r\n return \"<script>function myFunction() 
{alert('You are not authorized to delete this item.');}</script><body onload='myFunction()''>\"\r\n if request.method == 'POST':\r\n session.delete(deletedItem)\r\n session.commit()\r\n return redirect(url_for('showCategoryItems', category_id=category_id))\r\n else:\r\n return render_template('deleteitem.html', item=deletedItem)", "def test_delete_category(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n self.login('[email protected]', 'Bo1995')\n self.dashboard()\n self.category('JunkFood')\n self.dashboard()\n rv = self.del_category()\n self.assertIn(b'successfully deleted category', rv.data)", "def test_delete_category(self):\n pass", "def delete_category(category_name):\n\n category = Category.query.filter_by(name=category_name).first_or_404()\n if category.owner != current_user:\n flash(\"Failed to delete category %s since you are not the owner.\" %\n category.name)\n return redirect(url_for('.index'))\n\n form = DeleteForm()\n if form.validate_on_submit():\n try:\n db.session.delete(category)\n db.session.commit()\n except:\n flash((\"Failed to delete category \\\"%s\\\".\") % category.name)\n else:\n flash(\"Category \\\"%s\\\" has been deleted.\" % category.name)\n finally:\n return redirect(url_for('.index'))\n return render_template('delete.html', form=form, name=category_name)", "def delete_category(category_id):\n\n category = Category.query.filter(\n Category.id == category_id,\n Category.user_id == current_user.id\n ).first()\n\n if not category:\n flash(\"Couldn't find that category\", category='warning')\n return redirect(request.referrer)\n\n category_name = category.name\n db.session.delete(category)\n db.session.commit()\n flash(\n \"Successfully deleted category '{}'\".format(category_name),\n \"success\")\n\n return redirect(url_for('url.index'))" ]
[ "0.8067774", "0.8005237", "0.7868571", "0.78579575", "0.7821217", "0.77969927", "0.7698097", "0.7654832", "0.7649916", "0.76128393", "0.75621045", "0.7551869", "0.74583393", "0.74365985", "0.73939204", "0.73760337", "0.734237", "0.726488", "0.7225764", "0.72094333", "0.71772903", "0.71766996", "0.71602726", "0.7127478", "0.71256256", "0.71035093", "0.70699614", "0.7047942", "0.70065194", "0.69924694" ]
0.80150735
1
Compute average return and number of steps.
def compute_avg_return_and_steps(environment, policy, num_episodes=10): total_return = 0.0 total_steps = 0.0 for _ in range(num_episodes): time_step = environment.reset() episode_return = 0.0 episode_steps = 0.0 while not time_step.is_last(): action_step = policy.action(time_step) time_step = environment.step(action_step.action) episode_return += time_step.reward episode_steps += 1 total_return += episode_return total_steps += episode_steps average_return = total_return / num_episodes average_episode_steps = total_steps / num_episodes return average_return.numpy()[0], average_episode_steps
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_mean(self, sums, step):\n\n return sums/step", "def calculate(self):\n avg = self.sum / self.n if self.n != 0 else 0\n self.running_avg.append(avg)\n return avg", "def average(self):\n return (self.current + self.last) / 2.0", "def average(self, start, end):\n return self.integrate(start, end) / (end - start)", "def average(self, returns):\r\n return returns.mean() * self.day", "def average(self):\n return self.summation() / self.count()", "def average(self):\n total = 0\n for t in self.memory:\n total += t.reward\n return total/self.__len__()", "def calculateAverage(self, data):\n\n nValidTrials = data['nValid'][-1]\n nRewardTrials = data['nRewarded'][-1]\n return float(nRewardTrials)/nValidTrials", "def total_steps(self) -> global___Expression:", "def avg_return(pct_returns, daily_investment=100, skip_n_steps=0):\n return np.sum(np.multiply(daily_investment, np.average(pct_returns.T, axis=1)[skip_n_steps:]))", "def averageTime(self):\n \n pass", "def compute_avg_reward(env, policy, num_episodes):\n total_return = 0.0\n for _ in range(num_episodes):\n state = env.reset()\n done = False\n episode_return = 0.0\n while not done:\n action = policy(state)\n next_state, reward, done, _ = env.step(action)\n if done:\n reward = -1.0\n episode_return += reward\n state = next_state\n total_return += episode_return\n avg_return = total_return / num_episodes\n return avg_return", "def avg(a,b):\r\n return (a+b)/2", "def avgtr(self):\n return np.diff(self.trtimes).mean()", "def avg(self):\n return sum(self.times) / len(self.times)", "def avg(self):\n return sum(self.times) / len(self.times)", "def avg(self):\n return sum(self.times) / len(self.times)", "def average(self):\n\n x = list(zip(*self.memory))\n states = list(x[0])\n actions = list(x[1])\n \n downsampled_states = resample(states , self.output_size-1)\n downsampled_actions = resample(actions, self.output_size-1)\n\n return downsampled_states, downsampled_actions", "def get_average(self):\n self.avg = math.floor((self.maths + self.phy + self.che) / 3, )\n self.assign_grade()\n return self.avg\n # End of method get_average", "def calc_mean(a, b, c, d, e):\n return (a + b + c + d + e) / 5", "def average(arg1, *args): \n return (arg1 + sum(args)) / (1 + len(args))", "def geo_average(self, returns):\r\n return (1 + returns).prod() ** (self.day / len(returns)) - 1", "def main():\n print(average([2, 4, 6, 8, 20, 50, 70]))", "def averageDistance(nbSteps, nbWalks, func):\n totalDistance = 0\n startPoint = (0, 0)\n for _ in range(nbWalks):\n arrival = None\n while arrival is None:\n arrival = func(startPoint, nbSteps)\n totalDistance += distance(startPoint, arrival)\n return pow(totalDistance/nbWalks, 2)", "def print_avg():", "def getAverage(die, numRolls, numTrials):", "def average(x, y):\n #helper function for get_accuracy\n average = (x+y)/2 \n return average", "def get_average_torsion (phis) :\n shift = phis[0]\n phis_shifted = get_diffvec(phis,shift)\n avg_shifted = phis_shifted.sum()/len(phis)\n average = avg_shifted + shift\n return average", "def get_average_torsion (phis) :\n shift = phis[0]\n phis_shifted = get_diffvec(phis,shift)\n avg_shifted = phis_shifted.sum()/len(phis)\n average = avg_shifted + shift\n return average", "def get_runs_to_average(self):\n\n if Test.performance_params: return int(Test.performance_params[1])\n elif self._check_performance: return self._runs_to_average\n else: return None" ]
[ "0.7158892", "0.6989263", "0.6973302", "0.68902016", "0.67515194", "0.6697132", "0.652611", "0.6519316", "0.64798063", "0.6475223", "0.64444023", "0.6367762", "0.630979", "0.6308836", "0.62865496", "0.62865496", "0.62865496", "0.62493414", "0.61997265", "0.61639595", "0.6111974", "0.6094934", "0.6078407", "0.6073247", "0.60700184", "0.6065695", "0.6047945", "0.60353243", "0.60353243", "0.602915" ]
0.75718766
0
Return the camera's mac address as the serial number.
def serial_number(self) -> str: return self.mac_address
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __get_mac_address(self):\n str_hex_mac = uuid.UUID(int=uuid.getnode()).hex[-12:]\n return str_hex_mac", "def mac(self) -> str:\n return self.camera_info[\"wifi_mac\"]", "def mac_address(self) -> str:\n return self._device.mac", "def serial(self) -> str:\n return self.camera_info[\"device_sn\"]", "def unique_id(self):\n return self._device.mac", "def get_mac_string():\n mac_int = getnode()\n mac_str = ':'.join((\"%012x\" % mac_int)[i:i + 2] for i in range(0, 12, 2))\n return mac_str", "def mac(self):\n return self.device.settings[\"device\"][\"mac\"]", "def mac(self):\n return self.device.settings[\"device\"][\"mac\"]", "def get_mac(self) -> str:\n hex_mac = hexlify(self.message)[160:172].decode().upper()\n return (\n hex_mac[0:2]\n + \":\"\n + hex_mac[2:4]\n + \":\"\n + hex_mac[4:6]\n + \":\"\n + hex_mac[6:8]\n + \":\"\n + hex_mac[8:10]\n + \":\"\n + hex_mac[10:12]\n )", "def _get_mac_address():\n if not sys.platform.startswith('linux'):\n raise RuntimeError(\n 'Cannot get the MAC address on non-Linux platforms'\n )\n ifname = get_default_iface_name_linux()\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n info = fcntl.ioctl(s.fileno(), 0x8927,\n struct.pack('256s', bytes(ifname, 'utf-8')[:15]))\n return ''.join('%02x' % b for b in info[18:24])", "def get_mac(self, node_id):\n nc = '%02x' % self.node_class\n nr_iface = '%02x' % self.nr_host_interface\n node_id = '%08x' % node_id\n\n return '%s:%s:%s:%s:%s:%s' % (nc, nr_iface, node_id[0:2], node_id[2:4], node_id[4:6], node_id[6:8])", "def _get_mac(self):\n return self.__mac", "def mac_address(self):\n mac = [\n self.random.randint(0x00, 0xff),\n self.random.randint(0x00, 0xff),\n self.random.randint(0x00, 0xff),\n self.random.randint(0x00, 0xff),\n self.random.randint(0x00, 0xff),\n self.random.randint(0x00, 0xff)\n ]\n return ':'.join(map(lambda x: f\"{x:02X}\", mac))", "def person_mac(self):\n return self._person_mac", "def get_mac_address():\n eth0_interface = 'eth0'\n addresses = netifaces.ifaddresses(eth0_interface)[netifaces.AF_LINK][0]\n mac_address = addresses['addr']\n return mac_address", "def mac_address(self):\n return self._mac_address", "def get_sonic_mac(host, asicnum, port):\n if host.facts[\"num_asic\"] == 1:\n cmd = \"sudo ip link show {}\".format(port)\n else:\n ns = \"asic\" + str(asicnum)\n cmd = \"sudo ip netns exec {} ip link show {}\".format(ns, port)\n output = host.command(cmd)\n mac = output['stdout_lines'][1].split()[1]\n logger.info(\"host: %s, asic: %d, port: %s, mac: %s\", host.hostname, asicnum, port, mac)\n return mac", "def get_mac_address(self):\n\t\treturn call_sdk_function('PrlVmDevNet_GetMacAddress', self.handle)", "def get_rand_mac(self):\n random_mac = []\n\n # add manufacturer\n random_mac.append(self.get_rand_manufact())\n\n # generate the last 24 bits of the random hex\n for i in range(0, 3):\n rand_digit1 = self.get_rand_hex_digit()\n rand_digit2 = self.get_rand_hex_digit()\n random_mac.append(rand_digit1 + rand_digit2)\n\n return ':'.join(random_mac)", "def _mac_addr(address):\n return ':'.join('%02x' % ord(b) for b in address)", "def get_mac():\n\n interface = [x for x in netifaces.interfaces() if 'wlan' in x or 'wlp' in x][0]\n return netifaces.ifaddresses(interface)[netifaces.AF_LINK][0]['addr']", "def get_mac(self) -> str:\n self.sendline(\"iw {} info\".format(self.iface_dut))\n # We are looking for MAC definition of STA\n # wdev 0x1\n # addr 96:4e:c9:cc:7a:2c\n # type managed\n self.expect(\"addr (?P<mac>..:..:..:..:..:..)\\r\\n\\t(type|ssid)\")\n return 
self.match.group('mac')", "def mac(self):\n mac = hexlify(WLAN().config('mac'), ':').decode()\n return mac.upper() # MAC-address in upper case", "def mac_ntoa(mac):\n return '%.2x:%.2x:%.2x:%.2x:%.2x:%.2x' % tuple(map(ord, list(mac)))", "def get_mac(self) :\n\t\ttry :\n\t\t\treturn self.p_fields.f128\n\t\texcept :\n\t\t\treturn None", "def macFor(cls, board):\n return cls.MAC_PREFIX + '{:02X}'.format(int(board))", "def idn(self):\n hname = (ct.c_char * 100)()\n self.lib.GetHeadModel(ct.pointer(hname))\n hname = str(hname.value)[2:-1]\n sn = ct.c_uint()\n self.lib.GetCameraSerialNumber(ct.pointer(sn))\n return 'Andor ' + hname + ', serial number ' + str(sn.value)", "def _get_mac_address(self, mac_numbers):\n\n mac = \"\"\n for num in mac_numbers:\n num = self._convert_to_hex(num)\n mac = ':'.join((mac, num))\n mac = mac[1:]\n return mac", "def get_mac_address(self):\n\t\treturn call_sdk_function('PrlSrvCfgNet_GetMacAddress', self.handle)", "def getMac(self):\n # Import netifaces here to prevent error importing this module in setup.py\n import netifaces\n interfaces = ['eth0', 'wlan0']\n try:\n interfaces.append(netifaces.gateways()['default'][netifaces.AF_INET][1])\n except:\n pass\n for interface in interfaces:\n try:\n return netifaces.ifaddresses(interface)[netifaces.AF_LINK][0]['addr']\n except ValueError:\n pass\n except:\n exception('Error getting MAC address')\n return None" ]
[ "0.74284977", "0.7396035", "0.7244921", "0.7241079", "0.7186397", "0.71549183", "0.6929182", "0.6929182", "0.68750423", "0.686168", "0.6835941", "0.6835715", "0.68190706", "0.67833877", "0.6717112", "0.6596283", "0.65792704", "0.6549326", "0.6522258", "0.6508936", "0.6494126", "0.6475544", "0.6457014", "0.6446254", "0.64233387", "0.64109164", "0.64090216", "0.63519794", "0.63443905", "0.6301198" ]
0.77441496
0
Return True if capture clip on motion is active.
def capture_clip_on_motion(self) -> bool: return self.data[Attribute.CAPTURE_CLIP_ON_MOTION]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def capture_is_active(self):\n return self.um in self._streams", "def can_activate(self):\n if self.video_library.get_number_of_video_clips() == 0:\n return False\n else:\n return True", "def motion_detection_enabled(self):\n return self._motion_status", "def is_active(self) -> bool:\n return self.map.active_cam == self.map.cameras.index(self) + 1", "def is_StartCapture_allowed(self):\n handler = self.get_command_object(\"StartCapture\")\n return handler.check_allowed()", "def is_playing(self):\n return self.process is not None", "def is_screen_on(self):\n out = self.adb.get_window_policy_info()\n pattern = re.compile('mScreenOnFully=(true|false)')\n return pattern.search(str(out)).group(1)", "def get_active(self):\n if hasattr(self, 'canvas'):\n return True\n else:\n return False", "def require_motion(self) -> bool:\n return self._require_motion", "def is_on(self):\n camera = self.coordinator.data[self._camera_id]\n if self._switch_type == \"record_motion\":\n enabled = True if camera[\"recording_mode\"] == TYPE_RECORD_MOTION else False\n elif self._switch_type == \"record_always\":\n enabled = True if camera[\"recording_mode\"] == TYPE_RECORD_ALLWAYS else False\n else:\n enabled = True if camera[\"ir_mode\"] == self._ir_on_cmd else False\n return enabled", "def isActive(self):\n self._acquire_lock()\n returned = True\n try:\n if len(self.existing_frames) == 0 :\n returned = False\n finally:\n self._release_lock()\n return returned", "def captured(self):\n return self.game.enemyTeam.flag.carrier != None", "def captured(self):\n return self.game.enemyTeam.flag.carrier != None", "def captured(self):\n return self.commander.game.enemyTeam.flag.carrier != None", "def captured(self):\n return self.commander.game.enemyTeam.flag.carrier != None", "def camera_set(self) -> bool:\n if self.camera is None: # pragma: no cover\n return False\n return self.camera.is_set", "def hasMultiPassEffects(self):\r\n cls = mxs.classOf(self._nativePointer)\r\n if cls in (mxs.VRayPhysicalCamera, mxs.Physical):\r\n return self._nativePointer.use_DOF\r\n elif mxs.isProperty(self._nativePointer, 'mpassEnabled'):\r\n return self._nativePointer.mpassEnabled\r\n return False", "def _isInIdle(self):\r\n if core.FW_conf['blackbox'].isVideoRecorderAvailable() and core.FW_conf['blackbox'].videoClient.GetCurrentState() == 'idle':\r\n self.inIdle = True\r\n return True\r\n else:\r\n return False", "def is_capturing(self):\r\n self.priority += self.captures*10", "def is_motion(self, channel=None):\n return bool(self.getBinaryData(\"MOTION\", channel))", "def is_motion(self, channel=None):\n return bool(self.getBinaryData(\"MOTION\", channel))", "def detectMotion():\n global MotionDetected\n MotionDetected = False\n return MotionDetected", "def is_on(self) -> bool:\n return self._client.get_circ_pump()", "def is_active(self):\n with self._lock:\n return self._robot is not None", "def is_active(self):\n if self.steps > STEPS_MAX or not self.targets[0]:\n return False\n return True", "def detector_active(self) -> bool:\n\t\treturn self._raw_result['data']['detectorActive']", "def can_play_on_all_active(self):\n if self.last_move is None:\n return True\n x, y = self.last_move[-2:]\n if self.boards[x][y].state != State.IN_PROGRESS:\n return True\n return False", "def is_StopCapture_allowed(self):\n handler = self.get_command_object(\"StopCapture\")\n return handler.check_allowed()", "def checkCamera(self):\n #how to check if cam exits\n 
#https://stackoverflow.com/questions/48049886/how-to-correctly-check-if-a-camera-is-available\n if \"has_cam\" in self.store.keys():\n return self.store[\"has_cam\"]\n\n cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)\n\n if cap is None or not cap.isOpened():\n return False\n #Close the cam\n cap.release()\n cv2.destroyAllWindows()\n return True", "def detect(frame: numpy.ndarray) -> bool:\n color = frame[:20, 1100:1150].mean(axis=(0, 1))\n return numpy.linalg.norm(color - BG_COLOR) < 5" ]
[ "0.6876677", "0.6634053", "0.63762534", "0.6186591", "0.60965234", "0.605035", "0.6013605", "0.6006316", "0.5958077", "0.59114885", "0.5910525", "0.5908825", "0.5908825", "0.58919317", "0.58919317", "0.5860505", "0.5826048", "0.5814718", "0.5800764", "0.5795898", "0.5795898", "0.5793163", "0.57746214", "0.5766827", "0.5758039", "0.5740444", "0.5718584", "0.57157695", "0.56888545", "0.5661974" ]
0.8491094
0
Return True if privacy mode is active.
def is_in_privacy_mode(self) -> bool: return self.data[Attribute.CAMERA_PRIVACY]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_aprentice(self):\n return self.user_profile_status == self.APPRENTICE", "def private(self) -> bool:\n return pulumi.get(self, \"private\")", "def authorized(self) -> bool:\n\n return (\n self.activated\n or self.on_screen\n or self.on_file\n or (\n bool(PyFunceble.storage.CONFIGURATION)\n and bool(PyFunceble.storage.CONFIGURATION.debug.active)\n )\n )", "def permissive(self) -> bool:\n return self._permissive", "def has_permission(self, request):\n return request.user.is_active", "def has_permission(self, request):\n return request.user.is_active", "def has_permission(self, request):\n return request.user.is_active", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self) -> bool:", "def is_active(self):\n return self.user.is_active", "def is_on(self):\n return bool(self.enabled)", "def is_on(self):\n return self._data[\"enabled\"]" ]
[ "0.6490553", "0.6430658", "0.64281636", "0.638807", "0.63529915", "0.63529915", "0.63529915", "0.6319265", "0.6319265", "0.6319265", "0.6319265", "0.6319265", "0.6319265", "0.6319265", "0.6319265", "0.6319265", "0.6319265", "0.6319265", "0.6319265", "0.6319265", "0.6319265", "0.6319265", "0.6319265", "0.6319265", "0.6319265", "0.6319265", "0.6279934", "0.6267442", "0.6230965", "0.62258554" ]
0.86465675
0
Camera's wireless signal strength.
def wireless_signal_strength(self) -> int: return self.data[Attribute.WIRELESS_SIGNAL_STRENGTH]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetCurrentSignalStrength(self, iwconfig=None):\n try:\n strength = int(self.wifi.GetSignalStrength(iwconfig))\n except:\n strength = 0\n return strength", "def strength(self) -> float:\n ...", "def sensorStrength(self):\n # TODO: also return type of sensor\n radar = self._getAttribute(Attribute.scanRadarStrength)\n ladar = self._getAttribute(Attribute.scanLadarStrength)\n magnetometric = self._getAttribute(Attribute.scanMagnetometricStrength)\n gravimetric = self._getAttribute(Attribute.scanGravimetricStrength)\n\n return radar or ladar or magnetometric or gravimetric", "def SignalStrengthIs( self, signalStrength ):\n\t\tif( not self.running ):\n\t\t\treturn\n\t\t#print('')\n\t\t#print( '--------------------------' )\n\t\t#print( '{}: at {}MHz signal strength is {}%'.format( \n\t\t\t#( 'Running...' if self.running else 'Not running!' ),\n\t\t\t#self.frame.radio.frequency, \n\t\t\t#self.frame.radio.signalStrength ) )\n\t\t\n\t\tstrength = self.frame.radio.signalStrength\t\t\t\n\t\t\t\n\t\t#print( 'a:{} + b:{} + s:{} = {} ~ max:{}, f:{} - last:{} = {} ~ MIN_FREQUENCY_SEPARATION:{}'.format( ScanUntilObserver.a, ScanUntilObserver.b, strength, ScanUntilObserver.a + ScanUntilObserver.b + strength,\n\t\t\t#self.frame.radio.GetMaxSignalStrength(),\n\t\t\t#self.frame.radio.frequency, ScanUntilObserver.last,\t\t\t\n\t\t\t#math.fabs( self.frame.radio.frequency - ScanUntilObserver.last ),\n\t\t\t#self.frame.Settings.ScanFrequencySeparationThresholdMHz ) )\n\t\tlast3Strengths = ScanUntilObserver.a + ScanUntilObserver.b + strength\n\t\tif( ( last3Strengths > self.frame.radio.GetMaxSignalStrength() ) and\n\t\t\tlast3Strengths > 0.8*self.frame.Settings.MinAcceptableStationSignalStrength and\n\t\t\t( math.fabs( self.frame.radio.frequency - ScanUntilObserver.last ) > \n\t\t\tself.frame.Settings.ScanFrequencySeparationThresholdMHz ) \n\t\t\t):\n\t\t\t#print( 'previous strength:{}%, current strength:{}%'.format( \n\t\t\t#self.previousSignalStrength, strength ) )\n\t\t\tif( self.previousSignalStrength > strength ):\t\t\t\n\t\t\t\tif( self.mode == ScanUntilObserver.RISING ):\n\t\t\t\t\t#print( 'found peak' )\n\t\t\t\t\tif( self.previousSignalStrength > self.frame.Settings.MinAcceptableStationSignalStrength ):\n\t\t\t\t\t\tself.Stop()\t\t\t\t\t\n\t\t\t\t\t\tScanUntilObserver.a = 0\n\t\t\t\t\t\tScanUntilObserver.b = 0\n\t\t\t\t\t\tScanUntilObserver.last = self.frame.radio.frequency\n\t\t\t\t\t\tself.frame.radio.Tune( self.previousFrequency )\n\t\t\t\t\t\tself.frame.ZappStart( self.Start )\n\t\t\t\t\t\treturn\t\t\t\t \n\t\t\t\telse:\n\t\t\t\t\t#print( 'mode = FALLING' )\n\t\t\t\t\tself.mode = ScanUntilObserver.FALLING\n\t\t\telse:\n\t\t\t\t#print( 'mode = RISING' )\n\t\t\t\tself.mode = ScanUntilObserver.RISING\n\t\t\t\n\t\tScanUntilObserver.a = ScanUntilObserver.b\n\t\tScanUntilObserver.b = strength\n\t\tself.previousSignalStrength = strength\n\t\tself.previousFrequency = self.frame.radio.frequency\n\t\tself.frame.ScanTimer.Start( self.frame.Settings.ScanWaitTimeMSecs, True )\n\t\treturn", "def getStrength(self):\n return self.st", "def getSignalQualityInDBM(self):\n return (float(self.wlanSignalQuality) / 2.0) - 100.0", "def GetPrintableSignalStrength(self, iwconfig=None):\n if self.GetSignalDisplayType() == 0:\n return self.GetCurrentSignalStrength(iwconfig)\n else:\n return self.GetCurrentDBMStrength(iwconfig)", "def calculate_signal_power(self, sender, freq_range):\r\n distance = np.sqrt(\r\n np.power(self.x - sender.x, 2) + np.power(self.y - sender.y, 2))\r\n avg_frequency = np.average(freq_range) * 
1e6\r\n wavelength = settings.speed_of_light / avg_frequency\r\n received_signal_power = (\r\n sender.tx_power * sender.gain * self.gain * np.power(\r\n wavelength, 2)) / np.power(4 * np.pi * distance, 2)\r\n return received_signal_power", "def digital_gain():\n def r(x):\n return x/512.\n\n def w(x):\n return int(x*512)\n return r, w", "def lightSpeed():\n return const.c.value", "def weight(self):\n return self._hx711.get_weight()", "def strength(self) -> int:\n return self._strength", "def get_winStrength(self):\n if self.winStrength is None:\n self.calculate_my_win_strength()\n return self.winStrength", "def get_power(self):\n #GPIO.setmode(GPIO.BOARD)\n #GPIO.setup(self.input_pin, GPIO.IN)\n return 0", "def num_wires(self):", "def gain(self):\n return self[1]", "def microphone_sensitivity(transferfactor: float) -> float:\n return amp2db(transferfactor/1000.)", "def lms_gain(self):\n return self._lms_gain", "def get_signal_percent(self):\n # RSSI or this signal value is measured in decibels from 0 (zero) to\n # -120 (minus 120). The closer the value to 0 (zero), the stronger the\n # signal will be.\n if self.rssi < -80:\n return 20\n elif self.rssi < -70:\n return 40\n elif self.rssi < -60:\n return 60\n elif self.rssi < -50:\n return 80\n elif self.rssi <= 0:\n return 100\n else:\n return 0", "def calcPower(self, inputs):\n if self.getAtt('available', inputs):\n possible_charge_rate = self.getAtt('possible_charge_rate', inputs)\n Vm = self.getAtt('Vm', inputs)\n P = possible_charge_rate * Vm\n if not self.stayConnected:\n P = P * self.calculateVoltageIndex(Vm) * self.calculateTrafoIndex()\n return P\n return 0.0", "def tx_power(self) -> int:\n # Follow table 10 truth table from the datasheet for determining power\n # level from the individual PA level bits and output power register.\n pa0 = self.pa_0_on\n pa1 = self.pa_1_on\n pa2 = self.pa_2_on\n current_output_power = self.output_power\n if pa0 and not pa1 and not pa2:\n # -18 to 13 dBm range\n return -18 + current_output_power\n if not pa0 and pa1 and not pa2:\n # -2 to 13 dBm range\n return -18 + current_output_power\n if not pa0 and pa1 and pa2 and not self.high_power:\n # 2 to 17 dBm range\n return -14 + current_output_power\n if not pa0 and pa1 and pa2 and self.high_power:\n # 5 to 20 dBm range\n return -11 + current_output_power\n raise RuntimeError(\"Power amps state unknown!\")", "def get_strength(self):\n return 10 - self.get_agility()", "def silencing_constraint(self, x0: devices.PrimaryWeights) -> float:\n contrast = self.get_photoreceptor_contrasts(x0)\n return sum(pow(contrast[self.silence].values, 2))", "def get_wl_band(radar_frequency):\n return 0 if (30 < radar_frequency < 40) else 1", "def overall_sensitivity(self):\n if self.mod1:\n s = torch.max(torch.max(self.weight, -1)[0], -1)[0].item()\n else:\n s = torch.max(torch.sqrt(torch.sum(self.weight * self.weight, -1)))[0].item()\n s *= np.sqrt(2. / np.e)\n return s", "def cMACW(self):\n return 3.5", "def vga_gain(self):\n return float(self._read(0x15, 4, 0x70) + 8)", "def wavelength(energy):\r\n return 2 * np.pi * PLANCK_CONSTANT * SPEED_OF_LIGHT / energy", "def fieldBaseStrength(self):\n return self.params['fieldStrength']", "def addStrength(self):\n\t\tself.strength += 1\n\t\tif self.strength > 10:\n\t\t\tself.strength = 10" ]
[ "0.7031714", "0.6994032", "0.67061955", "0.6649575", "0.64029896", "0.63176775", "0.6286045", "0.61830634", "0.61751175", "0.6073168", "0.6055971", "0.6050908", "0.5939323", "0.59383196", "0.59170634", "0.5894795", "0.5874123", "0.5873845", "0.58524925", "0.5838355", "0.58368933", "0.5815634", "0.578764", "0.5785689", "0.57613915", "0.57528955", "0.5747629", "0.57446307", "0.57362854", "0.5718873" ]
0.8146503
0
Request a new thumbnail for the camera.
async def request_thumbnail(self) -> None: await self.vivintskyapi.request_camera_thumbnail( self.alarm_panel.id, self.alarm_panel.partition_id, self.id )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def thumbnail(self, thumbnail):\n self._thumbnail = thumbnail", "def set_thumbnail(self, **kwargs):\n self.thumbnail_url = kwargs.get('url')", "def GetThumbnail(self, type, maxsize): # real signature unknown; restored from __doc__\n pass", "def thumbnail(self, width, height, path, **kwargs):\n return self.get('fileops/thumbnail', api='CONV', params={\n 'root': self.root,\n 'path': path,\n 'width': width,\n 'height': height,\n }, **kwargs)", "def setThumbnailImage(*args):", "def fetch_thumbnail():\n uuid = request.args.get(\"id\")\n crop = request.args.get(\"crop\")\n if crop == \"None\":\n crop = None\n thumb_mime = \"image/jpg\"\n thumbname = \"(no file)\"\n try:\n thumbname, cl = media.get_thumbname(uuid, crop)\n if cl == \"jpg\":\n ret = send_file(thumbname, mimetype=thumb_mime)\n elif cl == \"pdf\":\n ret = send_file(\n os.path.join(\"static\", \"image/a_pdf.png\"), mimetype=thumb_mime\n )\n else:\n ret = send_file(\n os.path.join(\"static\", \"image/noone.jpg\"), mimetype=thumb_mime\n )\n logger.debug(f\"-> bp.scene.routes.fetch_thumbnail ok\")\n except FileNotFoundError:\n # Show default image\n ret = send_file(os.path.join(\"static\", \"image/noone.jpg\"), mimetype=thumb_mime)\n logger.debug(f\"-> bp.scene.routes.fetch_thumbnail none\")\n\n return ret", "def thumbnail(self, options):\n params = {\n 'width': options['width'] if 'width' in options else 50,\n 'height': options['height'] if 'height' in options else 50,\n 'smartCropping': options['smartCropping'] if 'smartCropping' in options else False\n }\n\n return Base._postWithOptions(self, _thumbnailUrl, options, params)", "def thumbnail(self, from_path, size='m', format='JPEG'):\n assert format in ['JPEG', 'PNG'], \\\n \"expected a thumbnail format of 'JPEG' or 'PNG', got %s\" % format\n\n path = \"/thumbnails/%s%s\" % (self.session.root, format_path(from_path))\n\n url, params, headers = self.request(path, {'size': size, 'format': format},\n method='GET', content_server=True)\n return self.rest_client.request(\"GET\", url, headers=headers, raw_response=True)", "def mosaic_thumbnail(self):\n serial = slugify(self.request.matchdict[\"serial\"])\n filename = \"thumbnails/%s/mosaic.png\" % serial\n return FileResponse(filename)", "def get_thumbnail(self):\r\n raise Exception('get_thumbnail is Not Implemented in base class \"Video\"')", "def create_thumbnail(self, target, format=None):", "def top_thumbnail(self):\n serial = slugify(self.request.matchdict[\"serial\"])\n filename = \"thumbnails/%s/top.png\" % serial\n return FileResponse(filename)", "def thumbnail(self, size, resample=BICUBIC):\r\n # preserve aspect ratio\r\n x, y = self.size\r\n if x > size[0]:\r\n y = int(max(y * size[0] / x, 1))\r\n x = int(size[0])\r\n if y > size[1]:\r\n x = int(max(x * size[1] / y, 1))\r\n y = int(size[1])\r\n size = x, y\r\n if size == self.size:\r\n return\r\n self.draft(None, size)\r\n self._instance = self.resize(size, resample, image=self._instance)\r\n self.readonly = 0\r\n self.pyaccess = None", "def thumbnail(self):\n\n if self._thumbnail is None:\n cover = self.cover()\n\n if cover is not None:\n self._thumbnail = cover.resize(THUMBNAIL_SIZE, Image.ANTIALIAS)\n\n return self._thumbnail", "def thumbnail(self, img_path):\n\n thumb_path = self.thumbnail_path(img_path)\n\n if os.path.exists(thumb_path):\n return thumb_path\n else:\n self.queue_thumbnail(img_path)\n return None", "def thumbnail(self):\n return self.get_thumbnail_url()", "def get_thumbnail_url():", "async def get_thumbnail_url(self) -> str:\n # Sometimes this date field 
comes back with a \"Z\" at the end\n # and sometimes it doesn't, so let's just safely remove it.\n camera_thumbnail_date = datetime.strptime(\n self.data[Attribute.CAMERA_THUMBNAIL_DATE].replace(\"Z\", \"\"),\n \"%Y-%m-%dT%H:%M:%S.%f\",\n )\n thumbnail_timestamp = int(camera_thumbnail_date.timestamp() * 1000)\n\n return await self.vivintskyapi.get_camera_thumbnail_url(\n self.alarm_panel.id,\n self.alarm_panel.partition_id,\n self.id,\n thumbnail_timestamp,\n )", "def take_photo(self):\n\n status = self.camera.status()\n if status['mode'] != 'still':\n # place camera in snapshot mode\n self.camera.command('mode', 'still')\n\n photo_successful = self.camera.command('record', 'on')\n\n if photo_successful:\n\n # sleep for two seconds so the camera can process\n # and serve the new photo via http\n\n retrieved = False\n while not retrieved:\n print(\"Waiting for image to be served.\")\n time.sleep(2)\n retrieved = self.get_photos_from_device()\n\n print(\"Image got served.\")\n return True\n\n else:\n return False", "def thumbnail(im, config):\n\n im.thumbnail(\n (config['width'], config['height']),\n ANTIALIAS,\n )\n\n return im", "def thumbnail_url(self):\n return None", "def thumbnail(self):\n return self._thumbnail", "def __get_thumbnail_from_service(self, uri):\n root, ext = os.path.splitext(uri)\n head, tail = os.path.split(uri)\n output_file = os.path.join(self.__base, tail)\n try:\n if self.__service.GetVideoFrame('file://' + uri, output_file):\n d = os.path.join(constant.THUMBNAIL_DIR, self.__service.name)\n return utils.create_thumbnail_image(thumbnail_dir = d,\n full_image_path = output_file,\n canvas = g_thumb_bkgd,\n target = uri)\n except Exception, e:\n print e\n # We seem to be choking on thumbnail request, so just return False\n # and let the placeholder icon be used in the iconview\n pass\n return False", "def save(self, *args, **kwargs):\n if not self.pk: # on create\n image = Image.open(self.file)\n image.thumbnail((400, 400), Image.ANTIALIAS)\n\n thumb = io.BytesIO()\n image.save(\n thumb, format=\"jpeg\", quality=80, optimize=True, progressive=True\n )\n self.thumbnail = InMemoryUploadedFile(\n thumb, None, self.file.name, 'image/jpeg', thumb.tell(), None\n )\n\n super(File, self).save(*args, **kwargs)", "def resized(self, source='image', id='imagekit:thumbnail',\n\t\tdest=None, **kwargs):\n\n\t\tif dest and hasattr(self, dest):\n\t\t\treturn getattr(self, dest)\n\n\t\tkwargs['source'] = getattr(self, source)\n\n\t\tgenerator = generator_registry.get(id, **kwargs)\n\t\timage = ImageCacheFile(generator)\n\t\tif dest:\n\t\t\tsetattr(self, dest, image)\n\t\treturn image", "def make_thumbnail(self):\n # https://gist.github.com/valberg/2429288\n\n # make sure image data is set\n if not self.image_data:\n return False\n\n if self.proxy_data:\n return True\n\n # Create a resized version of the image\n image = Image.open(self.image_data)\n image.thumbnail(THUMBNAIL_SIZE, Image.BICUBIC)\n\n # Save the thumbnail to in-memory 'file'\n temp_thumb = BytesIO()\n image.save(temp_thumb, 'jpeg')\n temp_thumb.seek(0) # rewinds the file\n\n # Save image to a SimpleUploadFile which can be saved\n # into ImageField\n # TODO figure out how to pass base image's UUID before\n # image is committed to DB\n basename = os.path.basename(self.image_data.name)\n uuidname = os.path.splitext(basename)[0]\n suf = SimpleUploadedFile(uuidname,\n temp_thumb.read(), content_type='image/jpeg')\n thumb_filename = '{}_thumb.jpeg'.format(suf.name)\n\n # set save=False, or else it will infinite loop\n 
self.proxy_data.save(thumb_filename,\n suf,\n save=False)\n\n # Also store the real dimensions for the Pillow thumbnail\n self.proxy_width, self.proxy_height = image.size\n\n temp_thumb.close()\n\n return True", "def thumbnail(self, fnameIn, fnameOut):\n cmd = \"convert -define jpeg:size=500x150 \"\n cmd += '\"%s\" ' % os.path.join(self.downloadFolder, fnameIn)\n cmd += \"-auto-orient -thumbnail 250x150 \"\n cmd += '\"%s\" ' % os.path.join(self.thumbnailFolder, fnameOut)\n self.log(\"creating thumbnail ...\")\n self.log(cmd)\n process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n process.wait()", "def get_thumbnail(self, size):\n\n thumb = self.associated_images[b'thumbnail']\n return thumb", "def take_picture(self):\n self.drone.take_picture()", "def take_picture(self):\n self.drone.take_picture()" ]
[ "0.6508987", "0.6476307", "0.6436708", "0.6407975", "0.6400892", "0.63398254", "0.6334903", "0.62879765", "0.6212906", "0.62107", "0.6181639", "0.60656625", "0.6060553", "0.60002947", "0.5988563", "0.59305435", "0.59282035", "0.5918891", "0.5912826", "0.59014267", "0.58705056", "0.5862334", "0.5815107", "0.57987887", "0.57642215", "0.5751984", "0.57464796", "0.5722924", "0.57122934", "0.57122934" ]
0.821199
0
Return the latest camera thumbnail URL.
async def get_thumbnail_url(self) -> str: # Sometimes this date field comes back with a "Z" at the end # and sometimes it doesn't, so let's just safely remove it. camera_thumbnail_date = datetime.strptime( self.data[Attribute.CAMERA_THUMBNAIL_DATE].replace("Z", ""), "%Y-%m-%dT%H:%M:%S.%f", ) thumbnail_timestamp = int(camera_thumbnail_date.timestamp() * 1000) return await self.vivintskyapi.get_camera_thumbnail_url( self.alarm_panel.id, self.alarm_panel.partition_id, self.id, thumbnail_timestamp, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def last_camera_image_url(self) -> str:\n return self.camera_info[\"cover_path\"]", "def get_thumbnail_url(self):\n if not self.id_video or not self.original_url or not self.xml_response:\n return ''\n return self.xml_response.find('framegrab_url').text", "def get_thumbnail_url(self):\n if self.thumbnail_url:\n return self.thumbnail_url\n \n if not self.get_video_id():\n return ''\n \n vine_url = self.get_url()\n res = self._http_request(vine_url)\n m = re.search(r'property=\"og:image\" content=\"(?P<thumbnail>[^\"]*)\"', res)\n if m and m.groupdict():\n self.thumbnail_url = m.groupdict().get('thumbnail') or ''\n \n return self.thumbnail_url", "def get_thumbnail_url(self):\n return self.thumbnail_url", "def get_thumbnail_url():", "def get_thumbnail_url(self):\n if self.thumbnail_url:\n return self.thumbnail_url\n \n if not self.get_video_id():\n return ''\n \n if not self.thumbnail_url:\n api_url = 'https://api.dailymotion.com/video/%s?fields=thumbnail_url' % self.get_video_id()\n res = self._oembed_request(api_url)\n self.thumbnail_url = res.get('thumbnail_url', '')\n return self.thumbnail_url", "def get_thumbnail_url(self):\n if self.thumbnail_url:\n return self.thumbnail_url\n \n if not self.get_video_id() or not self.get_username():\n return ''\n \n channel_formated = 'x%sx' % (self.get_username().replace('_', '-'))\n api_url = 'http://%s.api.channel.livestream.com/2.0/thumbnail.json?id=%s' % (channel_formated, self.get_video_id())\n \n res = self._oembed_request(api_url)\n thumbnail = res.get('thumbnail', {})\n self.thumbnail_url = thumbnail.get('@url', '')\n return self.thumbnail_url", "def thumbnail(self):\n return self.get_thumbnail_url()", "def get_thumbnail_url(self):\n if not self.get_video_id():\n return ''\n \n if not self.thumbnail_url:\n self.thumbnail_url = 'https://img.youtube.com/vi/%s/hqdefault.jpg' % self.get_video_id()\n \n return self.thumbnail_url", "def get_thumbnail_url(self):\n \n if self.thumbnail_url:\n return self.thumbnail_url\n \n if not self.get_video_id():\n return ''\n \n api_url = 'http://vimeo.com/api/v2/video/%s.json' % self.get_video_id()\n try:\n res = self._oembed_request(api_url)[0]\n except KeyError:\n return ''\n self.thumbnail_url = res.get('thumbnail_large', '')\n return self.thumbnail_url", "def get_thumbnail_url(self):\n if self.thumbnail_url:\n return self.thumbnail_url\n \n if not self.get_video_id():\n return ''\n \n if self.get_video_id() == -1:\n return ''\n \n if not self.thumbnail_url:\n thumb_url = self.res.get('slide_image_baseurl', '')\n thumb_suffix = self.res.get('slide_image_baseurl_suffix', '')\n if thumb_url and thumb_suffix:\n #hardcode: \"1\" means the slide that we want to show as thumbnail.\n # this case is slide number 1 of presentation.\n thumb_url = ''.join(['https:', thumb_url, '1', thumb_suffix])\n self.thumbnail_url = thumb_url\n \n return self.thumbnail_url", "def thumbnail_url(self):\n return None", "def get_thumbnail_url(self):\n if not self._oembed:\n return ''\n \n if not self.thumbnail_url:\n self.thumbnail_url = self._oembed.get('thumbnail_url', '')\n \n return self.thumbnail_url", "def get_thumb_url(self):\n return self.thumb_url", "def camera_image(self):\n now = utcnow()\n if self._ready_for_snapshot(now) or True:\n image = self._device.camera_get_image(self._uuid, now)\n\n self._next_snapshot_at = now + self._time_between_snapshots\n self._last_image = image\n\n return self._last_image", "def img_url_thumbnail(self):\n url = '%s=s%s-c' % (self.img_url, self.THUMBNAIL_SIZE_PX)\n if self.img_rot in 
Plaque.ALLOWED_ROTATIONS:\n url = \"%s-r%s\" % (url, self.img_rot)\n return url", "def media_image_url(self):\n\n if self._table.active_track:\n return self._table.active_track.get_thumbnail_url(Track.ThumbnailSize.LARGE)\n\n return super().media_image_url", "def camera_image(self):\n if not self.ezvizService.switchState:\n return \"\"\n\n now = time.time()\n if now < self._last_snapshot_time + self._interval_snapshots:\n return self._last_image\n\n result = self.ezvizService.post('/lapp/device/capture', data={'deviceSerial':self.deviceSerial,'channelNo':1})\n if (result['code']!='200'):\n _LOGGER.error(\"EZVIZ capture image fail:%s\", result)\n return self._last_image\n\n image_path = result['data']['picUrl']\n try:\n response = requests.get(image_path)\n except requests.exceptions.RequestException as error:\n _LOGGER.error(\"EZVIZ getting camera image: %s\", error)\n return self._last_image\n\n self._last_snapshot_time = now\n self._last_image = response.content\n return self._last_image", "def get_thumbnail_url(self, playback_id):\n return f'https://image.mux.com/{playback_id.id}/thumbnail.jpg'", "def thumbnail_url_if_set(self):\n return self.thumbnail.url if self.thumbnail else self.file.url", "def thumbnail(self):\n return self._thumbnail", "def get_thumbnail_url(self):\n raise NotImplementedError(\"Subclass must implement abstract method get_thumbnail_url\")", "def thumbnail_url_if_set(self):\n progress_url = settings.GALLERY_VIDEO_THUMBNAIL_PROGRESS_URL\n return self.thumbnail.url if self.thumbnail else progress_url", "def get_possible_thumbnail(self):\n meta = self.get_meta_data()\n print meta\n if \"og:image\" in meta:\n return meta[\"og:image\"]\n elif \"twitter:image:src\" in meta:\n return meta[\"twitter:image:src\"]\n else:\n images = self.get_image_data()\n temp_url = \"\"\n temp_width = 0\n for img in images:\n if img[\"image_width\"] > temp_width:\n temp_url = img[\"image_url\"]\n temp_width = img[\"image_width\"]\n\n return temp_url", "def prepare_thumbnail_url(self, object):\n if object.media is not None:\n return os.path.join(settings.MEDIA_URL, object.media.media_thumb_file.name)\n else:\n return ''", "def mjpeg_image_url(self) -> str:\n\t\treturn 'video.mjpg?oid={0}'.format(self._oid)", "def get_thumbnail(self):\r\n raise Exception('get_thumbnail is Not Implemented in base class \"Video\"')", "def _get_url(self, video, thumbnail):\n return f\"/api/videos/{video.pk}/thumbnails/{thumbnail.id}/\"", "def get_thumbnail_url(self, image_url):\n\n return settings.THUMBNAILER_URL + image_url", "def media_image_url(self):\n if (media_status := self._media_status()[0]) is None:\n return None\n\n images = media_status.images\n\n return images[0].url if images and images[0].url else None" ]
[ "0.7910915", "0.7836688", "0.7768984", "0.7726674", "0.7721377", "0.7670391", "0.7665452", "0.76559013", "0.76526964", "0.75800186", "0.75523674", "0.74557847", "0.7306187", "0.7166377", "0.70493877", "0.70226187", "0.7014065", "0.6992731", "0.6937823", "0.6921818", "0.6912715", "0.6776094", "0.676816", "0.67660165", "0.6723035", "0.66999596", "0.66222775", "0.65988624", "0.6577496", "0.65690744" ]
0.8384189
0
Return the rtsp URL for the camera.
async def get_rtsp_url(self, internal: bool = False, hd: bool = False) -> str: credentials = await self.alarm_panel.get_panel_credentials() url = self.data[f"c{'i' if internal else 'e'}u{'' if hd else 's'}"][0] return f"{url[:7]}{credentials[PanelCredentialAttribute.NAME]}:{credentials[PanelCredentialAttribute.PASSWORD]}@{url[7:]}"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rtsp_stream_url(self) -> str:\n return self.properties.get(MessageField.RTSP_STREAM_URL.value)", "async def get_direct_rtsp_url(self, hd: bool = False) -> str:\n return (\n f\"rtsp://{self.data[Attribute.USERNAME]}:{self.data[Attribute.PASSWORD]}@{self.ip_address}:{self.data[Attribute.CAMERA_IP_PORT]}/{self.data[Attribute.CAMERA_DIRECT_STREAM_PATH if hd else Attribute.CAMERA_DIRECT_STREAM_PATH_STANDARD]}\"\n if self.data[Attribute.CAMERA_DIRECT_AVAILABLE]\n and self.data.get(Attribute.ACTUAL_TYPE) not in SKIP_DIRECT\n else None\n )", "def get_url(self):\n if not self.get_video_id() or not self.get_username():\n return ''\n \n return 'http://www.livestream.com/%s/video?clipId=%s' % (self.get_username(), self.get_video_id())", "def video_stream_url(self):\n return self._video_stream_url", "def rtsp(ctx, src, endpoint, verbose):\n import gi\n gi.require_version('Gst', '1.0')\n gi.require_version('GstRtspServer', '1.0')\n from gi.repository import Gst, GstRtspServer, GObject\n from ace.rtspserver import GstServer\n GObject.threads_init()\n Gst.init(None)\n\n if src.isdigit():\n src = int(src)\n cap = cv2.VideoCapture(src)\n cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\n cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)\n\n server = GstServer(cap, \"/{!s}\".format(endpoint), verbose)\n\n loop = GObject.MainLoop()\n loop.run()", "def get_url(self):\n if not self.get_video_id():\n return ''\n \n return 'http://www.vimeo.com/%s' % self.get_video_id()", "def get_url(self):\n if not self.get_video_id():\n return ''\n \n return 'http://www.dailymotion.com/%s' % self.get_video_id()", "def get_thumbnail_url(self):\n if not self.id_video or not self.original_url or not self.xml_response:\n return ''\n return self.xml_response.find('framegrab_url').text", "def get_url(self):\n if not self.get_video_id():\n return ''\n \n if self.get_video_id() == -1:\n return self.original_url\n \n return 'http://www.slideshare.net/slideshow/embed_code/%s' % self.get_video_id()", "async def get_thumbnail_url(self) -> str:\n # Sometimes this date field comes back with a \"Z\" at the end\n # and sometimes it doesn't, so let's just safely remove it.\n camera_thumbnail_date = datetime.strptime(\n self.data[Attribute.CAMERA_THUMBNAIL_DATE].replace(\"Z\", \"\"),\n \"%Y-%m-%dT%H:%M:%S.%f\",\n )\n thumbnail_timestamp = int(camera_thumbnail_date.timestamp() * 1000)\n\n return await self.vivintskyapi.get_camera_thumbnail_url(\n self.alarm_panel.id,\n self.alarm_panel.partition_id,\n self.id,\n thumbnail_timestamp,\n )", "def webm_url(self) -> str:\n\t\treturn 'video.webm?oid={0}'.format(self._oid)", "def get_url(self):\n return self.metadata['thisRecordUrl']", "def get_thumbnail_url(self):\n if self.thumbnail_url:\n return self.thumbnail_url\n \n if not self.get_video_id():\n return ''\n \n if self.get_video_id() == -1:\n return ''\n \n if not self.thumbnail_url:\n thumb_url = self.res.get('slide_image_baseurl', '')\n thumb_suffix = self.res.get('slide_image_baseurl_suffix', '')\n if thumb_url and thumb_suffix:\n #hardcode: \"1\" means the slide that we want to show as thumbnail.\n # this case is slide number 1 of presentation.\n thumb_url = ''.join(['https:', thumb_url, '1', thumb_suffix])\n self.thumbnail_url = thumb_url\n \n return self.thumbnail_url", "def get_video_url(data):\n # type: (dict) -> Optional[str]\n resource = data.get(\"resources\", [{}])[0]\n url = resource.get(\"video_stream\") # try m3u8\n if not url: # try mp4\n files = resource.get(\"files\")[0]\n mp4 = get_mime_property(files, \"url\", \"video/mp4\")\n url = 
\"https:{}\".format(mp4) if mp4 and mp4.startswith(\"//\") else mp4\n if not url: # try x-video\n idx = get_mime_property(files, \"mediaObjectId\", \"application/x-video\")\n media = get_json(LOS_MEDIA_TEMPLATE.format(idx))\n derivative = media.get(\"mediaObject\").get(\"derivatives\")[0]\n url = \"https://{}/{}\".format(\n derivative.get(\"fqdn\"),\n derivative.get(\"derivativeMediaUrl\").replace(\"mp4:\", \"\"))\n return url", "def URL(self):\n return self._sourceurl", "def get_embed_url(self):\n if not self.original_url:\n return ''\n \n return 'https://vine.co/v/%s/embed/simple' % (self.get_video_id())", "def set_url(self, source_url):\n if utils.validate_url(source_url, \"rtsp\"):\n self.url = source_url\n self.set_state_null()\n self.setup_pipeline()\n self.play()\n else:\n print(\"Invalid URL\")", "def get_movie(self):\n\n return self.movie_url", "def get_video_id(self):\n \n if self.video_id:\n return self.video_id\n \n if not self.original_url:\n return ''\n \n p = urlparse.urlparse(self.original_url)\n params = cgi.parse_qs(p.query)\n \n if p.path.endswith('/video'):\n # url type http://www.livestream.com/xprize/video?clipId=pla_1a25a2ba-9ca4-4c3b-b1b1-ebd7d79ef6d2\n if 'clipId' in params:\n return params['clipId'][0]\n if p.path.startswith('/embed'):\n # url type http://cdn.livestream.com/embed/xprize?layout=4&amp;clip=pla_1a25a2ba-9ca4-4c3b-b1b1-ebd7d79ef6d2&amp;width=560&amp;autoplay=false\n if 'clip' in params:\n return params['clip'][0]\n \n return ''", "def mp4_url(self) -> str:\n\t\treturn 'video.mp4?oid={0}'.format(self._oid)", "def replay_url(self):\n if (\n self.cluster is None\n or self.steam_id is None\n or self.replay_salt is None\n ):\n return None\n else:\n return \"http://replay{0}.valve.net/570/{1}_{2}.dem.bz2\".format(\n self.cluster, self.steam_id, self.replay_salt\n )", "def _get_url(self):\n return 'http://{}:{}'.format(self.host, self.port)", "def get_url():\n config = configparser.RawConfigParser()\n config.read(\"speech.cfg\")\n region = config.get('auth', 'region')\n host = REGION_MAP[region]\n return (\n f\"wss://{host}/speech-to-text/api/v1/recognize\"\n \"?model=en-US_BroadbandModel&x-watson-learning-opt-out=true\"\n )", "def from_rtsp_stream(ip, port):\n url = f\"rtsp://{ip}:{port}/h264_pcm.sdp\"\n vcap = cv2.VideoCapture(url)\n while True:\n ret, frame = vcap.read()\n if ret == False:\n print(\"Frame is empty\")\n break\n else:\n cv2.imshow(\"VIDEO\", frame)\n cv2.waitKey(1)", "def url(self):\n url = self.url\n return url", "def get_url(self):\n return self.resource.url", "def get_embed_url(self):\n if not self.get_video_id() or not self.get_username():\n return ''\n \n return 'http://cdn.livestream.com/embed/%s?layout=4&amp;clip=%s' % (self.get_username(), self.get_video_id())", "def _get_url(self, video, live_session):\n return f\"/api/videos/{video.pk}/livesessions/{live_session.pk}/\"", "def get_embed_url(self):\n if not self.get_video_id():\n return ''\n \n return 'https://player.vimeo.com/video/%s' % self.get_video_id()", "def get_thumbnail_url(self):\n if self.thumbnail_url:\n return self.thumbnail_url\n \n if not self.get_video_id() or not self.get_username():\n return ''\n \n channel_formated = 'x%sx' % (self.get_username().replace('_', '-'))\n api_url = 'http://%s.api.channel.livestream.com/2.0/thumbnail.json?id=%s' % (channel_formated, self.get_video_id())\n \n res = self._oembed_request(api_url)\n thumbnail = res.get('thumbnail', {})\n self.thumbnail_url = thumbnail.get('@url', '')\n return self.thumbnail_url" ]
[ "0.79006195", "0.689613", "0.6728681", "0.63702255", "0.6178544", "0.61279833", "0.6083949", "0.6080557", "0.5965044", "0.575143", "0.5714059", "0.56403685", "0.5609133", "0.55965275", "0.5580069", "0.55688417", "0.5547634", "0.55053115", "0.55026656", "0.5496402", "0.5487867", "0.5469167", "0.54675555", "0.5456146", "0.5450938", "0.5447755", "0.54324573", "0.5416711", "0.54150707", "0.53764975" ]
0.6958374
1
Return the direct rtsp url for this camera, in HD if requested, if any.
async def get_direct_rtsp_url(self, hd: bool = False) -> str: return ( f"rtsp://{self.data[Attribute.USERNAME]}:{self.data[Attribute.PASSWORD]}@{self.ip_address}:{self.data[Attribute.CAMERA_IP_PORT]}/{self.data[Attribute.CAMERA_DIRECT_STREAM_PATH if hd else Attribute.CAMERA_DIRECT_STREAM_PATH_STANDARD]}" if self.data[Attribute.CAMERA_DIRECT_AVAILABLE] and self.data.get(Attribute.ACTUAL_TYPE) not in SKIP_DIRECT else None )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rtsp_stream_url(self) -> str:\n return self.properties.get(MessageField.RTSP_STREAM_URL.value)", "async def get_rtsp_url(self, internal: bool = False, hd: bool = False) -> str:\n credentials = await self.alarm_panel.get_panel_credentials()\n url = self.data[f\"c{'i' if internal else 'e'}u{'' if hd else 's'}\"][0]\n return f\"{url[:7]}{credentials[PanelCredentialAttribute.NAME]}:{credentials[PanelCredentialAttribute.PASSWORD]}@{url[7:]}\"", "def get_thumbnail_url(self):\n if not self.id_video or not self.original_url or not self.xml_response:\n return ''\n return self.xml_response.find('framegrab_url').text", "def get_url(self):\n if not self.get_video_id():\n return ''\n \n return 'http://www.dailymotion.com/%s' % self.get_video_id()", "def get_url(self):\n if not self.get_video_id() or not self.get_username():\n return ''\n \n return 'http://www.livestream.com/%s/video?clipId=%s' % (self.get_username(), self.get_video_id())", "def video_stream_url(self):\n return self._video_stream_url", "def get_thumb_url(self):\n return self.thumb_url", "def get_thumbnail_url(self):\n if self.thumbnail_url:\n return self.thumbnail_url\n \n if not self.get_video_id():\n return ''\n \n if self.get_video_id() == -1:\n return ''\n \n if not self.thumbnail_url:\n thumb_url = self.res.get('slide_image_baseurl', '')\n thumb_suffix = self.res.get('slide_image_baseurl_suffix', '')\n if thumb_url and thumb_suffix:\n #hardcode: \"1\" means the slide that we want to show as thumbnail.\n # this case is slide number 1 of presentation.\n thumb_url = ''.join(['https:', thumb_url, '1', thumb_suffix])\n self.thumbnail_url = thumb_url\n \n return self.thumbnail_url", "async def get_thumbnail_url(self) -> str:\n # Sometimes this date field comes back with a \"Z\" at the end\n # and sometimes it doesn't, so let's just safely remove it.\n camera_thumbnail_date = datetime.strptime(\n self.data[Attribute.CAMERA_THUMBNAIL_DATE].replace(\"Z\", \"\"),\n \"%Y-%m-%dT%H:%M:%S.%f\",\n )\n thumbnail_timestamp = int(camera_thumbnail_date.timestamp() * 1000)\n\n return await self.vivintskyapi.get_camera_thumbnail_url(\n self.alarm_panel.id,\n self.alarm_panel.partition_id,\n self.id,\n thumbnail_timestamp,\n )", "def get_url(self):\n return self.metadata['thisRecordUrl']", "def get_url(self):\n if not self.get_video_id():\n return ''\n \n if self.get_video_id() == -1:\n return self.original_url\n \n return 'http://www.slideshare.net/slideshow/embed_code/%s' % self.get_video_id()", "def thumbnail_url(self):\n return None", "def get_url(self):\n if not self.get_video_id():\n return ''\n \n return 'http://www.vimeo.com/%s' % self.get_video_id()", "def get_thumbnail_url():", "def get_thumbnail_url(self):\n if self.thumbnail_url:\n return self.thumbnail_url\n \n if not self.get_video_id():\n return ''\n \n if not self.thumbnail_url:\n api_url = 'https://api.dailymotion.com/video/%s?fields=thumbnail_url' % self.get_video_id()\n res = self._oembed_request(api_url)\n self.thumbnail_url = res.get('thumbnail_url', '')\n return self.thumbnail_url", "def get_vidurl(self):\n if self.assets is None:\n self.get_assets()\n \n df = self.assets\n des = df.loc[(df['container']==self.container) & (df['display_name']==self.resolution), 'url']\n if des.shape[0] == 1:\n self.vidurl = des.iloc[0].replace('.bin',f'.{self.container}')\n return self.vidurl", "def prepare_media_url(self, object):\n if object.media is not None:\n return os.path.join(settings.MEDIA_URL, object.media.media_file.name)\n else:\n return ''", "def media_image_url(self):\n\n if 
self._table.active_track:\n return self._table.active_track.get_thumbnail_url(Track.ThumbnailSize.LARGE)\n\n return super().media_image_url", "def rtsp(ctx, src, endpoint, verbose):\n import gi\n gi.require_version('Gst', '1.0')\n gi.require_version('GstRtspServer', '1.0')\n from gi.repository import Gst, GstRtspServer, GObject\n from ace.rtspserver import GstServer\n GObject.threads_init()\n Gst.init(None)\n\n if src.isdigit():\n src = int(src)\n cap = cv2.VideoCapture(src)\n cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\n cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)\n\n server = GstServer(cap, \"/{!s}\".format(endpoint), verbose)\n\n loop = GObject.MainLoop()\n loop.run()", "def thumbnail(self):\n return self.get_thumbnail_url()", "def mpd_url(self):\n # type: () -> string_types\n return self._mpd_url", "def get_video_id(self):\n \n if self.video_id:\n return self.video_id\n \n if not self.original_url:\n return ''\n \n p = urlparse.urlparse(self.original_url)\n params = cgi.parse_qs(p.query)\n \n if p.path.endswith('/video'):\n # url type http://www.livestream.com/xprize/video?clipId=pla_1a25a2ba-9ca4-4c3b-b1b1-ebd7d79ef6d2\n if 'clipId' in params:\n return params['clipId'][0]\n if p.path.startswith('/embed'):\n # url type http://cdn.livestream.com/embed/xprize?layout=4&amp;clip=pla_1a25a2ba-9ca4-4c3b-b1b1-ebd7d79ef6d2&amp;width=560&amp;autoplay=false\n if 'clip' in params:\n return params['clip'][0]\n \n return ''", "def get_thumbnail_url(self):\n \n if self.thumbnail_url:\n return self.thumbnail_url\n \n if not self.get_video_id():\n return ''\n \n api_url = 'http://vimeo.com/api/v2/video/%s.json' % self.get_video_id()\n try:\n res = self._oembed_request(api_url)[0]\n except KeyError:\n return ''\n self.thumbnail_url = res.get('thumbnail_large', '')\n return self.thumbnail_url", "def get_thumbnail_url(self):\n return self.thumbnail_url", "def is_rtsp_enabled(self) -> bool:\n return False if self.is_rtsp_supported is False else self.properties.get(MessageField.RTSP_STREAM.value)", "def media_image_url(self):\n return self.coordinator.data.nowplaying[self.zone.SourceID].CurrSong.ArtworkURI", "def get_thumbnail_url(self):\n if self.thumbnail_url:\n return self.thumbnail_url\n \n if not self.get_video_id():\n return ''\n \n vine_url = self.get_url()\n res = self._http_request(vine_url)\n m = re.search(r'property=\"og:image\" content=\"(?P<thumbnail>[^\"]*)\"', res)\n if m and m.groupdict():\n self.thumbnail_url = m.groupdict().get('thumbnail') or ''\n \n return self.thumbnail_url", "def getParentDeviceUrl(self):\n url = \"\"\n dev = self.device()\n if dev: url = dev.absolute_url_path()\n return url", "def get_video_url(data):\n # type: (dict) -> Optional[str]\n resource = data.get(\"resources\", [{}])[0]\n url = resource.get(\"video_stream\") # try m3u8\n if not url: # try mp4\n files = resource.get(\"files\")[0]\n mp4 = get_mime_property(files, \"url\", \"video/mp4\")\n url = \"https:{}\".format(mp4) if mp4 and mp4.startswith(\"//\") else mp4\n if not url: # try x-video\n idx = get_mime_property(files, \"mediaObjectId\", \"application/x-video\")\n media = get_json(LOS_MEDIA_TEMPLATE.format(idx))\n derivative = media.get(\"mediaObject\").get(\"derivatives\")[0]\n url = \"https://{}/{}\".format(\n derivative.get(\"fqdn\"),\n derivative.get(\"derivativeMediaUrl\").replace(\"mp4:\", \"\"))\n return url", "def thumbnail_url_if_set(self):\n progress_url = settings.GALLERY_VIDEO_THUMBNAIL_PROGRESS_URL\n return self.thumbnail.url if self.thumbnail else progress_url" ]
[ "0.72032493", "0.68680316", "0.61045647", "0.60359323", "0.6011566", "0.5905714", "0.5760486", "0.56779563", "0.5652585", "0.56033957", "0.55442023", "0.5505961", "0.5492464", "0.5490018", "0.5473845", "0.5441808", "0.5429904", "0.54259044", "0.54249716", "0.53899205", "0.53451777", "0.53444713", "0.5305086", "0.5268251", "0.5238109", "0.5231287", "0.52305233", "0.52290976", "0.5228565", "0.5221472" ]
0.76972985
0
Handle a pubnub message addressed to this camera.
def handle_pubnub_message(self, message: dict) -> None: super().handle_pubnub_message(message) event = None if message.get(Attribute.CAMERA_THUMBNAIL_DATE): event = THUMBNAIL_READY elif message.get(Attribute.DING_DONG): event = DOORBELL_DING elif message.keys() == set([Attribute.ID, Attribute.TYPE]): event = VIDEO_READY elif message.get(Attribute.VISITOR_DETECTED) or message.keys() in [ set([Attribute.ID, Attribute.ACTUAL_TYPE, Attribute.STATE]), set([Attribute.ID, Attribute.DETER_ON_DUTY, Attribute.TYPE]), ]: event = MOTION_DETECTED if event is not None: self.emit(event, {"message": message}) _LOGGER.debug("Message received by %s: %s", self.name, message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _handle_message(self, msg):\n self.event('message', msg)", "def handle(self, message):\n print(\"You received a message:\")\n print(message)\n # Overwrite this function to do something with the message!", "def received_message(self, m):\n self.receiver.handle_message(m)", "def handle_message(self, msg):\n pass", "def handle_msg(msg):\n if comm._msg_callback:\n comm._msg_callback(msg)", "def handle_message(self, validated_message: dict):\n self.logger.debug(f'Sensor received message {validated_message}')\n if (validated_message['messageType'] !=\n model.MessageTypes.Control.value):\n self.logger.debug(\n 'Sensor ignoring because messageType was not control'\n )\n return\n if validated_message['messageBody']['target'] != self.component_id:\n self.logger.debug(\n 'Sensor ignoring because not targeted at me'\n )\n return\n\n subtype = validated_message['messageSubtype']\n try:\n self.logger.debug(f'Dispatching message with subtype {subtype}')\n self.message_handler_table[subtype](validated_message)\n except KeyError:\n self.logger.warning(f'No handler for with subtype {subtype}')\n pass", "def handle_message(self, message):", "def on_message(self, userdata, message):\n logging.debug(f\"Message arrived from {message.topic}\")\n self.process(userdata, message)", "def on_pubmsg(self, raw_msg, source, msg, **kwargs):", "def handleMessage(msg):", "def onMessage(self, message):\n raise NotImplementedError", "def handle(self, message):", "def receive_message(self, message):", "def on_message(self, message):\n log.debug(\"Protocol got message {message}\", message=message)\n if message['type'] == \"change\":\n self.handler.process_packet(message['packet'])\n self.send_packet()\n elif message['type'] == \"chat\":\n self.on_chat_message(message)\n elif message['type'] == \"action\":\n self.on_action(message)\n else:\n log.warn(\"Unrecognized message type {type}\", type=message['type'])", "def handle_message(self, data, channel):\n pass", "def _handle_message(self, bus, message):\n if message.type == Gst.MessageType.EOS:\n logger.info(\"End-Of-Stream reached.\\n\")\n # file finished playing\n self.pipeline.set_state(Gst.State.NULL)\n #self.playing = False\n # if self.finished_callback:\n # self.finished_callback()\n \n elif message.type == Gst.MessageType.ERROR:\n # error\n self.pipeline.set_state(Gst.State.NULL)\n err, debug_info = message.parse_error()\n logger.error(f\"Error received from element {message.src.get_name()}: {err.message}\\n\")\n logger.error(f\"Debugging information: {debug_info if debug_info else 'none'}\\n\")\n #self.playing = False \n elif message.type == Gst.MessageType.STATE_CHANGED:\n # We are only interested in state-changed messages from the pipeline\n if message.src == self.pipeline:\n old_state, new_state, pending_state = message.parse_state_changed()\n logger.info(f\"Pipeline state changed from {Gst.Element.state_get_name(old_state)} to {Gst.Element.state_get_name(new_state)}:\\n\")", "def _on_message(self, raw_msg):\n strmsg = raw_msg.decode()\n msg = json.loads(strmsg)\n\n print(msg)\n\n if self._handlers.get(msg['msgid']):\n for handler in self._handlers[msg['msgid']]:\n handler.handle(msg)", "def on_message(data):\n pass", "def on_message(self, message):\n\n # Start an infinite loop when this is called\n if message == \"read_camera\":\n self.camera_loop = PeriodicCallback(self.loop, 10)\n self.camera_loop.start()\n\n # Extensibility for other methods\n else:\n print(\"Unsupported function: \" + message)", "def message_callback(self, message):\n pass", "def 
on_message(self, message):\n print \"Client %s received a message : %s\" % (self.id, message)", "def processReceivedMessage(iTag, clsName, msgID, msg): #@NoSelf", "def handle_msg(self, state_id, msg):\n pass", "def processMessage(self, *args, **kwargs):\r\n pass", "def onMessage(self, msg, binary):\r\n self._assembler.processMessage(msg, binary)", "async def chat_message(self, event):\n\n print(\"PublicChatConsumer\", \"chat_message from user\", event[\"user_id\"])\n await self.send_json({\n \"msg_type\": MSG_TYPE_MESSAGE,\n \"profile_image\": event[\"profile_image\"],\n \"username\": event[\"username\"],\n \"user_id\": event[\"user_id\"],\n \"message\": event[\"message\"],\n \"natural_timestamp\": humanize_or_normal(timezone.now())\n })", "def handle_message(self, data):\n message = Message.from_text(data)\n if message is not None:\n print(message.username, message.action, message.channel, message.content)\n self._callback(\"message\", message) # TODO: add additional callbacks", "def callback_message(self, message):\n pass", "def callback_message(self, message):\n pass", "def receive_message(self, message):\r\n return" ]
[ "0.7261088", "0.6893499", "0.689115", "0.67964315", "0.6787809", "0.6784308", "0.67636156", "0.6758844", "0.67415947", "0.6718704", "0.67065203", "0.66710377", "0.66152173", "0.6600205", "0.65741396", "0.65554744", "0.6550199", "0.6538151", "0.6529823", "0.65268636", "0.6517526", "0.6498686", "0.64822453", "0.6478778", "0.6472553", "0.6454479", "0.6450408", "0.64501387", "0.64501387", "0.6436912" ]
0.7852151
0
Return all .js files in the project root folder The project file is not included.
def get_all_js_files(self, root): res = [] for fname in os.listdir(root): mo = re.match(r'(\w+)\.js$', fname) if mo: res.append({ 'name': mo.group(1), 'src': file_contents(os.path.join(root, mo.group())) }) return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collect_project_source_files():\n source_files = glob.glob(PROJECT_SOURCE_FILES_FOLDER + '/**/*.py', recursive=True)\n # Insert root main.py at the beginning.\n source_files.insert(0, os.path.join(PROJECT_ROOT_FOLDER, 'main.py'))\n return list(map(lambda path: posixpath.join(*path.split('\\\\')), source_files))", "def get_js_files(directories):\n # Initialize key variables\n result = []\n # iterate through files in directories\n for d in directories:\n for root, _, files in os.walk(d, topdown=False):\n for name in files:\n # append files with .js extension\n if name.endswith('.js'):\n result.append(os.path.join(root, name))\n\n return result", "def _get_bulma_js() -> List[str]:\n return list(get_js_files())", "def coffeescript_files():\r\n dirs = \" \".join(THEME_COFFEE_PATHS + [Env.REPO_ROOT / coffee_dir for coffee_dir in COFFEE_DIRS])\r\n return cmd('find', dirs, '-type f', '-name \\\"*.coffee\\\"')", "def list_files(self):\n re_css = re.compile(r'\\.css$')\n re_js = re.compile(r'\\.js$')\n re_adminlte2 = re.compile(r'adminlte2')\n file_list = []\n print \"static path is %s\" % self.static_path\n for dirpath, _, files in os.walk(self.static_path):\n if not re_adminlte2.search(dirpath):\n for name in files:\n if re_css.search(name) or re_js.search(name):\n file_list.append(os.path.join(dirpath, name))\n return file_list", "def js():\n with lcd(BASEDIR):\n js_ext = (\n 'submodules/jquery-cookie/src/jquery.cookie.js',\n 'submodules/jquery-treegrid/js/jquery.treegrid.js',\n 'submodules/bootstrap/dist/js/bootstrap.js',\n )\n js_own = (\n 'js/variables.js',\n 'js/bmf-autocomplete.js',\n 'js/bmf-calendar.js',\n 'js/bmf-editform.js',\n 'js/bmf-inlineform.js',\n 'js/bmf-buildform.js',\n 'js/menu.js',\n )\n\n local('cp submodules/bootstrap/dist/js/bootstrap.min.js djangobmf/static/djangobmf/js/')\n local('yui-compressor --type js -o djangobmf/static/djangobmf/js/jquery.cookie.min.js submodules/jquery-cookie/src/jquery.cookie.js')\n local('yui-compressor --type js -o djangobmf/static/djangobmf/js/jquery.treegrid.min.js submodules/jquery-treegrid/js/jquery.treegrid.js')\n\n local('cat %s > djangobmf/static/djangobmf/js/djangobmf.js' % ' '.join(js_ext + js_own))\n local('yui-compressor --type js -o djangobmf/static/djangobmf/js/djangobmf.min.js djangobmf/static/djangobmf/js/djangobmf.js')\n local('cat %s > djangobmf/static/djangobmf/js/djangobmf.js' % ' '.join(js_own))", "def get_js(directory):\n\n composed = fu.lcompose([\n partial(get_path_files_with_ext, '.js'),\n fu.fmap(fu.file_to_str),\n '\\n'.join,\n ])\n return composed(directory)", "def compile_files(root):\n files = [os.path.join(root, f) for f in os.listdir(root) if not f.startswith(\".\")]\n \n return files", "def get_project_files():\n if is_git_project() and has_git():\n return get_git_project_files()\n\n project_files = []\n for top, subdirs, files in os.walk('.'):\n for subdir in subdirs:\n if subdir.startswith('.'):\n subdirs.remove(subdir)\n\n for f in files:\n if f.startswith('.'):\n continue\n project_files.append(os.path.join(top, f))\n\n return project_files", "def get_my_files():\n return [file for file in os.listdir(os.getcwd()) if os.path.isfile(file)]", "def project_root_files():\n return [\"parent_workflow.wdl\"]", "def process_js():\n source_paths = [\n os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/js/admin.js'),\n os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/js/app.js'),\n os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/js/footnotes.js'),\n os.path.join(settings.BASE_DIR, 
'themes/CMESH/assets/js/table_of_contents.js'),\n os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/js/text_resize.js'),\n os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/js/toastr.js'),\n ]\n dest_path = os.path.join(settings.BASE_DIR, 'static/CMESH/js/app.js')\n min_path = os.path.join(settings.BASE_DIR, 'static/CMESH/js/app.min.js')\n\n process_js_files(source_paths, dest_path, min_path)", "def copy_js(self):\n # Compiled JS files for copying\n js_dist_dir = os.path.join(node_root, 'dist', 'pydeck_embeddable')\n # Uncompiled JS files for copying\n # See https://github.com/jupyter-widgets/widget-ts-cookiecutter/blob/master/%7B%7Bcookiecutter.github_project_name%7D%7D/%7B%7Bcookiecutter.python_package_name%7D%7D/nbextension/static/extension.js\n js_src_dir = os.path.join(node_root, 'src')\n js_files = [\n os.path.join(js_src_dir, 'extension.js'),\n os.path.join(js_dist_dir, 'index.js'),\n os.path.join(js_dist_dir, 'index.js.map')\n ]\n static_folder = os.path.join(here, 'pydeck', 'nbextension', 'static')\n for js_file in js_files:\n log.debug('Copying %s to %s' % (js_file, static_folder))\n copy(js_file, static_folder)", "def _get_files(self):\n # pylint: disable=unused-variable\n for dirpath, __, filenames in os.walk(self.start_location):\n for file_ in filenames:\n if file_.endswith('.py'):\n yield \"{0}{1}\".format(dirpath, file_)", "def get_source_files(self):\n return [\n path.as_posix()\n for path in _Path(self.src_dir).rglob(\"*\")\n if not path.is_dir()\n ] + [\n (path / \"CMakeLists.txt\").as_posix()\n for path in _PurePath(self.src_dir).parents\n ]", "def get_all_messages_from_js_files(app_name=None):\n\tmessages = []\n\tfor app in [app_name] if app_name else frappe.get_installed_apps(_ensure_on_bench=True):\n\t\tif os.path.exists(frappe.get_app_path(app, \"public\")):\n\t\t\tfor basepath, folders, files in os.walk(frappe.get_app_path(app, \"public\")):\n\t\t\t\tif \"frappe/public/js/lib\" in basepath:\n\t\t\t\t\tcontinue\n\n\t\t\t\tfor fname in files:\n\t\t\t\t\tif fname.endswith(\".js\") or fname.endswith(\".html\") or fname.endswith(\".vue\"):\n\t\t\t\t\t\tmessages.extend(get_messages_from_file(os.path.join(basepath, fname)))\n\n\treturn messages", "def js(filepath):\n return static_file(filepath, root=\"public\")", "def get_scripts():\n scripts = []\n if os.path.isdir('bin'):\n scripts = [ fname for fname in glob.glob(os.path.join('bin', '*'))\n if not os.path.basename(fname).endswith('.rst') ]\n return scripts", "def files_serve(path):\n return flask.send_from_directory(\"static/js\", path)", "def get_all_messages_from_js_files(app_name=None):\n\tmessages = []\n\tfor app in ([app_name] if app_name else frappe.get_installed_apps()):\n\t\tif os.path.exists(frappe.get_app_path(app, \"public\")):\n\t\t\tfor basepath, dummy, files in os.walk(frappe.get_app_path(app, \"public\")):\n\t\t\t\tif \"frappe/public/js/lib\" in basepath:\n\t\t\t\t\tcontinue\n\n\t\t\t\tif os.path.isfile(frappe.get_app_path(app, \"public/build.json\")):\n\t\t\t\t\twith open(frappe.get_app_path(app, \"public/build.json\"), 'r') as f:\n\t\t\t\t\t\tbuilt_files = json.loads(f.read())\n\t\t\t\t\t\tbuilt_files = reduce(lambda a,b: a.extend(b) or a, list(built_files.values()), [])\n\n\t\t\t\tfor fname in files:\n\t\t\t\t\tif fname not in built_files and (fname.endswith(\".js\") or fname.endswith(\".html\") or fname.endswith(\".vue\")):\n\t\t\t\t\t\tmessages.extend(get_messages_from_file(os.path.join(basepath, fname)))\n\n\treturn messages", "def get_scripts():\n scripts = []\n if os.path.isdir('bin'):\n 
scripts = [fname for fname in glob.glob(os.path.join('bin', '*'))\n if not os.path.basename(fname).endswith('.rst')]\n return scripts", "def assemble_simplyjs_sources(project, base_dir, build_result):\n source_files = project.source_files.all()\n shutil.rmtree(base_dir)\n shutil.copytree(settings.SIMPLYJS_ROOT, base_dir)\n\n js = '\\n\\n'.join(x.get_contents() for x in source_files if x.file_name.endswith('.js'))\n escaped_js = json.dumps(js)\n build_result.save_simplyjs(js)\n\n with open(os.path.join(base_dir, 'src', 'js', 'zzz_userscript.js'), 'w') as f:\n f.write(\"\"\"\n (function() {\n simply.mainScriptSource = %s;\n })();\n \"\"\" % escaped_js)", "def xmodule_js_files(request):\r\n urls = get_xmodule_urls()\r\n return HttpResponse(json.dumps(urls), content_type=\"application/json\")", "def get_source_files(self):\n return zip(*self.distribution.scripts)[0]", "def get_all_files(cwd):\n return os.listdir(cwd)", "def get_all_files(self):\n\t\tfiles_list = []\n\t\tfor path, subdirs, files in os.walk(self.root):\n\t\t for name in files:\n\t\t \tfiles_list.append(os.path.join(self.root, name))\n\t\treturn files_list[0:-1]", "def relative_to_buildroot(self):\n return [os.path.join(self.rel_path, source) for source in self.source_paths]", "def get_files(self) -> list:\n files = []\n for file in os.listdir(self.root):\n if file.endswith(f\".{self.suffix}\"):\n files.append(os.path.join(self.root, file))\n return files", "def project_files(self):\n if not self._project_files:\n self._project_file_blobs()\n return self._project_files", "def getFiles(searchDir = './', extension = 'source'):\n from glob import glob \n\n return glob(searchDir+'/*.'+extension)" ]
[ "0.7026188", "0.663383", "0.66265917", "0.6534242", "0.6475579", "0.6318005", "0.62322176", "0.62103647", "0.62050897", "0.61848503", "0.618283", "0.6143342", "0.6108959", "0.60054374", "0.59731764", "0.59707844", "0.5969685", "0.5961811", "0.59470624", "0.59345275", "0.5928488", "0.59234047", "0.5913168", "0.59117293", "0.59005344", "0.58849955", "0.58735454", "0.58679897", "0.583011", "0.5774731" ]
0.7880714
0
if we've got a cropping annotation for the given fieldname and scale, set self._rescale to False, to prevent plone.app.imaging traverser to overwrite our cropped scale since the self.modified() method does not know about the currently requested scale name, we need to use the _rescale property
def _need_rescale(self, fieldname, scale): cropped = IAnnotations(self.context).get(PAI_STORAGE_KEY) if cropped and '%s_%s' % (fieldname, scale) in cropped: self._allow_rescale = False else: self._allow_rescale = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _crop(self, fieldname, scale, box):\n croputils = IImageCroppingUtils(self.context)\n data = croputils.get_image_data(fieldname)\n\n original_file = StringIO(data)\n image = PIL.Image.open(original_file)\n image_format = image.format or self.DEFAULT_FORMAT\n\n cropped_image = image.crop(box)\n cropped_image_file = StringIO()\n cropped_image.save(cropped_image_file, image_format, quality=100)\n cropped_image_file.seek(0)\n\n croputils.save_cropped(fieldname, scale, cropped_image_file)\n\n # store crop information in annotations\n self._store(fieldname, scale, box)\n\n # Purge caches if needed\n notify(Purge(self.context))", "def modified(self):\n if self._allow_rescale:\n return super(NamedfileImageScaling, self).modified()\n else:\n return 1", "def modified(self):\n if self._allow_rescale:\n return super(ImageScaling, self).modified()\n else:\n return 1", "def set_cropping(self, crop=True):\n self._crop = crop\n self._final = None # Force rebuild", "def rescale_intrinsic(self):\n # scale focal length and principal points wrt image resizeing\n if self.downscale > 1:\n self.K = self.K_orig.copy()\n self.K[0, 0] /= float(self.downscale)\n self.K[1, 1] /= float(self.downscale)\n self.K[0, 2] /= float(self.downscale)\n self.K[1, 2] /= float(self.downscale)\n self.intrinsic = self.K\n else:\n self.K = self.intrinsic = self.K_orig.copy()", "def reset_limits(self):\n self.autoscale = True\n self.camera.autoscale()", "def reset_limits(self):\n self.autoscale = True\n self.pixels.autoscale()", "def Rescale(self):\r\n picWidth,picHeight = self.oldSize = self.GetSizeTuple()\r\n bitmap = self.scaled = self.bitmap\r\n if not bitmap: return\r\n imgWidth,imgHeight = bitmap.GetWidth(),bitmap.GetHeight()\r\n if self.scaling == 2 or (self.scaling == 1 and (imgWidth > picWidth or imgHeight > picHeight)):\r\n image = bitmap.ConvertToImage()\r\n factor = min(1.0*picWidth/imgWidth,1.0*picHeight/imgHeight)\r\n newWidth,newHeight = int(factor*imgWidth),int(factor*imgHeight)\r\n self.scaled = image.Scale(newWidth,newHeight).ConvertToBitmap()\r\n #self.scaled = image.Scale(newWidth,newHeight,wx.IMAGE_QUALITY_HIGH ).ConvertToBitmap()\r", "def update_rescale_entry(self):\n if self.var_rescale_frame.get() == 0:\n self.checkbox_rescale_frame[\"text\"] = \"Rescale Frames\"\n self.rescale_factor_entry.config(state=\"disabled\")\n elif self.var_rescale_frame.get() == 1:\n self.checkbox_rescale_frame[\"text\"] = \"By a factor of: \"\n self.rescale_factor_entry.config(state=\"normal\")", "def rescale(self, img):\n\n if self.scale != 1:\n return imutils.resize(img, width=int(img.shape[1] * self.scale))\n else:\n return img", "def force_rescale(self,rescaleFactor):\n if not self.built:\n raise Exception(\"model should be built before calling this function\")\n for l in self.layerList:\n l.rescale(rescaleFactor)\n self.rescaleFactor.assign(rescaleFactor)", "def setDoRescale(self, value):\n return self._set(doRescale=value)", "def testDefaultDataScalingAvoidsCropping(self):\n orig_scale = util.ScaleData\n util.ScaleData = self.FakeScale\n try:\n self.AddToChart(self.chart, [1, 6])\n # This causes scaling to happen & calls FakeScale.\n self.chart.display.Url(0, 0)\n buffer = 5 * self.chart.auto_scale.buffer\n self.assertEqual(1 - buffer, self.min)\n self.assertEqual(6 + buffer, self.max)\n finally:\n util.ScaleData = orig_scale", "def _scale(self, image):\n\n if image.GetWidth() != self._width or image.GetHeight()!= self._height:\n image.Rescale(self._width, self._height)\n \n return image", "def with_scale_op(self, 
scale):\n\t\tself.variables['scale'] = scale\n\t\treturn self", "def _reset(self):\n\n # Checking one attribute is enough, becase they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_", "def _reset(self):\n\n # Checking one attribute is enough, becase they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_", "def rescale(self):\n # forecast on real data, don't need this anymore\n pass", "def _adjust_scale(self, value):\n if self._min_val <= value <= self._max_val:\n self._scale_var.set(value)\n self.update_label_text()", "def scale(self, scale):\n\n self._scale = scale", "def scale(self, scale):\n\n self._scale = scale", "def b_scale_object():\n \n bpy.ops.transform.resize(value=(7.5,1,1), constraint_axis=(True,False,False))\n bpy.ops.transform.resize(value=(1,7.5,1), constraint_axis=(False,True,False))\n bpy.ops.transform.resize(value=(1,1,3.5), constraint_axis=(False,False,True))\n bpy.ops.object.transform_apply(scale=True)", "def reset_scale(self) -> None:\n self._scale.set(self._start_val)", "def change_zoom(self, b):\n\n x_mid = int(self.ff[0].info['xres'] / 2)\n y_mid = int(self.ff[0].info['yres'] / 2)\n\n x = x_mid - self.x_crop_slider.value\n\n if self.y_crop.value is True:\n y = y_mid - self.y_crop_slider.value\n else:\n y = y_mid - self.x_crop_slider.value\n\n x0 = x_mid - x\n x1 = x_mid + x\n y0 = y_mid - y\n y1 = y_mid + y\n\n self.x_range = [x0, x1]\n self.y_range = [y0, y1]\n\n self.ax.set_xlim([x0, x1])\n self.ax.set_ylim([y0, y1])", "def scale(self):", "def scale(self, scale):\n self.coords = self.coords * scale\n return self", "def shell_scale_to_fit_changed(self, scale_to_fit):\n self.set_scale_to_fit(scale_to_fit)", "def shell_scale_to_fit_changed(self, scale_to_fit):\n self.set_scale_to_fit(scale_to_fit)", "def update_axis_scale(self, scale, axis='left'):\n self.plt.getAxis(axis).setScale(scale=scale)", "def save_form_data(self, instance, data):\r\n if data and isinstance(data, UploadedFile):\r\n # A new file is being uploaded. So delete the old one.\r\n remove_model_image(instance, 'image')\r\n super(CampaignImageField, self).save_form_data(instance, data)\r\n instance._create_resized_images(raw_field=data, save=False)" ]
[ "0.7229034", "0.6486539", "0.63547015", "0.6094969", "0.5937726", "0.5919167", "0.5832441", "0.5809148", "0.57294387", "0.56831604", "0.5682105", "0.55893314", "0.557489", "0.55414176", "0.55127174", "0.5507968", "0.5507968", "0.5496308", "0.54725057", "0.54692656", "0.54692656", "0.54660183", "0.54051715", "0.5395724", "0.53924507", "0.5386554", "0.537144", "0.537144", "0.5332521", "0.53268987" ]
0.87613535
0
we overwrite the default method that would return the modification time of the context, to return a way back modification time in case the currently requested scale is a cropped scale. (so plone.scale does not create a new scale w/o cropping information
def modified(self): if self._allow_rescale: return super(ImageScaling, self).modified() else: return 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modified(self):\n if self._allow_rescale:\n return super(NamedfileImageScaling, self).modified()\n else:\n return 1", "def get_modified_time(self, name):\n raise NotImplementedError(\n \"subclasses of Storage must provide a get_modified_time() method\"\n )", "def get_mtime(self):\n storage = getattr(self._file, \"storage\", None)\n if storage:\n return storage.modified_time(self._file.name)\n return super(FileAsset, self).get_mtime()", "def getLastModifiedTime(self): #$NON-NLS-1$\r", "def scale(self):\n return self._scale", "def modified(self) -> datetime:\n # TODO: Should this be overridden for LocalDirectoryAsset?\n return datetime.fromtimestamp(self.filepath.stat().st_mtime).astimezone()", "def convertTimeAndExtent(self):\n return _libsbml.Submodel_convertTimeAndExtent(self)", "def GetScale(self):\n ...", "def get_created_time(self, name):\n raise NotImplementedError(\n \"subclasses of Storage must provide a get_created_time() method\"\n )", "def scale(self):", "def mtime(self):\n if self.source_file is None:\n return None\n if self._mtime is None:\n try:\n try: # Locally stored file\n mtime = os.path.getmtime(self.source_file.path)\n self._mtime = int(mtime)\n except NotImplementedError: # AWS S3 storage\n key = self.source_file.file.key\n modified = boto.utils.parse_ts(key.last_modified)\n self._mtime = int(modified.strftime('%s'))\n except (FileNotFoundError, AttributeError):\n self._mtime = int(timezone.now().strftime('%s'))\n return self._mtime", "def test_patch_namespaced_scale_scale(self):\n pass", "def get_transform_value(self, *args, **kwargs):\n if self.compute_only_full and not self.full:\n return None\n\n #################################################\n # Determine whether we should call the transform\n # 0. Support historical/legacy usage of '0' signaling,\n # 'update on every bar'\n if self.refresh_period == 0:\n period_signals_update = True\n else:\n # 1. Is the refresh period over?\n period_signals_update = (\n self.trading_days_total % self.refresh_period == 0)\n # 2. Have the args or kwargs been changed since last time?\n args_updated = args != self.last_args or kwargs != self.last_kwargs\n # 3. 
Is this a downsampled batch, and is the last event mkt close?\n downsample_ready = not self.downsample or \\\n self.last_dt == self.mkt_close\n\n recalculate_needed = downsample_ready and \\\n (args_updated or (period_signals_update and self.updated))\n ###################################################\n\n if recalculate_needed:\n self.cached = self.compute_transform_value(\n self.get_data(),\n *args,\n **kwargs\n )\n\n self.last_args = args\n self.last_kwargs = kwargs\n return self.cached", "def get_zoom_transform(self):\n return self.zoom_levels[self.cur_zoom][1]", "def _need_rescale(self, fieldname, scale):\n cropped = IAnnotations(self.context).get(PAI_STORAGE_KEY)\n if cropped and '%s_%s' % (fieldname, scale) in cropped:\n self._allow_rescale = False\n else:\n self._allow_rescale = True", "def get_mtime(self):\n if settings.DEBUG:\n return os.path.getmtime(self.get_path())\n return staticfiles_storage.modified_time(self.get_name())", "def _get_access_time(self):\n return self.__access_time", "def get_accessed_time(self, name):\n raise NotImplementedError(\n \"subclasses of Storage must provide a get_accessed_time() method\"\n )", "def ModifyTime(self):\n if self.force_auto_sync:\n self.get('ModifyTime')\n return self._ModifyTime", "def scaling_object(self):\n return self.__scaling_object", "def collection_timestamp(self, collection_id, parent_id, auth=None):\n tb = Timestamps.__table__\n qry = select([label('last_modified', func.max(tb.c.last_modified))]).where(and_(\n tb.c.parent_id == parent_id,\n tb.c.collection_id == collection_id))\n last_modified, = Session.execute(qry).fetchone()\n if last_modified is None:\n last_modified = datetime.datetime.utcnow()\n with transaction.manager:\n Session.add(Timestamps(parent_id=parent_id, collection_id=collection_id,\n last_modified=last_modified))\n return last_modified.replace(tzinfo=datetime.timezone.utc).timestamp()", "def last_modified(self) -> str:\n\t\tif self.name == \"\":\n\t\t\tif \"last_modified\" in self.ds._file[\"/matrix\"].attrs:\n\t\t\t\treturn self.ds._file[\"/matrix\"].attrs[\"last_modified\"]\n\t\t\telif self.ds._file.mode == 'r+':\n\t\t\t\tself.ds._file[\"/matrix\"].attrs[\"last_modified\"] = timestamp()\n\t\t\t\tself.ds._file.flush()\n\t\t\t\treturn self.ds._file[\"/matrix\"].attrs[\"last_modified\"]\n\n\t\tif self.name != \"\":\n\t\t\tif \"last_modified\" in self.ds._file[\"/layers/\" + self.name].attrs:\n\t\t\t\treturn self.ds._file[\"/layers/\" + self.name].attrs[\"last_modified\"]\n\t\t\telif self.ds._file.mode == 'r+':\n\t\t\t\tself.ds._file[\"/layers/\" + self.name].attrs[\"last_modified\"] = timestamp()\n\t\t\t\tself.ds._file.flush()\n\t\t\t\treturn self.ds._file[\"/layers/\" + self.name].attrs[\"last_modified\"]\n\n\t\treturn timestamp()", "def update_time_base(self, event):\n print(\"TimeBase.update_time_base()\")\n print(\"Base de temps : \", self.scale_T.get())\n if not isinstance(self.parent, Tk):\n self.parent.update_time(self.scale_T.get())", "def apply_model(self, original, t1, t2, resolution_scaling_factor=1):\n img = Image()\n img.time_stamp = t2\n\n if t1 == t2:\n img.initialize_with_image(original)\n return img\n\n calc_shift_fnc = self.calculate_shift\n orig_get_fnc = original.get\n interp_fnc = my_math.linear_interpolation\n\n def generate(y, x):\n \"\"\"Function describing the transformed image\"\"\"\n realy = y / resolution_scaling_factor\n realx = x / resolution_scaling_factor\n\n # move to time t2\n posy = y + calc_shift_fnc(realy, realx, t2, 0) - \\\n calc_shift_fnc(realy, realx, t1, 0)\n posx 
= x + calc_shift_fnc(realy, realx, t2, 1) - \\\n calc_shift_fnc(realy, realx, t1, 1)\n\n x_left = int(posx) # math.floor(pos[0])\n x_right = x_left + 1 # math.ceil(pos[0])\n y_down = int(posy) # math.floor(pos[1])\n y_up = y_down + 1 # math.ceil(pos[1])\n\n v11 = orig_get_fnc(y_down, x_left, resolution_scaling_factor)\n v12 = orig_get_fnc(y_down, x_right, resolution_scaling_factor)\n v21 = orig_get_fnc(y_up, x_left, resolution_scaling_factor)\n v22 = orig_get_fnc(y_up, x_right, resolution_scaling_factor)\n\n return interp_fnc(y_down, x_left, y_up, x_right, v11, v12, v21, v22,\n posy, posx)\n\n img.image_data = np.fromfunction(np.vectorize(generate),\n (original.shape()[0]*resolution_scaling_factor,\n original.shape()[1]*resolution_scaling_factor))\n\n if resolution_scaling_factor != 1:\n img.image_data = skimage.transform.resize(img.image_data,\n original.shape(),\n preserve_range=True)\n\n return img", "def clipEditorCurrentTimeCtx(*args, exists: bool=True, history: bool=True, image1: Union[AnyStr,\n bool]=\"\", image2: Union[AnyStr, bool]=\"\", image3: Union[AnyStr,\n bool]=\"\", name: AnyStr=\"\", q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def get_mod_time(self):\n if self.file_meta[:2] == b'bp':\n file_meta_plist = ccl_bplist.load(BytesIO(self.file_meta))\n raw_date_time = file_meta_plist['$objects'][1]['LastModified']\n converted_time = datetime.datetime.fromtimestamp(raw_date_time)\n converted_time = converted_time.timetuple()\n return converted_time\n else:\n file_meta_plist = plistlib.loads(self.file_meta)\n return file_meta_plist['modified'].timetuple()", "def getScaleKeyTime(self, index, view) -> float:\n ...", "def timestamp(self):\n\t\tcurrent_stamp = 0\n\t\tcurrent_size = 0\n\t\ttry:\n\t\t\tst = stat(self._file)\n\t\t\tif st:\n\t\t\t\tcurrent_stamp = int(st.st_mtime)\n\t\t\t\tcurrent_size = st.st_size\n\t\t\t\t# Fake a changed mtime if size is different. Subsequent processing\n\t\t\t\t# only depends on the mtime field.\n\t\t\t\tif current_size != self._last_size:\n\t\t\t\t\tcurrent_stamp = int(time())\n\t\t\t\t\tMODULE.info(\"Size of '%s': %s -> %s\" % (self._file,self._last_size,current_size))\n\t\t\t\t\tself._last_size = current_size\n\t\tfinally:\n\t\t\tpass\n\n\t\tif current_stamp == self._last_stamp:\n\t\t\tself._unchanged_count += 1\n\t\t\tif self._unchanged_count >= self._count:\n\t\t\t\t# Don't record new timestamp if MD5 of file is the same\n\t\t\t\thash = md5(open(self._file).read()).hexdigest()\n\t\t\t\tif hash != self._last_md5:\n\t\t\t\t\tself._last_md5 = hash\n\t\t\t\t\tself._last_returned_stamp = current_stamp\n\t\t\t\telse:\n\t\t\t\t\tMODULE.info(\"Hash of '%s' unchanged\" % self._file)\n\t\telse:\n\t\t\tself._unchanged_count = 0\n\t\t\tself._last_stamp = current_stamp\n\n\t\treturn self._last_returned_stamp", "def exposuretime(self):\n _, = self.exposuretimes\n return _", "def Rescale(self):\r\n picWidth,picHeight = self.oldSize = self.GetSizeTuple()\r\n bitmap = self.scaled = self.bitmap\r\n if not bitmap: return\r\n imgWidth,imgHeight = bitmap.GetWidth(),bitmap.GetHeight()\r\n if self.scaling == 2 or (self.scaling == 1 and (imgWidth > picWidth or imgHeight > picHeight)):\r\n image = bitmap.ConvertToImage()\r\n factor = min(1.0*picWidth/imgWidth,1.0*picHeight/imgHeight)\r\n newWidth,newHeight = int(factor*imgWidth),int(factor*imgHeight)\r\n self.scaled = image.Scale(newWidth,newHeight).ConvertToBitmap()\r\n #self.scaled = image.Scale(newWidth,newHeight,wx.IMAGE_QUALITY_HIGH ).ConvertToBitmap()\r" ]
[ "0.6460381", "0.56831473", "0.5679432", "0.5437756", "0.53410375", "0.5333899", "0.5274236", "0.5244045", "0.51951635", "0.51917946", "0.516668", "0.51533455", "0.51174325", "0.5113334", "0.509966", "0.5089414", "0.5071341", "0.5068429", "0.5064854", "0.50535154", "0.50339556", "0.503348", "0.50307244", "0.49801737", "0.49785444", "0.49664643", "0.49658948", "0.49615514", "0.49425498", "0.4930351" ]
0.6318901
1
we overwrite the default method that would return the modification time of the context, to return a way back modification time in case the currently requested scale is a cropped scale. (so plone.scale does not create a new scale w/o cropping information
def modified(self): if self._allow_rescale: return super(NamedfileImageScaling, self).modified() else: return 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modified(self):\n if self._allow_rescale:\n return super(ImageScaling, self).modified()\n else:\n return 1", "def get_modified_time(self, name):\n raise NotImplementedError(\n \"subclasses of Storage must provide a get_modified_time() method\"\n )", "def get_mtime(self):\n storage = getattr(self._file, \"storage\", None)\n if storage:\n return storage.modified_time(self._file.name)\n return super(FileAsset, self).get_mtime()", "def getLastModifiedTime(self): #$NON-NLS-1$\r", "def scale(self):\n return self._scale", "def modified(self) -> datetime:\n # TODO: Should this be overridden for LocalDirectoryAsset?\n return datetime.fromtimestamp(self.filepath.stat().st_mtime).astimezone()", "def convertTimeAndExtent(self):\n return _libsbml.Submodel_convertTimeAndExtent(self)", "def GetScale(self):\n ...", "def get_created_time(self, name):\n raise NotImplementedError(\n \"subclasses of Storage must provide a get_created_time() method\"\n )", "def scale(self):", "def mtime(self):\n if self.source_file is None:\n return None\n if self._mtime is None:\n try:\n try: # Locally stored file\n mtime = os.path.getmtime(self.source_file.path)\n self._mtime = int(mtime)\n except NotImplementedError: # AWS S3 storage\n key = self.source_file.file.key\n modified = boto.utils.parse_ts(key.last_modified)\n self._mtime = int(modified.strftime('%s'))\n except (FileNotFoundError, AttributeError):\n self._mtime = int(timezone.now().strftime('%s'))\n return self._mtime", "def test_patch_namespaced_scale_scale(self):\n pass", "def get_transform_value(self, *args, **kwargs):\n if self.compute_only_full and not self.full:\n return None\n\n #################################################\n # Determine whether we should call the transform\n # 0. Support historical/legacy usage of '0' signaling,\n # 'update on every bar'\n if self.refresh_period == 0:\n period_signals_update = True\n else:\n # 1. Is the refresh period over?\n period_signals_update = (\n self.trading_days_total % self.refresh_period == 0)\n # 2. Have the args or kwargs been changed since last time?\n args_updated = args != self.last_args or kwargs != self.last_kwargs\n # 3. 
Is this a downsampled batch, and is the last event mkt close?\n downsample_ready = not self.downsample or \\\n self.last_dt == self.mkt_close\n\n recalculate_needed = downsample_ready and \\\n (args_updated or (period_signals_update and self.updated))\n ###################################################\n\n if recalculate_needed:\n self.cached = self.compute_transform_value(\n self.get_data(),\n *args,\n **kwargs\n )\n\n self.last_args = args\n self.last_kwargs = kwargs\n return self.cached", "def get_zoom_transform(self):\n return self.zoom_levels[self.cur_zoom][1]", "def _need_rescale(self, fieldname, scale):\n cropped = IAnnotations(self.context).get(PAI_STORAGE_KEY)\n if cropped and '%s_%s' % (fieldname, scale) in cropped:\n self._allow_rescale = False\n else:\n self._allow_rescale = True", "def get_mtime(self):\n if settings.DEBUG:\n return os.path.getmtime(self.get_path())\n return staticfiles_storage.modified_time(self.get_name())", "def _get_access_time(self):\n return self.__access_time", "def get_accessed_time(self, name):\n raise NotImplementedError(\n \"subclasses of Storage must provide a get_accessed_time() method\"\n )", "def ModifyTime(self):\n if self.force_auto_sync:\n self.get('ModifyTime')\n return self._ModifyTime", "def scaling_object(self):\n return self.__scaling_object", "def last_modified(self) -> str:\n\t\tif self.name == \"\":\n\t\t\tif \"last_modified\" in self.ds._file[\"/matrix\"].attrs:\n\t\t\t\treturn self.ds._file[\"/matrix\"].attrs[\"last_modified\"]\n\t\t\telif self.ds._file.mode == 'r+':\n\t\t\t\tself.ds._file[\"/matrix\"].attrs[\"last_modified\"] = timestamp()\n\t\t\t\tself.ds._file.flush()\n\t\t\t\treturn self.ds._file[\"/matrix\"].attrs[\"last_modified\"]\n\n\t\tif self.name != \"\":\n\t\t\tif \"last_modified\" in self.ds._file[\"/layers/\" + self.name].attrs:\n\t\t\t\treturn self.ds._file[\"/layers/\" + self.name].attrs[\"last_modified\"]\n\t\t\telif self.ds._file.mode == 'r+':\n\t\t\t\tself.ds._file[\"/layers/\" + self.name].attrs[\"last_modified\"] = timestamp()\n\t\t\t\tself.ds._file.flush()\n\t\t\t\treturn self.ds._file[\"/layers/\" + self.name].attrs[\"last_modified\"]\n\n\t\treturn timestamp()", "def collection_timestamp(self, collection_id, parent_id, auth=None):\n tb = Timestamps.__table__\n qry = select([label('last_modified', func.max(tb.c.last_modified))]).where(and_(\n tb.c.parent_id == parent_id,\n tb.c.collection_id == collection_id))\n last_modified, = Session.execute(qry).fetchone()\n if last_modified is None:\n last_modified = datetime.datetime.utcnow()\n with transaction.manager:\n Session.add(Timestamps(parent_id=parent_id, collection_id=collection_id,\n last_modified=last_modified))\n return last_modified.replace(tzinfo=datetime.timezone.utc).timestamp()", "def update_time_base(self, event):\n print(\"TimeBase.update_time_base()\")\n print(\"Base de temps : \", self.scale_T.get())\n if not isinstance(self.parent, Tk):\n self.parent.update_time(self.scale_T.get())", "def apply_model(self, original, t1, t2, resolution_scaling_factor=1):\n img = Image()\n img.time_stamp = t2\n\n if t1 == t2:\n img.initialize_with_image(original)\n return img\n\n calc_shift_fnc = self.calculate_shift\n orig_get_fnc = original.get\n interp_fnc = my_math.linear_interpolation\n\n def generate(y, x):\n \"\"\"Function describing the transformed image\"\"\"\n realy = y / resolution_scaling_factor\n realx = x / resolution_scaling_factor\n\n # move to time t2\n posy = y + calc_shift_fnc(realy, realx, t2, 0) - \\\n calc_shift_fnc(realy, realx, t1, 0)\n posx 
= x + calc_shift_fnc(realy, realx, t2, 1) - \\\n calc_shift_fnc(realy, realx, t1, 1)\n\n x_left = int(posx) # math.floor(pos[0])\n x_right = x_left + 1 # math.ceil(pos[0])\n y_down = int(posy) # math.floor(pos[1])\n y_up = y_down + 1 # math.ceil(pos[1])\n\n v11 = orig_get_fnc(y_down, x_left, resolution_scaling_factor)\n v12 = orig_get_fnc(y_down, x_right, resolution_scaling_factor)\n v21 = orig_get_fnc(y_up, x_left, resolution_scaling_factor)\n v22 = orig_get_fnc(y_up, x_right, resolution_scaling_factor)\n\n return interp_fnc(y_down, x_left, y_up, x_right, v11, v12, v21, v22,\n posy, posx)\n\n img.image_data = np.fromfunction(np.vectorize(generate),\n (original.shape()[0]*resolution_scaling_factor,\n original.shape()[1]*resolution_scaling_factor))\n\n if resolution_scaling_factor != 1:\n img.image_data = skimage.transform.resize(img.image_data,\n original.shape(),\n preserve_range=True)\n\n return img", "def clipEditorCurrentTimeCtx(*args, exists: bool=True, history: bool=True, image1: Union[AnyStr,\n bool]=\"\", image2: Union[AnyStr, bool]=\"\", image3: Union[AnyStr,\n bool]=\"\", name: AnyStr=\"\", q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def get_mod_time(self):\n if self.file_meta[:2] == b'bp':\n file_meta_plist = ccl_bplist.load(BytesIO(self.file_meta))\n raw_date_time = file_meta_plist['$objects'][1]['LastModified']\n converted_time = datetime.datetime.fromtimestamp(raw_date_time)\n converted_time = converted_time.timetuple()\n return converted_time\n else:\n file_meta_plist = plistlib.loads(self.file_meta)\n return file_meta_plist['modified'].timetuple()", "def getScaleKeyTime(self, index, view) -> float:\n ...", "def timestamp(self):\n\t\tcurrent_stamp = 0\n\t\tcurrent_size = 0\n\t\ttry:\n\t\t\tst = stat(self._file)\n\t\t\tif st:\n\t\t\t\tcurrent_stamp = int(st.st_mtime)\n\t\t\t\tcurrent_size = st.st_size\n\t\t\t\t# Fake a changed mtime if size is different. Subsequent processing\n\t\t\t\t# only depends on the mtime field.\n\t\t\t\tif current_size != self._last_size:\n\t\t\t\t\tcurrent_stamp = int(time())\n\t\t\t\t\tMODULE.info(\"Size of '%s': %s -> %s\" % (self._file,self._last_size,current_size))\n\t\t\t\t\tself._last_size = current_size\n\t\tfinally:\n\t\t\tpass\n\n\t\tif current_stamp == self._last_stamp:\n\t\t\tself._unchanged_count += 1\n\t\t\tif self._unchanged_count >= self._count:\n\t\t\t\t# Don't record new timestamp if MD5 of file is the same\n\t\t\t\thash = md5(open(self._file).read()).hexdigest()\n\t\t\t\tif hash != self._last_md5:\n\t\t\t\t\tself._last_md5 = hash\n\t\t\t\t\tself._last_returned_stamp = current_stamp\n\t\t\t\telse:\n\t\t\t\t\tMODULE.info(\"Hash of '%s' unchanged\" % self._file)\n\t\telse:\n\t\t\tself._unchanged_count = 0\n\t\t\tself._last_stamp = current_stamp\n\n\t\treturn self._last_returned_stamp", "def exposuretime(self):\n _, = self.exposuretimes\n return _", "def Rescale(self):\r\n picWidth,picHeight = self.oldSize = self.GetSizeTuple()\r\n bitmap = self.scaled = self.bitmap\r\n if not bitmap: return\r\n imgWidth,imgHeight = bitmap.GetWidth(),bitmap.GetHeight()\r\n if self.scaling == 2 or (self.scaling == 1 and (imgWidth > picWidth or imgHeight > picHeight)):\r\n image = bitmap.ConvertToImage()\r\n factor = min(1.0*picWidth/imgWidth,1.0*picHeight/imgHeight)\r\n newWidth,newHeight = int(factor*imgWidth),int(factor*imgHeight)\r\n self.scaled = image.Scale(newWidth,newHeight).ConvertToBitmap()\r\n #self.scaled = image.Scale(newWidth,newHeight,wx.IMAGE_QUALITY_HIGH ).ConvertToBitmap()\r" ]
[ "0.63181275", "0.5682004", "0.56780356", "0.5436276", "0.53405464", "0.53323674", "0.52736956", "0.5243496", "0.5193847", "0.5192097", "0.51653427", "0.51530874", "0.51168525", "0.5112648", "0.50986654", "0.5087256", "0.5069669", "0.50668234", "0.5063405", "0.50531495", "0.5032332", "0.5032051", "0.5031142", "0.49812278", "0.49788228", "0.49646804", "0.49639896", "0.49601832", "0.49416727", "0.49307114" ]
0.6459762
0
Set the pair and reload data if its new.
def set_pair(self, pair: Pair): if pair != self.pair: self.pair = pair self.load_candles()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_pair(self, pair: StudentPair) -> None:\n self._edit_pair = pair\n self.line_edit_title.setText(str(self._edit_pair[\"title\"]))\n self.line_edit_lecturer.setText(str(self._edit_pair[\"lecturer\"]))\n self.combo_box_type.setCurrentText(str(self._edit_pair[\"type\"]))\n self.line_edit_classes.setText(str(self._edit_pair[\"classroom\"]))\n self.combo_box_subgroup.setCurrentText(str(self._edit_pair[\"subgroup\"]))\n\n time: TimePair = self._edit_pair[\"time\"]\n if time is not None:\n number = time.get_number()\n self.combo_box_start.setCurrentIndex(number)\n self.combo_box_end.clear()\n self.combo_box_end.addItems(TimePair.time_ends()[number:])\n self.combo_box_end.setCurrentIndex(time.duration() - 1)\n\n self._dates = self._edit_pair[\"dates\"]\n self.update_list_widget_date()", "async def _set_watch_pair(self, pair: str):\n\n if pair not in self.market.pairs:\n if pair in self.market.extra_base_pairs:\n self.market.extra_base_pairs.remove(pair)\n\n self.market.pairs.append(pair)\n self.watch_only_pairs.append(pair)\n self.log.info('Setting watch-only pair {}.', pair, stack_depth=1)", "def change_pair(self):\n newcoin = self.mw.coin_selector.currentText()\n\n if any(newcoin + \"BTC\" in s for s in val[\"coins\"]) and newcoin != self.mw.cfg_manager.coin:\n self.mw.cfg_manager.coin = newcoin\n self.mw.cfg_manager.pair = newcoin + \"BTC\"\n\n self.set_charts(self.mw.cfg_manager.pair)\n\n val[\"bm\"].stop_socket(val[\"aggtradeWebsocket\"])\n val[\"bm\"].stop_socket(val[\"depthWebsocket\"])\n val[\"bm\"].stop_socket(val[\"klineWebsocket\"])\n logging.info('Switching to %s' % newcoin + \" / BTC\")\n\n self.mw.api_manager.set_pair_values()\n\n self.initial_values()\n\n self.mw.websocket_manager.websockets_symbol()\n\n self.mw.history_table.setRowCount(0)\n\n self.mw.api_manager.api_calls()\n\n self.mw.table_manager.init_filter()", "def __init__(self, pairdata):\n\n self.data = deepcopy(pairdata)\n gear = {'z': self.data['z'], 'x': self.data['x'], 'alpha_n': self.data['alpha_n'], 'beta': self.data['beta'],\n 'm_n': self.data['m_n'], 'rho_f': self.data['rho_f'], 'd_s': self.data['d_s'], 'c': self.data['c'],\n 'b': self.data['b']}\n\n self.gear = self.__set_gear(gear)", "def setData(self,newData):\r\n pass", "async def update_derived_data(self, pair):\n\n await self.update_adjusted_tick_data(pair)\n await self.update_mas(pair)\n await self.update_emas(pair)\n await self.filter_mas(pair)\n await self.filter_emas(pair)\n await self.update_bbands(pair)\n await self.refresh_indicators(pair)", "async def prepare_states(self, pair: str):\n\n if pair not in self.pair_states:\n self.pair_states[pair] = {\n 'enable_buy': True,\n 'enable_rebuy': True\n }", "def add_pair(self, new_pair: StudentPair) -> None:\n self.check_possible_added(new_pair)\n self._buffer.append(new_pair)\n self.reallocate()", "async def refresh_derived_data(self, pair):\n\n await self.refresh_adjusted_tick_data(pair)\n await self.refresh_mas(pair)\n await self.refresh_emas(pair)\n await self.filter_mas(pair)\n await self.filter_emas(pair)\n await self.refresh_bbands(pair)\n await self.refresh_indicators(pair)", "def __setitem__(self, key, value):\n try:\n kvp = self.keyvaluepair_set.get(key=key)\n except KeyValuePair.DoesNotExist:\n KeyValuePair.objects.create(container=self, key=key, value=value)\n else:\n kvp.value = value\n kvp.save()", "def update_original_data(self):\n pass", "async def refresh_pairs(self):\n\n summaries = await self.api.get_market_summaries()\n if summaries is None:\n self.log.error('Could not get market 
summaries data.')\n return None\n\n pairs = []\n pair_count = 0\n changes, volumes, min_trade_qtys, min_trade_sizes = await self._extract_filtered_summaries(summaries)\n bases = list(config['min_base_volumes'].keys())\n\n for pair in sorted(volumes, key=volumes.get, reverse=True):\n if await Market.apply_pair_prefer_filter(pair, bases, volumes.keys()):\n continue\n if await self._handle_greylisted(pair):\n continue\n\n pairs.append(pair)\n self.log.debug('Added pair {}: volume {}, change {}.', pair, volumes[pair], changes[pair], verbosity=1)\n\n pair_count += 1\n if config['max_pairs'] and pair_count >= config['max_pairs']:\n break\n\n if config['app_node_index'] is not None:\n pair_splits = list(utils.split(pairs, config['app_node_max']))\n self.pairs = pair_splits[config['app_node_index']] # pylint: disable=E1126\n else:\n self.pairs = pairs\n\n self.extra_base_pairs = [pair for pair in config['base_pairs'] if pair not in pairs]\n self.min_trade_qtys = min_trade_qtys\n self.min_trade_sizes = min_trade_sizes", "def update_data():\n pass", "async def prepare_trades(self, pair: str):\n\n if pair not in self.trades:\n self.trades[pair] = {\n 'last_open_time': 0.0,\n 'rebuy_count': 0,\n 'open': [],\n 'closed': []\n }", "def update_gear_data(self, geardata):\n\n tempdata = self.data.copy()\n tempdata.update(geardata)\n self.__init__(geardata, self.modifications)", "def __setitem__(self, key, val):\n self.members[key] = val\n pair = self.pair\n for i in range(key):\n pair = pair.cdr\n pair.car = val", "def update(self):\n\n if len(self._data) > 0:\n if not self._switch._is_on:\n tmp = list(self._data.keys())\n\n random.shuffle(tmp)\n\n data = random.sample(tmp,1)[0]\n\n if (self._state == data):\n random.shuffle(tmp)\n random.shuffle(tmp)\n data = random.sample(tmp, 1)[0]\n\n self._state = self._data[data]\n \n self._now_key = data\n self._now_val = self._data[data]\n\n return\n\n self._api.load_file()\n\n self._data = self._api._data\n\n tmp = list(self._data.keys())\n\n random.shuffle(tmp)\n data = random.sample(tmp,1)[0]\n\n if (self._state == data):\n random.shuffle(tmp)\n random.shuffle(tmp)\n data = random.sample(tmp,1)[0]\n\n self._state = self._data[data]\n \n self._now_key = data\n self._now_val = self._data[data]", "def currency_pair(self, currency_pair):\n\n self._currency_pair = currency_pair", "def visit_record(self, syrecord):\n for other_key, other_value in syrecord.items():\n try:\n getattr(self.current, other_key).update(other_value)\n except KeyError:\n setattr(self.current, other_key, other_value)", "def add_pairing(self, pairing): \n \n self.pairings.append(pairing)\n\n\n # Fill in the rest", "async def update_tick_data(self, pair: str) -> str:\n\n self.last_update_nums[pair] = 0\n\n close_time, tick_gap = await self._get_tick_delta(pair)\n if close_time is None:\n return None\n\n if tick_gap > config['tick_gap_max']:\n self.log.info(\"{} is missing too many ticks, removing from pairs list.\", pair)\n\n if pair in self.pairs:\n self.pairs.remove(pair)\n\n if pair not in self.greylist_pairs:\n greylist_time = time.time() + config['pairs_greylist_secs']\n self.log.info(\"{} greylisting for {} seconds.\", pair, config['pairs_greylist_secs'])\n self.greylist_pairs[pair] = greylist_time\n\n return None\n\n close_value, base_24hr_volume = await self.api.get_last_values(pair)\n if close_value is None:\n return None\n\n try:\n if await self._restore_ticks(pair, tick_gap, close_value, base_24hr_volume):\n await self._schedule_back_refresh(pair, tick_gap)\n\n self.log.debug('{} adding 
new tick value {} at {}.', pair, close_value, close_time, verbosity=1)\n self.close_times[pair].append(close_time)\n self.close_values[pair].append(close_value)\n self.base_24hr_volumes[pair][0].append(base_24hr_volume)\n self.last_update_nums[pair] = tick_gap + 1\n await self._truncate_tick_data(pair)\n await self._backup_tick_data(pair)\n\n self.log.debug('{} updated tick data.', pair, verbosity=1)\n return pair\n\n except (KeyError, IndexError, TypeError) as e:\n self.log.error('{} got {}: {}\\n{}', pair, type(e).__name__, e,\n ''.join(traceback.format_tb(e.__traceback__)))\n\n return None", "def __setitem__(self,key,value):\n if key in self.deleted: self.deleted.remove(key)\n if key not in self.changed: self.changed.append(key)\n self.data[key] = value", "def set_data(self, new_data):\n self.data = new_data", "def __init__(self):\n self.data = {}\n self.refresh()", "def __setitem__(self, key, val):\n self.__check_key_validity(key)\n self.data[key[0]][key[1]] = val", "def _update_data(self, data, update_original=False):\n self._data.update(dict((key, self._deserialize(key, value))\n for key, value in data.items()))\n\n if update_original:\n self._original_data = copy.deepcopy(self._data)", "def update(self, new_content: dict):\n self.__init__(new_content, self.__previous_hash)", "def __setitem__(self, key, value):\r\n self.data[key] = value", "async def sync_pairs(self):\n\n self.watch_only_pairs = []\n\n await self._handle_trader_watch_pairs()\n await self._handle_balancer_watch_pairs()\n\n for pair in self.market.pairs + self.market.extra_base_pairs:\n await self.prepare_trades(pair)\n await self.prepare_states(pair)\n await self.prepare_last_trades(pair)\n\n await self.prepare_all_trade_stats()\n await self.balancer.sync_pairs()", "def __setitem__(self, key, value):\n self.data[key] = value" ]
[ "0.6273834", "0.62562305", "0.62263435", "0.6111727", "0.5962303", "0.59485316", "0.59118456", "0.5875051", "0.58309335", "0.58295953", "0.58218694", "0.58065355", "0.5751621", "0.5708703", "0.5699012", "0.56972504", "0.5696664", "0.5620271", "0.55983835", "0.5595588", "0.55933", "0.55857444", "0.5582843", "0.55814624", "0.55400974", "0.5534963", "0.55234903", "0.5522884", "0.5518812", "0.5495551" ]
0.76077706
0
Set the granularity and reload data if it's new.
def set_gran(self, gran: Gran):
    if gran != self.gran:
        self.gran = gran
        self.load_candles()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _resolution_changed(self):\n self.reinitialiseData()", "def reload(self):\n self._populate(self.hierarchy[-1])", "def refresh(self) -> None:\n self.data = {}\n self.load_settings_file(self.default_settings_path / \"settings.yaml\", file_key=\"internal\")\n self.load_systems(self.default_settings_path / \"systems\")\n self.load_settings_file(self.personal_dir / \"settings.yaml\", file_key=\"user\")\n self.load_systems(self.personal_dir / \"systems\")", "def ReloadSettings(jsonData):\n global MySet\n MySet.Reload(jsonData)", "def _reload_values(self):\r\n raise NotImplementedError", "def set_Granularity(self, value):\n super(GetTimestampFromDateParametersInputSet, self)._set_input('Granularity', value)", "def load_new_data():\n require('settings', provided_by=[production, staging])\n \n maintenance_up()\n load_data()\n maintenance_down()", "def _update_level_data(self):\n\t\t# taxes, inhabitants\n\t\tself.tax_base = self.session.db.get_settler_tax_income(self.level)\n\t\tself.inhabitants_max = self.session.db.get_settler_inhabitants_max(self.level)\n\t\tif self.inhabitants > self.inhabitants_max: # crop settlers at level down\n\t\t\tself.inhabitants = self.inhabitants_max\n\n\t\t# consumption:\n\t\t# Settler productions are specified to be disabled by default in the db, so we can enable\n\t\t# them here per level.\n\t\tcurrent_lines = self.get_production_lines()\n\t\tfor (prod_line,) in self.session.db.get_settler_production_lines(self.level):\n\t\t\tif not self.has_production_line(prod_line):\n\t\t\t\tself.add_production_by_id(prod_line)\n\t\t\t# cross out the new lines from the current lines, so only the old ones remain\n\t\t\tif prod_line in current_lines:\n\t\t\t\tcurrent_lines.remove(prod_line)\n\t\tfor line in current_lines[:]: # iterate over copy for safe removal\n\t\t\t# all lines, that were added here but are not used due to the current level\n\t\t\tself.remove_production_by_id(line)\n\t\t# update instance graphics\n\t\tself.update_action_set_level(self.level)", "def _reload(self):\n if self._ancestorModelSourceCreated:\n self._parent._reload()\n else:\n # beware this breaks parent/child links such as current selection / hierarchical groups\n dictSave = self.serialize()\n tmpRegion = self._createBlankCopy()\n tmpRegion.deserialize(dictSave)\n self._assign(tmpRegion)\n self._informRegionChange(True)", "def test_update_single_grading_period(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def reinit(self):\n self.logger.info(\"Reinit called. 
Clear the population.\")\n self.set_init_population([], perf_name=None)\n self._gt_rollouts = []\n self._gt_scores = []", "def __init__(self):\n self.data = {}\n self.refresh()", "def set_analysis_time(self, t):\n for z in self.zones:\n z.set_demand_rate_per_t(t)", "def reloadData(self):\n self.dto.readFromData()\n print(\"Record reloaded.\")", "def _load_data(self):\n if self._api_response.status_code == 200:\n self._dataset = self._api_response.json()\n self._fill_day_dicts()", "def setLoad(self, new_load: float) -> None:\n self.load = new_load", "async def async_set_primary_filtration(self, **kwargs):\n await self.cycle.set(\n duration=kwargs.get(ATTR_DURATION),\n start_hour=kwargs.get(ATTR_START_HOUR),\n )\n await self.coordinator.async_request_refresh()", "def reload(self):\n if len(self.files) > 0:\n self.load(self.files, regfiles=self.regions)", "def reload(self):\n cluster_kubeconfig = self.ocp.cluster_kubeconfig\n self.data = self.get()\n self.__init__(**self.data)\n self.ocp.cluster_kubeconfig = cluster_kubeconfig", "def reinit(self):\n self.data_updating = {}\n self.reinitialization = True\n # force the bounds to be defined again\n self.bounds = None", "def update_grad_data():\n t_file = 'hcapgrd1_full_data_*.fits*'\n out_dir = deposit_dir + '/Grad_save/'\n tdir = out_dir + 'Gradcap/'\n#\n#--- read grad group name\n#\n gfile = house_keeping + 'grad_list'\n grad_list = mcf.read_data_file(gfile)\n\n [tstart, tstop, year] = ecf.find_data_collecting_period(tdir, t_file)\n\n get_data(tstart, tstop, year, grad_list, out_dir)", "async def refresh_derived_data(self, pair):\n\n await self.refresh_adjusted_tick_data(pair)\n await self.refresh_mas(pair)\n await self.refresh_emas(pair)\n await self.filter_mas(pair)\n await self.filter_emas(pair)\n await self.refresh_bbands(pair)\n await self.refresh_indicators(pair)", "def test_update_derived_metric(self):\n pass", "def reload(self):\n data = self.api.api_request(\"GET\", self.url)\n for t in self.ace_types:\n self[t].actors = data[t][\"actors\"]\n self[t].groups = data[t][\"groups\"]", "def _trigger(self):\n if len(self._stat_now):\n self._stat_now['epoch_num'] = self.epoch_num\n self._stat_now['global_step'] = self.global_step\n\n self._stats.append(self._stat_now)\n self._stat_now = {}\n self._write_stat()", "def reload(self):\n\n pass", "def set_heritage(self):\n if self.has_non_empty_attribute(\"registration_date\"):\n try:\n iso_date = JalaliCalendar(self.registration_date).get_date()\n except TypeError:\n print(\"dateparser.JalaliCalendar could not handle: {}\".format(\n self.registration_date))\n iso_date = None\n\n if iso_date:\n date_dict = utils.datetime_to_dict(\n iso_date.get('date_obj'), \"%Y%m%d\")\n qualifier = {\"start_time\": utils.package_time(date_dict)}\n heritage = self.mapping[\"heritage\"][\"item\"]\n self.add_statement(\"heritage_status\", heritage, qualifier)\n else:\n self.add_to_report(\n \"registration_date\", self.registration_date, \"start_time\")\n else:\n super().set_heritage()", "def _refresh(self):\n # if we have all the values we need to hookup to the URL\n for key in self.DBMSettings.keys():\n if not key.startswith(LOCALCHAR):\n self.DBMSettings[key] = self._urldict()[key]", "def reload(self):", "def reload(self):" ]
[ "0.5652916", "0.5497101", "0.54564565", "0.5395625", "0.53053933", "0.5266499", "0.51898026", "0.51679134", "0.5159813", "0.5147838", "0.51331675", "0.5116666", "0.5112451", "0.50447494", "0.5042116", "0.5025085", "0.50143266", "0.50115645", "0.49833155", "0.4946549", "0.4946061", "0.49414337", "0.48950323", "0.48949814", "0.48788175", "0.48636985", "0.4861964", "0.48559886", "0.48541945", "0.48541945" ]
0.61495394
0
Set the quote kind and reload data if it's new.
def set_quote_kind(self, quote_kind: QuoteKind):
    if quote_kind != self.quote_kind:
        self.quote_kind = quote_kind
        if self.geo is None:
            self.load_candles()
        else:
            self.geo.update(quote_kind=quote_kind)
            self.chart.redraw(self.geo)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def quotes(self, quotes):\n\n self._quotes = quotes", "def quote_type(self, quote_type):\n allowed_values = [None,\"Price\", \"Spread\", \"Rate\", \"LogNormalVol\", \"NormalVol\", \"ParSpread\", \"IsdaSpread\", \"Upfront\", \"Index\", \"Ratio\", \"Delta\", \"PoolFactor\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and quote_type not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `quote_type` ({0}), must be one of {1}\" # noqa: E501\n .format(quote_type, allowed_values)\n )\n\n self._quote_type = quote_type", "def _update_quote(self):\n # If this is the first quote or a price is outside current price ladder,\n # reset the price ladder.\n if (self._quotes_row == 0 or (\n self._quotes_df.loc[self._quotes_row, 'ask_price'] > \\\n self._price_ladder[0] + .5 * self._config['tick_size']) or (\n self._quotes_df.loc[self._quotes_row, 'bid_price'] < \\\n self._price_ladder[-1] - .5 * self._config['tick_size'])):\n max_price = (self._quotes_df.loc[self._quotes_row, 'ask_price'] +\n self._config['tick_size'] * np.floor(\n (self._config['row_count'] - 1) / 2))\n self._price_ladder = np.linspace(\n max_price,\n max_price - (\n self._config['row_count'] - 1) * self._config['tick_size'],\n self._config['row_count'])\n self._price_ladder_df.iloc[:, [0, 1, 3, 4]] = ''\n self._price_ladder_df.iloc[:, 2] = [self._config[\n 'price_format'].format(x) for x in self._price_ladder]\n\n # Populate price ladder dataframe and update table cells.\n for i in range(self._config['row_count']):\n if math.isclose(self._price_ladder[i],\n self._quotes_df.loc[self._quotes_row, 'ask_price']):\n self._price_ladder_df.iloc[i, 3] = str(\n self._quotes_df.loc[self._quotes_row, 'ask_size'])\n else:\n self._price_ladder_df.iloc[i, 3] = ''\n if math.isclose(self._price_ladder[i],\n self._quotes_df.loc[self._quotes_row, 'bid_price']):\n self._price_ladder_df.iloc[i, 1] = str(\n self._quotes_df.loc[self._quotes_row, 'bid_size'])\n else:\n self._price_ladder_df.iloc[i, 1] = ''\n\n # Print this quote row and update counter.\n print(self._quotes_df.iloc[self._quotes_row, ].values)\n self._quotes_row += 1", "def set_stock_retrieval_type(self, type ='all'):\n self.stock_retrieval_type = type", "def on_book(context, quote_type, quote):\n date, filterTime = str(context.config.trading_date), int(quote.int_time)\n # print(quote.symbol, quote.int_time)\n\n if ((filterTime > 93000000) and (filterTime < 113000000)) or (\n (filterTime > 130000000) and (filterTime < 150000000)):\n # print (\"Trading Time\")\n if str(quote.symbol).__contains__(\"IH\"):\n context.dic[\"IH\"] = [quote.bp_array[0], quote.ap_array[0]]\n context.symboldic[\"IH\"] = quote.symbol\n if str(quote.symbol).__contains__(\"IC\"):\n context.dic[\"IC\"] = [quote.bp_array[0], quote.ap_array[0]]\n context.symboldic[\"IC\"] = quote.symbol\n if len(context.dic.keys()) < 2:\n return\n \"\"\"\n if len(context.dic.keys()) >= 2:\n sql = \"`quoteData insert (%s;%s;%s;%s;%s;%s;%s)\"\n time_sql = '{y+ \"T\"$-9#\"00000000\",string x}[%s;%s]'\n date_time = time_sql % (filterTime, \"%s.%s.%s\" % (date[0:4], date[4:6], date[6:8]))\n context.q.sync(date_time)\n # print(context.dic[\"IC\"][0]*200 -context.dic[\"IH\"][1]*300*2)\n feed_quote = sql % (date_time, context.dic[\"IH\"][0], context.dic[\"IH\"][1], context.dic[\"IC\"][0], context.dic[\"IC\"][1], context.dic[\"IC\"][0]*200 -context.dic[\"IH\"][1]*300*2, context.dic[\"IC\"][1]*200 -context.dic[\"IH\"][0]*300*2)\n context.q.sync(feed_quote)\n\n context.q.sync(\n \"CombinedMainContract: 
select Date:last Date,BidPrice1Open:first PairBidPrice,BidPrice1High:max PairBidPrice,BidPrice1Low:min PairBidPrice, BidPrice1Close:last PairBidPrice,BidVol1:100,AskPrice1Open:first PairAskPrice,AskPrice1High:max PairAskPrice,AskPrice1Low:min PairAskPrice, AskPrice1Close:last PairAskPrice,AskVol1:last 100,LegOneBidPrice1:last LegOneBidPrice1, LegOneAskPrice1:last LegOneAskPrice1, LegTwoBidPrice1:last LegTwoBidPrice1, LegTwoAskPrice1: last LegTwoAskPrice1 by %s xbar Date.second from `quoteData;\" % (\n context.kindleInterval))\n context.q.sync(\n \"delete date, second from `CombinedMainContract;delete from `CombinedMainContract where Date.second < 09:30:00;delete from `CombinedMainContract where Date.second > 11:30:00, Date.second < 13:00:00;delete from `CombinedMainContract where Date.second > 15:00:00;update TrueRange: {max(x;y;z)}'[(AskPrice1High - BidPrice1Low);(AskPrice1High - (prev BidPrice1Close));((prev AskPrice1High) - BidPrice1Low)] from `CombinedMainContract;\")\n context.q.sync(\"update N: mavg[%s;TrueRange] from `CombinedMainContract;\" % (context.volatilityRange))\n context.q.sync(\"update ShortEntry: prev (%s mmin BidPrice1Low), LongEntry: prev (%s mmax AskPrice1High) from `CombinedMainContract;\"%(context.breakRange, context.breakRange))\n\n Signal = context.q.sync(\"select count Date from CombinedMainContract\")[0]\n \n if (Signal[0] > context.kindleNumber):\n context.kindleNumber = Signal[0]\n PairDataBar = context.q.sync(\"-2#select Date.minute, BidPrice1Close, AskPrice1Close, ShortEntry, LongEntry, N from CombinedMainContract\")[0]\n context.PairDataBarDate = PairDataBar[0]\n context.PairDataBarBidPrice1Close = PairDataBar[1]\n context.PairDataBarAskPrice1Close = PairDataBar[2]\n context.PairDataBarShortEntry = PairDataBar[3]\n context.PairDataBarLongEntry = PairDataBar[4]\n context.PairDataBarN = PairDataBar[5]\n if (context.PairDataBarBidPrice1Close < context.LocalLow):\n context.UpDrawBack = 0.0\n context.LocalLow = context.PairDataBarBidPrice1Close\n elif (context.PairDataBarBidPrice1Close > context.LocalLow):\n context.UpDrawBack = context.PairDataBarBidPrice1Close - context.LocalLow\n\n if (abs(context.PositionAddedTime) > 0 and (context.PairDataBarDate > 898)):\n context.PositionClearPrice = context.dic[\"IC\"][1]\n # print(\"PosClear: \" + str(context.dic[\"IC\"][1]))\n # context.order.send_single_order(context.symboldic[\"IC\"], context.dic[\"IC\"][1], abs(context.PositionAddedTime), Direction.BUY, OpenClose.CLOSE)\n context.PositionAddedTime = 0\n # sendOrderClose PositionAddedTime Amount Contract\n if (context.PositionAddedTime == -1):\n print(context.LegOnePositionEntryPrice[1] - context.PositionClearPrice)\n elif (context.PositionAddedTime == -2):\n print(context.LegOnePositionEntryPrice[1] + context.LegOnePositionEntryPrice[\n 2] - 2 * context.PositionClearPrice)\n elif (context.PositionAddedTime == -3):\n print(context.LegOnePositionEntryPrice[1] + context.LegOnePositionEntryPrice[2] +\n context.LegOnePositionEntryPrice[3] - 3 * context.PositionClearPrice)\n elif (context.PositionAddedTime == -4):\n print(context.LegOnePositionEntryPrice[1] + context.LegOnePositionEntryPrice[2] +\n context.LegOnePositionEntryPrice[3] + context.LegOnePositionEntryPrice[\n 4] - 4 * context.PositionClearPrice)\n context.LegOnePositionEntryPrice = {}\n context.PositionEntryPrice = {}\n # context.order.send_single_order(context.symboldic[\"IC\"], context.dic[\"IC\"][1], abs(context.PositionAddedTime), Direction.BUY, OpenClose.CLOSE)\n context.PositionAddedTime = 0\n else:\n if 
((abs(context.PositionAddedTime) > 0) and (context.UpDrawBack > context.NStoplossPositionParameter * context.PairDataBarN)):\n # print(\"PosClear: \" + str(context.dic[\"IC\"][1]))\n context.PositionClearPrice = context.dic[\"IC\"][1]\n if (context.PositionAddedTime == -1):\n print (context.LegOnePositionEntryPrice[1] - context.PositionClearPrice)\n elif (context.PositionAddedTime == -2):\n print (context.LegOnePositionEntryPrice[1] + context.LegOnePositionEntryPrice[2]- 2 * context.PositionClearPrice)\n elif (context.PositionAddedTime == -3):\n print (context.LegOnePositionEntryPrice[1] + context.LegOnePositionEntryPrice[2] + context.LegOnePositionEntryPrice[3]- 3 * context.PositionClearPrice)\n elif (context.PositionAddedTime == -4):\n print (context.LegOnePositionEntryPrice[1] + context.LegOnePositionEntryPrice[2] + context.LegOnePositionEntryPrice[3] + context.LegOnePositionEntryPrice[4] - 4 * context.PositionClearPrice)\n context.LegOnePositionEntryPrice = {}\n context.PositionEntryPrice = {}\n #context.order.send_single_order(context.symboldic[\"IC\"], context.dic[\"IC\"][1], abs(context.PositionAddedTime), Direction.BUY, OpenClose.CLOSE)\n context.PositionAddedTime = 0\n context.q.sync(\"update Position:0 from `CombinedMainContract where Date = max Date\")\n if ((abs(context.PositionAddedTime) == 3) and context.PositionTimesParameter >= 4 and context.PairDataBarBidPrice1Close < context.PositionEntryPrice[1] - 3 * context.NEntryParameter*context.Nvalue):\n context.PositionAddedTime = -4\n context.LegOnePositionEntryPrice[4] = context.dic[\"IC\"][0]\n context.PositionEntryPrice[4] = context.PairDataBarBidPrice1Close\n # print(\"Pos4: \" + str(context.dic[\"IC\"][0]))\n #context.order.send_single_order(context.symboldic[\"IC\"], context.dic[\"IC\"][0], 1, Direction.SELL, OpenClose.OPEN)\n context.q.sync(\"update Position:-4 from `CombinedMainContract where Date = max Date\")\n if ((abs(context.PositionAddedTime) == 2) and context.PositionTimesParameter >= 3 and context.PairDataBarBidPrice1Close < context.PositionEntryPrice[1] - 2 * context.NEntryParameter*context.Nvalue):\n context.PositionAddedTime = -3\n context.LegOnePositionEntryPrice[3] = context.dic[\"IC\"][0]\n context.PositionEntryPrice[3] = context.PairDataBarBidPrice1Close\n # print(\"Pos3: \" + str(context.dic[\"IC\"][0]))\n context.q.sync(\"update Position:-3 from `CombinedMainContract where Date = max Date\")\n #context.order.send_single_order(context.symboldic[\"IC\"], context.dic[\"IC\"][0], 1, Direction.SELL, OpenClose.OPEN)\n if ((abs(context.PositionAddedTime) == 1) and context.PositionTimesParameter >= 2 and context.PairDataBarBidPrice1Close < context.PositionEntryPrice[1] - 1 * context.NEntryParameter*context.Nvalue):\n context.PositionAddedTime = -2\n context.LegOnePositionEntryPrice[2] = context.dic[\"IC\"][0]\n context.PositionEntryPrice[2] = context.PairDataBarBidPrice1Close\n # print(\"Pos2: \" + str(context.dic[\"IC\"][0]))\n #context.order.send_single_order(context.symboldic[\"IC\"], context.dic[\"IC\"][0], 1, Direction.SELL, OpenClose.OPEN)\n context.q.sync(\"update Position:-2 from `CombinedMainContract where Date = max Date\")\n if ((abs(context.PositionAddedTime) == 0) and context.PositionTimesParameter >= 1 and context.PairDataBarBidPrice1Close < context.PairDataBarShortEntry - 0 * context.NEntryParameter*context.Nvalue):\n context.PositionAddedTime = -1\n context.LegOnePositionEntryPrice[1] = context.dic[\"IC\"][0]\n context.PositionEntryPrice[1] = context.PairDataBarBidPrice1Close\n # print(\"Pos1: \" 
+ str(str(context.dic[\"IC\"][0])))\n context.Nvalue = context.PairDataBarN\n context.q.sync(\"update Position:-1 from `CombinedMainContract where Date = max Date\")\n #context.order.send_single_order(context.symboldic[\"IC\"], context.dic[\"IC\"][0], 1, Direction.SELL, OpenClose.OPEN)\n\n \"\"\"\n \"\"\"\n if context.long_position(quote.symbol) > 0 and not context.ORDER_SENT_FLAG:\n context.order.send_single_order(\n quote.symbol, quote.bp_array[0], 5, Direction.SELL, OpenClose.CLOSE\n )\n context.ORDER_SENT_FLAG = True\n elif 90000000 < quote.int_time < 90500000 and not context.ORDER_SENT_FLAG:\n context.order.send_single_order(\n quote.symbol, quote.bp_array[0], 5, Direction.BUY, OpenClose.OPEN\n )\n context.ORDER_SENT_FLAG = True\n else:\n pass\n \"\"\"\n\n if len(context.dic.keys()) >= 2:\n sql = \"`quoteData insert (%s;%s;%s;%s;%s;%s;%s)\"\n time_sql = '{y+ \"T\"$-9#\"00000000\",string x}[%s;%s]'\n date_time = time_sql % (filterTime, \"%s.%s.%s\" % (date[0:4], date[4:6], date[6:8]))\n context.q.sync(date_time)\n # print(context.dic[\"IC\"][0]*200 -context.dic[\"IH\"][1]*300*2)\n feed_quote = sql % (\n date_time, context.dic[\"IH\"][0], context.dic[\"IH\"][1], context.dic[\"IC\"][0], context.dic[\"IC\"][1],\n context.dic[\"IC\"][0] * 200 - context.dic[\"IH\"][1] * 300 * 2,\n context.dic[\"IC\"][1] * 200 - context.dic[\"IH\"][0] * 300 * 2)\n context.q.sync(feed_quote)\n\n context.q.sync(\n \"CombinedMainContract: select Date:last Date,BidPrice1Open:first PairBidPrice,BidPrice1High:max PairBidPrice,BidPrice1Low:min PairBidPrice, BidPrice1Close:last PairBidPrice,BidVol1:100,AskPrice1Open:first PairAskPrice,AskPrice1High:max PairAskPrice,AskPrice1Low:min PairAskPrice, AskPrice1Close:last PairAskPrice,AskVol1:last 100,LegOneBidPrice1:last LegOneBidPrice1, LegOneAskPrice1:last LegOneAskPrice1, LegTwoBidPrice1:last LegTwoBidPrice1, LegTwoAskPrice1: last LegTwoAskPrice1 by %s xbar Date.second from `quoteData;\" % (\n context.kindleInterval))\n context.q.sync(\n \"delete date, second from `CombinedMainContract;delete from `CombinedMainContract where Date.second < 09:30:00;delete from `CombinedMainContract where Date.second > 11:30:00, Date.second < 13:00:00;delete from `CombinedMainContract where Date.second > 15:00:00;update TrueRange: {max(x;y;z)}'[(AskPrice1High - BidPrice1Low);(AskPrice1High - (prev BidPrice1Close));((prev AskPrice1High) - BidPrice1Low)] from `CombinedMainContract;\")\n context.q.sync(\"update N: mavg[%s;TrueRange] from `CombinedMainContract;\" % (context.volatilityRange))\n context.q.sync(\n \"update ShortEntry: prev (%s mmin BidPrice1Low), LongEntry: prev (%s mmax AskPrice1High) from `CombinedMainContract;\" % (\n context.breakRange, context.breakRange))\n\n Signal = context.q.sync(\"select count Date from CombinedMainContract\")[0]\n\n if (Signal[0] > context.kindleNumber):\n context.kindleNumber = Signal[0]\n PairDataBar = context.q.sync(\n \"-2#select Date.minute, BidPrice1Close, AskPrice1Close, ShortEntry, LongEntry, N from CombinedMainContract\")[\n 0]\n context.PairDataBarDate = PairDataBar[0]\n context.PairDataBarBidPrice1Close = PairDataBar[1]\n context.PairDataBarAskPrice1Close = PairDataBar[2]\n context.PairDataBarShortEntry = PairDataBar[3]\n context.PairDataBarLongEntry = PairDataBar[4]\n context.PairDataBarN = PairDataBar[5]\n if (context.PairDataBarAskPrice1Close > context.LocalHigh):\n context.DownDrawBack = 0.0\n context.LocalHigh = context.PairDataBarAskPrice1Close\n elif (context.PairDataBarAskPrice1Close < context.LocalHigh):\n context.DownDrawBack = 
context.LocalHigh - context.PairDataBarAskPrice1Close\n\n if (abs(context.PositionAddedTime) > 0 and (context.PairDataBarDate > 898)):\n context.PositionClearPrice = context.dic[\"IC\"][0]\n # print(\"PosClear: \" + str(context.dic[\"IC\"][1]))\n # context.order.send_single_order(context.symboldic[\"IC\"], context.dic[\"IC\"][1], abs(context.PositionAddedTime), Direction.BUY, OpenClose.CLOSE)\n\n # sendOrderClose PositionAddedTime Amount Contract\n print(\"PosClear: \" + str(context.dic[\"IC\"][0]))\n context.PositionClearPrice = context.dic[\"IC\"][0]\n if (context.PositionAddedTime == 1):\n print(-(context.LegOnePositionEntryPrice[1] - context.PositionClearPrice))\n elif (context.PositionAddedTime == 2):\n print(-(context.LegOnePositionEntryPrice[1] + context.LegOnePositionEntryPrice[\n 2] - 2 * context.PositionClearPrice))\n elif (context.PositionAddedTime == 3):\n print(-(context.LegOnePositionEntryPrice[1] + context.LegOnePositionEntryPrice[2] +\n context.LegOnePositionEntryPrice[3] - 3 * context.PositionClearPrice))\n elif (context.PositionAddedTime == 4):\n print(-(context.LegOnePositionEntryPrice[1] + context.LegOnePositionEntryPrice[2] +\n context.LegOnePositionEntryPrice[3] + context.LegOnePositionEntryPrice[\n 4] - 4 * context.PositionClearPrice))\n context.PositionAddedTime = 0\n context.LegOnePositionEntryPrice = {}\n context.PositionEntryPrice = {}\n # context.order.send_single_order(context.symboldic[\"IC\"], context.dic[\"IC\"][1], abs(context.PositionAddedTime), Direction.BUY, OpenClose.CLOSE)\n context.PositionAddedTime = 0\n else:\n if ((abs(context.PositionAddedTime) > 0) and (\n context.DownDrawBack > context.NStoplossPositionParameter * context.PairDataBarN)):\n print(\"PosClear: \" + str(context.dic[\"IC\"][0]))\n context.PositionClearPrice = context.dic[\"IC\"][0]\n if (context.PositionAddedTime == 1):\n print(-(context.LegOnePositionEntryPrice[1] - context.PositionClearPrice))\n elif (context.PositionAddedTime == 2):\n print(-(context.LegOnePositionEntryPrice[1] + context.LegOnePositionEntryPrice[\n 2] - 2 * context.PositionClearPrice))\n elif (context.PositionAddedTime == 3):\n print(-(context.LegOnePositionEntryPrice[1] + context.LegOnePositionEntryPrice[2] +\n context.LegOnePositionEntryPrice[3] - 3 * context.PositionClearPrice))\n elif (context.PositionAddedTime == 4):\n print(-(context.LegOnePositionEntryPrice[1] + context.LegOnePositionEntryPrice[2] +\n context.LegOnePositionEntryPrice[3] + context.LegOnePositionEntryPrice[\n 4] - 4 * context.PositionClearPrice))\n context.LegOnePositionEntryPrice = {}\n context.PositionEntryPrice = {}\n # context.order.send_single_order(context.symboldic[\"IC\"], context.dic[\"IC\"][1], abs(context.PositionAddedTime), Direction.BUY, OpenClose.CLOSE)\n context.PositionAddedTime = 0\n context.q.sync(\"update Position:0 from `CombinedMainContract where Date = max Date\")\n if ((abs(\n context.PositionAddedTime) == 3) and context.PositionTimesParameter >= 4 and context.PairDataBarAskPrice1Close >\n context.PositionEntryPrice[1] + 3 * context.NEntryParameter * context.Nvalue):\n context.PositionAddedTime = 4\n context.LegOnePositionEntryPrice[4] = context.dic[\"IC\"][1]\n context.PositionEntryPrice[4] = context.PairDataBarAskPrice1Close\n print(\"Pos4: \" + str(context.dic[\"IC\"][1]))\n # context.order.send_single_order(context.symboldic[\"IC\"], context.dic[\"IC\"][1], 1, Direction.SELL, OpenClose.OPEN)\n context.q.sync(\"update Position:4 from `CombinedMainContract where Date = max Date\")\n if ((abs(\n 
context.PositionAddedTime) == 2) and context.PositionTimesParameter >= 3 and context.PairDataBarAskPrice1Close >\n context.PositionEntryPrice[1] + 2 * context.NEntryParameter * context.Nvalue):\n context.PositionAddedTime = 3\n context.LegOnePositionEntryPrice[3] = context.dic[\"IC\"][1]\n context.PositionEntryPrice[3] = context.PairDataBarAskPrice1Close\n print(\"Pos3: \" + str(context.dic[\"IC\"][1]))\n context.q.sync(\"update Position:3 from `CombinedMainContract where Date = max Date\")\n # context.order.send_single_order(context.symboldic[\"IC\"], context.dic[\"IC\"][1], 1, Direction.SELL, OpenClose.OPEN)\n if ((abs(\n context.PositionAddedTime) == 1) and context.PositionTimesParameter >= 2 and context.PairDataBarAskPrice1Close >\n context.PositionEntryPrice[1] + 1 * context.NEntryParameter * context.Nvalue):\n context.PositionAddedTime = 2\n context.LegOnePositionEntryPrice[2] = context.dic[\"IC\"][1]\n context.PositionEntryPrice[2] = context.PairDataBarAskPrice1Close\n print(\"Pos2: \" + str(context.dic[\"IC\"][1]))\n # context.order.send_single_order(context.symboldic[\"IC\"], context.dic[\"IC\"][1], 1, Direction.SELL, OpenClose.OPEN)\n context.q.sync(\"update Position:2 from `CombinedMainContract where Date = max Date\")\n if ((abs(\n context.PositionAddedTime) == 0) and context.PositionTimesParameter >= 1 and context.PairDataBarAskPrice1Close > context.PairDataBarLongEntry + 0 * context.NEntryParameter * context.Nvalue):\n context.PositionAddedTime = 1\n context.LegOnePositionEntryPrice[1] = context.dic[\"IC\"][1]\n context.PositionEntryPrice[1] = context.PairDataBarAskPrice1Close\n print(\"Pos1: \" + str(str(context.dic[\"IC\"][1])))\n context.Nvalue = context.PairDataBarN\n context.q.sync(\"update Position:1 from `CombinedMainContract where Date = max Date\")\n # context.order.send_single_order(context.symboldic[\"IC\"], context.dic[\"IC\"][1], 1, Direction.SELL, OpenClose.OPEN)", "def _initialize_quotes_spreadsheet(self, spreadsheet_name):\n gc = gspread.authorize(self.credentials)\n sheet = gc.open(spreadsheet_name)\n sheet.worksheets() # Necessary to remind gspread that Sheet1 exists, otherwise gpsread forgets about it\n\n try:\n qs = sheet.worksheet('Quotes')\n except gspread.exceptions.WorksheetNotFound:\n qs = sheet.add_worksheet('Quotes', 1000, 2)\n sheet1 = sheet.worksheet('Sheet1')\n sheet.del_worksheet(sheet1)\n\n qs.update_acell('A1', 'Quote Index')\n qs.update_acell('B1', 'Quote')\n\n # self.update_quote_spreadsheet()", "def changeType(self, newType):\n self.__class__ = globals()[newType + 'Format']\n self.format = self.defaultFormat\n self.initFormat()", "def _auto_quote(self, index, quote, period):\n key = 'AQ{}'.format(index)\n self.auto_quotes_timers[key] = threading.Timer(period, self._auto_quote,\n kwargs={'index': index, 'quote': quote, 'period': period})\n self.auto_quotes_timers[key].start()\n self._add_to_chat_queue(quote)", "def setUp(self):\n self.random_quote = Quote(\"Natasha Chebichii\", \"Run the world\")", "def save(self, *args, **kwargs):\n self.entity_type = \"Charity\"\n super().save(*args, **kwargs)", "def type(self, kind):\n self.type = kind", "async def addquote(self, ctx, *, quote : str):\n if not self.addquote_regex.match(quote):\n await ctx.send(\"`Quote must be in this format (including surrounding single quotes):\\n'some quote here - quote author'`\")\n else:\n with open(\"data/quotes.txt\", \"a\") as text_file:\n text_file.write(f\"{quote}\\n\")\n await ctx.send('`Quote added!`')", "def allow_quote_request(self, allow_quote_request):\n\n 
self._allow_quote_request = allow_quote_request", "def kind(self, kind):\n\n self._kind = kind", "def test_random_quote(self):\n quote = Quote().print()\n self.assertTrue(type(quote) == str)", "def update_quote_spreadsheet(self, db_session):\n spreadsheet_name, web_view_link = self.spreadsheets['quotes']\n gc = gspread.authorize(self.credentials)\n sheet = gc.open(spreadsheet_name)\n qs = sheet.worksheet('Quotes')\n\n quotes = db_session.query(db.Quote).all()\n\n for index in range(len(quotes)+10):\n qs.update_cell(index+2, 1, '')\n qs.update_cell(index+2, 2, '')\n\n for index, quote_obj in enumerate(quotes):\n qs.update_cell(index+2, 1, index+1)\n qs.update_cell(index+2, 2, quote_obj.quote)", "def reloadData(self):\n self.dto.readFromData()\n print(\"Record reloaded.\")", "def update(self, stock_record):\n self._records[stock_record.symbol] = stock_record", "def SetKind(self, new_kind):\r\n\r\n self.kind = new_kind", "def update_quote_db_from_spreadsheet(self, db_session):\n spreadsheet_name, web_view_link = self.spreadsheets['quotes']\n gc = gspread.authorize(self.credentials)\n sheet = gc.open(spreadsheet_name)\n qs = sheet.worksheet('Quotes')\n cell_location = [2, 2]\n quotes_list = []\n while True:\n if bool(qs.cell(*cell_location).value) is not False:\n quotes_list.append(db.Quote(quote=qs.cell(*cell_location).value))\n cell_location[0] += 1\n else:\n break\n\n db_session.execute(\n \"DELETE FROM QUOTES;\"\n )\n db_session.add_all(quotes_list)", "def update_original_data(self):\n pass", "def set_magic_quotes_runtime():\n raise NotImplementedError()", "def retrieve_company_data(self):\n self.set_stock_sym_append_str('')\n self.set_stock_retrieval_type('all') #'all', watcher\n self.load_stock_symbol_fr_file()", "def updatePrice(self, isinkey, field, data, qtype):\r\n isin = isinkey[0:12]\r\n bond = regsToBondName[isin]\r\n if qtype == BloombergQuery.BID:\r\n # 1/ WE CACHE THE OLD PRICE\r\n self.updateCell(bond, 'OLDBID', self.df.at[bond, 'BID'])\r\n self.updateCell(bond, 'OLDASK', self.df.at[bond, 'ASK'])\r\n # 2/ WE CHECK IF PRICE CHANGED\r\n if bond in self.rfbonds:\r\n self.blptsAnalytics.get(isin + '@CBBT' + ' Corp', self.bbgPriceRFQuery)\r\n else:\r\n self.blptsPriceOnly.get(isin + BBGHand + ' Corp', self.bbgPriceOnlyQuery)\r\n elif qtype == BloombergQuery.PRICEONLY:\r\n data = data.astype(float)\r\n # for item, value in data.iteritems():\r\n # self.updateCell(bond,bbgToBdmDic[item],value)\r\n self.lock.acquire()\r\n for item, value in data.iteritems():\r\n self.df.at[bond, bbgToBdmDic[item]] = value\r\n self.lock.release()\r\n if (data['BID'] != self.df.at[bond, 'OLDBID']) or (data['ASK'] != self.df.at[bond, 'OLDASK']):\r\n if bond in SPECIALBONDS:\r\n self.blptsAnalytics.get(isin + BBGHand + ' Corp', self.bbgPriceSpecialQuery)\r\n else:\r\n self.blptsAnalytics.get(isin + BBGHand + ' Corp', self.bbgPriceQuery)\r\n # try:\r\n # self.blptsAnalytics.get(isin + BBGHand + ' Corp', self.bbgPriceQuery)\r\n # except:\r\n # print 'error asking analytics for ' + bond\r\n else:\r\n # print 'Update event without a price change for ' + bond\r\n pub.sendMessage('BOND_PRICE_UPDATE', message=MessageContainer(self.df.loc[bond]))\r\n elif qtype == BloombergQuery.RTGACC:\r\n for item, value in data.iteritems():\r\n self.updateCell(bond,bbgToBdmDic[item],value)\r\n else:#'ANALYTICS' or 'FIRSTPASS'\r\n data = data.astype(float)\r\n # try:\r\n # for item, value in data.iteritems():\r\n # self.updateCell(bond,bbgToBdmDic[item],value)\r\n # except:\r\n # print data\r\n self.lock.acquire()\r\n try:\r\n 
for item, value in data.iteritems():\r\n self.df.at[bond, bbgToBdmDic[item]] = value\r\n except:\r\n self.lock.release()\r\n print data\r\n self.lock.release()\r\n if bond in SINKABLEBONDS:\r\n #self.bbgSinkRequest.fillRequest(isin + ' Corp', ['YAS_ZSPREAD'], strOverrideField='YAS_BOND_PX', strOverrideValue=data['BID'])\r\n self.bbgSinkRequest.fillRequest(isin + ' Corp', ['YAS_ZSPREAD'], strOverrideField='YAS_BOND_PX', strOverrideValue=self.df.at[bond, 'BID'])\r\n self.bbgSinkRequest.get()\r\n self.updateCell(bond, 'ZB', float(self.bbgSinkRequest.output.values[0,0]))\r\n #self.bbgSinkRequest.fillRequest(isin + ' Corp', ['YAS_ZSPREAD'], strOverrideField='YAS_BOND_PX', strOverrideValue=data['ASK'])\r\n # self.bbgSinkRequest.fillRequest(isin + ' Corp', ['YAS_ZSPREAD'], strOverrideField='YAS_BOND_PX', strOverrideValue=self.df.at[bond, 'ASK'])\r\n # self.bbgSinkRequest.get() \r\n # self.updateCell(bond, 'ZA', float(self.bbgSinkRequest.output.values[0,0]))\r\n if qtype == BloombergQuery.ANALYTICS:\r\n self.updateStaticAnalytics(bond)", "def reload(self, **params):\n if not self.bucket:\n raise ValueError('bucket property not assigned')\n\n if not self.key:\n raise ValueError('key property not assigned')\n\n dtype, value, context = self.bucket._client._fetch_datatype(\n self.bucket, self.key, **params)\n\n if not dtype == self.type_name:\n raise TypeError(\"Expected datatype {} but \"\n \"got datatype {}\".format(self.__class__,\n TYPES[dtype]))\n\n self.clear()\n self._context = context\n self._set_value(value)\n return self", "def _handle_market_data(self, response):\n if response['type'] != 'update':\n err_msg = f\"Got unexpected response: {response['type']}\"\n logging.info(err_msg)\n return\n events = response['events']\n # Only iterate over change events.\n for event in (e for e in events if e['type'] == 'change'):\n side = event['side']\n price = Decimal(event['price'])\n quantity = Decimal(event['remaining'])\n quote = Quote(price=price, quantity=quantity)\n if side == 'bid':\n self.exchange_state.order_book().bids().set_quote(quote)\n elif side == 'ask':\n self.exchange_state.order_book().asks().set_quote(quote)\n else:\n raise Exception(\"Unexpected update side: \" + side)\n return True", "def set_bpq_kind(self, bpq_kind):\n if not ((bpq_kind == self.BPQ_BLOCK_KIND_QUERY) or\n (bpq_kind == self.BPQ_BLOCK_KIND_RESPONSE) or\n (bpq_kind == self.BPQ_BLOCK_KIND_RESPONSE_DO_NOT_CACHE_FRAG) or\n (bpq_kind == self.BPQ_BLOCK_KIND_PUBLISH)):\n raise ValueError\n \n self.bpq_kind = bpq_kind\n return", "def en_quote_as_entity(self):\n pass", "def test_random_programming_quote(self):\n quote = Quote().print_programming_quote()\n self.assertTrue(type(quote) == str)", "def save_quote():\n if not is_valid_request(request.form, [\"quote_id\", \"image_id\", \"quote\", \"author\"]):\n return jsonify({\"error\": \"Could not save quote, due to technical reasons\"})\n quote_id = request.form[\"quote_id\"]\n image_id = request.form[\"image_id\"]\n quote = request.form[\"quote\"]\n author = request.form[\"author\"]\n\n check_uniqueness = (\n Quote.query.filter_by(user=current_user)\n .filter_by(quote_id=quote_id)\n .filter_by(image_id=image_id)\n .count()\n )\n\n if check_uniqueness == 0:\n quote = Quote(\n quote_id=quote_id,\n image_id=image_id,\n quote=quote,\n author=author,\n user=current_user,\n )\n db.session.add(quote)\n db.session.commit()\n return jsonify({\"succes\": \"Quote saved\"})\n else:\n return jsonify({\"error\": \"Quote already saved\"})" ]
[ "0.61072516", "0.60456073", "0.5505138", "0.5404351", "0.5160823", "0.51101786", "0.50316006", "0.5006308", "0.49826854", "0.4947969", "0.4940617", "0.49285766", "0.492396", "0.49115217", "0.49072197", "0.49046135", "0.48957124", "0.48728767", "0.48653087", "0.48302495", "0.4812693", "0.47949508", "0.47757488", "0.476466", "0.47574916", "0.4754841", "0.47228047", "0.47096878", "0.47088462", "0.47032508" ]
0.8109521
0
Return a list of completion strings. Simple completion based on python-like identifiers and whitespace.
def get_completions(self, info):
    items = []
    if (info.line.strip().startswith(('import ', 'from ')) and
            info.is_python_like):
        items += module_completion(info.line, [info.filename])
    elif info.obj:
        base = info.obj
        tokens = set(info.split_words(-1))
        items = [item for item in tokens
                 if item.startswith(base) and len(item) > len(base)]
        if '.' in base:
            start = base.rfind('.') + 1
        else:
            start = 0
        items = [i[start:len(base)] + i[len(base):].split('.')[0]
                 for i in items]
    # get path completions
    # get last word back to a space or a quote character
    match = re.search('''[ "\']([\w\.\\\\/]+)\Z''', info.line)
    if match:
        items += _complete_path(match.groups()[0])
    return list(sorted(items))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def completenames(self, text, line, begidx, endidx):\n command = text\n if self.case_insensitive:\n command = text.lower()\n\n # Call super class method. Need to do it this way for Python 2 and 3 compatibility\n cmd_completion = cmd.Cmd.completenames(self, command)\n\n # If we are completing the initial command name and get exactly 1 result and are at end of line, add a space\n if begidx == 0 and len(cmd_completion) == 1 and endidx == len(line):\n cmd_completion[0] += ' '\n\n return cmd_completion", "def get_completions(self, line):\n script = jedi.Interpreter(line, [self.interpreter.locals])\n return [comp.name for comp in script.completions()]", "def autocomplete_expander_name(n):\n n = n.strip().upper()\n for name in EXPANDERS_ORDERED:\n if name.startswith(n):\n return name\n if consts.VERBOSE:\n print('ERROR: Bad expander name %s'%n)\n return 'LIST'", "def complete(self, text: str, word: str) -> List[str]:\n prefix = word.lower()\n result = [prop.name for prop in self.context.debug_info.properties\n if prop.name.lower().startswith(prefix)]\n\n # If the users didn't ask for a special property, don't suggest special\n # properties, as they are usually just noise for them.\n if not prefix.startswith('['):\n result = [n for n in result if not n.startswith('[')]\n\n return result", "def Complete():\n\n # TODO(iancottrell): support auto complete of more than just the command\n # try to parse the command line using parser\n print(' '.join(command.name for command in cr.Command.Plugins()))", "def raw_command_completer(self, text, line, start_index, end_index):\n return [command for command in self.suggested_commands() if command.startswith(text)]", "def get_completions(self, info):\r\n pass", "def quick_completer(cmd, completions):\n if isinstance(completions, basestring):\n \n completions = completions.split()\n def do_complete(self,event):\n return completions\n \n ip.set_hook('complete_command',do_complete, str_key = cmd)", "def emit_completion(tool):\n\n cases = \"\"\n for sname in tool[\"snames\"]:\n cases += SNAMES.replace(\"${sname}\", sname).replace(\n \"${opts}\", \" \".join(tool[\"subs\"][sname][\"opts\"])\n )\n\n compl = (\n SCRIPT.replace(\"${tname}\", tool[\"name\"])\n .replace(\"${snames}\", \" \".join(tool[\"snames\"]))\n .replace(\"${subs}\", cases)\n )\n\n return compl", "def cli_completions(self) -> str:\n completions = []\n for cmd_name in self.cli_parser.cmd_names:\n completions.append(cmd_name)\n for plugin_name in self.name_to_plugin_class:\n completions.append(plugin_name)\n\n return \",\".join(completions)", "def vcs_completer(commands, event):\n\n\n cmd_param = event.line.split()\n if event.line.endswith(' '):\n cmd_param.append('')\n\n if cmd_param[0] == 'sudo':\n cmd_param = cmd_param[1:]\n\n if len(cmd_param) == 2 or 'help' in cmd_param:\n return commands.split()\n\n return ip.Completer.file_matches(event.symbol)", "def complete_setup_pocs(self, text, line, begidx, endidx):\n names = ['all'] + hardware.get_all_names()\n return [name for name in names if name.startswith(text)]", "def completion(s):\n if len(s) >= 1 and s[0] == 'h':\n return ('hello', 'hello there')\n return None", "def completions(source, line, column, path):\n script = jedi.api.Script(\n source = source,\n line = line + 1,\n column = column,\n path = path\n )\n\n completions = list()\n\n try:\n for completion in script.completions():\n completions.append({\n \"name\": completion.name,\n \"description\": completion.description,\n \"type\":completion.type\n })\n\n return completions\n except:\n 
return []", "def getAutoCompleteList(self, rawCmd='', *args, **kwds):\n try:\n actKey = rawCmd[-1] #Was it activated by a '/', '.' or ' ' ?\n cmd = re.sub('#.*', '', rawCmd) # remove comments\n cmd = string.strip(cmd)\n if not cmd: return None \n \n # get lhs description\n (lhsDesc, remaining) = self.cmd.GetNextTerm( cmd ) \n \n lst = []\n \n #Get contents from the root\n if actKey == '/':\n if hasattr(self.cmd.root, 'GetContents'):\n lst = []\n for i in self.cmd.root.GetContents():\n lst.append(i[0])\n \n #Try different options\n elif actKey == '.':\n myDesc = string.split(cmd, ' ')[-1][:-1] \n if myDesc[0] == '/': lhsObj = self.cmd.GetObject(self.cmd.root, myDesc[1:])\n else: lhsObj = self.cmd.GetObject(self.cmd.currentObj, myDesc)\n \n #Object with get contents attr\n if hasattr(lhsObj, 'GetContents'):\n lst = []\n for i in lhsObj.GetContents():\n lst.append(i[0])\n \n #If it is a thermo provider, return available prop pkgs\n elif myDesc in self.cmd.thermoAdmin.GetAvThermoProviderNames():\n thAd = self.cmd.thermoAdmin\n lst = thAd.GetAvPropPkgNames(myDesc)\n \n #If a folder with unit ops, then retun av u ops\n elif myDesc in unitop.__all__:\n uop = guicmd.CommandInterface.__dict__.get(myDesc, None)\n if hasattr(uop, 'VALID_UNIT_OPERATIONS'):\n lst = uop.VALID_UNIT_OPERATIONS\n \n #Is it a command?\n elif guicmd.CommandInterface.commands.has_key(lhsDesc):\n cmdActOnObj = ('cd', 'view', 'delete', 'dir', 'valueOf')\n lst = []\n if lhsDesc == 'units':\n if actKey == ' ' and remaining == '':\n lst = self.cmd.units.GetSetNames()\n elif lhsDesc in cmdActOnObj:\n if actKey == ' ' and remaining == '':\n lst = ['..', '/']\n if hasattr(self.cmd.currentObj, 'GetContents'):\n for i in self.cmd.currentObj.GetContents():\n lst.append(i[0])\n elif lhsDesc == 'language':\n if actKey == ' ' and remaining == '':\n dct = guicmd.CommandInterface.MessageHandler.GetSupportedLanguages()\n #dct['languages'] should have the main languages supported\n lst = list(dct['languages'])\n \n lst.sort()\n return lst\n \n except:\n return []", "def dslq_completers(self, event):\n # print(dir(event), event)\n\n # FIXME only first line gets the autocomplete!\n if event.line.startswith(\"%%\"):\n event.line = event.line[1:] # reduce cell symbol (double %) to line symbol\n for command in [\"%dslloopdf\", \"%dsldf\", \"%dslloop\", \"%dsl\"]:\n if command in event.line: # first match will return results\n doc = Document(event.line.replace(command, \"\"))\n c = CleverCompleter()\n res = c.get_completions(doc, None)\n # print(res)\n return [x.text for x in res]", "def complete_help(self, arg_scopes):\n c_help = _lookup_in_scopes('conpletion-text', arg_scopes)\n if c_help:\n return c_help\n c_help = _lookup_in_scopes('syntax-help', arg_scopes)\n if c_help:\n return c_help\n c_help = _lookup_in_scopes('all-help', arg_scopes)\n if c_help:\n return '!' 
+ c_help\n c_help = _lookup_in_scopes('short-help', arg_scopes)\n if c_help:\n return c_help\n return ' <help missing> %s' % _lookup_in_scopes('self', arg_scopes)", "def autocomplete():\n query = '' if request.args.get('query') is None else request.args.get('query')\n\n prefixed_words = []\n close_words = []\n for f in app.preprocessed.words:\n lowered = f.lower()\n if lowered.startswith(query) and lowered != query:\n prefixed_words.append(f)\n elif levenshtein(query, lowered) <= 1:\n close_words.append(f)\n\n result = {\n 'success': True,\n 'data': {\n 'suggestions': prefixed_words + close_words\n }\n }\n return jsonify(result)", "def _shell_command_complete(text, line, begidx, endidx):\n\n # Purposely don't match any executable containing wildcards\n wildcards = ['*', '?']\n for wildcard in wildcards:\n if wildcard in text:\n return []\n\n # Get a list of every directory in the PATH environment variable and ignore symbolic links\n paths = [p for p in os.getenv('PATH').split(os.path.pathsep) if not os.path.islink(p)]\n\n # Find every executable file in the PATH that matches the pattern\n exes = []\n for path in paths:\n full_path = os.path.join(path, text)\n matches = [f for f in glob.glob(full_path + '*') if os.path.isfile(f) and os.access(f, os.X_OK)]\n\n for match in matches:\n exes.append(os.path.basename(match))\n\n # If there is a single completion and we are at end of the line, then add a space at the end for convenience\n if len(exes) == 1 and endidx == len(line):\n exes[0] += ' '\n\n # If there are multiple completions, then sort them alphabetically\n return sorted(exes)", "def prepareAutoComplete(editor, text, charPos, lineStartCharPos,\r\n wikiDocument, settings):\r\n return []", "def _get_definition_completions(self, original_text, remaining_text, operation):\n param_words = remaining_text.split(\".\")\n\n # Only two words parameter completion are supported\n if len(param_words) != 2:\n return []\n\n param_name, sub_name = param_words\n if param_name not in operation.operation.params:\n return []\n\n param_object = operation.operation.params[param_name]\n param_schema = param_object.param_spec.get(\"schema\")\n if not param_schema:\n return []\n\n param_ref = param_schema.get(\"$ref\")\n if not param_ref:\n return []\n\n definition_name = param_ref.split('/')[-1]\n definition = self.definitions.get(definition_name)\n\n if not definition:\n return []\n\n return self._get_completion(\n original_text=original_text,\n remaining_text=sub_name,\n options=dir(definition())\n )", "def complete(self, code: str, pos: int) -> List[str]:\n c = self.jcompleter\n jres = c.complete(code, pos)\n return list(_scala_seq_to_py(jres.candidates()))", "async def func_completions(self, root):\n words = set()\n funcs = self.functions.copy()\n funcs.update(self.ast_functions)\n for name in funcs.keys():\n if name.lower().startswith(root):\n words.add(name)\n return words", "def main():\r\n # Sample implementation of the autocomplete API\r\n query = lambda prefix: [d for d in database if d.startswith(prefix)][:5]\r\n\r\n print(\"test case 1: assigned list\")\r\n database = sorted([\r\n \"abracadara\", \"al\", \"alice\", \"alicia\", \"allen\", \"alter\", \"altercation\", \"bob\", \"eve\", \"evening\", \"event\",\r\n \"eventually\", \"mallory\"\r\n ])\r\n assert extract(query) == database\r\n print(\"Passed! 
Evolution: Pichu\")\r\n print(\"\\r\")\r\n\r\n print(\"test case 2: lots of 'username + a-z'\")\r\n database = ['usernamea', 'usernameb', 'usernamec', 'usernamed', 'usernamee', 'usernamef', 'usernameg', 'usernameh', 'usernamei', 'usernamej', 'usernamek', 'usernamel', 'usernamem', 'usernamen', 'usernameo', 'usernamep', 'usernameq', 'usernamer', 'usernames', 'usernamet', 'usernameu', 'usernamev', 'usernamew', 'usernamex', 'usernamey', 'usernamez']\r\n query = lambda prefix: [d for d in database if d.startswith(prefix)][:5]\r\n assert extract(query) == database\r\n print(\"Passed! Evolution: Pikachu\")\r\n print(\"\\r\")\r\n\r\n print(\"test case 3: empty\")\r\n database = sorted([\r\n ])\r\n assert extract(query) == database\r\n print(\"Passed! Evolution: Raichu\")", "def complete_opt_display(self, text, *_):\n return [t + \" \" for t in DISPLAYS if t.startswith(text)]", "def _get_completion(self, original_text, remaining_text, options):\n if self.should_hide_completions(original_text=original_text,\n remaining_text=remaining_text,\n allowed_suffixes=(\" \", \".\")):\n return []\n\n return [(option, len(remaining_text)) for option in options\n if option.startswith(remaining_text) and not option.startswith(\"_\")]", "def autocomplete(self): # pylint: disable-msg=R0914\n # Don't complete if user hasn't sourced bash_completion file.\n # This is found in django-trunk/extras/django_bash_completion\n if 'DJANGO_AUTO_COMPLETE' not in os.environ:\n return\n\n cwords = os.environ['COMP_WORDS'].split()[1:]\n cword = int(os.environ['COMP_CWORD'])\n\n try:\n curr = cwords[cword - 1]\n except IndexError:\n curr = ''\n\n subcommands = get_commands().keys() + ['help']\n options = [('--help', None)]\n\n # subcommand\n if cword == 1:\n debug_text = ' '.join(sorted(filter(lambda x: x.startswith(curr),\n subcommands)))\n print(debug_text)\n # subcommand options\n # special case: the 'help' subcommand has no options\n elif cwords[0] in subcommands and cwords[0] != 'help':\n subcommand_cls = self.fetch_command(cwords[0])\n # special case: add the names of installed apps to options\n if cwords[0] in ('dumpdata', 'sql', 'sqlall', 'sqlclear',\n 'sqlcustom', 'sqlindexes', 'sqlsequencereset', 'test'):\n try:\n database = Database()\n # Get the last part of the dotted path as the app name.\n options += [(a.split('.')[-1], 0) for \\\n a in database.get_app_list()]\n except ImportError:\n # Fail silently if DJANGO_SETTINGS_MODULE isn't set. 
The\n # user will find out once they execute the command.\n pass\n options += [(s_opt.get_opt_string(), s_opt.nargs) for s_opt in\n subcommand_cls.option_list]\n # filter out previously specified options from available options\n prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]\n\n # Original Python 2 version\n #options = filter(lambda (x, v): x not in prev_opts, options)\n # Python 3 version?\n #options = filter(lambda x_v: x_v[0] not in prev_opts, options)\n options = [opt for opt in options if opt[0] not in prev_opts]\n\n # filter options by current input\n options = sorted([(k, v) for k, v in \\\n options if k.startswith(curr)])\n for option in options:\n opt_label = option[0]\n # append '=' to options which require args\n if option[1]:\n opt_label += '='\n print(opt_label)\n sys.exit(1)", "def load_ipython_custom_completers(ipython):\n\n def dslq_completers(self, event):\n \"\"\" This should return a list of strings with possible completions.\n\n Note that all the included strings that don't start with event.symbol\n are removed, in order to not confuse readline.\n\n eg Typing %%apt foo then hitting tab would yield an event like so: namespace(command='%%apt', line='%%apt foo', symbol='foo', text_until_cursor='%%apt foo')\n\n https://stackoverflow.com/questions/36479197/ipython-custom-tab-completion-for-user-magic-function\n\n > https://github.com/ipython/ipython/issues/11878\n \"\"\"\n # print(dir(event), event)\n\n # FIXME only first line gets the autocomplete!\n if event.line.startswith(\"%%\"):\n event.line = event.line[1:] # reduce cell symbol (double %) to line symbol\n for command in [\"%dslloopdf\", \"%dsldf\", \"%dslloop\", \"%dsl\"]:\n if command in event.line: # first match will return results\n doc = Document(event.line.replace(command, \"\"))\n c = CleverCompleter()\n res = c.get_completions(doc, None)\n # print(res)\n return [x.text for x in res]\n \n def dsldocs_completers(self, event):\n \"\"\" \n Completer for dsldocs command (= describe)\n \"\"\"\n # print(dir(event), event)\n command = \"%dsldocs\"\n if event.line.startswith(command):\n doc = Document(event.line.replace(command, \".docs\"))\n c = CleverCompleter()\n res = c.get_completions(doc, None)\n # print(res)\n return [x.text for x in res] \n\n \n # loader\n\n for command in [\"%dslloop_to_dataframe\", \"%dsl_to_dataframe\", \"%dslloop\", \"%dsl\"]:\n ipython.set_hook('complete_command', dslq_completers, re_key = command)\n ipython.set_hook('complete_command', dslq_completers, re_key = \"%\" + command)\n\n ipython.set_hook('complete_command', dsldocs_completers, re_key = \"%dsldocs\")", "def moduleCompletion(line):\n def tryImport(mod, only_modules=False):\n def isImportable(module, attr):\n if only_modules:\n return inspect.ismodule(getattr(module, attr))\n else:\n return not(attr[:2] == '__' and attr[-2:] == '__')\n try:\n m = __import__(mod)\n except:\n return []\n mods = mod.split('.')\n for module in mods[1:]:\n m = getattr(m,module)\n if (not hasattr(m, '__file__')) or (not only_modules) or\\\n (hasattr(m, '__file__') and '__init__' in m.__file__):\n completion_list = [attr for attr in dir(m) if isImportable(m, attr)]\n completion_list.extend(getattr(m,'__all__',[]))\n if hasattr(m, '__file__') and '__init__' in m.__file__:\n completion_list.extend(moduleList(os.path.dirname(m.__file__)))\n completion_list = list(set(completion_list))\n if '__init__' in completion_list:\n completion_list.remove('__init__')\n return completion_list\n\n words = line.split(' ')\n if len(words) == 3 and words[0] == 
'from':\n return ['import ']\n if len(words) < 3 and (words[0] in ['import','from']) :\n if len(words) == 1:\n return getRootModules()\n mod = words[1].split('.')\n if len(mod) < 2:\n return getRootModules()\n completion_list = tryImport('.'.join(mod[:-1]), True)\n completion_list = ['.'.join(mod[:-1] + [el]) for el in completion_list]\n return completion_list\n if len(words) >= 3 and words[0] == 'from':\n mod = words[1]\n return tryImport(mod)", "def pymel_python_completer(self, event):\n import pymel.core as pm\n\n # print \"python_matches\"\n text = event.symbol\n # print repr(text)\n # Another option, seems to work great. Catches things like ''.<tab>\n m = PYTHON_TOKEN_RE.match(text)\n\n if not m:\n raise TryNext\n\n expr, attr = m.group(1, 3)\n # print type(self.Completer), dir(self.Completer)\n # print self.Completer.namespace\n # print self.Completer.global_namespace\n try:\n # print \"first\"\n obj = eval(expr, self.Completer.namespace)\n except Exception:\n try:\n # print \"second\"\n obj = eval(expr, self.Completer.global_namespace)\n except Exception:\n raise TryNext\n # print \"complete\"\n if isinstance(obj, (pm.nt.DependNode, pm.Attribute)):\n # print \"isinstance\"\n node = str(obj)\n long_attrs = api_listAttr(node)\n short_attrs = api_listAttr(node, shortNames=1)\n\n matches = []\n matches = self.Completer.python_matches(text)\n # print \"here\"\n # if node is a plug ( 'persp.t' ), the first result will be the passed plug\n if '.' in node:\n attrs = long_attrs[1:] + short_attrs[1:]\n else:\n attrs = long_attrs + short_attrs\n # print \"returning\"\n matches += [expr + '.' + at for at in attrs]\n #import colorize\n #matches = [ colorize.colorize(x,'magenta') for x in matches ]\n return matches\n\n raise TryNext" ]
[ "0.6619886", "0.66005045", "0.6515913", "0.63659024", "0.6294996", "0.6283689", "0.62707794", "0.6202358", "0.6130983", "0.61280894", "0.6114059", "0.6094237", "0.6084692", "0.6065162", "0.6063104", "0.6040103", "0.6009062", "0.5986601", "0.59493244", "0.59490955", "0.59415007", "0.59229183", "0.59142405", "0.59011257", "0.58915627", "0.58791196", "0.5865413", "0.58491975", "0.58335096", "0.58220196" ]
0.6904953
0
Find the definition for an object within a set of source code. This is used to find the path of python-like modules (e.g. cython and enaml) for a goto definition.
def get_definition(self, info):
    token = info.obj
    lines = info.lines
    source_code = info.source_code
    filename = info.filename

    line_nr = None
    if '.' in token:
        token = token.split('.')[-1]

    line_nr = get_definition_with_regex(source_code, token,
                                        len(lines))
    if line_nr is None:
        return
    line = info.line
    exts = python_like_exts()
    if not osp.splitext(filename)[-1] in exts:
        return filename, line_nr
    if line.startswith('import ') or line.startswith('from '):
        alt_path = osp.dirname(filename)
        source_file = python_like_mod_finder(line, alt_path=alt_path,
                                             stop_token=token)
        if (not source_file or
                not osp.splitext(source_file)[-1] in exts):
            line_nr = get_definition_with_regex(source_code, token,
                                                line_nr)
            return filename, line_nr
        mod_name = osp.basename(source_file).split('.')[0]
        if mod_name == token or mod_name == '__init__':
            return source_file, 1
        else:
            with open(filename, 'rb') as fid:
                code = fid.read()
            code = encoding.decode(code)[0]
            line_nr = get_definition_with_regex(code, token)

    return filename, line_nr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def findsource(object):\r\n file = getsourcefile(object)\r\n if not file:\r\n raise IOError('source code not available')\r\n module = getmodule(object, file)\r\n if module:\r\n lines = linecache.getlines(file, module.__dict__)\r\n else:\r\n lines = linecache.getlines(file)\r\n if not lines:\r\n raise IOError('could not get source code')\r\n\r\n if ismodule(object):\r\n return lines, 0\r\n\r\n if isclass(object):\r\n name = object.__name__\r\n pat = re.compile(r'^(\\s*)class\\s*' + name + r'\\b')\r\n # make some effort to find the best matching class definition:\r\n # use the one with the least indentation, which is the one\r\n # that's most probably not inside a function definition.\r\n candidates = []\r\n for i in range(len(lines)):\r\n match = pat.match(lines[i])\r\n if match:\r\n # if it's at toplevel, it's already the best one\r\n if lines[i][0] == 'c':\r\n return lines, i\r\n # else add whitespace to candidate list\r\n candidates.append((match.group(1), i))\r\n if candidates:\r\n # this will sort by whitespace, and by line number,\r\n # less whitespace first\r\n candidates.sort()\r\n return lines, candidates[0][1]\r\n else:\r\n raise IOError('could not find class definition')\r\n\r\n if ismethod(object):\r\n object = object.im_func\r\n if isfunction(object):\r\n object = object.func_code\r\n if istraceback(object):\r\n object = object.tb_frame\r\n if isframe(object):\r\n object = object.f_code\r\n if iscode(object):\r\n if not hasattr(object, 'co_firstlineno'):\r\n raise IOError('could not find function definition')\r\n lnum = object.co_firstlineno - 1\r\n pat = re.compile(r'^(\\s*def\\s)|(.*(?<!\\w)lambda(:|\\s))|^(\\s*@)')\r\n while lnum > 0:\r\n if pat.match(lines[lnum]): break\r\n lnum = lnum - 1\r\n return lines, lnum\r\n raise IOError('could not find code object')", "def findsource(obj):\n filename = inspect.getsourcefile(obj)\n if filename:\n linecache.checkcache(filename)\n return inspect.findsource(obj)", "def get_definition_with_regex(source, token, start_line=-1):\r\n if not token:\r\n return None\r\n if DEBUG_EDITOR:\r\n t0 = time.time()\r\n patterns = [ # python / cython keyword definitions\r\n '^c?import.*\\W{0}{1}',\r\n 'from.*\\W{0}\\W.*c?import ',\r\n 'from .* c?import.*\\W{0}{1}',\r\n 'class\\s*{0}{1}',\r\n 'c?p?def[^=]*\\W{0}{1}',\r\n 'cdef.*\\[.*\\].*\\W{0}{1}',\r\n # enaml keyword definitions\r\n 'enamldef.*\\W{0}{1}',\r\n 'attr.*\\W{0}{1}',\r\n 'event.*\\W{0}{1}',\r\n 'id\\s*:.*\\W{0}{1}']\r\n\r\n matches = get_matches(patterns, source, token, start_line)\r\n\r\n if not matches:\r\n patterns = ['.*\\Wself.{0}{1}[^=!<>]*=[^=]',\r\n '.*\\W{0}{1}[^=!<>]*=[^=]',\r\n 'self.{0}{1}[^=!<>]*=[^=]',\r\n '{0}{1}[^=!<>]*=[^=]']\r\n matches = get_matches(patterns, source, token, start_line)\r\n\r\n # find the one closest to the start line (prefer before the start line)\r\n if matches:\r\n min_dist = len(source.splitlines())\r\n best_ind = 0\r\n for match in matches:\r\n dist = abs(start_line - match)\r\n if match <= start_line or not best_ind:\r\n if dist < min_dist:\r\n min_dist = dist\r\n best_ind = match\r\n if matches:\r\n if DEBUG_EDITOR:\r\n log_dt(LOG_FILENAME, 'regex definition match', t0)\r\n return best_ind\r\n else:\r\n if DEBUG_EDITOR:\r\n log_dt(LOG_FILENAME, 'regex definition failed match', t0)\r\n return None", "def which(object):\n object_type = type(object)\n if object_type is types.ModuleType:\n if hasattr(object, '__file__'):\n print 'Module from', object.__file__\n return (object.__file__, 1)\n else:\n print 'Built-in module.'\n elif object_type is 
types.ClassType:\n if object.__module__ == '__main__':\n print 'Built-in class or class loaded from $PYTHONSTARTUP'\n else:\n print 'Class', object.__name__, 'from', \\\n sys.modules[object.__module__].__file__\n # Send you to the first line of the __init__ method\n return (sys.modules[object.__module__].__file__,\n object.__init__.im_func.func_code.co_firstlineno)\n elif object_type in (types.BuiltinFunctionType, types.BuiltinMethodType):\n print \"Built-in or extension function/method.\"\n elif object_type is types.FunctionType:\n print 'Function from', object.func_code.co_filename\n return (object.func_code.co_filename, object.func_code.co_firstlineno)\n elif object_type is types.MethodType:\n print 'Method of class', object.im_class.__name__, 'from',\n fname = sys.modules[object.im_class.__module__].__file__\n print fname\n return (fname, object.im_func.func_code.co_firstlineno)\n else:\n print \"argument is not a module or function.\"\n return None", "def getsourcelines(object):\n lines, lnum = findsource(object)\n\n if inspect.ismodule(object): return lines, 0\n else: return inspect.getblock(lines[lnum:]), lnum + 1", "def get_code_file_and_lines(obj, pyccel_folder, mod_name = None):\n if not pyccel_folder:\n pyccel_folder = os.getcwd()\n\n obj_parts = obj.split('.')\n if mod_name is None:\n idx = len(obj_parts)\n print(pyccel_folder, obj)\n filename = os.path.join(pyccel_folder, '/'.join(obj_parts[:idx])+'.py')\n while idx > 0 and not os.path.isfile(filename):\n idx -= 1\n filename = os.path.join(pyccel_folder, '/'.join(obj_parts[:idx])+'.py')\n assert idx != 0\n mod_name = '.'.join(obj_parts[:idx])\n obj_parts = obj_parts[idx:]\n\n mod = importlib.import_module(mod_name)\n filename = mod.__file__.split('/')\n file = os.path.relpath(mod.__file__, pyccel_folder)\n\n if obj_parts:\n # Get the object\n obj = mod\n for o in obj_parts:\n obj = getattr(obj, o)\n\n # If the object is a class property, get the underlying function\n obj = getattr(obj, 'fget', obj)\n\n source, start_line = inspect.getsourcelines(obj)\n length = len(source)\n return file, start_line, start_line+length-1\n else:\n # Module\n return file, 1, 1", "def find_definition(name, relative_to=None, importer=__import__):\n # Check parameters.\n if not (relative_to is None or\n isinstance(relative_to, types.ModuleType) or\n isinstance(relative_to, type) and\n issubclass(relative_to, Message)):\n raise TypeError(\n 'relative_to must be None, Message definition or module.'\n ' Found: %s' % relative_to)\n\n name_path = name.split('.')\n\n # Handle absolute path reference.\n if not name_path[0]:\n relative_to = None\n name_path = name_path[1:]\n\n def search_path():\n \"\"\"Performs a single iteration searching the path from relative_to.\n\n This is the function that searches up the path from a relative object.\n\n fully.qualified.object . 
relative.or.nested.Definition\n ---------------------------->\n ^\n |\n this part of search --+\n\n Returns:\n Message or Enum at the end of name_path, else None.\n \"\"\"\n next_part = relative_to\n for node in name_path:\n # Look for attribute first.\n attribute = getattr(next_part, node, None)\n\n if attribute is not None:\n next_part = attribute\n else:\n # If module, look for sub-module.\n if (next_part is None or\n isinstance(next_part, types.ModuleType)):\n if next_part is None:\n module_name = node\n else:\n module_name = '%s.%s' % (next_part.__name__, node)\n\n try:\n fromitem = module_name.split('.')[-1]\n next_part = importer(module_name, '', '',\n [str(fromitem)])\n except ImportError:\n return None\n else:\n return None\n\n if not isinstance(next_part, types.ModuleType):\n if not (isinstance(next_part, type) and\n issubclass(next_part, (Message, Enum))):\n return None\n\n return next_part\n\n while True:\n found = search_path()\n if isinstance(found, type) and issubclass(found, (Enum, Message)):\n return found\n else:\n # Find next relative_to to search against.\n #\n # fully.qualified.object . relative.or.nested.Definition\n # <---------------------\n # ^\n # |\n # does this part of search\n if relative_to is None:\n # Fully qualified search was done. Nothing found. Fail.\n raise DefinitionNotFoundError(\n 'Could not find definition for %s' % name)\n else:\n if isinstance(relative_to, types.ModuleType):\n # Find parent module.\n module_path = relative_to.__name__.split('.')[:-1]\n if not module_path:\n relative_to = None\n else:\n # Should not raise ImportError. If it does...\n # weird and unexpected. Propagate.\n relative_to = importer(\n '.'.join(module_path), '', '', [module_path[-1]])\n elif (isinstance(relative_to, type) and\n issubclass(relative_to, Message)):\n parent = relative_to.message_definition()\n if parent is None:\n last_module_name = relative_to.__module__.split(\n '.')[-1]\n relative_to = importer(\n relative_to.__module__, '', '', [last_module_name])\n else:\n relative_to = parent", "def GetDefinitions(filename,obj):\n file=open(filename)\n content=file.read().replace(\"\\t\",\" \")\n file.close\n pat=re.compile(obj +' \\{([\\S\\s]*?)\\}',re.DOTALL)\n finds=pat.findall(content)\n return finds", "def extract_code_objects(pe):\n script_res = _get_scripts_resource(pe)\n dump = _resource_dump(pe, script_res)\n return _get_co_from_dump(dump)", "def get_docs( mysource , basename ):\n import parser\n ast = parser.suite(mysource)\n return ModuleInfo(ast.totuple(1), basename)", "def _ofind(self,oname):\n\n # the @ in magics isn't really part of the name\n oname = oname.strip()\n if oname.startswith('@'):\n oname = oname[1:]\n\n # Namespaces to search in:\n user_ns = self.shell.user_ns\n user_config_ns = self.shell.user_config_ns\n internal_ns = self.shell.internal_ns\n builtin_ns = __builtin__.__dict__\n\n # Put them in a list. 
The order is important so that we find things in the\n # same order that Python finds them.\n namespaces = [ ('Interactive',user_ns),\n ('User-defined configuration',user_config_ns),\n ('IPython internal',internal_ns),\n ('Python builtin',builtin_ns)\n ]\n\n # initialize results to 'null'\n found = 0; obj = None; ospace = None; ds = None; ismagic = 0\n\n try:\n for nsname,ns in namespaces:\n try:\n obj = ns[oname]\n except KeyError:\n pass\n else:\n found = 1\n ospace = nsname\n ds = inspect.getdoc(obj)\n raise 'found it'\n except 'found it':\n pass\n\n # try to see if it's magic\n if not found:\n try:\n obj = eval('self.magic_'+oname)\n found = 1\n ospace = 'IPython internal'\n ismagic = 1\n ds = inspect.getdoc(obj)\n except:\n pass\n # Play some games to try and find info about dotted objects\n # and for things like {}.get? or ''.remove? to work\n if not found:\n try:\n self.tmp_obj = eval(oname,user_ns)\n found = 1\n except:\n try:\n self.tmp_obj = eval(oname,builtin_ns)\n found = 1\n except:\n pass\n if found:\n ds = inspect.getdoc(self.tmp_obj)\n ospace = 'Currently not defined in user session.'\n obj = self.tmp_obj\n del self.tmp_obj\n return found,obj,ospace,ds,ismagic", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def getsourcelines(object):\r\n lines, lnum = findsource(object)\r\n\r\n if ismodule(object): return lines, 0\r\n else: return getblock(lines[lnum:]), lnum + 1", "def lookup(obj):\n return(dir(obj))", "def loc():\n file_types = (\n ['Python', 'py', '#']\n )\n\n click.echo('Lines of code\\n-------------')\n\n click.echo(\"{0}: {1}\".format(file_types[0], count_locs(file_types[1],\n file_types[2])))\n\n return None", "def source_code(obj):\n print(inspect.getsource(obj))", "def lookup(obj):\n\n return (dir(obj))", "def info(obj=None):\n if obj is None:\n print (\"Python keywords:\")\n import keyword\n for kwname in keyword.kwlist:\n print (\" \", kwname)\n print(\"Built in objects:\")\n for bi_object_name in sorted(__builtins__.keys()):\n bi_object = __builtins__[bi_object_name]\n if callable(bi_object):\n if type(bi_object) is types.ClassType:\n print(\" {} (class)\".format(bi_object.__name__))\n elif type(bi_object) is types.FunctionType:\n print(\" {} (function)\".format(bi_object.__name__))\n elif hasattr(obj, \"__doc__\") and obj.__doc__ is not None:\n print (\"Documentation for %s :\\n\" % (obj.__name__))\n print (obj.__doc__)\n elif type(obj) is types.ModuleType:\n pprint(dir(obj))\n elif type(obj) is types.ClassType:\n pprint(dir(obj))\n elif type(obj) is types.InstanceType:\n pprint(dir(obj))\n pprint(dir(obj.__class__))\n return \"\"", "def findModule(name):", "def get_objects_rednode(obj):\n from redbaron import RedBaron\n # walk til the first 'locals'\n # Example __qualname__: 'TestClassNodeConv.test_get_datamodel.<locals>.T'\n parent = inspect.getmodule(obj)\n for name in obj.__class__.__qualname__.split('.'):\n if name == '<locals>':\n break\n parent = getattr(parent, name)\n\n try:\n # try to find the source code with traditional means by using inspect, this may faile as it requires class to be defined in a file (not true fro REPL or Notebook)\n # if fails use IPYTHON history\n try:\n parent_code = inspect.getsourcelines(parent)[0]\n\n # monkeypatch the inspect 
module to use 'parent code' as input for searching the class code (else it searches full file)\n with patch('inspect.linecache.getlines', MagicMock(return_value=parent_code)):\n source = textwrap.dedent(inspect.getsource(obj.__class__))\n\n red_list = RedBaron(source)\n return red_list[0]\n\n except TypeError:\n # try finding the class from local IPYTHON input history\n from IPython import get_ipython\n ipython = get_ipython()\n ipython.run_cell_magic(\"capture\", \"out_var\", \"%history\")\n out_var = str(ipython.ev('out_var'))\n\n # filter up to the last occurance of class def\n import re\n lines = str(out_var).splitlines()\n pat = re.compile(r'^(\\s*)class\\s*' + obj.__class__.__name__ + r'\\b')\n\n last_match = -1\n for i in range(len(lines)):\n match = pat.match(lines[i])\n if match:\n last_match = i\n\n if last_match == -1:\n raise Exception('Class was not found at all...')\n out_var = '\\n'.join(lines[last_match:])\n\n with tempfile.NamedTemporaryFile(mode='w+') as temp:\n temp.write(out_var)\n temp.flush()\n with patch('inspect.getfile', MagicMock(return_value=temp.name)):\n source = textwrap.dedent(inspect.getsource(obj.__class__))\n red_list = RedBaron(source)\n logger.warning(f'Found \"{obj.__class__.__name__}\" source from IPython history!')\n return red_list[0]\n except:\n # This is due to the Inspect needing to open a file...\n # could be a bit relaxed with https://github.com/uqfoundation/dill/issues?utf8=%E2%9C%93&q=getsource, but this only works in regular REPL, not Ipython nor Notebook...\n raise Exception(f'Could not fetch \"{obj.__class__}\" source code (also tried loading from IPython history).')", "def get_definition(self):\r\n # TODO: Should probably check that this is either a reference or\r\n # declaration prior to issuing the lookup.\r\n return conf.lib.clang_getCursorDefinition(self)", "def _get_def(self,oname,obj):\n\n # There used to be a lot of fancy code here, until I realized that the\n # proper way of calling formatargspec() is with a * in the args! Now\n # this function is trivial.\n try:\n return oname + inspect.formatargspec(*self.getargspec(obj)), 1\n except:\n return 'Could not get definition header for ' + `oname` , 0" ]
[ "0.6904011", "0.6001746", "0.59132284", "0.58806264", "0.5847208", "0.58444947", "0.5813577", "0.5789314", "0.567056", "0.5662456", "0.5642679", "0.56179917", "0.56179917", "0.56179917", "0.56179917", "0.56179917", "0.56179917", "0.56179917", "0.56179917", "0.56179917", "0.56132835", "0.55693746", "0.5518602", "0.5485015", "0.54429406", "0.5392587", "0.538709", "0.5338226", "0.5337433", "0.5333783" ]
0.639387
1
Locate a module path based on an import line in a pythonlike file. import_line is the line of source code containing the import, alt_path specifies an alternate base path for the module, and stop_token specifies the desired name to stop on. This is used to find the path to pythonlike modules (e.g. cython and enaml) for a goto definition.
def python_like_mod_finder(import_line, alt_path=None, stop_token=None):
    if stop_token and '.' in stop_token:
        stop_token = stop_token.split('.')[-1]
    tokens = re.split(r'\W', import_line)
    if tokens[0] in ['from', 'import']:
        # find the base location
        try:
            _, path, _ = imp.find_module(tokens[1])
        except ImportError:
            if alt_path:
                path = osp.join(alt_path, tokens[1])
            else:
                path = None
        if path:
            path = osp.realpath(path)
            if not tokens[1] == stop_token:
                for part in tokens[2:]:
                    if part in ['import', 'cimport', 'as']:
                        break
                    path = osp.join(path, part)
                    if part == stop_token:
                        break
            # from package import module
            if stop_token and not stop_token in path:
                for ext in python_like_exts():
                    fname = '%s%s' % (stop_token, ext)
                    if osp.exists(osp.join(path, fname)):
                        return osp.join(path, fname)
            # from module import name
            for ext in python_like_exts():
                fname = '%s%s' % (path, ext)
                if osp.exists(fname):
                    return fname
            # if it is a file, return it
            if osp.exists(path) and not osp.isdir(path):
                return path
            # default to the package file
            path = osp.join(path, '__init__.py')
            if osp.exists(path):
                return path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def moduleCompletion(line):\n def tryImport(mod, only_modules=False):\n def isImportable(module, attr):\n if only_modules:\n return inspect.ismodule(getattr(module, attr))\n else:\n return not(attr[:2] == '__' and attr[-2:] == '__')\n try:\n m = __import__(mod)\n except:\n return []\n mods = mod.split('.')\n for module in mods[1:]:\n m = getattr(m,module)\n if (not hasattr(m, '__file__')) or (not only_modules) or\\\n (hasattr(m, '__file__') and '__init__' in m.__file__):\n completion_list = [attr for attr in dir(m) if isImportable(m, attr)]\n completion_list.extend(getattr(m,'__all__',[]))\n if hasattr(m, '__file__') and '__init__' in m.__file__:\n completion_list.extend(moduleList(os.path.dirname(m.__file__)))\n completion_list = list(set(completion_list))\n if '__init__' in completion_list:\n completion_list.remove('__init__')\n return completion_list\n\n words = line.split(' ')\n if len(words) == 3 and words[0] == 'from':\n return ['import ']\n if len(words) < 3 and (words[0] in ['import','from']) :\n if len(words) == 1:\n return getRootModules()\n mod = words[1].split('.')\n if len(mod) < 2:\n return getRootModules()\n completion_list = tryImport('.'.join(mod[:-1]), True)\n completion_list = ['.'.join(mod[:-1] + [el]) for el in completion_list]\n return completion_list\n if len(words) >= 3 and words[0] == 'from':\n mod = words[1]\n return tryImport(mod)", "def _parse_line(line):\n if line.startswith('import '):\n line = line.replace('import ', '')\n return line", "def test_import_not_in_sys_path(Script):\n a = Script(path='module.py', line=5).goto_definitions()\n assert a[0].name == 'int'\n\n a = Script(path='module.py', line=6).goto_definitions()\n assert a[0].name == 'str'\n a = Script(path='module.py', line=7).goto_definitions()\n assert a[0].name == 'str'", "def main(path):\n try:\n print(\"Finding imports in '\" + path + \"':----------------------------------------------------------------------\")\n\n file = open(path)\n contents = file.read()\n wordArray = re.split(\" |\\n\", contents)\n\n currentList = list()\n nextPaths = list()\n skipWord = -1\n\n for wordNumb in range(len(wordArray)):\n word = wordArray[wordNumb]\n\n if wordNumb == skipWord:\n continue\n\n elif word == \"from\":\n item = wordArray[wordNumb + 1]\n if 'vespa.' in item:\n currentList.append(item)\n skipWord = wordNumb + 2\n\n elif word == \"import\":\n item = wordArray[wordNumb + 1]\n if 'vespa.' 
in item:\n currentList.append(item)\n\n currentList = set(currentList)\n for i in currentList:\n print(i)\n\n # print(\"Found imports in '\" + path + \"'\")\n # print(\"Finding paths for imports in '\" + path + \"':\")\n\n currentList2 = currentList.copy()\n currentList = list()\n\n for i in currentList2:\n if i in dependenciesNames:\n # print(i, \"already found\")\n pass\n\n else:\n dependenciesNames.append(i)\n\n try:\n fileInfo = importlib.machinery.PathFinder().find_spec(i)\n if fileInfo is None:\n fileInfo = importlib.util.find_spec(i)\n if fileInfo is None:\n origin = 'None'\n else:\n origin = fileInfo.origin\n else:\n origin = fileInfo.origin\n\n print(origin)\n dependenciesPaths.append(origin)\n currentList.append(origin)\n\n except AttributeError as e:\n print(\"Hit Exception: AttributeError\")\n print(e)\n print(i)\n print(importlib.machinery.PathFinder().find_spec(i))\n # print(red, \"Odd noneType import called \", i, \" in path \", path, end, sep='')\n\n\n# print(\"Found paths for imports in '\" + path + \"'\")\n\n\n for fileInfo in currentList:\n main(fileInfo)\n\n except Exception as e:\n print(e)", "def get_full_path_of_import(import_module_reference):\n f = inspect.getfile(import_module_reference)\n p = os.path.split(f)\n return p[0]", "def _get_name_level_relative_import_module(import_module: dict) -> Tuple:\n level = None\n name = None\n relative = False\n if import_module[\"type\"] == \"import\":\n # We start with import using only import keyword, it can be an import of the form:\n # import module\n # import package.module\n name = import_module[\"name\"]\n elif import_module[\"type\"] == \"from-import\":\n # Now we deal with from keyword like in:\n # from package import module\n # from module import func\n # from .. import module\n if import_module[\"module\"] is None:\n # This is the case for the following types of imports\n # from . import module (level 1)\n # from .. 
import module (level 2)\n name = import_module[\"name\"]\n relative = True\n else:\n # This is the case for the following types of imports\n # from .module import func (level 1)\n # from ..module import func (level 2)\n name = import_module[\"module\"]\n level = import_module[\"level\"]\n return name, level, relative", "def greedy_import_context(name: str, upwards: bool = False, level: int = 0) -> th.Tuple[th.Any, str]:\n module_hierarchy = name.split(\".\")\n imported_module = _NoValue\n for trial_index in range(\n 1 if upwards else len(module_hierarchy) - level,\n (len(module_hierarchy) + 1 - level) if upwards else -1,\n 1 if upwards else -1,\n ):\n try:\n imported_module = importer(\".\".join(module_hierarchy[:trial_index]))\n break\n except:\n pass\n return imported_module, \".\".join(module_hierarchy[trial_index:])", "def add_import_line(self, line: str) -> None:\n if line not in self._import_lines:\n self._import_lines.append(line)", "def handle_line(line: str, stmt_index: int):\n fn_name = f'repl_{stmt_index}'\n module_text = f\"\"\"\n import std\n fn {fn_name}() -> () {{\n {line}\n }}\n \"\"\"\n\n # For error reporting we use a helper that puts this into a fake filesystem\n # location.\n def make_fakefs_open():\n fs = fake_filesystem.FakeFilesystem()\n fs.CreateFile(FILENAME, module_text)\n return fake_filesystem.FakeFileOpen(fs)\n\n importer = import_helpers.Importer()\n\n while True:\n try:\n fake_module = parser.Parser(\n scanner.Scanner(FILENAME, module_text), fn_name).parse_module()\n except span.PositionalError as e:\n parser_helpers.pprint_positional_error(e, fs_open=make_fakefs_open())\n return\n\n # First attempt at type checking, we expect this may fail the first time\n # around and we'll substitute the real return type we observe.\n try:\n type_info = cpp_typecheck.check_module(fake_module, importer.cache,\n importer.additional_search_paths)\n except XlsTypeError as e:\n # We use nil as a placeholder, and swap it with the type that was expected\n # and retry once we determine what that should be.\n if e.rhs_type == concrete_type_mod.ConcreteType.NIL:\n module_text = module_text.replace(' -> ()', ' -> ' + str(e.lhs_type))\n continue\n # Any other errors are likely real type errors in the code and we should\n # report them.\n parser_helpers.pprint_positional_error(e, fs_open=make_fakefs_open())\n return\n\n # It type checked ok, and we can proceed.\n break\n\n # Interpret the line and print the result.\n # TODO(leary): 2020-06-20 No let bindings for the moment, just useful for\n # evaluating expressions -- could put them into the module scope as consts.\n interpreter = interpreter_mod.Interpreter(\n fake_module,\n type_info,\n importer.typecheck,\n import_cache=importer.cache,\n additional_search_paths=(),\n trace_all=False)\n result = interpreter.run_function(fn_name, args=(), symbolic_bindings=None)\n print(result)\n type_info.clear_type_info_refs_for_gc()\n return result", "def resolve_import(self, item):\n name = item.name\n # The last part in `from a.b.c import d` might be a symbol rather than a\n # module, so we try a.b.c and a.b.c.d as names.\n short_name = None\n if item.is_from and not item.is_star:\n if '.' 
in name.lstrip('.'):\n # The name is something like `a.b.c`, so strip off `.c`.\n rindex = name.rfind('.')\n else:\n # The name is something like `..c`, so strip off just `c`.\n rindex = name.rfind('.') + 1\n short_name = name[:rindex]\n\n if import_finder.is_builtin(name):\n filename = name + '.so'\n return Builtin(filename, name)\n\n filename, level = convert_to_path(name)\n if level:\n # This is a relative import; we need to resolve the filename\n # relative to the importing file path.\n filename = os.path.normpath(\n os.path.join(self.current_directory, filename))\n\n if not short_name:\n try_filename = True\n try_short_filename = False\n elif item.source:\n # If the import has a source path, we can use it to eliminate\n # filenames that don't match.\n source_filename, _ = os.path.splitext(item.source)\n dirname, basename = os.path.split(source_filename)\n if basename == \"__init__\":\n source_filename = dirname\n try_filename = source_filename.endswith(filename)\n try_short_filename = not try_filename\n else:\n try_filename = try_short_filename = True\n\n files = []\n if try_filename:\n files.append((name, filename))\n if try_short_filename:\n short_filename = os.path.dirname(filename)\n files.append((short_name, short_filename))\n\n for module_name, path in files:\n for fs in self.fs_path:\n f = self._find_file(fs, path)\n if not f or f == self.current_module.path:\n # We cannot import a file from itself.\n continue\n if item.is_relative():\n package_name = self.current_module.package_name\n if package_name is None:\n # Relative import in non-package\n raise ImportException(name)\n module_name = get_absolute_name(package_name, module_name)\n if isinstance(self.current_module, System):\n return System(f, module_name)\n return Local(f, module_name, fs)\n\n # If the module isn't found in the explicit pythonpath, see if python\n # itself resolved it.\n if item.source:\n prefix, ext = os.path.splitext(item.source)\n mod_name = name\n # We need to check for importing a symbol here too.\n if short_name:\n mod = prefix.replace(os.path.sep, '.')\n mod = utils.strip_suffix(mod, '.__init__')\n if not mod.endswith(name) and mod.endswith(short_name):\n mod_name = short_name\n\n if ext == '.pyc':\n pyfile = prefix + '.py'\n if os.path.exists(pyfile):\n return System(pyfile, mod_name)\n elif not ext:\n pyfile = os.path.join(prefix, \"__init__.py\")\n if os.path.exists(pyfile):\n return System(pyfile, mod_name)\n return System(item.source, mod_name)\n\n raise ImportException(name)", "def import_by_source(path: str):\n\n module = splitext(basename(path))[0]\n\n sys.path.append(dirname(path))\n\n spec = importlib.util.spec_from_file_location(module, path)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n\n sys.path.pop()\n\n return module", "def _find_head_package(\n self,\n source_package,\n target_module_partname,\n level=DEFAULT_IMPORT_LEVEL):\n self.msgin(4, \"find_head_package\", source_package, target_module_partname, level)\n\n #FIXME: Rename all local variable names to something sensible. No,\n #\"p_fqdn\" is not a sensible name.\n\n # If this target module is a submodule...\n if '.' 
in target_module_partname:\n target_module_headname, target_module_tailname = (\n target_module_partname.split('.', 1))\n # Else, this target module is a top-level module.\n else:\n target_module_headname = target_module_partname\n target_module_tailname = ''\n\n # If attempting both absolute and relative imports...\n if level == ABSOLUTE_OR_RELATIVE_IMPORT_LEVEL:\n if source_package:\n target_package_name = source_package.identifier + '.' + target_module_headname\n else:\n target_package_name = target_module_headname\n # Else if attempting only absolute imports...\n elif level == ABSOLUTE_IMPORT_LEVEL:\n target_package_name = target_module_headname\n\n # Absolute import, ignore the parent\n source_package = None\n # Else if attempting only relative imports...\n else:\n if source_package is None:\n self.msg(2, \"Relative import outside of package\")\n raise InvalidRelativeImportError(\n \"Relative import outside of package (name=%r, parent=%r, level=%r)\" % (\n target_module_partname, source_package, level))\n\n for i in range(level - 1):\n if '.' not in source_package.identifier:\n self.msg(2, \"Relative import outside of package\")\n raise InvalidRelativeImportError(\n \"Relative import outside of package (name=%r, parent=%r, level=%r)\" % (\n target_module_partname, source_package, level))\n\n p_fqdn = source_package.identifier.rsplit('.', 1)[0]\n new_parent = self.findNode(p_fqdn)\n if new_parent is None:\n #FIXME: Repetition detected. Exterminate. Exterminate.\n self.msg(2, \"Relative import outside of package\")\n raise InvalidRelativeImportError(\n \"Relative import outside of package (name=%r, parent=%r, level=%r)\" % (\n target_module_partname, source_package, level))\n\n assert new_parent is not source_package, (\n new_parent, source_package)\n source_package = new_parent\n\n if target_module_headname:\n target_package_name = (\n source_package.identifier + '.' + target_module_headname)\n else:\n target_package_name = source_package.identifier\n\n # Graph node of this target package.\n target_package = self._safe_import_module(\n target_module_headname, target_package_name, source_package)\n\n #FIXME: Why exactly is this necessary again? This doesn't quite seem\n #right but maybe it is. 
Shouldn't absolute imports only be performed if\n #the passed \"level\" is either \"ABSOLUTE_IMPORT_LEVEL\" or\n #\"ABSOLUTE_OR_RELATIVE_IMPORT_LEVEL\" -- or, more succinctly:\n #\n # if level < 1:\n\n # If this target package is *NOT* importable and a source package was\n # passed, attempt to import this target package as an absolute import.\n if target_package is None and source_package is not None:\n target_package_name = target_module_headname\n source_package = None\n\n # Graph node for the target package, again.\n target_package = self._safe_import_module(\n target_module_headname, target_package_name, source_package)\n\n # If this target package is importable, return this package.\n if target_package is not None:\n self.msgout(4, \"find_head_package ->\", (target_package, target_module_tailname))\n return target_package, target_module_tailname\n\n # Else, raise an exception.\n self.msgout(4, \"raise ImportError: No module named\", target_package_name)\n raise ImportError(\"No module named \" + target_package_name)", "def import_module(self, location, name):", "def find_import_before(doc, start_address, max_bytes=200):\n for adr in range(start_address, start_address - max_bytes, -1):\n lib = get_import_at(doc, adr)\n if lib:\n return lib\n return None", "def _analyse_stmt_ImportFrom(\n self, statement: ast.ImportFrom, *, next: CFNode\n ) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)", "def _locate(path: str) -> Any:\n if path == \"\":\n raise ImportError(\"Empty path\")\n from importlib import import_module\n from types import ModuleType\n\n parts = [part for part in path.split(\".\")]\n for part in parts:\n if not len(part):\n raise ValueError(\n f\"Error loading '{path}': invalid dotstring.\"\n + \"\\nRelative imports are not supported.\"\n )\n assert len(parts) > 0\n part0 = parts[0]\n try:\n obj = import_module(part0)\n except Exception as exc_import:\n raise ImportError(\n f\"Error loading '{path}':\\n{repr(exc_import)}\"\n + f\"\\nAre you sure that module '{part0}' is installed?\"\n ) from exc_import\n for m in range(1, len(parts)):\n part = parts[m]\n try:\n obj = getattr(obj, part)\n except AttributeError as exc_attr:\n parent_dotpath = \".\".join(parts[:m])\n if isinstance(obj, ModuleType):\n mod = \".\".join(parts[: m + 1])\n try:\n obj = import_module(mod)\n continue\n except ModuleNotFoundError as exc_import:\n raise ImportError(\n f\"Error loading '{path}':\\n{repr(exc_import)}\"\n + f\"\\nAre you sure that '{part}' is importable from module '{parent_dotpath}'?\"\n ) from exc_import\n except Exception as exc_import:\n raise ImportError(\n f\"Error loading '{path}':\\n{repr(exc_import)}\"\n ) from exc_import\n raise ImportError(\n f\"Error loading '{path}':\\n{repr(exc_attr)}\"\n + f\"\\nAre you sure that '{part}' is an attribute of '{parent_dotpath}'?\"\n ) from exc_attr\n return obj", "def gen_import(self) -> str:\n as_name = self.exported_parts[-1]\n if as_name == self.imported_name:\n import_line = 'from {} import {}'.format(self.imported_module,\n self.imported_name)\n else:\n import_line = 'from {} import {} as {}'.format(self.imported_module,\n self.imported_name,\n as_name)\n return import_line", "def imports(self):\n line = self.line.strip()\n if line.startswith('im'):\n if line.startswith('import') is False:\n return True\n elif line == '':\n return True", "def importOverride(name, glbls={}, lcls={}, fromlist=[], level=-1):\n module = None\n # First try the system __import__ first\n try:\n module = BUILTIN_IMPORT(name, glbls, lcls, 
fromlist, level)\n # You cannot log in this namespace, due to an infinite regression issue, so don't try\n # Although I am thinking that disabling the import override, logging, and re enabling it would work\n except ImportError as error:\n # Next we will try to import them as a *.cc\n # First we need to determine if it exists\n # Check the folders in CC_PATH\n for path in CC_PATH:\n # If the path exists\n if os.path.exists(path):\n # And the path/<module name>.cc exists\n if os.path.exists(os.path.join(path, name+'.cc')):\n # We will use the first one we find\n # No the magic happens, we will first create a temp file\n temp_file = tempfile.TemporaryFile()\n # Now we add the 'magic' to the top of the temp file\n temp_file.write(MAGIC)\n # Now open the file being imported\n module_file = open(os.path.join(path, name+'.cc'), 'r')\n # Read the module contents into the temp file\n temp_file.write(module_file.read())\n module_file.close()\n # Now rewind the temp file so it can be read from the beginning\n temp_file.seek(0)\n # Now import the module\n try:\n module = imp.load_module(name, temp_file, path, ('.cc', 'r', imp.PY_SOURCE))\n except Exception as exception:\n logError(sys.exc_info(), log.error, 'Error importing control code file %s.cc:' % name, MAGIC_LINENO)\n finally:\n temp_file.close()\n log.debug('Module %s loaded from %s using the special .cc import' % (name, path))\n # If module is still None, we didn't find it and we should raise the original error\n if not module:\n raise error\n return module", "def DynamicImport(import_path, alias=dict(), log=None):\n if import_path not in alias and ':' not in import_path:\n raise ValueError(\n 'import_path should be one of {} or '\n 'include \":\", e.g. \"locata_wrapper.utils.music:MUSIC\" : '\n '{}'.format(set(alias), import_path))\n if ':' not in import_path:\n import_path = alias[import_path]\n\n module_name, objname = import_path.split(':')\n try:\n m = importlib.import_module(module_name)\n except Exception as e: # NOQA\n log.error('Function specified by my_alg_name not found!')\n sys.exit(1)\n return getattr(m, objname)", "def _imports(graph: mapry.Graph, py: mapry.Py) -> str:\n # pylint: disable=too-many-branches\n # pylint: disable=too-many-statements\n stdlib_block = {'import typing'}\n\n third_party_block = set() # type: Set[str]\n\n if mapry.needs_type(a_type=graph, query=mapry.Path):\n if py.path_as == 'str':\n pass\n elif py.path_as == \"pathlib.Path\":\n stdlib_block.add(\"import pathlib\")\n else:\n raise NotImplementedError(\n \"Unhandled path_as: {!r}\".format(py.path_as))\n\n if mapry.needs_type(a_type=graph, query=mapry.TimeZone):\n if py.timezone_as == 'str':\n pass\n\n elif py.timezone_as == 'pytz.timezone':\n third_party_block.update(\n ('import pytz', 'import pytz.exceptions # type: ignore'))\n\n else:\n raise NotImplementedError(\n 'Unhandled timezone_as: {}'.format(py.timezone_as))\n\n # yapf: disable\n if any(mapry.needs_type(a_type=graph, query=query)\n for query in\n (mapry.Date, mapry.Time, mapry.Datetime, mapry.Duration)):\n # yapf: enable\n stdlib_block.add('import datetime')\n\n if mapry.needs_type(a_type=graph, query=mapry.Map):\n stdlib_block.add(\"import collections\")\n\n if len(graph.classes) > 0:\n stdlib_block.add(\n 'import collections'\n ) # needed for the initialization of class registries\n\n ##\n # Needs regex?\n ##\n\n import_re = False\n for a_type, _ in mapry.iterate_over_types(graph=graph):\n if isinstance(a_type, (mapry.String, mapry.Path)) and a_type.pattern:\n import_re = True\n break\n\n if 
isinstance(a_type, mapry.Duration):\n import_re = True\n break\n\n for cls in graph.classes.values():\n if cls.id_pattern is not None:\n import_re = True\n break\n\n if import_re:\n stdlib_block.add(\"import re\")\n\n ##\n # First party\n ##\n\n first_party_block = {\n 'import {}'.format(py.module_name),\n 'import {}.parse'.format(py.module_name)\n }\n\n block_strs = [] # type: List[str]\n if len(stdlib_block) > 0:\n block_strs.append('\\n'.join(sorted(stdlib_block)))\n\n if len(third_party_block) > 0:\n block_strs.append('\\n'.join(sorted(third_party_block)))\n\n if len(first_party_block) > 0:\n block_strs.append('\\n'.join(sorted(first_party_block)))\n\n return '\\n\\n'.join(block_strs)", "def position_before_code(filename=None):\n\n if filename is None:\n raise ValueError\n\n with open(filename, 'r') as f:\n content = f.read()\n\n # find \"import\" lines - if they exist\n pattern = r'[\\n\\r]\\s*import[^\\n\\r]*;'\n it = re.finditer(pattern, content, re.DOTALL)\n\n last = None\n # next \"empty\" for loop is intended to advance iterator to last match\n for match in it:\n last = match\n\n if last is None:\n # no imports in file\n return position_before_class(content)\n else:\n # found import lines - last holds last match in file\n return last.end()", "def import_from_string(import_path: str) -> Any:\n\n import_classname = import_path.split(\".\")[-1]\n import_module = \".\".join(import_path.split(\".\")[:-1])\n\n module = importlib.import_module(import_module)\n return getattr(module, import_classname)", "def fortify_location_with_parso(filename, line_no):\n try:\n import parso\n except ImportError:\n return None\n from pytest_pdb_break import BreakLoc\n root = parso.parse(filename.read_text())\n leaf = root.get_leaf_for_position((line_no, 0))\n\n def find(node, tipo):\n while node.type != tipo:\n if node is root:\n return None\n node = node.parent\n return node\n\n func = find(leaf, \"funcdef\")\n if func is None:\n return None\n\n cand = func\n while cand and not cand.name.value.startswith(\"test_\"):\n cand = find(cand.parent, \"funcdef\")\n if cand:\n func = cand\n\n cls = find(func, \"classdef\")\n\n return BreakLoc(file=filename, lnum=line_no, name=None,\n class_name=cls.name.value if cls else None,\n func_name=func.name.value,\n param_id=None)", "def try_import(import_str, default=None):\r\n try:\r\n return import_module(import_str)\r\n except ImportError:\r\n return default", "def test_taskfile_import(monkeypatch, modpath):\n monkeypatch.setattr(loadlimit.importhook, 'lstaskfiles', fake_lstaskfiles)\n monkeypatch.setattr(loadlimit.importhook, 'SourceFileLoader',\n FakeSourceFileLoader)\n\n taskfiles = ['a_{}.py'.format(i) for i in range(10)]\n names = [splitext(n)[0] for n in taskfiles]\n pypath = ['{}.{}'.format(modpath, n) for n in names]\n randpath = choice(pypath)\n\n assert modpath not in sys.modules\n assert all(not p.startswith(modpath) for p in sys.modules)\n\n sys.meta_path.append(TaskImporter(*taskfiles))\n taskfile = import_module(randpath)\n\n expected = set(pypath) | set([modpath])\n result = set(p for p in sys.modules if p.startswith(modpath))\n\n assert modpath in sys.modules\n assert result == expected\n assert taskfile.TEST == randpath", "def retry_import(mf: ModuleGraph, m: Node) -> typing.Optional[Node]:\n if \".\" in m.identifier:\n pname, partname = m.identifier.rsplit(\".\", 1)\n parent = mf.findNode(pname)\n else:\n parent = None\n partname = m.identifier\n\n # This is basically mf.find_module inlined and with a\n # check disabled.\n\n def fmod(\n name: 
str,\n path: typing.Optional[typing.List[str]],\n parent: typing.Optional[Node],\n ) -> typing.Tuple[\n typing.Optional[typing.IO], typing.Optional[str], typing.Tuple[str, str, int]\n ]:\n if path is None:\n if name in sys.builtin_module_names:\n return (None, None, (\"\", \"\", imp.C_BUILTIN))\n\n path = mf.path\n\n fp, buf, stuff = find_module(name, path)\n if buf:\n buf = os.path.realpath(buf)\n return (fp, buf, stuff)\n\n try:\n fp, pathname, stuff = fmod(\n partname, parent.packagepath if parent is not None else None, parent\n )\n except ImportError:\n return None\n\n if stuff[-1] == imp.PKG_DIRECTORY:\n m.__class__ = Package\n elif stuff[-1] == imp.PY_SOURCE:\n m.__class__ = SourceModule\n else:\n m.__class__ = CompiledModule\n\n m = mf._load_module(m.identifier, fp, pathname, stuff)\n\n if parent:\n mf.createReference(m, parent)\n parent[partname] = m\n return m", "def _read_sourced_path(self, line):\n # type: (str)->tp.Optional[str]\n if line.startswith('source '):\n sline = [x.strip() for x in line.split()]\n sline.pop(0)\n path = ' '.join(sline)\n if not os.path.isabs(path):\n current_root = self._root_interfaces_path\n if os.path.isfile(current_root):\n current_root = os.path.dirname(current_root)\n path = os.path.join(current_root, path)\n return path\n return None", "def is_import_from_completion(self):\n\n current_line = self.get_current_line()\n\n match = re.match(r\"from .* import\", current_line)\n if match and self.get_word() != \"import\":\n return True\n\n return False", "def path_for_import(name):\n return os.path.dirname(os.path.abspath(import_module(name).__file__))" ]
[ "0.64819837", "0.6139575", "0.6079234", "0.5968648", "0.5918364", "0.5828221", "0.55816394", "0.5489672", "0.544243", "0.5438753", "0.5432876", "0.5428505", "0.5426799", "0.542341", "0.53607404", "0.5345444", "0.5345008", "0.53337634", "0.5321171", "0.5308447", "0.5307642", "0.5304253", "0.52794766", "0.5239844", "0.52378845", "0.52361286", "0.5231601", "0.52303445", "0.5209924", "0.5207" ]
0.811714
0
Find the definition of an object within a source closest to a given line
def get_definition_with_regex(source, token, start_line=-1):
    if not token:
        return None
    if DEBUG_EDITOR:
        t0 = time.time()
    patterns = [  # python / cython keyword definitions
                '^c?import.*\W{0}{1}',
                'from.*\W{0}\W.*c?import ',
                'from .* c?import.*\W{0}{1}',
                'class\s*{0}{1}',
                'c?p?def[^=]*\W{0}{1}',
                'cdef.*\[.*\].*\W{0}{1}',
                # enaml keyword definitions
                'enamldef.*\W{0}{1}',
                'attr.*\W{0}{1}',
                'event.*\W{0}{1}',
                'id\s*:.*\W{0}{1}']

    matches = get_matches(patterns, source, token, start_line)

    if not matches:
        patterns = ['.*\Wself.{0}{1}[^=!<>]*=[^=]',
                    '.*\W{0}{1}[^=!<>]*=[^=]',
                    'self.{0}{1}[^=!<>]*=[^=]',
                    '{0}{1}[^=!<>]*=[^=]']
        matches = get_matches(patterns, source, token, start_line)

    # find the one closest to the start line (prefer before the start line)
    if matches:
        min_dist = len(source.splitlines())
        best_ind = 0
        for match in matches:
            dist = abs(start_line - match)
            if match <= start_line or not best_ind:
                if dist < min_dist:
                    min_dist = dist
                    best_ind = match
    if matches:
        if DEBUG_EDITOR:
            log_dt(LOG_FILENAME, 'regex definition match', t0)
        return best_ind
    else:
        if DEBUG_EDITOR:
            log_dt(LOG_FILENAME, 'regex definition failed match', t0)
        return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_relevant_line(self):\n # () -> (Phi.Line)\n line_name = self._get_line_name()\n print(\"looking for \"+str(line_name))\n return Phi.findLine(line_name)", "def findsource(object):\r\n file = getsourcefile(object)\r\n if not file:\r\n raise IOError('source code not available')\r\n module = getmodule(object, file)\r\n if module:\r\n lines = linecache.getlines(file, module.__dict__)\r\n else:\r\n lines = linecache.getlines(file)\r\n if not lines:\r\n raise IOError('could not get source code')\r\n\r\n if ismodule(object):\r\n return lines, 0\r\n\r\n if isclass(object):\r\n name = object.__name__\r\n pat = re.compile(r'^(\\s*)class\\s*' + name + r'\\b')\r\n # make some effort to find the best matching class definition:\r\n # use the one with the least indentation, which is the one\r\n # that's most probably not inside a function definition.\r\n candidates = []\r\n for i in range(len(lines)):\r\n match = pat.match(lines[i])\r\n if match:\r\n # if it's at toplevel, it's already the best one\r\n if lines[i][0] == 'c':\r\n return lines, i\r\n # else add whitespace to candidate list\r\n candidates.append((match.group(1), i))\r\n if candidates:\r\n # this will sort by whitespace, and by line number,\r\n # less whitespace first\r\n candidates.sort()\r\n return lines, candidates[0][1]\r\n else:\r\n raise IOError('could not find class definition')\r\n\r\n if ismethod(object):\r\n object = object.im_func\r\n if isfunction(object):\r\n object = object.func_code\r\n if istraceback(object):\r\n object = object.tb_frame\r\n if isframe(object):\r\n object = object.f_code\r\n if iscode(object):\r\n if not hasattr(object, 'co_firstlineno'):\r\n raise IOError('could not find function definition')\r\n lnum = object.co_firstlineno - 1\r\n pat = re.compile(r'^(\\s*def\\s)|(.*(?<!\\w)lambda(:|\\s))|^(\\s*@)')\r\n while lnum > 0:\r\n if pat.match(lines[lnum]): break\r\n lnum = lnum - 1\r\n return lines, lnum\r\n raise IOError('could not find code object')", "def getsourcelines(object):\r\n lines, lnum = findsource(object)\r\n\r\n if ismodule(object): return lines, 0\r\n else: return getblock(lines[lnum:]), lnum + 1", "def get_definition(self, info):\r\n token = info.obj\r\n lines = info.lines\r\n source_code = info.source_code\r\n filename = info.filename\r\n\r\n line_nr = None\r\n if '.' 
in token:\r\n token = token.split('.')[-1]\r\n\r\n line_nr = get_definition_with_regex(source_code, token,\r\n len(lines))\r\n if line_nr is None:\r\n return\r\n line = info.line\r\n exts = python_like_exts()\r\n if not osp.splitext(filename)[-1] in exts:\r\n return filename, line_nr\r\n if line.startswith('import ') or line.startswith('from '):\r\n alt_path = osp.dirname(filename)\r\n source_file = python_like_mod_finder(line, alt_path=alt_path,\r\n stop_token=token)\r\n if (not source_file or\r\n not osp.splitext(source_file)[-1] in exts):\r\n line_nr = get_definition_with_regex(source_code, token,\r\n line_nr)\r\n return filename, line_nr\r\n mod_name = osp.basename(source_file).split('.')[0]\r\n if mod_name == token or mod_name == '__init__':\r\n return source_file, 1\r\n else:\r\n with open(filename, 'rb') as fid:\r\n code = fid.read()\r\n code = encoding.decode(code)[0]\r\n line_nr = get_definition_with_regex(code, token)\r\n\r\n return filename, line_nr", "def _locate_item(self, item):\n\t\tif item['meta'].has_key(\"filename\"):\n\t\t\tfilename = item['meta']['filename']\n\t\telse:\n\t\t\traise ValueError(\"item does not have a filename\")\n\t\tfile = open(filename)\n\t\tobject_has_been_found = False\n\t\teverything_before = [] # Every line before our object definition\n\t\teverything_after = [] # Every line after our object definition\n\t\tobject_definition = [] # List of every line of our object definition\n\t\ti_am_within_definition = False\n\t\tfor line in file.readlines():\n\t\t\tif object_has_been_found:\n\t\t\t\t'If we have found an object, lets just spool to the end'\n\t\t\t\teverything_after.append( line )\n\t\t\t\tcontinue\n\t\t\ttmp = line.split(None, 1)\n\t\t\tif len(tmp) == 0:\n\t\t\t\t'empty line'\n\t\t\t\tkeyword = ''\n\t\t\t\trest = ''\n\t\t\tif len(tmp) == 1:\n\t\t\t\t'single word on the line'\n\t\t\t\tkeyword = tmp[0]\n\t\t\t\trest = ''\n\t\t\tif len(tmp) > 1:\n\t\t\t\tkeyword,rest = tmp[0],tmp[1]\n\t\t\tkeyword = keyword.strip()\n\t\t\t# If we reach a define statement, we log every line to a special buffer\n\t\t\t# When define closes, we parse the object and see if it is the object we\n\t\t\t# want to modify\n\t\t\tif keyword == 'define':\n\t\t\t\tcurrent_object_type = rest.split(None,1)[0]\n\t\t\t\tcurrent_object_type = current_object_type.strip(';')\n\t\t\t\tcurrent_object_type = current_object_type.strip('{')\n\t\t\t\tcurrent_object_type = current_object_type.strip()\n\t\t\t\ttmp_buffer = []\n\t\t\t\ti_am_within_definition = True\n\t\t\tif i_am_within_definition == True:\n\t\t\t\ttmp_buffer.append( line )\n\t\t\telse:\n\t\t\t\teverything_before.append( line )\n\t\t\tif len(keyword) > 0 and keyword[0] == '}':\n\t\t\t\ti_am_within_definition = False\n\t\t\t\t\n\t\t\t\tcurrent_definition = self.get_new_item(object_type=current_object_type, filename=filename)\n\t\t\t\tfor i in tmp_buffer:\n\t\t\t\t\ti = i.strip()\n\t\t\t\t\ttmp = i.split(None, 1)\n\t\t\t\t\tif len(tmp) == 1:\n\t\t\t\t\t\tk = tmp[0]\n\t\t\t\t\t\tv = ''\n\t\t\t\t\telif len(tmp) > 1:\n\t\t\t\t\t\tk,v = tmp[0],tmp[1]\n\t\t\t\t\t\tv = v.split(';',1)[0]\n\t\t\t\t\t\tv = v.strip()\n\t\t\t\t\telse: continue # skip empty lines\n\t\t\t\t\t\n\t\t\t\t\tif k.startswith('#'): continue\n\t\t\t\t\tif k.startswith(';'): continue\n\t\t\t\t\tif k.startswith('define'): continue\n\t\t\t\t\tif k.startswith('}'): continue\n\t\t\t\t\t\n\t\t\t\t\tcurrent_definition[k] = v\n\t\t\t\t\tcurrent_definition = self._apply_template(current_definition)\n\t\t\t\t# Compare objects\n\t\t\t\tif self.compareObjects( item, 
current_definition ) == True:\n\t\t\t\t\t'This is the object i am looking for'\n\t\t\t\t\tobject_has_been_found = True\n\t\t\t\t\tobject_definition = tmp_buffer\n\t\t\t\telse:\n\t\t\t\t\t'This is not the item you are looking for'\n\t\t\t\t\teverything_before += tmp_buffer\n\t\tif object_has_been_found:\n\t\t\treturn (everything_before, object_definition, everything_after, filename)\n\t\telse:\n\t\t\traise ValueError(\"We could not find object in %s\\n%s\" % (filename,item))", "def get_source_lines(lines: list, obj) -> int:\n source_lines = inspect.getsourcelines(obj)[0]\n source_length = len(source_lines)\n\n for index in range(len(lines) - source_length):\n if source_lines == lines[index:index + source_length]:\n return index, index + source_length\n\n return None, None", "def findsource(obj):\n filename = inspect.getsourcefile(obj)\n if filename:\n linecache.checkcache(filename)\n return inspect.findsource(obj)", "def findInLine(self) -> str:\n raise NotImplementedError", "def get_line(self, path, line):\n\t\tlines = self.find_source(path)\n\t\tif lines == None:\n\t\t\treturn None\n\t\telse:\n\t\t\ttry:\n\t\t\t\treturn lines[line - 1]\n\t\t\texcept IndexError:\n\t\t\t\treturn None", "def getsourcelines(object):\n lines, lnum = findsource(object)\n\n if inspect.ismodule(object): return lines, 0\n else: return inspect.getblock(lines[lnum:]), lnum + 1", "def matchSource(self, line, thing):\r\n m = thing.pattern.match(line)\r\n if m and self.matchClasses(thing.classes):\r\n return (thing, m.groups(), m.groupdict())", "def find_line(view, start=0, end=-1, target=0):\r\n if target < 0 or target > view.size():\r\n return -1\r\n\r\n if end == -1: end = view.size()\r\n\r\n lo, hi = start, end\r\n while lo <= hi:\r\n middle = lo + (hi - lo) / 2\r\n if get_line_nr(view, middle) < target:\r\n lo = getEOL(view, middle) + 1\r\n elif get_line_nr(view, middle) > target:\r\n hi = getBOL(view, middle) - 1\r\n else:\r\n return view.full_line(middle)\r\n return -1", "def FindClosestPoint(self, ):\n ...", "def _linesearch(self):\n pass", "def getsource(object):\r\n lines, lnum = getsourcelines(object)\r\n return string.join(lines, '')", "def goToDefinition(file, line, offset):\n args = {\"file\": file, \"line\": line, \"offset\": offset}\n response = send_request(\"definition\", args)\n return get_response_body(response)", "def getMatchingLine(self, line):\n tokens = map(str.lower, line.text.split()) # easy way to ignore case and whitespace\n for l in self.lines:\n if map(str.lower, l.text.split()) == tokens:\n return l # found a match\n \n return None # no matches found", "def getsource(object):\n lines, lnum = getsourcelines(object)\n return string.join(lines, '')", "def getNearestCar(self, position, line=0):\n return self.getNearestObjectInArray(self._cars, position, line)", "def fortify_location_with_parso(filename, line_no):\n try:\n import parso\n except ImportError:\n return None\n from pytest_pdb_break import BreakLoc\n root = parso.parse(filename.read_text())\n leaf = root.get_leaf_for_position((line_no, 0))\n\n def find(node, tipo):\n while node.type != tipo:\n if node is root:\n return None\n node = node.parent\n return node\n\n func = find(leaf, \"funcdef\")\n if func is None:\n return None\n\n cand = func\n while cand and not cand.name.value.startswith(\"test_\"):\n cand = find(cand.parent, \"funcdef\")\n if cand:\n func = cand\n\n cls = find(func, \"classdef\")\n\n return BreakLoc(file=filename, lnum=line_no, name=None,\n class_name=cls.name.value if cls else None,\n 
func_name=func.name.value,\n param_id=None)", "def get_line_no(obj):\n try:\n lineno = getsourcelines(obj)[1]\n except:\n # no code found\n lineno = None\n return lineno", "def findWithinHorizon(self) -> str:\n raise NotImplementedError", "def locate(self, pos):\n for obj in self.wrappers:\n if obj.start <= pos < obj.end:\n for sub in getattr(obj, 'attributes', ()):\n if sub.start <= pos < sub.end:\n return sub\n return obj\n else:\n if pos == len(self.input):\n return self.wrappers[-1]\n raise IndexError(\"position %d out of range\" % pos)", "def Find_Line_By_XY( self, x, y ):\r\n for i in self.handle_list:\r\n #examine the bounding box of each line\r\n bbox = self.canvas_one.bbox( i.line_handle )\r\n xb1 = bbox[ 0 ]\r\n yb = ( bbox[ 1 ] + bbox[ 3 ] ) / 2\r\n xb2 = bbox[ 2 ]\r\n if x >= xb1 and x <= xb2 and abs( y-yb ) <= cb.ytick / 2:\r\n #found, return handle\r\n return i\r\n #not found return -1\r\n return -1", "def search_element(doc, xpath, line=None):\n for elem in doc.xpath(xpath):\n if line is None:\n return elem\n\n elif elem.sourceline == line:\n return elem\n\n else:\n continue\n\n # raise ValueError if the element could not be located.\n LOGGER.info('could not find element \"%s\"', xpath)\n raise ValueError('could not find element \"%s\"' % xpath)", "def LineMinDistanceTo(line, point_or_line):\n line = rhutil.coerceline(line, True)\n test = rhutil.coerceline(point_or_line)\n if test is None: test = rhutil.coerce3dpoint(point_or_line, True)\n return line.MinimumDistanceTo(test)", "def LineClosestPoint(line, testpoint):\n line = rhutil.coerceline(line, True)\n testpoint = rhutil.coerce3dpoint(testpoint, True)\n return line.ClosestPoint(testpoint, False)", "def find_line_by_line_id(self, line_id):\n for line in self._data_lines:\n if six.text_type(line.get(\"line_id\")) == six.text_type(line_id):\n return line\n return None", "def extract_ref(line, refType):\n target = '\\\\' + refType + '{'\n\n start = line.find(target)\n for i in range(start, len(line)):\n if line[i] == '}':\n ref = line[start + len(target) : i]\n break\n return ref", "def find_import_before(doc, start_address, max_bytes=200):\n for adr in range(start_address, start_address - max_bytes, -1):\n lib = get_import_at(doc, adr)\n if lib:\n return lib\n return None" ]
[ "0.6721103", "0.6425865", "0.594324", "0.5941179", "0.59397006", "0.59163904", "0.5842844", "0.58050025", "0.5782345", "0.5758031", "0.5743304", "0.5533894", "0.5482378", "0.54344255", "0.5429776", "0.54285425", "0.5424473", "0.54120266", "0.5374897", "0.5373373", "0.5371944", "0.53446543", "0.5343615", "0.52971923", "0.5270149", "0.524977", "0.5242922", "0.5230993", "0.5218004", "0.5214177" ]
0.66474533
1
Return a list of all pythonlike extensions
def python_like_exts():
    exts = []
    for lang in sourcecode.PYTHON_LIKE_LANGUAGES:
        exts.extend(list(sourcecode.ALL_LANGUAGES[lang]))
    return ['.' + ext for ext in exts]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extensions():\n\n pass", "def all_editable_exts():\r\n exts = []\r\n for (language, extensions) in sourcecode.ALL_LANGUAGES.items():\r\n exts.extend(list(extensions))\r\n return ['.' + ext for ext in exts]", "def get_loaded_extensions():\n raise NotImplementedError()", "def get_supported_extensions(ext=\".as\"):\n result = list(ext + x for x in LOADERS.keys())\n result.append(ext)\n return result", "def get_required_extensions(self):\n return []", "def extensions(self) -> Tuple[str, ...]:\n raise NotImplementedError", "def list_extensions(self, **_params):\r\n return self.get(self.extensions_path, params=_params)", "def extensions(self):\n raise NotImplementedError()", "def extensions(self):\n return list(self._list(extension.Extension, paginated=False))", "def get_extension_funcs():\n raise NotImplementedError()", "def extensions(self, global_step):\n return []", "def extensions(self, global_step):\n return []", "def iter_extension_paths():\n for entry_point in iter_entry_points(group=\"confab.extensions\"):\n try:\n path_func = entry_point.load()\n yield path_func()\n except ImportError as e:\n warn(str(e))", "def extensions(cls):\n raise NotImplementedError", "def get_optional_extensions(self):\n return []", "def exts(self):\n return type(self).class_ext()", "def list_all_exts(top_path, exts):\n if not top_path.endswith('/'):\n top_path += '/'\n ext_list = []\n for extension in exts:\n if not extension.startswith('.'):\n extension = '.' + extension\n ext_list.append(extension.lower())\n file_list = []\n for dirpath, dirnames, filenames in os.walk(top_path):\n for filename in filenames:\n if os.path.splitext(filename)[1].lower() in ext_list:\n file_list.append(os.path.join(dirpath, filename))\n return file_list", "def list_extensions(include_built_in=None, include_disabled=None, organization=None, detect=None):\n if include_built_in is None:\n include_built_in = True\n if include_disabled is None:\n include_disabled = True\n organization = resolve_instance(detect=detect, organization=organization)\n extension_client = get_extension_client(organization)\n extensions = extension_client.get_installed_extensions(include_disabled_extensions=include_disabled)\n\n if not include_built_in:\n filteredResult = []\n for extension in extensions:\n if 'builtIn' not in str(extension.flags):\n filteredResult.append(extension)\n\n extensions = filteredResult\n\n return extensions", "def extensions(cls):\n return ['ma', 'mb']", "def get_file_extensions():\n my_files_ext = []\n for file in os.listdir(os.getcwd()):\n if os.path.isfile(file):\n file_info = os.path.splitext(file)\n file_ext = file_info[1]\n my_files_ext.append(file_ext)\n return [file for file in my_files_ext]", "def get_available_extensions() -> DefaultDict[str, Type]:\n all_extensions:DefaultDict[str, Type] = defaultdict(lambda:False)\n for current_class in Content.__subclasses__():\n for extension in current_class.extensions:\n all_extensions[extension] = current_class\n return all_extensions", "def find_modules(x):\n return Path(x).rglob('*.py')", "def detectExtensions(builder):\n print (\"Checking if C extensions can be compiled, don't be alarmed if \"\n \"a few compile errors are printed.\")\n\n if not builder._compile_helper(\"#define X 1\\n\"):\n print \"Compiler not found, skipping C extensions.\"\n return []\n\n # Extension modules to build.\n exts = [\n Extension(\"twisted.spread.cBanana\",\n [\"twisted/spread/cBanana.c\"],\n define_macros=builder.define_macros),\n ]\n\n # urllib.unquote accelerator\n exts.append( 
Extension(\"twisted.protocols._c_urlarg\",\n [\"twisted/protocols/_c_urlarg.c\"],\n define_macros=builder.define_macros) )\n\n if sys.platform == 'darwin':\n exts.append(\n Extension(\"twisted.internet.cfsupport\",\n [\"twisted/internet/cfsupport/cfsupport.c\"],\n extra_compile_args=['-w'],\n extra_link_args=['-framework','CoreFoundation',\n '-framework','CoreServices',\n '-framework','Carbon'],\n define_macros=builder.define_macros))\n\n if sys.platform == 'win32':\n exts.append( Extension(\"twisted.internet.iocpreactor._iocp\",\n [\"twisted/internet/iocpreactor/_iocp.c\"],\n libraries=[\"ws2_32\", \"mswsock\"],\n define_macros=builder.define_macros))\n\n return exts", "def list_extensions():\n formats = FileFormat.list_formats()\n return render_template('home.html', formats=formats)", "def explicit_list(self):\n exts = []\n for ext in self.extensions.values():\n if ext.implicit:\n continue\n exts.append(ext)\n return exts", "def find_ext_modules(self):\n return (\n _create_extensions(self.package_name, self.ext_module_dirs)\n if self.ext_module_dirs\n else self._find_ext_modules_from_hint()\n if self.ext_module_hint\n else None\n )", "def getModuleNames():\n import setup\n names = [e.name[1:] for e in setup.wxpExtensions]\n return names", "def _load_extensions(path):\n extension_dir = os.environ.get(path, path)\n print(f\"looking for extensions in {extension_dir}\")\n if not os.path.isdir(extension_dir):\n print(f\"No such {extension_dir}\")\n return\n\n import sys \n import importlib\n\n sys.path.append(path)\n imports = [ filename \n for filename in os.listdir(path)\n if not filename.startswith('__') \n and not filename.startswith('.') \n ]\n for filename in imports:\n module_name, _ = os.path.splitext(filename)\n module = importlib.import_module(module_name)\n for attribute_name in dir(module):\n if attribute_name.startswith('__'):\n continue\n globals()[attribute_name] = getattr(module, attribute_name)", "def find(self):\n extension_hooks = list()\n #Find all extension names\n dirs = pylabs.q.system.fs.listDirsInDir(self.rootDir, True,findDirectorySymlinks=True)\n # Use a simple PMExtensionFactory\n factory = PMExtensionFactory()\n for dir in (d for d in dirs if pylabs.q.system.fs.exists(os.path.join(d, self.extensionConfigName))):\n #we found possible extension because extension.cfg file found\n pylabs.q.logger.log('Found extension in %s' % dir, 6)\n # Load extension ini file\n configfilePath = os.path.join(dir, self.extensionConfigName)\n inifile = pylabs.inifile.IniFile(configfilePath)\n path = pylabs.q.system.fs.getDirName(configfilePath)\n hooks = self._getHookInformation(inifile, path, factory)\n extension_hooks.extend(hooks)\n return extension_hooks", "def _load_library_extensions():\n group = 'helga_handlers'\n entry_points = pkg_resources.iter_entry_points(group=group)\n plugins = []\n for ep in entry_points:\n try:\n logger.debug('loading entry_point %s' % ep.name)\n plugin = ep.load()\n plugin._helga_name_ = ep.name\n plugins.append(plugin)\n except Exception as error:\n logger.error(\"Error initializing plugin %s: %s\" % (ep, error))\n return plugins" ]
[ "0.7843823", "0.77088356", "0.7419539", "0.7320552", "0.714457", "0.70847607", "0.70469284", "0.7008349", "0.6991765", "0.6977103", "0.69406205", "0.69406205", "0.6892772", "0.68143374", "0.6790699", "0.67160887", "0.6696911", "0.6656253", "0.665139", "0.6639324", "0.66269875", "0.65335184", "0.65327436", "0.65317017", "0.6461726", "0.6427159", "0.6413078", "0.6387581", "0.6384995", "0.63782674" ]
0.86687607
0
Return a list of all editable extensions
def all_editable_exts():
    exts = []
    for (language, extensions) in sourcecode.ALL_LANGUAGES.items():
        exts.extend(list(extensions))
    return ['.' + ext for ext in exts]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_extensions(self, **_params):\r\n return self.get(self.extensions_path, params=_params)", "def extensions(self):\n return list(self._list(extension.Extension, paginated=False))", "def extensions(self):\n return self.properties.get('extensions',\n EntityCollection(self.context, Extension,\n ResourcePath(\"extensions\", self.resource_path)))", "def list_extensions():\n formats = FileFormat.list_formats()\n return render_template('home.html', formats=formats)", "def extensions(self, global_step):\n return []", "def extensions(self, global_step):\n return []", "def extensions():\n\n pass", "def enabled_editables(self):\n from indico.modules.events.editing.settings import editing_settings\n if not self.event.has_feature('editing'):\n return []\n\n enabled_editable_types = editing_settings.get(self.event, 'editable_types')\n enabled_editables = [editable for editable in self.editables if editable.type.name in enabled_editable_types]\n order = list(EditableType)\n return sorted(enabled_editables, key=lambda editable: order.index(editable.type))", "def extensions():\r\n document.add_page_break()\r\n document.add_heading('Extensions', level=1)\r\n extensions = get_qlik_sense.get_extensions()\r\n num_of_extensions = len(extensions)\r\n table = document.add_table(rows=num_of_extensions+1, cols=1)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'name'\r\n\r\n for extension in range(num_of_extensions):\r\n row = table.rows[extension+1]\r\n row.cells[0].text = str(extensions[extension])", "def extensions(self):\n raise NotImplementedError()", "def get_required_extensions(self):\n return []", "def list_extensions(include_built_in=None, include_disabled=None, organization=None, detect=None):\n if include_built_in is None:\n include_built_in = True\n if include_disabled is None:\n include_disabled = True\n organization = resolve_instance(detect=detect, organization=organization)\n extension_client = get_extension_client(organization)\n extensions = extension_client.get_installed_extensions(include_disabled_extensions=include_disabled)\n\n if not include_built_in:\n filteredResult = []\n for extension in extensions:\n if 'builtIn' not in str(extension.flags):\n filteredResult.append(extension)\n\n extensions = filteredResult\n\n return extensions", "def extensions(self):\n extensions = []\n #convenient name\n current = self.from_grid\n height, width = len(current), len(current[0])\n index = self.find_index()\n \n #Up and Down\n if (index[0] > 0 and index[0] < height-1):\n extensions += [self.swap_up(index)] + [self.swap_down(index)]\n #Down\n elif(index[0] == 0 and index[0] < height):\n extensions += [self.swap_down(index)]\n #Up\n elif(index[0] > 0 and index[0] == height):\n extensions += [self.swap_up(index)] \n\n #Left and Right\n if (index[1] > 0 and index[1] < width-1):\n extensions += [self.swap_right(index)] + [self.swap_left(index)]\n #Right\n elif(index[1] == 0 and index[1] != width):\n extensions += [self.swap_right(index)]\n #Left\n elif(index[1] != 0 and index[1] == width):\n extensions += [self.swap_left(index)]\n return extensions", "def get_loaded_extensions():\n raise NotImplementedError()", "def get_available_extensions() -> DefaultDict[str, Type]:\n all_extensions:DefaultDict[str, Type] = defaultdict(lambda:False)\n for current_class in Content.__subclasses__():\n for extension in current_class.extensions:\n all_extensions[extension] = current_class\n return all_extensions", "def get_optional_extensions(self):\n return []", "def 
__iter__(self):\r\n return iter(self.extensions)", "def extensions(cls):\n raise NotImplementedError", "def get_list_of_repos(self):\n my_extensions = {'checked_plugins_names': [],\n 'checked_plugins': [],\n 'unchecked_plugins': [],\n 'checked_plugins_count': 0,\n 'total_plugins_count': 0}\n try:\n if False in [isinstance(self.ext_folder, (str, unicode)),\n isinstance(self.ext_sufix, (str, unicode)),\n isinstance(self.ext_prefix, (str, unicode))]:\n raise TypeError('Los tipos de los argumentos provistos no son validos.')\n from os import listdir, path\n if not path.exists(self.ext_folder):\n raise IOError('El directorio {} no existe.'.format(self.ext_folder))\n list_of_folders = listdir(self.ext_folder)\n from git import Repo\n from git.exc import GitCommandError\n from os import path\n for folder in list_of_folders:\n if [folder[:len(self.ext_prefix)], folder[:-len(self.ext_sufix)]] == [self.ext_prefix, self.ext_sufix]:\n ext_name = folder.replace(self.ext_sufix, '').replace(self.ext_prefix, '')\n try:\n r = Repo(path.join(self.ext_folder, folder))\n _branch = r.active_branch.name\n origin_branch = 'origin/{branch}..{branch}'.format(branch=_branch)\n _git_dir = path.dirname(r.git_dir)\n commits_ahead = sum(x / x for x in r.iter_commits(origin_branch))\n commits_behind = sum(\n x / x for x in list(r.iter_commits('master..master@{{u}}'.format(b=_branch))))\n my_extensions['checked_plugins'].append({'ext_name': ext_name,\n 'branch': _branch,\n 'last_commit': r.active_branch.commit.message,\n 'description': r.description,\n 'commits_ahead_master': commits_ahead,\n 'git_dir': _git_dir,\n 'commits_behind_master': commits_behind})\n my_extensions['checked_plugins_names'].append(ext_name)\n my_extensions['checked_plugins_count'] = len(my_extensions['checked_plugins'])\n my_extensions['total_plugins_count'] = len(my_extensions['checked_plugins']) + len(\n my_extensions['unchecked_plugins'])\n except GitCommandError:\n my_extensions['unchecked_plugins'].append(ext_name)\n logger.error('Fallo: \\\"no upstream configured for branch\\\"')\n except (TypeError, IOError):\n logger.error('Imposible checkear extension')\n return my_extensions", "def iter_extensions(self) -> t.Iterator[\"Extension\"]:\n return iter(sorted(self.extensions.values(), key=lambda x: x.priority))", "def get_js_extensions(self):\n return JS_EXTENSIONS", "def extensions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MachineExtensionInstanceViewArgs']]]]:\n return pulumi.get(self, \"extensions\")", "def list(self):\n return [os.splitext(el)[0] for el in\n os.listdir(str(self.model_dir))]", "def get_matched_extensions(request):\n\n def _match(e):\n return e.obj if e.obj.matches(request) else None\n\n result = EXTENSION_MANAGER.map(_match)\n return filter(bool, result)", "def get_startup_extensions(self):\n final_list = []\n for entry in self.bot_data_file[\"startup_extensions\"]:\n final_list.append(str(entry[\"name\"]))\n return final_list", "def get_extension_options(self):\n options = []\n for extension in self.extensions:\n extension_options = getattr(extension, \"OPTIONS\", None)\n if extension_options:\n options.extend(extension_options)\n return options", "async def extensions(ctx):\n if ctx.invoked_subcommand is None:\n embed = Embed(\n title=\"Extensions\",\n description=\"The following extensions are loaded:\",\n colour=bot.colors['default']\n )\n for k, v in bot.cogs.items():\n embed.add_field(\n name=k,\n value=v.description,\n inline=False)\n await ctx.channel.send(embed=embed)", "def 
get_supported_extensions(ext=\".as\"):\n result = list(ext + x for x in LOADERS.keys())\n result.append(ext)\n return result", "def find(self):\n extension_hooks = list()\n #Find all extension names\n dirs = pylabs.q.system.fs.listDirsInDir(self.rootDir, True,findDirectorySymlinks=True)\n # Use a simple PMExtensionFactory\n factory = PMExtensionFactory()\n for dir in (d for d in dirs if pylabs.q.system.fs.exists(os.path.join(d, self.extensionConfigName))):\n #we found possible extension because extension.cfg file found\n pylabs.q.logger.log('Found extension in %s' % dir, 6)\n # Load extension ini file\n configfilePath = os.path.join(dir, self.extensionConfigName)\n inifile = pylabs.inifile.IniFile(configfilePath)\n path = pylabs.q.system.fs.getDirName(configfilePath)\n hooks = self._getHookInformation(inifile, path, factory)\n extension_hooks.extend(hooks)\n return extension_hooks", "def get_request_extensions(self):\n return []" ]
[ "0.6947238", "0.6891938", "0.6843709", "0.6733824", "0.66740257", "0.66740257", "0.6525709", "0.63423103", "0.6303439", "0.62761074", "0.6270803", "0.624791", "0.62398034", "0.61868507", "0.6155842", "0.604202", "0.6034626", "0.6003988", "0.5982591", "0.5959434", "0.59400076", "0.5935038", "0.5921213", "0.5914957", "0.5900612", "0.58817977", "0.5857729", "0.5850429", "0.58374727", "0.5823479" ]
0.82879215
0
Tests the recipe for AWS>GCP disk copy.
def testRunRecipe(self):
  warnings.filterwarnings(
      action="ignore", message="unclosed", category=ResourceWarning)
  # Load the recipe, set the arguments, and run
  self.test_state.LoadRecipe(RECIPE, TEST_MODULES)
  self.test_state.command_line_options = {
      'aws_region': self.aws_region,
      'gcp_zone': self.gcp_zone,
      'volumes': self.aws_volume,
      'aws_bucket': self.aws_bucket,
      'gcp_bucket': self.gcp_bucket,
      'subnet': self.aws_subnet,
      'gcp_project': self.gcp_project_id
  }
  self.test_state.SetupModules()
  self.test_state.RunModules()
  # AWS Volume in count should equal GCE Disk out count, and be at least 1
  self.assertGreaterEqual(
      len(self.test_state.GetContainers(containers.AWSVolume)), 1)
  self.assertEqual(len(self.test_state.GetContainers(containers.AWSVolume)),
                   len(self.test_state.GetContainers(containers.GCEDisk)))
  disks = compute.GoogleCloudCompute(self.gcp_project_id).Disks()
  real_gce_disk_names = [disks[k].name for k in disks.keys()]
  for d in self.test_state.GetContainers(containers.GCEDisk):
    self.assertIn(d.name, real_gce_disk_names)
    real_disk = compute.GoogleComputeDisk(
        self.gcp_project_id, self.gcp_zone, d.name)
    self.assertEqual(real_disk.GetDiskType(), 'pd-standard')
    # Make an API call to the service that will fail if the disk doesn't exist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testFetchGs(self):\n # pylint: disable=unused-argument\n def _Fetch(_ctx, cmd, capture_output):\n # Touch file we tried to copy too.\n osutils.Touch(cmd[-1])\n\n self.gs_mock.AddCmdResult(\n ['cp', '-v', '--', partial_mock.Ignore(), partial_mock.Ignore()],\n side_effect=_Fetch)\n\n key = ('gs',)\n url = 'gs://some.site.localdomain/file_go_boom'\n with self.cache.Lookup(key) as ref:\n self.assertFalse(ref.Exists())\n ref.Assign(url)\n self.assertTrue(ref.Exists())", "def test_6c_copy_data_btw_containers(self):\n if (not GST.logged_in) or (not GST.data_testing_swift_mounted):\n raise unittest.SkipTest(\"Skipped for failed login or failed mounting container.\")\n elif (GST.default_folder_to_be_used):\n if not (default_folders_exists):\n raise unittest.SkipTest(\"Skipped for failed to prepare default directories\")\n elif (not GST.dir1_exists):\n raise unittest.SkipTest(\"Skipped for failed to prepare dir1\")\n elif not GST.copying_data_test_ready:\n raise unittest.SkipTest(\"Skipped for failed to prepare copying data tests.\")\n self.dismiss_dialogs()\n function = js_func[\"copy_file\"] % (GST.gs_file_paths[\"copy_to_container_target_path\"], GST.gs_file_paths[\"file_to_copy_source_path\"])\n try:\n self.send_request(function, \"copy_file()\")\n except Exception as e:\n raise CopyException(\"Failed to copy the file between containers. \\n\" + e.__str__())\n try:\n response = self.get_response()\n assert \"Success\" in response\n self.refresh_page()\n except AssertionError:\n raise CopyException(\"Failed to copy the file between containers. \\n\" + response)", "def test_aws_provisioner(host):\n\n assert True", "def test_disk(self):\n self.command.package = self.input_ovf\n self.command.file_id = \"file1\"\n self.command.run()\n self.command.finished()\n self.check_diff(\"\"\"\n <ovf:References>\n- <ovf:File ovf:href=\"input.vmdk\" ovf:id=\"file1\" ovf:size=\"{vmdk_size}\" />\n <ovf:File ovf:href=\"input.iso\" ovf:id=\"file2\" ovf:size=\"{iso_size}\" />\n...\n <ovf:Info>Virtual disk information</ovf:Info>\n- <ovf:Disk ovf:capacity=\"1\" ovf:capacityAllocationUnits=\"byte * 2^30\" \\\novf:diskId=\"vmdisk1\" ovf:fileRef=\"file1\" ovf:format=\"http://www.vmware.com/\\\ninterfaces/specifications/vmdk.html#streamOptimized\" />\n </ovf:DiskSection>\n...\n <rasd:AddressOnParent>0</rasd:AddressOnParent>\n- <rasd:ElementName>Hard Drive</rasd:ElementName>\n- <rasd:HostResource>ovf:/disk/vmdisk1</rasd:HostResource>\n- <rasd:InstanceID>6</rasd:InstanceID>\n- <rasd:Parent>3</rasd:Parent>\n- <rasd:ResourceType>17</rasd:ResourceType>\n- </ovf:Item>\n- <ovf:Item>\n- <rasd:AddressOnParent>0</rasd:AddressOnParent>\n <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>\n\"\"\".format(vmdk_size=self.FILE_SIZE['input.vmdk'],\n iso_size=self.FILE_SIZE['input.iso']))\n self.assertFalse(os.path.exists(os.path.join(self.temp_dir,\n \"input.vmdk\")),\n \"deleted file should not be exported\")", "def test_create_copy(self):\n\n config = {\n 'version': '2.0',\n 'input_files': {\n 'INPUT_1': [{\n 'id': 1234,\n 'type': 'PRODUCT',\n 'workspace_name': 'wksp-name',\n 'workspace_path': 'the/workspace/path/file.json',\n 'local_file_name': 'file_abcdfeg.json',\n 'is_deleted': False,\n }]\n },\n 'output_workspaces': {\n 'OUTPUT_1': 'WORKSPACE_1'\n },\n 'tasks': [\n {\n 'task_id': 'task-1234',\n 'type': 'main',\n 'resources': {'cpu': 1.0},\n 'args': 'foo ${INPUT_1} ${JOB_OUTPUT_DIR}',\n 'env_vars': {'ENV_VAR_NAME': 'ENV_VAR_VALUE'},\n 'workspaces': {'WORKSPACE_NAME': {'mode': 'ro'}},\n 'mounts': {'MOUNT_NAME': 
'MOUNT_VOLUME_NAME'},\n 'settings': {'SETTING_NAME': 'SETTING_VALUE'},\n 'volumes': {\n 'VOLUME_NAME_1': {\n 'container_path': '/the/container/path',\n 'mode': 'ro',\n 'type': 'host',\n 'host_path': '/the/host/path'\n },\n 'VOLUME_NAME_2': {\n 'container_path': '/the/other/container/path',\n 'mode': 'rw',\n 'type': 'volume',\n 'driver': 'SUPER_DRIVER_5000',\n 'driver_opts': {'turbo': 'yes-pleez'}\n }\n },\n 'docker_params': [{'flag': 'hello', 'value': 'scale'}]\n }\n ]\n }\n exe_config = ExecutionConfiguration(config)\n\n copy = exe_config.create_copy()\n self.assertDictEqual(copy.get_dict(), config)", "def test_profile_copy_file(profile_manager, test_profile,\n tmpdir, inventory_content):\n\n myfile = tmpdir.mkdir(\"ir_dir\").join(\"fake_hosts_file\")\n myfile.write(inventory_content)\n org_inventory = myfile.strpath\n\n target_path = test_profile.copy_file(org_inventory)\n assert target_path == os.path.join(\n test_profile.path, os.path.basename(org_inventory))\n\n profile_inventory = py.path.local(target_path)\n assert profile_inventory.check(file=1)\n assert inventory_content == profile_inventory.read()", "def main():\n\n try:\n\n session = boto3.session.Session(aws_access_key_id=os.environ['AWSKEY'],\n aws_secret_access_key=os.environ['AWSSECRET'])\n\n s3 = session.client(\"s3\")\n\n copy_source = {\n\n 'Bucket': 'my-new-bucket-name-123',\n 'Key': \"old_file_location\"\n }\n\n s3.copy(Bucket=\"my-new-bucket-name-123\", CopySource=copy_source,\n Key=\"new_file_location\")\n\n s3.delete_object(Bucket=\"my-new-bucket-name-123\", Key=\"old_file_location\")\n\n except Exception as e:\n\n print(\"Error: \", str(e))", "def _mock_s3_copy(\n cursor,\n copy_command,\n):\n s3 = boto3.client(\n \"s3\",\n aws_access_key_id=copy_command.aws_access_key_id,\n aws_secret_access_key=copy_command.aws_secret_access_key,\n )\n ending_index = len(copy_command.s3_uri)\n path_to_file = copy_command.s3_uri[5:ending_index]\n bucket, key = path_to_file.split(\"/\", 1)\n response = s3.get_object(Bucket=bucket, Key=key)\n\n # the following lins of code is used to check if the file is gzipped or not.\n # To do so we use magic numbers.\n # A mgic number is a constant numerical or text value used to identify a file format or protocol\n # The magic number for gzip compressed files is 1f 8b.\n is_gzipped = binascii.hexlify(response[\"Body\"].read(2)) == b\"1f8b\"\n\n response = s3.get_object(Bucket=bucket, Key=key)\n data = get_raw_file(response[\"Body\"].read(), is_gzipped)\n\n cursor.copy_expert(\n \"COPY {cc.table_name} FROM STDIN WITH DELIMITER AS '{cc.delimiter}' {cc.format} HEADER {non_null_clause}\".format(\n cc=copy_command,\n non_null_clause=(\"FORCE NOT NULL \" + \", \".join(copy_command.columns))\n if copy_command.columns\n else \"\",\n ),\n data,\n )", "def test_sync_file_in_glacier(self, mock_boto3):\n client_error_glacier = ClientError(\n error_response={\"Error\": {\"Code\": \"InvalidObjectState\"}}, operation_name=Mock()\n )\n source_bucket_name = fake.slug()\n destination_bucket_name = fake.slug()\n schema_name = self.schema\n\n start_date = date(2019, 1, 1)\n end_date = date(2019, 3, 1)\n date_range = (start_date, end_date)\n\n source_object = Mock()\n source_object.key = f\"{settings.S3_BUCKET_PATH}/{schema_name}{fake.file_path()}\"\n source_object.bucket_name = source_bucket_name\n source_object.storage_class = \"GLACIER\"\n\n self.assertNotEqual(source_bucket_name, destination_bucket_name)\n\n mock_resource = mock_boto3.resource\n mock_buckets = mock_resource.return_value.Bucket\n mock_filter = 
mock_buckets.return_value.objects.filter\n mock_filter.return_value = (source_object,)\n mock_destination_object = mock_buckets.return_value.Object\n mock_copy_from = mock_destination_object.return_value.copy_from\n mock_copy_from.side_effect = client_error_glacier\n with self.assertRaises(SyncedFileInColdStorageError):\n syncer = AwsS3Syncer(source_bucket_name)\n syncer.sync_bucket(schema_name, destination_bucket_name, date_range)\n source_object.restore_object.assert_called()", "def test_copy_fail(self):\n\n # Assert that a RelaxPipeError occurs when the data pipe to copy data to already exists.\n self.assertRaises(RelaxPipeError, pipes.copy, 'orig', 'empty')", "def copy_blobs_in_gcp_storage(source_bucket_name):\n\n # extract file and convert in dataframe\n extracted_df = extract()\n\n # transform the df\n transformed_df = transform (extracted_df)\n\n # the function loads clean csv content as csv file in clean-zone-bucket\n load(transformed_df)\n\n\n return \"Function executed sucessfully!\"", "def main(step, machine_image_region, source_project,\n source_subnet_uri: uri.Subnet, source_zone, source_zone_2,\n source_zone_3, target_project, target_service_account, target_scopes,\n target_subnet_uri: uri.Subnet, backup_subnet_uri: uri.Subnet,\n source_csv, filter_csv, input_csv, rollback_csv,\n log_level) -> bool:\n if not target_project:\n target_project = source_project\n if not target_subnet_uri:\n target_subnet_uri = copy.deepcopy(source_subnet_uri)\n if source_project != target_project:\n if not target_service_account:\n target_service_account = \\\n \"{}[email protected]\".format(\n project.get_number(target_project))\n if target_scopes:\n target_scopes = target_scopes.split(',')\n else:\n target_scopes = [\n 'https://www.googleapis.com/auth/devstorage.read_only',\n 'https://www.googleapis.com/auth/logging.write',\n 'https://www.googleapis.com/auth/monitoring.write',\n 'https://www.googleapis.com/auth/service.management.readonly',\n 'https://www.googleapis.com/auth/servicecontrol'\n ]\n\n numeric_level = getattr(logging, log_level.upper(), None)\n if not isinstance(numeric_level, int):\n raise ValueError('Invalid log level: %s' % log_level)\n logging.basicConfig(filename='migrator.log',\n format='%(asctime)s %(levelname)s %(message)s',\n level=numeric_level)\n\n logging.info('executing step %s', step)\n if step == 'prepare_inventory':\n logging.info('Exporting the inventory')\n if subnet.export_instances(source_project, source_zone, source_zone_2,\n source_zone_3, source_subnet_uri,\n source_csv):\n logging.info('%s now has exported records', source_csv)\n else:\n logging.info('File %s was not overwritten', source_csv)\n return False\n\n elif step == 'filter_inventory':\n logging.info('Exporting the inventory')\n if subnet.export_instances(source_project, source_zone, source_zone_2,\n source_zone_3, source_subnet_uri,\n source_csv):\n logging.info('%s now has exported records', source_csv)\n else:\n logging.info('File %s was not overwritten', source_csv)\n return False\n\n logging.info('Filtering out the exported records')\n if filter_records(source_csv, filter_csv, input_csv):\n logging.info('%s now has filtered records', input_csv)\n else:\n logging.info('File %s was not overwritten', input_csv)\n return False\n\n elif step == 'prepare_rollback':\n logging.info('Listing the VMs to roll back')\n if subnet.list_instances_for_rollback(source_project, source_zone, backup_subnet_uri, input_csv, rollback_csv):\n logging.info('%s now has exported records', rollback_csv)\n else:\n 
logging.info('File %s was not overwritten', rollback_csv)\n return False\n\n elif step == 'rollback_instances':\n logging.info('Performing rollback of instances in file %s', rollback_csv)\n if bulk_move_instances_to_subnet(rollback_csv, source_subnet_uri, 'rollback'):\n logging.info('Instances rollback completed successfully')\n else:\n logging.info('Rollback failed, please see the log file for details')\n return False\n\n elif step == 'shutdown_instances':\n with open(input_csv, 'r') as read_obj:\n csv_dict_reader = DictReader(read_obj)\n count = len(list(csv_dict_reader))\n shutdown_response = query_yes_no(\n 'Are you sure you want to shut down all (%s) '\n 'instances present in the inventory ?' % count,\n default='no')\n\n if shutdown_response:\n logging.info('Shutting down all instances')\n\n if bulk_instance_shutdown(input_csv):\n logging.info('Successfully shut down all instances')\n else:\n logging.info('Shutting down all instances failed')\n return False\n else:\n return False\n\n elif step == 'start_instances':\n start_response = query_yes_no(\n 'Are you sure you want to start all '\n 'instances present in the inventory ?',\n default='no')\n if start_response:\n logging.info('Starting all instances')\n\n if bulk_instance_start(input_csv):\n logging.info('Successfully started all instances')\n else:\n logging.info('Starting all instances failed')\n return False\n else:\n return False\n\n elif step == 'create_machine_images':\n logging.info('Creating Machine Images')\n if bulk_image_create(source_project, machine_image_region, input_csv):\n logging.info('Successfully created all machine images')\n else:\n logging.info('Creating all machine images failed')\n return False\n\n elif step == 'disable_deletionprotection_instances':\n with open(input_csv, 'r') as read_obj:\n csv_dict_reader = DictReader(read_obj)\n count = len(list(csv_dict_reader))\n shutdown_response = query_yes_no(\n 'Are you sure you want to disable deletion protection for all (%s) '\n 'instances present in the inventory?' % count,\n default='no')\n\n if shutdown_response:\n logging.info('Disabling deletion protection for all instances')\n\n if bulk_instance_disable_deletionprotection(input_csv):\n logging.info('Successfully disabled deletion protection for all '\n 'instances')\n else:\n logging.info('Disabling deletion protection for all instances '\n 'failed')\n return False\n else:\n return False\n\n elif step == 'delete_instances':\n with open(input_csv, 'r') as read_obj:\n csv_dict_reader = DictReader(read_obj)\n count = len(list(csv_dict_reader))\n response = query_yes_no('Are you sure you want to delete the (%s) '\n 'instances and disks present in the inventory '\n '?' 
% count, default='no')\n if response:\n logging.info('Deleting all the instances and disks present in the '\n 'inventory')\n if bulk_delete_instances_and_disks(input_csv, source_project):\n logging.info('Successfully deleted all instances and disks '\n 'present in the inventory')\n else:\n logging.info('Deleting all instances and disks in the '\n 'inventory failed')\n return False\n else:\n logging.info('Not deleting any instances nor disks')\n return False\n\n elif step == 'clone_subnet':\n logging.info('Cloning Subnet')\n if subnet.duplicate(source_subnet_uri, target_subnet_uri):\n logging.info('Successfully cloned subnet in the provided region')\n else:\n logging.info('Cloning subnet in the provided region failed')\n return False\n\n elif step == 'add_machineimage_iampolicies':\n logging.info('Setting IAM policies of created machine images with '\n 'input_csv=%s, source_project=%s, target_service_account='\n '%s', input_csv, source_project, target_service_account)\n if add_machineimage_iampolicies(input_csv, source_project,\n target_service_account):\n logging.info('Successfully set IAM policies of created machine '\n 'images')\n else:\n logging.info('Setting IAM policies of created machine images '\n 'failed')\n return False\n\n elif step == 'create_instances':\n logging.info(\n 'Creating instances retaining the original ips in file %s with '\n 'source_project=%s, target_project=%s, target_service_account=%s, '\n 'target_scopes=%s, target_subnet_uri=%s', input_csv,\n source_project, target_project, target_service_account,\n target_scopes, target_subnet_uri)\n if bulk_create_instances(input_csv, target_project,\n target_service_account, target_scopes,\n target_subnet_uri, source_project, True):\n logging.info('Instances created successfully')\n else:\n logging.error('Creation of instances failed')\n return False\n\n elif step == 'create_instances_without_ip':\n logging.info(\n 'Creating instances without retaining the original ips in file %s '\n 'with source_project=%s, target_project=%s, target_service_account'\n '=%s, target_scopes=%s, target_subnet_uri=%s', input_csv,\n source_project, target_project, target_service_account,\n target_scopes, target_subnet_uri)\n if bulk_create_instances(input_csv, target_project,\n target_service_account, target_scopes,\n target_subnet_uri, source_project, False):\n logging.info('Instances created successfully')\n else:\n logging.error('Creation of instances failed')\n return False\n\n elif step == 'backup_instances':\n logging.info(\n 'Backing up instances in file %s to backup_subnet_uri=%s',\n input_csv, backup_subnet_uri)\n if bulk_move_instances_to_subnet(input_csv, backup_subnet_uri, 'backup'):\n logging.info('Instances backed up successfully')\n else:\n logging.error('Backup of instances failed')\n return False\n\n elif step == 'release_ip_for_subnet':\n logging.info('Releasing all IPs of project %s present in '\n 'subnet %s', source_project, source_subnet_uri)\n if subnet.release_ip(source_project, source_subnet_uri):\n logging.info('All IPs of project %s present in subnet %s released '\n 'sucessfully', source_project, source_subnet_uri)\n else:\n logging.error('Releasing the IPs of project %s present in subnet '\n '%s failed', source_project, source_subnet_uri)\n return False\n\n elif step == 'release_ip':\n logging.info('Releasing the IPs present in the %s file', input_csv)\n if release_individual_ips(source_subnet_uri, input_csv):\n logging.info('IPs present in the file %s released successfully',\n input_csv)\n else:\n 
logging.error('Releasing ips present in the file %s failed',\n input_csv)\n return False\n else:\n logging.error('Step %s unknown', step)\n return False\n\n return True", "def test_scp(self):\n self.assertEqual(\n general.scp('user','example.com','my_file','remotedir').command_line,\n ['scp','my_file','[email protected]:remotedir'])", "def test_s3_whole_file_transfer(sdc_builder, sdc_executor, aws):\n s3_key = f'{S3_SANDBOX_PREFIX}/{get_random_string()}/'\n s3_dest_key = f'{S3_SANDBOX_PREFIX}/{get_random_string()}/'\n data = 'Completely random string that is transfered as whole file format.'\n\n # Build pipeline.\n builder = sdc_builder.get_pipeline_builder()\n builder.add_error_stage('Discard')\n\n origin = builder.add_stage('Amazon S3', type='origin')\n origin.set_attributes(bucket=aws.s3_bucket_name, data_format='WHOLE_FILE',\n prefix_pattern=f'{s3_key}/*',\n max_batch_size_in_records=100)\n\n target = builder.add_stage('Amazon S3', type='destination')\n target.set_attributes(bucket=aws.s3_bucket_name, data_format='WHOLE_FILE', partition_prefix=s3_dest_key,\n file_name_expression='output.txt')\n\n # TLKT-248: Add ability to directly read events from snapshots\n identity = builder.add_stage('Dev Identity')\n trash = builder.add_stage('Trash')\n\n finisher = builder.add_stage('Pipeline Finisher Executor')\n finisher.set_attributes(stage_record_preconditions=[\"${record:eventType() == 'no-more-data'}\"])\n\n origin >> target\n origin >= finisher\n target >= identity\n identity >> trash\n\n pipeline = builder.build().configure_for_environment(aws)\n pipeline.configuration['shouldRetry'] = False\n sdc_executor.add_pipeline(pipeline)\n\n client = aws.s3\n try:\n client.put_object(Bucket=aws.s3_bucket_name, Key=f'{s3_key}/input.txt', Body=data.encode('ascii'))\n snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot\n\n # Validate event generation\n assert len(snapshot[identity].output) == 1\n assert snapshot[identity].output[0].get_field_data('/targetFileInfo/bucket') == aws.s3_bucket_name\n assert snapshot[identity].output[0].get_field_data(\n '/targetFileInfo/objectKey') == f'{s3_dest_key}sdc-output.txt'\n\n # We should have exactly one file on the destination side\n list_s3_objs = client.list_objects_v2(Bucket=aws.s3_bucket_name, Prefix=s3_dest_key)\n assert len(list_s3_objs['Contents']) == 1\n\n # With our secret message\n s3_obj_key = client.get_object(Bucket=aws.s3_bucket_name, Key=list_s3_objs['Contents'][0]['Key'])\n s3_contents = s3_obj_key['Body'].read().decode().strip()\n assert s3_contents == data\n finally:\n delete_keys = {'Objects': [{'Key': k['Key']}\n for k in\n client.list_objects_v2(Bucket=aws.s3_bucket_name, Prefix=s3_key)['Contents']]}\n client.delete_objects(Bucket=aws.s3_bucket_name, Delete=delete_keys)", "def test_clone_system(self):\n pass", "def ErrorCheckCopyRequest(self, src_uri_expansion, dst_uri_str, headers,\n debug, command='cp'):\n for src_uri in src_uri_expansion:\n if src_uri.is_cloud_uri() and not src_uri.bucket_name:\n raise CommandException('Provider-only src_uri (%s)')\n\n if ContainsWildcard(dst_uri_str):\n matches = list(self.CmdWildcardIterator(dst_uri_str, headers=headers,\n debug=debug))\n if len(matches) > 1:\n raise CommandException('Destination (%s) matches more than 1 URI' %\n dst_uri_str)\n base_dst_uri = matches[0]\n else:\n base_dst_uri = self.StorageUri(dst_uri_str, debug=debug)\n\n # Make sure entire expansion didn't result in nothing to copy. 
This can\n # happen if user request copying a directory w/o -r option, for example.\n have_work = False\n for v in src_uri_expansion.values():\n if v:\n have_work = True\n break\n if not have_work:\n raise CommandException('Nothing to copy')\n\n # If multi-object copy request ensure base_dst_uri names a container.\n multi_src_request = (len(src_uri_expansion) > 1 or\n len(src_uri_expansion.values()[0]) > 1)\n if multi_src_request:\n self.InsistUriNamesContainer(command, base_dst_uri)\n\n # Ensure no src/dest pairs would overwrite src. Note that this is\n # more restrictive than the UNIX 'cp' command (which would, for example,\n # allow \"mv * dir\" and just skip the implied mv dir dir). We disallow such\n # partial completion operations in cloud copies because they are risky.\n for src_uri in iter(src_uri_expansion):\n for exp_src_uri in src_uri_expansion[src_uri]:\n new_dst_uri = self.ConstructDstUri(src_uri, exp_src_uri, base_dst_uri)\n if self.SrcDstSame(exp_src_uri, new_dst_uri):\n raise CommandException('cp: \"%s\" and \"%s\" are the same object - '\n 'abort.' % (exp_src_uri.uri, new_dst_uri.uri))\n\n return (base_dst_uri, multi_src_request)", "def test_backup_with_update_on_disk_of_snapshot_markers(self):\n version = RestConnection(self.backupset.backup_host).get_nodes_version()\n if version[:5] == \"6.5.0\":\n self.log.info(\"\\n\\n******* Due to issue in MB-36904, \\\n \\nthis test will be skipped in 6.5.0 ********\\n\")\n return\n gen1 = BlobGenerator(\"ent-backup1\", \"ent-backup-\", self.value_size, end=100000)\n gen2 = BlobGenerator(\"ent-backup2\", \"ent-backup-\", self.value_size, end=100000)\n gen3 = BlobGenerator(\"ent-backup3\", \"ent-backup-\", self.value_size, end=100000)\n rest_conn = RestConnection(self.backupset.cluster_host)\n rest_conn.create_bucket(bucket=\"bucket0\", ramQuotaMB=1024)\n self.buckets = rest_conn.get_buckets()\n authentication = \"-u Administrator -p password\"\n\n self._load_all_buckets(self.master, gen1, \"create\", 0)\n self.log.info(\"Stop persistent\")\n cluster_nodes = rest_conn.get_nodes()\n clusters = copy.deepcopy(cluster_nodes)\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n for node in clusters:\n shell.execute_command(\"%scbepctl%s %s:11210 -b %s stop %s\" % \\\n (self.cli_command_location,\n self.cmd_ext,\n node.ip,\n \"bucket0\",\n authentication))\n shell.disconnect()\n self.log.info(\"Load 2nd batch docs\")\n self._load_all_buckets(self.master, gen2, \"create\", 0)\n self.log.info(\"Run full backup with cbbackupwrapper\")\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n backup_dir = self.tmp_path + \"backup\" + self.master.ip\n shell.execute_command(\"rm -rf %s\" % backup_dir)\n shell.execute_command(\"mkdir %s\" % backup_dir)\n shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m full %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n self.log.info(\"Load 3rd batch docs\")\n self._load_all_buckets(self.master, gen3, \"create\", 0)\n self.log.info(\"Run diff backup with cbbackupwrapper\")\n output, _ = shell.execute_command(\"cd %s;./cbbackupwrapper%s http://%s:8091 %s -m diff %s\"\n % (self.cli_command_location, self.cmd_ext,\n self.backupset.cluster_host.ip,\n backup_dir,\n authentication))\n\n if output and \"SUCCESSFULLY COMPLETED\" not in output[1]:\n self.fail(\"Failed to backup as the fix in MB-25727\")\n shell.disconnect()", "def test_args_copy():\n args = cli.parse_args(['-c'])\n assert args.copy\n 
args = cli.parse_args(['--copy'])\n assert args.copy", "def test_google_storage_post_processing(sdc_builder, sdc_executor, gcp, action):\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n bucket_name = get_random_string(ascii_lowercase, 10)\n\n storage_client = gcp.storage_client\n\n google_cloud_storage = pipeline_builder.add_stage('Google Cloud Storage', type='origin')\n\n google_cloud_storage.set_attributes(bucket=bucket_name,\n common_prefix='gcs-test',\n prefix_pattern='**/*.txt',\n data_format='TEXT',\n post_processing_option=action)\n wiretap = pipeline_builder.add_wiretap()\n\n google_cloud_storage >> wiretap.destination\n\n pipeline = pipeline_builder.build().configure_for_environment(gcp)\n sdc_executor.add_pipeline(pipeline)\n\n created_bucket = gcp.retry_429(storage_client.create_bucket)(bucket_name)\n try:\n data = [get_random_string(ascii_letters, 100) for _ in range(10)]\n file_path = 'gcs-test/a/b/c/d/e/'\n file_name = 'sdc-test.txt'\n blob = created_bucket.blob(file_path + file_name)\n blob.upload_from_string('\\n'.join(data))\n\n logger.info('Starting GCS Origin pipeline and wait until the information is read ...')\n sdc_executor.start_pipeline(pipeline)\n sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', 10)\n sdc_executor.stop_pipeline(pipeline)\n\n rows_from_wiretap = [record.field['text'] for record in wiretap.output_records]\n\n # If post processing option is enabled, old file path should not exist\n assert not storage_client.get_bucket(bucket_name).blob(file_path + file_name).exists()\n\n # If ARCHIVE, default prefix is empty, so it gets moved to root of bucket\n if action == 'ARCHIVE':\n assert storage_client.get_bucket(bucket_name).blob(file_name).exists()\n\n assert len(data) == len(rows_from_wiretap)\n assert rows_from_wiretap == data\n finally:\n logger.info('Deleting bucket %s ...', created_bucket.name)\n gcp.retry_429(created_bucket.delete)(force=True)", "def test_transfer(self, mock_variable_get, mock_aws_info, mock_transfer):\n mock_variable_get.side_effect = lambda x: {\n \"download_bucket\": \"download-bucket\",\n \"transform_bucket\": \"transform-bucket\",\n \"project_id\": \"project_id\",\n \"data_path\": \"data\",\n }[x]\n mock_aws_info.return_value = \"key_id\", \"secret_key\"\n mock_transfer.return_value = True, 3\n\n with CliRunner().isolated_filesystem():\n # Create release\n start_date = pendulum.DateTime(2022, 1, 1)\n end_date = pendulum.DateTime(2022, 2, 1)\n release = OpenAlexRelease(\"dag_id\", start_date, end_date, False, 1)\n\n # Create transfer manifest files\n with open(release.transfer_manifest_path_download, \"w\") as f:\n f.write('\"prefix1\"\\n\"prefix2\"\\n')\n with open(release.transfer_manifest_path_transform, \"w\") as f:\n f.write(\"\")\n\n # Test succesful transfer with prefixes for download, no prefixes for transform\n release.transfer(max_retries=1)\n mock_transfer.assert_called_once_with(\n \"key_id\",\n \"secret_key\",\n aws_bucket=OpenAlexTelescope.AWS_BUCKET,\n include_prefixes=[\"prefix1\", \"prefix2\"],\n gc_project_id=\"project_id\",\n gc_bucket=\"download-bucket\",\n gc_bucket_path=\"telescopes/dag_id/2022_01_01-2022_02_01/\",\n description=\"Transfer OpenAlex data from Airflow telescope to download-bucket\",\n )\n mock_transfer.reset_mock()\n\n # Test failed transfer\n mock_transfer.return_value = False, 4\n with self.assertRaises(AirflowException):\n release.transfer(1)", "def test_clone_deployment(self):\n pass", "def test_copy_without_name(self):\n self.source[\"name\"] = \"\"\n 
source_copy = copy_source(self.source, self.DATA_MODEL)\n self.assertEqual(\"pip (copy)\", source_copy[\"name\"])", "def os_copy_local_to_s3(taskout_local, taskout_s3_root):\n\n task_name = taskout_local.split(\"/\")[-1]\n if not os.path.exists(taskout_s3_root):\n os.system(\"mkdir \" + taskout_s3_root)\n\n if os.path.exists(taskout_s3_root + \"/\" + task_name):\n print(\"Task out s3 Folder already exist, Overwriting\", taskout_s3_root + \"/\" + task_name)\n\n cmd = \" cp -r {a} {b}\".format(a=taskout_local, b=taskout_s3_root)\n msg = os.system(cmd)\n print(\"Copy success\", msg)", "def check_filesystem(ssh_connection, disk_fmt, disk):\n if disk_fmt == \"squashfs\":\n return\n cmd = \"fsck.{} -n {}\".format(disk_fmt, disk)\n exit_code, _, stderr = ssh_connection.run(cmd)\n assert exit_code == 0, stderr", "def test_upload_new_vdisk_coordinated(self, mock_create_file):\n\n # Override adapter's traits to use the coordinated local API\n self.adptfx.set_traits(fx.LocalPVMTraits)\n\n # First need to load in the various test responses.\n vg_orig = tju.load_file(UPLOAD_VOL_GRP_ORIG, self.adpt)\n vg_post_crt = tju.load_file(UPLOAD_VOL_GRP_NEW_VDISK, self.adpt)\n\n self.adpt.read.return_value = vg_orig\n self.adpt.update_by_path.return_value = vg_post_crt\n mock_create_file.return_value = self._fake_meta()\n\n n_vdisk, f_wrap = ts.upload_new_vdisk(\n self.adpt, self.v_uuid, self.vg_uuid, None, 'test2', 50,\n d_size=25, sha_chksum='abc123')\n\n # Ensure the create file was called\n mock_create_file.assert_called_once_with(\n self.adpt, 'test2', vf.FileType.DISK_IMAGE_COORDINATED,\n self.v_uuid, f_size=50,\n tdev_udid='0300f8d6de00004b000000014a54555cd9.3',\n sha_chksum='abc123')\n\n # Ensure cleanup was called after the upload\n self.adpt.delete.assert_called_once_with(\n 'File', service='web',\n root_id='6233b070-31cc-4b57-99bd-37f80e845de9')\n self.assertIsNone(f_wrap)\n self.assertIsNotNone(n_vdisk)\n self.assertIsInstance(n_vdisk, stor.VDisk)", "def test_co_transfer():\n test_path = tempfile.mkdtemp()\n x_train, metadata = co_transfer(test_path)\n try:\n assert x_train.shape == (7, 2)\n except:\n shutil.rmtree(test_path)\n raise()", "def copy(self, src_path: str, tgt_path: str) -> None:", "def snapshot(source, destination):\n\n processutils.execute(\n 'qemu-img convert --force-share -O qcow2 %s %s'\n % (source, destination),\n shell=True)", "def test_cpoy_clean():\n home_dir = \"/home/xyz\"\n result_root = \"tq-data01\"\n root_folder = \"sentinel1_GRD\"\n local_path = \"/home/xyz/data_pool/test_data/sentinel_GRD/77\"\n suffix_pattern = suffix_pattern = [\"/*.data\", \"/*.dim\", \"/*.SAFE\"]\n\n logger.info(\"Delete test\")\n res_path, flag = CCL.copy_clean_local(\n home_dir, result_root, root_folder, local_path, suffix_pattern\n )\n logger.info(\"%s, %s\", res_path, flag)", "def copy_drive(source, target,\n check_if_valid_and_copy=False,\n source_part_mask=\"{0}{1}\",\n target_part_mask=\"{0}{1}\",\n excluded_partitions=[],\n ignore_copy_failures=True,\n grub_partition=None,\n boot_partition=None,\n efi_partition=None,\n mount_points=None,\n rsync_args=device.DEFAULT_RSYNC_ARGS,\n part_callback=None,\n copy_callback=None,\n boot_callback=None):\n try:\n source_loop = None\n target_loop = None\n if source.endswith(\".img\"):\n source_loop = mount_loop_device(source)\n source = source_loop\n source_part_mask = \"{0}p{1}\"\n\n if target.endswith(\".img\"):\n target_loop = mount_loop_device(target)\n target = target_loop\n target_part_mask = \"{0}p{1}\"\n LOGGER.warning(\"Right now, WereSync does not 
properly install bootloaders on image files. You will have to handle that yourself if you want your image to be bootable.\")\n\n source_manager = device.DeviceManager(source, source_part_mask)\n target_manager = device.DeviceManager(target, target_part_mask)\n copier = device.DeviceCopier(source_manager, target_manager)\n if check_if_valid_and_copy:\n try:\n print(\"Checking partition validity.\")\n copier.partitions_valid()\n if part_callback != None:\n part_callback(1.0)\n LOGGER.info(\"Drives are compatible\")\n except CopyError as ex:\n LOGGER.warning(ex.message)\n print(\"Partitions invalid!\\nCopying drive partition table.\")\n LOGGER.warning(\"Drives are incompatible.\")\n copier.transfer_partition_table(callback=part_callback)\n else:\n if part_callback != None:\n part_callback(1.0)\n\n if mount_points == None or len(mount_points) < 2 or mount_points[0] == mount_points[1]:\n source_dir = \"/tmp/\" + str(random.randint(0, 100000))\n target_dir = \"/tmp/\" + str(random.randint(-100000, -1))\n os.makedirs(source_dir, exist_ok=True)\n os.makedirs(target_dir, exist_ok=True)\n mount_points = (source_dir, target_dir)\n\n print(\"Beginning to copy files.\")\n copier.copy_files(mount_points[0], mount_points[1], excluded_partitions, ignore_copy_failures, rsync_args, callback=copy_callback)\n print(\"Finished copying files.\")\n print(\"Making bootable\")\n try:\n copier.make_bootable(mount_points[0], mount_points[1], excluded_partitions, grub_partition, boot_partition, efi_partition, boot_callback)\n except DeviceError as ex:\n print(\"Error making drive bootable. All files should be fine.\")\n return ex\n print(\"All done, enjoy your drive!\")\n return True\n finally:\n def delete_loop(loop_name):\n subprocess.call([\"losetup\", \"-d\", loop_name])\n if source_loop != None:\n delete_loop(source_loop)\n if target_loop != None:\n delete_loop(target_loop)" ]
[ "0.64040744", "0.6082685", "0.57742435", "0.5700392", "0.5675572", "0.56447816", "0.56269157", "0.5600139", "0.55963385", "0.55918896", "0.55627227", "0.5555737", "0.55307716", "0.5498585", "0.54770666", "0.5464479", "0.54632187", "0.5451441", "0.54501855", "0.54433", "0.54425853", "0.5431706", "0.54302114", "0.54214114", "0.54102015", "0.53956825", "0.5390295", "0.53613114", "0.53607285", "0.5352489" ]
0.70389867
0
Deletes an AWS EBS Snapshot with ID `id`.
def _removeAWSSnapshot(self, snap_id: str):
  log.warning(f'Deleting AWS EBS Snapshot {snap_id}')
  ec2_client = boto3.client('ec2', region_name=self.aws_region)
  try:
    ec2_client.delete_snapshot(SnapshotId=snap_id)
  except Exception as error:  # pylint: disable=broad-except
    log.error(f'Failed to delete AWS EBS Snapshot {snap_id}: {str(error)}')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_snapshot(self, snapshot_id):\r\n\r\n self.iscsi_svc.deleteObject(id=snapshot_id)", "def cli(env, snapshot_id):\n block_manager = SoftLayer.BlockStorageManager(env.client)\n deleted = block_manager.delete_snapshot(snapshot_id)\n\n if deleted:\n click.echo('Snapshot %s deleted' % snapshot_id)", "def delete_snapshot(SnapshotId=None):\n pass", "def delete_snapshot(self, snapshot_id):\n resp, body = self.delete(\"snapshots/%s\" % snapshot_id)\n self.validate_response(schema.delete_snapshot, resp, body)\n return rest_client.ResponseBody(resp, body)", "def delete_snapshot(self, snapshot_id):\n response = snapshot.delete_snapshot(self.url, self.verb,\n self.headers, self.version,\n snapshot_id)\n if response is not None :\n res = DeleteSnapshotResponse.DeleteSnapshotResponse()\n parseString(str(response.text), res)\n return res\n else :\n return None", "def delete_snapshot(self, name=None, snapshot_id=None):\n self.get_snapshots()\n\n _snapshot = self.get_snapshot(name=name, snapshot_id=snapshot_id)\n if not _snapshot:\n raise ValueError(\"Snapshot not found\")\n\n _url = (\n f\"{self.connector.base_url}/projects/{self.project_id}/snapshots/\"\n f\"{_snapshot['snapshot_id']}\"\n )\n\n self.connector.http_call(\"delete\", _url)\n\n self.get_snapshots()", "def delete_snapshot(self, context, snapshot_id):\n caller_context = context\n context = context.elevated()\n snapshot_ref = self.db.snapshot_get(context, snapshot_id)\n project_id = snapshot_ref['project_id']\n\n LOG.info(_(\"snapshot %s: deleting\"), snapshot_ref['id'])\n self._notify_about_snapshot_usage(\n context, snapshot_ref, \"delete.start\")\n\n try:\n LOG.debug(_(\"snapshot %s: deleting\"), snapshot_ref['id'])\n\n # Pass context so that drivers that want to use it, can,\n # but it is not a requirement for all drivers.\n snapshot_ref['context'] = caller_context\n\n self._delete_snapshot_cascaded(context, snapshot_id)\n except exception.SnapshotIsBusy:\n LOG.error(_(\"Cannot delete snapshot %s: snapshot is busy\"),\n snapshot_ref['id'])\n self.db.snapshot_update(context,\n snapshot_ref['id'],\n {'status': 'available'})\n return True\n except Exception:\n with excutils.save_and_reraise_exception():\n self.db.snapshot_update(context,\n snapshot_ref['id'],\n {'status': 'error_deleting'})\n\n # Get reservations\n try:\n if CONF.no_snapshot_gb_quota:\n reserve_opts = {'snapshots': -1}\n else:\n reserve_opts = {\n 'snapshots': -1,\n 'gigabytes': -snapshot_ref['volume_size'],\n }\n volume_ref = self.db.volume_get(context, snapshot_ref['volume_id'])\n QUOTAS.add_volume_type_opts(context,\n reserve_opts,\n volume_ref.get('volume_type_id'))\n reservations = QUOTAS.reserve(context,\n project_id=project_id,\n **reserve_opts)\n except Exception:\n reservations = None\n LOG.exception(_(\"Failed to update usages deleting snapshot\"))\n self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot_id)\n self.db.snapshot_destroy(context, snapshot_id)\n LOG.info(_(\"snapshot %s: deleted successfully\"), snapshot_ref['id'])\n self._notify_about_snapshot_usage(context, snapshot_ref, \"delete.end\")\n\n # Commit the reservations\n if reservations:\n QUOTAS.commit(context, reservations, project_id=project_id)\n return True", "def delete(config: Config, ami: str) -> None:\n\n ec2_client = boto3.client(\"ec2\", region_name=config.get(\"region\", None))\n\n response = describe(config, ami, show_snapshot_id=True)\n\n ec2_client.deregister_image(ImageId=ami)\n\n ec2_client.delete_snapshot(SnapshotId=response[0][\"SnapshotId\"])", "def 
delete_snapshot(self, snapshot):\n aname = \"cinder_v%s.delete_snapshot\" % self.version\n with atomic.ActionTimer(self, aname):\n self._get_client().volume_snapshots.delete(snapshot)\n bench_utils.wait_for_status(\n snapshot,\n ready_statuses=[\"deleted\"],\n check_deletion=True,\n update_resource=self._update_resource,\n timeout=CONF.openstack.cinder_volume_delete_timeout,\n check_interval=(CONF.openstack\n .cinder_volume_delete_poll_interval)\n )", "def test_aws_service_api_snapshot_delete(self):\n pass", "def delete_snapshot_metadata_item(self, snapshot_id, id):\n url = \"snapshots/%s/metadata/%s\" % (snapshot_id, id)\n resp, body = self.delete(url)\n self.validate_response(\n schema.delete_snapshot_metadata_item, resp, body)\n return rest_client.ResponseBody(resp, body)", "def database_volume_snapshot_delete(volume_snapshot_uuid):\n db = database_get()\n session = db.session()\n query = session.query(model.VolumeSnapshot)\n query.filter(model.VolumeSnapshot.uuid == volume_snapshot_uuid).delete()\n session.commit()", "def delete_volume_snapshot(volume_snapshots):\n if type(volume_snapshots) is not list:\n volumes = [volume_snapshots]\n command = 'cinder snapshot-delete %s' % \\\n \" \".join(snapshot['id'] for snapshot in volume_snapshots)\n d = Popen(command.split(), stdout=STDOUT, stderr=STDERR).communicate()[0]", "def delete(self, arguments):\n name = arguments['<name>']\n\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n if vmrun.deleteSnapshot(name) is None:\n puts_err(colored.red(\"Cannot delete name\"))\n else:\n puts_err(colored.green(\"Snapshot {} deleted\".format(name)))", "def delete_snapshot(self, snapshot):\n self._impl.delete_snapshot(snapshot)", "def test_delete_snapshot(self):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n snapshot = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10,\n 'provider_id': 'snap10'}\n self.driver.delete_snapshot(snapshot)\n expected = {'name': 'snap10'}\n self.assertDictMatch(expected, self.deleted)", "def snap_delete(mnode, snapname):\n\n cmd = \"gluster snapshot delete %s --mode=script\" % snapname\n return g.run(mnode, cmd)", "def delete_snapshot(session, snapshot, network):\n # type: (Session, Text, Text) -> None\n url_tail = \"/{}/{}/{}/{}\".format(\n CoordConstsV2.RSC_NETWORKS, network, CoordConstsV2.RSC_SNAPSHOTS, snapshot\n )\n return _delete(session, url_tail)", "def delete_metadata(self, snapshot_id, key, **kwargs):\n return self._delete(\"/snapshots/%s/metadata/%s\" % (snapshot_id, key),\n **kwargs)", "def delete_snapshot_object(session, key, snapshot=None):\n # type: (Session, str, Optional[str]) -> None\n url_tail = \"/{}/{}/{}/{}/{}\".format(\n CoordConstsV2.RSC_NETWORKS,\n session.network,\n CoordConstsV2.RSC_SNAPSHOTS,\n session.get_snapshot(snapshot),\n CoordConstsV2.RSC_OBJECTS,\n )\n _delete(session, url_tail, {CoordConstsV2.QP_KEY: key})", "def delete_snapshot(self, sSnapshotUuid, bChild = False):\n\t\treturn Job(SDK.PrlVm_DeleteSnapshot(self.handle, sSnapshotUuid, bChild)[0])", "def cleanup_detach_snapshot(ec2, aws_account_id, dry_run=True):\n images = ec2.images.filter(Owners=[aws_account_id])\n images = [image.id for image in images]\n for snapshot in ec2.snapshots.filter(OwnerIds=[aws_account_id]):\n r = re.match(r\".*for (ami-.*) from.*\", snapshot.description)\n if r:\n if r.groups()[0] 
not in images:\n logger.info(\"Deleting %s\" % snapshot.snapshot_id)\n if not dry_run:\n snapshot.delete(DryRun=dry_run)\n else:\n logger.info(\" skipped as dry_run is true\")", "def remove_snapshot(project, snap_name):\n data = {constants.PROJECT_PARAMETER: project,\n constants.IMAGE_NAME_PARAMETER: snap_name}\n res = requests.delete(_url + \"remove_image/\", data=data, auth=(\n _username, _password))\n click.echo(res.content)", "def delete(self, req, id):\n context = None\n try:\n db_api.image_destroy(context, id)\n except exception.NotFound:\n return exc.HTTPNotFound()", "def delete_snapshot(self, pool, project, share, snapshot):\n svc = self.snapshot_path % (pool, project, share, snapshot)\n ret = self.rclient.delete(svc)\n if ret.status != restclient.Status.NO_CONTENT:\n exception_msg = (_('Error deleting '\n 'snapshot: %(snapshot)s on '\n 'share: %(share)s to '\n 'pool: %(pool)s '\n 'project: %(project)s '\n 'return code: %(ret.status)d '\n 'message: %(ret.data)s.')\n % {'snapshot': snapshot,\n 'share': share,\n 'pool': pool,\n 'project': project,\n 'ret.status': ret.status,\n 'ret.data': ret.data})\n LOG.error(exception_msg)\n raise exception.ShareBackendException(msg=exception_msg)", "def delete(id):\n # Get the photo requested\n photo = Photo.query.filter(Photo.id == id).one_or_none()\n\n # Did we find a photo?\n if photo is not None:\n db.session.delete(photo)\n db.session.commit()\n return make_response(\n \"Photo {id} deleted\".format(id=id), 200\n )\n\n # Otherwise, nope, didn't find that photo\n else:\n abort(\n 404,\n \"Photo not found for Id: {id}\".format(id=id),\n )", "def scrub_snapshot(snapshot_id: int):\n span = opentracing.tracer.start_span('tasks.scrub_snapshot')\n span.set_tag('snapshot_id', snapshot_id)\n _scrub_snapshot(snapshot_id, span)\n span.finish()\n # Flush the loggers here so it's not in the span\n utils.flush_logstash()", "def delete(self, cls, id):\n\n del FileStorage.__objects[key(cls, id)]", "def delete(self, id):\n try:\n self.gridfs.delete(ObjectId(id))\n except Exception, e:\n print e\n raise e", "def delete(**_):\n\n volume_id = utils.get_external_resource_id_or_raise(\n 'delete EBS volume', ctx.instance)\n\n if _delete_external_volume():\n return\n\n ctx.logger.debug('Deleting EBS volume: {0}'.format(volume_id))\n\n if not _delete_volume(volume_id):\n return ctx.operation.retry(\n message='Failed to delete volume {0}.'\n .format(volume_id))\n\n utils.unassign_runtime_property_from_resource(\n constants.ZONE, ctx.instance)\n\n utils.unassign_runtime_property_from_resource(\n constants.EXTERNAL_RESOURCE_ID, ctx.instance)\n\n ctx.logger.info(\n 'Deleted EBS volume: {0}.'\n .format(volume_id))" ]
[ "0.78719264", "0.7736575", "0.7644279", "0.75770277", "0.736929", "0.71240944", "0.69008183", "0.68352735", "0.67959744", "0.6736099", "0.6706821", "0.66655517", "0.6488515", "0.63967794", "0.6360391", "0.61587834", "0.6127205", "0.60837644", "0.6061843", "0.60463864", "0.604422", "0.6037647", "0.6031915", "0.5950206", "0.5832454", "0.58004", "0.5776624", "0.57688016", "0.5759608", "0.5733221" ]
0.78926736
0
Deletes an S3 object at `path`.
def _removeAWSS3Object(self, path: str): log.warning(f'Deleting AWS S3 object {path}') bucket, key = storage_utils.SplitStoragePath(path) s3_client = boto3.client('s3') try: s3_client.delete_object(Bucket=bucket, Key=key) except Exception as error: # pylint: disable=broad-except log.error(f'Failed to delete S3 Object {path}: {str(error)}')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_object(self, s3_path):\n logging.info(\"Deleting \\\"{}\\\" file from S3\".format(s3_path))\n bucket_name, key = S3Util.get_bucket_and_key(s3_path)\n self.s3_resource.ObjectSummary(bucket_name, key).delete()", "def delete_object(self, object_path):\n bucket_name, save_path = self.split_name(object_path)\n self.s3_client.delete_object(Bucket=bucket_name, Key=save_path)\n return True", "async def rm(path: str):\n _ = path.strip('/').split('/')\n bucket = _[0]\n key = '/'.join(_[1:])\n if path.endswith('/'):\n key += '/'\n async with _create_client() as client:\n try:\n await client.delete_object(Bucket=bucket, Key=key)\n logger.info(f'Delete file \"{path}\" from bucket.')\n except ClientError:\n pass", "def delete_key(bucket_name: str, path: str) -> None:\n global config\n session = config.get_session_fn()\n s3_client = session.client(\"s3\")\n try:\n s3_client.delete_object(Bucket=bucket_name, Key=path)\n except Exception as e:\n logger.warning(f\"Failed to delete {path} from s3: {e}\")", "def clean_s3(self, path=None):\n backend = BackendS3(**self.config)\n backend.recursive_delete()", "def delete_path(bucket, path):\n bucket = get_bucket(bucket)\n\n for k in bucket.list(path):\n k.delete()\n k = Key(bucket)\n k.key = path.strip(\"/\") + \"_$folder$\"\n k.delete()", "def s3_delete_data(self):\n\n self.k.delete()", "def remove_s3_object(bucket, key, access_id, access_secret):\n try:\n s3_client(access_id, access_secret).delete_object(Bucket=bucket, Key=key)\n except ClientError as error:\n LOGGER.error(error)", "def delete(self, path):\n \n try:\n self._client.remove(self._getEncodedUri(path), force=True)\n except ClientError, error:\n raise SubversionError(error)\n else:\n self._sharedState.removeFromCache(path)", "def delete_file(bucket, file_to_be_deleted):\n s3 = boto3.client('s3', aws_access_key_id=access_key, aws_secret_access_key=secret_key)\n s3.delete_object(Bucket=bucket, Key=file_to_be_deleted)\n print(file_to_be_deleted, \" : is deleted from the bucket\")", "def deleteObject(bucket:str, object:str, region:str, version:str=None) -> None:\n client = boto3.client('s3', region_name=region)\n if version:\n client.delete_object(\n Bucket=bucket,\n Key=object,\n VersionId=version,\n )\n else:\n client.delete_object(\n Bucket=bucket,\n Key=object,\n )", "def delete(self, path):\n response = self._request(\"DELETE\", path)\n return self._handle_response(response)", "def delete_file_from_s3(bucket_name, filepath):\n if filepath not in list_files_in_s3_bucket(bucket_name):\n raise FileNotFoundError(\n f\"The filepath specified '{filepath}' does not exist in the\"\n f\" bucket '{bucket_name}'\"\n )\n s3 = boto3.client(\"s3\")\n s3.delete_object(Bucket=bucket_name, Key=filepath)", "def delete_object_from_bucket(bucket_name, file_name, object_name):\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = os.path.basename(file_name)\n\n try:\n # Delete object from bucket\n response = s3.delete_object(Bucket=bucket_name, Key=object_name)\n # List files in bucket to confirm\n describe_objects(bucket_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True", "def delete(self, path):\n \n # prep\n delete_url = self.url(path)\n\n # request\n response = requests.delete(delete_url, auth=self.auth, headers=API.HEADERS)\n\n # test and return\n self.raise_for_status(response)", "def delete(self, key):\n return s3.Object(self.bucket.name, key).delete()", "def delete_objects(self, s3_prefix_path):\n bucket_name, prefix = 
S3Util.get_bucket_and_key(s3_prefix_path)\n bucket = self.s3_resource.Bucket(bucket_name)\n for obj in bucket.objects.filter(Prefix=prefix):\n obj.delete()", "def rm(self, s3uri, **kwargs):\n return self.exec_command('rm %s' % (s3uri), **kwargs)", "def delete(obj, path, ignore_missing=False):\n return glom(obj, Delete(path, ignore_missing=ignore_missing))", "def delete(self, path):\n client = self.connect(VAULT_TOKEN)\n client.delete(path)", "def delete(c, bucket=None):\n if bucket == None:\n bucket = \"dvc-\" + project_dir_name().replace(\"_\",\"-\")\n bucket_resource = boto3.resource('s3').Bucket(bucket)\n bucket_resource.objects.all().delete()\n s3 = boto3.client(\"s3\")\n s3.delete_bucket(Bucket=bucket)", "def rm(self, path):\n try:\n basedir, item = os.path.split(path)\n postdata = codecs.encode(json.dumps({ 'baseDir': basedir, 'items': [ item ] }), 'utf-8')\n self._urlopen('/api/fileops/delete', postdata).read()\n except HTTPError as err:\n raise RuntimeError(\"Unable to delete '{}'\".format(path))", "def delete_from_s3(site, bucket, prefix=None):\n if bucket is None:\n print red('Error: Bucket must be specified.')\n return\n # Setup boto\n import boto\n from boto.s3.bucket import Bucket\n from boto.s3.key import Key\n\n setup_aws_access_key(site)\n\n # Fix the prefix\n if prefix:\n prefix = prefix.lstrip('/')\n\n # Connect to S3, list the contents, and remove all of the keys\n c = boto.connect_s3()\n b = Bucket(c, bucket)\n result_set = b.list(prefix=prefix)\n result = b.delete_keys([key.name for key in result_set])", "def delete(self, path):\n full_path = self._get_full_path(path)\n if os.path.exists(full_path):\n os.remove(full_path)", "def delete(self, args):\n try:\n assert len(args) > 0\n file_name = args[0]\n try:\n if should('Delete %s?' % file_name):\n self.s3.Object(self.bucket_name, file_name).load()\n self.s3.Object(self.bucket_name, file_name).delete()\n print('File %s deleted!' % file_name)\n except ClientError:\n print('File %s not found in bucket %s' % (file_name, self.bucket_name))\n except AssertionError:\n print('I need a file name!')", "def delete(self, bucket: str, object_name: str):\n raise NotImplementedError()", "def delete_file(path):\n return files.delete_file(path)", "def delete_object(object_location: ObjectLocation) -> None:\n s3 = boto3.client(\"s3\")\n result = s3.delete_object(\n Bucket=object_location.bucket.name, Key=object_location.key\n )\n log.debug(f\"Result of delete of {object_location}: {result}\")", "def s3_delete_local(self, outpath, from_file, bucket, prefix_no_slash):\n\n objecta='{}/{}'.format(prefix_no_slash,outpath)\n s3 = boto3.client('s3')\n with open(from_file, \"rb\") as f:\n s3.upload_fileobj(f, bucket, objecta)\n os.remove(from_file)", "def file_delete(self, path):\n params = {'root': self.session.root, 'path': format_path(path)}\n\n url, params, headers = self.request(\"/fileops/delete\", params)\n\n return self.rest_client.POST(url, params, headers)" ]
[ "0.8589754", "0.81548566", "0.7849945", "0.7838405", "0.7520646", "0.7260223", "0.7242306", "0.7200459", "0.709181", "0.7063145", "0.7008641", "0.6976428", "0.6947444", "0.6919413", "0.69042146", "0.6903383", "0.69029564", "0.6900946", "0.6897261", "0.68503684", "0.685027", "0.683131", "0.6768136", "0.67548627", "0.6705938", "0.66961586", "0.66715044", "0.6644316", "0.66326874", "0.6601868" ]
0.85541904
1
Delete a GCS object at `path`.
def _removeGCSObject(self, path: str): log.warning(f'Deleting GCS object {path}') try: storage.GoogleCloudStorage(self.gcp_project_id).DeleteObject(path) except Exception as error: # pylint: disable=broad-except log.error(f'Failed to delete GCS Object {path}: {str(error)}')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def rm(path: str):\n _ = path.strip('/').split('/')\n bucket = _[0]\n key = '/'.join(_[1:])\n if path.endswith('/'):\n key += '/'\n async with _create_client() as client:\n try:\n await client.delete_object(Bucket=bucket, Key=key)\n logger.info(f'Delete file \"{path}\" from bucket.')\n except ClientError:\n pass", "def delete_object(self, object_path):\n bucket_name, save_path = self.split_name(object_path)\n self.s3_client.delete_object(Bucket=bucket_name, Key=save_path)\n return True", "def delete(obj, path, ignore_missing=False):\n return glom(obj, Delete(path, ignore_missing=ignore_missing))", "def delete_object(self, s3_path):\n logging.info(\"Deleting \\\"{}\\\" file from S3\".format(s3_path))\n bucket_name, key = S3Util.get_bucket_and_key(s3_path)\n self.s3_resource.ObjectSummary(bucket_name, key).delete()", "def delete(self, path):\n \n # prep\n delete_url = self.url(path)\n\n # request\n response = requests.delete(delete_url, auth=self.auth, headers=API.HEADERS)\n\n # test and return\n self.raise_for_status(response)", "def delete_path(bucket, path):\n bucket = get_bucket(bucket)\n\n for k in bucket.list(path):\n k.delete()\n k = Key(bucket)\n k.key = path.strip(\"/\") + \"_$folder$\"\n k.delete()", "def delete(self, path):\n \n try:\n self._client.remove(self._getEncodedUri(path), force=True)\n except ClientError, error:\n raise SubversionError(error)\n else:\n self._sharedState.removeFromCache(path)", "def delete_object(object_id):\n log.warning(f\"Deleting Google Drive object with id '{object_id}'...\")\n _drive_service.files().delete(fileId=object_id).execute()", "def delete(self, path):\n response = self._request(\"DELETE\", path)\n return self._handle_response(response)", "def delete(path: str):\n token = get_token()\n headers = {\n \"Authorization\": f\"Bearer {token}\"\n }\n return requests.delete(get_base_url() + path, headers=headers)", "def _removeAWSS3Object(self, path: str):\n log.warning(f'Deleting AWS S3 object {path}')\n bucket, key = storage_utils.SplitStoragePath(path)\n s3_client = boto3.client('s3')\n try:\n s3_client.delete_object(Bucket=bucket, Key=key)\n except Exception as error: # pylint: disable=broad-except\n log.error(f'Failed to delete S3 Object {path}: {str(error)}')", "def delete(self, path):\n full_path = self._get_full_path(path)\n if os.path.exists(full_path):\n os.remove(full_path)", "def delete_file(path):\n return files.delete_file(path)", "def delete(self, path):\n client = self.connect(VAULT_TOKEN)\n client.delete(path)", "def delete_key(bucket_name: str, path: str) -> None:\n global config\n session = config.get_session_fn()\n s3_client = session.client(\"s3\")\n try:\n s3_client.delete_object(Bucket=bucket_name, Key=path)\n except Exception as e:\n logger.warning(f\"Failed to delete {path} from s3: {e}\")", "def delete_file(self, path):\n return self.client._perform_empty(\n \"DELETE\", \"/projects/%s/managedfolders/%s/contents/%s\" % (self.project_key, self.odb_id, utils.quote(path)))", "def delete(path, recursive=False):\n fs.delete(path, recursive)", "def file_delete(self, path):\n params = {'root': self.session.root, 'path': format_path(path)}\n\n url, params, headers = self.request(\"/fileops/delete\", params)\n\n return self.rest_client.POST(url, params, headers)", "def remove(path: str):\n _fs().remove(path)", "def rm(self, path):\n try:\n basedir, item = os.path.split(path)\n postdata = codecs.encode(json.dumps({ 'baseDir': basedir, 'items': [ item ] }), 'utf-8')\n self._urlopen('/api/fileops/delete', postdata).read()\n 
except HTTPError as err:\n raise RuntimeError(\"Unable to delete '{}'\".format(path))", "def api_delete(self, path):\n return self._api_request(path, 'DELETE')", "def delete(self, bucket: str, object_name: str):\n raise NotImplementedError()", "def rm(path):\n abs_path = navigate.get_abs_path(path)\n parent, name = navigate.split_path(abs_path)\n access_token = db.get_access_to_file(parent, name)\n if access_token is not None:\n dbox_path = '/' + name\n client = dropbox.client.DropboxClient(access_token)\n client.file_delete(dbox_path)\n db.remove_file(access_token, parent, name)", "def delete(self, c_path):\n raise NotImplementedError", "def delete_file(path):\n if os.path.isfile(path):\n os.remove(path)", "def delete_object(self, object):\n object.delete()", "def delete_file(self, path):\n if not path_exists(path, self._store_folder):\n raise NotFoundException(\"\")\n os.remove(path)", "def delete_file(path):\n if os.path.isfile(path):\n os.remove(path)", "def rm(self, path: str) -> None:\n self.fs.rm(self._full_path(path))", "def gcloud_delete_file(file_name, gcloud_bucket_name):\n bucket = gce_storage_client.get_bucket(gcloud_bucket_name)\n blob = bucket.blob(file_name)\n\n blob.delete()" ]
[ "0.78189796", "0.7468063", "0.7263031", "0.7235776", "0.7158689", "0.7100991", "0.7084128", "0.7063686", "0.7039927", "0.70041424", "0.69738555", "0.6940997", "0.69260657", "0.68535155", "0.6739612", "0.6701492", "0.66956896", "0.6692519", "0.66546154", "0.66297257", "0.65918994", "0.6542909", "0.6517915", "0.6500579", "0.64786685", "0.6469854", "0.64126694", "0.63925636", "0.63665813", "0.63327277" ]
0.8232839
0
Remove GCE Image with name `name`.
def _removeGCEImage(self, name: str): log.warning(f'Deleting GCE Image {name}') try: compute.GoogleComputeImage( self.gcp_project_id, self.gcp_zone, name ).Delete() except Exception as error: # pylint: disable=broad-except log.error(f'Failed to delete GCE Image {name}: {str(error)}')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_image(Name=None):\n pass", "def del_image(self, name):\r\n if self.images is None or name not in self.images:\r\n return\r\n l = self.images\r\n self.images = None\r\n l.setdefault('/empties/', [])\r\n # push the number on the empties list\r\n l['/empties/'].append(l[name])\r\n del l[name]\r\n self.images = l", "def delImg(img_name):\n img = Image.objects.raw({\"_id\": img_name}).first()\n img.delete()\n return", "def _removeGCEDisk(self, name: str):\n log.warning(f'Deleting GCE Disk {name}')\n try:\n gce_disk_client = common.GoogleCloudComputeClient(\n project_id=self.gcp_project_id).GceApi().disks()\n gce_disk_client.delete(\n project=self.gcp_project_id,\n zone=self.gcp_zone,\n disk=name\n ).execute()\n except Exception as error: # pylint: disable=broad-except\n log.error(f'Failed to delete GCE Disk {name}: {str(error)}')", "def remove(name):", "def delete_image_builder(Name=None):\n pass", "def delete_image(name: str, remote: bool = True):\n client = docker.from_env()\n\n # List all images including un-tagged images and delete all of them.\n images = client.images.list(name)\n # Sort list to delete children images first.\n images.sort(key=lambda image: image.attrs['Created'], reverse=True)\n for image in images:\n client.images.remove(image.id, force=True)\n if remote:\n # NOTE: RepoDigest != id. Use repo digests when deleting remote images.\n remote_image = image.attrs['RepoDigests'][0]\n subprocess.check_output([\n 'gcloud', 'container', 'images', 'delete', remote_image, '--quiet',\n '--force-delete-tags'\n ])\n client.close()", "def deleteReferenceImage(self, name):\n blobName = self._getReferenceImageBlobName(name)\n self.productSearch.productClient.delete_reference_image(name=name)\n self.productSearch.bucket.blob(blobName).delete()", "def POST_delete_sr_img(self, res, name):\r\n # just in case we need to kill this feature from XSS\r\n if g.css_killswitch:\r\n return self.abort(403,'forbidden')\r\n c.site.del_image(name)\r\n c.site._commit()\r\n # hide the image and it's container\r\n res._hide(\"img-li_%s\" % name)\r\n # reset the status\r\n res._update('img-status', innerHTML = _(\"Deleted\"))", "def remove_image(self, imagename, del_img=False):\n os.system('rm -r {}.model'.format(imagename))\n os.system('rm -r {}.flux'.format(imagename))\n os.system('rm -r {}.psf'.format(imagename))\n os.system('rm -r {}.residual'.format(imagename))\n if del_img:\n os.system('rm -r {}.image'.format(imagename))", "def rm(self, name: str) -> None:\n path = self.get_path(name)\n if os.path.exists(path):\n os.remove(path)", "def remove_snapshot(project, snap_name):\n data = {constants.PROJECT_PARAMETER: project,\n constants.IMAGE_NAME_PARAMETER: snap_name}\n res = requests.delete(_url + \"remove_image/\", data=data, auth=(\n _username, _password))\n click.echo(res.content)", "def delete_from_s3(image_name):\n conn = S3Connection(aws_access_key_id, aws_secret_access_key)\n bucket = Bucket(conn, \"shopifyimagerepository\")\n k = Key(bucket)\n k.key = image_name\n bucket.delete_key(k)", "def delete(self, name):\n err = C.git_remote_delete(self._repo._repo, to_bytes(name))\n check_error(err)", "def remove_resource(self, name):\n self._NDL_API('removeresource', { 'vm': name, }, None)", "def delete_file(name):\n subprocess.check_output(cmd_preamble + [\"rm\", name])", "def delAvatarImage(self, imgName = None): \n if imgName:\n self.window.remove_child(self.images[imgName])\n self.images[imgName].destroy()\n self.images[imgName] = None\n else:\n for key in self.images:\n if 
self.images[key]:\n self.window.remove_child(self.images[key])\n self.images[key].destroy()\n self.images[key] = None", "def remove(self, name):\n raise NotImplementedError", "def delete_file(self, name):\n del self.files[name]", "def delete_camera(self, name: str) -> None:\n self._cur.execute(sql_delete_camera, [name])\n self._conn.commit()", "def delete(self, name):\n path = self.directory / f\"{name}.yaml\"\n if path.exists():\n path.unlink()", "def remove_asset(self, name):\n if name in self.assets:\n del self.assets[name]", "def scrub_image(name, ec2, s3):\n response = ec2.describe_images(\n Owners=['self'],\n Filters=[{'Name': 'tag:Name', 'Values': [name]}]\n )\n # find the date of the newest image\n image_dates = []\n for image in response['Images']:\n logger.debug(\"Image creation date is \" + dateutil.parser.parse(image['CreationDate']).isoformat())\n image_dates.append(dateutil.parser.parse(image['CreationDate']))\n current_image_date = max(image_dates)\n logger.info(\"Newest image date is \" + current_image_date.isoformat())\n\n # purge all images older than the current one\n for image in response['Images']:\n image_date = dateutil.parser.parse(image['CreationDate'])\n if image_date == current_image_date:\n logger.info(\"Skipping '\" + image['Name'] + \"' as it's the newest\")\n continue\n else:\n logger.info(\"Working on \" + image['Name'] + \" as \" + image['CreationDate'] + \" is not == \" +\n current_image_date.isoformat())\n try:\n ec2.deregister_image(ImageId=image['ImageId'])\n logger.info(\"De-registered image: \" + image['ImageId'])\n except botocore.exceptions.ClientError as e:\n logger.error(e)\n\n # identify image objects to purge\n location = image['ImageLocation']\n if location.endswith('.manifest.xml'):\n location = location[:-13]\n logger.debug(\"image location: \" + location)\n objects = s3.list_objects_v2(\n Bucket=location.split('/', 1)[0],\n Prefix=location.split('/', 1)[1]\n )\n delete_list = []\n if 'Contents' not in objects:\n logger.warn(\"Found no S3 keys for image: \" + image['Name'])\n continue\n for s3_key in objects['Contents']:\n delete_list.append({\"Key\": s3_key['Key']})\n\n # delete the identified files\n response = s3.delete_objects(\n Bucket=location.split('/', 1)[0],\n Delete={\n 'Objects': delete_list\n }\n )\n if 'Errors' in response:\n logger.error(\"Deletion Errors:\" + json.dumps(response['Errors']))\n if 'Deleted' in response:\n logger.info(\"Deletion objects:\" + json.dumps(response['Deleted']))", "def remove(self, name: str) -> None:\n del self.components[name]", "def remove_image(project, img):\n data = {constants.PROJECT_PARAMETER: project,\n constants.IMAGE_NAME_PARAMETER: img}\n res = requests.delete(_url + \"remove_image/\", data=data, auth=(\n _username, _password))\n click.echo(res.content)", "def delete(self, name):\n self.backend.delete(name)", "def bdev_uring_delete(client, name):\n params = {'name': name}\n return client.call('bdev_uring_delete', params)", "def clean_node(\n self,\n name,\n ):\n # Gets the node IP address.\n ip = self.get_node_ip(name)\n\n # Deletes the images.\n docker_utils.clean(\n hostname=ip,\n ssh_port=SSH_PORT,\n ssh_username=self.get_ssh_username(name),\n ssh_private_key_file=self.get_ssh_private_key_file(name),\n executor=name,\n logger=self._logger,\n )", "def delete(self, name):\n params = {\n 'method': 'flickr.photos.delete',\n 'photo_id': name,\n }\n response = self.oauth_session.post(self.API_ENDPOINT, params=params)\n json_response = response.json()\n if json_response['stat'] == 'fail' and 
json_response['code'] != 1:\n raise FlickrError(json_response['message'])", "def remove(self, name):\n path = '%s/%s' % (self.path, name)\n lock = '%s%s' % (path, LOCKED_SUFFIX)\n os.unlink(path)\n os.unlink(lock)" ]
[ "0.77334917", "0.75917494", "0.7531193", "0.722961", "0.7067009", "0.69292396", "0.68562293", "0.6789977", "0.67663544", "0.6758139", "0.6702444", "0.66960543", "0.66144276", "0.6586997", "0.6548269", "0.6530188", "0.64906603", "0.6479747", "0.6467619", "0.6458344", "0.6456988", "0.6448828", "0.6439579", "0.6423764", "0.64082354", "0.64067894", "0.63955045", "0.6379018", "0.63686293", "0.63598937" ]
0.8452742
0
Remove the disk with name `name`.
def _removeGCEDisk(self, name: str): log.warning(f'Deleting GCE Disk {name}') try: gce_disk_client = common.GoogleCloudComputeClient( project_id=self.gcp_project_id).GceApi().disks() gce_disk_client.delete( project=self.gcp_project_id, zone=self.gcp_zone, disk=name ).execute() except Exception as error: # pylint: disable=broad-except log.error(f'Failed to delete GCE Disk {name}: {str(error)}')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rm(self, name: str) -> None:\n path = self.get_path(name)\n if os.path.exists(path):\n os.remove(path)", "def vm_diskdelete(args):\n name = args.name\n diskname = args.diskname\n pool = args.pool\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n if diskname is None:\n common.pprint(\"Missing diskname. Leaving...\", color='red')\n os._exit(1)\n common.pprint(\"Deleting disk %s\" % diskname)\n k.delete_disk(name=name, diskname=diskname, pool=pool)\n return", "def delete(self, name):\n result = self.cm.find_name(name)\n path = result[0]['path']\n delete_path = Path(f'{path}/{name}')\n try:\n os.system(f\"rmdir {delete_path}\")\n result[0]['State'] = 'deleted'\n result = self.update_dict(result)\n except:\n Console.error(\"volume is either not empty or not exist\")\n return result", "def delete_file(self, name):\n del self.files[name]", "def delete(self, name):\n with self._lock:\n self._delete(name)", "def remove(self, name):\n path = '%s/%s' % (self.path, name)\n lock = '%s%s' % (path, LOCKED_SUFFIX)\n os.unlink(path)\n os.unlink(lock)", "def delete(self, name):\n path = self.directory / f\"{name}.yaml\"\n if path.exists():\n path.unlink()", "def remove_resource(self, name):\n self._NDL_API('removeresource', { 'vm': name, }, None)", "def delete(self, name):\n self.backend.delete(name)", "def delete_file(name):\n subprocess.check_output(cmd_preamble + [\"rm\", name])", "def remove(name):", "def delete(vmname, deldisk=True):\n\n dom = _conn.lookupByName(vmname)\n if dom.isActive():\n dom.destroy()\n infokeeper.update_status_vm(vmname, Instance.STATUS_POWER_OFF)\n dom.undefine()\n infokeeper.delete_vm(vmname)\n if deldisk:\n os.remove(os.path.join(base_disk_path, dom.name() + '.img'))\n return 'VM %s deleted' % vmname", "def delete(self, name):\n\n pass", "def delete(self, name):\n assert name, \"Must input a valid dataset name.\"\n self.manager.delete_data(name)", "def delete(self, name):\n raise NotImplementedError(\n \"subclasses of Storage must provide a delete() method\"\n )", "def delete(self, name):\n try:\n self.container.delete_object(name)\n except ResponseError as e:\n reraise(e)", "def delete(self, name):\n self.connect()\n self._write('DEL %s\\r\\n' % name)\n return self._get_numeric_response()", "def remove(self, name):\n raise NotImplementedError", "def bdev_iscsi_delete(client, name):\n params = {'name': name}\n return client.call('bdev_iscsi_delete', params)", "def delete(self, name=None):\n raise NotImplementedError", "def delete_entry(self, name):\n try:\n self.__datacatalog.delete_entry(name=name)\n self.__log_entry_operation('deleted', entry_name=name)\n except Exception as e:\n logging.info(\n 'An exception ocurred while attempting to'\n ' delete Entry: %s', name)\n logging.debug(str(e))", "def bdev_xnvme_delete(client, name):\n params = {'name': name}\n return client.call('bdev_xnvme_delete', params)", "def bdev_daos_delete(client, name):\n params = {'name': name}\n return client.call('bdev_daos_delete', params)", "def remove_dataset(cls, name):\n gxapi_cy.WrapSTORAGEPROJECT._remove_dataset(GXContext._get_tls_geo(), name.encode())", "def delete(self, name):\n err = C.git_remote_delete(self._repo._repo, to_bytes(name))\n check_error(err)", "def delete(self, *, name: types.TSeedName) -> None:\n if not (self._base_path / self._get_file_name(name)).exists():\n raise exceptions.SeedNotFoundError(f\"could not find seed {name}\")\n (self._base_path / self._get_file_name(name)).unlink()", 
"def rm(path):\n abs_path = navigate.get_abs_path(path)\n parent, name = navigate.split_path(abs_path)\n access_token = db.get_access_to_file(parent, name)\n if access_token is not None:\n dbox_path = '/' + name\n client = dropbox.client.DropboxClient(access_token)\n client.file_delete(dbox_path)\n db.remove_file(access_token, parent, name)", "def bdev_rbd_delete(client, name):\n params = {'name': name}\n return client.call('bdev_rbd_delete', params)", "def delete(self, name):\n if name in self._dict:\n self._dict.pop(name)\n self.save()\n else:\n raise PoseError(\"%s is not in database\" % _name)", "def disconnect_disk_from_mgmt(self, vios_uuid, disk_name):\n raise NotImplementedError()" ]
[ "0.75362515", "0.71105665", "0.70604837", "0.69789946", "0.69264275", "0.69136286", "0.6910295", "0.68948543", "0.6887401", "0.684326", "0.67783725", "0.6729041", "0.67081046", "0.66445243", "0.6617731", "0.65383613", "0.6531545", "0.65064126", "0.64904535", "0.6488289", "0.6474192", "0.64426523", "0.643165", "0.6422895", "0.641646", "0.6347586", "0.63289493", "0.63259107", "0.6314989", "0.63034475" ]
0.76121897
0
Add blogpost to manuscript
def add_blogpost(manuscript, subject, url): line_number = 0 with open(manuscript, "r") as file: lines = file.readlines() for line in lines: if ("## ブロマガ全集" in line): lines.insert(line_number + 2, f"- [{subject}]({url})\n") with open(manuscript, "w") as file: file.writelines(lines) print("Add:", subject) return 0 line_number += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(blog, date):\n template = front_matter({\n \"title\": blog,\n \"date\": get_date(\"%Y-%m-%d %H:%M:%S %z\"),\n })\n new_blog(date + '-' + blog + '.markdown', template)", "def add_blog(self, text):\n self.blog.add_blog(text)\n self.refresh()", "def add_post(content):\n db = psycopg2.connect(\"dbname=forum\")\n c = db.cursor()\n content = bleach.clean(content)\n c.execute(\"insert into posts values (%s)\", (content,))\n db.commit()\n db.close()\n # POSTS.append((content, datetime.datetime.now()))", "def new_blog(blog, template):\n path = '/'.join([POSTS, blog])\n with open(path, 'w') as blg:\n blg.write(template)", "def add_meta(self, post, *args, **kwargs):\n\t\tsuper(Command, self).add_meta(post, *args, **kwargs)\n\t\tpost.gen_description = False\n\t\tpost.description = description_from_content(post)\n\t\tpost.save()", "def add_post(content):\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n cleaned = bleach.clean(content, strip = True)\n c.execute(\"insert into posts values(%s)\", (cleaned,))\n db.commit()\n db.close()", "def addContent(text):", "def newPost(self, postLink, zserverBlogEntry): #$NON-NLS-1$\r\n atomEntry = self.createNewBlogEntry()\r\n self._populateAtomEntry(atomEntry, zserverBlogEntry)\r\n # publish entry\r\n atomRespEntry = self.createAtomEntry(postLink, atomEntry)\r\n return atomRespEntry", "def addPublication():\n preloaded = [\n {\"description\": \"bortaS <b>bIr</b> jablu'DI' reH QaQqu' nay'!\"},\n {\"language\": \"en\"},\n {\"country\": \"usa\"}\n ]\n return render_template(\"addPublication.html\", msg=\"\", preloaded=preloaded)", "def main():\n\n # Pick files\n token_file = os.path.abspath(TOKEN_FILE)\n creds_file = os.path.abspath(CREDS_FILE)\n manuscript_file = os.path.abspath(MANUSCRIPT_FILE)\n\n # Authorize Gmail API\n creds = gmail.authorize(SCOPES, token_file, creds_file)\n\n # Build Gmail API\n service = gmail.build_service(creds)\n\n # Get messages list\n msgs = gmail.get_messages(service,\n userid=\"me\",\n query=\"from:鈴木祐\",\n )\n\n # Add blogpost link\n for msg in reversed(msgs):\n msg_ = gmail.get_message(service, msg)\n blogpost_subject = get_blogpost_subject(msg_)\n blogpost_url = get_blogpost_url(msg_)\n if not is_included(manuscript_file, blogpost_url):\n add_blogpost(manuscript_file, blogpost_subject, blogpost_url)\n print(\"FINISHED: Update paleo channel blogposts.\")", "def new_post(self, content):\n return self.proxy.wp.newPost(self.blog_id, self.username, self.password,\n content)", "def add_post(self, post: Post) -> None:\n self.post_process.append(post)", "def newPost(self, useRawHTML):\n print\n content, publish = self._fillPost(useRawHTML)\n\n # Upload to server\n try :\n postid = self.server.metaWeblog.newPost(\n self.blogid, self.username, self.password,\n content, publish\n )\n except xmlrpclib.Fault as fault:\n display_XMLRPC_errors(\"post the new entry\", fault)\n import pdb\n pdb.set_trace()\n else :\n self._setCategorie(postid)\n print \"New post created with ID =\", postid", "def add_post(request, topic_id, post_id = False):\n\t\n\ttopic = Topic.objects.values('is_locked').get(id=topic_id)\n\tif topic['is_locked']:\n\t\treturn render_to_response('pages/bug.html', {'bug': _('Topic is closed')}, context_instance=RequestContext(request))\n\n\t# check who made the last post.\n\tlastpost = Post.objects.order_by('-post_date').filter(post_topic=topic_id)[:1]\n\tis_staff = request.user.is_staff\n\t# if the last poster is the current one (login) and he isn't staff then we don't let him post after his post\n\tif 
str(lastpost[0].post_author) == str(request.user) and not is_staff:\n\t\treturn render_to_response('pages/bug.html', {'bug': _('You can\\'t post after your post')}, context_instance=RequestContext(request))\n\t\n\tlastpost = Post.objects.filter(post_topic=topic_id).order_by('-id')[:10]\n\tif request.POST:\n\t\tpage_data = request.POST.copy()\n\t\tpage_data['post_author'] = str(request.user)\n\t\ttags = findall( r'(?xs)\\[code\\](.*?)\\[/code\\]''', page_data['post_text'])\n\t\tfor i in tags:\n\t\t\tpage_data['post_text'] = page_data['post_text'].replace(u'[code]'+i+u'[/code]', u'[code]'+base64.encodestring(i)+u'[/code]')\n\t\tpage_data['post_text'] = html2safehtml(page_data['post_text'] ,valid_tags=settings.VALID_TAGS)\n\t\ttags = findall( r'(?xs)\\[code\\](.*?)\\[/code\\]''', page_data['post_text'])\n\t\tfor i in tags:\n\t\t\tpage_data['post_text'] = page_data['post_text'].replace(u'[code]'+i+u'[/code]', u'[code]'+base64.decodestring(i)+u'[/code]')\n\t\t\n\t\tpage_data['post_ip'] = request.META['REMOTE_ADDR']\n\t\tpage_data['post_topic'] = topic_id\n\t\tpage_data['post_date'] = datetime.now()\n\t\tform = AddPostForm(page_data)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\n\t\t\ttopic = Topic.objects.get(id=topic_id)\n\t\t\tposts = Post.objects.filter(post_topic=topic_id).count()\n\t\t\t\n\t\t\tpmax = posts/10\n\t\t\tpmaxten = posts%10\n\t\t\tif pmaxten != 0:\n\t\t\t\tpmax = pmax+1\n\t\t\t\ttopic.topic_last_pagination_page = pmax\n\t\t\telif pmax > 0:\n\t\t\t\ttopic.topic_last_pagination_page = pmax\n\t\t\telse:\n\t\t\t\tpmax = 1\n\t\t\t\ttopic.topic_last_pagination_page = 1\n\t\t\ttopic.topic_posts = posts\n\t\t\ttopic.topic_lastpost = str(request.user)+'<br />' + str(datetime.today())[:-10]\n\t\t\ttopic.save()\n\t\t\t\n\t\t\tforum = Forum.objects.get(id=topic.topic_forum.id)\n\t\t\tforum.forum_posts = forum.forum_posts +1\n\t\t\t\n\t\t\tforum.forum_lastpost = str(request.user)+' (' + str(datetime.today())[:-10] + ')<br /><a href=\"/forum/topic/' + str(pmax) + '/' + str(topic.id) + '/\">' + topic.topic_name + '</a>'\n\t\t\tforum.save()\n\t\t\t\n\t\t\tmail_admins('Post Dodany', \"Dodano Post: http://www.\" + settings.SITE_KEY + \"/forum/topic/\" + str(pmax) + \"/\" + topic_id +\"/\", fail_silently=True)\n\t\t\treturn HttpResponseRedirect(\"/forum/topic/\" + str(pmax) + \"/\" + topic_id +\"/\")\n\t\telse:\n\t\t\treturn render_to_response(\n\t\t\t\t'myghtyboard/add_post.html',\n\t\t\t\t{'lastpost': lastpost, 'perms': list_perms(request), 'form':form},\n\t\t\t\tcontext_instance=RequestContext(request))\n\telse:\n\t\tif post_id:\n\t\t\tquote = Post.objects.get(id=post_id)\n\t\t\tquote_text = '<blockquote><b>' + quote.post_author + _(' wrote') + ':</b><br /><cite>' + quote.post_text + '</cite></blockquote>\\n\\n'\n\t\telse:\n\t\t\tquote_text = ''\n\treturn render_to_response(\n\t\t'myghtyboard/add_post.html',\n\t\t{'quote_text': quote_text, 'lastpost': lastpost, 'perms': list_perms(request)},\n\t\tcontext_instance=RequestContext(request))", "def run_get_post(m):\n\n doc = get_doc(m)\n assert doc is not None\n\n wp = get_wp(m)\n\n post = find_post(wp, doc.identifier)\n\n if post:\n post.content = \"…content elided…\"\n from pprint import pprint\n pprint(post.struct)\n return\n else:\n warn(f\"Didn't find post for identifier {doc.identifier}\")\n return", "def massage_addcontent(self) -> str:\n pass", "def _fillPost(self, useRawHTML, old_data=None):\n # Initialize empty dictionnary ct (aka content)\n # to be sent through self.server.metaWeblog.newPost()\n ct = {}\n\n # if no old_data, create 
a fake one\n if old_data == None:\n old_data = { 'title': None\n , 'mt_keywords': None\n , 'formatted_text': BALISE\n , 'mt_excerpt': None\n , 'description': None}\n\n def updateField(prompt, string=None):\n if (string == None) or (string == \"\") :\n return raw_input(prompt)\n else :\n r = raw_input(prompt + \" [default:\" + string + \"]\\n\")\n if r == \"\" :\n return string\n else :\n return r\n\n # Now get information\n ct['title'] = updateField( \"Title?\\n\", old_data['title'])\n ct['mt_keywords'] = updateField(\n \"Tags? (comma separated lists)?\\n\",\n old_data['mt_keywords'])\n\n # Categories are not included in the struct \"ct\"\n # see _setCategorie()\n\n # Get excerpt/content\n # Method0: external XML file\n if useRawHTML:\n with open( useRawHTML, 'rb') as f:\n doc = xml.dom.minidom.parse(f)\n # Parse our XHTML file\n text = doc.getElementsByTagName(\"body\")[0].toxml()\n #text = text.decode() # convert bytes to string\n text = text.replace(\"<body>\", \"\").replace(\"</body>\", \"\")\n ct['mt_excerpt'], ct['description'] = split_excerpt( text)\n\n # Method1: custom editor\n elif self.editor :\n prev_data = old_data['formatted_text']\n data = self._externalEditor( wrap_with_template(prev_data) )\n ct['mt_excerpt'], ct['description'] = split_excerpt( data)\n\n # Method2: input\n else :\n ct['mt_excerpt'] = updateField(\n \"Excerpt? (beware of xHTML tags !)\\n\",\n old_data['mt_excerpt'])\n ct['description'] = updateField(\n \"Main content? (beware of xHTML tags !)\\n\",\n old_data['description'])\n\n # Process the rest of the attributes (comments, pings, ...)\n def set_boolean( prompt, default):\n if default == True:\n return raw_input(prompt + \"[Y|n]\") != \"n\"\n else:\n return raw_input(prompt + \"[y|N]\") != \"y\"\n\n ct['mt_allow_comments'] = set_boolean( \"Allow comments ?\"\n , self.auto_comments)\n ct['mt_allow_pings'] = set_boolean( \"Allow pings ?\"\n , self.auto_pings)\n publish = set_boolean( \"Publish ?\" , self.auto_publish)\n\n return ct, publish", "def add(self, author, post):\n if not author in self.authors:\n self.authors.append(author)\n self.posts[author].append(post)\n return", "def markdown_post(post):\n post['entry'] = markdown(post['entry'].replace(\"\\n\",\" \\n\"), output=\"html5\")\n return post", "def addPost(self,text,id,url,date):\n self.topComments.append(Post(text,id,url,date))\n return None", "def insert_new_post(post_arg_set):\n api, post_data, acct_data, page_id, config = post_arg_set\n\n try:\n post_id = post_data['id'] if post_data.has_key('id') else None\n\n except Exception as e:\n log.error( e )\n\n else:\n\n # parse date\n if post_data.has_key('created_time') and post_data['created_time'] is not None: \n dt = datetime.strptime(post_data['created_time'], FB_DATE_FORMAT)\n date_time = tz_adj(dt, config)\n time_bucket = round_datetime(date_time, config)\n raw_timestamp = int(date_time.strftime(\"%s\"))\n \n else:\n time_bucket = None\n raw_timestamp = None\n \n # extract message so we can find links within the msg if not in url\n article_urls = [get_fb_link(post_data, config, unshorten=True)]\n message = post_data['message'].encode('utf-8') if post_data.has_key('message') else None\n message_urls = get_message_urls(article_urls, message, config)\n\n # detect article links, unshorten and parse\n article_urls = [\n parse_url(unshorten_link(url, config)) \\\n for url in article_urls + message_urls\n if url is not None\n ]\n\n article_urls = [url for url in article_urls if is_article(url, config)]\n\n if article_urls:\n for article_url in 
set(article_urls):\n\n # sluggify url\n article_slug = sluggify(article_url)\n\n # format data\n post_value = {\n 'article_slug': article_slug,\n 'article_url': article_url,\n 'time_bucket': time_bucket,\n 'fb_post_created': raw_timestamp,\n 'raw_timestamp': raw_timestamp,\n 'fb_raw_link' : get_fb_link(post_data, config=config),\n 'fb_page_id': page_id,\n 'fb_post_id': post_id,\n 'fb_page_likes': acct_data['likes'] if acct_data.has_key('likes') else None,\n 'fb_page_talking_about': acct_data['talking_about_count'] if acct_data.has_key('talking_about_count') else None,\n 'fb_type': post_data['type'] if post_data.has_key('type') else None,\n 'fb_status_type': post_data['status_type'] if post_data.has_key('status_type') else None,\n 'fb_message': message\n }\n \n # always insert insights data\n if is_insights(page_id, config):\n \n log.info( \"INSIGHTS\\tAdding data from %s re: %s\" % (page_id, article_slug) )\n\n # fetch data\n insights_value = get_insights_data(api, page_id, post_id)\n\n # create datasource name\n data_source = \"facebook_insights_%s\" % page_id \n \n # upsert url\n upsert_url(article_url, article_slug, data_source, config)\n\n # insert id\n db.sadd('facebook_post_ids', post_id)\n\n # format time bucket\n current_time_bucket = gen_time_bucket(config)\n insights_value['time_bucket'] = current_time_bucket\n post_value.pop('time_bucket', None)\n \n value = json.dumps({\n data_source : dict(post_value.items() + insights_value.items())\n })\n\n # upload data to redis\n db.zadd(article_slug, current_time_bucket, value) \n \n # only insert new posts\n if not db.sismember('facebook_post_ids', post_id):\n \n log.info( \"FACEBOOK\\tNew post %s\\t%s\" % (post_id, article_url) )\n \n # insert id\n db.sadd('facebook_post_ids', post_id) \n \n # upsert url\n data_source = \"facebook_%s\" % page_id\n upsert_url(article_url, article_slug, data_source, config)\n\n value = json.dumps( {data_source : post_value} )\n\n\n # upload data to redis\n db.zadd(article_slug, time_bucket, value)", "def _add_article(self, link, index=None):\n if self.verbose:\n sys.stdout.write(\".\")\n sys.stdout.flush()\n\n link_url = self.base_url + link\n pdf_filepath = (\n self.output_raw_dir\n + \"/FOMC_PresConfScript_\"\n + self._date_from_link(link)\n + \".pdf\"\n )\n\n if not os.path.exists(pdf_filepath) or self.force_download:\n # Scripts are provided only in pdf. 
Save the pdf and pass the content\n res = requests.get(link_url)\n\n with open(pdf_filepath, \"wb\") as f:\n f.write(res.content)\n else:\n if self.verbose:\n print(\"File already exists: \", pdf_filepath)\n\n # Extract text from the pdf\n pdf_file_parsed = \"\" # new line\n with pdfplumber.open(pdf_filepath) as pdf:\n for page in pdf.pages:\n pg_width = page.width\n pg_height = page.height\n pg_bbox = (\n self.crop_coords[0] * float(pg_width),\n self.crop_coords[1] * float(pg_height),\n self.crop_coords[2] * float(pg_width),\n self.crop_coords[3] * float(pg_height),\n )\n page_crop = page.crop(bbox=pg_bbox)\n text = page_crop.extract_text()\n pdf_file_parsed = pdf_file_parsed + \"\\n\" + text\n paragraphs = re.sub(\"(\\n)(\\n)+\", \"\\n\", pdf_file_parsed.strip())\n paragraphs = paragraphs.split(\"\\n\")\n\n section = -1\n paragraph_sections = []\n for paragraph in paragraphs:\n if not re.search(\n \"^(page|january|february|march|april|may|june|july|august|september|october|november|december|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec)\",\n paragraph.lower(),\n ):\n if len(re.findall(r\"[A-Z]\", paragraph[:10])) > 5 and not re.search(\n \"(present|frb/us|abs cdo|libor|rp–ioer|lsaps|cusip|nairu|s cpi|clos, r)\",\n paragraph[:10].lower(),\n ):\n section += 1\n paragraph_sections.append(\"\")\n if section >= 0:\n paragraph_sections[section] += paragraph\n self.articles[index] = self.segment_separator.join(\n [paragraph for paragraph in paragraph_sections]\n )", "def randomPostToTumblr():\t\n\ttxt_files = filter(lambda x: x.endswith('.txt'), os.listdir(bookpath))\n\tbook = random.choice(txt_files)\n\tTAGS = settings['tags'] + \",inspired by \" + book.replace('-','by')[:book.find(\".\")+1]\n\tif(book.find(\"-\") != -1):\n\t\tTAGS = TAGS + \",\" + book[:book.find(\"-\")-1]\n\t\tTAGS = TAGS + \",\" + book[book.find(\"-\")+2:book.find(\".\")]\n\ttumblrPost(generatePost(os.path.join(bookpath,book)), tags=TAGS)", "def post(self):\n subject = self.request.get('subject')\n content = self.request.get('content')\n\n # if user enter good subject and content, redirect them to new post page\n if subject and content:\n p = Post(parent = blog_key(), subject = subject, content = content)\n p.put() # store the post element into database\n self.redirect('/blog/%s' % str(p.key().id()))\n # otherwise, render an error page \n else:\n error = \"subject and content, please!\"\n self.render(\"newpost.html\", subject=subject, content=content, error=error)", "def push_blog():\n\n\twarn(green(\"Update blog on github pages.\"))\n\t_setup_virtualenv()\n\n\twith cd(PROJECT_PATH):\n\t\twith prefix(env.activate):\n\t\t\tlocal('python blog.py build', shell='/bin/bash')\n\n\t\tlocal('cd {}'.format(FREEZER_DESTINATION), shell='/bin/bash')\n\t\tlocal('git status')\n\t\task_msg = red(\"Force push new content to blog?\")\n\t\tif console.confirm(ask_msg, default=False) is True:\n\t\t\tlocal('git add --all')\n\t\t\tlocal('git commit -m \"new articles\"')\n\t\t\tlocal('git push --force origin master')", "def create_db_post(entry, keys, like):\n h = get_hash(entry['link'])\n collection = pos if like else neg\n return collection.update(\n {'hash': h},\n {\n 'link': entry['link'],\n 'title': entry['title'],\n 'published': '',\n 'content': \" \".join(keys),\n 'hash': h,\n 'read': False\n }, upsert=True\n )", "def __addPost(self, link):\n self.currenturi = link\n self.__setStoneSoupForCurrentUri()\n try:\n page = self.__getData()\n if not page:\n return True \n if checkSessionInfo(self.genre, self.session_info_out, 
self.currenturi,\\\n self.task.instance_data.get('update'),parent_list\\\n = [self.task.instance_data['uri']]):\n log.info(self.log_msg('Session info returns True'))\n return False\n except:\n log.exception(self.log_msg('Cannot add the post for the url %s'%\\\n self.currenturi))\n return False\n try:\n result=updateSessionInfo(self.genre, self.session_info_out, self.currenturi, \\\n get_hash( page ),'review', self.task.instance_data.get('update'),\\\n parent_list=[self.task.instance_data['uri']])\n if not result['updated']:\n log.exception(self.log_msg('Update session info returns False'))\n return True\n page['parent_path'] = [self.task.instance_data['uri']]\n page['path'] = [self.task.instance_data['uri'], self.currenturi]\n page['uri'] = self.currenturi\n page['entity'] = 'review'\n page['uri_domain'] = urlparse.urlparse(page['uri'])[1]\n page.update(self.__task_elements_dict)\n self.pages.append(page)\n #log.info(page)\n log.info(self.log_msg('page added %s'%self.currenturi))\n return True\n except:\n log.exception(self.log_msg('Error while adding session info'))\n return False", "def update_post_format(post):\n\n post_dict = {\n \"title\": post[1],\n \"genre\": get_genre(post[0]),\n \"content\": post[2],\n \"repeater_link\": get_links(post[3], post[4]),\n }\n \n return post_dict", "def post(self):\n\n title = self.request.get(\"title\")\n blogPost = self.request.get(\"blogPost\")\n author = self.request.cookies.get('name')\n\n if title and blogPost:\n\n bp = Blogposts(parent=blog_key(), title=title,\n blogPost=blogPost, author=check_secure_val(author))\n\n bp.put()\n\n self.redirect('/%s' % str(bp.key.integer_id()))\n else:\n error = \"Please submit both a title and a blogpost!\"\n self.render(\"newpost.html\", title=title,\n blogPost=blogPost, error=error)", "def main():\n # Get the date and time in a formatted string.\n today = datetime.datetime.today()\n date_formatted = today.strftime(\"%Y-%m-%d\")\n time_formatted = today.strftime(\"%H:%M:%S\")\n\n # Form the file name and path.\n file_name = date_formatted+\"-post.md\"\n file_path = os.path.join(POSTS_DIR, file_name)\n\n # Make the new header.\n header = HEADER_TEMPLATE.format(date_formatted, time_formatted)\n\n with open(file_path, 'w') as f:\n f.write(header)\n \n os.system(EDITOR+\" \"+file_path)" ]
[ "0.6208906", "0.6076283", "0.59839743", "0.59753376", "0.5914017", "0.5913528", "0.58720726", "0.58409494", "0.5834559", "0.582747", "0.57706946", "0.57343227", "0.56686044", "0.5657509", "0.56546366", "0.5640166", "0.5577207", "0.557562", "0.557433", "0.556912", "0.55233467", "0.55014837", "0.54822457", "0.54579186", "0.5445902", "0.5441769", "0.54388547", "0.53965425", "0.5387325", "0.53787553" ]
0.7508624
0
Evaluates whether the passed dict matches the complex logic of the LogicNode
def eval_logic(self, checkDict): result = True #gets individual evaluations from children passList = [] for child in self.children: myVal = child.eval_comparison(checkDict) passList.append(child.eval_comparison(checkDict)) #if only one child returns the only boolean available if(len(passList) == 1): result = passList[0] #TODO: Combine following cases possibly #print(passList) #gets resutl if only 2 simple logics elif(len(passList) == 2 and len(self.operators) == 1): result = self.operators[0](passList[0], passList[1]) else: #combines all children logic using the operators firstCheck = True opIndex = 0 for i in range(0,len(passList)): if(firstCheck): firstCheck = False result = self.operators[opIndex](passList[0], passList[1]) i+=1 else: result = self.operators[opIndex](result,passList[i]) opIndex += 1 """ print('----------------------') print(result) """ return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_RestrictingNodeTransformer__visit_In_Dict():\n assert restricted_eval('2 in {1: 1, 2: 2, 3: 3}') is True", "def eval_act_logic(self, act_logic_str):\n if self.ignore_act_logic:\n return True\n # return true for empty string\n if len(act_logic_str) == 0:\n return True\n eval_str = act_logic_str\n # switch to Python boolean operators\n for (op, py_op) in (('!', ' not '), ('&&', ' and '), ('||', ' or ')):\n eval_str = eval_str.replace(op, py_op)\n # TODO: guard against malicious code in eval() call?\n result = eval(eval_str, self.ZZ_EVAL_VARS)\n return result", "def evaluate(self, d):\n return bool(eval(self.expr, facts_globals, d))", "def test_expression_contains(self):\n\n # Checks on a specified attribute with operators \"==\" and \"!=\" with integers\n expression = BooleanExpression(\"NORMAL\", models.Network.id.in_([1, 3, 4]))\n value = expression.evaluate(KeyedTuple([{\"id\": 1}], [\"networks\"]))\n self.assertTrue(value, \"models.Network.id in [1, 3, 4] with models.Network.id=1\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.id.in_([1, 3, 4]))\n value = expression.evaluate(KeyedTuple([{\"id\": 2}], [\"networks\"]))\n self.assertFalse(value, \"models.Network.id in [1, 3, 4] with models.Network.id=2\")\n\n # Checks on a specified attribute with operators \"==\" and \"!=\" with string\n expression = BooleanExpression(\"NORMAL\", models.Network.label.in_([\"network_1\", \"network_3\", \"network_4\"]))\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_1\"}], [\"networks\"]))\n self.assertTrue(value, \"\"\"models.Network.label in [\"network_1\", \"network_3\", \"network_4\"] with models.Network.label=\"network_1\" \"\"\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.label.in_([\"network_1\", \"network_3\", \"network_4\"]))\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_2\"}], [\"networks\"]))\n self.assertFalse(value, \"\"\"models.Network.label in [\"network_1\", \"network_3\", \"network_4\"] with models.Network.label=\"network_1\" \"\"\")", "def validate(self, mydict):\n\n if self.query is None:\n raise Exception(\"Validation missing attribute 'query': \" + str(self))\n\n if not isinstance(self.query, str):\n raise Exception(\"Validation attribute 'query' type is not str: \" + type(self.query).__name__)\n\n if self.operator is None:\n raise Exception(\"Validation missing attribute 'operator': \" + str(self))\n\n # from http://stackoverflow.com/questions/7320319/xpath-like-query-for-nested-python-dictionaries\n self.actual = mydict\n try:\n logging.debug(\"Validator: pre query: \" + str(self.actual))\n for x in self.query.strip(self.query_delimiter).split(self.query_delimiter):\n logging.debug(\"Validator: x = \" + x)\n try:\n x = int(x)\n self.actual = self.actual[x]\n except ValueError:\n self.actual = self.actual.get(x)\n except:\n logging.debug(\"Validator: exception applying query\")\n pass\n\n # default to false, if we have a check it has to hit either count or expected checks!\n output = False\n\n if self.operator == \"exists\":\n # require actual value\n logging.debug(\"Validator: exists check\")\n output = True if self.actual is not None else False\n elif self.operator == \"empty\":\n # expect no actual value\n logging.debug(\"Validator: empty check\" )\n output = True if self.actual is None else False\n elif self.actual is None:\n # all tests beyond here require actual to be set\n logging.debug(\"Validator: actual is None\")\n output = False\n elif self.expected is None:\n raise Exception(\"Validation 
missing attribute 'expected': \" + str(self))\n elif self.operator == \"count\":\n self.actual = len(self.actual) # for a count, actual is the count of the collection\n logging.debug(\"Validator: count check\")\n output = True if self.actual == self.expected else False\n else:\n logging.debug(\"Validator: operator check: \" + str(self.expected) + \" \" + str(self.operator) + \" \" + str(self.actual))\n\n # any special case operators here:\n if self.operator == \"contains\":\n if isinstance(self.actual, dict) or isinstance(self.actual, list):\n output = True if self.expected in self.actual else False\n else:\n raise Exception(\"Attempted to use 'contains' operator on non-collection type: \" + type(self.actual).__name__)\n else:\n # operator list: https://docs.python.org/2/library/operator.html\n myoperator = getattr(operator, self.operator)\n output = True if myoperator(self.actual, self.expected) == True else False\n\n #print \"Validator: output is \" + str(output)\n\n # if export_as is set, export to environ\n if self.export_as is not None and self.actual is not None:\n logging.debug(\"Validator: export \" + self.export_as + \" = \" + str(self.actual))\n os.environ[self.export_as] = str(self.actual)\n\n self.passed = output\n\n return output", "def match(ctx, expr):\n if \"$or\" in expr:\n for x in expr[\"$or\"]:\n if match(ctx, x):\n return True\n return False\n else:\n for x in expr:\n if x not in ctx:\n return False\n if isinstance(expr[x], dict):\n for m in expr[x]:\n mf = matchers.get(m)\n if mf:\n if not mf(ctx[x], expr[x][m]):\n return False\n else:\n return False\n elif ctx.get(x) != expr[x]:\n return False\n return True", "def _match(self, rule, obj):\n\n for key in rule:\n if key == '$and':\n if not self.handle_and(key, rule[key], obj):\n return False\n\n elif key == '$or':\n if not self.handle_or(key, rule[key], obj):\n return False\n\n elif key == '$nor':\n if not self.handle_nor(key, rule[key], obj):\n return False\n\n elif not self.handle_field(key, rule[key], obj):\n return False\n\n return True", "def evaluate(self, obj):\n #obj._print()\n\n # substitute event's attributes names by their values.\n cond = self.condition\n for attr in obj._attr_:\n cond = re.sub('evt\\.%s' % attr, \"\\\"%s\\\"\" % str(obj._attr_[attr]), cond)\n\n # if it remains evt.* objects in the rule, there is a problem\n # FIXME: false positive is possible when parsing an url for example containing somethingevt.gif <= 'evt.'\n if re.search(r'evt\\.', cond):\n msg = \"Correlation rule (%s) not properly translated. \" % self.name\n msg += \"Please fix the correlation rule and/or parser! 
Unexpected: %s\" % cond\n self.logger.error(msg)\n return False\n\n # condition_rule = \"(f1(1,3) and f1(2,10)) and f2(5)\"\n # eval(condition_rule, {'f1':fct1, 'f2':fct2})\n try:\n res = eval(cond, self.FunctionsEntryPoints)\n except:\n res = False\n return res", "def test_expression_equality(self):\n\n # Checks on a specified attribute with operators \"==\" and \"!=\" with integers\n expression = BooleanExpression(\"NORMAL\", models.Network.id == 1)\n value = expression.evaluate(KeyedTuple([{\"id\": 1}], [\"networks\"]))\n self.assertTrue(value, \"models.Network.id==1 with models.Network.id=1\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.id == 1)\n value = expression.evaluate(KeyedTuple([{\"id\": 2}], [\"networks\"]))\n self.assertFalse(value, \"models.Network.id==1 with models.Network.id=2\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.id != 1)\n value = expression.evaluate(KeyedTuple([{\"id\": 1}], [\"networks\"]))\n self.assertFalse(value, \"models.Network.id!=1 with models.Network.id=1\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.id != 1)\n value = expression.evaluate(KeyedTuple([{\"id\": 2}], [\"networks\"]))\n self.assertTrue(value, \"models.Network.id!=1 with models.Network.id=2\")\n\n # Checks on a specified attribute with operators \"==\" and \"!=\" with string\n expression = BooleanExpression(\"NORMAL\", models.Network.label == \"network_1\")\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_1\"}], [\"networks\"]))\n self.assertTrue(value, \"\"\"models.Network.label==\"network_1\" with models.Network.label=\"network_1\" \"\"\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.label == \"network_1\")\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_2\"}], [\"networks\"]))\n self.assertFalse(value, \"\"\"models.Network.label==\"network_1\" with models.Network.label=\"network_2\" \"\"\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.label != \"network_1\")\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_1\"}], [\"networks\"]))\n self.assertFalse(value, \"\"\"models.Network.label!=\"network_1\" with models.Network.label=\"network_1\" \"\"\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.label != \"network_1\")\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_2\"}], [\"networks\"]))\n self.assertTrue(value, \"\"\"models.Network.label!=\"network_1\" with models.Network.label=\"network_2\" \"\"\")\n\n # Checks on a specified attribute with operators \"IS\" with string\n expression = BooleanExpression(\"NORMAL\", models.Network.label == None)\n value = expression.evaluate(KeyedTuple([{\"label\": None}], [\"networks\"]))\n self.assertTrue(value, \"\"\"models.Network.label==None with models.Network.label=None \"\"\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.label == None)\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_2\"}], [\"networks\"]))\n self.assertFalse(value, \"\"\"models.Network.label==None with models.Network.label=\"network_2\" \"\"\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.label != None)\n value = expression.evaluate(KeyedTuple([{\"label\": None}], [\"networks\"]))\n self.assertFalse(value, \"\"\"models.Network.label!=None with models.Network.label=None \"\"\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.label != None)\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_2\"}], [\"networks\"]))\n self.assertTrue(value, 
\"\"\"models.Network.label!=None with models.Network.label=\"network_2\" \"\"\")", "def evaluate(self, edict):\n pass", "def evaluate(self, operand: object) -> bool:\n pass", "def evalBoolean(tree):\n # check if children the children is a \"or\" or a \"and\" tokken\n if (tree.children[0].data == \"or\"):\n return evalBoolean(tree.children[0].children[0]) or evalBoolean(tree.children[0].children[1])\n if (tree.children[0].data) == \"and\":\n return evalBoolean(tree.children[0].children[0]) and evalBoolean(tree.children[0].children[1])\n \n # set var1\n if(tree.children[0].data == \"integer\"):\n var1 = evalInteger(tree.children[0])\n elif(tree.children[0].data == \"variable\"):\n var1 = getValue(tree.children[0].children[0].value)\n\n # set var2\n if(tree.children[2].data == \"integer\"):\n var2 = evalInteger(tree.children[2])\n elif(tree.children[2].data == \"variable\"):\n var2 = getValue(tree.children[2].children[0].value)\n\n if(tree.children[1].children[0].data == \"greater\"):\n return var1 > var2\n if(tree.children[1].children[0].data == \"less\"):\n return var1 < var2\n if(tree.children[1].children[0].data == \"equals\"):\n return var1 == var2\n if(tree.children[1].children[0].data == \"nequal\"):\n return var1 != var2\n\n print(\"ERROR : UNEXPECTED TOKKEN\")\n return False", "def checkCondition(self, left_context, mod, right_context):\n if self.condition == \"\":\n return(True)\n else:\n if self.ruleType == self.TYPE_OL:\n keys = self.symParam\n values = mod.param \n elif self.ruleType == self.TYPE_L1L:\n keys = self.left_context.param + self.symParam\n values = left_context.param + mod.param \n elif self.ruleType == self.TYPE_R1L:\n keys = self.symParam + self.right_context.param\n values = mod.param + right_context.param\n elif self.ruleType == self.TYPE_2L:\n keys = self.left_context.param + self.symParam + self.right_context.param\n values = left_context.param + mod.param + right_context.param\n new_dict = dict(zip(keys, values)) \n return(self.condition.evaluate(new_dict))", "def test_RestrictingNodeTransformer__visit_NotIn_Dict():\n assert restricted_eval('2 not in {1: 1, 2: 2, 3: 3}') is False", "def logical_eval(value, logic, check, var):\r\n # Logical statement aquired from input\r\n if logic == '=':\r\n return value == check\r\n # All other string logical expressions can be evaluated with eval()\r\n else:\r\n return eval(var)", "def evaluateBoolean(compiled_expression):", "def parse_logic(self, logic):\n if 'xxx' in logic.conf:\n # self.function(logic['name'])\n pass", "def eval_if_condition(condition):\n if type(condition).__name__ == 'BinaryOp':\n if type(condition.left).__name__ == 'ID':\n # case: if (mask & XXX) {...} in \"provenance_inode_permission\"; mask can only be determined at runtime\n if condition.left.name == 'mask':\n return True\n # case: if (shmflg & SHM_RDONLY) {...} in \"provenance_shm_shmat\"; shmflg can be only be determined at runtime\n if condition.left.name == 'shmflg':\n return True\n elif type(condition.left).__name__ == 'BinaryOp':\n if type(condition.left.left).__name__ == 'ID':\n # case: if ((perms & (DIR__WRITE)) != 0) in \"provenance_file_permission\"; perms can only be determined at runtime\n if condition.left.left.name == 'perms':\n return True\n # case: if ((prot & (PROT_WRITE)) != 0) in \"provenance_mmap_file\"; prot can only be determined at runtime\n elif condition.left.left.name == 'prot':\n return True\n elif type(condition.left.left).__name__ == 'BinaryOp':\n if type(condition.left.left.left).__name__ == 'ID':\n # case: if ((flags & 
MAP_TYPE) == MAP_SHARED...) in \"provenance_mmap_file\"; flags can only be determined at runtime\n if condition.left.left.left.name == 'flags':\n return True\n elif type(condition.left.right).__name__ == 'ID':\n # case: if (sock->sk->sk_family == PF_UNIX &&...) in \"provenance_socket_recvmsg\", \"provenance_socket_recvmsg_always\", \"provenance_socket_sendmsg\", \"provenance_socket_sendmsg_always\"; sock->sk->sk_family can only be determined at runtime\n if condition.left.right.name == 'PF_UNIX':\n return True\n elif type(condition).__name__ == 'FuncCall':\n # case: if (is_inode_dir(inode)) in \"provenance_file_permission\"; inode type can only be determined at runtime\n if condition.name.name == 'is_inode_dir':\n return True\n # case: else if (is_inode_socket(inode)) in \"provenance_file_permission\"\n elif condition.name.name == 'is_inode_socket':\n return True\n # case: if ( vm_mayshare(flags) ) in \"provenance_mmap_munmap\"; flags can only be determined at runtime\n elif condition.name.name == 'vm_mayshare':\n return True\n elif type(condition).__name__ == 'ID':\n # case: if (iprovb) in \"provenance_socket_sendmsg\", \"provenance_socket_sendmsg_always\"\n if condition.name == 'iprovb':\n return True\n # case: if (pprov) in \"provenance_socket_recvmsg\", \"provenance_socket_recvmsg_always\"\n elif condition.name == 'pprov':\n return True\n #######################################################\n # We will consider other conditions if we ever see them\n # POSSIBLE CODE HERE.\n #######################################################\n else:\n return False", "def evaluate_logical_form(self, logical_form: str, target_list: List[str]) -> bool:\n try:\n denotation = self.execute(logical_form)\n except ExecutionError as error:\n logger.warning(f'Failed to execute: {logical_form}. 
Error: {error}')\n return False\n return self.evaluate_denotation(denotation, target_list)", "def test_eval(self):\n # expr and expr\n base = abs_path('./specs/')\n ps = Parser(base + 'script3-6.py', base)\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 2)\n\n # expr or expr\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"a == if or B == b1\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 6)\n\n # expr and (expr or expr)\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"a == if and (B == b1 or B == b2)\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 4)\n\n # testing !=\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"a != if\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 4)\n\n # testing >=\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"a.index >= 1\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 4)\n\n # testing index\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"b.index == 1\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 4)\n\n # testing option with integer type\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"b == 0\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 4)\n\n # testing option with float type\n ps.spec['constraints'] = [{\"block\": \"D\", \"condition\": \"b == 1.5\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 4)\n\n # testing unmade decision\n ps.spec['constraints'] = [{\"block\": \"A\", \"condition\": \"b.index == 0\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 0)\n\n # testing if the decision is made when the block depends on a variable\n # inside the block\n ps.spec['constraints'] = [{\"block\": \"B\", \"condition\": \"b.index == 0\"}]\n ps._parse_constraints()\n ps.main(verbose=False)\n self.assertEqual(ps.wrangler.counter, 0)", "def eval_value(self, v):\n okay = False\n if ast_class(v) == 'Dict':\n # dict\n if self.eval_dict(v):\n okay = True\n elif ast_class(v) == 'List':\n # list\n if self.eval_list(v):\n okay = True\n elif ast_class(v) == 'Str':\n # string\n okay = True\n elif ast_class(v) == 'Name' and v.id in ('True', 'False', 'None'):\n # booleans or None\n okay = True\n elif ast_class(v) == 'Num':\n # numbers\n okay = True\n elif ast_class(v) == 'UnaryOp' and ast_class(v.op) == 'USub' and ast_class(v.operand) == 'Num':\n # negative numbers\n okay = True\n return okay", "def is_logic(self):\n return self.value in ('and_logic', 'or_logic')", "def test(self, values: Dict[str, Any]) -> Optional[str]:\n # This is always True\n if self.cond == '#':\n return None\n\n def why(cond, field, explanation) -> Optional[str]:\n if cond:\n return None\n return '{}: {}'.format(field, explanation)\n\n # If it's missing, it's only True if it's a missing test.\n if self.field not in values:\n # Default to ignoring id field as long as no version.\n if self.field == '':\n return why('-' not in self.value, 'id', 'unknown version {}'.format(self.value))\n return why(self.cond == '!', self.field, 'is missing')\n\n # If they supply a function, hand it to them.\n if callable(values[self.field]):\n return values[self.field](self)\n\n val = str(values[self.field])\n if self.cond == '!':\n return why(False, 
self.field, 'is present')\n elif self.cond == '=':\n return why(val == self.value,\n self.field,\n '!= {}'.format(self.value))\n elif self.cond == '/':\n return why(val != self.value,\n self.field,\n '= {}'.format(self.value))\n elif self.cond == '^':\n return why(val.startswith(self.value),\n self.field,\n 'does not start with {}'.format(self.value))\n elif self.cond == '$':\n return why(val.endswith(self.value),\n self.field,\n 'does not end with {}'.format(self.value))\n elif self.cond == '~':\n return why(self.value in val,\n self.field,\n 'does not contain {}'.format(self.value))\n elif self.cond == '<':\n try:\n actual_int = int(val)\n except ValueError:\n return why(False, self.field, \"not an integer field\")\n try:\n restriction_val = int(self.value)\n except ValueError:\n return why(False, self.field, \"not a valid integer\")\n return why(actual_int < restriction_val,\n self.field,\n \">= {}\".format(restriction_val))\n elif self.cond == '>':\n try:\n actual_int = int(val)\n except ValueError:\n return why(False, self.field, \"not an integer field\")\n try:\n restriction_val = int(self.value)\n except ValueError:\n return why(False, self.field, \"not a valid integer\")\n return why(actual_int > restriction_val,\n self.field,\n \"<= {}\".format(restriction_val))\n elif self.cond == '{':\n return why(val < self.value,\n self.field,\n 'is the same or ordered after {}'.format(self.value))\n elif self.cond == '}':\n return why(val > self.value,\n self.field,\n 'is the same or ordered before {}'.format(self.value))\n else:\n # We checked this in init!\n assert False", "def test_evaluate_eq_expression(self):\n value = self.evaluate_common(\"2M eq 3M\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Boolean, \"Expected Boolean\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"2D eq 2M\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Boolean, \"Expected Boolean\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\"2F eq 2D\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\"2 eq 2L\")\n self.assertTrue(value.value is True, \"Expected True\")\n try:\n value = self.evaluate_common(\"2 eq '2'\")\n self.fail(\"String promotion to int\")\n except odata.EvaluationError:\n pass\n value = self.evaluate_common(\"'2' eq '2'\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\n \"datetime'2013-08-30T18:49' eq datetime'2013-08-30T18:49'\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\n \"datetime'2013-08-30T18:49' eq datetime'2013-08-30T18:49:01'\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\n \"datetimeoffset'2013-08-30T18:49:00Z' eq \"\n \"datetimeoffset'2013-08-30T19:49:00+01:00'\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\n \"datetimeoffset'2013-08-30T18:49:00Z' eq \"\n \"datetimeoffset'2013-08-30T18:49:00+01:00'\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\n \"guid'b3afeebc-9658-4699-9d9c-1df551fd6814' eq \"\n \"guid'b3afeebc-9658-4699-9d9c-1df551fd6814'\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\n \"guid'b3afeebc-9658-4699-9d9c-1df551fd6814' eq \"\n \"guid'3fa6109e-f09c-4c5e-a5f3-6cf38d35c9b5'\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = 
self.evaluate_common(\"X'DEADBEEF' eq binary'deadbeef'\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\"X'DEAD' eq binary'BEEF'\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"2 eq null\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"null eq null\")\n self.assertTrue(value.value is True, \"Expected True\")", "def test_RestrictingNodeTransformer__visit_Eq__1():\n assert restricted_eval('1 == int(\"1\")') is True", "def test_evaluate_and_expression(self):\n value = self.evaluate_common(\"false and false\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Boolean, \"Expected Boolean\")\n self.assertTrue(value.value is False, \"Expected False\")\n try:\n value = self.evaluate_common(\"false and 0\")\n self.fail(\"Integer promotion to Boolean\")\n except odata.EvaluationError:\n pass\n value = self.evaluate_common(\"false and true\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"true and false\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"true and true\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\"true and null\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Boolean, \"Expected Boolean\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"false and null\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"false and false\")\n self.assertTrue(value.value is False, \"Expected False\")", "def handle_operator(node, dic, validator, entry_list, messages, whole_validator, current_elem):\n if node == '$reference':\n new_list = dc(entry_list)\n new_list.append(validator[node])\n check_dict_alg(\n dic, whole_validator[validator[node]], new_list, messages, whole_validator, current_elem\n )\n elif node == '$forElem':\n traversed_dic = traverse_dict(dic, entry_list)\n if traversed_dic is not None:\n for elem in traversed_dic:\n new_list = dc(entry_list)\n new_list.append(elem)\n check_dict_alg(\n dic, validator['$forElem'], new_list, messages, whole_validator, elem\n )\n else:\n add_message(messages, current_elem, \"Error in traversing dict!\")\n elif node.startswith('$selection__'):\n select_type = node.split('__')[1]\n select_dic = traverse_dict(dic, entry_list)\n if select_type in select_dic:\n select = select_dic[select_type]\n rest_validator = validator[node][select]\n check_dict_alg(dic, rest_validator, entry_list, messages, whole_validator, current_elem)\n else:\n add_message(\n messages, current_elem, \"Could not find \" + select_type + \" in \" + str(entry_list)\n )\n elif node.startswith('$exists__'):\n # TODO handle it somehow...\n pass", "def test_evaluate_boolean_literal_expression(self):\n value = self.evaluate_common(\"true\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Boolean, \"Expected Boolean\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\"false\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Boolean, \"Expected Boolean\")\n self.assertTrue(value.value is False, \"Expected False\")", "def test_logic_mode(self):\n\n World.reset()\n World._evaluation_mode = tfl.LOGIC_MODE\n\n def inside(x, y):\n centers_distance = tf.sqrt(tf.reduce_sum(tf.squared_difference(x[:, 0:2], y[:, 0:2]), axis=1) + 1e-6)\n return tf.cast((centers_distance + x[:, 2]) < 
y[:, 2], tf.float32)\n\n circles = tfl.Domain(label=\"Circles\", data=[[0., 0, 1], [0, 0, 2], [0, 0, 3]])\n inside = tfl.Predicate(label=\"inside\", domains=[\"Circles\", \"Circles\"], function=inside)\n tfl.setTNorm(id=tfl.SS, p=1)\n sess = tf.Session()\n\n # Constraint 1\n x = tfl.variable(circles, name=\"x\")\n y = tfl.variable(circles, name=\"y\")\n a = tfl.atom(inside, (x, y))\n b = tfl.atom(inside, (y, x))\n rule = tfl.and_n(a, b)\n\n assert np.equal(sess.run(rule), np.zeros(shape=[3, 3, 3])).all()\n assert len(World._predicates_cache) == 1", "def test_expression_and_or(self):\n\n # Checks several examples with \"and\" and \"or\" operators\n expression = BooleanExpression(\"NORMAL\", or_(and_(models.Network.label != \"network_3\", models.Network.multi_host == True), models.Network.label == \"network_3\"))\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_3\", \"multi_host\": False}], [\"networks\"]))\n self.assertTrue(value, \"complex expression (1)\")\n\n expression = BooleanExpression(\"NORMAL\", or_(and_(models.Network.label != \"network_3\", models.Network.multi_host == True), models.Network.label == \"network_3\"))\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_2\", \"multi_host\": True}], [\"networks\"]))\n self.assertTrue(value, \"complex expression (2)\")\n\n expression = BooleanExpression(\"NORMAL\", or_(and_(models.Network.label != \"network_3\", models.Network.multi_host == True), models.Network.label == \"network_3\"))\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_2\", \"multi_host\": False}], [\"networks\"]))\n self.assertFalse(value, \"complex expression (3)\")" ]
[ "0.61918426", "0.6141805", "0.6042099", "0.5969799", "0.5877197", "0.586158", "0.58614135", "0.58304363", "0.581069", "0.5801263", "0.5798829", "0.57972205", "0.57903963", "0.57780564", "0.57741153", "0.57624376", "0.57561654", "0.57198983", "0.5661767", "0.56597126", "0.5608706", "0.5602319", "0.5598884", "0.5573062", "0.55559254", "0.5548853", "0.55254793", "0.5498122", "0.5473167", "0.5471663" ]
0.6642864
0
add the keeper to the player's field
def play(self, game, playerNumber): # needs check for keeper limit? super(Keeper, self).play(game, playerNumber) p = game.players[playerNumber] p.field.add(self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_player(self, player):\n\t\tself.players.append(player)", "def add_player(self, players):\n try:\n players[self.ward]\n except:\n players[self.ward] = self", "def add_played_disk(self, x, y, player):\n self.played_disks.append((x, y, player))", "def add_player(self, player: str) -> None:\n if not player or player == \"<world>\":\n return\n\n self.games_list[self.game_index][\"players\"].append(player)\n if not self.games_list[self.game_index][\"kills\"].get(player):\n self.games_list[self.game_index][\"kills\"][player] = 0\n\n return", "def set_player(self, char_data):\n self.player = self.server.object_manager.add_player(char_data)", "def appendPlayer(self, player):\n #if (not self.__configuring) and (not (player in self.__players)):\n if (not (player in self.__players)):\n self.__players.append(player)\n player.bind(self)\n if self.__playing:\n player.configure(self)\n self.__notifyPlayer(player, TetrisEvent.TETROMINO_NEXT, \n self.__nextTetroType)\n self.__notifyPlayer(player, TetrisEvent.TETROMINO_START, \n self.__nextTetroType)\n self.__notifyPlayer(player, TetrisEvent.BOARD_CHANGE, [])", "def on_key_press(self, key, modifiers):\n if self.player_enabled:\n super().on_key_press(key, modifiers)", "def add_new(self, e=0):\n # clean controlls\n self.clear_controls()\n\n # new dictionary\n d_new = dict()\n # to add above\n\n # find level of current item\n level = len(str(self.n_parent).split(\":\"))+1\n # find items that level with higher number\n for k, v in self.d.items():\n num = int(str(k).split(\":\")[-1])\n if len(str(k).split(\":\")) >= level and num >= int(self.n):\n l_elem = str(k).split(\":\")\n num = int(l_elem[level-1]) + 1\n\n # summon key\n s_first = \"\" # first part of string\n s_last = \"\" # last part of string\n for i in range(0, level-1):\n s_first = s_first + l_elem[i]\n try:\n for j in range(level, len(l_elem)):\n s_last = s_last + l_elem[j]\n except:\n pass\n\n # summon\n if s_last:\n s_summon = str(s_first) + \":\" + str(num) + \":\" + str(s_last)\n else:\n s_summon = str(s_first) + \":\" + str(num)\n\n # write to dictionary\n d_new[s_summon] = v\n\n # delete item from self.d\n self.d.pop(k)\n else:\n d_new[k] = self.d[k]\n\n # change dictionary\n self.d = d_new\n\n # renum childs\n self.go_down()\n\n # write data from dictionary even that current element is empty\n self.add_item(True)\n\n\n\n self.set_value(self.n_parent, self.n)", "def addPlayer(self, userid):\r\n self.players[int(userid)] = PlayerObject(userid)", "def placeKeeper (self, keeper_char, row, column):\n self.maze[row][column] = keeper_char", "def add_player(self, player):\r\n self.players[player.id] = copy.copy(player)\r\n return self.players[player.id]", "def perspective_newclient(self):\n #print \"adding player :\", self.name\n players.append(self)\n for player in players:\n print \"server has this player:\", player.name", "def _addPlayerInfo(self, player, playerLayers, isBottomPlayer):\n\n lifeMeter = MeterLayer(192, 16, player.maxLife,\n (255, 255, 255, 127), # background color\n (255, 0, 0, 255), # empty life color\n (0, 255, 0, 255)) # full life color\n lifeMeter.value = player.life\n self.add(lifeMeter)\n # Don't tie the life meter directly to the display, because for animating\n # attacks we prefer to update the life to sync up with the attack.\n #player.lifeChanged.addHandler(lambda x: lifeMeter.setValue(x))\n\n manaMeter = MeterLayer(192, 16, player.maxMana,\n (255, 255, 255, 127), # background color\n (130, 130, 130, 255), # empty mana color\n (0, 0, 255, 255)) # full mana color\n 
manaMeter.value = player.mana\n self.add(manaMeter)\n player.manaChanged.addHandler(lambda x: manaMeter.setValue(x))\n\n movesTextBox = TextBoxLayer(player.maxMoves)\n self.add(movesTextBox)\n player.moveChanged.addHandler(lambda x: movesTextBox.setValue(x))\n\n unitsTextBox = TextBoxLayer(player.maxUnitTotal)\n self.add(unitsTextBox)\n player.unitChanged.addHandler(lambda x: unitsTextBox.setValue(x))\n\n boardY = BOTTOM_MARGIN\n if not isBottomPlayer:\n boardY += BOARD_HEIGHT + BOARD_GAP\n\n lifeMeter.position = (32, boardY + 112 + 16 + 32)\n manaMeter.position = (32, boardY + 112)\n movesTextBox.position = (32, boardY + 80)\n unitsTextBox.position = (32, boardY + 50)\n\n playerLayers.lifeMeter = lifeMeter\n playerLayers.manaMeter = manaMeter\n playerLayers.movesCounter = movesTextBox\n playerLayers.unitsCounter = unitsTextBox", "def LevelUpPlayer(self):\n self.lvl += 1\n self.skillPts += 1\n percent = 0.5\n if self.lvl > 8:\n percent = 0.45 # reduce how much xp is added once higher level\n elif self.lvl > 16:\n percent = 0.4\n elif self.lvl > 25:\n percent = 0.3\n self.xpNeeded = floor(self.xpNeeded + self.xpNeeded * percent)", "def AddKE(self, ke):\n self._totalke += ke", "def player_update(self,p,player):\n node = self._validate(p)\n node._player = player", "def on_key_press(self, key, modifiers):\n player_controller.input_press(self, key, self.player)", "def add_new_player(self) -> None:\n\n # 1\n for elem in self.data:\n key = ''\n value = ''\n for k, v in elem.items():\n if k == 'name':\n key = v\n else:\n value = v.get()\n self.attributs.update({key: value})\n\n # 2\n order = ct.Controls.verify_players_creation(self.attributs)\n self.master.master.list_instances_menus_tournament = Menu.update_menus_tournament(order, self.master)\n self.master.master.left_window.update_and_display(self.master.master.list_instances_menus_tournament)\n # 3\n if order['order'] == 'repeat_step':\n self.display()\n else:\n self.destroy_window()\n self.master.master.launch()", "def bcp_player_added(self, num, **kwargs):\n del kwargs\n self.machine.bcp.transport.send_to_clients_with_handler('_player_vars', 'player_added', player_num=num)", "def on_key_press(self, key, modifiers):\n if key == arcade.key.LEFT:\n self.player_sprite.go_left()\n self.player_sprite.face_left()\n elif key == arcade.key.RIGHT:\n self.player_sprite.go_right()\n self.player_sprite.face_right()\n elif key == arcade.key.UP:\n if self.player_sprite.change_y == 0:\n self.player_sprite.jump()", "def addkey(unsafe_import_key):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if not unlock_wallet(stm):\n return\n if not unsafe_import_key:\n unsafe_import_key = click.prompt(\"Enter private key\", confirmation_prompt=False, hide_input=True)\n mph.wallet.addPrivateKey(unsafe_import_key)\n set_shared_morphene_instance(stm)", "def on_key_press(self, key: int, modifiers: int):\n if self.ship.alive:\n self.held_keys.add(key)\n\n if key == arcade.key.SPACE:\n bullet = Bullet(self.ship.angle, self.ship.center.x, self.ship.center.y)\n self.bullets_list.append(bullet)", "def add_species(self, side='R'): \n #self.disable_all_buttons() \n Player.add_species(self, side=side)", "def on_key_press(self, key: int, modifiers: int):\r\n self.held_keys.add(key)\r\n\r\n if key == arcade.key.SPACE:\r\n pass", "def on_key_press(self, key, modifiers):\n\n if key == arcade.key.UP or key == arcade.key.W:\n self.player.change_y += .2\n elif key == arcade.key.LEFT or key == arcade.key.A:\n self.player.change_x -= .2\n elif key == 
arcade.key.RIGHT or key == arcade.key.D:\n self.player.change_x += .2\n elif key == arcade.key.DOWN or key == arcade.key.S:\n self.player.change_y -= .2", "def __init__(self):\r\n self.players = {}", "def handle_event(self,event,player1,player2):\n if event.type != KEYDOWN:\n return\n if event.key == pygame.K_l:\n player1.amount+=1\n if event.key == pygame.K_j:\n player1.amount-=1\n if event.key == pygame.K_d:\n player2.amount+=1\n if event.key == pygame.K_a:\n player2.amount-=1", "def on_key_press(self, key: int, modifiers: int):\r\n if self.ship.alive:\r\n self.held_keys.add(key)\r\n\r\n if key == arcade.key.SPACE:\r\n # TODO: Fire the bullet here!\r\n bullet = Bullet()\r\n bullet.fire(self.ship.angle, self.ship.center)\r\n\r\n self.bullets.append(bullet)", "def add_player(self, user):\n id = user.id\n name = user.name\n self.players[id] = Player(name, user)\n self.player_id_list.append(id)", "def on_key_press(self, key: arcade.key, modifiers: int):\n #player movement with keys\n if key == arcade.key.UP:\n self.player_sprite.change_y = movement_speed\n elif key == arcade.key.DOWN:\n self.player_sprite.change_y = -movement_speed\n elif key == arcade.key.LEFT:\n self.player_sprite.change_x = -movement_speed\n elif key == arcade.key.RIGHT:\n self.player_sprite.change_x = movement_speed\n\n #go to next view from level 4\n elif key == arcade.key.ENTER and self.current_level == 3:\n self.director.next_view()" ]
[ "0.6364632", "0.6120998", "0.601679", "0.5704426", "0.5608644", "0.55661637", "0.5504809", "0.54787284", "0.54737884", "0.54316384", "0.5415775", "0.5406429", "0.5401013", "0.53916353", "0.5390193", "0.53633857", "0.53564775", "0.5346505", "0.5302579", "0.5288975", "0.52884024", "0.5275779", "0.5254522", "0.5246389", "0.5246215", "0.5238867", "0.52368623", "0.5213659", "0.52018833", "0.5198913" ]
0.7416112
0
Returns `model_fn` closure for TPUEstimator. model_fn_builder actually creates the model function using the passed parameters for num_labels, learning_rate, etc.
def model_fn_builder(num_labels, learning_rate, num_train_steps, num_warmup_steps): def model_fn(features, labels, mode, params): """The `model_fn` for TPUEstimator.""" input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] label_ids = features["label_ids"] is_predicting = (mode == tf.estimator.ModeKeys.PREDICT) # TRAIN and EVAL if not is_predicting: (loss, predicted_labels, log_probs) = ClassifierModel.create_model( is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels) train_op = bert.optimization.create_optimizer( loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False) # Calculate evaluation metrics. def metric_fn(label_ids, predicted_labels): accuracy = tf.metrics.accuracy(label_ids, predicted_labels) f1_score = tf.contrib.metrics.f1_score( label_ids, predicted_labels) auc = tf.metrics.auc( label_ids, predicted_labels) recall = tf.metrics.recall( label_ids, predicted_labels) precision = tf.metrics.precision( label_ids, predicted_labels) true_pos = tf.metrics.true_positives( label_ids, predicted_labels) true_neg = tf.metrics.true_negatives( label_ids, predicted_labels) false_pos = tf.metrics.false_positives( label_ids, predicted_labels) false_neg = tf.metrics.false_negatives( label_ids, predicted_labels) return { "eval_accuracy": accuracy, "f1_score": f1_score, "auc": auc, "precision": precision, "recall": recall, "true_positives": true_pos, "true_negatives": true_neg, "false_positives": false_pos, "false_negatives": false_neg } eval_metrics = metric_fn(label_ids, predicted_labels) if mode == tf.estimator.ModeKeys.TRAIN: return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op) else: return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metrics) else: (predicted_labels, log_probs) = ClassifierModel.create_model( is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels) predictions = { 'probabilities': log_probs, 'labels': predicted_labels } return tf.estimator.EstimatorSpec(mode, predictions=predictions) # Return the actual model function in the closure return model_fn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def model_fn_builder(model_config,\n train_params):\n def model_fn(features, labels, mode, params):\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n masked_lm_positions = features[\"masked_lm_positions\"]\n masked_lm_ids = features[\"masked_lm_ids\"]\n masked_lm_weights = features[\"masked_lm_weights\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n model = getattr(models, model_config.model_name)(config=model_config,\n is_training=is_training)\n _ = model(input_ids, input_mask=input_mask, token_type_ids=segment_ids)\n\n # TODO (@zhaoshenjian.01): check conditional_jit_scope\n # split loss calculation across batch\n batch_splits = train_params.get(\"batch_splits\", 1)\n if batch_splits == 1:\n # sparse_softmax_cross_entropy_with_logits\n masked_lm_output_dict = get_masked_lm_output(model_config,\n model.get_sequence_output(),\n model.get_embedding_table(),\n masked_lm_positions,\n masked_lm_ids,\n masked_lm_weights)\n else:\n # use for large vocab\n masked_lm_output_dict = get_masked_lm_output_split_batch(\n model_config,\n model.get_sequence_output(),\n model.get_embedding_table(),\n masked_lm_positions,\n masked_lm_ids,\n masked_lm_weights,\n batch_splits=batch_splits)\n\n masked_lm_loss = masked_lm_output_dict[\"loss\"]\n\n use_nsp = train_params.get(\"use_nsp\", True)\n if use_nsp:\n next_sentence_labels = features[\"next_sentence_labels\"]\n next_sentence_output_dict = get_next_sentence_output(\n model_config, model.get_pooled_output(), next_sentence_labels)\n next_sentence_loss = next_sentence_output_dict[\"loss\"]\n else:\n next_sentence_loss = 0\n\n total_loss = masked_lm_loss + next_sentence_loss\n\n tvars = tf.compat.v1.trainable_variables()\n # run init\n init_checkpoint = train_params.get(\"init_checkpoint\")\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map,\n initialized_variable_names) = get_assignment_map_from_checkpoint(\n tvars, init_checkpoint)\n tf.compat.v1.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint,\n assignment_map)\n return tf.train.Scaffold()\n scaffold_fn = tpu_scaffold\n logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n logging.info(\" name = {}, shape = {} {}\".format(var.name, var.shape,\n init_string))\n\n # default `bert_decay` lr_scheduler\n lr_params = train_params.get(\n 'lr_scheduler', {\n 'name': 'bert_decay',\n 'learning_rate': 1e-4,\n 'warmup_steps': 10000,\n 'num_train_steps': 1000000\n })\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op, _ = optimizers.create_optimizer(\n loss=total_loss,\n init_lr=lr_params['learning_rate'],\n num_train_steps=lr_params['num_train_steps'],\n num_warmup_steps=lr_params['warmup_steps'])\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n return output_spec\n raise NotImplementedError\n\n return model_fn", "def model_fn_builder(num_labels, learning_rate, num_train_steps, num_warmup_steps):\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for 
TPUEstimator.\"\"\"\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n # TRAIN and EVAL\n if not is_predicting:\n\n (loss, predicted_labels, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n train_op = create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False)\n\n # Calculate evaluation metrics.\n def metric_fn(label_ids, predicted_labels):\n accuracy = tf.compat.v1.metrics.accuracy(label_ids, predicted_labels)\n #f1_score = tf.contrib.metrics.f1_score(\n # label_ids,\n # predicted_labels)\n #auc = tf.metrics.auc(\n # label_ids,\n # predicted_labels)\n #recall = tf.metrics.recall(\n # label_ids,\n # predicted_labels)\n #precision = tf.metrics.precision(\n # label_ids,\n # predicted_labels)\n #true_pos = tf.metrics.true_positives(\n # label_ids,\n # predicted_labels)\n #true_neg = tf.metrics.true_negatives(\n # label_ids,\n # predicted_labels)\n #false_pos = tf.metrics.false_positives(\n # label_ids,\n # predicted_labels)\n #false_neg = tf.metrics.false_negatives(\n # label_ids,\n # predicted_labels)\n return {\n \"eval_accuracy\": accuracy,\n # \"f1_score\": f1_score,\n #\"auc\": auc,\n # \"precision\": precision,\n # \"recall\": recall,\n # \"true_positives\": true_pos,\n # \"true_negatives\": true_neg,\n # \"false_positives\": false_pos,\n # \"false_negatives\": false_neg\n }\n\n eval_metrics = metric_fn(label_ids, predicted_labels)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op)\n else:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n eval_metric_ops=eval_metrics)\n else:\n (predicted_labels, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n predictions = {\n 'probabilities': log_probs,\n 'labels': predicted_labels\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)\n\n # Return the actual model function in the closure\n return model_fn", "def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n # tf.logging.info(\"*** Features ***\")\n # for name in sorted(features.keys()):\n # tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n # output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n # mode=tf.estimator.ModeKeys.PREDICT,\n # predictions=probabilities)\n output_spec = tf.estimator.EstimatorSpec(\n mode=tf.estimator.ModeKeys.PREDICT,\n predictions=probabilities\n )\n return output_spec\n\n return model_fn", "def model_fn_builder(bert_model_hub, num_labels, learning_rate,\n num_train_steps, num_warmup_steps):\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for 
TPUEstimator.\"\"\"\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n # TRAIN and EVAL\n if not is_predicting:\n\n (loss, predicted_labels,\n log_probs) = create_model(bert_model_hub, is_predicting,\n input_ids, input_mask, segment_ids,\n label_ids, num_labels)\n\n train_op = bert.optimization.create_optimizer(loss,\n learning_rate,\n num_train_steps,\n num_warmup_steps,\n use_tpu=False)\n\n # Calculate evaluation metrics.\n def metric_fn(label_ids, predicted_labels):\n accuracy = tf.metrics.accuracy(label_ids, predicted_labels)\n f1_score = tf.contrib.metrics.f1_score(label_ids,\n predicted_labels)\n auc = tf.metrics.auc(label_ids, predicted_labels)\n recall = tf.metrics.recall(label_ids, predicted_labels)\n precision = tf.metrics.precision(label_ids, predicted_labels)\n true_pos = tf.metrics.true_positives(label_ids,\n predicted_labels)\n true_neg = tf.metrics.true_negatives(label_ids,\n predicted_labels)\n false_pos = tf.metrics.false_positives(label_ids,\n predicted_labels)\n false_neg = tf.metrics.false_negatives(label_ids,\n predicted_labels)\n return {\n \"eval_accuracy\": accuracy,\n \"f1_score\": f1_score,\n \"auc\": auc,\n \"precision\": precision,\n \"recall\": recall,\n \"true_positives\": true_pos,\n \"true_negatives\": true_neg,\n \"false_positives\": false_pos,\n \"false_negatives\": false_neg\n }\n\n eval_metrics = metric_fn(label_ids, predicted_labels)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op)\n else:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n eval_metric_ops=eval_metrics)\n else:\n (predicted_labels,\n log_probs) = create_model(bert_model_hub, is_predicting,\n input_ids, input_mask, segment_ids,\n label_ids, num_labels)\n\n predictions = {\n 'probabilities': log_probs,\n 'labels': predicted_labels\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)\n\n # Return the actual model function in the closure\n return model_fn", "def model_fn_builder(self, bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (subject_logits, property_logits, value_logits) = self.create_model(\n bert_config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n params=params,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, 
assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n seq_length = modeling.get_shape_list(input_ids)[1]\n\n def compute_loss(logits, positions, depth):\n one_hot_positions = tf.one_hot(\n positions, depth=depth, dtype=tf.float32)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n loss = -tf.reduce_mean(\n tf.reduce_sum(one_hot_positions * log_probs, axis=-1))\n return loss\n\n # subject, property, value로 나오도록\n subject_label = features[\"subject\"]\n property_label = features[\"property\"]\n value_label = features[\"value\"]\n res_length = params[\"res_length\"]\n ont_length = params[\"ont_length\"]\n\n subject_loss = compute_loss(subject_logits, subject_label, res_length)\n property_loss = compute_loss(property_logits, property_label, ont_length)\n value_loss = compute_loss(value_logits, value_label, res_length)\n\n total_loss = (subject_loss + property_loss + value_loss) / 3.0\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n \"unique_ids\": unique_ids,\n \"subject_logits\": subject_logits,\n \"property_logits\": property_logits,\n \"value_logits\": value_logits,\n }\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n else:\n raise ValueError(\n \"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec\n\n return model_fn", "def __model_fn_builder(self, num_labels, learning_rate,\n num_train_steps,\n num_warmup_steps):\n\n def model_fn(features, labels, mode, params):\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n # TRAIN and EVAL\n if not is_predicting:\n\n (loss, predicted_labels, log_probs) = self.__create_model(\n input_ids,\n input_mask, segment_ids, label_ids, num_labels,\n is_predicting=is_predicting\n )\n\n train_op = create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps,\n use_tpu=False)\n\n # Calculate evaluation metrics.\n def metric_fn(label_ids, predicted_labels):\n accuracy = tf.metrics.accuracy(label_ids, predicted_labels)\n f1_score = tf.contrib.metrics.f1_score(\n label_ids,\n predicted_labels)\n auc = tf.metrics.auc(\n label_ids,\n predicted_labels)\n recall = tf.metrics.recall(\n label_ids,\n predicted_labels)\n precision = tf.metrics.precision(\n label_ids,\n predicted_labels)\n true_pos = tf.metrics.true_positives(\n label_ids,\n predicted_labels)\n true_neg = tf.metrics.true_negatives(\n label_ids,\n predicted_labels)\n false_pos = tf.metrics.false_positives(\n label_ids,\n predicted_labels)\n false_neg = tf.metrics.false_negatives(\n label_ids,\n predicted_labels)\n return {\n \"eval_accuracy\": accuracy,\n \"f1_score\": f1_score,\n \"auc\": auc,\n \"precision\": precision,\n \"recall\": recall,\n \"true_positives\": true_pos,\n \"true_negatives\": true_neg,\n \"false_positives\": false_pos,\n \"false_negatives\": false_neg\n }\n\n 
eval_metrics = metric_fn(label_ids, predicted_labels)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op)\n else:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n eval_metric_ops=eval_metrics)\n else:\n (predicted_labels, log_probs) = self.__create_model(\n input_ids,\n input_mask, segment_ids, label_ids, num_labels,\n is_predicting=is_predicting\n )\n\n predictions = {\n 'probabilities': log_probs,\n 'labels': predicted_labels\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)\n\n # Return the actual model function in the closure\n return model_fn", "def model_fn_builder(config: electra_files.configure_finetuning.FinetuningConfig, tasks,\n num_train_steps, pretraining_config=None):\n\n def model_fn(features, labels, mode, params):\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n utils.log(\"Building model...\")\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n model = FinetuningModel(\n config, tasks, is_training, features, num_train_steps)\n\n if pretraining_config is not None:\n # init_checkpoint = tf.train.latest_checkpoint(pretraining_config.model_dir)\n init_checkpoint = pretraining_config['checkpoint']\n utils.log(\"Using checkpoint\", init_checkpoint)\n tvars = tf.trainable_variables()\n scaffold_fn = None\n if init_checkpoint:\n assignment_map, _ = modeling.get_assignment_map_from_checkpoint(\n tvars, init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n # Build model for training or prediction\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n model.loss, config.learning_rate, num_train_steps,\n weight_decay_rate=config.weight_decay_rate,\n use_tpu=config.use_tpu,\n warmup_proportion=config.warmup_proportion,\n layerwise_lr_decay_power=config.layerwise_lr_decay,\n n_transformer_layers=model.bert_config.num_hidden_layers\n )\n output_spec = tf.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=model.loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn,\n training_hooks=[training_utils.ETAHook(\n {} if config.use_tpu else dict(loss=model.loss),\n num_train_steps, config.iterations_per_loop, config.use_tpu, 10)])\n else:\n assert mode == tf.estimator.ModeKeys.PREDICT\n output_spec = tf.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions=utils.flatten_dict(model.outputs),\n scaffold_fn=scaffold_fn)\n\n utils.log(\"Building complete\")\n return output_spec\n\n return model_fn", "def model_fn_builder(config):\n def model_fn(features,labels,mode,params):\n \"\"\"The model_fn for Estimator\"\"\"\n input_q = features[\"input_q\"] # query feature vector\n input_K = features[\"input_K\"] # Key set Matrix\n input_v = features[\"input_v\"] # image visual feature vector\n input_labels = features[\"input_labels\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n model = modeling.AMT(\n config = config,\n is_trainging = is_training, \n scope = \"AMT\",\n input_q = input_q,\n input_K = input_K,\n input_v = input_v\n )\n loss = model.loss\n q_doc_rank = model.get_predict()\n output_spec = None\n scaffold_fn = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer()\n output_spec = tf.estimator.EstimatorSpec(\n mode = mode,\n loss = loss,\n train_op = train_op,\n scaffold_fn = scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n def metric_fn():\n return 0\n else:\n output_spec = tf.estimator.EstimatorSpec(\n mode = mode,\n predictions = q_doc_rank,\n scaffold_fn = 
scaffold_fn)\n return output_spec\n return model_fn", "def model_fn_builder(config):\n init_checkpoint = config.init_checkpoint\n coref_model = CorefQAModel(config)\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n predictions, total_loss = coref_model.forward(features, is_training)\n doc_idx, subtoken_map, top_span_starts, top_span_ends, antecedent_starts, antecedent_ends, antecedent_scores = predictions\n tvars = tf.trainable_variables()\n initialized_variables = {}\n scaffold_fn = None\n if init_checkpoint:\n assignment_map, initialized_variables = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if config.use_tpu:\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \", *INIT_FROM_CKPT*\" if var.name in initialized_variables else \"\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape, init_string)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = create_custom_optimizer(total_loss, config)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n def metric_fn(loss):\n return {\"eval_loss\": tf.metrics.mean(loss)}\n\n eval_metrics = (metric_fn, [total_loss])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions={\"doc_idx\": doc_idx, \"subtoken_map\": subtoken_map,\n \"top_span_starts\": top_span_starts, \"top_span_ends\": top_span_ends,\n \"antecedent_starts\": antecedent_starts, \"antecedent_ends\": antecedent_ends,\n \"antecedent_scores\": antecedent_scores, \"loss\": total_loss},\n scaffold_fn=scaffold_fn)\n return output_spec\n\n return model_fn", "def model_fn_builder():\n \n def model_fn(features, labels, mode, params):\n # features name and shape\n _info('*** Features ****')\n for name in sorted(features.keys()):\n tf.logging.info(' name = {}, shape = {}'.format(name, features[name].shape))\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n # get data\n input_x = features['input_x']\n input_mask = features['input_mask']\n if is_training:\n input_y = features['input_y']\n seq_length = features['seq_length']\n else:\n input_y = None\n seq_length = None\n\n # build encoder\n model = BertEncoder(\n config=cg.BertEncoderConfig,\n is_training=is_training,\n input_ids=input_x,\n input_mask=input_mask)\n embedding_table = model.get_embedding_table()\n encoder_output = tf.reduce_sum(model.get_sequence_output(), axis=1)\n\n # build decoder\n decoder_model = Decoder(\n config=cg.DecoderConfig,\n is_training=is_training,\n encoder_state=encoder_output,\n embedding_table=embedding_table,\n decoder_intput_data=input_y,\n seq_length_decoder_input_data=seq_length)\n logits, sample_id, ppl_seq, ppl = decoder_model.get_decoder_output()\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {'sample_id': 
sample_id, 'ppls': ppl_seq}\n output_spec = tf.estimator.EstimatorSpec(mode, predictions=predictions)\n else:\n if mode == tf.estimator.ModeKeys.TRAIN:\n max_time = ft.get_shape_list(labels, expected_rank=2)[1]\n target_weights = tf.sequence_mask(seq_length, max_time, dtype=logits.dtype)\n batch_size = tf.cast(ft.get_shape_list(labels, expected_rank=2)[0], tf.float32)\n\n loss = tf.reduce_sum(\n tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits) * target_weights) / batch_size\n\n learning_rate = tf.train.polynomial_decay(cg.learning_rate,\n tf.train.get_or_create_global_step(),\n cg.train_steps / 100,\n end_learning_rate=1e-4,\n power=1.0,\n cycle=False)\n\n lr = tf.maximum(tf.constant(cg.lr_limit), learning_rate)\n optimizer = tf.train.AdamOptimizer(lr, name='optimizer')\n tvars = tf.trainable_variables()\n gradients = tf.gradients(loss, tvars, colocate_gradients_with_ops=cg.colocate_gradients_with_ops)\n clipped_gradients, _ = tf.clip_by_global_norm(gradients, 5.0)\n train_op = optimizer.apply_gradients(zip(clipped_gradients, tvars), global_step=tf.train.get_global_step())\n\n\n # this is excellent, because it could display the result each step, i.e., each step equals to batch_size.\n # the output_spec, display the result every save checkpoints step.\n logging_hook = tf.train.LoggingTensorHook({'loss' : loss, 'ppl': ppl, 'lr': lr}, every_n_iter=cg.print_info_interval)\n\n output_spec = tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op, training_hooks=[logging_hook])\n elif mode == tf.estimator.ModeKeys.EVAL:\n # TODO\n raise NotImplementedError\n \n return output_spec\n \n return model_fn", "def model_fn_builder(bert_config, init_checkpoint, learning_rate,\n\t\t\t\t\t num_train_steps, num_warmup_steps, use_tpu,\n\t\t\t\t\t use_one_hot_embeddings,\n\t\t\t\t\t colbert_dim, dotbert_dim, max_q_len, max_p_len, doc_type,\n\t\t\t\t\t loss, kd_source, train_model, eval_model,\n\t\t\t\t\t is_eval, is_output):\n\tdef model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\t\t\"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\t\ttf.logging.info(\"*** Features ***\")\n\t\tfor name in sorted(features.keys()):\n\t\t\ttf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n\t\tis_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n\t\tinput_ids=[]\n\t\tinput_mask=[]\n\t\tsegment_ids=[]\n\t\tmask_lm_info = []\n\t\tif is_training:\n\t\t\tinput_ids = [features[\"rewrite_query_ids\"], features[\"doc0_ids\"], features[\"doc1_ids\"], features[\"raw_query_ids\"]]\n\t\t\tinput_mask = [features[\"rewrite_query_mask\"], features[\"doc0_mask\"], features[\"doc1_mask\"], features[\"raw_query_mask\"]]\n\t\t\tsegment_ids = [features[\"rewrite_query_segment_ids\"], features[\"doc0_segment_ids\"], features[\"doc1_segment_ids\"], features[\"raw_query_segment_ids\"]]\n\t\t\teffective_mask = [features[\"effective_rewrite_query_mask\"], features[\"effective_doc0_mask\"], features[\"effective_doc1_mask\"], features[\"effective_raw_query_mask\"]]\n\t\telif is_eval:\n\t\t\tinput_ids = [features[\"query_ids\"], features[\"docx_ids\"], 0, features[\"query_ids\"]]\n\t\t\tinput_mask = [features[\"query_mask\"], features[\"docx_mask\"], 0, features[\"query_mask\"]]\n\t\t\tsegment_ids = [features[\"query_segment_ids\"], features[\"docx_segment_ids\"], 0, features[\"query_segment_ids\"]]\n\t\t\teffective_mask = [features[\"effective_query_mask\"], features[\"effective_docx_mask\"], 0, features[\"effective_query_mask\"]]\n\t\telif 
is_output:\n\t\t\tinput_ids=[features[\"input_ids\"], features[\"input_ids\"], features[\"input_ids\"], features[\"input_ids\"]]\n\t\t\tinput_mask = [features[\"input_mask\"], features[\"input_mask\"], features[\"input_mask\"], features[\"input_mask\"]]\n\t\t\tsegment_ids = [features[\"segment_ids\"], features[\"segment_ids\"], features[\"segment_ids\"], features[\"segment_ids\"]]\n\t\t\teffective_mask = [features[\"effective_input_mask\"], features[\"effective_input_mask\"], features[\"effective_input_mask\"], features[\"effective_input_mask\"]]\n\n\n\n\t\tlabel = features[\"label\"]\n\n\n\t\ttf.logging.info(\"Create model\")\n\t\tif (is_training) or (is_eval):\n\t\t\t(total_loss, score, doc_length) = create_model(\n\t\t\t\tbert_config, is_training, is_eval, is_output, input_ids, input_mask, segment_ids, effective_mask, label, use_one_hot_embeddings,\n\t\t\t\tcolbert_dim, dotbert_dim, max_q_len, max_p_len, doc_type, loss, kd_source, train_model, eval_model)\n\t\telif is_output:\n\t\t\t(pooling_emb, emb, doc_length) = create_model(\n\t\t\t\tbert_config, is_training, is_eval, is_output, input_ids, input_mask, segment_ids, effective_mask, label, use_one_hot_embeddings,\n\t\t\t\tcolbert_dim, dotbert_dim, max_q_len, max_p_len, doc_type, loss, kd_source, train_model, eval_model)\n\n\t\ttf.logging.info(\"Finish create model\")\n\t\ttvars = tf.trainable_variables()\n\n\t\tscaffold_fn = None\n\t\tif init_checkpoint:\n\t\t\t(assignment_map, initialized_variable_names)= modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n\t\t\t(assignment_map1, initialized_variable_names1) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint, 'Student/', 'query_reformulator/')\n\t\t\tassignment_maps = [assignment_map, assignment_map1]\n\t\t\tinitialized_variable_names.update(initialized_variable_names1)\n\n\t\t\ttf.logging.info(\"**** Assignment Map ****\")\n\t\t\tif use_tpu:\n\t\t\t\tdef tpu_scaffold():\n\t\t\t\t\tfor assignment_map in assignment_maps:\n\t\t\t\t\t tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\t\t\t\t\treturn tf.train.Scaffold()\n\n\t\t\t\tscaffold_fn = tpu_scaffold\n\t\t\telse:\n\t\t\t\ttf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\t\ttf.logging.info(\"**** Trainable Variables ****\")\n\n\t\tfor var in tvars:\n\t\t\tinit_string = \"\"\n\t\t\tif var.name in initialized_variable_names:\n\t\t\t\tinit_string = \", *INIT_FROM_CKPT*\"\n\t\t\ttf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n\t\t\t\t\t\t\tinit_string)\n\n\t\toutput_spec = None\n\t\tif mode == tf.estimator.ModeKeys.TRAIN:\n\t\t\ttrain_op = optimization.create_optimizer(\n\t\t\t\t\t\ttotal_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu, train_model)\n\n\t\t\toutput_spec = tf.contrib.tpu.TPUEstimatorSpec(\n\t\t\t\t\t\tmode=mode,\n\t\t\t\t\t\tloss=total_loss,\n\t\t\t\t\t\ttrain_op=train_op,\n\t\t\t\t\t\tscaffold_fn=scaffold_fn)\n\n\t\telif mode == tf.estimator.ModeKeys.PREDICT:\n\t\t\tif is_output:\n\t\t\t\toutput_spec = tf.contrib.tpu.TPUEstimatorSpec(\n\t\t\t\t\t\t\t\tmode=mode,\n\t\t\t\t\t\t\t\tpredictions={\n\t\t\t\t\t\t\t\t\t\"docid\": features['docid'],\n\t\t\t\t\t\t\t\t\t\"pooling_emb\":pooling_emb,\n\t\t\t\t\t\t\t\t\t\"emb\":emb,\n\t\t\t\t\t\t\t\t\t\"doc_length\":doc_length,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tscaffold_fn=scaffold_fn)\n\t\t\telif is_eval:\n\t\t\t\toutput_spec = tf.contrib.tpu.TPUEstimatorSpec(\n\t\t\t\t\t\t\t\tmode=mode,\n\t\t\t\t\t\t\t\tpredictions={\n\t\t\t\t\t\t\t\t\t\"log_probs\": 
score,\n\t\t\t\t\t\t\t\t\t\"label_ids\": label,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tscaffold_fn=scaffold_fn)\n\n\t\telse:\n\t\t\traise ValueError(\n\t\t\t\t\t\"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n\t\treturn output_spec\n\n\treturn model_fn", "def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings, model_function):\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = model_function.create(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu,\n scope=(\"loss\" if model_function.freeze else None))\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n if model_function.task_type == TaskType.CLASSIFICATION:\n\n def metric_fn(per_example_loss, label_ids, logits):\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n accuracy = tf.metrics.accuracy(label_ids, predictions)\n loss = tf.metrics.mean(per_example_loss)\n return {\n \"eval_accuracy\": accuracy,\n \"eval_loss\": loss,\n }\n elif model_function.task_type == TaskType.REGRESSION:\n\n def metric_fn(per_example_loss, label_ids, logits):\n ground_truth = tf.log1p(tf.clip_by_value(tf.cast(label_ids, tf.float32), 1e-8, 1e+30))\n predictions = tf.log1p(tf.clip_by_value(logits, 1e-8, 1e+30))\n return {\n \"eval_loss\": tf.metrics.mean(per_example_loss),\n \"another_loss\": tf.metrics.mean_squared_error(ground_truth, predictions)\n }\n else:\n raise NotImplementedError()\n\n eval_metrics = (metric_fn, [per_example_loss, label_ids, logits])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n predictions = {\n \"result\": probabilities\n }\n print(probabilities.shape)\n print(type(probabilities))\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n return output_spec\n\n return model_fn", "def 
model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n (total_loss, per_example_loss, log_probs) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n\n scaffold_fn = None\n initialized_variable_names = []\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n\n elif mode == tf.estimator.ModeKeys.PREDICT:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions={\n \"log_probs\": log_probs,\n \"label_ids\": label_ids,\n },\n scaffold_fn=scaffold_fn)\n\n else:\n raise ValueError(\n \"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec\n\n return model_fn", "def model_fn_builder(\n bert_config,\n num_labels,\n init_checkpoint,\n learning_rate,\n num_train_steps,\n num_warmup_steps,\n use_tpu,\n use_one_hot_embeddings,\n layer_indexes,\n):\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n # tf.logging.info(\"*** Features ***\")\n # for name in sorted(features.keys()):\n # tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n is_real_example = None\n if \"is_real_example\" in features:\n is_real_example = tf.cast(features[\"is_real_example\"], dtype=tf.float32)\n else:\n is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)\n\n is_training = mode == tf.estimator.ModeKeys.TRAIN\n\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings,\n )\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (\n assignment_map,\n initialized_variable_names,\n ) = 
modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu\n )\n\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode, loss=total_loss, train_op=train_op, scaffold=scaffold_fn\n )\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(per_example_loss, label_ids, probabilities, is_real_example):\n\n logits_split = tf.split(probabilities, num_labels, axis=-1)\n label_ids_split = tf.split(label_ids, num_labels, axis=-1)\n # metrics change to auc of every class\n eval_dict = {}\n for j, logits in enumerate(logits_split):\n label_id_ = tf.cast(label_ids_split[j], dtype=tf.int32)\n current_auc, update_op_auc = tf.metrics.auc(label_id_, logits)\n eval_dict[str(j)] = (current_auc, update_op_auc)\n eval_dict[\"eval_loss\"] = tf.metrics.mean(values=per_example_loss)\n return eval_dict\n\n\n eval_metrics = metric_fn(\n per_example_loss, label_ids, probabilities, is_real_example\n )\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metric_ops=eval_metrics,\n scaffold=scaffold_fn,\n )\n else:\n out = {\n \"input_ids\": input_ids,\n \"label_ids\": label_ids,\n }\n all_layers = model.get_all_encoder_layers()\n for (i, layer_index) in enumerate(layer_indexes):\n out[\"layer_output_%d\" % i] = all_layers[layer_index]\n\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode, predictions=out, scaffold=scaffold_fn\n )\n return output_spec\n\n return model_fn", "def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_one_hot_embeddings):\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n input_mask = features[\"input_mask\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n is_prediction = (mode == tf.estimator.ModeKeys.PREDICT)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings, is_prediction)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n 
total_loss, learning_rate, num_train_steps, num_warmup_steps, False)\n\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n )\n elif mode == tf.estimator.ModeKeys.EVAL:\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n accuracy = tf.metrics.accuracy(\n labels=label_ids, predictions=predictions)\n loss = tf.metrics.mean(values=per_example_loss)\n eval_metrics = {\n \"eval_accuracy\": accuracy,\n \"eval_loss\": loss,\n }\n\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metric_ops=eval_metrics\n )\n else:\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n predictions={\"probabilities\": probabilities},\n export_outputs={'predict': tf.estimator.export.PredictOutput(outputs=probabilities)}\n )\n return output_spec\n\n return model_fn", "def model_fn_builder(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n logits = create_model(\n bert_config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n \"unique_ids\": unique_ids,\n \"prediction\": tf.argmax(logits, axis=-1),\n }\n output_spec = tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n else:\n def compute_loss(logits, positions):\n one_hot_positions = tf.one_hot(\n positions, depth=3, dtype=tf.float32)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n loss = -tf.reduce_mean(\n tf.reduce_sum(one_hot_positions * log_probs, axis=-1))\n return loss\n\n label = features[\"label\"]\n\n loss = compute_loss(logits, label)\n predicted_classes = tf.argmax(logits, axis=-1)\n accuracy = tf.metrics.accuracy(labels=label, predictions=predicted_classes, name='acc_op')\n\n # global global_acc_list\n # global_acc_list.append(accuracy)\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n metrics = {'accuracy': accuracy}\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=loss,\n eval_metric_ops=metrics,\n train_op=train_op)\n elif mode == tf.estimator.ModeKeys.EVAL:\n metrics = {'accuracy': accuracy}\n output_spec = tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=metrics)\n else:\n raise ValueError(\n \"Only TRAIN 
and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec\n\n return model_fn", "def create_model_fn(feature_columns):\n def _model_fn(features, mode, params):\n \"\"\"Model Function.\"\"\"\n logits = logits_fn(features, feature_columns, params)\n labels = tf.squeeze(features[\"label\"])\n\n if mode == tf_estimator.ModeKeys.EVAL:\n loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=labels,\n logits=logits\n ))\n\n def metric_fn(labels, logits):\n labels = tf.cast(labels, tf.int64)\n return {\n \"recall@1\": tf.metrics.recall_at_k(labels, logits, 1),\n \"recall@5\": tf.metrics.recall_at_k(labels, logits, 5)\n }\n\n return tf_estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=loss,\n eval_metrics=(metric_fn, [labels, logits]))\n\n elif mode == tf_estimator.ModeKeys.TRAIN:\n\n optimizer = tf.train.AdamOptimizer(\n learning_rate=params[\"learning_rate\"], beta1=params[\"beta1\"],\n beta2=params[\"beta2\"], epsilon=params[\"epsilon\"])\n optimizer = tf.tpu.CrossShardOptimizer(optimizer)\n\n loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=labels,\n logits=logits,\n ))\n\n train_op = optimizer.minimize(loss, tf.train.get_global_step())\n\n return tf_estimator.tpu.TPUEstimatorSpec(\n mode=mode, loss=loss, train_op=train_op)\n\n else:\n raise NotImplementedError\n return _model_fn", "def build_model_fn_optimizer():\n # TODO(anjalisridhar): Move this inside the model_fn once OptimizerV2 is\n # done?\n optimizer = tf.train.GradientDescentOptimizer(0.2)\n\n def model_fn(features, labels, mode): # pylint: disable=unused-argument\n \"\"\"model_fn which uses a single unit Dense layer.\"\"\"\n # You can also use the Flatten layer if you want to test a model without any\n # weights.\n layer = tf.layers.Dense(1, use_bias=True)\n logits = tf.reduce_mean(layer(tf.cast(features[\"input_ids\"], tf.float32)))/1000\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\"logits\": logits}\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)\n\n def loss_fn():\n y = tf.reshape(logits, []) - tf.constant(1.)\n return y * y\n\n if mode == tf.estimator.ModeKeys.EVAL:\n return tf.estimator.EstimatorSpec(mode, loss=loss_fn())\n\n assert mode == tf.estimator.ModeKeys.TRAIN\n\n global_step = tf.train.get_global_step()\n train_op = optimizer.minimize(loss_fn(), global_step=global_step)\n return tf.estimator.EstimatorSpec(mode, loss=loss_fn(), train_op=train_op)\n\n return model_fn", "def model_fn_builder(vocab_list, learning_rate, num_train_steps,\n num_warmup_steps, init_checkpoint, use_tpu, use_one_hot_embeddings, bert_config):\n def model_fn(features, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"input_type_ids\"]\n # label_ids = features[\"label_ids\"]\n vocab = vocab_list\n vocab_size = len(vocab_list)\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables 
****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n\n\n \n # TRAIN\n if not is_predicting:\n\n (loss, predictions, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, vocab, vocab_size, bert_config, use_one_hot_embeddings)\n\n train_op = bert.optimization.create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=FLAGS.use_tpu)\n\n # if mode == tf.estimator.ModeKeys.TRAIN:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n ## else:\n # return tf.estimator.EstimatorSpec(mode=mode,\n # loss=loss,\n # eval_metric_ops=eval_metrics)\n else:\n (predictions, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, vocab, vocab_size, bert_config, use_one_hot_embeddings)\n\n predictions = {\n 'probabilities': log_probs,\n 'predictions': predictions\n }\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode, predictions=predictions, scaffold_fn=scaffold_fn)\n\n return output_spec if use_tpu else output_spec.as_estimator_spec()\n # Return the actual model function in the closure\n return model_fn", "def model_fn_builder(adj_mat, w2n, n2w, bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n # INFO:tensorflow: name = input_ids, shape = (?, 180)\n # INFO:tensorflow: name = input_mask, shape = (?, 180)\n # INFO:tensorflow: name = is_real_example, shape = (?,)\n # INFO:tensorflow: name = label_ids, shape = (?,)\n # INFO:tensorflow: name = masked_lm_ids, shape = (?, 180)\n # INFO:tensorflow: name = masked_lm_positions, shape = (?, 180)\n # INFO:tensorflow: name = masked_lm_weights, shape = (?, 180)\n # INFO:tensorflow: name = segment_ids, shape = (?, 180)\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n masked_lm_positions = features[\"masked_lm_positions\"]\n masked_lm_ids = features[\"masked_lm_ids\"]\n masked_lm_weights = features[\"masked_lm_weights\"]\n #next_sentence_labels = features[\"next_sentence_labels\"]\n \n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n \n gcn_embedding = build_gcn_output(adj_mat, w2n, n2w, model.get_embedding_table(), bert_config, is_training)\n \n (masked_lm_loss,\n masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output(\n bert_config, model.get_sequence_output(), gcn_embedding,\n masked_lm_positions, masked_lm_ids, masked_lm_weights)\n\n\n masked_lm_loss = tf.identity(masked_lm_loss, name=\"masked_lm_loss\")\n\n\n total_loss = masked_lm_loss\n\n total_loss = tf.identity(total_loss, name='total_loss')\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint and (not FLAGS.use_horovod or hvd.rank() == 0):\n (assignment_map, 
initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n if not FLAGS.use_horovod or hvd.rank() == 0:\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu, FLAGS.use_horovod)\n\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op)\n return output_spec\n elif mode == tf.estimator.ModeKeys.PREDICT:\n\n #def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n # masked_lm_weights):#, next_sentence_example_loss,\n #next_sentence_log_probs, next_sentence_labels):\n \"\"\"Computes the loss and accuracy of the model.\"\"\"\n #masked_lm_log_probs = tf.reshape(masked_lm_log_probs,\n # [-1, masked_lm_log_probs.shape[-1]])\n masked_lm_predictions = tf.argmax(\n masked_lm_log_probs, axis=-1, output_type=tf.int32)\n # values=next_sentence_example_loss)\n\n predictions = {\n \"input_ids\": tf.reshape(input_ids, [-1]),\n \"predictions\": masked_lm_log_probs\n }\n\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions)\n #eval_metric_ops=eval_metrics)\n return output_spec\n else:\n def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n masked_lm_weights):\n \"\"\"Computes the loss and accuracy of the model.\"\"\"\n masked_lm_log_probs = tf.reshape(masked_lm_log_probs,\n [-1, masked_lm_log_probs.shape[-1]])\n masked_lm_predictions = tf.argmax(\n masked_lm_log_probs, axis=-1, output_type=tf.int32)\n masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])\n masked_lm_ids = tf.reshape(masked_lm_ids, [-1])\n masked_lm_weights = tf.reshape(masked_lm_weights, [-1])\n masked_lm_accuracy = tf.metrics.accuracy(\n labels=masked_lm_ids,\n predictions=masked_lm_predictions,\n weights=masked_lm_weights)\n masked_lm_mean_loss = tf.metrics.mean(\n values=masked_lm_example_loss, weights=masked_lm_weights)\n\n return {\n \"masked_lm_accuracy\": masked_lm_accuracy,\n \"masked_lm_loss\": masked_lm_mean_loss,\n }\n\n eval_metrics = metric_fn(\n masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n masked_lm_weights)\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metric_ops=eval_metrics)\n\n return output_spec\n\n return model_fn", "def model_fn_builder(bert_config, \n init_checkpoint, \n layer_indexes, \n use_tpu,\n use_one_hot_embeddings):\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n input_type_ids = features[\"input_type_ids\"]\n\n model = modeling.BertModel(config=bert_config,\n is_training=False,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=input_type_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n if mode != tf.estimator.ModeKeys.PREDICT:\n raise ValueError(\"Only 
PREDICT modes are supported: %s\" % (mode))\n\n tvars = tf.trainable_variables()\n scaffold_fn = None\n (assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, \n init_checkpoint)\n if use_tpu:\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold() \n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape, init_string)\n\n all_layers = model.get_all_encoder_layers()\n\n predictions = {\"unique_id\": unique_ids}\n for (i, layer_index) in enumerate(layer_indexes):\n predictions[\"layer_output_%d\" % i] = all_layers[layer_index]\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, \n predictions=predictions, \n scaffold_fn=scaffold_fn)\n return output_spec\n \n\n return model_fn", "def build_model_fn(self):\n # Define the model_fn we want to return\n def model_fn(features, labels, mode):\n with tf.variable_scope(self.variable_scope):\n # 1. Define the input placeholder\n if len(self.input_shape) == 2:\n net_input = tf.reshape(\n tensor=features[\"x\"],\n shape=[-1] + list(self.input_shape) + [1],\n name=\"L0_RESHAPE\"\n )\n else:\n net_input = features[\"x\"]\n\n # 2. Simply call the network\n self.tf_partial_network = sequence_to_net(\n sequence=self.encoded_network,\n input_tensor=net_input\n )\n\n # 3. Build the Fully-Connected layers after block.\n with tf.name_scope(\"L_FC\"):\n # Flatten and connect to the Dense Layer\n ll_flat = tf.layers.flatten(\n inputs=self.tf_partial_network,\n name=\"Flatten\"\n )\n dense_layer = tf.layers.dense(\n inputs=ll_flat,\n units=1024,\n activation=tf.nn.relu,\n name=\"DENSE\"\n )\n dropout_layer = tf.layers.dropout(\n inputs=dense_layer,\n rate=0.4,\n # pylint: disable=no-member\n training=mode == tf.estimator.ModeKeys.TRAIN,\n name=\"DROPOUT\"\n )\n\n # 4. Build the Prediction Layer based on a Softmax\n with tf.name_scope(\"L_PRED\"):\n # Logits layer\n logits_layer = tf.layers.dense(\n inputs=dropout_layer,\n units=self.n_clases,\n name=\"PL_Logits\"\n )\n\n predictions = {\n \"classes\": tf.argmax(\n input=logits_layer,\n axis=1,\n name=\"PL_Classes\"\n ),\n \"probabilities\": tf.nn.softmax(\n logits=logits_layer,\n name=\"PL_Softmax\"\n )\n }\n\n # If we are asked for prediction only, we return the\n # prediction and stop adding nodes to the graph.\n # pylint: disable=no-member\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions\n )\n\n # 4. 
Build the training nodes\n with tf.name_scope(\"L_TRAIN\"):\n # Loss\n loss_layer = tf.losses.sparse_softmax_cross_entropy(\n labels=labels,\n logits=logits_layer\n )\n\n # Training Op\n # pylint: disable=no-member\n if mode == tf.estimator.ModeKeys.TRAIN:\n # The optimizer via Gradient Descent (we can change it)\n optimizer = tf.train.AdamOptimizer(\n learning_rate=0.001,\n beta1=0.9,\n beta2=0.999,\n epsilon=10e-08,\n name=\"OPT\"\n )\n # We say that we want to optimize the loss layer using\n # the optimizer.\n train_op = optimizer.minimize(\n loss=loss_layer,\n global_step=tf.train.get_global_step(),\n name=\"OPT_MIN\"\n )\n # And return\n # pylint: disable=no-member\n return tf.estimator.EstimatorSpec(\n mode=mode,\n loss=loss_layer,\n train_op=train_op\n )\n\n # 5. Build the evaluation nodes.\n with tf.name_scope(\"L_EVAL\"):\n # Evaluation metric is accuracy\n eval_metric_ops = {\n \"accuracy\": tf.metrics.accuracy(\n labels=labels,\n predictions=predictions[\"classes\"],\n name=\"ACC\"\n )\n }\n\n # pylint: disable=no-member\n return tf.estimator.EstimatorSpec(\n mode=mode,\n loss=loss_layer,\n eval_metric_ops=eval_metric_ops\n )\n # End of tf.variable_scope()\n\n # Return the model_fn function\n return model_fn", "def _call_model_fn(self, features, labels, is_export_mode=False):\n model_fn_args = util.fn_args(self._model_fn)\n kwargs = {}\n\n # Makes deep copy with `config` and params` in case user mutates them.\n config = copy.deepcopy(self._config)\n params = copy.deepcopy(self._params)\n\n if 'labels' in model_fn_args:\n kwargs['labels'] = labels\n elif labels is not None:\n raise ValueError(\n 'model_fn does not take labels, but input_fn returns labels.')\n if 'mode' in model_fn_args:\n kwargs['mode'] = self._ctx.mode\n if 'config' in model_fn_args:\n kwargs['config'] = config\n if 'params' in model_fn_args:\n kwargs['params'] = params\n\n if 'params' not in model_fn_args:\n raise ValueError('model_fn ({}) does not include params argument, '\n 'required by TPUEstimator to pass batch size as '\n 'params[\\'batch_size\\']'.format(self._model_fn))\n\n if is_export_mode:\n batch_size_for_model_fn = None\n else:\n batch_size_for_model_fn = self._ctx.batch_size_for_model_fn\n\n if batch_size_for_model_fn is not None:\n if isinstance(params, hparam.HParams):\n params.add_hparam(_BATCH_SIZE_KEY, batch_size_for_model_fn)\n else:\n params[_BATCH_SIZE_KEY] = batch_size_for_model_fn\n\n estimator_spec = self._model_fn(features=features, **kwargs)\n if (self._ctx.is_running_on_cpu(is_export_mode) and\n isinstance(estimator_spec, TPUEstimatorSpec)):\n # The estimator_spec will be passed to `Estimator` directly, which expects\n # type `EstimatorSpec`.\n return estimator_spec.as_estimator_spec()\n else:\n return estimator_spec", "def _model_fn(features, labels, mode, config, params):\n with self._ctx.with_mode(mode) as ctx:\n model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)\n\n if mode != model_fn_lib.ModeKeys.PREDICT:\n is_export_mode = False\n else:\n # For export_savedmodel, input_fn is never passed to Estimator. 
So, by\n # checking the self._is_input_fn_invoked bit, we can know, given the\n # mode == PREDICT, it is the .predict API, not export_savedmodel API.\n if self._is_input_fn_invoked:\n is_export_mode = False\n else:\n is_export_mode = True\n\n # Clear the bit.\n self._is_input_fn_invoked = None\n\n if ctx.is_running_on_cpu(is_export_mode=is_export_mode):\n logging.info('Running %s on CPU', mode)\n return model_fn_wrapper.call_without_tpu(\n features, labels, is_export_mode=is_export_mode)\n\n assert labels is None, '`labels` passed to `model_fn` must be `None`.'\n # TPUEstimator._call_input_fn passes `input_fn` as features to here.\n assert callable(features), '`input_fn` is not callable.'\n input_fn = features\n\n input_holders = _InputPipeline(input_fn, batch_axis, ctx)\n enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (\n input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())\n\n graph = ops.get_default_graph()\n for enqueue_op in enqueue_ops:\n if isinstance(enqueue_op, list):\n graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)\n else:\n graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)\n\n if mode == model_fn_lib.ModeKeys.TRAIN:\n loss, host_call, scaffold = (\n _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))\n host_ops = host_call.create_tpu_hostcall()\n if host_ops is None:\n host_ops = []\n hooks = [\n TPUInfeedOutfeedSessionHook(\n ctx,\n enqueue_ops,\n host_ops,\n run_infeed_loop_on_coordinator=(\n run_infeed_loop_on_coordinator)),\n ExamplesPerSecondHook(ctx.global_batch_size,\n output_dir=self.model_dir),\n InstallSignalHandlerHook(),\n training.LoggingTensorHook(\n {\n 'loss': array_ops.identity(loss),\n 'step': training.get_global_step()\n },\n every_n_secs=30)\n ] + input_hooks\n chief_hooks = []\n if (self._config.save_checkpoints_secs or\n self._config.save_checkpoints_steps):\n chief_hooks.append(\n training.CheckpointSaverHook(\n self.model_dir,\n save_secs=self._config.save_checkpoints_secs,\n save_steps=self._config.save_checkpoints_steps,\n steps_per_run=self._config.tpu_config.iterations_per_loop,\n scaffold=scaffold))\n summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)\n with ops.control_dependencies([loss]):\n update_ops = _sync_variables_ops()\n\n # Validate the TPU training graph to catch basic errors\n _validate_tpu_training_graph()\n\n train_op = control_flow_ops.group(*update_ops)\n graph.add_to_collection(_TPU_TRAIN_OP, train_op)\n\n return model_fn_lib.EstimatorSpec(\n mode,\n loss=loss,\n training_chief_hooks=chief_hooks,\n training_hooks=hooks,\n train_op=train_op,\n scaffold=scaffold)\n\n if mode == model_fn_lib.ModeKeys.EVAL:\n total_loss, host_calls, scaffold = _eval_on_tpu_system(\n ctx, model_fn_wrapper, dequeue_fn)\n iterations_per_loop_var = _create_or_get_iterations_per_loop()\n mean_loss = math_ops.div(total_loss,\n math_ops.cast(\n iterations_per_loop_var,\n dtype=total_loss.dtype))\n\n # Creates a dummy metric update_op for all metrics. Estimator expects\n # all metrics in eval_metric_ops have update_op and calls them one by\n # one. 
The real metric update_ops are invoked in a separated thread.\n # So, here give Estimator the dummy op for all metrics.\n with ops.control_dependencies([mean_loss]):\n # After TPU evaluation computation is done (the mean_loss tensor),\n # reads all variables back from TPU and updates the eval step\n # counter properly\n internal_ops_to_run = _sync_variables_ops()\n internal_ops_to_run.append(\n _increase_eval_step_op(iterations_per_loop_var))\n with ops.control_dependencies(internal_ops_to_run):\n dummy_update_op = control_flow_ops.no_op()\n\n host_call_ret = host_calls.create_tpu_hostcall()\n eval_metric_ops = {}\n eval_update_ops = []\n for k, v in host_call_ret['eval_metrics'].items():\n eval_metric_ops[k] = (v[0], dummy_update_op)\n eval_update_ops.append(v[1])\n\n if 'host_call' not in host_call_ret:\n host_ops = []\n else:\n host_ops = host_call_ret['host_call']\n hooks = [\n TPUInfeedOutfeedSessionHook(\n ctx,\n enqueue_ops,\n eval_update_ops + host_ops,\n run_infeed_loop_on_coordinator=(\n run_infeed_loop_on_coordinator)),\n ] + input_hooks\n\n return model_fn_lib.EstimatorSpec(\n mode,\n loss=mean_loss,\n evaluation_hooks=hooks,\n eval_metric_ops=eval_metric_ops,\n scaffold=scaffold)\n\n # Predict\n assert mode == model_fn_lib.ModeKeys.PREDICT\n\n dummy_predict_op, host_calls, scaffold = _predict_on_tpu_system(\n ctx, model_fn_wrapper, dequeue_fn)\n with ops.control_dependencies([dummy_predict_op]):\n internal_ops_to_run = _sync_variables_ops()\n with ops.control_dependencies(internal_ops_to_run):\n dummy_predict_op = control_flow_ops.no_op()\n\n # In train and evaluation, the main TPU program is passed to monitored\n # training session to run. Infeed enqueue and outfeed dequeue are\n # executed in side threads. This is not the configuration for\n # prediction mode.\n #\n # For prediction, the Estimator executes the EstimatorSpec.predictions\n # directly and yield the element (via generator) to call site. So, the\n # outfeed based prediction must be passed to MonitoredSession directly.\n # Other parts of the TPU execution are organized as follows.\n #\n # 1. All outfeed based Tensors must be grouped with predictions Tensors\n # to form a single invocation. This avoid the issue we might trigger\n # multiple outfeeds incorrectly. To achieve this, `host_call` is\n # placed in control_dependencies of `stopping_signals`, and\n # `stopping_signals` is passed into _StoppingPredictHook, which sets\n # the `stopping_signals` as SessionRunArgs. MonitoredSession merges\n # all SessionRunArgs with the fetch in session.run together.\n #\n # 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)\n # are grouped together. 
They will be launched once and only once in\n # side threads and they quit naturally according to the SAME stopping\n # condition.\n enqueue_ops.append(dummy_predict_op)\n\n host_call_ret = host_calls.create_tpu_hostcall()\n if 'host_call' not in host_call_ret:\n host_ops = []\n else:\n host_ops = host_call_ret['host_call']\n\n predictions = host_call_ret['predictions']\n _verify_cross_hosts_transfer_size(\n predictions, message=(\n 'The estimated size for TPUEstimatorSpec.predictions is too '\n 'large.'))\n signals = host_call_ret['signals']\n\n with ops.control_dependencies(host_ops):\n host_ops = [] # Empty, we do do not need it anymore.\n scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(\n signals)\n predictions = _PaddingSignals.slice_tensor_or_dict(\n predictions, signals)\n\n hooks = [\n _StoppingPredictHook(scalar_stopping_signal),\n TPUInfeedOutfeedSessionHookForPrediction(ctx, enqueue_ops,\n host_ops),\n ] + input_hooks\n\n return model_fn_lib.EstimatorSpec(\n mode,\n prediction_hooks=hooks,\n predictions=predictions,\n scaffold=scaffold)", "def build_model_fn(self):\n # Define the model_fn we want to return\n def model_fn(features, labels, mode):\n with tf.variable_scope(self.variable_scope):\n # 1. Define the input placeholder\n if len(self.input_shape) == 2: # Reshape if necessary\n new_shape = [-1] + list(self.input_shape) + [1]\n net_input = tf.reshape(\n tensor=features[\"x\"],\n shape=new_shape,\n name=\"L0_RESHAPE\"\n )\n else:\n net_input = features[\"x\"]\n\n # 2. Simply call the network\n self.tf_partial_network = sequence_to_net(\n sequence=self.encoded_network,\n input_tensor=net_input\n )\n\n # 3. Call here the functions for flops & density to avoid more\n # elements. The check is done because for some reason, the\n # number of FLOPS changes during training.\n if self.flops is None:\n self.flops = compute_network_flops(\n graph=tf.get_default_graph(),\n collection_name=self.variable_scope,\n logdir=self.log_path\n )\n\n if self.density is None:\n self.density = compute_network_density(\n graph=tf.get_default_graph(),\n collection_name=self.variable_scope\n )\n\n # 4. Build the fully-connected layer after the block\n with tf.name_scope(\"L_FC\"):\n # Flatten and connect to the Dense Layer\n ll_flat = tf.layers.flatten(\n inputs=self.tf_partial_network,\n name=\"Flatten\"\n )\n dense_layer = tf.layers.dense(\n inputs=ll_flat,\n units=1024,\n activation=tf.nn.relu,\n name=\"DENSE\"\n )\n dropout_layer = tf.layers.dropout(\n inputs=dense_layer,\n rate=0.4,\n # pylint: disable=no-member\n training=mode == tf.estimator.ModeKeys.TRAIN,\n name=\"DROPOUT\"\n )\n\n # 5. 
Build the prediction layer, based on a softmax\n with tf.name_scope(\"L_PRED\"):\n # Logits layer\n logits_layer = tf.layers.dense(\n inputs=dropout_layer,\n units=self.n_clases,\n name=\"PL_Logits\"\n )\n\n predictions = {\n \"classes\": tf.argmax(\n input=logits_layer,\n axis=1,\n name=\"PL_Classes\"\n ),\n \"probabilities\": tf.nn.softmax(\n logits=logits_layer,\n name=\"PL_Softmax\"\n )\n }\n\n # If we are asked for prediction only, we return the\n # prediction and stop adding nodes to the graph.\n # pylint: disable=no-member\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions\n )\n\n # Build the training nodes\n with tf.name_scope(\"L_TRAIN\"):\n # Loss\n loss_layer = tf.losses.sparse_softmax_cross_entropy(\n labels=labels,\n logits=logits_layer\n )\n\n # Training Op\n # pylint: disable=no-member\n if mode == tf.estimator.ModeKeys.TRAIN:\n # The optimizer via Gradient Descent (we can change it)\n optimizer = tf.train.AdamOptimizer(\n learning_rate=0.001,\n beta1=0.9,\n beta2=0.999,\n epsilon=10e-08,\n name=\"OPT\"\n )\n # We say that we want to optimize the loss layer using\n # the optimizer.\n train_op = optimizer.minimize(\n loss=loss_layer,\n global_step=tf.train.get_global_step(),\n name=\"OPT_MIN\"\n )\n # And return\n # pylint: disable=no-member\n return tf.estimator.EstimatorSpec(\n mode=mode,\n loss=loss_layer,\n train_op=train_op\n )\n\n # Build the evaluation nodes (regular accuracy).\n with tf.name_scope(\"L_EVAL\"):\n # Evaluation metric is accuracy\n eval_metric_ops = {\n \"accuracy\": tf.metrics.accuracy(\n labels=labels,\n predictions=predictions[\"classes\"],\n name=\"ACC\"\n )\n }\n\n # pylint: disable=no-member\n return tf.estimator.EstimatorSpec(\n mode=mode,\n loss=loss_layer,\n eval_metric_ops=eval_metric_ops\n )\n\n # Return the model_fn function\n return model_fn", "def model_fn_builder(albert_config,num_labels,init_checkpoint,learning_rate,\n num_train_steps,num_warmup_steps,\n use_one_hot_embeddings,optimizer='adamw'):\n\n def model_fn(features,labels,mode,params):\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n tf.logging.info('*** Features ***')\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s ,shape = %s\" % (name,features[name].shape))\n\n input_ids = features['input_ids']\n input_mask = features['input_mask']\n segment_ids = features['segment_ids']\n label_ids = features['label_ids']\n if 'is_real_example' in features:\n is_real_example = tf.cast(features['is_real_example'],dtype=tf.float32)\n else:\n is_real_example = tf.ones(tf.shape(label_ids),dtype=tf.float32)\n\n is_training = (mode == tf_estimator.estimator.ModeKeys.TRAIN)\n\n (total_loss,per_example_loss,probabilities,predictions) = \\\n create_model(albert_config,is_training,input_ids,input_mask,\n segment_ids,label_ids,num_labels,\n use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n if init_checkpoint:\n (assignment_map,initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars,init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint,assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf_estimator.estimator.ModeKeys.TRAIN:\n train_op = 
optimization.create_optimizer(total_loss,\n learning_rate,\n num_train_steps,\n num_warmup_steps,use_tpu=False)\n output_spec = tf_estimator.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n )\n elif mode == tf_estimator.estimator.ModeKeys.EVAL:\n def metric_fn(per_example_loss,label_ids,logits,is_real_example):\n accuracy = tf.metrics.accuracy(\n labels=label_ids,predictions=predictions,\n weights=is_real_example\n )\n loss = tf.metrics.mean(\n values=per_example_loss,weights=is_real_example\n )\n return {\n 'eval_accuracy':accuracy,\n 'eval_loss':loss,\n }\n\n eval_metrics = metric_fn(per_example_loss,label_ids,predictions,is_real_example)\n output_spec = tf_estimator.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metric_ops=eval_metrics\n )\n else:\n output_spec = tf_estimator.estimator.EstimatorSpec(\n mode=mode,\n predictions={\n 'probabilities':probabilities,\n 'predictions':predictions,\n },\n )\n\n return output_spec\n\n return model_fn", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = model_function.create(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu,\n scope=(\"loss\" if model_function.freeze else None))\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n if model_function.task_type == TaskType.CLASSIFICATION:\n\n def metric_fn(per_example_loss, label_ids, logits):\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n accuracy = tf.metrics.accuracy(label_ids, predictions)\n loss = tf.metrics.mean(per_example_loss)\n return {\n \"eval_accuracy\": accuracy,\n \"eval_loss\": loss,\n }\n elif model_function.task_type == TaskType.REGRESSION:\n\n def metric_fn(per_example_loss, label_ids, logits):\n ground_truth = tf.log1p(tf.clip_by_value(tf.cast(label_ids, tf.float32), 1e-8, 1e+30))\n predictions = tf.log1p(tf.clip_by_value(logits, 1e-8, 1e+30))\n return {\n \"eval_loss\": tf.metrics.mean(per_example_loss),\n \"another_loss\": tf.metrics.mean_squared_error(ground_truth, predictions)\n }\n else:\n 
raise NotImplementedError()\n\n eval_metrics = (metric_fn, [per_example_loss, label_ids, logits])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n predictions = {\n \"result\": probabilities\n }\n print(probabilities.shape)\n print(type(probabilities))\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n return output_spec", "def get_model_fn(params,\n graph_builder_class,\n device_assigner,\n weights_name=None,\n keys_name=None,\n early_stopping_rounds=100,\n num_trainers=1,\n trainer_id=0,\n report_feature_importances=False,\n model_dir=None,\n local_eval=False):\n def _model_fn(features, labels, mode):\n \"\"\"Function that returns predictions, training loss, and training op.\"\"\"\n weights = None\n if weights_name and weights_name in features:\n weights = features.pop(weights_name)\n\n keys = None\n if keys_name and keys_name in features:\n keys = features.pop(keys_name)\n\n # If we're doing eval, optionally ignore device_assigner.\n # Also ignore device assigner if we're exporting (mode == INFER)\n dev_assn = device_assigner\n if (mode == model_fn_lib.ModeKeys.INFER or\n (local_eval and mode == model_fn_lib.ModeKeys.EVAL)):\n dev_assn = None\n\n graph_builder = graph_builder_class(params,\n device_assigner=dev_assn)\n inference = {}\n output_alternatives = None\n if (mode == model_fn_lib.ModeKeys.EVAL or\n mode == model_fn_lib.ModeKeys.INFER):\n inference[eval_metrics.INFERENCE_PROB_NAME] = (\n graph_builder.inference_graph(features))\n\n if params.regression:\n predictions = {\n None: inference[eval_metrics.INFERENCE_PROB_NAME]}\n output_alternatives = {\n None: (constants.ProblemType.LINEAR_REGRESSION, predictions)}\n else:\n inference[eval_metrics.INFERENCE_PRED_NAME] = math_ops.argmax(\n inference[eval_metrics.INFERENCE_PROB_NAME], 1)\n\n predictions = {\n prediction_key.PredictionKey.PROBABILITIES:\n inference[eval_metrics.INFERENCE_PROB_NAME],\n prediction_key.PredictionKey.CLASSES:\n inference[eval_metrics.INFERENCE_PRED_NAME]}\n output_alternatives = {\n None: (constants.ProblemType.CLASSIFICATION, predictions)}\n\n if report_feature_importances:\n inference[eval_metrics.FEATURE_IMPORTANCE_NAME] = (\n graph_builder.feature_importances())\n\n if keys is not None:\n inference[keys_name] = keys\n\n # labels might be None if we're doing prediction (which brings up the\n # question of why we force everything to adhere to a single model_fn).\n loss_deps = []\n training_graph = None\n training_hooks = []\n scaffold = None\n if labels is not None and mode == model_fn_lib.ModeKeys.TRAIN:\n training_graph = control_flow_ops.group(\n graph_builder.training_graph(\n features, labels, input_weights=weights,\n num_trainers=num_trainers,\n trainer_id=trainer_id),\n state_ops.assign_add(contrib_framework.get_global_step(), 1))\n loss_deps.append(training_graph)\n if hasattr(graph_builder, 'finalize_training'):\n finalize_listener = EveryCheckpointPreSaveListener(\n graph_builder.finalize_training())\n scaffold = monitored_session.Scaffold()\n training_hooks.append(\n basic_session_run_hooks.CheckpointSaverHook(\n model_dir, save_secs=600, save_steps=None,\n scaffold=scaffold,\n listeners=[finalize_listener]))\n\n training_loss = None\n if (mode == model_fn_lib.ModeKeys.EVAL or\n mode == model_fn_lib.ModeKeys.TRAIN):\n with ops.control_dependencies(loss_deps):\n training_loss = graph_builder.training_loss(\n features, labels, 
name=LOSS_NAME)\n\n # Put weights back in\n if weights is not None:\n features[weights_name] = weights\n\n if early_stopping_rounds:\n training_hooks.append(TensorForestLossHook(early_stopping_rounds))\n\n return model_fn_lib.ModelFnOps(\n mode=mode,\n predictions=inference,\n loss=training_loss,\n train_op=training_graph,\n training_hooks=training_hooks,\n scaffold=scaffold,\n output_alternatives=output_alternatives)\n\n return _model_fn", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n # tf.logging.info(\"*** Features ***\")\n # for name in sorted(features.keys()):\n # tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n # output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n # mode=tf.estimator.ModeKeys.PREDICT,\n # predictions=probabilities)\n output_spec = tf.estimator.EstimatorSpec(\n mode=tf.estimator.ModeKeys.PREDICT,\n predictions=probabilities\n )\n return output_spec", "def _model_fn(features, labels, mode, config):\n return _transformer_model_fn(\n features=features,\n labels=labels,\n mode=mode,\n head=head_lib._regression_head_with_mean_squared_error_loss(\n label_dimension=label_dimension,\n weight_column=weight_column,\n loss_reduction=loss_reduction),\n num_layers=num_layers,\n d_model=d_model,\n num_heads=num_heads,\n dff=dff,\n input_vocab_size=input_vocab_size,\n target_vocab_size=target_vocab_size,\n output_size=output_size,\n feature_columns=tuple(feature_columns or []),\n optimizer=optimizer,\n dropout=dropout,\n input_layer_partitioner=input_layer_partitioner,\n config=config,\n data_conf=data_conf)" ]
[ "0.7927153", "0.79216784", "0.78488064", "0.7728147", "0.7653819", "0.7596977", "0.7499681", "0.7498984", "0.74952537", "0.74641263", "0.746087", "0.7454586", "0.7400736", "0.73545307", "0.7325875", "0.73120433", "0.72905254", "0.72691625", "0.72314054", "0.71903694", "0.7071783", "0.70535904", "0.7035695", "0.6957339", "0.6953554", "0.6881063", "0.6860833", "0.6776043", "0.67529154", "0.67199683" ]
0.81900585
0
Defines input column according to model (raw text or clean text)
def define_input_output(self):
    if self.classifier_model.text_preprocessing == ClassifierModel.TEXT_CLEAN:
        self.clean_range()
        self.input_col = self.df["clean_text"]
    elif self.classifier_model.text_preprocessing == ClassifierModel.TEXT_RAW:
        self.input_col = self.df[self.col_name_sentences]
    else:
        raise Exception("Text preprocessing unknown")
    self.output_col = self.df[self.col_name_labels]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reconstruct_input_ext(self, model_in):", "def on_columnvalue_modified( self, *data ):\n\t\tif (len(data) == 4):\t( cell, path, model, user_data ) = data\n\t\telse:\t\t\t( cell, path, new_text, model, user_data ) = data\n\t\t(datatype,) = user_data\n\t\tcolid = self.window2.type2colid[datatype]\n\t\tif \t(datatype == \"combo\"):\n\t\t\tmodel[path][colid] = new_text\n\t\telif \t(datatype == \"spin\"):\n\t\t\tmodel[path][colid] = long(new_text)\n\t\telif \t(datatype == \"text\"):\n\t\t\tmodel[path][colid] = new_text\n\t\telif \t(datatype == \"check\"):\n\t\t\tmodel[path][colid] = not model[path][colid]", "def process_raw_text(self, file_name, column_side):\n self.mvc_check()\n\n model_txt = None\n if column_side == LEFT_TEXT:\n model_txt = self.model.txt1\n elif column_side == RIGHT_TEXT:\n model_txt = self.model.txt2\n\n model_txt.open_raw(file_name)\n model_txt.process_raw()\n self.opened_txt[column_side] = True\n self.can_align = self.opened_txt[LEFT_TEXT] and self.opened_txt[RIGHT_TEXT]\n\n # Goldsmith\n model_txt.make_trie(column_side)\n model_txt.apply_goldsmith(1.1, 20, column_side)\n\n # Associate word for alignment if both text were opened\n if self.can_align:\n for view in self.views:\n view.end_task()\n view.change_task(\"Associating words\")\n self.model.associate_words(1.5)\n for view in self.views:\n view.end_task()\n\n # TODO : coherent saving to database using model.save_data\n\n return model_txt.str", "def _build_data_from_text(self, text):\n # get CSV field\n text = text.split(self._data_sep)[self._data_col]\n # tokenize\n return super()._build_data_from_text(text)", "def preprocessing_transform(self, x):\n if isinstance(self.column_text, int) and self.column_text not in x.columns:\n col = self.column_text\n else:\n col = list(x.columns).index(self.column_text)\n\n ct = x.shape[0]\n # INPUTS\n if self.method_embedding.lower() in ['roberta', \"camembert\", \"xlm-roberta\"]:\n ids = np.ones((ct, self.maxlen), dtype='int32')\n else:\n ids = np.zeros((ct, self.maxlen), dtype='int32')\n att = np.zeros((ct, self.maxlen), dtype='int32')\n tok = np.zeros((ct, self.maxlen), dtype='int32')\n\n for k in range(ct):\n text = \" \" + \" \".join(x.iloc[k, col].split())\n\n if self.method_embedding == 'RoBERTa':\n enc = self.tokenizer.encode(text)\n else:\n enc = self.tokenizer.encode(text, max_length=self.maxlen, truncation=True)\n\n # CREATE BERT INPUTS\n if self.method_embedding == 'RoBERTa':\n ids[k, :len(enc.ids)] = enc.ids[:self.maxlen]\n att[k, :len(enc.ids)] = 1\n else:\n ids[k, :len(enc)] = enc\n att[k, :len(enc)] = 1\n\n x_preprocessed = [ids, att, tok]\n if self.dimension_embedding == 'word_embedding':\n return x_preprocessed\n else:\n model_extractor = self.model_extract_document_embedding()\n document_embedding = model_extractor.predict(x_preprocessed)\n return document_embedding", "def __init__(self, question_txt, question_prefix, col_range, col_value, use_strings=False):\n super().__init__(question_txt, question_prefix, col_range)\n self.col_value = col_value\n self.use_strings = use_strings", "def input_text(self, val):\n if val:\n self.__input_text = val\n self.indexed_tokens = self.tokenizer.encode(self.input_text)\n self.input_ids = torch.tensor([self.indexed_tokens])\n self.input_ids = self.input_ids.to(self.device)\n self.input_size = len(self.input_ids[0])\n self._clear_results()", "def load_input(raw):\n\n columns = [(column, column['dataTypeName'] == 'text')\n for column in raw['meta']['view']['columns']]\n\n print('')\n print(\"[COLUMNS]\")\n for column, 
selected in columns:\n if selected:\n try:\n description = column['description'].strip()[:50]\n except KeyError:\n description = ''\n print(\"{}: {}\".format(column['name'],\n description))\n print('')\n\n for item in raw['data']:\n yield {column['name']: value\n for (column, selected), value in zip(columns, item)\n if selected and type(value) is str}", "def preprocess_df(df, column, preview=True):\n \n df[column] = df['Attraction'].apply(lambda x: x.lower())\n df[column] = df[column].apply(lambda x: re.sub('[%s]' % re.escape(string.punctuation), '', x))\n df[column] = df[column].apply(lambda x: re.sub('\\w*\\d\\w*','', x))\n \n return df", "def apply_column_value(raw_column_name, column_value, model, mapping, is_extra_data, cleaner):\n # If the item is the extra_data column, then make sure to save it to the\n # extra_data field of the database\n if raw_column_name in mapping:\n table_name, mapped_column_name, display_name, is_extra_data = mapping.get(raw_column_name)\n\n # special postal case:\n if mapped_column_name in ['postal_code', 'owner_postal_code']:\n if '-' in str(column_value):\n postal = str(column_value).split('-')[0].zfill(5)\n ext = str(column_value).split('-')[1].zfill(4)\n column_value = postal + '-' + ext\n column_value = str(column_value).zfill(5)\n\n cleaned_value = None\n if cleaner:\n # Get the list of Quantity fields from the Column object in SEED. This is non-ideal, since the\n # rest of the mapping code does not use SEED models. Perhaps make this an argument.\n if (model.__class__.__name__, mapped_column_name) in apps.get_model('seed',\n 'Column').QUANTITY_UNIT_COLUMNS:\n # clean against the database type first\n cleaned_value = cleaner.clean_value(column_value, mapped_column_name, is_extra_data)\n\n # This is a temporary fix for when the raw_column_name and the mapped_column_name\n # are the same. It causes the units to be cast twice since the cleaner look up finds\n # the same column twice. 
The cleaner needs to be cleaned up quite a bit to handle\n # this error correctly.\n if mapped_column_name != raw_column_name:\n # now clean against the raw name with pint (Quantity Units) because that's the column\n # that holds the units needed to interpret the value correctly\n cleaned_value = cleaner.clean_value(cleaned_value, raw_column_name,\n is_extra_data)\n else:\n cleaned_value = cleaner.clean_value(column_value, mapped_column_name, is_extra_data)\n else:\n cleaned_value = default_cleaner(column_value)\n\n if is_extra_data:\n if hasattr(model, 'extra_data'):\n # only save it if the model and the mapping are the same\n if model.__class__.__name__ == table_name:\n if isinstance(cleaned_value, (datetime, date)):\n # TODO: create an encoder for datetime once we are in Django 1.11\n model.extra_data[mapped_column_name] = cleaned_value.isoformat()\n else:\n model.extra_data[mapped_column_name] = cleaned_value\n else:\n # Simply set the field to the cleaned value if it is the correct model\n if model.__class__.__name__ == table_name:\n setattr(model, mapped_column_name, cleaned_value)\n\n return model", "def parse_input(input_data, dictionary, model):\n vec_text = TextBlob(input_data).words.lower().lemmatize()\n vec_bow = dictionary.doc2bow(vec_text)\n return model[vec_bow]", "def process_source(self):\n source_col = getattr(self.model_cls, self.source)\n return source_col", "def preprocessing_transform(self, x):\n if isinstance(self.column_text, int) and self.column_text not in x.columns:\n col = self.column_text\n else:\n col = list(x.columns).index(self.column_text)\n if self.dimension_embedding == 'word_embedding':\n tok = self.tokenizer.texts_to_sequences(x.iloc[:, col])\n tok = pad_sequences(tok, maxlen=self.maxlen, padding='post')\n x_preprocessed = {\"tok\": tok}\n return x_preprocessed\n else:\n try:\n try:\n embeddings_gensim_model = load_model(self.method_embedding)\n method = \"model\"\n except:\n embeddings_gensim_model = load_keyedvectors(self.method_embedding)\n method = \"keyedvectors\"\n except Exception:\n logger.critical(\"unknown path for Word2Vec weights : '{}'\".format(self.method_embedding))\n\n document_embedding = build_embedding_documents_from_gensim_model(x.iloc[:, col],\n embeddings_gensim_model, method)\n return document_embedding", "def apply(self, text):", "def standardize_text(df: pd.DataFrame,\r\n text_field: str,\r\n output_field: str) -> pd.DataFrame:\r\n\r\n # df[output_field] = df[text_field].apply(\r\n # lambda column: emoji.get_emoji_regexp().sub(u'', column)\r\n # )\r\n\r\n df[output_field] = df[text_field].str.replace(\"'m\", ' am')\r\n df[output_field] = df[output_field].str.replace(\"’m\", ' am')\r\n df[output_field] = df[output_field].str.replace(\"´m\", ' am')\r\n\r\n df[output_field] = df[output_field].str.replace(\"'ve\", ' have')\r\n df[output_field] = df[output_field].str.replace(\"’ve\", ' have')\r\n df[output_field] = df[output_field].str.replace(\"´ve\", ' have')\r\n\r\n df[output_field] = df[output_field].str.replace(\"'d\", ' would')\r\n df[output_field] = df[output_field].str.replace(\"’d\", ' would')\r\n df[output_field] = df[output_field].str.replace(\"´d\", ' would')\r\n\r\n df[output_field] = df[output_field].str.replace(\"n't\", ' not')\r\n df[output_field] = df[output_field].str.replace(\"n’t\", ' not')\r\n df[output_field] = df[output_field].str.replace(\"n´t\", ' not')\r\n\r\n df[output_field] = df[output_field].str.replace(\"'ll\", ' will')\r\n df[output_field] = df[output_field].str.replace(\"’ll\", ' will')\r\n 
df[output_field] = df[output_field].str.replace(\"´ll\", ' will')\r\n\r\n df[output_field] = df[output_field].str.replace(\"'s\", ' is')\r\n df[output_field] = df[output_field].str.replace(\"’\", ' is')\r\n df[output_field] = df[output_field].str.replace(\"´s\", ' is')\r\n\r\n df[output_field] = df[output_field].str.replace('/', ' ')\r\n df[output_field] = df[output_field].str.replace('\\.{2,}', '.')\r\n df[output_field] = df[output_field].str.replace('!{2,}', '!')\r\n df[output_field] = df[output_field].str.replace('\\?{2,}', '?')\r\n df[output_field] = df[output_field].str.replace('€+', '')\r\n df[output_field] = df[output_field].str.replace('[0-9$&~\\\\()[\\]{}<>%\\'\"“”‘’,;…+\\-_=*]+', '')\r\n df[output_field] = df[output_field].str.replace(r'http\\S+', '')\r\n df[output_field] = df[output_field].str.replace(r'http', '')\r\n df[output_field] = df[output_field].str.replace(r'@\\S+', '')\r\n df[output_field] = df[output_field].str.replace(r'@', 'at')\r\n df[output_field] = df[output_field].str.lower()\r\n df[output_field] = df[output_field].astype(str)\r\n\r\n return df", "def preproc_user_input(txt, model):\r\n txt = pre_process(txt)\r\n txt_tokenized = [word for word in txt.split(\" \") if word in model.wv.vocab]\r\n return \" \".join(txt_tokenized)", "def _process_text_line(self, line, columns, format, lower_case, num_line,\n fill_missing=0, filter_case=None,\n strict_separator=False):\n if not isinstance(line, list) and not isinstance(\n line, tuple) and not isinstance(line, numpy.ndarray):\n if format != \"tsv\":\n raise Exception(\"unable to process format \" + format)\n line = line.strip(\"\\r\\n \").replace(\"\\n\", \" \")\n line = DatabaseCore2._split_expr.split(line)\n\n if filter_case is not None:\n line = [filter_case(s) for s in line]\n\n try:\n if fill_missing > 0:\n m = max(columns.keys())\n if m >= len(line):\n line = copy.copy(line)\n add = 0\n while m >= len(line) and add < fill_missing:\n a, b = columns[len(line)]\n if b is int:\n line.append(\"0\")\n elif b is float:\n line.append(\"0.0\")\n elif b is decimal.Decimal:\n line.append(\"0\")\n elif b is str:\n line.append(\"\")\n else:\n line.append(\"\")\n add += 1\n\n res = {}\n for c, v in columns.items():\n if \"AUTOFILL\" in v:\n res[v[0]] = \"NULL\"\n elif \"AUTOINCREMENT\" in v:\n continue\n else:\n if c >= len(line):\n self.LOG(\n \"(a)line number \",\n num_line,\n \"*unable to process a line columns \",\n c,\n \"#\",\n line,\n \" columns \",\n columns)\n return None\n\n val = line[c]\n if len(v) > 2 and v[2].lower() not in [\n \"primarykey\", \"autofill\"]:\n val = v[2](val)\n\n try:\n if isinstance(v[1], tuple):\n val = v[1][0](val)\n elif v[1] is datetime.datetime:\n if isinstance(val, datetime.datetime):\n pass\n elif isinstance(val, str):\n val = datetime.datetime.parse(val)\n else:\n raise TypeError(\n \"unable to convert %s into datetime\" % str(\n type(val)))\n else:\n val = v[1](val)\n except ValueError: # as e :\n self.LOG(\n \"(b)line number \",\n num_line,\n \"**unable to process a line columns \",\n c,\n \"#\",\n v[0],\n \" type \",\n v[1],\n \" value \",\n repr(\n line[c]))\n return None\n\n if isinstance(val, str):\n val = val.replace(\"'\", \"''\")\n if lower_case:\n val = val.lower()\n res[v[0]] = val\n\n return res\n except Exception:\n self.LOG(\"(c)line number\", num_line,\n \"***unable to process a line columns:\", line)\n return None", "def preprocess(self, text):\r\n return text", "def _create_string_input_trainable_model():\n\n class BlockWithStringInputs(onnxblock.ForwardBlock):\n def 
__init__(self):\n super().__init__()\n self.cast = onnxblock.blocks.Cast(to=onnx.TensorProto.FLOAT)\n self.linear = onnxblock.blocks.Linear(4, 2)\n\n def build(self, string_input):\n return self.linear(self.cast(string_input))\n\n string_block = BlockWithStringInputs()\n with onnxblock.empty_base() as model_accessor:\n model_accessor.model.graph.input.extend(\n [\n onnx.helper.make_tensor_value_info(\"input\", onnx.TensorProto.STRING, [1, 4]),\n ]\n )\n _ = string_block(\"input\")\n\n return string_block.to_model_proto()", "def process_text(input_txt):\r\n # if input is string\r\n tidy_txt = remove_pattern(input_txt,\"@[\\w]*\")\r\n ##=============================== if input is dataframe ====================##\r\n # tidy_txt = np.vectorize(remove_pattern)(input_txt,\"@[\\w]*\") #\r\n ##==========================================================================##\r\n # remove special characters\r\n tidy_txt = tidy_txt.replace(\"[^a-zA-Z#]\",\" \")\r\n # split into words\r\n tokenized_txt = tidy_txt.split()\r\n # perform stemming\r\n stemmer = PorterStemmer()\r\n tokenized_txt = [stemmer.stem(i) for i in tokenized_txt]\r\n print(tokenized_txt)\r\n # joining words back\r\n tokenized_txt = ' '.join(tokenized_txt)\r\n return tokenized_txt", "def column(self, value):\n\n # Escape |\n return value.replace(\"|\", \"&#124;\") if value else value", "def make_input(value):\n what = value[0]\n if what.startswith('not editable'):\n what = what.replace('not editable:', '').replace(\"'\", \"\\'\")\n if what.startswith('attribute:value:'):\n what = what.replace('attribute:value:', '').replace(\"'\", \"\\'\")\n return what", "def preProcessText(col):\n reponct = string.punctuation.replace(\"?\",\"\").replace(\"/\",\"\")\n rehtml = re.compile('<.*>')\n extr = col.str.strip()\n extr = extr.str.replace(rehtml, '', regex=True)\n extr = extr.str.translate(str.maketrans('','',reponct))\n extr = extr.str.replace('[^0-9a-zA-Z?/ ]+', ' ', regex=True)\n extr = extr.str.replace('\\s+', ' ', regex=True)\n extr = extr.str.lower()\n return extr", "def _getTextType(self, lineData, column):\n if lineData is None:\n return ' ' # default is code\n \n textTypeMap = lineData[1]\n if column >= len(textTypeMap): # probably, not actual data, not updated yet\n return ' '\n \n return textTypeMap[column]", "def predict_on_input(model, model_type, path_in, config, max_examples, device):\n char_to_idx = load_char_to_idx()\n max_length = load_max_len() if 'max_length_text' not in config else config['max_length_text']\n if not max_examples:\n max_examples = get_num_examples(path_in)\n predictions = []\n if model_type == 'torch':\n reader = csv.reader(open(path_in, 'r', encoding='utf8'))\n for i, row in enumerate(reader):\n text_id, text, masked, label_binary, label_ternary, label_finegrained, source = row\n adjust_text = adjust_text_len(text, max_length, config)\n text_idxs = [char_to_idx.get(char, char_to_idx['unk']) for char in adjust_text]\n x = np.zeros(max_length)\n for j, idx in enumerate(text_idxs):\n x[j] = idx\n output_raw = model(torch.LongTensor([x]).to(device))\n output = torch.squeeze(output_raw)\n max_prob, prediction = torch.max(output, 0)\n pred_binary = prediction.item() if prediction.item() <= 1 else 1\n if config['granularity'] != 'binary':\n pred_ternary = prediction.item() if prediction.item() <= 2 else 2\n if config['granularity'] == 'finegrained':\n pred_finegrained = prediction.item()\n else:\n pred_finegrained = 'NULL'\n else:\n pred_ternary = 'NULL'\n pred_finegrained = 'NULL'\n predictions.append((text_id, 
label_binary, label_ternary, label_finegrained, pred_binary,\n pred_ternary, pred_finegrained, text, masked, source))\n if i == max_examples - 1:\n print('Predicted on example [{}/{}]'.format(i, max_examples))\n break\n else:\n print('Predicted on example [{}/{}]\\r'.format(i, max_examples), end='\\r')\n return predictions", "def _clean_text(self, X):\n\n def normalize(text):\n text = text.translate(str.maketrans('', '', string.punctuation))\n return text.lower()\n\n for col_name in X.columns:\n # we assume non-str values will have been filtered out prior to calling TextFeaturizer. casting to str is a safeguard.\n col = X[col_name].astype(str)\n X[col_name] = col.apply(normalize)\n return X", "def _quoter(self, col) :\n\n j = self.cols.index(col)\n if self.types[j] == 'TEXT' :\n return '\"%s\"'\n else :\n return '%s'", "def process_column(self, column):\r\n processed_column = DatasetColumn(name=self.get_or_fail(column, 'name'),\r\n description=self.get_or_default(\r\n column, 'description', ''))\r\n if 'type' in column:\r\n original_type = column['type'].lower()\r\n processed_column.original_type = original_type\r\n if (original_type == 'string' or original_type == 'date'\r\n or original_type == 'time' or original_type == 'yearmonth'\r\n or original_type == 'duration'\r\n or original_type == 'geopoint'\r\n or original_type == 'geojson'):\r\n processed_column.type = 'string'\r\n elif (original_type == 'numeric' or original_type == 'number'\r\n or original_type == 'year'):\r\n processed_column.type = 'numeric'\r\n elif original_type == 'boolean':\r\n processed_column.type = 'boolean'\r\n elif original_type == 'datetime':\r\n processed_column.type = 'datetime'\r\n else:\r\n # Possibly extended data type - not going to try to track those\r\n # here. Will set the type and let the server handle it.\r\n processed_column.type = original_type\r\n return processed_column", "def parse_columns(self):\n self.data['ID'], self.data['SSSSSSSS.mmmuuun'] = self.data['ID SSSSSSSS.mmmuuun'].str.split(' ', 1).str\n self.data['SSSSSSSS.mmmuuun'] = self.data['SSSSSSSS.mmmuuun'].astype(str).str.strip()", "def prepopulate(self, model, exclude=[]):\n for col in model.columns():\n if col not in exclude and hasattr(self, col):\n setattr(getattr(self, col), 'data', getattr(model, col))" ]
[ "0.5679452", "0.56548154", "0.5609584", "0.5550475", "0.5462915", "0.5444646", "0.53719866", "0.52677375", "0.5246684", "0.52359515", "0.52223074", "0.51953554", "0.5169166", "0.5150608", "0.51421463", "0.5139192", "0.5078079", "0.507589", "0.50427675", "0.5032137", "0.49962822", "0.49943298", "0.4961035", "0.49527472", "0.49405164", "0.49380076", "0.49348617", "0.49220076", "0.49078652", "0.49041092" ]
0.6507603
0
Return a saved state value, None if item is undefined.
def __getitem__(self, item): return self._state["data"].get(item, None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __getitem__(self, item):\n return self._state[\"data\"].get(item, None)", "def __getitem__(self, item):\n return self._state[\"data\"].get(item, None)", "def __getitem__(self, item):\n return self._state[\"data\"].get(item, None)", "def __getattr__(self, item):\r\n return self._state[\"data\"].get(item, None)", "def __getattr__(self, item):\r\n return self._state[\"data\"].get(item, None)", "def GetItem3StateValue(self, item):\r\n\r\n return item.Get3StateValue()", "def latest_state_data(self):\n if not self.state_list:\n return None\n if not self.state_list[-1]:\n return None\n return self.state_list[-1]", "def __getattr__(self, item):\n return self._state[\"data\"].get(item, None)", "def __getattr__(self, item):\n return self._state[\"data\"].get(item, None)", "def __getattr__(self, item):\n return self._state[\"data\"].get(item, None)", "def get_state(self) -> Optional[BaseModel]:\n return self.state", "def get_value(self) -> CartState:\n return self._state", "def state(self, key):\n\n if key in st.session_state:\n return st.session_state[key]\n\n return None", "def __getstate__(self):\n state = self.__dict__.copy()\n self.__cleanState__(state)\n return state", "def __getstate__(self):\n return None", "def get_switched_item(self):\r\n res = self._switched_item\r\n # logging.warn('get switched item: %s' % res)\r\n self._switched_item = None\r\n return res", "def state(self):\n if \"state\" in self._prop_dict:\n return self._prop_dict[\"state\"]\n else:\n return None", "def state(self):\n if \"state\" in self._prop_dict:\n return self._prop_dict[\"state\"]\n else:\n return None", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state" ]
[ "0.63567495", "0.63567495", "0.63567495", "0.6147721", "0.6147721", "0.6143838", "0.61314815", "0.60985714", "0.60985714", "0.60985714", "0.5938411", "0.5815865", "0.5792894", "0.57494944", "0.57324725", "0.57256", "0.5721121", "0.5721121", "0.5684823", "0.5684823", "0.5684823", "0.5684823", "0.5684823", "0.5684823", "0.5684823", "0.5684823", "0.5684823", "0.5684823", "0.5684823", "0.5684823" ]
0.6427416
0
Interleave bits from two sort keys to form a joint sort key. Examples that are similar in both of the provided keys will have similar values for the key defined by this function. Useful for tasks with two text fields like machine translation or natural language inference.
def interleave_keys(a, b): def interleave(args): return ''.join([x for t in zip(*args) for x in t]) return int(''.join(interleave(format(x, '016b') for x in (a, b))), base=2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addKey(s1, s2): \r\n return [i ^ j for i, j in zip(s1, s2)]", "def concatKey(str1,str2):\n return concat(concat(str1, '_'), str2)", "def inner_join(sorted1, sorted2, key1, key2):\n p1 = 0\n p2 = 0\n result = []\n\n while (p1 < len(sorted1) and p2 < len(sorted2)):\n # if entries\n if sorted1[p1][key1] == sorted2[p2][key2]:\n entry = {}\n entry.update(sorted1[p1])\n entry.update(sorted2[p2])\n result.append(entry)\n p2 += 1\n elif sorted1[p1][key1] < sorted2[p2][key2]:\n p1 += 1\n elif sorted1[p1][key1] > sorted2[p2][key2]:\n p2 += 1\n return result", "def mpairs(seq1, seq2, key1, key2=None):\n key2 = key2 or key1\n\n seq1, seq2 = iter(seq1), iter(seq2)\n\n s1, s2 = next(seq1), next(seq2)\n k1, k2 = key1(s1), key2(s2)\n\n while True:\n try:\n if k1 == k2:\n yield (s1, s2)\n s1, s2 = next(seq1), next(seq2)\n k1, k2 = key1(s1), key2(s2)\n elif k1 < k2:\n s1 = next(dropwhile(lambda x: key1(x) < k2, seq1))\n k1 = key1(s1)\n else:\n s2 = next(dropwhile(lambda x: key2(x) < k1, seq2))\n k2 = key2(s2)\n\n except StopIteration:\n break", "def combine_keys(*keys: bytes) -> bytes:\n key = hashlib.sha3_512(keys[0]).digest()\n for k in keys[1:]:\n next_key = hashlib.sha3_512(k).digest()\n\n key = bytes([\n a ^ b\n for (a, b)\n in zip(key, next_key)\n ])\n return key", "def translate(word1, key, word2):\n key = dict(zip(word1, key))\n return ''.join(key[sym] for sym in word2)", "def gen_comparison_pairs(self, a, b, subset=None):\n # union of the keys of the two records\n # the ordering of the first record takes precedence\n # an alternative option would be to sort them, lexicographically or with a custom criteria\n keys_union = {**a, **b}.keys()\n\n if subset:\n keys_comp_a_b = self.gen_comparison_keys_subset(subset)\n else:\n keys_comp_a_b = self.gen_comparison_keys_common(keys_union)\n\n for key_comp, key_a, key_b in keys_comp_a_b:\n yield key_comp, (a.get(key_a, {}), b.get(key_b, {}))", "def xor_compare(bin1, bin2):\n return '{0:0{1}b}'.format(int(bin1,2) ^ int(proper_key(bin2, len(bin1)), 2), len(bin1))", "def _key_func_1(entry: tuple[str, list]) -> tuple[tuple[int, str], str]:\n key, (_targets, _sub_items, category_key) = entry\n if category_key:\n # using the specified category key to sort\n key = category_key\n lc_key = unicodedata.normalize('NFD', key.lower())\n if lc_key.startswith('\\N{RIGHT-TO-LEFT MARK}'):\n lc_key = lc_key[1:]\n\n if not lc_key[0:1].isalpha() and not lc_key.startswith('_'):\n # put symbols at the front of the index (0)\n group = 0\n else:\n # put non-symbol characters at the following group (1)\n group = 1\n # ensure a deterministic order *within* letters by also sorting on\n # the entry itself\n return (group, lc_key), entry[0]", "def add2chain2nd(splitted_text):\n # Our key is the unique occurrence of a pair of words\n inputText = splitted_text\n if len(inputText) > 1:\n for i, word in enumerate(inputText):\n if i == 0: # Chaining the first and second word in tweet to start key\n if (None, startKey) not in chain:\n chain[(None, startKey)] = [word]\n else:\n chain[(None, startKey)].append(word)\n elif i == 1:\n if (startKey,inputText[i-1]) not in chain:\n chain[(startKey,inputText[i-1])] = [word]\n else:\n chain[(startKey,inputText[i-1])].append(word)\n else:\n if (inputText[i-2],inputText[i-1]) not in chain:\n chain[(inputText[i-2],inputText[i-1])] = [word]\n else:\n chain[(inputText[i-2],inputText[i-1])].append(word)\n if i == len(inputText)-1: # Use last two words as key to end\n if (inputText[i-1],word) not in chain:\n chain[(inputText[i-1],word)] = [endKey]\n 
else:\n chain[(inputText[i-1],word)].append(endKey)\n if (None,startKey) not in chain:\n chain[(None,startKey)] = [inputText[0]]\n else:\n chain[(None,startKey)].append(inputText[0])\n if (inputText[0],endKey) not in chain:\n chain[(inputText[0],endKey)] = [None]\n else:\n chain[(inputText[0],endKey)].append(None)", "def key_ordenation(tupla):\n\n return tupla[0]", "def interleave(one, other):\r\n\r\n inter = \"\"\r\n for i in range(len(one)):\r\n inter = inter + (one[i] + other[i])\r\n return inter", "def MakeKey(self, string, string_1, string_2):\n ...", "def _construct_key(previous_key, separator, new_key):\n if previous_key:\n return u\"{}{}{}\".format(previous_key, separator, new_key)\n else:\n return new_key", "def shared_words(d1, d2):\n shared_keys = []\n d1_keys= d1.keys()\n i=0\n while i < len(d1):\n if d1_keys[i] in d2.keys():\n shared_keys.append(d1_keys[i])\n i+=1\n\n sorted_shared = sorted(shared_keys, key = len, reverse=True)\n\n if len(sorted_shared) > 20 :\n return sorted_shared[:20]\n return sorted_shared[0:]", "def natsort_key(s):\n # key consists of triplets (type:int, magnitude:int, value:str)\n key = []\n if '~' in s:\n s = s.replace('~', '\\0')\n for frag in _rc.findall(s):\n if frag < '0':\n key.extend((1, 0, frag + '\\1'))\n elif frag < '1':\n key.extend((2, len(frag.lstrip('0')) - len(frag), frag))\n elif frag < ':':\n key.extend((2, len(frag), frag))\n else:\n key.extend((3, 0, frag + '\\1'))\n if not key or key[-3] == 2:\n key.extend((1, 0, '\\1'))\n return tuple(key)", "def __swap_kv(self, node1, node2):\r\n node1.key, node2.key = node2.key, node1.key\r\n node1.value, node2.value = node2.value, node1.value", "def __generate_key_from_list_of(self, list_of_keys):\r\n list_of_keys = list(list_of_keys)\r\n list_of_keys.sort()\r\n return \",\".join(list_of_keys)", "def join_params(**params):\n\tparam_list = get_sorted_keys(params)\n\tvalues = []\n\tfor k in param_list:\n\t\tvalues.append(k+'-'+join_items(params[k]))\n\treturn \"_\".join(values)", "def variant_key_set_merge(k, v_left, v_right, ordering=None):\n out_v = set(v_left) & set(v_right)\n return sorted(out_v, key=partial(_version_order, ordering=ordering))", "def sorter(a, b):\n ret = 0\n if isinstance(a, list):\n for key in args.sort:\n if key >= len(a):\n ret = -1\n break\n elif key >= len(b):\n ret = 1\n break\n elif a[key] != b[key]:\n ret = cmp(to_numeric(a[key]), to_numeric(b[key]))\n break\n else:\n for key in args.sort:\n if (key not in a) and (key in b):\n ret = -1\n break\n elif (key in a) and (key not in b):\n ret = 1\n break\n elif (key in a) and (key in b) and (a[key] != b[key]):\n ret = cmp(to_numeric(a[key]), to_numeric(b[key]))\n break\n return ret", "def linear_merge(sorted1, sorted2):\n first_pointer = 0\n second_pointer = 0\n sorted_result = []\n\n while second_pointer < len(sorted2) and first_pointer < len(sorted1):\n if sorted1[first_pointer] < sorted2[second_pointer]:\n sorted_result.append(sorted1[first_pointer])\n first_pointer += 1\n else:\n sorted_result.append(sorted2[second_pointer])\n second_pointer += 1\n\n while second_pointer < len(sorted2):\n sorted_result.append(sorted2[second_pointer])\n second_pointer += 1\n\n while first_pointer < len(sorted1):\n sorted_result.append(sorted1[first_pointer])\n first_pointer += 1\n\n\n return sorted_result", "def _tokey(self, keys: Union[str, Iterable]):\n if hasattr(keys, \"encode\"): # str\n return keys.encode(\"utf-8\")\n elif hasattr(keys, \"decode\"): # bytes\n return keys\n return (self.Sep.join(keys).encode(\"utf-8\"))", "def 
keysort(*args, **kwargs): # real signature unknown\n pass", "def zip_args(keys, values):\n if len(values) < len(keys):\n raise ValueError('not enough values to zip')\n\n if len(values) > len(keys):\n offset = len(keys) - 1\n values[offset:] = [' '.join(values[offset:])]\n\n return dict(zip(keys, values))", "def join_duplicate_keys(ordered_pairs):\n d = {}\n for k, v in ordered_pairs:\n if k in d:\n if isinstance(d[k], list):\n d[k].append(v)\n else:\n newlist = []\n newlist.append(d[k])\n newlist.append(v)\n d[k] = newlist\n else:\n d[k] = v\n return d", "def __compound_key(key):\n x_int = int(key[0])\n y_int = int(key[1])\n zeros = len(str(y_int))\n key = x_int * (10 ** zeros) + y_int\n\n return key", "def _compare_and_swap(self, element0, element1, key):\n if key(element0) > key(element1):\n mapped_id0 = element0.current_column + element0.current_row * self.num_columns\n mapped_id1 = element1.current_column + element1.current_row * self.num_columns\n swap_operation = (mapped_id0, mapped_id1)\n # swap elements but update also current position:\n tmp_0 = element0.final_row\n tmp_1 = element0.final_column\n tmp_2 = element0.row_after_step_1\n element0.final_row = element1.final_row\n element0.final_column = element1.final_column\n element0.row_after_step_1 = element1.row_after_step_1\n element1.final_row = tmp_0\n element1.final_column = tmp_1\n element1.row_after_step_1 = tmp_2\n return swap_operation\n return None", "def merge_extras(extras1, extras2):\n if not extras1:\n return extras2\n if not extras2:\n return extras1\n return tuple(sorted(set(extras1) | set(extras2)))", "def _custom_sorter(self, key1, key2):\n\n col = self._col\n ascending = self._colSortFlag[col]\n real = self.get_real_col(col)\n item1 = self.itemDataMap[key1][real]\n item2 = self.itemDataMap[key2][real]\n\n # Internationalization of string sorting with locale module\n if isinstance(item1, str) and isinstance(item2, str):\n cmpVal = locale.strcoll(item1, item2)\n elif isinstance(item1, bytes) or isinstance(item2, bytes):\n cmpVal = locale.strcoll(str(item1), str(item2))\n else:\n cmpVal = cmp(item1, item2)\n\n # If the items are equal, then pick something else to make the sort value unique\n if cmpVal == 0:\n cmpVal = cmp(*self.GetSecondarySortValues(col, key1, key2))\n\n if ascending:\n return cmpVal\n else:\n return -cmpVal" ]
[ "0.61941266", "0.61463404", "0.5867205", "0.5854237", "0.57316273", "0.57308954", "0.56912524", "0.56198055", "0.5488593", "0.53060853", "0.53048044", "0.52917767", "0.5274578", "0.5135413", "0.5129714", "0.5127249", "0.5121423", "0.51084983", "0.5099582", "0.50963557", "0.5085391", "0.5084217", "0.5069561", "0.50641394", "0.5050079", "0.50408864", "0.5038692", "0.50311685", "0.50251824", "0.501429" ]
0.7362512
0
verifica se tem o link para voltar para a listagem de animais
def test_animais_list_link(self): PropriedadeUser.objects.create(propriedade=self.propriedade1, user=self.user1, owner=True) login = self.client.login(username='user1', password='12345') response = self.client.get(reverse('animal_pesagem_form', kwargs={'animal_pk': self.animal.pk,})) expected = 'href="{}"'.format(reverse('animais_list', kwargs={'propriedade_pk': self.animal.propriedade.pk,})) self.assertContains(response, expected)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getLink(self):", "def add_link():\n return True", "def requestShowLink(self, *args, **kwargs): # real signature unknown\n pass", "def is_valid_listings(link):\n if link.has_attr(\"href\") and link.attrs[\"href\"].startswith(LISTING_PREFIX):\n return True\n return False", "def remove_link():", "def format_link(self):\n self.url = sys.argv[1]\n video_link_regex = re.compile(\n r'(https?://)?(www\\.)?youtube\\.(com|nl)/watch\\?v=([\\w-]+)')\n playlist_link_regex = re.compile(\n r'(https?://)?(www\\.)?youtube\\.(com|nl)/playlist\\?list=([\\w-]+)')\n # check if it's a single video link\n if video_link_regex.search(self.url):\n result_regex = video_link_regex.search(self.url)\n self. url = result_regex.group().split('&')[0]\n self.show_formats()\n # check if it's a playlist link\n elif playlist_link_regex.search(self.url):\n logging. debug('Yes it a playlist')\n result_regex = playlist_link_regex.search(self.url)\n playlist_link = result_regex.group().split('&')[0]\n self. get_videos_in_playlist()\n # check if link is not a youtube link\n else:\n logging.debug('Not even a yt link')\n sys. exit()", "def ShowHTML(pTitle, href):\n\n oc = ObjectContainer(title2=pTitle)\n\n href = href if href else ''\n html = HTML.ElementFromURL(BASE_URL + href)\n\n if '/pornstars-click/' in href:\n href = '/profiles/' + href.rsplit('/', 1)[1]\n url = BASE_URL + href\n\n xvideosBest = \"thumb-block \"\n if (len(html.xpath('//div[@class=\"thumbBlock\"]')) > 0):\n xvideosBest = \"thumbBlock\"\n\n if (len(html.xpath('//title//text()')) > 0):\n if 'Pornstar page' in html.xpath('//title//text()')[0]:\n url = url + '/pornstar_videos/0/0'\n html = HTML.ElementFromURL(url)\n elif 'Channel page' in html.xpath('//title//text()')[0]:\n url = url + '/uploads/0/0'\n html = HTML.ElementFromURL(url)\n\n for video in html.xpath('//div[@class=\"%s\"]' %xvideosBest):\n try:\n if '/profiles/' not in url and '/pornstars-click' not in url:\n if (len(video.xpath('./div/div/a//@href')) == 0):\n oc.add(VideoClipObject(\n url=BASE_URL + video.xpath('./p/a//@href')[0],\n title=video.xpath('./p/a//text()')[0],\n thumb=THUMB_REG.search(video.xpath('./div/div/script//text()')[0]).group(1)\n ))\n else:\n vhref = video.xpath('./p/a//@href')[0]\n vtitle = video.xpath('./p/a//text()')[0]\n oc.add(DirectoryObject(\n key=Callback(ShowHTML, href=vhref, pTitle=vtitle),\n title=vtitle, thumb=THUMB_REG.search(video.xpath('./div/div/a/script//text()')[0]).group(1)\n ))\n else:\n oc.add(VideoClipObject(\n url=BASE_URL + video.xpath('./div/p/a//@href')[0],\n title=video.xpath('./div/p/a//text()')[0],\n thumb=video.xpath('./div/div/a/img//@src')[0]\n ))\n except:\n Log.Warn('nothing')\n\n # setup nextURL\n try:\n nextURL = None\n if html.xpath('//li/a[@data-page][text()=\"Next\"]'):\n next_page = int(html.xpath('//li/a[text()=\"Next\"]/@data-page')[0])\n nextURL = '/{}/{}'.format(url.split('/', 3)[3].rsplit('/', 1)[0], next_page)\n elif html.xpath('//li/a[@class=\"no-page\"][text()=\"Next\"]'):\n nextURL = html.xpath('//li/a[@class=\"no-page\"][text()=\"Next\"]/@href')[0]\n elif html.xpath('//div[contains(@class,\"pagination\")]//a[@class=\"active\"]/../following-sibling::li/a/@href'):\n nextURL = html.xpath(\"//div[contains(@class,'pagination')]/ul/li/a[@class='active']/../following-sibling::li/a/@href\")[0]\n\n if nextURL:\n next_page_num = nextURL.split('=')[-1] if '&' in nextURL else nextURL.split('/')[-1]\n next_page_num = next_page_num if next_page_num else nextURL.split('/')[-2]\n #Log(u\"next page number = '{}'\".format(next_page_num))\n 
oc.add(NextPageObject(\n key=Callback(ShowHTML, href=nextURL, pTitle='Page ' + next_page_num),\n title=\"More ...\"))\n except:\n Log.Exception(\"Cannot find next page\")\n # it will loop through and return the values for all items in the page\n return oc", "def needToLink(self):\n return _osgAnimation.AnimationManagerBase_needToLink(self)", "def getVotacion(self, url):", "def exactor_links(self, response: BeautifulSoup):\n raise NotImplementedError", "def _link_clicked(self, href):\n\n self.main_frame.load(href)", "def iter_links(self):", "async def cmd_galremlinkuwl(self, ctx):\n links = re.findall(r\"(?P<url>http[s]?://[^\\s]+)\", ctx.message.content)\n\n if not links:\n await ctx.channel.send('Useage: [p]galremlinkuwl <startoflink>, [Bot Owner] Removes a link from gallery link whitelist.')\n\n # ===== REMOVE THE LINKS FROM THE LIST\n new_gal_link_wl = list(set(self.cogset['link_wl']) - set(links))\n\n if Gallery.compare(new_gal_link_wl, self.cogset['link_wl']):\n await ctx.channel.send(content=\"{}\\n are not in the gallery link whitelist.\".format('\\n'.join(links)), delete_after=Gallery.delete_after)\n return \n \n else:\n self.cogset['link_wl'] = new_gal_link_wl\n\n # ===== WRITE TO THE DATABASE\n await cogset.SAVE(self.cogset, cogname=self.qualified_name)\n\n # ===== RETURN\n await ctx.channel.send(content=\"{}\\n have been removed from the gallery link whitelist.\".format('\\n'.join(links)), delete_after=Gallery.delete_after)\n return", "def followlink(self, event):\n webbrowser.open(self.url)", "async def ig(self, ctx, url):\n response = requests.get(url.replace(\"`\", \"\"), headers={\"Accept-Encoding\": \"utf-8\"})\n soup = BeautifulSoup(response.text, 'html.parser')\n script = soup.find_all('script')\n sources = []\n found_date = False\n post_date = None\n for i in range(len(script)):\n urls = re.findall('\"display_url\":\"(.*?)\"', script[i].text)\n if urls:\n sources = urls\n if not found_date:\n try:\n data = json.loads(script[i].text, encoding='utf-8')\n datestring = data.get('uploadDate')\n post_date = datetime.datetime.strptime(datestring, \"%Y-%m-%dT%H:%M:%S\")\n found_date = True\n except json.JSONDecodeError:\n pass\n sources = list(set(sources))\n\n date = re.findall('<script type=\"application/ld+json\">(.*?)</script>', response.text)\n print(date)\n\n if sources:\n content = discord.Embed(title=soup.title.string, url=url)\n if post_date is not None:\n content.timestamp = post_date\n for url in sources:\n content.set_image(url=url)\n await ctx.send(embed=content)\n self.logger.info(misolog.format_log(ctx, f\"Success\"))\n else:\n await ctx.send(\"Found nothing, sorry!\")\n self.logger.warning(misolog.format_log(ctx, f\"Found nothing\"))", "def check_link(self, link, links_para):\n href = link['href']\n if not href.startswith('/wiki/') or href == '/wiki/Latin' or href.startswith('#'):\n return False\n if \"<i>\" in link or href in links_para:\n return False\n title = href[6:]\n if title.startswith('Help:') or title.startswith('File:') or title.endswith('.ogg') or title.startswith('Wikipedia:'):\n return False\n return True", "def scraper_voto(self):\n\n #per trovare il link a fantacalcio.it devo prima trovare il link della squadra e trovare il suo nome\n soup_rosa = BeautifulSoup(\n requests.get(f\"{self.LINK_FANTACALCIO_IT}/{self.team}#rosa\").text,\n \"html.parser\",\n )\n print(self.name)\n\n displayed_name = self.name\n if displayed_name == \"Coulibaly\": # caso estremo, il sito si confonde\n displayed_name = \"Coulibaly M.\"\n\n # trovo il link personale del 
giocatore e glielo assegno\n link = soup_rosa.find(\"a\", text=displayed_name.upper())[\"href\"]\n self.scheda_giocatore = link\n\n # leggo voto e media voto\n soup = BeautifulSoup(requests.get(link).text, \"html.parser\")\n\n self.media_voto = float(soup.find_all(class_=\"nbig2\")[0].text.replace(\",\", \".\"))\n self.media_fantavoto = float(\n soup.find_all(class_=\"nbig2\")[1].text.replace(\",\", \".\")\n )\n\n # leggo anche il ruolodalla schedina delle info\n infos = soup.find_all(class_=\"col-lg-6 col-md-6 col-sm-12 col-xs-12\")[-2]\n self.ruolo = str(infos.find(\"span\").text)\n\n # compilo i dati: partite, gol e assist\n dati_partite = soup.find_all(class_=\"nbig\")\n\n partite = \"🥅 \" + dati_partite[0].text\n # i portieri hanno statistiche diverse!\n if self.ruolo == \"P\":\n goal = \"❌ \" + dati_partite[1].text\n self.dati = \"<br>\".join([partite, goal])\n else:\n goal = \"⚽ \" + dati_partite[1].text\n assist = \"👟 \" + dati_partite[2].text\n self.dati = \"<br>\".join([partite, goal, assist])\n\n # aggiungo stellina al nome se hanno una bella media voto\n if self.media_fantavoto > 7:\n self.name += \" ⭐\"", "def play_url(url, name):\n video_url = scraper.get_media_url(url)\n if video_url == -1:\n GUI.info_box(u\"Vesen\", u\"Fann ekki upptöku\")\n else:\n player.play(video_url, name)", "def getLinks(tvshow, season, episode):\n numPage = 1\n possible_links = []\n doNext = True\n while(doNext):\n urltv = getTvShowUrl(tvshow, season, episode, numPage)\n src_urltv = getPage(urltv)\n if (src_urltv == -1):\n return possible_links\n npage = False\n for line in src_urltv:\n if (\"next_page\" in line):\n npage = True\n if (\"disabled next_page\" in line):\n doNext = False\n for nameModule in sidereel_mod.__all__:\n realName = sidereel_mod.__all2__[nameModule]\n if ((realName in line) and ('data-viewable-url') in line):\n possible_links.append([line.split('\"')[5], \\\n \"sidereel_mod.\" + nameModule])\n numPage += 1\n if (npage == False):\n doNext = False\n return possible_links\n \n \n \n\n ## liste=[]\n ## for i in sidereel_mod.__all__:\n ## __import__(\"aggregators.sidereel_mod.\" + i)\n ## liste += sys.modules[\"aggregators.sidereel_mod.\"+i].getFlv(a)\n ## return liste", "def dod():\n file = requests.get(\"https://www.bewakoof.com/design-of-the-day\")\n soup = bs4.BeautifulSoup(file.text, \"lxml\")\n # print(soup)\n\n linkList = soup.select(\"a[class='col-sm-4 col-xs-6'] > div > div > div > img:nth-of-type(2)]\")\n # soup.select(\"div[id=foo] > div > div > div[class=fee] > span > span > a\")\n for i in linkList:\n if \"t-shirt-men\" in str(i):\n # print(i.get('src'))\n webbrowser.open(i.get('src'))", "def video_link_collector(self, count):\n pass", "def link_click(_):\r\n\r\n tag_name = about_content.tag_names(tkinter.CURRENT)[0]\r\n about_content.tag_config(tag_name, foreground=\"#551A8B\")\r\n if tag_name == 'hyper':\r\n webbrowser.open(\"https://www.facebook.com/nihal.agarwal.14\")\r\n else:\r\n webbrowser.open(\"https://github.com/NihalAgarwal/Windows-Wi-Fi-Manager\")", "def _parse_links(self, item, start, links_list):\n result_list = []\n target_str_1 = start.strftime(\"%m-%d-%Y\").replace(\" 0\", \" \")\n target_str_2 = start.strftime(\"%m-%d-%y\").replace(\" 0\", \" \")\n for item in links_list:\n if item[\"date\"] in target_str_1 or item[\"date\"] in target_str_2:\n new_dict = {}\n new_dict[\"href\"] = item[\"href\"]\n new_dict[\"title\"] = item[\"title\"]\n result_list.append(new_dict)\n return result_list", "def _parse_links(self, response, start):\n links = 
self.document_date_map[start.date()]\n for link in response.css(\".agenda-min-pres .field a\"):\n link_url = response.urljoin(link.xpath(\"@href\").extract_first())\n title = link.xpath(\"./text()\").extract_first()\n if title.strip().startswith(\"Agenda\"):\n title = \"Agenda\"\n links.append(\n {\"title\": re.sub(r\"\\s+\", \" \", title).strip(), \"href\": link_url}\n )\n return links", "def _get_links(self, from_year):\n self.links = []\n self.titles = []\n self.speakers = []\n self.dates = []\n\n r = requests.get(self.calendar_url)\n soup = BeautifulSoup(r.text, \"html.parser\")\n\n if self.verbose:\n print(\"Getting links for press conference scripts...\")\n presconfs = soup.find_all(\n \"a\", href=re.compile(\"^/monetarypolicy/fomcpresconf\\d{8}.htm\")\n )\n presconf_urls = [\n self.base_url + presconf.attrs[\"href\"] for presconf in presconfs\n ]\n for presconf_url in presconf_urls:\n r_presconf = requests.get(presconf_url)\n soup_presconf = BeautifulSoup(r_presconf.text, \"html.parser\")\n contents = soup_presconf.find_all(\n \"a\", href=re.compile(\"^/mediacenter/files/FOMCpresconf\\d{8}.pdf\")\n )\n for content in contents:\n # print(content)\n self.links.append(content.attrs[\"href\"])\n self.speakers.append(\n self._speaker_from_date(self._date_from_link(content.attrs[\"href\"]))\n )\n self.titles.append(\"FOMC Press Conference Transcript\")\n self.dates.append(\n datetime.strptime(\n self._date_from_link(content.attrs[\"href\"]), \"%Y-%m-%d\"\n )\n )\n if self.verbose:\n print(\"{} links found in current page.\".format(len(self.links)))\n\n # Archived before 2015\n if from_year <= 2014:\n print(\"Getting links from archive pages...\")\n for year in range(from_year, 2015):\n yearly_contents = []\n fomc_yearly_url = (\n self.base_url\n + \"/monetarypolicy/fomchistorical\"\n + str(year)\n + \".htm\"\n )\n r_year = requests.get(fomc_yearly_url)\n soup_yearly = BeautifulSoup(r_year.text, \"html.parser\")\n\n presconf_hists = soup_yearly.find_all(\n \"a\", href=re.compile(\"^/monetarypolicy/fomcpresconf\\d{8}.htm\")\n )\n presconf_hist_urls = [\n self.base_url + presconf_hist.attrs[\"href\"]\n for presconf_hist in presconf_hists\n ]\n for presconf_hist_url in presconf_hist_urls:\n # print(presconf_hist_url)\n r_presconf_hist = requests.get(presconf_hist_url)\n soup_presconf_hist = BeautifulSoup(\n r_presconf_hist.text, \"html.parser\"\n )\n yearly_contents = soup_presconf_hist.find_all(\n \"a\",\n href=re.compile(\"^/mediacenter/files/FOMCpresconf\\d{8}.pdf\"),\n )\n for yearly_content in yearly_contents:\n # print(yearly_content)\n self.links.append(yearly_content.attrs[\"href\"])\n self.speakers.append(\n self._speaker_from_date(\n self._date_from_link(yearly_content.attrs[\"href\"])\n )\n )\n self.titles.append(\"FOMC Press Conference Transcript\")\n self.dates.append(\n datetime.strptime(\n self._date_from_link(yearly_content.attrs[\"href\"]),\n \"%Y-%m-%d\",\n )\n )\n if self.verbose:\n print(\n \"YEAR: {} - {} links found.\".format(\n year, len(presconf_hist_urls)\n )\n )\n print(\"There are total \", len(self.links), \" links for \", self.content_type)", "def link(self, obj):\n return format_html(\n '<a href=\"{url}\">{url}</a>',\n url='https://sms.cam.ac.uk/media/{}'.format(obj.id)\n )", "async def imgran(self):\r\n search=\"random\"\r\n search = client.gallery()\r\n holder=[]\r\n for d in search:\r\n holder.append(d.link)\r\n await self.bot.say(random.choice(holder))", "def test_link_is_tracked_false_archive(self):\n 
self.assertFalse(link_is_tracked(\"https://web.archive.org/https://test.com/\"))", "async def link_to(self, *args):\n pass", "def test_link_list(self):\n response = self.client.get('/tests/dashboard/')\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"example.com\")" ]
[ "0.5543123", "0.5494667", "0.53836703", "0.5344362", "0.52966595", "0.5271885", "0.52616286", "0.5257237", "0.5244915", "0.5165959", "0.51506627", "0.5111753", "0.50975", "0.5086898", "0.5062339", "0.5051371", "0.50483936", "0.5039112", "0.50307196", "0.49970207", "0.49806777", "0.49797776", "0.49632186", "0.49626932", "0.49493825", "0.49292612", "0.4922194", "0.4920745", "0.49190366", "0.4912568" ]
0.5964893
0
verifica os textos no html
def test_textos_no_html(self): PropriedadeUser.objects.create(propriedade=self.propriedade1, user=self.user1, owner=True) login = self.client.login(username='user1', password='12345') response = self.client.get(reverse('animal_pesagem_form', kwargs={'animal_pk': self.animal.pk})) contents = [ 'Peso', 'Data', # Pesagem animal brinco A123456, / Propriedade Fazenda Vera Cruz: 61.675.372/0001-02 'Pesagens:', 'Pesagem animal brinco', # / Propriedade Fazenda Vera Cruz: 61.675.372/0001-02', 'A123456', 'Propriedade Fazenda Vera Cruz: 61.675.372/0001-02', ] for expected in contents: with self.subTest(): self.assertContains(response, expected)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def is_text( self ):\n return self.get_main_type() == 'text'", "def obtain_text():\n pass", "def hasRawText(self, text):\n r = re.compile(r'<(p|blockquote|div|form|table|ul|ol|dl|pre|h\\d)[^>]*?>.*</\\1>',\n re.S).sub('', text.strip()).strip()\n r = re.compile(r'<(hr|br)[^>]*?/>').sub('', r)\n return '' != r", "def has_text(self, page: fitz.Page) -> bool:\n return page.get_text(clip=page.trimbox).strip() != \"\"", "def test_text(self):\r\n self.assertHtmlEqual(OpenEndedChild.sanitize_html(self.text), self.text)", "def is_plain_text(self):\n return self._tag == 'plain_text'", "def _is_text_tag(tag):\n return tag.name not in ['script', 'style']", "def clean_content(self) -> str:", "def hasContents():", "def get_text(self):", "def _html_text(self, html):\n ee = None\n try: return html.html_text()\n except Exception, e: ee = e; pass\n try: return html.xml_text()\n except Exception, e: print \"HtmlDocument/text\", ee, e; pass\n try: return str(html)\n except Exception, e: print \"HtmlDocument/text\", e; return \"&nbsp;\"", "def get_text(downgrade_titles=False):", "def plain(self):\n return not self.html", "def _get_plain_text(self, url, soup, site):\n print('Get plaint text: ' + url)\n title = str(soup.find(class_=self._title_tags[site]))\n content = str(soup.find(class_=self._content_tags[site]))\n # h = html2text.HTML2Text() # uncomment this segment of code\n # h.ignore_links = True # if you want to get plain text\n # h.ignore_images = True\n # title = h.handle(title)\n # content = h.handle(content)\n if title == None or content == None:\n print('Different website structure: ' + url)\n return ''\n return self._clean(title + content, no_punc=True) # with symbols\n # return title + content # without symbols", "def ISNONTEXT(value):\n return not ISTEXT(value)", "def test_get_texts_ignores():\n file_map = sd.get_file_map(\".\")\n texts = sd.get_texts(file_map)\n ingnores = \"[:.,;:!?\\\"-()]\\n\".split()\n for text in texts:\n for char in ingnores:\n assert text.find(char) == -1", "def remove_html( html):\n return html2txt(html)", "def _hidden_in_unicode(self, txt):", "def remove_extra_text(self, text):\n if text:\n parsed_text = text\n if parsed_text.find('== Referencias ==') > 0:\n parsed_text = parsed_text[:parsed_text.find('== Referencias ==\\n')]\n if parsed_text.find('== Fuentes ==') > 0:\n parsed_text = parsed_text[:parsed_text.find('== Fuentes ==\\n')]\n if parsed_text.find('== Fuente ==') > 0:\n parsed_text = parsed_text[:parsed_text.find('== Fuente ==\\n')]\n if parsed_text.find('== Ver también =='.decode('utf-8')) > 0:\n parsed_text = parsed_text[:parsed_text.find('== Ver también ==\\n'.decode('utf-8'))]\n if parsed_text.find(\"== Noticia relacionada ==\".decode(\"utf-8\")) > 0:\n parsed_text = parsed_text[:parsed_text.find(\"== Noticia relacionada ==\".decode('utf-8'))]\n if parsed_text.find(\"== Artículos relacionados ==\".decode(\"utf-8\")) > 0:\n parsed_text = parsed_text[:parsed_text.find(\"== Artículos relacionados ==\".decode('utf-8'))]\n if parsed_text.find(\"== Enlace externo ==\".decode(\"utf-8\")) > 0:\n parsed_text = parsed_text[:parsed_text.find(\"== Enlace externo ==\".decode('utf-8'))]\n if parsed_text.find(\"== Enlaces externos ==\".decode(\"utf-8\")) > 0:\n parsed_text = parsed_text[:parsed_text.find(\"== Enlaces externos ==\".decode('utf-8'))]\n parsed_text = parsed_text.replace('ABr)', '')\n return parsed_text", "def textfrombodies(self) -> str:\n type_priority = [\"plain\", 
\"html\", \"other\"] # TODO: Make configurable\n\n for texttype in type_priority:\n if texttype == \"plain\" and texttype in self.textbodies:\n \"\"\"Text is plain, so it can be used verbatim\"\"\"\n return self.textbodies[texttype]\n if texttype == \"html\" and texttype in self.textbodies:\n \"\"\"HTML text. Convert to markup with html2text and remove extra spaces\"\"\"\n text = html2text.html2text(self.textbodies[texttype])\n # Remove every second newline which is added to distinguish between paragraphs in Markdown, but makes\n # the jira ticket hard to read.\n return re.sub(\"(\\n.*?)\\n\", \"\\g<1>\", text)\n if texttype == \"other\" and len(self.textbodies):\n # If no other text is found, return the first available body if any.\n return self.textbodies[list(self.textbodies.keys())[0]]\n return \"The email contained no text bodies.\"", "def txt(input):\n output=atpic.cleaner_alex.txtclean(input)\n return output", "def rich(text):\n return full(text, False)", "def verify_text(self, text):\n pass", "def test_lessthan(self):\r\n self.assertHtmlEqual(OpenEndedChild.sanitize_html(self.text_lessthan_noencd), self.text_lessthan_encode)", "def test__markHTML_textOnly1(self):\n self._degrotesque._restoreDefaultElementsToSkip()\n assert(self._degrotesque._markHTML(\"Hallo\")==\"00000\")" ]
[ "0.675838", "0.675838", "0.675838", "0.675838", "0.675838", "0.66166764", "0.6557563", "0.65164775", "0.6472427", "0.64069116", "0.6390485", "0.6258951", "0.6230542", "0.6212179", "0.6210339", "0.6207658", "0.6206313", "0.6205104", "0.62009335", "0.61976314", "0.6178452", "0.6170501", "0.61488056", "0.6148319", "0.6124201", "0.6121526", "0.6109759", "0.6082724", "0.60412836", "0.6039718" ]
0.6883659
0